name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
flink_RocksDBNativeMetricOptions_enableBlockCachePinnedUsage_rdh | /**
* Returns the memory size for the entries being pinned in block cache.
*/
public void enableBlockCachePinnedUsage() {
this.properties.add(RocksDBProperty.BlockCachePinnedUsage.getRocksDBProperty());
} | 3.26 |
flink_RocksDBNativeMetricOptions_getProperties_rdh | /**
*
* @return the enabled RocksDB property-based metrics
*/
public Collection<String> getProperties() {
return Collections.unmodifiableCollection(properties);
} | 3.26 |
flink_RocksDBNativeMetricOptions_enableNumDeletesActiveMemTable_rdh | /**
* Returns total number of delete entries in the active memtable.
*/
public void enableNumDeletesActiveMemTable() {
this.properties.add(RocksDBProperty.NumDeletesActiveMemTable.getRocksDBProperty());
} | 3.26 |
flink_RocksDBNativeMetricOptions_enableNumEntriesActiveMemTable_rdh | /**
* Returns total number of entries in the active memtable.
*/
public void enableNumEntriesActiveMemTable() {
this.properties.add(RocksDBProperty.NumEntriesActiveMemTable.getRocksDBProperty());
} | 3.26 |
flink_RocksDBNativeMetricOptions_enableIsWriteStopped_rdh | /**
* Returns 1 if write has been stopped.
*/
public void enableIsWriteStopped() {
this.properties.add(RocksDBProperty.IsWriteStopped.getRocksDBProperty());
} | 3.26 |
flink_RocksDBNativeMetricOptions_enableMemTableFlushPending_rdh | /**
* Returns 1 if a memtable flush is pending; otherwise, returns 0.
*/
public void enableMemTableFlushPending() {
this.properties.add(RocksDBProperty.MemTableFlushPending.getRocksDBProperty());
} | 3.26 |
flink_RocksDBNativeMetricOptions_enableNumSnapshots_rdh | /**
* Returns number of unreleased snapshots of the database.
*/
public void enableNumSnapshots() {
this.properties.add(RocksDBProperty.NumSnapshots.getRocksDBProperty());
} | 3.26 |
flink_RocksDBNativeMetricOptions_m2_rdh | /**
* Returns the current actual delayed write rate. 0 means no delay.
*/
public void m2() {
this.properties.add(RocksDBProperty.ActualDelayedWriteRate.getRocksDBProperty());
} | 3.26 |
flink_RocksDBNativeMetricOptions_enableNumRunningFlushes_rdh | /**
* Returns the number of currently running flushes.
*/
public void enableNumRunningFlushes() {
this.properties.add(RocksDBProperty.NumRunningFlushes.getRocksDBProperty());
} | 3.26 |
flink_RocksDBNativeMetricOptions_enableBlockCacheUsage_rdh | /**
* Returns the memory size for the entries residing in block cache.
*/
public void enableBlockCacheUsage() {
this.properties.add(RocksDBProperty.BlockCacheUsage.getRocksDBProperty());
} | 3.26 |
flink_RocksDBNativeMetricOptions_enableNumImmutableMemTable_rdh | /**
* Returns number of immutable memtables that have not yet been flushed.
*/
public void enableNumImmutableMemTable() {
this.properties.add(RocksDBProperty.NumImmutableMemTable.getRocksDBProperty());
} | 3.26 |
flink_RocksDBNativeMetricOptions_getMonitorTickerTypes_rdh | /**
*
* @return the enabled RocksDB statistics metrics.
*/
public Collection<TickerType> getMonitorTickerTypes() {
return Collections.unmodifiableCollection(monitorTickerTypes);
} | 3.26 |
flink_RocksDBNativeMetricOptions_m0_rdh | /**
* Returns approximate size of active memtable (bytes).
*/
public void m0() {
this.properties.add(RocksDBProperty.CurSizeActiveMemTable.getRocksDBProperty());
} | 3.26 |
flink_RocksDBNativeMetricOptions_isEnabled_rdh | /**
* {{@link RocksDBNativeMetricMonitor}} is enabled if any property or ticker type is set.
*
* @return true if {{RocksDBNativeMetricMonitor}} should be enabled, false otherwise.
*/
public boolean isEnabled() {
return (!properties.isEmpty()) || isStatisticsEnabled();
} | 3.26 |
flink_RocksDBNativeMetricOptions_isStatisticsEnabled_rdh | /**
*
* @return true if RocksDB statistics metrics are enabled, false otherwise.
*/
public boolean isStatisticsEnabled() {
return !monitorTickerTypes.isEmpty();
} | 3.26 |
flink_RocksDBNativeMetricOptions_fromConfig_rdh | /**
* Creates a {@link RocksDBNativeMetricOptions} based on an external configuration.
*/
public static RocksDBNativeMetricOptions fromConfig(ReadableConfig config) {
RocksDBNativeMetricOptions options = new RocksDBNativeMetricOptions();
configurePropertyMetrics(options, config);
configureStatisticsMetrics(options, config);
return options;
} | 3.26 |
flink_RocksDBNativeMetricOptions_m1_rdh | /**
* Returns approximate size of active, unflushed immutable, and pinned immutable memtables
* (bytes).
*/
public void m1() {
this.properties.add(RocksDBProperty.SizeAllMemTables.getRocksDBProperty());
} | 3.26 |
flink_RocksDBNativeMetricOptions_setColumnFamilyAsVariable_rdh | /**
 * Sets whether to expose the column family as a variable.
*/
public void setColumnFamilyAsVariable(boolean columnFamilyAsVariable) {
this.columnFamilyAsVariable = columnFamilyAsVariable;
} | 3.26 |
flink_RocksDBNativeMetricOptions_enableNumRunningCompactions_rdh | /**
* Returns the number of currently running compactions.
*/
public void enableNumRunningCompactions() {
this.properties.add(RocksDBProperty.NumRunningCompactions.getRocksDBProperty());
} | 3.26 |
flink_RocksDBNativeMetricOptions_enableNumDeletesImmMemTables_rdh | /**
* Returns total number of delete entries in the unflushed immutable memtables.
*/
public void enableNumDeletesImmMemTables() {
this.properties.add(RocksDBProperty.NumDeletesImmMemTables.getRocksDBProperty());
} | 3.26 |
flink_RocksDBNativeMetricOptions_enableBlockCacheCapacity_rdh | /**
* Returns block cache capacity.
*/
public void enableBlockCacheCapacity() {
this.properties.add(RocksDBProperty.BlockCacheCapacity.getRocksDBProperty());
} | 3.26 |
flink_RocksDBNativeMetricOptions_enableNumEntriesImmMemTables_rdh | /**
* Returns total number of entries in the unflushed immutable memtables.
*/
public void enableNumEntriesImmMemTables() {
this.properties.add(RocksDBProperty.NumEntriesImmMemTables.getRocksDBProperty());
} | 3.26 |
flink_PythonGatewayServer_main_rdh | /**
 * Main method to start a local GatewayServer on an ephemeral port. It tells the Python side about
 * the port via a file.
*
* <p>See: py4j.GatewayServer.main()
*/
public static void main(String[] args) throws IOException, ExecutionException, InterruptedException {
GatewayServer gatewayServer = PythonEnvUtils.startGatewayServer();
PythonEnvUtils.setGatewayServer(gatewayServer);
int boundPort = gatewayServer.getListeningPort();
Py4JPythonClient v2 = gatewayServer.getCallbackClient();
int callbackPort = v2.getPort();
if (boundPort == (-1)) {
System.out.println("GatewayServer failed to bind; exiting");
System.exit(1);
}
// Tells python side the port of our java rpc server
String handshakeFilePath = System.getenv("_PYFLINK_CONN_INFO_PATH");
File handshakeFile = new File(handshakeFilePath);
File tmpPath = Files.createTempFile(handshakeFile.getParentFile().toPath(), "connection", ".info").toFile();
FileOutputStream fileOutputStream = new FileOutputStream(tmpPath);
DataOutputStream stream = new DataOutputStream(fileOutputStream);
stream.writeInt(boundPort);
stream.writeInt(callbackPort);
stream.close();
fileOutputStream.close();
if (!tmpPath.renameTo(handshakeFile)) {
System.out.println(("Unable to write connection information to handshake file: " + handshakeFilePath) + ", now exit...");
System.exit(1);
}
try {
// This ensures that the server dies if its parent program dies.
Map<String, Object> entryPoint = ((Map<String, Object>) (gatewayServer.getGateway().getEntryPoint()));
for (int i = 0; i < (TIMEOUT_MILLIS / CHECK_INTERVAL); i++) {
if (entryPoint.containsKey("Watchdog")) {
break;
}
Thread.sleep(CHECK_INTERVAL);
}
if (!entryPoint.containsKey("Watchdog")) {
System.out.println("Unable to get the Python watchdog object, now exit.");
System.exit(1);
}
Watchdog watchdog = ((Watchdog) (entryPoint.get("Watchdog")));
while (watchdog.ping()) {
Thread.sleep(CHECK_INTERVAL);
}
gatewayServer.shutdown();
System.exit(0);
} finally {
System.exit(1);
}
} | 3.26 |
flink_WindowListState_add_rdh | /**
* Updates the operator state accessible by {@link #get(W)} by adding the given value to the
* list of values. The next time {@link #get(W)} is called (for the same state partition) the
* returned state will represent the updated list.
*
* <p>If null is passed in, the state value will remain unchanged.
*
* @param window
* The namespace for the state.
* @param value
* The new value for the state.
* @throws Exception
* Thrown if the system cannot access the state.
*/
public void add(W window, RowData value) throws Exception {
f0.setCurrentNamespace(window);
f0.add(value);
} | 3.26 |
flink_AbstractPythonStreamAggregateOperator_getUserDefinedFunctionsProto_rdh | /**
* Gets the proto representation of the Python user-defined aggregate functions to be executed.
*/
protected UserDefinedAggregateFunctions getUserDefinedFunctionsProto() {
FlinkFnApi.UserDefinedAggregateFunctions.Builder builder = FlinkFnApi.UserDefinedAggregateFunctions.newBuilder();
builder.setMetricEnabled(config.get(PYTHON_METRIC_ENABLED));
builder.setProfileEnabled(config.get(PYTHON_PROFILE_ENABLED));
builder.addAllGrouping(Arrays.stream(grouping).boxed().collect(Collectors.toList()));
builder.setGenerateUpdateBefore(generateUpdateBefore);
builder.setIndexOfCountStar(indexOfCountStar);
builder.setKeyType(toProtoType(getKeyType()));
builder.setStateCacheSize(stateCacheSize);
builder.setMapStateReadCacheSize(mapStateReadCacheSize);
builder.setMapStateWriteCacheSize(mapStateWriteCacheSize);
for (int i = 0; i < aggregateFunctions.length; i++) {
DataViewSpec[] specs = null;
if (i < dataViewSpecs.length) {
specs = dataViewSpecs[i];
} builder.addUdfs(ProtoUtils.createUserDefinedAggregateFunctionProto(aggregateFunctions[i], specs));
}
builder.addAllJobParameters(getRuntimeContext().getExecutionConfig().getGlobalJobParameters().toMap().entrySet().stream().map(entry -> FlinkFnApi.JobParameter.newBuilder().setKey(entry.getKey()).setValue(entry.getValue()).build()).collect(Collectors.toList()));
return builder.build();
} | 3.26 |
flink_PermanentBlobService_readFile_rdh | /**
 * Returns the content of the file for the BLOB with the provided job ID and the blob key.
*
* <p>Compared to {@code getFile}, {@code readFile} will attempt to read the entire file after
* retrieving it. If file reading and file retrieving is done in the same WRITE lock, it can
* avoid the scenario that the path to the file is deleted concurrently by other threads when
* the file is retrieved but not read yet.
*
* @param jobId
* ID of the job this blob belongs to
* @param key
* BLOB key associated with the requested file
* @return The content of the BLOB.
* @throws java.io.FileNotFoundException
* if the BLOB does not exist;
* @throws IOException
* if any other error occurs when retrieving the file.
*/
default byte[] readFile(JobID jobId, PermanentBlobKey key) throws IOException {
// The default implementation doesn't guarantee that the file won't be deleted concurrently
// by other threads while reading the contents.
return FileUtils.readAllBytes(getFile(jobId, key).toPath());
} | 3.26 |
flink_ExecutionJobVertex_connectToPredecessors_rdh | // ---------------------------------------------------------------------------------------------
public void connectToPredecessors(Map<IntermediateDataSetID, IntermediateResult> intermediateDataSets) throws JobException {
checkState(isInitialized());
List<JobEdge> inputs = jobVertex.getInputs();
if (LOG.isDebugEnabled()) {
LOG.debug(String.format("Connecting ExecutionJobVertex %s (%s) to %d predecessors.", jobVertex.getID(), jobVertex.getName(), inputs.size()));
}
for (int num = 0; num < inputs.size(); num++) {
JobEdge edge = inputs.get(num);
if (LOG.isDebugEnabled()) {
if (edge.getSource() == null) {
LOG.debug(String.format("Connecting input %d of vertex %s (%s) to intermediate result referenced via ID %s.", num, jobVertex.getID(), jobVertex.getName(), edge.getSourceId()));
} else {
LOG.debug(String.format("Connecting input %d of vertex %s (%s) to intermediate result referenced via predecessor %s (%s).", num, jobVertex.getID(), jobVertex.getName(), edge.getSource().getProducer().getID(), edge.getSource().getProducer().getName()));
}
}
// fetch the intermediate result via ID. if it does not exist, then it either has not
// been created, or the order
// in which this method is called for the job vertices is not a topological order
IntermediateResult ires = intermediateDataSets.get(edge.getSourceId());
if (ires == null) {
throw new JobException("Cannot connect this job graph to the previous graph. No previous intermediate result found for ID " + edge.getSourceId());
}
this.inputs.add(ires);
EdgeManagerBuildUtil.connectVertexToResult(this, ires);
}
} | 3.26 |
flink_ExecutionJobVertex_getAggregateJobVertexState_rdh | // ------------------------------------------------------------------------
// Static Utilities
// ------------------------------------------------------------------------
/**
* A utility function that computes an "aggregated" state for the vertex.
*
* <p>This state is not used anywhere in the coordination, but can be used for display in
 * dashboards as a summary of how the particular parallel operation represented by this
* ExecutionJobVertex is currently behaving.
*
* <p>For example, if at least one parallel task is failed, the aggregate state is failed. If
* not, and at least one parallel task is cancelling (or cancelled), the aggregate state is
* cancelling (or cancelled). If all tasks are finished, the aggregate state is finished, and so
* on.
*
* @param verticesPerState
* The number of vertices in each state (indexed by the ordinal of the
* ExecutionState values).
* @param parallelism
* The parallelism of the ExecutionJobVertex
* @return The aggregate state of this ExecutionJobVertex.
*/
public static ExecutionState getAggregateJobVertexState(int[] verticesPerState, int parallelism) {
if ((verticesPerState == null) || (verticesPerState.length != ExecutionState.values().length)) {
throw new IllegalArgumentException("Must provide an array as large as there are execution states.");
}
if (verticesPerState[ExecutionState.FAILED.ordinal()] > 0) {
return ExecutionState.FAILED;
}
if (verticesPerState[ExecutionState.CANCELING.ordinal()] > 0) {
return ExecutionState.CANCELING;
} else if (verticesPerState[ExecutionState.CANCELED.ordinal()] > 0) {
return ExecutionState.CANCELED;
} else if (verticesPerState[ExecutionState.INITIALIZING.ordinal()] > 0) {
return ExecutionState.INITIALIZING;
} else if (verticesPerState[ExecutionState.RUNNING.ordinal()] > 0) {
return ExecutionState.RUNNING;
} else if (verticesPerState[ExecutionState.FINISHED.ordinal()] > 0) {
return verticesPerState[ExecutionState.FINISHED.ordinal()] == parallelism
? ExecutionState.FINISHED : ExecutionState.RUNNING;
} else {
// all else collapses under created
return ExecutionState.CREATED;
}
} | 3.26 |
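A short sketch of how the precedence described in the Javadoc plays out; the counts array is indexed by ExecutionState ordinal, and the package names in the imports are assumptions:

```java
import org.apache.flink.runtime.execution.ExecutionState;
import org.apache.flink.runtime.executiongraph.ExecutionJobVertex;

class AggregateStateSketch {
    static void demo() {
        // One counter per ExecutionState ordinal, here for a vertex with parallelism 3.
        int[] verticesPerState = new int[ExecutionState.values().length];
        verticesPerState[ExecutionState.RUNNING.ordinal()] = 2;
        verticesPerState[ExecutionState.FINISHED.ordinal()] = 1;

        // Nothing failed or was cancelled, and only one of three subtasks finished,
        // so the aggregate collapses to RUNNING rather than FINISHED.
        ExecutionState aggregate =
                ExecutionJobVertex.getAggregateJobVertexState(verticesPerState, 3);
        System.out.println(aggregate); // RUNNING
    }
}
```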
flink_ExecutionJobVertex_getAggregatedUserAccumulatorsStringified_rdh | // --------------------------------------------------------------------------------------------
// Accumulators / Metrics
// --------------------------------------------------------------------------------------------
public StringifiedAccumulatorResult[] getAggregatedUserAccumulatorsStringified() {
Map<String, OptionalFailure<Accumulator<?, ?>>> userAccumulators = new HashMap<>();
for (ExecutionVertex vertex : getTaskVertices()) {
Map<String, Accumulator<?, ?>> next = vertex.getCurrentExecutionAttempt().getUserAccumulators();
if (next != null) {
AccumulatorHelper.mergeInto(userAccumulators, next);
}
}
return StringifiedAccumulatorResult.stringifyAccumulatorResults(userAccumulators);
} | 3.26 |
flink_ExecutionJobVertex_m2_rdh | /**
* Cancels all currently running vertex executions.
*
* @return A future that is complete once all tasks have canceled.
*/
public CompletableFuture<Void> m2() {
return FutureUtils.waitForAll(mapExecutionVertices(ExecutionVertex::cancel));
} | 3.26 |
flink_ExecutionJobVertex_archive_rdh | // --------------------------------------------------------------------------------------------
// Archiving
// --------------------------------------------------------------------------------------------
@Override
public ArchivedExecutionJobVertex archive() {
return new ArchivedExecutionJobVertex(this);
} | 3.26 |
flink_ExecutionJobVertex_getOperatorIDs_rdh | /**
* Returns a list containing the ID pairs of all operators contained in this execution job
* vertex.
*
* @return list containing the ID pairs of all contained operators
*/
public List<OperatorIDPair> getOperatorIDs() {
return jobVertex.getOperatorIDs();
} | 3.26 |
flink_HybridSource_m1_rdh | /**
* Build the source.
*/
public HybridSource<T> m1() {
return new HybridSource(sources);
} | 3.26 |
flink_HybridSource_builder_rdh | /**
* Builder for {@link HybridSource}.
*/
public static <T, EnumT extends SplitEnumerator> HybridSourceBuilder<T, EnumT> builder(Source<T, ?, ?> firstSource) {
HybridSourceBuilder<T, EnumT> builder = new HybridSourceBuilder<>();
return builder.m0(firstSource);
} | 3.26 |
flink_HybridSource_addSource_rdh | /**
* Add source with deferred instantiation based on previous enumerator.
*/
public <ToEnumT extends SplitEnumerator, NextSourceT extends Source<T, ?, ?>> HybridSourceBuilder<T, ToEnumT>
addSource(SourceFactory<T, NextSourceT, ? super EnumT> sourceFactory, Boundedness boundedness) {
if (!sources.isEmpty()) {
Preconditions.checkArgument(Boundedness.BOUNDED.equals(sources.get(sources.size() - 1).boundedness), "All sources except the final source need to be bounded.");
}
ClosureCleaner.clean(sourceFactory, ClosureCleanerLevel.RECURSIVE, true);
sources.add(SourceListEntry.of(sourceFactory, boundedness));
return ((HybridSourceBuilder) (this));
} | 3.26 |
flink_HybridSource_m0_rdh | /**
* Add pre-configured source (without switch time modification).
*/
public <ToEnumT extends SplitEnumerator, NextSourceT extends Source<T, ?, ?>> HybridSourceBuilder<T, ToEnumT> m0(NextSourceT source) {
return addSource(new PassthroughSourceFactory<>(source), source.getBoundedness());
} | 3.26 |
flink_SuperstepKickoffLatchBroker_instance_rdh | /**
* Retrieve the singleton instance.
*/
public static Broker<SuperstepKickoffLatch> instance() {
return INSTANCE;
} | 3.26 |
flink_TypeStrategies_matchFamily_rdh | /**
* Type strategy that returns the given argument if it is of the same logical type family.
*/
public static TypeStrategy matchFamily(int argumentPos, LogicalTypeFamily family) {
return new MatchFamilyTypeStrategy(argumentPos, family);
} | 3.26 |
flink_TypeStrategies_argument_rdh | /**
 * Type strategy that returns the n-th input argument, mapped via the given function.
*/
public static TypeStrategy argument(int pos, Function<DataType, Optional<DataType>> mapper) {
return new ArgumentMappingTypeStrategy(pos, mapper);
} | 3.26 |
flink_TypeStrategies_varyingString_rdh | /**
* A type strategy that ensures that the result type is either {@link LogicalTypeRoot#VARCHAR}
* or {@link LogicalTypeRoot#VARBINARY} from their corresponding non-varying roots.
*/
public static TypeStrategy varyingString(TypeStrategy initialStrategy) {
return new VaryingStringTypeStrategy(initialStrategy);
} | 3.26 |
flink_TypeStrategies_forceNullable_rdh | /**
* Type strategy which forces the given {@param initialStrategy} to be nullable.
*/
public static TypeStrategy forceNullable(TypeStrategy initialStrategy) {
return new ForceNullableTypeStrategy(initialStrategy);
} | 3.26 |
flink_TypeStrategies_first_rdh | /**
* Type strategy that returns the first type that could be inferred.
*/
public static TypeStrategy first(TypeStrategy... strategies) {
return new FirstTypeStrategy(Arrays.asList(strategies));
} | 3.26 |
flink_TypeStrategies_nullableIfAllArgs_rdh | /**
 * A type strategy that can be used to make a result type nullable if all of the input arguments
 * are nullable. Otherwise the type will be NOT NULL.
*/
public static TypeStrategy nullableIfAllArgs(TypeStrategy initialStrategy) {
return nullableIfAllArgs(ConstantArgumentCount.any(), initialStrategy);
} | 3.26 |
flink_TypeStrategies_aggArg0_rdh | /**
 * Type strategy specific for aggregations that partially produce different nullability
 * depending on whether the result is grouped or not.
*/
public static TypeStrategy aggArg0(Function<LogicalType, LogicalType> aggType, boolean nullableIfGroupingEmpty) {
return callContext -> {
final DataType argDataType = callContext.getArgumentDataTypes().get(0);
final LogicalType argType = argDataType.getLogicalType();
LogicalType result = aggType.apply(argType);
if (nullableIfGroupingEmpty && (!callContext.isGroupedAggregation())) {
// null only if condition is met, otherwise arguments nullability
result = result.copy(true);
} else if (!nullableIfGroupingEmpty) {
// never null
result = result.copy(false);
}
return Optional.of(fromLogicalToDataType(result));
};
} | 3.26 |
flink_TypeStrategies_explicit_rdh | /**
* Type strategy that returns a fixed {@link DataType}.
*/
public static TypeStrategy explicit(DataType dataType) {
return new ExplicitTypeStrategy(dataType);
} | 3.26 |
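These factory methods are meant to be composed; a small sketch combining two of the strategies shown above (the package names are assumptions based on where these classes usually live in flink-table):

```java
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.types.inference.TypeStrategies;
import org.apache.flink.table.types.inference.TypeStrategy;

class TypeStrategyCompositionSketch {
    // A fixed STRING result that becomes nullable only if all input arguments are nullable,
    // and NOT NULL otherwise (see nullableIfAllArgs above).
    static final TypeStrategy OUTPUT =
            TypeStrategies.nullableIfAllArgs(TypeStrategies.explicit(DataTypes.STRING()));
}
```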
flink_AbstractTopNFunction_setKeyContext_rdh | /**
* Sets keyContext to RankFunction.
*
* @param keyContext
* keyContext of current function.
*/
public void setKeyContext(KeyContext keyContext) {
this.keyContext = keyContext;
} | 3.26 |
flink_AbstractTopNFunction_checkSortKeyInBufferRange_rdh | /**
* Checks whether the record should be put into the buffer.
*
* @param sortKey
* sortKey to test
* @param buffer
* buffer to add
* @return true if the record should be put into the buffer.
*/
protected boolean checkSortKeyInBufferRange(RowData sortKey, TopNBuffer buffer) {
return buffer.checkSortKeyInBufferRange(sortKey, getDefaultTopNSize());
} | 3.26 |
flink_AbstractTopNFunction_initRankEnd_rdh | /**
* Initialize rank end.
*
* @param row
* input record
* @return rank end
* @throws Exception
*/
protected long initRankEnd(RowData row) throws Exception {
if (f0) {
return rankEnd;
} else {
Long rankEndValue = rankEndState.value();
long curRankEnd = rankEndFetcher.apply(row);
if (rankEndValue == null) {
rankEnd = curRankEnd;
rankEndState.update(rankEnd);
return rankEnd;
} else {
rankEnd = rankEndValue;
if (rankEnd != curRankEnd) {
// increment the invalid counter when the current rank end not equal to previous
// rank end
invalidCounter.inc();
}
return rankEnd;
}
}
} | 3.26 |
flink_AbstractTopNFunction_getDefaultTopNSize_rdh | /**
* Gets default topN size.
*
* @return default topN size
*/
protected long getDefaultTopNSize() {
return f0 ? rankEnd : DEFAULT_TOPN_SIZE;
} | 3.26 |
flink_WrapJsonAggFunctionArgumentsRule_addProjections_rdh | /**
* Adds (wrapped) projections for affected arguments of the aggregation. For duplicate
* projection fields, we only wrap them once and record the conversion relationship in the map
* valueIndicesAfterProjection.
*
* <p>Note that we cannot override any of the projections as a field may be used multiple times,
* and in particular outside of the aggregation call. Therefore, we explicitly add the wrapped
* projection as an additional one.
*/
private void addProjections(RelOptCluster cluster, RelBuilder relBuilder, List<Integer> affectedArgs, int inputCount, Map<Integer, Integer> valueIndicesAfterProjection) {
final BridgingSqlFunction operandToStringOperator = BridgingSqlFunction.of(cluster, JSON_STRING);
final List<RexNode> projects = new ArrayList<>();
for (Integer argIdx : affectedArgs) {
valueIndicesAfterProjection.put(argIdx, inputCount + projects.size());
projects.add(relBuilder.call(operandToStringOperator, relBuilder.field(argIdx)));
}
relBuilder.projectPlus(projects);
} | 3.26 |
flink_RichSqlInsertKeyword_symbol_rdh | /**
* Creates a parse-tree node representing an occurrence of this keyword at a particular position
* in the parsed text.
*/
public SqlLiteral symbol(SqlParserPos pos) {
return SqlLiteral.createSymbol(this, pos);
} | 3.26 |
flink_DynamicConfiguration_addAppConfigurationEntry_rdh | /**
* Add entries for the given application name.
*/
public void addAppConfigurationEntry(String name, AppConfigurationEntry... entry) {
final AppConfigurationEntry[] existing = dynamicEntries.get(name);
final AppConfigurationEntry[] updated;
if (existing == null) {
updated = Arrays.copyOf(entry, entry.length);
} else {
updated = merge(existing, entry);
}
dynamicEntries.put(name, updated);
} | 3.26 |
flink_SourceOperator_getSourceReader_rdh | // --------------- methods for unit tests ------------
@VisibleForTesting
public SourceReader<OUT, SplitT> getSourceReader() {
return sourceReader;
} | 3.26 |
flink_SourceOperator_checkSplitWatermarkAlignment_rdh | /**
* Finds the splits that are beyond the current max watermark and pauses them. At the same time,
* splits that have been paused and where the global watermark caught up are resumed.
*
* <p>Note: This takes effect only if there are multiple splits, otherwise it does nothing.
*/
private void checkSplitWatermarkAlignment() {
if (numSplits <= 1) {
// A single split can't overtake any other splits assigned to this operator instance.
// It is sufficient for the source to stop processing.
return;
}
Collection<String> splitsToPause = new ArrayList<>();
Collection<String> splitsToResume = new ArrayList<>();
splitCurrentWatermarks.forEach((splitId, splitWatermark) -> {
if (splitWatermark > currentMaxDesiredWatermark) {
splitsToPause.add(splitId);
} else if (currentlyPausedSplits.contains(splitId)) {
splitsToResume.add(splitId);
}
});
splitsToPause.removeAll(currentlyPausedSplits);
if ((!splitsToPause.isEmpty()) || (!splitsToResume.isEmpty())) {
pauseOrResumeSplits(splitsToPause, splitsToResume);
currentlyPausedSplits.addAll(splitsToPause);
splitsToResume.forEach(currentlyPausedSplits::remove);
}
} | 3.26 |
flink_SourceOperator_initReader_rdh | /**
 * Initializes the reader. The code from this method should ideally happen in the constructor,
 * or even in the operator factory. It has to happen here at a slightly later stage, because of the
* lazy metric initialization.
*
* <p>Calling this method explicitly is an optional way to have the reader initialization a bit
* earlier than in open(), as needed by the {@link org.apache.flink.streaming.runtime.tasks.SourceOperatorStreamTask}
*
* <p>This code should move to the constructor once the metric groups are available at task
* setup time.
*/
public void initReader() throws Exception {
if (sourceReader != null) {
return;
}
final int subtaskIndex = getRuntimeContext().getIndexOfThisSubtask();
final SourceReaderContext context = new SourceReaderContext() {
@Override
public SourceReaderMetricGroup metricGroup() {
return sourceMetricGroup;
}
@Override
public Configuration getConfiguration() {
return configuration;
}
@Override
public String getLocalHostName() {
return localHostname;
}
@Override
public int getIndexOfSubtask() {
return subtaskIndex;
}
@Override
public void sendSplitRequest() {
operatorEventGateway.sendEventToCoordinator(new RequestSplitEvent(getLocalHostName()));
}
@Override
public void sendSourceEventToCoordinator(SourceEvent event) {
operatorEventGateway.sendEventToCoordinator(new SourceEventWrapper(event));
}
@Override
public UserCodeClassLoader getUserCodeClassLoader() {
return new UserCodeClassLoader() {
@Override
public ClassLoader asClassLoader() {
return getRuntimeContext().getUserCodeClassLoader();
}
@Override
public void registerReleaseHookIfAbsent(String releaseHookName, Runnable releaseHook) {
getRuntimeContext().registerUserCodeClassLoaderReleaseHookIfAbsent(releaseHookName, releaseHook);
}
};
}
@Override
public int currentParallelism() {
return getRuntimeContext().getNumberOfParallelSubtasks();
}
};
sourceReader = readerFactory.apply(context);
} | 3.26 |
flink_NFAState_resetStateChanged_rdh | /**
* Reset the changed bit checked via {@link #isStateChanged()} to {@code false}.
*/
public void resetStateChanged() {
this.stateChanged = false;
} | 3.26 |
flink_NFAState_setStateChanged_rdh | /**
* Set the changed bit checked via {@link #isStateChanged()} to {@code true}.
*/
public void setStateChanged() {
this.stateChanged = true;
} | 3.26 |
flink_BatchExecSink_getPhysicalRowType_rdh | /**
* Get the physical row type with given column indices.
*/
private RowType getPhysicalRowType(ResolvedSchema schema, int[] columnIndices) {
List<Column> columns = schema.getColumns();
List<Column> requireColumns = new ArrayList<>();
for (int columnIndex : columnIndices) {
requireColumns.add(columns.get(columnIndex));
}
return ((RowType) (ResolvedSchema.of(requireColumns).toPhysicalRowDataType().getLogicalType()));
} | 3.26 |
flink_GroupCombineNode_computeOperatorSpecificDefaultEstimates_rdh | // --------------------------------------------------------------------------------------------
// Estimates
// --------------------------------------------------------------------------------------------
@Override
protected void computeOperatorSpecificDefaultEstimates(DataStatistics statistics) {
// no real estimates possible for a reducer.
} | 3.26 |
flink_GroupCombineNode_getOperator_rdh | // ------------------------------------------------------------------------
/**
* Gets the operator represented by this optimizer node.
*
* @return The operator represented by this optimizer node.
*/
@Override
public GroupCombineOperatorBase<?, ?, ?> getOperator() {
return ((GroupCombineOperatorBase<?, ?, ?>) (super.getOperator()));
} | 3.26 |
flink_TemporalProcessTimeJoinOperator_cleanupState_rdh | /**
* The method to be called when a cleanup timer fires.
*
* @param time
* The timestamp of the fired timer.
*/
@Override
public void cleanupState(long time) {
rightState.clear();
} | 3.26 |
flink_TemporalProcessTimeJoinOperator_onEventTime_rdh | /**
* Invoked when an event-time timer fires.
 */
@Override
public void onEventTime(InternalTimer<Object, VoidNamespace> timer) throws Exception {
} | 3.26 |
flink_ImmutableMapState_iterator_rdh | /**
* Iterates over all the mappings in the state. The iterator cannot remove elements.
*
* @return A read-only iterator over all the mappings in the state.
*/
@Override
public Iterator<Map.Entry<K, V>> iterator() {
return Collections.unmodifiableSet(state.entrySet()).iterator();
} | 3.26 |
flink_ImmutableMapState_values_rdh | /**
* Returns all the values in the state in a {@link Collections#unmodifiableCollection(Collection)}.
*
* @return A read-only iterable view of all the values in the state.
*/
@Override
public Iterable<V> values() {
return Collections.unmodifiableCollection(state.values());
} | 3.26 |
flink_ImmutableMapState_entries_rdh | /**
* Returns all the mappings in the state in a {@link Collections#unmodifiableSet(Set)}.
*
* @return A read-only iterable view of all the key-value pairs in the state.
*/
@Override
public Iterable<Map.Entry<K, V>> entries() {
return Collections.unmodifiableSet(state.entrySet());
} | 3.26 |
flink_CompressWriterFactory_withHadoopCompression_rdh | /**
* Compresses the data using the provided Hadoop {@link CompressionCodec} and {@link Configuration}.
*
* @param codecName
* Simple/complete name or alias of the CompressionCodec
* @param hadoopConfig
* Hadoop Configuration
* @return the instance of CompressionWriterFactory
* @throws IOException
*/
public CompressWriterFactory<IN> withHadoopCompression(String codecName, Configuration hadoopConfig) throws IOException {
this.codecExtension = getHadoopCodecExtension(codecName, hadoopConfig);
this.hadoopCodecName = codecName;
for (Map.Entry<String, String> entry : hadoopConfig) {
hadoopConfigMap.put(entry.getKey(), entry.getValue());
}
return this;
} | 3.26 |
flink_NettyPartitionRequestClient_requestSubpartition_rdh | /**
* Requests a remote intermediate result partition queue.
*
* <p>The request goes to the remote producer, for which this partition request client instance
* has been created.
*/
@Override
public void requestSubpartition(final ResultPartitionID partitionId, final int subpartitionIndex, final RemoteInputChannel inputChannel, int delayMs) throws IOException {
checkNotClosed();
LOG.debug("Requesting subpartition {} of partition {} with {} ms delay.", subpartitionIndex, partitionId, delayMs);
clientHandler.addInputChannel(inputChannel);
final PartitionRequest request = new PartitionRequest(partitionId, subpartitionIndex, inputChannel.getInputChannelId(), inputChannel.getInitialCredit());
final ChannelFutureListener listener = future -> {
if (!future.isSuccess()) {
clientHandler.removeInputChannel(inputChannel);
inputChannel.onError(new LocalTransportException(String.format("Sending the partition request to '%s [%s] (#%d)' failed.", connectionId.getAddress(), connectionId.getResourceID().getStringWithMetadata(), connectionId.getConnectionIndex()), future.channel().localAddress(), future.cause()));
sendToChannel(new ConnectionErrorMessage(future.cause() == null ? new RuntimeException("Cannot send partition request.") : future.cause()));
}
};
if (delayMs == 0) {
ChannelFuture f = tcpChannel.writeAndFlush(request);
f.addListener(listener);
} else {
final ChannelFuture[] f = new ChannelFuture[1];
tcpChannel.eventLoop().schedule(() -> {
f[0] = tcpChannel.writeAndFlush(request);
f[0].addListener(listener);
}, delayMs, TimeUnit.MILLISECONDS);
}
} | 3.26 |
flink_NettyPartitionRequestClient_sendTaskEvent_rdh | /**
* Sends a task event backwards to an intermediate result partition producer.
*
* <p>Backwards task events flow between readers and writers and therefore will only work when
* both are running at the same time, which is only guaranteed to be the case when both the
* respective producer and consumer task run pipelined.
*/
@Override
public void sendTaskEvent(ResultPartitionID partitionId, TaskEvent event, final RemoteInputChannel inputChannel) throws IOException {
checkNotClosed();
tcpChannel.writeAndFlush(new TaskEventRequest(event, partitionId, inputChannel.getInputChannelId())).addListener(((ChannelFutureListener) (future -> {
if (!future.isSuccess()) {
inputChannel.onError(new LocalTransportException(String.format("Sending the task event to '%s [%s] (#%d)' failed.", connectionId.getAddress(), connectionId.getResourceID().getStringWithMetadata(), connectionId.getConnectionIndex()), future.channel().localAddress(), future.cause()));
sendToChannel(new ConnectionErrorMessage(future.cause() == null ? new RuntimeException("Cannot send task event.") : future.cause()));
}
})));
} | 3.26 |
flink_NettyPartitionRequestClient_validateClientAndIncrementReferenceCounter_rdh | /**
* Validate the client and increment the reference counter.
*
* <p>Note: the reference counter has to be incremented before returning the instance of this
* client to ensure correct closing logic.
*
* @return whether this client can be used.
*/
boolean validateClientAndIncrementReferenceCounter() {
if (!clientHandler.hasChannelError()) {
return closeReferenceCounter.incrementAndGet() > 0;
}
return false;
} | 3.26 |
flink_TypeSerializerSnapshot_readVersionedSnapshot_rdh | /**
 * Reads a snapshot from the stream, performing the necessary class resolution.
*
* <p>This method reads snapshots written by {@link #writeVersionedSnapshot(DataOutputView,
* TypeSerializerSnapshot)}.
*/
static <T> TypeSerializerSnapshot<T> readVersionedSnapshot(DataInputView in, ClassLoader cl) throws IOException {
final TypeSerializerSnapshot<T> snapshot = TypeSerializerSnapshotSerializationUtil.readAndInstantiateSnapshotClass(in, cl);
int version = in.readInt();
snapshot.readSnapshot(version, in, cl);
return snapshot;
} | 3.26 |
flink_TypeSerializerSnapshot_writeVersionedSnapshot_rdh | // ------------------------------------------------------------------------
// read / write utilities
// ------------------------------------------------------------------------
/**
* Writes the given snapshot to the out stream. One should always use this method to write
* snapshots out, rather than directly calling {@link #writeSnapshot(DataOutputView)}.
*
* <p>The snapshot written with this method can be read via {@link #readVersionedSnapshot(DataInputView, ClassLoader)}.
*/
static void writeVersionedSnapshot(DataOutputView out, TypeSerializerSnapshot<?> snapshot) throws IOException {
out.writeUTF(snapshot.getClass().getName());
out.writeInt(snapshot.getCurrentVersion());
snapshot.writeSnapshot(out);
} | 3.26 |
flink_ExpandColumnFunctionsRule_isNameRangeCall_rdh | /**
* Whether the expression is a column name range expression, e.g. withColumns(a ~ b).
*/
private boolean isNameRangeCall(UnresolvedCallExpression expression) {
return ((expression.getFunctionDefinition() == RANGE_TO) && (expression.getChildren().get(0) instanceof UnresolvedReferenceExpression)) && (expression.getChildren().get(1) instanceof UnresolvedReferenceExpression);
} | 3.26 |
flink_ExpandColumnFunctionsRule_apply_rdh | /**
* Replaces column functions with all available {@link org.apache.flink.table.expressions.UnresolvedReferenceExpression}s from underlying inputs.
*/
@Internal final class ExpandColumnFunctionsRule implements ResolverRule {
@Override
public List<Expression> apply(List<Expression> expression, ResolutionContext context) {
final List<ColumnExpansionStrategy> strategies = context.configuration().get(TableConfigOptions.TABLE_COLUMN_EXPANSION_STRATEGY);
final ColumnFunctionsExpander columnFunctionsExpander = new ColumnFunctionsExpander(context.referenceLookup().getInputFields(strategies).stream().map(p -> unresolvedRef(p.getName())).collect(Collectors.toList()));
return expression.stream().flatMap(expr -> expr.accept(columnFunctionsExpander).stream()).collect(Collectors.toList());
} | 3.26 |
flink_ExpandColumnFunctionsRule_resolveArgsOfColumns_rdh | /**
* Expand the columns expression in the input Expression List.
*/
private List<Expression> resolveArgsOfColumns(List<Expression> args, boolean isReverseProjection) {
List<Expression> v7 = new LinkedList<>();
List<UnresolvedReferenceExpression> result = args.stream().flatMap(e -> e.accept(this.columnsExpressionExpander).stream()).collect(Collectors.toList());
if (isReverseProjection) {
for (UnresolvedReferenceExpression field : inputFieldReferences) {
if (indexOfName(result, field.getName()) == (-1)) {
v7.add(field);
}
}
} else {
v7.addAll(result);
}
return v7;
} | 3.26 |
flink_ExpandColumnFunctionsRule_indexOfName_rdh | /**
* Find the index of targetName in the list. Return -1 if not found.
*/
private static int indexOfName(List<UnresolvedReferenceExpression> inputFieldReferences, String targetName) {
int i;
for (i = 0; i < inputFieldReferences.size(); ++i) {
if (inputFieldReferences.get(i).getName().equals(targetName)) {
break;
}
}
return i == inputFieldReferences.size() ? -1 : i;
} | 3.26 |
flink_ExpandColumnFunctionsRule_isIndexRangeCall_rdh | /**
* Whether the expression is a column index range expression, e.g. withColumns(1 ~ 2).
*/
private boolean isIndexRangeCall(UnresolvedCallExpression expression) {
return ((expression.getFunctionDefinition() == RANGE_TO) && (expression.getChildren().get(0) instanceof ValueLiteralExpression)) && (expression.getChildren().get(1) instanceof ValueLiteralExpression);
} | 3.26 |
flink_ParquetColumnarRowSplitReader_clipParquetSchema_rdh | /**
* Clips `parquetSchema` according to `fieldNames`.
*/
private static MessageType clipParquetSchema(GroupType parquetSchema, String[] fieldNames, boolean caseSensitive) {
Type[] types = new Type[fieldNames.length];
if (caseSensitive) {
for (int i = 0; i < fieldNames.length; ++i) {
String fieldName = fieldNames[i];
if (parquetSchema.getFieldIndex(fieldName) < 0) {
throw new IllegalArgumentException(fieldName + " does not exist");
}
types[i] = parquetSchema.getType(fieldName);
}
} else {
Map<String, Type> caseInsensitiveFieldMap = new HashMap<>();
for (Type type : parquetSchema.getFields()) {
caseInsensitiveFieldMap.compute(type.getName().toLowerCase(Locale.ROOT), (key, previousType) -> {
if (previousType != null) {
throw new FlinkRuntimeException("Parquet with case insensitive mode should have no duplicate key: " + key);
}
return type;
});
}
for (int i = 0; i < fieldNames.length; ++i) {
Type type = caseInsensitiveFieldMap.get(fieldNames[i].toLowerCase(Locale.ROOT));
if (type == null) {
throw new IllegalArgumentException(fieldNames[i] + " does not exist");
}
// TODO clip for array,map,row types.
types[i] = type;
}
}
return Types.buildMessage().addFields(types).named("flink-parquet");
} | 3.26 |
flink_ParquetColumnarRowSplitReader_seekToRow_rdh | /**
* Seek to a particular row number.
*/
public void seekToRow(long rowCount) throws IOException {
if (totalCountLoadedSoFar != 0) {
throw new UnsupportedOperationException("Only support seek at first.");
}
List<BlockMetaData> blockMetaData = reader.getRowGroups();
for (BlockMetaData metaData : blockMetaData) {
if (metaData.getRowCount() > rowCount) {
break;
} else {
reader.skipNextRowGroup();
rowsReturned += metaData.getRowCount();
totalCountLoadedSoFar += metaData.getRowCount();
f1 = ((int) (metaData.getRowCount()));
nextRow = ((int) (metaData.getRowCount()));
rowCount -= metaData.getRowCount();
}
}
for (int i = 0; i < rowCount; i++) {
boolean end = reachedEnd();
if (end) {
throw new RuntimeException("Seek to many rows.");
}
nextRecord();
}
} | 3.26 |
flink_ParquetColumnarRowSplitReader_reachedEnd_rdh | /**
* Method used to check if the end of the input is reached.
*
* @return True if the end is reached, otherwise false.
* @throws IOException
* Thrown, if an I/O error occurred.
*/
public boolean reachedEnd() throws IOException {
return !ensureBatch();
} | 3.26 |
flink_ParquetColumnarRowSplitReader_createReadableVectors_rdh | /**
* Create readable vectors from writable vectors. Especially for decimal, see {@link ParquetDecimalVector}.
*/
private ColumnVector[] createReadableVectors() {
ColumnVector[] vectors = new ColumnVector[writableVectors.length];
for (int i = 0; i < writableVectors.length; i++) {
vectors[i] = (selectedTypes[i].getTypeRoot() == LogicalTypeRoot.DECIMAL) ? new ParquetDecimalVector(writableVectors[i]) : writableVectors[i];
}
return vectors;
} | 3.26 |
flink_ParquetColumnarRowSplitReader_nextBatch_rdh | /**
* Advances to the next batch of rows. Returns false if there are no more.
*/
private boolean nextBatch() throws IOException {
for (WritableColumnVector v : writableVectors) {
v.reset();
}
columnarBatch.setNumRows(0);
if (rowsReturned >= totalRowCount) {
return false;
}
if (rowsReturned == totalCountLoadedSoFar) {
readNextRowGroup();
}
int num = ((int) (Math.min(batchSize, totalCountLoadedSoFar - rowsReturned)));
for (int i = 0; i < columnReaders.length; ++i) {
// noinspection unchecked
columnReaders[i].readToVector(num, writableVectors[i]);
}
rowsReturned += num;
columnarBatch.setNumRows(num);
f1 = num;
return true;
} | 3.26 |
flink_ParquetColumnarRowSplitReader_ensureBatch_rdh | /**
 * Checks if there is at least one row left in the batch to return. If no more rows are
 * available, it reads another batch of rows.
*
* @return Returns true if there is one more row to return, false otherwise.
* @throws IOException
* throw if an exception happens while reading a batch.
*/
private boolean ensureBatch() throws IOException {
if (nextRow >= f1) {
// Try to read the next batch of rows from the file.
if (nextBatch()) {
// No more rows available in the Rows array.
nextRow = 0;
return true;
}
return false;
}
// there is at least one Row left in the Rows array.
return true;
} | 3.26 |
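The reachedEnd/ensureBatch pair above implies the usual pull-based read loop; a sketch under the assumption that nextRecord() (used in seekToRow above) returns a RowData-compatible row and that the reader is Closeable:

```java
import java.io.IOException;
import org.apache.flink.table.data.RowData;

class ParquetReadLoopSketch {
    // `reader` is an already-constructed ParquetColumnarRowSplitReader; the constructor
    // arguments (Hadoop config, selected fields, batch size, split) are omitted here.
    static long countRows(ParquetColumnarRowSplitReader reader) throws IOException {
        long count = 0;
        // reachedEnd() transparently pulls the next batch once the current one is exhausted.
        while (!reader.reachedEnd()) {
            RowData row = reader.nextRecord();
            count++;
        }
        reader.close(); // assumed: the reader releases the underlying Parquet file reader
        return count;
    }
}
```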
flink_LocatableInputSplitAssigner_getNextUnassignedMinLocalCountSplit_rdh | /**
* Retrieves a LocatableInputSplit with minimum local count. InputSplits which have already
* been assigned (i.e., which are not contained in the provided set) are filtered out. The
* returned input split is NOT removed from the provided set.
*
* @param unassignedSplits
* Set of unassigned input splits.
* @return An input split with minimum local count or null if all splits have been assigned.
 */
public LocatableInputSplitWithCount getNextUnassignedMinLocalCountSplit(Set<LocatableInputSplitWithCount> unassignedSplits) {
if (splits.size() == 0) {
return null;
}
do {
elementCycleCount--;
// take first split of the list
LocatableInputSplitWithCount split = splits.pollFirst();
if (unassignedSplits.contains(split)) {
int localCount = split.getLocalCount();
// still unassigned, check local count
if (localCount > minLocalCount) {
// re-insert at end of the list and continue to look for split with smaller
// local count
splits.offerLast(split);
// check and update second smallest local count
if ((f1 == (-1)) || (split.getLocalCount() < f1)) {
f1 = split.getLocalCount();
}
split = null;
}
} else {
// split was already assigned
split = null;
}
if (elementCycleCount == 0) {
// one full cycle, but no split with min local count found
// update minLocalCnt and element cycle count for next pass over the splits
minLocalCount = f1;
f1 = -1;
elementCycleCount = splits.size();
}
if (split != null) {
// found a split to assign
return split;
}
} while (elementCycleCount > 0);
// no split left
return null;
} | 3.26 |
flink_LocatableInputSplitAssigner_getNextInputSplit_rdh | // --------------------------------------------------------------------------------------------
@Override
public LocatableInputSplit getNextInputSplit(String host, int taskId) {
// for a null host, we return a remote split
if (host == null) {
synchronized(this.f0) {
synchronized(this.unassigned) {
LocatableInputSplitWithCount split = this.f0.getNextUnassignedMinLocalCountSplit(this.unassigned);
if (split != null) {
// got a split to assign. Double check that it hasn't been assigned before.
if (this.unassigned.remove(split)) {
if (LOG.isInfoEnabled()) {
LOG.info("Assigning split to null host (random assignment).");
}
remoteAssignments++;
return split.getSplit();
} else {
throw new IllegalStateException("Chosen InputSplit has already been assigned. This should not happen!");
}
} else {
// all splits consumed
return null;
}
}
}
}
host = host.toLowerCase(Locale.US);
// for any non-null host, we take the list of non-null splits
LocatableInputSplitChooser localSplits = this.localPerHost.get(host);
// if we have no list for this host yet, create one
if (localSplits == null) {
localSplits = new LocatableInputSplitChooser();
// lock the list, to be sure that others have to wait for that host's local list
synchronized(localSplits) {
LocatableInputSplitChooser prior = this.localPerHost.putIfAbsent(host, localSplits);
// if someone else beat us in the race to create this list, then we do not populate
// this one, but simply work with that other list
if (prior == null) {
// we are the first, we populate
// first, copy the remaining splits to release the lock on the set early
// because that is shared among threads
LocatableInputSplitWithCount[] remaining;
synchronized(this.unassigned) {
remaining = this.unassigned.toArray(new LocatableInputSplitWithCount[this.unassigned.size()]);
}
for (LocatableInputSplitWithCount isw : remaining) {
if (isLocal(host, isw.getSplit().getHostnames())) {
// Split is local on host.
// Increment local count
isw.incrementLocalCount();
// and add to local split list
localSplits.addInputSplit(isw);
}
}
} else {
// someone else was faster
localSplits = prior;
}
}
}
// at this point, we have a list of local splits (possibly empty)
// we need to make sure no one else operates in the current list (that protects against
// list creation races) and that the unassigned set is consistent
// NOTE: we need to obtain the locks in this order, strictly!!!
synchronized(localSplits) {
synchronized(this.unassigned) {
LocatableInputSplitWithCount split = localSplits.getNextUnassignedMinLocalCountSplit(this.unassigned);
if (split != null) {
// found a valid split. Double check that it hasn't been assigned before.
if (this.unassigned.remove(split)) {
if (LOG.isInfoEnabled()) {
LOG.info("Assigning local split to host "
+ host);
}
localAssignments++;
return split.getSplit();
} else {
throw new IllegalStateException("Chosen InputSplit has already been assigned. This should not happen!");
}
}
}
}
// we did not find a local split, return a remote split
synchronized(this.f0) {
synchronized(this.unassigned) {
LocatableInputSplitWithCount split = this.f0.getNextUnassignedMinLocalCountSplit(this.unassigned);
if (split != null) {
// found a valid split. Double check that it hasn't been assigned yet.
if (this.unassigned.remove(split)) {
if (LOG.isInfoEnabled()) {
LOG.info("Assigning remote split to host " + host);
}
remoteAssignments++;
return split.getSplit();
} else {
throw new IllegalStateException("Chosen InputSplit has already been assigned. This should not happen!");
}
} else {
// all splits consumed
return null;
}
}
}
} | 3.26 |
flink_IterationAggregatorBroker_instance_rdh | /**
* Retrieve singleton instance.
*/
public static IterationAggregatorBroker instance() {
return INSTANCE;
} | 3.26 |
flink_RawFormatDeserializationSchema_createDataLengthValidator_rdh | // ------------------------------------------------------------------------------------
// Utilities to check received size of data
// ------------------------------------------------------------------------------------
/**
* Creates a validator for the received data.
*/
private static DataLengthValidator createDataLengthValidator(LogicalType type) {
// please keep the order the same with createNotNullConverter()
switch (type.getTypeRoot()) {
case CHAR :
case VARCHAR :
case VARBINARY :
case BINARY :
case RAW :
return data -> {
};
case BOOLEAN :
return createDataLengthValidator(1, "BOOLEAN");
case TINYINT :
return createDataLengthValidator(1, "TINYINT");
case SMALLINT :
return createDataLengthValidator(2, "SMALLINT");
case INTEGER :
return createDataLengthValidator(4, "INT");
case BIGINT :
return createDataLengthValidator(8, "BIGINT");
case FLOAT :
return createDataLengthValidator(4, "FLOAT");
case DOUBLE :
return createDataLengthValidator(8, "DOUBLE");
default :
throw new UnsupportedOperationException("'raw' format currently doesn't support type: " + type);
}
} | 3.26 |
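The switch above delegates the fixed-width cases to a createDataLengthValidator(int, String) overload that is not part of this snippet. A hypothetical sketch of what such a helper could look like inside the same class; the parameter names and message wording are assumptions, not Flink's actual implementation:

```java
// Hypothetical sketch only: validates that the received byte[] has the expected fixed length.
private static DataLengthValidator createDataLengthValidator(int expectedBytes, String typeName) {
    return data -> {
        if (data.length != expectedBytes) {
            throw new IllegalArgumentException(
                    "Size of data received for '" + typeName + "' is " + data.length
                            + " bytes, but expected " + expectedBytes + " bytes.");
        }
    };
}
```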
flink_RawFormatDeserializationSchema_createConverter_rdh | /**
* Creates a runtime converter.
*/
private static DeserializationRuntimeConverter createConverter(LogicalType type, String charsetName, boolean isBigEndian) {
switch (type.getTypeRoot()) {
case CHAR :
case VARCHAR :
return createStringConverter(charsetName);
case VARBINARY :
case BINARY :
return data -> data;
case RAW :
return RawValueData::fromBytes;
case BOOLEAN :
return data -> data[0] != 0;
case TINYINT :
return data -> data[0];
case SMALLINT :
return createEndiannessAwareConverter(isBigEndian, segment -> segment.getShortBigEndian(0), segment -> segment.getShortLittleEndian(0));
case INTEGER :
return createEndiannessAwareConverter(isBigEndian, segment -> segment.getIntBigEndian(0), segment -> segment.getIntLittleEndian(0));
case BIGINT :
return createEndiannessAwareConverter(isBigEndian, segment -> segment.getLongBigEndian(0), segment -> segment.getLongLittleEndian(0));
case FLOAT :
return createEndiannessAwareConverter(isBigEndian, segment -> segment.getFloatBigEndian(0), segment -> segment.getFloatLittleEndian(0));
case DOUBLE :
return createEndiannessAwareConverter(isBigEndian, segment -> segment.getDoubleBigEndian(0), segment -> segment.getDoubleLittleEndian(0));
default :
throw new UnsupportedOperationException("'raw' format currently doesn't support type: " +
type);
}
} | 3.26 |
flink_ExpressionResolver_getExpandingResolverRules_rdh | /**
* List of rules for (possibly) expanding the list of unresolved expressions.
*/
public static List<ResolverRule> getExpandingResolverRules() {
return Arrays.asList(ResolverRules.UNWRAP_API_EXPRESSION, ResolverRules.LOOKUP_CALL_BY_NAME, ResolverRules.FLATTEN_STAR_REFERENCE, ResolverRules.EXPAND_COLUMN_FUNCTIONS);
} | 3.26 |
flink_ExpressionResolver_resolve_rdh | /**
* Resolves given expressions with configured set of rules. All expressions of an operation
* should be given at once as some rules might assume the order of expressions.
*
* <p>After this method is applied the returned expressions should be ready to be converted to
* planner specific expressions.
*
* @param expressions
* list of expressions to resolve.
* @return resolved list of expression
*/
public List<ResolvedExpression> resolve(List<Expression> expressions) {
final Function<List<Expression>, List<Expression>> resolveFunction = concatenateRules(getAllResolverRules());
final List<Expression> resolvedExpressions = resolveFunction.apply(expressions);
return resolvedExpressions.stream().map(e -> e.accept(VERIFY_RESOLUTION_VISITOR)).collect(Collectors.toList());
} | 3.26 |
flink_ExpressionResolver_resolverFor_rdh | /**
* Creates a builder for {@link ExpressionResolver}. One can add additional properties to the
* resolver like e.g. {@link GroupWindow} or {@link OverWindow}. You can also add additional
* {@link ResolverRule}.
*
* @param tableConfig
* general configuration
* @param tableCatalog
* a way to lookup a table reference by name
* @param functionLookup
* a way to lookup call by name
* @param typeFactory
* a way to lookup and create data types
* @param inputs
* inputs to use for field resolution
* @return builder for resolver
*/
public static ExpressionResolverBuilder resolverFor(TableConfig tableConfig, ClassLoader userClassLoader, TableReferenceLookup tableCatalog, FunctionLookup functionLookup, DataTypeFactory typeFactory, SqlExpressionResolver sqlExpressionResolver, QueryOperation... inputs) {
return new ExpressionResolverBuilder(inputs, tableConfig, userClassLoader, tableCatalog, functionLookup, typeFactory, sqlExpressionResolver);
} | 3.26 |
flink_LongValueComparator_supportsSerializationWithKeyNormalization_rdh | // --------------------------------------------------------------------------------------------
// unsupported normalization
// --------------------------------------------------------------------------------------------
@Override
public boolean supportsSerializationWithKeyNormalization() {
return false;
} | 3.26 |
flink_AbstractNonHaServices_getCheckpointRecoveryFactory_rdh | // ----------------------------------------------------------------------
// HighAvailabilityServices method implementations
// ----------------------------------------------------------------------
@Override
public CheckpointRecoveryFactory getCheckpointRecoveryFactory() {
synchronized(lock) {
checkNotShutdown();
return new StandaloneCheckpointRecoveryFactory();
}
} | 3.26 |
flink_AbstractNonHaServices_checkNotShutdown_rdh | // ----------------------------------------------------------------------
// Helper methods
// ----------------------------------------------------------------------
@GuardedBy("lock")
protected void checkNotShutdown() {
checkState(!shutdown, "high availability services are shut down");
} | 3.26 |
flink_CleanupRetryStrategyFactory_createRetryStrategy_rdh | /**
* Creates the {@link RetryStrategy} instance based on the passed {@link Configuration}.
*/
public RetryStrategy createRetryStrategy(Configuration configuration) {
final String configuredRetryStrategy = configuration.getString(CleanupOptions.CLEANUP_STRATEGY);
if (isRetryStrategy(CleanupOptions.FIXED_DELAY_LABEL, configuration.getString(CleanupOptions.CLEANUP_STRATEGY))) {
return createFixedRetryStrategy(configuration);
} else if (isRetryStrategy(CleanupOptions.EXPONENTIAL_DELAY_LABEL, configuration.getString(CleanupOptions.CLEANUP_STRATEGY))) {
return createExponentialBackoffRetryStrategy(configuration);
} else if (retryingDisabled(configuredRetryStrategy)) {
return createNoRetryStrategy();
}
throw new IllegalArgumentException(createInvalidCleanupStrategyErrorMessage(configuredRetryStrategy));
} | 3.26 |
flink_FlinkExtendedParser_parseSet_rdh | /**
* Convert the statement to {@link SetOperation} with Flink's parse rule.
*
* @return the {@link SetOperation}, empty if the statement is not set command.
*/
public static Optional<Operation> parseSet(String statement) {
if (SetOperationParseStrategy.INSTANCE.match(statement)) {
return Optional.of(SetOperationParseStrategy.INSTANCE.m0(statement));
}
return Optional.empty();
} | 3.26 |
flink_ListTypeInfo_isBasicType_rdh | // ------------------------------------------------------------------------
// TypeInformation implementation
// ------------------------------------------------------------------------
@Override
public boolean isBasicType() {
return false;
} | 3.26 |
flink_ListTypeInfo_getElementTypeInfo_rdh | // ------------------------------------------------------------------------
// ListTypeInfo specific properties
// ------------------------------------------------------------------------
/**
* Gets the type information for the elements contained in the list
*/
public TypeInformation<T> getElementTypeInfo() {
return elementTypeInfo;
} | 3.26 |
flink_HadoopInputFormatBase_configure_rdh | // --------------------------------------------------------------------------------------------
// InputFormat
// --------------------------------------------------------------------------------------------
@Override
public void configure(Configuration parameters) {
// enforce sequential configuration() calls
synchronized(CONFIGURE_MUTEX) {
// configure MR InputFormat if necessary
if (this.mapredInputFormat instanceof Configurable) {
((Configurable) (this.mapredInputFormat)).setConf(this.jobConf);
} else if (this.mapredInputFormat instanceof JobConfigurable) {
((JobConfigurable) (this.mapredInputFormat)).configure(this.jobConf);
}
}
} | 3.26 |
flink_HadoopInputFormatBase_getFileStats_rdh | // --------------------------------------------------------------------------------------------
// Helper methods
// --------------------------------------------------------------------------------------------
private FileBaseStatistics getFileStats(FileBaseStatistics cachedStats, org.apache.hadoop.fs.Path[] hadoopFilePaths, ArrayList<FileStatus> files) throws IOException {
long latestModTime = 0L;
// get the file info and check whether the cached statistics are still valid.
for (org.apache.hadoop.fs.Path hadoopPath : hadoopFilePaths) {
final Path filePath = new Path(hadoopPath.toUri());
final FileSystem fs = FileSystem.get(filePath.toUri());
final FileStatus file = fs.getFileStatus(filePath);
latestModTime = Math.max(latestModTime, file.getModificationTime());
// enumerate all files and check their modification time stamp.
if (file.isDir()) {
FileStatus[] fss = fs.listStatus(filePath);
files.ensureCapacity(files.size() + fss.length);
for (FileStatus s : fss) {
if (!s.isDir()) {
files.add(s);
latestModTime = Math.max(s.getModificationTime(), latestModTime);
}
}
} else {
files.add(file);
}
}
// check whether the cached statistics are still valid, if we have any
if ((cachedStats != null) && (latestModTime <= cachedStats.getLastModificationTime())) {
return cachedStats;
}
// calculate the whole length
long len = 0;
for (FileStatus s : files) {
len += s.getLen();
}
// sanity check
if (len <= 0) {
len = BaseStatistics.SIZE_UNKNOWN;
}
return new FileBaseStatistics(latestModTime, len, BaseStatistics.AVG_RECORD_BYTES_UNKNOWN);
} | 3.26 |
flink_LocalTimeTypeInfo_hashCode_rdh | // --------------------------------------------------------------------------------------------
@Override
public int hashCode() {
return Objects.hash(clazz, serializer, comparatorClass);
} | 3.26 |