name (string, 12–178 chars) | code_snippet (string, 8–36.5k chars) | score (float64, 3.26–3.68)
---|---|---|
flink_BroadcastVariableKey_hashCode_rdh | // ---------------------------------------------------------------------------------------------
@Override
public int hashCode() {
return ((31 * superstep) + (47 * name.hashCode())) + (83 * vertexId.hashCode());
} | 3.26 |
flink_BroadcastVariableKey_getVertexId_rdh | // ---------------------------------------------------------------------------------------------
public JobVertexID getVertexId() {
return vertexId;
} | 3.26 |
flink_AbstractPythonScalarFunctionOperator_createUserDefinedFunctionsProto_rdh | /**
* Gets the proto representation of the Python user-defined functions to be executed.
*/
@Override
public UserDefinedFunctions createUserDefinedFunctionsProto() {
return ProtoUtils.createUserDefinedFunctionsProto(getRuntimeContext(), scalarFunctions, config.get(PYTHON_METRIC_ENABLED), config.get(PYTHON_PROFILE_ENABLED));
} | 3.26 |
flink_PlannerContext_getBuiltinSqlOperatorTable_rdh | /**
* Returns the built-in operator table and the external operator table for this environment.
*/
private SqlOperatorTable getBuiltinSqlOperatorTable() {
return SqlOperatorTables.chain(new FunctionCatalogOperatorTable(context.getFunctionCatalog(), context.getCatalogManager().getDataTypeFactory(), typeFactory, context.getRexFactory()), FlinkSqlOperatorTable.instance(context.isBatchMode()));
} | 3.26 |
flink_PlannerContext_getSqlParserConfig_rdh | /**
* Returns the SQL parser config for this environment including a custom Calcite configuration.
*/
private Config getSqlParserConfig() {
// we use Java lex because back ticks are easier than double quotes in
// programming and cases are preserved
return JavaScalaConversionUtil.<SqlParser.Config>toJava(getCalciteConfig().getSqlParserConfig()).orElseGet(() -> {
SqlConformance conformance = getSqlConformance();
return SqlParser.config().withParserFactory(FlinkSqlParserFactories.create(conformance)).withConformance(conformance).withLex(Lex.JAVA).withIdentifierMaxLength(256);
});
} | 3.26 |
flink_PlannerContext_getSqlToRelConverterConfig_rdh | /**
* Returns the {@link SqlToRelConverter} config.
*
* <p>`expand` is set to false, and each sub-query becomes a
* {@link org.apache.calcite.rex.RexSubQuery}.
*/
private Config getSqlToRelConverterConfig() {
return JavaScalaConversionUtil.<SqlToRelConverter.Config>toJava(getCalciteConfig().getSqlToRelConverterConfig()).orElseGet(() -> {
SqlToRelConverter.Config config = SqlToRelConverter.config().withTrimUnusedFields(false).withHintStrategyTable(FlinkHintStrategies.createHintStrategyTable()).withInSubQueryThreshold(Integer.MAX_VALUE).withExpand(false).withRelBuilderFactory(FlinkRelFactories.FLINK_REL_BUILDER());
// disable project merge in the sql2rel phase; let it be done by the optimizer
boolean mergeProjectsDuringSqlToRel = context.getTableConfig().getConfiguration().getBoolean(OptimizerConfigOptions.TABLE_OPTIMIZER_SQL2REL_PROJECT_MERGE_ENABLED);
if (!mergeProjectsDuringSqlToRel) {
config = config.addRelBuilderConfigTransform(c -> c.withBloat(-1));
}
return config;
});
} | 3.26 |
flink_PlannerContext_getSqlOperatorTable_rdh | /**
* Returns the operator table for this environment including a custom Calcite configuration.
*/
private SqlOperatorTable getSqlOperatorTable(CalciteConfig calciteConfig) {
return JavaScalaConversionUtil.<SqlOperatorTable>toJava(calciteConfig.getSqlOperatorTable()).map(operatorTable -> {
if (calciteConfig.replacesSqlOperatorTable()) {
return operatorTable;
} else {
return SqlOperatorTables.chain(getBuiltinSqlOperatorTable(), operatorTable);
}
}).orElseGet(this::getBuiltinSqlOperatorTable);
} | 3.26 |
flink_MergeIterator_next_rdh | /**
* Gets the next smallest element, with respect to the definition of order implied by the {@link TypeSerializer} provided to this iterator.
*
* @return The next element if the iterator has another element, null otherwise.
* @see org.apache.flink.util.MutableObjectIterator#next()
*/
@Override
public E next() throws IOException {
if (this.heap.size() > 0) { // get the smallest element
final HeadStream<E> top = this.heap.peek();
E result = top.getHead();
// read an element
if (!top.nextHead()) {
this.heap.poll();
} else {
this.heap.adjustTop();
}
return result;
} else {
return null;
}
} | 3.26 |
flink_StatsDReporter_reportCounter_rdh | // ------------------------------------------------------------------------
private void reportCounter(final String name, final Counter counter) {
send(name, counter.getCount());
} | 3.26 |
flink_RexLiteralUtil_toFlinkInternalValue_rdh | /**
* Converts a value from Calcite's {@link Comparable} data structures to Flink internal data
* structures and also tries to be a bit flexible by accepting usual Java types such as String
* and boxed numerics.
*
* <p>In case of symbol types, this function will return the provided value, checking that it's an
* {@link Enum}.
*
* <p>This function is essentially like {@link FlinkTypeFactory#toLogicalType(RelDataType)} but
* for values.
*
* <p>Check {@link RexLiteral#valueMatchesType(Comparable, SqlTypeName, boolean)} for details on
* the {@link Comparable} data structures and {@link org.apache.flink.table.data.RowData} for
* details on Flink's internal data structures.
*
* @param value
* the value in Calcite's {@link Comparable} data structures
* @param valueType
* the type of the value
* @return the value in Flink's internal data structures
* @throws IllegalArgumentException
* in case the class of value does not match the expectations
* of valueType
*/
public static Object toFlinkInternalValue(Comparable<?> value, LogicalType valueType) {
if (value == null) {
return null;
}
switch (valueType.getTypeRoot()) {
case CHAR :
case VARCHAR :
if (value instanceof NlsString) {
return BinaryStringData.fromString(((NlsString) (value)).getValue());
}
if (value instanceof String) {
return BinaryStringData.fromString(((String) (value)));
}
break;
case BOOLEAN :
if (value instanceof Boolean) {
return value;
}
break;
case BINARY :
case VARBINARY :
if (value instanceof ByteString) {
return ((ByteString) (value)).getBytes();
}
break;
case DECIMAL :
if (value instanceof BigDecimal) {
return DecimalData.fromBigDecimal(((BigDecimal) (value)), LogicalTypeChecks.getPrecision(valueType), LogicalTypeChecks.getScale(valueType));
}
break;
case TINYINT :
if (value instanceof Number) {
return ((Number) (value)).byteValue();
}
break;
case SMALLINT :
if (value instanceof Number) {
return ((Number) (value)).shortValue();
}
break;
case INTEGER :
case INTERVAL_YEAR_MONTH :
if (value instanceof Number) {
return ((Number) (value)).intValue();
}
break;
case BIGINT :
case INTERVAL_DAY_TIME :
if (value instanceof Number) {
return ((Number) (value)).longValue();
}
break;
case FLOAT :
if (value instanceof Number) {
return ((Number) (value)).floatValue();
}
break;
case DOUBLE :
if (value instanceof Number) {
return ((Number) (value)).doubleValue();
}
break;
case DATE :
if (value instanceof DateString) {
return ((DateString) (value)).getDaysSinceEpoch();
}
if (value instanceof Number) {
return ((Number) (value)).intValue();
}
break;
case TIME_WITHOUT_TIME_ZONE :
if (value instanceof TimeString) {
return ((TimeString) (value)).getMillisOfDay();
}
if (value instanceof Number) {
return ((Number) (value)).intValue();
}
break;
case TIMESTAMP_WITHOUT_TIME_ZONE :
if (value instanceof TimestampString) {
return TimestampData.fromLocalDateTime(toLocalDateTime(((TimestampString) (value))));
}
break;
case TIMESTAMP_WITH_LOCAL_TIME_ZONE :
if (value instanceof TimestampString) {
return TimestampData.fromInstant(toLocalDateTime(((TimestampString) (value))).atOffset(ZoneOffset.UTC).toInstant());
}
break;
case DISTINCT_TYPE :
return toFlinkInternalValue(value, ((DistinctType) (valueType)).getSourceType());
case SYMBOL :
if (value instanceof Enum) {
return value;
}
break;
case TIMESTAMP_WITH_TIME_ZONE :
case ARRAY :
case MULTISET :
case MAP :
case ROW :
case STRUCTURED_TYPE :
case NULL :
case UNRESOLVED :
throw new CodeGenException("Type not supported: " + valueType);
}
throw new IllegalStateException((("Unexpected class " + value.getClass()) + " for value of type ") + valueType);
} | 3.26 |
flink_IOManager_close_rdh | /**
* Removes all temporary files.
*/
@Override
public void close() throws Exception {
fileChannelManager.close();
} | 3.26 |
flink_IOManager_getSpillingDirectoriesPaths_rdh | /**
* Gets the directories that the I/O manager spills to, as path strings.
*
* @return The directories that the I/O manager spills to, as path strings.
*/
public String[] getSpillingDirectoriesPaths() {
File[] paths = fileChannelManager.getPaths();
String[] strings = new String[paths.length];
for (int i = 0; i < strings.length; i++) {
strings[i] = paths[i].getAbsolutePath();
}
return strings;
} | 3.26 |
flink_IOManager_createBlockChannelWriter_rdh | // ------------------------------------------------------------------------
// Reader / Writer instantiations
// ------------------------------------------------------------------------
/**
* Creates a block channel writer that writes to the given channel. The writer adds the written
* segment to its return-queue afterwards (to allow for asynchronous implementations).
*
* @param channelID
* The descriptor for the channel to write to.
* @return A block channel writer that writes to the given channel.
* @throws IOException
* Thrown, if the channel for the writer could not be opened.
*/
public BlockChannelWriter<MemorySegment> createBlockChannelWriter(ID channelID) throws IOException {
return createBlockChannelWriter(channelID, new LinkedBlockingQueue<>());
} | 3.26 |
flink_IOManager_createChannel_rdh | // ------------------------------------------------------------------------
// Channel Instantiations
// ------------------------------------------------------------------------
/**
* Creates a new {@link ID} in one of the temp directories. Multiple invocations of this method
* spread the channels evenly across the different directories.
*
* @return A channel to a temporary directory.
*/
public ID createChannel() {
return fileChannelManager.createChannel();
} | 3.26 |
flink_IOManager_deleteChannel_rdh | /**
* Deletes the file underlying the given channel. If the channel is still open, this call may
* fail.
*
* @param channel
* The channel to be deleted.
*/
public static void deleteChannel(ID channel) {
if (channel != null) {
if (channel.getPathFile().exists() && (!channel.getPathFile().delete())) {
LOG.warn("IOManager failed to delete temporary file {}", channel.getPath());
}
}
} | 3.26 |
flink_IOManager_createChannelEnumerator_rdh | /**
* Creates a new {@link Enumerator}, spreading the channels in a round-robin fashion across the
* temporary file directories.
*
* @return An enumerator for channels.
*/
public Enumerator createChannelEnumerator() {
return fileChannelManager.createChannelEnumerator();
} | 3.26 |
flink_IOManager_createBlockChannelReader_rdh | /**
* Creates a block channel reader that reads blocks from the given channel. The reader pushes
* full memory segments (with the read data) to its "return queue", to allow for asynchronous
* read implementations.
*
* @param channelID
* The descriptor for the channel to read from.
* @return A block channel reader that reads from the given channel.
* @throws IOException
* Thrown, if the channel for the reader could not be opened.
*/
public BlockChannelReader<MemorySegment> createBlockChannelReader(ID channelID) throws IOException {
return createBlockChannelReader(channelID, new LinkedBlockingQueue<>());
} | 3.26 |
flink_ProducerMergedPartitionFileWriter_calculateSizeAndFlushBuffers_rdh | /**
* Computes each buffer's file offset and creates the buffers to be flushed.
*
* @param toWrite
* all buffers to write to create {@link ProducerMergedPartitionFileIndex.FlushedBuffer}s
* @param buffers
* receive the created {@link ProducerMergedPartitionFileIndex.FlushedBuffer}
*/
private void calculateSizeAndFlushBuffers(List<SubpartitionBufferContext> toWrite, List<ProducerMergedPartitionFileIndex.FlushedBuffer> buffers) throws IOException {
List<Tuple2<Buffer, Integer>> buffersToFlush = new ArrayList<>();
long v3 = 0;
for (SubpartitionBufferContext subpartitionBufferContext : toWrite) {
int v5 = subpartitionBufferContext.getSubpartitionId();
for (SegmentBufferContext segmentBufferContext : subpartitionBufferContext.getSegmentBufferContexts()) {
List<Tuple2<Buffer, Integer>> bufferAndIndexes = segmentBufferContext.getBufferAndIndexes();
buffersToFlush.addAll(bufferAndIndexes);
for (Tuple2<Buffer, Integer> bufferWithIndex : segmentBufferContext.getBufferAndIndexes()) {
Buffer buffer = bufferWithIndex.f0;
buffers.add(new ProducerMergedPartitionFileIndex.FlushedBuffer(v5, bufferWithIndex.f1, totalBytesWritten + v3, buffer.readableBytes() + BufferReaderWriterUtil.HEADER_LENGTH));
v3 += buffer.readableBytes() + BufferReaderWriterUtil.HEADER_LENGTH;
}
}
}
flushBuffers(buffersToFlush, v3);
buffersToFlush.forEach(bufferWithIndex -> bufferWithIndex.f0.recycleBuffer());
} | 3.26 |
flink_ProducerMergedPartitionFileWriter_flushBuffers_rdh | /**
* Write all buffers to the disk.
*/
private void flushBuffers(List<Tuple2<Buffer, Integer>> bufferAndIndexes, long expectedBytes) throws IOException {
if (bufferAndIndexes.isEmpty()) {
return;
}
ByteBuffer[] bufferWithHeaders = generateBufferWithHeaders(bufferAndIndexes);
BufferReaderWriterUtil.writeBuffers(dataFileChannel, expectedBytes, bufferWithHeaders);
totalBytesWritten += expectedBytes;
} | 3.26 |
flink_ProducerMergedPartitionFileWriter_flush_rdh | // ------------------------------------------------------------------------
// Internal Methods
// ------------------------------------------------------------------------
/**
* Called in single-threaded ioExecutor. Order is guaranteed.
*/
private void flush(List<SubpartitionBufferContext> toWrite, CompletableFuture<Void> flushSuccessNotifier) {
try {
List<ProducerMergedPartitionFileIndex.FlushedBuffer> buffers = new ArrayList<>();
calculateSizeAndFlushBuffers(toWrite, buffers);
partitionFileIndex.addBuffers(buffers);
flushSuccessNotifier.complete(null);
} catch (IOException exception) {
ExceptionUtils.rethrow(exception);
}
} | 3.26 |
flink_PageSizeUtil_getSystemPageSize_rdh | /**
* Tries to get the system page size. If the page size cannot be determined, this returns -1.
*
* <p>This internally relies on the presence of "unsafe" and the resolution via some Netty
* utilities.
*/
public static int getSystemPageSize() {
try {
return PageSizeUtilInternal.getSystemPageSize();
} catch (Throwable t) {
ExceptionUtils.rethrowIfFatalError(t);
return PAGE_SIZE_UNKNOWN;
}
} | 3.26 |
flink_PageSizeUtil_getSystemPageSizeOrDefault_rdh | /**
* Tries to get the system page size. If the page size cannot be determined, this returns the
* {@link #DEFAULT_PAGE_SIZE}.
*/
public static int getSystemPageSizeOrDefault() {
final int pageSize = getSystemPageSize();
return pageSize == PAGE_SIZE_UNKNOWN ? DEFAULT_PAGE_SIZE : pageSize;
} | 3.26 |
flink_PageSizeUtil_getSystemPageSizeOrConservativeMultiple_rdh | /**
* Tries to get the system page size. If the page size cannot be determined, this returns the
* {@link #CONSERVATIVE_PAGE_SIZE_MULTIPLE}.
*/
public static int getSystemPageSizeOrConservativeMultiple() {
final int pageSize = getSystemPageSize();
return pageSize == PAGE_SIZE_UNKNOWN ? CONSERVATIVE_PAGE_SIZE_MULTIPLE : pageSize;
} | 3.26 |
flink_PojoFieldUtils_writeField_rdh | /**
* Writes a field to the given {@link DataOutputView}.
*
* <p>This write method avoids Java serialization, by writing only the classname of the field's
* declaring class and the field name. The written field can be read using {@link #readField(DataInputView, ClassLoader)}.
*
* @param out
* the output view to write to.
* @param field
* the field to write.
*/
static void writeField(DataOutputView out, Field field) throws IOException {
Class<?> declaringClass = field.getDeclaringClass();
out.writeUTF(declaringClass.getName());
out.writeUTF(field.getName());
} | 3.26 |
flink_PojoFieldUtils_readField_rdh | /**
* Reads a field from the given {@link DataInputView}.
*
* <p>This read method avoids Java serialization, by reading the classname of the field's
* declaring class and dynamically loading it. The field is also read by field name and obtained
* via reflection.
*
* @param in
* the input view to read from.
* @param userCodeClassLoader
* the user classloader.
* @return the read field.
*/
static Field readField(DataInputView in, ClassLoader userCodeClassLoader) throws IOException {
Class<?> declaringClass = InstantiationUtil.resolveClassByName(in, userCodeClassLoader);
String fieldName = in.readUTF();
return getField(fieldName, declaringClass);
} | 3.26 |
flink_PojoFieldUtils_getField_rdh | /**
* Finds a field by name from its declaring class. This also searches for the field in super
* classes.
*
* @param fieldName
* the name of the field to find.
* @param declaringClass
* the declaring class of the field.
* @return the field.
*/
@Nullable
static Field getField(String fieldName, Class<?> declaringClass) {
Class<?> clazz = declaringClass;
while (clazz != null) {
try {
Field field = clazz.getDeclaredField(fieldName);
field.setAccessible(true);
return field;
} catch (NoSuchFieldException e) {
clazz = clazz.getSuperclass();
}
}
return null;
} | 3.26 |
flink_AbstractStreamingJoinOperator_of_rdh | /**
* Creates an {@link AssociatedRecords} which represents the records associated to the input
* row.
*/
public static AssociatedRecords of(RowData input, boolean inputIsLeft, JoinRecordStateView otherSideStateView, JoinCondition condition) throws Exception {
List<OuterRecord> associations = new ArrayList<>();
if (otherSideStateView instanceof OuterJoinRecordStateView) {
OuterJoinRecordStateView outerStateView = ((OuterJoinRecordStateView) (otherSideStateView));
Iterable<Tuple2<RowData, Integer>> records = outerStateView.getRecordsAndNumOfAssociations();
for (Tuple2<RowData, Integer> record : records) {
boolean matched = (inputIsLeft) ? condition.apply(input, record.f0) : condition.apply(record.f0, input);
if (matched) {
associations.add(new OuterRecord(record.f0, record.f1));
}
}
} else {
Iterable<RowData> records = otherSideStateView.getRecords();
for (RowData record : records) {
boolean matched = (inputIsLeft) ? condition.apply(input, record) : condition.apply(record, input);
if (matched) {
// use -1 as the default number of associations
associations.add(new OuterRecord(record, -1));
}
}
}
return new AssociatedRecords(associations);
} | 3.26 |
flink_AbstractStreamingJoinOperator_getRecords_rdh | /**
* Gets the iterable of records. This is usually called when the {@link AssociatedRecords} is from the inner side.
*/
public Iterable<RowData> getRecords() {
return new RecordsIterable(records);
} | 3.26 |
flink_SideOutputDataStream_cache_rdh | /**
* Caches the intermediate result of the transformation. Only bounded streams are supported, and
* currently only block mode is supported. The cache is generated lazily the first time the
* intermediate result is computed. The cache will be cleared when {@link CachedDataStream#invalidate()} is called or the {@link StreamExecutionEnvironment} is closed.
*
* @return A CachedDataStream that can be used in a later job to reuse the cached intermediate result.
*/
@PublicEvolving
public CachedDataStream<T> cache() {
return new CachedDataStream<>(this.environment, this.transformation);
} | 3.26 |
flink_AbstractStreamOperator_m2_rdh | // ------------------------------------------------------------------------
// Watermark handling
// ------------------------------------------------------------------------
/**
* Returns a {@link InternalTimerService} that can be used to query current processing time and
* event time and to set timers. An operator can have several timer services, where each has its
* own namespace serializer. Timer services are differentiated by the string key that is given
* when requesting them, if you call this method with the same key multiple times you will get
* the same timer service instance in subsequent requests.
*
* <p>Timers are always scoped to a key, the currently active key of a keyed stream operation.
* When a timer fires, this key will also be set as the currently active key.
*
* <p>Each timer has attached metadata, the namespace. Different timer services can have a
* different namespace type. If you don't need namespace differentiation you can use {@link VoidNamespaceSerializer} as the namespace serializer.
*
* @param name
* The name of the requested timer service. If no service exists under the given
* name a new one will be created and returned.
* @param namespaceSerializer
* {@code TypeSerializer} for the timer namespace.
* @param triggerable
* The {@link Triggerable} that should be invoked when timers fire
* @param <N>
* The type of the timer namespace.
*/
public <K, N> InternalTimerService<N> m2(String name, TypeSerializer<N> namespaceSerializer, Triggerable<K, N> triggerable) {
if (timeServiceManager == null) {
throw new RuntimeException("The timer service has not been initialized.");
}
@SuppressWarnings("unchecked")InternalTimeServiceManager<K> keyedTimeServiceHandler = ((InternalTimeServiceManager<K>)
(timeServiceManager));
KeyedStateBackend<K> keyedStateBackend = getKeyedStateBackend();
checkState(keyedStateBackend != null, "Timers can only be used on keyed operators.");
return keyedTimeServiceHandler.getInternalTimerService(name, keyedStateBackend.getKeySerializer(), namespaceSerializer, triggerable);
} | 3.26 |
flink_AbstractStreamOperator_initializeState_rdh | /**
* Stream operators with state which can be restored need to override this hook method.
*
* @param context
* context that allows to register different states.
*/
@Override
public void initializeState(StateInitializationContext context) throws Exception {
} | 3.26 |
flink_AbstractStreamOperator_isUsingCustomRawKeyedState_rdh | /**
* Indicates whether or not implementations of this class are writing to the raw keyed state
* streams on snapshots, using {@link #snapshotState(StateSnapshotContext)}. If yes, subclasses
* should override this method to return {@code true}.
*
* <p>Subclasses need to explicitly indicate the use of raw keyed state because, internally, the
* {@link AbstractStreamOperator} may attempt to read from it as well to restore heap-based
* timers and ultimately fail with read errors. By setting this flag to {@code true}, this
* allows the {@link AbstractStreamOperator} to know that the data written in the raw keyed
* states were not written by the timer services, and skips the timer restore attempt.
*
* <p>Please refer to FLINK-19741 for further details.
*
* <p>TODO: this method can be removed once all timers are moved to be managed by state
* backends.
*
* @return flag indicating whether or not this operator is writing to raw keyed state via {@link #snapshotState(StateSnapshotContext)}.
*/
@Internal
protected boolean isUsingCustomRawKeyedState() {
return false;
} | 3.26 |
flink_AbstractStreamOperator_snapshotState_rdh | /**
* Stream operators with state that want to participate in a snapshot need to override this
* hook method.
*
* @param context
* context that provides information and means required for taking a snapshot
*/
@Override
public void snapshotState(StateSnapshotContext context) throws Exception {
} | 3.26 |
flink_AbstractStreamOperator_getPartitionedState_rdh | /**
* Creates a partitioned state handle, using the state backend configured for this task.
*
* @throws IllegalStateException
* Thrown, if the key/value state was already initialized.
* @throws Exception
* Thrown, if the state backend cannot create the key/value state.
*/
protected <S extends State, N> S getPartitionedState(N namespace, TypeSerializer<N> namespaceSerializer, StateDescriptor<S, ?> stateDescriptor) throws Exception {
return stateHandler.getPartitionedState(namespace, namespaceSerializer, stateDescriptor);
}
@Override
@SuppressWarnings({ "unchecked", "rawtypes"
} | 3.26 |
flink_AbstractStreamOperator_getExecutionConfig_rdh | // ------------------------------------------------------------------------
// Properties and Services
// ------------------------------------------------------------------------
/**
* Gets the execution config defined on the execution environment of the job to which this
* operator belongs.
*
* @return The job's execution config.
*/
public ExecutionConfig getExecutionConfig() {
return container.getExecutionConfig();
} | 3.26 |
flink_AbstractStreamOperator_m1_rdh | // ------------------------------------------------------------------------
// Metrics
// ------------------------------------------------------------------------
// ------- One input stream
public void m1(LatencyMarker latencyMarker) throws Exception {
reportOrForwardLatencyMarker(latencyMarker);
} | 3.26 |
flink_AbstractStreamOperator_getRuntimeContext_rdh | /**
* Returns a context that allows the operator to query information about the execution and also
* to interact with systems such as broadcast variables and managed state. This also allows to
* register timers.
*/
@VisibleForTesting
public StreamingRuntimeContext getRuntimeContext() {
return runtimeContext;
} | 3.26 |
flink_AbstractStreamOperator_setup_rdh | // ------------------------------------------------------------------------
// Life Cycle
// ------------------------------------------------------------------------
@Override
public void setup(StreamTask<?, ?> containingTask, StreamConfig config, Output<StreamRecord<OUT>> output) {
final Environment environment = containingTask.getEnvironment();
this.container = containingTask;
this.config = config;
this.output = output;
this.metrics = environment.getMetricGroup().getOrAddOperator(config.getOperatorID(), config.getOperatorName());
this.combinedWatermark = IndexedCombinedWatermarkStatus.forInputsCount(2);
try {
Configuration v1 = environment.getTaskManagerInfo().getConfiguration();
int historySize = v1.getInteger(MetricOptions.LATENCY_HISTORY_SIZE);
if (historySize <= 0) {
LOG.warn("{} has been set to a value equal or below 0: {}. Using default.", MetricOptions.LATENCY_HISTORY_SIZE, historySize);
historySize = MetricOptions.LATENCY_HISTORY_SIZE.defaultValue();
}
final String configuredGranularity = v1.getString(MetricOptions.LATENCY_SOURCE_GRANULARITY);
LatencyStats.Granularity granularity;
try {
granularity = LatencyStats.Granularity.valueOf(configuredGranularity.toUpperCase(Locale.ROOT));
} catch (IllegalArgumentException iae) {
granularity = Granularity.OPERATOR;
LOG.warn("Configured value {} option for {} is invalid. Defaulting to {}.", configuredGranularity, MetricOptions.LATENCY_SOURCE_GRANULARITY.key(), granularity);
}
MetricGroup jobMetricGroup = this.metrics.getJobMetricGroup();
this.latencyStats = new LatencyStats(jobMetricGroup.addGroup("latency"), historySize, container.getIndexInSubtaskGroup(), getOperatorID(), granularity);
} catch (Exception e) {
LOG.warn("An error occurred while instantiating latency metrics.", e);
this.latencyStats = new LatencyStats(UnregisteredMetricGroups.createUnregisteredTaskManagerJobMetricGroup().addGroup("latency"), 1, 0, new OperatorID(), Granularity.SINGLE);
}
this.runtimeContext = new StreamingRuntimeContext(environment, environment.getAccumulatorRegistry().getUserMap(), getMetricGroup(), getOperatorID(), getProcessingTimeService(), null, environment.getExternalResourceInfoProvider());
stateKeySelector1 = config.getStatePartitioner(0, getUserCodeClassloader());
stateKeySelector2 = config.getStatePartitioner(1, getUserCodeClassloader());
}
/**
*
* @deprecated The {@link ProcessingTimeService} instance should be passed by the operator
constructor and this method will be removed along with {@link SetupableStreamOperator} | 3.26 |
flink_AbstractStreamOperator_open_rdh | /**
* This method is called immediately before any elements are processed, it should contain the
* operator's initialization logic, e.g. state initialization.
*
* <p>The default implementation does nothing.
*
* @throws Exception
* An exception in this method causes the operator to fail.
*/
@Override
public void open() throws Exception {
} | 3.26 |
flink_Tuple14_setFields_rdh | /**
* Sets new values to all fields of the tuple.
*
* @param f0
* The value for field 0
* @param f1
* The value for field 1
* @param f2
* The value for field 2
* @param f3
* The value for field 3
* @param f4
* The value for field 4
* @param f5
* The value for field 5
* @param f6
* The value for field 6
* @param f7
* The value for field 7
* @param f8
* The value for field 8
* @param f9
* The value for field 9
* @param f10
* The value for field 10
* @param f11
* The value for field 11
* @param f12
* The value for field 12
* @param f13
* The value for field 13
*/
public void setFields(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10, T11 f11, T12 f12, T13 f13) {
this.f0 = f0;
this.f1 = f1;
this.f2 = f2;
this.f3 = f3;
this.f4 = f4;
this.f5 = f5;
this.f6 = f6;
this.f7 = f7;
this.f8 = f8;
this.f9 = f9;
this.f10 = f10;
this.f11 = f11;
this.f12 = f12;
this.f13 = f13;
}
// -------------------------------------------------------------------------------------------------
// standard utilities
// -------------------------------------------------------------------------------------------------
/**
* Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7, f8,
* f9, f10, f11, f12, f13), where the individual fields are the value returned by calling {@link Object#toString} | 3.26 |
flink_Tuple14_of_rdh | /**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
public static <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13> Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13> of(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10, T11 f11, T12 f12, T13 f13) {
return new Tuple14<>(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13);
} | 3.26 |
flink_Tuple14_copy_rdh | /**
* Shallow tuple copy.
*
* @return A new Tuple with the same fields as this.
*/
@Override
@SuppressWarnings("unchecked")
public Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13> copy() {
return new Tuple14<>(this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7, this.f8, this.f9, this.f10, this.f11, this.f12, this.f13);
} | 3.26 |
flink_Tuple14_equals_rdh | /**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o
* the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple14)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple14 tuple = ((Tuple14) (o));
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
return false;
}
if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {
return false;
}
if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
return false;
}
if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) {
return false;
}
if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) {
return false;
}
if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) {
return false;
}
if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) {
return false;
}
if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) {
return false;
}
if (f8 != null ? !f8.equals(tuple.f8) : tuple.f8 != null) {
return false;
}
if (f9 != null ? !f9.equals(tuple.f9) : tuple.f9 != null) {
return false;
}
if (f10 != null ? !f10.equals(tuple.f10) : tuple.f10 != null) {
return false;
}
if (f11 != null ? !f11.equals(tuple.f11) : tuple.f11 != null) {
return false;
}
if (f12 != null ? !f12.equals(tuple.f12) : tuple.f12 != null) {
return false;
}
if (f13 != null ? !f13.equals(tuple.f13) : tuple.f13 != null) {
return false;
}
return true;
} | 3.26 |
flink_HiveParserDefaultGraphWalker_dispatchAndReturn_rdh | // Returns dispatch result
public <T> T dispatchAndReturn(Node nd, Stack<Node> ndStack) throws SemanticException {
Object[] nodeOutputs = null;
if (nd.getChildren() != null) {
nodeOutputs = new Object[nd.getChildren().size()];
int i = 0;
for (Node child : nd.getChildren()) {
nodeOutputs[i++] = retMap.get(child);
}
}
Object retVal = dispatcher.dispatch(nd, ndStack, nodeOutputs);
retMap.put(nd, retVal);
return ((T) (retVal));
} | 3.26 |
flink_HiveParserDefaultGraphWalker_startWalking_rdh | // starting point for walking.
public void startWalking(Collection<Node> startNodes, HashMap<Node, Object> nodeOutput) throws SemanticException {
toWalk.addAll(startNodes);
while (toWalk.size() > 0) {
Node nd = toWalk.remove(0);
walk(nd);
// Some walkers extending DefaultGraphWalker e.g. ForwardWalker
// do not use opQueue and rely uniquely in the toWalk structure,
// thus we store the results produced by the dispatcher here
// TODO: rewriting the logic of those walkers to use opQueue
if ((nodeOutput != null) && getDispatchedList().contains(nd)) {
nodeOutput.put(nd, retMap.get(nd));
}
}
// Store the results produced by the dispatcher
while (!opQueue.isEmpty()) {
Node node = opQueue.poll();
if ((nodeOutput != null) && getDispatchedList().contains(node)) {
nodeOutput.put(node, retMap.get(node));
}
}
} | 3.26 |
flink_HiveParserDefaultGraphWalker_walk_rdh | // walk the current operator and its descendants.
protected void walk(Node nd) throws SemanticException {
// Push the node in the stack
opStack.push(nd);
// While there are still nodes to dispatch...
while (!opStack.empty()) {
Node node = opStack.peek();
if ((node.getChildren() == null) || getDispatchedList().containsAll(node.getChildren())) {
// Dispatch current node
if (!getDispatchedList().contains(node)) {
dispatch(node, opStack);
opQueue.add(node);
}
opStack.pop();
continue;
}
// Add a single child and restart the loop
for (Node childNode : node.getChildren()) {
if (!getDispatchedList().contains(childNode)) {
opStack.push(childNode);
break;
}
}
} // end while
} | 3.26 |
flink_HiveParserDefaultGraphWalker_dispatch_rdh | // Dispatch the current operator.
public void dispatch(Node nd, Stack<Node> ndStack) throws SemanticException {
dispatchAndReturn(nd, ndStack);
} | 3.26 |
flink_TaskTracker_add_rdh | /**
*
* @return true, if this checkpoint id needs to be committed.
*/
public boolean add(long checkpointId, int task) {
Set<Integer> tasks = notifiedTasks.computeIfAbsent(checkpointId, k -> new HashSet<>());
tasks.add(task);
if (tasks.size() == numberOfTasks) {
notifiedTasks.headMap(checkpointId, true).clear();
return true;
}
return false;
} | 3.26 |
flink_EndOfData_write_rdh | // ------------------------------------------------------------------------
//
// These methods are inherited from the generic serialization of AbstractEvent
// but would require the CheckpointBarrier to be mutable. Since all serialization
// for events goes through the EventSerializer class, which has special serialization
// for the CheckpointBarrier, we don't need these methods
//
@Override
public void write(DataOutputView out) throws IOException {
throw new UnsupportedOperationException("This method should never be called");
} | 3.26 |
flink_EndOfData_equals_rdh | // ------------------------------------------------------------------------
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if ((o == null) || (getClass() != o.getClass())) {
return false;
}
EndOfData endOfData = ((EndOfData) (o));
return mode == endOfData.mode;
} | 3.26 |
flink_StreamSQLExample_main_rdh | // *************************************************************************
// PROGRAM
// *************************************************************************
public static void main(String[] args) throws Exception {
// set up the Java DataStream API
final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
// set up the Java Table API
final StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
final DataStream<Order> orderA = env.fromCollection(Arrays.asList(new Order(1L, "beer", 3), new Order(1L, "diaper", 4), new Order(3L, "rubber", 2)));
final DataStream<Order> orderB = env.fromCollection(Arrays.asList(new Order(2L, "pen", 3), new Order(2L, "rubber", 3), new Order(4L, "beer", 1)));
// convert the first DataStream to a Table object
// it will be used "inline" and is not registered in a catalog
final Table tableA = tableEnv.fromDataStream(orderA);
// convert the second DataStream and register it as a view
// it will be accessible under a name
tableEnv.createTemporaryView("TableB", orderB);
// union the two tables
final Table result = tableEnv.sqlQuery((("SELECT * FROM " + tableA) + " WHERE amount > 2 UNION ALL ") + "SELECT * FROM TableB WHERE amount < 2");
// convert the Table back to an insert-only DataStream of type `Order`
tableEnv.toDataStream(result, StreamSQLExample.Order.class).print();
// after the table program is converted to a DataStream program,
// we must use `env.execute()` to submit the job
env.execute();
} | 3.26 |
flink_PythonDriver_constructPythonCommands_rdh | /**
* Constructs the Python commands which will be executed in the Python process.
*
* @param pythonDriverOptions
* parsed Python command options
*/
static List<String> constructPythonCommands(final PythonDriverOptions pythonDriverOptions) {
final List<String> commands = new ArrayList<>();
// disable output buffer
commands.add("-u");
if (pythonDriverOptions.getEntryPointScript().isPresent()) {
commands.add(pythonDriverOptions.getEntryPointScript().get());
} else {
commands.add("-m");
commands.add(pythonDriverOptions.getEntryPointModule());
}
commands.addAll(pythonDriverOptions.getProgramArgs());
return commands;
} | 3.26 |
flink_FlinkSqlOperatorTable_instance_rdh | /**
* Returns the Flink operator table, creating it if necessary.
*/
public static synchronized FlinkSqlOperatorTable instance(boolean isBatchMode) {
FlinkSqlOperatorTable instance = cachedInstances.get(isBatchMode);
if (instance == null) {
// Creates and initializes the standard operator table.
// Uses two-phase construction, because we can't initialize the
// table until the constructor of the sub-class has completed.
instance = new FlinkSqlOperatorTable();
instance.init();
// ensure no dynamic functions are declared directly
validateNoDynamicFunction(instance);
// register functions based on batch or streaming mode
final FlinkSqlOperatorTable finalInstance = instance;
dynamicFunctions(isBatchMode).forEach(f -> finalInstance.register(f));
cachedInstances.put(isBatchMode, finalInstance);
}
return instance;
} | 3.26 |
flink_SqlGatewayServiceImpl_getSession_rdh | // --------------------------------------------------------------------------------------------
@VisibleForTesting
public Session getSession(SessionHandle sessionHandle) {
return sessionManager.getSession(sessionHandle);
} | 3.26 |
flink_DeclarativeSlotPoolService_onReleaseTaskManager_rdh | /**
* This method is called when a TaskManager is released. It can be overridden by subclasses.
*
* @param previouslyFulfilledRequirement
* the requirements previously fulfilled by the released TaskManager
*/
protected void onReleaseTaskManager(ResourceCounter previouslyFulfilledRequirement) {
} | 3.26 |
flink_DeclarativeSlotPoolService_onClose_rdh | /**
* This method is called when the slot pool service is closed. It can be overridden by
* subclasses.
*/
protected void onClose() {
} | 3.26 |
flink_DeclarativeSlotPoolService_onFailAllocation_rdh | /**
* This method is called when an allocation fails. It can be overridden by subclasses.
*
* @param previouslyFulfilledRequirements
* the requirements previously fulfilled by the failed allocation
*/
protected void onFailAllocation(ResourceCounter previouslyFulfilledRequirements) {
} | 3.26 |
flink_DataStreamSinkProvider_getParallelism_rdh | /**
* {@inheritDoc }
*
* <p>Note: If a custom parallelism is returned and {@link #consumeDataStream(ProviderContext,
* DataStream)} applies multiple transformations, make sure to set the same custom parallelism
* to each operator to not mess up the changelog.
*/
@Override
default Optional<Integer> getParallelism() {
return Optional.empty();
} | 3.26 |
flink_DataStreamSinkProvider_m0_rdh | /**
* Consumes the given Java {@link DataStream} and returns the sink transformation {@link DataStreamSink}.
*
* <p>Note: If the {@link CompiledPlan} feature should be supported, this method MUST set a
* unique identifier for each transformation/operator in the data stream. This enables stateful
* Flink version upgrades for streaming jobs. The identifier is used to map state back from a
* savepoint to an actual operator in the topology. The framework can generate topology-wide
* unique identifiers with {@link ProviderContext#generateUid(String)}.
*
* @see SingleOutputStreamOperator#uid(String)
*/
default DataStreamSink<?> m0(ProviderContext providerContext, DataStream<RowData> dataStream) {
return consumeDataStream(dataStream);
}
/**
* Consumes the given Java {@link DataStream} and returns the sink transformation {@link DataStreamSink}.
*
* @deprecated Use {@link DataStreamSinkProvider#consumeDataStream(ProviderContext, DataStream)} | 3.26 |
flink_AbstractCsvInputFormat_findNextLineStartOffset_rdh | /**
* Finds the next legal line separator and returns the next offset (the first byte offset of the next line).
*
* <p>NOTE: Because of how UTF-8 encoding works, the number of bytes of a character can be
* determined from its first byte alone, so we do not need an M*N traversal for the comparison.
*/
private long findNextLineStartOffset() throws IOException {
boolean usesEscapeChar = csvSchema.usesEscapeChar();
byte[] escapeBytes = Character.toString(((char) (csvSchema.getEscapeChar()))).getBytes(StandardCharsets.UTF_8);
long startPos = stream.getPos();
byte b;
while ((b = ((byte) (stream.read()))) != (-1)) {
if ((b == '\r') || (b == '\n')) {
// If there may be escape tags ahead
if (usesEscapeChar && ((stream.getPos() - startPos) <= escapeBytes.length)) {
long front = (stream.getPos() - escapeBytes.length) - 1;
if (front > 0) {
stream.seek(front);
byte[] readBytes = new byte[escapeBytes.length];
stream.read(readBytes); // front is guaranteed to be greater than zero here
stream.read(); // read one more byte to return to the position right after the separator
if (Arrays.equals(escapeBytes, readBytes)) {
// equal, we should skip this one line separator
continue;
}
}
}
long pos = stream.getPos();
// deal with "\r\n", next one maybe '\n', so we need skip it.
if ((b == '\r') && (((byte) (stream.read())) == '\n')) {
return stream.getPos();
} else {
return pos;
}
} else if (usesEscapeChar && (b == escapeBytes[0])) {
boolean v9 = true;
for (int i = 1; i < escapeBytes.length; i++) {
if (((byte) (stream.read())) != escapeBytes[i]) {
v9 = false;
break;
}
}
if (v9) {
// equal, we should skip next one
stream.skip(1);
}
}
}
return stream.getPos();
} | 3.26 |
flink_HiveWriterFactory_createRecordWriter_rdh | /**
* Create a {@link RecordWriter} from path.
*/
public RecordWriter createRecordWriter(Path path) {
try {
checkInitialize();
JobConf conf = new JobConf(confWrapper.conf());
if (isCompressed) {
String codecStr = conf.get(COMPRESSINTERMEDIATECODEC.varname);
if (!StringUtils.isNullOrWhitespaceOnly(codecStr)) {
// noinspection unchecked
Class<? extends CompressionCodec> codec = ((Class<? extends CompressionCodec>) (Class.forName(codecStr, true, Thread.currentThread().getContextClassLoader())));
FileOutputFormat.setOutputCompressorClass(conf, codec);
}
String typeStr = conf.get(COMPRESSINTERMEDIATETYPE.varname);
if (!StringUtils.isNullOrWhitespaceOnly(typeStr)) {
SequenceFile.CompressionType style = SequenceFile.CompressionType.valueOf(typeStr);
SequenceFileOutputFormat.setOutputCompressionType(conf, style);
}
}
return hiveShim.getHiveRecordWriter(conf, hiveOutputFormatClz, recordSerDe.getSerializedClass(), isCompressed, f0, path);
} catch (Exception e) {
throw new FlinkHiveException(e);
}
} | 3.26 |
flink_ListAggWsWithRetractAggFunction_getArgumentDataTypes_rdh | // --------------------------------------------------------------------------------------------
// Planning
// --------------------------------------------------------------------------------------------
@Override
public List<DataType> getArgumentDataTypes() {
return Arrays.asList(DataTypes.STRING().bridgedTo(StringData.class), DataTypes.STRING().bridgedTo(StringData.class));
} | 3.26 |
flink_DefaultGroupCache_onCacheRemoval_rdh | /**
* Removal listener that removes the cache key of this group.
*
* @param removalNotification
* the removal notification of the removed element.
*/
private void onCacheRemoval(RemovalNotification<CacheKey<G, K>, V> removalNotification) {
CacheKey<G, K> cacheKey = removalNotification.getKey();
V value = removalNotification.getValue();
if ((cacheKey != null) && (value != null)) {
cachedBlobKeysPerJob.computeIfPresent(cacheKey.getGroup(), (group, keys) -> {
keys.remove(cacheKey);
if (keys.isEmpty()) {
return null;
} else {
return keys;
}
});
}
} | 3.26 |
flink_MetricDumpSerialization_deserialize_rdh | /**
* De-serializes metrics from the given byte array and returns them as a list of {@link MetricDump}.
*
* @param data
* serialized metrics
* @return A list containing the deserialized metrics.
*/
public List<MetricDump> deserialize(MetricDumpSerialization.MetricSerializationResult data) {
DataInputView countersInputView = new DataInputDeserializer(data.serializedCounters, 0, data.serializedCounters.length);
DataInputView gaugesInputView = new DataInputDeserializer(data.serializedGauges, 0, data.serializedGauges.length);
DataInputView metersInputView = new DataInputDeserializer(data.serializedMeters, 0, data.serializedMeters.length);
DataInputView histogramsInputView = new DataInputDeserializer(data.serializedHistograms, 0, data.serializedHistograms.length);
List<MetricDump> metrics = new ArrayList<>(((data.numCounters + data.numGauges) + data.numMeters) + data.numHistograms);
for (int x = 0; x < data.numCounters; x++) {
try {
metrics.add(deserializeCounter(countersInputView));
} catch (Exception e) {
LOG.debug("Failed to deserialize counter.", e);
}
}
for (int x = 0; x < data.numGauges; x++) {
try {
metrics.add(deserializeGauge(gaugesInputView));
} catch (Exception e) {
LOG.debug("Failed to deserialize gauge.", e);
}
}
for (int x = 0; x < data.numMeters; x++) {
try {
metrics.add(deserializeMeter(metersInputView));
} catch (Exception e) {
LOG.debug("Failed to deserialize meter.", e);
}
}
for (int x = 0; x < data.numHistograms; x++) {
try {
metrics.add(deserializeHistogram(histogramsInputView));
} catch (Exception e) {
LOG.debug("Failed to deserialize histogram.", e);
}
}
return metrics;
} | 3.26 |
flink_NormalizedKeySorter_compare_rdh | // -------------------------------------------------------------------------
// Indexed Sorting
// -------------------------------------------------------------------------
@Override
public int compare(int i, int j) {
final int v6 = i / this.indexEntriesPerSegment;
final int segmentOffsetI = (i % this.indexEntriesPerSegment) * this.indexEntrySize;
final int segmentNumberJ = j / this.indexEntriesPerSegment;
final int segmentOffsetJ = (j % this.indexEntriesPerSegment) * this.indexEntrySize;
return compare(v6, segmentOffsetI, segmentNumberJ, segmentOffsetJ);
} | 3.26 |
flink_NormalizedKeySorter_writeToOutput_rdh | /**
* Writes a subset of the records in this buffer in their logical order to the given output.
*
* @param output
* The output view to write the records to.
* @param start
* The logical start position of the subset.
* @param num
* The number of elements to write.
* @throws IOException
* Thrown, if an I/O exception occurred writing to the output view.
*/
@Override
public void writeToOutput(final ChannelWriterOutputView output, final int start, int num) throws IOException {
int currentMemSeg = start / this.indexEntriesPerSegment;
int offset = (start % this.indexEntriesPerSegment) * this.indexEntrySize;
while (num > 0) {
final MemorySegment currentIndexSegment = this.sortIndex.get(currentMemSeg++);
// check whether we have a full or partially full segment
if ((num >= this.indexEntriesPerSegment) && (offset == 0)) {
// full segment
for (; offset <= this.lastIndexEntryOffset; offset += this.indexEntrySize) {
final long pointer = currentIndexSegment.getLong(offset) & POINTER_MASK;
this.recordBuffer.setReadPosition(pointer);
this.serializer.copy(this.recordBuffer, output);
}
num -= this.indexEntriesPerSegment;
} else {
// partially filled segment
for (; (num > 0) && (offset <= this.lastIndexEntryOffset); num--, offset += this.indexEntrySize) {
final long pointer = currentIndexSegment.getLong(offset) & POINTER_MASK;
this.recordBuffer.setReadPosition(pointer);
this.serializer.copy(this.recordBuffer, output);
}
}
offset = 0;
}
} | 3.26 |
flink_NormalizedKeySorter_isEmpty_rdh | /**
* Checks whether the buffer is empty.
*
* @return True, if no record is contained, false otherwise.
*/
@Override
public boolean isEmpty() {
return this.numRecords == 0;
} | 3.26 |
flink_NormalizedKeySorter_write_rdh | /**
* Writes a given record to this sort buffer. The written record will be appended and take the
* last logical position.
*
* @param record
* The record to be written.
* @return True, if the record was successfully written, false, if the sort buffer was full.
* @throws IOException
* Thrown, if an error occurred while serializing the record into the
* buffers.
*/
@Override
public boolean write(T record) throws IOException {
// check whether we need a new memory segment for the sort index
if (this.currentSortIndexOffset > this.lastIndexEntryOffset) {
if (memoryAvailable()) {
this.currentSortIndexSegment = nextMemorySegment();
this.sortIndex.add(this.currentSortIndexSegment);
this.currentSortIndexOffset = 0;
this.sortIndexBytes += this.segmentSize;
} else {
return false;
}
}
// serialize the record into the data buffers
try {
this.serializer.serialize(record, this.recordCollector);
} catch (EOFException e) {
return false;
}
final long newOffset = this.recordCollector.getCurrentOffset();
final boolean shortRecord = (newOffset - this.currentDataBufferOffset) < LARGE_RECORD_THRESHOLD;
if ((!shortRecord) && LOG.isDebugEnabled()) {
LOG.debug(("Put a large record ( >" + LARGE_RECORD_THRESHOLD) + " into the sort buffer");
}
// add the pointer and the normalized key
this.currentSortIndexSegment.putLong(this.currentSortIndexOffset, shortRecord ? this.currentDataBufferOffset : this.currentDataBufferOffset | f0);
if (this.numKeyBytes != 0) {
this.comparator.putNormalizedKey(record, this.currentSortIndexSegment, this.currentSortIndexOffset + OFFSET_LEN, this.numKeyBytes);
}
this.currentSortIndexOffset += this.indexEntrySize;
this.currentDataBufferOffset = newOffset;
this.numRecords++;
return true;
} | 3.26 |
flink_NormalizedKeySorter_getRecord_rdh | // -------------------------------------------------------------------------
// Retrieving and Writing
// -------------------------------------------------------------------------
@Override
public T getRecord(int logicalPosition) throws IOException {
return getRecordFromBuffer(readPointer(logicalPosition));
} | 3.26 |
flink_NormalizedKeySorter_getIterator_rdh | // -------------------------------------------------------------------------
/**
* Gets an iterator over all records in this buffer in their logical order.
*
* @return An iterator returning the records in their logical order.
*/
@Override
public final MutableObjectIterator<T> getIterator() {
return new MutableObjectIterator<T>() {
private final int size = size();
private int current = 0;
private int currentSegment = 0;
private int currentOffset = 0;
private MemorySegment currentIndexSegment = sortIndex.get(0);
@Override
public T next(T target) {
if (this.current < this.size) {
this.current++;
if (this.currentOffset > lastIndexEntryOffset) {
this.currentOffset = 0;
this.currentIndexSegment = sortIndex.get(++this.currentSegment);
}
long pointer = this.currentIndexSegment.getLong(this.currentOffset) & POINTER_MASK;
this.currentOffset += indexEntrySize;
try {
return getRecordFromBuffer(target, pointer);
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
} else {
return null;
}
}
@Override
public T next() {
if (this.current < this.size) {
this.current++;
if (this.currentOffset > lastIndexEntryOffset) {
this.currentOffset = 0;
this.currentIndexSegment = sortIndex.get(++this.currentSegment);
}
long pointer = this.currentIndexSegment.getLong(this.currentOffset);
this.currentOffset += indexEntrySize;
try {
return getRecordFromBuffer(pointer);
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
} else {
return null;
}
}
};
} | 3.26 |
flink_NormalizedKeySorter_readPointer_rdh | // ------------------------------------------------------------------------
// Access Utilities
// ------------------------------------------------------------------------
private long readPointer(int logicalPosition) {
if ((logicalPosition < 0) || (logicalPosition >= this.numRecords)) {
throw new IndexOutOfBoundsException();
}
final int bufferNum = logicalPosition / this.indexEntriesPerSegment;
final int segmentOffset = logicalPosition % this.indexEntriesPerSegment;
return this.sortIndex.get(bufferNum).getLong(segmentOffset * this.indexEntrySize) & POINTER_MASK;
} | 3.26 |
flink_NormalizedKeySorter_reset_rdh | // -------------------------------------------------------------------------
// Memory Segment
// -------------------------------------------------------------------------
/**
* Resets the sort buffer back to the state where it is empty. All contained data is discarded.
*/
@Override
public void reset() {
// reset all offsets
this.numRecords = 0;
this.currentSortIndexOffset = 0;
this.currentDataBufferOffset = 0;
this.sortIndexBytes = 0;
// return all memory
this.freeMemory.addAll(this.sortIndex);
this.freeMemory.addAll(this.recordBufferSegments);
this.sortIndex.clear();
this.recordBufferSegments.clear();
// grab first buffers
this.currentSortIndexSegment = nextMemorySegment();
this.sortIndex.add(this.currentSortIndexSegment);
this.recordCollector.reset();
} | 3.26 |
flink_SqlCreateTableConverter_convertCreateTableAS_rdh | /**
* Convert the {@link SqlCreateTableAs} node.
*/
Operation convertCreateTableAS(FlinkPlannerImpl flinkPlanner, SqlCreateTableAs sqlCreateTableAs) {
UnresolvedIdentifier unresolvedIdentifier = UnresolvedIdentifier.of(sqlCreateTableAs.fullTableName());
ObjectIdentifier identifier = catalogManager.qualifyIdentifier(unresolvedIdentifier);
PlannerQueryOperation query = ((PlannerQueryOperation) (SqlNodeToOperationConversion.convert(flinkPlanner, catalogManager, sqlCreateTableAs.getAsQuery()).orElseThrow(() -> new TableException("CTAS unsupported node type " + sqlCreateTableAs.getAsQuery().getClass().getSimpleName()))));
CatalogTable catalogTable = createCatalogTable(sqlCreateTableAs);
CreateTableOperation createTableOperation = new CreateTableOperation(identifier, CatalogTable.of(Schema.newBuilder().fromResolvedSchema(query.getResolvedSchema()).build(), catalogTable.getComment(), catalogTable.getPartitionKeys(), catalogTable.getOptions()), sqlCreateTableAs.isIfNotExists(), sqlCreateTableAs.isTemporary());
return new CreateTableASOperation(createTableOperation, Collections.emptyMap(), query, false);
} | 3.26 |
flink_SqlCreateTableConverter_convertCreateTable_rdh | /**
* Convert the {@link SqlCreateTable} node.
*/
Operation convertCreateTable(SqlCreateTable sqlCreateTable) {
CatalogTable catalogTable = createCatalogTable(sqlCreateTable);
UnresolvedIdentifier unresolvedIdentifier = UnresolvedIdentifier.of(sqlCreateTable.fullTableName());
ObjectIdentifier identifier = catalogManager.qualifyIdentifier(unresolvedIdentifier);
return new CreateTableOperation(identifier, catalogTable, sqlCreateTable.isIfNotExists(), sqlCreateTable.isTemporary());
} | 3.26 |
flink_InternalTimeServiceManagerImpl_snapshotToRawKeyedState_rdh | // //////////////// Fault Tolerance Methods ///////////////////
@Override
public void snapshotToRawKeyedState(KeyedStateCheckpointOutputStream out, String operatorName) throws Exception {
try {
KeyGroupsList allKeyGroups = out.getKeyGroupList();
for (int keyGroupIdx : allKeyGroups) {
out.startNewKeyGroup(keyGroupIdx);
snapshotStateForKeyGroup(new DataOutputViewStreamWrapper(out), keyGroupIdx);
}
} catch (Exception exception) {
throw new Exception(("Could not write timer service of " + operatorName) + " to checkpoint state stream.", exception);
} finally {
try {
out.close();
} catch (Exception closeException) {
LOG.warn("Could not close raw keyed operator state stream for {}. This " + "might have prevented deleting some state data.",
operatorName, closeException);
}
}
} | 3.26 |
flink_InternalTimeServiceManagerImpl_numProcessingTimeTimers_rdh | // ////////////////// Methods used ONLY IN TESTS ////////////////////
@VisibleForTesting
public int numProcessingTimeTimers() {
int count = 0;
for (InternalTimerServiceImpl<?, ?> timerService : timerServices.values()) {
count += timerService.numProcessingTimeTimers();
}
return count;
} | 3.26
flink_InternalTimeServiceManagerImpl_create_rdh | /**
* A factory method for creating the {@link InternalTimeServiceManagerImpl}.
*
* <p><b>IMPORTANT:</b> Keep in sync with {@link InternalTimeServiceManager.Provider}.
*/
public static <K> InternalTimeServiceManagerImpl<K> create(CheckpointableKeyedStateBackend<K> keyedStateBackend, ClassLoader userClassloader, KeyContext keyContext, ProcessingTimeService processingTimeService, Iterable<KeyGroupStatePartitionStreamProvider> rawKeyedStates, StreamTaskCancellationContext cancellationContext) throws Exception {
final KeyGroupRange v0 = keyedStateBackend.getKeyGroupRange();
final InternalTimeServiceManagerImpl<K> timeServiceManager = new InternalTimeServiceManagerImpl<>(v0, keyContext, keyedStateBackend, processingTimeService, cancellationContext);
// and then initialize the timer services
for (KeyGroupStatePartitionStreamProvider streamProvider : rawKeyedStates) {
int keyGroupIdx = streamProvider.getKeyGroupId();
Preconditions.checkArgument(v0.contains(keyGroupIdx), ("Key Group " + keyGroupIdx) + " does not belong to the local range.");
timeServiceManager.restoreStateForKeyGroup(streamProvider.getStream(), keyGroupIdx, userClassloader);
}
return timeServiceManager;
} | 3.26 |
flink_RowDataVectorizer_convert_rdh | /**
* Converting ArrayData to RowData for calling {@link RowDataVectorizer#setColumn(int,
* ColumnVector, LogicalType, RowData, int)} recursively with array.
*
* @param arrayData
* input ArrayData.
* @param arrayFieldType
* LogicalType of input ArrayData.
* @return RowData.
*/
private static RowData convert(ArrayData arrayData, LogicalType arrayFieldType) {
GenericRowData rowData = new GenericRowData(arrayData.size());
ArrayData.ElementGetter elementGetter = ArrayData.createElementGetter(arrayFieldType);
for (int i = 0; i < arrayData.size(); i++) {
rowData.setField(i, elementGetter.getElementOrNull(arrayData, i));
}
return rowData;
} | 3.26 |
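The helper above leans on ArrayData.ElementGetter to copy array elements into a GenericRowData. A minimal, self-contained sketch of that mechanism follows; it assumes the Flink table runtime classes are on the classpath and uses a made-up INT array.

// Minimal sketch of the ArrayData.ElementGetter mechanism used by convert(...) above.
import org.apache.flink.table.data.ArrayData;
import org.apache.flink.table.data.GenericArrayData;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.types.logical.IntType;

public class ElementGetterSketch {
    public static void main(String[] args) {
        ArrayData array = new GenericArrayData(new Integer[] {1, null, 3});
        ArrayData.ElementGetter getter = ArrayData.createElementGetter(new IntType());
        GenericRowData row = new GenericRowData(array.size());
        for (int i = 0; i < array.size(); i++) {
            // getElementOrNull handles null elements, mirroring the null handling in convert(...).
            row.setField(i, getter.getElementOrNull(array, i));
        }
        System.out.println(row); // prints something like +I(1,null,3)
    }
}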
flink_RemoteRpcInvocation_writeObject_rdh | // -------------------------------------------------------------------
// Serialization methods
// -------------------------------------------------------------------
private void writeObject(ObjectOutputStream oos) throws IOException {
    // Translate it to byte array so that we can deserialize classes which cannot be found in
// pekko class loader.
byte[] bytes = InstantiationUtil.serializeObject(methodInvocation);
oos.writeObject(bytes);
} | 3.26 |
flink_TestStreamEnvironment_unsetAsContext_rdh | /**
* Resets the streaming context environment to null.
*/
public static void unsetAsContext() {
resetContextEnvironment();
} | 3.26 |
flink_TestStreamEnvironment_randomizeConfiguration_rdh | /**
 * This is the place for randomizing configuration that relates to the DataStream API, such as
 * ExecutionConfig, CheckpointConfig, and StreamExecutionEnvironment. The list of such configuration
 * options can be found in {@link StreamExecutionEnvironment#configure(ReadableConfig, ClassLoader)}. All
 * other configuration should be randomized in {@link org.apache.flink.runtime.testutils.MiniClusterResource#randomizeConfiguration(Configuration)}.
*/
private static void randomizeConfiguration(MiniCluster miniCluster, Configuration conf) {
// randomize ITTests for enabling unaligned checkpoint
if (RANDOMIZE_CHECKPOINTING_CONFIG) {
randomize(conf, ExecutionCheckpointingOptions.ENABLE_UNALIGNED, true, false);
randomize(conf, ExecutionCheckpointingOptions.ALIGNED_CHECKPOINT_TIMEOUT, Duration.ofSeconds(0), Duration.ofMillis(100), Duration.ofSeconds(2));
randomize(conf, CheckpointingOptions.CLEANER_PARALLEL_MODE, true, false);
}
// randomize ITTests for enabling state change log
if (isConfigurationSupportedByChangelog(miniCluster.getConfiguration())) {
if (STATE_CHANGE_LOG_CONFIG.equalsIgnoreCase(STATE_CHANGE_LOG_CONFIG_ON)) {
if (!conf.contains(StateChangelogOptions.ENABLE_STATE_CHANGE_LOG)) {
conf.set(StateChangelogOptions.ENABLE_STATE_CHANGE_LOG, true);
miniCluster.overrideRestoreModeForChangelogStateBackend();
}
} else if (STATE_CHANGE_LOG_CONFIG.equalsIgnoreCase(STATE_CHANGE_LOG_CONFIG_RAND)) {
boolean enabled = randomize(conf, StateChangelogOptions.ENABLE_STATE_CHANGE_LOG, true, false);
if (enabled) {
randomize(conf, StateChangelogOptions.PERIODIC_MATERIALIZATION_INTERVAL, Duration.ofMillis(100), Duration.ofMillis(500), Duration.ofSeconds(1), Duration.ofSeconds(5), Duration.ofSeconds(-1));
miniCluster.overrideRestoreModeForChangelogStateBackend();
}
}
}
} | 3.26 |
flink_TestStreamEnvironment_setAsContext_rdh | /**
* Sets the streaming context environment to a TestStreamEnvironment that runs its programs on
* the given cluster with the given default parallelism.
*
* @param miniCluster
* The MiniCluster to execute jobs on.
* @param parallelism
* The default parallelism for the test programs.
*/
public static void setAsContext(final MiniCluster miniCluster, final int parallelism) {
setAsContext(miniCluster, parallelism, Collections.emptyList(), Collections.emptyList());
} | 3.26 |
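A hedged sketch of how a test typically brackets job execution with setAsContext/unsetAsContext. The MiniCluster setup is assumed to happen elsewhere, and the TestStreamEnvironment package name is an assumption.

// Hedged test sketch: route getExecutionEnvironment() to an already started MiniCluster.
import org.apache.flink.runtime.minicluster.MiniCluster;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.util.TestStreamEnvironment; // package assumed

public class MiniClusterJobSketch {
    static void runSmokeJob(MiniCluster miniCluster) throws Exception {
        TestStreamEnvironment.setAsContext(miniCluster, 2); // default parallelism 2
        try {
            StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
            env.fromElements(1, 2, 3).print();
            env.execute("mini-cluster-smoke-test");
        } finally {
            TestStreamEnvironment.unsetAsContext(); // always restore the context factory
        }
    }
}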
flink_MissingTypeInfo_isBasicType_rdh | // --------------------------------------------------------------------------------------------
@Override
public boolean isBasicType() {
throw new UnsupportedOperationException("The missing type information cannot be used as a type information.");
} | 3.26 |
flink_MissingTypeInfo_getFunctionName_rdh | // --------------------------------------------------------------------------------------------
public String getFunctionName() {
return functionName;
} | 3.26 |
flink_InputGate_getPriorityEventAvailableFuture_rdh | /**
 * Notifies when a priority event has been enqueued. If this future is queried from the task
 * thread, it is guaranteed that a priority event is available and can be retrieved through
 * {@link #getNext()}.
*/
public CompletableFuture<?> getPriorityEventAvailableFuture() {
return f0.getAvailableFuture();
} | 3.26 |
flink_InputGate_getChannelInfos_rdh | /**
* Returns the channel infos of this gate.
*/
public List<InputChannelInfo> getChannelInfos() {
return IntStream.range(0, getNumberOfInputChannels()).mapToObj(index -> getChannel(index).getChannelInfo()).collect(Collectors.toList());
} | 3.26 |
flink_CoGroupOperator_sortSecondGroup_rdh | /**
* Sorts Pojo or {@link org.apache.flink.api.java.tuple.Tuple} elements within a
* group in the second input on the specified field in the specified {@link Order}.
*
* <p>Groups can be sorted by multiple fields by chaining {@link #sortSecondGroup(String, Order)} calls.
*
* @param fieldExpression
* The expression to the field on which the group is to be
* sorted.
* @param order
* The Order in which the specified Tuple field is sorted.
 * @return The co-group transformation with the specified group order applied to the second input.
* @see Order
*/
public CoGroupOperatorWithoutFunction sortSecondGroup(String fieldExpression, Order order) {
ExpressionKeys<I2> ek = new ExpressionKeys<>(fieldExpression, input2.getType());
int[] groupOrderKeys = ek.computeLogicalKeyPositions();
for (int key : groupOrderKeys) {
this.groupSortKeyOrderSecond.add(new ImmutablePair<>(key, order));
}
return this;
} | 3.26 |
flink_CoGroupOperator_with_rdh | /**
* Finalizes a CoGroup transformation by applying a {@link org.apache.flink.api.common.functions.RichCoGroupFunction} to groups of elements
* with identical keys.
*
 * <p>Each CoGroupFunction call returns an arbitrary number of elements.
*
* @param function
* The CoGroupFunction that is called for all groups of elements
* with identical keys.
 * @return A CoGroupOperator that represents the co-grouped result DataSet.
* @see org.apache.flink.api.common.functions.RichCoGroupFunction
* @see DataSet
*/
public <R> CoGroupOperator<I1, I2, R> with(CoGroupFunction<I1, I2, R> function) {
if (function == null) {
throw new NullPointerException("CoGroup function must not be null.");
}
TypeInformation<R> returnType = TypeExtractor.getCoGroupReturnTypes(function, input1.getType(), input2.getType(), Utils.getCallLocationName(), true);
return new CoGroupOperator<>(input1, input2, keys1, keys2, input1.clean(function), returnType, groupSortKeyOrderFirst, groupSortKeyOrderSecond, customPartitioner, Utils.getCallLocationName());
} | 3.26 |
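A hedged usage sketch of the coGroup(...).where(...).equalTo(...).with(...) chain on the legacy DataSet API; the datasets, key positions, and output format below are made up for illustration and are not taken from the snippet above.

import org.apache.flink.api.common.functions.CoGroupFunction;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.util.Collector;

public class CoGroupWithSketch {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Tuple2<Integer, String>> users =
                env.fromElements(Tuple2.of(1, "alice"), Tuple2.of(2, "bob"));
        DataSet<Tuple2<Integer, Double>> orders =
                env.fromElements(Tuple2.of(1, 10.0), Tuple2.of(1, 5.0));

        CoGroupFunction<Tuple2<Integer, String>, Tuple2<Integer, Double>, String> totalPerUser =
                new CoGroupFunction<Tuple2<Integer, String>, Tuple2<Integer, Double>, String>() {
                    @Override
                    public void coGroup(Iterable<Tuple2<Integer, String>> userGroup,
                                        Iterable<Tuple2<Integer, Double>> orderGroup,
                                        Collector<String> out) {
                        double total = 0.0;
                        for (Tuple2<Integer, Double> order : orderGroup) {
                            total += order.f1; // each iterable is traversed exactly once
                        }
                        for (Tuple2<Integer, String> user : userGroup) {
                            out.collect(user.f1 + " spent " + total);
                        }
                    }
                };

        users.coGroup(orders)
                .where(0)   // key field of the first input
                .equalTo(0) // key field of the second input
                .with(totalPerUser)
                .print();
    }
}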
flink_CoGroupOperator_sortFirstGroup_rdh | /**
* Sorts Pojo or {@link org.apache.flink.api.java.tuple.Tuple} elements within a
* group in the first input on the specified field in the specified {@link Order}.
*
* <p>Groups can be sorted by multiple fields by chaining {@link #sortFirstGroup(String, Order)} calls.
*
* @param fieldExpression
* The expression to the field on which the group is to be
* sorted.
* @param order
* The Order in which the specified Tuple field is sorted.
 * @return The co-group transformation with the specified group order applied to the first input.
* @see Order
*/
public CoGroupOperatorWithoutFunction sortFirstGroup(String fieldExpression, Order order) {
ExpressionKeys<I1> ek = new ExpressionKeys<>(fieldExpression, input1.getType());
int[] groupOrderKeys = ek.computeLogicalKeyPositions();
for (int key : groupOrderKeys) {
this.groupSortKeyOrderFirst.add(new ImmutablePair<>(key, order));
}
return this;
} | 3.26 |
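Continuing the hypothetical users/orders sketch shown after the with(...) entry above, a short sketch of chaining sortFirstGroup and sortSecondGroup before supplying the function; the field expressions and sort orders are illustrative only.

// Continuation of the users/orders sketch above (totalPerUser is the CoGroupFunction defined there).
// Requires: import org.apache.flink.api.common.operators.Order;
users.coGroup(orders)
        .where(0)
        .equalTo(0)
        .sortFirstGroup("f1", Order.ASCENDING)   // sort each first-input group by name, ascending
        .sortSecondGroup("f1", Order.DESCENDING) // sort each second-input group by amount, descending
        .with(totalPerUser)
        .print();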
flink_CoGroupOperator_createCoGroupOperator_rdh | /**
* Intermediate step of a CoGroup transformation.
*
* <p>To continue the CoGroup transformation, provide a {@link org.apache.flink.api.common.functions.RichCoGroupFunction} by calling {@link org.apache.flink.api.java.operators.CoGroupOperator.CoGroupOperatorSets.CoGroupOperatorSetsPredicate.CoGroupOperatorWithoutFunction#with(org.apache.flink.api.common.functions.CoGroupFunction)}.
*/
private CoGroupOperatorWithoutFunction createCoGroupOperator(Keys<I2> keys2) {
if (keys2 == null) {
throw new NullPointerException();
}
if (keys2.isEmpty()) {
throw new InvalidProgramException("The co-group keys must not be empty.");
}
try {
keys1.areCompatible(keys2);
} catch (IncompatibleKeysException ike) {
throw new InvalidProgramException("The pair of co-group keys are not compatible with each other.", ike);
}
return new CoGroupOperatorWithoutFunction(keys2);
} | 3.26 |
flink_CoGroupOperator_withPartitioner_rdh | /**
* Sets a custom partitioner for the CoGroup operation. The partitioner will be
* called on the join keys to determine the partition a key should be assigned to.
* The partitioner is evaluated on both inputs in the same way.
*
* <p>NOTE: A custom partitioner can only be used with single-field CoGroup keys,
* not with composite CoGroup keys.
*
* @param partitioner
* The custom partitioner to be used.
* @return This CoGroup operator, to allow for function chaining.
*/
public CoGroupOperatorWithoutFunction withPartitioner(Partitioner<?> partitioner) {
    if (partitioner != null) {
keys1.validateCustomPartitioner(partitioner, null);
keys2.validateCustomPartitioner(partitioner, null);
}
this.customPartitioner = input1.clean(partitioner);
return this;
} | 3.26 |
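Continuing the same hypothetical users/orders sketch, a hedged example of supplying a custom Partitioner for the single-field key at position 0; the partitioning logic is made up.

// Continuation of the users/orders sketch above; requires a single-field CoGroup key.
// Requires: import org.apache.flink.api.common.functions.Partitioner;
Partitioner<Integer> idPartitioner = new Partitioner<Integer>() {
    @Override
    public int partition(Integer key, int numPartitions) {
        return Math.abs(key) % numPartitions; // illustrative routing only
    }
};

users.coGroup(orders)
        .where(0)
        .equalTo(0)
        .withPartitioner(idPartitioner)
        .with(totalPerUser)
        .print();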
flink_CoGroupOperator_getPartitioner_rdh | /**
 * Gets the custom partitioner used by this co-group operation, or {@code null} if none is set.
 *
 * @return The custom partitioner used by this co-group operation.
 */
public Partitioner<?> getPartitioner() {
    return customPartitioner;
} | 3.26 |
flink_NullableSerializer_wrapIfNullIsNotSupported_rdh | /**
 * This method tries to serialize a {@code null} value with the {@code originalSerializer} and
 * wraps it in case of a {@link NullPointerException}; otherwise it returns the {@code originalSerializer}.
*
* @param originalSerializer
* serializer to wrap and add {@code null} support
* @param padNullValueIfFixedLen
* pad null value to preserve the fixed length of original
* serializer
* @return serializer which supports {@code null} values
*/
public static <T> TypeSerializer<T> wrapIfNullIsNotSupported(@Nonnull TypeSerializer<T> originalSerializer, boolean padNullValueIfFixedLen) {
return checkIfNullSupported(originalSerializer) ? originalSerializer : wrap(originalSerializer, padNullValueIfFixedLen);
} | 3.26 |
flink_NullableSerializer_checkIfNullSupported_rdh | /**
 * This method checks whether {@code serializer} supports {@code null} values.
*
* @param serializer
* serializer to check
*/
public static <T> boolean checkIfNullSupported(@Nonnull TypeSerializer<T> serializer) {
int length = (serializer.getLength() > 0) ? serializer.getLength() : 1;
DataOutputSerializer dos = new DataOutputSerializer(length);
try {
serializer.serialize(null, dos);
} catch (IOException | RuntimeException e) {
return false;
}
checkArgument((serializer.getLength() < 0) || (serializer.getLength() == dos.getCopyOfBuffer().length), "The serialized form of the null value should have the same length " + "as any other if the length is fixed in the serializer");
DataInputDeserializer dis = new DataInputDeserializer(dos.getSharedBuffer());
try {
checkArgument(serializer.deserialize(dis) == null);
} catch (IOException e) {
throw new RuntimeException(String.format("Unexpected failure to deserialize just serialized null value with %s", serializer.getClass().getName()), e);
}
checkArgument(serializer.copy(null) == null, "Serializer %s has to be able properly copy null value if it can serialize it", serializer.getClass().getName());
return true;
} | 3.26 |
flink_NullableSerializer_wrap_rdh | /**
* This method wraps the {@code originalSerializer} with the {@code NullableSerializer} if not
* already wrapped.
*
* @param originalSerializer
* serializer to wrap and add {@code null} support
* @param padNullValueIfFixedLen
* pad null value to preserve the fixed length of original
* serializer
* @return wrapped serializer which supports {@code null} values
*/
public static <T> TypeSerializer<T> wrap(@Nonnull TypeSerializer<T> originalSerializer, boolean padNullValueIfFixedLen) {
return originalSerializer instanceof NullableSerializer ? originalSerializer : new NullableSerializer<>(originalSerializer, padNullValueIfFixedLen);
} | 3.26 |
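A hedged sketch tying the three methods above together: probe a serializer for null support and wrap it only if needed. LongSerializer is used because it rejects null values; the NullableSerializer import path below is an assumption.

// Hedged sketch; the NullableSerializer import path is assumed.
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.api.common.typeutils.base.LongSerializer;
import org.apache.flink.api.java.typeutils.runtime.NullableSerializer;

public class NullableSerializerSketch {
    public static void main(String[] args) {
        TypeSerializer<Long> base = LongSerializer.INSTANCE;
        System.out.println(NullableSerializer.checkIfNullSupported(base)); // expected: false
        // padNullValueIfFixedLen = true pads the serialized null value so the wrapped
        // serializer stays fixed-length relative to the original 8-byte long encoding.
        TypeSerializer<Long> nullable = NullableSerializer.wrapIfNullIsNotSupported(base, true);
        System.out.println(nullable instanceof NullableSerializer); // expected: true
    }
}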
flink_BlobServerConnection_put_rdh | /**
* Handles an incoming PUT request from a BLOB client.
*
* @param inputStream
* The input stream to read incoming data from
* @param outputStream
* The output stream to send data back to the client
* @param buf
* An auxiliary buffer for data serialization/deserialization
* @throws IOException
* thrown if an I/O error occurs while reading/writing data from/to the
* respective streams
*/
private void put(InputStream inputStream, OutputStream outputStream, byte[] buf) throws IOException {
File incomingFile = null;
try {
// read HEADER contents: job ID, HA mode/permanent or transient BLOB
final int mode = inputStream.read();
if (mode < 0) {
throw new EOFException("Premature end of PUT request");
}
final JobID jobId;
if (mode == JOB_UNRELATED_CONTENT) {
jobId = null;
} else if (mode == JOB_RELATED_CONTENT) {
    byte[] jidBytes = new byte[JobID.SIZE];
readFully(inputStream, jidBytes, 0, JobID.SIZE, "JobID");
jobId = JobID.fromByteArray(jidBytes);
} else {
throw new IOException("Unknown type of BLOB addressing.");
}
final BlobKey.BlobType blobType;
{
final int read = inputStream.read();
if (read < 0) {
throw new EOFException("Read an incomplete BLOB type");
} else if (read == TRANSIENT_BLOB.ordinal()) {
blobType = TRANSIENT_BLOB;
} else if (read == PERMANENT_BLOB.ordinal()) {
blobType = PERMANENT_BLOB;
checkArgument(jobId != null, "Invalid BLOB addressing for permanent BLOBs");
} else {
throw new IOException("Invalid data received for the BLOB type: " + read);
}
}
if (LOG.isDebugEnabled()) {
LOG.debug("Received PUT request for BLOB of job {} with from {}.", jobId, f0.getInetAddress());
}
incomingFile = blobServer.createTemporaryFilename();
byte[] digest = readFileFully(inputStream, incomingFile, buf);
BlobKey blobKey = blobServer.moveTempFileToStore(incomingFile, jobId, digest, blobType);
// Return computed key to client for validation
outputStream.write(RETURN_OKAY);
blobKey.writeToOutputStream(outputStream);
} catch (SocketException e) {
// happens when the other side disconnects
LOG.debug("Socket connection closed", e);
} catch (Throwable t) {
LOG.error("PUT operation failed", t);
try {
writeErrorToStream(outputStream, t);
} catch (IOException e) {
// since we are in an exception case, it means not much that we could not send the
// error
// ignore this
}
f0.close();
} finally {
if (incomingFile != null) {
if ((!incomingFile.delete()) && incomingFile.exists()) {
LOG.warn("Cannot delete BLOB server staging file " + incomingFile.getAbsolutePath());
}
}
}
} | 3.26 |
flink_BlobServerConnection_close_rdh | /**
* Closes the connection socket and lets the thread exit.
*/
public void close() {
closeSilently(f0, LOG);
interrupt();
} | 3.26 |
flink_BlobServerConnection_get_rdh | // --------------------------------------------------------------------------------------------
// Actions
// --------------------------------------------------------------------------------------------
/**
* Handles an incoming GET request from a BLOB client.
*
* <p>Transient BLOB files are deleted after a successful read operation by the client. Note
* that we do not enforce atomicity here, i.e. multiple clients reading from the same BLOB may
* still succeed.
*
* @param inputStream
* the input stream to read incoming data from
* @param outputStream
* the output stream to send data back to the client
* @param buf
* an auxiliary buffer for data serialization/deserialization
* @throws IOException
* thrown if an I/O error occurs while reading/writing data from/to the
* respective streams
*/
private void get(InputStream inputStream, OutputStream outputStream, byte[] buf) throws IOException {
    /* Retrieve the file from the (distributed?) BLOB store and store it
locally, then send it to the service which requested it.
Instead, we could send it from the distributed store directly but
chances are high that if there is one request, there will be more
so a local cache makes more sense.
*/
final File blobFile;
final JobID jobId;
final BlobKey blobKey;
try {
// read HEADER contents: job ID, key, HA mode/permanent or transient BLOB
final int mode = inputStream.read();
if (mode < 0) {
throw new EOFException("Premature end of GET request");
}
// Receive the jobId and key
if (mode == JOB_UNRELATED_CONTENT) {
    jobId = null;
} else if (mode == JOB_RELATED_CONTENT) {
byte[] jidBytes = new byte[JobID.SIZE];
readFully(inputStream, jidBytes, 0, JobID.SIZE, "JobID");
jobId = JobID.fromByteArray(jidBytes);
} else {
throw new IOException(("Unknown type of BLOB addressing: " + mode) + '.');
}
blobKey = BlobKey.readFromInputStream(inputStream);
checkArgument((blobKey instanceof TransientBlobKey) || (jobId != null), "Invalid BLOB addressing for permanent BLOBs");
if (LOG.isDebugEnabled()) {
LOG.debug("Received GET request for BLOB {}/{} from {}.", jobId, blobKey, f0.getInetAddress());
}
// up to here, an error can give a good message
} catch (Throwable t) {
LOG.error("GET operation from {} failed.",
f0.getInetAddress(), t);
try {
writeErrorToStream(outputStream, t);
} catch (IOException e) {
// since we are in an exception case, it means that we could not send the error
// ignore this
}
f0.close();
return;
}
try {
readLock.lock();
try {
// copy the file to local store if it does not exist yet
try {
blobFile = blobServer.getFileInternal(jobId, blobKey);
// enforce a 2GB max for now (otherwise the protocol's length field needs to be
// increased)
if (blobFile.length() > Integer.MAX_VALUE) {
throw new IOException("BLOB size exceeds the maximum size (2 GB).");
}
outputStream.write(RETURN_OKAY);
} catch (Throwable t) {
LOG.error("GET operation failed for BLOB {}/{} from {}.", jobId, blobKey, f0.getInetAddress(), t);
try {
writeErrorToStream(outputStream, t);
} catch (IOException e) {
// since we are in an exception case, it means that we could not send the
// error
// ignore this
}
f0.close();
return;
}
// from here on, we started sending data, so all we can do is close the connection
// when something happens
int blobLen = ((int) (blobFile.length()));
writeLength(blobLen, outputStream);
try (FileInputStream fis = new FileInputStream(blobFile)) {
int bytesRemaining = blobLen;
while (bytesRemaining > 0) {
int read = fis.read(buf);
if (read < 0) {
throw new IOException("Premature end of BLOB file stream for " + blobFile.getAbsolutePath());
}
outputStream.write(buf, 0, read);
bytesRemaining -= read;
}
}
} finally {
readLock.unlock();
}
// on successful transfer, delete transient files
int result = inputStream.read();
if (result < 0) {
throw new EOFException("Premature end of GET request");
} else if ((blobKey instanceof TransientBlobKey) && (result == RETURN_OKAY)) {
// ignore the result from the operation
if (!blobServer.deleteInternal(jobId, ((TransientBlobKey) (blobKey)))) {
LOG.warn("DELETE operation failed for BLOB {}/{} from {}.", jobId, blobKey, f0.getInetAddress());
}
}
} catch (SocketException e) {
// happens when the other side disconnects
LOG.debug("Socket connection closed", e);
} catch (Throwable t) {
LOG.error("GET operation failed", t);
f0.close();
}
} | 3.26 |
flink_BlobServerConnection_run_rdh | // --------------------------------------------------------------------------------------------
// Connection / Thread methods
// --------------------------------------------------------------------------------------------
/**
* Main connection work method. Accepts requests until the other side closes the connection.
*/
@Override
public void run() {
try {
final InputStream inputStream = this.f0.getInputStream();
final OutputStream outputStream = this.f0.getOutputStream();
while (true) {
// Read the requested operation
final int operation = inputStream.read();
if (operation < 0) {
// done, no one is asking anything from us
return;
}
switch (operation) {
case PUT_OPERATION :
put(inputStream, outputStream, new byte[BUFFER_SIZE]);
break;
case GET_OPERATION :
get(inputStream, outputStream, new byte[BUFFER_SIZE]);
break;
default :
throw new IOException("Unknown operation " + operation);
}
}
} catch (SocketException e) {
// this happens when the remote site closes the connection
LOG.debug("Socket connection closed", e);
} catch (Throwable t) {
LOG.error("Error while executing BLOB connection from {}.", f0.getRemoteSocketAddress(), t);
} finally {
closeSilently(f0, LOG);
blobServer.unregisterConnection(this);
}
} | 3.26 |
flink_BlobServerConnection_readFileFully_rdh | /**
* Reads a full file from <tt>inputStream</tt> into <tt>incomingFile</tt> returning its
* checksum.
*
* @param inputStream
* stream to read from
* @param incomingFile
* file to write to
* @param buf
* An auxiliary buffer for data serialization/deserialization
* @return the received file's content hash
* @throws IOException
* thrown if an I/O error occurs while reading/writing data from/to the
* respective streams
*/
private static byte[] readFileFully(final InputStream inputStream, final File incomingFile, final byte[] buf) throws IOException {
    MessageDigest md = BlobUtils.createMessageDigest();
try (FileOutputStream fos = new FileOutputStream(incomingFile)) {
while (true) {
final int bytesExpected = readLength(inputStream);
if (bytesExpected == (-1)) {
// done
break;
}
if (bytesExpected > BUFFER_SIZE) {
throw new IOException("Unexpected number of incoming bytes: " + bytesExpected);
}
readFully(inputStream, buf, 0, bytesExpected, "buffer");
fos.write(buf, 0, bytesExpected);
md.update(buf, 0, bytesExpected);
}
return md.digest();
}
} | 3.26 |