@PublicEvolving
public float getFloat(ConfigOption<Float> configOption, float overrideDefault) {
Object o = getRawValueFromOption(configOption);
if (o == null) {
return overrideDefault;
}
return convertToFloat(o, configOption.defaultValue());
} | Returns the value associated with the given config option as a float.
If no value is mapped under any key of the option, it returns the specified
default instead of the option's default value.
@param configOption The configuration option
@param overrideDefault The value to return if no value was mapped for any key of the option
@return the configured value associated with the given config option, or the overrideDefault |
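A minimal usage sketch of the override-default behaviour, assuming the ConfigOptions builder from the same configuration package; the option key is hypothetical:
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.Configuration;

public class FloatOptionExample {
    public static void main(String[] args) {
        // hypothetical option, used only for illustration
        ConfigOption<Float> threshold = ConfigOptions.key("example.threshold").defaultValue(0.5f);
        Configuration conf = new Configuration();
        // nothing mapped yet: the explicit override wins over the option's own default (0.5f)
        float unset = conf.getFloat(threshold, 0.9f);   // 0.9f
        conf.setFloat(threshold, 0.1f);
        float set = conf.getFloat(threshold, 0.9f);     // 0.1f
        System.out.println(unset + " " + set);
    }
} |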
@PublicEvolving
public void setFloat(ConfigOption<Float> key, float value) {
setValueInternal(key.key(), value);
} | Adds the given value to the configuration object.
The main key of the config option will be used to map the value.
@param key
the option specifying the key to be added
@param value
the value of the key/value pair to be added |
public double getDouble(String key, double defaultValue) {
Object o = getRawValue(key);
if (o == null) {
return defaultValue;
}
return convertToDouble(o, defaultValue);
} | Returns the value associated with the given key as a double.
@param key
the key pointing to the associated value
@param defaultValue
the default value which is returned in case there is no value associated with the given key
@return the (default) value associated with the given key |
@PublicEvolving
public double getDouble(ConfigOption<Double> configOption) {
Object o = getValueOrDefaultFromOption(configOption);
return convertToDouble(o, configOption.defaultValue());
} | Returns the value associated with the given config option as a {@code double}.
@param configOption The configuration option
@return the (default) value associated with the given config option |
@PublicEvolving
public double getDouble(ConfigOption<Double> configOption, double overrideDefault) {
Object o = getRawValueFromOption(configOption);
if (o == null) {
return overrideDefault;
}
return convertToDouble(o, configOption.defaultValue());
} | Returns the value associated with the given config option as a {@code double}.
If no value is mapped under any key of the option, it returns the specified
default instead of the option's default value.
@param configOption The configuration option
@param overrideDefault The value to return if no value was mapped for any key of the option
@return the configured value associated with the given config option, or the overrideDefault |
@PublicEvolving
public void setDouble(ConfigOption<Double> key, double value) {
setValueInternal(key.key(), value);
} | Adds the given value to the configuration object.
The main key of the config option will be used to map the value.
@param key
the option specifying the key to be added
@param value
the value of the key/value pair to be added |
@SuppressWarnings("EqualsBetweenInconvertibleTypes")
public byte[] getBytes(String key, byte[] defaultValue) {
Object o = getRawValue(key);
if (o == null) {
return defaultValue;
}
else if (o.getClass().equals(byte[].class)) {
return (byte[]) o;
}
else {
LOG.warn("Configuration cannot evaluate value {} as a byte[] value", o);
return defaultValue;
}
} | Returns the value associated with the given key as a byte array.
@param key
The key pointing to the associated value.
@param defaultValue
The default value which is returned in case there is no value associated with the given key.
@return the (default) value associated with the given key. |
@PublicEvolving
public String getValue(ConfigOption<?> configOption) {
Object o = getValueOrDefaultFromOption(configOption);
return o == null ? null : o.toString();
} | Returns the value associated with the given config option as a string.
@param configOption The configuration option
@return the (default) value associated with the given config option |
@PublicEvolving
public <T extends Enum<T>> T getEnum(
final Class<T> enumClass,
final ConfigOption<String> configOption) {
checkNotNull(enumClass, "enumClass must not be null");
checkNotNull(configOption, "configOption must not be null");
final String configValue = getString(configOption);
try {
return Enum.valueOf(enumClass, configValue.toUpperCase(Locale.ROOT));
} catch (final IllegalArgumentException | NullPointerException e) {
final String errorMessage = String.format("Value for config option %s must be one of %s (was %s)",
configOption.key(),
Arrays.toString(enumClass.getEnumConstants()),
configValue);
throw new IllegalArgumentException(errorMessage, e);
}
} | Returns the value associated with the given config option as an enum.
@param enumClass The return enum class
@param configOption The configuration option
@throws IllegalArgumentException If the string associated with the given config option cannot
be parsed as a value of the provided enum class. |
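A small sketch of how the upper-casing in getEnum plays out; the enum and the option key are hypothetical:
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.Configuration;

public class EnumOptionExample {
    enum Mode { BATCH, STREAMING }

    public static void main(String[] args) {
        ConfigOption<String> modeOption = ConfigOptions.key("example.mode").defaultValue("batch");
        Configuration conf = new Configuration();
        conf.setString(modeOption, "streaming");
        // the raw string is upper-cased with Locale.ROOT before Enum.valueOf,
        // so "streaming" resolves to Mode.STREAMING
        Mode mode = conf.getEnum(Mode.class, modeOption);
        System.out.println(mode);
        // an unknown value such as "interactive" would raise IllegalArgumentException
        // listing [BATCH, STREAMING] as the allowed values
    }
} |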
public void addAllToProperties(Properties props) {
synchronized (this.confData) {
for (Map.Entry<String, Object> entry : this.confData.entrySet()) {
props.put(entry.getKey(), entry.getValue());
}
}
} | Adds all entries in this {@code Configuration} to the given {@link Properties}. |
public void addAll(Configuration other, String prefix) {
final StringBuilder bld = new StringBuilder();
bld.append(prefix);
final int pl = bld.length();
synchronized (this.confData) {
synchronized (other.confData) {
for (Map.Entry<String, Object> entry : other.confData.entrySet()) {
bld.setLength(pl);
bld.append(entry.getKey());
this.confData.put(bld.toString(), entry.getValue());
}
}
}
} | Adds all entries from the given configuration into this configuration. The keys
are prepended with the given prefix.
@param other
The configuration whose entries are added to this configuration.
@param prefix
The prefix to prepend. |
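A short sketch of the prefixing behaviour; the keys used are made up for illustration:
import org.apache.flink.configuration.Configuration;

public class AddAllPrefixExample {
    public static void main(String[] args) {
        Configuration source = new Configuration();
        source.setString("timeout", "30 s");
        source.setString("retries", "3");

        Configuration target = new Configuration();
        target.addAll(source, "client.");
        // target now contains "client.timeout" -> "30 s" and "client.retries" -> "3"
        System.out.println(target.toMap());
    }
} |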
@PublicEvolving
public boolean contains(ConfigOption<?> configOption) {
synchronized (this.confData){
// first try the current key
if (this.confData.containsKey(configOption.key())) {
return true;
}
else if (configOption.hasFallbackKeys()) {
// try the fallback keys
for (FallbackKey fallbackKey : configOption.fallbackKeys()) {
if (this.confData.containsKey(fallbackKey.getKey())) {
loggingFallback(fallbackKey, configOption);
return true;
}
}
}
return false;
}
} | Checks whether there is an entry for the given config option.
@param configOption The configuration option
@return <tt>true</tt> if a valid (current or deprecated) key of the config option is stored,
<tt>false</tt> otherwise |
@Override
public Map<String, String> toMap() {
synchronized (this.confData){
Map<String, String> ret = new HashMap<>(this.confData.size());
for (Map.Entry<String, Object> entry : confData.entrySet()) {
ret.put(entry.getKey(), entry.getValue().toString());
}
return ret;
}
} | -------------------------------------------------------------------------------------------- |
public <T> boolean removeConfig(ConfigOption<T> configOption){
synchronized (this.confData){
// try the current key
Object oldValue = this.confData.remove(configOption.key());
if (oldValue == null){
for (FallbackKey fallbackKey : configOption.fallbackKeys()){
oldValue = this.confData.remove(fallbackKey.getKey());
if (oldValue != null){
loggingFallback(fallbackKey, configOption);
return true;
}
}
return false;
}
return true;
}
} | Removes given config option from the configuration.
@param configOption config option to remove
@param <T> Type of the config option
@return true if the config option has been removed, false otherwise |
<T> void setValueInternal(String key, T value) {
if (key == null) {
throw new NullPointerException("Key must not be null.");
}
if (value == null) {
throw new NullPointerException("Value must not be null.");
}
synchronized (this.confData) {
this.confData.put(key, value);
}
} | -------------------------------------------------------------------------------------------- |
private int convertToInt(Object o, int defaultValue) {
if (o.getClass() == Integer.class) {
return (Integer) o;
}
else if (o.getClass() == Long.class) {
long value = (Long) o;
if (value <= Integer.MAX_VALUE && value >= Integer.MIN_VALUE) {
return (int) value;
} else {
LOG.warn("Configuration value {} overflows/underflows the integer type.", value);
return defaultValue;
}
}
else {
try {
return Integer.parseInt(o.toString());
}
catch (NumberFormatException e) {
LOG.warn("Configuration cannot evaluate value {} as an integer number", o);
return defaultValue;
}
}
} | -------------------------------------------------------------------------------------------- |
@Override
public void read(DataInputView in) throws IOException {
synchronized (this.confData) {
final int numberOfProperties = in.readInt();
for (int i = 0; i < numberOfProperties; i++) {
String key = StringValue.readString(in);
Object value;
byte type = in.readByte();
switch (type) {
case TYPE_STRING:
value = StringValue.readString(in);
break;
case TYPE_INT:
value = in.readInt();
break;
case TYPE_LONG:
value = in.readLong();
break;
case TYPE_FLOAT:
value = in.readFloat();
break;
case TYPE_DOUBLE:
value = in.readDouble();
break;
case TYPE_BOOLEAN:
value = in.readBoolean();
break;
case TYPE_BYTES:
byte[] bytes = new byte[in.readInt()];
in.readFully(bytes);
value = bytes;
break;
default:
throw new IOException("Unrecognized type: " + type);
}
this.confData.put(key, value);
}
}
} | -------------------------------------------------------------------------------------------- |
@Override
public int compareTo(LongValue o) {
final long other = o.value;
return this.value < other ? -1 : this.value > other ? 1 : 0;
} | -------------------------------------------------------------------------------------------- |
public Map<StateHandleID, StreamStateHandle> uploadFilesToCheckpointFs(
@Nonnull Map<StateHandleID, Path> files,
CheckpointStreamFactory checkpointStreamFactory,
CloseableRegistry closeableRegistry) throws Exception {
Map<StateHandleID, StreamStateHandle> handles = new HashMap<>();
Map<StateHandleID, CompletableFuture<StreamStateHandle>> futures =
createUploadFutures(files, checkpointStreamFactory, closeableRegistry);
try {
FutureUtils.waitForAll(futures.values()).get();
for (Map.Entry<StateHandleID, CompletableFuture<StreamStateHandle>> entry : futures.entrySet()) {
handles.put(entry.getKey(), entry.getValue().get());
}
} catch (ExecutionException e) {
Throwable throwable = ExceptionUtils.stripExecutionException(e);
throwable = ExceptionUtils.stripException(throwable, RuntimeException.class);
if (throwable instanceof IOException) {
throw (IOException) throwable;
} else {
throw new FlinkRuntimeException("Failed to upload data for state handles.", e);
}
}
return handles;
} | Uploads all the given files to the checkpoint filesystem using the specified number of threads.
@param files The files to upload to the checkpoint filesystem.
@param checkpointStreamFactory The checkpoint stream factory used to create the output streams.
@throws Exception Thrown if the files cannot be uploaded. |
@Override
public void uploadPart(RefCountedFSOutputStream file) throws IOException {
// this is to guarantee that nobody is
// writing to the file we are uploading.
checkState(file.isClosed());
final CompletableFuture<PartETag> future = new CompletableFuture<>();
uploadsInProgress.add(future);
final long partLength = file.getPos();
currentUploadInfo.registerNewPart(partLength);
file.retain(); // keep the file while the async upload still runs
uploadThreadPool.execute(new UploadTask(s3AccessHelper, currentUploadInfo, file, future));
} | Adds a part to the uploads without any size limitations.
<p>This method is non-blocking and does not wait for the part upload to complete.
@param file The file with the part data.
@throws IOException If this method throws an exception, the RecoverableS3MultiPartUpload
should not be used any more, but recovered instead. |
@Override
public S3Recoverable snapshotAndGetRecoverable(@Nullable final RefCountedFSOutputStream incompletePartFile) throws IOException {
final String incompletePartObjectName = safelyUploadSmallPart(incompletePartFile);
// make sure all other uploads are complete
// this currently makes the method blocking,
// to be made non-blocking in the future
awaitPendingPartsUpload();
final String objectName = currentUploadInfo.getObjectName();
final String uploadId = currentUploadInfo.getUploadId();
final List<PartETag> completedParts = currentUploadInfo.getCopyOfEtagsOfCompleteParts();
final long sizeInBytes = currentUploadInfo.getExpectedSizeInBytes();
if (incompletePartObjectName == null) {
return new S3Recoverable(objectName, uploadId, completedParts, sizeInBytes);
} else {
return new S3Recoverable(objectName, uploadId, completedParts, sizeInBytes, incompletePartObjectName, incompletePartFile.getPos());
}
} | Creates a snapshot of this MultiPartUpload, from which the upload can be resumed.
<p>Data buffered locally that is smaller than
{@link org.apache.flink.fs.s3.common.FlinkS3FileSystem#S3_MULTIPART_MIN_PART_SIZE S3_MULTIPART_MIN_PART_SIZE}
cannot be uploaded as part of the MPU and is instead stored in S3 as an independent object.
<p>This implementation currently blocks until all part uploads are complete and only then
returns the recoverable. |
@VisibleForTesting
static String createIncompletePartObjectNamePrefix(String objectName) {
checkNotNull(objectName);
final int lastSlash = objectName.lastIndexOf('/');
final String parent;
final String child;
if (lastSlash == -1) {
parent = "";
child = objectName;
} else {
parent = objectName.substring(0, lastSlash + 1);
child = objectName.substring(lastSlash + 1);
}
return parent + (child.isEmpty() ? "" : '_') + child + "_tmp_";
} | ------------------------------------------------------------------------ |
public static RecoverableMultiPartUploadImpl newUpload(
final S3AccessHelper s3AccessHelper,
final Executor uploadThreadPool,
final String objectName) throws IOException {
final String multiPartUploadId = s3AccessHelper.startMultiPartUpload(objectName);
return new RecoverableMultiPartUploadImpl(
s3AccessHelper,
uploadThreadPool,
multiPartUploadId,
objectName,
new ArrayList<>(),
0L,
Optional.empty());
} | ------------------------------------------------------------------------ |
public static void main(String[] args) throws Exception {
ExecutionEnvironment env = ExecutionEnvironment.createCollectionsEnvironment();
BatchTableEnvironment tEnv = BatchTableEnvironment.create(env);
DataSet<WC> input = env.fromElements(
new WC("Hello", 1),
new WC("Ciao", 1),
new WC("Hello", 1));
Table table = tEnv.fromDataSet(input);
Table filtered = table
.groupBy("word")
.select("word, frequency.sum as frequency")
.filter("frequency = 2");
DataSet<WC> result = tEnv.toDataSet(filtered, WC.class);
result.print();
} | ************************************************************************* |
public ConnectedStreams<IN1, IN2> keyBy(int keyPosition1, int keyPosition2) {
return new ConnectedStreams<>(this.environment, inputStream1.keyBy(keyPosition1),
inputStream2.keyBy(keyPosition2));
} | KeyBy operation for connected data stream. Assigns keys to the elements of
input1 and input2 according to keyPosition1 and keyPosition2.
@param keyPosition1
The field used to compute the hashcode of the elements in the
first input stream.
@param keyPosition2
The field used to compute the hashcode of the elements in the
second input stream.
@return The grouped {@link ConnectedStreams} |
public ConnectedStreams<IN1, IN2> keyBy(int[] keyPositions1, int[] keyPositions2) {
return new ConnectedStreams<>(environment, inputStream1.keyBy(keyPositions1),
inputStream2.keyBy(keyPositions2));
} | KeyBy operation for connected data stream. Assigns keys to the elements of
input1 and input2 according to keyPositions1 and keyPositions2.
@param keyPositions1
The fields used to group the first input stream.
@param keyPositions2
The fields used to group the second input stream.
@return The grouped {@link ConnectedStreams} |
public ConnectedStreams<IN1, IN2> keyBy(String field1, String field2) {
return new ConnectedStreams<>(environment, inputStream1.keyBy(field1),
inputStream2.keyBy(field2));
} | KeyBy operation for connected data stream using key expressions. Assigns keys to
the elements of input1 and input2 according to field1 and field2. A field
expression is either the name of a public field or a getter method with
parentheses of the {@link DataStream}'s underlying type. A dot can be used
to drill down into objects, as in {@code "field1.getInnerField2()" }.
@param field1
The grouping expression for the first input
@param field2
The grouping expression for the second input
@return The grouped {@link ConnectedStreams} |
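A sketch of keying two connected streams by field expressions; the POJOs here are hypothetical and only need public fields (or getters) for the expressions to resolve:
import org.apache.flink.streaming.api.datastream.ConnectedStreams;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class KeyByExpressionExample {
    public static class Order {
        public String userId;
        public long amount;
        public Order() {}
        public Order(String userId, long amount) { this.userId = userId; this.amount = amount; }
    }

    public static class Profile {
        public String id;
        public Profile() {}
        public Profile(String id) { this.id = id; }
    }

    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStream<Order> orders = env.fromElements(new Order("u1", 10), new Order("u2", 20));
        DataStream<Profile> profiles = env.fromElements(new Profile("u1"), new Profile("u2"));

        // both inputs are keyed so that elements with the same user end up on the same parallel instance
        ConnectedStreams<Order, Profile> keyed = orders.connect(profiles).keyBy("userId", "id");
    }
} |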
public ConnectedStreams<IN1, IN2> keyBy(String[] fields1, String[] fields2) {
return new ConnectedStreams<>(environment, inputStream1.keyBy(fields1),
inputStream2.keyBy(fields2));
} | KeyBy operation for connected data stream using key expressions. Assigns keys to
the elements of input1 and input2 according to fields1 and fields2. A
field expression is either the name of a public field or a getter method
with parentheses of the {@link DataStream}'s underlying type. A dot can be
used to drill down into objects, as in {@code "field1.getInnerField2()" }.
@param fields1
The grouping expressions for the first input
@param fields2
The grouping expressions for the second input
@return The grouped {@link ConnectedStreams} |
public ConnectedStreams<IN1, IN2> keyBy(KeySelector<IN1, ?> keySelector1, KeySelector<IN2, ?> keySelector2) {
return new ConnectedStreams<>(environment, inputStream1.keyBy(keySelector1),
inputStream2.keyBy(keySelector2));
} | KeyBy operation for connected data stream. Assigns keys to the elements of
input1 and input2 using keySelector1 and keySelector2.
@param keySelector1
The {@link KeySelector} used for grouping the first input
@param keySelector2
The {@link KeySelector} used for grouping the second input
@return The partitioned {@link ConnectedStreams} |
public <KEY> ConnectedStreams<IN1, IN2> keyBy(
KeySelector<IN1, KEY> keySelector1,
KeySelector<IN2, KEY> keySelector2,
TypeInformation<KEY> keyType) {
return new ConnectedStreams<>(
environment,
inputStream1.keyBy(keySelector1, keyType),
inputStream2.keyBy(keySelector2, keyType));
} | KeyBy operation for connected data stream. Assigns keys to the elements of
input1 and input2 using keySelector1 and keySelector2 with explicit type information
for the common key type.
@param keySelector1
The {@link KeySelector} used for grouping the first input
@param keySelector2
The {@link KeySelector} used for grouping the second input
@param keyType The type information of the common key type.
@return The partitioned {@link ConnectedStreams} |
public <R> SingleOutputStreamOperator<R> map(CoMapFunction<IN1, IN2, R> coMapper) {
TypeInformation<R> outTypeInfo = TypeExtractor.getBinaryOperatorReturnType(
coMapper,
CoMapFunction.class,
0,
1,
2,
TypeExtractor.NO_INDEX,
getType1(),
getType2(),
Utils.getCallLocationName(),
true);
return transform("Co-Map", outTypeInfo, new CoStreamMap<>(inputStream1.clean(coMapper)));
} | Applies a CoMap transformation on a {@link ConnectedStreams} and maps
the output to a common type. The transformation calls a
{@link CoMapFunction#map1} for each element of the first input and
{@link CoMapFunction#map2} for each element of the second input. Each
CoMapFunction call returns exactly one element.
@param coMapper The CoMapFunction used to jointly transform the two input DataStreams
@return The transformed {@link DataStream} |
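A minimal sketch of a CoMap over two connected streams, mapping both inputs to a common String type:
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.co.CoMapFunction;

public class CoMapExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStream<Integer> numbers = env.fromElements(1, 2, 3);
        DataStream<String> words = env.fromElements("a", "b");

        DataStream<String> merged = numbers.connect(words)
            .map(new CoMapFunction<Integer, String, String>() {
                @Override
                public String map1(Integer value) {      // called for each element of the first input
                    return "number: " + value;
                }
                @Override
                public String map2(String value) {       // called for each element of the second input
                    return "word: " + value;
                }
            });

        merged.print();
        env.execute("co-map example");
    }
} |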
public <R> SingleOutputStreamOperator<R> flatMap(
CoFlatMapFunction<IN1, IN2, R> coFlatMapper) {
TypeInformation<R> outTypeInfo = TypeExtractor.getBinaryOperatorReturnType(
coFlatMapper,
CoFlatMapFunction.class,
0,
1,
2,
TypeExtractor.NO_INDEX,
getType1(),
getType2(),
Utils.getCallLocationName(),
true);
return transform("Co-Flat Map", outTypeInfo, new CoStreamFlatMap<>(inputStream1.clean(coFlatMapper)));
} | Applies a CoFlatMap transformation on a {@link ConnectedStreams} and
maps the output to a common type. The transformation calls a
{@link CoFlatMapFunction#flatMap1} for each element of the first input
and {@link CoFlatMapFunction#flatMap2} for each element of the second
input. Each CoFlatMapFunction call returns any number of elements
including none.
@param coFlatMapper
The CoFlatMapFunction used to jointly transform the two input
DataStreams
@return The transformed {@link DataStream} |
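The flat-map variant of the same idea; unlike CoMap, each call may emit zero or more elements through the collector (a sketch with made-up inputs):
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.co.CoFlatMapFunction;
import org.apache.flink.util.Collector;

public class CoFlatMapExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStream<String> sentences = env.fromElements("hello world", "flink streams");
        DataStream<Integer> numbers = env.fromElements(3, 5);

        DataStream<String> tokens = sentences.connect(numbers)
            .flatMap(new CoFlatMapFunction<String, Integer, String>() {
                @Override
                public void flatMap1(String value, Collector<String> out) {
                    for (String token : value.split(" ")) {   // may emit several elements
                        out.collect(token);
                    }
                }
                @Override
                public void flatMap2(Integer value, Collector<String> out) {
                    if (value > 4) {                          // may emit none at all
                        out.collect("big:" + value);
                    }
                }
            });

        tokens.print();
        env.execute("co-flat-map example");
    }
} |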
@PublicEvolving
public <R> SingleOutputStreamOperator<R> process(
CoProcessFunction<IN1, IN2, R> coProcessFunction) {
TypeInformation<R> outTypeInfo = TypeExtractor.getBinaryOperatorReturnType(
coProcessFunction,
CoProcessFunction.class,
0,
1,
2,
TypeExtractor.NO_INDEX,
getType1(),
getType2(),
Utils.getCallLocationName(),
true);
return process(coProcessFunction, outTypeInfo);
} | Applies the given {@link CoProcessFunction} on the connected input streams,
thereby creating a transformed output stream.
<p>The function will be called for every element in the input streams and can produce zero or
more output elements. Contrary to the {@link #flatMap(CoFlatMapFunction)} function, this
function can also query the time and set timers. When reacting to the firing of set timers
the function can directly emit elements and/or register yet more timers.
@param coProcessFunction The {@link CoProcessFunction} that is called for each element
in the stream.
@param <R> The type of elements emitted by the {@code CoProcessFunction}.
@return The transformed {@link DataStream}. |
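A sketch of a CoProcessFunction; besides emitting elements, the Context gives access to timestamps and (on keyed streams) to timer registration:
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.co.CoProcessFunction;
import org.apache.flink.util.Collector;

public class CoProcessExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStream<String> control = env.fromElements("pause", "resume");
        DataStream<Long> data = env.fromElements(1L, 2L, 3L);

        DataStream<String> out = control.connect(data)
            .process(new CoProcessFunction<String, Long, String>() {
                @Override
                public void processElement1(String value, Context ctx, Collector<String> collector) {
                    collector.collect("control event: " + value);
                }
                @Override
                public void processElement2(Long value, Context ctx, Collector<String> collector) {
                    // ctx also exposes timestamp() and timerService() for timer registration
                    collector.collect("data event: " + value);
                }
            });

        out.print();
        env.execute("co-process example");
    }
} |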
@Internal
public <R> SingleOutputStreamOperator<R> process(
CoProcessFunction<IN1, IN2, R> coProcessFunction,
TypeInformation<R> outputType) {
TwoInputStreamOperator<IN1, IN2, R> operator;
if ((inputStream1 instanceof KeyedStream) && (inputStream2 instanceof KeyedStream)) {
operator = new KeyedCoProcessOperator<>(inputStream1.clean(coProcessFunction));
} else {
operator = new CoProcessOperator<>(inputStream1.clean(coProcessFunction));
}
return transform("Co-Process", outputType, operator);
} | Applies the given {@link CoProcessFunction} on the connected input streams,
thereby creating a transformed output stream.
<p>The function will be called for every element in the input streams and can produce zero
or more output elements. Contrary to the {@link #flatMap(CoFlatMapFunction)} function,
this function can also query the time and set timers. When reacting to the firing of set
timers the function can directly emit elements and/or register yet more timers.
@param coProcessFunction The {@link CoProcessFunction} that is called for each element
in the stream.
@param <R> The type of elements emitted by the {@code CoProcessFunction}.
@return The transformed {@link DataStream}. |
public List<MemorySegment> close() throws IOException
{
// send off the last segment (the 'true' flag marks it as the last one)
writeSegment(getCurrentSegment(), getCurrentPositionInSegment(), true);
clear();
// close the writer and gather all segments
final LinkedBlockingQueue<MemorySegment> queue = this.writer.getReturnQueue();
this.writer.close();
// re-collect all memory segments
ArrayList<MemorySegment> list = new ArrayList<MemorySegment>(this.numSegments);
for (int i = 0; i < this.numSegments; i++) {
final MemorySegment m = queue.poll();
if (m == null) {
// we get null if the queue is empty. that should not be the case if the reader was properly closed.
throw new RuntimeException("ChannelWriterOutputView: MemorySegments have been taken from return queue by different actor.");
}
list.add(m);
}
return list;
} | Closes this OutputView, closing the underlying writer and returning all memory segments.
@return A list containing all memory segments originally supplied to this view.
@throws IOException Thrown, if the underlying writer could not be properly closed. |
protected final MemorySegment nextSegment(MemorySegment current, int posInSegment) throws IOException
{
if (current != null) {
writeSegment(current, posInSegment, false);
}
final MemorySegment next = this.writer.getNextReturnedBlock();
this.blockCount++;
return next;
} | -------------------------------------------------------------------------------------------- |
@Override
public void open(int taskNumber, int numTasks) throws IOException {
this.session = cluster.connect();
this.prepared = session.prepare(insertQuery);
this.callback = new FutureCallback<ResultSet>() {
@Override
public void onSuccess(ResultSet ignored) {
onWriteSuccess(ignored);
}
@Override
public void onFailure(Throwable t) {
onWriteFailure(t);
}
};
} | Opens a Session to Cassandra and initializes the prepared statement.
@param taskNumber The number of the parallel instance.
@throws IOException Thrown, if the output could not be opened due to an
I/O problem. |
@Override
public void close() throws IOException {
try {
if (session != null) {
session.close();
}
} catch (Exception e) {
LOG.error("Error while closing session.", e);
}
try {
if (cluster != null) {
cluster.close();
}
} catch (Exception e) {
LOG.error("Error while closing cluster.", e);
}
} | Closes all resources used. |
public TypeSerializer<UK> getKeySerializer() {
final TypeSerializer<Map<UK, UV>> rawSerializer = getSerializer();
if (!(rawSerializer instanceof MapSerializer)) {
throw new IllegalStateException("Unexpected serializer type.");
}
return ((MapSerializer<UK, UV>) rawSerializer).getKeySerializer();
} | Gets the serializer for the keys in the state.
@return The serializer for the keys in the state. |
private void clearAllState(
W window,
AppendingState<IN, ACC> windowState,
MergingWindowSet<W> mergingWindows) throws Exception {
windowState.clear();
triggerContext.clear();
processContext.window = window;
processContext.clear();
if (mergingWindows != null) {
mergingWindows.retireWindow(window);
mergingWindows.persist();
}
} | Drops all state for the given window and calls
{@link Trigger#clear(Window, Trigger.TriggerContext)}.
<p>The caller must ensure that the
correct key is set in the state backend and the triggerContext object. |
@SuppressWarnings("unchecked")
private void emitWindowContents(W window, ACC contents) throws Exception {
timestampedCollector.setAbsoluteTimestamp(window.maxTimestamp());
processContext.window = window;
userFunction.process(triggerContext.key, window, processContext, contents, timestampedCollector);
} | Emits the contents of the given window using the {@link InternalWindowFunction}. |
protected MergingWindowSet<W> getMergingWindowSet() throws Exception {
@SuppressWarnings("unchecked")
MergingWindowAssigner<? super IN, W> mergingAssigner = (MergingWindowAssigner<? super IN, W>) windowAssigner;
return new MergingWindowSet<>(mergingAssigner, mergingSetsState);
} | Retrieves the {@link MergingWindowSet} for the currently active key.
The caller must ensure that the correct key is set in the state backend.
<p>The caller must also ensure to properly persist changes to state using
{@link MergingWindowSet#persist()}. |
protected boolean isElementLate(StreamRecord<IN> element){
return (windowAssigner.isEventTime()) &&
(element.getTimestamp() + allowedLateness <= internalTimerService.currentWatermark());
} | Decides whether a record is currently late, based on the current watermark and allowed lateness.
@param element The element to check
@return true if the element is considered late based on the current watermark and allowed lateness |
protected void deleteCleanupTimer(W window) {
long cleanupTime = cleanupTime(window);
if (cleanupTime == Long.MAX_VALUE) {
// no need to clean up because we didn't set one
return;
}
if (windowAssigner.isEventTime()) {
triggerContext.deleteEventTimeTimer(cleanupTime);
} else {
triggerContext.deleteProcessingTimeTimer(cleanupTime);
}
} | Deletes the cleanup timer set for the contents of the provided window.
@param window
the window whose state to discard |
public Rowtime timestampsFromField(String fieldName) {
internalProperties.putString(ROWTIME_TIMESTAMPS_TYPE, ROWTIME_TIMESTAMPS_TYPE_VALUE_FROM_FIELD);
internalProperties.putString(ROWTIME_TIMESTAMPS_FROM, fieldName);
return this;
} | Sets a built-in timestamp extractor that converts an existing {@link Long} or
{@link Types#SQL_TIMESTAMP} field into the rowtime attribute.
@param fieldName The field to convert into a rowtime attribute. |
public Rowtime watermarksPeriodicBounded(long delay) {
internalProperties.putString(ROWTIME_WATERMARKS_TYPE, ROWTIME_WATERMARKS_TYPE_VALUE_PERIODIC_BOUNDED);
internalProperties.putLong(ROWTIME_WATERMARKS_DELAY, delay);
return this;
} | Sets a built-in watermark strategy for rowtime attributes which are out-of-order by a bounded
time interval.
<p>Emits watermarks which are the maximum observed timestamp minus the specified delay.
@param delay delay in milliseconds |
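A small sketch chaining the two rowtime helpers shown above: an existing timestamp field becomes the rowtime attribute, and watermarks trail the maximum seen timestamp by one minute (field name is made up):
import org.apache.flink.table.descriptors.Rowtime;

public class RowtimeDescriptorExample {
    public static void main(String[] args) {
        Rowtime rowtime = new Rowtime()
            .timestampsFromField("eventTime")      // use the existing "eventTime" field as rowtime
            .watermarksPeriodicBounded(60_000L);   // watermark = max observed timestamp - 60 s

        // the descriptor boils down to a plain property map
        System.out.println(rowtime.toProperties());
    }
} |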
@Override
public Map<String, String> toProperties() {
final DescriptorProperties properties = new DescriptorProperties();
properties.putProperties(internalProperties);
return properties.asMap();
} | Converts this descriptor into a set of properties. |
public Map<String, Accumulator<?, ?>> deserializeUserAccumulators(ClassLoader classLoader) throws IOException, ClassNotFoundException {
return userAccumulators.deserializeValue(classLoader);
} | Gets the user-defined accumulator values.
@return the deserialized map of user-defined accumulators |
public void reset() {
nextWindow = null;
watermark = Long.MIN_VALUE;
triggerWindowStartIndex = 0;
emptyWindowTriggered = true;
resetBuffer();
} | Reset for next group. |
public boolean hasTriggerWindow() {
skipEmptyWindow();
Preconditions.checkState(watermark == Long.MIN_VALUE || nextWindow != null,
"next trigger window cannot be null.");
return nextWindow != null && nextWindow.getEnd() <= watermark;
} | Checks if there are windows that can be triggered according to the current watermark.
@return true when there are windows to be triggered.
It is designed to be idempotent. |
public static final Timestamp parseField(byte[] bytes, int startPos, int length, char delimiter) {
final int limitedLen = nextStringLength(bytes, startPos, length, delimiter);
if (limitedLen > 0 &&
(Character.isWhitespace(bytes[startPos]) || Character.isWhitespace(bytes[startPos + limitedLen - 1]))) {
throw new NumberFormatException("There is leading or trailing whitespace in the numeric field.");
}
final String str = new String(bytes, startPos, limitedLen, ConfigConstants.DEFAULT_CHARSET);
return Timestamp.valueOf(str);
} | Static utility to parse a field of type Timestamp from a byte sequence that represents text
characters (such as when read from a file stream).
@param bytes The bytes containing the text data that should be parsed.
@param startPos The offset to start the parsing.
@param length The length of the byte sequence (counting from the offset).
@param delimiter The delimiter that terminates the field.
@return The parsed value.
@throws IllegalArgumentException Thrown when the value cannot be parsed because the text
does not represent a correct number. |
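A usage sketch, assuming this is the static helper on Flink's SqlTimestampParser (the enclosing class name is not shown above, so it is an assumption):
import java.nio.charset.StandardCharsets;
import java.sql.Timestamp;

import org.apache.flink.types.parser.SqlTimestampParser;

public class TimestampParseExample {
    public static void main(String[] args) {
        // a delimited text record, e.g. one CSV field followed by the next one
        byte[] record = "2019-05-01 12:30:00.0|next-field".getBytes(StandardCharsets.UTF_8);
        // parse from offset 0 up to the '|' delimiter
        Timestamp ts = SqlTimestampParser.parseField(record, 0, record.length, '|');
        System.out.println(ts);
    }
} |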
@Override
public void addInputChannel(RemoteInputChannel listener) throws IOException {
checkError();
inputChannels.putIfAbsent(listener.getInputChannelId(), listener);
} | ------------------------------------------------------------------------ |
@Override
public void channelActive(final ChannelHandlerContext ctx) throws Exception {
if (this.ctx == null) {
this.ctx = ctx;
}
super.channelActive(ctx);
} | ------------------------------------------------------------------------ |
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object msg) throws Exception {
if (msg instanceof RemoteInputChannel) {
boolean triggerWrite = inputChannelsWithCredit.isEmpty();
inputChannelsWithCredit.add((RemoteInputChannel) msg);
if (triggerWrite) {
writeAndFlushNextMessageIfPossible(ctx.channel());
}
} else {
ctx.fireUserEventTriggered(msg);
}
} | Triggered by notifying credit available in the client handler pipeline.
<p>Enqueues the input channel and will trigger write&flush unannounced credits
for this input channel if it is the first one in the queue. |
private void checkError() throws IOException {
final Throwable t = channelError.get();
if (t != null) {
if (t instanceof IOException) {
throw (IOException) t;
} else {
throw new IOException("There has been an error in the channel.", t);
}
}
} | Checks for an error and rethrows it if one was reported. |
private void writeAndFlushNextMessageIfPossible(Channel channel) {
if (channelError.get() != null || !channel.isWritable()) {
return;
}
while (true) {
RemoteInputChannel inputChannel = inputChannelsWithCredit.poll();
// The input channel may be null because of the write callbacks
// that are executed after each write.
if (inputChannel == null) {
return;
}
// There is no need to notify credit for a released channel.
if (!inputChannel.isReleased()) {
AddCredit msg = new AddCredit(
inputChannel.getPartitionId(),
inputChannel.getAndResetUnannouncedCredit(),
inputChannel.getInputChannelId());
// Write and flush and wait until this is done before
// trying to continue with the next input channel.
channel.writeAndFlush(msg).addListener(writeListener);
return;
}
}
} | Tries to write&flush unannounced credits for the next input channel in queue.
<p>This method may be called by the first input channel enqueuing, or the complete
future's callback in previous input channel, or the channel writability changed event. |
public List<KafkaTopicPartitionLeader> getPartitionLeadersForTopics(List<String> topics) {
List<KafkaTopicPartitionLeader> partitions = new LinkedList<>();
retryLoop: for (int retry = 0; retry < numRetries; retry++) {
brokersLoop: for (int arrIdx = 0; arrIdx < seedBrokerAddresses.length; arrIdx++) {
LOG.info("Trying to get topic metadata from broker {} in try {}/{}", seedBrokerAddresses[currentContactSeedBrokerIndex], retry, numRetries);
try {
// clear in case we have an incomplete list from previous tries
partitions.clear();
for (TopicMetadata item : consumer.send(new TopicMetadataRequest(topics)).topicsMetadata()) {
if (item.errorCode() != ErrorMapping.NoError()) {
// warn and try more brokers
LOG.warn("Error while getting metadata from broker {} to find partitions for {}. Error: {}.",
seedBrokerAddresses[currentContactSeedBrokerIndex], topics.toString(), ErrorMapping.exceptionFor(item.errorCode()).getMessage());
useNextAddressAsNewContactSeedBroker();
continue brokersLoop;
}
if (!topics.contains(item.topic())) {
LOG.warn("Received metadata from topic " + item.topic() + " even though it was not requested. Skipping ...");
useNextAddressAsNewContactSeedBroker();
continue brokersLoop;
}
for (PartitionMetadata part : item.partitionsMetadata()) {
Node leader = brokerToNode(part.leader());
KafkaTopicPartition ktp = new KafkaTopicPartition(item.topic(), part.partitionId());
KafkaTopicPartitionLeader pInfo = new KafkaTopicPartitionLeader(ktp, leader);
partitions.add(pInfo);
}
}
break retryLoop; // leave the loop through the brokers
}
catch (Exception e) {
//validates seed brokers in case of a ClosedChannelException
validateSeedBrokers(seedBrokerAddresses, e);
LOG.warn("Error communicating with broker {} to find partitions for {}. {} Message: {}",
seedBrokerAddresses[currentContactSeedBrokerIndex], topics, e.getClass().getName(), e.getMessage());
LOG.debug("Detailed trace", e);
// we sleep a bit. Retrying immediately doesn't make sense in cases where Kafka is reorganizing the leader metadata
try {
Thread.sleep(500);
} catch (InterruptedException e1) {
// sleep shorter.
}
useNextAddressAsNewContactSeedBroker();
}
} // brokers loop
} // retries loop
return partitions;
} | Sends a request to Kafka to get the partitions for the given topics.
@param topics The names of the topics.
@return The partition leaders for the given topics. |
private void useNextAddressAsNewContactSeedBroker() {
if (++currentContactSeedBrokerIndex == seedBrokerAddresses.length) {
currentContactSeedBrokerIndex = 0;
}
URL newContactUrl = NetUtils.getCorrectHostnamePort(seedBrokerAddresses[currentContactSeedBrokerIndex]);
this.consumer = new SimpleConsumer(newContactUrl.getHost(), newContactUrl.getPort(), soTimeout, bufferSize, dummyClientId);
} | Re-establish broker connection using the next available seed broker address. |
private static Node brokerToNode(Broker broker) {
return new Node(broker.id(), broker.host(), broker.port());
} | Turn a broker instance into a node instance.
@param broker broker instance
@return Node representing the given broker |
private static void validateSeedBrokers(String[] seedBrokers, Exception exception) {
if (!(exception instanceof ClosedChannelException)) {
return;
}
int unknownHosts = 0;
for (String broker : seedBrokers) {
URL brokerUrl = NetUtils.getCorrectHostnamePort(broker.trim());
try {
InetAddress.getByName(brokerUrl.getHost());
} catch (UnknownHostException e) {
unknownHosts++;
}
}
// throw meaningful exception if all the provided hosts are invalid
if (unknownHosts == seedBrokers.length) {
throw new IllegalArgumentException("All the servers provided in: '"
+ ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG + "' config are invalid. (unknown hosts)");
}
} | Validate that at least one seed broker is valid in case of a
ClosedChannelException.
@param seedBrokers
array containing the seed brokers e.g. ["host1:port1",
"host2:port2"]
@param exception
instance |
public CompletableFuture<AccessExecutionGraph> getExecutionGraph(JobID jobId, RestfulGateway restfulGateway) {
return getExecutionGraphInternal(jobId, restfulGateway).thenApply(Function.identity());
} | Gets the {@link AccessExecutionGraph} for the given {@link JobID} and caches it. The
{@link AccessExecutionGraph} will be requested again after the refresh interval has passed
or if the graph could not be retrieved from the given gateway.
@param jobId identifying the {@link ArchivedExecutionGraph} to get
@param restfulGateway to request the {@link ArchivedExecutionGraph} from
@return Future containing the requested {@link ArchivedExecutionGraph} |
public void cleanup() {
long currentTime = System.currentTimeMillis();
// remove entries which have exceeded their time to live
cachedExecutionGraphs.values().removeIf(
(ExecutionGraphEntry entry) -> currentTime >= entry.getTTL());
} | Performs the cleanup of outdated {@link ExecutionGraphEntry} instances. |
public static void maxNormalizedKey(MemorySegment target, int offset, int numBytes) {
//write max value.
for (int i = 0; i < numBytes; i++) {
target.put(offset + i, (byte) -1);
}
} | Max unsigned byte is -1. |
public static void putStringNormalizedKey(
BinaryString value, MemorySegment target, int offset, int numBytes) {
final int limit = offset + numBytes;
final int end = value.getSizeInBytes();
for (int i = 0; i < end && offset < limit; i++) {
target.put(offset++, value.getByte(i));
}
for (int i = offset; i < limit; i++) {
target.put(i, (byte) 0);
}
} | UTF-8 strings can be compared byte-wise, so the raw bytes are used directly as the normalized key. |
public static void putDecimalNormalizedKey(
Decimal record, MemorySegment target, int offset, int len) {
assert record.getPrecision() <= Decimal.MAX_COMPACT_PRECISION;
putLongNormalizedKey(record.toUnscaledLong(), target, offset, len);
} | Only supports decimals with compact precision. |
public static void putFloatNormalizedKey(float value, MemorySegment target, int offset,
int numBytes) {
int iValue = Float.floatToIntBits(value);
iValue ^= ((iValue >> (Integer.SIZE - 1)) | Integer.MIN_VALUE);
NormalizedKeyUtil.putUnsignedIntegerNormalizedKey(iValue, target, offset, numBytes);
} | See http://stereopsis.com/radix.html for more details. |
public static void putDoubleNormalizedKey(double value, MemorySegment target, int offset,
int numBytes) {
long lValue = Double.doubleToLongBits(value);
lValue ^= ((lValue >> (Long.SIZE - 1)) | Long.MIN_VALUE);
NormalizedKeyUtil.putUnsignedLongNormalizedKey(lValue, target, offset, numBytes);
} | See http://stereopsis.com/radix.html for more details. |
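The transformation above is the standard IEEE-754 radix trick: negative values have all their bits flipped, non-negative values only the sign bit, so the resulting bit patterns order correctly under unsigned comparison. A self-contained sketch of the same idea for floats:
public class FloatSortableBitsExample {
    // same transformation as in putFloatNormalizedKey above
    static int sortableBits(float value) {
        int bits = Float.floatToIntBits(value);
        // negatives: (bits >> 31) is all ones, so every bit is flipped;
        // non-negatives: only the sign bit is flipped
        return bits ^ ((bits >> 31) | Integer.MIN_VALUE);
    }

    public static void main(String[] args) {
        float[] values = {-2.5f, -0.0f, 0.0f, 1.0f, 3.5f};
        for (int i = 1; i < values.length; i++) {
            // unsigned comparison of the transformed bits matches the numeric order
            boolean ordered = Integer.compareUnsigned(
                sortableBits(values[i - 1]), sortableBits(values[i])) <= 0;
            System.out.println(values[i - 1] + " <= " + values[i] + " : " + ordered);
        }
    }
} |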
public static void mergeHadoopConf(Configuration hadoopConfig) {
// we have to load the global configuration here, because the HadoopInputFormatBase does not
// have access to a Flink configuration object
org.apache.flink.configuration.Configuration flinkConfiguration = GlobalConfiguration.loadConfiguration();
Configuration hadoopConf =
org.apache.flink.api.java.hadoop.mapred.utils.HadoopUtils.getHadoopConfiguration(flinkConfiguration);
for (Map.Entry<String, String> e : hadoopConf) {
if (hadoopConfig.get(e.getKey()) == null) {
hadoopConfig.set(e.getKey(), e.getValue());
}
}
} | Merge HadoopConfiguration into Configuration. This is necessary for the HDFS configuration. |
public JobExecutionResult execute(Plan program) throws Exception {
long startTime = System.currentTimeMillis();
initCache(program.getCachedFiles());
Collection<? extends GenericDataSinkBase<?>> sinks = program.getDataSinks();
for (Operator<?> sink : sinks) {
execute(sink);
}
long endTime = System.currentTimeMillis();
Map<String, OptionalFailure<Object>> accumulatorResults = AccumulatorHelper.toResultMap(accumulators);
return new JobExecutionResult(null, endTime - startTime, accumulatorResults);
} | -------------------------------------------------------------------------------------------- |
private <IN> void executeDataSink(GenericDataSinkBase<?> sink, int superStep) throws Exception {
Operator<?> inputOp = sink.getInput();
if (inputOp == null) {
throw new InvalidProgramException("The data sink " + sink.getName() + " has no input.");
}
@SuppressWarnings("unchecked")
List<IN> input = (List<IN>) execute(inputOp);
@SuppressWarnings("unchecked")
GenericDataSinkBase<IN> typedSink = (GenericDataSinkBase<IN>) sink;
// build the runtime context and compute broadcast variables, if necessary
TaskInfo taskInfo = new TaskInfo(typedSink.getName(), 1, 0, 1, 0);
RuntimeUDFContext ctx;
MetricGroup metrics = new UnregisteredMetricsGroup();
if (RichOutputFormat.class.isAssignableFrom(typedSink.getUserCodeWrapper().getUserCodeClass())) {
ctx = superStep == 0 ? new RuntimeUDFContext(taskInfo, userCodeClassLoader, executionConfig, cachedFiles, accumulators, metrics) :
new IterationRuntimeUDFContext(taskInfo, userCodeClassLoader, executionConfig, cachedFiles, accumulators, metrics);
} else {
ctx = null;
}
typedSink.executeOnCollections(input, ctx, executionConfig);
} | -------------------------------------------------------------------------------------------- |
public TableStats copy() {
TableStats copy = new TableStats(this.rowCount);
for (Map.Entry<String, ColumnStats> entry : this.colStats.entrySet()) {
copy.colStats.put(entry.getKey(), entry.getValue().copy());
}
return copy;
} | Create a deep copy of "this" instance.
@return a deep copy |
public void setKeyAndKeyGroup(@Nonnull K key, @Nonnegative int keyGroupId) {
try {
serializeKeyGroupAndKey(key, keyGroupId);
} catch (IOException shouldNeverHappen) {
throw new FlinkRuntimeException(shouldNeverHappen);
}
} | Sets the key and key-group as prefix. This will serialize them into the buffer, and they will be used to
create composite keys with the provided namespaces.
@param key the key.
@param keyGroupId the key-group id for the key. |
@Nonnull
public <N> byte[] buildCompositeKeyNamespace(@Nonnull N namespace, @Nonnull TypeSerializer<N> namespaceSerializer) {
try {
serializeNamespace(namespace, namespaceSerializer);
final byte[] result = keyOutView.getCopyOfBuffer();
resetToKey();
return result;
} catch (IOException shouldNeverHappen) {
throw new FlinkRuntimeException(shouldNeverHappen);
}
} | Returns a serialized composite key, from the key and key-group provided in a previous call to
{@link #setKeyAndKeyGroup(Object, int)} and the given namespace.
@param namespace the namespace to concatenate for the serialized composite key bytes.
@param namespaceSerializer the serializer to obtain the serialized form of the namespace.
@param <N> the type of the namespace.
@return the bytes for the serialized composite key of key-group, key, namespace. |
@Nonnull
public <N, UK> byte[] buildCompositeKeyNamesSpaceUserKey(
@Nonnull N namespace,
@Nonnull TypeSerializer<N> namespaceSerializer,
@Nonnull UK userKey,
@Nonnull TypeSerializer<UK> userKeySerializer) throws IOException {
serializeNamespace(namespace, namespaceSerializer);
userKeySerializer.serialize(userKey, keyOutView);
byte[] result = keyOutView.getCopyOfBuffer();
resetToKey();
return result;
} | Returns a serialized composite key, from the key and key-group provided in a previous call to
{@link #setKeyAndKeyGroup(Object, int)} and the given namespace, followed by the given user-key.
@param namespace the namespace to concatenate for the serialized composite key bytes.
@param namespaceSerializer the serializer to obtain the serialized form of the namespace.
@param userKey the user-key to concatenate for the serialized composite key, after the namespace.
@param userKeySerializer the serializer to obtain the serialized form of the user-key.
@param <N> the type of the namespace.
@param <UK> the type of the user-key.
@return the bytes for the serialized composite key of key-group, key, namespace, and user-key. |
@Override
public UV get(UK userKey) throws IOException, RocksDBException {
byte[] rawKeyBytes = serializeCurrentKeyWithGroupAndNamespacePlusUserKey(userKey, userKeySerializer);
byte[] rawValueBytes = backend.db.get(columnFamily, rawKeyBytes);
return (rawValueBytes == null ? null : deserializeUserValue(dataInputView, rawValueBytes, userValueSerializer));
} | ------------------------------------------------------------------------ |
private static <UK> UK deserializeUserKey(
DataInputDeserializer dataInputView,
int userKeyOffset,
byte[] rawKeyBytes,
TypeSerializer<UK> keySerializer) throws IOException {
dataInputView.setBuffer(rawKeyBytes, userKeyOffset, rawKeyBytes.length - userKeyOffset);
return keySerializer.deserialize(dataInputView);
} | ------------------------------------------------------------------------ |
public static <T> void writeSerializer(DataOutputView out, TypeSerializer<T> serializer) throws IOException {
new TypeSerializerSerializationUtil.TypeSerializerSerializationProxy<>(serializer).write(out);
} | Writes a {@link TypeSerializer} to the provided data output view.
<p>It is written with a format that can be later read again using
{@link #tryReadSerializer(DataInputView, ClassLoader, boolean)}.
@param out the data output view.
@param serializer the serializer to write.
@param <T> Data type of the serializer.
@throws IOException |
public static <T> TypeSerializer<T> tryReadSerializer(DataInputView in, ClassLoader userCodeClassLoader) throws IOException {
return tryReadSerializer(in, userCodeClassLoader, false);
} | Reads from a data input view a {@link TypeSerializer} that was previously
written using {@link #writeSerializer(DataOutputView, TypeSerializer)}.
<p>If deserialization fails for any reason (corrupted serializer bytes, serializer class
no longer in classpath, serializer class no longer valid, etc.), an {@link IOException} is thrown.
@param in the data input view.
@param userCodeClassLoader the user code class loader to use.
@param <T> Data type of the serializer.
@return the deserialized serializer. |
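A round-trip sketch using Flink's in-memory data views; the choice of IntSerializer is just for illustration:
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.api.common.typeutils.TypeSerializerSerializationUtil;
import org.apache.flink.api.common.typeutils.base.IntSerializer;
import org.apache.flink.core.memory.DataInputDeserializer;
import org.apache.flink.core.memory.DataOutputSerializer;

public class SerializerRoundTripExample {
    public static void main(String[] args) throws Exception {
        DataOutputSerializer out = new DataOutputSerializer(128);
        TypeSerializerSerializationUtil.writeSerializer(out, IntSerializer.INSTANCE);

        DataInputDeserializer in = new DataInputDeserializer(out.getCopyOfBuffer());
        TypeSerializer<Integer> restored = TypeSerializerSerializationUtil.tryReadSerializer(
            in, SerializerRoundTripExample.class.getClassLoader());

        // the restored serializer is usable again, e.g. restored.createInstance()
        System.out.println(restored.getClass().getName());
    }
} |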
public static <T> TypeSerializer<T> tryReadSerializer(
DataInputView in,
ClassLoader userCodeClassLoader,
boolean useDummyPlaceholder) throws IOException {
final TypeSerializerSerializationUtil.TypeSerializerSerializationProxy<T> proxy =
new TypeSerializerSerializationUtil.TypeSerializerSerializationProxy<>(userCodeClassLoader);
try {
proxy.read(in);
return proxy.getTypeSerializer();
} catch (UnloadableTypeSerializerException e) {
if (useDummyPlaceholder) {
LOG.warn("Could not read a requested serializer. Replaced with a UnloadableDummyTypeSerializer.", e.getCause());
return new UnloadableDummyTypeSerializer<>(e.getSerializerBytes(), e.getCause());
} else {
throw e;
}
}
} | Reads from a data input view a {@link TypeSerializer} that was previously
written using {@link #writeSerializer(DataOutputView, TypeSerializer)}.
<p>If deserialization fails due to any exception, users can opt to use a dummy
{@link UnloadableDummyTypeSerializer} to hold the serializer bytes, otherwise an {@link IOException} is thrown.
@param in the data input view.
@param userCodeClassLoader the user code class loader to use.
@param useDummyPlaceholder whether or not to use a dummy {@link UnloadableDummyTypeSerializer} to hold the
serializer bytes in the case of a {@link ClassNotFoundException} or
{@link InvalidClassException}.
@param <T> Data type of the serializer.
@return the deserialized serializer. |
public static void writeSerializersAndConfigsWithResilience(
DataOutputView out,
List<Tuple2<TypeSerializer<?>, TypeSerializerSnapshot<?>>> serializersAndConfigs) throws IOException {
try (
ByteArrayOutputStreamWithPos bufferWithPos = new ByteArrayOutputStreamWithPos();
DataOutputViewStreamWrapper bufferWrapper = new DataOutputViewStreamWrapper(bufferWithPos)) {
out.writeInt(serializersAndConfigs.size());
for (Tuple2<TypeSerializer<?>, TypeSerializerSnapshot<?>> serAndConfSnapshot : serializersAndConfigs) {
out.writeInt(bufferWithPos.getPosition());
writeSerializer(bufferWrapper, serAndConfSnapshot.f0);
out.writeInt(bufferWithPos.getPosition());
TypeSerializerSnapshotSerializationUtil.writeSerializerSnapshot(
bufferWrapper, (TypeSerializerSnapshot) serAndConfSnapshot.f1, serAndConfSnapshot.f0);
}
out.writeInt(bufferWithPos.getPosition());
out.write(bufferWithPos.getBuf(), 0, bufferWithPos.getPosition());
}
} | Write a list of serializers and their corresponding config snapshots to the provided
data output view. This method writes in a fault tolerant way, so that when read again
using {@link #readSerializersAndConfigsWithResilience(DataInputView, ClassLoader)}, if
deserialization of the serializer fails, its configuration snapshot will remain intact.
<p>Specifically, all written serializers and their config snapshots are indexed by their
offset positions within the serialized bytes. The serialization format is as follows:
<ul>
<li>1. number of serializer and configuration snapshot pairs.</li>
<li>2. offsets of each serializer and configuration snapshot, in order.</li>
<li>3. total number of bytes for the serialized serializers and the config snapshots.</li>
<li>4. serialized serializers and the config snapshots.</li>
</ul>
@param out the data output view.
@param serializersAndConfigs serializer and configuration snapshot pairs
@throws IOException |
public static List<Tuple2<TypeSerializer<?>, TypeSerializerSnapshot<?>>> readSerializersAndConfigsWithResilience(
DataInputView in,
ClassLoader userCodeClassLoader) throws IOException {
int numSerializersAndConfigSnapshots = in.readInt();
int[] offsets = new int[numSerializersAndConfigSnapshots * 2];
for (int i = 0; i < numSerializersAndConfigSnapshots; i++) {
offsets[i * 2] = in.readInt();
offsets[i * 2 + 1] = in.readInt();
}
int totalBytes = in.readInt();
byte[] buffer = new byte[totalBytes];
in.readFully(buffer);
List<Tuple2<TypeSerializer<?>, TypeSerializerSnapshot<?>>> serializersAndConfigSnapshots =
new ArrayList<>(numSerializersAndConfigSnapshots);
TypeSerializer<?> serializer;
TypeSerializerSnapshot<?> configSnapshot;
try (
ByteArrayInputStreamWithPos bufferWithPos = new ByteArrayInputStreamWithPos(buffer);
DataInputViewStreamWrapper bufferWrapper = new DataInputViewStreamWrapper(bufferWithPos)) {
for (int i = 0; i < numSerializersAndConfigSnapshots; i++) {
bufferWithPos.setPosition(offsets[i * 2]);
serializer = tryReadSerializer(bufferWrapper, userCodeClassLoader, true);
bufferWithPos.setPosition(offsets[i * 2 + 1]);
configSnapshot = TypeSerializerSnapshotSerializationUtil.readSerializerSnapshot(
bufferWrapper, userCodeClassLoader, serializer);
if (serializer instanceof LegacySerializerSnapshotTransformer) {
configSnapshot = transformLegacySnapshot(serializer, configSnapshot);
}
serializersAndConfigSnapshots.add(new Tuple2<>(serializer, configSnapshot));
}
}
return serializersAndConfigSnapshots;
} | Reads from a data input view a list of serializers and their corresponding config snapshots
written using {@link #writeSerializersAndConfigsWithResilience(DataOutputView, List)}.
<p>If deserialization for serializers fails due to any exception, users can opt to use a dummy
{@link UnloadableDummyTypeSerializer} to hold the serializer bytes.
@param in the data input view.
@param userCodeClassLoader the user code class loader to use.
@return the deserialized serializer and config snapshot pairs.
@throws IOException |
@Override
public PojoSerializerSnapshot<T> snapshotConfiguration() {
return buildSnapshot(
clazz,
registeredClasses,
registeredSerializers,
fields,
fieldSerializers,
subclassSerializerCache);
} | -------------------------------------------------------------------------------------------- |
private static LinkedHashSet<Class<?>> getRegisteredSubclassesFromExecutionConfig(
Class<?> basePojoClass,
ExecutionConfig executionConfig) {
LinkedHashSet<Class<?>> subclassesInRegistrationOrder = new LinkedHashSet<>(executionConfig.getRegisteredPojoTypes().size());
for (Class<?> registeredClass : executionConfig.getRegisteredPojoTypes()) {
if (registeredClass.equals(basePojoClass)) {
continue;
}
if (!basePojoClass.isAssignableFrom(registeredClass)) {
continue;
}
subclassesInRegistrationOrder.add(registeredClass);
}
return subclassesInRegistrationOrder;
} | Extracts the subclasses of the base POJO class registered in the execution config. |
private static LinkedHashMap<Class<?>, Integer> createRegisteredSubclassTags(LinkedHashSet<Class<?>> registeredSubclasses) {
final LinkedHashMap<Class<?>, Integer> classToTag = new LinkedHashMap<>();
int id = 0;
for (Class<?> registeredClass : registeredSubclasses) {
classToTag.put(registeredClass, id);
id ++;
}
return classToTag;
} | Builds map of registered subclasses to their class tags.
Class tags will be integers starting from 0, assigned incrementally with the order of provided subclasses. |
private static TypeSerializer<?>[] createRegisteredSubclassSerializers(
LinkedHashSet<Class<?>> registeredSubclasses,
ExecutionConfig executionConfig) {
final TypeSerializer<?>[] subclassSerializers = new TypeSerializer[registeredSubclasses.size()];
int i = 0;
for (Class<?> registeredClass : registeredSubclasses) {
subclassSerializers[i] = TypeExtractor.createTypeInfo(registeredClass).createSerializer(executionConfig);
i++;
}
return subclassSerializers;
} | Creates an array of serializers for provided list of registered subclasses.
Order of returned serializers will correspond to order of provided subclasses. |
TypeSerializer<?> getSubclassSerializer(Class<?> subclass) {
TypeSerializer<?> result = subclassSerializerCache.get(subclass);
if (result == null) {
result = createSubclassSerializer(subclass);
subclassSerializerCache.put(subclass, result);
}
return result;
} | Fetches cached serializer for a non-registered subclass;
also creates the serializer if it doesn't exist yet.
This method is also exposed to package-private access
for testing purposes. |
private int findField(String fieldName) {
int foundIndex = 0;
for (Field field : fields) {
if (field != null && fieldName.equals(field.getName())) {
return foundIndex;
}
foundIndex++;
}
return -1;
} | Finds and returns the order (0-based) of a POJO field.
Returns -1 if the field does not exist for this POJO. |
private static <T> PojoSerializerSnapshot<T> buildSnapshot(
Class<T> pojoType,
LinkedHashMap<Class<?>, Integer> registeredSubclassesToTags,
TypeSerializer<?>[] registeredSubclassSerializers,
Field[] fields,
TypeSerializer<?>[] fieldSerializers,
Map<Class<?>, TypeSerializer<?>> nonRegisteredSubclassSerializerCache) {
final LinkedHashMap<Class<?>, TypeSerializer<?>> subclassRegistry = new LinkedHashMap<>(registeredSubclassesToTags.size());
for (Map.Entry<Class<?>, Integer> entry : registeredSubclassesToTags.entrySet()) {
subclassRegistry.put(entry.getKey(), registeredSubclassSerializers[entry.getValue()]);
}
return new PojoSerializerSnapshot<>(
pojoType,
fields,
fieldSerializers,
subclassRegistry,
nonRegisteredSubclassSerializerCache);
} | Build and return a snapshot of the serializer's parameters and currently cached serializers. |
public static <T> Class<T> compile(ClassLoader cl, String name, String code) {
Tuple2<ClassLoader, String> cacheKey = Tuple2.of(cl, name);
Class<?> clazz = COMPILED_CACHE.getIfPresent(cacheKey);
if (clazz == null) {
clazz = doCompile(cl, name, code);
COMPILED_CACHE.put(cacheKey, clazz);
}
//noinspection unchecked
return (Class<T>) clazz;
} | Compiles generated code into a Class.
@param cl the ClassLoader used to load the class
@param name the class name
@param code the generated code
@param <T> the class type
@return the compiled class |
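A usage sketch, assuming the method above lives on a utility class named CompileUtils (the enclosing class and its package are not shown, so the import is omitted):
public class CompileCacheExample {
    public static void main(String[] args) throws Exception {
        String code =
            "public class GeneratedGreeter {\n" +
            "    public static String greet() { return \"hello\"; }\n" +
            "}\n";

        ClassLoader cl = Thread.currentThread().getContextClassLoader();
        // the first call compiles and populates the (ClassLoader, name) cache;
        // a second call for the same pair is served from the cache
        Class<Object> clazz = CompileUtils.compile(cl, "GeneratedGreeter", code);
        Object greeting = clazz.getMethod("greet").invoke(null);
        System.out.println(greeting);
    }
} |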
private static String addLineNumber(String code) {
String[] lines = code.split("\n");
StringBuilder builder = new StringBuilder();
for (int i = 0; i < lines.length; i++) {
builder.append("/* ").append(i + 1).append(" */").append(lines[i]).append("\n");
}
return builder.toString();
} | Prepends line numbers to the code to output more information when an error occurs.
Generally, when compilation fails, the error shows which line is wrong. This line number starts at 1. |
ChannelFuture connect(final InetSocketAddress serverSocketAddress) {
checkState(bootstrap != null, "Client has not been initialized yet.");
// --------------------------------------------------------------------
// Channel pipeline for the outgoing client connection
// --------------------------------------------------------------------
bootstrap.handler(new ChannelInitializer<SocketChannel>() {
@Override
public void initChannel(SocketChannel channel) throws Exception {
// SSL handler should be added first in the pipeline
if (clientSSLFactory != null) {
SslHandler sslHandler = clientSSLFactory.createNettySSLHandler(
serverSocketAddress.getAddress().getCanonicalHostName(),
serverSocketAddress.getPort());
channel.pipeline().addLast("ssl", sslHandler);
}
channel.pipeline().addLast(protocol.getClientChannelHandlers());
}
});
try {
return bootstrap.connect(serverSocketAddress);
}
catch (ChannelException e) {
if ((e.getCause() instanceof java.net.SocketException &&
e.getCause().getMessage().equals("Too many open files")) ||
(e.getCause() instanceof ChannelException &&
e.getCause().getCause() instanceof java.net.SocketException &&
e.getCause().getCause().getMessage().equals("Too many open files")))
{
throw new ChannelException(
"The operating system does not offer enough file handles to open the network connection. " +
"Please increase the number of available file handles.", e.getCause());
}
else {
throw e;
}
}
} | Connects to the given server address, installing the client channel pipeline (with an SSL handler first, if configured). |
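A hedged caller sketch; NettyClientLike is a hypothetical stand-in for the class declaring connect(...), and the host/port are examples only:
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import java.net.InetSocketAddress;

class ConnectSketch {
    // hypothetical interface standing in for the object exposing connect(...)
    interface NettyClientLike {
        ChannelFuture connect(InetSocketAddress address);
    }

    static void connectAndReport(NettyClientLike client) {
        ChannelFuture future = client.connect(new InetSocketAddress("localhost", 6124));
        future.addListener((ChannelFutureListener) f -> {
            if (f.isSuccess()) {
                // f.channel() is now ready to send requests
            } else {
                // connection failed, e.g. the OS ran out of file handles
                f.cause().printStackTrace();
            }
        });
    }
}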
public final void registerCloseable(C closeable) throws IOException {
if (null == closeable) {
return;
}
synchronized (getSynchronizationLock()) {
if (!closed) {
doRegister(closeable, closeableToRef);
return;
}
}
IOUtils.closeQuietly(closeable);
throw new IOException("Cannot register Closeable, registry is already closed. Closing argument.");
} | Registers a {@link Closeable} with the registry. In case the registry is already closed, this method
closes the passed {@link Closeable} and throws an {@link IOException}.
@param closeable Closeable to register
@throws IOException if the registry was already closed |
public final boolean unregisterCloseable(C closeable) {
if (null == closeable) {
return false;
}
synchronized (getSynchronizationLock()) {
return doUnRegister(closeable, closeableToRef);
}
} | Removes a {@link Closeable} from the registry.
@param closeable instance to remove from the registry.
@return true if the closeable was previously registered and became unregistered through this call. |
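A hedged usage sketch of the register/unregister pair, assuming a concrete registry instance (e.g. a CloseableRegistry) and an ordinary stream as the resource; the file name is an example only:
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;

class RegistryUsageSketch {
    static void readWithRegistry(org.apache.flink.core.fs.CloseableRegistry registry) throws IOException {
        InputStream in = Files.newInputStream(Paths.get("data.bin"));
        registry.registerCloseable(in);       // closed for us if the registry is closed concurrently
        try {
            // ... read from 'in' ...
        } finally {
            if (registry.unregisterCloseable(in)) {
                in.close();                   // still owned by us, so close it ourselves
            }
        }
    }
}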
protected final void addCloseableInternal(Closeable closeable, T metaData) {
synchronized (getSynchronizationLock()) {
closeableToRef.put(closeable, metaData);
}
} | Adds a mapping to the registry map, respecting locking. |
private void bufferRows1() throws IOException {
BinaryRow copy = key1.copy();
buffer1.reset();
do {
buffer1.add(row1);
} while (nextRow1() && keyComparator.compare(key1, copy) == 0);
buffer1.complete();
} | Buffer rows from iterator1 with same key. |
private void bufferRows2() throws IOException {
BinaryRow copy = key2.copy();
buffer2.reset();
do {
buffer2.add(row2);
} while (nextRow2() && keyComparator.compare(key2, copy) == 0);
buffer2.complete();
} | Buffer rows from iterator2 with same key. |
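An illustration of the buffering behaviour of both methods (example values, not from the source):
// With input 2 sorted by key as (k=1, a), (k=1, b), (k=2, c) and row2 positioned
// on (k=1, a), calling bufferRows2() leaves buffer2 = [(1, a), (1, b)] and
// advances row2/key2 to (k=2, c), the first row of the next key group.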
public static <T> TypeSerializerSchemaCompatibility<T> compatibleWithReconfiguredSerializer(TypeSerializer<T> reconfiguredSerializer) {
return new TypeSerializerSchemaCompatibility<>(
Type.COMPATIBLE_WITH_RECONFIGURED_SERIALIZER,
Preconditions.checkNotNull(reconfiguredSerializer));
} | Returns a result that indicates a reconfigured version of the new serializer is compatible, and should be
used instead of the original new serializer.
@param reconfiguredSerializer the reconfigured version of the new serializer.
@return a result that indicates a reconfigured version of the new serializer is compatible, and should be
used instead of the original new serializer. |
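A hedged sketch of a TypeSerializerSnapshot implementation returning this result; MyType, canReconfigure and reconfigure are hypothetical placeholders:
@Override
public TypeSerializerSchemaCompatibility<MyType> resolveSchemaCompatibility(TypeSerializer<MyType> newSerializer) {
    if (canReconfigure(newSerializer)) {
        // adjust the new serializer so it can read data written with the old schema
        TypeSerializer<MyType> reconfigured = reconfigure(newSerializer);
        return TypeSerializerSchemaCompatibility.compatibleWithReconfiguredSerializer(reconfigured);
    }
    return TypeSerializerSchemaCompatibility.incompatible();
}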
public TypeSerializer<T> getReconfiguredSerializer() {
Preconditions.checkState(
isCompatibleWithReconfiguredSerializer(),
"It is only possible to get a reconfigured serializer if the compatibility type is %s, but the type is %s",
Type.COMPATIBLE_WITH_RECONFIGURED_SERIALIZER, resultType);
return reconfiguredNewSerializer;
} | Gets the reconfigured serializer. This throws an exception if
{@link #isCompatibleWithReconfiguredSerializer()} is {@code false}. |
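On the caller side, the result is typically inspected before deciding which serializer to keep; a hedged sketch where snapshot, newSerializer and MyType are placeholders:
TypeSerializerSchemaCompatibility<MyType> compatibility =
    snapshot.resolveSchemaCompatibility(newSerializer);

TypeSerializer<MyType> serializerToUse;
if (compatibility.isCompatibleAsIs()) {
    serializerToUse = newSerializer;
} else if (compatibility.isCompatibleWithReconfiguredSerializer()) {
    serializerToUse = compatibility.getReconfiguredSerializer();
} else if (compatibility.isCompatibleAfterMigration()) {
    // state must first be read with the previous serializer and rewritten
    serializerToUse = newSerializer;
} else {
    throw new IllegalStateException("The new serializer is incompatible with existing state.");
}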
@SuppressWarnings("unchecked")
@Override
public <K, V> BroadcastState<K, V> getBroadcastState(final MapStateDescriptor<K, V> stateDescriptor) throws StateMigrationException {
Preconditions.checkNotNull(stateDescriptor);
String name = Preconditions.checkNotNull(stateDescriptor.getName());
BackendWritableBroadcastState<K, V> previous =
(BackendWritableBroadcastState<K, V>) accessedBroadcastStatesByName.get(name);
if (previous != null) {
checkStateNameAndMode(
previous.getStateMetaInfo().getName(),
name,
previous.getStateMetaInfo().getAssignmentMode(),
OperatorStateHandle.Mode.BROADCAST);
return previous;
}
stateDescriptor.initializeSerializerUnlessSet(getExecutionConfig());
TypeSerializer<K> broadcastStateKeySerializer = Preconditions.checkNotNull(stateDescriptor.getKeySerializer());
TypeSerializer<V> broadcastStateValueSerializer = Preconditions.checkNotNull(stateDescriptor.getValueSerializer());
BackendWritableBroadcastState<K, V> broadcastState =
(BackendWritableBroadcastState<K, V>) registeredBroadcastStates.get(name);
if (broadcastState == null) {
broadcastState = new HeapBroadcastState<>(
new RegisteredBroadcastStateBackendMetaInfo<>(
name,
OperatorStateHandle.Mode.BROADCAST,
broadcastStateKeySerializer,
broadcastStateValueSerializer));
registeredBroadcastStates.put(name, broadcastState);
} else {
// has restored state; check compatibility of new state access
checkStateNameAndMode(
broadcastState.getStateMetaInfo().getName(),
name,
broadcastState.getStateMetaInfo().getAssignmentMode(),
OperatorStateHandle.Mode.BROADCAST);
RegisteredBroadcastStateBackendMetaInfo<K, V> restoredBroadcastStateMetaInfo = broadcastState.getStateMetaInfo();
// check whether new serializers are incompatible
TypeSerializerSchemaCompatibility<K> keyCompatibility =
restoredBroadcastStateMetaInfo.updateKeySerializer(broadcastStateKeySerializer);
if (keyCompatibility.isIncompatible()) {
throw new StateMigrationException("The new key typeSerializer for broadcast state must not be incompatible.");
}
TypeSerializerSchemaCompatibility<V> valueCompatibility =
restoredBroadcastStateMetaInfo.updateValueSerializer(broadcastStateValueSerializer);
if (valueCompatibility.isIncompatible()) {
throw new StateMigrationException("The new value typeSerializer for broadcast state must not be incompatible.");
}
broadcastState.setStateMetaInfo(restoredBroadcastStateMetaInfo);
}
accessedBroadcastStatesByName.put(name, broadcastState);
return broadcastState;
} | Returns the broadcast state registered under the given descriptor, creating it if it does not exist yet; if restored state exists, verifies that the new key and value serializers are compatible with it. |
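A hedged usage sketch from the user-facing side of broadcast state; the Rule type and the descriptor name are made-up examples, and ctx is the context of a (Keyed)BroadcastProcessFunction:
MapStateDescriptor<String, Rule> ruleStateDescriptor = new MapStateDescriptor<>(
    "rules",
    BasicTypeInfo.STRING_TYPE_INFO,
    TypeInformation.of(new TypeHint<Rule>() {}));

// inside processBroadcastElement(...):
BroadcastState<String, Rule> rules = ctx.getBroadcastState(ruleStateDescriptor);
rules.put(rule.getName(), rule);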
@Nonnull
@Override
public RunnableFuture<SnapshotResult<OperatorStateHandle>> snapshot(
long checkpointId,
long timestamp,
@Nonnull CheckpointStreamFactory streamFactory,
@Nonnull CheckpointOptions checkpointOptions) throws Exception {
long syncStartTime = System.currentTimeMillis();
RunnableFuture<SnapshotResult<OperatorStateHandle>> snapshotRunner =
snapshotStrategy.snapshot(checkpointId, timestamp, streamFactory, checkpointOptions);
snapshotStrategy.logSyncCompleted(streamFactory, syncStartTime);
return snapshotRunner;
} | Takes a snapshot of the operator state via the configured snapshot strategy and returns a runnable future producing the resulting state handle. |
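A hedged sketch of how such a snapshot future is typically driven; the backend, stream factory, and checkpoint id/timestamp are placeholders, and the surrounding wiring is illustrative only:
static OperatorStateHandle runSnapshot(
        OperatorStateBackend backend,
        long checkpointId,
        long timestamp,
        CheckpointStreamFactory streamFactory) throws Exception {
    RunnableFuture<SnapshotResult<OperatorStateHandle>> snapshotFuture = backend.snapshot(
        checkpointId, timestamp, streamFactory, CheckpointOptions.forCheckpointWithDefaultLocation());
    snapshotFuture.run();                          // perform the asynchronous part inline
    return snapshotFuture.get().getJobManagerOwnedSnapshot();
}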