code | docstring
---|---
public List<T> getVariable() throws InitializationTypeConflictException {
if (!materialized) {
throw new IllegalStateException("The Broadcast Variable has not yet been materialized.");
}
if (disposed) {
throw new IllegalStateException("The Broadcast Variable has been disposed");
}
synchronized (references) {
if (transformed != null) {
if (transformed instanceof List) {
@SuppressWarnings("unchecked")
List<T> casted = (List<T>) transformed;
return casted;
} else {
throw new InitializationTypeConflictException(transformed.getClass());
}
}
else {
return data;
}
}
} | -------------------------------------------------------------------------------------------- |
public static <T> TypeSerializer<T> wrapIfNullIsNotSupported(
@Nonnull TypeSerializer<T> originalSerializer, boolean padNullValueIfFixedLen) {
return checkIfNullSupported(originalSerializer) ?
originalSerializer : wrap(originalSerializer, padNullValueIfFixedLen);
} | This method tries to serialize a {@code null} value with the {@code originalSerializer}
and wraps it if that fails (e.g. with a {@link NullPointerException}); otherwise it returns the {@code originalSerializer} unchanged.
@param originalSerializer serializer to wrap and add {@code null} support
@param padNullValueIfFixedLen pad null value to preserve the fixed length of original serializer
@return serializer which supports {@code null} values |
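A brief usage sketch for wrapIfNullIsNotSupported; the enclosing class is assumed to be Flink's NullableSerializer utility (suggested by wrap(...) below but not shown here), and StringSerializer is used purely for illustration:
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.api.common.typeutils.base.StringSerializer;
public static TypeSerializer<String> nullSafeStringSerializer() {
// If StringSerializer already handled null, the original instance would be returned unchanged;
// otherwise it is wrapped. padNullValueIfFixedLen is false because strings are variable-length.
return NullableSerializer.wrapIfNullIsNotSupported(StringSerializer.INSTANCE, false);
}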
public static <T> boolean checkIfNullSupported(@Nonnull TypeSerializer<T> serializer) {
int length = serializer.getLength() > 0 ? serializer.getLength() : 1;
DataOutputSerializer dos = new DataOutputSerializer(length);
try {
serializer.serialize(null, dos);
}
catch (IOException | RuntimeException e) {
return false;
}
checkArgument(
serializer.getLength() < 0 || serializer.getLength() == dos.getCopyOfBuffer().length,
"The serialized form of the null value should have the same length " +
"as any other if the length is fixed in the serializer");
DataInputDeserializer dis = new DataInputDeserializer(dos.getSharedBuffer());
try {
checkArgument(serializer.deserialize(dis) == null);
}
catch (IOException e) {
throw new RuntimeException(
String.format("Unexpected failure to deserialize just serialized null value with %s",
serializer.getClass().getName()), e);
}
checkArgument(
serializer.copy(null) == null,
"Serializer %s has to be able properly copy null value if it can serialize it",
serializer.getClass().getName());
return true;
} | This method checks if {@code serializer} supports {@code null} values.
@param serializer serializer to check |
public static <T> TypeSerializer<T> wrap(
@Nonnull TypeSerializer<T> originalSerializer, boolean padNullValueIfFixedLen) {
return originalSerializer instanceof NullableSerializer ?
originalSerializer : new NullableSerializer<>(originalSerializer, padNullValueIfFixedLen);
} | This method wraps the {@code originalSerializer} with the {@code NullableSerializer} if not already wrapped.
@param originalSerializer serializer to wrap and add {@code null} support
@param padNullValueIfFixedLen pad null value to preserve the fixed length of original serializer
@return wrapped serializer which supports {@code null} values |
PartitionRequestClient createPartitionRequestClient(ConnectionID connectionId) throws IOException, InterruptedException {
Object entry;
PartitionRequestClient client = null;
while (client == null) {
entry = clients.get(connectionId);
if (entry != null) {
// Existing channel or connecting channel
if (entry instanceof PartitionRequestClient) {
client = (PartitionRequestClient) entry;
}
else {
ConnectingChannel future = (ConnectingChannel) entry;
client = future.waitForChannel();
clients.replace(connectionId, future, client);
}
}
else {
// No channel yet. Create one, but watch out for a race.
// We create a "connecting future" and atomically add it to the map.
// Only the thread that really added it establishes the channel.
// The others need to wait on that original establisher's future.
ConnectingChannel connectingChannel = new ConnectingChannel(connectionId, this);
Object old = clients.putIfAbsent(connectionId, connectingChannel);
if (old == null) {
nettyClient.connect(connectionId.getAddress()).addListener(connectingChannel);
client = connectingChannel.waitForChannel();
clients.replace(connectionId, connectingChannel, client);
}
else if (old instanceof ConnectingChannel) {
client = ((ConnectingChannel) old).waitForChannel();
clients.replace(connectionId, old, client);
}
else {
client = (PartitionRequestClient) old;
}
}
// Make sure to increment the reference count before handing a client
// out to ensure correct bookkeeping for channel closing.
if (!client.incrementReferenceCounter()) {
destroyPartitionRequestClient(connectionId, client);
client = null;
}
}
return client;
} | Atomically establishes a TCP connection to the given remote address and
creates a {@link PartitionRequestClient} instance for this connection. |
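The method above is essentially a "connect at most once per ConnectionID" pattern built on ConcurrentMap.putIfAbsent. A minimal, Flink-independent sketch of the same idea (the ConnectionPool/openConnection names are hypothetical, and a CompletableFuture stands in for the ConnectingChannel):
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;
class ConnectionPool<K, C> {
private final ConcurrentMap<K, CompletableFuture<C>> connections = new ConcurrentHashMap<>();
C get(K key, Function<K, C> openConnection) {
CompletableFuture<C> pending = new CompletableFuture<>();
CompletableFuture<C> existing = connections.putIfAbsent(key, pending);
if (existing == null) {
// Only the thread that won the putIfAbsent race actually opens the connection.
try {
pending.complete(openConnection.apply(key));
} catch (Throwable t) {
pending.completeExceptionally(t);
connections.remove(key, pending); // allow a later retry
throw t;
}
} else {
// All other threads wait on the winner's future.
pending = existing;
}
return pending.join();
}
}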
public Statistics tableStats(TableStats tableStats) {
rowCount(tableStats.getRowCount());
columnStats.clear();
tableStats.getColumnStats().forEach(this::columnStats);
return this;
} | Sets the statistics from a {@link TableStats} instance.
<p>This method overwrites all existing statistics.
@param tableStats the table statistics |
public Statistics columnStats(String columnName, ColumnStats columnStats) {
Map<String, String> map = normalizeColumnStats(columnStats);
this.columnStats.put(columnName, map);
return this;
} | Sets statistics for a column. Overwrites all existing statistics for this column.
@param columnName the column name
@param columnStats expected statistics for the column |
public Statistics columnDistinctCount(String columnName, Long ndv) {
this.columnStats
.computeIfAbsent(columnName, column -> new HashMap<>())
.put(DISTINCT_COUNT, String.valueOf(ndv));
return this;
} | Sets the number of distinct values statistic for the given column. |
public Statistics columnNullCount(String columnName, Long nullCount) {
this.columnStats
.computeIfAbsent(columnName, column -> new HashMap<>())
.put(NULL_COUNT, String.valueOf(nullCount));
return this;
} | Sets the number of null values statistic for the given column. |
public Statistics columnAvgLength(String columnName, Double avgLen) {
this.columnStats
.computeIfAbsent(columnName, column -> new HashMap<>())
.put(AVG_LENGTH, String.valueOf(avgLen));
return this;
} | Sets the average length statistic for the given column. |
public Statistics columnMaxLength(String columnName, Integer maxLen) {
this.columnStats
.computeIfAbsent(columnName, column -> new HashMap<>())
.put(MAX_LENGTH, String.valueOf(maxLen));
return this;
} | Sets the maximum length statistic for the given column. |
public Statistics columnMaxValue(String columnName, Number max) {
this.columnStats
.computeIfAbsent(columnName, column -> new HashMap<>())
.put(MAX_VALUE, String.valueOf(max));
return this;
} | Sets the maximum value statistic for the given column. |
public Statistics columnMinValue(String columnName, Number min) {
this.columnStats
.computeIfAbsent(columnName, column -> new HashMap<>())
.put(MIN_VALUE, String.valueOf(min));
return this;
} | Sets the minimum value statistic for the given column. |
@Override
public final Map<String, String> toProperties() {
final DescriptorProperties properties = new DescriptorProperties();
properties.putProperties(internalProperties);
properties.putInt(STATISTICS_PROPERTY_VERSION, 1);
List<Map<String, String>> namedStats = new ArrayList<>();
for (Map.Entry<String, Map<String, String>> entry : columnStats.entrySet()) {
Map<String, String> columnStat = entry.getValue();
columnStat.put(NAME, entry.getKey());
namedStats.add(columnStat);
}
properties.putIndexedVariableProperties(STATISTICS_COLUMNS, namedStats);
return properties.asMap();
} | Converts this descriptor into a set of properties. |
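A short usage sketch of the fluent Statistics methods above; the column names and values are invented for illustration, and a no-arg Statistics() constructor is assumed:
import java.util.Map;
public static Map<String, String> exampleStatisticsProperties() {
// Build per-column statistics fluently, then serialize everything into string properties.
return new Statistics()
.columnDistinctCount("user", 4L)
.columnNullCount("user", 0L)
.columnMinValue("amount", 1)
.columnMaxValue("amount", 4)
.columnAvgLength("product", 5.5)
.columnMaxLength("product", 6)
.toProperties();
}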
@Override
public int size() {
if (allElementsInCache) {
return orderedCache.size();
} else {
int count = 0;
try (final RocksBytesIterator iterator = orderedBytesIterator()) {
while (iterator.hasNext()) {
iterator.next();
++count;
}
}
return count;
}
} | This implementation comes at a relatively high cost per invocation. It should not be called repeatedly when it is
clear that the value did not change. Currently this is only truly used to realize certain higher-level tests. |
@Override
public void onTaskFailure(Execution taskExecution, Throwable cause) {
final ExecutionVertex ev = taskExecution.getVertex();
final FailoverRegion failoverRegion = vertexToRegion.get(ev);
if (failoverRegion == null) {
executionGraph.failGlobal(new FlinkException(
"Can not find a failover region for the execution " + ev.getTaskNameWithSubtaskIndex(), cause));
}
else {
LOG.info("Recovering task failure for {} #{} ({}) via restart of failover region",
taskExecution.getVertex().getTaskNameWithSubtaskIndex(),
taskExecution.getAttemptNumber(),
taskExecution.getAttemptId());
failoverRegion.onExecutionFail(taskExecution, cause);
}
} | ------------------------------------------------------------------------ |
private void generateAllFailoverRegion(List<ExecutionJobVertex> newJobVerticesTopological) {
final IdentityHashMap<ExecutionVertex, ArrayList<ExecutionVertex>> vertexToRegion = new IdentityHashMap<>();
// we use the map (list -> null) to imitate an IdentityHashSet (which does not exist)
final IdentityHashMap<ArrayList<ExecutionVertex>, Object> distinctRegions = new IdentityHashMap<>();
// this loop will worst case iterate over every edge in the graph (complexity is O(#edges))
for (ExecutionJobVertex ejv : newJobVerticesTopological) {
// currently, jobs with a co-location constraint fail as one
// we want to improve that in the future (or get rid of co-location constraints)
if (ejv.getCoLocationGroup() != null) {
makeAllOneRegion(newJobVerticesTopological);
return;
}
// see if this JobVertex has any pipelined inputs at all
final List<IntermediateResult> inputs = ejv.getInputs();
final int numInputs = inputs.size();
boolean hasPipelinedInputs = false;
for (IntermediateResult input : inputs) {
if (input.getResultType().isPipelined()) {
hasPipelinedInputs = true;
break;
}
}
if (hasPipelinedInputs) {
// build upon the predecessors
for (ExecutionVertex ev : ejv.getTaskVertices()) {
// remember the region in which we are
ArrayList<ExecutionVertex> thisRegion = null;
for (int inputNum = 0; inputNum < numInputs; inputNum++) {
if (inputs.get(inputNum).getResultType().isPipelined()) {
for (ExecutionEdge edge : ev.getInputEdges(inputNum)) {
final ExecutionVertex predecessor = edge.getSource().getProducer();
final ArrayList<ExecutionVertex> predecessorRegion = vertexToRegion.get(predecessor);
if (thisRegion != null) {
// we already have a region. see if it is the same as the predecessor's region
if (predecessorRegion != thisRegion) {
// we need to merge our region and the predecessor's region
predecessorRegion.addAll(thisRegion);
distinctRegions.remove(thisRegion);
thisRegion = predecessorRegion;
// remap the vertices from that merged region
for (ExecutionVertex inPredRegion: predecessorRegion) {
vertexToRegion.put(inPredRegion, thisRegion);
}
}
}
else if (predecessorRegion != null) {
// first case, make this our region
thisRegion = predecessorRegion;
thisRegion.add(ev);
vertexToRegion.put(ev, thisRegion);
}
else {
// throw an uncaught exception here
// this is a bug and not a recoverable situation
throw new FlinkRuntimeException(
"bug in the logic to construct the pipelined failover regions");
}
}
}
}
}
}
else {
// no pipelined inputs, start a new region
for (ExecutionVertex ev : ejv.getTaskVertices()) {
ArrayList<ExecutionVertex> region = new ArrayList<>(1);
region.add(ev);
vertexToRegion.put(ev, region);
distinctRegions.put(region, null);
}
}
}
// now that we have all regions, create the failover region objects
LOG.info("Creating {} individual failover regions for job {} ({})",
distinctRegions.size(), executionGraph.getJobName(), executionGraph.getJobID());
for (List<ExecutionVertex> region : distinctRegions.keySet()) {
final FailoverRegion failoverRegion = createFailoverRegion(executionGraph, region);
for (ExecutionVertex ev : region) {
this.vertexToRegion.put(ev, failoverRegion);
}
}
} | Generates all FailoverRegions from the newly added job vertices. |
public OverWindow as(Expression alias) {
return new OverWindow(
alias,
partitionBy,
orderBy,
new CallExpression(BuiltInFunctionDefinitions.UNBOUNDED_RANGE, Collections.emptyList()),
Optional.empty());
} | Assigns an alias for this window that the following {@code select()} clause can refer to.
@param alias alias for this over window
@return the fully defined over window |
@Override
public void reset() {
this.cursor = fixedSize;
for (int i = 0; i < nullBitsSizeInBytes; i += 8) {
segment.putLong(i, 0L);
}
this.segment.putInt(0, numElements);
} | First, reset. |
@SuppressWarnings("unchecked")
public PythonSingleOutputStreamOperator reduce(ReduceFunction<PyObject> fun) throws IOException {
return new PythonSingleOutputStreamOperator(stream.reduce(new PythonReduceFunction(fun)));
} | A thin wrapper layer over {@link WindowedStream#reduce(org.apache.flink.api.common.functions.ReduceFunction)}.
@param fun The reduce function.
@return The data stream that is the result of applying the reduce function to the window. |
public PythonSingleOutputStreamOperator apply(
WindowFunction<PyObject, Object, Object, W> fun) throws IOException {
return new PythonSingleOutputStreamOperator(stream.apply(new PythonApplyFunction<>(fun)));
} | A thin wrapper layer over {@link WindowedStream#apply(WindowFunction)}.
@param fun The window function.
@return The data stream that is the result of applying the window function to the window. |
public static InetAddress findConnectingAddress(InetSocketAddress targetAddress,
long maxWaitMillis, long startLoggingAfter) throws IOException {
if (targetAddress == null) {
throw new NullPointerException("targetAddress must not be null");
}
if (maxWaitMillis <= 0) {
throw new IllegalArgumentException("Max wait time must be positive");
}
final long startTimeNanos = System.nanoTime();
long currentSleepTime = MIN_SLEEP_TIME;
long elapsedTimeMillis = 0;
final List<AddressDetectionState> strategies = Collections.unmodifiableList(
Arrays.asList(
AddressDetectionState.LOCAL_HOST,
AddressDetectionState.ADDRESS,
AddressDetectionState.FAST_CONNECT,
AddressDetectionState.SLOW_CONNECT));
// loop while there is time left
while (elapsedTimeMillis < maxWaitMillis) {
boolean logging = elapsedTimeMillis >= startLoggingAfter;
if (logging) {
LOG.info("Trying to connect to " + targetAddress);
}
// Try each strategy in order
for (AddressDetectionState strategy : strategies) {
InetAddress address = findAddressUsingStrategy(strategy, targetAddress, logging);
if (address != null) {
return address;
}
}
// we have made a pass with all strategies over all interfaces
// sleep for a while before we make the next pass
elapsedTimeMillis = (System.nanoTime() - startTimeNanos) / 1_000_000;
long toWait = Math.min(maxWaitMillis - elapsedTimeMillis, currentSleepTime);
if (toWait > 0) {
if (logging) {
LOG.info("Could not connect. Waiting for {} msecs before next attempt", toWait);
} else {
LOG.debug("Could not connect. Waiting for {} msecs before next attempt", toWait);
}
try {
Thread.sleep(toWait);
}
catch (InterruptedException e) {
throw new IOException("Connection attempts have been interrupted.");
}
}
// increase the exponential backoff timer
currentSleepTime = Math.min(2 * currentSleepTime, MAX_SLEEP_TIME);
}
// our attempts timed out. use the heuristic fallback
LOG.warn("Could not connect to {}. Selecting a local address using heuristics.", targetAddress);
InetAddress heuristic = findAddressUsingStrategy(AddressDetectionState.HEURISTIC, targetAddress, true);
if (heuristic != null) {
return heuristic;
}
else {
LOG.warn("Could not find any IPv4 address that is not loopback or link-local. Using localhost address.");
return InetAddress.getLocalHost();
}
} | Finds the local network address from which this machine can connect to the target
address. This method tries to establish a proper network connection to the
given target, so it only succeeds if the target socket address actually accepts
connections. The method tries various strategies multiple times and uses an exponential
backoff timer between tries.
<p>If no connection attempt was successful after the given maximum time, the method
will choose some address based on heuristics (excluding link-local and loopback addresses).
<p>This method will initially not log on info level (to avoid flooding the log while the
backoff time is still very low). It will start logging after a certain time
has passed.
@param targetAddress The address that the method tries to connect to.
@param maxWaitMillis The maximum time that this method tries to connect, before falling
back to the heuristics.
@param startLoggingAfter The time after which the method will log on INFO level. |
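A usage sketch for the detection loop above; ConnectionUtils is assumed to be the enclosing class, and the JobManager host, port, and timeouts are placeholders:
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
public static InetAddress detectLocalBindAddress() throws IOException {
InetSocketAddress jobManagerAddress = new InetSocketAddress("jobmanager.example.com", 6123);
// Try for up to two minutes, and only start logging on INFO level after the first two seconds.
return ConnectionUtils.findConnectingAddress(jobManagerAddress, 120_000, 2_000);
}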
private static InetAddress tryLocalHostBeforeReturning(
InetAddress preliminaryResult, SocketAddress targetAddress, boolean logging) throws IOException {
InetAddress localhostName = InetAddress.getLocalHost();
if (preliminaryResult.equals(localhostName)) {
// preliminary result is equal to the local host name
return preliminaryResult;
}
else if (tryToConnect(localhostName, targetAddress, AddressDetectionState.SLOW_CONNECT.getTimeout(), logging)) {
// success, we were able to use local host to connect
LOG.debug("Preferring {} (InetAddress.getLocalHost()) for local bind point over previous candidate {}",
localhostName, preliminaryResult);
return localhostName;
}
else {
// we have to make the preliminary result the final result
return preliminaryResult;
}
} | This utility method tries to connect to the JobManager using the InetAddress returned by
InetAddress.getLocalHost(). Its purpose is to make one final attempt to reach the target
address from the local host address before settling on the previously detected address.
We make this second try because the JobManager might have been unavailable during the first check.
@param preliminaryResult The address detected by the heuristic
@return either the preliminaryResult or the address returned by InetAddress.getLocalHost() (if
we are able to connect to targetAddress from there) |
private static InetAddress findAddressUsingStrategy(AddressDetectionState strategy,
InetSocketAddress targetAddress,
boolean logging) throws IOException {
// try LOCAL_HOST strategy independent of the network interfaces
if (strategy == AddressDetectionState.LOCAL_HOST) {
InetAddress localhostName;
try {
localhostName = InetAddress.getLocalHost();
} catch (UnknownHostException uhe) {
LOG.warn("Could not resolve local hostname to an IP address: {}", uhe.getMessage());
return null;
}
if (tryToConnect(localhostName, targetAddress, strategy.getTimeout(), logging)) {
LOG.debug("Using InetAddress.getLocalHost() immediately for the connecting address");
// Here, we are not calling tryLocalHostBeforeReturning() because it is the LOCAL_HOST strategy
return localhostName;
} else {
return null;
}
}
final InetAddress address = targetAddress.getAddress();
if (address == null) {
return null;
}
final byte[] targetAddressBytes = address.getAddress();
// for each network interface
Enumeration<NetworkInterface> e = NetworkInterface.getNetworkInterfaces();
while (e.hasMoreElements()) {
NetworkInterface netInterface = e.nextElement();
// for each address of the network interface
Enumeration<InetAddress> ee = netInterface.getInetAddresses();
while (ee.hasMoreElements()) {
InetAddress interfaceAddress = ee.nextElement();
switch (strategy) {
case ADDRESS:
if (hasCommonPrefix(targetAddressBytes, interfaceAddress.getAddress())) {
LOG.debug("Target address {} and local address {} share prefix - trying to connect.",
targetAddress, interfaceAddress);
if (tryToConnect(interfaceAddress, targetAddress, strategy.getTimeout(), logging)) {
return tryLocalHostBeforeReturning(interfaceAddress, targetAddress, logging);
}
}
break;
case FAST_CONNECT:
case SLOW_CONNECT:
LOG.debug("Trying to connect to {} from local address {} with timeout {}",
targetAddress, interfaceAddress, strategy.getTimeout());
if (tryToConnect(interfaceAddress, targetAddress, strategy.getTimeout(), logging)) {
return tryLocalHostBeforeReturning(interfaceAddress, targetAddress, logging);
}
break;
case HEURISTIC:
if (LOG.isDebugEnabled()) {
LOG.debug("Choosing InetAddress.getLocalHost() address as a heuristic.");
}
return InetAddress.getLocalHost();
default:
throw new RuntimeException("Unsupported strategy: " + strategy);
}
} // end for each address of the interface
} // end for each interface
return null;
} | Try to find a local address which allows us to connect to the targetAddress using the given
strategy.
@param strategy Depending on the strategy, the method will enumerate all interfaces, trying to connect
to the target address
@param targetAddress The address we try to connect to
@param logging Boolean indicating the logging verbosity
@return null if we could not find an address using this strategy, otherwise the local address.
@throws IOException if an I/O error occurs while enumerating interfaces or probing connections |
@Override
public void add(BufferOrEvent boe) throws IOException {
try {
ByteBuffer contents;
if (boe.isBuffer()) {
Buffer buf = boe.getBuffer();
contents = buf.getNioBufferReadable();
}
else {
contents = EventSerializer.toSerializedEvent(boe.getEvent());
}
headBuffer.clear();
headBuffer.putInt(boe.getChannelIndex());
headBuffer.putInt(contents.remaining());
headBuffer.put((byte) (boe.isBuffer() ? 0 : 1));
headBuffer.flip();
bytesWritten += (headBuffer.remaining() + contents.remaining());
FileUtils.writeCompletely(currentChannel, headBuffer);
FileUtils.writeCompletely(currentChannel, contents);
}
finally {
if (boe.isBuffer()) {
boe.getBuffer().recycleBuffer();
}
}
} | Adds a buffer or event to the sequence of spilled buffers and events.
@param boe The buffer or event to add and spill.
@throws IOException Thrown, if the buffer or event could not be spilled. |
@SuppressWarnings("resource")
private void createSpillingChannel() throws IOException {
currentSpillFile = new File(tempDir, spillFilePrefix + (fileCounter++) + ".buffer");
currentChannel = new RandomAccessFile(currentSpillFile, "rw").getChannel();
} | ------------------------------------------------------------------------ |
@Override
@SuppressWarnings("unchecked")
public Tuple6<T0, T1, T2, T3, T4, T5> copy() {
return new Tuple6<>(this.f0,
this.f1,
this.f2,
this.f3,
this.f4,
this.f5);
} | Shallow tuple copy.
@return A new Tuple with the same fields as this. |
public static <T0, T1, T2, T3, T4, T5> Tuple6<T0, T1, T2, T3, T4, T5> of(T0 value0, T1 value1, T2 value2, T3 value3, T4 value4, T5 value5) {
return new Tuple6<>(value0,
value1,
value2,
value3,
value4,
value5);
} | Creates a new tuple and assigns the given values to the tuple's fields.
This is more convenient than using the constructor, because the compiler can
infer the generic type arguments implicitly. For example:
{@code Tuple3.of(n, x, s)}
instead of
{@code new Tuple3<Integer, Double, String>(n, x, s)} |
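For illustration, the of(...) factory and the shallow copy() from the snippet before it can be used like this (field values are arbitrary):
public static void tuple6Example() {
Tuple6<Long, String, Integer, Double, Boolean, String> order =
Tuple6.of(1L, "beer", 3, 2.5, true, "EUR");
// Shallow copy: the new tuple references the same field objects as the original.
Tuple6<Long, String, Integer, Double, Boolean, String> copy = order.copy();
System.out.println(copy.f1 + " x " + copy.f2); // prints "beer x 3"
}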
public static void main(String[] args) throws Exception {
// set up execution environment
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);
DataStream<Order> orderA = env.fromCollection(Arrays.asList(
new Order(1L, "beer", 3),
new Order(1L, "diaper", 4),
new Order(3L, "rubber", 2)));
DataStream<Order> orderB = env.fromCollection(Arrays.asList(
new Order(2L, "pen", 3),
new Order(2L, "rubber", 3),
new Order(4L, "beer", 1)));
// convert DataStream to Table
Table tableA = tEnv.fromDataStream(orderA, "user, product, amount");
// register DataStream as Table
tEnv.registerDataStream("OrderB", orderB, "user, product, amount");
// union the two tables
Table result = tEnv.sqlQuery("SELECT * FROM " + tableA + " WHERE amount > 2 UNION ALL " +
"SELECT * FROM OrderB WHERE amount < 2");
tEnv.toAppendStream(result, Order.class).print();
env.execute();
} | ************************************************************************* |
public static BaseRowKeySelector getBaseRowSelector(int[] keyFields, BaseRowTypeInfo rowType) {
if (keyFields.length > 0) {
InternalType[] inputFieldTypes = rowType.getInternalTypes();
String[] inputFieldNames = rowType.getFieldNames();
InternalType[] keyFieldTypes = new InternalType[keyFields.length];
String[] keyFieldNames = new String[keyFields.length];
for (int i = 0; i < keyFields.length; ++i) {
keyFieldTypes[i] = inputFieldTypes[keyFields[i]];
keyFieldNames[i] = inputFieldNames[keyFields[i]];
}
RowType returnType = new RowType(keyFieldTypes, keyFieldNames);
RowType inputType = new RowType(inputFieldTypes, rowType.getFieldNames());
GeneratedProjection generatedProjection = ProjectionCodeGenerator.generateProjection(
CodeGeneratorContext.apply(new TableConfig()),
"KeyProjection",
inputType,
returnType, keyFields);
BaseRowTypeInfo keyRowType = returnType.toTypeInfo();
// check if type implements proper equals/hashCode
TypeCheckUtils.validateEqualsHashCode("grouping", keyRowType);
return new BinaryRowKeySelector(keyRowType, generatedProjection);
} else {
return NullBinaryRowKeySelector.INSTANCE;
}
} | Creates a BaseRowKeySelector to extract keys from a DataStream whose type is BaseRowTypeInfo.
@param keyFields key fields
@param rowType type of the DataStream to extract keys from
@return the BaseRowKeySelector to extract keys from a DataStream whose type is BaseRowTypeInfo. |
public void close() {
synchronized (this) {
if (this.closed) {
return;
}
this.closed = true;
}
this.numRecordsInBuffer = 0;
this.numRecordsReturned = 0;
// add the full segments to the empty ones
for (int i = this.fullSegments.size() - 1; i >= 0; i--) {
this.emptySegments.add(this.fullSegments.remove(i));
}
// release the memory segment
this.memoryManager.release(this.emptySegments);
this.emptySegments.clear();
if (LOG.isDebugEnabled()) {
LOG.debug("Block Resettable Iterator closed.");
}
} | This method closes the iterator and releases all resources. This method works both as a regular
shutdown and as a canceling method. The method may be called multiple times and will not produce
an error. |
protected boolean writeNextRecord(T record) throws IOException {
try {
this.serializer.serialize(record, this.collectingView);
this.numRecordsInBuffer++;
return true;
} catch (EOFException eofex) {
return false;
}
} | -------------------------------------------------------------------------------------------- |
private Map<Long, List<TimestampedFileInputSplit>> getInputSplitsSortedByModTime(
Map<Path, FileStatus> eligibleFiles) throws IOException {
Map<Long, List<TimestampedFileInputSplit>> splitsByModTime = new TreeMap<>();
if (eligibleFiles.isEmpty()) {
return splitsByModTime;
}
for (FileInputSplit split: format.createInputSplits(readerParallelism)) {
FileStatus fileStatus = eligibleFiles.get(split.getPath());
if (fileStatus != null) {
Long modTime = fileStatus.getModificationTime();
List<TimestampedFileInputSplit> splitsToForward = splitsByModTime.get(modTime);
if (splitsToForward == null) {
splitsToForward = new ArrayList<>();
splitsByModTime.put(modTime, splitsToForward);
}
splitsToForward.add(new TimestampedFileInputSplit(
modTime, split.getSplitNumber(), split.getPath(),
split.getStart(), split.getLength(), split.getHostnames()));
}
}
return splitsByModTime;
} | Creates the input splits to be forwarded to the downstream tasks of the
{@link ContinuousFileReaderOperator}. Splits are sorted <b>by modification time</b> before
being forwarded and only splits belonging to files in the {@code eligibleFiles}
list will be processed.
@param eligibleFiles The files to process. |
private Map<Path, FileStatus> listEligibleFiles(FileSystem fileSystem, Path path) throws IOException {
final FileStatus[] statuses;
try {
statuses = fileSystem.listStatus(path);
} catch (IOException e) {
// we may run into an IOException if files are moved while listing their status
// delay the check for eligible files in this case
return Collections.emptyMap();
}
if (statuses == null) {
LOG.warn("Path does not exist: {}", path);
return Collections.emptyMap();
} else {
Map<Path, FileStatus> files = new HashMap<>();
// handle the new files
for (FileStatus status : statuses) {
if (!status.isDir()) {
Path filePath = status.getPath();
long modificationTime = status.getModificationTime();
if (!shouldIgnore(filePath, modificationTime)) {
files.put(filePath, status);
}
} else if (format.getNestedFileEnumeration() && format.acceptFile(status)){
files.putAll(listEligibleFiles(fileSystem, status.getPath()));
}
}
return files;
}
} | Returns the paths of the files not yet processed.
@param fileSystem The filesystem where the monitored directory resides. |
private boolean shouldIgnore(Path filePath, long modificationTime) {
assert (Thread.holdsLock(checkpointLock));
boolean shouldIgnore = modificationTime <= globalModificationTime;
if (shouldIgnore && LOG.isDebugEnabled()) {
LOG.debug("Ignoring " + filePath + ", with mod time= " + modificationTime +
" and global mod time= " + globalModificationTime);
}
return shouldIgnore;
} | Returns {@code true} if the file is NOT to be processed further.
This happens if the modification time of the file is not larger than (i.e. smaller than
or equal to) the {@link #globalModificationTime}.
@param filePath the path of the file to check.
@param modificationTime the modification time of the file. |
@Override
public void snapshotState(FunctionSnapshotContext context) throws Exception {
Preconditions.checkState(this.checkpointedState != null,
"The " + getClass().getSimpleName() + " state has not been properly initialized.");
this.checkpointedState.clear();
this.checkpointedState.add(this.globalModificationTime);
if (LOG.isDebugEnabled()) {
LOG.debug("{} checkpointed {}.", getClass().getSimpleName(), globalModificationTime);
}
} | --------------------- Checkpointing -------------------------- |
private TimeWindow mergeWindow(TimeWindow curWindow, TimeWindow other, Collection<TimeWindow> mergedWindow) {
if (curWindow.intersects(other)) {
mergedWindow.add(other);
return curWindow.cover(other);
} else {
return curWindow;
}
} | Merges curWindow and other: returns a new window that covers both curWindow and other
if they overlap. Otherwise, returns curWindow itself.
protected Configuration applyCommandLineOptionsToConfiguration(CommandLine commandLine) throws FlinkException {
final Configuration resultingConfiguration = new Configuration(configuration);
if (commandLine.hasOption(addressOption.getOpt())) {
String addressWithPort = commandLine.getOptionValue(addressOption.getOpt());
InetSocketAddress jobManagerAddress = ClientUtils.parseHostPortAddress(addressWithPort);
setJobManagerAddressInConfig(resultingConfiguration, jobManagerAddress);
}
if (commandLine.hasOption(zookeeperNamespaceOption.getOpt())) {
String zkNamespace = commandLine.getOptionValue(zookeeperNamespaceOption.getOpt());
resultingConfiguration.setString(HighAvailabilityOptions.HA_CLUSTER_ID, zkNamespace);
}
return resultingConfiguration;
} | Overrides configuration settings with the specified command line options.
@param commandLine containing the overriding values
@return Effective configuration with the overridden configuration settings |
public void addForwardedField(int sourceField, int targetField) {
if (isTargetFieldPresent(targetField)) {
throw new InvalidSemanticAnnotationException("Target field " + targetField + " was added twice.");
}
FieldSet targetFields = fieldMapping.get(sourceField);
if (targetFields != null) {
fieldMapping.put(sourceField, targetFields.addField(targetField));
} else {
fieldMapping.put(sourceField, new FieldSet(targetField));
}
} | Adds, to the existing information, a field that is forwarded directly
from the source record(s) to the destination record(s).
@param sourceField the position in the source record(s)
@param targetField the position in the destination record(s) |
public void addReadFields(FieldSet readFields) {
if (this.readFields == null) {
this.readFields = readFields;
} else {
this.readFields = this.readFields.addFields(readFields);
}
} | Adds, to the existing information, field(s) that are read in
the source record(s).
@param readFields the position(s) in the source record(s) |
@Override
public void invoke() throws Exception {
this.headEventReader = new MutableRecordReader<>(
getEnvironment().getInputGate(0),
getEnvironment().getTaskManagerInfo().getTmpDirectories());
TaskConfig taskConfig = new TaskConfig(getTaskConfiguration());
// store all aggregators
this.aggregators = new HashMap<>();
for (AggregatorWithName<?> aggWithName : taskConfig.getIterationAggregators(getUserCodeClassLoader())) {
aggregators.put(aggWithName.getName(), aggWithName.getAggregator());
}
// store the aggregator convergence criterion
if (taskConfig.usesConvergenceCriterion()) {
convergenceCriterion = taskConfig.getConvergenceCriterion(getUserCodeClassLoader());
convergenceAggregatorName = taskConfig.getConvergenceCriterionAggregatorName();
Preconditions.checkNotNull(convergenceAggregatorName);
}
// store the default aggregator convergence criterion
if (taskConfig.usesImplicitConvergenceCriterion()) {
implicitConvergenceCriterion = taskConfig.getImplicitConvergenceCriterion(getUserCodeClassLoader());
implicitConvergenceAggregatorName = taskConfig.getImplicitConvergenceCriterionAggregatorName();
Preconditions.checkNotNull(implicitConvergenceAggregatorName);
}
maxNumberOfIterations = taskConfig.getNumberOfIterations();
// set up the event handler
int numEventsTillEndOfSuperstep = taskConfig.getNumberOfEventsUntilInterruptInIterativeGate(0);
eventHandler = new SyncEventHandler(numEventsTillEndOfSuperstep, aggregators,
getEnvironment().getUserClassLoader());
headEventReader.registerTaskEventListener(eventHandler, WorkerDoneEvent.class);
IntValue dummy = new IntValue();
while (!terminationRequested()) {
if (log.isInfoEnabled()) {
log.info(formatLogString("starting iteration [" + currentIteration + "]"));
}
// this call listens for events until the end-of-superstep is reached
readHeadEventChannel(dummy);
if (log.isInfoEnabled()) {
log.info(formatLogString("finishing iteration [" + currentIteration + "]"));
}
if (checkForConvergence()) {
if (log.isInfoEnabled()) {
log.info(formatLogString("signaling that all workers are to terminate in iteration ["
+ currentIteration + "]"));
}
requestTermination();
sendToAllWorkers(new TerminationEvent());
} else {
if (log.isInfoEnabled()) {
log.info(formatLogString("signaling that all workers are done in iteration [" + currentIteration
+ "]"));
}
AllWorkersDoneEvent allWorkersDoneEvent = new AllWorkersDoneEvent(aggregators);
sendToAllWorkers(allWorkersDoneEvent);
// reset all aggregators
for (Aggregator<?> agg : aggregators.values()) {
agg.reset();
}
currentIteration++;
}
}
} | -------------------------------------------------------------------------------------------- |
@Override
protected List<OUT> executeOnCollections(List<IN1> inputData1, List<IN2> inputData2, RuntimeContext ctx, ExecutionConfig executionConfig) throws Exception {
CrossFunction<IN1, IN2, OUT> function = this.userFunction.getUserCodeObject();
FunctionUtils.setFunctionRuntimeContext(function, ctx);
FunctionUtils.openFunction(function, this.parameters);
ArrayList<OUT> result = new ArrayList<OUT>(inputData1.size() * inputData2.size());
TypeSerializer<IN1> inSerializer1 = getOperatorInfo().getFirstInputType().createSerializer(executionConfig);
TypeSerializer<IN2> inSerializer2 = getOperatorInfo().getSecondInputType().createSerializer(executionConfig);
TypeSerializer<OUT> outSerializer = getOperatorInfo().getOutputType().createSerializer(executionConfig);
for (IN1 element1 : inputData1) {
for (IN2 element2 : inputData2) {
IN1 copy1 = inSerializer1.copy(element1);
IN2 copy2 = inSerializer2.copy(element2);
OUT o = function.cross(copy1, copy2);
result.add(outSerializer.copy(o));
}
}
FunctionUtils.closeFunction(function);
return result;
} | -------------------------------------------------------------------------------------------- |
@Override
public DefaultConfigurableOptionsFactory configure(Configuration configuration) {
for (String key : CANDIDATE_CONFIGS) {
String newValue = configuration.getString(key, null);
if (newValue != null) {
if (checkArgumentValid(key, newValue)) {
this.configuredOptions.put(key, newValue);
}
}
}
return this;
} | Creates a {@link DefaultConfigurableOptionsFactory} instance from a {@link Configuration}.
<p>If no options within {@link RocksDBConfigurableOptions} have ever been configured,
the created OptionsFactory would not override anything defined in {@link PredefinedOptions}.
@param configuration Configuration to be used for the ConfigurableOptionsFactory creation
@return A ConfigurableOptionsFactory created from the given configuration |
private static boolean checkArgumentValid(String key, String value) {
if (POSITIVE_INT_CONFIG_SET.contains(key)) {
Preconditions.checkArgument(Integer.parseInt(value) > 0,
"Configured value for key: " + key + " must be larger than 0.");
} else if (SIZE_CONFIG_SET.contains(key)) {
Preconditions.checkArgument(MemorySize.parseBytes(value) > 0,
"Configured size for key" + key + " must be larger than 0.");
} else if (BOOLEAN_CONFIG_SET.contains(key)) {
Preconditions.checkArgument("true".equalsIgnoreCase(value) || "false".equalsIgnoreCase(value),
"The configured boolean value: " + value + " for key: " + key + " is illegal.");
} else if (key.equals(COMPACTION_STYLE.key())) {
value = value.toLowerCase();
Preconditions.checkArgument(COMPACTION_STYLE_SET.contains(value),
"Compression type: " + value + " is not recognized with legal types: " + String.join(", ", COMPACTION_STYLE_SET));
}
return true;
} | Helper method to check whether the given (key, value) pair is a valid configuration entry.
@param key The configuration key which is configurable in {@link RocksDBConfigurableOptions}.
@param value The value within the given configuration.
@return whether the given key and value in string format are legal. |
private void setInternal(String key, String value) {
Preconditions.checkArgument(value != null && !value.isEmpty(),
"The configuration value must not be empty.");
configuredOptions.put(key, value);
} | Stores the given (key, value) pair in the configured options. Throws IllegalArgumentException if the value is null or empty.
@param key The configuration key.
@param value The configuration value; must not be null or empty. |
private String getInternal(String key) {
Preconditions.checkArgument(configuredOptions.containsKey(key),
"The configuration " + key + " has not been configured.");
return configuredOptions.get(key);
} | Returns the value in string format with the given key.
@param key The configuration-key to query in string format. |
protected void checkError() throws IOException {
final Throwable t = cause.get();
if (t != null) {
if (t instanceof CancelTaskException) {
throw (CancelTaskException) t;
}
if (t instanceof IOException) {
throw (IOException) t;
}
else {
throw new IOException(t);
}
}
} | Checks for an error and rethrows it if one was reported. |
protected boolean increaseBackoff() {
// Backoff is disabled
if (currentBackoff < 0) {
return false;
}
// This is the first time backing off
if (currentBackoff == 0) {
currentBackoff = initialBackoff;
return true;
}
// Continue backing off
else if (currentBackoff < maxBackoff) {
currentBackoff = Math.min(currentBackoff * 2, maxBackoff);
return true;
}
// Reached maximum backoff
return false;
} | Increases the current backoff and returns whether the operation was successful.
@return <code>true</code>, iff the operation was successful. Otherwise, <code>false</code>. |
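A self-contained sketch of the same doubling-with-cap backoff policy, written independently of the fields used above (the initial and maximum values are arbitrary):
import java.util.function.BooleanSupplier;
public static boolean retryWithBackoff(BooleanSupplier attempt) throws InterruptedException {
final long initialBackoff = 100;  // ms, arbitrary for this sketch
final long maxBackoff = 10_000;   // ms, arbitrary for this sketch
long currentBackoff = 0;
while (!attempt.getAsBoolean()) {
if (currentBackoff == 0) {
currentBackoff = initialBackoff;                           // first time backing off
} else if (currentBackoff < maxBackoff) {
currentBackoff = Math.min(currentBackoff * 2, maxBackoff); // keep doubling up to the cap
} else {
return false;                                              // maximum backoff reached, give up
}
Thread.sleep(currentBackoff);
}
return true;
}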
public int resetErrorStateAndParse(byte[] bytes, int startPos, int limit, byte[] delim, T reuse) {
resetParserState();
return parseField(bytes, startPos, limit, delim, reuse);
} | Parses the value of a field from the byte array, taking care to properly reset
the state of this parser before parsing.
The start position within the byte array and the array's valid length is given.
The content of the value is delimited by a field delimiter.
@param bytes The byte array that holds the value.
@param startPos The index where the field starts
@param limit The limit unto which the byte contents is valid for the parser. The limit is the
position one after the last valid byte.
@param delim The field delimiter character
@param reuse An optional reusable field to hold the value
@return The index of the next delimiter, if the field was parsed correctly. A value less than 0 otherwise. |
public static final boolean delimiterNext(byte[] bytes, int startPos, byte[] delim) {
for (int pos = 0; pos < delim.length; pos++) {
// check each position
if (delim[pos] != bytes[startPos + pos]) {
return false;
}
}
return true;
} | Checks if the delimiter starts at the given start position of the byte array.
Attention: This method assumes that enough characters follow the start position for the delimiter check!
@param bytes The byte array that holds the value.
@param startPos The index of the byte array where the check for the delimiter starts.
@param delim The delimiter to check for.
@return true if a delimiter starts at the given start position, false otherwise. |
public static final boolean endsWithDelimiter(byte[] bytes, int endPos, byte[] delim) {
if (endPos < delim.length - 1) {
return false;
}
for (int pos = 0; pos < delim.length; ++pos) {
if (delim[pos] != bytes[endPos - delim.length + 1 + pos]) {
return false;
}
}
return true;
} | Checks if the given byte array ends with the delimiter at the given end position.
@param bytes The byte array that holds the value.
@param endPos The index of the byte array where the check for the delimiter ends.
@param delim The delimiter to check for.
@return true if a delimiter ends at the given end position, false otherwise. |
protected final int nextStringEndPos(byte[] bytes, int startPos, int limit, byte[] delimiter) {
int endPos = startPos;
final int delimLimit = limit - delimiter.length + 1;
while (endPos < limit) {
if (endPos < delimLimit && delimiterNext(bytes, endPos, delimiter)) {
break;
}
endPos++;
}
if (endPos == startPos) {
setErrorState(ParseErrorState.EMPTY_COLUMN);
return -1;
}
return endPos;
} | Returns the end position of a string. Sets the error state if the column is empty.
@return the end position of the string or -1 if an error occurred |
protected static final int nextStringLength(byte[] bytes, int startPos, int length, char delimiter) {
if (length <= 0) {
throw new IllegalArgumentException("Invalid input: Empty string");
}
int limitedLength = 0;
final byte delByte = (byte) delimiter;
while (limitedLength < length && bytes[startPos + limitedLength] != delByte) {
limitedLength++;
}
return limitedLength;
} | Returns the length of a string. Throws an exception if the column is empty.
@return the length of the string |
public static <T> Class<FieldParser<T>> getParserForType(Class<T> type) {
Class<? extends FieldParser<?>> parser = PARSERS.get(type);
if (parser == null) {
return null;
} else {
@SuppressWarnings("unchecked")
Class<FieldParser<T>> typedParser = (Class<FieldParser<T>>) parser;
return typedParser;
}
} | Gets the parser for the type specified by the given class. Returns null, if no parser for that class
is known.
@param type The class of the type to get the parser for.
@return The parser for the given type, or null, if no such parser exists. |
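A sketch of how the parser lookup and resetErrorStateAndParse(...) above fit together; it assumes the statics live on Flink's FieldParser base class and that parsers expose getLastResult() (not shown above):
import java.nio.charset.StandardCharsets;
public static Integer parseFirstIntField(String line) throws Exception {
byte[] bytes = line.getBytes(StandardCharsets.US_ASCII);
byte[] delim = ",".getBytes(StandardCharsets.US_ASCII);
Class<FieldParser<Integer>> parserClass = FieldParser.getParserForType(Integer.class);
FieldParser<Integer> parser = parserClass.getDeclaredConstructor().newInstance();
int nextPos = parser.resetErrorStateAndParse(bytes, 0, bytes.length, delim, 0);
// A negative return value signals a parse error; getLastResult() is assumed to hold the parsed value.
return nextPos < 0 ? null : parser.getLastResult();
}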
public StateSnapshot.StateKeyGroupWriter partitionByKeyGroup() {
if (computedResult == null) {
reportAllElementKeyGroups();
int outputNumberOfElements = buildHistogramByAccumulatingCounts();
executePartitioning(outputNumberOfElements);
}
return computedResult;
} | Partitions the data into key-groups and returns the result via {@link PartitioningResult}. |
protected void reportAllElementKeyGroups() {
Preconditions.checkState(partitioningSource.length >= numberOfElements);
for (int i = 0; i < numberOfElements; ++i) {
int keyGroup = KeyGroupRangeAssignment.assignToKeyGroup(
keyExtractorFunction.extractKeyFromElement(partitioningSource[i]), totalKeyGroups);
reportKeyGroupOfElementAtIndex(i, keyGroup);
}
} | This method iterates over the input data and reports the key-group for each element. |
protected void reportKeyGroupOfElementAtIndex(int index, int keyGroup) {
final int keyGroupIndex = keyGroup - firstKeyGroup;
elementKeyGroups[index] = keyGroupIndex;
++counterHistogram[keyGroupIndex];
} | This method reports in the bookkeeping data that the element at the given index belongs to the given key-group. |
private int buildHistogramByAccumulatingCounts() {
int sum = 0;
for (int i = 0; i < counterHistogram.length; ++i) {
int currentSlotValue = counterHistogram[i];
counterHistogram[i] = sum;
sum += currentSlotValue;
}
return sum;
} | This method turns the per-key-group counts in {@link #counterHistogram} into accumulated start offsets (a prefix sum) and returns the total element count. |
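The accumulation above is the offset-building step of a counting sort: per-key-group counts become start offsets. A standalone sketch of the same idea, not tied to Flink's internal fields:
// Turns per-bucket counts into exclusive start offsets (in place) and returns the total element count.
// Example: counts {2, 0, 3} become offsets {0, 2, 2} and the method returns 5.
public static int countsToOffsets(int[] counts) {
int sum = 0;
for (int i = 0; i < counts.length; ++i) {
int count = counts[i];
counts[i] = sum;  // bucket i starts where all previous buckets end
sum += count;
}
return sum;
}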
@Override
public void setup(StreamTask<?, ?> containingTask, StreamConfig config, Output<StreamRecord<OUT>> output) {
super.setup(containingTask, config, output);
FunctionUtils.setFunctionRuntimeContext(userFunction, getRuntimeContext());
} | ------------------------------------------------------------------------ |
@Override
public void notifyCheckpointComplete(long checkpointId) throws Exception {
super.notifyCheckpointComplete(checkpointId);
if (userFunction instanceof CheckpointListener) {
((CheckpointListener) userFunction).notifyCheckpointComplete(checkpointId);
}
} | ------------------------------------------------------------------------ |
@Override
public void setOutputType(TypeInformation<OUT> outTypeInfo, ExecutionConfig executionConfig) {
StreamingFunctionUtils.setOutputType(userFunction, outTypeInfo, executionConfig);
} | ------------------------------------------------------------------------ |
@Override
public void registerTaskEventListener(EventListener<TaskEvent> listener, Class<? extends TaskEvent> eventType) {
taskEventHandler.subscribe(listener, eventType);
} | ------------------------------------------------------------------------ |
protected boolean handleEvent(AbstractEvent event) throws IOException {
final Class<?> eventType = event.getClass();
try {
// ------------------------------------------------------------
// Runtime events
// ------------------------------------------------------------
// This event is also checked at the (single) input gate to release the respective
// channel, at which it was received.
if (eventType == EndOfPartitionEvent.class) {
return true;
}
else if (eventType == EndOfSuperstepEvent.class) {
return incrementEndOfSuperstepEventAndCheck();
}
// ------------------------------------------------------------
// Task events (user)
// ------------------------------------------------------------
else if (event instanceof TaskEvent) {
taskEventHandler.publish((TaskEvent) event);
return false;
}
else {
throw new IllegalStateException("Received unexpected event of type " + eventType + " at reader.");
}
}
catch (Throwable t) {
throw new IOException("Error while handling event of type " + eventType + ": " + t.getMessage(), t);
}
} | Handles the event and returns whether the reader reached an end-of-stream event (either the
end of the whole stream or the end of a superstep). |
public static Tuple2<String, Integer> getJobManagerAddress(Configuration configuration) throws ConfigurationException {
final String hostname = configuration.getString(JobManagerOptions.ADDRESS);
final int port = configuration.getInteger(JobManagerOptions.PORT);
if (hostname == null) {
throw new ConfigurationException("Config parameter '" + JobManagerOptions.ADDRESS +
"' is missing (hostname/address of JobManager to connect to).");
}
if (port <= 0 || port >= 65536) {
throw new ConfigurationException("Invalid value for '" + JobManagerOptions.PORT +
"' (port of the JobManager actor system) : " + port +
". it must be greater than 0 and less than 65536.");
}
return Tuple2.of(hostname, port);
} | Returns the JobManager's hostname and port extracted from the given
{@link Configuration}.
@param configuration Configuration to extract the JobManager's address from
@return The JobManager's hostname and port
@throws ConfigurationException if the JobManager's address cannot be extracted from the configuration |
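A usage sketch for getJobManagerAddress; the host and port values are placeholders, and the Configuration setters are the standard ConfigOption-based ones:
public static Tuple2<String, Integer> exampleJobManagerAddress() throws ConfigurationException {
Configuration configuration = new Configuration();
configuration.setString(JobManagerOptions.ADDRESS, "jobmanager.example.com");
configuration.setInteger(JobManagerOptions.PORT, 6123);
// Returns ("jobmanager.example.com", 6123), or throws if either option were missing or invalid.
return getJobManagerAddress(configuration);
}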
public LocalProperties addUniqueFields(FieldSet uniqueFields) {
LocalProperties copy = clone();
if (copy.uniqueFields == null) {
copy.uniqueFields = new HashSet<FieldSet>();
}
copy.uniqueFields.add(uniqueFields);
return copy;
} | Adds a combination of fields that are unique in these data properties.
@param uniqueFields The fields that are unique in these data properties. |
public LocalProperties filterBySemanticProperties(SemanticProperties props, int input) {
if (props == null) {
throw new NullPointerException("SemanticProperties may not be null.");
}
LocalProperties returnProps = new LocalProperties();
// check if sorting is preserved
if (this.ordering != null) {
Ordering newOrdering = new Ordering();
for (int i = 0; i < this.ordering.getInvolvedIndexes().size(); i++) {
int sourceField = this.ordering.getInvolvedIndexes().get(i);
FieldSet targetField = props.getForwardingTargetFields(input, sourceField);
if (targetField == null || targetField.size() == 0) {
if (i == 0) {
// order fully destroyed
newOrdering = null;
break;
} else {
// order partially preserved
break;
}
} else {
// use any field of target fields for now. We should use something like field equivalence sets in the future.
if(targetField.size() > 1) {
LOG.warn("Found that a field is forwarded to more than one target field in " +
"semantic forwarded field information. Will only use the field with the lowest index.");
}
newOrdering.appendOrdering(targetField.toArray()[0], this.ordering.getType(i), this.ordering.getOrder(i));
}
}
returnProps.ordering = newOrdering;
if (newOrdering != null) {
returnProps.groupedFields = newOrdering.getInvolvedIndexes();
} else {
returnProps.groupedFields = null;
}
}
// check if grouping is preserved
else if (this.groupedFields != null) {
FieldList newGroupedFields = new FieldList();
for (Integer sourceField : this.groupedFields) {
FieldSet targetField = props.getForwardingTargetFields(input, sourceField);
if (targetField == null || targetField.size() == 0) {
newGroupedFields = null;
break;
} else {
// use any field of target fields for now. We should use something like field equivalence sets in the future.
if(targetField.size() > 1) {
LOG.warn("Found that a field is forwarded to more than one target field in " +
"semantic forwarded field information. Will only use the field with the lowest index.");
}
newGroupedFields = newGroupedFields.addField(targetField.toArray()[0]);
}
}
returnProps.groupedFields = newGroupedFields;
}
if (this.uniqueFields != null) {
Set<FieldSet> newUniqueFields = new HashSet<FieldSet>();
for (FieldSet fields : this.uniqueFields) {
FieldSet newFields = new FieldSet();
for (Integer sourceField : fields) {
FieldSet targetField = props.getForwardingTargetFields(input, sourceField);
if (targetField == null || targetField.size() == 0) {
newFields = null;
break;
} else {
// use any field of target fields for now. We should use something like field equivalence sets in the future.
if(targetField.size() > 1) {
LOG.warn("Found that a field is forwarded to more than one target field in " +
"semantic forwarded field information. Will only use the field with the lowest index.");
}
newFields = newFields.addField(targetField.toArray()[0]);
}
}
if (newFields != null) {
newUniqueFields.add(newFields);
}
}
if (!newUniqueFields.isEmpty()) {
returnProps.uniqueFields = newUniqueFields;
} else {
returnProps.uniqueFields = null;
}
}
return returnProps;
} | Filters these LocalProperties by the fields that are forwarded to the output
as described by the SemanticProperties.
@param props The semantic properties holding information about forwarded fields.
@param input The index of the input.
@return The filtered LocalProperties |
public static LocalProperties combine(LocalProperties lp1, LocalProperties lp2) {
if (lp1.ordering != null) {
return lp1;
} else if (lp2.ordering != null) {
return lp2;
} else if (lp1.groupedFields != null) {
return lp1;
} else if (lp2.groupedFields != null) {
return lp2;
} else if (lp1.uniqueFields != null && !lp1.uniqueFields.isEmpty()) {
return lp1;
} else if (lp2.uniqueFields != null && !lp2.uniqueFields.isEmpty()) {
return lp2;
} else {
return lp1;
}
} | -------------------------------------------------------------------------------------------- |
public static LocalProperties forOrdering(Ordering o) {
LocalProperties props = new LocalProperties();
props.ordering = o;
props.groupedFields = o.getInvolvedIndexes();
return props;
} | -------------------------------------------------------------------------------------------- |
@Override
public void open(RuntimeContext runtimeContext) {
this.runtimeContext = runtimeContext;
localRateBytesPerSecond = globalRateBytesPerSecond / runtimeContext.getNumberOfParallelSubtasks();
this.rateLimiter = RateLimiter.create(localRateBytesPerSecond);
} | Creates a rate limiter with the runtime context provided.
@param runtimeContext The runtime context used to derive this subtask's share of the global rate. |
private MapViewSerializer<T, List<Long>> getValueToOrderMapViewSerializer() {
return new MapViewSerializer<>(
new MapSerializer<>(
createValueSerializer(),
new ListSerializer<>(LongSerializer.INSTANCE)));
} | Builds the serializer for a {@code MapView<T, List<Long>>}. |
public static <W extends Window> ContinuousEventTimeTrigger<W> of(Time interval) {
return new ContinuousEventTimeTrigger<>(interval.toMilliseconds());
} | Creates a trigger that continuously fires based on the given interval.
@param interval The time interval at which to fire.
@param <W> The type of {@link Window Windows} on which this trigger can operate. |
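A DataStream usage sketch for the trigger above; the Order type with public user/amount fields is borrowed from the earlier example, and the window size and firing interval are arbitrary:
public static DataStream<Order> earlyFiringHourlySums(DataStream<Order> orders) {
return orders
.keyBy(order -> order.user)
.window(TumblingEventTimeWindows.of(Time.hours(1)))
// Emit an early (partial) result every minute while the one-hour event-time window is still open.
.trigger(ContinuousEventTimeTrigger.of(Time.minutes(1)))
.sum("amount");
}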
public TableOperation createSort(List<Expression> orders, TableOperation child) {
failIfStreaming();
List<Expression> convertedOrders = orders.stream()
.map(f -> f.accept(orderWrapper))
.collect(Collectors.toList());
return new SortTableOperation(convertedOrders, child);
} | Creates a valid {@link SortTableOperation} operation.
<p><b>NOTE:</b> if the collation is not explicitly specified for any expression, it is wrapped in a
default ascending order.
@param orders expressions describing the order
@param child relational expression on top of which to apply the sort operation
@return valid sort operation |
public TableOperation createLimitWithOffset(int offset, TableOperation child) {
SortTableOperation previousSort = validateAndGetChildSort(child);
if (offset < 0) {
throw new ValidationException("Offset should be greater or equal 0");
}
if (previousSort.getOffset() != -1) {
throw new ValidationException("OFFSET already defined");
}
return new SortTableOperation(previousSort.getOrder(), previousSort.getChild(), offset, -1);
} | Adds offset to the underlying {@link SortTableOperation} if it is a valid one.
@param offset offset to add
@param child should be {@link SortTableOperation}
@return valid sort operation with applied offset |
public TableOperation createLimitWithFetch(int fetch, TableOperation child) {
SortTableOperation previousSort = validateAndGetChildSort(child);
if (fetch < 0) {
throw new ValidationException("Fetch should be greater or equal 0");
}
int offset = Math.max(previousSort.getOffset(), 0);
return new SortTableOperation(previousSort.getOrder(), previousSort.getChild(), offset, fetch);
} | Adds fetch to the underlying {@link SortTableOperation} if it is a valid one.
@param fetch fetch number to add
@param child should be {@link SortTableOperation}
@return valid sort operation with applied fetch |
@Override
protected List<T> executeOnCollections(List<T> inputData, RuntimeContext ctx, ExecutionConfig executionConfig) throws Exception {
// make sure we can handle empty inputs
if (inputData.isEmpty()) {
return Collections.emptyList();
}
ReduceFunction<T> function = this.userFunction.getUserCodeObject();
UnaryOperatorInformation<T, T> operatorInfo = getOperatorInfo();
TypeInformation<T> inputType = operatorInfo.getInputType();
int[] inputColumns = getKeyColumns(0);
if (!(inputType instanceof CompositeType) && inputColumns.length > 1) {
throw new InvalidProgramException("Grouping is only possible on composite types.");
}
FunctionUtils.setFunctionRuntimeContext(function, ctx);
FunctionUtils.openFunction(function, this.parameters);
TypeSerializer<T> serializer = getOperatorInfo().getInputType().createSerializer(executionConfig);
if (inputColumns.length > 0) {
boolean[] inputOrderings = new boolean[inputColumns.length];
TypeComparator<T> inputComparator = inputType instanceof AtomicType
? ((AtomicType<T>) inputType).createComparator(false, executionConfig)
: ((CompositeType<T>) inputType).createComparator(inputColumns, inputOrderings, 0, executionConfig);
Map<TypeComparable<T>, T> aggregateMap = new HashMap<TypeComparable<T>, T>(inputData.size() / 10);
for (T next : inputData) {
TypeComparable<T> wrapper = new TypeComparable<T>(next, inputComparator);
T existing = aggregateMap.get(wrapper);
T result;
if (existing != null) {
result = function.reduce(existing, serializer.copy(next));
} else {
result = next;
}
result = serializer.copy(result);
aggregateMap.put(wrapper, result);
}
FunctionUtils.closeFunction(function);
return new ArrayList<T>(aggregateMap.values());
}
else {
T aggregate = inputData.get(0);
aggregate = serializer.copy(aggregate);
for (int i = 1; i < inputData.size(); i++) {
T next = function.reduce(aggregate, serializer.copy(inputData.get(i)));
aggregate = serializer.copy(next);
}
			FunctionUtils.closeFunction(function);
return Collections.singletonList(aggregate);
}
	} | Executes the reduce operator on a Java collection: if grouping keys are configured, elements with equal keys are folded per group via the user-defined ReduceFunction; otherwise all elements are folded into a single result. |
@Override
public void putNormalizedKey(BigInteger record, MemorySegment target, int offset, int len) {
// add normalized bit length (the larger the length, the larger the value)
int bitLen = 0;
if (len > 0) {
final int signum = record.signum();
bitLen = record.bitLength();
// normalize dependent on sign
// from 0 to Integer.MAX
// OR from Integer.MAX to 0
int normBitLen = signum < 0 ? Integer.MAX_VALUE - bitLen : bitLen;
// add sign
if (signum >= 0) {
normBitLen |= (1 << 31);
}
for (int i = 0; i < 4 && len > 0; i++, len--) {
final byte b = (byte) (normBitLen >>> (8 * (3 - i)));
target.put(offset++, b);
}
}
// fill remaining bytes with most significant bits
int bitPos = bitLen - 1;
for (; len > 0; len--) {
byte b = 0;
for (int bytePos = 0; bytePos < 8 && bitPos >= 0; bytePos++, bitPos--) {
b <<= 1;
if (record.testBit(bitPos)) {
b |= 1;
}
}
// the last byte might be partially filled, but that's ok within an equal bit length.
// no need for padding bits.
target.put(offset++, b);
}
} | Adds a normalized key containing the normalized number of bits and MSBs of the given record.
One bit determines the sign (negative vs. zero/positive), 31 bits encode the bit length of the record.
Remaining bytes contain the most significant bits of the record. |
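To make the length normalization concrete, here is a small standalone demo (pure Java; the helper name is made up) that reproduces only the 4-byte prefix computed above and shows that it already orders values of different sign and magnitude correctly:

import java.math.BigInteger;

public class NormalizedPrefixDemo {
	// Hypothetical helper mirroring the first four bytes written above.
	static int lengthPrefix(BigInteger value) {
		int bitLen = value.bitLength();
		int normBitLen = value.signum() < 0 ? Integer.MAX_VALUE - bitLen : bitLen;
		if (value.signum() >= 0) {
			normBitLen |= (1 << 31);  // non-negative values sort after negatives
		}
		return normBitLen;
	}

	public static void main(String[] args) {
		BigInteger[] values = {
			BigInteger.valueOf(-1000), BigInteger.valueOf(-1),
			BigInteger.ZERO, BigInteger.ONE, BigInteger.valueOf(1000)
		};
		for (BigInteger v : values) {
			// printed as hex; byte-wise unsigned comparison of these prefixes preserves numeric order
			System.out.printf("%6d -> 0x%08X%n", v, lengthPrefix(v));
		}
	}
}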
protected static void validateZooKeeperConfig(Properties props) {
if (props.getProperty("zookeeper.connect") == null) {
throw new IllegalArgumentException("Required property 'zookeeper.connect' has not been set in the properties");
}
if (props.getProperty(ConsumerConfig.GROUP_ID_CONFIG) == null) {
throw new IllegalArgumentException("Required property '" + ConsumerConfig.GROUP_ID_CONFIG
+ "' has not been set in the properties");
}
try {
//noinspection ResultOfMethodCallIgnored
Integer.parseInt(props.getProperty("zookeeper.session.timeout.ms", "0"));
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("Property 'zookeeper.session.timeout.ms' is not a valid integer");
}
try {
//noinspection ResultOfMethodCallIgnored
Integer.parseInt(props.getProperty("zookeeper.connection.timeout.ms", "0"));
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("Property 'zookeeper.connection.timeout.ms' is not a valid integer");
}
} | Validate the ZK configuration, checking for required parameters.
@param props Properties to check |
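A minimal property set that passes this validation might look as follows (host names, group id, and timeout values are placeholders, not taken from the source):

import java.util.Properties;

public class KafkaZkConfigSketch {
	public static void main(String[] args) {
		Properties props = new Properties();
		props.setProperty("zookeeper.connect", "zk-host:2181");        // required
		props.setProperty("group.id", "my-consumer-group");            // required (ConsumerConfig.GROUP_ID_CONFIG)
		props.setProperty("zookeeper.session.timeout.ms", "6000");     // must parse as an integer
		props.setProperty("zookeeper.connection.timeout.ms", "6000");  // must parse as an integer
		System.out.println(props);
	}
}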
private static void validateAutoOffsetResetValue(Properties config) {
final String val = config.getProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "largest");
if (!(val.equals("largest") || val.equals("latest") || val.equals("earliest") || val.equals("smallest"))) {
// largest/smallest is kafka 0.8, latest/earliest is kafka 0.9
throw new IllegalArgumentException("Cannot use '" + ConsumerConfig.AUTO_OFFSET_RESET_CONFIG
+ "' value '" + val + "'. Possible values: 'latest', 'largest', 'earliest', or 'smallest'.");
}
} | Check for invalid "auto.offset.reset" values. Should be called in constructor for eager checking before submitting
the job. Note that 'none' is also considered invalid, as we don't want to deliberately throw an exception
right after a task is started.
@param config kafka consumer properties to check |
public boolean isCanceledOrFailed() {
return executionState == ExecutionState.CANCELING ||
executionState == ExecutionState.CANCELED ||
executionState == ExecutionState.FAILED;
} | Checks whether the task has failed, is canceled, or is being canceled at the moment.
@return True if the task is in state FAILED, CANCELING, or CANCELED, false otherwise. |
@Override
public void run() {
// ----------------------------
// Initial State transition
// ----------------------------
while (true) {
ExecutionState current = this.executionState;
if (current == ExecutionState.CREATED) {
if (transitionState(ExecutionState.CREATED, ExecutionState.DEPLOYING)) {
// success, we can start our work
break;
}
}
else if (current == ExecutionState.FAILED) {
// we were immediately failed. tell the TaskManager that we reached our final state
notifyFinalState();
if (metrics != null) {
metrics.close();
}
return;
}
else if (current == ExecutionState.CANCELING) {
if (transitionState(ExecutionState.CANCELING, ExecutionState.CANCELED)) {
// we were immediately canceled. tell the TaskManager that we reached our final state
notifyFinalState();
if (metrics != null) {
metrics.close();
}
return;
}
}
else {
if (metrics != null) {
metrics.close();
}
throw new IllegalStateException("Invalid state for beginning of operation of task " + this + '.');
}
}
// all resource acquisitions and registrations from here on
// need to be undone in the end
Map<String, Future<Path>> distributedCacheEntries = new HashMap<>();
AbstractInvokable invokable = null;
try {
// ----------------------------
// Task Bootstrap - We periodically
// check for canceling as a shortcut
// ----------------------------
// activate safety net for task thread
LOG.info("Creating FileSystem stream leak safety net for task {}", this);
FileSystemSafetyNet.initializeSafetyNetForThread();
blobService.getPermanentBlobService().registerJob(jobId);
// first of all, get a user-code classloader
// this may involve downloading the job's JAR files and/or classes
LOG.info("Loading JAR files for task {}.", this);
userCodeClassLoader = createUserCodeClassloader();
final ExecutionConfig executionConfig = serializedExecutionConfig.deserializeValue(userCodeClassLoader);
if (executionConfig.getTaskCancellationInterval() >= 0) {
// override task cancellation interval from Flink config if set in ExecutionConfig
taskCancellationInterval = executionConfig.getTaskCancellationInterval();
}
if (executionConfig.getTaskCancellationTimeout() >= 0) {
// override task cancellation timeout from Flink config if set in ExecutionConfig
taskCancellationTimeout = executionConfig.getTaskCancellationTimeout();
}
if (isCanceledOrFailed()) {
throw new CancelTaskException();
}
// ----------------------------------------------------------------
// register the task with the network stack
// this operation may fail if the system does not have enough
// memory to run the necessary data exchanges
// the registration must also strictly be undone
// ----------------------------------------------------------------
LOG.info("Registering task at network: {}.", this);
network.registerTask(this);
for (ResultPartition partition : producedPartitions) {
taskEventDispatcher.registerPartition(partition.getPartitionId());
}
// add metrics for buffers
this.metrics.getIOMetricGroup().initializeBufferMetrics(this);
// register detailed network metrics, if configured
if (taskManagerConfig.getConfiguration().getBoolean(TaskManagerOptions.NETWORK_DETAILED_METRICS)) {
// similar to MetricUtils.instantiateNetworkMetrics() but inside this IOMetricGroup
MetricGroup networkGroup = this.metrics.getIOMetricGroup().addGroup("Network");
MetricGroup outputGroup = networkGroup.addGroup("Output");
MetricGroup inputGroup = networkGroup.addGroup("Input");
// output metrics
for (int i = 0; i < producedPartitions.length; i++) {
ResultPartitionMetrics.registerQueueLengthMetrics(
outputGroup.addGroup(i), producedPartitions[i]);
}
for (int i = 0; i < inputGates.length; i++) {
InputGateMetrics.registerQueueLengthMetrics(
inputGroup.addGroup(i), inputGates[i]);
}
}
// next, kick off the background copying of files for the distributed cache
try {
for (Map.Entry<String, DistributedCache.DistributedCacheEntry> entry :
DistributedCache.readFileInfoFromConfig(jobConfiguration)) {
LOG.info("Obtaining local cache file for '{}'.", entry.getKey());
Future<Path> cp = fileCache.createTmpFile(entry.getKey(), entry.getValue(), jobId, executionId);
distributedCacheEntries.put(entry.getKey(), cp);
}
}
catch (Exception e) {
throw new Exception(
String.format("Exception while adding files to distributed cache of task %s (%s).", taskNameWithSubtask, executionId), e);
}
if (isCanceledOrFailed()) {
throw new CancelTaskException();
}
// ----------------------------------------------------------------
// call the user code initialization methods
// ----------------------------------------------------------------
TaskKvStateRegistry kvStateRegistry = kvStateService.createKvStateTaskRegistry(jobId, getJobVertexId());
Environment env = new RuntimeEnvironment(
jobId,
vertexId,
executionId,
executionConfig,
taskInfo,
jobConfiguration,
taskConfiguration,
userCodeClassLoader,
memoryManager,
ioManager,
broadcastVariableManager,
taskStateManager,
aggregateManager,
accumulatorRegistry,
kvStateRegistry,
inputSplitProvider,
distributedCacheEntries,
producedPartitions,
inputGates,
taskEventDispatcher,
checkpointResponder,
taskManagerConfig,
metrics,
this);
// now load and instantiate the task's invokable code
invokable = loadAndInstantiateInvokable(userCodeClassLoader, nameOfInvokableClass, env);
// ----------------------------------------------------------------
// actual task core work
// ----------------------------------------------------------------
// we must make strictly sure that the invokable is accessible to the cancel() call
// by the time we switched to running.
this.invokable = invokable;
// switch to the RUNNING state, if that fails, we have been canceled/failed in the meantime
if (!transitionState(ExecutionState.DEPLOYING, ExecutionState.RUNNING)) {
throw new CancelTaskException();
}
// notify everyone that we switched to running
taskManagerActions.updateTaskExecutionState(new TaskExecutionState(jobId, executionId, ExecutionState.RUNNING));
// make sure the user code classloader is accessible thread-locally
executingThread.setContextClassLoader(userCodeClassLoader);
// run the invokable
invokable.invoke();
// make sure, we enter the catch block if the task leaves the invoke() method due
// to the fact that it has been canceled
if (isCanceledOrFailed()) {
throw new CancelTaskException();
}
// ----------------------------------------------------------------
// finalization of a successful execution
// ----------------------------------------------------------------
// finish the produced partitions. if this fails, we consider the execution failed.
for (ResultPartition partition : producedPartitions) {
if (partition != null) {
partition.finish();
}
}
// try to mark the task as finished
// if that fails, the task was canceled/failed in the meantime
if (!transitionState(ExecutionState.RUNNING, ExecutionState.FINISHED)) {
throw new CancelTaskException();
}
}
catch (Throwable t) {
// unwrap wrapped exceptions to make stack traces more compact
if (t instanceof WrappingRuntimeException) {
t = ((WrappingRuntimeException) t).unwrap();
}
// ----------------------------------------------------------------
// the execution failed. either the invokable code properly failed, or
// an exception was thrown as a side effect of cancelling
// ----------------------------------------------------------------
try {
// check if the exception is unrecoverable
if (ExceptionUtils.isJvmFatalError(t) ||
(t instanceof OutOfMemoryError && taskManagerConfig.shouldExitJvmOnOutOfMemoryError())) {
// terminate the JVM immediately
// don't attempt a clean shutdown, because we cannot expect the clean shutdown to complete
try {
LOG.error("Encountered fatal error {} - terminating the JVM", t.getClass().getName(), t);
} finally {
Runtime.getRuntime().halt(-1);
}
}
// transition into our final state. we should be either in DEPLOYING, RUNNING, CANCELING, or FAILED
// loop for multiple retries during concurrent state changes via calls to cancel() or
// to failExternally()
while (true) {
ExecutionState current = this.executionState;
if (current == ExecutionState.RUNNING || current == ExecutionState.DEPLOYING) {
if (t instanceof CancelTaskException) {
if (transitionState(current, ExecutionState.CANCELED)) {
cancelInvokable(invokable);
break;
}
}
else {
if (transitionState(current, ExecutionState.FAILED, t)) {
// proper failure of the task. record the exception as the root cause
failureCause = t;
cancelInvokable(invokable);
break;
}
}
}
else if (current == ExecutionState.CANCELING) {
if (transitionState(current, ExecutionState.CANCELED)) {
break;
}
}
else if (current == ExecutionState.FAILED) {
// in state failed already, no transition necessary any more
break;
}
// unexpected state, go to failed
else if (transitionState(current, ExecutionState.FAILED, t)) {
LOG.error("Unexpected state in task {} ({}) during an exception: {}.", taskNameWithSubtask, executionId, current);
break;
}
				// else fall through the loop and retry the state transition
}
}
catch (Throwable tt) {
String message = String.format("FATAL - exception in exception handler of task %s (%s).", taskNameWithSubtask, executionId);
LOG.error(message, tt);
notifyFatalError(message, tt);
}
}
finally {
try {
LOG.info("Freeing task resources for {} ({}).", taskNameWithSubtask, executionId);
// clear the reference to the invokable. this helps guard against holding references
// to the invokable and its structures in cases where this Task object is still referenced
this.invokable = null;
// stop the async dispatcher.
// copy dispatcher reference to stack, against concurrent release
final BlockingCallMonitoringThreadPool dispatcher = this.asyncCallDispatcher;
if (dispatcher != null && !dispatcher.isShutdown()) {
dispatcher.shutdownNow();
}
// free the network resources
releaseNetworkResources();
// free memory resources
if (invokable != null) {
memoryManager.releaseAll(invokable);
}
				// remove all of the task's library resources
libraryCache.unregisterTask(jobId, executionId);
fileCache.releaseJob(jobId, executionId);
blobService.getPermanentBlobService().releaseJob(jobId);
// close and de-activate safety net for task thread
LOG.info("Ensuring all FileSystem streams are closed for task {}", this);
FileSystemSafetyNet.closeSafetyNetAndGuardedResourcesForThread();
notifyFinalState();
}
catch (Throwable t) {
// an error in the resource cleanup is fatal
String message = String.format("FATAL - exception in resource cleanup of task %s (%s).", taskNameWithSubtask, executionId);
LOG.error(message, t);
notifyFatalError(message, t);
}
// un-register the metrics at the end so that the task may already be
// counted as finished when this happens
// errors here will only be logged
try {
metrics.close();
}
catch (Throwable t) {
LOG.error("Error during metrics de-registration of task {} ({}).", taskNameWithSubtask, executionId, t);
}
}
} | The core work method that bootstraps the task and executes its code. |
private void releaseNetworkResources() {
LOG.debug("Release task {} network resources (state: {}).", taskNameWithSubtask, getExecutionState());
for (ResultPartition partition : producedPartitions) {
taskEventDispatcher.unregisterPartition(partition.getPartitionId());
if (isCanceledOrFailed()) {
partition.fail(getFailureCause());
}
}
closeNetworkResources();
	} | Releases network resources before the task exits. If the task has failed, is canceled, or is being
canceled at the moment, the produced partitions are also marked as failed before they are released. |
private void closeNetworkResources() {
for (ResultPartition partition : producedPartitions) {
try {
partition.close();
} catch (Throwable t) {
ExceptionUtils.rethrowIfFatalError(t);
LOG.error("Failed to release result partition for task {}.", taskNameWithSubtask, t);
}
}
for (InputGate inputGate : inputGates) {
try {
inputGate.close();
} catch (Throwable t) {
ExceptionUtils.rethrowIfFatalError(t);
LOG.error("Failed to release input gate for task {}.", taskNameWithSubtask, t);
}
}
	} | Closes the network resources in two scenarios: from the {@link TaskCanceler} to release partitions
and gates early, and from the task thread while the task is exiting. |
private boolean transitionState(ExecutionState currentState, ExecutionState newState, Throwable cause) {
if (STATE_UPDATER.compareAndSet(this, currentState, newState)) {
if (cause == null) {
LOG.info("{} ({}) switched from {} to {}.", taskNameWithSubtask, executionId, currentState, newState);
} else {
LOG.info("{} ({}) switched from {} to {}.", taskNameWithSubtask, executionId, currentState, newState, cause);
}
return true;
} else {
return false;
}
} | Try to transition the execution state from the current state to the new state.
@param currentState of the execution
@param newState of the execution
@param cause of the transition change or null
@return true if the transition was successful, otherwise false |
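The atomicity here comes from a compare-and-set over a volatile state field; a minimal standalone sketch of the same pattern, with illustrative class and state names (not Flink's own types):

import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

class StateHolder {
	enum State { CREATED, DEPLOYING, RUNNING, CANCELING, CANCELED, FAILED }

	private static final AtomicReferenceFieldUpdater<StateHolder, State> STATE_UPDATER =
		AtomicReferenceFieldUpdater.newUpdater(StateHolder.class, State.class, "state");

	private volatile State state = State.CREATED;

	// Returns true only if the state was still 'expected' at the moment of the swap,
	// which is what makes concurrent cancel()/fail calls race-safe.
	boolean transitionState(State expected, State newState) {
		return STATE_UPDATER.compareAndSet(this, expected, newState);
	}
}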
@Override
public void failExternally(Throwable cause) {
LOG.info("Attempting to fail task externally {} ({}).", taskNameWithSubtask, executionId);
cancelOrFailAndCancelInvokable(ExecutionState.FAILED, cause);
} | Marks task execution failed for an external reason (a reason other than the task code itself
throwing an exception). If the task is already in a terminal state
(such as FINISHED, CANCELED, FAILED), or if the task is already canceling, this does nothing.
Otherwise it sets the state to FAILED, and, if the invokable code is running,
starts an asynchronous thread that aborts that code.
<p>This method never blocks.</p> |
@Override
public void triggerPartitionProducerStateCheck(
JobID jobId,
final IntermediateDataSetID intermediateDataSetId,
final ResultPartitionID resultPartitionId) {
CompletableFuture<ExecutionState> futurePartitionState =
partitionProducerStateChecker.requestPartitionProducerState(
jobId,
intermediateDataSetId,
resultPartitionId);
futurePartitionState.whenCompleteAsync(
(ExecutionState executionState, Throwable throwable) -> {
try {
if (executionState != null) {
onPartitionStateUpdate(
intermediateDataSetId,
resultPartitionId,
executionState);
} else if (throwable instanceof TimeoutException) {
// our request timed out, assume we're still running and try again
onPartitionStateUpdate(
intermediateDataSetId,
resultPartitionId,
ExecutionState.RUNNING);
} else if (throwable instanceof PartitionProducerDisposedException) {
String msg = String.format("Producer %s of partition %s disposed. Cancelling execution.",
resultPartitionId.getProducerId(), resultPartitionId.getPartitionId());
LOG.info(msg, throwable);
cancelExecution();
} else {
failExternally(throwable);
}
} catch (IOException | InterruptedException e) {
failExternally(e);
}
},
executor);
	} | Triggers an asynchronous check of the producer's execution state for the given result partition and reacts to the result: re-triggers the partition request, cancels the execution, or fails the task. |
public void triggerCheckpointBarrier(
final long checkpointID,
final long checkpointTimestamp,
final CheckpointOptions checkpointOptions,
final boolean advanceToEndOfEventTime) {
final AbstractInvokable invokable = this.invokable;
final CheckpointMetaData checkpointMetaData = new CheckpointMetaData(checkpointID, checkpointTimestamp);
if (executionState == ExecutionState.RUNNING && invokable != null) {
// build a local closure
final String taskName = taskNameWithSubtask;
final SafetyNetCloseableRegistry safetyNetCloseableRegistry =
FileSystemSafetyNet.getSafetyNetCloseableRegistryForThread();
Runnable runnable = new Runnable() {
@Override
public void run() {
// set safety net from the task's context for checkpointing thread
LOG.debug("Creating FileSystem stream leak safety net for {}", Thread.currentThread().getName());
FileSystemSafetyNet.setSafetyNetCloseableRegistryForThread(safetyNetCloseableRegistry);
try {
boolean success = invokable.triggerCheckpoint(checkpointMetaData, checkpointOptions, advanceToEndOfEventTime);
if (!success) {
checkpointResponder.declineCheckpoint(
getJobID(), getExecutionId(), checkpointID,
new CheckpointDeclineTaskNotReadyException(taskName));
}
}
catch (Throwable t) {
if (getExecutionState() == ExecutionState.RUNNING) {
failExternally(new Exception(
"Error while triggering checkpoint " + checkpointID + " for " +
taskNameWithSubtask, t));
} else {
LOG.debug("Encountered error while triggering checkpoint {} for " +
"{} ({}) while being not in state running.", checkpointID,
taskNameWithSubtask, executionId, t);
}
} finally {
FileSystemSafetyNet.setSafetyNetCloseableRegistryForThread(null);
}
}
};
executeAsyncCallRunnable(
runnable,
String.format("Checkpoint Trigger for %s (%s).", taskNameWithSubtask, executionId),
checkpointOptions.getCheckpointType().isSynchronous());
}
else {
LOG.debug("Declining checkpoint request for non-running task {} ({}).", taskNameWithSubtask, executionId);
// send back a message that we did not do the checkpoint
checkpointResponder.declineCheckpoint(jobId, executionId, checkpointID,
new CheckpointDeclineTaskNotReadyException(taskNameWithSubtask));
}
} | Calls the invokable to trigger a checkpoint.
@param checkpointID The ID identifying the checkpoint.
@param checkpointTimestamp The timestamp associated with the checkpoint.
@param checkpointOptions Options for performing this checkpoint.
@param advanceToEndOfEventTime Flag indicating if the source should inject a {@code MAX_WATERMARK} in the pipeline
to fire any registered event-time timers. |
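Reduced to its core, the method hands the trigger call to a separate executor and declines the checkpoint if the invokable reports failure; a rough standalone sketch with hypothetical stand-ins for the Flink calls:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class AsyncTriggerSketch {
	// Hypothetical stand-ins for invokable.triggerCheckpoint(...) and
	// checkpointResponder.declineCheckpoint(...).
	static boolean triggerOnInvokable() { return false; }
	static void decline(String reason) { System.out.println("declined: " + reason); }

	public static void main(String[] args) {
		ExecutorService asyncCallDispatcher = Executors.newSingleThreadExecutor();
		asyncCallDispatcher.submit(() -> {
			try {
				if (!triggerOnInvokable()) {
					decline("task not ready");
				}
			} catch (Exception e) {
				decline("error while triggering: " + e.getMessage());
			}
		});
		asyncCallDispatcher.shutdown();
	}
}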
@VisibleForTesting
void onPartitionStateUpdate(
IntermediateDataSetID intermediateDataSetId,
ResultPartitionID resultPartitionId,
ExecutionState producerState) throws IOException, InterruptedException {
if (executionState == ExecutionState.RUNNING) {
final SingleInputGate inputGate = inputGatesById.get(intermediateDataSetId);
if (inputGate != null) {
if (producerState == ExecutionState.SCHEDULED
|| producerState == ExecutionState.DEPLOYING
|| producerState == ExecutionState.RUNNING
|| producerState == ExecutionState.FINISHED) {
// Retrigger the partition request
inputGate.retriggerPartitionRequest(resultPartitionId.getPartitionId());
} else if (producerState == ExecutionState.CANCELING
|| producerState == ExecutionState.CANCELED
|| producerState == ExecutionState.FAILED) {
// The producing execution has been canceled or failed. We
// don't need to re-trigger the request since it cannot
// succeed.
if (LOG.isDebugEnabled()) {
LOG.debug("Cancelling task {} after the producer of partition {} with attempt ID {} has entered state {}.",
taskNameWithSubtask,
resultPartitionId.getPartitionId(),
resultPartitionId.getProducerId(),
producerState);
}
cancelExecution();
} else {
// Any other execution state is unexpected. Currently, only
// state CREATED is left out of the checked states. If we
// see a producer in this state, something went wrong with
// scheduling in topological order.
String msg = String.format("Producer with attempt ID %s of partition %s in unexpected state %s.",
resultPartitionId.getProducerId(),
resultPartitionId.getPartitionId(),
producerState);
failExternally(new IllegalStateException(msg));
}
} else {
failExternally(new IllegalStateException("Received partition producer state for " +
"unknown input gate " + intermediateDataSetId + "."));
}
} else {
LOG.debug("Task {} ignored a partition producer state notification, because it's not running.", taskNameWithSubtask);
}
} | Answer to a partition state check issued after a failed partition request. |
private void executeAsyncCallRunnable(Runnable runnable, String callName, boolean blocking) {
// make sure the executor is initialized. lock against concurrent calls to this function
synchronized (this) {
if (executionState != ExecutionState.RUNNING) {
return;
}
// get ourselves a reference on the stack that cannot be concurrently modified
BlockingCallMonitoringThreadPool executor = this.asyncCallDispatcher;
if (executor == null) {
// first time use, initialize
checkState(userCodeClassLoader != null, "userCodeClassLoader must not be null");
// Under normal execution, we expect that one thread will suffice, this is why we
// keep the core threads to 1. In the case of a synchronous savepoint, we will block
// the checkpointing thread, so we need an additional thread to execute the
				// notifyCheckpointComplete() callback. Finally, we aggressively purge (potentially)
				// idle threads so that we do not risk having many idle threads on machines that run
				// multiple tasks. Either way, only one of them can execute at a time due to the
// checkpoint lock.
executor = new BlockingCallMonitoringThreadPool(
new DispatcherThreadFactory(
TASK_THREADS_GROUP,
"Async calls on " + taskNameWithSubtask,
userCodeClassLoader));
this.asyncCallDispatcher = executor;
// double-check for execution state, and make sure we clean up after ourselves
// if we created the dispatcher while the task was concurrently canceled
if (executionState != ExecutionState.RUNNING) {
executor.shutdown();
asyncCallDispatcher = null;
return;
}
}
LOG.debug("Invoking async call {} on task {}", callName, taskNameWithSubtask);
try {
executor.submit(runnable, blocking);
}
catch (RejectedExecutionException e) {
// may be that we are concurrently finished or canceled.
// if not, report that something is fishy
if (executionState == ExecutionState.RUNNING) {
throw new RuntimeException("Async call with a " + (blocking ? "" : "non-") + "blocking call was rejected, even though the task is running.", e);
}
}
}
} | Utility method to dispatch an asynchronous call on the invokable.
@param runnable The async call runnable.
@param callName The name of the call, for logging purposes.
@param blocking Whether the submitted call blocks the dispatching thread (as for a synchronous savepoint) and therefore needs separate monitoring. |
private void cancelInvokable(AbstractInvokable invokable) {
// in case of an exception during execution, we still call "cancel()" on the task
if (invokable != null && invokableHasBeenCanceled.compareAndSet(false, true)) {
try {
invokable.cancel();
}
catch (Throwable t) {
LOG.error("Error while canceling task {}.", taskNameWithSubtask, t);
}
}
	} | Calls cancel() on the invokable at most once; any error thrown by the cancel call is logged but not propagated. |
private static AbstractInvokable loadAndInstantiateInvokable(
ClassLoader classLoader,
String className,
Environment environment) throws Throwable {
final Class<? extends AbstractInvokable> invokableClass;
try {
invokableClass = Class.forName(className, true, classLoader)
.asSubclass(AbstractInvokable.class);
} catch (Throwable t) {
throw new Exception("Could not load the task's invokable class.", t);
}
Constructor<? extends AbstractInvokable> statelessCtor;
try {
statelessCtor = invokableClass.getConstructor(Environment.class);
} catch (NoSuchMethodException ee) {
throw new FlinkException("Task misses proper constructor", ee);
}
// instantiate the class
try {
//noinspection ConstantConditions --> cannot happen
return statelessCtor.newInstance(environment);
} catch (InvocationTargetException e) {
// directly forward exceptions from the eager initialization
throw e.getTargetException();
} catch (Exception e) {
throw new FlinkException("Could not instantiate the task's invokable class.", e);
}
	} | Instantiates the given task invokable class, passing the given environment to the task's constructor.
<p>The method looks up the constructor that accepts only the Environment; if the class does not
provide such a constructor, instantiation fails. Exceptions thrown by the constructor itself are
forwarded directly.
@param classLoader The classloader to load the class through.
@param className The name of the class to load.
@param environment The task environment.
@return The instantiated invokable task object.
@throws Throwable Forwards all exceptions that happen during initialization of the task.
Also throws an exception if the task class misses the necessary constructor. |
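The same single-argument reflective construction pattern, shown as a standalone demo (the Greeter class and all names are made up for illustration):

import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;

public class ReflectiveInstantiationDemo {
	public static class Greeter {
		private final String name;
		public Greeter(String name) { this.name = name; }
		public String greet() { return "Hello, " + name; }
	}

	public static void main(String[] args) throws Throwable {
		Class<? extends Greeter> clazz = Class
			.forName("ReflectiveInstantiationDemo$Greeter")
			.asSubclass(Greeter.class);

		Constructor<? extends Greeter> ctor = clazz.getConstructor(String.class);
		try {
			System.out.println(ctor.newInstance("Flink").greet());
		} catch (InvocationTargetException e) {
			// forward the constructor's own failure, as the loader above does
			throw e.getTargetException();
		}
	}
}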
@Override
public void open(FileInputSplit split) throws IOException {
super.open(split);
// instantiate the parsers
FieldParser<?>[] parsers = new FieldParser<?>[fieldTypes.length];
for (int i = 0; i < fieldTypes.length; i++) {
if (fieldTypes[i] != null) {
Class<? extends FieldParser<?>> parserType = FieldParser.getParserForType(fieldTypes[i]);
if (parserType == null) {
throw new RuntimeException("No parser available for type '" + fieldTypes[i].getName() + "'.");
}
FieldParser<?> p = InstantiationUtil.instantiate(parserType, FieldParser.class);
p.setCharset(getCharset());
if (this.quotedStringParsing) {
if (p instanceof StringParser) {
((StringParser)p).enableQuotedStringParsing(this.quoteCharacter);
} else if (p instanceof StringValueParser) {
((StringValueParser)p).enableQuotedStringParsing(this.quoteCharacter);
}
}
parsers[i] = p;
}
}
this.fieldParsers = parsers;
// skip the first line, if we are at the beginning of a file and have the option set
if (this.skipFirstLineAsHeader && this.splitStart == 0) {
readLine(); // read and ignore
}
	} | Opens the given input split, creates a field parser for each configured field type (enabling quoted-string parsing where requested), and skips the header line if configured and at the start of the file. |
public static <K, V> LinkedOptionalMap<K, V> optionalMapOf(Map<K, V> sourceMap, Function<K, String> keyNameGetter) {
LinkedHashMap<String, KeyValue<K, V>> underlyingMap = new LinkedHashMap<>(sourceMap.size());
sourceMap.forEach((k, v) -> {
String keyName = keyNameGetter.apply(k);
underlyingMap.put(keyName, new KeyValue<>(k, v));
});
return new LinkedOptionalMap<>(underlyingMap);
	} | Creates a {@code LinkedOptionalMap} from the provided map.
<p>This method is the equivalent of {@link Optional#of(Object)} but for maps. To support more than one {@code NULL}
key, an optional map requires a unique string name to be associated with each key (provided by keyNameGetter)
@param sourceMap a source map to wrap as an optional map.
@param keyNameGetter function that assigns a unique name to the keys of the source map.
@param <K> key type
@param <V> value type
@return a {@code LinkedOptionalMap} with named keys, whose keys and values may each be absent. |
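A hedged usage sketch, assuming the class lives at org.apache.flink.util.LinkedOptionalMap with the signature shown above; the keys here are classes, and the class name serves as the unique key name:

import org.apache.flink.util.LinkedOptionalMap;

import java.util.LinkedHashMap;
import java.util.Map;

public class OptionalMapSketch {
	public static void main(String[] args) {
		Map<Class<?>, String> registered = new LinkedHashMap<>();
		registered.put(String.class, "stringSerializer");
		registered.put(Long.class, "longSerializer");

		// Each key gets a stable, unique name (here: the class name), so an entry
		// whose key cannot be restored later can still be reported by name.
		LinkedOptionalMap<Class<?>, String> optional =
				LinkedOptionalMap.optionalMapOf(registered, Class::getName);
		System.out.println(optional.absentKeysOrValues()); // [] - everything is present
	}
}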
public static <K, V> MergeResult<K, V> mergeRightIntoLeft(LinkedOptionalMap<K, V> left, LinkedOptionalMap<K, V> right) {
LinkedOptionalMap<K, V> merged = new LinkedOptionalMap<>(left);
merged.putAll(right);
return new MergeResult<>(merged, isLeftPrefixOfRight(left, right));
	} | Tries to merge the keys and the values of @right into @left. |
public Set<String> absentKeysOrValues() {
return underlyingMap.entrySet()
.stream()
.filter(LinkedOptionalMap::keyOrValueIsAbsent)
.map(Entry::getKey)
.collect(Collectors.toCollection(LinkedHashSet::new));
} | Returns the key names of any keys or values that are absent. |
public boolean hasAbsentKeysOrValues() {
for (Entry<String, KeyValue<K, V>> entry : underlyingMap.entrySet()) {
if (keyOrValueIsAbsent(entry)) {
return true;
}
}
return false;
} | Checks whether there are entries with absent keys or values. |
public LinkedHashMap<K, V> unwrapOptionals() {
final LinkedHashMap<K, V> unwrapped = new LinkedHashMap<>(underlyingMap.size());
for (Entry<String, KeyValue<K, V>> entry : underlyingMap.entrySet()) {
String namedKey = entry.getKey();
KeyValue<K, V> kv = entry.getValue();
if (kv.key == null) {
throw new IllegalStateException("Missing key '" + namedKey + "'");
}
if (kv.value == null) {
throw new IllegalStateException("Missing value for the key '" + namedKey + "'");
}
unwrapped.put(kv.key, kv.value);
}
return unwrapped;
	} | Assuming all the entries of this map are present (keys and values), this method returns
a map with these keys and values, stripped of their Optional wrappers.
NOTE: if any of the keys or values are absent, this method throws an {@link IllegalStateException}. |
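Continuing the LinkedOptionalMap sketch above, a typical guard before unwrapping (hedged fragment; 'optional' is the map built in that sketch):

if (optional.hasAbsentKeysOrValues()) {
	// report the named keys that could not be fully restored
	throw new IllegalStateException("Unresolved entries: " + optional.absentKeysOrValues());
}
LinkedHashMap<Class<?>, String> resolved = optional.unwrapOptionals();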
private static <K, V> boolean keyOrValueIsAbsent(Entry<String, KeyValue<K, V>> entry) {
KeyValue<K, V> kv = entry.getValue();
return kv.key == null || kv.value == null;
	} | Checks whether the key or the value of the given entry is absent (null). |
public void addDiscoveredPartitions(List<KafkaTopicPartition> newPartitions) throws IOException, ClassNotFoundException {
List<KafkaTopicPartitionState<KPH>> newPartitionStates = createPartitionStateHolders(
newPartitions,
KafkaTopicPartitionStateSentinel.EARLIEST_OFFSET,
timestampWatermarkMode,
watermarksPeriodic,
watermarksPunctuated,
userCodeClassLoader);
if (useMetrics) {
registerOffsetMetrics(consumerMetricGroup, newPartitionStates);
}
for (KafkaTopicPartitionState<KPH> newPartitionState : newPartitionStates) {
// the ordering is crucial here; first register the state holder, then
// push it to the partitions queue to be read
subscribedPartitionStates.add(newPartitionState);
unassignedPartitionsQueue.add(newPartitionState);
}
} | Adds a list of newly discovered partitions to the fetcher for consuming.
<p>This method creates the partition state holder for each new partition, using
{@link KafkaTopicPartitionStateSentinel#EARLIEST_OFFSET} as the starting offset.
It uses the earliest offset because there may be a delay in discovering a partition
after it was created and started receiving records.
<p>After the state representation for a partition is created, it is added to the
unassigned partitions queue to await to be consumed.
@param newPartitions discovered partitions to add |
public final void commitInternalOffsetsToKafka(
Map<KafkaTopicPartition, Long> offsets,
@Nonnull KafkaCommitCallback commitCallback) throws Exception {
		// Ignore sentinels. They might appear here if a snapshot started before the actual
		// offset values replaced the sentinels.
doCommitInternalOffsetsToKafka(filterOutSentinels(offsets), commitCallback);
} | Commits the given partition offsets to the Kafka brokers (or to ZooKeeper for
older Kafka versions). This method is only ever called when the offset commit mode of
the consumer is {@link OffsetCommitMode#ON_CHECKPOINTS}.
<p>The given offsets are the internal checkpointed offsets, representing
the last processed record of each partition. Version-specific implementations of this method
need to hold the contract that the given offsets must be incremented by 1 before
committing them, so that committed offsets to Kafka represent "the next record to process".
@param offsets The offsets to commit to Kafka (implementations must increment offsets by 1 before committing).
@param commitCallback The callback that the user should trigger when a commit request completes or fails.
@throws Exception This method forwards exceptions. |
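A small standalone sketch of the "+1" contract described above (pure Java; plain strings stand in for KafkaTopicPartition):

import java.util.HashMap;
import java.util.Map;

public class OffsetCommitSketch {
	public static void main(String[] args) {
		// Internally checkpointed offsets: the last processed record per partition.
		Map<String, Long> lastProcessed = new HashMap<>();
		lastProcessed.put("topic-0", 41L);
		lastProcessed.put("topic-1", 99L);

		// What gets committed: the next record to process, i.e. offset + 1.
		Map<String, Long> toCommit = new HashMap<>();
		lastProcessed.forEach((partition, offset) -> toCommit.put(partition, offset + 1));
		System.out.println(toCommit); // prints the incremented offsets, e.g. {topic-0=42, topic-1=100}
	}
}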
public HashMap<KafkaTopicPartition, Long> snapshotCurrentState() {
// this method assumes that the checkpoint lock is held
assert Thread.holdsLock(checkpointLock);
HashMap<KafkaTopicPartition, Long> state = new HashMap<>(subscribedPartitionStates.size());
for (KafkaTopicPartitionState<KPH> partition : subscribedPartitionStates) {
state.put(partition.getKafkaTopicPartition(), partition.getOffset());
}
return state;
} | Takes a snapshot of the partition offsets.
<p>Important: This method must be called under the checkpoint lock.
@return A map from partition to current offset. |