int refreshAndGetMin() { int min = Integer.MAX_VALUE; ResultSubpartition[] allPartitions = partition.getAllPartitions(); if (allPartitions.length == 0) { // meaningful value when no channels exist: return 0; } for (ResultSubpartition part : allPartitions) { int size = part.unsynchronizedGetNumberOfQueuedBuffers(); min = Math.min(min, size); } return min; }
Iterates over all sub-partitions and collects the minimum number of queued buffers in a sub-partition in a best-effort way. @return minimum number of queued buffers per sub-partition (<tt>0</tt> if no sub-partitions exist)
int refreshAndGetMax() { int max = 0; for (ResultSubpartition part : partition.getAllPartitions()) { int size = part.unsynchronizedGetNumberOfQueuedBuffers(); max = Math.max(max, size); } return max; }
Iterates over all sub-partitions and collects the maximum number of queued buffers in a sub-partition in a best-effort way. @return maximum number of queued buffers per sub-partition
float refreshAndGetAvg() { long total = 0; ResultSubpartition[] allPartitions = partition.getAllPartitions(); for (ResultSubpartition part : allPartitions) { int size = part.unsynchronizedGetNumberOfQueuedBuffers(); total += size; } return total / (float) allPartitions.length; }
Iterates over all sub-partitions and collects the average number of queued buffers in a sub-partition in a best-effort way. @return average number of queued buffers per sub-partition
public static void registerQueueLengthMetrics(MetricGroup group, ResultPartition partition) { ResultPartitionMetrics metrics = new ResultPartitionMetrics(partition); group.gauge("totalQueueLen", metrics.getTotalQueueLenGauge()); group.gauge("minQueueLen", metrics.getMinQueueLenGauge()); group.gauge("maxQueueLen", metrics.getMaxQueueLenGauge()); group.gauge("avgQueueLen", metrics.getAvgQueueLenGauge()); }
------------------------------------------------------------------------
public synchronized URL addFile(File localFile, String remoteFile) throws IOException, MalformedURLException { return addPath(new Path(localFile.toURI()), new Path(remoteFile)); }
Adds a file to the artifact server. @param localFile the local file to serve. @param remoteFile the remote path with which to locate the file. @return the fully-qualified remote path to the file. @throws MalformedURLException if the remote path is invalid.
public synchronized URL addPath(Path path, Path remoteFile) throws IOException, MalformedURLException { if (paths.containsKey(remoteFile)) { throw new IllegalArgumentException("duplicate path registered"); } if (remoteFile.isAbsolute()) { throw new IllegalArgumentException("not expecting an absolute path"); } URL fileURL = new URL(baseURL, remoteFile.toString()); router.addAny(fileURL.getPath(), new VirtualFileServerHandler(path)); paths.put(remoteFile, fileURL); return fileURL; }
Adds a path to the artifact server. @param path the qualified FS path to serve (local, hdfs, etc). @param remoteFile the remote path with which to locate the file. @return the fully-qualified remote path to the file. @throws MalformedURLException if the remote path is invalid.
public synchronized void stop() throws Exception { if (this.serverChannel != null) { this.serverChannel.close().awaitUninterruptibly(); this.serverChannel = null; } if (bootstrap != null) { if (bootstrap.group() != null) { bootstrap.group().shutdownGracefully(); } bootstrap = null; } }
Stops the artifact server. @throws Exception if shutting down the server channel or the bootstrap event loop group fails.
private Object invokeRpc(Method method, Object[] args) throws Exception { String methodName = method.getName(); Class<?>[] parameterTypes = method.getParameterTypes(); Annotation[][] parameterAnnotations = method.getParameterAnnotations(); Time futureTimeout = extractRpcTimeout(parameterAnnotations, args, timeout); final RpcInvocation rpcInvocation = createRpcInvocationMessage(methodName, parameterTypes, args); Class<?> returnType = method.getReturnType(); final Object result; if (Objects.equals(returnType, Void.TYPE)) { tell(rpcInvocation); result = null; } else { // execute an asynchronous call CompletableFuture<?> resultFuture = ask(rpcInvocation, futureTimeout); CompletableFuture<?> completableFuture = resultFuture.thenApply((Object o) -> { if (o instanceof SerializedValue) { try { return ((SerializedValue<?>) o).deserializeValue(getClass().getClassLoader()); } catch (IOException | ClassNotFoundException e) { throw new CompletionException( new RpcException("Could not deserialize the serialized payload of RPC method : " + methodName, e)); } } else { return o; } }); if (Objects.equals(returnType, CompletableFuture.class)) { result = completableFuture; } else { try { result = completableFuture.get(futureTimeout.getSize(), futureTimeout.getUnit()); } catch (ExecutionException ee) { throw new RpcException("Failure while obtaining synchronous RPC result.", ExceptionUtils.stripExecutionException(ee)); } } } return result; }
Invokes an RPC method by sending the RPC invocation details to the rpc endpoint. @param method to call @param args of the method call @return result of the RPC @throws Exception if the RPC invocation fails
protected RpcInvocation createRpcInvocationMessage( final String methodName, final Class<?>[] parameterTypes, final Object[] args) throws IOException { final RpcInvocation rpcInvocation; if (isLocal) { rpcInvocation = new LocalRpcInvocation( methodName, parameterTypes, args); } else { try { RemoteRpcInvocation remoteRpcInvocation = new RemoteRpcInvocation( methodName, parameterTypes, args); if (remoteRpcInvocation.getSize() > maximumFramesize) { throw new IOException("The rpc invocation size exceeds the maximum akka framesize."); } else { rpcInvocation = remoteRpcInvocation; } } catch (IOException e) { LOG.warn("Could not create remote rpc invocation message. Failing rpc invocation because...", e); throw e; } } return rpcInvocation; }
Create the RpcInvocation message for the given RPC. @param methodName of the RPC @param parameterTypes of the RPC @param args of the RPC @return RpcInvocation message which encapsulates the RPC details @throws IOException if we cannot serialize the RPC invocation parameters
private static Time extractRpcTimeout(Annotation[][] parameterAnnotations, Object[] args, Time defaultTimeout) { if (args != null) { Preconditions.checkArgument(parameterAnnotations.length == args.length); for (int i = 0; i < parameterAnnotations.length; i++) { if (isRpcTimeout(parameterAnnotations[i])) { if (args[i] instanceof Time) { return (Time) args[i]; } else { throw new RuntimeException("The rpc timeout parameter must be of type " + Time.class.getName() + ". The type " + args[i].getClass().getName() + " is not supported."); } } } } return defaultTimeout; }
Extracts the {@link RpcTimeout} annotated rpc timeout value from the list of given method arguments. If no {@link RpcTimeout} annotated parameter could be found, then the default timeout is returned. @param parameterAnnotations Parameter annotations @param args Array of arguments @param defaultTimeout Default timeout to return if no {@link RpcTimeout} annotated parameter has been found @return Timeout extracted from the array of arguments or the default timeout
private static boolean isRpcTimeout(Annotation[] annotations) { for (Annotation annotation : annotations) { if (annotation.annotationType().equals(RpcTimeout.class)) { return true; } } return false; }
Checks whether any of the annotations is of type {@link RpcTimeout}. @param annotations Array of annotations @return True if {@link RpcTimeout} was found; otherwise false
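The two helpers above boil down to plain parameter-annotation reflection. The following is a minimal, self-contained sketch of the same scanning pattern; the @CallTimeout annotation, the SampleGateway interface, and the use of a raw millisecond value instead of Flink's Time type are illustrative stand-ins, not part of the actual RPC code.

import java.lang.annotation.Annotation;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import java.lang.reflect.Method;

public class TimeoutScanSketch {

    // Hypothetical stand-in for the RpcTimeout marker annotation.
    @Retention(RetentionPolicy.RUNTIME)
    @Target(ElementType.PARAMETER)
    @interface CallTimeout {}

    // Hypothetical gateway with one timeout-annotated parameter.
    interface SampleGateway {
        String lookup(String key, @CallTimeout long timeoutMillis);
    }

    // Same idea as extractRpcTimeout/isRpcTimeout: scan the per-parameter
    // annotations and, if the marker is found, use that argument as the timeout.
    static long extractTimeout(Method method, Object[] args, long defaultTimeout) {
        Annotation[][] parameterAnnotations = method.getParameterAnnotations();
        for (int i = 0; i < parameterAnnotations.length; i++) {
            for (Annotation annotation : parameterAnnotations[i]) {
                if (annotation.annotationType().equals(CallTimeout.class)) {
                    return (Long) args[i];
                }
            }
        }
        return defaultTimeout;
    }

    public static void main(String[] args) throws Exception {
        Method lookup = SampleGateway.class.getMethod("lookup", String.class, long.class);
        // The annotated argument (5000) wins over the default (10000).
        System.out.println(extractTimeout(lookup, new Object[]{"user-42", 5000L}, 10_000L));
    }
}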
protected CompletableFuture<?> ask(Object message, Time timeout) { return FutureUtils.toJava( Patterns.ask(rpcEndpoint, message, timeout.toMilliseconds())); }
Sends the message to the RPC endpoint and returns a future containing its response. @param message to send to the RPC endpoint @param timeout time to wait until the response future is failed with a {@link TimeoutException} @return Response future
private void openAllOperators() throws Exception { for (StreamOperator<?> operator : operatorChain.getAllOperators()) { if (operator != null) { operator.open(); } } }
Execute {@link StreamOperator#open()} of each operator in the chain of this {@link StreamTask}. Opening happens from <b>tail to head</b> operator in the chain, contrary to {@link StreamOperator#close()} which happens <b>head to tail</b> (see {@link #closeAllOperators()}).
private void closeAllOperators() throws Exception { // We need to close them first to last, since upstream operators in the chain might emit // elements in their close methods. StreamOperator<?>[] allOperators = operatorChain.getAllOperators(); for (int i = allOperators.length - 1; i >= 0; i--) { StreamOperator<?> operator = allOperators[i]; if (operator != null) { operator.close(); } } }
Execute {@link StreamOperator#close()} of each operator in the chain of this {@link StreamTask}. Closing happens from <b>head to tail</b> operator in the chain, contrary to {@link StreamOperator#open()} which happens <b>tail to head</b> (see {@link #openAllOperators()}).
private void tryDisposeAllOperators() throws Exception { for (StreamOperator<?> operator : operatorChain.getAllOperators()) { if (operator != null) { operator.dispose(); } } }
Execute {@link StreamOperator#dispose()} of each operator in the chain of this {@link StreamTask}. Disposing happens from <b>tail to head</b> operator in the chain.
private void disposeAllOperators() { if (operatorChain != null) { for (StreamOperator<?> operator : operatorChain.getAllOperators()) { try { if (operator != null) { operator.dispose(); } } catch (Throwable t) { LOG.error("Error during disposal of stream operator.", t); } } } }
Execute {@link StreamOperator#dispose()} of each operator in the chain of this {@link StreamTask}. Disposing happens from <b>tail to head</b> operator in the chain. <p>The difference from {@link #tryDisposeAllOperators()} is that, in case of an exception, this method catches and logs it instead of rethrowing.
@Override public boolean triggerCheckpoint( CheckpointMetaData checkpointMetaData, CheckpointOptions checkpointOptions, boolean advanceToEndOfEventTime) throws Exception { try { // No alignment if we inject a checkpoint CheckpointMetrics checkpointMetrics = new CheckpointMetrics() .setBytesBufferedInAlignment(0L) .setAlignmentDurationNanos(0L); return performCheckpoint(checkpointMetaData, checkpointOptions, checkpointMetrics, advanceToEndOfEventTime); } catch (Exception e) { // propagate exceptions only if the task is still in "running" state if (isRunning) { throw new Exception("Could not perform checkpoint " + checkpointMetaData.getCheckpointId() + " for operator " + getName() + '.', e); } else { LOG.debug("Could not perform checkpoint {} for operator {} while the " + "invokable was not in state running.", checkpointMetaData.getCheckpointId(), getName(), e); return false; } } }
------------------------------------------------------------------------
private StateBackend createStateBackend() throws Exception { final StateBackend fromApplication = configuration.getStateBackend(getUserCodeClassLoader()); return StateBackendLoader.fromApplicationOrConfigOrDefault( fromApplication, getEnvironment().getTaskManagerInfo().getConfiguration(), getUserCodeClassLoader(), LOG); }
------------------------------------------------------------------------
@Override public void handleAsyncException(String message, Throwable exception) { if (isRunning) { // only fail if the task is still running getEnvironment().failExternally(exception); } }
Handles an exception thrown by another thread (e.g. a TriggerTask), other than the one executing the main task by failing the task entirely. <p>In more detail, it marks task execution failed for an external reason (a reason other than the task code itself throwing an exception). If the task is already in a terminal state (such as FINISHED, CANCELED, FAILED), or if the task is already canceling this does nothing. Otherwise it sets the state to FAILED, and, if the invokable code is running, starts an asynchronous thread that aborts that code. <p>This method never blocks.
public Result registerReference(SharedStateRegistryKey registrationKey, StreamStateHandle state) { Preconditions.checkNotNull(state); StreamStateHandle scheduledStateDeletion = null; SharedStateRegistry.SharedStateEntry entry; synchronized (registeredStates) { Preconditions.checkState(open, "Attempt to register state to closed SharedStateRegistry."); entry = registeredStates.get(registrationKey); if (entry == null) { // Additional check that should never fail, because only state handles that are not placeholders should // ever be inserted to the registry. Preconditions.checkState(!isPlaceholder(state), "Attempt to reference unknown state: " + registrationKey); entry = new SharedStateRegistry.SharedStateEntry(state); registeredStates.put(registrationKey, entry); } else { // delete if this is a real duplicate if (!Objects.equals(state, entry.stateHandle)) { scheduledStateDeletion = state; LOG.trace("Identified duplicate state registration under key {}. New state {} was determined to " + "be an unnecessary copy of existing state {} and will be dropped.", registrationKey, state, entry.stateHandle); } entry.increaseReferenceCount(); } } scheduleAsyncDelete(scheduledStateDeletion); LOG.trace("Registered shared state {} under key {}.", entry, registrationKey); return new Result(entry); }
Register a reference to the given shared state in the registry. This does the following: We check if the state handle is actually new by the registrationKey. If it is new, we register it with a reference count of 1. If there is already a state handle registered under the given key, we dispose the given "new" state handle, uptick the reference count of the previously existing state handle and return it as a replacement with the result. <p>IMPORTANT: caller should check the state handle returned by the result, because the registry is performing de-duplication and could potentially return a handle that is supposed to replace the one from the registration request. @param state the shared state for which we register a reference. @return the result of this registration request, consisting of the state handle that is registered under the key by the end of the operation and its current reference count.
public Result unregisterReference(SharedStateRegistryKey registrationKey) { Preconditions.checkNotNull(registrationKey); final Result result; final StreamStateHandle scheduledStateDeletion; SharedStateRegistry.SharedStateEntry entry; synchronized (registeredStates) { entry = registeredStates.get(registrationKey); Preconditions.checkState(entry != null, "Cannot unregister a state that is not registered."); entry.decreaseReferenceCount(); // Remove the state from the registry when it's not referenced any more. if (entry.getReferenceCount() <= 0) { registeredStates.remove(registrationKey); scheduledStateDeletion = entry.getStateHandle(); result = new Result(null, 0); } else { scheduledStateDeletion = null; result = new Result(entry); } } LOG.trace("Unregistered shared state {} under key {}.", entry, registrationKey); scheduleAsyncDelete(scheduledStateDeletion); return result; }
Releases one reference to the given shared state in the registry. This decreases the reference count by one. Once the count reaches zero, the shared state is deleted. @param registrationKey the shared state for which we release a reference. @return the result of the request, consisting of the reference count after this operation and the state handle, or null if the state handle was deleted through this request. Returns null if the registry was previously closed.
public void registerAll(Iterable<? extends CompositeStateHandle> stateHandles) { if (stateHandles == null) { return; } synchronized (registeredStates) { for (CompositeStateHandle stateHandle : stateHandles) { stateHandle.registerSharedStates(this); } } }
Register given shared states in the registry. @param stateHandles The shared states to register.
private boolean hasTimestamp(Iterable<TimestampedValue<Object>> elements) { Iterator<TimestampedValue<Object>> it = elements.iterator(); if (it.hasNext()) { return it.next().hasTimestamp(); } return false; }
Returns true if the first element in the Iterable of {@link TimestampedValue} has a timestamp.
public static <W extends Window> TimeEvictor<W> of(Time windowSize) { return new TimeEvictor<>(windowSize.toMilliseconds()); }
Creates a {@code TimeEvictor} that keeps elements for the given amount of time. Eviction is done before the window function. @param windowSize The amount of time for which to keep elements.
public static SlidingEventTimeWindows of(Time size, Time slide) { return new SlidingEventTimeWindows(size.toMilliseconds(), slide.toMilliseconds(), 0); }
Creates a new {@code SlidingEventTimeWindows} {@link WindowAssigner} that assigns elements to sliding time windows based on the element timestamp. @param size The size of the generated windows. @param slide The slide interval of the generated windows. @return The time policy.
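For orientation, here is a sketch of how the two factories above (together with the TimeEvictor from the previous entry) are typically wired into a DataStream pipeline. The imports, the AscendingTimestampExtractor, and the sample data are assumptions based on the 1.x DataStream API; exact class locations and signatures vary between Flink versions.

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.timestamps.AscendingTimestampExtractor;
import org.apache.flink.streaming.api.windowing.assigners.SlidingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.evictors.TimeEvictor;
import org.apache.flink.streaming.api.windowing.time.Time;

public class SlidingWindowSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

        env.fromElements(
                Tuple2.of("sensor-1", 1_000L),
                Tuple2.of("sensor-1", 4_000L),
                Tuple2.of("sensor-1", 9_000L))
            // event-time windows need timestamps and watermarks
            .assignTimestampsAndWatermarks(new AscendingTimestampExtractor<Tuple2<String, Long>>() {
                @Override
                public long extractAscendingTimestamp(Tuple2<String, Long> element) {
                    return element.f1;
                }
            })
            .keyBy(value -> value.f0)
            // windows of 10 seconds that slide by 5 seconds
            .window(SlidingEventTimeWindows.of(Time.seconds(10), Time.seconds(5)))
            // keep only the last 10 seconds of elements before the window function runs
            .evictor(TimeEvictor.of(Time.seconds(10)))
            .reduce((a, b) -> Tuple2.of(a.f0, Math.max(a.f1, b.f1)))
            .print();

        env.execute("sliding-event-time-window-sketch");
    }
}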
public boolean setCancellerHandle(ScheduledFuture<?> cancellerHandle) { synchronized (lock) { if (this.cancellerHandle == null) { if (!discarded) { this.cancellerHandle = cancellerHandle; return true; } else { return false; } } else { throw new IllegalStateException("A canceller handle was already set"); } } }
Sets the handle for the canceller to this pending checkpoint. This method fails with an exception if a handle has already been set. @return <code>true</code> if the handle was set, <code>false</code> if the checkpoint is already disposed.
public TaskAcknowledgeResult acknowledgeTask( ExecutionAttemptID executionAttemptId, TaskStateSnapshot operatorSubtaskStates, CheckpointMetrics metrics) { synchronized (lock) { if (discarded) { return TaskAcknowledgeResult.DISCARDED; } final ExecutionVertex vertex = notYetAcknowledgedTasks.remove(executionAttemptId); if (vertex == null) { if (acknowledgedTasks.contains(executionAttemptId)) { return TaskAcknowledgeResult.DUPLICATE; } else { return TaskAcknowledgeResult.UNKNOWN; } } else { acknowledgedTasks.add(executionAttemptId); } List<OperatorID> operatorIDs = vertex.getJobVertex().getOperatorIDs(); int subtaskIndex = vertex.getParallelSubtaskIndex(); long ackTimestamp = System.currentTimeMillis(); long stateSize = 0L; if (operatorSubtaskStates != null) { for (OperatorID operatorID : operatorIDs) { OperatorSubtaskState operatorSubtaskState = operatorSubtaskStates.getSubtaskStateByOperatorID(operatorID); // if no real operatorSubtaskState was reported, we insert an empty state if (operatorSubtaskState == null) { operatorSubtaskState = new OperatorSubtaskState(); } OperatorState operatorState = operatorStates.get(operatorID); if (operatorState == null) { operatorState = new OperatorState( operatorID, vertex.getTotalNumberOfParallelSubtasks(), vertex.getMaxParallelism()); operatorStates.put(operatorID, operatorState); } operatorState.putState(subtaskIndex, operatorSubtaskState); stateSize += operatorSubtaskState.getStateSize(); } } ++numAcknowledgedTasks; // publish the checkpoint statistics // to prevent null-pointers from concurrent modification, copy reference onto stack final PendingCheckpointStats statsCallback = this.statsCallback; if (statsCallback != null) { // Do this in millis because the web frontend works with them long alignmentDurationMillis = metrics.getAlignmentDurationNanos() / 1_000_000; SubtaskStateStats subtaskStateStats = new SubtaskStateStats( subtaskIndex, ackTimestamp, stateSize, metrics.getSyncDurationMillis(), metrics.getAsyncDurationMillis(), metrics.getBytesBufferedInAlignment(), alignmentDurationMillis); statsCallback.reportSubtaskStats(vertex.getJobvertexId(), subtaskStateStats); } return TaskAcknowledgeResult.SUCCESS; } }
Acknowledges the task with the given execution attempt id and the given subtask state. @param executionAttemptId of the acknowledged task @param operatorSubtaskStates of the acknowledged task @param metrics Checkpoint metrics for the stats @return TaskAcknowledgeResult of the operation
public void addMasterState(MasterState state) { checkNotNull(state); synchronized (lock) { if (!discarded) { masterState.add(state); } } }
Adds a master state (state generated on the checkpoint coordinator) to the pending checkpoint. @param state The state to add
public void abort(CheckpointFailureReason reason, Throwable cause) { try { CheckpointException exception = new CheckpointException(reason, cause); onCompletionPromise.completeExceptionally(exception); reportFailedCheckpoint(exception); assertAbortSubsumedForced(reason); } finally { dispose(true); } }
Aborts a checkpoint with reason and cause.
private void reportFailedCheckpoint(Exception cause) { // to prevent null-pointers from concurrent modification, copy reference onto stack final PendingCheckpointStats statsCallback = this.statsCallback; if (statsCallback != null) { long failureTimestamp = System.currentTimeMillis(); statsCallback.reportFailedCheckpoint(failureTimestamp, cause); } }
Reports a failed checkpoint with the given optional cause. @param cause The failure cause or <code>null</code>.
public PythonWindowedStream count_window(long size, long slide) { return new PythonWindowedStream<GlobalWindow>(this.stream.countWindow(size, slide)); }
A thin wrapper layer over {@link KeyedStream#countWindow(long, long)}. @param size The size of the windows in number of elements. @param slide The slide interval in number of elements. @return The python windowed stream {@link PythonWindowedStream}
public PythonWindowedStream time_window(Time size, Time slide) { return new PythonWindowedStream<TimeWindow>(this.stream.timeWindow(size, slide)); }
A thin wrapper layer over {@link KeyedStream#timeWindow(Time, Time)}. @param size The size of the window. @param slide The slide interval of the window. @return The python wrapper {@link PythonWindowedStream}
public PythonSingleOutputStreamOperator reduce(ReduceFunction<PyObject> reducer) throws IOException { return new PythonSingleOutputStreamOperator(this.stream.reduce(new PythonReduceFunction(reducer))); }
A thin wrapper layer over {@link KeyedStream#reduce(ReduceFunction)}. @param reducer The {@link ReduceFunction} that will be called for every element of the input values with the same key. @return The transformed data stream {@link PythonSingleOutputStreamOperator}.
@Override public void configure(Configuration parameters) { super.configure(parameters); if (charsetName == null || !Charset.isSupported(charsetName)) { throw new RuntimeException("Unsupported charset: " + charsetName); } if (charsetName.equalsIgnoreCase(StandardCharsets.US_ASCII.name())) { ascii = true; } this.decoder = Charset.forName(charsetName).newDecoder(); this.byteWrapper = ByteBuffer.allocate(1); }
--------------------------------------------------------------------------------------------
@Override public StringValue readRecord(StringValue reuse, byte[] bytes, int offset, int numBytes) { if (this.ascii) { reuse.setValueAscii(bytes, offset, numBytes); return reuse; } else { ByteBuffer byteWrapper = this.byteWrapper; if (bytes != byteWrapper.array()) { byteWrapper = ByteBuffer.wrap(bytes, 0, bytes.length); this.byteWrapper = byteWrapper; } byteWrapper.limit(offset + numBytes); byteWrapper.position(offset); try { CharBuffer result = this.decoder.decode(byteWrapper); reuse.setValue(result); return reuse; } catch (CharacterCodingException e) { if (skipInvalidLines) { return null; } else { byte[] copy = new byte[numBytes]; System.arraycopy(bytes, offset, copy, 0, numBytes); throw new RuntimeException("Line could not be encoded: " + Arrays.toString(copy), e); } } } }
--------------------------------------------------------------------------------------------
static TypeInformation schemaToTypeInfo(TypeDescription schema) { switch (schema.getCategory()) { case BOOLEAN: return BasicTypeInfo.BOOLEAN_TYPE_INFO; case BYTE: return BasicTypeInfo.BYTE_TYPE_INFO; case SHORT: return BasicTypeInfo.SHORT_TYPE_INFO; case INT: return BasicTypeInfo.INT_TYPE_INFO; case LONG: return BasicTypeInfo.LONG_TYPE_INFO; case FLOAT: return BasicTypeInfo.FLOAT_TYPE_INFO; case DOUBLE: return BasicTypeInfo.DOUBLE_TYPE_INFO; case DECIMAL: return BasicTypeInfo.BIG_DEC_TYPE_INFO; case STRING: case CHAR: case VARCHAR: return BasicTypeInfo.STRING_TYPE_INFO; case DATE: return SqlTimeTypeInfo.DATE; case TIMESTAMP: return SqlTimeTypeInfo.TIMESTAMP; case BINARY: return PrimitiveArrayTypeInfo.BYTE_PRIMITIVE_ARRAY_TYPE_INFO; case STRUCT: List<TypeDescription> fieldSchemas = schema.getChildren(); TypeInformation[] fieldTypes = new TypeInformation[fieldSchemas.size()]; for (int i = 0; i < fieldSchemas.size(); i++) { fieldTypes[i] = schemaToTypeInfo(fieldSchemas.get(i)); } String[] fieldNames = schema.getFieldNames().toArray(new String[]{}); return new RowTypeInfo(fieldTypes, fieldNames); case LIST: TypeDescription elementSchema = schema.getChildren().get(0); TypeInformation<?> elementType = schemaToTypeInfo(elementSchema); // arrays of primitive types are handled as object arrays to support null values return ObjectArrayTypeInfo.getInfoFor(elementType); case MAP: TypeDescription keySchema = schema.getChildren().get(0); TypeDescription valSchema = schema.getChildren().get(1); TypeInformation<?> keyType = schemaToTypeInfo(keySchema); TypeInformation<?> valType = schemaToTypeInfo(valSchema); return new MapTypeInfo<>(keyType, valType); case UNION: throw new UnsupportedOperationException("UNION type is not supported yet."); default: throw new IllegalArgumentException("Unknown type " + schema); } }
Converts an ORC schema to a Flink TypeInformation. @param schema The ORC schema. @return The TypeInformation that corresponds to the ORC schema.
static int fillRows(Row[] rows, TypeDescription schema, VectorizedRowBatch batch, int[] selectedFields) { int rowsToRead = Math.min((int) batch.count(), rows.length); List<TypeDescription> fieldTypes = schema.getChildren(); // read each selected field for (int fieldIdx = 0; fieldIdx < selectedFields.length; fieldIdx++) { int orcIdx = selectedFields[fieldIdx]; readField(rows, fieldIdx, fieldTypes.get(orcIdx), batch.cols[orcIdx], rowsToRead); } return rowsToRead; }
Fills an ORC batch into an array of Row. @param rows The batch of rows to be filled. @param schema The schema of the ORC data. @param batch The ORC data. @param selectedFields The list of selected ORC fields. @return The number of rows that were filled.
private static void readField(Object[] vals, int fieldIdx, TypeDescription schema, ColumnVector vector, int childCount) { // check the type of the vector to decide how to read it. switch (schema.getCategory()) { case BOOLEAN: if (vector.noNulls) { readNonNullLongColumn(vals, fieldIdx, (LongColumnVector) vector, childCount, OrcBatchReader::readBoolean); } else { readLongColumn(vals, fieldIdx, (LongColumnVector) vector, childCount, OrcBatchReader::readBoolean); } break; case BYTE: if (vector.noNulls) { readNonNullLongColumn(vals, fieldIdx, (LongColumnVector) vector, childCount, OrcBatchReader::readByte); } else { readLongColumn(vals, fieldIdx, (LongColumnVector) vector, childCount, OrcBatchReader::readByte); } break; case SHORT: if (vector.noNulls) { readNonNullLongColumn(vals, fieldIdx, (LongColumnVector) vector, childCount, OrcBatchReader::readShort); } else { readLongColumn(vals, fieldIdx, (LongColumnVector) vector, childCount, OrcBatchReader::readShort); } break; case INT: if (vector.noNulls) { readNonNullLongColumn(vals, fieldIdx, (LongColumnVector) vector, childCount, OrcBatchReader::readInt); } else { readLongColumn(vals, fieldIdx, (LongColumnVector) vector, childCount, OrcBatchReader::readInt); } break; case LONG: if (vector.noNulls) { readNonNullLongColumn(vals, fieldIdx, (LongColumnVector) vector, childCount, OrcBatchReader::readLong); } else { readLongColumn(vals, fieldIdx, (LongColumnVector) vector, childCount, OrcBatchReader::readLong); } break; case FLOAT: if (vector.noNulls) { readNonNullDoubleColumn(vals, fieldIdx, (DoubleColumnVector) vector, childCount, OrcBatchReader::readFloat); } else { readDoubleColumn(vals, fieldIdx, (DoubleColumnVector) vector, childCount, OrcBatchReader::readFloat); } break; case DOUBLE: if (vector.noNulls) { readNonNullDoubleColumn(vals, fieldIdx, (DoubleColumnVector) vector, childCount, OrcBatchReader::readDouble); } else { readDoubleColumn(vals, fieldIdx, (DoubleColumnVector) vector, childCount, OrcBatchReader::readDouble); } break; case CHAR: case VARCHAR: case STRING: if (vector.noNulls) { readNonNullBytesColumnAsString(vals, fieldIdx, (BytesColumnVector) vector, childCount); } else { readBytesColumnAsString(vals, fieldIdx, (BytesColumnVector) vector, childCount); } break; case DATE: if (vector.noNulls) { readNonNullLongColumnAsDate(vals, fieldIdx, (LongColumnVector) vector, childCount); } else { readLongColumnAsDate(vals, fieldIdx, (LongColumnVector) vector, childCount); } break; case TIMESTAMP: if (vector.noNulls) { readNonNullTimestampColumn(vals, fieldIdx, (TimestampColumnVector) vector, childCount); } else { readTimestampColumn(vals, fieldIdx, (TimestampColumnVector) vector, childCount); } break; case BINARY: if (vector.noNulls) { readNonNullBytesColumnAsBinary(vals, fieldIdx, (BytesColumnVector) vector, childCount); } else { readBytesColumnAsBinary(vals, fieldIdx, (BytesColumnVector) vector, childCount); } break; case DECIMAL: if (vector.noNulls) { readNonNullDecimalColumn(vals, fieldIdx, (DecimalColumnVector) vector, childCount); } else { readDecimalColumn(vals, fieldIdx, (DecimalColumnVector) vector, childCount); } break; case STRUCT: if (vector.noNulls) { readNonNullStructColumn(vals, fieldIdx, (StructColumnVector) vector, schema, childCount); } else { readStructColumn(vals, fieldIdx, (StructColumnVector) vector, schema, childCount); } break; case LIST: if (vector.noNulls) { readNonNullListColumn(vals, fieldIdx, (ListColumnVector) vector, schema, childCount); } else { readListColumn(vals, fieldIdx, (ListColumnVector) vector, schema, 
childCount); } break; case MAP: if (vector.noNulls) { readNonNullMapColumn(vals, fieldIdx, (MapColumnVector) vector, schema, childCount); } else { readMapColumn(vals, fieldIdx, (MapColumnVector) vector, schema, childCount); } break; case UNION: throw new UnsupportedOperationException("UNION type not supported yet"); default: throw new IllegalArgumentException("Unknown type " + schema); } }
Reads a vector of data into an array of objects. @param vals The array that needs to be filled. @param fieldIdx If the vals array is an array of Row, the index of the field that needs to be filled. Otherwise a -1 must be passed and the data is directly filled into the array. @param schema The schema of the vector to read. @param vector The vector to read. @param childCount The number of vector entries to read.
private static void fillColumnWithRepeatingValue(Object[] vals, int fieldIdx, Object repeatingValue, int childCount) { if (fieldIdx == -1) { // set value as an object Arrays.fill(vals, 0, childCount, repeatingValue); } else { // set value as a field of Row Row[] rows = (Row[]) vals; for (int i = 0; i < childCount; i++) { rows[i].setField(fieldIdx, repeatingValue); } } }
Sets a repeating value to all objects or row fields of the passed vals array. @param vals The array of objects or Rows. @param fieldIdx If the objs array is an array of Row, the index of the field that needs to be filled. Otherwise a -1 must be passed and the data is directly filled into the array. @param repeatingValue The value that is set. @param childCount The number of times the value is set.
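A small, self-contained illustration of the two branches described above, using Flink's Row type directly rather than calling the (package-private) helper; the arity and values are made up.

import java.util.Arrays;
import org.apache.flink.types.Row;

public class RepeatingValueSketch {
    public static void main(String[] args) {
        // fieldIdx == -1: the target is a plain Object[] and the value is written directly.
        Object[] flat = new Object[4];
        Arrays.fill(flat, 0, 3, "repeated");
        System.out.println(Arrays.toString(flat)); // [repeated, repeated, repeated, null]

        // fieldIdx >= 0: the target is an array of Row and only the given field of each row is set.
        Row[] rows = new Row[3];
        for (int i = 0; i < rows.length; i++) {
            rows[i] = new Row(2);
            rows[i].setField(1, "repeated");
        }
        System.out.println(Arrays.toString(rows)); // field 1 of every row is now "repeated"
    }
}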
private void setChaining(Map<Integer, byte[]> hashes, List<Map<Integer, byte[]>> legacyHashes, Map<Integer, List<Tuple2<byte[], byte[]>>> chainedOperatorHashes) { for (Integer sourceNodeId : streamGraph.getSourceIDs()) { createChain(sourceNodeId, sourceNodeId, hashes, legacyHashes, 0, chainedOperatorHashes); } }
Sets up task chains from the source {@link StreamNode} instances. <p>This will recursively create all {@link JobVertex} instances.
public static String lpad(String base, int len, String pad) { if (len < 0 || "".equals(pad)) { return null; } else if (len == 0) { return ""; } char[] data = new char[len]; char[] baseChars = base.toCharArray(); char[] padChars = pad.toCharArray(); // the length of the padding needed int pos = Math.max(len - base.length(), 0); // copy the padding for (int i = 0; i < pos; i += pad.length()) { for (int j = 0; j < pad.length() && j < pos - i; j++) { data[i + j] = padChars[j]; } } // copy the base int i = 0; while (pos + i < len && i < base.length()) { data[pos + i] = baseChars[i]; i += 1; } return new String(data); }
Returns the string str left-padded with the string pad to a length of len characters. If str is longer than len, the return value is shortened to len characters. Returns null if len is negative or pad is empty.
public static String rpad(String base, int len, String pad) { if (len < 0 || "".equals(pad)) { return null; } else if (len == 0) { return ""; } char[] data = new char[len]; char[] baseChars = base.toCharArray(); char[] padChars = pad.toCharArray(); int pos = 0; // copy the base while (pos < base.length() && pos < len) { data[pos] = baseChars[pos]; pos += 1; } // copy the padding while (pos < len) { int i = 0; while (i < pad.length() && i < len - pos) { data[pos + i] = padChars[i]; i += 1; } pos += pad.length(); } return new String(data); }
Returns the string str right-padded with the string pad to a length of len characters. If str is longer than len, the return value is shortened to len characters. Returns null if len is negative or pad is empty.
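To make the padding and truncation rules of lpad/rpad concrete, here is a short re-derivation of the documented behavior in plain Java; it is an illustrative re-implementation with the expected outputs in comments, not the code above.

public class PadSketch {

    // Mirrors the documented lpad semantics: pad on the left, truncate to len.
    static String lpad(String base, int len, String pad) {
        if (len < 0 || pad.isEmpty()) {
            return null;
        }
        StringBuilder sb = new StringBuilder(len);
        int padLen = Math.max(len - base.length(), 0);
        while (sb.length() < padLen) {
            sb.append(pad, 0, Math.min(pad.length(), padLen - sb.length()));
        }
        sb.append(base, 0, Math.min(base.length(), len - sb.length()));
        return sb.toString();
    }

    // Mirrors the documented rpad semantics: copy the base first, then pad on the right.
    static String rpad(String base, int len, String pad) {
        if (len < 0 || pad.isEmpty()) {
            return null;
        }
        StringBuilder sb = new StringBuilder(len);
        sb.append(base, 0, Math.min(base.length(), len));
        while (sb.length() < len) {
            sb.append(pad, 0, Math.min(pad.length(), len - sb.length()));
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        System.out.println(lpad("hi", 5, "xy"));   // xyxhi
        System.out.println(rpad("hi", 5, "xy"));   // hixyx
        System.out.println(lpad("hello", 3, "x")); // hel (shortened to len)
        System.out.println(rpad("hi", -1, "x"));   // null (negative length)
    }
}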
public static String replace(String str, String oldStr, String replacement) { return str.replace(oldStr, replacement); }
Replaces all occurrences of the old string in str with the replacement string.
public static String splitIndex(String str, String separator, int index) { if (index < 0) { return null; } String[] values = StringUtils.splitByWholeSeparatorPreserveAllTokens(str, separator); if (index >= values.length) { return null; } else { return values[index]; } }
Splits the target string with a custom separator and picks the index-th (0-based) result. @param str target string. @param separator custom separator. @param index index of the result which you want. @return the string at the index of the split results, or null if the index is out of range.
public static String splitIndex(String str, int character, int index) { if (character > 255 || character < 1 || index < 0) { return null; } String[] values = StringUtils.splitPreserveAllTokens(str, (char) character); if (index >= values.length) { return null; } else { return values[index]; } }
Splits the target string with a custom separator character and picks the index-th (0-based) result. @param str target string. @param character int value of the separator character; must be in the range 1 to 255. @param index index of the result which you want. @return the string at the index of the split results, or null if the index is out of range.
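Both overloads delegate to commons-lang3's StringUtils, so the documented behavior can be checked against it directly. A small sketch, assuming commons-lang3 is on the classpath; the sample strings are made up.

import org.apache.commons.lang3.StringUtils;

public class SplitIndexSketch {
    public static void main(String[] args) {
        // String-separator overload: splitIndex("jack,john,mary", ",", 2) is expected to yield "mary".
        String[] byString = StringUtils.splitByWholeSeparatorPreserveAllTokens("jack,john,mary", ",");
        System.out.println(byString[2]); // mary

        // Character-code overload: splitIndex("jack,john,mary", 44, 1) is expected to yield "john"
        // (44 is the code of ','; codes outside 1..255 make the function return null).
        String[] byChar = StringUtils.splitPreserveAllTokens("jack,john,mary", (char) 44);
        System.out.println(byChar[1]); // john

        // An out-of-range index yields null rather than an exception.
        int index = 5;
        System.out.println(index >= byString.length ? null : byString[index]); // null
    }
}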
public static String regexpReplace(String str, String regex, String replacement) { if (regex.isEmpty()) { return str; } try { // we should use StringBuffer here because Matcher only accept it StringBuffer sb = new StringBuffer(); Matcher m = REGEXP_PATTERN_CACHE.get(regex).matcher(str); while (m.find()) { m.appendReplacement(sb, replacement); } m.appendTail(sb); return sb.toString(); } catch (Exception e) { LOG.error( String.format("Exception in regexpReplace('%s', '%s', '%s')", str, regex, replacement), e); // return null if exception in regex replace return null; } }
Returns a string resulting from replacing all substrings that match the regular expression with replacement.
public static String regexpExtract(String str, String regex, int extractIndex) { if (extractIndex < 0) { return null; } try { Matcher m = REGEXP_PATTERN_CACHE.get(regex).matcher(str); if (m.find()) { MatchResult mr = m.toMatchResult(); return mr.group(extractIndex); } return null; } catch (Exception e) { LOG.error( String.format("Exception in regexpExtract('%s', '%s', '%d')", str, regex, extractIndex), e); return null; } }
Returns a string extracted with a specified regular expression and a regex match group index.
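Both functions are thin wrappers around java.util.regex. A self-contained sketch of the same appendReplacement/appendTail loop and group extraction; the sample inputs are made up, and the comments note what the corresponding wrapper calls are expected to return.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class RegexpSketch {
    public static void main(String[] args) {
        // regexpReplace("2021 06 15", "\\s+", "-") is expected to yield "2021-06-15";
        // the implementation above runs exactly this appendReplacement/appendTail loop.
        StringBuffer sb = new StringBuffer();
        Matcher replaceMatcher = Pattern.compile("\\s+").matcher("2021 06 15");
        while (replaceMatcher.find()) {
            replaceMatcher.appendReplacement(sb, "-");
        }
        replaceMatcher.appendTail(sb);
        System.out.println(sb); // 2021-06-15

        // regexpExtract("foo123bar", "([a-z]+)(\\d+)", 2) is expected to yield "123":
        // group 2 of the first match is returned.
        Matcher extractMatcher = Pattern.compile("([a-z]+)(\\d+)").matcher("foo123bar");
        System.out.println(extractMatcher.find() ? extractMatcher.group(2) : null); // 123
    }
}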
public static BinaryString keyValue( BinaryString str, BinaryString pairSeparator, BinaryString kvSeparator, BinaryString keyName) { if (str == null || str.getSizeInBytes() == 0) { return null; } if (pairSeparator != null && pairSeparator.getSizeInBytes() == 1 && kvSeparator != null && kvSeparator.getSizeInBytes() == 1) { return str.keyValue(pairSeparator.getByte(0), kvSeparator.getByte(0), keyName); } else { return BinaryString.fromString( keyValue( BinaryString.safeToString(str), BinaryString.safeToString(pairSeparator), BinaryString.safeToString(kvSeparator), BinaryString.safeToString(keyName))); } }
Parses the string as a key-value string and returns the value that matches the key name. Example: keyvalue('k1=v1;k2=v2', ';', '=', 'k2') = 'v2', keyvalue('k1:v1,k2:v2', ',', ':', 'k3') = NULL @param str target string. @param pairSeparator separator between key-value tuples. @param kvSeparator separator between key and value. @param keyName name of the key whose value you want to return. @return target value.
public static String hash(String algorithm, String str, String charsetName) { try { byte[] digest = MessageDigest .getInstance(algorithm) .digest(strToBytesWithCharset(str, charsetName)); return EncodingUtils.hex(digest); } catch (NoSuchAlgorithmException e) { throw new IllegalArgumentException("Unsupported algorithm: " + algorithm, e); } }
Calculate the hash value of a given string. @param algorithm message digest algorithm. @param str string to hash. @param charsetName charset of string. @return hash value of string.
public static String parseUrl(String urlStr, String partToExtract) { URL url; try { url = URL_CACHE.get(urlStr); } catch (Exception e) { LOG.error("Parse URL error: " + urlStr, e); return null; } if ("HOST".equals(partToExtract)) { return url.getHost(); } if ("PATH".equals(partToExtract)) { return url.getPath(); } if ("QUERY".equals(partToExtract)) { return url.getQuery(); } if ("REF".equals(partToExtract)) { return url.getRef(); } if ("PROTOCOL".equals(partToExtract)) { return url.getProtocol(); } if ("FILE".equals(partToExtract)) { return url.getFile(); } if ("AUTHORITY".equals(partToExtract)) { return url.getAuthority(); } if ("USERINFO".equals(partToExtract)) { return url.getUserInfo(); } return null; }
Parses a URL and returns the requested component of the URL. Returns null if any argument is null. @param urlStr URL string. @param partToExtract determines which component is returned. Accepted values: HOST, PATH, QUERY, REF, PROTOCOL, FILE, AUTHORITY, USERINFO. @return target value.
public static String parseUrl(String urlStr, String partToExtract, String key) { if (!"QUERY".equals(partToExtract)) { return null; } String query = parseUrl(urlStr, partToExtract); if (query == null) { return null; } Pattern p = Pattern.compile("(&|^)" + Pattern.quote(key) + "=([^&]*)"); Matcher m = p.matcher(query); if (m.find()) { return m.group(2); } return null; }
Parses a URL and returns the value of the given query parameter. Returns null if any argument is null. @param urlStr URL string. @param partToExtract must be QUERY, otherwise null is returned. @param key parameter name. @return target value.
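Both overloads build on java.net.URL plus a small query regex, so the results can be reproduced with the JDK alone. A sketch with a made-up URL; the comments show what the corresponding parseUrl calls are expected to return.

import java.net.URL;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ParseUrlSketch {
    public static void main(String[] args) throws Exception {
        String urlStr = "http://flink.apache.org/docs/latest?user=alice&lang=en#top";
        URL url = new URL(urlStr);

        System.out.println(url.getHost());  // parseUrl(urlStr, "HOST")  -> flink.apache.org
        System.out.println(url.getPath());  // parseUrl(urlStr, "PATH")  -> /docs/latest
        System.out.println(url.getQuery()); // parseUrl(urlStr, "QUERY") -> user=alice&lang=en

        // parseUrl(urlStr, "QUERY", "lang") -> "en": the same regex as above pulls a
        // single parameter out of the query string.
        Pattern p = Pattern.compile("(&|^)" + Pattern.quote("lang") + "=([^&]*)");
        Matcher m = p.matcher(url.getQuery());
        System.out.println(m.find() ? m.group(2) : null); // en
    }
}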
public static String hex(String x) { return EncodingUtils.hex(x.getBytes(StandardCharsets.UTF_8)).toUpperCase(); }
Returns the hex string of a string argument.
public static Map<String, String> strToMap(String text, String listDelimiter, String keyValueDelimiter) { if (StringUtils.isEmpty(text)) { return EMPTY_MAP; } String[] keyValuePairs = text.split(listDelimiter); Map<String, String> ret = new HashMap<>(keyValuePairs.length); for (String keyValuePair : keyValuePairs) { String[] keyValue = keyValuePair.split(keyValueDelimiter, 2); if (keyValue.length < 2) { ret.put(keyValuePair, null); } else { ret.put(keyValue[0], keyValue[1]); } } return ret; }
Creates a map by parsing text. Splits text into key-value pairs using two delimiters. The first delimiter separates pairs, and the second delimiter separates key and value. @param text the input text @param listDelimiter the delimiter that separates pairs @param keyValueDelimiter the delimiter that separates key and value @return the map
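A plain-JDK sketch of the documented parsing, including a pair without the key-value delimiter to show the null-value case; the input string is made up, and note that String#split treats both delimiters as regular expressions.

import java.util.HashMap;
import java.util.Map;

public class StrToMapSketch {
    public static void main(String[] args) {
        // strToMap("k1=v1,k2=v2,flag", ",", "=") is expected to produce
        // k1 -> v1, k2 -> v2 and flag -> null (a pair without '=' keeps a null value).
        String text = "k1=v1,k2=v2,flag";
        Map<String, String> map = new HashMap<>();
        for (String pair : text.split(",")) {
            String[] kv = pair.split("=", 2);
            map.put(kv[0], kv.length < 2 ? null : kv[1]);
        }
        System.out.println(map); // e.g. {k1=v1, flag=null, k2=v2} (HashMap order is unspecified)
    }
}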
public static BigDecimal sround(BigDecimal b0, int b1) { return b0.movePointRight(b1) .setScale(0, RoundingMode.HALF_UP).movePointLeft(b1); }
SQL <code>ROUND</code> operator applied to BigDecimal values.
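Since the implementation is just the two BigDecimal calls shown above, the rounding is easy to reproduce; a short sketch with made-up values.

import java.math.BigDecimal;
import java.math.RoundingMode;

public class SroundSketch {
    public static void main(String[] args) {
        // sround(123.455, 2): shift right by 2 -> 12345.5, HALF_UP -> 12346, shift back -> 123.46
        System.out.println(new BigDecimal("123.455")
            .movePointRight(2).setScale(0, RoundingMode.HALF_UP).movePointLeft(2)); // 123.46

        // sround(2.5, 0): ties round away from zero with HALF_UP -> 3
        System.out.println(new BigDecimal("2.5")
            .movePointRight(0).setScale(0, RoundingMode.HALF_UP).movePointLeft(0)); // 3
    }
}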
@Nonnull public ConsumerRecords<byte[], byte[]> pollNext() throws Exception { synchronized (lock) { while (next == null && error == null) { lock.wait(); } ConsumerRecords<byte[], byte[]> n = next; if (n != null) { next = null; lock.notifyAll(); return n; } else { ExceptionUtils.rethrowException(error, error.getMessage()); // this statement cannot be reached since the above method always throws an exception // this is only here to silence the compiler and any warnings return ConsumerRecords.empty(); } } }
Polls the next element from the Handover, possibly blocking until the next element is available. This method behaves similarly to polling from a blocking queue. <p>If an exception was handed in by the producer ({@link #reportError(Throwable)}), then that exception is thrown rather than an element being returned. @return The next element (buffer of records, never null). @throws ClosedException Thrown if the Handover was {@link #close() closed}. @throws Exception Rethrows exceptions from the {@link #reportError(Throwable)} method.
public void produce(final ConsumerRecords<byte[], byte[]> element) throws InterruptedException, WakeupException, ClosedException { checkNotNull(element); synchronized (lock) { while (next != null && !wakeupProducer) { lock.wait(); } wakeupProducer = false; // if there is still an element, we must have been woken up if (next != null) { throw new WakeupException(); } // if there is no error, then this is open and can accept this element else if (error == null) { next = element; lock.notifyAll(); } // an error marks this as closed for the producer else { throw new ClosedException(); } } }
Hands over an element from the producer. If the Handover already has an element that was not yet picked up by the consumer thread, this call blocks until the consumer picks up that previous element. <p>This behavior is similar to a "size one" blocking queue. @param element The next element to hand over. @throws InterruptedException Thrown, if the thread is interrupted while blocking for the Handover to be empty. @throws WakeupException Thrown, if the {@link #wakeupProducer()} method is called while blocking for the Handover to be empty. @throws ClosedException Thrown if the Handover was closed or concurrently being closed.
public void reportError(Throwable t) { checkNotNull(t); synchronized (lock) { // do not override the initial exception if (error == null) { error = t; } next = null; lock.notifyAll(); } }
Reports an exception. The consumer will throw the given exception immediately, if it is currently blocked in the {@link #pollNext()} method, or the next time it calls that method. <p>After this method has been called, no call to either {@link #produce(ConsumerRecords)} or {@link #pollNext()} will ever return regularly any more, but will always return exceptionally. <p>If another exception was already reported, this method does nothing. <p>For the producer, the Handover will appear as if it was {@link #close() closed}. @param t The exception to report.
@Override public void close() { synchronized (lock) { next = null; wakeupProducer = false; if (error == null) { error = new ClosedException(); } lock.notifyAll(); } }
Closes the handover. Both the {@link #produce(ConsumerRecords)} method and the {@link #pollNext()} will throw a {@link ClosedException} on any currently blocking and future invocations. <p>If an exception was previously reported via the {@link #reportError(Throwable)} method, that exception will not be overridden. The consumer thread will throw that exception upon calling {@link #pollNext()}, rather than the {@code ClosedException}.
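A two-thread usage sketch of the hand-over protocol described by produce/pollNext/close above. It assumes the example sits next to the Handover class shown here (so the class and its nested ClosedException are visible) and that kafka-clients is on the classpath; the thread name and the use of ConsumerRecords.empty() are illustrative.

import org.apache.kafka.clients.consumer.ConsumerRecords;

public class HandoverSketch {
    public static void main(String[] args) throws Exception {
        final Handover handover = new Handover();

        // Producer side: hands one (empty) batch over; produce() would block if a
        // previous element had not yet been picked up by the consumer.
        Thread producer = new Thread(() -> {
            try {
                handover.produce(ConsumerRecords.empty());
            } catch (Exception e) {
                handover.reportError(e);
            }
        }, "kafka-fetcher-sketch");
        producer.start();

        // Consumer side: blocks until the element is available.
        ConsumerRecords<byte[], byte[]> records = handover.pollNext();
        System.out.println("received " + records.count() + " records"); // received 0 records
        producer.join();

        // After close(), any further pollNext() fails with ClosedException.
        handover.close();
        try {
            handover.pollNext();
        } catch (Exception e) {
            System.out.println("after close: " + e.getClass().getSimpleName()); // ClosedException
        }
    }
}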
public boolean isCompatibleWith(DeweyNumber other) { if (length() > other.length()) { // prefix case for (int i = 0; i < other.length(); i++) { if (other.deweyNumber[i] != deweyNumber[i]) { return false; } } return true; } else if (length() == other.length()) { // check init digits for equality int lastIndex = length() - 1; for (int i = 0; i < lastIndex; i++) { if (other.deweyNumber[i] != deweyNumber[i]) { return false; } } // check that the last digit is greater or equal return deweyNumber[lastIndex] >= other.deweyNumber[lastIndex]; } else { return false; } }
Checks whether this dewey number is compatible to the other dewey number. <p>True iff this contains other as a prefix or iff they differ only in the last digit and the last digit of this number is greater than or equal to the last digit of the other. @param other The other dewey number to check compatibility against @return Whether this dewey number is compatible to the other dewey number
public DeweyNumber increase(int times) { int[] newDeweyNumber = Arrays.copyOf(deweyNumber, deweyNumber.length); newDeweyNumber[deweyNumber.length - 1] += times; return new DeweyNumber(newDeweyNumber); }
Creates a new dewey number from this such that its last digit is increased by the supplied number. @param times how many times to increase the Dewey number @return A new dewey number derived from this whose last digit is increased by given number
public DeweyNumber addStage() { int[] newDeweyNumber = Arrays.copyOf(deweyNumber, deweyNumber.length + 1); return new DeweyNumber(newDeweyNumber); }
Creates a new dewey number from this such that a 0 is appended as new last digit. @return A new dewey number which contains this as a prefix and has 0 as last digit
public static DeweyNumber fromString(final String deweyNumberString) { String[] splits = deweyNumberString.split("\\."); if (splits.length == 0) { return new DeweyNumber(Integer.parseInt(deweyNumberString)); } else { int[] deweyNumber = new int[splits.length]; for (int i = 0; i < splits.length; i++) { deweyNumber[i] = Integer.parseInt(splits[i]); } return new DeweyNumber(deweyNumber); } }
Creates a dewey number from a string representation. The input string must be a dot separated string of integers. @param deweyNumberString Dot separated string of integers @return Dewey number generated from the given input string
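A short usage sketch tying the four methods above together, assuming the DeweyNumber class shown here is visible to the example; the version strings are made up, and the "conceptually ..." comments describe the digit arrays rather than any particular toString format.

public class DeweyNumberSketch {
    public static void main(String[] args) {
        DeweyNumber run = DeweyNumber.fromString("1.0.2");

        DeweyNumber child = run.addStage();    // conceptually 1.0.2.0
        DeweyNumber sibling = run.increase(1); // conceptually 1.0.3

        System.out.println(child.isCompatibleWith(run));   // true: run is a prefix of child
        System.out.println(sibling.isCompatibleWith(run)); // true: same length, last digit >=
        System.out.println(run.isCompatibleWith(child));   // false: run is shorter than child
    }
}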
@Override public boolean triggerCheckpoint(CheckpointMetaData checkpointMetaData, CheckpointOptions checkpointOptions, boolean advanceToEndOfEventTime) throws Exception { if (!externallyInducedCheckpoints) { return super.triggerCheckpoint(checkpointMetaData, checkpointOptions, advanceToEndOfEventTime); } else { // we do not trigger checkpoints here, we simply state whether we can trigger them synchronized (getCheckpointLock()) { return isRunning(); } } }
------------------------------------------------------------------------
public CompletableFuture<JobDetailsInfo> getJobDetails(JobID jobId) { final JobDetailsHeaders detailsHeaders = JobDetailsHeaders.getInstance(); final JobMessageParameters params = new JobMessageParameters(); params.jobPathParameter.resolve(jobId); return sendRequest( detailsHeaders, params); }
Requests the job details. @param jobId The job id @return Job details
@Override public CompletableFuture<JobResult> requestJobResult(@Nonnull JobID jobId) { return pollResourceAsync( () -> { final JobMessageParameters messageParameters = new JobMessageParameters(); messageParameters.jobPathParameter.resolve(jobId); return sendRequest( JobExecutionResultHeaders.getInstance(), messageParameters); }); }
Requests the {@link JobResult} for the given {@link JobID}. The method retries multiple times to poll the {@link JobResult} before giving up. @param jobId specifying the job for which to retrieve the {@link JobResult} @return Future which is completed with the {@link JobResult} once the job has completed or with a failure if the {@link JobResult} could not be retrieved.
@Override public CompletableFuture<JobSubmissionResult> submitJob(@Nonnull JobGraph jobGraph) { // we have to enable queued scheduling because slot will be allocated lazily jobGraph.setAllowQueuedScheduling(true); CompletableFuture<java.nio.file.Path> jobGraphFileFuture = CompletableFuture.supplyAsync(() -> { try { final java.nio.file.Path jobGraphFile = Files.createTempFile("flink-jobgraph", ".bin"); try (ObjectOutputStream objectOut = new ObjectOutputStream(Files.newOutputStream(jobGraphFile))) { objectOut.writeObject(jobGraph); } return jobGraphFile; } catch (IOException e) { throw new CompletionException(new FlinkException("Failed to serialize JobGraph.", e)); } }, executorService); CompletableFuture<Tuple2<JobSubmitRequestBody, Collection<FileUpload>>> requestFuture = jobGraphFileFuture.thenApply(jobGraphFile -> { List<String> jarFileNames = new ArrayList<>(8); List<JobSubmitRequestBody.DistributedCacheFile> artifactFileNames = new ArrayList<>(8); Collection<FileUpload> filesToUpload = new ArrayList<>(8); filesToUpload.add(new FileUpload(jobGraphFile, RestConstants.CONTENT_TYPE_BINARY)); for (Path jar : jobGraph.getUserJars()) { jarFileNames.add(jar.getName()); filesToUpload.add(new FileUpload(Paths.get(jar.toUri()), RestConstants.CONTENT_TYPE_JAR)); } for (Map.Entry<String, DistributedCache.DistributedCacheEntry> artifacts : jobGraph.getUserArtifacts().entrySet()) { artifactFileNames.add(new JobSubmitRequestBody.DistributedCacheFile(artifacts.getKey(), new Path(artifacts.getValue().filePath).getName())); filesToUpload.add(new FileUpload(Paths.get(artifacts.getValue().filePath), RestConstants.CONTENT_TYPE_BINARY)); } final JobSubmitRequestBody requestBody = new JobSubmitRequestBody( jobGraphFile.getFileName().toString(), jarFileNames, artifactFileNames); return Tuple2.of(requestBody, Collections.unmodifiableCollection(filesToUpload)); }); final CompletableFuture<JobSubmitResponseBody> submissionFuture = requestFuture.thenCompose( requestAndFileUploads -> sendRetriableRequest( JobSubmitHeaders.getInstance(), EmptyMessageParameters.getInstance(), requestAndFileUploads.f0, requestAndFileUploads.f1, isConnectionProblemOrServiceUnavailable()) ); submissionFuture .thenCombine(jobGraphFileFuture, (ignored, jobGraphFile) -> jobGraphFile) .thenAccept(jobGraphFile -> { try { Files.delete(jobGraphFile); } catch (IOException e) { log.warn("Could not delete temporary file {}.", jobGraphFile, e); } }); return submissionFuture .thenApply( (JobSubmitResponseBody jobSubmitResponseBody) -> new JobSubmissionResult(jobGraph.getJobID())) .exceptionally( (Throwable throwable) -> { throw new CompletionException(new JobSubmissionException(jobGraph.getJobID(), "Failed to submit JobGraph.", ExceptionUtils.stripCompletionException(throwable))); }); }
Submits the given {@link JobGraph} to the dispatcher. @param jobGraph to submit @return Future which is completed with the submission response
private <R, A extends AsynchronouslyCreatedResource<R>> CompletableFuture<R> pollResourceAsync( final Supplier<CompletableFuture<A>> resourceFutureSupplier) { return pollResourceAsync(resourceFutureSupplier, new CompletableFuture<>(), 0); }
Creates a {@code CompletableFuture} that polls a {@code AsynchronouslyCreatedResource} until its {@link AsynchronouslyCreatedResource#queueStatus() QueueStatus} becomes {@link QueueStatus.Id#COMPLETED COMPLETED}. The future completes with the result of {@link AsynchronouslyCreatedResource#resource()}. @param resourceFutureSupplier The operation which polls for the {@code AsynchronouslyCreatedResource}. @param <R> The type of the resource. @param <A> The type of the {@code AsynchronouslyCreatedResource}. @return A {@code CompletableFuture} delivering the resource.
@Override public String getWebInterfaceURL() { try { return getWebMonitorBaseUrl().get().toString(); } catch (InterruptedException | ExecutionException e) { ExceptionUtils.checkInterrupted(e); log.warn("Could not retrieve the web interface URL for the cluster.", e); return "Unknown address."; } }
======================================
private <M extends MessageHeaders<EmptyRequestBody, P, U>, U extends MessageParameters, P extends ResponseBody> CompletableFuture<P> sendRequest(M messageHeaders, U messageParameters) { return sendRequest(messageHeaders, messageParameters, EmptyRequestBody.getInstance()); }
-------------------------------------------------------------------------
public static ExecutionGraph buildGraph( @Nullable ExecutionGraph prior, JobGraph jobGraph, Configuration jobManagerConfig, ScheduledExecutorService futureExecutor, Executor ioExecutor, SlotProvider slotProvider, ClassLoader classLoader, CheckpointRecoveryFactory recoveryFactory, Time rpcTimeout, RestartStrategy restartStrategy, MetricGroup metrics, BlobWriter blobWriter, Time allocationTimeout, Logger log) throws JobExecutionException, JobException { checkNotNull(jobGraph, "job graph cannot be null"); final String jobName = jobGraph.getName(); final JobID jobId = jobGraph.getJobID(); final FailoverStrategy.Factory failoverStrategy = FailoverStrategyLoader.loadFailoverStrategy(jobManagerConfig, log); final JobInformation jobInformation = new JobInformation( jobId, jobName, jobGraph.getSerializedExecutionConfig(), jobGraph.getJobConfiguration(), jobGraph.getUserJarBlobKeys(), jobGraph.getClasspaths()); // create a new execution graph, if none exists so far final ExecutionGraph executionGraph; try { executionGraph = (prior != null) ? prior : new ExecutionGraph( jobInformation, futureExecutor, ioExecutor, rpcTimeout, restartStrategy, failoverStrategy, slotProvider, classLoader, blobWriter, allocationTimeout); } catch (IOException e) { throw new JobException("Could not create the ExecutionGraph.", e); } // set the basic properties executionGraph.setScheduleMode(jobGraph.getScheduleMode()); executionGraph.setQueuedSchedulingAllowed(jobGraph.getAllowQueuedScheduling()); try { executionGraph.setJsonPlan(JsonPlanGenerator.generatePlan(jobGraph)); } catch (Throwable t) { log.warn("Cannot create JSON plan for job", t); // give the graph an empty plan executionGraph.setJsonPlan("{}"); } // initialize the vertices that have a master initialization hook // file output formats create directories here, input formats create splits final long initMasterStart = System.nanoTime(); log.info("Running initialization on master for job {} ({}).", jobName, jobId); for (JobVertex vertex : jobGraph.getVertices()) { String executableClass = vertex.getInvokableClassName(); if (executableClass == null || executableClass.isEmpty()) { throw new JobSubmissionException(jobId, "The vertex " + vertex.getID() + " (" + vertex.getName() + ") has no invokable class."); } try { vertex.initializeOnMaster(classLoader); } catch (Throwable t) { throw new JobExecutionException(jobId, "Cannot initialize task '" + vertex.getName() + "': " + t.getMessage(), t); } } log.info("Successfully ran initialization on master in {} ms.", (System.nanoTime() - initMasterStart) / 1_000_000); // topologically sort the job vertices and attach the graph to the existing one List<JobVertex> sortedTopology = jobGraph.getVerticesSortedTopologicallyFromSources(); if (log.isDebugEnabled()) { log.debug("Adding {} vertices from job graph {} ({}).", sortedTopology.size(), jobName, jobId); } executionGraph.attachJobGraph(sortedTopology); if (log.isDebugEnabled()) { log.debug("Successfully created execution graph from job graph {} ({}).", jobName, jobId); } // configure the state checkpointing JobCheckpointingSettings snapshotSettings = jobGraph.getCheckpointingSettings(); if (snapshotSettings != null) { List<ExecutionJobVertex> triggerVertices = idToVertex(snapshotSettings.getVerticesToTrigger(), executionGraph); List<ExecutionJobVertex> ackVertices = idToVertex(snapshotSettings.getVerticesToAcknowledge(), executionGraph); List<ExecutionJobVertex> confirmVertices = idToVertex(snapshotSettings.getVerticesToConfirm(), executionGraph); CompletedCheckpointStore 
completedCheckpoints; CheckpointIDCounter checkpointIdCounter; try { int maxNumberOfCheckpointsToRetain = jobManagerConfig.getInteger( CheckpointingOptions.MAX_RETAINED_CHECKPOINTS); if (maxNumberOfCheckpointsToRetain <= 0) { // warning and use 1 as the default value if the setting in // state.checkpoints.max-retained-checkpoints is not greater than 0. log.warn("The setting for '{} : {}' is invalid. Using default value of {}", CheckpointingOptions.MAX_RETAINED_CHECKPOINTS.key(), maxNumberOfCheckpointsToRetain, CheckpointingOptions.MAX_RETAINED_CHECKPOINTS.defaultValue()); maxNumberOfCheckpointsToRetain = CheckpointingOptions.MAX_RETAINED_CHECKPOINTS.defaultValue(); } completedCheckpoints = recoveryFactory.createCheckpointStore(jobId, maxNumberOfCheckpointsToRetain, classLoader); checkpointIdCounter = recoveryFactory.createCheckpointIDCounter(jobId); } catch (Exception e) { throw new JobExecutionException(jobId, "Failed to initialize high-availability checkpoint handler", e); } // Maximum number of remembered checkpoints int historySize = jobManagerConfig.getInteger(WebOptions.CHECKPOINTS_HISTORY_SIZE); CheckpointStatsTracker checkpointStatsTracker = new CheckpointStatsTracker( historySize, ackVertices, snapshotSettings.getCheckpointCoordinatorConfiguration(), metrics); // load the state backend from the application settings final StateBackend applicationConfiguredBackend; final SerializedValue<StateBackend> serializedAppConfigured = snapshotSettings.getDefaultStateBackend(); if (serializedAppConfigured == null) { applicationConfiguredBackend = null; } else { try { applicationConfiguredBackend = serializedAppConfigured.deserializeValue(classLoader); } catch (IOException | ClassNotFoundException e) { throw new JobExecutionException(jobId, "Could not deserialize application-defined state backend.", e); } } final StateBackend rootBackend; try { rootBackend = StateBackendLoader.fromApplicationOrConfigOrDefault( applicationConfiguredBackend, jobManagerConfig, classLoader, log); } catch (IllegalConfigurationException | IOException | DynamicCodeLoadingException e) { throw new JobExecutionException(jobId, "Could not instantiate configured state backend", e); } // instantiate the user-defined checkpoint hooks final SerializedValue<MasterTriggerRestoreHook.Factory[]> serializedHooks = snapshotSettings.getMasterHooks(); final List<MasterTriggerRestoreHook<?>> hooks; if (serializedHooks == null) { hooks = Collections.emptyList(); } else { final MasterTriggerRestoreHook.Factory[] hookFactories; try { hookFactories = serializedHooks.deserializeValue(classLoader); } catch (IOException | ClassNotFoundException e) { throw new JobExecutionException(jobId, "Could not instantiate user-defined checkpoint hooks", e); } final Thread thread = Thread.currentThread(); final ClassLoader originalClassLoader = thread.getContextClassLoader(); thread.setContextClassLoader(classLoader); try { hooks = new ArrayList<>(hookFactories.length); for (MasterTriggerRestoreHook.Factory factory : hookFactories) { hooks.add(MasterHooks.wrapHook(factory.create(), classLoader)); } } finally { thread.setContextClassLoader(originalClassLoader); } } final CheckpointCoordinatorConfiguration chkConfig = snapshotSettings.getCheckpointCoordinatorConfiguration(); executionGraph.enableCheckpointing( chkConfig.getCheckpointInterval(), chkConfig.getCheckpointTimeout(), chkConfig.getMinPauseBetweenCheckpoints(), chkConfig.getMaxConcurrentCheckpoints(), chkConfig.getCheckpointRetentionPolicy(), triggerVertices, ackVertices, confirmVertices, hooks, 
checkpointIdCounter, completedCheckpoints, rootBackend, checkpointStatsTracker); } // create all the metrics for the Execution Graph metrics.gauge(RestartTimeGauge.METRIC_NAME, new RestartTimeGauge(executionGraph)); metrics.gauge(DownTimeGauge.METRIC_NAME, new DownTimeGauge(executionGraph)); metrics.gauge(UpTimeGauge.METRIC_NAME, new UpTimeGauge(executionGraph)); metrics.gauge(NumberOfFullRestartsGauge.METRIC_NAME, new NumberOfFullRestartsGauge(executionGraph)); executionGraph.getFailoverStrategy().registerMetrics(metrics); return executionGraph; }
Builds the ExecutionGraph from the JobGraph. If a prior execution graph exists, the JobGraph will be attached to it. If no prior execution graph exists, the JobGraph will be attached to a new, empty execution graph.
public int getNumOccupiedMemorySegments() { // either the number of memory segments, or one for spilling final int numPartitionBuffers = this.partitionBuffers != null ? this.partitionBuffers.length : this.buildSideWriteBuffer.getNumOccupiedMemorySegments(); return numPartitionBuffers + numOverflowSegments; }
Gets the number of memory segments used by this partition, which includes build side memory buffers and overflow memory segments. @return The number of occupied memory segments.
public final long insertIntoBuildBuffer(BT record) throws IOException { this.buildSideRecordCounter++; if (isInMemory()) { final long pointer = this.buildSideWriteBuffer.getCurrentPointer(); this.buildSideSerializer.serialize(record, this.buildSideWriteBuffer); return isInMemory() ? pointer : -1; } else { this.buildSideSerializer.serialize(record, this.buildSideWriteBuffer); return -1; } }
Inserts the given object into the current buffer. This method returns a pointer that can be used to address the written record in this partition, if it is in-memory. The returned pointer is meaningless once the partition has been spilled. @param record The object to be written to the partition. @return A pointer to the object in the partition, or <code>-1</code>, if the partition is spilled. @throws IOException Thrown, when this is a spilled partition and the write failed.
public int spillPartition(List<MemorySegment> target, IOManager ioAccess, FileIOChannel.ID targetChannel, LinkedBlockingQueue<MemorySegment> bufferReturnQueue) throws IOException { // sanity checks if (!isInMemory()) { throw new RuntimeException("Bug in Hybrid Hash Join: " + "Request to spill a partition that has already been spilled."); } if (getNumOccupiedMemorySegments() < 2) { throw new RuntimeException("Bug in Hybrid Hash Join: " + "Request to spill a partition with less than two buffers."); } // return the memory from the overflow segments for (int i = 0; i < this.numOverflowSegments; i++) { target.add(this.overflowSegments[i]); } this.overflowSegments = null; this.numOverflowSegments = 0; this.nextOverflowBucket = 0; // create the channel block writer and spill the current buffers // that keep the build side buffers current block, as it is most likely not full, yet // we return the number of blocks that become available this.buildSideChannel = ioAccess.createBlockChannelWriter(targetChannel, bufferReturnQueue); return this.buildSideWriteBuffer.spill(this.buildSideChannel); }
Spills this partition to disk and sets it up such that it continues spilling records that are added to it. The spilling process must free at least one buffer, either in the partition's record buffers, or in the memory segments for overflow buckets. The partition immediately takes back one buffer to use it for further spilling. @param target The list to which memory segments from overflow buckets are added. @param ioAccess The I/O manager to be used to create a writer to disk. @param targetChannel The id of the target channel for this partition. @param bufferReturnQueue The queue to which the block writer returns memory segments after they have been written. @return The number of buffers that were freed by spilling this partition. @throws IOException Thrown, if the writing failed.
public void setReadPosition(long pointer) { final int bufferNum = (int) (pointer >>> this.segmentSizeBits); final int offset = (int) (pointer & (this.memorySegmentSize - 1)); this.currentBufferNum = bufferNum; seekInput(this.partitionBuffers[bufferNum], offset, bufferNum < this.partitionBuffers.length-1 ? this.memorySegmentSize : this.finalBufferLimit); }
--------------------------------------------------------------------------------------------------
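The read pointer consumed by setReadPosition packs the buffer index into the high bits and the byte offset into the low bits of a single long. A minimal decoding sketch, assuming an illustrative 32 KiB segment size (so segmentSizeBits = 15); the real values come from the partition's configuration:

public class PointerLayoutSketch {
    public static void main(String[] args) {
        final int memorySegmentSize = 32 * 1024;                                       // assumed, power of two
        final int segmentSizeBits = Integer.numberOfTrailingZeros(memorySegmentSize);  // 15

        // encode a pointer to buffer 3, offset 100, the same way the partition does
        long pointer = ((long) 3 << segmentSizeBits) | 100;

        int bufferNum = (int) (pointer >>> segmentSizeBits);        // 3
        int offset = (int) (pointer & (memorySegmentSize - 1));     // 100
        System.out.println("buffer=" + bufferNum + ", offset=" + offset);
    }
}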
JobSubmissionResult finalizeExecute() throws ProgramInvocationException { return client.run(detachedPlan, jarFilesToAttach, classpathsToAttach, userCodeClassLoader, savepointSettings); }
Finishes this Context Environment's execution by explicitly running the constructed plan.
@PublicEvolving @Override public Tuple2<Long, Long> getCurrentState() throws IOException { if (this.blockBasedInput == null) { throw new RuntimeException("You must have forgotten to call open() on your input format."); } return new Tuple2<>( this.blockBasedInput.getCurrBlockPos(), // the last read index in the block this.readRecords // the number of records read ); }
--------------------------------------------------------------------------------------------
public static void bestEffortDiscardAllStateObjects( Iterable<? extends StateObject> handlesToDiscard) throws Exception { LambdaUtil.applyToAllWhileSuppressingExceptions(handlesToDiscard, StateObject::discardState); }
Iterates through the passed state handles and calls discardState() on each handle that is not null. All occurring exceptions are suppressed and collected until the iteration is over and emitted as a single exception. @param handlesToDiscard State handles to discard. Passed iterable is allowed to deliver null values. @throws Exception exception that is a collection of all suppressed exceptions that were caught during iteration
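A hedged usage sketch; the utility class name (StateUtil) is an assumption, and the anonymous StateObject below is only a stand-in for a real state handle. Null entries in the iterable are allowed and skipped:

import java.util.Arrays;

// stand-in handle; a real handle would release a file or off-heap resource in discardState()
StateObject handle = new StateObject() {
    @Override
    public void discardState() {
        // release the backing resource here
    }

    @Override
    public long getStateSize() {
        return 0L;
    }
};

// all discard exceptions are suppressed during iteration and rethrown as a single exception
StateUtil.bestEffortDiscardAllStateObjects(Arrays.asList(handle, null, handle));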
public static void discardStateFuture(RunnableFuture<? extends StateObject> stateFuture) throws Exception { if (null != stateFuture) { if (!stateFuture.cancel(true)) { try { // We attempt to get a result, in case the future completed before cancellation. StateObject stateObject = FutureUtils.runIfNotDoneAndGet(stateFuture); if (null != stateObject) { stateObject.discardState(); } } catch (CancellationException | ExecutionException ex) { LOG.debug("Cancelled execution of snapshot future runnable. Cancellation produced the following " + "exception, which is expected and can be ignored.", ex); } } } }
Discards the given state future by first trying to cancel it. If cancellation is not possible, the state object contained in the future is obtained and then discarded. @param stateFuture to be discarded @throws Exception if the discard operation failed
public static <T> T find(Class<T> factoryClass, Descriptor descriptor) { Preconditions.checkNotNull(descriptor); return findInternal(factoryClass, descriptor.toProperties(), Optional.empty()); }
Finds a table factory of the given class and descriptor. @param factoryClass desired factory class @param descriptor descriptor describing the factory configuration @param <T> factory class type @return the matching factory
public static <T> T find(Class<T> factoryClass, Descriptor descriptor, ClassLoader classLoader) { Preconditions.checkNotNull(descriptor); Preconditions.checkNotNull(classLoader); return findInternal(factoryClass, descriptor.toProperties(), Optional.of(classLoader)); }
Finds a table factory of the given class, descriptor, and classloader. @param factoryClass desired factory class @param descriptor descriptor describing the factory configuration @param classLoader classloader for service loading @param <T> factory class type @return the matching factory
public static <T> T find(Class<T> factoryClass, Map<String, String> propertyMap) { return findInternal(factoryClass, propertyMap, Optional.empty()); }
Finds a table factory of the given class and property map. @param factoryClass desired factory class @param propertyMap properties that describe the factory configuration @param <T> factory class type @return the matching factory
public static <T> T find(Class<T> factoryClass, Map<String, String> propertyMap, ClassLoader classLoader) { Preconditions.checkNotNull(classLoader); return findInternal(factoryClass, propertyMap, Optional.of(classLoader)); }
Finds a table factory of the given class, property map, and classloader. @param factoryClass desired factory class @param propertyMap properties that describe the factory configuration @param classLoader classloader for service loading @param <T> factory class type @return the matching factory
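A hedged usage sketch of the lookup methods above; the service class name (TableFactoryService), the factory interface, and the property keys are illustrative assumptions, not taken from this code:

import java.util.HashMap;
import java.util.Map;

// properties describing the desired connector/format; the keys must satisfy the
// factory's requiredContext() and supportedProperties()
Map<String, String> properties = new HashMap<>();
properties.put("connector.type", "filesystem");
properties.put("format.type", "csv");

// resolve a factory implementing the (hypothetical) MyTableSourceFactory interface,
// using an explicit classloader for service discovery
MyTableSourceFactory factory = TableFactoryService.find(
    MyTableSourceFactory.class, properties, Thread.currentThread().getContextClassLoader());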
public static <T> T findInternal(Class<T> factoryClass, Map<String, String> properties, Optional<ClassLoader> classLoader) { Preconditions.checkNotNull(factoryClass); Preconditions.checkNotNull(properties); List<TableFactory> foundFactories = discoverFactories(classLoader); List<TableFactory> classFactories = filterByFactoryClass( factoryClass, properties, foundFactories); List<TableFactory> contextFactories = filterByContext( factoryClass, properties, foundFactories, classFactories); return filterBySupportedProperties( factoryClass, properties, foundFactories, contextFactories); }
Finds a table factory of the given class, property map, and classloader. @param factoryClass desired factory class @param properties properties that describe the factory configuration @param classLoader classloader for service loading @param <T> factory class type @return the matching factory
private static List<TableFactory> discoverFactories(Optional<ClassLoader> classLoader) { try { List<TableFactory> result = new LinkedList<>(); if (classLoader.isPresent()) { ServiceLoader .load(TableFactory.class, classLoader.get()) .iterator() .forEachRemaining(result::add); } else { defaultLoader.iterator().forEachRemaining(result::add); } return result; } catch (ServiceConfigurationError e) { LOG.error("Could not load service provider for table factories.", e); throw new TableException("Could not load service provider for table factories.", e); } }
Searches for factories using Java service providers. @return all factories in the classpath
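The discovery step is plain Java ServiceLoader usage; a minimal sketch of what it amounts to (provider registration via a META-INF/services file is assumed to be in place):

import java.util.ServiceLoader;

ServiceLoader<TableFactory> loader =
    ServiceLoader.load(TableFactory.class, Thread.currentThread().getContextClassLoader());
for (TableFactory factory : loader) {
    // every implementation listed under META-INF/services for TableFactory shows up here
}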
private static <T> List<TableFactory> filterByFactoryClass( Class<T> factoryClass, Map<String, String> properties, List<TableFactory> foundFactories) { List<TableFactory> classFactories = foundFactories.stream() .filter(p -> factoryClass.isAssignableFrom(p.getClass())) .collect(Collectors.toList()); if (classFactories.isEmpty()) { throw new NoMatchingTableFactoryException( String.format("No factory implements '%s'.", factoryClass.getCanonicalName()), factoryClass, foundFactories, properties); } return classFactories; }
Filters the discovered factories by the desired factory class.
private static <T> List<TableFactory> filterByContext( Class<T> factoryClass, Map<String, String> properties, List<TableFactory> foundFactories, List<TableFactory> classFactories) { List<TableFactory> matchingFactories = classFactories.stream().filter(factory -> { Map<String, String> requestedContext = normalizeContext(factory); Map<String, String> plainContext = new HashMap<>(requestedContext); // we remove the version for now until we have the first backwards compatibility case // with the version we can provide mappings in case the format changes plainContext.remove(CONNECTOR_PROPERTY_VERSION); plainContext.remove(FORMAT_PROPERTY_VERSION); plainContext.remove(METADATA_PROPERTY_VERSION); plainContext.remove(STATISTICS_PROPERTY_VERSION); plainContext.remove(CATALOG_PROPERTY_VERSION); // check if required context is met return plainContext.keySet().stream().allMatch(e -> properties.containsKey(e) && properties.get(e).equals(plainContext.get(e))); }).collect(Collectors.toList()); if (matchingFactories.isEmpty()) { throw new NoMatchingTableFactoryException( "No context matches.", factoryClass, foundFactories, properties); } return matchingFactories; }
Filters for factories with matching context. @return all matching factories
private static Map<String, String> normalizeContext(TableFactory factory) { Map<String, String> requiredContext = factory.requiredContext(); if (requiredContext == null) { throw new TableException( String.format("Required context of factory '%s' must not be null.", factory.getClass().getName())); } return requiredContext.keySet().stream() .collect(Collectors.toMap(key -> key.toLowerCase(), key -> requiredContext.get(key))); }
Prepares the properties of a context to be used for match operations.
private static <T> T filterBySupportedProperties( Class<T> factoryClass, Map<String, String> properties, List<TableFactory> foundFactories, List<TableFactory> classFactories) { final List<String> plainGivenKeys = new LinkedList<>(); properties.keySet().forEach(k -> { // replace arrays with wildcard String key = k.replaceAll(".\\d+", ".#"); // ignore duplicates if (!plainGivenKeys.contains(key)) { plainGivenKeys.add(key); } }); Optional<String> lastKey = Optional.empty(); List<TableFactory> supportedFactories = new LinkedList<>(); for (TableFactory factory: classFactories) { Set<String> requiredContextKeys = normalizeContext(factory).keySet(); Tuple2<List<String>, List<String>> tuple2 = normalizeSupportedProperties(factory); // ignore context keys List<String> givenContextFreeKeys = plainGivenKeys.stream() .filter(p -> !requiredContextKeys.contains(p)) .collect(Collectors.toList()); List<String> givenFilteredKeys = filterSupportedPropertiesFactorySpecific( factory, givenContextFreeKeys); Boolean allTrue = true; for (String k: givenFilteredKeys) { lastKey = Optional.of(k); if (!(tuple2.f0.contains(k) || tuple2.f1.stream().anyMatch(p -> k.startsWith(p)))) { allTrue = false; break; } } if (allTrue) { supportedFactories.add(factory); } } if (supportedFactories.isEmpty() && classFactories.size() == 1 && lastKey.isPresent()) { // special case: when there is only one matching factory but the last property key // was incorrect TableFactory factory = classFactories.get(0); Tuple2<List<String>, List<String>> tuple2 = normalizeSupportedProperties(factory); String errorMessage = String.format( "The matching factory '%s' doesn't support '%s'.\n\nSupported properties of " + "this factory are:\n%s", factory.getClass().getName(), lastKey.get(), String.join("\n", tuple2.f0)); throw new NoMatchingTableFactoryException( errorMessage, factoryClass, foundFactories, properties); } else if (supportedFactories.isEmpty()) { throw new NoMatchingTableFactoryException( "No factory supports all properties.", factoryClass, foundFactories, properties); } else if (supportedFactories.size() > 1) { throw new AmbiguousTableFactoryException( supportedFactories, factoryClass, foundFactories, properties); } return (T) supportedFactories.get(0); }
Filters the matching class factories by supported properties.
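A minimal sketch of the key normalization applied above; the example keys are illustrative. Indexed keys collapse to a wildcard form so that a factory only has to declare the '#' pattern in its supported properties:

// "schema.0.name", "schema.1.name", ... all normalize to the same key
String given = "schema.0.name";
String normalized = given.replaceAll(".\\d+", ".#");   // "schema.#.name"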
private static Tuple2<List<String>, List<String>> normalizeSupportedProperties(TableFactory factory) { List<String> supportedProperties = factory.supportedProperties(); if (supportedProperties == null) { throw new TableException( String.format("Supported properties of factory '%s' must not be null.", factory.getClass().getName())); } List<String> supportedKeys = supportedProperties.stream() .map(p -> p.toLowerCase()) .collect(Collectors.toList()); // extract wildcard prefixes List<String> wildcards = extractWildcardPrefixes(supportedKeys); return Tuple2.of(supportedKeys, wildcards); }
Prepares the supported properties of a factory to be used for match operations.
private static List<String> extractWildcardPrefixes(List<String> propertyKeys) { return propertyKeys.stream() .filter(p -> p.endsWith("*")) .map(s -> s.substring(0, s.length() - 1)) .collect(Collectors.toList()); }
Extracts the prefixes of wildcard property keys (e.g., "format.*" yields "format.").
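A tiny sketch of how such a wildcard entry is used after prefix extraction (keys are illustrative):

String supported = "format.*";
String prefix = supported.substring(0, supported.length() - 1);       // "format."
boolean accepted = "format.ignore-parse-errors".startsWith(prefix);   // true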
private static List<String> filterSupportedPropertiesFactorySpecific( TableFactory factory, List<String> keys) { if (factory instanceof TableFormatFactory) { boolean includeSchema = ((TableFormatFactory) factory).supportsSchemaDerivation(); return keys.stream().filter(k -> { if (includeSchema) { return k.startsWith(Schema.SCHEMA + ".") || k.startsWith(FormatDescriptorValidator.FORMAT + "."); } else { return k.startsWith(FormatDescriptorValidator.FORMAT + "."); } }).collect(Collectors.toList()); } else { return keys; } }
Performs filtering for special cases (i.e. table format factories with schema derivation).
@Override public void insertOrReplaceRecord(T record) throws IOException { if (closed) { return; } T match = prober.getMatchFor(record, reuse); if (match == null) { prober.insertAfterNoMatch(record); } else { prober.updateMatch(record); } }
Searches the hash table for a record with the given key. If it is found, it is overwritten with the specified record. Otherwise, the specified record is inserted. @param record The record to insert or to replace with. @throws IOException (EOFException specifically, if memory ran out)
@Override public void insert(T record) throws IOException { if (closed) { return; } final int hashCode = MathUtils.jenkinsHash(buildSideComparator.hash(record)); final int bucket = hashCode & numBucketsMask; final int bucketSegmentIndex = bucket >>> numBucketsPerSegmentBits; // which segment contains the bucket final MemorySegment bucketSegment = bucketSegments[bucketSegmentIndex]; final int bucketOffset = (bucket & numBucketsPerSegmentMask) << bucketSizeBits; // offset of the bucket in the segment final long firstPointer = bucketSegment.getLong(bucketOffset); try { final long newFirstPointer = recordArea.appendPointerAndRecord(firstPointer, record); bucketSegment.putLong(bucketOffset, newFirstPointer); } catch (EOFException ex) { compactOrThrow(); insert(record); return; } numElements++; resizeTableIfNecessary(); }
Inserts the given record into the hash table. Note: this method doesn't care about whether a record with the same key is already present. @param record The record to insert. @throws IOException (EOFException specifically, if memory ran out)
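A minimal sketch of the bucket addressing arithmetic in insert(), using illustrative power-of-two sizes rather than the table's real configuration:

public class BucketAddressingSketch {
    public static void main(String[] args) {
        final int numBuckets = 1 << 10;              // 1024 buckets, assumed
        final int numBucketsMask = numBuckets - 1;
        final int numBucketsPerSegmentBits = 7;      // 128 buckets per memory segment, assumed
        final int numBucketsPerSegmentMask = (1 << numBucketsPerSegmentBits) - 1;
        final int bucketSizeBits = 3;                // 8 bytes per bucket (one long pointer)

        int hashCode = 0x9E3779B9;                   // stand-in for the jenkins-hashed key
        int bucket = hashCode & numBucketsMask;                                     // bucket index
        int segmentIndex = bucket >>> numBucketsPerSegmentBits;                     // which segment holds it
        int bucketOffset = (bucket & numBucketsPerSegmentMask) << bucketSizeBits;   // byte offset in segment

        System.out.println(bucket + " -> segment " + segmentIndex + ", offset " + bucketOffset);
    }
}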
private void rebuild(long newNumBucketSegments) throws IOException { // Get new bucket segments releaseBucketSegments(); allocateBucketSegments((int)newNumBucketSegments); T record = buildSideSerializer.createInstance(); try { EntryIterator iter = getEntryIterator(); recordArea.resetAppendPosition(); recordArea.setWritePosition(0); while ((record = iter.next(record)) != null && !closed) { final int hashCode = MathUtils.jenkinsHash(buildSideComparator.hash(record)); final int bucket = hashCode & numBucketsMask; final int bucketSegmentIndex = bucket >>> numBucketsPerSegmentBits; // which segment contains the bucket final MemorySegment bucketSegment = bucketSegments[bucketSegmentIndex]; final int bucketOffset = (bucket & numBucketsPerSegmentMask) << bucketSizeBits; // offset of the bucket in the segment final long firstPointer = bucketSegment.getLong(bucketOffset); long ptrToAppended = recordArea.noSeekAppendPointerAndRecord(firstPointer, record); bucketSegment.putLong(bucketOffset, ptrToAppended); } recordArea.freeSegmentsAfterAppendPosition(); holes = 0; } catch (EOFException ex) { throw new RuntimeException("Bug in InPlaceMutableHashTable: we shouldn't get out of memory during a rebuild, " + "because we aren't allocating any new memory."); } }
Same as above, but the number of bucket segments of the new table can be specified.
public static StringifiedAccumulatorResult[] stringifyAccumulatorResults(Map<String, OptionalFailure<Accumulator<?, ?>>> accs) { if (accs == null || accs.isEmpty()) { return new StringifiedAccumulatorResult[0]; } else { StringifiedAccumulatorResult[] results = new StringifiedAccumulatorResult[accs.size()]; int i = 0; for (Map.Entry<String, OptionalFailure<Accumulator<?, ?>>> entry : accs.entrySet()) { results[i++] = stringifyAccumulatorResult(entry.getKey(), entry.getValue()); } return results; } }
Flattens a map of accumulator names to Accumulator instances into an array of StringifiedAccumulatorResult values.
private void writeObject(final ObjectOutputStream out) throws IOException { out.writeObject(reducer.getClass()); out.writeObject(combiner.getClass()); jobConf.write(out); }
Custom serialization methods. @see <a href="http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html">http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html</a>
public boolean match(String[] requestPathTokens, Map<String, String> params) { if (tokens.length == requestPathTokens.length) { for (int i = 0; i < tokens.length; i++) { String key = tokens[i]; String value = requestPathTokens[i]; if (key.length() > 0 && key.charAt(0) == ':') { // This is a placeholder params.put(key.substring(1), value); } else if (!key.equals(value)) { // This is a constant return false; } } return true; } if (tokens.length > 0 && tokens[tokens.length - 1].equals(":*") && tokens.length <= requestPathTokens.length) { // The first part for (int i = 0; i < tokens.length - 2; i++) { String key = tokens[i]; String value = requestPathTokens[i]; if (key.length() > 0 && key.charAt(0) == ':') { // This is a placeholder params.put(key.substring(1), value); } else if (!key.equals(value)) { // This is a constant return false; } } // The last :* part StringBuilder b = new StringBuilder(requestPathTokens[tokens.length - 1]); for (int i = tokens.length; i < requestPathTokens.length; i++) { b.append('/'); b.append(requestPathTokens[i]); } params.put("*", b.toString()); return true; } return false; }
{@code params} will be updated with params embedded in the request path. <p>This method signature is designed so that {@code requestPathTokens} and {@code params} can be created only once then reused, to optimize for performance when a large number of path patterns need to be matched. @return {@code false} if not matched; in this case params should be reset
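A hedged usage sketch; the route string and the pattern object are assumptions, only the match(...) signature comes from the code above. It shows both placeholder capture and the trailing ':*' wildcard:

import java.util.HashMap;
import java.util.Map;

// assuming 'pattern' was built from the route "jobs/:jobid/vertices/:*"
Map<String, String> params = new HashMap<>();
String[] requestPath = {"jobs", "7d3c", "vertices", "v1", "metrics"};

if (pattern.match(requestPath, params)) {
    String jobId = params.get("jobid");   // "7d3c"
    String rest  = params.get("*");       // "v1/metrics"
} else {
    params.clear();                       // as documented: reset params when there is no match
}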
@Override public Tuple2<K, V> deserialize(ConsumerRecord<byte[], byte[]> record) throws Exception { K key = null; V value = null; if (record.key() != null) { inputDeserializer.setBuffer(record.key()); key = keySerializer.deserialize(inputDeserializer); } if (record.value() != null) { inputDeserializer.setBuffer(record.value()); value = valueSerializer.deserialize(inputDeserializer); } return new Tuple2<>(key, value); }
------------------------------------------------------------------------
public boolean isMatching(ResourceProfile required) { if (required == UNKNOWN) { return true; } if (cpuCores >= required.getCpuCores() && heapMemoryInMB >= required.getHeapMemoryInMB() && directMemoryInMB >= required.getDirectMemoryInMB() && nativeMemoryInMB >= required.getNativeMemoryInMB() && networkMemoryInMB >= required.getNetworkMemoryInMB()) { for (Map.Entry<String, Resource> resource : required.extendedResources.entrySet()) { if (!extendedResources.containsKey(resource.getKey()) || !extendedResources.get(resource.getKey()).getResourceAggregateType().equals(resource.getValue().getResourceAggregateType()) || extendedResources.get(resource.getKey()).getValue() < resource.getValue().getValue()) { return false; } } return true; } return false; }
Check whether required resource profile can be matched. @param required the required resource profile @return true if the requirement is matched, otherwise false
public static String repeat(final String str, final int repeat) { // Performance tuned for 2.0 (JDK1.4) if (str == null) { return null; } if (repeat <= 0) { return EMPTY; } final int inputLength = str.length(); if (repeat == 1 || inputLength == 0) { return str; } if (inputLength == 1 && repeat <= PAD_LIMIT) { return repeat(str.charAt(0), repeat); } final int outputLength = inputLength * repeat; switch (inputLength) { case 1: return repeat(str.charAt(0), repeat); case 2: final char ch0 = str.charAt(0); final char ch1 = str.charAt(1); final char[] output2 = new char[outputLength]; for (int i = repeat * 2 - 2; i >= 0; i--, i--) { output2[i] = ch0; output2[i + 1] = ch1; } return new String(output2); default: final StringBuilder buf = new StringBuilder(outputLength); for (int i = 0; i < repeat; i++) { buf.append(str); } return buf.toString(); } }
Repeat a String {@code repeat} times to form a new String. <pre> StringUtils.repeat(null, 2) = null StringUtils.repeat("", 0) = "" StringUtils.repeat("", 2) = "" StringUtils.repeat("a", 3) = "aaa" StringUtils.repeat("ab", 2) = "abab" StringUtils.repeat("a", -2) = "" </pre> @param str the String to repeat, may be null @param repeat number of times to repeat str, negative treated as zero @return a new String consisting of the original String repeated, {@code null} if null String input
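A short usage sketch; the results follow directly from the contract above:

String a = StringUtils.repeat("ab", 3);   // "ababab"  (two-char fast path)
String b = StringUtils.repeat("-", 5);    // "-----"   (single-char fast path)
String c = StringUtils.repeat("x", 0);    // ""        (non-positive repeat yields the empty string)
String d = StringUtils.repeat(null, 4);   // null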