name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_AbfsOutputStream_flush_rdh | /**
* Flushes this output stream and forces any buffered output bytes to be
* written out. If any data remains in the payload it is committed to the
* service. Data is queued for writing and forced out to the service
* before the call returns.
*/
@Override
public void flush() throws IOException {
if (!disableOutputStreamFlush) {
flushInternalAsync();
}
} | 3.26 |
hadoop_AbfsOutputStream_clearActiveBlock_rdh | /**
* Clear the active block.
*/
private void clearActiveBlock() {
if (activeBlock != null) {
LOG.debug("Clearing active block");
}
synchronized(this) {
activeBlock = null;
}
} | 3.26 |
hadoop_AbfsOutputStream_write_rdh | /**
* Writes length bytes from the specified byte array starting at off to
* this output stream.
*
* @param data
* the byte array to write.
* @param off
 *           the start offset in the data.
* @param length
* the number of bytes to write.
* @throws IOException
* if an I/O error occurs. In particular, an IOException may be
* thrown if the output stream has been closed.
*/
@Override
public synchronized void write(final byte[] data, final int off, final int length) throws IOException {
  // validate if data is not null and index out of bounds.
  DataBlocks.validateWriteArgs(data, off, length);
  m1();
if (((off < 0) || (length < 0)) || (length > (data.length - off))) {
throw new IndexOutOfBoundsException();
}
if (hasLease() && isLeaseFreed()) {
throw new PathIOException(path, ERR_WRITE_WITHOUT_LEASE);
}
DataBlocks.DataBlock block = createBlockIfNeeded();
int written = block.write(data, off, length);
int remainingCapacity = block.remainingCapacity();
if (written < length) {
// Number of bytes to write is more than the data block capacity,
// trigger an upload and then write on the next block.
LOG.debug("writing more data than block capacity -triggering upload");
uploadCurrentBlock();
// tail recursion is mildly expensive, but given buffer sizes must be MB.
// it's unlikely to recurse very deeply.
this.write(data, off + written, length - written);
} else if (remainingCapacity == 0) {
// the whole buffer is done, trigger an upload
uploadCurrentBlock();
}
incrementWriteOps();
} | 3.26 |
hadoop_AbfsOutputStream_m0_rdh | /**
* Upload a block of data.
* This will take the block.
*
* @param blockToUpload
* block to upload.
* @throws IOException
* upload failure
*/
private void m0(DataBlocks.DataBlock blockToUpload,
boolean isFlush, boolean isClose) throws IOException {
if (this.isAppendBlob) {
writeAppendBlobCurrentBufferToService();
return;
}
if (!blockToUpload.hasData()) {
return;
}
numOfAppendsToServerSinceLastFlush++;
final int bytesLength = blockToUpload.dataSize();
final long offset = position;
position += bytesLength;
outputStreamStatistics.bytesToUpload(bytesLength);
outputStreamStatistics.writeCurrentBuffer();
DataBlocks.BlockUploadData blockUploadData = blockToUpload.startUpload();
final Future<Void> job = executorService.submit(() -> {
AbfsPerfTracker tracker = client.getAbfsPerfTracker();
try (AbfsPerfInfo perfInfo =
new AbfsPerfInfo(tracker, "writeCurrentBufferToService", "append")) {
AppendRequestParameters.Mode mode = APPEND_MODE;
if (isFlush & isClose) {
mode = FLUSH_CLOSE_MODE;
} else if (isFlush) {
mode = FLUSH_MODE;
}
/* Parameters Required for an APPEND call.
offset(here) - refers to the position in the file.
bytesLength - Data to be uploaded from the block.
mode - If it's append, flush or flush_close.
leaseId - The AbfsLeaseId for this request.
*/
AppendRequestParameters v10 = new AppendRequestParameters(offset, 0, bytesLength, mode, false, leaseId, f0);
AbfsRestOperation op = client.append(path, blockUploadData.toByteArray(), v10, cachedSasToken.get(), new TracingContext(tracingContext));
cachedSasToken.update(op.getSasToken());
perfInfo.registerResult(op.getResult());
perfInfo.registerSuccess(true);
outputStreamStatistics.uploadSuccessful(bytesLength);
return null;
} finally {
IOUtils.close(blockUploadData, blockToUpload);
}
});
writeOperations.add(new WriteOperation(job, offset, bytesLength));
// Try to shrink the queue
shrinkWriteOperationQueue();
} | 3.26 |
hadoop_AbfsOutputStream_hsync_rdh | /**
* Similar to posix fsync, flush out the data in client's user buffer
* all the way to the disk device (but the disk may have it in its cache).
*
* @throws IOException
* if error occurs
*/
@Override
public void hsync() throws IOException {
if (supportFlush) {
flushInternal(false);
}
} | 3.26 |
hadoop_AbfsOutputStream_m1_rdh | /**
* Throw the last error recorded if not null.
* After the stream is closed, this is always set to
* an exception, so acts as a guard against method invocation once
* closed.
*
* @throws IOException
* if lastError is set
*/
private void m1() throws IOException {
if (lastError != null) {
throw lastError;
}
} | 3.26 |
hadoop_AbfsOutputStream_toString_rdh | /**
* Appending AbfsOutputStream statistics to base toString().
*
* @return String with AbfsOutputStream statistics.
*/
@Override
public String toString() {
final StringBuilder sb = new StringBuilder(super.toString());
sb.append("AbfsOutputStream@").append(this.hashCode());
sb.append("){");
sb.append(outputStreamStatistics.toString());
sb.append("}");
return sb.toString();
} | 3.26 |
hadoop_AbfsOutputStream_writeAppendBlobCurrentBufferToService_rdh | /**
* Appending the current active data block to service. Clearing the active
* data block and releasing all buffered data.
*
* @throws IOException
* if there is any failure while starting an upload for
* the dataBlock or while closing the BlockUploadData.
*/
private void writeAppendBlobCurrentBufferToService() throws IOException {
DataBlocks.DataBlock activeBlock = getActiveBlock();
// No data, return.
if (!hasActiveBlockDataToUpload()) {
return;
}
final int bytesLength = activeBlock.dataSize();
DataBlocks.BlockUploadData uploadData = activeBlock.startUpload();
clearActiveBlock();
outputStreamStatistics.writeCurrentBuffer();
outputStreamStatistics.bytesToUpload(bytesLength);
final long offset = position;
position += bytesLength;
AbfsPerfTracker tracker = client.getAbfsPerfTracker();
try (AbfsPerfInfo perfInfo = new AbfsPerfInfo(tracker, "writeCurrentBufferToService", "append")) {
AppendRequestParameters reqParams = new AppendRequestParameters(offset, 0, bytesLength, APPEND_MODE, true, leaseId, f0);
AbfsRestOperation op = client.append(path, uploadData.toByteArray(), reqParams, cachedSasToken.get(), new TracingContext(tracingContext));
cachedSasToken.update(op.getSasToken());
outputStreamStatistics.uploadSuccessful(bytesLength);
perfInfo.registerResult(op.getResult());
perfInfo.registerSuccess(true);
return;
} catch (Exception ex) {
  outputStreamStatistics.uploadFailed(bytesLength);
  failureWhileSubmit(ex);
} finally {
IOUtils.close(uploadData);
}
} | 3.26 |
hadoop_AbfsOutputStream_createBlockIfNeeded_rdh | /**
* Demand create a destination block.
*
* @return the active block; null if there isn't one.
* @throws IOException
* on any failure to create
*/
private synchronized DataBlock createBlockIfNeeded() throws IOException {
if (activeBlock == null) {
blockCount++;
activeBlock = blockFactory.create(blockCount, this.blockSize, outputStreamStatistics);
}
return activeBlock;
} | 3.26 |
hadoop_AbfsOutputStream_hflush_rdh | /**
* Flush out the data in client's user buffer. After the return of
* this call, new readers will see the data.
*
* @throws IOException
* if any error occurs
*/
@Override
public void hflush() throws IOException {
if (supportFlush) {
flushInternal(false);
}
} | 3.26 |
hadoop_AbfsOutputStream_getOutputStreamStatistics_rdh | /**
* Getter method for AbfsOutputStream statistics.
*
* @return statistics for AbfsOutputStream.
*/
@VisibleForTesting public AbfsOutputStreamStatistics getOutputStreamStatistics() {
return outputStreamStatistics;
} | 3.26 |
hadoop_AbfsOutputStream_shrinkWriteOperationQueue_rdh | /**
* Try to remove the completed write operations from the beginning of write
* operation FIFO queue.
*/
private synchronized void shrinkWriteOperationQueue() throws IOException {
try {
WriteOperation peek = writeOperations.peek();
while ((peek != null) && peek.task.isDone()) {
peek.task.get();
lastTotalAppendOffset += peek.length;
writeOperations.remove();
peek = writeOperations.peek();
// Incrementing statistics to indicate queue has been shrunk.
outputStreamStatistics.queueShrunk();
}
} catch (Exception e) {
if (e.getCause() instanceof AzureBlobFileSystemException) {
lastError = ((AzureBlobFileSystemException) (e.getCause()));
} else {
  lastError = new IOException(e);
}
throw lastError;
}
} | 3.26 |
hadoop_S3LogParser_eNoTrailing_rdh | /**
* An entry in the regexp.
*
* @param name
* name of the group
* @param pattern
* pattern to use in the regexp
* @return the pattern for the regexp
*/
private static String eNoTrailing(String name, String pattern) {
return String.format("(?<%s>%s)", name, pattern);
} | 3.26 |
hadoop_S3LogParser_q_rdh | /**
* Quoted entry using the {@link #QUOTED} pattern.
*
* @param name
* name of the element (for code clarity only)
* @return the pattern for the regexp
*/
private static String q(String name) {
return e(name, QUOTED);
} | 3.26 |
hadoop_S3LogParser_e_rdh | /**
* Simple entry using the {@link #SIMPLE} pattern.
*
* @param name
* name of the element (for code clarity only)
* @return the pattern for the regexp
*/
private static String e(String name) {
return e(name, SIMPLE);
} | 3.26 |
hadoop_ResponseInfo__r_rdh | // Value is raw HTML and shouldn't be escaped
public ResponseInfo _r(String key, Object value) {
items.add(Item.of(key, value, true));
return this;
} | 3.26 |
hadoop_ResponseInfo_$about_rdh | // Do NOT add any constructors here, unless...
public static ResponseInfo $about(String about) {
ResponseInfo info = new ResponseInfo();
info.about = about;
return info;
} | 3.26 |
hadoop_Hadoop20JHParser_canParse_rdh | /**
* Can this parser parse the input?
*
* @param input
* @return Whether this parser can parse the input.
* @throws IOException
* We will deem a stream to be a good 0.20 job history stream if the
* first line is exactly "Meta VERSION=\"1\" ."
*/
public static boolean canParse(InputStream input) throws IOException {
try {
LineReader reader = new LineReader(input);
Text buffer = new Text();
return (reader.readLine(buffer) != 0) && buffer.toString().equals("Meta VERSION=\"1\" .");
} catch (EOFException e) {
return false;
  }
} | 3.26 |
flink_RelWindowProperties_create_rdh | /**
 * Creates a {@link RelWindowProperties}; may return null if the window properties can't be
 * propagated (i.e. the window start and window end columns are lost).
*/
@Nullable
public static RelWindowProperties create(ImmutableBitSet windowStartColumns, ImmutableBitSet windowEndColumns, ImmutableBitSet windowTimeColumns, WindowSpec windowSpec, LogicalType timeAttributeType) {
  if (windowStartColumns.isEmpty() || windowEndColumns.isEmpty()) {
    // the broadcast of window properties requires both window_start and window_end
    return null;
  } else {
return new RelWindowProperties(windowStartColumns, windowEndColumns, windowTimeColumns, windowSpec, timeAttributeType);
}
} | 3.26 |
flink_KeyValueDataType_m0_rdh | // --------------------------------------------------------------------------------------------
private DataType m0(DataType innerDataType) {
if (conversionClass == MapData.class) {
return innerDataType.bridgedTo(toInternalConversionClass(innerDataType.getLogicalType()));
}
return innerDataType;
} | 3.26 |
flink_SymbolUtil_commonToCalcite_rdh | /**
* Converts from a common to a Calcite symbol. The common symbol can be a publicly exposed one
* such as {@link TimeIntervalUnit} or internal one such as {@link DateTimeUtils.TimeUnitRange}.
*/
public static Enum<?> commonToCalcite(Enum<?> commonSymbol) {
checkCommonSymbol(commonSymbol);
Enum<?> calciteSymbol = commonToCalcite.get(commonSymbol);
if (calciteSymbol == null) {
calciteSymbol = internalCommonToCalcite.get(commonSymbol);
if (calciteSymbol == null) {
throw new UnsupportedOperationException(String.format("Cannot map '%s' to an internal symbol.", commonSymbol));
}
}
return calciteSymbol;
} | 3.26 |
flink_SymbolUtil_addSymbolMapping_rdh | // --------------------------------------------------------------------------------------------
// Helper methods
// --------------------------------------------------------------------------------------------
private static void addSymbolMapping(@Nullable TableSymbol commonSymbol, @Nullable Enum<?> commonInternalSymbol, Enum<?> calciteSymbol, String serializableKind, String serializableValue) {
  checkNotNull(calciteSymbol, "Calcite symbol must not be null.");
  checkNotNull(serializableKind, "Serializable kind must not be null.");
  checkNotNull(serializableValue, "Serializable value must not be null.");
  final SerializableSymbol serializableSymbol = SerializableSymbol.of(serializableKind, serializableValue);
  checkCalciteSymbol(calciteSymbol);
  final Class<?> calciteSymbolClass = calciteSymbol.getDeclaringClass();
  if (calciteToSymbolKind.containsKey(calciteSymbolClass)) {
    checkArgument(calciteToSymbolKind.get(calciteSymbolClass).equals(serializableKind), "All Calcite symbols should map to the same kind.");
  } else {
    calciteToSymbolKind.put(calciteSymbolClass, serializableKind);
}
serializableToCalcite.put(serializableSymbol, calciteSymbol);
calciteToSerializable.put(calciteSymbol, serializableSymbol);
if (commonSymbol != null) {
final Enum<?> commonSymbolEnum = ((Enum<?>) (commonSymbol));
checkCommonSymbol(commonSymbolEnum);
f0.put(calciteSymbol, commonSymbolEnum);
commonToCalcite.put(commonSymbolEnum, calciteSymbol);
}
if (commonInternalSymbol != null) {
checkCommonSymbol(commonInternalSymbol);
calciteToInternalCommon.put(calciteSymbol, commonInternalSymbol);
internalCommonToCalcite.put(commonInternalSymbol, calciteSymbol);
}
} | 3.26 |
flink_SymbolUtil_calciteToCommon_rdh | /**
* Converts from Calcite to a common symbol. The common symbol can be a publicly exposed one
* such as {@link TimeIntervalUnit} or internal one such as {@link DateTimeUtils.TimeUnitRange}.
* Since the common symbol is optional, the input is returned as a fallback.
*/
public static Enum<?> calciteToCommon(Enum<?> calciteSymbol, boolean preferInternal) {
checkCalciteSymbol(calciteSymbol);
Enum<?> internalCommonSymbol = (preferInternal) ? calciteToInternalCommon.get(calciteSymbol) : null;
if (internalCommonSymbol == null) {
internalCommonSymbol = f0.get(calciteSymbol);
if (internalCommonSymbol == null) {
// for cases that have no common representation
// e.g. TRIM
return calciteSymbol;
}
}
return internalCommonSymbol;
} | 3.26 |
flink_SizeBasedWindowFunction_windowSizeAttribute_rdh | /**
* The field for the window size.
*/
default LocalReferenceExpression windowSizeAttribute() {
return localRef("window_size", DataTypes.INT());
} | 3.26 |
flink_JobManagerJobMetricGroup_m0_rdh | // ------------------------------------------------------------------------
// Component Metric Group Specifics
// ------------------------------------------------------------------------
@Override
protected Iterable<? extends ComponentMetricGroup> m0() {
return operators.values();
} | 3.26 |
flink_S3RecoverableWriter_castToS3Recoverable_rdh | // --------------------------- Utils ---------------------------
private static S3Recoverable castToS3Recoverable(CommitRecoverable recoverable) {
if (recoverable instanceof S3Recoverable)
{
return ((S3Recoverable) (recoverable));
}
throw new IllegalArgumentException("S3 File System cannot recover recoverable for other file system: " + recoverable);
} | 3.26 |
flink_S3RecoverableWriter_m1_rdh | // --------------------------- Static Constructor ---------------------------
public static S3RecoverableWriter m1(final FileSystem fs, final FunctionWithException<File, RefCountedFileWithStream, IOException> tempFileCreator, final S3AccessHelper s3AccessHelper, final Executor uploadThreadPool, final long userDefinedMinPartSize, final int maxConcurrentUploadsPerStream) {
checkArgument(userDefinedMinPartSize >= S3_MULTIPART_MIN_PART_SIZE);
final S3RecoverableMultipartUploadFactory uploadFactory = new S3RecoverableMultipartUploadFactory(fs, s3AccessHelper, maxConcurrentUploadsPerStream, uploadThreadPool, tempFileCreator);
return new S3RecoverableWriter(s3AccessHelper, uploadFactory, tempFileCreator, userDefinedMinPartSize);
} | 3.26 |
flink_CrossNode_getOperator_rdh | // ------------------------------------------------------------------------
@Override
public CrossOperatorBase<?, ?, ?, ?> getOperator() {
return ((CrossOperatorBase<?, ?, ?, ?>) (super.getOperator()));
} | 3.26 |
flink_SlicingWindowAggOperatorBuilder_countStarIndex_rdh | /**
* Specify the index position of the COUNT(*) value in the accumulator buffer. This is only
* required for Hopping windows which uses this to determine whether the window is empty and
* then decide whether to register timer for the next window.
*
* @see HoppingSliceAssigner#nextTriggerWindow(long, Supplier)
*/
public SlicingWindowAggOperatorBuilder countStarIndex(int indexOfCountStart) {
  this.indexOfCountStart = indexOfCountStart;
return this;
} | 3.26 |
flink_SinkUtils_tryAcquire_rdh | /**
* Acquire permits on the given semaphore within a given allowed timeout and deal with errors.
*
* @param permits
 *           the number of permits to acquire.
* @param maxConcurrentRequests
* the maximum number of permits the semaphore was initialized
* with.
* @param maxConcurrentRequestsTimeout
* the timeout to acquire the permits.
* @param semaphore
* the semaphore to acquire permits to.
* @throws InterruptedException
* if the current thread was interrupted.
* @throws TimeoutException
* if the waiting time elapsed before all permits were acquired.
*/
public static void tryAcquire(int permits, int maxConcurrentRequests, Duration maxConcurrentRequestsTimeout, Semaphore semaphore) throws InterruptedException, TimeoutException {
if (!semaphore.tryAcquire(permits, maxConcurrentRequestsTimeout.toMillis(), TimeUnit.MILLISECONDS)) {
throw new TimeoutException(String.format("Failed to acquire %d out of %d permits to send value in %s.", permits, maxConcurrentRequests, maxConcurrentRequestsTimeout));
}
} | 3.26 |
flink_HadoopDataOutputStream_getHadoopOutputStream_rdh | /**
* Gets the wrapped Hadoop output stream.
*
* @return The wrapped Hadoop output stream.
*/
public FSDataOutputStream getHadoopOutputStream() {
return f0;
} | 3.26 |
flink_RateLimiter_notifyCheckpointComplete_rdh | /**
* Notifies this {@code RateLimiter} that the checkpoint with the given {@code checkpointId}
* completed and was committed. Makes it possible to implement rate limiters that control data
* emission per checkpoint cycle.
*
* @param checkpointId
* The ID of the checkpoint that has been completed.
*/
default void notifyCheckpointComplete(long checkpointId) {
} | 3.26 |
flink_EndOfPartitionEvent_read_rdh | // ------------------------------------------------------------------------
@Override
public void read(DataInputView in) {
// Nothing to do here
} | 3.26 |
flink_EndOfPartitionEvent_hashCode_rdh | // ------------------------------------------------------------------------
@Override
public int hashCode() {
return 1965146673;
} | 3.26 |
flink_ReusingBuildFirstHashJoinIterator_open_rdh | // --------------------------------------------------------------------------------------------
@Override
public void open() throws IOException, MemoryAllocationException, InterruptedException {
this.hashJoin.open(this.firstInput, this.secondInput, buildSideOuterJoin);
} | 3.26 |
flink_DualInputSemanticProperties_addForwardedField_rdh | /**
* Adds, to the existing information, a field that is forwarded directly from the source
* record(s) in the first input to the destination record(s).
*
* @param input
* the input of the source field
* @param sourceField
* the position in the source record
* @param targetField
* the position in the destination record
*/
public void addForwardedField(int input, int sourceField, int targetField) {
  Map<Integer, FieldSet> fieldMapping;
  if ((input != 0) && (input != 1)) {
    throw new IndexOutOfBoundsException();
  } else if (input == 0) {
fieldMapping = this.fieldMapping1;
} else {
fieldMapping = this.fieldMapping2;
}
if (isTargetFieldPresent(targetField, fieldMapping)) {
throw new InvalidSemanticAnnotationException((("Target field " + targetField) + " was added twice to input ") + input);
}
FieldSet targetFields = fieldMapping.get(sourceField);
if (targetFields != null) {
fieldMapping.put(sourceField, targetFields.addField(targetField));
} else {
fieldMapping.put(sourceField, new FieldSet(targetField));
}
} | 3.26 |
flink_DualInputSemanticProperties_addReadFields_rdh | /**
* Adds, to the existing information, field(s) that are read in the source record(s) from the
* first input.
*
* @param input
* the input of the read fields
* @param readFields
* the position(s) in the source record(s)
*/
public void addReadFields(int input, FieldSet readFields) {
if ((input != 0) && (input != 1)) {
throw new IndexOutOfBoundsException();
} else if (input == 0) {
this.readFields1 = (this.readFields1 == null) ? readFields.clone() : this.readFields1.addFields(readFields);
} else {
this.readFields2 = (this.readFields2 == null) ? readFields.clone() : this.readFields2.addFields(readFields);
}
} | 3.26 |
flink_CheckpointStatsSnapshot_getLatestRestoredCheckpoint_rdh | /**
* Returns the latest restored checkpoint.
*
* @return Latest restored checkpoint or <code>null</code>.
*/
@Nullable
public RestoredCheckpointStats getLatestRestoredCheckpoint() {
return latestRestoredCheckpoint;
} | 3.26 |
flink_PlanJSONDumpGenerator_setEncodeForHTML_rdh | // --------------------------------------------------------------------------------------------
public void setEncodeForHTML(boolean encodeForHTML) {
this.f0 = encodeForHTML;
} | 3.26 |
flink_PlanJSONDumpGenerator_compilePlanToJSON_rdh | // --------------------------------------------------------------------------------------------
private void compilePlanToJSON(List<DumpableNode<?>> nodes, PrintWriter writer) {
// initialization to assign node ids
this.nodeIds = new HashMap<DumpableNode<?>, Integer>();
this.nodeCnt = 0;
// JSON header
writer.print("{\n\t\"nodes\": [\n\n");
// Generate JSON for plan
for (int i = 0; i < nodes.size(); i++) {
visit(nodes.get(i), writer, i == 0);
}
// JSON Footer
writer.println("\n\t]\n}");
} | 3.26 |
flink_JobVertexInputInfo_getExecutionVertexInputInfos_rdh | /**
* The input information of subtasks of this job vertex.
*/
public List<ExecutionVertexInputInfo> getExecutionVertexInputInfos() {
return executionVertexInputInfos;
} | 3.26 |
flink_HiveParserSqlSumAggFunction_isDistinct_rdh | // ~ Methods ----------------------------------------------------------------
@Override
public boolean isDistinct() {
return isDistinct;
} | 3.26 |
flink_Buckets_getMaxPartCounter_rdh | // --------------------------- Testing Methods -----------------------------
@VisibleForTesting
public long getMaxPartCounter() {
return maxPartCounter;
} | 3.26 |
flink_Buckets_initializeState_rdh | /**
* Initializes the state after recovery from a failure.
*
* <p>During this process:
*
* <ol>
* <li>we set the initial value for part counter to the maximum value used before across all
* tasks and buckets. This guarantees that we do not overwrite valid data,
* <li>we commit any pending files for previous checkpoints (previous to the last successful
* one from which we restore),
* <li>we resume writing to the previous in-progress file of each bucket, and
* <li>if we receive multiple states for the same bucket, we merge them.
* </ol>
*
* @param bucketStates
* the state holding recovered state about active buckets.
* @param partCounterState
* the state holding the max previously used part counters.
* @throws Exception
* if anything goes wrong during retrieving the state or restoring/committing
* of any in-progress/pending part files
*/
public void initializeState(final ListState<byte[]> bucketStates, final ListState<Long> partCounterState) throws Exception {
initializePartCounter(partCounterState);
LOG.info("Subtask {} initializing its state (max part counter={}).", subtaskIndex, maxPartCounter);
initializeActiveBuckets(bucketStates);
} | 3.26 |
flink_ArrayListConverter_createObjectArrayKind_rdh | /**
* Creates the kind of array for {@link List#toArray(Object[])}.
*/
private static Object[] createObjectArrayKind(Class<?> elementClazz) {
// e.g. int[] is not a Object[]
if (elementClazz.isPrimitive()) {
return ((Object[]) (Array.newInstance(primitiveToWrapper(elementClazz), 0)));
}
// e.g. int[][] and Integer[] are Object[]
return ((Object[]) (Array.newInstance(elementClazz, 0)));
} | 3.26 |
flink_BlockingBackChannel_getReadEndAfterSuperstepEnded_rdh | /**
* Called by iteration head after it has sent all input for the current superstep through the
* data channel (blocks iteration head).
*/
public DataInputView getReadEndAfterSuperstepEnded() {
try {
return queue.take().switchBuffers();
} catch (InterruptedException | IOException e) {
throw new RuntimeException(e);
}
} | 3.26 |
flink_BlockingBackChannel_getWriteEnd_rdh | /**
* Called by iteration tail to save the output of the current superstep.
*/
public DataOutputView getWriteEnd() {
return buffer;
} | 3.26 |
flink_RowDataUtil_isRetractMsg_rdh | /**
* Returns true if the message is either {@link RowKind#DELETE} or {@link RowKind#UPDATE_BEFORE}, which refers to a retract operation of aggregation.
*/
public static boolean isRetractMsg(RowData row) {
RowKind kind = row.getRowKind();
return (kind == RowKind.UPDATE_BEFORE) || (kind == RowKind.DELETE);
} | 3.26 |
flink_RowDataUtil_isAccumulateMsg_rdh | /**
* Returns true if the message is either {@link RowKind#INSERT} or {@link RowKind#UPDATE_AFTER},
* which refers to an accumulate operation of aggregation.
*/
public static boolean isAccumulateMsg(RowData row) {
RowKind kind = row.getRowKind();
return (kind == RowKind.INSERT) || (kind == RowKind.UPDATE_AFTER);
} | 3.26 |
flink_UnsafeMemoryBudget_reserveMemory_rdh | /**
* Reserve memory of certain size if it is available.
*
* <p>Adjusted version of {@link java.nio.Bits#reserveMemory(long, int)} taken from Java 11.
*/
@SuppressWarnings({ "OverlyComplexMethod", "JavadocReference", "NestedTryStatement" })
void reserveMemory(long size) throws MemoryReservationException {
long availableOrReserved = tryReserveMemory(size);
// optimist!
if (availableOrReserved >= size) {
return;
}
// no luck
throw new MemoryReservationException(String.format((("Could not allocate %d bytes, only %d bytes are remaining. This usually indicates " + "that you are requesting more memory than you have reserved. ") + "However, when running an old JVM version it can also be caused by slow garbage collection. ") + "Try to upgrade to Java 8u72 or higher if running on an old Java version.", size, availableOrReserved));
} | 3.26 |
flink_ObjectColumnSummary_getNonNullCount_rdh | /**
* The number of non-null values in this column.
 */
@Override
public long getNonNullCount() {
return 0;
} | 3.26 |
flink_Optimizer_getDefaultParallelism_rdh | // ------------------------------------------------------------------------
// Getters / Setters
// ------------------------------------------------------------------------
public int getDefaultParallelism() {
return defaultParallelism;
} | 3.26 |
flink_Optimizer_getPostPassFromPlan_rdh | // ------------------------------------------------------------------------
// Miscellaneous
// ------------------------------------------------------------------------
private OptimizerPostPass getPostPassFromPlan(Plan program) {
final String className = program.getPostPassClassName();
if (className == null) {
throw new CompilerException("Optimizer Post Pass class description is null");
}
try {
Class<? extends OptimizerPostPass> clazz = Class.forName(className).asSubclass(OptimizerPostPass.class);
try {
return InstantiationUtil.instantiate(clazz, OptimizerPostPass.class);
} catch (RuntimeException rtex) {
// unwrap the source exception
if (rtex.getCause() != null) {
throw new CompilerException("Cannot instantiate optimizer post pass: " + rtex.getMessage(), rtex.getCause());
} else {
throw rtex;
}
}
} catch (ClassNotFoundException cnfex) {
  throw new CompilerException(("Cannot load Optimizer post-pass class '" + className) + "'.", cnfex);
} catch (ClassCastException ccex) {
  throw new CompilerException(("Class '" + className) + "' is not an optimizer post-pass.", ccex);
}
} | 3.26 |
flink_Optimizer_compile_rdh | /**
* Translates the given program to an OptimizedPlan. The optimized plan describes for each
* operator which strategy to use (such as hash join versus sort-merge join), what data exchange
* method to use (local pipe forward, shuffle, broadcast), what exchange mode to use (pipelined,
* batch), where to cache intermediate results, etc,
*
* <p>The optimization happens in multiple phases:
*
* <ol>
* <li>Create optimizer dag implementation of the program.
* <p><tt>OptimizerNode</tt> representations of the PACTs, assign parallelism and compute
* size estimates.
* <li>Compute interesting properties and auxiliary structures.
* <li>Enumerate plan alternatives. This cannot be done in the same step as the interesting
* property computation (as opposed to the Database approaches), because we support plans
* that are not trees.
* </ol>
*
* @param program
* The program to be translated.
* @param postPasser
* The function to be used for post passing the optimizer's plan and setting
* the data type specific serialization routines.
* @return The optimized plan.
* @throws CompilerException
* Thrown, if the plan is invalid or the optimizer encountered an
* inconsistent situation during the compilation process.
*/
private OptimizedPlan compile(Plan program, OptimizerPostPass postPasser) throws CompilerException {
if ((program == null) || (postPasser == null)) {
throw new NullPointerException();
}
if (LOG.isDebugEnabled()) {
LOG.debug(("Beginning compilation of program '" + program.getJobName()) + '\'');
}
final ExecutionMode defaultDataExchangeMode = program.getExecutionConfig().getExecutionMode();
final int defaultParallelism = (program.getDefaultParallelism() > 0) ? program.getDefaultParallelism() : this.defaultParallelism;
// log the default settings
LOG.debug("Using a default parallelism of {}", defaultParallelism);LOG.debug("Using default data exchange mode {}", defaultDataExchangeMode);
// the first step in the compilation is to create the optimizer plan representation
// this step does the following:
// 1) It creates an optimizer plan node for each operator
// 2) It connects them via channels
// 3) It looks for hints about local strategies and channel types and
// sets the types and strategies accordingly
// 4) It makes estimates about the data volume of the data sources and
// propagates those estimates through the plan
GraphCreatingVisitor graphCreator = new GraphCreatingVisitor(defaultParallelism, defaultDataExchangeMode);
program.accept(graphCreator);
// if we have a plan with multiple data sinks, add logical optimizer nodes that have two data-sinks
// as children each, until we have only a single root node. This allows to transparently deal with
// the nodes with multiple outputs
OptimizerNode rootNode;
if (graphCreator.getSinks().size() == 1) {
rootNode = graphCreator.getSinks().get(0);
} else if (graphCreator.getSinks().size() > 1) {
Iterator<DataSinkNode> iter = graphCreator.getSinks().iterator();
rootNode = iter.next();
while (iter.hasNext()) {
rootNode = new SinkJoiner(rootNode, iter.next());
}
} else {
throw new CompilerException("Bug: The optimizer plan representation has no sinks.");
}
// now that we have all nodes created and recorded which ones consume memory, tell the nodes
// their minimal guaranteed memory, for further cost estimations. We assume an equal
// distribution of memory among consumer tasks
rootNode.accept(new IdAndEstimatesVisitor(this.statistics));
// We need to enforce that union nodes always forward their output to their successor.
// Any partitioning must be either pushed before or done after the union, but not on the
// union's output.
UnionParallelismAndForwardEnforcer v6 = new UnionParallelismAndForwardEnforcer();
rootNode.accept(v6);
// We are dealing with operator DAGs, rather than operator trees.
// That requires us to deviate at some points from the classical DB optimizer algorithms.
// This step builds auxiliary structures to help track branches and joins in the DAG
BranchesVisitor branchingVisitor = new BranchesVisitor();
rootNode.accept(branchingVisitor);
// Propagate the interesting properties top-down through the graph
InterestingPropertyVisitor propsVisitor = new InterestingPropertyVisitor(this.costEstimator);
rootNode.accept(propsVisitor);
// perform a sanity check: the root may not have any unclosed branches
if ((rootNode.getOpenBranches() != null) && (rootNode.getOpenBranches().size() > 0)) {
throw new CompilerException("Bug: Logic for branching plans (non-tree plans) has an error, and does not " + "track the re-joining of branches correctly.");
}
// the final step is now to generate the actual plan alternatives
List<PlanNode> bestPlan = rootNode.getAlternativePlans(this.costEstimator);
if (bestPlan.size() != 1) {
throw new CompilerException("Error in compiler: more than one best plan was created!");
}
// check if the best plan's root is a data sink (single sink plan)
// if so, directly take it. if it is a sink joiner node, get its contained sinks
PlanNode bestPlanRoot = bestPlan.get(0);
List<SinkPlanNode> bestPlanSinks = new ArrayList<SinkPlanNode>(4);
if (bestPlanRoot instanceof SinkPlanNode) {
bestPlanSinks.add(((SinkPlanNode) (bestPlanRoot)));
} else if (bestPlanRoot instanceof SinkJoinerPlanNode) {
((SinkJoinerPlanNode) (bestPlanRoot)).getDataSinks(bestPlanSinks);
}
// finalize the plan
OptimizedPlan plan = new PlanFinalizer().createFinalPlan(bestPlanSinks, program.getJobName(), program);
plan.accept(new BinaryUnionReplacer());
plan.accept(new RangePartitionRewriter(plan));
// post pass the plan. this is the phase where the serialization and comparator code is set
postPasser.postPass(plan);
return plan;
} | 3.26 |
flink_AvroSerializer_isImmutableType_rdh | // ------------------------------------------------------------------------
// Properties
// ------------------------------------------------------------------------
@Override
public boolean isImmutableType() {
return false;
} | 3.26 |
flink_AvroSerializer_readObject_rdh | // -------- backwards compatibility with 1.5, 1.6 -----------
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
/* Please see FLINK-11436 for details on why manual deserialization is required.
During the release of Flink 1.7, the value of serialVersionUID was uptick to 2L (was 1L before)
And although the AvroSerializer (along with it's snapshot class) were migrated to the new serialization
abstraction (hence free from Java serialization), there were composite serializers that were not migrated
and were serialized with Java serialization. In case that one of the nested serializers were Avro we would
bump into deserialization exception due to a wrong serialVersionUID. Unfortunately it is not possible to revert
the serialVersionUID back to 1L, because users might have snapshots with 2L present already.
To overcome this we first need to make sure that the AvroSerializer is being Java deserialized with
FailureTolerantObjectInputStream, and then we determine the serialized layout by looking at the fields.
From: https://docs.oracle.com/javase/8/docs/platform/serialization/spec/class.html#a5421
-------------------------------------------------------------------------------------------------------------
The descriptors for primitive typed fields are written first
sorted by field name followed by descriptors for the object typed fields sorted by field name.
The names are sorted using String.compareTo.
-------------------------------------------------------------------------------------------------------------
pre 1.6 field order: [type]
pre 1.7 field order: [schemaString, type]
post 1.7 field order: [previousSchema, schema, type]
We would use the first field to distinguish between the three different layouts.
To complicate things even further in pre 1.7, the field @schemaString could be
null or a string, but, in post 1.7, the field @previousSchema was never set to null, therefore
we can use the first field to determine the version.
this logic should stay here as long as we support Flink 1.6 (along with Java serialized
TypeSerializers)
*/
final Object firstField = in.readObject();
if (firstField == null) {
// first field can only be NULL in 1.6 (schemaString)
read16Layout(null, in);
} else if (firstField instanceof String) { // first field is a String only in 1.6 (schemaString)
read16Layout(((String) (firstField)), in);
} else if (firstField instanceof Class<?>) { // first field is a Class<?> only in 1.5 (type)
@SuppressWarnings("unchecked")
Class<T> type = ((Class<T>) (firstField));
read15Layout(type);
} else if (firstField instanceof SerializableAvroSchema) {
readCurrentLayout(((SerializableAvroSchema) (firstField)), in);
} else {
throw new IllegalStateException((("Failed to Java-Deserialize an AvroSerializer instance. " + "Was expecting a first field to be either a String or SerializableAvroSchema, but got: ") + "") + firstField.getClass());
}
} | 3.26 |
flink_AvroSerializer_createInstance_rdh | // ------------------------------------------------------------------------
@Override
@SuppressWarnings("unchecked")
public T createInstance() {
checkAvroInitialized();
return ((T) (avroData.newRecord(null, runtimeSchema)));
} | 3.26 |
flink_AvroSerializer_isGenericRecord_rdh | // ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
static boolean isGenericRecord(Class<?> type)
{
return (!SpecificRecord.class.isAssignableFrom(type)) && GenericRecord.class.isAssignableFrom(type);
} | 3.26 |
flink_AvroSerializer_snapshotConfiguration_rdh | // ------------------------------------------------------------------------
// Compatibility and Upgrades
// ------------------------------------------------------------------------
@Override
public TypeSerializerSnapshot<T> snapshotConfiguration() {
if (configSnapshot == null) {
checkAvroInitialized();
configSnapshot = new AvroSerializerSnapshot<>(runtimeSchema, type);
}
return configSnapshot;
} | 3.26 |
flink_AvroSerializer_copy_rdh | // ------------------------------------------------------------------------
// Copying
// ------------------------------------------------------------------------
@Override
public T copy(T from) {
  if (CONCURRENT_ACCESS_CHECK) {
enterExclusiveThread();
}
try {
checkAvroInitialized();
return avroData.deepCopy(runtimeSchema, from);
} finally {
  if (CONCURRENT_ACCESS_CHECK) {
exitExclusiveThread();
}
}
} | 3.26 |
flink_AvroSerializer_m0_rdh | // ------------------------------------------------------------------------
@Nonnull
public Class<T> m0() {
return type;
} | 3.26 |
flink_AvroSerializer_checkAvroInitialized_rdh | // ------------------------------------------------------------------------
// Initialization
// ------------------------------------------------------------------------
private void checkAvroInitialized() {
if (writer == null) {
initializeAvro();
}
} | 3.26 |
flink_AvroSerializer_enterExclusiveThread_rdh | // --------------------------------------------------------------------------------------------
// Concurrency checks
// --------------------------------------------------------------------------------------------
private void enterExclusiveThread() {
// we use simple get, check, set here, rather than CAS
// we don't need lock-style correctness, this is only a sanity-check and we thus
// favor speed at the cost of some false negatives in this check
Thread previous = currentThread;
Thread thisThread = Thread.currentThread();
if (previous == null) {
currentThread = thisThread;
} else if (previous != thisThread) {
throw new IllegalStateException((("Concurrent access to KryoSerializer. Thread 1: " +
thisThread.getName()) + " , Thread 2: ") + previous.getName());
}
} | 3.26 |
flink_DynamicProcessingTimeSessionWindows_mergeWindows_rdh | /**
* Merge overlapping {@link TimeWindow}s.
*/
@Override
public void mergeWindows(Collection<TimeWindow> windows, MergeCallback<TimeWindow> c) {
TimeWindow.mergeWindows(windows, c);
} | 3.26 |
flink_DynamicProcessingTimeSessionWindows_withDynamicGap_rdh | /**
* Creates a new {@code SessionWindows} {@link WindowAssigner} that assigns elements to sessions
* based on the element timestamp.
*
* @param sessionWindowTimeGapExtractor
* The extractor to use to extract the time gap from the
* input elements
* @return The policy.
 */
public static <T> DynamicProcessingTimeSessionWindows<T> withDynamicGap(SessionWindowTimeGapExtractor<T> sessionWindowTimeGapExtractor) {
return new DynamicProcessingTimeSessionWindows<>(sessionWindowTimeGapExtractor);
} | 3.26 |
flink_LocalBufferPool_m2_rdh | /**
* Destroy is called after the produce or consume phase of a task finishes.
*/
@Override
public void m2() {
// NOTE: if you change this logic, be sure to update recycle() as well!
CompletableFuture<?> toNotify = null;
synchronized(availableMemorySegments) {
if (!isDestroyed) {
MemorySegment v14;
while ((v14 = availableMemorySegments.poll()) != null) {
returnMemorySegment(v14);
}
BufferListener listener;
while ((listener = registeredListeners.poll()) != null) {
listener.notifyBufferDestroyed();
}
if (!isAvailable()) {
toNotify = availabilityHelper.getAvailableFuture();
}
isDestroyed = true;
}
}
mayNotifyAvailable(toNotify);
networkBufferPool.destroyBufferPool(this);
} | 3.26 |
flink_LocalBufferPool_requestMemorySegmentFromGlobalWhenAvailable_rdh | /**
* Tries to obtain a buffer from global pool as soon as one pool is available. Note that
* multiple {@link LocalBufferPool}s might wait on the future of the global pool, hence this
* method double-check if a new buffer is really needed at the time it becomes available.
*/
@GuardedBy("availableMemorySegments")
private void requestMemorySegmentFromGlobalWhenAvailable() {
assert Thread.holdsLock(availableMemorySegments);
checkState(!requestingNotificationOfGlobalPoolAvailable, "local buffer pool is already in the state of requesting memory segment from global when it is available.");
requestingNotificationOfGlobalPoolAvailable = true;
assertNoException(networkBufferPool.getAvailableFuture().thenRun(this::onGlobalPoolAvailable));
} | 3.26 |
flink_LocalBufferPool_reserveSegments_rdh | // ------------------------------------------------------------------------
// Properties
// ------------------------------------------------------------------------
@Override
public void reserveSegments(int numberOfSegmentsToReserve) throws IOException {
  checkArgument(numberOfSegmentsToReserve <= numberOfRequiredMemorySegments, "Can not reserve more segments than number of required segments.");
CompletableFuture<?> toNotify = null;
synchronized(availableMemorySegments)
{
checkDestroyed();
if (numberOfRequestedMemorySegments < numberOfSegmentsToReserve) {
availableMemorySegments.addAll(networkBufferPool.requestPooledMemorySegmentsBlocking(numberOfSegmentsToReserve - numberOfRequestedMemorySegments));
toNotify = availabilityHelper.getUnavailableToResetAvailable();
}
}
mayNotifyAvailable(toNotify);
} | 3.26 |
flink_LocalBufferPool_mayNotifyAvailable_rdh | // ------------------------------------------------------------------------
/**
* Notifies the potential segment consumer of the new available segments by completing the
* previous uncompleted future.
*/
private void mayNotifyAvailable(@Nullable CompletableFuture<?> toNotify) {
if (toNotify != null) {
toNotify.complete(null);
}
} | 3.26 |
flink_RelTimeIndicatorConverter_materializeProcTime_rdh | // ----------------------------------------------------------------------------------------
// Utility
// ----------------------------------------------------------------------------------------
private RelNode materializeProcTime(RelNode node) {
// there is no need to add a redundant calc to materialize proc-time if input is empty
// values. Otherwise we need add a PruneEmptyRules after the RelTimeIndicatorConverter to
// remove the redundant calc.
if ((node instanceof FlinkLogicalValues) && FlinkLogicalValues.isEmpty(((FlinkLogicalValues) (node)))) {
return node;
}
Set<Integer> procTimeFieldIndices = gatherProcTimeIndices(node);
return materializeTimeIndicators(node, procTimeFieldIndices);
} | 3.26 |
flink_ShortParser_parseField_rdh | /**
* Static utility to parse a field of type short from a byte sequence that represents text
* characters (such as when read from a file stream).
*
* @param bytes
* The bytes containing the text data that should be parsed.
* @param startPos
* The offset to start the parsing.
* @param length
* The length of the byte sequence (counting from the offset).
* @param delimiter
* The delimiter that terminates the field.
* @return The parsed value.
* @throws NumberFormatException
* Thrown when the value cannot be parsed because the text
* represents not a correct number.
*/
public static final short parseField(byte[] bytes, int startPos, int length, char delimiter) {
  long val = 0;
  boolean neg = false;
  if (bytes[startPos] == delimiter) {
    throw new NumberFormatException("Empty field.");
  }
  if (bytes[startPos] == '-') {
neg = true;
startPos++;
length--;
if ((length == 0) || (bytes[startPos] == delimiter)) {
throw new NumberFormatException("Orphaned minus sign.");
}
}
for (; length > 0; startPos++ , length--) {
if (bytes[startPos] == delimiter) {
return ((short) (neg ? -val : val));
}
if ((bytes[startPos] < 48) || (bytes[startPos] > 57)) {
throw new NumberFormatException("Invalid character.");
}
val *= 10;
val += bytes[startPos] - 48;
if ((val > OVERFLOW_BOUND) && ((!neg) || (val > UNDERFLOW_BOUND))) {
throw new NumberFormatException("Value overflow/underflow");
}
}
return ((short) (neg ? -val : val));
} | 3.26 |
flink_ShortParser_m0_rdh | /**
* Static utility to parse a field of type short from a byte sequence that represents text
* characters (such as when read from a file stream).
*
* @param bytes
* The bytes containing the text data that should be parsed.
* @param startPos
* The offset to start the parsing.
* @param length
* The length of the byte sequence (counting from the offset).
* @return The parsed value.
* @throws NumberFormatException
* Thrown when the value cannot be parsed because the text
* represents not a correct number.
*/
public static final short m0(byte[] bytes, int startPos, int length) {
return parseField(bytes, startPos, length, ((char) (0xffff)));
} | 3.26 |
flink_EitherSerializerSnapshot_getCurrentVersion_rdh | // ------------------------------------------------------------------------
@Override
public int getCurrentVersion() {
return CURRENT_VERSION;
} | 3.26 |
flink_SummaryAggregatorFactory_m0_rdh | /**
* Create a SummaryAggregator for the supplied type.
*
* @param <T>
* the type to aggregate
* @param <R>
* the result type of the aggregation
*/
@SuppressWarnings("unchecked")
public static <T, R> Aggregator<T, R> m0(Class<T> type) {
  if (type == Long.class) {
    return ((Aggregator<T, R>) (new LongSummaryAggregator()));
  } else if (type == LongValue.class) {
    return ((Aggregator<T, R>) (new ValueSummaryAggregator.LongValueSummaryAggregator()));
  } else if (type == Integer.class) {
    return ((Aggregator<T, R>) (new IntegerSummaryAggregator()));
  } else if (type == IntValue.class) {
    return ((Aggregator<T, R>) (new ValueSummaryAggregator.IntegerValueSummaryAggregator()));
  } else if (type == Double.class) {
    return ((Aggregator<T, R>) (new DoubleSummaryAggregator()));
  } else if (type == DoubleValue.class) {
    return ((Aggregator<T, R>) (new ValueSummaryAggregator.DoubleValueSummaryAggregator()));
  } else if (type == Float.class) {
    return ((Aggregator<T, R>) (new FloatSummaryAggregator()));
  } else if (type == FloatValue.class) {
    return ((Aggregator<T, R>) (new ValueSummaryAggregator.FloatValueSummaryAggregator()));
  } else if (type == Short.class) {
    return ((Aggregator<T, R>) (new ShortSummaryAggregator()));
  } else if (type == ShortValue.class) {
    return ((Aggregator<T, R>) (new ValueSummaryAggregator.ShortValueSummaryAggregator()));
  } else if (type == Boolean.class) {
    return ((Aggregator<T, R>) (new BooleanSummaryAggregator()));
  } else if (type == BooleanValue.class) {
    return ((Aggregator<T, R>) (new ValueSummaryAggregator.BooleanValueSummaryAggregator()));
  } else if (type == String.class) {
    return ((Aggregator<T, R>) (new StringSummaryAggregator()));
  } else if (type == StringValue.class) {
    return ((Aggregator<T, R>) (new ValueSummaryAggregator.StringValueSummaryAggregator()));
  } else {
    // rather than error for unsupported types do something very generic
    return ((Aggregator<T, R>) (new ObjectSummaryAggregator()));
  }
} | 3.26 |
flink_ReusingBuildSecondHashJoinIterator_open_rdh | // --------------------------------------------------------------------------------------------
@Override
public void open() throws IOException, MemoryAllocationException, InterruptedException {
this.hashJoin.open(this.secondInput, this.firstInput, buildSideOuterJoin);
} | 3.26 |
flink_RestartStrategies_exponentialDelayRestart_rdh | /**
* Generates a ExponentialDelayRestartStrategyConfiguration.
*
* @param initialBackoff
* Starting duration between restarts
* @param maxBackoff
* The highest possible duration between restarts
* @param backoffMultiplier
* Delay multiplier how many times is the delay longer than before
* @param resetBackoffThreshold
* How long the job must run smoothly to reset the time interval
* @param jitterFactor
* How much the delay may differ (in percentage)
*/
public static ExponentialDelayRestartStrategyConfiguration exponentialDelayRestart(Time initialBackoff, Time maxBackoff, double backoffMultiplier, Time resetBackoffThreshold, double jitterFactor) {
return new ExponentialDelayRestartStrategyConfiguration(initialBackoff, maxBackoff, backoffMultiplier, resetBackoffThreshold, jitterFactor);
} | 3.26 |
flink_RestartStrategies_fromConfiguration_rdh | /**
* Reads a {@link RestartStrategyConfiguration} from a given {@link ReadableConfig}.
*
* @param configuration
* configuration object to retrieve parameters from
* @return {@link Optional#empty()} when no restart strategy parameters provided
*/
public static Optional<RestartStrategyConfiguration> fromConfiguration(ReadableConfig configuration) {
return configuration.getOptional(RestartStrategyOptions.RESTART_STRATEGY).map(confName -> parseConfiguration(confName, configuration));
} | 3.26 |
flink_RestartStrategies_fixedDelayRestart_rdh | /**
* Generates a FixedDelayRestartStrategyConfiguration.
*
* @param restartAttempts
* Number of restart attempts for the FixedDelayRestartStrategy
* @param delayInterval
* Delay in-between restart attempts for the FixedDelayRestartStrategy
* @return FixedDelayRestartStrategy
*/
public static RestartStrategyConfiguration fixedDelayRestart(int restartAttempts, Time delayInterval) {
return new FixedDelayRestartStrategyConfiguration(restartAttempts, delayInterval);
} | 3.26 |
flink_RestartStrategies_failureRateRestart_rdh | /**
* Generates a FailureRateRestartStrategyConfiguration.
*
* @param failureRate
* Maximum number of restarts in given interval {@code failureInterval}
* before failing a job
* @param failureInterval
* Time interval for failures
* @param delayInterval
* Delay in-between restart attempts
*/
public static FailureRateRestartStrategyConfiguration failureRateRestart(int failureRate, Time failureInterval, Time delayInterval) {
return new FailureRateRestartStrategyConfiguration(failureRate, failureInterval, delayInterval);
} | 3.26 |
flink_FlinkRelMetadataQuery_getColumnOriginNullCount_rdh | /**
* Returns origin null count of the given column.
*
* @param rel
* the relational expression
* @param index
* the index of the given column
* @return the null count of the given column if can be estimated, else return null.
*/
public Double getColumnOriginNullCount(RelNode rel, int index) {
for (; ;) {
try {
return columnOriginNullCountHandler.getColumnOriginNullCount(rel, this, index);
} catch (JaninoRelMetadataProvider.NoHandler e) {
columnOriginNullCountHandler = revise(e.relClass, ColumnOriginNullCount.DEF);
}
}
} | 3.26 |
flink_FlinkRelMetadataQuery_getUpsertKeysInKeyGroupRange_rdh | /**
 * Determines the set of upsert minimal keys in a single key group range, which means it can
 * ignore the exchange by partition keys.
*
* <p>Some optimizations can rely on this ability to do upsert in a single key group range.
*/
public Set<ImmutableBitSet> getUpsertKeysInKeyGroupRange(RelNode rel, int[] partitionKeys) {
if (rel instanceof Exchange) {
Exchange v1 = ((Exchange) (rel));
if (Arrays.equals(v1.getDistribution().getKeys().stream().mapToInt(Integer::intValue).toArray(), partitionKeys)) {
rel = v1.getInput();
}
}
return getUpsertKeys(rel);
} | 3.26 |
flink_FlinkRelMetadataQuery_getRelModifiedMonotonicity_rdh | /**
* Returns the {@link RelModifiedMonotonicity} statistic.
*
* @param rel
* the relational expression
* @return the monotonicity for the corresponding RelNode
*/
public RelModifiedMonotonicity getRelModifiedMonotonicity(RelNode rel) {
for (; ;) {
try {
return modifiedMonotonicityHandler.getRelModifiedMonotonicity(rel, this);
} catch (JaninoRelMetadataProvider.NoHandler e) {
modifiedMonotonicityHandler = revise(e.relClass, ModifiedMonotonicity.DEF);
}
}
} | 3.26 |
flink_FlinkRelMetadataQuery_getRelWindowProperties_rdh | /**
* Returns the {@link RelWindowProperties} statistic.
*
* @param rel
* the relational expression
* @return the window properties for the corresponding RelNode
*/
public RelWindowProperties getRelWindowProperties(RelNode rel) {
for (; ;) {
try {
return windowPropertiesHandler.getWindowProperties(rel, this);
} catch (JaninoRelMetadataProvider.NoHandler e) {
windowPropertiesHandler = revise(e.relClass, WindowProperties.DEF);
}
}
}
/**
* Determines the set of upsert minimal keys for this expression. A key is represented as an
* {@link org.apache.calcite.util.ImmutableBitSet} | 3.26 |
flink_FlinkRelMetadataQuery_reuseOrCreate_rdh | /**
* Reuse input metadataQuery instance if it could cast to FlinkRelMetadataQuery class, or create
* one if not.
*
* @param mq
* metadataQuery which try to reuse
* @return a FlinkRelMetadataQuery instance
*/
public static FlinkRelMetadataQuery reuseOrCreate(RelMetadataQuery mq) {
if (mq instanceof FlinkRelMetadataQuery) {
return ((FlinkRelMetadataQuery) (mq));
} else {
return instance();
}
} | 3.26 |
flink_FlinkRelMetadataQuery_getColumnNullCount_rdh | /**
* Returns the null count of the given column.
*
* @param rel
* the relational expression
* @param index
* the index of the given column
* @return the null count of the given column if can be estimated, else return null.
*/
public Double getColumnNullCount(RelNode rel, int index) {
for (; ;) {
  try {
return columnNullCountHandler.getColumnNullCount(rel, this, index);
} catch (JaninoRelMetadataProvider.NoHandler e) {
columnNullCountHandler = revise(e.relClass, ColumnNullCount.DEF);
}
}
} | 3.26 |
flink_Executing_maybeRescale_rdh | /**
* Rescale the job if {@link Context#shouldRescale} is true. Otherwise, force a rescale using
* {@link Executing#forceRescale()} after {@link JobManagerOptions#SCHEDULER_SCALING_INTERVAL_MAX}.
*/
private void maybeRescale() {
  rescaleScheduled = false;
if (context.shouldRescale(getExecutionGraph(), false)) {
getLogger().info("Can change the parallelism of the job. Restarting the job.");
context.goToRestarting(getExecutionGraph(), getExecutionGraphHandler(), getOperatorCoordinatorHandler(), Duration.ofMillis(0L), getFailures());
} else if (scalingIntervalMax != null) {
getLogger().info("The longer the pipeline runs, the more the (small) resource gain is worth the restarting time. " + "Last resource added does not meet {}, force a rescale after {} time({}) if the resource is still there.", JobManagerOptions.MIN_PARALLELISM_INCREASE, JobManagerOptions.SCHEDULER_SCALING_INTERVAL_MAX.key(), scalingIntervalMax);
if (m0().compareTo(scalingIntervalMax) > 0) {
  forceRescale();
} else {
// schedule a force rescale in JobManagerOptions.SCHEDULER_SCALING_INTERVAL_MAX time
context.runIfState(this, this::forceRescale, scalingIntervalMax);
}
}
} | 3.26 |
flink_Executing_forceRescale_rdh | /**
* Force rescaling as long as the target parallelism is different from the current one.
*/
private void forceRescale() {
if (context.shouldRescale(getExecutionGraph(), true)) {
getLogger().info("Added resources are still there after {} time({}), force a rescale.", JobManagerOptions.SCHEDULER_SCALING_INTERVAL_MAX.key(), scalingIntervalMax);
context.goToRestarting(getExecutionGraph(), getExecutionGraphHandler(), getOperatorCoordinatorHandler(), Duration.ofMillis(0L), getFailures());
  }
} | 3.26 |
flink_SyntaxHighlightStyle_getQuotedStyle_rdh | /**
* Returns the style for a SQL character literal, such as {@code 'Hello, world!'}.
*
* @return Style for SQL character literals
*/
public AttributedStyle getQuotedStyle() {
return singleQuotedStyle;
} | 3.26 |
flink_SyntaxHighlightStyle_getHintStyle_rdh | /**
* Returns the style for a SQL hint, such as {@literal /*+ This is a hint *}{@literal /}.
*
* @return Style for SQL hints
*/
public AttributedStyle getHintStyle() {
return hintStyle;
} | 3.26 |
flink_SyntaxHighlightStyle_getCommentStyle_rdh | /**
* Returns the style for a SQL comments, such as {@literal /* This is a comment *}{@literal /}
* or {@literal -- End of line comment}.
*
* @return Style for SQL comments
*/
public AttributedStyle getCommentStyle() {
return commentStyle;
} | 3.26 |
flink_Pool_pollEntry_rdh | /**
* Gets the next cached entry. This blocks until the next entry is available.
*/
public T pollEntry() throws InterruptedException {
return pool.take();
} | 3.26 |
flink_Pool_add_rdh | /**
* Adds an entry to the pool with an optional payload. This method fails if called more often
* than the pool capacity specified during construction.
*/
public synchronized void add(T object) {
if (poolSize >= poolCapacity) {
throw new IllegalStateException("No space left in pool");
}
poolSize++;
addBack(object);
} | 3.26 |
flink_Pool_tryPollEntry_rdh | /**
* Tries to get the next cached entry. If the pool is empty, this method returns null.
*/
@Nullable
public T tryPollEntry() {
return pool.poll();
} | 3.26 |
flink_Pool_addBack_rdh | /**
* Internal callback to put an entry back to the pool.
*/
void addBack(T object) {
pool.add(object);
} | 3.26 |
flink_ExecutorThreadFactory_newThread_rdh | // ------------------------------------------------------------------------
@Override
public Thread newThread(Runnable runnable) {
Thread t = new Thread(group, runnable, namePrefix + threadNumber.getAndIncrement());
t.setDaemon(true);
t.setPriority(threadPriority);
// optional handler for uncaught exceptions
if (exceptionHandler != null) {
t.setUncaughtExceptionHandler(exceptionHandler);
}
return t;
} | 3.26 |
flink_BlockingBackChannelBroker_instance_rdh | /**
* Retrieve singleton instance.
*/
public static Broker<BlockingBackChannel> instance() {
return INSTANCE;
} | 3.26 |
flink_TableFunction_finish_rdh | /**
* This method is called at the end of data processing. After this method is called, no more
* records can be produced for the downstream operators.
*
* <p><b>NOTE:</b>This method does not need to close any resources. You should release external
* resources in the {@link #close()} method. More details can see {@link StreamOperator#finish()}.
*
 * <p><b>Important:</b> Emitting records in the {@link #close()} method has not been possible since
 * flink-1.14; if you need to emit records at the end of data processing, do so in the {@link #finish()} method.
*/
public void finish() throws Exception {
// do nothing
} | 3.26 |
flink_TableFunction_setCollector_rdh | /**
* Internal use. Sets the current collector.
*/
public final void setCollector(Collector<T> collector) {
this.collector = collector;
}
/**
* Returns the result type of the evaluation method.
*
* @deprecated This method uses the old type system and is based on the old reflective
extraction logic. The method will be removed in future versions and is only called when
using the deprecated {@code TableEnvironment.registerFunction(...)} method. The new
reflective extraction logic (possibly enriched with {@link DataTypeHint} and {@link FunctionHint}) should be powerful enough to cover most use cases. For advanced users, it
is possible to override {@link UserDefinedFunction#getTypeInference(DataTypeFactory)} | 3.26 |
flink_TableFunction_collect_rdh | /**
* Emits an (implicit or explicit) output row.
*
* <p>If null is emitted as an explicit row, it will be skipped by the runtime. For implicit
* rows, the row's field will be null.
*
* @param row
* the output row
*/
protected final void collect(T row) {
collector.collect(row);
} | 3.26 |
flink_QueryableStateUtils_createKvStateServer_rdh | /**
* Initializes the {@link KvStateServer server} responsible for sending the requested internal
* state to the {@link KvStateClientProxy client proxy}.
*
* @param address
* the address to bind to.
* @param ports
* the range of ports the state server will attempt to listen to (see {@link org.apache.flink.configuration.QueryableStateOptions#SERVER_PORT_RANGE
* QueryableStateOptions.SERVER_PORT_RANGE}).
* @param eventLoopThreads
* the number of threads to be used to process incoming requests.
* @param queryThreads
* the number of threads to be used to send the actual state.
* @param kvStateRegistry
* the registry with the queryable state.
* @param stats
* statistics to be gathered about the incoming requests.
* @return the {@link KvStateServer state server}.
*/
public static KvStateServer createKvStateServer(final String address, final Iterator<Integer> ports, final int eventLoopThreads, final int queryThreads, final KvStateRegistry kvStateRegistry, final KvStateRequestStats stats) {
Preconditions.checkNotNull(address, "address");
Preconditions.checkNotNull(kvStateRegistry, "registry");
Preconditions.checkNotNull(stats, "stats");
Preconditions.checkArgument(eventLoopThreads >= 1);
Preconditions.checkArgument(queryThreads >= 1);
try {
String classname = "org.apache.flink.queryablestate.server.KvStateServerImpl";
Class<? extends KvStateServer> clazz = Class.forName(classname).asSubclass(KvStateServer.class);
Constructor<? extends KvStateServer> constructor = clazz.getConstructor(String.class, Iterator.class, Integer.class, Integer.class, KvStateRegistry.class, KvStateRequestStats.class);
return constructor.newInstance(address, ports, eventLoopThreads, queryThreads, kvStateRegistry, stats);
} catch (ClassNotFoundException e) {
final String msg = "Could not load Queryable State Server. " + ERROR_MESSAGE_ON_LOAD_FAILURE;
if (LOG.isDebugEnabled()) {
LOG.debug((msg + " Cause: ") + e.getMessage());
} else {
LOG.info(msg);
}
return null;
} catch (InvocationTargetException e) {
LOG.error("Queryable State Server could not be created: ", e.getTargetException());
return null;
} catch (Throwable t) {
LOG.error("Failed to instantiate the Queryable State Server.", t);
return null;
}
} | 3.26 |