name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68)
---|---|---
flink_EmbeddedRocksDBStateBackend_m1_rdh | /**
* Gets the type of the priority queue state. It falls back to the default value if it is
* not explicitly set.
*
* @return The type of the priority queue state.
*/
public PriorityQueueStateType m1() {
return priorityQueueConfig.getPriorityQueueStateType();
} | 3.26 |
flink_EmbeddedRocksDBStateBackend_getWriteBatchSize_rdh | /**
* Gets the max batch size that will be used in {@link RocksDBWriteBatchWrapper}.
*/
public long getWriteBatchSize() {
return writeBatchSize == UNDEFINED_WRITE_BATCH_SIZE ? WRITE_BATCH_SIZE.defaultValue().getBytes() : writeBatchSize;
} | 3.26 |
flink_RestClient_m0_rdh | /**
* Creates a new RestClient for the provided root URL. If the protocol of the URL is "https",
* then SSL is automatically enabled for the REST client.
*/
public static RestClient m0(Configuration configuration, Executor executor, URL rootUrl) throws ConfigurationException {
Preconditions.checkNotNull(configuration);
Preconditions.checkNotNull(rootUrl);
if ("https".equals(rootUrl.getProtocol())) {
configuration = configuration.clone();
configuration.setBoolean(SSL_REST_ENABLED, true);
}
return new RestClient(configuration, executor, rootUrl.getHost(), rootUrl.getPort());
} | 3.26 |
flink_GroupReduceNode_isCombineable_rdh | /**
* Checks whether a combiner function has been given for the function encapsulated by this
* reduce contract.
*
* @return True, if a combiner has been given, false otherwise.
*/
public boolean isCombineable() {
return getOperator().isCombinable();
} | 3.26 |
flink_GroupReduceNode_getOperator_rdh | // ------------------------------------------------------------------------
/**
* Gets the operator represented by this optimizer node.
*
* @return The operator represented by this optimizer node.
*/
@Override
public GroupReduceOperatorBase<?, ?, ?> getOperator() {
return ((GroupReduceOperatorBase<?, ?, ?>) (super.getOperator()));
} | 3.26 |
flink_GroupReduceNode_computeOperatorSpecificDefaultEstimates_rdh | // --------------------------------------------------------------------------------------------
// Estimates
// --------------------------------------------------------------------------------------------
@Override
protected void computeOperatorSpecificDefaultEstimates(DataStatistics statistics) {
// no real estimates possible for a reducer.
} | 3.26 |
flink_DiskCacheManager_increaseNumCachedBytesAndCheckFlush_rdh | // ------------------------------------------------------------------------
// Internal Methods
// ------------------------------------------------------------------------
private void increaseNumCachedBytesAndCheckFlush(int numIncreasedCachedBytes) {
numCachedBytesCounter += numIncreasedCachedBytes;
if (numCachedBytesCounter > maxCachedBytesBeforeFlush) {
forceFlushCachedBuffers();
}
} | 3.26 |
flink_DiskCacheManager_release_rdh | /**
* Releases this {@link DiskCacheManager}; all memory taken by this class will be recycled.
*/
void release() {
Arrays.stream(f0).forEach(SubpartitionDiskCacheManager::release);
partitionFileWriter.release();
} | 3.26 |
flink_DiskCacheManager_getBufferIndex_rdh | /**
* Return the current buffer index.
*
* @param subpartitionId
* the target subpartition id
* @return the finished buffer index
*/
int getBufferIndex(int subpartitionId) {
return f0[subpartitionId].getBufferIndex();
} | 3.26 |
flink_DiskCacheManager_appendEndOfSegmentEvent_rdh | /**
* Append the end-of-segment event to {@link DiskCacheManager}, which indicates the segment has
* finished.
*
* @param record
* the end-of-segment event
* @param subpartitionId
* target subpartition of this record.
*/
void appendEndOfSegmentEvent(ByteBuffer record, int subpartitionId) {
f0[subpartitionId].appendEndOfSegmentEvent(record);
increaseNumCachedBytesAndCheckFlush(record.remaining());
} | 3.26 |
flink_DiskCacheManager_close_rdh | /**
* Closes this {@link DiskCacheManager}; after closing, no more data can be appended to memory.
*/
void close() {
forceFlushCachedBuffers();
} | 3.26 |
flink_DiskCacheManager_startSegment_rdh | // Called by DiskTierProducerAgent
// ------------------------------------------------------------------------
void startSegment(int subpartitionId, int segmentIndex) {
f0[subpartitionId].startSegment(segmentIndex);
} | 3.26 |
flink_DiskCacheManager_append_rdh | /**
* Append buffer to {@link DiskCacheManager}.
*
* @param buffer
* to be managed by this class.
* @param subpartitionId
* the subpartition of this record.
*/
void append(Buffer buffer, int subpartitionId) {
f0[subpartitionId].append(buffer);
increaseNumCachedBytesAndCheckFlush(buffer.readableBytes());
} | 3.26 |
flink_DiskCacheManager_flushBuffers_rdh | /**
* Note that the request to flush buffers may come from the disk check thread or the task
* thread, so this method itself must ensure thread safety.
*/
private synchronized void flushBuffers(boolean forceFlush) {
if ((!forceFlush) && (!hasFlushCompleted.isDone())) {
return;
}
List<PartitionFileWriter.SubpartitionBufferContext> buffersToFlush = new ArrayList<>();
int numToWriteBuffers = getSubpartitionToFlushBuffers(buffersToFlush);
if (numToWriteBuffers > 0) {
CompletableFuture<Void> flushCompletableFuture = partitionFileWriter.write(partitionId, buffersToFlush);
if (!forceFlush) {
hasFlushCompleted = flushCompletableFuture;
}
}
numCachedBytesCounter = 0;
} | 3.26 |
flink_UpsertTestSink_builder_rdh | /**
* Create a {@link UpsertTestSinkBuilder} to construct a new {@link UpsertTestSink}.
*
* @param <IN>
* type of incoming records
* @return {@link UpsertTestSinkBuilder}
*/
public static <IN> UpsertTestSinkBuilder<IN> builder() {
return new UpsertTestSinkBuilder<>();
} | 3.26 |
flink_CommonExecLegacySink_translateToTransformation_rdh | /**
* Translates {@link TableSink} into a {@link Transformation}.
*
* @param withChangeFlag
* Set to true to emit records with change flags.
* @return The {@link Transformation} that corresponds to the translated {@link TableSink}.
*/
@SuppressWarnings("unchecked")
private Transformation<T> translateToTransformation(PlannerBase planner, ExecNodeConfig config, boolean withChangeFlag) {
// if no change flags are requested, verify table is an insert-only (append-only) table.
if ((!withChangeFlag) && needRetraction) {
throw new TableException("Table is not an append-only table. " + "Use the toRetractStream() in order to handle add and retract messages.");
}
final ExecEdge inputEdge = getInputEdges().get(0);
final Transformation<RowData> inputTransform = ((Transformation<RowData>) (inputEdge.translateToPlan(planner)));
final RowType inputRowType = ((RowType) (inputEdge.getOutputType()));
final RowType convertedInputRowType = checkAndConvertInputTypeIfNeeded(inputRowType);
final DataType resultDataType = tableSink.getConsumedDataType();
if (CodeGenUtils.isInternalClass(resultDataType)) {
return ((Transformation<T>) (inputTransform));
} else {
final int rowtimeIndex = getRowtimeIndex(inputRowType);
final DataType physicalOutputType = TableSinkUtils.inferSinkPhysicalDataType(resultDataType, convertedInputRowType, withChangeFlag);
final TypeInformation<T> outputTypeInfo = SinkCodeGenerator.deriveSinkOutputTypeInfo(tableSink, physicalOutputType, withChangeFlag);
final CodeGenOperatorFactory<T> converterOperator = SinkCodeGenerator.generateRowConverterOperator(new CodeGeneratorContext(config, planner.getFlinkContext().getClassLoader()), convertedInputRowType, tableSink, physicalOutputType, withChangeFlag, "SinkConversion", rowtimeIndex);
final String v14 = "SinkConversion To " + resultDataType.getConversionClass().getSimpleName();
return ExecNodeUtil.createOneInputTransformation(inputTransform, createFormattedTransformationName(v14, "SinkConversion", config), createFormattedTransformationDescription(v14, config), converterOperator, outputTypeInfo, inputTransform.getParallelism(), false);
}
} | 3.26 |
flink_StateObjectCollection_empty_rdh | // ------------------------------------------------------------------------
// Helper methods.
// ------------------------------------------------------------------------
@SuppressWarnings("unchecked")
public static <T extends StateObject> StateObjectCollection<T> empty() {
return ((StateObjectCollection<T>) (EMPTY));
} | 3.26 |
flink_StateObjectCollection_hasState_rdh | /**
* Returns true if this contains at least one {@link StateObject}.
*/
public boolean hasState() {
for (StateObject state : stateObjects) {
if (state != null) {
return true;
}
}
return false;
} | 3.26 |
flink_Tuple19_equals_rdh | /**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o
* the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple19)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple19 tuple = ((Tuple19) (o));
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
return false;
}
if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {
return false;
}
if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
return false;
}
if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) {
return false;
}
if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) {
return false;
}
if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) {
return false;
}
if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) {
return false;
}
if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) {
return false;
}
if (f8 != null ? !f8.equals(tuple.f8) : tuple.f8 != null) {
return false;
}
if (f9 != null ? !f9.equals(tuple.f9) : tuple.f9 != null) {
return false;
}
if (f10 != null ? !f10.equals(tuple.f10) : tuple.f10 != null) {
return false;
}
if (f11 != null ? !f11.equals(tuple.f11) : tuple.f11 != null) {
return false;
}
if (f12 != null ? !f12.equals(tuple.f12) : tuple.f12 != null) {
return false;
}
if (f13 != null ? !f13.equals(tuple.f13) : tuple.f13 != null) {
return false;
}
if (f14 != null ? !f14.equals(tuple.f14) : tuple.f14 != null) {
return false;
}
if (f15 != null ? !f15.equals(tuple.f15) : tuple.f15 != null) {
return false;
}
if (f16 != null ? !f16.equals(tuple.f16) : tuple.f16 != null) {
return false;
}
if (f17 != null ? !f17.equals(tuple.f17) : tuple.f17 != null) {
return false;
}
if (f18 != null ? !f18.equals(tuple.f18) : tuple.f18 != null) {
return false;
}
return true;
} | 3.26 |
flink_Tuple19_m1_rdh | /**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
public static <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18> Tuple19<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18> m1(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10, T11 f11, T12 f12, T13 f13, T14 f14, T15 f15, T16 f16, T17 f17, T18 f18) {
return new Tuple19<>(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16, f17, f18);
} | 3.26 |
flink_Tuple19_m0_rdh | /**
* Sets new values to all fields of the tuple.
*
* @param f0
* The value for field 0
* @param f1
* The value for field 1
* @param f2
* The value for field 2
* @param f3
* The value for field 3
* @param f4
* The value for field 4
* @param f5
* The value for field 5
* @param f6
* The value for field 6
* @param f7
* The value for field 7
* @param f8
* The value for field 8
* @param f9
* The value for field 9
* @param f10
* The value for field 10
* @param f11
* The value for field 11
* @param f12
* The value for field 12
* @param f13
* The value for field 13
* @param f14
* The value for field 14
* @param f15
* The value for field 15
* @param f16
* The value for field 16
* @param f17
* The value for field 17
* @param f18
* The value for field 18
*/
public void m0(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10, T11 f11, T12 f12, T13 f13, T14 f14, T15 f15, T16 f16, T17 f17, T18 f18) {
this.f0 = f0;
this.f1 = f1;
this.f2 = f2;
this.f3 = f3;
this.f4 = f4;
this.f5 = f5;
this.f6 = f6;
this.f7 = f7;
this.f8 = f8;
this.f9 = f9;
this.f10 = f10;
this.f11 = f11;
this.f12 = f12;
this.f13 = f13;
this.f14 = f14;
this.f15 = f15;
this.f16 = f16;
this.f17 = f17;
this.f18 = f18;
} | 3.26 |
flink_Tuple19_toString_rdh | // -------------------------------------------------------------------------------------------------
// standard utilities
// -------------------------------------------------------------------------------------------------
/**
* Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7, f8,
* f9, f10, f11, f12, f13, f14, f15, f16, f17, f18), where the individual fields are the value
* returned by calling {@link Object#toString} on that field.
*
* @return The string representation of the tuple.
*/
@Override
public String toString() {
return ((((((((((((((((((((((((((((((((((((("(" + StringUtils.arrayAwareToString(this.f0)) + ",") + StringUtils.arrayAwareToString(this.f1)) + ",") + StringUtils.arrayAwareToString(this.f2)) + ",") + StringUtils.arrayAwareToString(this.f3)) +
",") + StringUtils.arrayAwareToString(this.f4)) + ",") + StringUtils.arrayAwareToString(this.f5)) + ",") + StringUtils.arrayAwareToString(this.f6)) + ",") + StringUtils.arrayAwareToString(this.f7)) + ",") + StringUtils.arrayAwareToString(this.f8)) + ",") + StringUtils.arrayAwareToString(this.f9)) + ",") + StringUtils.arrayAwareToString(this.f10)) + ",") + StringUtils.arrayAwareToString(this.f11)) + ",") + StringUtils.arrayAwareToString(this.f12)) + ",") + StringUtils.arrayAwareToString(this.f13)) + ",") + StringUtils.arrayAwareToString(this.f14)) + ",") + StringUtils.arrayAwareToString(this.f15)) + ",") + StringUtils.arrayAwareToString(this.f16)) + ",") + StringUtils.arrayAwareToString(this.f17)) + ",") + StringUtils.arrayAwareToString(this.f18)) + ")";
} | 3.26 |
flink_Serializers_getContainedGenericTypes_rdh | /**
* Returns all GenericTypeInfos contained in a composite type.
*
* @param typeInfo
* {@link CompositeType}
*/
private static void getContainedGenericTypes(CompositeType<?> typeInfo, List<GenericTypeInfo<?>> target) {
for (int i = 0; i < typeInfo.getArity(); i++) {
TypeInformation<?> type = typeInfo.getTypeAt(i);
if (type instanceof CompositeType) {
getContainedGenericTypes(((CompositeType<?>) (type)), target);
} else if (type instanceof GenericTypeInfo) {
if (!target.contains(type)) {
target.add(((GenericTypeInfo<?>) (type)));
}
}
}
} | 3.26 |
flink_HsMemoryDataManager_getBuffersInOrder_rdh | // Write lock should be acquired before invoke this method.
@Override
public Deque<BufferIndexAndChannel> getBuffersInOrder(int subpartitionId, SpillStatus spillStatus, ConsumeStatusWithId consumeStatusWithId) {
HsSubpartitionMemoryDataManager targetSubpartitionDataManager = getSubpartitionMemoryDataManager(subpartitionId);
return targetSubpartitionDataManager.getBuffersSatisfyStatus(spillStatus, consumeStatusWithId);
} | 3.26 |
flink_HsMemoryDataManager_spillBuffers_rdh | /**
* Spill buffers for each subpartition in a decision.
*
* <p>Note that: The method should not be locked, it is the responsibility of each subpartition
* to maintain thread safety itself.
*
* @param toSpill
* All buffers that need to be spilled in a decision.
*/
private void spillBuffers(Map<Integer, List<BufferIndexAndChannel>> toSpill) {
CompletableFuture<Void> spillingCompleteFuture = new CompletableFuture<>();
List<BufferWithIdentity> bufferWithIdentities = new ArrayList<>();
toSpill.forEach((subpartitionId, bufferIndexAndChannels) -> {
HsSubpartitionMemoryDataManager subpartitionDataManager = getSubpartitionMemoryDataManager(subpartitionId);
bufferWithIdentities.addAll(subpartitionDataManager.spillSubpartitionBuffers(bufferIndexAndChannels, spillingCompleteFuture));
// decrease numUnSpillBuffers as this subpartition's buffers are spilled.
numUnSpillBuffers.getAndAdd(-bufferIndexAndChannels.size());
});
FutureUtils.assertNoException(spiller.spillAsync(bufferWithIdentities).thenAccept(spilledBuffers -> {
fileDataIndex.addBuffers(spilledBuffers);
spillingCompleteFuture.complete(null);
}));
} | 3.26 |
flink_HsMemoryDataManager_append_rdh | // ------------------------------------
// For ResultPartition
// ------------------------------------
/**
* Append record to {@link HsMemoryDataManager}. It will be managed by the {@link HsSubpartitionMemoryDataManager} to which it belongs.
*
* @param record
* to be managed by this class.
* @param targetChannel
* target subpartition of this record.
* @param dataType
* the type of this record. In other words, is it data or event.
*/
public void append(ByteBuffer record, int targetChannel, Buffer.DataType dataType) throws IOException {
try {
getSubpartitionMemoryDataManager(targetChannel).append(record, dataType);
} catch (InterruptedException e) {
throw new IOException(e);
}
} | 3.26 |
flink_HsMemoryDataManager_registerNewConsumer_rdh | /**
* Register {@link HsSubpartitionConsumerInternalOperations} to {@link #subpartitionViewOperationsMap}. It is used to obtain the consumption progress of the
* subpartition.
*/
public HsDataView registerNewConsumer(int subpartitionId, HsConsumerId consumerId, HsSubpartitionConsumerInternalOperations viewOperations) {
HsSubpartitionConsumerInternalOperations oldView = subpartitionViewOperationsMap.get(subpartitionId).put(consumerId, viewOperations);
Preconditions.checkState(oldView == null, "Each subpartition view should have unique consumerId.");
return getSubpartitionMemoryDataManager(subpartitionId).registerNewConsumer(consumerId);
} | 3.26 |
flink_HsMemoryDataManager_releaseBuffers_rdh | /**
* Release buffers for each subpartition in a decision.
*
* <p>Note that: The method should not be locked, it is the responsibility of each subpartition
* to maintain thread safety itself.
*
* @param toRelease
* All buffers that need to be released in a decision.
*/
private void releaseBuffers(Map<Integer, List<BufferIndexAndChannel>> toRelease) {
toRelease.forEach((subpartitionId, subpartitionBuffers) -> getSubpartitionMemoryDataManager(subpartitionId).releaseSubpartitionBuffers(subpartitionBuffers));
} | 3.26 |
flink_HsMemoryDataManager_handleDecision_rdh | // ------------------------------------
// Internal Method
// ------------------------------------
// Attention: Do not call this method within the read lock and subpartition lock, otherwise
// deadlock may occur as this method may acquire the write lock and another subpartition's lock
// inside.
private void handleDecision(@SuppressWarnings("OptionalUsedAsFieldOrParameterType")
Optional<Decision> decisionOpt) {
Decision decision = decisionOpt.orElseGet(() -> callWithLock(() -> spillStrategy.decideActionWithGlobalInfo(this)));
if (!decision.getBufferToSpill().isEmpty()) {
spillBuffers(decision.getBufferToSpill());
}
if (!decision.getBufferToRelease().isEmpty()) {
releaseBuffers(decision.getBufferToRelease());
}
} | 3.26 |
flink_HsMemoryDataManager_getNextBufferIndexToConsume_rdh | // Write lock should be acquired before invoke this method.
@Override
public List<Integer> getNextBufferIndexToConsume(HsConsumerId consumerId) {
ArrayList<Integer> consumeIndexes = new ArrayList<>(numSubpartitions);
for (int channel = 0; channel < numSubpartitions; channel++) {
HsSubpartitionConsumerInternalOperations viewOperation = subpartitionViewOperationsMap.get(channel).get(consumerId);
// Access consuming offset without lock to prevent deadlock.
// A consuming thread may be blocked on the memory data manager lock while holding
// the viewOperation lock.
consumeIndexes.add(viewOperation == null ? -1 : viewOperation.getConsumingOffset(false) + 1);
}
return consumeIndexes;
} | 3.26 |
flink_HsMemoryDataManager_close_rdh | /**
* Close this {@link HsMemoryDataManager}; after closing, no data can be appended to memory and all
* buffers taken by this class will be recycled.
*/
public void close() {
spillAndReleaseAllData();
spiller.close();
poolSizeChecker.shutdown();
} | 3.26 |
flink_HsMemoryDataManager_markBufferReleasedFromFile_rdh | // ------------------------------------
// Callback for subpartition
// ------------------------------------
@Override
public void markBufferReleasedFromFile(int subpartitionId, int bufferIndex) {
fileDataIndex.markBufferReleased(subpartitionId, bufferIndex);
} | 3.26 |
flink_HsMemoryDataManager_getPoolSize_rdh | // ------------------------------------
// For Spilling Strategy
// ------------------------------------
@Override
public int getPoolSize() {
return poolSize.get();
} | 3.26 |
flink_ClassLoadingUtils_runWithContextClassLoader_rdh | /**
* Runs the given supplier in a {@link TemporaryClassLoaderContext} based on the given
* classloader.
*
* @param supplier
* supplier to run
* @param contextClassLoader
* class loader that should be set as the context class loader
*/
public static <T, E extends Throwable> T runWithContextClassLoader(SupplierWithException<T, E> supplier, ClassLoader contextClassLoader) throws E {
try (TemporaryClassLoaderContext ignored = TemporaryClassLoaderContext.of(contextClassLoader)) {
return supplier.get();
}
} | 3.26 |
flink_ClassLoadingUtils_withContextClassLoader_rdh | /**
* Wraps the given executor such that all submitted runnables are run in a {@link TemporaryClassLoaderContext} based on the given classloader.
*
* @param executor
* executor to wrap
* @param contextClassLoader
* class loader that should be set as the context class loader
* @return wrapped executor
*/
public static Executor withContextClassLoader(Executor executor, ClassLoader contextClassLoader) {
return new ContextClassLoaderSettingExecutor(executor, contextClassLoader);
} | 3.26 |
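Both ClassLoadingUtils entries above revolve around temporarily swapping the thread context class loader. The following is a minimal standalone sketch of that underlying pattern using only the JDK; it does not reproduce Flink's TemporaryClassLoaderContext, and the class and method names are illustrative only.

```java
import java.util.concurrent.Executor;

public class ContextClassLoaderDemo {
    /** Runs the task with the given class loader installed as the thread's context class loader. */
    static void runWithContextClassLoader(Runnable task, ClassLoader contextClassLoader) {
        Thread current = Thread.currentThread();
        ClassLoader previous = current.getContextClassLoader();
        current.setContextClassLoader(contextClassLoader);
        try {
            task.run();
        } finally {
            // Always restore the previous context class loader, which is the role
            // TemporaryClassLoaderContext#close() plays in the Flink snippet above.
            current.setContextClassLoader(previous);
        }
    }

    /** Wraps an executor so every submitted runnable sees the given context class loader. */
    static Executor withContextClassLoader(Executor executor, ClassLoader contextClassLoader) {
        return command -> executor.execute(() -> runWithContextClassLoader(command, contextClassLoader));
    }

    public static void main(String[] args) {
        ClassLoader pluginLoader = ContextClassLoaderDemo.class.getClassLoader();
        runWithContextClassLoader(
                () -> System.out.println(Thread.currentThread().getContextClassLoader()),
                pluginLoader);
        // Runnable::run acts as a direct (same-thread) executor for the demo.
        withContextClassLoader(Runnable::run, pluginLoader)
                .execute(() -> System.out.println("task ran with swapped context class loader"));
    }
}
```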
flink_BinaryIndexedSortable_writeIndexAndNormalizedKey_rdh | /**
* Writes the index and the normalized key.
*/
protected void writeIndexAndNormalizedKey(RowData record, long currOffset) {
// add the pointer and the normalized key
this.currentSortIndexSegment.putLong(this.currentSortIndexOffset, currOffset);
if (this.numKeyBytes != 0) {
normalizedKeyComputer.putKey(record, this.currentSortIndexSegment, this.currentSortIndexOffset + OFFSET_LEN);
}
this.currentSortIndexOffset += this.indexEntrySize;
this.numRecords++;
} | 3.26 |
flink_BinaryIndexedSortable_checkNextIndexOffset_rdh | /**
* Checks if we need to request the next index memory segment.
*/
protected boolean checkNextIndexOffset() {
if (this.currentSortIndexOffset > this.lastIndexEntryOffset) {
MemorySegment returnSegment = nextMemorySegment();
if (returnSegment != null) {
this.currentSortIndexSegment = returnSegment;
this.sortIndex.add(this.currentSortIndexSegment);
this.currentSortIndexOffset = 0;
} else {
return false;
}
}
return true;
} | 3.26 |
flink_BinaryIndexedSortable_writeToOutput_rdh | /**
* Spill: Write all records to a {@link AbstractPagedOutputView}.
*/
public void writeToOutput(AbstractPagedOutputView output) throws IOException {
final int numRecords = this.numRecords;
int currentMemSeg = 0;
int currentRecord = 0;
while (currentRecord < numRecords) {
final MemorySegment currentIndexSegment = this.sortIndex.get(currentMemSeg++);
// go through all records in the memory segment
for (int offset = 0; (currentRecord < numRecords) && (offset <= this.lastIndexEntryOffset); currentRecord++ , offset += this.indexEntrySize) {
final long pointer = currentIndexSegment.getLong(offset);
this.recordBuffer.setReadPosition(pointer);
this.serializer.copyFromPagesToView(this.recordBuffer, output);
}
}
} | 3.26 |
flink_WrappingCollector_setCollector_rdh | /**
* Sets the current collector which is used to emit the final result.
*/
public void setCollector(Collector<T> collector) {
this.collector = collector;
} | 3.26 |
flink_WrappingCollector_outputResult_rdh | /**
* Outputs the final result to the wrapped collector.
*/
public void outputResult(T result) {
this.collector.collect(result);
} | 3.26 |
flink_AvroParquetReaders_forGenericRecord_rdh | /**
* Creates a new {@link AvroParquetRecordFormat} that reads the parquet file into Avro {@link GenericRecord GenericRecords}.
*
* <p>To read into {@link GenericRecord GenericRecords}, this method needs an Avro Schema. That
* is because Flink needs to be able to serialize the results in its data flow, which is very
* inefficient without the schema. And while the Schema is stored in the Avro file header, Flink
* needs this schema during 'pre-flight' time when the data flow is set up and wired, which is
* before there is access to the files.
*/
public static StreamFormat<GenericRecord> forGenericRecord(final Schema schema) {
// Must override the lambda representation because of a bug in shading lambda
// serialization, see FLINK-28043 for more details.
return new AvroParquetRecordFormat<>(new GenericRecordAvroTypeInfo(schema), new SerializableSupplier<GenericData>() {
@Override
public GenericData get() {
return GenericData.get();
}
});
} | 3.26 |
flink_AvroParquetReaders_forSpecificRecord_rdh | /**
* Creates a new {@link AvroParquetRecordFormat} that reads the parquet file into Avro {@link org.apache.avro.specific.SpecificRecord SpecificRecords}.
*
* <p>To read into Avro {@link GenericRecord GenericRecords}, use the {@link #forGenericRecord(Schema)} method.
*
* @see #forGenericRecord(Schema)
*/
public static <T extends SpecificRecordBase> StreamFormat<T> forSpecificRecord(final Class<T> typeClass) {
return new AvroParquetRecordFormat<>(new AvroTypeInfo<>(typeClass), () -> SpecificData.get());
} | 3.26 |
flink_AvroParquetReaders_forReflectRecord_rdh | /**
* Creates a new {@link AvroParquetRecordFormat} that reads the parquet file into Avro records
* via reflection.
*
* <p>To read into Avro {@link GenericRecord GenericRecords}, use the {@link #forGenericRecord(Schema)} method.
*
* <p>To read into Avro {@link org.apache.avro.specific.SpecificRecord SpecificRecords}, use the
* {@link #forSpecificRecord(Class)} method.
*
* @see #forGenericRecord(Schema)
* @see #forSpecificRecord(Class)
*/
public static <T> StreamFormat<T> forReflectRecord(final Class<T> typeClass) {
if (SpecificRecordBase.class.isAssignableFrom(typeClass)) {
throw new IllegalArgumentException("Please use AvroParquetReaders.forSpecificRecord(Class<T>) for SpecificRecord.");
} else if (GenericRecord.class.isAssignableFrom(typeClass)) {
throw new IllegalArgumentException((((("Please use AvroParquetReaders.forGenericRecord(Class<T>) for GenericRecord." + "Cannot read and create Avro GenericRecord without specifying the Avro Schema. ") + "This is because Flink needs to be able serialize the results in its data flow, which is") + "very inefficient without the schema. And while the Schema is stored in the Avro file header,") + "Flink needs this schema during 'pre-flight' time when the data flow is set up and wired,") + "which is before there is access to the files");
}
// this is a POJO that Avro will read via reflective deserialization
// for Flink, this is just a plain POJO type
return new AvroParquetRecordFormat<>(TypeExtractor.createTypeInfo(typeClass), () -> ReflectData.get());
} | 3.26 |
flink_SqlGatewayRestEndpointUtils_parseToken_rdh | /**
* Parse token from the result uri.
*/
@Nullable
public static Long parseToken(@Nullable String nextResultUri) {
if ((nextResultUri == null) || (nextResultUri.length() == 0)) {
return null;
}
String[] split = nextResultUri.split("/");
// remove query string
String s = split[split.length - 1];
s = s.replaceAll("\\?.*", "");
return Long.valueOf(s);
} | 3.26 |
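As a worked illustration of the parsing above, the token is simply the last path segment of the URI with any query string stripped. The sketch below mirrors that logic standalone; the URI is a made-up example, not an actual SQL Gateway path.

```java
public class ParseTokenDemo {
    /** Mirrors the parseToken logic above for a standalone demonstration. */
    static Long parseToken(String nextResultUri) {
        if (nextResultUri == null || nextResultUri.length() == 0) {
            return null;
        }
        String[] split = nextResultUri.split("/");
        // take the last path segment and remove the query string
        String s = split[split.length - 1];
        s = s.replaceAll("\\?.*", "");
        return Long.valueOf(s);
    }

    public static void main(String[] args) {
        // prints 5: the trailing "?rowFormat=JSON" is stripped before the number is parsed
        System.out.println(parseToken("/sessions/abc/operations/def/result/5?rowFormat=JSON"));
    }
}
```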
flink_Tuple6_of_rdh | /**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
public static <T0, T1, T2, T3, T4, T5> Tuple6<T0, T1, T2, T3, T4, T5> of(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5) {
return new Tuple6<>(f0, f1, f2, f3, f4, f5);
} | 3.26 |
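A small usage sketch for the factory method above; the element types and values are arbitrary examples, and the compiler infers all six type arguments from the call.

```java
import org.apache.flink.api.java.tuple.Tuple6;

public class Tuple6OfDemo {
    public static void main(String[] args) {
        // Equivalent to: new Tuple6<Integer, Double, String, Boolean, Long, Character>(...)
        Tuple6<Integer, Double, String, Boolean, Long, Character> t =
                Tuple6.of(1, 2.5, "flink", true, 42L, 'x');
        // Fields are accessed positionally, as with all Flink tuples.
        System.out.println(t.f2 + " -> " + t);
    }
}
```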
flink_Tuple6_equals_rdh | /**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o
* the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple6)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple6 tuple = ((Tuple6) (o));
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
return false;
}
if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {
return false;
}
if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
return false;
}
if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) {
return false;
}
if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) {
return false;
}
if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) {
return false;
}
return true;
} | 3.26 |
flink_Tuple6_toString_rdh | // -------------------------------------------------------------------------------------------------
// standard utilities
// -------------------------------------------------------------------------------------------------
/**
* Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5), where the
* individual fields are the value returned by calling {@link Object#toString} on that field.
*
* @return The string representation of the tuple.
*/
@Override
public String toString() {
return ((((((((((("(" + StringUtils.arrayAwareToString(this.f0)) + ",") + StringUtils.arrayAwareToString(this.f1)) +
",") + StringUtils.arrayAwareToString(this.f2)) + ",") + StringUtils.arrayAwareToString(this.f3)) + ",") + StringUtils.arrayAwareToString(this.f4)) + ",") + StringUtils.arrayAwareToString(this.f5)) + ")";
} | 3.26 |
flink_Tuple6_setFields_rdh | /**
* Sets new values to all fields of the tuple.
*
* @param f0
* The value for field 0
* @param f1
* The value for field 1
* @param f2
* The value for field 2
* @param f3
* The value for field 3
* @param f4
* The value for field 4
* @param f5
* The value for field 5
*/
public void setFields(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5) {
this.f0 = f0;
this.f1 = f1;
this.f2 = f2;
this.f3 = f3;
this.f4 = f4;
this.f5 = f5;
} | 3.26 |
flink_RocksDBOptionsFactory_createNativeMetricsOptions_rdh | /**
* This method should enable certain RocksDB metrics to be forwarded to Flink's metrics
* reporter.
*
* <p>Enabling these monitoring options may degrade RocksDB performance and should be set with
* care.
*
* @param nativeMetricOptions
* The options object with the pre-defined options.
* @return The options object on which the additional options are set.
*/
default RocksDBNativeMetricOptions createNativeMetricsOptions(RocksDBNativeMetricOptions nativeMetricOptions) {
return nativeMetricOptions;
} | 3.26 |
flink_RocksDBOptionsFactory_createReadOptions_rdh | /**
* This method should set the additional options on top of the current options object. The
* current options object may contain pre-defined options based on flags that have been
* configured on the state backend.
*
* <p>It is important to set the options on the current object and return the result from the
* setter methods, otherwise the pre-defined options may get lost.
*
* @param currentOptions
* The options object with the pre-defined options.
* @param handlesToClose
* The collection to register newly created {@link org.rocksdb.RocksObject}s.
* @return The options object on which the additional options are set.
*/
default ReadOptions createReadOptions(ReadOptions currentOptions, Collection<AutoCloseable> handlesToClose) {
return currentOptions;
} | 3.26 |
flink_RocksDBOptionsFactory_createWriteOptions_rdh | /**
* This method should set the additional options on top of the current options object. The
* current options object may contain pre-defined options based on flags that have been
* configured on the state backend.
*
* <p>It is important to set the options on the current object and return the result from the
* setter methods, otherwise the pre-defined options may get lost.
*
* @param currentOptions
* The options object with the pre-defined options.
* @param handlesToClose
* The collection to register newly created {@link org.rocksdb.RocksObject}s.
* @return The options object on which the additional options are set.
*/
default WriteOptions createWriteOptions(WriteOptions currentOptions, Collection<AutoCloseable> handlesToClose) {
return currentOptions;
} | 3.26 |
flink_TestSignalHandler_handle_rdh | /**
* Handle an incoming signal.
*
* @param signal
* The incoming signal
*/
@Override
public void handle(Signal signal) {
LOG.warn("RECEIVED SIGNAL {}: SIG{}. Shutting down as requested.", signal.getNumber(), signal.getName());
prevHandler.handle(signal);
} | 3.26 |
flink_TestSignalHandler_register_rdh | /**
* Register some signal handlers.
*/
public static void register() {
synchronized(TestSignalHandler.class) {
if (f0) {
return;
}
f0 = true;
final String[] signals = (System.getProperty("os.name").startsWith("Windows")) ? new String[]{ "TERM", "INT" } : new String[]{ "TERM", "HUP", "INT" };
for (String v1 : signals) {
try {
new Handler(v1);
} catch (Exception e) {
LOG.info("Error while registering signal handler", e);
}
}
}
} | 3.26 |
flink_AutoCloseableRegistry_doClose_rdh | /**
* This implementation implies that any exception is possible during closing.
*/
@Override
protected void doClose(List<AutoCloseable> toClose) throws Exception {
IOUtils.closeAll(reverse(toClose), Throwable.class);
} | 3.26 |
flink_CountEvictor_of_rdh | /**
* Creates a {@code CountEvictor} that keeps the given number of elements in the pane. Eviction
* is done before/after the window function based on the value of doEvictAfter.
*
* @param maxCount
* The number of elements to keep in the pane.
* @param doEvictAfter
* Whether to do eviction after the window function.
*/
public static <W extends Window> CountEvictor<W> of(long maxCount, boolean doEvictAfter) {
return new CountEvictor<>(maxCount, doEvictAfter);
} | 3.26 |
flink_BytesMap_m0_rdh | /**
*
* @param reservedRecordMemory
* whether to keep the reserved fixed record memory; if false, the memory pool is closed.
*/
public void m0(boolean reservedRecordMemory) {
returnSegments(this.bucketSegments);
this.bucketSegments.clear();
if (!reservedRecordMemory) {
memoryPool.close();
}
numElements = 0;
} | 3.26 |
flink_BytesMap_calcSecondHashCode_rdh | // M(the num of buckets) is the nth power of 2, so the second hash code must be odd, and always
// is
// H2(K) = 1 + 2 * ((H1(K)/M) mod (M-1))
protected int calcSecondHashCode(final int firstHashCode) {return (((firstHashCode >> log2NumBuckets) & numBucketsMask2) << 1) + 1;
} | 3.26 |
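The comment above describes double hashing: because the bucket count M is a power of two and the second hash code is forced to be odd, the probe sequence visits every bucket before repeating. The standalone sketch below illustrates that property; the bucket count and bit masks are illustrative assumptions, not the actual BytesMap constants.

```java
public class DoubleHashingDemo {
    public static void main(String[] args) {
        int numBuckets = 16;                 // must be a power of two
        int mask = numBuckets - 1;
        int h1 = "someKey".hashCode();
        // Derive an always-odd second hash from the high bits of h1,
        // analogous to calcSecondHashCode above.
        int h2 = (((h1 >> 4) & (numBuckets / 2 - 1)) << 1) + 1;
        // Because h2 is odd and numBuckets is a power of two, gcd(h2, numBuckets) = 1,
        // so the probe sequence below touches all 16 buckets before repeating.
        for (int step = 0; step < numBuckets; step++) {
            int pos = (h1 + step * h2) & mask;
            System.out.println("probe " + step + " -> bucket " + pos);
        }
    }
}
```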
flink_BytesMap_reset_rdh | /**
* reset the map's record and bucket area's memory segments for reusing.
*/
public void reset() {
setBucketVariables(bucketSegments);
resetBucketSegments(bucketSegments);
numElements = 0;
LOG.debug("reset BytesHashMap with record memory segments {}, {} in bytes, init allocating {} for bucket area.", memoryPool.freePages(), memoryPool.freePages() * segmentSize, bucketSegments.size());
} | 3.26 |
flink_BytesMap_growAndRehash_rdh | /**
*
* @throws EOFException
* if the map can't allocate much more memory.
*/
protected void growAndRehash() throws EOFException {
// allocate the new data structures
int required = 2 * bucketSegments.size();
if ((required * ((long) (numBucketsPerSegment))) > Integer.MAX_VALUE) {
LOG.warn("We can't handle more than Integer.MAX_VALUE buckets (eg. because hash functions return int)");
throw new EOFException();
}
int numAllocatedSegments = required - memoryPool.freePages();
if (numAllocatedSegments > 0) {
LOG.warn("BytesHashMap can't allocate {} pages, and now used {} pages", required, reservedNumBuffers);
throw new EOFException();
}
List<MemorySegment> newBucketSegments = memoryPool.allocateSegments(required);
setBucketVariables(newBucketSegments);
long reHashStartTime = System.currentTimeMillis();
resetBucketSegments(newBucketSegments);
// Re-mask (we don't recompute the hashcode because we stored all 32 bits of it)
for (MemorySegment memorySegment : bucketSegments) {
for (int j = 0; j < numBucketsPerSegment; j++) {
final int recordPointer = memorySegment.getInt(j * f0);
if (recordPointer != END_OF_LIST) {
final int hashCode1 = memorySegment.getInt((j * f0) + ELEMENT_POINT_LENGTH);
int newPos = hashCode1 & numBucketsMask;
int bucketSegmentIndex = newPos >>> numBucketsPerSegmentBits;
int bucketOffset = (newPos & numBucketsPerSegmentMask) << BUCKET_SIZE_BITS;
int step = STEP_INCREMENT;
long hashCode2 = 0;
while (newBucketSegments.get(bucketSegmentIndex).getInt(bucketOffset) != END_OF_LIST) {
if (step == 1) {
hashCode2 = calcSecondHashCode(hashCode1);
}
newPos = ((int) ((hashCode1 + (step * hashCode2)) & numBucketsMask));
// which segment contains the bucket
bucketSegmentIndex = newPos >>> numBucketsPerSegmentBits;
// offset of the bucket in the segment
bucketOffset = (newPos & numBucketsPerSegmentMask) << BUCKET_SIZE_BITS;
step += STEP_INCREMENT;
}
newBucketSegments.get(bucketSegmentIndex).putInt(bucketOffset, recordPointer);
newBucketSegments.get(bucketSegmentIndex).putInt(bucketOffset + ELEMENT_POINT_LENGTH, hashCode1);
}
}
}
LOG.info("The rehash take {} ms for {} segments", System.currentTimeMillis() - reHashStartTime, required);
this.memoryPool.returnAll(this.bucketSegments);
this.bucketSegments = newBucketSegments;
} | 3.26 |
flink_BytesMap_lookup_rdh | /**
*
* @param key
* the key used to look up the value in the hash map. Only keys in the
* BinaryRowData form that span a single MemorySegment are supported.
* @return {@link LookupInfo}
*/
public LookupInfo<K, V> lookup(K key) {
final int hashCode1 = key.hashCode();
int v6 = hashCode1 & numBucketsMask;
// which segment contains the bucket
int bucketSegmentIndex = v6 >>> numBucketsPerSegmentBits;
// offset of the bucket in the segment
int bucketOffset = (v6 & numBucketsPerSegmentMask) << BUCKET_SIZE_BITS;
boolean found = false;
int step = STEP_INCREMENT;
int hashCode2 = 0;
int findElementPtr;
try {
do {
findElementPtr = bucketSegments.get(bucketSegmentIndex).getInt(bucketOffset);
if (findElementPtr == END_OF_LIST) {
// This is a new key.
break;
} else {
final int storedHashCode = bucketSegments.get(bucketSegmentIndex).getInt(bucketOffset + ELEMENT_POINT_LENGTH);
if (hashCode1 == storedHashCode) {
recordArea.setReadPosition(findElementPtr);
if (recordArea.readKeyAndEquals(key)) {
// we found an element with a matching key, and not just a hash
// collision
found = true;
reusedValue = recordArea.readValue(reusedValue);
break;
}
}
}
if (step == 1) {
hashCode2 = calcSecondHashCode(hashCode1);
}
v6 = (hashCode1 + (step * hashCode2)) & numBucketsMask;
// which segment contains the bucket
bucketSegmentIndex = v6 >>> numBucketsPerSegmentBits;
// offset of the bucket in the segment
bucketOffset = (v6 & numBucketsPerSegmentMask) << BUCKET_SIZE_BITS;
step += STEP_INCREMENT;
} while (true);
} catch (IOException ex) {
throw new RuntimeException("Error reading record from the aggregate map: " + ex.getMessage(), ex);
}
reuseLookupInfo.set(found, hashCode1, key, reusedValue, bucketSegmentIndex, bucketOffset);
return reuseLookupInfo;
} | 3.26 |
flink_SavepointMetadataV2_getNewOperators_rdh | /**
*
* @return List of new operator states for the savepoint, represented by their target {@link OperatorID} and {@link StateBootstrapTransformation}.
*/
public List<StateBootstrapTransformationWithID<?>> getNewOperators() {
return operatorStateIndex.values().stream().filter(OperatorStateSpecV2::isNewStateTransformation).map(OperatorStateSpecV2::asNewStateTransformation).collect(Collectors.toList());
} | 3.26 |
flink_SavepointMetadataV2_getOperatorState_rdh | /**
*
* @return Operator state for the given UID.
* @throws IOException
* If the savepoint does not contain operator state with the given uid.
*/
public OperatorState getOperatorState(OperatorIdentifier identifier) throws IOException {
OperatorID operatorID = identifier.getOperatorId();
OperatorStateSpecV2 operatorState = operatorStateIndex.get(operatorID);
if ((operatorState == null) || operatorState.isNewStateTransformation()) {
throw new IOException("Savepoint does not contain state with operator " + identifier.getUid().map(uid -> "uid " + uid).orElse("hash " + operatorID.toHexString()));
}
return operatorState.asExistingState();
} | 3.26 |
flink_SavepointMetadataV2_getExistingOperators_rdh | /**
*
* @return List of {@link OperatorState} that already exists within the savepoint.
*/
public List<OperatorState> getExistingOperators() {
return operatorStateIndex.values().stream().filter(OperatorStateSpecV2::isExistingState).map(OperatorStateSpecV2::asExistingState).collect(Collectors.toList());
} | 3.26 |
flink_TableStreamOperator_computeMemorySize_rdh | /**
* Computes the memory size from the memory fraction.
*/
public long computeMemorySize() {
final Environment environment = getContainingTask().getEnvironment();
return environment.getMemoryManager().computeMemorySize(getOperatorConfig().getManagedMemoryFractionOperatorUseCaseOfSlot(ManagedMemoryUseCase.OPERATOR, environment.getTaskManagerInfo().getConfiguration(), environment.getUserCodeClassLoader().asClassLoader()));
} | 3.26 |
flink_TwoInputNode_getOperator_rdh | // ------------------------------------------------------------------------
@Override
public DualInputOperator<?, ?, ?, ?> getOperator() {
return ((DualInputOperator<?, ?, ?, ?>) (super.getOperator()));
} | 3.26 |
flink_TwoInputNode_getSecondIncomingConnection_rdh | /**
* Gets the DagConnection through which this node receives its <i>second</i> input.
*
* @return The second input connection.
*/
public DagConnection getSecondIncomingConnection() {
return this.input2;
} | 3.26 |
flink_TwoInputNode_accept_rdh | // --------------------------------------------------------------------------------------------
// Miscellaneous
// --------------------------------------------------------------------------------------------
@Override
public void accept(Visitor<OptimizerNode> visitor) {
if (visitor.preVisit(this)) {
if ((this.input1 == null) || (this.input2 == null)) {
throw new CompilerException();
}
getFirstPredecessorNode().accept(visitor);
getSecondPredecessorNode().accept(visitor);
for (DagConnection connection : getBroadcastConnections()) {
connection.getSource().accept(visitor);
}
visitor.postVisit(this);
}
} | 3.26 |
flink_CliOptionsParser_printHelpClient_rdh | // --------------------------------------------------------------------------------------------
// Help
// --------------------------------------------------------------------------------------------
/**
* Prints the help for the client.
*/
public static void printHelpClient(PrintWriter writer) {
writer.println("./sql-client [MODE] [OPTIONS]");
writer.println();
writer.println("The following options are available:");
printHelpEmbeddedModeClient(writer);
printHelpGatewayModeClient(writer);
writer.println();
} | 3.26 |
flink_CliOptionsParser_checkUrl_rdh | // --------------------------------------------------------------------------------------------
private static URL checkUrl(CommandLine line, Option option) {
final List<URL> urls = checkUrls(line, option);
if ((urls != null) && (!urls.isEmpty())) {
return urls.get(0);
}
return null;
} | 3.26 |
flink_CliOptionsParser_parseEmbeddedModeClient_rdh | // --------------------------------------------------------------------------------------------
// Line Parsing
// --------------------------------------------------------------------------------------------
public static EmbeddedCliOptions parseEmbeddedModeClient(String[] args) {
try {
DefaultParser parser = new DefaultParser();
CommandLine line = parser.parse(EMBEDDED_MODE_CLIENT_OPTIONS, args, true);
return new CliOptions.EmbeddedCliOptions(line.hasOption(CliOptionsParser.OPTION_HELP.getOpt()), checkSessionId(line), checkUrl(line, CliOptionsParser.OPTION_INIT_FILE), checkUrl(line, CliOptionsParser.OPTION_FILE), line.getOptionValue(CliOptionsParser.OPTION_UPDATE.getOpt()), line.getOptionValue(CliOptionsParser.OPTION_HISTORY.getOpt()), checkUrls(line, CliOptionsParser.OPTION_JAR), checkUrls(line, CliOptionsParser.OPTION_LIBRARY), getPythonConfiguration(line), line.getOptionProperties(OPTION_SESSION_CONFIG.getOpt()));
} catch (ParseException e) {
throw new SqlClientException(e.getMessage());
}
} | 3.26 |
flink_KafkaStandaloneGenerator_main_rdh | /**
* Entry point to the kafka data producer.
*/
public static void main(String[] args) throws Exception {
final KafkaCollector[] collectors = new KafkaCollector[NUM_PARTITIONS];
// create the generator threads
for (int i = 0; i < collectors.length; i++) {
collectors[i] = new KafkaCollector(BROKER_ADDRESS, TOPIC, i);
}
StandaloneThreadedGenerator.runGenerator(collectors);
} | 3.26 |
flink_FileSystemCommitter_commitPartitions_rdh | /**
* Commits the partitions with a filter to filter out invalid task attempt files. In speculative
* execution mode, there might be some files which do not belong to the finished attempt.
*
* @param taskAttemptFilter
* the filter that accepts subtaskIndex and attemptNumber
* @throws Exception
* if partition commitment fails
*/
public void commitPartitions(BiPredicate<Integer, Integer> taskAttemptFilter) throws Exception {
FileSystem fs = factory.create(tmpPath.toUri());
List<Path> taskPaths = listTaskTemporaryPaths(fs, tmpPath, taskAttemptFilter);
try (PartitionLoader loader = new PartitionLoader(overwrite, fs, metaStoreFactory, isToLocal, identifier, policies)) {
if (partitionColumnSize > 0) {
if (taskPaths.isEmpty() && (!f0.isEmpty())) {
if (partitionColumnSize == f0.size()) {
loader.loadEmptyPartition(this.f0);
}
} else {
for (Map.Entry<LinkedHashMap<String, String>, List<Path>> entry : collectPartSpecToPaths(fs, taskPaths, partitionColumnSize).entrySet()) {
loader.loadPartition(entry.getKey(), entry.getValue(), true);
}
}
} else {
loader.loadNonPartition(taskPaths, true);
}
} finally {
for (Path taskPath : taskPaths) {
fs.delete(taskPath, true);
}
}
} | 3.26 |
flink_FileSystemCommitter_commitPartitionsWithFiles_rdh | /**
* For committing job's output after successful batch job completion, it will commit with the
* given partitions and corresponding files written which means it'll move the temporary files
* to partition's location.
*/
public void commitPartitionsWithFiles(Map<String, List<Path>> partitionsFiles) throws Exception {
FileSystem fs = factory.create(tmpPath.toUri());
try (PartitionLoader loader = new PartitionLoader(overwrite, fs, metaStoreFactory, isToLocal, identifier, policies)) {
if (partitionColumnSize > 0) {
if (partitionsFiles.isEmpty() && (!f0.isEmpty())) {
if (partitionColumnSize == f0.size()) {
loader.loadEmptyPartition(this.f0);
}
} else {
for (Map.Entry<String, List<Path>> partitionFile : partitionsFiles.entrySet()) {
LinkedHashMap<String, String> partSpec = extractPartitionSpecFromPath(new Path(partitionFile.getKey()));
loader.loadPartition(partSpec, partitionFile.getValue(), false);
}
}
} else {
List<Path> files = new ArrayList<>();
partitionsFiles.values().forEach(files::addAll);
loader.loadNonPartition(files, false);
}
}
} | 3.26 |
flink_SerializedCheckpointData_fromDeque_rdh | /**
* Converts a list of checkpoints into an array of SerializedCheckpointData.
*
* @param checkpoints
* The checkpoints to be converted into IdsCheckpointData.
* @param serializer
* The serializer to serialize the IDs.
* @param outputBuffer
* The reusable serialization buffer.
* @param <T>
* The type of the ID.
* @return An array of serializable SerializedCheckpointData, one per entry in the queue.
* @throws IOException
* Thrown, if the serialization fails.
*/
public static <T> SerializedCheckpointData[] fromDeque(ArrayDeque<Tuple2<Long, Set<T>>> checkpoints, TypeSerializer<T> serializer, DataOutputSerializer outputBuffer) throws IOException {
SerializedCheckpointData[] serializedCheckpoints = new SerializedCheckpointData[checkpoints.size()];
int v1 = 0;
for (Tuple2<Long, Set<T>> checkpoint : checkpoints) {
outputBuffer.clear();
Set<T> checkpointIds = checkpoint.f1;
for (T id : checkpointIds) {
serializer.serialize(id, outputBuffer);
}
serializedCheckpoints[v1++] = new SerializedCheckpointData(checkpoint.f0, outputBuffer.getCopyOfBuffer(), checkpointIds.size());
}
return serializedCheckpoints;
} | 3.26 |
flink_SerializedCheckpointData_getNumIds_rdh | /**
* Gets the number of IDs in the checkpoint.
*
* @return The number of IDs in the checkpoint.
*/
public int getNumIds() {
return numIds;
} | 3.26 |
flink_SerializedCheckpointData_toDeque_rdh | // ------------------------------------------------------------------------
// De-Serialize from Checkpoint
// ------------------------------------------------------------------------
/**
* De-serializes an array of SerializedCheckpointData back into an ArrayDeque of element
* checkpoints.
*
* @param data
* The data to be deserialized.
* @param serializer
* The serializer used to deserialize the data.
* @param <T>
* The type of the elements.
* @return An ArrayDeque of element checkpoints.
* @throws IOException
* Thrown, if the serialization fails.
*/
public static <T> ArrayDeque<Tuple2<Long, Set<T>>> toDeque(SerializedCheckpointData[] data, TypeSerializer<T> serializer) throws IOException {
ArrayDeque<Tuple2<Long, Set<T>>> deque = new ArrayDeque<>(data.length);
DataInputDeserializer deser = null;
for (SerializedCheckpointData checkpoint : data) {
byte[] serializedData = checkpoint.getSerializedData();
if (deser == null) {
deser = new DataInputDeserializer(serializedData, 0, serializedData.length);
} else {
deser.setBuffer(serializedData);
}
final Set<T> ids = CollectionUtil.newHashSetWithExpectedSize(checkpoint.getNumIds());
final int numIds = checkpoint.getNumIds();
for (int i = 0; i < numIds; i++) {
ids.add(serializer.deserialize(deser));
}
deque.addLast(new Tuple2<Long, Set<T>>(checkpoint.checkpointId, ids));
}
return deque;
} | 3.26 |
flink_CompileUtils_compile_rdh | /**
* Compiles a generated code to a Class.
*
* @param cl
* the ClassLoader used to load the class
* @param name
* the class name
* @param code
* the generated code
* @param <T>
* the class type
* @return the compiled class
*/
@SuppressWarnings("unchecked")
public static <T> Class<T> compile(ClassLoader cl, String name, String code) {
try {
// The class name is part of the "code" and makes the string unique,
// to prevent class leaks we don't cache the class loader directly
// but only its hash code
final ClassKey classKey = new ClassKey(cl.hashCode(), code);
return ((Class<T>) (COMPILED_CLASS_CACHE.get(classKey, () -> doCompile(cl, name, code))));
} catch (Exception e) {
throw new FlinkRuntimeException(e.getMessage(), e);
}
} | 3.26 |
flink_CompileUtils_compileExpression_rdh | /**
* Compiles an expression code to a janino {@link ExpressionEvaluator}.
*
* @param code
* the expression code
* @param argumentNames
* the expression argument names
* @param argumentClasses
* the expression argument classes
* @param returnClass
* the return type of the expression
* @return the compiled class
*/
public static ExpressionEvaluator compileExpression(String code, List<String> argumentNames, List<Class<?>> argumentClasses, Class<?> returnClass) {
try {
ExpressionKey key = new ExpressionKey(code, argumentNames, argumentClasses, returnClass);
return COMPILED_EXPRESSION_CACHE.get(key, () -> {
ExpressionEvaluator expressionEvaluator = new ExpressionEvaluator();
// Input args
expressionEvaluator.setParameters(argumentNames.toArray(new String[0]), argumentClasses.toArray(new Class[0]));
// Result type
expressionEvaluator.setExpressionType(returnClass);
try {
// Compile
expressionEvaluator.cook(code);
} catch (CompileException e) {
throw new InvalidProgramException("Table program cannot be compiled. This is a bug. Please file an issue.\nExpression: " + code, e);
}
return expressionEvaluator;
});
} catch (Exception e) {
throw new FlinkRuntimeException(e.getMessage(), e);
}
} | 3.26 |
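For context, this is a minimal standalone use of Janino's ExpressionEvaluator, the object that compileExpression above configures and caches; the expression, argument names, and types here are arbitrary examples.

```java
import org.codehaus.janino.ExpressionEvaluator;

public class ExpressionEvaluatorDemo {
    public static void main(String[] args) throws Exception {
        ExpressionEvaluator evaluator = new ExpressionEvaluator();
        // Declare the expression's input parameters and result type.
        evaluator.setParameters(new String[] {"a", "b"}, new Class[] {int.class, int.class});
        evaluator.setExpressionType(int.class);
        // Compile once, then evaluate with concrete arguments.
        evaluator.cook("a * b + 1");
        Object result = evaluator.evaluate(new Object[] {6, 7});
        System.out.println(result); // prints 43
    }
}
```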
flink_CompileUtils_cleanUp_rdh | /**
* Triggers internal garbage collection of expired cache entries.
*/
public static void cleanUp() {
COMPILED_CLASS_CACHE.cleanUp();
COMPILED_EXPRESSION_CACHE.cleanUp();
} | 3.26 |
flink_GivenJavaClasses_javaClassesThat_rdh | /**
* Equivalent of {@link ArchRuleDefinition#classes()}, but only for Java classes.
*/
public static GivenClassesConjunction javaClassesThat(DescribedPredicate<JavaClass> predicate) {
return classes().that(areJavaClasses()).and(predicate);
} | 3.26 |
flink_GivenJavaClasses_noJavaClassesThat_rdh | /**
* Equivalent of {@link ArchRuleDefinition#noClasses()}, but only for Java classes.
*/
public static GivenClassesConjunction noJavaClassesThat(DescribedPredicate<JavaClass> predicate) {
return noClasses().that(areJavaClasses()).and(predicate);
} | 3.26 |
flink_NettyProtocol_getClientChannelHandlers_rdh | /**
* Returns the client channel handlers.
*
* <pre>
* +-----------+----------+ +----------------------+
* | Remote input channel | | request client |
* +-----------+----------+ +-----------+----------+
* | | (1) write
* +---------------+-----------------------------------+---------------+
* | | CLIENT CHANNEL PIPELINE | |
* | | \|/ |
* | +----------+----------+ +----------------------+ |
* | | Request handler + | Message encoder | |
* | +----------+----------+ +-----------+----------+ |
* | /|\ \|/ |
* | | | |
* | +----------+------------+ | |
* | | Message+Frame decoder | | |
* | +----------+------------+ | |
* | /|\ | |
* +---------------+-----------------------------------+---------------+
* | | (3) server response \|/ (2) client request
* +---------------+-----------------------------------+---------------+
* | | | |
* | [ Socket.read() ] [ Socket.write() ] |
* | |
* | Netty Internal I/O Threads (Transport Implementation) |
* +-------------------------------------------------------------------+
* </pre>
*
* @return channel handlers
*/
public ChannelHandler[] getClientChannelHandlers() {
NetworkClientHandler networkClientHandler = new CreditBasedPartitionRequestClientHandler();
return new ChannelHandler[]{ messageEncoder, new NettyMessageClientDecoderDelegate(networkClientHandler), networkClientHandler };
} | 3.26 |
flink_NettyProtocol_getServerChannelHandlers_rdh | /**
* Returns the server channel handlers.
*
* <pre>
* +-------------------------------------------------------------------+
* | SERVER CHANNEL PIPELINE |
* | |
* | +----------+----------+ (3) write +----------------------+ |
* | | Queue of queues +----------->| Message encoder | |
* | +----------+----------+ +-----------+----------+ |
* | /|\ \|/ |
* | | (2) enqueue | |
* | +----------+----------+ | |
* | | Request handler | | |
* | +----------+----------+ | |
* | /|\ | |
* | | | |
* | +-----------+-----------+ | |
* | | Message+Frame decoder | | |
* | +-----------+-----------+ | |
* | /|\ | |
* +---------------+-----------------------------------+---------------+
* | | (1) client request \|/
* +---------------+-----------------------------------+---------------+
* | | | |
* | [ Socket.read() ] [ Socket.write() ] |
* | |
* | Netty Internal I/O Threads (Transport Implementation) |
* +-------------------------------------------------------------------+
* </pre>
*
* @return channel handlers
*/
public ChannelHandler[] getServerChannelHandlers() {
PartitionRequestQueue queueOfPartitionQueues = new PartitionRequestQueue();
PartitionRequestServerHandler serverHandler = new PartitionRequestServerHandler(partitionProvider, taskEventPublisher, queueOfPartitionQueues);
return new ChannelHandler[]{ messageEncoder, new NettyMessage.NettyMessageDecoder(), serverHandler, queueOfPartitionQueues };
} | 3.26 |
flink_TpcdsTestProgram_prepareTableEnv_rdh | /**
* Prepares the TableEnvironment for the TPC-DS queries.
*
* @param sourceTablePath
* the path of the generated TPC-DS source tables
* @param useTableStats
* whether to register table statistics for the optimizer
* @return the configured TableEnvironment
*/
private static TableEnvironment prepareTableEnv(String sourceTablePath, Boolean useTableStats) {
// init Table Env
EnvironmentSettings environmentSettings = EnvironmentSettings.inBatchMode();
TableEnvironment tEnv = TableEnvironment.create(environmentSettings);
// config Optimizer parameters
// TODO use the default shuffle mode of batch runtime mode once FLINK-23470 is implemented
tEnv.getConfig().set(ExecutionConfigOptions.TABLE_EXEC_SHUFFLE_MODE, GlobalStreamExchangeMode.POINTWISE_EDGES_PIPELINED.toString());
tEnv.getConfig().set(OptimizerConfigOptions.TABLE_OPTIMIZER_BROADCAST_JOIN_THRESHOLD, (10 * 1024) * 1024L);
tEnv.getConfig().set(OptimizerConfigOptions.TABLE_OPTIMIZER_JOIN_REORDER_ENABLED, true);
// register TPC-DS tables
TPCDS_TABLES.forEach(table -> {
TpcdsSchema schema = TpcdsSchemaProvider.getTableSchema(table);
CsvTableSource.Builder builder = CsvTableSource.builder();
builder.path(((sourceTablePath + f0) + table) + DATA_SUFFIX);
for (int i = 0; i < schema.getFieldNames().size(); i++) {
builder.field(schema.getFieldNames().get(i), TypeConversions.fromDataTypeToLegacyInfo(schema.getFieldTypes().get(i)));
}
builder.fieldDelimiter(COL_DELIMITER);
builder.emptyColumnAsNull();
builder.lineDelimiter("\n");
CsvTableSource tableSource = builder.build();
ConnectorCatalogTable catalogTable = ConnectorCatalogTable.source(tableSource, true);
tEnv.getCatalog(tEnv.getCurrentCatalog()).ifPresent(catalog -> {
try {
catalog.createTable(new ObjectPath(tEnv.getCurrentDatabase(), table), catalogTable, false);
} catch (Exception e) {
throw new RuntimeException(e);
}
});
});
// register statistics info
if (useTableStats) {
TpcdsStatsProvider.registerTpcdsStats(tEnv);
}
return tEnv;
} | 3.26 |
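A hedged driver sketch for the environment prepared above. The data path and query text are placeholders, and in the real program prepareTableEnv is a private helper of TpcdsTestProgram, so this only illustrates the call pattern, not a standalone compilable program.

import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.TableResult;

// Illustration only: prepare the batch environment and run one query against the registered tables.
class TpcdsQuerySketch {
    public static void main(String[] args) throws Exception {
        String sourceTablePath = "/tmp/tpcds-data"; // placeholder path to the generated data files
        TableEnvironment tEnv = prepareTableEnv(sourceTablePath, true); // assumes the helper is accessible here
        TableResult result = tEnv.executeSql("SELECT COUNT(*) FROM store_sales"); // illustrative query
        result.print();
    }
}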
flink_ClassLoaderUtil_validateClassLoadable_rdh | /**
* Checks, whether the class that was not found in the given exception, can be resolved through
* the given class loader.
*
* @param cnfe
* The ClassNotFoundException that defines the name of the class.
* @param cl
* The class loader to use for the class resolution.
* @return True, if the class can be resolved with the given class loader, false if not.
*/
public static boolean validateClassLoadable(ClassNotFoundException cnfe, ClassLoader cl) {
try {
String className = cnfe.getMessage();
Class.forName(className, false, cl);
return true;
} catch (ClassNotFoundException e) {
return false;
} catch (Exception e) {
return false;
}
} | 3.26 |
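A small, assumed call site for the check above: after catching a ClassNotFoundException from one loader, it asks whether the user-code class loader could have resolved the class. The messages, method name, and loader variable are illustrative, and the ClassLoaderUtil import is assumed to match its actual package.

import org.apache.flink.runtime.util.ClassLoaderUtil; // package assumed for illustration

// Hypothetical diagnostic: distinguish "class genuinely missing" from "loaded with the wrong class loader".
class ClassLoadDiagnosticsSketch {
    static void diagnose(ClassNotFoundException cnfe, ClassLoader userCodeClassLoader) {
        if (ClassLoaderUtil.validateClassLoadable(cnfe, userCodeClassLoader)) {
            System.err.println("Class is resolvable through the user-code class loader; a different loader was probably used.");
        } else {
            System.err.println("Class is also missing from the user-code class loader; check the job's JAR files.");
        }
    }
}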
flink_ClassLoaderUtil_formatURL_rdh | /**
* Returns the interpretation of URL in string format.
*
* <p>If the URL is null, it returns '(null)'.
*
* <p>If the URL protocol is file, the formatted URL is prefixed with 'file:', otherwise with 'url:'.
*
* <p>It also checks whether the object the URL points to exists. If it exists, additional checks
* determine whether it is a directory or a valid/invalid JAR file; if it does not exist, a
* '(missing)' flag is appended.
*
* @param url
* URL that should be formatted
* @return The formatted URL
* @throws IOException
* When JarFile cannot be closed
*/
public static String formatURL(URL url) throws IOException {
StringBuilder bld = new StringBuilder();
bld.append("\n ");
if (url == null) {
bld.append("(null)");
} else if ("file".equals(url.getProtocol())) {
String filePath = url.getPath();
File fileFile = new File(filePath);
bld.append("file: '").append(filePath).append('\'');
if (fileFile.exists()) {
if (fileFile.isDirectory()) {
bld.append(" (directory)");
} else {
JarFile jar = null;
try {
jar = new JarFile(filePath);
bld.append(" (valid JAR)");} catch (Exception e) {
bld.append(" (invalid JAR: ").append(e.getMessage()).append(')');
} finally {
if (jar != null) {
jar.close();
}
}
}
} else {
bld.append(" (missing)");
}
} else {
bld.append("url: ").append(url);}
return bld.toString();
} | 3.26 |
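A minimal sketch that reuses formatURL to describe the classpath of a URLClassLoader. The loader passed in is whatever the caller has at hand; beyond the JDK, only the helper above is used, and its import is assumed to match its actual package.

import java.io.IOException;
import java.net.URL;
import java.net.URLClassLoader;
import org.apache.flink.runtime.util.ClassLoaderUtil; // package assumed for illustration

// Builds one formatted line per classpath entry; formatURL already prefixes each entry with a newline.
class ClasspathDumpSketch {
    static String describeClasspath(URLClassLoader loader) throws IOException {
        StringBuilder out = new StringBuilder("class loader URLs:");
        for (URL url : loader.getURLs()) {
            out.append(ClassLoaderUtil.formatURL(url));
        }
        return out.toString();
    }
}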
flink_DataOutputSerializer_getSharedBuffer_rdh | /**
* Gets a reference to the internal byte buffer. This buffer may be larger than the actual
* serialized data. Only the bytes from zero to {@link #length()} are valid. The buffer will
* also be overwritten with the next write calls.
*
* <p>This method is useful when trying to avoid byte copies, but should be used carefully.
*
* @return A reference to the internal shared and reused buffer.
*/
public byte[] getSharedBuffer() {
return f0;
} | 3.26 |
flink_DataOutputSerializer_write_rdh | // ----------------------------------------------------------------------------------------
// Data Output
// ----------------------------------------------------------------------------------------
@Override
public void write(int b) throws IOException {
if (this.position >= this.f0.length) {
resize(1);
}
this.f0[this.position++] = ((byte) (b & 0xff));
} | 3.26 |
flink_DataOutputSerializer_getByteArray_rdh | /**
*
* @deprecated Replaced by {@link #getSharedBuffer()} for a better, safer name.
*/
@Deprecated
public byte[] getByteArray() {
return getSharedBuffer();
} | 3.26 |
flink_DataOutputSerializer_getCopyOfBuffer_rdh | /**
* Gets a copy of the buffer that has the right length for the data serialized so far. The
* returned buffer is an exclusive copy and can be safely used without being overwritten by
* future write calls to this serializer.
*
* <p>This method is equivalent to {@code Arrays.copyOf(getSharedBuffer(), length());}
*
* @return A non-shared copy of the serialization buffer.
*/
public byte[] getCopyOfBuffer() {
return Arrays.copyOf(f0, position);
} | 3.26 |
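A short sketch contrasting the shared buffer with the exclusive copy. It assumes the usual DataOutputSerializer(int startSize) constructor and writes arbitrary values.

import java.io.IOException;
import java.util.Arrays;
import org.apache.flink.core.memory.DataOutputSerializer;

// The shared buffer is the (possibly larger) backing array and is reused by later writes;
// the copy is trimmed to length() and stays valid afterwards.
class SerializerBufferSketch {
    public static void main(String[] args) throws IOException {
        DataOutputSerializer out = new DataOutputSerializer(64); // assumed start size
        out.writeInt(42);
        out.writeUTF("hello");
        byte[] shared = out.getSharedBuffer(); // only [0, length()) is valid
        byte[] copy = out.getCopyOfBuffer();   // exact length, safe to keep
        System.out.println("valid bytes: " + out.length() + ", shared capacity: " + shared.length);
        System.out.println("copy matches valid prefix: " + Arrays.equals(copy, Arrays.copyOf(shared, out.length())));
    }
}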
flink_JobManagerSharedServices_shutdown_rdh | /**
* Shutdown the {@link JobMaster} services.
*
* <p>This method makes sure all services are closed or shut down, even when an exception
* occurred in the shutdown of one component. The first encountered exception is thrown, with
* successive exceptions added as suppressed exceptions.
*
* @throws Exception
* The first Exception encountered during shutdown.
*/
public void shutdown() throws Exception {
Throwable exception = null;
try {
ExecutorUtils.gracefulShutdown(SHUTDOWN_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS, futureExecutor, ioExecutor);
} catch (Throwable t) {
exception = t;
}
try {
shuffleMaster.close();
} catch (Throwable t) {
exception = ExceptionUtils.firstOrSuppressed(t, exception);
}
libraryCacheManager.shutdown();
if (exception != null) {
ExceptionUtils.rethrowException(exception, "Error while shutting down JobManager services");
}
} | 3.26 |
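The same collect-first-then-rethrow idiom in isolation, applied to a generic list of resources. Only ExceptionUtils from flink-core is assumed; the method and message are placeholders.

import org.apache.flink.util.ExceptionUtils;

// Close every resource, remember the first failure, attach later failures as suppressed exceptions.
class CloseAllSketch {
    static void closeAll(Iterable<? extends AutoCloseable> resources) throws Exception {
        Throwable exception = null;
        for (AutoCloseable resource : resources) {
            try {
                resource.close();
            } catch (Throwable t) {
                exception = ExceptionUtils.firstOrSuppressed(t, exception);
            }
        }
        if (exception != null) {
            ExceptionUtils.rethrowException(exception, "Error while closing resources");
        }
    }
}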
flink_JobManagerSharedServices_fromConfiguration_rdh | // ------------------------------------------------------------------------
// Creating the components from a configuration
// ------------------------------------------------------------------------
public static JobManagerSharedServices fromConfiguration(Configuration config, BlobServer blobServer, FatalErrorHandler fatalErrorHandler) throws Exception {
checkNotNull(config);
checkNotNull(blobServer);
final String classLoaderResolveOrder = config.getString(CoreOptions.CLASSLOADER_RESOLVE_ORDER);
final String[] alwaysParentFirstLoaderPatterns = CoreOptions.getParentFirstLoaderPatterns(config);
final boolean failOnJvmMetaspaceOomError = config.getBoolean(CoreOptions.FAIL_ON_USER_CLASS_LOADING_METASPACE_OOM);
final boolean checkClassLoaderLeak = config.getBoolean(CoreOptions.CHECK_LEAKED_CLASSLOADER);
final BlobLibraryCacheManager libraryCacheManager = new BlobLibraryCacheManager(
        blobServer,
        BlobLibraryCacheManager.defaultClassLoaderFactory(
                FlinkUserCodeClassLoaders.ResolveOrder.fromString(classLoaderResolveOrder),
                alwaysParentFirstLoaderPatterns,
                failOnJvmMetaspaceOomError ? fatalErrorHandler : null,
                checkClassLoaderLeak),
        true);
final int numberCPUCores = Hardware.getNumberCPUCores();
final int jobManagerFuturePoolSize = config.getInteger(JobManagerOptions.JOB_MANAGER_FUTURE_POOL_SIZE, numberCPUCores);
final ScheduledExecutorService futureExecutor = Executors.newScheduledThreadPool(jobManagerFuturePoolSize, new ExecutorThreadFactory("jobmanager-future"));
final int jobManagerIoPoolSize = config.getInteger(JobManagerOptions.JOB_MANAGER_IO_POOL_SIZE, numberCPUCores);
final ExecutorService ioExecutor = Executors.newFixedThreadPool(jobManagerIoPoolSize, new ExecutorThreadFactory("jobmanager-io"));
final ShuffleMasterContext shuffleMasterContext = new ShuffleMasterContextImpl(config, fatalErrorHandler);
final ShuffleMaster<?> shuffleMaster = ShuffleServiceLoader.loadShuffleServiceFactory(config).createShuffleMaster(shuffleMasterContext);
shuffleMaster.start();
return new JobManagerSharedServices(futureExecutor, ioExecutor, libraryCacheManager, shuffleMaster, blobServer);
} | 3.26 |
flink_FromElementsGeneratorFunction_checkIterable_rdh | // ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
/**
* Verifies that all elements in the iterable are non-null, and are of the given class, or a
* subclass thereof.
*
* @param elements
* The iterable to check.
* @param viewedAs
* The class to which the elements must be assignable to.
* @param <OUT>
* The generic type of the iterable to be checked.
*/
public static <OUT> void checkIterable(Iterable<OUT> elements, Class<?> viewedAs) {
for (OUT elem : elements) {
if (elem == null) {
throw new IllegalArgumentException("The collection contains a null element");
}
if (!viewedAs.isAssignableFrom(elem.getClass())) {
throw new IllegalArgumentException("The elements in the collection are not all subclasses of " + viewedAs.getCanonicalName());
}
}
} | 3.26 |
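A tiny usage sketch of the element check above. The values are arbitrary, and the import for FromElementsGeneratorFunction is assumed to match wherever the class lives on the classpath.

import java.util.Arrays;
import java.util.List;
import org.apache.flink.connector.datagen.functions.FromElementsGeneratorFunction; // package assumed for illustration

// Accepts heterogeneous Number elements; a null element or a non-Number entry would throw IllegalArgumentException.
class CheckIterableSketch {
    public static void main(String[] args) {
        List<Number> elements = Arrays.asList(1, 2L, 3.0);
        FromElementsGeneratorFunction.checkIterable(elements, Number.class); // passes
        // FromElementsGeneratorFunction.checkIterable(Arrays.asList(1, null), Number.class); // would fail: null element
    }
}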
flink_FromElementsGeneratorFunction_setOutputType_rdh | // For backward compatibility: Supports legacy usage of
// StreamExecutionEnvironment#fromElements() which lacked type information and relied on the
// returns() method. See FLINK-21386 for details.
@Override
public void setOutputType(TypeInformation<OUT> outTypeInfo, ExecutionConfig executionConfig) {
Preconditions.checkState(elements != null, "The output type should've been specified before shipping the graph to the cluster");
checkIterable(elements, outTypeInfo.getTypeClass());
TypeSerializer<OUT> newSerializer = outTypeInfo.createSerializer(executionConfig);
if (Objects.equals(serializer, newSerializer)) {
return;
}
serializer = newSerializer;
try {
serializeElements(elements);
} catch (IOException e) {
throw new RuntimeException(e.getMessage(), e);
}
} | 3.26 |
flink_NonSpanningWrapper_transferTo_rdh | /**
* Copies the data and transfers the "ownership" (i.e. clears current wrapper).
*/
void transferTo(ByteBuffer dst) {
segment.get(position, dst, remaining());
clear();
} | 3.26 |
flink_NonSpanningWrapper_readFully_rdh | // -------------------------------------------------------------------------------------------------------------
// DataInput specific methods
// -------------------------------------------------------------------------------------------------------------
@Override
public final void readFully(byte[] b) {
readFully(b, 0, b.length);
} | 3.26 |
flink_HandlerRequest_create_rdh | /**
* Short-cut for {@link #create(RequestBody, MessageParameters, Collection)} without any
* uploaded files.
*/
@VisibleForTesting
public static <R extends RequestBody, M extends MessageParameters> HandlerRequest<R> create(R requestBody, M messageParameters) {
return create(requestBody, messageParameters, Collections.emptyList());
}
/**
* Creates a new {@link HandlerRequest}. The given {@link MessageParameters} | 3.26 |
flink_HandlerRequest_m0_rdh | /**
* Returns the request body.
*
* @return request body
*/
public R m0() {
return requestBody;
} | 3.26 |
flink_HandlerRequest_resolveParametersAndCreate_rdh | /**
* Creates a new {@link HandlerRequest} after resolving the given {@link MessageParameters}
* against the given query/path parameter maps.
*
* <p>For tests it is recommended to resolve the parameters manually and use {@link #create}.
*/
public static <R extends RequestBody, M extends MessageParameters> HandlerRequest<R> resolveParametersAndCreate(R requestBody, M messageParameters, Map<String, String> receivedPathParameters, Map<String, List<String>> receivedQueryParameters, Collection<File> uploadedFiles) throws HandlerRequestException {
resolvePathParameters(messageParameters, receivedPathParameters);
resolveQueryParameters(messageParameters, receivedQueryParameters);
return create(requestBody, messageParameters, uploadedFiles);
} | 3.26 |
flink_HandlerRequest_getPathParameter_rdh | /**
* Returns the value of the {@link MessagePathParameter} for the given class.
*
* @param parameterClass
* class of the parameter
* @param <X>
* the value type that the parameter contains
* @param <PP>
* type of the path parameter
* @return path parameter value for the given class
* @throws IllegalStateException
* if no value is defined for the given parameter class
*/
public <X, PP extends MessagePathParameter<X>> X getPathParameter(Class<PP> parameterClass) {
@SuppressWarnings("unchecked")
PP pathParameter = (PP) pathParameters.get(parameterClass);
Preconditions.checkState(pathParameter != null, "No parameter could be found for the given class.");
return pathParameter.getValue();
}
/**
* Returns the value of the {@link MessageQueryParameter} | 3.26 |