name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
flink_SkipListUtils_findPredecessor_rdh | /**
 * Find the predecessor node for the given key at the given level. The key is stored in the
 * memory segment at the given offset.
*
* @param keySegment
* memory segment which contains the key.
* @param keyOffset
* offset of the key in the memory segment.
* @param level
* the level.
* @param levelIndexHeader
* the head level index.
* @param spaceAllocator
* the space allocator.
* @return node id before the key at the given level.
*/
static long findPredecessor(MemorySegment keySegment, int keyOffset, int level, @Nonnull LevelIndexHeader levelIndexHeader, Allocator spaceAllocator) {
int currentLevel = levelIndexHeader.getLevel();
long currentNode = HEAD_NODE;
long nextNode = levelIndexHeader.getNextNode(currentLevel);
for (; ;) {
if (nextNode != NIL_NODE) {
int c = compareSegmentAndNode(keySegment, keyOffset, nextNode, spaceAllocator);
if (c > 0) {
currentNode = nextNode;
nextNode = helpGetNextNode(currentNode, currentLevel, levelIndexHeader, spaceAllocator);
continue;
}
}
if (currentLevel <= level) {
return currentNode;
}
currentLevel--;
nextNode = helpGetNextNode(currentNode, currentLevel, levelIndexHeader, spaceAllocator);
}
} | 3.26 |
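The method above descends from the top index level, moving right while the next node's key is smaller than the search key and dropping a level otherwise, until the requested level has been scanned. A simplified, heap-based sketch of that search pattern (plain objects instead of the MemorySegment layout; all names here are illustrative, not Flink's):

```java
/** Simplified sketch of the top-down skip-list predecessor search; node layout is illustrative. */
public class SkipListSearchSketch {

    static final class SkipNode {
        final long key;
        final SkipNode[] next; // next[i] = successor on level i; null plays the role of NIL_NODE

        SkipNode(long key, int levels) {
            this.key = key;
            this.next = new SkipNode[levels];
        }
    }

    /** Returns the rightmost node on {@code level} whose key is smaller than {@code key}. */
    static SkipNode findPredecessor(SkipNode head, long key, int level) {
        SkipNode current = head;
        for (int currentLevel = head.next.length - 1; currentLevel >= level; currentLevel--) {
            SkipNode next = current.next[currentLevel];
            // walk right while the next node's key is still smaller than the search key ...
            while (next != null && next.key < key) {
                current = next;
                next = current.next[currentLevel];
            }
            // ... then drop one level, stopping once the target level has been scanned
        }
        return current;
    }

    public static void main(String[] args) {
        SkipNode head = new SkipNode(Long.MIN_VALUE, 2);
        SkipNode a = new SkipNode(10, 2);
        SkipNode b = new SkipNode(20, 1);
        head.next[0] = a;
        head.next[1] = a;
        a.next[0] = b;
        System.out.println(findPredecessor(head, 15, 0).key); // prints 10
    }
}
```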
flink_SkipListUtils_helpSetNextNode_rdh | /**
* Set the next node of the given node at the given level.
*
* @param node
* the node.
* @param nextNode
* the next node to set.
* @param level
* the level to find the next node.
* @param levelIndexHeader
* the header of the level index.
* @param spaceAllocator
* the space allocator.
*/
static void helpSetNextNode(long node, long nextNode, int level, LevelIndexHeader levelIndexHeader, Allocator spaceAllocator) {
if (node == HEAD_NODE) {
levelIndexHeader.updateNextNode(level, nextNode);
return;
}
Chunk chunk = spaceAllocator.getChunkById(SpaceUtils.getChunkIdByAddress(node));
int offsetInChunk = SpaceUtils.getChunkOffsetByAddress(node);
MemorySegment segment = chunk.getMemorySegment(offsetInChunk);
int offsetInByteBuffer = chunk.getOffsetInSegment(offsetInChunk);
if (level == 0) {
putNextKeyPointer(segment, offsetInByteBuffer, nextNode);
} else {
putNextIndexNode(segment, offsetInByteBuffer, level, nextNode);
}
} | 3.26 |
flink_SkipListUtils_getKeyMetaLen_rdh | /**
* Returns the length of key meta with the given level.
*
* @param level
* level of the key.
*/
public static int getKeyMetaLen(int level) {
Preconditions.checkArgument((level >= 0) && (level < f1.length), ((("level " + level) + " out of range [0, ") + f1.length) + ")");
return f1[level];
} | 3.26 |
flink_SkipListUtils_putValuePointer_rdh | /**
* Puts the value pointer to key space.
*
* @param memorySegment
* memory segment for key space.
* @param offset
* offset of key space in the memory segment.
* @param valuePointer
* the value pointer.
*/
public static void putValuePointer(MemorySegment memorySegment, int offset, long valuePointer) {
memorySegment.putLong(offset + VALUE_POINTER_OFFSET, valuePointer);
} | 3.26 |
flink_SkipListUtils_helpGetNextValuePointer_rdh | /**
* Returns the next value pointer of the value.
*
* @param valuePointer
* the value pointer of current value.
* @param spaceAllocator
* the space allocator.
*/
static long helpGetNextValuePointer(long valuePointer, Allocator spaceAllocator) {
Chunk chunk = spaceAllocator.getChunkById(SpaceUtils.getChunkIdByAddress(valuePointer));
int offsetInChunk = SpaceUtils.getChunkOffsetByAddress(valuePointer);
MemorySegment segment = chunk.getMemorySegment(offsetInChunk);
int offsetInByteBuffer = chunk.getOffsetInSegment(offsetInChunk);
return getNextValuePointer(segment, offsetInByteBuffer);
} | 3.26 |
flink_SkipListUtils_isNodeRemoved_rdh | /**
* Whether the node has been logically removed.
*
* @param node
* the node to check against
* @param spaceAllocator
* the space allocator
* @return true if the node has been logically removed.
*/
static boolean isNodeRemoved(long node, Allocator spaceAllocator) {
if (node == NIL_NODE) {
return false;
}
Chunk chunk = spaceAllocator.getChunkById(SpaceUtils.getChunkIdByAddress(node));
int offsetInChunk = SpaceUtils.getChunkOffsetByAddress(node);
MemorySegment segment = chunk.getMemorySegment(offsetInChunk);
int offsetInByteBuffer = chunk.getOffsetInSegment(offsetInChunk);
return getNodeStatus(segment, offsetInByteBuffer) == NodeStatus.REMOVE;
} | 3.26 |
flink_SkipListUtils_getValueMetaLen_rdh | /**
* Returns the length of value meta.
 */
public static int getValueMetaLen() {
return VALUE_DATA_OFFSET;
} | 3.26 |
flink_SkipListUtils_putValueLen_rdh | /**
* Puts the length of value data.
*
* @param memorySegment
* memory segment for value space.
* @param offset
* offset of value space in memory segment.
* @param valueLen
* length of value data.
*/
public static void putValueLen(MemorySegment memorySegment, int offset, int valueLen) {
memorySegment.putInt(offset + VALUE_LEN_OFFSET, valueLen);
} | 3.26 |
flink_SkipListUtils_putValueVersion_rdh | /**
* Puts the version of value to value space.
*
* @param memorySegment
* memory segment for value space.
* @param offset
* offset of value space in memory segment.
* @param version
* version of value.
*/
public static void putValueVersion(MemorySegment memorySegment, int offset, int version) {
memorySegment.putInt(offset + VALUE_VERSION_OFFSET, version);
} | 3.26 |
flink_SkipListUtils_putLevelAndNodeStatus_rdh | /**
* Puts the level and status to the key space.
*
* @param memorySegment
* memory segment for key space.
* @param offset
* offset of key space in the memory segment.
* @param level
* the level.
* @param status
* the status.
*/
public static void putLevelAndNodeStatus(MemorySegment memorySegment, int offset, int level, NodeStatus status) {
int data = ((status.getValue() & BYTE_MASK) << 8) | level;
memorySegment.putInt(offset + SkipListUtils.KEY_META_OFFSET, data);
} | 3.26 |
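A standalone sketch of the packing done above: the status byte ends up in bits 8-15 and the level in the low byte. The status value and the 0xFF mask below are assumptions for illustration, not Flink's NodeStatus values or BYTE_MASK constant:

```java
public class LevelStatusPackingDemo {
    public static void main(String[] args) {
        int level = 3;
        int statusValue = 1; // hypothetical NodeStatus value, for illustration only

        // same shape as the snippet: status in the high byte, level in the low byte
        int packed = ((statusValue & 0xFF) << 8) | level;

        // unpacking, assuming the level fits in the low byte
        int unpackedLevel = packed & 0xFF;
        int unpackedStatus = (packed >>> 8) & 0xFF;

        System.out.printf("packed=0x%04x level=%d status=%d%n", packed, unpackedLevel, unpackedStatus);
    }
}
```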
flink_SkipListUtils_helpSetNextValuePointer_rdh | /**
* Sets the next value pointer of the value.
*
* @param valuePointer
* the value pointer.
* @param nextValuePointer
* the next value pointer to set.
* @param spaceAllocator
* the space allocator.
*/
static void helpSetNextValuePointer(long valuePointer, long nextValuePointer,
Allocator spaceAllocator) {
Chunk chunk = spaceAllocator.getChunkById(SpaceUtils.getChunkIdByAddress(valuePointer));
int offsetInChunk = SpaceUtils.getChunkOffsetByAddress(valuePointer);
MemorySegment segment = chunk.getMemorySegment(offsetInChunk);
int offsetInByteBuffer = chunk.getOffsetInSegment(offsetInChunk);
putNextValuePointer(segment, offsetInByteBuffer, nextValuePointer);
} | 3.26 |
flink_SkipListUtils_putKeyPointer_rdh | /**
* Puts the pointer of key space.
*
* @param memorySegment
* memory segment for value space.
* @param offset
* offset of value space in memory segment.
* @param keyPointer
* pointer to key space.
*/
public static void putKeyPointer(MemorySegment memorySegment, int offset, long keyPointer) {
memorySegment.putLong(offset + KEY_POINTER_OFFSET, keyPointer);
} | 3.26 |
flink_SkipListUtils_putNextValuePointer_rdh | /**
* Puts the pointer of next value space.
*
* @param memorySegment
* memory segment for value space.
* @param offset
* offset of value space in memory segment.
* @param nextValuePointer
* pointer to next value space.
*/
public static void putNextValuePointer(MemorySegment memorySegment, int offset, long nextValuePointer) {
memorySegment.putLong(offset + NEXT_VALUE_POINTER_OFFSET, nextValuePointer);
} | 3.26 |
flink_SkipListUtils_getNextKeyPointer_rdh | /**
* Returns the next key pointer on level 0.
*
* @param memorySegment
* memory segment for key space.
* @param offset
* offset of key space in the memory segment.
 */
public static long getNextKeyPointer(MemorySegment memorySegment, int offset) {
return memorySegment.getLong(offset + f0);
} | 3.26 |
flink_ReOpenableMutableHashTable_storeInitialHashTable_rdh | /**
* This method stores the initial hash table's contents on disk if hash join needs the memory
* for further partition processing. The initial hash table is rebuild before a new secondary
* input is opened.
*
* <p>For the sake of simplicity we iterate over all in-memory elements and store them in one
* file. The file is hashed into memory upon opening a new probe input.
*
* @throws IOException
*/
void storeInitialHashTable() throws IOException {
if (spilled) {
return;// we create the initialHashTable only once. Later calls are caused by deeper
// recursion lvls
}
spilled = true;
for (int partIdx = 0; partIdx < initialPartitions.size(); partIdx++) {
final ReOpenableHashPartition<BT, PT> p = ((ReOpenableHashPartition<BT, PT>) (initialPartitions.get(partIdx)));
if (p.isInMemory()) {
// write memory resident partitions to disk
this.writeBehindBuffersAvailable += p.spillInMemoryPartition(spilledInMemoryPartitions.next(), ioManager, writeBehindBuffers);
}
}
} | 3.26 |
flink_SolutionSetUpdateBarrier_waitForSolutionSetUpdate_rdh | /**
* Waits (blocking) on barrier.
*
* @throws InterruptedException
*/
public void waitForSolutionSetUpdate() throws InterruptedException {
latch.await();
} | 3.26 |
flink_SolutionSetUpdateBarrier_notifySolutionSetUpdate_rdh | /**
* Releases the waiting thread.
*/
public void notifySolutionSetUpdate() {
latch.countDown();
} | 3.26 |
flink_LazyBinaryFormat_ensureMaterialized_rdh | /**
* Ensure we have materialized binary format.
*/
public final void ensureMaterialized(TypeSerializer<T> serializer) {
if (binarySection == null) {
try {
this.binarySection = materialize(serializer);
} catch (IOException e) {
throw new WrappingRuntimeException(e);
}
}
} | 3.26 |
flink_LazyBinaryFormat_setJavaObject_rdh | /**
* Must be public as it is used during code generation.
*/
public void setJavaObject(T javaObject) {
this.javaObject = javaObject;
} | 3.26 |
flink_BiConsumerWithException_unchecked_rdh | /**
* Convert a {@link BiConsumerWithException} into a {@link BiConsumer}.
*
* @param biConsumerWithException
* BiConsumer with exception to convert into a {@link BiConsumer}.
* @param <A>
* first input type
* @param <B>
* second input type
* @return {@link BiConsumer} which rethrows all checked exceptions as unchecked.
*/
static <A, B> BiConsumer<A, B> unchecked(BiConsumerWithException<A, B, ?> biConsumerWithException) {
return (A a, B b) -> {
try {
biConsumerWithException.accept(a, b);
} catch (Throwable t) {
ExceptionUtils.rethrow(t);
}
};
} | 3.26 |
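The conversion above is the usual "catch and rethrow unchecked" wrapping pattern. A self-contained sketch of that pattern in plain Java, where the ThrowingBiConsumer interface and the sneakyThrow helper are stand-ins for Flink's BiConsumerWithException and ExceptionUtils.rethrow:

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.function.BiConsumer;

public class UncheckedBiConsumerDemo {

    /** Functional interface standing in for BiConsumerWithException. */
    @FunctionalInterface
    interface ThrowingBiConsumer<A, B, E extends Throwable> {
        void accept(A a, B b) throws E;
    }

    /** Stand-in for ExceptionUtils.rethrow: rethrows any Throwable without wrapping it. */
    @SuppressWarnings("unchecked")
    static <E extends Throwable> void sneakyThrow(Throwable t) throws E {
        throw (E) t;
    }

    /** Same shape as the snippet above: catch everything and rethrow unchecked. */
    static <A, B> BiConsumer<A, B> unchecked(ThrowingBiConsumer<A, B, ?> consumer) {
        return (a, b) -> {
            try {
                consumer.accept(a, b);
            } catch (Throwable t) {
                sneakyThrow(t);
            }
        };
    }

    public static void main(String[] args) throws IOException {
        Path target = Files.createTempFile("demo", ".txt");
        // Files.writeString declares IOException, yet the lambda fits a plain BiConsumer.
        BiConsumer<Path, String> writer = unchecked((path, text) -> Files.writeString(path, text));
        writer.accept(target, "hello");
        System.out.println(Files.readString(target));
    }
}
```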
flink_GenericArraySerializerConfigSnapshot_getCurrentVersion_rdh | // ------------------------------------------------------------------------
@Override
public int getCurrentVersion() {
return CURRENT_VERSION;
} | 3.26 |
flink_ShutdownHookUtil_removeShutdownHook_rdh | /**
* Removes a shutdown hook from the JVM.
*/
public static void removeShutdownHook(final Thread shutdownHook, final String serviceName, final Logger logger) {
// Do not run if this is invoked by the shutdown hook itself
if ((shutdownHook == null) || (shutdownHook == Thread.currentThread())) {
return;
}
checkNotNull(logger);
try {
Runtime.getRuntime().removeShutdownHook(shutdownHook);
} catch (IllegalStateException e) {
// race, JVM is in shutdown already, we can safely ignore this
logger.debug("Unable to remove shutdown hook for {}, shutdown already in progress", serviceName, e);
} catch (Throwable t) {
logger.warn("Exception while un-registering {}'s shutdown hook.", serviceName, t);
}
} | 3.26 |
flink_ShutdownHookUtil_addShutdownHookThread_rdh | /**
* Adds a shutdown hook to the JVM.
*
* @param shutdownHook
* Shutdown hook to be registered.
* @param serviceName
* The name of service.
* @param logger
* The logger to log.
* @return Whether the hook has been successfully registered.
*/
public static boolean addShutdownHookThread(final Thread shutdownHook, final String serviceName, final Logger logger) {
checkNotNull(shutdownHook);
checkNotNull(logger);
try {
// Add JVM shutdown hook to call shutdown of service
Runtime.getRuntime().addShutdownHook(shutdownHook);
return true;
} catch (IllegalStateException e) {
// JVM is already shutting down. no need to do our work
} catch (Throwable t) {
logger.error("Cannot register shutdown hook that cleanly terminates {}.", serviceName, t);
}
return false;
} | 3.26 |
flink_FileRecords_forRecords_rdh | // ------------------------------------------------------------------------
public static <T> FileRecords<T> forRecords(final String splitId, final BulkFormat.RecordIterator<T> recordsForSplit) {
return new FileRecords<>(splitId, recordsForSplit, Collections.emptySet());
} | 3.26 |
flink_ChannelWriterOutputView_close_rdh | // --------------------------------------------------------------------------------------------
/**
* Closes this OutputView, closing the underlying writer and returning all memory segments.
*
* @return A list containing all memory segments originally supplied to this view.
* @throws IOException
* Thrown, if the underlying writer could not be properly closed.
*/
public List<MemorySegment> close() throws IOException {
// send off set last segment
writeSegment(getCurrentSegment(), getCurrentPositionInSegment(), true);
clear();
// close the writer and gather all segments
final LinkedBlockingQueue<MemorySegment> queue = this.writer.getReturnQueue();
this.writer.close();
// re-collect all memory segments
ArrayList<MemorySegment> list = new ArrayList<MemorySegment>(this.numSegments);
for (int i = 0; i < this.numSegments; i++) {
final MemorySegment m = queue.poll();
if (m == null) {
// we get null if the queue is empty. that should not be the case if the reader was
// properly closed.
throw new RuntimeException("ChannelWriterOutputView: MemorySegments have been taken from return queue by different actor.");
}
list.add(m);
}
return list;
} | 3.26 |
flink_ChannelWriterOutputView_getBlockCount_rdh | // --------------------------------------------------------------------------------------------
/**
* Gets the number of blocks used by this view.
*
* @return The number of blocks used.
*/
public int getBlockCount() {
return this.f2;
} | 3.26 |
flink_ChannelWriterOutputView_getBytesWritten_rdh | /**
* Gets the number of pay-load bytes already written. This excludes the number of bytes spent on
* headers in the segments.
*
* @return The number of bytes that have been written to this output view.
*/
public long getBytesWritten() {
return (this.bytesBeforeSegment + getCurrentPositionInSegment()) - HEADER_LENGTH;
} | 3.26 |
flink_ChannelWriterOutputView_nextSegment_rdh | // --------------------------------------------------------------------------------------------
// Page Management
// --------------------------------------------------------------------------------------------
protected final MemorySegment nextSegment(MemorySegment current, int posInSegment) throws IOException {
if (current != null) {
writeSegment(current, posInSegment, false);
}
final MemorySegment next = this.writer.getNextReturnedBlock();
this.f2++;
return next;
} | 3.26 |
flink_ChannelWriterOutputView_getBytesMemoryUsed_rdh | /**
* Gets the number of bytes used by this output view, including written bytes and header bytes.
*
* @return The number of bytes that have been written to this output view.
*/
public long getBytesMemoryUsed() {
return ((this.f2 - 1) * getSegmentSize()) + getCurrentPositionInSegment();
} | 3.26 |
flink_HiveParserJoinCondTypeCheckProcFactory_getDefaultExprProcessor_rdh | /**
* Factory method to get DefaultExprProcessor.
*/
@Override
public DefaultExprProcessor getDefaultExprProcessor() {
return new HiveParserJoinCondTypeCheckProcFactory.JoinCondDefaultExprProcessor();
} | 3.26 |
flink_HiveParserJoinCondTypeCheckProcFactory_getColumnExprProcessor_rdh | /**
* Factory method to get ColumnExprProcessor.
*/
@Override
public ColumnExprProcessor getColumnExprProcessor() {
return new HiveParserJoinCondTypeCheckProcFactory.JoinCondColumnExprProcessor();
} | 3.26 |
flink_CollectIteratorAssert_matchThenNext_rdh | /**
 * Check whether any pointer's current record is identical to the record from the stream, and
 * move the pointer to the next record if matched.
*
* @param record
* Record from stream
*/
private boolean matchThenNext(T record) {
for (RecordsFromSplit<T> recordsFromSplit : recordsFromSplits) {
if (!recordsFromSplit.hasNext()) {
continue;
}
if (record.equals(recordsFromSplit.current())) {
recordsFromSplit.forward();
return true;
}
}
return false;
} | 3.26 |
flink_CollectIteratorAssert_hasReachedEnd_rdh | /**
* Whether all pointers have reached the end of lists.
*
* @return True if all pointers have reached the end.
*/
private boolean hasReachedEnd() {
for (RecordsFromSplit<T> recordsFromSplit : recordsFromSplits) {
if (recordsFromSplit.hasNext()) {
return false;
}
}
return true;
} | 3.26 |
flink_HiveParserQBParseInfo_setPartialScanAnalyzeCommand_rdh | /**
*
* @param isPartialScanAnalyzeCommand
* the isPartialScanAnalyzeCommand to set
*/
public void setPartialScanAnalyzeCommand(boolean isPartialScanAnalyzeCommand) {
this.isPartialScanAnalyzeCommand = isPartialScanAnalyzeCommand;
} | 3.26 |
flink_HiveParserQBParseInfo_getInsertOverwriteTables_rdh | // See also {@link #isInsertIntoTable(String)}
public Map<String, HiveParserASTNode> getInsertOverwriteTables() {
return insertOverwriteTables;
} | 3.26 |
flink_HiveParserQBParseInfo_getClusterByForClause_rdh | /**
* Get the Cluster By AST for the clause.
*/
public HiveParserASTNode getClusterByForClause(String clause) {
return destToClusterby.get(clause);
} | 3.26 |
flink_HiveParserQBParseInfo_setDistributeByExprForClause_rdh | /**
* Set the Distribute By AST for the clause.
*/
public void setDistributeByExprForClause(String clause, HiveParserASTNode ast) {
destToDistributeby.put(clause, ast);
} | 3.26 |
flink_HiveParserQBParseInfo_isSimpleSelectQuery_rdh | // for fast check of possible existence of RS (will be checked again in SimpleFetchOptimizer)
public boolean isSimpleSelectQuery() {
if ((((((((((f0 != null) || (!destToOrderby.isEmpty())) || (!destToSortby.isEmpty())) || (!destToGroupby.isEmpty())) || (!destToClusterby.isEmpty())) || (!destToDistributeby.isEmpty())) || (!f2.isEmpty())) || (!f3.isEmpty())) || (!destGroupingSets.isEmpty())) || (!destToHaving.isEmpty())) {
return false;
}
for (Map<String, HiveParserASTNode> entry : destToAggregationExprs.values()) {
if ((entry != null) && (!entry.isEmpty())) {
return false;
}
}
for (Map<String, HiveParserASTNode> entry : destToWindowingExprs.values()) {
if ((entry != null) && (!entry.isEmpty())) {
return false;
}
}
for (List<HiveParserASTNode> ct : destToDistinctFuncExprs.values()) {
if (!ct.isEmpty()) {
return false;
}
}
// exclude insert queries
for (HiveParserASTNode v : nameToDest.values()) {
if (!(v.getChild(0).getType() == HiveASTParser.TOK_TMP_FILE)) {
return false;
}
}
return true;
} | 3.26 |
flink_HiveParserQBParseInfo_getDistributeByForClause_rdh | /**
* Get the Distribute By AST for the clause.
*/
public HiveParserASTNode getDistributeByForClause(String clause) {
return destToDistributeby.get(clause);
} | 3.26 |
flink_HiveParserQBParseInfo_setNoScanAnalyzeCommand_rdh | /**
*
* @param isNoScanAnalyzeCommand
* the isNoScanAnalyzeCommand to set
*/
public void setNoScanAnalyzeCommand(boolean isNoScanAnalyzeCommand) {
this.isNoScanAnalyzeCommand = isNoScanAnalyzeCommand;
} | 3.26 |
flink_HiveParserQBParseInfo_setSortByExprForClause_rdh | /**
* Set the Sort By AST for the clause.
*/
public void setSortByExprForClause(String clause, HiveParserASTNode ast) {
destToSortby.put(clause, ast);
} | 3.26 |
flink_HiveParserQBParseInfo_getSortByForClause_rdh | /**
* Get the Sort By AST for the clause.
*/
public HiveParserASTNode getSortByForClause(String clause) {
return destToSortby.get(clause);
} | 3.26 |
flink_HiveParserQBParseInfo_setClusterByExprForClause_rdh | /**
* Set the Cluster By AST for the clause.
*/
public void setClusterByExprForClause(String clause, HiveParserASTNode ast) {
destToClusterby.put(clause, ast);
} | 3.26 |
flink_HiveParserQBParseInfo_isPartialScanAnalyzeCommand_rdh | /**
*
* @return the isPartialScanAnalyzeCommand
*/
public boolean isPartialScanAnalyzeCommand() {
return isPartialScanAnalyzeCommand;
} | 3.26 |
flink_RestClientConfiguration_getIdlenessTimeout_rdh | /**
* {@see RestOptions#IDLENESS_TIMEOUT}.
*/
public long getIdlenessTimeout() {
return idlenessTimeout;
} | 3.26 |
flink_RestClientConfiguration_fromConfiguration_rdh | /**
* Creates and returns a new {@link RestClientConfiguration} from the given {@link Configuration}.
*
* @param config
* configuration from which the REST client endpoint configuration should be
* created from
* @return REST client endpoint configuration
* @throws ConfigurationException
* if SSL was configured incorrectly
*/
public static RestClientConfiguration fromConfiguration(Configuration config) throws ConfigurationException {
Preconditions.checkNotNull(config);
final SSLHandlerFactory sslHandlerFactory;
if (SecurityOptions.isRestSSLEnabled(config)) {
try {
sslHandlerFactory = SSLUtils.createRestClientSSLEngineFactory(config);
} catch (Exception e) {
throw new ConfigurationException("Failed to initialize SSLContext for the REST client", e);
}
} else {
sslHandlerFactory = null;
}
final long v1 = config.getLong(RestOptions.CONNECTION_TIMEOUT);
final long idlenessTimeout = config.getLong(RestOptions.IDLENESS_TIMEOUT);
int v3 = config.getInteger(RestOptions.CLIENT_MAX_CONTENT_LENGTH);
return new RestClientConfiguration(sslHandlerFactory, v1, idlenessTimeout, v3);
} | 3.26 |
flink_RestClientConfiguration_getMaxContentLength_rdh | /**
* Returns the max content length that the REST client endpoint could handle.
*
* @return max content length that the REST client endpoint could handle
*/
public int getMaxContentLength() {
return maxContentLength;
} | 3.26 |
flink_RestClientConfiguration_getConnectionTimeout_rdh | /**
* {@see RestOptions#CONNECTION_TIMEOUT}.
*/
public long getConnectionTimeout() {
return connectionTimeout;
} | 3.26 |
flink_SerializedValue_fromBytes_rdh | /**
* Constructs serialized value from serialized data.
*
* @param serializedData
* serialized data
* @param <T>
* type
* @return serialized value
* @throws NullPointerException
* if serialized data is null
* @throws IllegalArgumentException
* if serialized data is empty
*/
public static <T> SerializedValue<T> fromBytes(byte[] serializedData) {
return new SerializedValue<>(serializedData);
} | 3.26 |
flink_SerializedValue_getByteArray_rdh | /**
* Returns byte array for serialized data.
*
* @return Serialized data.
*/
public byte[] getByteArray() {
return serializedData;
} | 3.26 |
flink_SerializedValue_hashCode_rdh | // --------------------------------------------------------------------------------------------
@Override
public int hashCode() {
return Arrays.hashCode(serializedData);
} | 3.26 |
flink_FailureResult_canNotRestart_rdh | /**
* Creates FailureResult which does not allow to restart the job.
*
* @param failureCause
* failureCause describes the reason why the job cannot be restarted
* @return FailureResult which does not allow to restart the job
*/
static FailureResult canNotRestart(Throwable failureCause) {
return new FailureResult(failureCause, null);
} | 3.26 |
flink_FailureResult_canRestart_rdh | /**
* Creates a FailureResult which allows to restart the job.
*
* @param failureCause
* failureCause for restarting the job
* @param backoffTime
* backoffTime to wait before restarting the job
* @return FailureResult which allows to restart the job
*/
static FailureResult canRestart(Throwable failureCause, Duration backoffTime) {
return new FailureResult(failureCause, backoffTime);
} | 3.26 |
flink_DeletePushDownUtils_m0_rdh | /**
* Get the resolved filter expressions from the {@code WHERE} clause in DELETE statement, return
* Optional.empty() if {@code WHERE} clause contains sub-query.
*/
public static Optional<List<ResolvedExpression>> m0(LogicalTableModify tableModify) {
FlinkContext context = ShortcutUtils.unwrapContext(tableModify.getCluster());
RelNode input = tableModify.getInput().getInput(0);
// no WHERE clause, return an empty list
if (input instanceof LogicalTableScan) {
return Optional.of(Collections.emptyList());
}
if (!(input instanceof LogicalFilter)) {
return Optional.empty();
}
Filter filter = ((Filter) (input));
if (RexUtil.SubQueryFinder.containsSubQuery(filter)) {
return Optional.empty();
}
// optimize the filter
filter = m1(filter);
// resolve the filter to get resolved expression
List<ResolvedExpression> resolveExpression = resolveFilter(context, filter);
return Optional.ofNullable(resolveExpression);
} | 3.26 |
flink_DeletePushDownUtils_getDynamicTableSink_rdh | /**
* Get the {@link DynamicTableSink} for the table to be modified. Return Optional.empty() if it
* can't get the {@link DynamicTableSink}.
*/
public static Optional<DynamicTableSink> getDynamicTableSink(ContextResolvedTable contextResolvedTable, LogicalTableModify tableModify, CatalogManager catalogManager) {
final FlinkContext context = ShortcutUtils.unwrapContext(tableModify.getCluster());
CatalogBaseTable catalogBaseTable = contextResolvedTable.getTable();
// only consider DynamicTableSink
if (catalogBaseTable instanceof CatalogTable) {
ResolvedCatalogTable resolvedTable = contextResolvedTable.getResolvedTable();
Optional<Catalog> optionalCatalog = contextResolvedTable.getCatalog();
ObjectIdentifier objectIdentifier = contextResolvedTable.getIdentifier();
boolean isTemporary = contextResolvedTable.isTemporary();
// only consider the CatalogTable that doesn't use legacy connector sink option
if ((!contextResolvedTable.isAnonymous()) &&
(!TableFactoryUtil.isLegacyConnectorOptions(catalogManager.getCatalog(objectIdentifier.getCatalogName()).orElse(null), context.getTableConfig(), !context.isBatchMode(), objectIdentifier, resolvedTable, isTemporary)))
{
// create table dynamic table sink
DynamicTableSink tableSink = ExecutableOperationUtils.createDynamicTableSink(optionalCatalog.orElse(null), () -> context.getModuleManager().getFactory(Module::getTableSinkFactory), objectIdentifier, resolvedTable, Collections.emptyMap(), context.getTableConfig(), context.getClassLoader(), contextResolvedTable.isTemporary());
return Optional.of(tableSink);
}
}
return Optional.empty();
} | 3.26 |
flink_DeletePushDownUtils_m1_rdh | /**
 * Prepare the filter by reducing and simplifying its condition.
*/
private static Filter m1(Filter filter) {
// we try to reduce and simplify the filter
ReduceExpressionsRuleProxy reduceExpressionsRuleProxy = ReduceExpressionsRuleProxy.INSTANCE;
SimplifyFilterConditionRule simplifyFilterConditionRule = SimplifyFilterConditionRule.INSTANCE();
// max iteration num for reducing and simplifying filter,
// we use 5 as the max iteration num which is same with the iteration num in Flink's plan
// optimizing.
int maxIteration = 5;
boolean changed = true;
int iteration = 1;
// iterate until it reaches max iteration num or there's no changes in one iterate
while (changed && (iteration <= maxIteration)) {
changed = false;
// first apply the rule to reduce condition in filter
RexNode newCondition = filter.getCondition();
List<RexNode> expList = new ArrayList<>();
expList.add(newCondition);
if (reduceExpressionsRuleProxy.reduce(filter, expList)) {
// get the new condition
newCondition = expList.get(0);
changed = true;
}
// create a new filter
filter = filter.copy(filter.getTraitSet(), filter.getInput(), newCondition);
// then apply the rule to simplify filter
Option<Filter> changedFilter = simplifyFilterConditionRule.simplify(filter, new boolean[]{ false });
if (changedFilter.isDefined()) {
filter = changedFilter.get();
changed = true;
}
iteration += 1;
}
return filter;
} | 3.26 |
flink_DeletePushDownUtils_resolveFilter_rdh | /**
* Return the ResolvedExpression according to Filter.
*/
private static List<ResolvedExpression> resolveFilter(FlinkContext context, Filter filter) {
Tuple2<RexNode[], RexNode[]> extractedPredicates = FlinkRexUtil.extractPredicates(filter.getInput().getRowType().getFieldNames().toArray(new String[0]), filter.getCondition(), filter, filter.getCluster().getRexBuilder());
RexNode[] convertiblePredicates = extractedPredicates._1;
RexNode[] unconvertedPredicates = extractedPredicates._2;
if (unconvertedPredicates.length != 0) {
// if contain any unconverted condition, return null
return null;
}
RexNodeToExpressionConverter converter = new RexNodeToExpressionConverter(filter.getCluster().getRexBuilder(), filter.getInput().getRowType().getFieldNames().toArray(new String[0]), context.getFunctionCatalog(), context.getCatalogManager(),
TimeZone.getTimeZone(TableConfigUtils.getLocalTimeZone(context.getTableConfig())));
List<Expression> filters = Arrays.stream(convertiblePredicates).map(p -> {
Option<ResolvedExpression> expr = p.accept(converter);
if (expr.isDefined()) {
return expr.get();
} else {
throw new TableException(String.format("%s can not be converted to Expression", p));
}
}).collect(Collectors.toList());
ExpressionResolver resolver =
ExpressionResolver.resolverFor(context.getTableConfig(), context.getClassLoader(), name -> Optional.empty(), context.getFunctionCatalog().asLookup(str -> {
throw new TableException("We should not need to lookup any expressions at this point");
}), context.getCatalogManager().getDataTypeFactory(), (sqlExpression, inputRowType, outputType) -> {
throw new TableException("SQL expression parsing is not supported at this location.");
}).build();
return resolver.resolve(filters);
} | 3.26 |
flink_BridgingSqlProcedure_of_rdh | /**
* Creates an instance of a procedure.
*
* @param dataTypeFactory
* used for creating {@link DataType}
* @param resolvedProcedure
* {@link Procedure} with context
*/
public static BridgingSqlProcedure of(DataTypeFactory dataTypeFactory, ContextResolvedProcedure resolvedProcedure) {
final Procedure procedure = resolvedProcedure.getProcedure();
final ProcedureDefinition procedureDefinition = new ProcedureDefinition(procedure);
final TypeInference v2 = TypeInferenceExtractor.forProcedure(dataTypeFactory, procedure.getClass());
return new BridgingSqlProcedure(createName(resolvedProcedure), createSqlIdentifier(resolvedProcedure), createSqlReturnTypeInference(dataTypeFactory, procedureDefinition, v2), createSqlOperandTypeInference(dataTypeFactory, procedureDefinition, v2), createSqlOperandTypeChecker(dataTypeFactory, procedureDefinition, v2), SqlFunctionCategory.USER_DEFINED_PROCEDURE, resolvedProcedure);
} | 3.26 |
flink_JobsOverview_equals_rdh | // ------------------------------------------------------------------------
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
} else if (obj instanceof JobsOverview) {
JobsOverview that = ((JobsOverview) (obj));
return (((this.numJobsRunningOrPending == that.numJobsRunningOrPending) && (this.numJobsFinished ==
that.numJobsFinished)) && (this.numJobsCancelled == that.numJobsCancelled)) && (this.numJobsFailed == that.numJobsFailed);
} else {
return false;
}
} | 3.26 |
flink_JobsOverview_combine_rdh | /**
* Combines the given jobs overview with this.
*
* @param jobsOverview
* to combine with this
* @return Combined jobs overview
*/
public JobsOverview combine(JobsOverview jobsOverview) {
return new JobsOverview(this, jobsOverview);
} | 3.26 |
flink_MultipleParameterTool_fromArgs_rdh | // ------------------ Constructors ------------------------
/**
* Returns {@link MultipleParameterTool} for the given arguments. The arguments are keys
* followed by values. Keys have to start with '-' or '--'
*
* <p><strong>Example arguments:</strong> --key1 value1 --key2 value2 -key3 value3 --multi
* multiValue1 --multi multiValue2
*
* @param args
* Input array arguments
* @return A {@link MultipleParameterTool}
*/
public static MultipleParameterTool fromArgs(String[] args) {
final Map<String, Collection<String>> map = CollectionUtil.newHashMapWithExpectedSize(args.length / 2);
int v1 = 0;
while (v1 < args.length) {
final String key = Utils.getKeyFromArgs(args, v1);
v1 += 1; // try to find the value
map.putIfAbsent(key, new ArrayList<>());
if (v1 >= args.length) {
map.get(key).add(NO_VALUE_KEY);
} else if (NumberUtils.isNumber(args[v1])) {
map.get(key).add(args[v1]);
v1 += 1;
} else if (args[v1].startsWith("--") || args[v1].startsWith("-")) {
// the argument cannot be a negative number because we checked earlier
// -> the next argument is a parameter name
map.get(key).add(NO_VALUE_KEY);
} else {
map.get(key).add(args[v1]);
v1 += 1;
}
}
return fromMultiMap(map);
} | 3.26 |
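A small usage sketch of the parsing rules described above, assuming org.apache.flink.api.java.utils.MultipleParameterTool (as shipped with flink-java) is on the classpath; the argument names are made up for illustration:

```java
import java.util.Collection;
import org.apache.flink.api.java.utils.MultipleParameterTool;

public class ArgsDemo {
    public static void main(String[] args) {
        // e.g. --host localhost --port 8081 --input a.txt --input b.txt --verbose
        MultipleParameterTool params = MultipleParameterTool.fromArgs(args);

        String host = params.get("host");                               // single-valued key
        Collection<String> inputs = params.getMultiParameter("input");  // repeated key
        boolean verbose = params.has("verbose");                        // flag without a value

        System.out.println(host + " " + inputs + " verbose=" + verbose);
    }
}
```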
flink_MultipleParameterTool_getNumberOfParameters_rdh | // ------------------ Get data from the util ----------------
/**
* Returns number of parameters in {@link ParameterTool}.
*/
@Override
public int getNumberOfParameters() {
return data.size();
} | 3.26 |
flink_MultipleParameterTool_fromMultiMap_rdh | /**
* Returns {@link MultipleParameterTool} for the given multi map.
*
* @param multiMap
* A map of arguments. Key is String and value is a Collection.
* @return A {@link MultipleParameterTool}
*/
public static MultipleParameterTool fromMultiMap(Map<String, Collection<String>> multiMap) {
Preconditions.checkNotNull(multiMap, "Unable to initialize from empty map");
return new MultipleParameterTool(multiMap);
} | 3.26 |
flink_MultipleParameterTool_readObject_rdh | // ------------------------- Serialization ---------------------------------------------
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
in.defaultReadObject();
defaultData = new ConcurrentHashMap<>(data.size());
unrequestedParameters = Collections.newSetFromMap(new ConcurrentHashMap<>(data.size()));
} | 3.26 |
flink_MultipleParameterTool_mergeWith_rdh | // ------------------------- Interaction with other ParameterUtils -------------------------
/**
* Merges two {@link MultipleParameterTool}.
*
* @param other
* Other {@link MultipleParameterTool} object
* @return The Merged {@link MultipleParameterTool}
*/
public MultipleParameterTool mergeWith(MultipleParameterTool other) {
final Map<String, Collection<String>> resultData = CollectionUtil.newHashMapWithExpectedSize(data.size() + other.data.size());
resultData.putAll(data);
other.data.forEach((key, value) -> {
resultData.putIfAbsent(key, new ArrayList<>());
resultData.get(key).addAll(value);
});
final MultipleParameterTool ret = new MultipleParameterTool(resultData);
final HashSet<String> requestedParametersLeft = new HashSet<>(data.keySet());
requestedParametersLeft.removeAll(unrequestedParameters);
final HashSet<String> v8 = new HashSet<>(other.data.keySet());
v8.removeAll(other.unrequestedParameters);
ret.unrequestedParameters.removeAll(requestedParametersLeft);
ret.unrequestedParameters.removeAll(v8);
return ret;
} | 3.26 |
flink_MultipleParameterTool_has_rdh | /**
* Check if value is set.
*/
@Override
public boolean has(String value) {
addToDefaults(value, null);
unrequestedParameters.remove(value);
return data.containsKey(value);
} | 3.26 |
flink_MultipleParameterTool_getMultiParameter_rdh | /**
* Returns the Collection of String values for the given key. If the key does not exist it will
* return null.
*/
public Collection<String> getMultiParameter(String key) {
addToDefaults(key, null);
unrequestedParameters.remove(key);
return data.getOrDefault(key, null);
} | 3.26 |
flink_MultipleParameterTool_get_rdh | /**
 * Returns the String value for the given key. The value should only have one item. Use {@link #getMultiParameter(String)} instead if you want to get a parameter with multiple values. If the key does
* not exist it will return null.
*/
@Override
public String get(String key) {
addToDefaults(key, null);
unrequestedParameters.remove(key);
if (!data.containsKey(key)) {
return null;
}
Preconditions.checkState(data.get(key).size() == 1, "Key %s should has only one value.", key);
return ((String) (data.get(key).toArray()[0]));
} | 3.26 |
flink_MultipleParameterTool_m0_rdh | // ------------------------- ExecutionConfig.UserConfig interface -------------------------
@Override
public Map<String, String> m0() {
return getFlatMapOfData(data);
} | 3.26 |
flink_MultipleParameterTool_getFlatMapOfData_rdh | /**
 * Get the flat map of the multiple map data. If the key has multiple values, only the last one
 * will be used. This is also the current behavior when multiple parameters are specified for
* {@link ParameterTool}.
*
* @param data
* multiple map of data.
* @return flat map of data.
*/
private static Map<String, String> getFlatMapOfData(Map<String, Collection<String>> data) {
return data.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> {
if (e.getValue().size() > 0) {
return ((String) (e.getValue().toArray()[e.getValue().size() - 1]));
} else {
return NO_VALUE_KEY;
}
}));
} | 3.26 |
flink_MultipleParameterTool_getMultiParameterRequired_rdh | /**
* Returns the Collection of String values for the given key. If the key does not exist it will
* throw a {@link RuntimeException}.
*/
public Collection<String> getMultiParameterRequired(String key) {
addToDefaults(key, null);
Collection<String> value = getMultiParameter(key);
if (value == null) {
throw new RuntimeException(("No data for required key '" + key) + "'");
}
return value;
}
// ------------------------- Export to different targets -------------------------
/**
* Return MultiMap of all the parameters processed by {@link MultipleParameterTool}.
*
* @return MultiMap of the {@link MultipleParameterTool} | 3.26 |
flink_NettyMessageDecoder_onNewMessageReceived_rdh | /**
* Notifies that a new message is to be decoded.
*
* @param msgId
* The type of the message to be decoded.
* @param messageLength
* The length of the message to be decoded.
*/
void onNewMessageReceived(int msgId, int messageLength) {
this.msgId = msgId;
this.messageLength = messageLength;
} | 3.26 |
flink_CollectionUtil_iteratorToList_rdh | /**
* Collects the elements in the Iterator in a List. If the iterator argument is null, this
* method returns an empty list.
*/
public static <E> List<E> iteratorToList(@Nullable Iterator<E> iterator) {
if (iterator == null) {
return Collections.emptyList();
}
final ArrayList<E> list = new ArrayList<>();
iterator.forEachRemaining(list::add);
return list;
} | 3.26 |
flink_CollectionUtil_partition_rdh | /**
* Partition a collection into approximately n buckets.
*/
public static <T> Collection<List<T>> partition(Collection<T> elements, int numBuckets) {
Map<Integer, List<T>> buckets = newHashMapWithExpectedSize(numBuckets);
int initialCapacity = elements.size() / numBuckets;
int index = 0;
for (T element : elements) {
int bucket = index % numBuckets;
buckets.computeIfAbsent(bucket, key -> new ArrayList<>(initialCapacity)).add(element);
index++;
}
return buckets.values();
} | 3.26 |
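For instance, partitioning seven elements into three buckets with the round-robin indexing above yields [1, 4, 7], [2, 5] and [3, 6]. A brief usage sketch, assuming the class is org.apache.flink.util.CollectionUtil:

```java
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import org.apache.flink.util.CollectionUtil;

public class PartitionDemo {
    public static void main(String[] args) {
        Collection<List<Integer>> buckets =
                CollectionUtil.partition(Arrays.asList(1, 2, 3, 4, 5, 6, 7), 3);
        // element i goes to bucket (i % numBuckets), so bucket sizes differ by at most one
        buckets.forEach(System.out::println);
    }
}
```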
flink_CollectionUtil_newHashSetWithExpectedSize_rdh | /**
* Creates a new {@link HashSet} of the expected size, i.e. a hash set that will not rehash if
* expectedSize many unique elements are inserted, considering the load factor.
*
* @param expectedSize
* the expected size of the created hash map.
* @return a new hash map instance with enough capacity for the expected size.
* @param <E>
* the type of elements stored by this set.
*/
public static <E> HashSet<E> newHashSetWithExpectedSize(int expectedSize) {
return new HashSet<>(computeRequiredCapacity(expectedSize, HASH_MAP_DEFAULT_LOAD_FACTOR), HASH_MAP_DEFAULT_LOAD_FACTOR);
} | 3.26 |
flink_CollectionUtil_map_rdh | /**
* Returns an immutable {@link Map} from the provided entries.
*/
@SafeVarargs
public static <K, V> Map<K, V> map(Map.Entry<K, V>... entries) {
if (entries == null) {
return Collections.emptyMap();
}
Map<K, V> map = new HashMap<>();
for (Map.Entry<K, V> entry : entries) {
map.put(entry.getKey(), entry.getValue());
}
return Collections.unmodifiableMap(map);
} | 3.26 |
flink_CollectionUtil_newLinkedHashMapWithExpectedSize_rdh | /**
* Creates a new {@link LinkedHashMap} of the expected size, i.e. a hash map that will not
* rehash if expectedSize many keys are inserted, considering the load factor.
*
* @param expectedSize
* the expected size of the created hash map.
* @return a new hash map instance with enough capacity for the expected size.
* @param <K>
* the type of keys maintained by this map.
* @param <V>
* the type of mapped values.
*/
public static <K, V> LinkedHashMap<K, V> newLinkedHashMapWithExpectedSize(int expectedSize) {
return new LinkedHashMap<>(computeRequiredCapacity(expectedSize, HASH_MAP_DEFAULT_LOAD_FACTOR), HASH_MAP_DEFAULT_LOAD_FACTOR);
} | 3.26 |
flink_CollectionUtil_newHashMapWithExpectedSize_rdh | /**
* Creates a new {@link HashMap} of the expected size, i.e. a hash map that will not rehash if
* expectedSize many keys are inserted, considering the load factor.
*
* @param expectedSize
* the expected size of the created hash map.
* @return a new hash map instance with enough capacity for the expected size.
* @param <K>
* the type of keys maintained by this map.
* @param <V>
* the type of mapped values.
*/
public static <K, V> HashMap<K, V> newHashMapWithExpectedSize(int expectedSize) {
return new HashMap<>(computeRequiredCapacity(expectedSize, HASH_MAP_DEFAULT_LOAD_FACTOR), HASH_MAP_DEFAULT_LOAD_FACTOR);
} | 3.26 |
flink_CollectionUtil_newLinkedHashSetWithExpectedSize_rdh | /**
* Creates a new {@link LinkedHashSet} of the expected size, i.e. a hash set that will not
* rehash if expectedSize many unique elements are inserted, considering the load factor.
*
* @param expectedSize
* the expected size of the created hash map.
* @return a new hash map instance with enough capacity for the expected size.
* @param <E>
* the type of elements stored by this set.
*/
public static <E> LinkedHashSet<E> newLinkedHashSetWithExpectedSize(int expectedSize) {
return new LinkedHashSet<>(computeRequiredCapacity(expectedSize, HASH_MAP_DEFAULT_LOAD_FACTOR), HASH_MAP_DEFAULT_LOAD_FACTOR);
} | 3.26 |
flink_CollectionUtil_iterableToList_rdh | /**
* Collects the elements in the Iterable in a List. If the iterable argument is null, this
* method returns an empty list.
*/
public static <E> List<E> iterableToList(@Nullable Iterable<E> iterable) {
if (iterable == null) {
return Collections.emptyList();
}
final ArrayList<E> list = new ArrayList<>();
iterable.iterator().forEachRemaining(list::add);
return list;
} | 3.26 |
flink_CollectionUtil_computeRequiredCapacity_rdh | /**
* Helper method to compute the right capacity for a hash map with load factor
* HASH_MAP_DEFAULT_LOAD_FACTOR.
 */
@VisibleForTesting
static int computeRequiredCapacity(int expectedSize, float loadFactor) {
Preconditions.checkArgument(expectedSize >= 0);
Preconditions.checkArgument(loadFactor > 0.0F);
if (expectedSize <= 2) {
return expectedSize + 1;
}
return expectedSize < ((Integer.MAX_VALUE / 2) + 1) ? ((int) (Math.ceil(expectedSize / loadFactor))) : Integer.MAX_VALUE;
} | 3.26 |
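A standalone sketch reproducing that capacity rule so the rounding is explicit; the 0.75f load factor is an assumption matching the JDK default, not necessarily Flink's HASH_MAP_DEFAULT_LOAD_FACTOR:

```java
public class CapacityDemo {
    // assumed load factor, matching the JDK default
    static final float LOAD_FACTOR = 0.75f;

    /** capacity = ceil(expectedSize / loadFactor), with small-size and overflow special cases. */
    static int requiredCapacity(int expectedSize) {
        if (expectedSize <= 2) {
            return expectedSize + 1;
        }
        return expectedSize < (Integer.MAX_VALUE / 2 + 1)
                ? (int) Math.ceil(expectedSize / LOAD_FACTOR)
                : Integer.MAX_VALUE;
    }

    public static void main(String[] args) {
        // 16 expected entries -> ceil(16 / 0.75) = 22, so the map never rehashes below 17 entries
        System.out.println(requiredCapacity(16)); // 22
        System.out.println(requiredCapacity(2));  // 3 (small-size special case)
    }
}
```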
flink_CollectionUtil_entry_rdh | /**
* Returns an immutable {@link Map.Entry}.
*/
public static <K, V> Map.Entry<K, V> entry(K k, V v) {
return new AbstractMap.SimpleImmutableEntry<>(k, v);
} | 3.26 |
flink_ExecutionConfig_setGlobalJobParameters_rdh | /**
* Register a custom, serializable user configuration object.
*
* @param globalJobParameters
* Custom user configuration object
*/
public void setGlobalJobParameters(GlobalJobParameters globalJobParameters) {
Preconditions.checkNotNull(globalJobParameters, "globalJobParameters shouldn't be null");
setGlobalJobParameters(globalJobParameters.toMap());
} | 3.26 |
flink_ExecutionConfig_setMaxParallelism_rdh | /**
* Sets the maximum degree of parallelism defined for the program.
*
* <p>The maximum degree of parallelism specifies the upper limit for dynamic scaling. It also
* defines the number of key groups used for partitioned state.
*
* @param maxParallelism
* Maximum degree of parallelism to be used for the program.
*/
@PublicEvolving
public void setMaxParallelism(int maxParallelism) {
checkArgument(maxParallelism > 0, "The maximum parallelism must be greater than 0.");
configuration.set(PipelineOptions.MAX_PARALLELISM, maxParallelism);
} | 3.26 |
flink_ExecutionConfig_getRegisteredTypesWithKryoSerializerClasses_rdh | /**
* Returns the registered types with their Kryo Serializer classes.
*/
public LinkedHashMap<Class<?>, Class<? extends Serializer<?>>> getRegisteredTypesWithKryoSerializerClasses() {
return registeredTypesWithKryoSerializerClasses;
} | 3.26 |
flink_ExecutionConfig_configure_rdh | /**
* Sets all relevant options contained in the {@link ReadableConfig} such as e.g. {@link PipelineOptions#CLOSURE_CLEANER_LEVEL}.
*
* <p>It will change the value of a setting only if a corresponding option was set in the {@code configuration}. If a key is not present, the current value of a field will remain untouched.
*
* @param configuration
* a configuration to read the values from
* @param classLoader
* a class loader to use when loading classes
*/
public void configure(ReadableConfig configuration, ClassLoader classLoader) {
configuration.getOptional(PipelineOptions.AUTO_TYPE_REGISTRATION).ifPresent(this::setAutoTypeRegistration);
configuration.getOptional(PipelineOptions.AUTO_GENERATE_UIDS).ifPresent(this::setAutoGeneratedUids);
configuration.getOptional(PipelineOptions.AUTO_WATERMARK_INTERVAL).ifPresent(this::setAutoWatermarkInterval);
configuration.getOptional(PipelineOptions.CLOSURE_CLEANER_LEVEL).ifPresent(this::setClosureCleanerLevel);
configuration.getOptional(PipelineOptions.FORCE_AVRO).ifPresent(this::setForceAvro);
configuration.getOptional(PipelineOptions.GENERIC_TYPES).ifPresent(this::setGenericTypes);
configuration.getOptional(PipelineOptions.FORCE_KRYO).ifPresent(this::setForceKryo);
configuration.getOptional(PipelineOptions.GLOBAL_JOB_PARAMETERS).ifPresent(this::setGlobalJobParameters);
configuration.getOptional(MetricOptions.LATENCY_INTERVAL).ifPresent(this::setLatencyTrackingInterval);
configuration.getOptional(StateChangelogOptions.PERIODIC_MATERIALIZATION_INTERVAL).ifPresent(this::setPeriodicMaterializeIntervalMillis);
configuration.getOptional(StateChangelogOptions.MATERIALIZATION_MAX_FAILURES_ALLOWED).ifPresent(this::setMaterializationMaxAllowedFailures);
configuration.getOptional(PipelineOptions.MAX_PARALLELISM).ifPresent(this::setMaxParallelism);
configuration.getOptional(CoreOptions.DEFAULT_PARALLELISM).ifPresent(this::setParallelism);
configuration.getOptional(PipelineOptions.OBJECT_REUSE).ifPresent(this::setObjectReuse);
configuration.getOptional(TaskManagerOptions.TASK_CANCELLATION_INTERVAL).ifPresent(this::setTaskCancellationInterval);
configuration.getOptional(TaskManagerOptions.TASK_CANCELLATION_TIMEOUT).ifPresent(this::setTaskCancellationTimeout);
configuration.getOptional(ExecutionOptions.SNAPSHOT_COMPRESSION).ifPresent(this::setUseSnapshotCompression);
RestartStrategies.fromConfiguration(configuration).ifPresent(this::setRestartStrategy);
configuration.getOptional(PipelineOptions.KRYO_DEFAULT_SERIALIZERS).map(s -> parseKryoSerializersWithExceptionHandling(classLoader, s)).ifPresent(s -> this.defaultKryoSerializerClasses = s);
configuration.getOptional(PipelineOptions.POJO_REGISTERED_CLASSES).map(c -> loadClasses(c, classLoader, "Could not load pojo type to be registered.")).ifPresent(c -> this.registeredPojoTypes = c);
configuration.getOptional(PipelineOptions.KRYO_REGISTERED_CLASSES).map(c -> loadClasses(c, classLoader, "Could not load kryo type to be registered.")).ifPresent(c -> this.registeredKryoTypes = c);
configuration.getOptional(JobManagerOptions.SCHEDULER).ifPresent(t -> this.configuration.set(JobManagerOptions.SCHEDULER, t));
}
/**
*
* @return A copy of internal {@link #configuration}. Note it is missing all options that are
stored as plain java fields in {@link ExecutionConfig}, for example {@link #registeredKryoTypes} | 3.26 |
flink_ExecutionConfig_registerKryoType_rdh | /**
* Registers the given type with the serialization stack. If the type is eventually serialized
* as a POJO, then the type is registered with the POJO serializer. If the type ends up being
* serialized with Kryo, then it will be registered at Kryo to make sure that only tags are
* written.
*
* @param type
* The class of the type to register.
*/
public void registerKryoType(Class<?> type) {
if (type == null) {
throw new NullPointerException("Cannot register null type class.");
}
registeredKryoTypes.add(type);
} | 3.26 |
flink_ExecutionConfig_disableClosureCleaner_rdh | /**
* Disables the ClosureCleaner.
*
* @see #enableClosureCleaner()
*/
public ExecutionConfig disableClosureCleaner() {
return setClosureCleanerLevel(ClosureCleanerLevel.NONE);
} | 3.26 |
flink_ExecutionConfig_setTaskCancellationTimeout_rdh | /**
* Sets the timeout (in milliseconds) after which an ongoing task cancellation is considered
* failed, leading to a fatal TaskManager error.
*
* <p>The cluster default is configured via {@link TaskManagerOptions#TASK_CANCELLATION_TIMEOUT}.
*
* <p>The value <code>0</code> disables the timeout. In this case a stuck cancellation will not
* lead to a fatal error.
*
* @param timeout
* The task cancellation timeout (in milliseconds).
*/
@PublicEvolving
public ExecutionConfig setTaskCancellationTimeout(long timeout) {
checkArgument(timeout >= 0, "Timeout needs to be >= 0.");
configuration.set(TaskManagerOptions.TASK_CANCELLATION_TIMEOUT, timeout);
return this;
} | 3.26 |
flink_ExecutionConfig_setClosureCleanerLevel_rdh | /**
* Configures the closure cleaner. Please see {@link ClosureCleanerLevel} for details on the
* different settings.
*/
public ExecutionConfig setClosureCleanerLevel(ClosureCleanerLevel level) {
configuration.set(PipelineOptions.CLOSURE_CLEANER_LEVEL, level);
return this;
} | 3.26 |
flink_ExecutionConfig_canEqual_rdh | /**
* This method simply checks whether the object is an {@link ExecutionConfig} instance.
*
* @deprecated It is not intended to be used by users.
*/
@Deprecated
public boolean canEqual(Object obj) {
return obj instanceof ExecutionConfig;
} | 3.26 |
flink_ExecutionConfig_addDefaultKryoSerializer_rdh | /**
* Adds a new Kryo default serializer to the Runtime.
*
* @param type
* The class of the types serialized with the given serializer.
* @param serializerClass
* The class of the serializer to use.
*/
public void addDefaultKryoSerializer(Class<?> type, Class<? extends Serializer<?>> serializerClass) {
if ((type == null) || (serializerClass == null)) {
throw new NullPointerException("Cannot register null class or serializer.");}
defaultKryoSerializerClasses.put(type, serializerClass);
} | 3.26 |
flink_ExecutionConfig_isObjectReuseEnabled_rdh | /**
* Returns whether object reuse has been enabled or disabled. @see #enableObjectReuse()
*/
public boolean isObjectReuseEnabled() {
return configuration.get(PipelineOptions.OBJECT_REUSE);
} | 3.26 |
flink_ExecutionConfig_getRestartStrategy_rdh | /**
* Returns the restart strategy which has been set for the current job.
*
* @return The specified restart configuration
*/
@PublicEvolving
@SuppressWarnings("deprecation")
public RestartStrategyConfiguration getRestartStrategy() {
if (restartStrategyConfiguration instanceof RestartStrategies.FallbackRestartStrategyConfiguration) {
// support the old API calls by creating a restart strategy from them
if ((getNumberOfExecutionRetries() > 0) && (getExecutionRetryDelay() >= 0)) {
return RestartStrategies.fixedDelayRestart(getNumberOfExecutionRetries(), getExecutionRetryDelay());
} else if (getNumberOfExecutionRetries() == 0) {
return RestartStrategies.noRestart();
} else {
return restartStrategyConfiguration;
}
} else {
return restartStrategyConfiguration;
}
} | 3.26 |
flink_ExecutionConfig_setParallelism_rdh | /**
* Sets the parallelism for operations executed through this environment. Setting a parallelism
* of x here will cause all operators (such as join, map, reduce) to run with x parallel
* instances.
*
* <p>This method overrides the default parallelism for this environment. The local execution
* environment uses by default a value equal to the number of hardware contexts (CPU cores /
* threads). When executing the program via the command line client from a JAR file, the default
* parallelism is the one configured for that setup.
*
* @param parallelism
* The parallelism to use
 */
public ExecutionConfig setParallelism(int parallelism) {
if (parallelism != PARALLELISM_UNKNOWN) {
if ((parallelism < 1) && (parallelism != PARALLELISM_DEFAULT)) {
throw new IllegalArgumentException("Parallelism must be at least one, or ExecutionConfig.PARALLELISM_DEFAULT (use system default).");
}
configuration.set(CoreOptions.DEFAULT_PARALLELISM, parallelism);
}
return this;
} | 3.26 |
flink_ExecutionConfig_registerPojoType_rdh | /**
* Registers the given type with the serialization stack. If the type is eventually serialized
* as a POJO, then the type is registered with the POJO serializer. If the type ends up being
* serialized with Kryo, then it will be registered at Kryo to make sure that only tags are
* written.
*
* @param type
* The class of the type to register.
*/
public void registerPojoType(Class<?> type) {
if (type == null) {
throw new NullPointerException("Cannot register null type class.");
}
if (!registeredPojoTypes.contains(type)) {
registeredPojoTypes.add(type);
}
} | 3.26 |
flink_ExecutionConfig_m0_rdh | /**
* Interval for sending latency tracking marks from the sources to the sinks. Flink will send
* latency tracking marks from the sources at the specified interval.
*
* <p>Setting a tracking interval <= 0 disables the latency tracking.
*
* @param interval
* Interval in milliseconds.
 */
@PublicEvolving
public ExecutionConfig m0(long interval) {
configuration.set(MetricOptions.LATENCY_INTERVAL, interval);
return this;
} | 3.26 |
flink_ExecutionConfig_isForceAvroEnabled_rdh | /**
* Returns whether the Apache Avro is the default serializer for POJOs.
*/
public boolean isForceAvroEnabled() {
return configuration.get(PipelineOptions.FORCE_AVRO);
} | 3.26 |
flink_ExecutionConfig_setRestartStrategy_rdh | /**
* Sets the restart strategy to be used for recovery.
*
* <pre>{@code ExecutionConfig config = env.getConfig();
*
* config.setRestartStrategy(RestartStrategies.fixedDelayRestart(
* 10, // number of retries
* 1000 // delay between retries));}</pre>
*
* @param restartStrategyConfiguration
* Configuration defining the restart strategy to use
*/
@PublicEvolving
public void setRestartStrategy(RestartStrategies.RestartStrategyConfiguration restartStrategyConfiguration) {
this.restartStrategyConfiguration = Preconditions.checkNotNull(restartStrategyConfiguration);
} | 3.26 |
flink_ExecutionConfig_getDefaultInputDependencyConstraint_rdh | /**
* This method is deprecated. It was used to return the {@link InputDependencyConstraint}
* utilized by the old scheduler implementations. These implementations were removed as part of
* FLINK-20589.
*
* @return The previous default constraint {@link InputDependencyConstraint#ANY}.
* @deprecated due to the deprecation of {@code InputDependencyConstraint}.
*/
@PublicEvolving
@Deprecated
public InputDependencyConstraint getDefaultInputDependencyConstraint() {
return InputDependencyConstraint.ANY;
} | 3.26 |
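A brief usage sketch tying a few of the ExecutionConfig setters above together; the config is normally obtained via env.getConfig(), it is constructed directly here for illustration, and the registered types are placeholders:

```java
import org.apache.flink.api.common.ExecutionConfig;

public class ExecutionConfigDemo {

    // placeholder user types, for illustration only
    public static class MyEvent {
        public long id;
        public String payload;
    }

    public static class MyLegacyType {}

    public static void main(String[] args) {
        ExecutionConfig config = new ExecutionConfig();
        config.setParallelism(4);
        config.setMaxParallelism(128);               // upper bound for rescaling / number of key groups
        config.registerPojoType(MyEvent.class);      // prefer the POJO serializer for this type
        config.registerKryoType(MyLegacyType.class); // register with Kryo so only tags are written
        config.setTaskCancellationTimeout(30_000L);
        System.out.println(config);
    }
}
```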