name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68)
---|---|---|
flink_HiveFunctionDefinitionFactory_isFlinkFunction_rdh | /**
* Distinguish if the function is a Flink function.
*
* @return whether the function is a Flink function
*/
private boolean isFlinkFunction(CatalogFunction catalogFunction, ClassLoader classLoader) {
if (catalogFunction.getFunctionLanguage() == FunctionLanguage.PYTHON) {
return true;
}
try {
Class<?> c = Class.forName(catalogFunction.getClassName(), true, classLoader);
if (UserDefinedFunction.class.isAssignableFrom(c)) {
return true;
}
} catch (ClassNotFoundException e) {
throw new RuntimeException(String.format("Can't resolve udf class %s", catalogFunction.getClassName()), e);
}
return false;
} | 3.26 |
flink_StreamTableSourceFactory_createTableSource_rdh | /**
* Only create a stream table source.
*/
@Override
default TableSource<T> createTableSource(Map<String, String> properties) {
StreamTableSource<T> source = createStreamTableSource(properties);
if (source == null) {
throw new ValidationException("Please override 'createTableSource(Context)' method.");
}
return source;
} | 3.26 |
flink_SharedBufferEdge_readObject_rdh | // ------------------------------------------------------------------------
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
in.defaultReadObject();
if (nodeIdSerializer == null) {
// the nested serializers will be null if this was read from a savepoint taken with
// versions
// lower than Flink 1.7; in this case, we explicitly create instances for the nested
// serializers
this.nodeIdSerializer = new NodeId.NodeIdSerializer();
this.deweyNumberSerializer = DeweyNumberSerializer.INSTANCE;
}
} | 3.26 |
flink_SharedBufferEdge_snapshotConfiguration_rdh | // -----------------------------------------------------------------------------------
@Override
public TypeSerializerSnapshot<SharedBufferEdge> snapshotConfiguration() {
return new SharedBufferEdgeSerializerSnapshot(this);
} | 3.26 |
flink_TableSourceFactory_createTableSource_rdh | /**
* Creates and configures a {@link TableSource} based on the given {@link Context}.
*
* @param context
* context of this table source.
* @return the configured table source.
*/
default TableSource<T> createTableSource(Context context) {
return createTableSource(context.getObjectIdentifier().toObjectPath(), context.getTable());
} | 3.26 |
flink_BulkPartialSolutionPlanNode_getPartialSolutionNode_rdh | // --------------------------------------------------------------------------------------------
public BulkPartialSolutionNode getPartialSolutionNode() {
return ((BulkPartialSolutionNode) (this.template));
} | 3.26 |
flink_BulkPartialSolutionPlanNode_accept_rdh | // --------------------------------------------------------------------------------------------
@Override
public void accept(Visitor<PlanNode> visitor) {
if (visitor.preVisit(this)) {
visitor.postVisit(this);
}
} | 3.26 |
flink_Hardware_m0_rdh | // ------------------------------------------------------------------------
/**
* Gets the number of CPU cores (hardware contexts) that the JVM has access to.
*
* @return The number of CPU cores.
*/
public static int m0() {
return Runtime.getRuntime().availableProcessors();
}
/**
* Returns the size of the physical memory in bytes.
*
* @return the size of the physical memory in bytes or {@code -1} | 3.26 |
flink_NFAStateNameHandler_clear_rdh | /**
* Clear the names added during checking name uniqueness.
*/
public void clear() {
usedNames.clear();
} | 3.26 |
flink_NFAStateNameHandler_getUniqueInternalName_rdh | /**
* Used to give a unique name to {@link org.apache.flink.cep.nfa.NFA} states created during the
* translation process. The name format will be {@code baseName:counter} , where the counter is
* increasing for states with the same {@code baseName}.
*
* @param baseName
* The base of the name.
* @return The (unique) name that is going to be used internally for the state.
*/
public String getUniqueInternalName(String baseName) {
int counter = 0;
String v1 = baseName;
while (usedNames.contains(v1)) {
v1 = (baseName + STATE_NAME_DELIM) + (counter++);
}
usedNames.add(v1);
return v1;
} | 3.26 |
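A minimal, self-contained sketch of how the naming scheme above behaves, re-implemented with a plain HashSet. The delimiter constant is an assumption: the javadoc's `baseName:counter` format suggests STATE_NAME_DELIM is `":"`, but that constant and the `usedNames` field are internal to NFAStateNameHandler.

```java
import java.util.HashSet;
import java.util.Set;

public class UniqueNameSketch {
    private static final String DELIM = ":"; // assumption: STATE_NAME_DELIM
    private final Set<String> usedNames = new HashSet<>();

    public String getUniqueInternalName(String baseName) {
        int counter = 0;
        String candidate = baseName;
        // probe baseName, baseName:0, baseName:1, ... until an unused name is found
        while (usedNames.contains(candidate)) {
            candidate = baseName + DELIM + (counter++);
        }
        usedNames.add(candidate);
        return candidate;
    }

    public static void main(String[] args) {
        UniqueNameSketch handler = new UniqueNameSketch();
        System.out.println(handler.getUniqueInternalName("start")); // start
        System.out.println(handler.getUniqueInternalName("start")); // start:0
        System.out.println(handler.getUniqueInternalName("start")); // start:1
    }
}
```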
flink_NFAStateNameHandler_checkNameUniqueness_rdh | /**
* Checks if the given name is already used or not. If yes, it throws a {@link MalformedPatternException}.
*
* @param name
* The name to be checked.
*/
public void checkNameUniqueness(String name) {
if (usedNames.contains(name)) {
throw new MalformedPatternException(("Duplicate pattern name: " + name) + ". Names must be unique.");
}
usedNames.add(name);
} | 3.26 |
flink_DataGeneratorSource_getProducedType_rdh | // ------------------------------------------------------------------------
// source methods
// ------------------------------------------------------------------------
@Override
public TypeInformation<OUT> getProducedType() {
return typeInfo;
} | 3.26 |
flink_SolutionSetNode_setCandidateProperties_rdh | // --------------------------------------------------------------------------------------------
public void setCandidateProperties(GlobalProperties gProps, LocalProperties lProps, Channel initialInput) {
this.cachedPlans = Collections.<PlanNode>singletonList(new SolutionSetPlanNode(this, ("SolutionSet (" + this.getOperator().getName()) + ")", gProps, lProps, initialInput));
} | 3.26 |
flink_SolutionSetNode_getOperator_rdh | // --------------------------------------------------------------------------------------------
/**
* Gets the contract object for this data source node.
*
* @return The contract.
*/
@Override
public SolutionSetPlaceHolder<?> getOperator() {
return ((SolutionSetPlaceHolder<?>) (super.getOperator()));
} | 3.26 |
flink_ValueLiteralExpression_deriveDataTypeFromValue_rdh | // --------------------------------------------------------------------------------------------
private static DataType deriveDataTypeFromValue(Object value) {
return ValueDataTypeConverter.extractDataType(value).orElseThrow(() -> new ValidationException((("Cannot derive a data type for value '" + value) +
"'. ") + "The data type must be specified explicitly."));
} | 3.26 |
flink_ValueLiteralExpression_getValueAs_rdh | /**
* Returns the value (excluding null) as an instance of the given class.
*
* <p>It supports conversions to default conversion classes of {@link LogicalType LogicalTypes}
* and additionally to {@link BigDecimal} for all types of {@link LogicalTypeFamily#NUMERIC}.
* This method should not be called with other classes.
*
* <p>Note to implementers: Whenever we add a new class here, make sure to also update the
* planner for supporting the class via {@link CallContext#getArgumentValue(int, Class)}.
*/
@SuppressWarnings("unchecked")
public <T> Optional<T> getValueAs(Class<T> clazz) {
Preconditions.checkArgument(!clazz.isPrimitive());
if (value == null) {
return Optional.empty();
}
Object convertedValue = null;
if (clazz.isInstance(value)) {
convertedValue = clazz.cast(value);
} else {
Class<?> valueClass = value.getClass();
if (clazz == Period.class) {
convertedValue = convertToPeriod(value, valueClass);
} else if (clazz == Duration.class) {
convertedValue = convertToDuration(value, valueClass);
} else if (clazz == LocalDate.class) {
convertedValue = convertToLocalDate(value, valueClass);
} else if (clazz
== LocalTime.class) {
convertedValue = convertToLocalTime(value, valueClass);
} else if (clazz == LocalDateTime.class) {
convertedValue = convertToLocalDateTime(value, valueClass);
} else if (clazz == OffsetDateTime.class) {
convertedValue = convertToOffsetDateTime(value,
valueClass);
} else if (clazz == Instant.class) {
convertedValue = convertToInstant(value, valueClass);
} else if (clazz == BigDecimal.class) {
convertedValue = m0(value);
}
}
return Optional.ofNullable(((T) (convertedValue)));
} | 3.26 |
flink_ValueLiteralExpression_stringifyValue_rdh | /**
* Supports (nested) arrays and makes string values more explicit.
*/
private static String stringifyValue(Object value) {
if (value instanceof String[]) {
final String[] array = ((String[]) (value));
return Stream.of(array).map(ValueLiteralExpression::stringifyValue).collect(Collectors.joining(", ", "[", "]"));
} else if (value instanceof Object[]) {
final Object[] array = ((Object[]) (value));
return
Stream.of(array).map(ValueLiteralExpression::stringifyValue).collect(Collectors.joining(", ", "[", "]"));
} else if (value instanceof String) {
return ("'" + ((String) (value)).replace("'", "''")) + "'";
}
return StringUtils.arrayAwareToString(value);
} | 3.26 |
flink_ContextResolvedTable_getTable_rdh | /**
* Returns the original metadata object returned by the catalog.
*/
@SuppressWarnings("unchecked")
public <T extends CatalogBaseTable> T getTable() {
return ((T) (resolvedTable.getOrigin()));
} | 3.26 |
flink_ContextResolvedTable_isTemporary_rdh | /**
*
* @return true if the table is temporary. An anonymous table is always temporary.
*/
public boolean isTemporary() {
return catalog == null;
} | 3.26 |
flink_ContextResolvedTable_generateAnonymousStringIdentifier_rdh | /**
* This method tries to return the connector name of the table, to provide a slightly more
* helpful toString for anonymous tables. It exists only to help users debug, and its return value
* should not be relied on.
*/
private static String generateAnonymousStringIdentifier(@Nullable String hint, ResolvedCatalogBaseTable<?> resolvedTable) {
// Planner can do some fancy optimizations' logic squashing two sources together in the same
// operator. Because this logic is string based, anonymous tables still need some kind of
// unique string based identifier that can be used later by the planner.
if (hint == null) {
try {
hint = resolvedTable.getOptions().get(FactoryUtil.CONNECTOR.key());
} catch (Exception ignored) {
}
}
int id = uniqueId.incrementAndGet();
if (hint == null) {
return ("*anonymous$" + id) + "*";
}
return ((("*anonymous_" + hint) + "$") + id) + "*";
} | 3.26 |
flink_ContextResolvedTable_getCatalog_rdh | /**
* Returns empty if {@link #isPermanent()} is false.
*/
public Optional<Catalog> getCatalog() {
return Optional.ofNullable(catalog);
} | 3.26 |
flink_ContextResolvedTable_copy_rdh | /**
* Copy the {@link ContextResolvedTable}, replacing the underlying {@link ResolvedSchema}.
*/
public ContextResolvedTable copy(ResolvedSchema newSchema) {
return new ContextResolvedTable(objectIdentifier, catalog, new ResolvedCatalogTable(((CatalogTable) (resolvedTable.getOrigin())), newSchema), false);
} | 3.26 |
flink_Tuple3_setFields_rdh | /**
* Sets new values to all fields of the tuple.
*
* @param f0
* The value for field 0
* @param f1
* The value for field 1
* @param f2
* The value for field 2
*/
public void setFields(T0 f0, T1 f1, T2 f2) {
this.f0 = f0;
this.f1 = f1;
this.f2 = f2;
}
// -------------------------------------------------------------------------------------------------
// standard utilities
// -------------------------------------------------------------------------------------------------
/**
* Creates a string representation of the tuple in the form (f0, f1, f2), where the individual
* fields are the value returned by calling {@link Object#toString} | 3.26 |
flink_Tuple3_equals_rdh | /**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o
* the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple3)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple3 tuple = ((Tuple3) (o));
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
return false;
}
if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {
return false;
}
if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
return false;
}
return true;
} | 3.26 |
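Tuple3 is part of Flink's public Java API (org.apache.flink.api.java.tuple.Tuple3), so the setFields and equals methods above can be exercised directly; a small usage sketch:

```java
import org.apache.flink.api.java.tuple.Tuple3;

public class Tuple3Example {
    public static void main(String[] args) {
        Tuple3<String, Integer, Double> a = new Tuple3<>("flink", 1, 2.0);
        Tuple3<String, Integer, Double> b = new Tuple3<>();
        b.setFields("flink", 1, 2.0);     // sets f0, f1, f2 in one call
        System.out.println(a.equals(b));  // true: deep, field-by-field equality
        b.f2 = 3.0;
        System.out.println(a.equals(b));  // false: f2 differs
        System.out.println(a);            // string form (f0, f1, f2) as described above
    }
}
```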
flink_WorksetPlanNode_getWorksetNode_rdh | // --------------------------------------------------------------------------------------------
public WorksetNode getWorksetNode() {
return ((WorksetNode) (this.template));
} | 3.26 |
flink_WorksetPlanNode_accept_rdh | // --------------------------------------------------------------------------------------------
@Override
public void accept(Visitor<PlanNode> visitor) {
if (visitor.preVisit(this)) {
visitor.postVisit(this);
}
} | 3.26 |
flink_ConnectionUtils_tryToConnect_rdh | /**
*
* @param fromAddress
* The address to connect from.
* @param toSocket
* The socket address to connect to.
* @param timeout
* The timeout for the connection.
* @param logFailed
* Flag to indicate whether to log failed attempts on info level (failed
* attempts are always logged on DEBUG level).
* @return True, if the connection was successful, false otherwise.
* @throws IOException
* Thrown if the socket cleanup fails.
*/
private static boolean tryToConnect(InetAddress fromAddress, SocketAddress toSocket, int timeout, boolean logFailed) throws IOException {
String detailedMessage = String.format("connect to [%s] from local address [%s] with timeout [%s]", toSocket, fromAddress, timeout);
if (LOG.isDebugEnabled()) {
LOG.debug("Trying to " + detailedMessage);
}
try (Socket socket =
new Socket()) {
// port 0 = let the OS choose the port
SocketAddress bindP = new InetSocketAddress(fromAddress, 0);
// machine
socket.bind(bindP);
socket.connect(toSocket, timeout);
return true;
} catch (Exception ex) {
String message = (("Failed to " + detailedMessage) + " due to: ") + ex.getMessage();
if (LOG.isDebugEnabled()) {
LOG.debug(message, ex);
} else if (logFailed) {
LOG.info(message);
}
return false;
}
}
/**
* A {@link LeaderRetrievalListener} that allows retrieving an {@link InetAddress} | 3.26 |
flink_ConnectionUtils_hasCommonPrefix_rdh | /**
* Checks if two addresses have a common prefix (first 2 bytes). Example: 192.168.???.???. Also
* works with IPv6, but probably accepts too many addresses.
*/
private static boolean hasCommonPrefix(byte[] address, byte[] address2) {
return (address[0] == address2[0]) && (address[1] == address2[1]);
} | 3.26 |
flink_ConnectionUtils_findConnectingAddress_rdh | /**
* Finds the local network address from which this machine can connect to the target address.
* This method tries to establish a proper network connection to the given target, so it only
* succeeds if the target socket address actually accepts connections. The method tries various
* strategies multiple times and uses an exponential backoff timer between tries.
*
* <p>If no connection attempt was successful after the given maximum time, the method will
* choose some address based on heuristics (excluding link-local and loopback addresses.)
*
* <p>This method will initially not log on info level (to not flood the log while the backoff
* time is still very low). It will start logging after a certain time has passed.
*
* @param targetAddress
* The address that the method tries to connect to.
* @param maxWaitMillis
* The maximum time that this method tries to connect, before falling back
* to the heuristics.
* @param startLoggingAfter
* The time after which the method will log on INFO level.
*/
public static InetAddress findConnectingAddress(InetSocketAddress
targetAddress, long maxWaitMillis, long startLoggingAfter) throws IOException {
if (targetAddress == null) {
throw new NullPointerException("targetAddress must not be null");
}
if (maxWaitMillis <= 0) {
throw new IllegalArgumentException("Max wait time must be positive");
}
final long startTimeNanos = System.nanoTime();
long currentSleepTime = MIN_SLEEP_TIME;
long elapsedTimeMillis = 0;
final List<AddressDetectionState> strategies = Collections.unmodifiableList(Arrays.asList(AddressDetectionState.LOCAL_HOST, AddressDetectionState.ADDRESS, AddressDetectionState.FAST_CONNECT, AddressDetectionState.SLOW_CONNECT));
// loop while there is time left
while (elapsedTimeMillis < maxWaitMillis) {
boolean logging = elapsedTimeMillis >= startLoggingAfter;
if (logging) {
LOG.info("Trying to connect to " + targetAddress);
}
// Try each strategy in order
for (AddressDetectionState strategy : strategies) {
InetAddress address = findAddressUsingStrategy(strategy, targetAddress, logging);
if (address != null) {
return address;
}
}// we have made a pass with all strategies over all interfaces
// sleep for a while before we make the next pass
elapsedTimeMillis = (System.nanoTime() - startTimeNanos) / 1000000;
long toWait = Math.min(maxWaitMillis - elapsedTimeMillis, currentSleepTime);
if (toWait > 0) {
if (logging) {
LOG.info("Could not connect. Waiting for {} msecs before next attempt", toWait);
} else {
LOG.debug("Could not connect. Waiting for {} msecs before next attempt", toWait);
}
try {
Thread.sleep(toWait);
} catch (InterruptedException e) {
throw new IOException("Connection attempts have been interrupted.");
}
}
// increase the exponential backoff timer
currentSleepTime = Math.min(2 * currentSleepTime, MAX_SLEEP_TIME);
}
// our attempts timed out. use the heuristic fallback
LOG.warn("Could not connect to {}. Selecting a local address using heuristics.", targetAddress);
InetAddress heuristic = findAddressUsingStrategy(AddressDetectionState.HEURISTIC, targetAddress, true);
if (heuristic != null) {
return heuristic;
} else {
LOG.warn("Could not find any IPv4 address that is not loopback or link-local. Using localhost address.");
return InetAddress.getLocalHost();
}
} | 3.26 |
flink_HiveParserTypeInfoUtils_implicitConvertible_rdh | /**
* Test if it's implicitly convertible for data comparison.
*/
public static boolean implicitConvertible(PrimitiveObjectInspector.PrimitiveCategory from, PrimitiveObjectInspector.PrimitiveCategory to) {
if (from == to) {
return true;
}
PrimitiveObjectInspectorUtils.PrimitiveGrouping fromPg = PrimitiveObjectInspectorUtils.getPrimitiveGrouping(from);
PrimitiveObjectInspectorUtils.PrimitiveGrouping toPg = PrimitiveObjectInspectorUtils.getPrimitiveGrouping(to);
// Allow implicit String to Double conversion
if ((fromPg == PrimitiveGrouping.STRING_GROUP) && (to == PrimitiveCategory.DOUBLE)) {
return true;
}
// Void can be converted to any type
if (from == PrimitiveCategory.VOID) {
return true;
}
// Allow implicit String to Date conversion
if ((fromPg ==
PrimitiveGrouping.DATE_GROUP) && (toPg == PrimitiveGrouping.STRING_GROUP)) {
return true;
}
// Allow implicit Numeric to String conversion
if ((fromPg == PrimitiveGrouping.NUMERIC_GROUP) && (toPg == PrimitiveGrouping.STRING_GROUP)) {
return true;
}
// Allow implicit String to varchar conversion, and vice versa
if ((fromPg == PrimitiveGrouping.STRING_GROUP) && (toPg == PrimitiveGrouping.STRING_GROUP)) {
return true;
}
// Allow implicit conversion from Byte -> Integer -> Long -> Float -> Double
// Decimal -> String
Integer f = numericTypes.get(from);
Integer t = numericTypes.get(to);
if ((f == null) || (t == null)) {
return false;
}
return f <= t;
} | 3.26 |
flink_SourceReaderBase_getNumberOfCurrentlyAssignedSplits_rdh | /**
* Gets the number of splits the reader currently has assigned.
*
* <p>These are the splits that have been added via {@link #addSplits(List)} and have not yet
* been finished by returning them from the {@link SplitReader#fetch()} as part of {@link RecordsWithSplitIds#finishedSplits()}.
*/
public int getNumberOfCurrentlyAssignedSplits() {
return splitStates.size();
} | 3.26 |
flink_SourceReaderBase_finishedOrAvailableLater_rdh | // ------------------ private helper methods ---------------------
private InputStatus finishedOrAvailableLater() {
final boolean allFetchersHaveShutdown = splitFetcherManager.maybeShutdownFinishedFetchers();
if (!(f0 && allFetchersHaveShutdown)) {
return InputStatus.NOTHING_AVAILABLE;
}
if (elementsQueue.isEmpty()) {// We may reach here because of exceptional split fetcher, check it.
splitFetcherManager.checkErrors();
return InputStatus.END_OF_INPUT;
} else {
// We can reach this case if we just processed all data from the queue and finished a
// split,
// and concurrently the fetcher finished another split, whose data is then in the queue.
return InputStatus.MORE_AVAILABLE;
}
} | 3.26 |
flink_KeyedStateFactory_createOrUpdateInternalState_rdh | /**
* Creates or updates internal state and returns a new {@link InternalKvState}.
*
* @param namespaceSerializer
* TypeSerializer for the state namespace.
* @param stateDesc
* The {@code StateDescriptor} that contains the name of the state.
* @param snapshotTransformFactory
* factory of state snapshot transformer.
* @param allowFutureMetadataUpdates
* whether allow metadata to update in the future or not.
* @param <N>
* The type of the namespace.
* @param <SV>
* The type of the stored state value.
* @param <SEV>
* The type of the stored state value or entry for collection types (list or map).
* @param <S>
* The type of the public API state.
* @param <IS>
* The type of internal state.
*/
@Nonnull
default <N, SV, SEV, S extends State, IS extends S> IS createOrUpdateInternalState(@Nonnull
TypeSerializer<N> namespaceSerializer, @Nonnull
StateDescriptor<S, SV> stateDesc, @Nonnull
StateSnapshotTransformFactory<SEV> snapshotTransformFactory, boolean allowFutureMetadataUpdates) throws Exception {
if (allowFutureMetadataUpdates) {
throw new UnsupportedOperationException(this.getClass().getName() + "doesn't support to allow future metadata update");
} else {
return createOrUpdateInternalState(namespaceSerializer, stateDesc, snapshotTransformFactory);
} } | 3.26 |
flink_UpTimeGauge_getValue_rdh | // ------------------------------------------------------------------------
@Override
public Long getValue() {
final JobStatus status = jobStatusProvider.getState();
if (status == JobStatus.RUNNING) {
// running right now - report the uptime
final long runningTimestamp = jobStatusProvider.getStatusTimestamp(JobStatus.RUNNING);
// we use 'Math.max' here to avoid negative timestamps when clocks change
return Math.max(System.currentTimeMillis() - runningTimestamp, 0);
} else if (status.isTerminalState())
{
// not running any more -> finished or not on leader
return NO_LONGER_RUNNING;
} else {
// not yet running or not up at the moment
return 0L;
}
} | 3.26 |
flink_BufferDecompressor_decompress_rdh | /**
* Decompresses the input {@link Buffer} into the intermediate buffer and returns the
* decompressed data size.
*/
private int decompress(Buffer buffer) {
checkArgument(buffer != null, "The input buffer must not be null.");
checkArgument(buffer.isBuffer(), "Event can not be decompressed.");
checkArgument(buffer.isCompressed(), "Buffer not compressed.");
checkArgument(buffer.getReaderIndex() == 0, "Reader index of the input buffer must be 0.");
checkArgument(buffer.readableBytes() > 0, "No data to be decompressed.");
checkState(internalBuffer.refCnt() == 1, "Illegal reference count, buffer need to be released.");
int length
= buffer.getSize();
MemorySegment memorySegment = buffer.getMemorySegment();
// If buffer is on-heap, manipulate the underlying array directly. There are two main
// reasons why NIO buffer is not directly used here: One is that some compression
// libraries will use the underlying array for heap buffer, but our input buffer may be
// a read-only ByteBuffer, and it is illegal to access internal array. Another reason
// is that for the on-heap buffer, directly operating the underlying array can reduce
// additional overhead compared to generating a NIO buffer.
if (!memorySegment.isOffHeap()) {
return blockDecompressor.decompress(memorySegment.getArray(), buffer.getMemorySegmentOffset(), length, internalBufferArray, 0);
} else {// decompress the given buffer into the internal heap buffer
return blockDecompressor.decompress(buffer.getNioBuffer(0, length), 0, length, internalBuffer.getNioBuffer(0, internalBuffer.capacity()), 0);
}
} | 3.26 |
flink_BufferDecompressor_decompressToIntermediateBuffer_rdh | /**
* Decompresses the given {@link Buffer} using {@link BlockDecompressor}. The decompressed data
* will be stored in the intermediate buffer of this {@link BufferDecompressor} and returned to
* the caller. The caller must guarantee that the returned {@link Buffer} has been freed when
* calling the method next time.
*
* <p>Notes that the decompression will always start from offset 0 to the size of the input
* {@link Buffer}.
*/
public Buffer decompressToIntermediateBuffer(Buffer buffer) {
int decompressedLen = decompress(buffer);
internalBuffer.setSize(decompressedLen);
return internalBuffer.retainBuffer();
} | 3.26 |
flink_BufferDecompressor_decompressToOriginalBuffer_rdh | /**
* The difference between this method and {@link #decompressToIntermediateBuffer(Buffer)} is
* that this method copies the decompressed data to the input {@link Buffer} starting from
* offset 0.
*
* <p>The caller must guarantee that the input {@link Buffer} is writable and there's enough
* space left.
*/
@VisibleForTesting
public Buffer decompressToOriginalBuffer(Buffer buffer) {
int decompressedLen = decompress(buffer);
// copy the decompressed data back
int v2 = buffer.getMemorySegmentOffset();
MemorySegment
v3 = buffer.getMemorySegment();
v3.put(v2, internalBufferArray, 0, decompressedLen);
return new ReadOnlySlicedNetworkBuffer(buffer.asByteBuf(), 0, decompressedLen, v2, false);
} | 3.26 |
flink_ResourceManagerFactory_getEffectiveConfigurationForResourceManager_rdh | /**
* Configuration changes in this method will be visible to only {@link ResourceManager}. This
* can overwrite {@link #getEffectiveConfigurationForResourceManagerAndRuntimeServices}.
*/
protected Configuration getEffectiveConfigurationForResourceManager(final Configuration configuration) {
return configuration;
} | 3.26 |
flink_ResourceManagerFactory_getEffectiveConfigurationForResourceManagerAndRuntimeServices_rdh | /**
* Configuration changes in this method will be visible to both {@link ResourceManager} and
* {@link ResourceManagerRuntimeServices}. This can be overwritten by {@link #getEffectiveConfigurationForResourceManager}.
*/
protected Configuration getEffectiveConfigurationForResourceManagerAndRuntimeServices(final Configuration configuration) {
return configuration;
} | 3.26 |
flink_HeapAggregatingState_mergeState_rdh | // ------------------------------------------------------------------------
// state merging
// ------------------------------------------------------------------------
@Override
protected ACC mergeState(ACC a, ACC b) {
return aggregateTransformation.aggFunction.merge(a, b);
} | 3.26 |
flink_StateMapSnapshot_isOwner_rdh | /**
* Returns true iff the given state map is the owner of this snapshot object.
*/
public boolean isOwner(T stateMap)
{
return owningStateMap == stateMap;
} | 3.26 |
flink_StateMapSnapshot_release_rdh | /**
* Release the snapshot.
*/
public void release() {
} | 3.26 |
flink_JoinWithSolutionSetFirstDriver_setup_rdh | // --------------------------------------------------------------------------------------------
@Override
public void setup(TaskContext<FlatJoinFunction<IT1,
IT2, OT>, OT> context) {
this.taskContext = context;
this.running = true;
} | 3.26 |
flink_JoinWithSolutionSetFirstDriver_initialize_rdh | // --------------------------------------------------------------------------------------------
@Override
@SuppressWarnings("unchecked")
public void initialize() {
final TypeSerializer<IT1> solutionSetSerializer;
final TypeComparator<IT1> solutionSetComparator;// grab a handle to the hash table from the iteration broker
if (taskContext instanceof AbstractIterativeTask) {
AbstractIterativeTask<?, ?> v3 = ((AbstractIterativeTask<?, ?>) (taskContext));
String identifier = v3.brokerKey();
Object v5 = SolutionSetBroker.instance().get(identifier);
if (v5 instanceof CompactingHashTable) {
this.hashTable = ((CompactingHashTable<IT1>) (v5));
solutionSetSerializer = this.hashTable.getBuildSideSerializer();
solutionSetComparator = this.hashTable.getBuildSideComparator().duplicate();
} else if (v5 instanceof JoinHashMap) {
this.objectMap
= ((JoinHashMap<IT1>) (v5));
solutionSetSerializer = this.objectMap.getBuildSerializer();
solutionSetComparator = this.objectMap.getBuildComparator().duplicate();
} else {
throw new RuntimeException("Unrecognized solution set index: " + v5);
}
} else {
throw new RuntimeException("The task context of this driver is no iterative task context.");
}
TaskConfig config = taskContext.getTaskConfig();
ClassLoader classLoader = taskContext.getUserCodeClassLoader();
TypeSerializer<IT2> probeSideSerializer = taskContext.<IT2>getInputSerializer(0).getSerializer();
TypeComparatorFactory<IT2> probeSideComparatorFactory = config.getDriverComparator(0, classLoader);
this.probeSideComparator = probeSideComparatorFactory.createComparator();
ExecutionConfig executionConfig
= taskContext.getExecutionConfig();
objectReuseEnabled = executionConfig.isObjectReuseEnabled();
if (objectReuseEnabled) {
solutionSideRecord = solutionSetSerializer.createInstance();
probeSideRecord = probeSideSerializer.createInstance();
}
TypePairComparatorFactory<IT1, IT2> factory = taskContext.getTaskConfig().getPairComparatorFactory(taskContext.getUserCodeClassLoader());
pairComparator = factory.createComparator21(solutionSetComparator, this.probeSideComparator);
} | 3.26 |
flink_SupportsPartitioning_requiresPartitionGrouping_rdh | /**
* Returns whether data needs to be grouped by partition before it is consumed by the sink. By
* default, this is not required from the runtime and records arrive in arbitrary partition
* order.
*
* <p>If this method returns true, the sink can expect that all records will be grouped by the
* partition keys before consumed by the sink. In other words: The sink will receive all
* elements of one partition and then all elements of another partition. Elements of different
* partitions will not be mixed. For some sinks, this can be used to reduce the number of
* partition writers and improve writing performance by writing one partition at a time.
*
* <p>The given argument indicates whether the current execution mode supports grouping or not.
* For example, depending on the execution mode a sorting operation might not be available
* during runtime.
*
* @param supportsGrouping
* whether the current execution mode supports grouping
* @return whether data needs to be grouped by partition before being consumed by the sink. If {@code supportsGrouping} is false, this must never return true, otherwise the planner will fail.
*/
@SuppressWarnings("unused")
default boolean requiresPartitionGrouping(boolean supportsGrouping) {
return false;
} | 3.26 |
flink_Router_removePathPattern_rdh | /**
* Removes the route specified by the path pattern.
*/
public void removePathPattern(String pathPattern) {
for (MethodlessRouter<T> router : routers.values()) {
router.removePathPattern(pathPattern);
}
anyMethodRouter.removePathPattern(pathPattern);
} | 3.26 |
flink_Router_targetToString_rdh | /**
* Helper for toString.
*
* <p>For example, returns "io.netty.example.http.router.HttpRouterServerHandler" instead of
* "class io.netty.example.http.router.HttpRouterServerHandler"
*/
private static String
targetToString(Object target) {
if (target instanceof Class) {
return ((Class<?>) (target)).getName();
} else {
return target.toString();
}
} | 3.26 |
flink_Router_addConnect_rdh | // --------------------------------------------------------------------------
public Router<T>
addConnect(String path, T target) {
return addRoute(HttpMethod.CONNECT, path, target);
} | 3.26 |
flink_Router_notFound_rdh | // --------------------------------------------------------------------------
/**
* Sets the fallback target for use when there's no match at {@link #route(HttpMethod, String)}.
*/
public Router<T> notFound(T
target) {
this.notFound = target;
return this;
} | 3.26 |
flink_Router_allowedMethods_rdh | /**
* Returns allowed methods for a specific URI.
*
* <p>For {@code OPTIONS *}, use {@link #allAllowedMethods()} instead of this method.
*/
public Set<HttpMethod> allowedMethods(String uri) {
QueryStringDecoder decoder = new QueryStringDecoder(uri);
String[] tokens = PathPattern.removeSlashesAtBothEnds(decoder.path()).split("/");
if (anyMethodRouter.anyMatched(tokens)) {
return allAllowedMethods();
}
Set<HttpMethod> ret = new HashSet<HttpMethod>(routers.size());
for (Map.Entry<HttpMethod, MethodlessRouter<T>> entry : routers.entrySet()) {
MethodlessRouter<T> router = entry.getValue();
if (router.anyMatched(tokens)) {
HttpMethod method = entry.getKey();
ret.add(method);
}
}
return ret;
} | 3.26 |
flink_Router_size_rdh | /**
* Returns the number of routes in this router.
*/
public int size() {
int ret = anyMethodRouter.size();
for (MethodlessRouter<T> router : routers.values()) {
ret +=
router.size();
}
return
ret;
} | 3.26 |
flink_Router_allAllowedMethods_rdh | /**
* Returns all methods that this router handles. For {@code OPTIONS *}.
*/
public Set<HttpMethod> allAllowedMethods() {
if (anyMethodRouter.size() > 0) {
Set<HttpMethod> ret = new HashSet<HttpMethod>(9);
ret.add(HttpMethod.CONNECT);
ret.add(HttpMethod.DELETE);
ret.add(HttpMethod.GET);
ret.add(HttpMethod.HEAD);
ret.add(HttpMethod.OPTIONS);
ret.add(HttpMethod.PATCH);
ret.add(HttpMethod.POST);
ret.add(HttpMethod.PUT);
ret.add(HttpMethod.TRACE);
return ret;
} else {
return new HashSet<HttpMethod>(routers.keySet());
}
} | 3.26 |
flink_Router_toString_rdh | /**
* Returns visualized routing rules.
*/
@Override
public String toString() {
// Step 1/2: Dump routers and anyMethodRouter in order
int numRoutes = size();
List<String> methods = new ArrayList<String>(numRoutes);
List<String> patterns = new ArrayList<String>(numRoutes);
List<String> targets = new ArrayList<String>(numRoutes);
// For router
for (Entry<HttpMethod,
MethodlessRouter<T>> e : routers.entrySet()) {
HttpMethod v29 = e.getKey();
MethodlessRouter<T> router
= e.getValue();
m0(v29.toString(), router.routes(), methods, patterns, targets);
}
// For anyMethodRouter
m0("*", anyMethodRouter.routes(), methods, patterns, targets);
// For notFound
if (notFound != null) {
methods.add("*");
patterns.add("*");
targets.add(targetToString(notFound));
}
// Step 2/2: Format the List into aligned columns: <method> <patterns> <target>
int maxLengthMethod = maxLength(methods);
int maxLengthPattern = maxLength(patterns);
String format = ((("%-" + maxLengthMethod) + "s %-") + maxLengthPattern) + "s %s\n";
int initialCapacity = ((((maxLengthMethod + 1) + maxLengthPattern) + 1) + 20) * methods.size();
StringBuilder b =
new StringBuilder(initialCapacity);
for (int i = 0; i < methods.size(); i++) {
String method = methods.get(i);
String pattern = patterns.get(i);
String target = targets.get(i);
b.append(String.format(format, method, pattern, target));
}
return b.toString();
} | 3.26 |
flink_Router_addRoute_rdh | // --------------------------------------------------------------------------
/**
* Add route.
*
* <p>A path pattern can only point to one target. This method does nothing if the pattern has
* already been added.
*/
public Router<T> addRoute(HttpMethod method, String pathPattern, T target) {
getMethodlessRouter(method).addRoute(pathPattern, target);
return this;
} | 3.26 |
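Using only the methods visible in this section (addRoute, notFound, allowedMethods, size, toString), a hedged usage sketch of the router with String targets. The import paths and the no-arg constructor are assumptions based on Flink's copy of the netty-router Router and Netty's HttpMethod; adjust them to your Flink version.

```java
// Import paths are assumptions; adjust to where Router and HttpMethod live in your Flink version.
import org.apache.flink.runtime.rest.handler.router.Router;
import org.apache.flink.shaded.netty4.io.netty.handler.codec.http.HttpMethod;

public class RouterSketch {
    public static void main(String[] args) {
        Router<String> router = new Router<String>()
                .addRoute(HttpMethod.GET, "/jobs/:jobid", "jobDetailsHandler")
                .addRoute(HttpMethod.POST, "/jobs", "jobSubmitHandler")
                .notFound("notFoundHandler"); // fallback target when no pattern matches

        // Which HTTP methods are routable for a given URI?
        System.out.println(router.allowedMethods("/jobs/123")); // [GET]
        System.out.println(router.size());                      // 2 registered routes
        System.out.println(router);                             // aligned <method> <pattern> <target> listing
    }
}
```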
flink_Router_m0_rdh | // --------------------------------------------------------------------------
// Design decision:
// We do not allow access to routers and anyMethodRouter, because we don't
// want to expose MethodlessRouter, OrderlessRouter, and PathPattern.
// Exposing those will complicate the use of this package.
/**
* Helper for toString.
*/
private static <T> void m0(String
method, Map<PathPattern, T> routes, List<String> accMethods, List<String> accPatterns, List<String> accTargets) {
for (Map.Entry<PathPattern, T> entry : routes.entrySet()) {
accMethods.add(method);
accPatterns.add("/" + entry.getKey().pattern());
accTargets.add(targetToString(entry.getValue()));
}
} | 3.26 |
flink_Router_decodePathTokens_rdh | // --------------------------------------------------------------------------
private String[]
decodePathTokens(String uri) {
// Need to split the original URI (instead of QueryStringDecoder#path) then decode the
// tokens (components),
// otherwise /test1/123%2F456 will not match /test1/:p1
int qPos = uri.indexOf("?");
String v12 = (qPos >= 0) ? uri.substring(0, qPos) : uri;
String[] encodedTokens = PathPattern.removeSlashesAtBothEnds(v12).split("/");
String[] decodedTokens = new String[encodedTokens.length];
for (int i = 0; i < encodedTokens.length; i++) {
String encodedToken =
encodedTokens[i];
decodedTokens[i] = QueryStringDecoder.decodeComponent(encodedToken);
}
return decodedTokens;
} | 3.26 |
flink_PartialCachingLookupProvider_of_rdh | /**
* Build a {@link PartialCachingLookupProvider} from the specified {@link LookupFunction} and
* {@link LookupCache}.
*/
static PartialCachingLookupProvider of(LookupFunction lookupFunction, LookupCache cache) {
return new PartialCachingLookupProvider() {
@Override
public LookupCache getCache() {
return cache;
}
@Override
public LookupFunction createLookupFunction() {
return lookupFunction;}
};
} | 3.26 |
flink_StreamSourceContexts_processAndEmitWatermark_rdh | /**
* This will only be called if allowWatermark returned {@code true}.
*/
@Override
protected void processAndEmitWatermark(Watermark mark) {
nextWatermarkTime = Long.MAX_VALUE;
output.emitWatermark(mark);
// we can shutdown the watermark timer now, no watermarks will be needed any more.
// Note that this procedure actually doesn't need to be synchronized with the lock,
// but since it's only a one-time thing, doesn't hurt either
final ScheduledFuture<?> nextWatermarkTimer = this.nextWatermarkTimer;
if (nextWatermarkTimer != null) {
nextWatermarkTimer.cancel(true);
}
} | 3.26 |
flink_StreamSourceContexts_getSourceContext_rdh | /**
* Depending on the {@link TimeCharacteristic}, this method will return the adequate {@link org.apache.flink.streaming.api.functions.source.SourceFunction.SourceContext}. That is:
*
* <ul>
* <li>{@link TimeCharacteristic#IngestionTime} = {@code AutomaticWatermarkContext}
* <li>{@link TimeCharacteristic#ProcessingTime} = {@code NonTimestampContext}
* <li>{@link TimeCharacteristic#EventTime} = {@code ManualWatermarkContext}
* </ul>
*/
public static <OUT> SourceFunction.SourceContext<OUT> getSourceContext(TimeCharacteristic timeCharacteristic, ProcessingTimeService processingTimeService, Object checkpointLock, Output<StreamRecord<OUT>> output, long watermarkInterval, long idleTimeout, boolean emitProgressiveWatermarks) {
final SourceFunction.SourceContext<OUT> ctx;
switch (timeCharacteristic) {
case EventTime :
ctx = new ManualWatermarkContext<>(output, processingTimeService, checkpointLock, idleTimeout, emitProgressiveWatermarks);
break;
case IngestionTime :
Preconditions.checkState(emitProgressiveWatermarks, "Ingestion time is not available when emitting progressive watermarks " + "is disabled.");
ctx = new AutomaticWatermarkContext<>(output, watermarkInterval, processingTimeService, checkpointLock, idleTimeout);
break;
case ProcessingTime :
ctx = new NonTimestampContext<>(checkpointLock, output);
break;
default :
throw new IllegalArgumentException(String.valueOf(timeCharacteristic));
}
return new SwitchingOnClose<>(ctx);
} | 3.26 |
flink_HadoopInputFormatCommonBase_getCredentialsFromUGI_rdh | /**
*
* @param ugi
* The user information
* @return new credentials object from the user information.
*/
public static Credentials getCredentialsFromUGI(UserGroupInformation ugi) {
return ugi.getCredentials();
} | 3.26 |
flink_SqlFunctionUtils_repeat_rdh | /**
* Returns a string that repeats the base string n times.
*/
public static String repeat(String str, int repeat) {
return EncodingUtils.repeat(str, repeat);
} | 3.26 |
flink_SqlFunctionUtils_regexpExtract_rdh | /**
* Returns the first string extracted with a specified regular expression.
*/
public static String regexpExtract(String str, String regex) {
return m3(str, regex, 0);
} | 3.26 |
flink_SqlFunctionUtils_regexpReplace_rdh | /**
* Returns a string resulting from replacing all substrings that match the regular expression
* with replacement.
*/
public static String regexpReplace(String str, String regex, String replacement) {
if (((str == null) || (regex == null)) || (replacement == null))
{
return null;
}
try {
return str.replaceAll(regex, Matcher.quoteReplacement(replacement));
} catch (Exception e) {
LOG.error(String.format("Exception in regexpReplace('%s', '%s', '%s')", str, regex,
replacement), e);
// return null if exception in regex replace
return null;
}
} | 3.26 |
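Because the replacement string goes through Matcher.quoteReplacement, '$' and '\' in it are taken literally rather than as back-references, and an invalid pattern is logged and turned into null instead of throwing. A small sketch; the SqlFunctionUtils import path is an assumption based on the row name.

```java
// Assumed location of the utility class; adjust to your Flink version.
import org.apache.flink.table.runtime.functions.SqlFunctionUtils;

public class RegexpReplaceExample {
    public static void main(String[] args) {
        System.out.println(SqlFunctionUtils.regexpReplace("2024-08-31", "-", "/")); // "2024/08/31"
        // '$1' is inserted literally because of Matcher.quoteReplacement, not treated as group 1.
        System.out.println(SqlFunctionUtils.regexpReplace("abc", "b", "$1"));       // "a$1c"
        System.out.println(SqlFunctionUtils.regexpReplace("abc", "(", "x"));        // null (invalid regex is logged, null returned)
    }
}
```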
flink_SqlFunctionUtils_byteArrayCompare_rdh | /**
* Compares two byte arrays in lexicographical order.
*
* <p>The result is positive if {@code array1} is greater than {@code array2}, negative if {@code array1} is less than {@code array2}, and 0 if {@code array1} is equal to {@code array2}.
*
* <p>Note: Currently, this is used in {@code ScalarOperatorGens} for comparing two fields of
* binary or varbinary type.
*
* @param array1
* byte array to compare.
* @param array2
* byte array to compare.
* @return an Integer indicating which one is bigger
*/
public static int byteArrayCompare(byte[] array1, byte[] array2) {
for (int i = 0, j = 0; (i < array1.length) && (j < array2.length); i++, j++) {
int a = array1[i] & 0xff;
int b = array2[j] & 0xff;
if (a != b) {
return a - b;
}
}
return array1.length - array2.length;
} | 3.26 |
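The `& 0xff` masking above is what makes the comparison unsigned; a short sketch of the consequences (the SqlFunctionUtils import path is an assumption based on the row name):

```java
// Assumed location of the utility class; adjust to your Flink version.
import org.apache.flink.table.runtime.functions.SqlFunctionUtils;

public class ByteArrayCompareExample {
    public static void main(String[] args) {
        byte[] a = {0x01, (byte) 0x80}; // 0x80 is -128 as a signed byte, 128 unsigned
        byte[] b = {0x01, 0x7F};        // 0x7F is 127
        // Positive: 128 > 127 once both bytes are masked with 0xff,
        // even though a signed byte comparison would say -128 < 127.
        System.out.println(SqlFunctionUtils.byteArrayCompare(a, b) > 0); // true

        // When one array is a prefix of the other, the shorter one sorts first.
        System.out.println(SqlFunctionUtils.byteArrayCompare(
                new byte[]{1, 2}, new byte[]{1, 2, 3}) < 0);              // true
    }
}
```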
flink_SqlFunctionUtils_struncate_rdh | /**
* SQL <code>TRUNCATE</code> operator applied to double values.
*/
public static double struncate(double b0) {
return m7(b0, 0);
} | 3.26 |
flink_SqlFunctionUtils_ceil_rdh | /**
* SQL <code>CEIL</code> operator applied to long values.
*/
public static long ceil(long b0, long b1) {
return floor((b0 + b1) - 1, b1);
} | 3.26 |
flink_SqlFunctionUtils_cot_rdh | /**
* SQL <code>COT</code> operator applied to double values.
*/
public static double
cot(double b0) {
return 1.0 / Math.tan(b0);
} | 3.26 |
flink_SqlFunctionUtils_tanh_rdh | /**
* Calculates the hyperbolic tangent of a big decimal number.
*/
public static double tanh(DecimalData a) {
return Math.tanh(doubleValue(a));
} | 3.26 |
flink_SqlFunctionUtils_sround_rdh | /**
* SQL <code>ROUND</code> operator applied to DecimalData values.
*/
public static DecimalData sround(DecimalData b0, int b1) {
return DecimalDataUtils.sround(b0, b1);
} | 3.26 |
flink_SqlFunctionUtils_log2_rdh | /**
* Returns the logarithm of "a" with base 2.
*/
public static double log2(double x) {
return Math.log(x) / Math.log(2);
} | 3.26 |
flink_SqlFunctionUtils_parseUrl_rdh | /**
* Parses the URL and returns the value of the given query parameter. Returns null if any argument is null.
*
* @param urlStr
* URL string.
* @param partToExtract
* must be QUERY, or return null.
* @param key
* parameter name.
* @return target value.
*/
public static String parseUrl(String urlStr, String partToExtract, String key) {
if (!"QUERY".equals(partToExtract))
{
return null;
}
String query = parseUrl(urlStr, partToExtract);
if (query == null) {
return null;
}
Pattern p = Pattern.compile(("(&|^)" + Pattern.quote(key)) + "=([^&]*)");
Matcher m = p.matcher(query);
if (m.find()) {
return m.group(2);
}
return null;
} | 3.26 |
flink_SqlFunctionUtils_lpad_rdh | // -------------------------- string functions ------------------------
/**
* Returns the string str left-padded with the string pad to a length of len characters. If str
* is longer than len, the return value is shortened to len characters.
*/
public static String lpad(String base, int len, String pad) {
if ((len < 0) || "".equals(pad)) {
return null;
} else if (len == 0) {
return "";
}
char[] data = new char[len];
char[] baseChars = base.toCharArray();
char[] padChars = pad.toCharArray();
// the length of the padding needed
int pos = Math.max(len - base.length(), 0);
// copy the padding
for (int i = 0; i < pos; i += pad.length()) {
for (int j = 0; (j < pad.length()) && (j < (pos - i)); j++) {
data[i + j] = padChars[j];
}
}
// copy the base
int i = 0;
while (((pos + i) < len)
&& (i < base.length())) {
data[pos + i] = baseChars[i];
i += 1;
}
return new String(data);
} | 3.26 |
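A few worked inputs for the padding logic above; the expected results were derived by tracing the implementation, and the import path is an assumption based on the row name.

```java
// Assumed location of the utility class; adjust to your Flink version.
import org.apache.flink.table.runtime.functions.SqlFunctionUtils;

public class LpadExample {
    public static void main(String[] args) {
        System.out.println(SqlFunctionUtils.lpad("hi", 5, "??"));   // "???hi"  (3 pad chars, then the base)
        System.out.println(SqlFunctionUtils.lpad("flink", 3, "*")); // "fli"    (base truncated to len)
        System.out.println(SqlFunctionUtils.lpad("hi", 0, "*"));    // ""       (len == 0)
        System.out.println(SqlFunctionUtils.lpad("hi", 5, ""));     // null     (empty pad string)
    }
}
```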
flink_SqlFunctionUtils_log_rdh | /**
* Returns the logarithm of "x" with base "base".
*/
public static double log(double base, double x) {
return Math.log(x) / Math.log(base);
} | 3.26 |
flink_SqlFunctionUtils_keyValue_rdh | /**
* Parses the string as a key-value list and returns the value matching the given key name. Examples:
* keyvalue('k1=v1;k2=v2', ';', '=', 'k2') = 'v2' keyvalue('k1:v1,k2:v2', ',', ':', 'k3') = NULL
*
* @param str
* target string.
* @param pairSeparator
* separator between key-value tuple.
* @param kvSeparator
* separator between key and value.
* @param keyName
* name of the key whose value you want return.
* @return target value.
*/public static BinaryStringData keyValue(BinaryStringData str, BinaryStringData pairSeparator, BinaryStringData kvSeparator, BinaryStringData keyName) {
if ((str == null) || (str.getSizeInBytes() == 0)) {
return null;
}
if ((((pairSeparator != null) && (pairSeparator.getSizeInBytes() == 1)) && (kvSeparator != null)) && (kvSeparator.getSizeInBytes() == 1)) {
return BinaryStringDataUtil.keyValue(str, pairSeparator.byteAt(0), kvSeparator.byteAt(0), keyName);
} else {
return BinaryStringData.fromString(keyValue(BinaryStringDataUtil.safeToString(str), BinaryStringDataUtil.safeToString(pairSeparator), BinaryStringDataUtil.safeToString(kvSeparator), BinaryStringDataUtil.safeToString(keyName)));
}
} | 3.26 |
flink_SqlFunctionUtils_m2_rdh | /**
* Replaces all the old strings with the replacement string.
*/
public static String m2(String str, String oldStr, String replacement) {
return str.replace(oldStr, replacement);
} | 3.26 |
flink_SqlFunctionUtils_rpad_rdh | /**
* Returns the string str right-padded with the string pad to a length of len characters. If str
* is longer than len, the return value is shortened to len characters.
*/
public static String rpad(String base, int len, String pad) {
if ((len < 0) || "".equals(pad)) {
return null;
} else if (len == 0) {
return "";
}
char[] data = new char[len];
char[] baseChars = base.toCharArray();
char[] padChars = pad.toCharArray();
int pos
= 0;
// copy the base
while ((pos < base.length()) && (pos < len)) {
data[pos] = baseChars[pos];
pos += 1;
}
// copy the padding
while (pos < len) {
int i = 0;
while ((i < pad.length()) && (i < (len - pos))) {
data[pos + i] = padChars[i];
i += 1;
}
pos += pad.length();
}
return new String(data);
} | 3.26 |
flink_SqlFunctionUtils_hex_rdh | /**
* Returns the hex string of a string argument.
*/
public static String hex(String x) {
return EncodingUtils.hex(x.getBytes(StandardCharsets.UTF_8)).toUpperCase();
} | 3.26 |
flink_SqlFunctionUtils_initcap_rdh | /**
* SQL INITCAP(string) function.
*/
public static String
initcap(String s) {
// Assumes Alpha as [A-Za-z0-9]
// white space is treated as everything else.
final int len = s.length();
boolean start = true;
final StringBuilder newS = new StringBuilder();
for (int i = 0; i < len; i++) {
char curCh = s.charAt(i);
final int c = ((int) (curCh));
if (start) {
// curCh is whitespace or first character of word.
if ((c > 47) && (c < 58)) {
// 0-9
start = false;
} else if ((c > 64) && (c < 91)) {
// A-Z
start = false;
} else if ((c > 96) && (c
< 123)) {
// a-z
start = false;
curCh = ((char) (c - 32));// Uppercase this character
}
// else {} whitespace
} else // Inside of a word or white space after end of word.
if ((c > 47) && (c < 58)) {
// 0-9
// noop
} else if ((c > 64) && (c < 91)) {
// A-Z
curCh = ((char) (c + 32));// Lowercase this character
} else if ((c > 96) && (c < 123)) {
// a-z
// noop
} else {
// whitespace
start = true;
}
newS.append(curCh);
}// for each character in s
return newS.toString();
} | 3.26 |
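Two worked examples of the case-flipping rules above; note that a digit also ends the "start of word" state, so a word that begins with a digit keeps the rest lowercase. The import path is an assumption based on the row name.

```java
// Assumed location of the utility class; adjust to your Flink version.
import org.apache.flink.table.runtime.functions.SqlFunctionUtils;

public class InitcapExample {
    public static void main(String[] args) {
        System.out.println(SqlFunctionUtils.initcap("hello WORLD")); // "Hello World"
        System.out.println(SqlFunctionUtils.initcap("3com flink"));  // "3com Flink" (digit counts as word start)
    }
}
```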
flink_SqlFunctionUtils_m3_rdh | /**
* Returns a string extracted with a specified regular expression and a regex match group index.
*/
public static String m3(String str, String regex, int extractIndex) {
if ((str == null) || (regex == null)) {
return null;
}
try {
Matcher m = Pattern.compile(regex).matcher(str);
if (m.find()) {
MatchResult mr = m.toMatchResult();
return mr.group(extractIndex);
}
} catch (Exception e) {
LOG.error(String.format("Exception in regexpExtract('%s', '%s', '%d')", str, regex, extractIndex), e);
}
return null;
} | 3.26 |
flink_SqlFunctionUtils_splitIndex_rdh | /**
* Splits the target string with a custom separator and picks the index-th (starting from 0) element of the result.
*
* @param str
* target string.
* @param character
* int value of the separator character
* @param index
* index of the result which you want.
* @return the string at the index of split results.
*/
public static String splitIndex(String str, int character, int index) {
if (((character > 255) || (character < 1)) || (index <
0)) {
return null;
}
String[] v16 = StringUtils.splitPreserveAllTokens(str, ((char) (character)));
if (index >= v16.length) {
return null;
} else {
return v16[index];
}
} | 3.26 |
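The character argument is the int code point of the separator, so a comma is 44; a quick sketch (the import path is an assumption based on the row name):

```java
// Assumed location of the utility class; adjust to your Flink version.
import org.apache.flink.table.runtime.functions.SqlFunctionUtils;

public class SplitIndexExample {
    public static void main(String[] args) {
        System.out.println(SqlFunctionUtils.splitIndex("a,b,c", 44, 1));  // "b"   (',' == 44)
        System.out.println(SqlFunctionUtils.splitIndex("a,b,c", 44, 5));  // null  (index out of range)
        System.out.println(SqlFunctionUtils.splitIndex("a,b,c", 44, -1)); // null  (negative index)
    }
}
```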
flink_SqlFunctionUtils_hash_rdh | /**
* Calculate the hash value of a given string.
*
* @param algorithm
* message digest algorithm.
* @param str
* string to hash.
* @param charsetName
* charset of string.
* @return hash value of string.
*/
public static String hash(String algorithm, String str, String charsetName) {
try {
byte[] digest = MessageDigest.getInstance(algorithm).digest(m4(str, charsetName));
return EncodingUtils.hex(digest);
} catch (NoSuchAlgorithmException e) {
throw new IllegalArgumentException("Unsupported algorithm: " + algorithm, e);
}
} | 3.26 |
flink_SqlFunctionUtils_abs_rdh | /**
* SQL <code>ABS</code> operator applied to float values.
*/
public static float abs(float b0) {
return Math.abs(b0);
} | 3.26 |
flink_SqlFunctionUtils_floor_rdh | /**
* SQL <code>FLOOR</code> operator applied to long values.
*/
public static long floor(long b0, long b1) {
long r = b0 % b1;
if (r < 0) {
r
+= b1;
}
return b0 - r;
} | 3.26 |
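Worked values for the two-argument FLOOR above and the matching CEIL shown earlier (which delegates to it): both round toward the nearest multiple of b1, also for negative inputs. The methods below are copies of the two snippets, so the example is self-contained.

```java
public class FloorCeilExample {
    // Copies of the floor/ceil snippets above, for a self-contained check.
    static long floor(long b0, long b1) {
        long r = b0 % b1;
        if (r < 0) {
            r += b1;
        }
        return b0 - r;
    }

    static long ceil(long b0, long b1) {
        return floor((b0 + b1) - 1, b1);
    }

    public static void main(String[] args) {
        System.out.println(floor(7, 3));  // 6
        System.out.println(ceil(7, 3));   // 9
        System.out.println(floor(-7, 3)); // -9
        System.out.println(ceil(-7, 3));  // -6
    }
}
```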
flink_SqlFunctionUtils_strToMap_rdh | /**
* Creates a map by parsing text. Split text into key-value pairs using two delimiters. The
* first delimiter separates pairs, and the second delimiter separates key and value. Both
* {@code listDelimiter} and {@code keyValueDelimiter} are treated as regular expressions.
*
* @param text
* the input text
* @param listDelimiter
* the delimiter to separates pairs
* @param keyValueDelimiter
* the delimiter to separates key and value
* @return the map
*/
public static Map<String, String> strToMap(String text, String listDelimiter, String keyValueDelimiter) {
if (StringUtils.isEmpty(text)) {
return EMPTY_MAP;
}
String[] keyValuePairs = text.split(listDelimiter);
Map<String, String> ret = CollectionUtil.newHashMapWithExpectedSize(keyValuePairs.length);
for (String v41 : keyValuePairs) {
String[] keyValue = v41.split(keyValueDelimiter, 2);
if (keyValue.length < 2) {
ret.put(v41, null);
} else {
ret.put(keyValue[0], keyValue[1]);
}
}
return ret;
} | 3.26 |
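A short sketch of the two-delimiter parsing above; an entry without a key-value delimiter is kept with a null value. The import path is an assumption based on the row name.

```java
// Assumed location of the utility class; adjust to your Flink version.
import org.apache.flink.table.runtime.functions.SqlFunctionUtils;

import java.util.Map;

public class StrToMapExample {
    public static void main(String[] args) {
        Map<String, String> m = SqlFunctionUtils.strToMap("k1=v1,k2=v2,k3", ",", "=");
        System.out.println(m.get("k1")); // "v1"
        System.out.println(m.get("k2")); // "v2"
        System.out.println(m.get("k3")); // null (no '=' in the pair, so the value is null)
    }
}
```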
flink_WatermarkOutputMultiplexer_getImmediateOutput_rdh | /**
* Returns an immediate {@link WatermarkOutput} for the given output ID.
*
* <p>See {@link WatermarkOutputMultiplexer} for a description of immediate and deferred
* outputs.
*/public WatermarkOutput
getImmediateOutput(String outputId) {
final PartialWatermark outputState = watermarkPerOutputId.get(outputId);
Preconditions.checkArgument(outputState != null, "no output registered under id %s", outputId);
return new ImmediateOutput(outputState);
} | 3.26 |
flink_WatermarkOutputMultiplexer_registerNewOutput_rdh | /**
* Registers a new multiplexed output, which creates internal states for that output and returns
* an output ID that can be used to get a deferred or immediate {@link WatermarkOutput} for that
* output.
*/
public void registerNewOutput(String id, WatermarkUpdateListener onWatermarkUpdate) {
final PartialWatermark outputState = new PartialWatermark(onWatermarkUpdate);
final PartialWatermark previouslyRegistered = watermarkPerOutputId.putIfAbsent(id, outputState);
checkState(previouslyRegistered == null, "Already contains an output for ID %s", id);
combinedWatermarkStatus.add(outputState);
} | 3.26 |
flink_WatermarkOutputMultiplexer_updateCombinedWatermark_rdh | /**
* Checks whether we need to update the combined watermark. Should be called when a newly
* emitted per-output watermark is higher than the max so far or if we need to combined the
* deferred per-output updates.
*/
private void updateCombinedWatermark() {
if (combinedWatermarkStatus.updateCombinedWatermark()) {
underlyingOutput.emitWatermark(new Watermark(combinedWatermarkStatus.getCombinedWatermark()));
} else if (combinedWatermarkStatus.isIdle()) {
underlyingOutput.markIdle();
}
} | 3.26 |
flink_WatermarkOutputMultiplexer_getDeferredOutput_rdh | /**
* Returns a deferred {@link WatermarkOutput} for the given output ID.
*
* <p>See {@link WatermarkOutputMultiplexer} for a description of immediate and deferred
* outputs.
*/
public WatermarkOutput getDeferredOutput(String outputId) {
final PartialWatermark outputState = watermarkPerOutputId.get(outputId);
Preconditions.checkArgument(outputState != null, "no output registered under id %s", outputId);
return new DeferredOutput(outputState);
} | 3.26 |
flink_PartitionedFile_getIndexEntry_rdh | /**
* Gets the index entry of the target region and subpartition either from the index data cache
* or the index data file.
*/
void getIndexEntry(FileChannel indexFile, ByteBuffer target, int region, int subpartition) throws IOException {
checkArgument(target.capacity() == INDEX_ENTRY_SIZE, "Illegal target buffer size.");
target.clear();
long indexEntryOffset = getIndexEntryOffset(region, subpartition);
if (indexEntryCache != null) {
for
(int i = 0; i < INDEX_ENTRY_SIZE; ++i) {
target.put(indexEntryCache.get(((int) (indexEntryOffset)) + i));
}
} else {
synchronized(indexFilePath)
{
indexFile.position(indexEntryOffset);
BufferReaderWriterUtil.readByteBufferFully(indexFile, target);
}
}
target.flip();
} | 3.26 |
flink_ResultInfo_getFieldGetters_rdh | /**
* Create the {@link FieldGetter} to get column value in the results.
*
* <p>With {@code JSON} format, it uses the {@link ResolvedSchema} to build the getters.
* However, it uses {@link StringData}'s {@link FieldGetter} to get the column values.
*/
public List<FieldGetter>
getFieldGetters() {
if (rowFormat == RowFormat.JSON) {
List<LogicalType> columnTypes = columnInfos.stream().map(ColumnInfo::getLogicalType).collect(Collectors.toList());
return IntStream.range(0, columnTypes.size()).mapToObj(i -> RowData.createFieldGetter(columnTypes.get(i), i)).collect(Collectors.toList());
} else {
return IntStream.range(0, columnInfos.size()).mapToObj(i -> RowData.createFieldGetter(STRING_TYPE, i)).collect(Collectors.toList());
}
} | 3.26 |
flink_ResultInfo_getData_rdh | /**
* Get the data.
*/
public List<RowData> getData() {
return data;
} | 3.26 |
flink_ResultInfo_getRowFormat_rdh | /**
* Get the row format about the data.
*/
public RowFormat getRowFormat() {
return rowFormat;
} | 3.26 |
flink_ResultInfo_getColumnInfos_rdh | /**
* Get the column info of the data.
*/
public List<ColumnInfo> getColumnInfos() {
return Collections.unmodifiableList(columnInfos);
} | 3.26 |
flink_ResultInfo_getResultSchema_rdh | /**
* Get the schemas of the results.
*/
public ResolvedSchema getResultSchema() {
return ResolvedSchema.of(columnInfos.stream().map(ColumnInfo::toColumn).collect(Collectors.toList()));
} | 3.26 |
flink_IterationHeadTask_initBackChannel_rdh | /**
* The iteration head prepares the backchannel: it allocates memory, instantiates a {@link BlockingBackChannel} and hands it to the iteration tail via a {@link Broker} singleton.
*/
private BlockingBackChannel initBackChannel() throws Exception {
/* get the size of the memory available to the backchannel */
int backChannelMemoryPages = getMemoryManager().computeNumberOfPages(this.config.getRelativeBackChannelMemory());
/* allocate the memory available to the backchannel */
List<MemorySegment> segments = new ArrayList<MemorySegment>();
int segmentSize = getMemoryManager().getPageSize();
getMemoryManager().allocatePages(this, segments, backChannelMemoryPages);
/* instantiate the backchannel */
BlockingBackChannel backChannel = new BlockingBackChannel(new SerializedUpdateBuffer(segments, segmentSize, getIOManager()));
/* hand the backchannel over to the iteration tail */
Broker<BlockingBackChannel> broker = BlockingBackChannelBroker.instance();
broker.handIn(brokerKey(), backChannel);
return backChannel;
} | 3.26 |
flink_IterationHeadTask_getNumTaskInputs_rdh | // --------------------------------------------------------------------------------------------
@Override
protected int getNumTaskInputs() {
// this task has an additional input in the workset case for the initial solution set
boolean v0 = config.getIsWorksetIteration();
return driver.getNumberOfInputs() + (v0 ? 1 : 0);
} | 3.26 |
flink_RuntimeEnvironment_getExecutionConfig_rdh | // ------------------------------------------------------------------------
@Override
public ExecutionConfig getExecutionConfig() {
return this.executionConfig;
} | 3.26 |
flink_BigIntSerializer_writeBigInteger_rdh | // --------------------------------------------------------------------------------------------
// Static Helpers for BigInteger Serialization
// --------------------------------------------------------------------------------------------
public static void writeBigInteger(BigInteger record, DataOutputView target) throws IOException {
// null value support
if (record == null) {
target.writeInt(0);
return;
} else if (record == BigInteger.ZERO) {
target.writeInt(1);
return;
} else if (record == BigInteger.ONE) {
target.writeInt(2);
return;
} else if (record == BigInteger.TEN) {
target.writeInt(3);
return;
}
// default
final byte[] bytes = record.toByteArray();
// the length we write is offset by four, because null and short-paths for ZERO, ONE, and
// TEN
target.writeInt(bytes.length + 4);
target.write(bytes);
} | 3.26 |
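The length header above doubles as a tag: 0 means null, 1/2/3 stand for the cached ZERO/ONE/TEN constants, and any other value is the byte-array length plus 4. The matching reader is not shown in this row; the following is only a sketch of what decoding that format could look like, using plain JDK DataInput, and is not Flink's actual readBigInteger.

```java
import java.io.DataInput;
import java.io.IOException;
import java.math.BigInteger;

public class BigIntegerReadSketch {
    // Hypothetical counterpart to writeBigInteger above; Flink's real reader may differ.
    static BigInteger readBigInteger(DataInput in) throws IOException {
        int len = in.readInt();
        switch (len) {
            case 0: return null;            // null marker
            case 1: return BigInteger.ZERO; // short paths for the cached constants
            case 2: return BigInteger.ONE;
            case 3: return BigInteger.TEN;
            default:
                byte[] bytes = new byte[len - 4]; // the written length was offset by 4
                in.readFully(bytes);
                return new BigInteger(bytes);
        }
    }
}
```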
flink_KubernetesResourceManagerDriver_recoverWorkerNodesFromPreviousAttempts_rdh | // ------------------------------------------------------------------------
// Internal
// ------------------------------------------------------------------------
private void recoverWorkerNodesFromPreviousAttempts() throws ResourceManagerException {
List<KubernetesPod> podList = flinkKubeClient.getPodsWithLabels(KubernetesUtils.getTaskManagerSelectors(clusterId));
final List<KubernetesWorkerNode> recoveredWorkers = new ArrayList<>();
for
(KubernetesPod v11 : podList) {
final KubernetesWorkerNode worker = new KubernetesWorkerNode(new ResourceID(v11.getName()));
final long attempt = worker.getAttempt();
if (attempt > currentMaxAttemptId) {
currentMaxAttemptId = attempt;
}
if (v11.isTerminated() || (!v11.isScheduled())) {
stopPod(v11.getName());
} else {
recoveredWorkers.add(worker);
}
}
log.info("Recovered {} pods from previous attempts, current attempt id is {}.", recoveredWorkers.size(), ++currentMaxAttemptId);
getResourceEventHandler().onPreviousAttemptWorkersRecovered(recoveredWorkers);
} | 3.26 |
flink_KubernetesResourceManagerDriver_initializeInternal_rdh | // ------------------------------------------------------------------------
// ResourceManagerDriver
// ------------------------------------------------------------------------
@Override
protected void initializeInternal() throws Exception {
podsWatchOpt = watchTaskManagerPods();
final File podTemplateFile = KubernetesUtils.getTaskManagerPodTemplateFileInPod();
if (podTemplateFile.exists()) {
taskManagerPodTemplate = KubernetesUtils.loadPodFromTemplateFile(flinkKubeClient, podTemplateFile, Constants.MAIN_CONTAINER_NAME);
} else {
taskManagerPodTemplate = new FlinkPod.Builder().build();
}
updateKubernetesServiceTargetPortIfNecessary();
recoverWorkerNodesFromPreviousAttempts();
this.running = true;
} | 3.26 |