name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
flink_ExecutionConfig_toMap_rdh | /**
* Convert UserConfig into a {@code Map<String, String>} representation. This can be used by
* the runtime, for example for presenting the user config in the web frontend.
*
* @return Key/Value representation of the UserConfig
*/
    public Map<String, String> toMap() {
return Collections.emptyMap();
} | 3.26 |
flink_ExecutionConfig_getNumberOfExecutionRetries_rdh | /**
* Gets the number of times the system will try to re-execute failed tasks. A value of {@code -1} indicates that the system default value (as defined in the configuration) should be used.
*
* @return The number of times the system will try to re-execute failed tasks.
* @deprecated Should no longer be used because it is subsumed by RestartStrategyConfiguration
*/
@Deprecated
public int getNumberOfExecutionRetries() {
return configuration.get(EXECUTION_RETRIES);
} | 3.26 |
flink_ExecutionConfig_enableAutoGeneratedUIDs_rdh | /**
 * Enables the Flink runtime to auto-generate UIDs for operators.
*
* @see #disableAutoGeneratedUIDs()
*/
public void enableAutoGeneratedUIDs() {
setAutoGeneratedUids(true);
} | 3.26 |
flink_ExecutionConfig_setDefaultInputDependencyConstraint_rdh | /**
* This method is deprecated. It was used to set the {@link InputDependencyConstraint} utilized
* by the old scheduler implementations which got removed as part of FLINK-20589. The current
* implementation has no effect.
*
* @param ignored
* Ignored parameter.
* @deprecated due to the deprecation of {@code InputDependencyConstraint}.
*/
@PublicEvolving
@Deprecated
public void setDefaultInputDependencyConstraint(InputDependencyConstraint ignored) {
} | 3.26 |
flink_ExecutionConfig_getRegisteredTypesWithKryoSerializers_rdh | /**
* Returns the registered types with Kryo Serializers.
*/
public LinkedHashMap<Class<?>, SerializableSerializer<?>> getRegisteredTypesWithKryoSerializers() {
        return registeredTypesWithKryoSerializers;
    } | 3.26 |
flink_ExecutionConfig_setTaskCancellationInterval_rdh | /**
* Sets the configuration parameter specifying the interval (in milliseconds) between
* consecutive attempts to cancel a running task.
*
* @param interval
* the interval (in milliseconds).
*/
public ExecutionConfig setTaskCancellationInterval(long interval) {
configuration.set(TaskManagerOptions.TASK_CANCELLATION_INTERVAL, interval);
return this;
} | 3.26 |
flink_ExecutionConfig_getRegisteredPojoTypes_rdh | /**
* Returns the registered POJO types.
*/
public LinkedHashSet<Class<?>> getRegisteredPojoTypes() {
return registeredPojoTypes;
} | 3.26 |
flink_ExecutionConfig_setAutoWatermarkInterval_rdh | /**
* Sets the interval of the automatic watermark emission. Watermarks are used throughout the
* streaming system to keep track of the progress of time. They are used, for example, for time
* based windowing.
*
* <p>Setting an interval of {@code 0} will disable periodic watermark emission.
*
* @param interval
* The interval between watermarks in milliseconds.
*/
    @PublicEvolving
    public ExecutionConfig setAutoWatermarkInterval(long interval) {
Preconditions.checkArgument(interval >= 0, "Auto watermark interval must not be negative.");
return setAutoWatermarkInterval(Duration.ofMillis(interval));
} | 3.26 |
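A minimal usage sketch of the watermark-interval setting shown above, assuming a standard DataStream job setup; the 200 ms value is purely illustrative.

```java
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class WatermarkIntervalExample {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Emit watermarks every 200 ms; an interval of 0 disables periodic watermark emission.
        env.getConfig().setAutoWatermarkInterval(200);
    }
}
```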
flink_ExecutionConfig_disableForceAvro_rdh | /**
* Disables the Apache Avro serializer as the forced serializer for POJOs.
*/
public void disableForceAvro() {
setForceAvro(false);
} | 3.26 |
flink_ExecutionConfig_getDefaultKryoSerializerClasses_rdh | /**
* Returns the registered default Kryo Serializer classes.
*/
public LinkedHashMap<Class<?>, Class<? extends Serializer<?>>> getDefaultKryoSerializerClasses() {
return defaultKryoSerializerClasses;
} | 3.26 |
flink_ExecutionConfig_enableClosureCleaner_rdh | // --------------------------------------------------------------------------------------------
/**
* Enables the ClosureCleaner. This analyzes user code functions and sets fields to null that
* are not used. This will in most cases make closures or anonymous inner classes serializable
 * that were not serializable due to some Scala or Java implementation artifact. User code must
* be serializable because it needs to be sent to worker nodes.
*/
public ExecutionConfig enableClosureCleaner() {
return setClosureCleanerLevel(ClosureCleanerLevel.RECURSIVE);
} | 3.26 |
flink_ExecutionConfig_getRegisteredKryoTypes_rdh | /**
* Returns the registered Kryo types.
*/
public LinkedHashSet<Class<?>> getRegisteredKryoTypes() {
if (isForceKryoEnabled()) {
// if we force kryo, we must also return all the types that
// were previously only registered as POJO
            LinkedHashSet<Class<?>> result = new LinkedHashSet<>();
            result.addAll(registeredKryoTypes);
            for (Class<?> pojoType : registeredPojoTypes) {
                if (!result.contains(pojoType)) {
                    result.add(pojoType);
}
}
return result;
} else {
return registeredKryoTypes;
}
} | 3.26 |
flink_ExecutionConfig_getLatencyTrackingInterval_rdh | /**
* Returns the latency tracking interval.
*
* @return The latency tracking interval in milliseconds
*/
    @PublicEvolving
    public long getLatencyTrackingInterval() {
return configuration.get(MetricOptions.LATENCY_INTERVAL);
} | 3.26 |
flink_ExecutionConfig_getClosureCleanerLevel_rdh | /**
* Returns the configured {@link ClosureCleanerLevel}.
*/
public ClosureCleanerLevel getClosureCleanerLevel() {
return configuration.get(PipelineOptions.CLOSURE_CLEANER_LEVEL);
} | 3.26 |
flink_ExecutionConfig_getExecutionRetryDelay_rdh | /**
* Returns the delay between execution retries.
*
* @return The delay between successive execution retries in milliseconds.
* @deprecated Should no longer be used because it is subsumed by RestartStrategyConfiguration
*/
    @Deprecated
    public long getExecutionRetryDelay() {
return executionRetryDelay;
}
/**
* Sets the number of times that failed tasks are re-executed. A value of zero effectively
* disables fault tolerance. A value of {@code -1} indicates that the system default value (as
* defined in the configuration) should be used.
*
* @param numberOfExecutionRetries
* The number of times the system will try to re-execute failed
* tasks.
* @return The current execution configuration
* @deprecated This method will be replaced by {@link #setRestartStrategy}. The {@link RestartStrategies.FixedDelayRestartStrategyConfiguration} | 3.26 |
flink_ExecutionConfig_getTaskCancellationInterval_rdh | /**
* Gets the interval (in milliseconds) between consecutive attempts to cancel a running task.
*/
    public long getTaskCancellationInterval() {
        return configuration.get(TaskManagerOptions.TASK_CANCELLATION_INTERVAL);
} | 3.26 |
flink_ExecutionConfig_getDefaultKryoSerializers_rdh | /**
* Returns the registered default Kryo Serializers.
 */
    public LinkedHashMap<Class<?>, SerializableSerializer<?>> getDefaultKryoSerializers() {
return defaultKryoSerializers;
} | 3.26 |
flink_ExecutionConfig_disableForceKryo_rdh | /**
 * Disables the use of the Kryo serializer for all POJOs.
*/
public void disableForceKryo() {
setForceKryo(false);
} | 3.26 |
flink_ExecutionConfig_enableForceAvro_rdh | /**
* Forces Flink to use the Apache Avro serializer for POJOs.
*
* <p><b>Important:</b> Make sure to include the <i>flink-avro</i> module.
*/
    public void enableForceAvro() {
        setForceAvro(true);
} | 3.26 |
flink_ExecutionConfig_registerTypeWithKryoSerializer_rdh | /**
* Registers the given Serializer via its class as a serializer for the given type at the
 * KryoSerializer.
*
* @param type
* The class of the types serialized with the given serializer.
* @param serializerClass
* The class of the serializer to use.
*/
@SuppressWarnings("rawtypes")
public void registerTypeWithKryoSerializer(Class<?> type, Class<? extends Serializer> serializerClass) {
if ((type == null) || (serializerClass == null)) {
            throw new NullPointerException("Cannot register null class or serializer.");
        }
@SuppressWarnings("unchecked")
Class<? extends Serializer<?>> castedSerializerClass = ((Class<? extends Serializer<?>>) (serializerClass));
registeredTypesWithKryoSerializerClasses.put(type, castedSerializerClass);
} | 3.26 |
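A hedged sketch of registering a custom Kryo serializer class through the method above. `Money` and `MoneySerializer` are hypothetical, and the `write`/`read` signatures assume the Kryo 2.x API that Flink bundles.

```java
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.Serializer;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;

// Hypothetical user type serialized via Kryo.
class Money {
    long cents;
}

class MoneySerializer extends Serializer<Money> {
    @Override
    public void write(Kryo kryo, Output output, Money money) {
        output.writeLong(money.cents);
    }

    @Override
    public Money read(Kryo kryo, Input input, Class<Money> type) {
        Money money = new Money();
        money.cents = input.readLong();
        return money;
    }
}

// Registration against the ExecutionConfig, e.g. from a StreamExecutionEnvironment:
// env.getConfig().registerTypeWithKryoSerializer(Money.class, MoneySerializer.class);
```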
flink_ExecutionConfig_enableGenericTypes_rdh | /**
 * Enables the use of generic types which are serialized via Kryo.
*
* <p>Generic types are enabled by default.
*
* @see #disableGenericTypes()
*/
public void enableGenericTypes() {
setGenericTypes(true);
} | 3.26 |
flink_ExecutionConfig_disableGenericTypes_rdh | /**
* Disables the use of generic types (types that would be serialized via Kryo). If this option
* is used, Flink will throw an {@code UnsupportedOperationException} whenever it encounters a
* data type that would go through Kryo for serialization.
*
* <p>Disabling generic types can be helpful to eagerly find and eliminate the use of types that
* would go through Kryo serialization during runtime. Rather than checking types individually,
* using this option will throw exceptions eagerly in the places where generic types are used.
*
* <p><b>Important:</b> We recommend to use this option only during development and
* pre-production phases, not during actual production use. The application program and/or the
* input data may be such that new, previously unseen, types occur at some point. In that case,
* setting this option would cause the program to fail.
*
* @see #enableGenericTypes()
*/
public void disableGenericTypes() {
setGenericTypes(false);
} | 3.26 |
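A short usage sketch for the fail-fast behavior described above, intended for development runs only.

```java
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class DisableGenericTypesExample {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Any type that would fall back to Kryo now triggers an UnsupportedOperationException
        // instead of silently going through generic serialization.
        env.getConfig().disableGenericTypes();
    }
}
```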
flink_KeyMap_isEmpty_rdh | /**
* Checks whether the map is empty.
*
* @return True, if the map is empty, false otherwise.
*/
public boolean isEmpty() {
return f0 == 0;
} | 3.26 |
flink_KeyMap_put_rdh | // ------------------------------------------------------------------------
// Gets and Puts
// ------------------------------------------------------------------------
/**
* Inserts the given value, mapped under the given key. If the table already contains a value
 * for the key, the value is replaced and returned. If no value is contained yet, the function
* returns null.
*
* @param key
* The key to insert.
* @param value
* The value to insert.
* @return The previously mapped value for the key, or null, if no value was mapped for the key.
* @throws java.lang.NullPointerException
* Thrown, if the key is null.
*/
public final V put(K key, V value) {
        final int hash = hash(key);
final int slot = indexOf(hash);
// search the chain from the slot
        for (Entry<K, V> e = table[slot]; e != null; e = e.next) {
            Object k;
if ((e.hashCode == hash) && (((k = e.key) == key) || key.equals(k))) {
// found match
V old = e.value;
e.value = value;
return old;
}
}
// no match, insert a new value
insertNewEntry(hash, key, value, slot);
return null;
} | 3.26 |
flink_KeyMap_getLongestChainLength_rdh | /**
* For testing only: Gets the length of the longest overflow chain. This method has linear
* complexity.
*
* @return The length of the longest overflow chain.
*/
int getLongestChainLength() {
int maxLen = 0;
for (Entry<?, ?> entry : table) {
int thisLen = 0;
while (entry != null) {
thisLen++;
entry = entry.next;
}
maxLen = Math.max(maxLen, thisLen);
}
return maxLen;
} | 3.26 |
flink_KeyMap_getCurrentTableCapacity_rdh | /**
 * Gets the current table capacity, i.e., the number of slots in the hash table, without any
* overflow chaining.
*
* @return The number of slots in the hash table.
*/
public int getCurrentTableCapacity() {
        return table.length;
    } | 3.26 |
flink_KeyMap_getLog2TableCapacity_rdh | /**
* Gets the base-2 logarithm of the hash table capacity, as returned by {@link #getCurrentTableCapacity()}.
*
* @return The base-2 logarithm of the hash table capacity.
*/
public int getLog2TableCapacity() {
return log2size;
} | 3.26 |
flink_KeyMap_get_rdh | /**
* Looks up the value mapped under the given key. Returns null if no value is mapped under this
* key.
*
* @param key
* The key to look up.
* @return The value associated with the key, or null, if no value is found for the key.
* @throws java.lang.NullPointerException
* Thrown, if the key is null.
*/
public V get(K key) {
        final int hash = hash(key);
        final int slot = indexOf(hash);
// search the chain from the slot
for (Entry<K, V> entry = table[slot]; entry != null; entry = entry.next) {
if ((entry.hashCode == hash) && entry.key.equals(key)) {
return entry.value;
}
}
// not found
        return null;
} | 3.26 |
flink_KeyMap_traverseMaps_rdh | // ------------------------------------------------------------------------
/**
 * Performs a traversal over the logical multi-map that results from the union of the given
* maps. This method does not actually build a union of the map, but traverses the hash maps
* together.
*
* @param maps
 * The array of maps whose union should be traversed.
* @param visitor
* The visitor that is called for each key and all values.
* @param touchedTag
* A tag that is used to mark elements that have been touched in this specific
* traversal. Each successive traversal should supply a larger value for this tag than the
* previous one.
* @param <K>
* The type of the map's key.
* @param <V>
* The type of the map's value.
*/
public static <K, V> void traverseMaps(final KeyMap<K, V>[] maps, final TraversalEvaluator<K, V> visitor, final long touchedTag) throws Exception {
// we need to work on the maps in descending size
Arrays.sort(maps, CapacityDescendingComparator.INSTANCE);
final int[] shifts = new int[maps.length];
final int[] lowBitsMask = new int[maps.length];
final int numSlots = maps[0].table.length;
final int numTables = maps.length;
// figure out how much each hash table collapses the entries
for (int i = 0; i < numTables; i++) {
shifts[i] = maps[0].log2size - maps[i].log2size;
lowBitsMask[i] = (1 << shifts[i]) - 1;
}
// go over all slots (based on the largest hash table)
for (int pos = 0; pos < numSlots; pos++) {
// for each slot, go over all tables, until the table does not have that slot any more
            // for tables where multiple slots collapse into one, we visit that one when we
            // process the latest of all slots that collapse to that one
int mask;
            for (int rootTable = 0; (rootTable < numTables) && (((mask = lowBitsMask[rootTable]) & pos) == mask); rootTable++) {
// use that table to gather keys and start collecting keys from the following tables
// go over all entries of that slot in the table
Entry<K, V> entry = maps[rootTable].table[pos >> shifts[rootTable]];
                while (entry != null) {
// take only entries that have not been collected as part of other tables
if (entry.touchedTag < touchedTag) {
entry.touchedTag = touchedTag;
                        final K key = entry.key;
                        final int hashCode = entry.hashCode;
visitor.startNewKey(key);
visitor.nextValue(entry.value);
addEntriesFromChain(entry.next, visitor, key, touchedTag, hashCode);
// go over the other hash tables and collect their entries for the key
for (int followupTable = rootTable + 1; followupTable < numTables; followupTable++) {
Entry<K, V> followupEntry = maps[followupTable].table[pos >> shifts[followupTable]];
if (followupEntry != null) {
addEntriesFromChain(followupEntry, visitor, key, touchedTag, hashCode);
}
                        }
                        visitor.keyDone();
}
entry = entry.next;
}
}
}
} | 3.26 |
flink_KeyMap_iterator_rdh | /**
* Creates an iterator over the entries of this map.
*
* @return An iterator over the entries of this map.
*/
@Override
    public Iterator<Entry<K, V>> iterator() {
        return new Iterator<Entry<K, V>>() {
            private final Entry<K, V>[] tab = KeyMap.this.table;
private Entry<K, V> nextEntry;
private int nextPos = 0;
@Override
public boolean hasNext() {
if (nextEntry != null) {
return true;
} else {
                    while (nextPos < tab.length) {
Entry<K, V> e = tab[nextPos++];
if (e != null) {
nextEntry = e;
return true;
}
}
                    return false;
}
}
@Override
public Entry<K, V> next() {
if ((nextEntry != null) || hasNext()) {
Entry<K, V> e = nextEntry;
                    nextEntry = nextEntry.next;
return e;
} else {
throw new NoSuchElementException();
}
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
} | 3.26 |
flink_KeyMap_allocateTable_rdh | // Utilities
// ------------------------------------------------------------------------
@SuppressWarnings("unchecked")
    private Entry<K, V>[] allocateTable(int numElements) {
        return ((Entry<K, V>[]) (new Entry<?, ?>[numElements]));
    } | 3.26 |
flink_KeyMap_traverseAndCountElements_rdh | // ------------------------------------------------------------------------
// Testing Utilities
// ------------------------------------------------------------------------
/**
* For testing only: Actively counts the number of entries, rather than using the counter
* variable. This method has linear complexity, rather than constant.
*
* @return The counted number of entries.
*/
int traverseAndCountElements() {
int num = 0;
for (Entry<?, ?> entry : table) {
while (entry != null) {
num++;
entry = entry.next;
}
}
return num;
} | 3.26 |
flink_KeyMap_size_rdh | // ------------------------------------------------------------------------
// Properties
// ------------------------------------------------------------------------
/**
* Gets the number of elements currently in the map.
*
* @return The number of elements currently in the map.
     */
    public int size() {
return f0;
} | 3.26 |
flink_SkipListValueSerializer_deserializeState_rdh | /**
* Deserialize the state from the byte buffer which stores skip list value.
*
* @param memorySegment
* the memory segment which stores the skip list value.
* @param offset
* the start position of the skip list value in the byte buffer.
* @param len
* length of the skip list value.
*/
    S deserializeState(MemorySegment memorySegment, int offset, int len) {
        final MemorySegmentInputStreamWithPos src = new MemorySegmentInputStreamWithPos(memorySegment, offset, len);
final DataInputViewStreamWrapper in = new DataInputViewStreamWrapper(src);
try {
return stateSerializer.deserialize(in);
} catch (IOException e) {
throw new RuntimeException("deserialize state failed", e);
}
} | 3.26 |
flink_SubtaskStateMapper_getNewToOldSubtasksMapping_rdh | /**
* Returns a mapping new subtask index to all old subtask indexes.
*/
    public RescaleMappings getNewToOldSubtasksMapping(int oldParallelism, int newParallelism) {
return RescaleMappings.of(IntStream.range(0, newParallelism).mapToObj(channelIndex -> getOldSubtasks(channelIndex, oldParallelism, newParallelism)), oldParallelism);
} | 3.26 |
flink_TypeExtractionUtils_checkAndExtractLambda_rdh | /**
* Checks if the given function has been implemented using a Java 8 lambda. If yes, a
* LambdaExecutable is returned describing the method/constructor. Otherwise null.
*
* @throws TypeExtractionException
* lambda extraction is pretty hacky, it might fail for unknown
* JVM issues.
*/
public static LambdaExecutable checkAndExtractLambda(Function function) throws TypeExtractionException {
try {
// get serialized lambda
SerializedLambda serializedLambda = null;
            for (Class<?> clazz = function.getClass(); clazz != null; clazz = clazz.getSuperclass()) {
try {
Method replaceMethod = clazz.getDeclaredMethod("writeReplace");
replaceMethod.setAccessible(true);
Object serialVersion = replaceMethod.invoke(function);
// check if class is a lambda function
if ((serialVersion != null) && (serialVersion.getClass() == SerializedLambda.class)) {
serializedLambda = ((SerializedLambda) (serialVersion));
break;
}
} catch (NoSuchMethodException e) {
// thrown if the method is not there. fall through the loop
                }
            }
// not a lambda method -> return null
if (serializedLambda == null) {
                return null;
            }
// find lambda method
String className = serializedLambda.getImplClass();
String methodName = serializedLambda.getImplMethodName();
String methodSig = serializedLambda.getImplMethodSignature();
Class<?> implClass = Class.forName(className.replace('/', '.'), true, Thread.currentThread().getContextClassLoader());
// find constructor
if (methodName.equals("<init>")) {
Constructor<?>[] constructors = implClass.getDeclaredConstructors();
for (Constructor<?> constructor : constructors) {
if (getConstructorDescriptor(constructor).equals(methodSig)) {
return new LambdaExecutable(constructor);
                    }
                }
            } else {
                // find method
List<Method> methods = getAllDeclaredMethods(implClass);
for (Method method : methods) {
if (method.getName().equals(methodName) && getMethodDescriptor(method).equals(methodSig)) {
return new LambdaExecutable(method);
}
}
}
throw new TypeExtractionException("No lambda method found.");
} catch (Exception e) {
throw new TypeExtractionException((("Could not extract lambda method out of function: " + e.getClass().getSimpleName()) + " - ") + e.getMessage(), e);
}
} | 3.26 |
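The writeReplace trick used above can be reproduced with plain JDK reflection. This standalone sketch (independent of Flink) shows how a serializable lambda exposes its implementing class and method through SerializedLambda.

```java
import java.io.Serializable;
import java.lang.invoke.SerializedLambda;
import java.lang.reflect.Method;
import java.util.function.Function;

public class SerializedLambdaDemo {
    // The functional interface must extend Serializable for the compiler to emit writeReplace().
    interface SerializableFunction<T, R> extends Function<T, R>, Serializable {}

    public static void main(String[] args) throws Exception {
        SerializableFunction<String, Integer> f = String::length;

        Method writeReplace = f.getClass().getDeclaredMethod("writeReplace");
        writeReplace.setAccessible(true);
        SerializedLambda lambda = (SerializedLambda) writeReplace.invoke(f);

        // For a method reference this points at the target method, e.g. java/lang/String#length()I
        System.out.println(lambda.getImplClass() + "#"
                + lambda.getImplMethodName() + lambda.getImplMethodSignature());
    }
}
```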
flink_TypeExtractionUtils_extractTypeArgument_rdh | /**
* This method extracts the n-th type argument from the given type. An InvalidTypesException is
* thrown if the type does not have any type arguments or if the index exceeds the number of
* type arguments.
*
* @param t
* Type to extract the type arguments from
* @param index
* Index of the type argument to extract
* @return The extracted type argument
* @throws InvalidTypesException
* if the given type does not have any type arguments or if the
* index exceeds the number of type arguments.
*/
public static Type extractTypeArgument(Type t, int index) throws InvalidTypesException {
        if (t instanceof ParameterizedType) {
            Type[] actualTypeArguments = ((ParameterizedType) (t)).getActualTypeArguments();
            if ((index < 0) || (index >= actualTypeArguments.length)) {
                throw new InvalidTypesException(((("Cannot extract the type argument with index " + index) + " because the type has only ") + actualTypeArguments.length) + " type arguments.");
} else {
return actualTypeArguments[index];
}
} else {
throw new InvalidTypesException(("The given type " + t) + " is not a parameterized type.");
}
} | 3.26 |
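What extractTypeArgument reads can be seen with plain JDK reflection, since generic type arguments survive erasure on field and method signatures. A standalone illustration:

```java
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.List;
import java.util.Map;

public class TypeArgumentDemo {
    // Field signatures keep their generic information at runtime.
    static Map<String, List<Integer>> sample;

    public static void main(String[] args) throws Exception {
        Type t = TypeArgumentDemo.class.getDeclaredField("sample").getGenericType();
        Type[] arguments = ((ParameterizedType) t).getActualTypeArguments();
        System.out.println(arguments[0]); // class java.lang.String           (index 0)
        System.out.println(arguments[1]); // java.util.List<java.lang.Integer> (index 1)
    }
}
```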
flink_TypeExtractionUtils_getRawClass_rdh | /**
* Returns the raw class of both parameterized types and generic arrays. Returns
* java.lang.Object for all other types.
*/
public static Class<?> getRawClass(Type t) {
if (isClassType(t)) {
return typeToClass(t);
} else if (t instanceof GenericArrayType) {
        Type componentType = ((GenericArrayType) (t)).getGenericComponentType();
        return Array.newInstance(getRawClass(componentType), 0).getClass();
    }
return Object.class;
} | 3.26 |
flink_TypeExtractionUtils_getTypeHierarchy_rdh | /**
* Traverses the type hierarchy of a type up until a certain stop class is found.
*
* @param t
* type for which a hierarchy need to be created
* @return type of the immediate child of the stop class
*/
public static Type getTypeHierarchy(List<Type> typeHierarchy, Type t, Class<?> stopAtClass) {
while (!(isClassType(t) && typeToClass(t).equals(stopAtClass))) {
typeHierarchy.add(t);
t = typeToClass(t).getGenericSuperclass();
if (t == null) {
break;
}
}
return t;
} | 3.26 |
flink_TypeExtractionUtils_getAllDeclaredMethods_rdh | /**
* Returns all declared methods of a class including methods of superclasses.
*/
public static List<Method> getAllDeclaredMethods(Class<?> clazz) {
List<Method> result = new ArrayList<>();
while (clazz != null) {
Method[] methods = clazz.getDeclaredMethods();
Collections.addAll(result, methods);
clazz = clazz.getSuperclass();
}
return result;
} | 3.26 |
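A small usage sketch; the package of TypeExtractionUtils is assumed to be org.apache.flink.api.java.typeutils. Because the helper walks the superclass chain, the result also contains declarations inherited from AbstractList, AbstractCollection and Object.

```java
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;

import org.apache.flink.api.java.typeutils.TypeExtractionUtils;

public class AllDeclaredMethodsDemo {
    public static void main(String[] args) {
        List<Method> methods = TypeExtractionUtils.getAllDeclaredMethods(ArrayList.class);
        methods.stream()
                .filter(m -> m.getDeclaringClass() == Object.class)
                .map(Method::getName)
                .distinct()
                .forEach(System.out::println); // e.g. equals, hashCode, toString, wait, ...
    }
}
```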
flink_TypeExtractionUtils_getSingleAbstractMethod_rdh | /**
* Extracts a Single Abstract Method (SAM) as defined in Java Specification (4.3.2. The Class
* Object, 9.8 Functional Interfaces, 9.4.3 Interface Method Body) from given class.
*
* @param baseClass
* a class that is a FunctionalInterface to retrieve a SAM from
* @throws InvalidTypesException
* if the given class does not implement FunctionalInterface
* @return single abstract method of the given class
*/
public static Method getSingleAbstractMethod(Class<?> baseClass) {
        if (!baseClass.isInterface()) {
            throw new InvalidTypesException(("Given class: " + baseClass) + " is not a FunctionalInterface.");
        }
        Method sam = null;
for (Method method : baseClass.getMethods()) {
if (Modifier.isAbstract(method.getModifiers())) {
if (sam == null) {
sam = method;
} else {
throw new InvalidTypesException(("Given class: " + baseClass) + " is not a FunctionalInterface. It has more than one abstract method.");
}
}
}
if (sam == null) {
throw new InvalidTypesException(("Given class: " + baseClass) + " is not a FunctionalInterface. It does not have any abstract methods.");
}
return sam;
} | 3.26 |
flink_TypeExtractionUtils_sameTypeVars_rdh | /**
* Checks whether two types are type variables describing the same.
*/
public static boolean sameTypeVars(Type t1, Type t2) {
return (((t1 instanceof TypeVariable) && (t2 instanceof TypeVariable)) && ((TypeVariable<?>) (t1)).getName().equals(((TypeVariable<?>) (t2)).getName())) && ((TypeVariable<?>) (t1)).getGenericDeclaration().equals(((TypeVariable<?>) (t2)).getGenericDeclaration());
} | 3.26 |
flink_TypeExtractionUtils_validateLambdaType_rdh | /**
* Checks whether the given type has the generic parameters declared in the class definition.
*
* @param t
* type to be validated
*/
public static void validateLambdaType(Class<?> baseClass, Type t) {
        if (!(t instanceof Class)) {
return;
}
final Class<?> clazz = ((Class<?>) (t));
if (clazz.getTypeParameters().length > 0) {
throw new InvalidTypesException((((((("The generic type parameters of '" + clazz.getSimpleName()) + "' are missing. ") + "In many cases lambda methods don't provide enough information for automatic type extraction when Java generics are involved. ") + "An easy workaround is to use an (anonymous) class instead that implements the '") + baseClass.getName()) + "' interface. ") + "Otherwise the type has to be specified explicitly using type information.");
}
} | 3.26 |
flink_TypeExtractionUtils_hasSuperclass_rdh | /**
* Returns true if the given class has a superclass of given name.
*
* @param clazz
* class to be analyzed
* @param superClassName
* class name of the super class
*/
public static boolean hasSuperclass(Class<?> clazz, String superClassName) {
List<Type> hierarchy = new ArrayList<>();
getTypeHierarchy(hierarchy, clazz, Object.class);
        for (Type t : hierarchy) {
if (isClassType(t) && typeToClass(t).getName().equals(superClassName)) {
return true;
}
}
return false;
} | 3.26 |
flink_TypeExtractionUtils_typeToClass_rdh | /**
* Convert ParameterizedType or Class to a Class.
*/
@SuppressWarnings("unchecked")
public static <T> Class<T> typeToClass(Type t) {
if (t instanceof Class) {
            return ((Class<T>) (t));
        } else if (t instanceof ParameterizedType) {
return ((Class<T>) (((ParameterizedType) (t)).getRawType()));
}
throw new IllegalArgumentException("Cannot convert type to class");
} | 3.26 |
flink_TypeExtractionUtils_isClassType_rdh | /**
* Checks if a type can be converted to a Class. This is true for ParameterizedType and Class.
*/
public static boolean isClassType(Type t) {
return (t instanceof Class<?>) || (t instanceof ParameterizedType);
} | 3.26 |
flink_TypeExtractionUtils_extractTypeFromLambda_rdh | /**
* Extracts type from given index from lambda. It supports nested types.
*
* @param baseClass
* SAM function that the lambda implements
* @param exec
* lambda function to extract the type from
* @param lambdaTypeArgumentIndices
* position of type to extract in type hierarchy
* @param paramLen
* count of total parameters of the lambda (including closure parameters)
* @param baseParametersLen
* count of lambda interface parameters (without closure parameters)
* @return extracted type
*/
public static Type extractTypeFromLambda(Class<?> baseClass, LambdaExecutable exec, int[] lambdaTypeArgumentIndices, int paramLen, int baseParametersLen) {
Type output = exec.getParameterTypes()[(paramLen - baseParametersLen) + lambdaTypeArgumentIndices[0]];
for (int i = 1; i < lambdaTypeArgumentIndices.length; i++) {
validateLambdaType(baseClass, output);
output = extractTypeArgument(output, lambdaTypeArgumentIndices[i]);
}
validateLambdaType(baseClass, output);
return output;
} | 3.26 |
flink_WindowJoin_main_rdh | // *************************************************************************
// PROGRAM
// *************************************************************************
public static void main(String[] args) throws Exception {
// parse the parameters
final ParameterTool params = ParameterTool.fromArgs(args);
        final long windowSize = params.getLong("windowSize", 2000);
final long rate = params.getLong("rate", 3L);
final boolean fileOutput = params.has("output");
System.out.println((("Using windowSize=" + windowSize) + ", data rate=") + rate);
System.out.println("To customize example, use: WindowJoin [--windowSize <window-size-in-millis>] [--rate <elements-per-second>]");
// obtain execution environment, run this example in "ingestion time"
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
// make parameters available in the web interface
env.getConfig().setGlobalJobParameters(params);
// create the data sources for both grades and salaries
DataStream<Tuple2<String, Integer>> grades = GradeSource.getSource(env, rate).assignTimestampsAndWatermarks(IngestionTimeWatermarkStrategy.create());
DataStream<Tuple2<String, Integer>> salaries = SalarySource.getSource(env, rate).assignTimestampsAndWatermarks(IngestionTimeWatermarkStrategy.create());
// run the actual window join program
// for testability, this functionality is in a separate method.
DataStream<Tuple3<String, Integer, Integer>> v7 = runWindowJoin(grades, salaries, windowSize);
        if (fileOutput) {
v7.sinkTo(FileSink.<Tuple3<String, Integer, Integer>>forRowFormat(new Path(params.get("output")), new SimpleStringEncoder<>()).withRollingPolicy(DefaultRollingPolicy.builder().withMaxPartSize(MemorySize.ofMebiBytes(1)).withRolloverInterval(Duration.ofSeconds(10)).build()).build()).name("output");
} else {
// print the results with a single thread, rather than in parallel
v7.print().setParallelism(1);
}
// execute program
env.execute("Windowed Join Example");
} | 3.26 |
flink_NettyShuffleUtils_getNetworkBuffersPerInputChannel_rdh | /**
* Calculates and returns the number of required exclusive network buffers per input channel.
*/
public static int getNetworkBuffersPerInputChannel(final int configuredNetworkBuffersPerChannel) {
return configuredNetworkBuffersPerChannel;
} | 3.26 |
flink_NettyShuffleUtils_getMinMaxFloatingBuffersPerInputGate_rdh | /**
* Calculates and returns the floating network buffer pool size used by the input gate. The
 * left/right values of the returned pair represent the min/max buffers required by the pool.
*/
public static Pair<Integer, Integer> getMinMaxFloatingBuffersPerInputGate(final int numFloatingBuffersPerGate) {
// We should guarantee at-least one floating buffer for local channel state recovery.
return Pair.of(1, numFloatingBuffersPerGate);
} | 3.26 |
flink_NettyShuffleUtils_getMinMaxNetworkBuffersPerResultPartition_rdh | /**
* Calculates and returns local network buffer pool size used by the result partition. The
 * left/right values of the returned pair represent the min/max buffers required by the pool.
*/
public static Pair<Integer, Integer> getMinMaxNetworkBuffersPerResultPartition(final int configuredNetworkBuffersPerChannel, final int numFloatingBuffersPerGate, final int sortShuffleMinParallelism, final int sortShuffleMinBuffers, final int numSubpartitions, final boolean enableTieredStorage, final int tieredStoreExclusiveBuffers, final ResultPartitionType type) {
boolean isSortShuffle = type.isBlockingOrBlockingPersistentResultPartition() && (numSubpartitions >= sortShuffleMinParallelism);
int min;
if (isSortShuffle) {
            min = sortShuffleMinBuffers;
        } else {
min = (enableTieredStorage) ? Math.min(tieredStoreExclusiveBuffers, numSubpartitions + 1) : numSubpartitions + 1;
}
int max = (type.isBounded()) ? (numSubpartitions * configuredNetworkBuffersPerChannel) + numFloatingBuffersPerGate : isSortShuffle ? Math.max(min, 4 * numSubpartitions) : NetworkBufferPool.UNBOUNDED_POOL_SIZE;
// for each upstream hash-based blocking/pipelined subpartition, at least one buffer is
// needed even the configured network buffers per channel is 0 and this behavior is for
// performance. If it's not guaranteed that each subpartition can get at least one buffer,
// more partial buffers with little data will be outputted to network/disk and recycled to
// be used by other subpartitions which can not get a buffer for data caching.
return Pair.of(min, Math.max(min, max));
} | 3.26 |
flink_DuplicatingCheckpointOutputStream_closeAndGetSecondaryHandle_rdh | /**
* Returns the state handle from the {@link #secondaryOutputStream}. Also reports suppressed
* exceptions from earlier interactions with that stream.
*/
    public StreamStateHandle closeAndGetSecondaryHandle() throws IOException {
        if (secondaryStreamException == null) {
            flushInternalBuffer();
return secondaryOutputStream.closeAndGetHandle();
} else {
throw new IOException("Secondary stream previously failed exceptionally", secondaryStreamException);
}
} | 3.26 |
flink_DuplicatingCheckpointOutputStream_closeAndGetPrimaryHandle_rdh | /**
* Returns the state handle from the {@link #primaryOutputStream}.
*/
public StreamStateHandle closeAndGetPrimaryHandle() throws IOException {
flushInternalBuffer();
return primaryOutputStream.closeAndGetHandle();
} | 3.26 |
flink_Vectorizer_setWriter_rdh | /**
* Users are not supposed to use this method since this is intended to be used only by the
* {@link OrcBulkWriter}.
*
* @param writer
* the underlying ORC Writer.
*/
public void setWriter(Writer writer) {
this.writer = writer;
} | 3.26 |
flink_Vectorizer_addUserMetadata_rdh | /**
* Adds arbitrary user metadata to the outgoing ORC file.
*
 * <p>Users who want to dynamically add new metadata, either based on the input or from an
 * external system, can do so by calling <code>addUserMetadata(...)</code> inside the overridden
* vectorize() method.
*
* @param key
* a key to label the data with.
* @param value
* the contents of the metadata.
*/
public void addUserMetadata(String key, ByteBuffer value) {
this.writer.addUserMetadata(key, value);
} | 3.26 |
flink_NettyBufferPool_buffer_rdh | // ------------------------------------------------------------------------
    // Delegate calls to the allocator and prohibit heap buffer allocations
// ------------------------------------------------------------------------
@Override
public ByteBuf buffer() {
return alloc.buffer();
} | 3.26 |
flink_SqlGatewayRestAPIDocGenerator_main_rdh | /**
* Generates the Sql Gateway REST API documentation.
*
* @param args
* args[0] contains the directory into which the generated files are placed
* @throws IOException
* if any file operation failed
*/
public static void main(String[] args) throws IOException, ConfigurationException {
String outputDirectory = args[0];
for (final SqlGatewayRestAPIVersion apiVersion : SqlGatewayRestAPIVersion.values()) {
if (apiVersion == SqlGatewayRestAPIVersion.V0) {
// this version exists only for testing purposes
continue;
            }
            createHtmlFile(new DocumentingSqlGatewayRestEndpoint(), apiVersion, Paths.get(outputDirectory, ("rest_" + apiVersion.getURLVersionPrefix()) + "_sql_gateway.html"));
}
} | 3.26 |
flink_HiveTypeUtil_toFlinkType_rdh | /**
* Convert Hive data type to a Flink data type.
*
* @param hiveType
* a Hive data type
* @return the corresponding Flink data type
*/
public static DataType toFlinkType(TypeInfo hiveType) {
checkNotNull(hiveType, "hiveType cannot be null");
switch (hiveType.getCategory()) {
case PRIMITIVE :
return toFlinkPrimitiveType(((PrimitiveTypeInfo) (hiveType)));
case LIST :
ListTypeInfo listTypeInfo = ((ListTypeInfo) (hiveType));
return DataTypes.ARRAY(toFlinkType(listTypeInfo.getListElementTypeInfo()));
            case MAP :
                MapTypeInfo mapTypeInfo = ((MapTypeInfo) (hiveType));
                return DataTypes.MAP(toFlinkType(mapTypeInfo.getMapKeyTypeInfo()), toFlinkType(mapTypeInfo.getMapValueTypeInfo()));
            case STRUCT :
                StructTypeInfo structTypeInfo = ((StructTypeInfo) (hiveType));
                List<String> names = structTypeInfo.getAllStructFieldNames();
                List<TypeInfo> typeInfos = structTypeInfo.getAllStructFieldTypeInfos();
                DataTypes.Field[] fields = new DataTypes.Field[names.size()];
for (int i = 0; i < fields.length; i++) {
fields[i] = DataTypes.FIELD(names.get(i), toFlinkType(typeInfos.get(i)));
}
return DataTypes.ROW(fields);
default :
                throw new UnsupportedOperationException(String.format("Flink doesn't support Hive data type %s yet.", hiveType));
        }
} | 3.26 |
flink_HiveTypeUtil_toHiveTypeInfo_rdh | /**
* Convert Flink LogicalType to Hive TypeInfo. For types with a precision parameter, e.g.
* timestamp, the supported precisions in Hive and Flink can be different. Therefore the
* conversion will fail for those types if the precision is not supported by Hive and
* checkPrecision is true.
*
* @param logicalType
* a Flink LogicalType
* @param checkPrecision
* whether to fail the conversion if the precision of the LogicalType is
* not supported by Hive
* @return the corresponding Hive data type
*/
public static TypeInfo toHiveTypeInfo(LogicalType logicalType, boolean checkPrecision) {
checkNotNull(logicalType, "type cannot be null");
return logicalType.accept(new TypeInfoLogicalTypeVisitor(logicalType, checkPrecision));
} | 3.26 |
flink_HeapKeyedStateBackend_numKeyValueStateEntries_rdh | /**
* Returns the total number of state entries across all keys for the given namespace.
 */
@VisibleForTesting
public int numKeyValueStateEntries(Object namespace) {
int sum = 0;
for (StateTable<?, ?, ?> state : f0.values()) {
sum += state.sizeOfNamespace(namespace);
}
return sum;
} | 3.26 |
flink_HeapKeyedStateBackend_create_rdh | // ------------------------------------------------------------------------
// state backend operations
// ------------------------------------------------------------------------
@Nonnull
@Override
    public <T extends HeapPriorityQueueElement & PriorityComparable<? super T> & Keyed<?>> KeyGroupedInternalPriorityQueue<T> create(@Nonnull String stateName, @Nonnull TypeSerializer<T> byteOrderedElementSerializer) {
return priorityQueuesManager.createOrUpdate(stateName, byteOrderedElementSerializer);
} | 3.26 |
flink_LinkElement_link_rdh | /**
* Creates a link with a given url. This url will be used as a description for that link.
*
* @param link
* address that this link should point to
* @return link representation
*/
public static LinkElement link(String link) {
return new LinkElement(link, link);
} | 3.26 |
flink_TaskIOMetricGroup_getTaskInitializationDuration_rdh | /**
* Returns the duration of time required for a task's restoring/initialization, which reaches
* its maximum when the task begins running and remains constant throughout the task's running.
* Return 0 when the task is not in initialization/running status.
*/
@VisibleForTesting
public long getTaskInitializationDuration() {
if (taskInitializeTime == INVALID_TIMESTAMP) {
return 0L;
        } else if (taskStartTime == INVALID_TIMESTAMP) {
            return clock.absoluteTimeMillis() - taskInitializeTime;
        } else {
            return taskStartTime - taskInitializeTime;
}
} | 3.26 |
flink_TaskIOMetricGroup_reuseRecordsInputCounter_rdh | // ============================================================================================
// Metric Reuse
// ============================================================================================
| 3.26 |
flink_TaskIOMetricGroup_getNumBytesInCounter_rdh | // ============================================================================================
// Getters
// ============================================================================================
public Counter getNumBytesInCounter() {
return numBytesIn;
} | 3.26 |
flink_MultisetTypeInfo_getElementTypeInfo_rdh | // ------------------------------------------------------------------------
// MultisetTypeInfo specific properties
// ------------------------------------------------------------------------
/**
* Gets the type information for the elements contained in the Multiset
*/
public TypeInformation<T> getElementTypeInfo() {
return getKeyTypeInfo();
} | 3.26 |
flink_Tuple21_copy_rdh | /**
* Shallow tuple copy.
*
* @return A new Tuple with the same fields as this.
*/
@Override
@SuppressWarnings("unchecked")
    public Tuple21<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20> copy() {
        return new Tuple21<>(this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7, this.f8, this.f9, this.f10, this.f11, this.f12, this.f13, this.f14, this.f15, this.f16, this.f17, this.f18, this.f19, this.f20);
} | 3.26 |
flink_Tuple21_setFields_rdh | /**
* Sets new values to all fields of the tuple.
*
* @param f0
* The value for field 0
* @param f1
* The value for field 1
* @param f2
* The value for field 2
* @param f3
* The value for field 3
* @param f4
* The value for field 4
* @param f5
* The value for field 5
* @param f6
* The value for field 6
* @param f7
* The value for field 7
* @param f8
* The value for field 8
* @param f9
* The value for field 9
* @param f10
* The value for field 10
* @param f11
* The value for field 11
* @param f12
* The value for field 12
* @param f13
* The value for field 13
* @param f14
* The value for field 14
* @param f15
* The value for field 15
* @param f16
* The value for field 16
* @param f17
* The value for field 17
* @param f18
* The value for field 18
* @param f19
* The value for field 19
* @param f20
* The value for field 20
*/
public void setFields(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10, T11 f11, T12 f12,
T13 f13, T14 f14, T15 f15, T16 f16, T17 f17, T18 f18, T19 f19, T20 f20) {
this.f0 = f0;
this.f1 = f1;
this.f2 = f2;
this.f3 = f3;
this.f4 = f4;
this.f5 = f5;
this.f6 = f6;
this.f7 = f7;
this.f8 = f8;
this.f9 = f9;
this.f10 = f10;
this.f11 = f11;
        this.f12 = f12;
        this.f13 = f13;
        this.f14 = f14;
this.f15 = f15;
this.f16 = f16;
this.f17 = f17;
this.f18 = f18;
this.f19 = f19;
this.f20 = f20;
} | 3.26 |
flink_Tuple21_toString_rdh | // -------------------------------------------------------------------------------------------------
// standard utilities
// -------------------------------------------------------------------------------------------------
/**
* Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7, f8,
* f9, f10, f11, f12, f13, f14, f15, f16, f17, f18, f19, f20), where the individual fields are
* the value returned by calling {@link Object#toString} on that field.
*
* @return The string representation of the tuple.
     */
    @Override
public String toString() {
return ((((((((((((((((((((((((((((((((((((((((("(" + StringUtils.arrayAwareToString(this.f0)) + ",") + StringUtils.arrayAwareToString(this.f1)) + ",") + StringUtils.arrayAwareToString(this.f2)) + ",") + StringUtils.arrayAwareToString(this.f3)) + ",") + StringUtils.arrayAwareToString(this.f4)) + ",") + StringUtils.arrayAwareToString(this.f5)) + ",") + StringUtils.arrayAwareToString(this.f6)) + ",") + StringUtils.arrayAwareToString(this.f7)) + ",") + StringUtils.arrayAwareToString(this.f8)) + ",") + StringUtils.arrayAwareToString(this.f9)) + ",") + StringUtils.arrayAwareToString(this.f10)) + ",") + StringUtils.arrayAwareToString(this.f11)) + ",") + StringUtils.arrayAwareToString(this.f12)) + ",") + StringUtils.arrayAwareToString(this.f13)) + ",") + StringUtils.arrayAwareToString(this.f14))
+ ",") + StringUtils.arrayAwareToString(this.f15)) + ",") +
StringUtils.arrayAwareToString(this.f16)) + ",") + StringUtils.arrayAwareToString(this.f17)) + ",") + StringUtils.arrayAwareToString(this.f18)) + ",") + StringUtils.arrayAwareToString(this.f19)) + ",") + StringUtils.arrayAwareToString(this.f20)) + ")";
} | 3.26 |
flink_Tuple21_of_rdh | /**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
public static <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20> Tuple21<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10,
T11, T12, T13, T14, T15, T16, T17, T18, T19, T20> of(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10, T11 f11, T12 f12, T13 f13, T14 f14, T15 f15, T16 f16, T17 f17, T18 f18, T19 f19, T20 f20) {
        return new Tuple21<>(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16, f17, f18, f19, f20);
} | 3.26 |
flink_WindowOperatorBuilder_withInputCountIndex_rdh | /**
 * The index of COUNT(*) in the aggregates. -1 when the input doesn't contain COUNT(*), i.e.
 * doesn't contain retraction messages. We make sure there is a COUNT(*) if the input stream
* contains retraction.
*/
public WindowOperatorBuilder withInputCountIndex(int inputCountIndex) {
this.inputCountIndex = inputCountIndex;
return this;
} | 3.26 |
flink_BlockInfo_setAccumulatedRecordCount_rdh | /**
* Sets the accumulatedRecordCount to the specified value.
*
* @param accumulatedRecordCount
* the accumulatedRecordCount to set
*/
public void setAccumulatedRecordCount(long accumulatedRecordCount) {
this.accumulatedRecordCount = accumulatedRecordCount;
} | 3.26 |
flink_BlockInfo_getAccumulatedRecordCount_rdh | /**
* Returns the accumulated record count.
*
* @return the accumulated record count
     */
    public long getAccumulatedRecordCount() {
return this.accumulatedRecordCount;
} | 3.26 |
flink_BlockInfo_setRecordCount_rdh | /**
* Sets the recordCount to the specified value.
*
* @param recordCount
* the recordCount to set
*/
public void setRecordCount(long recordCount) {
this.recordCount = recordCount;
} | 3.26 |
flink_BlockInfo_setFirstRecordStart_rdh | /**
* Sets the firstRecordStart to the specified value.
*
* @param firstRecordStart
* the firstRecordStart to set
*/
    public void setFirstRecordStart(long firstRecordStart) {
        this.firstRecordStart = firstRecordStart;
} | 3.26 |
flink_DispatcherResourceManagerComponent_m0_rdh | /**
* Deregister the Flink application from the resource management system by signalling the {@link ResourceManager} and also stop the process.
*
* @param applicationStatus
* to terminate the application with
* @param diagnostics
* additional information about the shut down, can be {@code null}
 * @return Future which is completed once the shut down has finished
*/
    public CompletableFuture<Void> m0(final ApplicationStatus applicationStatus, @Nullable final String diagnostics) {
        return internalShutdown(() -> resourceManagerService.deregisterApplication(applicationStatus, diagnostics));
} | 3.26 |
flink_RecordProcessorUtils_getRecordProcessor1_rdh | /**
* Get record processor for the first input of {@link TwoInputStreamOperator}, which will omit
* call of {@link StreamOperator#setKeyContextElement1} if it doesn't have key context.
*
* @param operator
* the {@link TwoInputStreamOperator}
* @return the record processor
*/
public static <T> ThrowingConsumer<StreamRecord<T>, Exception> getRecordProcessor1(TwoInputStreamOperator<T, ?, ?> operator) {
boolean canOmitSetKeyContext;
if (operator instanceof AbstractStreamOperator) {
canOmitSetKeyContext = canOmitSetKeyContext(((AbstractStreamOperator<?>) (operator)), 0);
} else {
canOmitSetKeyContext = (operator instanceof KeyContextHandler) && (!((KeyContextHandler) (operator)).hasKeyContext1());
}
if (canOmitSetKeyContext) {
return operator::processElement1;
} else {
return record -> {
operator.setKeyContextElement1(record);
operator.processElement1(record);
};
}
} | 3.26 |
flink_RecordProcessorUtils_getRecordProcessor2_rdh | /**
* Get record processor for the second input of {@link TwoInputStreamOperator}, which will omit
* call of {@link StreamOperator#setKeyContextElement2} if it doesn't have key context.
*
* @param operator
* the {@link TwoInputStreamOperator}
* @return the record processor
*/
public static <T> ThrowingConsumer<StreamRecord<T>, Exception> getRecordProcessor2(TwoInputStreamOperator<?, T, ?> operator) {
boolean canOmitSetKeyContext;
        if (operator instanceof AbstractStreamOperator) {
canOmitSetKeyContext = canOmitSetKeyContext(((AbstractStreamOperator<?>) (operator)), 1);
} else {
canOmitSetKeyContext = (operator instanceof KeyContextHandler) && (!((KeyContextHandler) (operator)).hasKeyContext2());
}
if (canOmitSetKeyContext) {
return operator::processElement2;
} else {
return record -> {
operator.setKeyContextElement2(record);
operator.processElement2(record);
};
        }
    } | 3.26 |
flink_FileSink_build_rdh | /**
* Creates the actual sink.
*/
public FileSink<IN> build() {
return new FileSink<>(this);
} | 3.26 |
flink_AbstractPythonEnvironmentManager_constructEnvironmentVariables_rdh | /**
* Constructs the environment variables which is used to launch the python UDF worker.
*
* @return The environment variables which contain the paths of the python dependencies.
*/
@VisibleForTesting
public Map<String, String> constructEnvironmentVariables(String baseDirectory) throws IOException {
Map<String, String> env = new HashMap<>(this.systemEnv);
constructFilesDirectory(env, baseDirectory);
if (dependencyInfo.getPythonPath().isPresent()) {
appendToPythonPath(env, Collections.singletonList(dependencyInfo.getPythonPath().get()));
}
LOG.info("PYTHONPATH of python worker: {}", env.get("PYTHONPATH"));constructRequirementsDirectory(env, baseDirectory);
constructArchivesDirectory(env, baseDirectory);
// set BOOT_LOG_DIR.
env.put("BOOT_LOG_DIR", baseDirectory);
// disable the launching of gateway server to prevent from this dead loop:
// launch UDF worker -> import udf -> import job code
// ^ | (If the job code is not enclosed in a
// | | if name == 'main' statement)
// | V
// execute job in local mode <- launch gateway server and submit job to local executor
        env.put(PYFLINK_GATEWAY_DISABLED, "true");
// set the path of python interpreter, it will be used to execute the udf worker.
env.put("python", dependencyInfo.getPythonExec());
LOG.info("Python interpreter path: {}", dependencyInfo.getPythonExec());
return env;
} | 3.26 |
flink_StreamGraphUtils_configureBufferTimeout_rdh | /**
* Configure a stream node's buffer timeout according to the given transformation.
*
* @param streamGraph
* The StreamGraph the node belongs to
* @param nodeId
* The node's id
* @param transformation
* A given transformation
* @param defaultBufferTimeout
* The default buffer timeout value
*/
public static <T> void configureBufferTimeout(StreamGraph streamGraph, int nodeId, Transformation<T> transformation, long defaultBufferTimeout) {
if (transformation.getBufferTimeout() >= 0) {
streamGraph.setBufferTimeout(nodeId, transformation.getBufferTimeout());
} else {
streamGraph.setBufferTimeout(nodeId, defaultBufferTimeout);
}
} | 3.26 |
flink_StreamGraphUtils_validateTransformationUid_rdh | /**
* Throw {@link IllegalStateException} if the {@link PhysicalTransformation}'s uid or hash is
* not set when auto generate uid is disabled.
*
* @param streamGraph
* The given graph that the transformation is added to
* @param transformation
* The transformation needed to validate
*/
    public static void validateTransformationUid(StreamGraph streamGraph, Transformation<?> transformation) {
if (!streamGraph.getExecutionConfig().hasAutoGeneratedUIDsEnabled()) {
if (((transformation instanceof PhysicalTransformation) && (transformation.getUserProvidedNodeHash() == null)) && (transformation.getUid() == null)) {
            throw new IllegalStateException(("Auto generated UIDs have been disabled " + "but no UID or hash has been assigned to operator ") + transformation.getName());
}
}
} | 3.26 |
flink_StringifiedAccumulatorResult_stringifyAccumulatorResults_rdh | // ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
/**
* Flatten a map of accumulator names to Accumulator instances into an array of
* StringifiedAccumulatorResult values.
*/
public static StringifiedAccumulatorResult[] stringifyAccumulatorResults(Map<String, OptionalFailure<Accumulator<?, ?>>> accs) {
if ((accs == null) || accs.isEmpty()) {
return new StringifiedAccumulatorResult[0];
} else {
StringifiedAccumulatorResult[] results = new StringifiedAccumulatorResult[accs.size()];
int i = 0;
for (Map.Entry<String, OptionalFailure<Accumulator<?, ?>>> entry : accs.entrySet()) {
results[i++] = stringifyAccumulatorResult(entry.getKey(), entry.getValue());
}
return results;
}
} | 3.26 |
flink_WindowsGrouping_buildTriggerWindowElementsIterator_rdh | /**
*
* @return the iterator of the next triggerable window's elements.
*/
public RowIterator<BinaryRowData> buildTriggerWindowElementsIterator() {
currentWindow = nextWindow;
// It is illegal to call this method after [[hasTriggerWindow()]] has returned `false`.
Preconditions.checkState((watermark == Long.MIN_VALUE) || (nextWindow != null), "next trigger window cannot be null.");
if (nextWindow.getEnd() > watermark) {
throw new IllegalStateException("invalid window triggered " + currentWindow);
}
// advance in the stride of slideSize for hasTriggerWindow
nextWindow = TimeWindow.of(currentWindow.getStart() + slideSize, (currentWindow.getStart() + slideSize) + windowSize);
// build trigger window elements' iterator
emptyWindowTriggered = true;
onBufferEvict(triggerWindowStartIndex);
return new WindowsElementsIterator(newBufferIterator(triggerWindowStartIndex));
} | 3.26 |
flink_WindowsGrouping_reset_rdh | /**
* Reset for next group.
*/
public void reset() {
nextWindow = null;
watermark = Long.MIN_VALUE;
triggerWindowStartIndex = 0;
emptyWindowTriggered = true;
resetBuffer();
} | 3.26 |
flink_WindowsGrouping_hasTriggerWindow_rdh | /**
* Check if there are windows could be triggered according to the current watermark.
*
* @return true when there are windows to be triggered. It is designed to be idempotent.
*/
public boolean hasTriggerWindow() {
skipEmptyWindow();
        Preconditions.checkState((watermark == Long.MIN_VALUE) || (nextWindow != null), "next trigger window cannot be null.");
return (nextWindow != null) && (nextWindow.getEnd() <= watermark);
} | 3.26 |
flink_WindowsGrouping_getTriggerWindow_rdh | /**
*
* @return the last triggered window.
*/
public TimeWindow getTriggerWindow() {
return currentWindow;
} | 3.26 |
flink_ResettableExternalBuffer_close_rdh | /**
* Delete all files and release the memory.
*/
@Override
public void close() {
clearChannels();
inMemoryBuffer.close();
pool.close();
} | 3.26 |
flink_ResettableExternalBuffer_upperBound_rdh | // Find the index of the first element which is strictly greater than `goal` in `list`.
// `list` must be sorted.
// If every element in `list` is not larger than `goal`, return `list.size()`.
private int upperBound(int goal, List<Integer> list) {
if (list.size() == 0) {
return 0;
}
if (list.get(list.size() - 1) <= goal) {
return list.size();
        }
        // Binary search
        int head = 0;
        int tail = list.size() - 1;
        int mid;
        while (head < tail) {
            mid = (head + tail) / 2;
            if (list.get(mid) <= goal) {
                head = mid + 1;
            } else {
                tail = mid;
            }
        }
        return head;
    } | 3.26 |
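A standalone check of the upper-bound contract (index of the first element strictly greater than the goal); the helper below is a compact half-open variant with the same behavior as the method above.

```java
import java.util.Arrays;
import java.util.List;

public class UpperBoundDemo {
    public static void main(String[] args) {
        List<Integer> sorted = Arrays.asList(1, 3, 3, 7, 9);
        System.out.println(upperBound(3, sorted)); // 3 -> first element greater than 3 is 7
        System.out.println(upperBound(0, sorted)); // 0 -> every element is greater than 0
        System.out.println(upperBound(9, sorted)); // 5 -> no element is greater, so list.size()
    }

    // Half-open binary search with the same contract as ResettableExternalBuffer#upperBound.
    static int upperBound(int goal, List<Integer> list) {
        int head = 0;
        int tail = list.size();
        while (head < tail) {
            int mid = (head + tail) / 2;
            if (list.get(mid) <= goal) {
                head = mid + 1;
            } else {
                tail = mid;
            }
        }
        return head;
    }
}
```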
flink_BasicArrayTypeInfo_getInfoFor_rdh | // --------------------------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@PublicEvolving
public static <X, C> BasicArrayTypeInfo<X, C> getInfoFor(Class<X> type) {
if (!type.isArray()) {
throw new InvalidTypesException("The given class is no array.");}
// basic type arrays
return ((BasicArrayTypeInfo<X, C>) (TYPES.get(type)));
} | 3.26 |
flink_AscendingTimestampExtractor_extractTimestamp_rdh | // ------------------------------------------------------------------------
    @Override
    public final long extractTimestamp(T element, long elementPrevTimestamp) {
final long newTimestamp = extractAscendingTimestamp(element);
if (newTimestamp >= this.currentTimestamp) {
this.currentTimestamp = newTimestamp;
return newTimestamp;
} else {
violationHandler.m0(newTimestamp, this.currentTimestamp);
return newTimestamp;
}
} | 3.26 |
flink_AscendingTimestampExtractor_withViolationHandler_rdh | /**
* Sets the handler for violations to the ascending timestamp order.
*
* @param handler
* The violation handler to use.
* @return This extractor.
*/
public AscendingTimestampExtractor<T> withViolationHandler(MonotonyViolationHandler handler) {
this.violationHandler = requireNonNull(handler);
return this;
} | 3.26 |
flink_ChannelStateWriter_start_rdh | /**
* No-op implementation of {@link ChannelStateWriter}.
 */
class NoOpChannelStateWriter implements ChannelStateWriter {
@Override
public void start(long checkpointId, CheckpointOptions checkpointOptions) {
} | 3.26 |
flink_BooleanConditions_trueFunction_rdh | /**
*
* @return An {@link IterativeCondition} that always returns {@code true}.
     */
    public static <T> IterativeCondition<T> trueFunction() {
return SimpleCondition.of(value -> true);
} | 3.26 |
flink_BooleanConditions_falseFunction_rdh | /**
*
* @return An {@link IterativeCondition} that always returns {@code false}.
*/
public static <T> IterativeCondition<T> falseFunction() {
return SimpleCondition.of(value -> false);
} | 3.26 |
flink_PerJobMiniClusterFactory_m0_rdh | /**
* Starts a {@link MiniCluster} and submits a job.
*/
public CompletableFuture<JobClient> m0(JobGraph jobGraph, ClassLoader userCodeClassloader) throws Exception {
MiniClusterConfiguration miniClusterConfig = getMiniClusterConfig(jobGraph.getMaximumParallelism());
        MiniCluster miniCluster = miniClusterFactory.apply(miniClusterConfig);
        miniCluster.start();
return miniCluster.submitJob(jobGraph).thenApplyAsync(FunctionUtils.uncheckedFunction(submissionResult -> {
org.apache.flink.client.ClientUtils.waitUntilJobInitializationFinished(() -> miniCluster.getJobStatus(submissionResult.getJobID()).get(), () -> miniCluster.requestJobResult(submissionResult.getJobID()).get(), userCodeClassloader);
return submissionResult;
        })).thenApply(result -> new MiniClusterJobClient(result.getJobID(), miniCluster, userCodeClassloader, MiniClusterJobClient.JobFinalizationBehavior.SHUTDOWN_CLUSTER)).whenComplete((ignored, throwable) -> {
            if (throwable != null) {
// We failed to create the JobClient and must shutdown to ensure
// cleanup.
shutDownCluster(miniCluster);
}
}).thenApply(Function.identity());
} | 3.26 |
flink_CompletedCheckpoint_registerSharedStatesAfterRestored_rdh | // ------------------------------------------------------------------------
// Shared State
// ------------------------------------------------------------------------
/**
* Register all shared states in the given registry. This method is called before the checkpoint
* is added into the store.
*
* @param sharedStateRegistry
* The registry where shared states are registered
* @param restoreMode
* the mode in which this checkpoint was restored from
*/
public void registerSharedStatesAfterRestored(SharedStateRegistry sharedStateRegistry, RestoreMode restoreMode) {
// in claim mode we should not register any shared handles
if (!props.isUnclaimed()) {
sharedStateRegistry.registerAllAfterRestored(this, restoreMode);
}
} | 3.26 |
flink_CompletedCheckpoint_checkpointsMatch_rdh | // ------------------------------------------------------------------------
// Miscellaneous
// ------------------------------------------------------------------------
public static boolean checkpointsMatch(Collection<CompletedCheckpoint> first, Collection<CompletedCheckpoint> second) {
        if (first.size() != second.size()) {
            return false;
}
List<Tuple2<Long, JobID>> firstInterestingFields = new ArrayList<>(first.size());
for (CompletedCheckpoint checkpoint : first) {
firstInterestingFields.add(new Tuple2<>(checkpoint.getCheckpointID(), checkpoint.getJobId()));
}
List<Tuple2<Long, JobID>> secondInterestingFields = new ArrayList<>(second.size());
for (CompletedCheckpoint checkpoint : second) {
secondInterestingFields.add(new Tuple2<>(checkpoint.getCheckpointID(), checkpoint.getJobId()));
}
return firstInterestingFields.equals(secondInterestingFields);
} | 3.26 |
flink_CompletedCheckpoint_getJobId_rdh | // ------------------------------------------------------------------------
// Properties
// ------------------------------------------------------------------------
public JobID getJobId() {
return job;
} | 3.26 |
flink_CompletedCheckpoint_markAsDiscarded_rdh | // ------------------------------------------------------------------------
// Discard and Dispose
// ------------------------------------------------------------------------
public DiscardObject markAsDiscarded() {
if (completedCheckpointStats != null) {
completedCheckpointStats.discard();
}
return new CompletedCheckpointDiscardObject();
} | 3.26 |