name (string, length 12–178) | code_snippet (string, length 8–36.5k) | score (float64, 3.26–3.68)
---|---|---|
flink_SharedBuffer_removeEvent_rdh
|
/**
* Removes an event from cache and state.
*
* @param eventId
* id of the event
*/
void removeEvent(EventId eventId) throws Exception {
this.eventsBufferCache.invalidate(eventId);
this.eventsBuffer.remove(eventId);
}
| 3.26 |
flink_SharedBuffer_upsertEntry_rdh
|
/**
* Inserts or updates a SharedBufferNode in the cache.
*
* @param nodeId
* id of the node
* @param entry
* SharedBufferNode
*/
void upsertEntry(NodeId nodeId, Lockable<SharedBufferNode> entry) {
this.entryCache.put(nodeId, entry);
}
| 3.26 |
flink_SharedBuffer_getEntry_rdh
|
/**
* It always returns node either from state or cache.
*
* @param nodeId
* id of the node
* @return SharedBufferNode
*/
Lockable<SharedBufferNode> getEntry(NodeId nodeId) {
try {
Lockable<SharedBufferNode> lockableFromCache = entryCache.getIfPresent(nodeId);
if (Objects.nonNull(lockableFromCache)) {
return lockableFromCache;
} else {
Lockable<SharedBufferNode>
lockableFromState = entries.get(nodeId);
if (Objects.nonNull(lockableFromState)) {
entryCache.put(nodeId, lockableFromState);
}
return lockableFromState;}
} catch (Exception ex) {
throw new WrappingRuntimeException(ex);}
}
| 3.26 |
flink_SharedBuffer_isEmpty_rdh
|
/**
* Checks whether there are no elements in the buffer.
*
* @return true if there are no elements in the buffer
* @throws Exception
* Thrown if the system cannot access the state.
*/
public boolean isEmpty() throws Exception {
return Iterables.isEmpty(eventsBufferCache.asMap().keySet()) && Iterables.isEmpty(eventsBuffer.keys());
}
| 3.26 |
flink_SharedBuffer_upsertEvent_rdh
|
/**
* Inserts or updates an event in cache.
*
* @param eventId
* id of the event
* @param event
* event body
*/
void upsertEvent(EventId eventId, Lockable<V> event) {
this.eventsBufferCache.put(eventId, event);
}
| 3.26 |
flink_SharedBuffer_removeEntry_rdh
|
/**
* Removes a SharedBufferNode from cache and state.
*
* @param nodeId
* id of the node
*/
void removeEntry(NodeId nodeId) throws Exception {
this.entryCache.invalidate(nodeId);
this.entries.remove(nodeId);
}
| 3.26 |
flink_TieredStorageResourceRegistry_clearResourceFor_rdh
|
/**
* Remove all resources for the given owner.
*
* @param owner
* identifier of the data that the resources correspond to.
*/
public void clearResourceFor(TieredStorageDataIdentifier owner) {
List<TieredStorageResource> cleanersForOwner = registeredResources.remove(owner);
if (cleanersForOwner != null) {
cleanersForOwner.forEach(TieredStorageResource::release);
}
}
| 3.26 |
flink_TieredStorageResourceRegistry_registerResource_rdh
|
/**
* Register a new resource for the given owner.
*
* @param owner
* identifier of the data that the resource corresponds to.
* @param tieredStorageResource
* the tiered storage resources to be registered.
*/
public void registerResource(TieredStorageDataIdentifier owner, TieredStorageResource tieredStorageResource) {
registeredResources.computeIfAbsent(owner, ignore -> new ArrayList<>()).add(tieredStorageResource);
}
| 3.26 |
flink_MapSerializer_isImmutableType_rdh
|
// ------------------------------------------------------------------------
// Type Serializer implementation
// ------------------------------------------------------------------------
@Override
public boolean isImmutableType() {
return false;
}
| 3.26 |
flink_MapSerializer_snapshotConfiguration_rdh
|
// --------------------------------------------------------------------------------------------
@Override
public TypeSerializerSnapshot<Map<K, V>> snapshotConfiguration() {
return new MapSerializerSnapshot<>(this);
}
| 3.26 |
flink_MapSerializer_getKeySerializer_rdh
|
// ------------------------------------------------------------------------
// MapSerializer specific properties
// ------------------------------------------------------------------------
public TypeSerializer<K> getKeySerializer() {
return keySerializer;
}
| 3.26 |
flink_RateLimitedSourceReader_start_rdh
|
// ------------------------------------------------------------------------
@Override
public void start() {
sourceReader.start();
}
| 3.26 |
flink_TypeInferenceOperandInference_inferOperandTypesOrError_rdh
|
// --------------------------------------------------------------------------------------------
private void inferOperandTypesOrError(FlinkTypeFactory typeFactory, CallContext callContext, RelDataType[] operandTypes) {
final List<DataType> expectedDataTypes;
// typed arguments have highest priority
if (typeInference.getTypedArguments().isPresent()) {
expectedDataTypes = typeInference.getTypedArguments().get();
} else {
expectedDataTypes = typeInference.getInputTypeStrategy().inferInputTypes(callContext, false).orElse(null);
}
// early out for invalid input
if ((expectedDataTypes == null) || (expectedDataTypes.size() != operandTypes.length)) {
return;
}
for (int i = 0; i < operandTypes.length; i++) {
final LogicalType inferredType = expectedDataTypes.get(i).getLogicalType();
operandTypes[i] = typeFactory.createFieldTypeFromLogicalType(inferredType);
}
}
| 3.26 |
flink_LookupCallContext_getKey_rdh
|
// --------------------------------------------------------------------------------------------
private LookupKey getKey(int pos) {
final int index = lookupKeyOrder[pos];
return lookupKeys.get(index);
}
| 3.26 |
flink_JobID_generate_rdh
|
// ------------------------------------------------------------------------
// Static factory methods
// ------------------------------------------------------------------------
/**
* Creates a new (statistically) random JobID.
*
* @return A new random JobID.
*/
public static JobID generate() {
return new JobID();
}
| 3.26 |
flink_JobID_fromHexString_rdh
|
/**
* Parses a JobID from the given string.
*
* @param hexString
* string representation of a JobID
* @return Parsed JobID
* @throws IllegalArgumentException
* if the JobID could not be parsed from the given string
*/
public static JobID fromHexString(String hexString) {
try {
return new JobID(StringUtils.hexStringToByte(hexString));
} catch (Exception e) {
throw new IllegalArgumentException((("Cannot parse JobID from \"" + hexString) + "\". The expected format is ") + "[0-9a-fA-F]{32}, e.g. fd72014d4c864993a2e5a9287b4a9c5d.", e);
}
}
| 3.26 |
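A minimal usage sketch for the generate and fromHexString snippets above (assuming Flink's standard JobID API, where toHexString() inherited from AbstractID produces the 32-character hex form):

import org.apache.flink.api.common.JobID;

public class JobIdRoundTrip {
    public static void main(String[] args) {
        // generate a random ID and serialize it to its 32-character hex form
        JobID original = JobID.generate();
        String hex = original.toHexString();

        // parsing the hex form yields an equal JobID
        JobID parsed = JobID.fromHexString(hex);
        System.out.println(original.equals(parsed)); // true

        // malformed input is rejected with IllegalArgumentException
        try {
            JobID.fromHexString("not-a-job-id");
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}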
flink_JobID_fromByteArray_rdh
|
/**
* Creates a new JobID from the given byte sequence. The byte sequence must be exactly 16 bytes
* long. The first eight bytes make up the lower part of the ID, while the next 8 bytes make up
* the upper part of the ID.
*
* @param bytes
* The byte sequence.
* @return A new JobID corresponding to the ID encoded in the bytes.
*/
public static JobID fromByteArray(byte[] bytes) {
return new JobID(bytes);
}
| 3.26 |
flink_Table_limit_rdh
|
/**
* Limits a (possibly sorted) result to the first n rows from an offset position.
*
* <p>This method is a synonym for {@link #offset(int)} followed by {@link #fetch(int)}.
*/
default Table limit(int offset, int fetch) {
return offset(offset).fetch(fetch);
}
| 3.26 |
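A hedged sketch of how the default method above is typically used (the orders table and its amount column are hypothetical; only the equivalence between limit and offset/fetch is taken from the snippet):

import static org.apache.flink.table.api.Expressions.$;
import org.apache.flink.table.api.Table;

class LimitExample {
    /** limit(offset, fetch) is shorthand for offset(offset).fetch(fetch). */
    static Table pageOfOrders(Table orders, int pageStart, int pageSize) {
        Table viaLimit = orders.orderBy($("amount").desc()).limit(pageStart, pageSize);
        Table viaOffsetFetch = orders.orderBy($("amount").desc()).offset(pageStart).fetch(pageSize);
        // both expressions describe the same plan; the first is just the shorthand form
        return viaLimit;
    }
}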
flink_AbstractMapSerializer_isImmutableType_rdh
|
// ------------------------------------------------------------------------
// Type Serializer implementation
// ------------------------------------------------------------------------
@Override
public boolean isImmutableType() {
return false;
}
| 3.26 |
flink_AbstractMapSerializer_getKeySerializer_rdh
|
// ------------------------------------------------------------------------
/**
* Returns the serializer for the keys in the map.
*
* @return The serializer for the keys in the map.
*/
public TypeSerializer<K> getKeySerializer() {
return keySerializer;
}
| 3.26 |
flink_TupleTypeInfoBase_getFieldTypes_rdh
|
/**
* Returns the field types.
*/
public TypeInformation<?>[] getFieldTypes() {
return types;
}
| 3.26 |
flink_LogicalTypeUtils_toRowType_rdh
|
/**
* Converts any logical type to a row type. Composite types are converted to a row type. Atomic
* types are wrapped into a field.
*/
public static RowType toRowType(LogicalType t) {
switch (t.getTypeRoot()) {
case ROW :
return ((RowType) (t));
case STRUCTURED_TYPE :
final StructuredType structuredType = ((StructuredType) (t));
final List<RowField> fields = structuredType.getAttributes().stream().map(attribute -> new RowField(attribute.getName(), attribute.getType(), attribute.getDescription().orElse(null))).collect(Collectors.toList());
return new RowType(structuredType.isNullable(), fields);
case DISTINCT_TYPE :
return toRowType(((DistinctType) (t)).getSourceType());
default :
return RowType.of(t);
}
}
| 3.26 |
flink_LogicalTypeUtils_renameRowFields_rdh
|
/**
* Renames the fields of the given {@link RowType}.
*/
public static RowType renameRowFields(RowType rowType, List<String> newFieldNames) {
Preconditions.checkArgument(rowType.getFieldCount() == newFieldNames.size(), "Row length and new names must match.");
final List<RowField> newFields = IntStream.range(0, rowType.getFieldCount()).mapToObj(pos -> {
final RowField oldField = rowType.getFields().get(pos);
return new RowField(newFieldNames.get(pos), oldField.getType(), oldField.getDescription().orElse(null));
}).collect(Collectors.toList());
return new RowType(rowType.isNullable(), newFields);
}
| 3.26 |
flink_LogicalTypeUtils_toInternalConversionClass_rdh
|
/**
* Returns the conversion class for the given {@link LogicalType} that is used by the table
* runtime as internal data structure.
*
* @see RowData
*/
public static Class<?> toInternalConversionClass(LogicalType type) {
// ordered by type root definition
switch (type.getTypeRoot()) {
case CHAR :
case VARCHAR :
return StringData.class;
case BOOLEAN :
return Boolean.class;
case BINARY :
case VARBINARY :
return byte[].class;
case DECIMAL :
return DecimalData.class;
case TINYINT :
return Byte.class;
case SMALLINT :
return Short.class;
case INTEGER :
case DATE :
case TIME_WITHOUT_TIME_ZONE :
case INTERVAL_YEAR_MONTH :
return Integer.class;
case BIGINT :
case INTERVAL_DAY_TIME :
return Long.class;
case FLOAT :
return Float.class;
case DOUBLE :
return Double.class;
case TIMESTAMP_WITHOUT_TIME_ZONE :
case TIMESTAMP_WITH_LOCAL_TIME_ZONE :
return TimestampData.class;
case TIMESTAMP_WITH_TIME_ZONE :
throw new UnsupportedOperationException("Unsupported type: " + type);
case ARRAY :
return ArrayData.class;
case MULTISET :
case MAP :
return MapData.class;
case ROW :
case STRUCTURED_TYPE :
return RowData.class;
case DISTINCT_TYPE :
return toInternalConversionClass(((DistinctType) (type)).getSourceType());
case RAW :
return RawValueData.class;
case NULL :
return Object.class;
case SYMBOL :
case UNRESOLVED :
default :
throw new IllegalArgumentException("Illegal type: " + type);
}
}
| 3.26 |
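For illustration, a hedged sketch of two of the mappings above (the class names follow the switch; the concrete LogicalType constructors are assumed from Flink's table type system):

import org.apache.flink.table.data.StringData;
import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.VarCharType;
import org.apache.flink.table.types.logical.utils.LogicalTypeUtils;

class InternalClassExample {
    static void show() {
        // CHAR/VARCHAR are backed by StringData internally
        Class<?> varcharClass = LogicalTypeUtils.toInternalConversionClass(new VarCharType(10));
        // INTEGER (and DATE, TIME, INTERVAL_YEAR_MONTH) map to java.lang.Integer
        Class<?> intClass = LogicalTypeUtils.toInternalConversionClass(new IntType());
        System.out.println(varcharClass == StringData.class && intClass == Integer.class); // true
    }
}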
flink_LogicalTypeUtils_m1_rdh
|
/**
* Returns a unique name for an atomic type.
*/
public static String m1(List<String> existingNames) {
int i = 0;
String v3 = ATOMIC_FIELD_NAME;
while ((null != existingNames) && existingNames.contains(v3)) {
v3 = (ATOMIC_FIELD_NAME + "_") + (i++);
}
return v3;
}
| 3.26 |
flink_SubpartitionDiskCacheManager_removeAllBuffers_rdh
|
/**
* Note that allBuffers can be touched by multiple threads.
*/
List<Tuple2<Buffer, Integer>> removeAllBuffers() {
synchronized(allBuffers) {
List<Tuple2<Buffer, Integer>> targetBuffers = new ArrayList<>(allBuffers);
allBuffers.clear();
return targetBuffers;
}
}
| 3.26 |
flink_SubpartitionDiskCacheManager_startSegment_rdh
|
// ------------------------------------------------------------------------
// Called by DiskCacheManager
// ------------------------------------------------------------------------
void startSegment(int segmentId) {
synchronized(allBuffers) {
this.segmentId = segmentId;
}
}
| 3.26 |
flink_SubpartitionDiskCacheManager_addBuffer_rdh
|
/**
* This method is only called by the task thread.
*/
private void addBuffer(Buffer buffer) {
synchronized(allBuffers) {
allBuffers.add(new Tuple2<>(buffer, bufferIndex));
}
bufferIndex++;
}
| 3.26 |
flink_HighAvailabilityServicesUtils_getWebMonitorAddress_rdh
|
/**
* Get address of web monitor from configuration.
*
* @param configuration
* Configuration contains those for WebMonitor.
* @param resolution
* Whether to try address resolution of the given hostname or not. This allows
* to fail fast in case that the hostname cannot be resolved.
* @return Address of WebMonitor.
*/
public static String getWebMonitorAddress(Configuration configuration, AddressResolution resolution) throws UnknownHostException {
final String address = checkNotNull(configuration.getString(RestOptions.ADDRESS), "%s must be set", RestOptions.ADDRESS.key());
if (resolution == AddressResolution.TRY_ADDRESS_RESOLUTION) {
// Fail fast if the hostname cannot be resolved
// noinspection ResultOfMethodCallIgnored
InetAddress.getByName(address);
}
final int port = configuration.getInteger(RestOptions.PORT);
final boolean enableSSL = SecurityOptions.isRestSSLEnabled(configuration);
final String protocol = (enableSSL) ? "https://" : "http://";
return String.format("%s%s:%s", protocol, address, port);
}
| 3.26 |
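A hedged sketch of the address assembly performed above (host name and port are placeholder values; the Flink configuration classes are assumed to be the standard ones used in the snippet):

import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.RestOptions;
import org.apache.flink.configuration.SecurityOptions;

class WebMonitorAddressExample {
    static String assemble() {
        Configuration conf = new Configuration();
        conf.setString(RestOptions.ADDRESS, "jobmanager.example.com"); // hypothetical host
        conf.setInteger(RestOptions.PORT, 8081);

        // same formula as getWebMonitorAddress: protocol + rest.address + ":" + rest.port
        String protocol = SecurityOptions.isRestSSLEnabled(conf) ? "https://" : "http://";
        return String.format("%s%s:%s", protocol,
                conf.getString(RestOptions.ADDRESS), conf.getInteger(RestOptions.PORT));
        // -> "http://jobmanager.example.com:8081"
    }
}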
flink_HighAvailabilityServicesUtils_createAvailableOrEmbeddedServices_rdh
|
/**
* Utils class to instantiate {@link HighAvailabilityServices} implementations.
*/
public class HighAvailabilityServicesUtils {
public static HighAvailabilityServices createAvailableOrEmbeddedServices(Configuration config, Executor executor, FatalErrorHandler fatalErrorHandler) throws Exception {
HighAvailabilityMode highAvailabilityMode = HighAvailabilityMode.fromConfig(config);
switch (highAvailabilityMode) {
case NONE :
return new EmbeddedHaServices(executor);
case ZOOKEEPER :
return createZooKeeperHaServices(config, executor, fatalErrorHandler);
case KUBERNETES :
return createCustomHAServices("org.apache.flink.kubernetes.highavailability.KubernetesHaServicesFactory", config, executor);
case FACTORY_CLASS :
return createCustomHAServices(config, executor);
default :
throw new Exception(("High availability mode " + highAvailabilityMode) + " is not supported.");
}
}
| 3.26 |
flink_HighAvailabilityServicesUtils_getJobManagerAddress_rdh
|
/**
* Returns the JobManager's hostname and port extracted from the given {@link Configuration}.
*
* @param configuration
* Configuration to extract the JobManager's address from
* @return The JobManager's hostname and port
* @throws ConfigurationException
* if the JobManager's address cannot be extracted from the
* configuration
*/
public static Tuple2<String, Integer> getJobManagerAddress(Configuration configuration) throws ConfigurationException {
final String hostname = configuration.getString(JobManagerOptions.ADDRESS);
final int port = configuration.getInteger(JobManagerOptions.PORT);
if (hostname == null) {
throw new ConfigurationException(("Config parameter '" + JobManagerOptions.ADDRESS) + "' is missing (hostname/address of JobManager to connect to).");
}
if ((port <= 0) || (port >= 65536)) {
throw new ConfigurationException(((("Invalid value for '" + JobManagerOptions.PORT) + "' (port of the JobManager actor system) : ") + port) + ". it must be greater than 0 and less than 65536.");
}
return Tuple2.of(hostname, port);
}
| 3.26 |
flink_HighAvailabilityServicesUtils_getClusterHighAvailableStoragePath_rdh
|
/**
* Gets the cluster high available storage path from the provided configuration.
*
* <p>The format is {@code HA_STORAGE_PATH/HA_CLUSTER_ID}.
*
* @param configuration
* containing the configuration values
* @return Path under which all highly available cluster artifacts are being stored
*/
public static Path getClusterHighAvailableStoragePath(Configuration configuration) {
final String storagePath = configuration.getValue(HighAvailabilityOptions.HA_STORAGE_PATH);
if (isNullOrWhitespaceOnly(storagePath)) {
throw new IllegalConfigurationException("Configuration is missing the mandatory parameter: " + HighAvailabilityOptions.HA_STORAGE_PATH);
}
final Path path;
try {
path = new Path(storagePath);
} catch (Exception e) {
throw new IllegalConfigurationException(("Invalid path for highly available storage (" + HighAvailabilityOptions.HA_STORAGE_PATH.key()) + ')', e);
}
final String clusterId = configuration.getValue(HighAvailabilityOptions.HA_CLUSTER_ID);
final Path clusterStoragePath;
try {
clusterStoragePath = new Path(path, clusterId);
} catch (Exception e) {
throw new IllegalConfigurationException(String.format("Cannot create cluster high available storage path '%s/%s'. This indicates that an invalid cluster id (%s) has been specified.", storagePath, clusterId, HighAvailabilityOptions.HA_CLUSTER_ID.key()), e);
}
return clusterStoragePath;
}
| 3.26 |
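For illustration, with hypothetical values, the resulting path is simply HA_STORAGE_PATH/HA_CLUSTER_ID (a sketch, not the method's actual code):

import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.HighAvailabilityOptions;
import org.apache.flink.core.fs.Path;

class HaStoragePathExample {
    static Path example() {
        Configuration conf = new Configuration();
        conf.setString(HighAvailabilityOptions.HA_STORAGE_PATH, "hdfs:///flink/ha"); // hypothetical storage dir
        conf.setString(HighAvailabilityOptions.HA_CLUSTER_ID, "my-session-cluster"); // hypothetical cluster id

        // equivalent to what getClusterHighAvailableStoragePath(conf) computes:
        return new Path(new Path(conf.getValue(HighAvailabilityOptions.HA_STORAGE_PATH)),
                conf.getValue(HighAvailabilityOptions.HA_CLUSTER_ID));
        // -> hdfs:///flink/ha/my-session-cluster
    }
}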
flink_KerberosUtils_getKrb5LoginModuleName_rdh
|
/* Return the Kerberos login module name */
public static String getKrb5LoginModuleName() {
return System.getProperty("java.vendor").contains("IBM") ? "com.ibm.security.auth.module.Krb5LoginModule" : "com.sun.security.auth.module.Krb5LoginModule";
}
| 3.26 |
flink_RequestStatusOverview_readResolve_rdh
|
/**
* Preserve the singleton property by returning the singleton instance
*/
private Object readResolve() {
return INSTANCE;
}
| 3.26 |
flink_RequestStatusOverview_hashCode_rdh
|
// ------------------------------------------------------------------------
@Override
public int hashCode() {
return RequestStatusOverview.class.hashCode();
}
| 3.26 |
flink_StreamElementSerializer_snapshotConfiguration_rdh
|
// --------------------------------------------------------------------------------------------
// Serializer configuration snapshotting & compatibility
//
// This serializer may be used by Flink internal operators that need to checkpoint
// buffered records. Therefore, it may be part of managed state and need to implement
// the configuration snapshot and compatibility methods.
// --------------------------------------------------------------------------------------------
@Override
public StreamElementSerializerSnapshot<T> snapshotConfiguration() {
return new StreamElementSerializerSnapshot<>(this);
}
| 3.26 |
flink_StreamElementSerializer_isImmutableType_rdh
|
// ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
@Override
public boolean isImmutableType() {
return false;
}
| 3.26 |
flink_StreamElementSerializer_equals_rdh
|
// ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
@Override
public boolean equals(Object obj) {
if (obj instanceof StreamElementSerializer) {
StreamElementSerializer<?> other = ((StreamElementSerializer<?>) (obj));
return typeSerializer.equals(other.typeSerializer);
} else {
return false;
}
}
| 3.26 |
flink_StreamElementSerializer_createInstance_rdh
|
// ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
@Override
public StreamRecord<T> createInstance() {
return new StreamRecord<T>(typeSerializer.createInstance());
}
| 3.26 |
flink_OperatorCoordinatorCheckpointContext_notifyCheckpointAborted_rdh
|
/**
* We override the method here to remove the checked exception. Please check the Javadoc of
* {@link CheckpointListener#notifyCheckpointAborted(long)} for the detailed semantics of this
* method.
*/
@Override
default void notifyCheckpointAborted(long checkpointId) {
}
| 3.26 |
flink_CastRuleProvider_cast_rdh
|
/**
* Create a {@link CastExecutor} and execute the cast on the provided {@code value}. Fails with
* {@link NullPointerException} if the rule cannot be resolved, or with an exception from
* the {@link CastExecutor} itself if the rule can fail.
*/
@SuppressWarnings("unchecked")
@Nullable
public static Object cast(CastRule.Context context, LogicalType inputLogicalType, LogicalType targetLogicalType, Object value) {
CastExecutor<Object, Object> castExecutor = ((CastExecutor<Object, Object>) (CastRuleProvider.create(context, inputLogicalType, targetLogicalType)));
if (castExecutor == null) {
throw new NullPointerException((("Unsupported casting from " + inputLogicalType) + " to ") + targetLogicalType);
}
return castExecutor.cast(value);
}
| 3.26 |
flink_CastRuleProvider_exists_rdh
|
/**
* Returns {@code true} if and only if a {@link CastRule} can be resolved for the provided input
* type and target type.
*/
public static boolean exists(LogicalType inputType, LogicalType targetType) {
return resolve(inputType, targetType) != null;
}
| 3.26 |
flink_CastRuleProvider_resolve_rdh
|
/* ------- Entrypoint ------- */
/**
* Resolve a {@link CastRule} for the provided input type and target type. Returns {@code null}
* if no rule can be resolved.
*/
@Nullable
public static CastRule<?, ?> resolve(LogicalType inputType, LogicalType targetType) {
return INSTANCE.internalResolve(inputType, targetType);
}
| 3.26 |
flink_CastRuleProvider_canFail_rdh
|
/**
* Resolves the rule and returns the result of {@link CastRule#canFail(LogicalType,
* LogicalType)}. Fails with {@link NullPointerException} if the rule cannot be resolved.
*/
public static boolean canFail(LogicalType inputType, LogicalType targetType) {
return Preconditions.checkNotNull(resolve(inputType, targetType), "Cast rule cannot be resolved").canFail(inputType, targetType);
}
| 3.26 |
flink_CastRuleProvider_create_rdh
|
/**
* Create a {@link CastExecutor} for the provided input type and target type. Returns {@code null} if no rule can be resolved.
*
* @see CastRule#create(CastRule.Context, LogicalType, LogicalType)
*/
@Nullable
public static CastExecutor<?, ?> create(CastRule.Context context, LogicalType inputLogicalType, LogicalType targetLogicalType) {
CastRule<?, ?> rule = INSTANCE.internalResolve(inputLogicalType, targetLogicalType);
if (rule == null) {
return null;
}
return rule.create(context, inputLogicalType, targetLogicalType);
}
/**
* Create a {@link CastCodeBlock} for the provided input type and target type. Returns {@code null} if no rule can be resolved or the resolved rule is not instance of {@link CodeGeneratorCastRule}
| 3.26 |
flink_CastRuleProvider_generateAlwaysNonNullCodeBlock_rdh
|
/**
* This method wraps {@link #generateCodeBlock(CodeGeneratorCastRule.Context, String, String,
* LogicalType, LogicalType)}, but adds the assumption that the inputTerm is always non-null.
* Used by {@link CodeGeneratorCastRule}s which checks for nullability, rather than deferring
* the check to the rules.
*/
static CastCodeBlock generateAlwaysNonNullCodeBlock(CodeGeneratorCastRule.Context context, String inputTerm, LogicalType inputLogicalType, LogicalType targetLogicalType) {
if (inputLogicalType instanceof NullType) {
return generateCodeBlock(context, inputTerm, "true", inputLogicalType, targetLogicalType);
}
return generateCodeBlock(context, inputTerm, "false", inputLogicalType.copy(false), targetLogicalType);
}
| 3.26 |
flink_EncodingUtils_escapeJava_rdh
|
// --------------------------------------------------------------------------------------------
// Java String Escaping
//
// copied from o.a.commons.lang.StringEscapeUtils (commons-lang:2.4)
// but without escaping forward slashes.
// --------------------------------------------------------------------------------------------
/**
* Escapes the characters in a <code>String</code> using Java String rules.
*
* <p>Deals correctly with quotes and control-chars (tab, backslash, cr, ff, etc.)
*
* <p>So a tab becomes the characters <code>'\\'</code> and <code>'t'</code>.
*
* <p>The only difference between Java strings and JavaScript strings is that in JavaScript, a
* single quote must be escaped.
*
* <p>Example:
*
* <pre>
* input string: He didn't say, "Stop!"
* output string: He didn't say, \"Stop!\"
* </pre>
*
* @param str
* String to escape values in, may be null
* @return String with escaped values, <code>null</code> if null string input
*/
public static String escapeJava(String str) {
return escapeJavaStyleString(str, false);
}
| 3.26 |
flink_EncodingUtils_repeat_rdh
|
/**
* Returns padding using the specified delimiter repeated to a given length.
*
* <pre>
* StringUtils.repeat('e', 0) = ""
* StringUtils.repeat('e', 3) = "eee"
* StringUtils.repeat('e', -2) = ""
* </pre>
*
* <p>Note: this method doesn't support padding with <a
* href="http://www.unicode.org/glossary/#supplementary_character">Unicode Supplementary
* Characters</a> as they require a pair of {@code char}s to be represented. If you are needing
* to support full I18N of your applications consider using {@link #repeat(String, int)}
* instead.
*
* @param ch
* character to repeat
* @param repeat
* number of times to repeat char, negative treated as zero
* @return String with repeated character
* @see #repeat(String, int)
*/
public static String repeat(final char ch, final int repeat) {
final char[] buf = new char[repeat];
for (int i = repeat - 1; i >= 0; i--) {
buf[i] = ch;
}
return new String(buf);
}
| 3.26 |
flink_EncodingUtils_decodeHex_rdh
|
/**
* Converts an array of characters representing hexadecimal values into an array of bytes of
* those same values. The returned array will be half the length of the passed array, as it
* takes two characters to represent any given byte. An exception is thrown if the passed char
* array has an odd number of elements.
*
* <p>Copied from
* https://github.com/apache/commons-codec/blob/master/src/main/java/org/apache/commons/codec/binary/Hex.java.
*
* @param str
* An array of characters containing hexadecimal digits
* @return A byte array to contain the binary data decoded from the supplied char array.
* @throws TableException
* Thrown if an odd number of characters or illegal characters are
* supplied
*/
public static byte[] decodeHex(final String str) throws TableException {
final int len = str.length();
if ((len & 0x1) != 0) {
throw new TableException("Odd number of characters.");
}
final int outLen = len >> 1;
final byte[] out = new byte[outLen];
// two characters form the hex value.
for (int i = 0, j = 0; j < len; i++) {
int f = toDigit(str.charAt(j), j) << 4;
j++;
f = f | toDigit(str.charAt(j), j);
j++;
out[i] = ((byte) (f & 0xff));
}
return out;
}
| 3.26 |
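A small usage sketch of the decoding above (EncodingUtils and TableException are assumed to be the Flink table utility classes; the hex inputs are arbitrary test values):

import org.apache.flink.table.api.TableException;
import org.apache.flink.table.utils.EncodingUtils;

class DecodeHexExample {
    static void show() {
        // "cafe" decodes to the two bytes 0xCA 0xFE
        byte[] decoded = EncodingUtils.decodeHex("cafe");
        System.out.println(decoded[0] == (byte) 0xCA && decoded[1] == (byte) 0xFE); // true

        // an odd number of characters is rejected
        try {
            EncodingUtils.decodeHex("abc");
        } catch (TableException e) {
            System.out.println(e.getMessage()); // "Odd number of characters."
        }
    }
}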
flink_EncodingUtils_toDigit_rdh
|
/**
* Converts a hexadecimal character to an integer.
*
* <p>Copied from
* https://github.com/apache/commons-codec/blob/master/src/main/java/org/apache/commons/codec/binary/Hex.java.
*
* @param ch
* A character to convert to an integer digit
* @param idx
* The index of the character in the source
* @return An integer
* @throws TableException
* Thrown if ch is an illegal hex character
*/
private static int toDigit(final char ch, final int idx) throws TableException {
final int digit = Character.digit(ch, 16);
if (digit == (-1)) {
throw new TableException(((("Illegal hexadecimal character: [" + ch) + "] at index: [") + idx) + "]");
}
return digit;
}
| 3.26 |
flink_StringData_fromString_rdh
|
// ------------------------------------------------------------------------------------------
// Construction Utilities
// ------------------------------------------------------------------------------------------
/**
* Creates an instance of {@link StringData} from the given {@link String}.
*/
static StringData fromString(String str) {
return BinaryStringData.fromString(str);
}
| 3.26 |
flink_EmptyFieldsCountAccumulator_updateResultVector_rdh
|
/**
* Increases the result vector component at the specified position by the specified delta.
*/
private void updateResultVector(int position, int delta) {
// inflate the vector to contain the given position
while (this.resultVector.size() <= position) {
this.resultVector.add(0);
}
// increment the component value
final int component = this.resultVector.get(position);
this.resultVector.set(position, component + delta);
}
| 3.26 |
flink_EmptyFieldsCountAccumulator_add_rdh
|
/**
* Increases the result vector component at the specified position by 1.
*/
@Override
public void add(Integer position) {
updateResultVector(position, 1);
}
| 3.26 |
flink_EmptyFieldsCountAccumulator_getDataSet_rdh
|
// UTIL METHODS
// *************************************************************************
@SuppressWarnings("unchecked")
private static DataSet<StringTriple> getDataSet(ExecutionEnvironment env,
ParameterTool params) {
if (params.has("input")) {
return env.readCsvFile(params.get("input")).fieldDelimiter(";").pojoType(EmptyFieldsCountAccumulator.StringTriple.class);
} else {
System.out.println("Executing EmptyFieldsCountAccumulator example with default input data set.");
System.out.println("Use --input to specify file input.");
return env.fromCollection(getExampleInputTuples());
}
}
| 3.26 |
flink_AvroInputFormat_getProducedType_rdh
|
// --------------------------------------------------------------------------------------------
// Typing
// --------------------------------------------------------------------------------------------
@Override
public TypeInformation<E> getProducedType() {
return TypeExtractor.getForClass(this.avroValueType);
}
| 3.26 |
flink_AvroInputFormat_getCurrentState_rdh
|
// --------------------------------------------------------------------------------------------
// Checkpointing
// --------------------------------------------------------------------------------------------
@Override
public Tuple2<Long, Long> getCurrentState() throws IOException {
return new Tuple2<>(this.lastSync, this.recordsReadSinceLastSync);
}
| 3.26 |
flink_AvroInputFormat_setReuseAvroValue_rdh
|
/**
* Sets the flag whether to reuse the Avro value instance for all records. By default, the input
* format reuses the Avro value.
*
* @param reuseAvroValue
* True, if the input format should reuse the Avro value instance, false
* otherwise.
*/
public void setReuseAvroValue(boolean reuseAvroValue) {
this.reuseAvroValue = reuseAvroValue;
}
| 3.26 |
flink_AvroInputFormat_open_rdh
|
// --------------------------------------------------------------------------------------------
// Input Format Methods
// --------------------------------------------------------------------------------------------
@Override
public void open(FileInputSplit split) throws IOException {
super.open(split);
f0 = initReader(split);
f0.sync(split.getStart());
lastSync = f0.previousSync();
}
| 3.26 |
flink_AvroInputFormat_setUnsplittable_rdh
|
/**
* If set, the InputFormat will only read entire files.
*/
public void setUnsplittable(boolean unsplittable) {
this.unsplittable = unsplittable;
}
| 3.26 |
flink_TestingSplitEnumeratorContext_metricGroup_rdh
|
// ------------------------------------------------------------------------
// SplitEnumeratorContext methods
// ------------------------------------------------------------------------
@Override
public SplitEnumeratorMetricGroup metricGroup() {
return UnregisteredMetricsGroup.createSplitEnumeratorMetricGroup();
}
| 3.26 |
flink_TestingSplitEnumeratorContext_triggerAllActions_rdh
|
// ------------------------------------------------------------------------
// access to events / properties / execution
// ------------------------------------------------------------------------
public void triggerAllActions() {
executor.triggerPeriodicScheduledTasks();
executor.triggerAll();
}
| 3.26 |
flink_DynamicSinkUtils_convertExternalToRel_rdh
|
/**
* Converts an external sink (i.e. further {@link DataStream} transformations) to a {@link RelNode}.
*/
public static RelNode convertExternalToRel(FlinkRelBuilder relBuilder, RelNode input, ExternalModifyOperation externalModifyOperation) {
final DynamicTableSink tableSink = new ExternalDynamicSink(externalModifyOperation.getChangelogMode().orElse(null), externalModifyOperation.getPhysicalDataType());
// targetColumns
return convertSinkToRel(relBuilder, input, Collections.emptyMap(), externalModifyOperation.getContextResolvedTable(), Collections.emptyMap(), null, false, tableSink);
}
| 3.26 |
flink_DynamicSinkUtils_addExtraMetaCols_rdh
|
/**
* Add extra meta columns for underlying table scan, return a new resolve schema after adding
* extra meta columns.
*/
private static ResolvedSchema addExtraMetaCols(LogicalTableModify tableModify, LogicalTableScan tableScan, String tableDebugName, List<MetadataColumn> metadataColumns, FlinkTypeFactory typeFactory) {
final TableSourceTable sourceTable = tableScan.getTable().unwrap(TableSourceTable.class);
DynamicTableSource dynamicTableSource = sourceTable.tableSource();
// get old schema and new schema after add some cols
ResolvedSchema oldSchema = sourceTable.contextResolvedTable().getResolvedSchema();
List<Column> newColumns = new ArrayList<>(oldSchema.getColumns());
newColumns.addAll(metadataColumns);
// get the new resolved schema after adding extra meta columns
ResolvedSchema resolvedSchema = ResolvedSchema.of(newColumns);
List<RelDataTypeField> oldFields = sourceTable.getRowType().getFieldList();
List<RelDataTypeField> newFields = new ArrayList<>(sourceTable.getRowType().getFieldList());
for (int i = 0; i < metadataColumns.size(); i++) {
MetadataColumn column = metadataColumns.get(i);
// add a new field
newFields.add(new RelDataTypeFieldImpl(column.getName(), oldFields.size() + i, typeFactory.createFieldTypeFromLogicalType(column.getDataType().getLogicalType())));
}
// create a copy for TableSourceTable with new resolved schema
TableSourceTable newTableSourceTab = sourceTable.copy(dynamicTableSource, sourceTable.contextResolvedTable().copy(resolvedSchema), new RelRecordType(StructKind.FULLY_QUALIFIED, newFields, false), sourceTable.abilitySpecs());
// create a copy for table scan with new TableSourceTable
LogicalTableScan newTableScan = new LogicalTableScan(tableScan.getCluster(), tableScan.getTraitSet(), tableScan.getHints(), newTableSourceTab);
Project project = ((Project) (tableModify.getInput()));
// replace with the new table scan
if (project.getInput() instanceof LogicalFilter) {
LogicalFilter logicalFilter = ((LogicalFilter) (project.getInput()));
project.replaceInput(0, logicalFilter.copy(logicalFilter.getTraitSet(), newTableScan, logicalFilter.getCondition()));
} else {
project.replaceInput(0, newTableScan);
}
// validate and apply metadata
// TODO FLINK-33083 we should not ignore the produced abilities but actually put those into
// the table scan
DynamicSourceUtils.validateAndApplyMetadata(tableDebugName, resolvedSchema, newTableSourceTab.tableSource(), new ArrayList<>());
return resolvedSchema;
}
| 3.26 |
flink_DynamicSinkUtils_convertSinkToRel_rdh
|
/**
* Converts a given {@link DynamicTableSink} to a {@link RelNode}. It adds helper projections if
* necessary.
*/
public static RelNode convertSinkToRel(FlinkRelBuilder relBuilder, RelNode input, SinkModifyOperation sinkModifyOperation, DynamicTableSink sink) {
return convertSinkToRel(relBuilder, input, sinkModifyOperation.getDynamicOptions(), sinkModifyOperation.getContextResolvedTable(), sinkModifyOperation.getStaticPartitions(), sinkModifyOperation.getTargetColumns(), sinkModifyOperation.isOverwrite(), sink);
}
| 3.26 |
flink_DynamicSinkUtils_prepareDynamicSink_rdh
|
/**
* Prepares the given {@link DynamicTableSink}. It checks whether the sink is compatible with the
* INSERT INTO clause and applies initial parameters.
*/
private static void prepareDynamicSink(String tableDebugName, Map<String, String> staticPartitions, boolean isOverwrite, DynamicTableSink sink, ResolvedCatalogTable table, List<SinkAbilitySpec> sinkAbilitySpecs) {
validatePartitioning(tableDebugName, staticPartitions, sink, table.getPartitionKeys());
validateAndApplyOverwrite(tableDebugName, isOverwrite, sink, sinkAbilitySpecs);
validateAndApplyMetadata(tableDebugName, sink, table.getResolvedSchema(), sinkAbilitySpecs);
}
| 3.26 |
flink_DynamicSinkUtils_convertPredicateToNegative_rdh
|
/**
* Convert the predicate in WHERE clause to the negative predicate.
*/
private static void convertPredicateToNegative(LogicalTableModify tableModify) {
RexBuilder rexBuilder = tableModify.getCluster().getRexBuilder();
RelNode input = tableModify.getInput();
LogicalFilter newFilter;
// if the input is a table scan, there's no predicate which means it's always true
// the negative predicate should be false
if (input.getInput(0) instanceof LogicalTableScan) {
newFilter = LogicalFilter.create(input.getInput(0), rexBuilder.makeLiteral(false));
} else {
LogicalFilter filter = ((LogicalFilter) (input.getInput(0)));
// create a filter with negative predicate
RexNode complementFilter = rexBuilder.makeCall(filter.getCondition().getType(), FlinkSqlOperatorTable.NOT, Collections.singletonList(filter.getCondition()));
newFilter = filter.copy(filter.getTraitSet(), filter.getInput(), complementFilter);
}
// replace with the new filter
input.replaceInput(0, newFilter);
}
| 3.26 |
flink_DynamicSinkUtils_getPhysicalColumnIndices_rdh
|
/**
* Returns the indices from {@code colIndexes} that belong to physical columns.
*/
private static int[] getPhysicalColumnIndices(List<Integer> colIndexes, ResolvedSchema schema) {
return colIndexes.stream().filter(i -> schema.getColumns().get(i).isPhysical()).mapToInt(i -> i).toArray();
}
| 3.26 |
flink_DynamicSinkUtils_createRequiredMetadataColumns_rdh
|
/**
* Returns a list of required metadata columns. Ordered by the iteration order of {@link SupportsWritingMetadata#listWritableMetadata()}.
*
* <p>This method assumes that sink and schema have been validated via {@link #prepareDynamicSink}.
*/
private static List<MetadataColumn> createRequiredMetadataColumns(ResolvedSchema schema, DynamicTableSink sink) {
final List<Column> tableColumns = schema.getColumns();
final List<Integer> metadataColumns = extractPersistedMetadataColumns(schema);
Map<String, MetadataColumn> metadataKeysToMetadataColumns = new HashMap<>();
for (Integer columnIndex : metadataColumns) {
MetadataColumn metadataColumn = ((MetadataColumn) (tableColumns.get(columnIndex)));
String metadataKey = metadataColumn.getMetadataKey().orElse(metadataColumn.getName());
// After resolving, every metadata column has the unique metadata key.
metadataKeysToMetadataColumns.put(metadataKey, metadataColumn);
}
final Map<String, DataType> metadataMap = extractMetadataMap(sink);
return metadataMap.keySet().stream().filter(metadataKeysToMetadataColumns::containsKey).map(metadataKeysToMetadataColumns::get).collect(Collectors.toList());
}
| 3.26 |
flink_DynamicSinkUtils_validateSchemaAndApplyImplicitCast_rdh
|
/**
* Checks if the given query can be written into the given sink type.
*
* <p>It checks whether field types are compatible (types should be equal including precisions).
* If types are not compatible, but can be implicitly cast, a cast projection will be applied.
* Otherwise, an exception will be thrown.
*/
private static RelNode validateSchemaAndApplyImplicitCast(RelNode query, RowType sinkType, String tableDebugName, FlinkTypeFactory typeFactory) {
final RowType queryType = FlinkTypeFactory.toLogicalRowType(query.getRowType());
final List<RowField> queryFields = queryType.getFields();
final List<RowField> sinkFields = sinkType.getFields();
if (queryFields.size() != sinkFields.size()) {
throw createSchemaMismatchException("Different number of columns.", tableDebugName, queryFields, sinkFields);
}
boolean requiresCasting = false;
for (int i = 0; i < sinkFields.size(); i++) {
final LogicalType queryColumnType = queryFields.get(i).getType();
final LogicalType sinkColumnType = sinkFields.get(i).getType();
if (!supportsImplicitCast(queryColumnType, sinkColumnType)) {
throw createSchemaMismatchException(String.format("Incompatible types for sink column '%s' at position %s.", sinkFields.get(i).getName(), i), tableDebugName, queryFields, sinkFields);
}
if (!supportsAvoidingCast(queryColumnType, sinkColumnType)) {
requiresCasting = true;
}
}
if (requiresCasting) {
final RelDataType castRelDataType = typeFactory.buildRelNodeRowType(sinkType);
return RelOptUtil.createCastRel(query, castRelDataType, true);
}
return query;
}
| 3.26 |
flink_DynamicSinkUtils_createConsumedType_rdh
|
/**
* Returns the {@link DataType} that a sink should consume as the output from the runtime.
*
* <p>The format looks as follows: {@code PHYSICAL COLUMNS + PERSISTED METADATA COLUMNS}
*/
private static RowType createConsumedType(ResolvedSchema schema, DynamicTableSink sink) {
final Map<String, DataType> metadataMap = extractMetadataMap(sink);
final Stream<RowField> physicalFields = schema.getColumns().stream().filter(Column::isPhysical).map(c -> new RowField(c.getName(), c.getDataType().getLogicalType()));
// Use the alias to ensure that physical and metadata columns don't collide.
final Stream<RowField> metadataFields = createRequiredMetadataColumns(schema, sink).stream().map(column -> new RowField(column.getName(), metadataMap.get(column.getMetadataKey().orElse(column.getName())).getLogicalType()));
final List<RowField> rowFields = Stream.concat(physicalFields, metadataFields).collect(Collectors.toList());
return new RowType(false, rowFields);
}
| 3.26 |
flink_DynamicSinkUtils_convertCollectToRel_rdh
|
/**
* Converts an {@link TableResult#collect()} sink to a {@link RelNode}.
*/
public static RelNode convertCollectToRel(FlinkRelBuilder relBuilder, RelNode input, CollectModifyOperation collectModifyOperation, ReadableConfig configuration, ClassLoader classLoader) {
final DataTypeFactory dataTypeFactory = unwrapContext(relBuilder).getCatalogManager().getDataTypeFactory();
final ResolvedSchema childSchema = collectModifyOperation.getChild().getResolvedSchema();
final ResolvedSchema schema = ResolvedSchema.physical(childSchema.getColumnNames(), childSchema.getColumnDataTypes());
final ResolvedCatalogTable catalogTable = new ResolvedCatalogTable(new ExternalCatalogTable(Schema.newBuilder().fromResolvedSchema(schema).build()), schema);
final ContextResolvedTable contextResolvedTable = ContextResolvedTable.anonymous("collect", catalogTable);
final DataType consumedDataType = fixCollectDataType(dataTypeFactory, schema);
final String zone = configuration.get(TableConfigOptions.LOCAL_TIME_ZONE);
final ZoneId zoneId = (TableConfigOptions.LOCAL_TIME_ZONE.defaultValue().equals(zone)) ? ZoneId.systemDefault() : ZoneId.of(zone);
final CollectDynamicSink tableSink = new CollectDynamicSink(contextResolvedTable.getIdentifier(), consumedDataType, configuration.get(CollectSinkOperatorFactory.MAX_BATCH_SIZE), configuration.get(CollectSinkOperatorFactory.SOCKET_TIMEOUT), classLoader, zoneId, configuration.get(ExecutionConfigOptions.TABLE_EXEC_LEGACY_CAST_BEHAVIOUR).isEnabled());
collectModifyOperation.setSelectResultProvider(tableSink.getSelectResultProvider());
collectModifyOperation.setConsumedDataType(consumedDataType);
// dynamicOptions
// staticPartitions
// targetColumns
return convertSinkToRel(relBuilder, input, Collections.emptyMap(), contextResolvedTable, Collections.emptyMap(), null, false, tableSink);
}
| 3.26 |
flink_DynamicSinkUtils_fixCollectDataType_rdh
|
// --------------------------------------------------------------------------------------------
/**
* Temporary solution until we drop legacy types.
*/
private static DataType fixCollectDataType(DataTypeFactory dataTypeFactory, ResolvedSchema schema) {
final DataType fixedDataType = DataTypeUtils.transform(dataTypeFactory, schema.toSourceRowDataType(), TypeTransformations.legacyRawToTypeInfoRaw(),
TypeTransformations.legacyToNonLegacy());
// TODO erase the conversion class earlier when dropping legacy code, esp. FLINK-22321
return TypeConversions.fromLogicalToDataType(fixedDataType.getLogicalType());
}
| 3.26 |
flink_DynamicSinkUtils_projectColumnsForUpdate_rdh
|
// create a project only select the required column or expression for update
private static RelNode projectColumnsForUpdate(LogicalTableModify tableModify, int originColsCount, ResolvedSchema resolvedSchema, List<Integer> updatedIndexes, SupportsRowLevelUpdate.RowLevelUpdateMode updateMode, String tableDebugName, DataTypeFactory dataTypeFactory, FlinkTypeFactory typeFactory) {
RexBuilder rexBuilder = tableModify.getCluster().getRexBuilder();
// the updated columns, whose order is same to user's update clause
List<String> updatedColumnNames = tableModify.getUpdateColumnList();
List<RexNode> newRexNodeList = new ArrayList<>();
List<String> newFieldNames = new ArrayList<>();
List<DataType> updateTargetDataTypes = new ArrayList<>();
Project project = ((Project) (tableModify.getInput()));
LogicalFilter filter = null;
// if the update mode is all rows, we need to know the filter to rewrite
// the update expression to IF(filter, updated_expr, col_expr)
if ((updateMode == RowLevelUpdateMode.ALL_ROWS) && (project.getInput() instanceof LogicalFilter)) {
filter = ((LogicalFilter) (project.getInput()));
}
// the rex nodes for the project are like: index for all col, update expressions for the
// updated columns
List<RexNode> oldRexNodes = project.getProjects();
for (int index : updatedIndexes) {
String colName = resolvedSchema.getColumnNames().get(index);
// if the updated cols contain the col to be selected, the updated expression should
// be in the project node
if (updatedColumnNames.contains(colName)) {
// get the index of the updated column in all updated columns
int i = updatedColumnNames.indexOf(colName);
// get the update expression
RexNode rexNode = oldRexNodes.get(originColsCount + i);
if (filter != null) {
rexNode = rexBuilder.makeCall(FlinkSqlOperatorTable.IF, Arrays.asList(filter.getCondition(), rexNode, rexBuilder.makeInputRef(project.getInput(), index)));
}
newRexNodeList.add(rexNode);
} else {
newRexNodeList.add(rexBuilder.makeInputRef(project.getInput(), index));
}
newFieldNames.add(colName);
updateTargetDataTypes.add(resolvedSchema.getColumnDataTypes().get(index));
}
// if filter is not null, we need to remove the filter in the plan since we
// have rewritten the expression to IF(filter, updated_expr, col_expr)
project = project.copy(project.getTraitSet(), filter != null ? filter.getInput() : project.getInput(), newRexNodeList, RexUtil.createStructType(typeFactory, newRexNodeList, newFieldNames, null));
return validateSchemaAndApplyImplicitCast(project, updateTargetDataTypes, tableDebugName, dataTypeFactory, typeFactory);
}
| 3.26 |
flink_StreamingRuntimeContext_isCheckpointingEnabled_rdh
|
// ------------------ expose (read only) relevant information from the stream config -------- //
/**
* Returns true if checkpointing is enabled for the running job.
*
* @return true if checkpointing is enabled.
*/
public boolean isCheckpointingEnabled() {
return streamConfig.isCheckpointingEnabled();
}
| 3.26 |
flink_StreamingRuntimeContext_m0_rdh
|
/**
* Returns the task manager runtime info of the task manager running this stream task.
*
* @return The task manager runtime info.
*/
public TaskManagerRuntimeInfo m0() {
return taskEnvironment.getTaskManagerInfo();
}
| 3.26 |
flink_StreamingRuntimeContext_hasBroadcastVariable_rdh
|
// ------------------------------------------------------------------------
// broadcast variables
// ------------------------------------------------------------------------
@Override
public boolean hasBroadcastVariable(String name) {
throw new UnsupportedOperationException("Broadcast variables can only be used in DataSet programs");
}
| 3.26 |
flink_StreamingRuntimeContext_getState_rdh
|
// ------------------------------------------------------------------------
// key/value state
// ------------------------------------------------------------------------
@Override
public <T> ValueState<T> getState(ValueStateDescriptor<T> stateProperties) {
KeyedStateStore keyedStateStore = checkPreconditionsAndGetKeyedStateStore(stateProperties);
stateProperties.initializeSerializerUnlessSet(getExecutionConfig());
return keyedStateStore.getState(stateProperties);
}
| 3.26 |
flink_StreamingRuntimeContext_getInputSplitProvider_rdh
|
// ------------------------------------------------------------------------
/**
* Returns the input split provider associated with the operator.
*
* @return The input split provider.
*/
public InputSplitProvider getInputSplitProvider() {
return taskEnvironment.getInputSplitProvider();
}
| 3.26 |
flink_WritableSerializer_ensureInstanceInstantiated_rdh
|
// --------------------------------------------------------------------------------------------
private void ensureInstanceInstantiated() {
if (copyInstance == null) {
copyInstance = createInstance();
}
}
| 3.26 |
flink_WritableSerializer_snapshotConfiguration_rdh
|
// --------------------------------------------------------------------------------------------
// Serializer configuration snapshotting & compatibility
// --------------------------------------------------------------------------------------------
@Override
public TypeSerializerSnapshot<T> snapshotConfiguration() {
return new WritableSerializerSnapshot<>(typeClass);
}
| 3.26 |
flink_WritableSerializer_hashCode_rdh
|
// --------------------------------------------------------------------------------------------
@Override
public int hashCode() {
return this.typeClass.hashCode();
}
| 3.26 |
flink_HiveTableUtil_createSchema_rdh
|
/**
* Create a Flink's Schema from Hive table's columns and partition keys.
*/
public static Schema createSchema(List<FieldSchema> nonPartCols, List<FieldSchema> partitionKeys, Set<String> notNullColumns, @Nullable UniqueConstraint primaryKey) {
Tuple2<String[], DataType[]> columnInformation = getColumnInformation(nonPartCols, partitionKeys, notNullColumns, primaryKey);
Builder builder = Schema.newBuilder().fromFields(columnInformation.f0, columnInformation.f1);
if (primaryKey != null) {
builder.primaryKeyNamed(primaryKey.getName(), primaryKey.getColumns().toArray(new String[0]));
}
return builder.build();
}
| 3.26 |
flink_HiveTableUtil_relyConstraint_rdh
|
// returns a constraint trait that requires RELY
public static byte relyConstraint(byte trait) {
return ((byte) (trait | HIVE_CONSTRAINT_RELY));
}
| 3.26 |
flink_HiveTableUtil_createHivePartition_rdh
|
// --------------------------------------------------------------------------------------------
// Helper methods
// --------------------------------------------------------------------------------------------
/**
* Creates a Hive partition instance.
*/
public static Partition createHivePartition(String dbName, String tableName, List<String> values, StorageDescriptor sd, Map<String, String> parameters) {
Partition partition = new Partition();
partition.setDbName(dbName);
partition.setTableName(tableName);
partition.setValues(values);
partition.setParameters(parameters);
partition.setSd(sd);
int currentTime = ((int) (System.currentTimeMillis() / 1000));
partition.setCreateTime(currentTime);
partition.setLastAccessTime(currentTime);
return partition;
}
| 3.26 |
flink_HiveTableUtil_maskFlinkProperties_rdh
|
/**
* Add a prefix to Flink-created properties to distinguish them from Hive-created properties.
*/
private static Map<String, String> maskFlinkProperties(Map<String, String> properties) {
return properties.entrySet().stream().filter(e -> (e.getKey() != null) && (e.getValue() != null)).map(e -> new Tuple2<>(FLINK_PROPERTY_PREFIX + e.getKey(), e.getValue())).collect(Collectors.toMap(t -> t.f0, t -> t.f1));
}
| 3.26 |
flink_HiveTableUtil_createHiveColumns_rdh
|
/**
* Create Hive columns from Flink ResolvedSchema.
*/
public static List<FieldSchema> createHiveColumns(ResolvedSchema schema) {
String[] fieldNames = schema.getColumnNames().toArray(new String[0]);
DataType[] fieldTypes = schema.getColumnDataTypes().toArray(new DataType[0]);
List<FieldSchema> v16 = new ArrayList<>(fieldNames.length);
for (int i = 0; i < fieldNames.length; i++) {
v16.add(new FieldSchema(fieldNames[i], HiveTypeUtil.toHiveTypeInfo(fieldTypes[i], true).getTypeName(), null));
}
return v16;
}
| 3.26 |
flink_HiveTableUtil_checkAcidTable_rdh
|
/**
* Check whether to read or write on the hive ACID table.
*
* @param tableOptions
* Hive table options.
* @param tablePath
* Identifier table path.
* @throws FlinkHiveException
* Thrown, if the source or sink table is transactional.
*/
public static void checkAcidTable(Map<String, String> tableOptions, ObjectPath tablePath) {
String tableIsTransactional = tableOptions.get("transactional");
if (tableIsTransactional == null) {
tableIsTransactional = tableOptions.get("transactional".toUpperCase());
}
if ((tableIsTransactional != null) && tableIsTransactional.equalsIgnoreCase("true")) {
throw new FlinkHiveException(String.format("Reading or writing ACID table %s is not supported.", tablePath));
}
}
| 3.26 |
flink_HiveTableUtil_initiateTableFromProperties_rdh
|
/**
* Extract DDL semantics from properties and use it to initiate the table. The related
* properties will be removed from the map after they're used.
*/
private static void initiateTableFromProperties(Table hiveTable, Map<String, String> properties, HiveConf hiveConf) {
extractExternal(hiveTable, properties);
extractRowFormat(hiveTable.getSd(), properties);
extractStoredAs(hiveTable.getSd(), properties, hiveConf);
extractLocation(hiveTable.getSd(), properties);
}
| 3.26 |
flink_HiveTableUtil_getHadoopConfiguration_rdh
|
/**
* Returns a new Hadoop Configuration object using the path to the hadoop conf configured.
*
* @param hadoopConfDir
* Hadoop conf directory path.
* @return A Hadoop configuration instance.
*/
public static Configuration getHadoopConfiguration(String hadoopConfDir) {
if (new File(hadoopConfDir).exists()) {
List<File> possiableConfFiles = new ArrayList<File>();
File coreSite = new File(hadoopConfDir, "core-site.xml");
if (coreSite.exists()) {
possiableConfFiles.add(coreSite);
}
File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
if (hdfsSite.exists()) {
possiableConfFiles.add(hdfsSite);
}
File yarnSite = new File(hadoopConfDir, "yarn-site.xml");
if (yarnSite.exists()) {
possiableConfFiles.add(yarnSite);
}
// Add mapred-site.xml. We need to read configurations like compression codec.
File v56 = new File(hadoopConfDir, "mapred-site.xml");
if (v56.exists()) {
possiableConfFiles.add(v56);
}
if (possiableConfFiles.isEmpty()) {
return null;
} else {
Configuration hadoopConfiguration = new Configuration();
for (File confFile : possiableConfFiles) {
hadoopConfiguration.addResource(new Path(confFile.getAbsolutePath()));
}
return hadoopConfiguration;
}
}
return null;
}
| 3.26 |
flink_HiveTableUtil_enableConstraint_rdh
|
// returns a constraint trait that requires ENABLE
public static byte enableConstraint(byte trait) {
return ((byte) (trait | HIVE_CONSTRAINT_ENABLE));
}
| 3.26 |
flink_HiveTableUtil_createResolvedSchema_rdh
|
/**
* Create a Flink's ResolvedSchema from Hive table's columns and partition keys.
*/
public static ResolvedSchema createResolvedSchema(List<FieldSchema> nonPartCols, List<FieldSchema> partitionKeys, Set<String> notNullColumns, @Nullable UniqueConstraint primaryKey) {
Tuple2<String[], DataType[]> columnInformation = getColumnInformation(nonPartCols, partitionKeys, notNullColumns, primaryKey);
return new ResolvedSchema(IntStream.range(0, columnInformation.f0.length).mapToObj(i -> Column.physical(columnInformation.f0[i], columnInformation.f1[i])).collect(Collectors.toList()), Collections.emptyList(), primaryKey == null ? null : UniqueConstraint.primaryKey(primaryKey.getName(), primaryKey.getColumns()));
}
| 3.26 |
flink_HiveTableUtil_extractHiveTableInfo_rdh
|
/**
* Get the hive table's information.
*
* @return non-part fields, part fields, notNullColumns, primaryKey.
*/
private static Tuple4<List<FieldSchema>, List<FieldSchema>, Set<String>, Optional<UniqueConstraint>> extractHiveTableInfo(HiveConf hiveConf, Table hiveTable, HiveMetastoreClientWrapper client, HiveShim hiveShim) {
List<FieldSchema> fields = getNonPartitionFields(hiveConf, hiveTable, hiveShim);
Set<String> notNullColumns = client.getNotNullColumns(hiveConf, hiveTable.getDbName(), hiveTable.getTableName());
Optional<UniqueConstraint> primaryKey = client.getPrimaryKey(hiveTable.getDbName(), hiveTable.getTableName(), HiveTableUtil.relyConstraint(((byte) (0))));
// PK columns cannot be null
primaryKey.ifPresent(pk -> notNullColumns.addAll(pk.getColumns()));
return Tuple4.of(fields, hiveTable.getPartitionKeys(), notNullColumns, primaryKey);
}
| 3.26 |
flink_HiveTableUtil_extractRowType_rdh
|
/**
* Create the Hive table's row type.
*/
public static DataType extractRowType(HiveConf hiveConf, Table hiveTable, HiveMetastoreClientWrapper client, HiveShim hiveShim) {
Tuple4<List<FieldSchema>, List<FieldSchema>, Set<String>, Optional<UniqueConstraint>> hiveTableInfo = extractHiveTableInfo(hiveConf, hiveTable, client, hiveShim);
Tuple2<String[], DataType[]> types = extractColumnInformation(Stream.of(hiveTableInfo.f0, hiveTableInfo.f1).flatMap(Collection::stream).collect(Collectors.toList()), hiveTableInfo.f2);
return DataTypes.ROW(IntStream.range(0, types.f0.length).mapToObj(i -> DataTypes.FIELD(types.f0[i], types.f1[i])).collect(Collectors.toList()));
}
| 3.26 |
flink_LongHybridHashTable_putBuildRow_rdh
|
// ---------------------- interface to join operator ---------------------------------------
public void putBuildRow(BinaryRowData row) throws IOException {
long key = getBuildLongKey(row);
final int hashCode = hashLong(key, 0);
m0(key, hashCode, row);
}
| 3.26 |
flink_LongHybridHashTable_get_rdh
|
/**
* This method is only used for operator fusion codegen to get build row from hash table. If the
* build partition has spilled to disk, return null directly which requires the join operator
* also spill probe row to disk.
*/
@Nullable
public final RowIterator<BinaryRowData> get(long probeKey) throws IOException {
if (denseMode) {
if ((probeKey >= minKey) && (probeKey <= maxKey)) {
long v4 = probeKey - minKey;
long denseBucketOffset = v4 << 3;
int v6 = ((int) (denseBucketOffset >>> segmentSizeBits));
int denseSegOffset = ((int) (denseBucketOffset & segmentSizeMask));
long address = denseBuckets[v6].getLong(denseSegOffset);
this.matchIterator = densePartition.valueIter(address);
} else {
this.matchIterator = densePartition.valueIter(INVALID_ADDRESS);
}
return matchIterator;
} else {
final int hash = hashLong(probeKey, this.currentRecursionDepth);
currentProbePartition = this.partitionsBeingBuilt.get(hash % partitionsBeingBuilt.size());
if (currentProbePartition.isInMemory()) {
this.matchIterator = currentProbePartition.get(probeKey, hash);
return matchIterator;
} else {
// If the build partition has spilled to disk, return null directly which requires
// the join operator also spill probe row to disk.
return null;
}
}
}
| 3.26 |
flink_LongHybridHashTable_tryDenseMode_rdh
|
// ---------------------- interface to join operator end -----------------------------------
/**
* After build end, try to use dense mode.
*/
private void tryDenseMode() {
// if some partitions have spilled to disk, always use hash mode
if (numSpillFiles != 0) {
return;
}
long v18 = Long.MAX_VALUE;
long maxKey = Long.MIN_VALUE;
long recordCount = 0;
for (LongHashPartition p : this.partitionsBeingBuilt) {
long partitionRecords = p.getBuildSideRecordCount();
recordCount += partitionRecords;
if (partitionRecords > 0) {
if (p.getMinKey() < v18) {
v18 = p.getMinKey();
}
if (p.getMaxKey() > maxKey) {
maxKey = p.getMaxKey();
}
}
}
if (buildSpillRetBufferNumbers != 0) {
throw new RuntimeException("buildSpillRetBufferNumbers should be 0: " + buildSpillRetBufferNumbers);
}
long range = (maxKey - v18) + 1;
// 1.range is negative mean: range is too big to overflow
// 2.range is zero, maybe the max is Long.Max, and the min is Long.Min,
// so we should not use dense mode too.
if ((range > 0) && ((range <= (recordCount * 4)) || (range <= (segmentSize / 8)))) {
// try to request memory.
int buffers = ((int) (Math.ceil(((double) (range * 8)) / segmentSize)));
// TODO MemoryManager needs to support flexible larger segment, so that the index area
// of the build side is placed on a segment to avoid the overhead of addressing.
MemorySegment[] denseBuckets = new MemorySegment[buffers];
for (int i = 0; i < buffers; i++) {
MemorySegment seg = getNextBuffer();
if (seg == null) {
returnAll(Arrays.asList(denseBuckets));
return;
}
denseBuckets[i] = seg;
for (int j = 0; j < segmentSize; j += 8) {
seg.putLong(j, INVALID_ADDRESS);
}
}
denseMode = true;
LOG.info("LongHybridHashTable: Use dense mode!");
this.minKey = v18;
this.maxKey = maxKey;
List<MemorySegment> segments = new ArrayList<>();
buildSpillReturnBuffers.drainTo(segments);
returnAll(segments);
ArrayList<MemorySegment> dataBuffers = new ArrayList<>();
long addressOffset = 0;
for (LongHashPartition p : this.partitionsBeingBuilt) {
p.iteratorToDenseBucket(denseBuckets, addressOffset, v18);
p.updateDenseAddressOffset(addressOffset);
dataBuffers.addAll(Arrays.asList(p.getPartitionBuffers()));
addressOffset += p.getPartitionBuffers().length << segmentSizeBits;
returnAll(Arrays.asList(p.getBuckets()));
}
this.denseBuckets = denseBuckets;
this.densePartition = new LongHashPartition(this, buildSideSerializer, dataBuffers.toArray(new MemorySegment[0]));
freeCurrent();
}
}
| 3.26 |
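A worked example of the dense-mode check above, with illustrative numbers only (the segment size is assumed to be 32 KiB here; the real value comes from the memory manager):

class DenseModeCheck {
    public static void main(String[] args) {
        long minKey = 1_000L, maxKey = 9_000L, recordCount = 5_000L;
        long segmentSize = 32 * 1024; // assumed segment size

        long range = (maxKey - minKey) + 1; // 8_001 distinct key slots
        boolean useDense = range > 0
                && (range <= recordCount * 4 || range <= segmentSize / 8);
        // one 8-byte bucket per key slot -> ceil(8_001 * 8 / 32_768) = 2 segments
        int buffers = (int) Math.ceil((double) (range * 8) / segmentSize);
        System.out.println(useDense + ", segments needed: " + buffers); // true, segments needed: 2
    }
}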
flink_LongHybridHashTable_insertIntoProbeBuffer_rdh
|
/**
* If the probe row corresponding partition has been spilled to disk, just call this method
* spill probe row to disk.
*
* <p>Note: This must be called only after {@link LongHybridHashTable#get} method.
*/
public final void insertIntoProbeBuffer(RowData probeRecord) throws IOException {
checkNotNull(currentProbePartition);
currentProbePartition.insertIntoProbeBuffer(probeSideSerializer, probeToBinary(probeRecord));
}
| 3.26 |
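A hedged sketch of the probe-side pattern described in the get(...) and insertIntoProbeBuffer(...) Javadocs above (the emit helper is a placeholder for the actual join logic; only the null-check contract is taken from the snippets):

import java.io.IOException;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.binary.BinaryRowData;
import org.apache.flink.table.runtime.hashtable.LongHybridHashTable;
import org.apache.flink.table.runtime.util.RowIterator;

class ProbeSketch {
    static void probe(LongHybridHashTable table, long probeKey, RowData probeRow) throws IOException {
        RowIterator<BinaryRowData> matches = table.get(probeKey);
        if (matches == null) {
            // the matching build partition was spilled, so spill the probe row as well
            table.insertIntoProbeBuffer(probeRow);
        } else {
            while (matches.advanceNext()) {
                emit(probeRow, matches.getRow());
            }
        }
    }

    static void emit(RowData probeRow, BinaryRowData buildRow) {
        // placeholder for the join output
    }
}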
flink_ExecNodeContext_getTypeAsString_rdh
|
/**
* Returns the {@link #name} and {@link #version}, to be serialized into the JSON plan as one
* string, which in turn will be parsed by {@link ExecNodeContext#ExecNodeContext(String)} when
* deserialized from a JSON plan or when needed by {@link ExecNodeTypeIdResolver#typeFromId(DatabindContext, String)}.
*/
@JsonValue
public String getTypeAsString() {
if ((name == null) || (version == null)) {
throw new TableException(String.format("Can not serialize ExecNode with id: %d. Missing type, this is a bug," + " please file a ticket.", getId()));
}
return (name + "_") + version;
}
| 3.26 |
flink_ExecNodeContext_generateUid_rdh
|
/**
* Returns a new {@code uid} for transformations.
*/
public String generateUid(String transformationName, ExecNodeConfig config) {
if (!transformationNamePattern.matcher(transformationName).matches()) {
throw new TableException((("Invalid transformation name '" + transformationName) + "'. ") + "This is a bug, please file an issue.");
}
final String uidPattern = config.get(ExecutionConfigOptions.TABLE_EXEC_UID_FORMAT);
// Note: name and version are not included in the UID by default as they would prevent
// migration.
// No version because: An operator can change its state layout and bump up the ExecNode
// version, in this case the UID should still be able to map state even after plan
// migration to the new version.
// No name because: We might fuse operators in the future, and a new operator might
// subscribe to multiple old UIDs.
return StringUtils.replaceEach(uidPattern, new String[]{ "<id>", "<type>", "<version>", "<transformation>" }, new String[]{ String.valueOf(id), name, String.valueOf(version), transformationName });
}
| 3.26 |
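A standalone sketch of the placeholder substitution performed above (the pattern string is only an example value for table.exec.uid.format, and StringUtils is assumed to be Apache Commons Lang's):

import org.apache.commons.lang3.StringUtils;

class UidPatternExample {
    public static void main(String[] args) {
        String uidPattern = "<id>_<type>"; // hypothetical configured pattern
        int id = 7;
        String name = "stream-exec-sink";
        int version = 1;
        String transformationName = "writer";

        String uid = StringUtils.replaceEach(
                uidPattern,
                new String[] {"<id>", "<type>", "<version>", "<transformation>"},
                new String[] {String.valueOf(id), name, String.valueOf(version), transformationName});
        System.out.println(uid); // 7_stream-exec-sink
    }
}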
flink_ExecNodeContext_newNodeId_rdh
|
/**
* Generate a unique ID for ExecNode.
*/
public static int newNodeId() {
return idCounter.incrementAndGet();
}
| 3.26 |