name | code_snippet | score
---|---|---|
hudi_HoodieLogFileReader_prev_rdh
|
/**
* This is a reverse iterator. Note: At any point, an instance of HoodieLogFileReader should either iterate reverse
* (prev) or forward (next). Doing both in the same instance is not supported.
* WARNING: Every call to prev() should be preceded with hasPrev().
*/
@Override
public HoodieLogBlock prev() throws IOException {
if (!this.reverseReader) {
throw new HoodieNotSupportedException("Reverse log reader has not been enabled");
}
long blockSize = inputStream.readLong();
long blockEndPos = inputStream.getPos();
// blocksize should read everything about a block including the length as well
try {
inputStream.seek(reverseLogFilePosition - blockSize);
} catch (Exception e) {
// this could be a corrupt block
inputStream.seek(blockEndPos);
throw new CorruptedLogFileException("Found possible corrupted block, cannot read log file in reverse, " + "fallback to forward reading of logfile");
}
boolean hasNext = hasNext();
reverseLogFilePosition -= blockSize;
lastReverseLogFilePosition = reverseLogFilePosition;
return next();
}
| 3.26 |
hudi_Types_isTighterThan_rdh
|
/**
* Returns whether this DecimalType is tighter than `other`. If yes, it means `this`
* can be cast into `other` safely without losing any precision or range.
*/
public boolean isTighterThan(PrimitiveType other) {
if (other instanceof DecimalType) {
DecimalType dt = ((DecimalType) (other));
return ((precision - scale) <= (dt.precision - dt.scale)) && (scale <= dt.scale);
}
if (other instanceof IntType) {
return isTighterThan(get(10, 0));
}
return false;
}
| 3.26 |
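A minimal usage sketch of the tightness check above, assuming the `Types.DecimalType.get(precision, scale)` factory referenced in the snippet and the `org.apache.hudi.internal.schema` package location; it is illustrative only and not part of the dataset row.

```java
import org.apache.hudi.internal.schema.Types; // assumed package path

public class DecimalTightnessExample {
  public static void main(String[] args) {
    // decimal(5,2) keeps 3 integer digits; decimal(10,2) keeps 8 integer digits, both with scale 2.
    Types.DecimalType narrow = Types.DecimalType.get(5, 2);
    Types.DecimalType wide = Types.DecimalType.get(10, 2);
    System.out.println(narrow.isTighterThan(wide)); // true: every decimal(5,2) value fits into decimal(10,2)
    System.out.println(wide.isTighterThan(narrow)); // false: decimal(10,2) may overflow decimal(5,2)
  }
}
```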
hudi_Types_get_rdh
|
// Experimental method to support defaultValue
public static Field get(int id, boolean isOptional, String name, Type type, String doc, Object defaultValue) {
return new Field(isOptional, id, name, type, doc, defaultValue);
}
| 3.26 |
hudi_Types_fieldByName_rdh
|
/**
* Case-sensitive get field by name
*/
public Field fieldByName(String name) {
if (nameToFields == null) {
nameToFields = Arrays.stream(fields).collect(Collectors.toMap(Field::name, field -> field));
}
return nameToFields.get(name);
}
| 3.26 |
hudi_Types_isWiderThan_rdh
|
/**
* Returns whether this DecimalType is wider than `other`. If yes, it means `other`
* can be cast into `this` safely without losing any precision or range.
*/
public boolean isWiderThan(PrimitiveType other) {
if (other instanceof DecimalType) {
DecimalType dt = ((DecimalType) (other));
return ((precision - scale) >= (dt.precision - dt.scale)) && (scale >= dt.scale);
}
if (other instanceof IntType) {
return isWiderThan(get(10, 0));
}
return false;
}
| 3.26 |
hudi_SqlQueryPreCommitValidator_validateRecordsBeforeAndAfter_rdh
|
/**
* Takes input datasets 1) before the commit started and 2) with the inflight commit. Performs the required validation
* and throws an error if validation fails.
*/
@Override
public void validateRecordsBeforeAndAfter(Dataset<Row> before, Dataset<Row> after, final Set<String> partitionsAffected) {
String hoodieTableName = "staged_table_" + TABLE_COUNTER.incrementAndGet();
String hoodieTableBeforeCurrentCommit = hoodieTableName + "_before";
String hoodieTableWithInflightCommit = hoodieTableName + "_after";
before.registerTempTable(hoodieTableBeforeCurrentCommit);
after.registerTempTable(hoodieTableWithInflightCommit);
JavaSparkContext jsc = HoodieSparkEngineContext.getSparkContext(getEngineContext());
SQLContext sqlContext = new SQLContext(jsc);
String[] queries = getQueriesToRun();
Arrays.asList(queries).parallelStream().forEach(query -> validateUsingQuery(query, hoodieTableBeforeCurrentCommit, hoodieTableWithInflightCommit, sqlContext));
}
| 3.26 |
hudi_SafeParquetRecordReaderWrapper_createValue_rdh
|
/**
* We could be in a concurrent fetch-and-read environment. We need to create a new ArrayWritable because the
* ParquetReader implementation reuses the same ArrayWritable for all reads, which would cause corruption when
* buffering. So, we create a new ArrayWritable here with the value class from parquetReader's value and an empty array.
*/
@Override
public ArrayWritable createValue() {
// Call createValue of parquetReader to get size and class type info only
Writable[] emptyWritableBuf = new Writable[numValueFields];
return new ArrayWritable(valueClass, emptyWritableBuf);
}
| 3.26 |
hudi_HoodieSimpleBucketLayout_determinesNumFileGroups_rdh
|
/**
* Bucketing controls the number of file groups directly.
*/
@Override
public boolean determinesNumFileGroups() {
return true;
}
| 3.26 |
hudi_ByteBufferBackedInputStream_getPosition_rdh
|
/**
* Returns current position of the stream
*/
public int getPosition() {
return buffer.position() - bufferOffset;
}
| 3.26 |
hudi_ByteBufferBackedInputStream_m0_rdh
|
/**
* Copies at most {@code length} bytes starting from position {@code pos} into the target
* buffer with provided {@code offset}. Returns number of bytes copied from the backing buffer
*
* NOTE: This does not change the current position of the stream and is thread-safe
*
* @param pos
* absolute position w/in stream to read from
* @param targetBuffer
* target buffer to copy into
* @param offset
* target buffer offset to copy at
* @param length
* length of the sequence to copy
* @return number of bytes copied
*/
public int m0(long pos, byte[] targetBuffer, int offset, int length) {
int bufferPos = bufferOffset + ((int) (pos));
if (bufferPos > buffer.limit()) {
throw new IllegalArgumentException(String.format("Can't read past the backing buffer boundary (offset %d, length %d)", pos, buffer.limit() - bufferOffset));
} else if (length > targetBuffer.length) {
throw new IllegalArgumentException(String.format("Target buffer is too small (length %d, buffer size %d)", length, targetBuffer.length));
}
// Determine total number of bytes available to read
int available = Math.min(length, buffer.limit() - bufferPos);
// Copy directly from the backing array into the target buffer
System.arraycopy(buffer.array(), bufferPos, targetBuffer, offset, available);
return available;
}
| 3.26 |
hudi_ByteBufferBackedInputStream_seek_rdh
|
/**
* Seeks to a position w/in the stream
*
* NOTE: Position is relative to the start of the stream (ie its absolute w/in this stream),
* with following invariant being assumed:
* <p>0 <= pos <= length (of the stream)</p>
*
* This method is NOT thread-safe
*
* @param pos
* target position to seek to w/in the holding buffer
*/
public void seek(long pos) {
buffer.reset();// to mark
int offset = buffer.position();
// NOTE: That the new pos is still relative to buffer's offset
int newPos = offset + ((int) (pos));
if ((newPos > buffer.limit()) || (newPos < offset)) {
throw new IllegalArgumentException(String.format("Can't seek past the backing buffer (limit %d, offset %d, new %d)", buffer.limit(), offset, newPos));
}
buffer.position(newPos);
}
| 3.26 |
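A hypothetical usage sketch combining `seek`, `getPosition`, and the random-access copy method `m0` from the rows above; the byte-array constructor and the package import are assumptions, so adjust them to the actual class.

```java
import java.nio.charset.StandardCharsets;
import org.apache.hudi.common.util.io.ByteBufferBackedInputStream; // assumed package path

public class ByteBufferStreamExample {
  public static void main(String[] args) {
    byte[] data = "hello hudi".getBytes(StandardCharsets.UTF_8);
    // Assumed constructor: wraps the whole byte array as the backing buffer.
    ByteBufferBackedInputStream stream = new ByteBufferBackedInputStream(data);

    stream.seek(6); // absolute position of "hudi"
    System.out.println(stream.getPosition()); // 6

    byte[] target = new byte[4];
    int copied = stream.m0(6, target, 0, 4); // random-access copy; stream position is unchanged
    System.out.println(copied + " -> " + new String(target, StandardCharsets.UTF_8)); // 4 -> hudi
  }
}
```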
hudi_HoodieConsistentBucketLayout_determinesNumFileGroups_rdh
|
/**
* Bucketing controls the number of file groups directly.
*/
@Override
public boolean determinesNumFileGroups() {
return true;
}
| 3.26 |
hudi_HoodieConsistentBucketLayout_layoutPartitionerClass_rdh
|
/**
* Consistent hashing will tag all incoming records, so we could go ahead reusing an existing Partitioner
*/
@Override
public Option<String> layoutPartitionerClass() {
return Option.empty();
}
| 3.26 |
hudi_TypeUtils_getValueToEnumMap_rdh
|
/**
* Maps values from the provided Enum's {@link Class} into corresponding values,
* extracted by provided {@code valueMapper}
*/
public static <EnumT extends Enum<EnumT>> Map<String, EnumT> getValueToEnumMap(@Nonnull Class<EnumT> klass, @Nonnull Function<EnumT, String> valueMapper) {
return Arrays.stream(klass.getEnumConstants()).collect(Collectors.toMap(valueMapper, Function.identity()));
}
| 3.26 |
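A short usage sketch for the helper above; the `org.apache.hudi.common.util.TypeUtils` import is an assumption based on the snippet name.

```java
import java.util.Map;
import org.apache.hudi.common.util.TypeUtils; // assumed package path

public class EnumLookupExample {
  enum Compression { GZIP, SNAPPY, ZSTD }

  public static void main(String[] args) {
    // Key each enum constant by its lower-cased name.
    Map<String, Compression> byName = TypeUtils.getValueToEnumMap(Compression.class, e -> e.name().toLowerCase());
    System.out.println(byName.get("snappy")); // SNAPPY
  }
}
```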
hudi_TypeUtils_unsafeCast_rdh
|
/**
* This utility abstracts unsafe type-casting in a way that allows to
* <ul>
* <li>Search for such type-casts more easily (just searching for usages of this method)</li>
* <li>Avoid type-cast warnings from the compiler</li>
* </ul>
*/
@SuppressWarnings("unchecked")
public static <T> T unsafeCast(Object o) {
return ((T) (o));
}
| 3.26 |
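A tiny sketch of how `unsafeCast` is typically used; note the cast stays unchecked at runtime, the helper only centralizes it (package import assumed, as above).

```java
import java.util.Collections;
import java.util.List;
import org.apache.hudi.common.util.TypeUtils; // assumed package path

public class UnsafeCastExample {
  public static void main(String[] args) {
    Object raw = Collections.singletonList("value");
    // Still an unchecked cast; it only fails later if the runtime type does not match.
    List<String> strings = TypeUtils.unsafeCast(raw);
    System.out.println(strings.get(0)); // value
  }
}
```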
hudi_QuickstartConfigurations_sql_rdh
|
/**
* Creates the tool to build hoodie table DDL.
*/
public static Sql sql(String tableName) {
return new Sql(tableName);
}
| 3.26 |
hudi_ConfigProperty_key_rdh
|
/**
* Creates a PropertyBuilder with the given key.
*
* @param key
* The key of the option
* @return a PropertyBuilder
*/
public static PropertyBuilder key(String key) {
Objects.requireNonNull(key);
return new PropertyBuilder(key);
}
| 3.26 |
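A hedged sketch of how the `key(...)` entry point above is usually chained; `defaultValue`, `markAdvanced` (shown in the next row), and `withDocumentation` are assumed builder methods, and the config key is purely illustrative.

```java
import org.apache.hudi.common.config.ConfigProperty; // assumed package path

public class ExampleConfig {
  // Hypothetical key, for illustration only.
  public static final ConfigProperty<String> GREETING = ConfigProperty
      .key("hoodie.example.greeting")
      .defaultValue("hello")
      .markAdvanced()
      .withDocumentation("Greeting used by the example writer.");
}
```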
hudi_ConfigProperty_markAdvanced_rdh
|
/**
* Marks the config as an advanced config.
*/
public ConfigProperty<T> markAdvanced() {
return new ConfigProperty<>(key, defaultValue, docOnDefaultValue, doc, sinceVersion, deprecatedVersion, inferFunction, validValues, true, alternatives);
}
| 3.26 |
hudi_HoodieSparkKeyGeneratorFactory_getKeyGeneratorClassNameFromType_rdh
|
/**
*
* @param type
* {@link KeyGeneratorType} enum.
* @return The key generator class name for Spark based on the {@link KeyGeneratorType}.
*/
public static String getKeyGeneratorClassNameFromType(KeyGeneratorType type) {
switch (type) {
case SIMPLE :
return SimpleKeyGenerator.class.getName();
case COMPLEX :
return ComplexKeyGenerator.class.getName();
case TIMESTAMP :
return TimestampBasedKeyGenerator.class.getName();
case CUSTOM :
return CustomKeyGenerator.class.getName();
case NON_PARTITION :
return NonpartitionedKeyGenerator.class.getName();
case GLOBAL_DELETE :
return GlobalDeleteKeyGenerator.class.getName();
default :
throw new HoodieKeyGeneratorException("Unsupported keyGenerator Type " + type);
}
}
| 3.26 |
hudi_HoodieSparkKeyGeneratorFactory_getKeyGenerator_rdh
|
/**
* Instantiate {@link BuiltinKeyGenerator}.
*
* @param properties
* properties map.
* @return the key generator thus instantiated.
*/
public static Option<BuiltinKeyGenerator> getKeyGenerator(Properties properties) {
TypedProperties typedProperties = new TypedProperties();
typedProperties.putAll(properties);
if (Option.ofNullable(properties.get(HoodieWriteConfig.KEYGENERATOR_CLASS_NAME.key())).map(v -> v.equals(NonpartitionedKeyGenerator.class.getName())).orElse(false)) {
return Option.empty();// Do not instantiate NonPartitionKeyGen
} else {
try {
return Option.of(((BuiltinKeyGenerator) (HoodieSparkKeyGeneratorFactory.createKeyGenerator(typedProperties))));
} catch (ClassCastException cce) {
throw new HoodieIOException("Only those key generators implementing BuiltInKeyGenerator interface is supported with virtual keys");
} catch (IOException e) {
throw new HoodieIOException("Key generator instantiation failed ", e);
}
}
}
| 3.26 |
hudi_HoodieSparkKeyGeneratorFactory_inferKeyGeneratorTypeFromWriteConfig_rdh
|
/**
* Infers the key generator type based on the record key and partition fields.
* If neither the record key nor the partition fields are set, the default type is returned.
*
* @param props
* Properties from the write config.
* @return Inferred key generator type.
*/
public static KeyGeneratorType inferKeyGeneratorTypeFromWriteConfig(TypedProperties props) {
String partitionFields = props.getString(KeyGeneratorOptions.PARTITIONPATH_FIELD_NAME.key(), null);
String recordsKeyFields = props.getString(KeyGeneratorOptions.RECORDKEY_FIELD_NAME.key(), null);
return inferKeyGeneratorType(Option.ofNullable(recordsKeyFields), partitionFields);
}
| 3.26 |
hudi_HoodieSparkKeyGeneratorFactory_convertToSparkKeyGenerator_rdh
|
/**
* Converts a hoodie-common KeyGenerator class name to its SparkKeyGeneratorInterface implementation.
*/
public static String convertToSparkKeyGenerator(String keyGeneratorClassName) {
return COMMON_TO_SPARK_KEYGENERATOR.getOrDefault(keyGeneratorClassName, keyGeneratorClassName);
}
| 3.26 |
hudi_BucketStreamWriteFunction_m0_rdh
|
/**
* Determine whether the current fileID belongs to the current task:
* the fileID belongs to this task if (partitionIndex + bucketNumber) % parallelism == taskID.
*/
public boolean m0(int bucketNumber, String partition) {
final int partitionIndex = (partition.hashCode() & Integer.MAX_VALUE) % parallelism;
int globalIndex = partitionIndex + bucketNumber;
return BucketIdentifier.mod(globalIndex, parallelism) == taskID;
}
| 3.26 |
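A plain-Java illustration of the ownership rule above, with `BucketIdentifier.mod` approximated by `Math.floorMod`; the concrete numbers are made up for the example.

```java
public class BucketOwnershipExample {
  public static void main(String[] args) {
    int parallelism = 4;            // number of write subtasks
    int taskID = 2;                 // index of the current subtask
    String partition = "2023-10-01";
    int bucketNumber = 7;

    // Mirrors the rule: (partitionIndex + bucketNumber) % parallelism == taskID
    int partitionIndex = (partition.hashCode() & Integer.MAX_VALUE) % parallelism;
    int globalIndex = partitionIndex + bucketNumber;
    boolean ownedByThisTask = Math.floorMod(globalIndex, parallelism) == taskID;
    System.out.println("partitionIndex=" + partitionIndex + ", ownedByThisTask=" + ownedByThisTask);
  }
}
```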
hudi_BucketStreamWriteFunction_bootstrapIndexIfNeed_rdh
|
/**
* Get partition_bucket -> fileID mapping from the existing hudi table.
* This is a required operation for each restart to avoid having duplicate file ids for one bucket.
*/
private void bootstrapIndexIfNeed(String partition) {
if (OptionsResolver.isInsertOverwrite(config)) {
// skips the index loading for insert overwrite operation.
return;
}
if (bucketIndex.containsKey(partition)) {
return;
}
LOG.info(String.format("Loading Hoodie Table %s, with path %s", this.metaClient.getTableConfig().getTableName(), (this.metaClient.getBasePath() + "/") + partition));
// Load existing fileID belongs to this task
Map<Integer, String> bucketToFileIDMap = new HashMap<>();
this.writeClient.getHoodieTable().getHoodieView().getLatestFileSlices(partition).forEach(fileSlice -> {
String fileId = fileSlice.getFileId();
int v12 = BucketIdentifier.bucketIdFromFileId(fileId);
if (isBucketToLoad(v12, partition)) {
LOG.info(String.format("Should load this partition bucket %s with fileId %s", v12, fileId));
// Validate that one bucketId has only ONE fileId
if (bucketToFileIDMap.containsKey(v12)) {
throw new RuntimeException(String.format("Duplicate fileId %s from bucket %s of partition %s found " + "during the BucketStreamWriteFunction index bootstrap.", fileId, v12, partition));
} else {
LOG.info(String.format("Adding fileId %s to the bucket %s of partition %s.", fileId, v12, partition));
bucketToFileIDMap.put(v12, fileId);
}
}
});
bucketIndex.put(partition, bucketToFileIDMap);
}
| 3.26 |
hudi_ExpressionEvaluators_getInLiteralVals_rdh
|
// -------------------------------------------------------------------------
// Utilities
// -------------------------------------------------------------------------
/**
* Returns the IN expression literal values.
*/
private static Object[] getInLiteralVals(List<Expression> childExprs) {
List<Object> vals = new ArrayList<>();
for (int i = 1; i < childExprs.size(); i++) {
vals.add(ExpressionUtils.getValueFromLiteral(((ValueLiteralExpression) (childExprs.get(i)))));
}
return vals.toArray();
}
| 3.26 |
hudi_ExpressionEvaluators_fromExpression_rdh
|
/**
* Converts specific call expression to the evaluator.
* <p>Two steps to bind the call:
* 1. map the evaluator instance;
* 2. bind the field reference;
*
* <p>Normalize the expression to simplify the subsequent decision logic:
* always put the literal expression in the RHS.
*/
public static Evaluator fromExpression(CallExpression expr) {
FunctionDefinition funDef = expr.getFunctionDefinition();
List<Expression> childExprs = expr.getChildren();
boolean normalized = childExprs.get(0) instanceof FieldReferenceExpression;
if (BuiltInFunctionDefinitions.NOT.equals(funDef)) {
Not evaluator = Not.getInstance();
Evaluator childEvaluator = fromExpression(((CallExpression) (childExprs.get(0))));
return evaluator.bindEvaluator(childEvaluator);
}
if (BuiltInFunctionDefinitions.AND.equals(funDef)) {
And evaluator = And.getInstance();
Evaluator v6 = fromExpression(((CallExpression) (childExprs.get(0))));
Evaluator evaluator2 = fromExpression(((CallExpression) (childExprs.get(1))));
return evaluator.bindEvaluator(v6, evaluator2);
}
if (BuiltInFunctionDefinitions.OR.equals(funDef)) {
Or evaluator = Or.getInstance();
Evaluator evaluator1 = fromExpression(((CallExpression) (childExprs.get(0))));
Evaluator evaluator2 = fromExpression(((CallExpression) (childExprs.get(1))));
return evaluator.bindEvaluator(evaluator1, evaluator2);
}
// handle unary operators
if (BuiltInFunctionDefinitions.IS_NULL.equals(funDef)) {
FieldReferenceExpression rExpr = ((FieldReferenceExpression) (childExprs.get(0)));
return IsNull.getInstance().bindFieldReference(rExpr);
} else if (BuiltInFunctionDefinitions.IS_NOT_NULL.equals(funDef)) {
FieldReferenceExpression rExpr = ((FieldReferenceExpression) (childExprs.get(0)));
return IsNotNull.getInstance().bindFieldReference(rExpr);
}
boolean hasNullLiteral = childExprs.stream().anyMatch(e -> (e instanceof ValueLiteralExpression) && (ExpressionUtils.getValueFromLiteral(((ValueLiteralExpression) (e))) == null));
if (hasNullLiteral) {
return AlwaysFalse.m2();
}
// handle IN specifically
if (BuiltInFunctionDefinitions.IN.equals(funDef)) {
ValidationUtils.checkState(normalized, "The IN expression expects to be normalized");
In in = In.getInstance();
FieldReferenceExpression rExpr = ((FieldReferenceExpression) (childExprs.get(0)));
in.bindFieldReference(rExpr);
in.bindVals(getInLiteralVals(childExprs));
return in;
}
NullFalseEvaluator evaluator;
// handle binary operators
if (BuiltInFunctionDefinitions.EQUALS.equals(funDef)) {
evaluator = EqualTo.m1();
} else if (BuiltInFunctionDefinitions.NOT_EQUALS.equals(funDef)) {
evaluator = NotEqualTo.getInstance();
} else if (BuiltInFunctionDefinitions.LESS_THAN.equals(funDef)) {
evaluator = (normalized) ? LessThan.getInstance() : GreaterThan.getInstance();
} else if (BuiltInFunctionDefinitions.GREATER_THAN.equals(funDef)) {
evaluator = (normalized) ? GreaterThan.getInstance() : LessThan.getInstance();
} else if (BuiltInFunctionDefinitions.LESS_THAN_OR_EQUAL.equals(funDef)) {
evaluator = (normalized) ? LessThanOrEqual.getInstance() : GreaterThanOrEqual.getInstance();
} else if (BuiltInFunctionDefinitions.GREATER_THAN_OR_EQUAL.equals(funDef)) {
evaluator = (normalized) ? GreaterThanOrEqual.getInstance() : LessThanOrEqual.getInstance();
} else {
throw new AssertionError("Unexpected function definition " + funDef);
}
FieldReferenceExpression rExpr = (normalized) ? ((FieldReferenceExpression) (childExprs.get(0))) : ((FieldReferenceExpression) (childExprs.get(1)));
ValueLiteralExpression vExpr = (normalized) ? ((ValueLiteralExpression) (childExprs.get(1))) : ((ValueLiteralExpression) (childExprs.get(0)));
evaluator.bindVal(vExpr).bindFieldReference(rExpr);
return evaluator;
}
| 3.26 |
hudi_RollbackUtils_getRollbackPlan_rdh
|
/**
* Get the latest version of the rollback plan corresponding to a rollback instant.
*
* @param metaClient
* Hoodie Table Meta Client
* @param rollbackInstant
* Instant referring to rollback action
* @return Rollback plan corresponding to rollback instant
* @throws IOException
*/
public static HoodieRollbackPlan getRollbackPlan(HoodieTableMetaClient metaClient, HoodieInstant rollbackInstant) throws IOException {
// TODO: add upgrade step if required.
final HoodieInstant requested = HoodieTimeline.getRollbackRequestedInstant(rollbackInstant);
return TimelineMetadataUtils.deserializeAvroMetadata(metaClient.getActiveTimeline().readRollbackInfoAsBytes(requested).get(), HoodieRollbackPlan.class);
}
| 3.26 |
hudi_TransactionUtils_getInflightAndRequestedInstants_rdh
|
/**
* Get InflightAndRequest instants.
*
* @param metaClient
* @return the set of inflight and requested instant times
*/
public static Set<String> getInflightAndRequestedInstants(HoodieTableMetaClient metaClient) {
// collect InflightAndRequest instants for deltaCommit/commit/compaction/clustering
Set<String> timelineActions = CollectionUtils.createImmutableSet(HoodieTimeline.REPLACE_COMMIT_ACTION, HoodieTimeline.COMPACTION_ACTION, HoodieTimeline.DELTA_COMMIT_ACTION, HoodieTimeline.COMMIT_ACTION);
return metaClient.getActiveTimeline().getTimelineOfActions(timelineActions).filterInflightsAndRequested().getInstantsAsStream().map(HoodieInstant::getTimestamp).collect(Collectors.toSet());
}
| 3.26 |
hudi_TransactionUtils_resolveWriteConflictIfAny_rdh
|
/**
* Resolve any write conflicts when committing data.
*
* @param table
* @param currentTxnOwnerInstant
* @param thisCommitMetadata
* @param config
* @param lastCompletedTxnOwnerInstant
* @param pendingInstants
* @param reloadActiveTimeline
* whether to reload the active timeline before checking for conflicts
* @return the resolved commit metadata
* @throws HoodieWriteConflictException
*/
public static Option<HoodieCommitMetadata> resolveWriteConflictIfAny(final HoodieTable table, final Option<HoodieInstant> currentTxnOwnerInstant, final Option<HoodieCommitMetadata> thisCommitMetadata, final HoodieWriteConfig config, Option<HoodieInstant> lastCompletedTxnOwnerInstant, boolean reloadActiveTimeline, Set<String> pendingInstants) throws HoodieWriteConflictException {
WriteOperationType operationType = thisCommitMetadata.map(HoodieCommitMetadata::getOperationType).orElse(null);
if (config.needResolveWriteConflict(operationType)) {
// deal with pendingInstants
Stream<HoodieInstant> completedInstantsDuringCurrentWriteOperation = getCompletedInstantsDuringCurrentWriteOperation(table.getMetaClient(), pendingInstants);
ConflictResolutionStrategy resolutionStrategy = config.getWriteConflictResolutionStrategy();
if (reloadActiveTimeline) {
table.getMetaClient().reloadActiveTimeline();
}
Stream<HoodieInstant> instantStream = Stream.concat(resolutionStrategy.getCandidateInstants(table.getMetaClient(), currentTxnOwnerInstant.get(), lastCompletedTxnOwnerInstant), completedInstantsDuringCurrentWriteOperation);
final ConcurrentOperation thisOperation = new ConcurrentOperation(currentTxnOwnerInstant.get(), thisCommitMetadata.orElse(new HoodieCommitMetadata()));
instantStream.forEach(instant -> {
try {
ConcurrentOperation otherOperation = new ConcurrentOperation(instant, table.getMetaClient());
if (resolutionStrategy.hasConflict(thisOperation, otherOperation)) {
LOG.info(((("Conflict encountered between current instant = " + thisOperation) + " and instant = ") + otherOperation) + ", attempting to resolve it...");
resolutionStrategy.resolveConflict(table, thisOperation, otherOperation);
}
} catch (IOException io) {
throw new HoodieWriteConflictException("Unable to resolve conflict, if present", io);
}
});
LOG.info("Successfully resolved conflicts, if any");
return thisOperation.getCommitMetadataOption();
}
return thisCommitMetadata;
}
| 3.26 |
hudi_TransactionUtils_getLastCompletedTxnInstantAndMetadata_rdh
|
/**
* Get the last completed transaction hoodie instant and {@link HoodieCommitMetadata#getExtraMetadata()}.
*
* @param metaClient
* @return the last completed instant and its extra metadata, if present
*/
public static Option<Pair<HoodieInstant, Map<String, String>>> getLastCompletedTxnInstantAndMetadata(HoodieTableMetaClient metaClient) {
Option<HoodieInstant> hoodieInstantOption = metaClient.getActiveTimeline().getCommitsTimeline().filterCompletedInstants().lastInstant();
return getHoodieInstantAndMetaDataPair(metaClient, hoodieInstantOption);
}
| 3.26 |
hudi_HoodieRecordPayload_preCombine_rdh
|
/**
* When more than one HoodieRecord have the same HoodieKey in the incoming batch, this function combines them before attempting to insert/upsert by taking in a schema.
* Implementation can leverage the schema to decide their business logic to do preCombine.
*
* @param oldValue
* instance of the old {@link HoodieRecordPayload} to be combined with.
* @param schema
* Payload related schema. For example use schema to overwrite old instance for specified fields that doesn't equal to default value.
* @param properties
* Payload related properties. For example pass the ordering field(s) name to extract from value in storage.
* @return the combined value
*/
@PublicAPIMethod(maturity = ApiMaturityLevel.EVOLVING)
default T preCombine(T oldValue, Schema schema, Properties properties) {
return preCombine(oldValue, properties);
}
| 3.26 |
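A minimal, hypothetical payload sketch showing how the preCombine contract above is commonly implemented (the record with the larger ordering value wins); it keeps the Avro record in memory for brevity and only overrides the most common methods, so it is not a production-ready payload.

```java
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.generic.IndexedRecord;
import org.apache.hudi.common.model.HoodieRecordPayload;
import org.apache.hudi.common.util.Option;

public class LatestWinsPayload implements HoodieRecordPayload<LatestWinsPayload> {
  private final IndexedRecord record;       // kept in memory for brevity; real payloads usually store Avro bytes
  private final Comparable<?> orderingVal;

  public LatestWinsPayload(IndexedRecord record, Comparable<?> orderingVal) {
    this.record = record;
    this.orderingVal = orderingVal;
  }

  @Override
  @SuppressWarnings("unchecked")
  public LatestWinsPayload preCombine(LatestWinsPayload oldValue) {
    // Keep whichever payload carries the greater ordering value.
    return ((Comparable<Object>) orderingVal).compareTo(oldValue.orderingVal) >= 0 ? this : oldValue;
  }

  @Override
  public Option<IndexedRecord> combineAndGetUpdateValue(IndexedRecord currentValue, Schema schema) throws IOException {
    // Updates simply overwrite the value on storage in this sketch.
    return getInsertValue(schema);
  }

  @Override
  public Option<IndexedRecord> getInsertValue(Schema schema) throws IOException {
    return Option.of(record);
  }

  @Override
  public Comparable<?> getOrderingValue() {
    return orderingVal;
  }
}
```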
hudi_HoodieRecordPayload_combineAndGetUpdateValue_rdh
|
/**
* This method lets you write custom merging/combining logic to produce new values as a function of the current value on storage and what's contained
* in this object. Implementations can leverage properties if required.
* <p>
* eg:
* 1) You are updating counters, you may want to add counts to currentValue and write back updated counts
* 2) You may be reading DB redo logs, and merge them with current image for a database row on storage
* </p>
*
* @param currentValue
* Current value in storage, to merge/combine this payload with
* @param schema
* Schema used for record
* @param properties
* Payload related properties. For example pass the ordering field(s) name to extract from value in storage.
* @return new combined/merged value to be written back to storage. EMPTY to skip writing this record.
*/
default Option<IndexedRecord> combineAndGetUpdateValue(IndexedRecord currentValue, Schema schema, Properties properties) throws IOException {
return combineAndGetUpdateValue(currentValue, schema);
}
| 3.26 |
hudi_HoodieRecordPayload_getOrderingValue_rdh
|
/**
* This method can be used to extract the ordering value of the payload for combining/merging,
* or 0 if no value is specified, which means natural order (arrival time is used).
*
* @return the ordering value
*/
@PublicAPIMethod(maturity = ApiMaturityLevel.STABLE)
default Comparable<?> getOrderingValue() {
// default natural order
return 0;
}
| 3.26 |
hudi_HoodieRecordPayload_getInsertValue_rdh
|
/**
* Generates an avro record out of the given HoodieRecordPayload, to be written out to storage. Called when writing a new value for the given
* HoodieKey, wherein there is no existing record in storage to be combined against. (i.e insert) Return EMPTY to skip writing this record.
* Implementations can leverage properties if required.
*
* @param schema
* Schema used for record
* @param properties
* Payload related properties. For example pass the ordering field(s) name to extract from value in storage.
* @return the {@link IndexedRecord} to be inserted.
*/
@PublicAPIMethod(maturity = ApiMaturityLevel.STABLE)
default Option<IndexedRecord> getInsertValue(Schema schema, Properties properties) throws IOException {
return getInsertValue(schema);
}
/**
* This method can be used to extract some metadata from HoodieRecordPayload. The metadata is passed to {@code WriteStatus.markSuccess()} and
* {@code WriteStatus.markFailure()}
| 3.26 |
hudi_RocksDBDAO_init_rdh
|
/**
* Initializes the RocksDB instance.
*/
private void init() {
try {
LOG.info("DELETING RocksDB persisted at " + rocksDBBasePath);
FileIOUtils.deleteDirectory(new File(rocksDBBasePath));
managedHandlesMap = new ConcurrentHashMap<>();
managedDescriptorMap = new ConcurrentHashMap<>();
// If already present, loads the existing column-family handles
final DBOptions v0 = new DBOptions().setCreateIfMissing(true).setCreateMissingColumnFamilies(true).setWalDir(rocksDBBasePath).setStatsDumpPeriodSec(300).setStatistics(new Statistics());
v0.setLogger(new Logger(v0) {
@Override
protected void log(InfoLogLevel infoLogLevel, String logMsg) {
switch (infoLogLevel) {
case DEBUG_LEVEL :
LOG.debug("From Rocks DB : {}", logMsg);
break;
case WARN_LEVEL :
LOG.warn("From Rocks DB : {}", logMsg);
break;
case ERROR_LEVEL :
case FATAL_LEVEL :
LOG.error("From Rocks DB : {}", logMsg);
break;
case HEADER_LEVEL :
case NUM_INFO_LOG_LEVELS :
case INFO_LEVEL :
default :
LOG.info("From Rocks DB : {}", logMsg);
break;
}
}
});
final List<ColumnFamilyDescriptor> managedColumnFamilies = loadManagedColumnFamilies(v0);
final List<ColumnFamilyHandle> v2 = new ArrayList<>(managedColumnFamilies.size());
FileIOUtils.mkdir(new File(rocksDBBasePath));
rocksDB = RocksDB.open(v0, rocksDBBasePath, managedColumnFamilies, v2);
ValidationUtils.checkArgument(v2.size() == managedColumnFamilies.size(), "Unexpected number of handles are returned");
for (int index = 0; index < v2.size(); index++) {
ColumnFamilyHandle handle = v2.get(index);
ColumnFamilyDescriptor descriptor = managedColumnFamilies.get(index);
String familyNameFromHandle = new String(handle.getName());
String familyNameFromDescriptor = new String(descriptor.getName());
ValidationUtils.checkArgument(familyNameFromDescriptor.equals(familyNameFromHandle), "Family Handles not in order with descriptors");
managedHandlesMap.put(familyNameFromHandle, handle);
managedDescriptorMap.put(familyNameFromDescriptor, descriptor);
}
} catch (RocksDBException | IOException re) {
LOG.error("Got exception opening Rocks DB instance ", re);
throw new HoodieException(re);
}
}
| 3.26 |
hudi_RocksDBDAO_put_rdh
|
/**
* Perform single PUT on a column-family.
*
* @param columnFamilyName
* Column family name
* @param key
* Key
* @param value
* Payload
* @param <T>
* Type of Payload
*/
public <K extends Serializable, T extends Serializable> void put(String columnFamilyName, K key, T value) {
try {
byte[] payload = serializePayload(value);
getRocksDB().put(managedHandlesMap.get(columnFamilyName), SerializationUtils.serialize(key), payload);
} catch (Exception e) {
throw new HoodieException(e);
}
}
| 3.26 |
hudi_RocksDBDAO_prefixSearch_rdh
|
/**
* Perform a prefix search and return stream of key-value pairs retrieved.
*
* @param columnFamilyName
* Column Family Name
* @param prefix
* Prefix Key
* @param <T>
* Type of value stored
*/
public <T extends Serializable> Stream<Pair<String, T>> prefixSearch(String columnFamilyName, String prefix) {
ValidationUtils.checkArgument(!closed);
final HoodieTimer timer = HoodieTimer.start();
long timeTakenMicro = 0;
List<Pair<String, T>> results = new LinkedList<>();
try (final RocksIterator it = getRocksDB().newIterator(managedHandlesMap.get(columnFamilyName))) {
it.seek(getUTF8Bytes(prefix));
while (it.isValid() && new String(it.key()).startsWith(prefix)) {
long beginTs = System.nanoTime();
T val = SerializationUtils.deserialize(it.value());
timeTakenMicro += (System.nanoTime() - beginTs) / 1000;
results.add(Pair.of(new String(it.key()), val));
it.next();
}
}
LOG.info((((((((("Prefix Search for (query=" + prefix) + ") on ") + columnFamilyName) + ". Total Time Taken (msec)=") + timer.endTimer()) + ". Serialization Time taken(micro)=") + timeTakenMicro) + ", num entries=") + results.size());
return results.stream();
}
| 3.26 |
hudi_RocksDBDAO_deleteInBatch_rdh
|
/**
* Helper to add delete operation in batch.
*
* @param batch
* Batch Handle
* @param columnFamilyName
* Column Family
* @param key
* Key
*/
public <K extends Serializable> void deleteInBatch(WriteBatch batch, String columnFamilyName, K key) {
try {
batch.delete(managedHandlesMap.get(columnFamilyName), SerializationUtils.serialize(key));
} catch (Exception e) {
throw new HoodieException(e);
}
}
| 3.26 |
hudi_RocksDBDAO_writeBatch_rdh
|
/**
* Perform a batch write operation.
*/
public void writeBatch(BatchHandler handler) {
try (WriteBatch batch = new WriteBatch()) {
handler.apply(batch);
getRocksDB().write(new WriteOptions(), batch);
} catch (RocksDBException re) {
throw new HoodieException(re);
}
}
| 3.26 |
hudi_RocksDBDAO_close_rdh
|
/**
* Close the DAO object.
*/
public synchronized void close() {
if (!closed) {
closed = true;
managedHandlesMap.values().forEach(AbstractImmutableNativeReference::close);
managedHandlesMap.clear();
managedDescriptorMap.clear();
getRocksDB().close();
try {
FileIOUtils.deleteDirectory(new File(rocksDBBasePath));
} catch (IOException e) {
throw new HoodieIOException(e.getMessage(), e);
}
}
}
| 3.26 |
hudi_RocksDBDAO_dropColumnFamily_rdh
|
/**
* Note : Does not delete from underlying DB. Just closes the handle.
*
* @param columnFamilyName
* Column Family Name
*/
public void dropColumnFamily(String columnFamilyName) {
ValidationUtils.checkArgument(!closed);
managedDescriptorMap.computeIfPresent(columnFamilyName, (colFamilyName, descriptor) -> {
ColumnFamilyHandle handle = managedHandlesMap.get(colFamilyName);
try {
getRocksDB().dropColumnFamily(handle);
handle.close();
} catch (RocksDBException e) {
throw new HoodieException(e);
}
managedHandlesMap.remove(columnFamilyName);
return null;
});
}
| 3.26 |
hudi_RocksDBDAO_getRocksDB_rdh
|
/**
* Create RocksDB if not initialized.
*/
private RocksDB getRocksDB() {
return rocksDB;
}
| 3.26 |
hudi_RocksDBDAO_addColumnFamily_rdh
|
/**
* Add a new column family to store.
*
* @param columnFamilyName
* Column family name
*/
public void addColumnFamily(String columnFamilyName) {
ValidationUtils.checkArgument(!closed);
managedDescriptorMap.computeIfAbsent(columnFamilyName, colFamilyName -> {
try {
ColumnFamilyDescriptor descriptor = getColumnFamilyDescriptor(StringUtils.getUTF8Bytes(colFamilyName));
ColumnFamilyHandle handle = getRocksDB().createColumnFamily(descriptor);
managedHandlesMap.put(colFamilyName, handle);
return descriptor;
} catch (RocksDBException e) {
throw new HoodieException(e);
}
});
}
| 3.26 |
hudi_RocksDBDAO_get_rdh
|
/**
* Retrieve a value for a given key in a column family.
*
* @param columnFamilyName
* Column Family Name
* @param key
* Key to be retrieved
* @param <T>
* Type of object stored.
*/
public <K extends Serializable, T extends Serializable> T get(String columnFamilyName, K key) {
ValidationUtils.checkArgument(!closed);
try {
byte[] val = getRocksDB().get(managedHandlesMap.get(columnFamilyName), SerializationUtils.serialize(key));
return val == null ? null : SerializationUtils.deserialize(val);
} catch (Exception e) {
throw new HoodieException(e);
}
}
| 3.26 |
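A hedged end-to-end sketch for the DAO rows above (addColumnFamily/put/get/prefixSearch/close); the constructor arguments and package path are assumptions, so treat this as illustrative wiring rather than the exact API.

```java
import org.apache.hudi.common.util.collection.RocksDBDAO; // assumed package path

public class RocksDbDaoExample {
  public static void main(String[] args) {
    // Assumed constructor: a logical name plus the local directory that backs the RocksDB files.
    RocksDBDAO dao = new RocksDBDAO("example-table", "/tmp/hudi-rocksdb-example");
    dao.addColumnFamily("files");
    dao.put("files", "partition1/file-001", "slice-metadata");

    String value = dao.get("files", "partition1/file-001");
    System.out.println(value); // slice-metadata

    dao.prefixSearch("files", "partition1/").forEach(pair -> System.out.println(pair.getLeft()));
    dao.close();
  }
}
```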
hudi_RocksDBDAO_iterator_rdh
|
/**
* Return Iterator of key-value pairs from RocksIterator.
*
* @param columnFamilyName
* Column Family Name
* @param <T>
* Type of value stored
*/
public <T extends Serializable> Iterator<T> iterator(String columnFamilyName) {
return new IteratorWrapper<>(getRocksDB().newIterator(managedHandlesMap.get(columnFamilyName)));
}
| 3.26 |
hudi_RocksDBDAO_prefixDelete_rdh
|
/**
* Perform a prefix delete on a column family, removing all key-value pairs whose keys start with the prefix.
*
* @param columnFamilyName
* Column Family Name
* @param prefix
* Prefix Key
* @param <T>
* Type of value stored
*/
public <T extends Serializable> void prefixDelete(String columnFamilyName, String prefix) {
ValidationUtils.checkArgument(!closed);
LOG.info((("Prefix DELETE (query=" + prefix) + ") on ") + columnFamilyName);
final RocksIterator it = getRocksDB().newIterator(managedHandlesMap.get(columnFamilyName));
it.seek(getUTF8Bytes(prefix));
// Find first and last keys to be deleted
String firstEntry = null;
String lastEntry = null;
while (it.isValid() && new String(it.key()).startsWith(prefix)) {
String result = new String(it.key());
it.next();
if (firstEntry == null) {
firstEntry = result;
}
lastEntry = result;
}
it.close();
if (null != firstEntry) {
try {
// This will not delete the last entry
getRocksDB().deleteRange(managedHandlesMap.get(columnFamilyName), getUTF8Bytes(firstEntry), getUTF8Bytes(lastEntry));
// Delete the last entry
getRocksDB().delete(getUTF8Bytes(lastEntry));
} catch (RocksDBException e) {
LOG.error("Got exception performing range delete");
throw new HoodieException(e);
}
}
}
| 3.26 |
hudi_RocksDBDAO_loadManagedColumnFamilies_rdh
|
/**
* Helper to load managed column family descriptors.
*/
private List<ColumnFamilyDescriptor> loadManagedColumnFamilies(DBOptions dbOptions) throws RocksDBException {
final List<ColumnFamilyDescriptor> managedColumnFamilies = new ArrayList<>();
final Options options = new Options(dbOptions, new ColumnFamilyOptions());
List<byte[]> existing = RocksDB.listColumnFamilies(options, rocksDBBasePath);
if (existing.isEmpty()) {
LOG.info("No column family found. Loading default");
managedColumnFamilies.add(m0(RocksDB.DEFAULT_COLUMN_FAMILY));
} else {
LOG.info("Loading column families :" + existing.stream().map(String::new).collect(Collectors.toList()));
managedColumnFamilies.addAll(existing.stream().map(RocksDBDAO::getColumnFamilyDescriptor).collect(Collectors.toList()));
}
return managedColumnFamilies;
}
| 3.26 |
hudi_RocksDBDAO_delete_rdh
|
/**
* Perform a single Delete operation.
*
* @param columnFamilyName
* Column Family name
* @param key
* Key to be deleted
*/
public <K extends Serializable> void delete(String columnFamilyName, K key) {
try {
getRocksDB().delete(managedHandlesMap.get(columnFamilyName), SerializationUtils.serialize(key));
} catch (Exception e) {
throw new HoodieException(e);
}
}
| 3.26 |
hudi_RocksDBDAO_putInBatch_rdh
|
/**
* Helper to add put operation in batch.
*
* @param batch
* Batch Handle
* @param columnFamilyName
* Column Family
* @param key
* Key
* @param value
* Payload
* @param <T>
* Type of payload
*/
public <K extends Serializable, T extends Serializable> void putInBatch(WriteBatch batch, String columnFamilyName, K key, T value) {
try {
byte[] keyBytes = SerializationUtils.serialize(key);
byte[] payload = serializePayload(value);
batch.put(managedHandlesMap.get(columnFamilyName), keyBytes, payload);
} catch (Exception e) {
throw new HoodieException(e);
}
}
| 3.26 |
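Batched mutations tie the earlier `writeBatch` row together with `putInBatch`/`deleteInBatch`: everything added to the batch is applied in one RocksDB write. This fragment continues the hypothetical `dao` and "files" column family from the earlier sketch.

```java
// Continuing the hypothetical dao from the earlier sketch; the handler receives the WriteBatch.
dao.writeBatch(batch -> {
  dao.putInBatch(batch, "files", "partition1/file-002", "slice-metadata-2");
  dao.putInBatch(batch, "files", "partition1/file-003", "slice-metadata-3");
  dao.deleteInBatch(batch, "files", "partition1/file-001");
});
```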
hudi_ImmutableTriple_getMiddle_rdh
|
/**
* {@inheritDoc }
*/
@Override
public M getMiddle() {
return middle;
}
| 3.26 |
hudi_ImmutableTriple_getLeft_rdh
|
// -----------------------------------------------------------------------
/**
* {@inheritDoc }
*/
@Override
public L getLeft() {
return left;
}
| 3.26 |
hudi_ImmutableTriple_of_rdh
|
/**
* <p>
* Obtains an immutable triple of from three objects inferring the generic types.
* </p>
*
* <p>
* This factory allows the triple to be created using inference to obtain the generic types.
* </p>
*
* @param <L>
* the left element type
* @param <M>
* the middle element type
* @param <R>
* the right element type
* @param left
* the left element, may be null
* @param middle
* the middle element, may be null
* @param right
* the right element, may be null
* @return a triple formed from the three parameters, not null
*/
public static <L, M, R> ImmutableTriple<L, M, R> of(final L left, final M middle, final R right) {
return new ImmutableTriple<L, M, R>(left, middle, right);
}
| 3.26 |
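A small usage sketch of the factory above; the generic types are inferred from the three arguments (package import assumed).

```java
import org.apache.hudi.common.util.collection.ImmutableTriple; // assumed package path

public class TripleExample {
  public static void main(String[] args) {
    ImmutableTriple<String, Integer, Boolean> triple = ImmutableTriple.of("file-001", 42, true);
    System.out.println(triple.getLeft() + " / " + triple.getMiddle() + " / " + triple.getRight());
  }
}
```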
hudi_ImmutableTriple_getRight_rdh
|
/**
* {@inheritDoc }
*/
@Override
public R getRight() {
return right;
}
| 3.26 |
hudi_HoodieEmptyRecord_readRecordPayload_rdh
|
/**
* NOTE: This method is declared final to make sure there's no polymorphism and therefore
* JIT compiler could perform more aggressive optimizations
*/
@Override
protected final T readRecordPayload(Kryo kryo, Input input) {
this.type = kryo.readObject(input, HoodieRecordType.class);
this.orderingVal = ((Comparable<?>) (kryo.readClassAndObject(input)));
// NOTE: [[EmptyRecord]]'s payload is always null
return null;
}
| 3.26 |
hudi_HoodieEmptyRecord_writeRecordPayload_rdh
|
/**
* NOTE: This method is declared final to make sure there's no polymorphism and therefore
* JIT compiler could perform more aggressive optimizations
*/
@Override
protected final void writeRecordPayload(T payload, Kryo kryo, Output output) {
kryo.writeObject(output, type);
// NOTE: Since [[orderingVal]] is polymorphic we have to write out its class
// to be able to properly deserialize it
kryo.writeClassAndObject(output, orderingVal);
}
| 3.26 |
hudi_Pipelines_compact_rdh
|
/**
* The compaction tasks pipeline.
*
* <p>The compaction plan operator monitors the new compaction plan on the timeline
* then distributes the sub-plans to the compaction tasks. Each compaction task then
* hands over its metadata to the commit task for the compaction transaction commit.
* The whole pipeline looks like the following:
*
* <pre>
* /=== | task1 | ===\
* | plan generation | ===> hash | commit |
* \=== | task2 | ===/
*
* Note: both the compaction plan generation task and commission task are singleton.
* </pre>
*
* @param conf
* The configuration
* @param dataStream
* The input data stream
* @return the compaction pipeline
*/
public static DataStreamSink<CompactionCommitEvent> compact(Configuration conf, DataStream<Object> dataStream) {
DataStreamSink<CompactionCommitEvent> compactionCommitEventDataStream = dataStream
// plan generation must be singleton
.transform("compact_plan_generate", TypeInformation.of(CompactionPlanEvent.class), new CompactionPlanOperator(conf)).setParallelism(1).setMaxParallelism(1)
// make the distribution strategy deterministic to avoid concurrent modifications on the same bucket files
.keyBy(plan -> plan.getOperation().getFileGroupId().getFileId())
.transform("compact_task", TypeInformation.of(CompactionCommitEvent.class), new CompactOperator(conf)).setParallelism(conf.getInteger(FlinkOptions.COMPACTION_TASKS))
// compaction commit should be singleton
.addSink(new CompactionCommitSink(conf)).name("compact_commit").setParallelism(1);
compactionCommitEventDataStream.getTransformation().setMaxParallelism(1);
return compactionCommitEventDataStream;
}
| 3.26 |
hudi_Pipelines_boundedBootstrap_rdh
|
/**
* Constructs bootstrap pipeline for batch execution mode.
* The indexing data set is loaded before the actual data write
* in order to support batch UPSERT.
*/
private static DataStream<HoodieRecord> boundedBootstrap(Configuration conf, RowType rowType, DataStream<RowData> dataStream) {
final RowDataKeyGen rowDataKeyGen = RowDataKeyGen.instance(conf, rowType);
// shuffle by partition keys
dataStream = dataStream.keyBy(rowDataKeyGen::getPartitionPath);
return rowDataToHoodieRecord(conf, rowType, dataStream).transform("batch_index_bootstrap", TypeInformation.of(HoodieRecord.class), new BatchBootstrapOperator<>(conf)).setParallelism(conf.getOptional(FlinkOptions.INDEX_BOOTSTRAP_TASKS).orElse(dataStream.getParallelism())).uid(opUID("batch_index_bootstrap", conf));
}
| 3.26 |
hudi_Pipelines_bootstrap_rdh
|
/**
* Constructs bootstrap pipeline.
* The bootstrap operator loads the existing data index (primary key to file id mapping),
* then sends the indexing data set to the subsequent operator (usually the bucket assign operator).
*
* @param conf
* The configuration
* @param rowType
* The row type
* @param dataStream
* The data stream
* @param bounded
* Whether the source is bounded
* @param overwrite
* Whether it is insert overwrite
*/
public static DataStream<HoodieRecord> bootstrap(Configuration conf, RowType rowType, DataStream<RowData> dataStream, boolean bounded, boolean overwrite) {
final boolean globalIndex = conf.getBoolean(FlinkOptions.INDEX_GLOBAL_ENABLED);
if (overwrite || OptionsResolver.isBucketIndexType(conf)) {
return rowDataToHoodieRecord(conf, rowType, dataStream);
} else if ((bounded && (!globalIndex)) && OptionsResolver.isPartitionedTable(conf)) {
return boundedBootstrap(conf, rowType, dataStream);
} else {
return streamBootstrap(conf, rowType, dataStream, bounded);
}
}
| 3.26 |
hudi_Pipelines_bulkInsert_rdh
|
/**
* Bulk insert the input dataset at once.
*
* <p>By default, the input dataset would shuffle by the partition path first then
* sort by the partition path before passing around to the write function.
* The whole pipeline looks like the following:
*
* <pre>
* | input1 | ===\ /=== |sorter| === | task1 | (p1, p2)
* shuffle
* | input2 | ===/ \=== |sorter| === | task2 | (p3, p4)
*
* Note: Both input1 and input2's dataset come from partitions: p1, p2, p3, p4
* </pre>
*
* <p>The write task switches to new file handle each time it receives a record
* from the different partition path, the shuffle and sort would reduce small files.
*
* <p>The bulk insert should be run in batch execution mode.
*
* @param conf
* The configuration
* @param rowType
* The input row type
* @param dataStream
* The input data stream
* @return the bulk insert data stream sink
*/
public static DataStreamSink<Object> bulkInsert(Configuration conf, RowType rowType, DataStream<RowData> dataStream) {
WriteOperatorFactory<RowData> operatorFactory = BulkInsertWriteOperator.getFactory(conf, rowType);
if (OptionsResolver.isBucketIndexType(conf)) {
// TODO support bulk insert for consistent bucket index
if (OptionsResolver.isConsistentHashingBucketIndexType(conf)) {
throw new HoodieException("Consistent hashing bucket index does not work with bulk insert using FLINK engine. Use simple bucket index or Spark engine.");
}
String indexKeys = OptionsResolver.getIndexKeyField(conf);
int numBuckets = conf.getInteger(FlinkOptions.BUCKET_INDEX_NUM_BUCKETS);
BucketIndexPartitioner<HoodieKey> partitioner = new BucketIndexPartitioner<>(numBuckets, indexKeys);
RowDataKeyGen keyGen = RowDataKeyGen.instance(conf, rowType);
RowType rowTypeWithFileId = BucketBulkInsertWriterHelper.rowTypeWithFileId(rowType);
InternalTypeInfo<RowData> typeInfo = InternalTypeInfo.of(rowTypeWithFileId);
boolean needFixedFileIdSuffix = OptionsResolver.isNonBlockingConcurrencyControl(conf);
Map<String, String> bucketIdToFileId = new HashMap<>();
dataStream = dataStream.partitionCustom(partitioner, keyGen::getHoodieKey).map(record -> BucketBulkInsertWriterHelper.rowWithFileId(bucketIdToFileId, keyGen, record, indexKeys, numBuckets, needFixedFileIdSuffix), typeInfo).setParallelism(conf.getInteger(FlinkOptions.WRITE_TASKS));// same parallelism as write task to avoid shuffle
if (conf.getBoolean(FlinkOptions.WRITE_BULK_INSERT_SORT_INPUT)) {
SortOperatorGen sortOperatorGen = BucketBulkInsertWriterHelper.getFileIdSorterGen(rowTypeWithFileId);
// same parallelism as write task to avoid shuffle
dataStream = dataStream.transform("file_sorter", typeInfo, sortOperatorGen.createSortOperator(conf)).setParallelism(conf.getInteger(FlinkOptions.WRITE_TASKS));
ExecNodeUtil.setManagedMemoryWeight(dataStream.getTransformation(), (conf.getInteger(FlinkOptions.WRITE_SORT_MEMORY) * 1024L) * 1024L);
}
return dataStream.transform(opName("bucket_bulk_insert", conf), TypeInformation.of(Object.class), operatorFactory).uid(opUID("bucket_bulk_insert", conf)).setParallelism(conf.getInteger(FlinkOptions.WRITE_TASKS)).addSink(DummySink.INSTANCE).name("dummy");
}
final String[] v10 = FilePathUtils.extractPartitionKeys(conf);
if (v10.length > 0) {
RowDataKeyGen rowDataKeyGen = RowDataKeyGen.instance(conf, rowType);
if (conf.getBoolean(FlinkOptions.WRITE_BULK_INSERT_SHUFFLE_INPUT)) {
// shuffle by partition keys
// use #partitionCustom instead of #keyBy to avoid duplicate sort operations,
// see BatchExecutionUtils#applyBatchExecutionSettings for details.
Partitioner<String> partitioner = (key, channels) -> KeyGroupRangeAssignment.assignKeyToParallelOperator(key, KeyGroupRangeAssignment.computeDefaultMaxParallelism(conf.getInteger(FlinkOptions.WRITE_TASKS)), channels);
dataStream = dataStream.partitionCustom(partitioner, rowDataKeyGen::getPartitionPath);
}
if (conf.getBoolean(FlinkOptions.WRITE_BULK_INSERT_SORT_INPUT)) {
String[] sortFields = v10;
String v14 = "sorter:(partition_key)";
if (conf.getBoolean(FlinkOptions.WRITE_BULK_INSERT_SORT_INPUT_BY_RECORD_KEY)) {
String[] recordKeyFields = conf.getString(FlinkOptions.RECORD_KEY_FIELD).split(",");
ArrayList<String> sortList = new ArrayList<>(Arrays.asList(v10));
Collections.addAll(sortList, recordKeyFields);
sortFields = sortList.toArray(new String[0]);
v14 = "sorter:(partition_key, record_key)";
}
SortOperatorGen sortOperatorGen = new SortOperatorGen(rowType, sortFields);
// sort by partition keys or (partition keys and record keys)
dataStream = dataStream.transform(v14, InternalTypeInfo.of(rowType), sortOperatorGen.createSortOperator(conf)).setParallelism(conf.getInteger(FlinkOptions.WRITE_TASKS));
ExecNodeUtil.setManagedMemoryWeight(dataStream.getTransformation(), (conf.getInteger(FlinkOptions.WRITE_SORT_MEMORY) * 1024L) * 1024L);
}
}
return // follow the parallelism of upstream operators to avoid shuffle
dataStream.transform(opName("hoodie_bulk_insert_write", conf), TypeInformation.of(Object.class), operatorFactory).setParallelism(conf.getInteger(FlinkOptions.WRITE_TASKS)).addSink(DummySink.INSTANCE).name("dummy");
}
| 3.26 |
hudi_Pipelines_rowDataToHoodieRecord_rdh
|
/**
* Transforms the row data to hoodie records.
*/
public static DataStream<HoodieRecord> rowDataToHoodieRecord(Configuration conf, RowType rowType, DataStream<RowData> dataStream) {
return dataStream.map(RowDataToHoodieFunctions.create(rowType, conf), TypeInformation.of(HoodieRecord.class)).setParallelism(dataStream.getParallelism()).name("row_data_to_hoodie_record");
}
| 3.26 |
hudi_Pipelines_hoodieStreamWrite_rdh
|
/**
* The streaming write pipeline.
*
* <p>The input dataset shuffles by the primary key first then
* shuffles by the file group ID before passing around to the write function.
* The whole pipeline looks like the following:
*
* <pre>
* | input1 | ===\ /=== | bucket assigner | ===\ /=== | task1 |
* shuffle(by PK) shuffle(by bucket ID)
* | input2 | ===/ \=== | bucket assigner | ===/ \=== | task2 |
*
* Note: a file group must be handled by one write task to avoid write conflict.
* </pre>
*
* <p>The bucket assigner assigns the inputs to suitable file groups, the write task caches
* and flushes the data set to disk.
*
* @param conf
* The configuration
* @param dataStream
* The input data stream
* @return the stream write data stream pipeline
*/
public static DataStream<Object> hoodieStreamWrite(Configuration conf, DataStream<HoodieRecord> dataStream) {
if (OptionsResolver.isBucketIndexType(conf)) {
HoodieIndex.BucketIndexEngineType bucketIndexEngineType = OptionsResolver.getBucketEngineType(conf);
switch (bucketIndexEngineType) {
case SIMPLE :
int bucketNum = conf.getInteger(FlinkOptions.BUCKET_INDEX_NUM_BUCKETS);
String indexKeyFields = OptionsResolver.getIndexKeyField(conf);
BucketIndexPartitioner<HoodieKey> partitioner = new BucketIndexPartitioner<>(bucketNum, indexKeyFields);
return dataStream.partitionCustom(partitioner, HoodieRecord::getKey).transform(opName("bucket_write", conf), TypeInformation.of(Object.class), BucketStreamWriteOperator.getFactory(conf)).uid(opUID("bucket_write", conf)).setParallelism(conf.getInteger(FlinkOptions.WRITE_TASKS));
case CONSISTENT_HASHING :
if (OptionsResolver.isInsertOverwrite(conf)) {
// TODO support insert overwrite for consistent bucket index
throw new HoodieException("Consistent hashing bucket index does not work with insert overwrite using FLINK engine. Use simple bucket index or Spark engine.");
}
return dataStream.transform(opName("consistent_bucket_assigner", conf), TypeInformation.of(HoodieRecord.class), new ProcessOperator<>(new ConsistentBucketAssignFunction(conf))).uid(opUID("consistent_bucket_assigner", conf)).setParallelism(conf.getInteger(FlinkOptions.BUCKET_ASSIGN_TASKS)).keyBy(record -> record.getCurrentLocation().getFileId()).transform(opName("consistent_bucket_write", conf), TypeInformation.of(Object.class), BucketStreamWriteOperator.getFactory(conf)).uid(opUID("consistent_bucket_write", conf)).setParallelism(conf.getInteger(FlinkOptions.WRITE_TASKS));
default :
throw new HoodieNotSupportedException("Unknown bucket index engine type: " + bucketIndexEngineType);
}
} else {
WriteOperatorFactory<HoodieRecord> operatorFactory = StreamWriteOperator.getFactory(conf);
return // shuffle by fileId(bucket id)
// Key-by record key, to avoid multiple subtasks write to a bucket at the same time
dataStream.keyBy(HoodieRecord::getRecordKey).transform("bucket_assigner", TypeInformation.of(HoodieRecord.class), new KeyedProcessOperator<>(new BucketAssignFunction<>(conf))).uid(opUID("bucket_assigner", conf)).setParallelism(conf.getInteger(FlinkOptions.BUCKET_ASSIGN_TASKS)).keyBy(record -> record.getCurrentLocation().getFileId()).transform(opName("stream_write", conf), TypeInformation.of(Object.class), operatorFactory).uid(opUID("stream_write", conf)).setParallelism(conf.getInteger(FlinkOptions.WRITE_TASKS));
}
}
| 3.26 |
hudi_Pipelines_cluster_rdh
|
/**
* The clustering tasks pipeline.
*
* <p>The clustering plan operator monitors the new clustering plan on the timeline
* then distributes the sub-plans to the clustering tasks. Each clustering task then
* hands over its metadata to the commit task for the clustering transaction commit.
* The whole pipeline looks like the following:
*
* <pre>
* /=== | task1 | ===\
* | plan generation | ===> hash | commit |
* \=== | task2 | ===/
*
* Note: both the clustering plan generation task and commission task are singleton.
* </pre>
*
* @param conf
* The configuration
* @param rowType
* The input row type
* @param dataStream
* The input data stream
* @return the clustering pipeline
*/
public static DataStreamSink<ClusteringCommitEvent> cluster(Configuration conf, RowType rowType, DataStream<Object> dataStream) {
DataStream<ClusteringCommitEvent> clusteringStream = dataStream
// plan generation must be singleton
.transform("cluster_plan_generate", TypeInformation.of(ClusteringPlanEvent.class), new ClusteringPlanOperator(conf)).setParallelism(1).setMaxParallelism(1)
// make the distribution strategy deterministic to avoid concurrent modifications on the same bucket files
.keyBy(plan -> plan.getClusteringGroupInfo().getOperations().stream().map(ClusteringOperation::getFileId).collect(Collectors.joining()))
.transform("clustering_task", TypeInformation.of(ClusteringCommitEvent.class), new ClusteringOperator(conf, rowType)).setParallelism(conf.getInteger(FlinkOptions.CLUSTERING_TASKS));
if (OptionsResolver.sortClusteringEnabled(conf)) {
ExecNodeUtil.setManagedMemoryWeight(clusteringStream.getTransformation(), (conf.getInteger(FlinkOptions.WRITE_SORT_MEMORY) * 1024L) * 1024L);
}
DataStreamSink<ClusteringCommitEvent> clusteringCommitEventDataStream = clusteringStream.addSink(new ClusteringCommitSink(conf)).name("clustering_commit").setParallelism(1);// clustering commit should be singleton
clusteringCommitEventDataStream.getTransformation().setMaxParallelism(1);
return clusteringCommitEventDataStream;
}
| 3.26 |
hudi_HoodieIngestionService_startIngestion_rdh
|
/**
* Entrypoint to start ingestion.
* <p>
* Depending on the ingestion mode, this method will
* <li>either start a loop as implemented in {@link #startService()} for continuous mode
* <li>or do one-time ingestion as implemented in {@link #ingestOnce()} for non-continuous mode
*/
public void startIngestion() {
if (ingestionConfig.getBoolean(INGESTION_IS_CONTINUOUS)) {
LOG.info("Ingestion service starts running in continuous mode");
start(this::onIngestionCompletes);
try {
waitForShutdown();
} catch (Exception e) {
throw new HoodieIngestionException("Ingestion service was shut down with exception.", e);
}
LOG.info("Ingestion service (continuous mode) has been shut down.");
} else {
LOG.info("Ingestion service starts running in run-once mode");
ingestOnce();
LOG.info("Ingestion service (run-once mode) has been shut down.");
}
}
| 3.26 |
hudi_HoodieIngestionService_onIngestionCompletes_rdh
|
/**
* A callback method to be invoked after ingestion completes.
* <p>
* For continuous mode, this is invoked once after exiting the ingestion loop.
*/
protected boolean onIngestionCompletes(boolean hasError) {
return true;
}
| 3.26 |
hudi_HoodieIngestionService_requestShutdownIfNeeded_rdh
|
/**
* To determine if shutdown should be requested to allow gracefully terminate the ingestion in continuous mode.
* <p>
* Subclasses should implement the logic to make the decision. If the shutdown condition is met, the implementation
* should call {@link #shutdown(boolean)} to indicate the request.
*
* @see PostWriteTerminationStrategy
*/
protected boolean requestShutdownIfNeeded(Option<HoodieData<WriteStatus>> lastWriteStatus) {
return false;
}
| 3.26 |
hudi_HoodieIngestionService_startService_rdh
|
/**
* The main loop for running ingestion in continuous mode.
*/
@Override
protected Pair<CompletableFuture, ExecutorService> startService() {
ExecutorService executor = Executors.newFixedThreadPool(1);
return Pair.of(CompletableFuture.supplyAsync(() -> {
try {
while (!isShutdownRequested()) {
long ingestionStartEpochMillis = System.currentTimeMillis();
ingestOnce();
boolean requested = requestShutdownIfNeeded(Option.empty());
if (!requested) {
sleepBeforeNextIngestion(ingestionStartEpochMillis);
}
}
} finally {
executor.shutdownNow();
}
return true;
}, executor), executor);
}
| 3.26 |
hudi_WriteMetadataEvent_builder_rdh
|
/**
* Returns the builder for {@link WriteMetadataEvent}.
*/
public static Builder builder() {
return new Builder();
}
| 3.26 |
hudi_WriteMetadataEvent_mergeWith_rdh
|
/**
* Merges this event with given {@link WriteMetadataEvent} {@code other}.
*
* @param other
* The event to be merged
*/
public void mergeWith(WriteMetadataEvent other) {
ValidationUtils.checkArgument(this.taskID == other.taskID);
// the instant time could be monotonically increasing
this.instantTime = other.instantTime;
this.lastBatch |= other.lastBatch;// true if one of the event lastBatch is true
List<WriteStatus> statusList = new ArrayList<>();
statusList.addAll(this.writeStatuses);
statusList.addAll(other.writeStatuses);
this.writeStatuses = statusList;
}
| 3.26 |
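A hedged sketch of the merge behavior above: two events from the same task are combined, with write statuses concatenated and `lastBatch` OR-ed. The builder setters used here (`taskID`, `instantTime`, `writeStatus`, `lastBatch`) are assumptions modeled on the `emptyBootstrap` row further down.

```java
// All builder method names below are assumptions; see the lead-in.
WriteMetadataEvent first = WriteMetadataEvent.builder()
    .taskID(1)
    .instantTime("20231001120000000")
    .writeStatus(java.util.Collections.emptyList())
    .lastBatch(false)
    .build();
WriteMetadataEvent second = WriteMetadataEvent.builder()
    .taskID(1)
    .instantTime("20231001120000000")
    .writeStatus(java.util.Collections.emptyList())
    .lastBatch(true)
    .build();
first.mergeWith(second); // first now carries both (empty) status lists and lastBatch == true
```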
hudi_WriteMetadataEvent_isReady_rdh
|
/**
* Returns whether the event is ready to commit.
*/
public boolean isReady(String currentInstant) {
return lastBatch && this.instantTime.equals(currentInstant);
}
| 3.26 |
hudi_WriteMetadataEvent_emptyBootstrap_rdh
|
// -------------------------------------------------------------------------
// Utilities
// -------------------------------------------------------------------------
/**
* Creates empty bootstrap event for task {@code taskId}.
*
* <p>The event indicates that the new instant can start directly,
* there is no old instant write statuses to recover.
*/
public static WriteMetadataEvent emptyBootstrap(int taskId) {
return WriteMetadataEvent.builder().taskID(taskId).m0(BOOTSTRAP_INSTANT).writeStatus(Collections.emptyList()).bootstrap(true).build();
}
| 3.26 |
hudi_HoodieAvroUtils_needsRewriteToString_rdh
|
/**
* Helper for recordNeedsRewriteForExtendedAvroTypePromotion. Returns true if the schema type is
* int, long, float, double, or bytes, because Avro doesn't support evolution from those types to
* string, so some intervention is needed.
*/
private static boolean needsRewriteToString(Schema schema) {
switch (schema.getType()) {
case INT :
case LONG :
case FLOAT :
case DOUBLE :
case BYTES :
return true;
default :
return false;
}
}
| 3.26 |
hudi_HoodieAvroUtils_getRecordKeyPartitionPathSchema_rdh
|
/**
* Fetch schema for record key and partition path.
*/
public static Schema getRecordKeyPartitionPathSchema() {
List<Schema.Field> toBeAddedFields = new ArrayList<>();
Schema recordSchema = Schema.createRecord("HoodieRecordKey", "", "", false);
Schema.Field recordKeyField = new Schema.Field(HoodieRecord.RECORD_KEY_METADATA_FIELD, METADATA_FIELD_SCHEMA, "", JsonProperties.NULL_VALUE);
Schema.Field partitionPathField = new Schema.Field(HoodieRecord.PARTITION_PATH_METADATA_FIELD, METADATA_FIELD_SCHEMA, "", JsonProperties.NULL_VALUE);
toBeAddedFields.add(recordKeyField);
toBeAddedFields.add(partitionPathField);
recordSchema.setFields(toBeAddedFields);
return recordSchema;
}
| 3.26 |
hudi_HoodieAvroUtils_wrapValueIntoAvro_rdh
|
/**
* Wraps a value into Avro type wrapper.
*
* @param value
* Java value.
* @return A wrapped value with Avro type wrapper.
*/
public static Object wrapValueIntoAvro(Comparable<?> value) {
if (value == null) {
return null;
} else if ((value instanceof Date) || (value instanceof LocalDate)) {
// NOTE: Due to breaking changes in code-gen b/w Avro 1.8.2 and 1.10, we can't
// rely on logical types to do proper encoding of the native Java types,
// and hereby have to encode value manually
LocalDate localDate = (value instanceof LocalDate) ? ((LocalDate) (value)) : ((Date) (value)).toLocalDate();
return DateWrapper.newBuilder(DATE_WRAPPER_BUILDER_STUB.get()).setValue(((int) (localDate.toEpochDay()))).build();
} else if (value instanceof BigDecimal) {
Schema valueSchema = DecimalWrapper.SCHEMA$.getField("value").schema();
BigDecimal upcastDecimal = tryUpcastDecimal(((BigDecimal) (value)), ((LogicalTypes.Decimal) (valueSchema.getLogicalType())));
return DecimalWrapper.newBuilder(DECIMAL_WRAPPER_BUILDER_STUB.get()).setValue(AVRO_DECIMAL_CONVERSION.toBytes(upcastDecimal, valueSchema, valueSchema.getLogicalType())).build();
} else if (value instanceof Timestamp) {
// NOTE: Due to breaking changes in code-gen b/w Avro 1.8.2 and 1.10, we can't
// rely on logical types to do proper encoding of the native Java types,
// and hereby have to encode value manually
Instant instant = ((Timestamp) (value)).toInstant();
return TimestampMicrosWrapper.newBuilder(TIMESTAMP_MICROS_WRAPPER_BUILDER_STUB.get()).setValue(instantToMicros(instant)).build();
} else if (value instanceof Boolean) {
return BooleanWrapper.newBuilder(BOOLEAN_WRAPPER_BUILDER_STUB.get()).setValue(((Boolean) (value))).build();
} else if (value instanceof Integer) {
return IntWrapper.newBuilder(INT_WRAPPER_BUILDER_STUB.get()).setValue(((Integer) (value))).build();
} else if (value instanceof Long) {
return LongWrapper.newBuilder(LONG_WRAPPER_BUILDER_STUB.get()).setValue(((Long) (value))).build();
} else if (value instanceof Float) {
return FloatWrapper.newBuilder(FLOAT_WRAPPER_BUILDER_STUB.get()).setValue(((Float) (value))).build();
} else if (value instanceof Double) {
return DoubleWrapper.newBuilder(DOUBLE_WRAPPER_BUILDER_STUB.get()).setValue(((Double) (value))).build();
} else if (value instanceof ByteBuffer) {
return BytesWrapper.newBuilder(BYTES_WRAPPER_BUILDER_STUB.get()).setValue(((ByteBuffer) (value))).build();
} else if ((value instanceof String) || (value instanceof Utf8)) {
return StringWrapper.newBuilder(STRING_WRAPPER_BUILDER_STUB.get()).setValue(value.toString()).build();
} else {
throw new UnsupportedOperationException(String.format("Unsupported type of the value (%s)", value.getClass()));
}
}
| 3.26 |
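A short usage sketch (not part of the snippet above): round-tripping a value through the wrapper types together with unwrapAvroValueWrapper, which appears further below. The org.apache.hudi.avro.HoodieAvroUtils import path is an assumption here.

import org.apache.hudi.avro.HoodieAvroUtils;

public class WrapperRoundTripSketch {
  public static void main(String[] args) {
    Comparable<?> original = 42L;
    // Long values are wrapped into a LongWrapper Avro record.
    Object wrapped = HoodieAvroUtils.wrapValueIntoAvro(original);
    // Unwrapping restores the native Java value.
    Comparable<?> unwrapped = HoodieAvroUtils.unwrapAvroValueWrapper(wrapped);
    System.out.println(original.equals(unwrapped)); // true
  }
}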
hudi_HoodieAvroUtils_jsonBytesToAvro_rdh
|
/**
* Convert json bytes back into avro record.
*/
public static GenericRecord jsonBytesToAvro(byte[] bytes, Schema schema) throws IOException {
ByteArrayInputStream bio = new ByteArrayInputStream(bytes);
JsonDecoder jsonDecoder = DecoderFactory.get().jsonDecoder(schema, bio);
GenericDatumReader<GenericRecord> reader = new GenericDatumReader<>(schema);
return reader.read(null, jsonDecoder);
}
| 3.26 |
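A hedged usage sketch: decoding Avro-style JSON bytes into a GenericRecord with a made-up two-field schema; only plain Avro plus the method above are used.

import java.nio.charset.StandardCharsets;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.hudi.avro.HoodieAvroUtils;

public class JsonBytesToAvroSketch {
  public static void main(String[] args) throws Exception {
    Schema schema = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"User\",\"fields\":["
            + "{\"name\":\"id\",\"type\":\"long\"},{\"name\":\"name\",\"type\":\"string\"}]}");
    byte[] json = "{\"id\": 1, \"name\": \"alice\"}".getBytes(StandardCharsets.UTF_8);
    GenericRecord record = HoodieAvroUtils.jsonBytesToAvro(json, schema);
    System.out.println(record.get("name")); // alice
  }
}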
hudi_HoodieAvroUtils_getNestedFieldValAsString_rdh
|
/**
* Obtain value of the provided field as string, denoted by dot notation. e.g: a.b.c
*/
public static String getNestedFieldValAsString(GenericRecord record, String fieldName, boolean returnNullIfNotFound, boolean consistentLogicalTimestampEnabled) {
Object obj = getNestedFieldVal(record, fieldName, returnNullIfNotFound, consistentLogicalTimestampEnabled);
return StringUtils.objToString(obj);
}
| 3.26 |
hudi_HoodieAvroUtils_generateProjectionSchema_rdh
|
/**
* Generate a reader schema off the provided writeSchema, to just project out the provided columns.
*/
public static Schema generateProjectionSchema(Schema originalSchema, List<String> fieldNames) {
Map<String, Field> schemaFieldsMap = originalSchema.getFields().stream().map(r -> Pair.of(r.name().toLowerCase(), r)).collect(Collectors.toMap(Pair::getLeft, Pair::getRight));
List<Schema.Field> projectedFields = new ArrayList<>();
for (String fn : fieldNames) {
Schema.Field field = schemaFieldsMap.get(fn.toLowerCase());
if (field == null) {
throw new HoodieException(((("Field " + fn) + " not found in log schema. Query cannot proceed! ") + "Derived Schema Fields: ") + new ArrayList<>(schemaFieldsMap.keySet()));
} else {
projectedFields.add(new Schema.Field(field.name(), field.schema(), field.doc(), field.defaultVal()));
}
}
Schema projectedSchema = Schema.createRecord(originalSchema.getName(), originalSchema.getDoc(), originalSchema.getNamespace(), originalSchema.isError());
projectedSchema.setFields(projectedFields);
return projectedSchema;
}
| 3.26 |
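An illustrative sketch of projecting a schema down to a subset of columns; the Trip schema is invented for the example.

import java.util.Arrays;
import org.apache.avro.Schema;
import org.apache.hudi.avro.HoodieAvroUtils;

public class ProjectionSchemaSketch {
  public static void main(String[] args) {
    Schema full = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"Trip\",\"fields\":["
            + "{\"name\":\"uuid\",\"type\":\"string\"},"
            + "{\"name\":\"rider\",\"type\":\"string\"},"
            + "{\"name\":\"fare\",\"type\":\"double\"}]}");
    // Field lookup is case-insensitive; unknown fields raise a HoodieException.
    Schema projected = HoodieAvroUtils.generateProjectionSchema(full, Arrays.asList("uuid", "fare"));
    System.out.println(projected.getFields().size()); // 2
  }
}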
hudi_HoodieAvroUtils_toJavaDate_rdh
|
/**
* Converts days since epoch to a Date
* <p>
* NOTE: This method could only be used in tests
*
* @VisibleForTesting */
public static Date toJavaDate(int days) {
LocalDate date = LocalDate.ofEpochDay(days);
ZoneId defaultZoneId = ZoneId.systemDefault();
ZonedDateTime zonedDateTime = date.atStartOfDay(defaultZoneId);
return new Date(zonedDateTime.toInstant().toEpochMilli());
}
| 3.26 |
hudi_HoodieAvroUtils_convertValueForSpecificDataTypes_rdh
|
/**
* This method converts values for fields with certain Avro/Parquet data types that require special handling.
*
* @param fieldSchema
* avro field schema
* @param fieldValue
* avro field value
* @return field value either converted (for certain data types) or as it is.
*/
public static Object convertValueForSpecificDataTypes(Schema fieldSchema, Object fieldValue, boolean consistentLogicalTimestampEnabled) {
if (fieldSchema == null) {
return fieldValue;
} else if (fieldValue == null) {
checkState(isNullable(fieldSchema));
return null;
}
return convertValueForAvroLogicalTypes(resolveNullableSchema(fieldSchema), fieldValue,
consistentLogicalTimestampEnabled);
}
| 3.26 |
hudi_HoodieAvroUtils_rewriteRecord_rdh
|
/**
* Given an Avro record with a given schema, rewrites it into the new schema while setting fields only from the new
* schema.
* <p>
* NOTE: This method is rewriting every record's field that is record itself recursively. It's
* caller's responsibility to make sure that no unnecessary re-writing occurs (by preemptively
* checking whether the record does require re-writing to adhere to the new schema)
* <p>
* NOTE: Here, the assumption is that you cannot go from an evolved schema (schema with (N) fields)
* to an older schema (schema with (N-1) fields). All fields present in the older record schema MUST be present in the
* new schema and the default/existing values are carried over.
* <p>
* This particular method does the following:
* <ol>
* <li>Create a new empty GenericRecord with the new schema.</li>
* <li>For GenericRecord, copy over the data from the old schema to the new schema or set default values for all
* fields of this transformed schema</li>
* <li>For SpecificRecord, hoodie_metadata_fields have a special treatment (see below)</li>
* </ol>
* <p>
* For SpecificRecord we ignore Hudi Metadata fields, because for code generated
* avro classes (HoodieMetadataRecord), the avro record is a SpecificBaseRecord type instead of a GenericRecord.
* SpecificBaseRecord throws null pointer exception for record.get(name) if name is not present in the schema of the
* record (which happens when converting a SpecificBaseRecord without hoodie_metadata_fields to a new record with it).
* In this case, we do NOT set the defaults for the hoodie_metadata_fields explicitly, instead, the new record assumes
* the default defined in the avro schema itself.
* TODO: See if we can always pass GenericRecord instead of SpecificBaseRecord in some cases.
*/
public static GenericRecord rewriteRecord(GenericRecord oldRecord, Schema newSchema) {
GenericRecord newRecord = new GenericData.Record(newSchema);
boolean isSpecificRecord = oldRecord instanceof SpecificRecordBase;
for (Schema.Field f : newSchema.getFields()) {
if (!(isSpecificRecord && isMetadataField(f.name()))) {
copyOldValueOrSetDefault(oldRecord, newRecord, f);
}
}
return newRecord;
}
| 3.26 |
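A minimal sketch of the rewrite path, assuming a newly added nullable column with a default: existing values carry over and the new field falls back to its schema default.

import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.hudi.avro.HoodieAvroUtils;

public class RewriteRecordSketch {
  public static void main(String[] args) {
    Schema oldSchema = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"Rec\",\"fields\":[{\"name\":\"id\",\"type\":\"long\"}]}");
    Schema newSchema = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"Rec\",\"fields\":["
            + "{\"name\":\"id\",\"type\":\"long\"},"
            + "{\"name\":\"note\",\"type\":[\"null\",\"string\"],\"default\":null}]}");
    GenericRecord oldRecord = new GenericData.Record(oldSchema);
    oldRecord.put("id", 7L);
    GenericRecord rewritten = HoodieAvroUtils.rewriteRecord(oldRecord, newSchema);
    System.out.println(rewritten.get("id") + ", " + rewritten.get("note")); // 7, null
  }
}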
hudi_HoodieAvroUtils_recordToBytes_rdh
|
/**
* TODO serialize other type of record.
*/
public static Option<byte[]> recordToBytes(HoodieRecord record, Schema schema) throws IOException {
return Option.of(HoodieAvroUtils.indexedRecordToBytes(record.toIndexedRecord(schema, new Properties()).get().getData()));
}
| 3.26 |
hudi_HoodieAvroUtils_avroToJson_rdh
|
/**
* Convert a given avro record to json and return the encoded bytes.
*
* @param record
* The GenericRecord to convert
* @param pretty
* Whether to pretty-print the json output
*/
public static byte[] avroToJson(GenericRecord record, boolean pretty) throws IOException {
DatumWriter<Object> writer = new GenericDatumWriter<>(record.getSchema());
ByteArrayOutputStream out = new ByteArrayOutputStream();
JsonEncoder jsonEncoder = EncoderFactory.get().jsonEncoder(record.getSchema(), out, pretty);
writer.write(record, jsonEncoder);
jsonEncoder.flush();
return out.toByteArray();
}
| 3.26 |
hudi_HoodieAvroUtils_avroToBytes_rdh
|
/**
* Convert a given avro record to bytes.
*/
public static byte[] avroToBytes(GenericRecord record) {
return indexedRecordToBytes(record);
}
| 3.26 |
hudi_HoodieAvroUtils_rewriteEvolutionRecordWithMetadata_rdh
|
// TODO Unify the logic of rewriteRecordWithMetadata and rewriteEvolutionRecordWithMetadata, and delete this function.
public static GenericRecord rewriteEvolutionRecordWithMetadata(GenericRecord genericRecord, Schema newSchema, String fileName) {
GenericRecord newRecord = HoodieAvroUtils.rewriteRecordWithNewSchema(genericRecord, newSchema, new HashMap<>());
// do not preserve FILENAME_METADATA_FIELD
newRecord.put(HoodieRecord.FILENAME_META_FIELD_ORD, fileName);
return newRecord;
}
| 3.26 |
hudi_HoodieAvroUtils_bytesToAvro_rdh
|
/**
* Convert serialized bytes back into avro record.
*/
public static GenericRecord bytesToAvro(byte[] bytes, Schema writerSchema, Schema readerSchema) throws IOException {
BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(bytes, BINARY_DECODER.get());
BINARY_DECODER.set(decoder);
GenericDatumReader<GenericRecord> reader = new GenericDatumReader<>(writerSchema, readerSchema);
return reader.read(null, decoder);
}
| 3.26 |
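A hedged round-trip sketch pairing avroToBytes with bytesToAvro, using the same schema as both writer and reader schema.

import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.hudi.avro.HoodieAvroUtils;

public class BinaryRoundTripSketch {
  public static void main(String[] args) throws Exception {
    Schema schema = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"User\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"}]}");
    GenericRecord record = new GenericData.Record(schema);
    record.put("name", "alice");
    byte[] bytes = HoodieAvroUtils.avroToBytes(record);                         // binary-encode
    GenericRecord decoded = HoodieAvroUtils.bytesToAvro(bytes, schema, schema); // decode with writer == reader schema
    System.out.println(decoded.get("name")); // alice
  }
}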
hudi_HoodieAvroUtils_fromJavaDate_rdh
|
/**
* Converts a Date to days since epoch
* <p>
* NOTE: This method could only be used in tests
*
* @VisibleForTesting */
public static int fromJavaDate(Date date) {
long millisUtc = date.getTime();
long millisLocal = millisUtc + TimeZone.getDefault().getOffset(millisUtc);
int julianDays = Math.toIntExact(Math.floorDiv(millisLocal, MILLIS_PER_DAY));
return julianDays;
}
| 3.26 |
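An illustrative round trip between java.sql.Date and epoch days; per the javadocs these helpers are intended for tests only, and the exact day count depends on the JVM's default time zone.

import java.sql.Date;
import org.apache.hudi.avro.HoodieAvroUtils;

public class DateDaysRoundTripSketch {
  public static void main(String[] args) {
    Date date = Date.valueOf("2023-06-15");
    int days = HoodieAvroUtils.fromJavaDate(date); // days since epoch, computed in the local time zone
    Date back = HoodieAvroUtils.toJavaDate(days);
    System.out.println(days + " -> " + back);      // e.g. 19523 -> 2023-06-15
  }
}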
hudi_HoodieAvroUtils_addMetadataFields_rdh
|
/**
* Adds the Hoodie metadata fields to the given schema.
*
* @param schema
* The schema
* @param withOperationField
* Whether to include the '_hoodie_operation' field
*/
public static Schema addMetadataFields(Schema schema,
boolean withOperationField) {
int newFieldsSize = HoodieRecord.HOODIE_META_COLUMNS.size() + (withOperationField ? 1 : 0);
List<Schema.Field> parentFields = new ArrayList<>(schema.getFields().size() + newFieldsSize);
Schema.Field commitTimeField = new Schema.Field(HoodieRecord.COMMIT_TIME_METADATA_FIELD, METADATA_FIELD_SCHEMA, "", JsonProperties.NULL_VALUE);
Schema.Field commitSeqnoField = new Schema.Field(HoodieRecord.COMMIT_SEQNO_METADATA_FIELD, METADATA_FIELD_SCHEMA, "", JsonProperties.NULL_VALUE);
Schema.Field recordKeyField = new Schema.Field(HoodieRecord.RECORD_KEY_METADATA_FIELD, METADATA_FIELD_SCHEMA, "", JsonProperties.NULL_VALUE);
Schema.Field partitionPathField = new Schema.Field(HoodieRecord.PARTITION_PATH_METADATA_FIELD, METADATA_FIELD_SCHEMA, "", JsonProperties.NULL_VALUE);
Schema.Field fileNameField = new Schema.Field(HoodieRecord.FILENAME_METADATA_FIELD, METADATA_FIELD_SCHEMA, "", JsonProperties.NULL_VALUE);
parentFields.add(commitTimeField);
parentFields.add(commitSeqnoField);
parentFields.add(recordKeyField);
parentFields.add(partitionPathField);
parentFields.add(fileNameField);
if (withOperationField) {
final Schema.Field operationField = new Schema.Field(HoodieRecord.OPERATION_METADATA_FIELD, METADATA_FIELD_SCHEMA, "", JsonProperties.NULL_VALUE);
parentFields.add(operationField);
}
for (Schema.Field field : schema.getFields()) {
if (!isMetadataField(field.name())) {
Schema.Field newField = new Schema.Field(field.name(), field.schema(), field.doc(), field.defaultVal());
for (Map.Entry<String, Object> prop : field.getObjectProps().entrySet()) {
newField.addProp(prop.getKey(), prop.getValue());
}
parentFields.add(newField);
}
}
Schema mergedSchema = Schema.createRecord(schema.getName(), schema.getDoc(), schema.getNamespace(), false);
mergedSchema.setFields(parentFields);
return mergedSchema;
}
| 3.26 |
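A small sketch showing the five standard Hoodie meta columns being prepended to a one-field data schema (the '_hoodie_operation' field is skipped here).

import org.apache.avro.Schema;
import org.apache.hudi.avro.HoodieAvroUtils;

public class AddMetadataFieldsSketch {
  public static void main(String[] args) {
    Schema dataSchema = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"Trip\",\"fields\":[{\"name\":\"uuid\",\"type\":\"string\"}]}");
    Schema withMeta = HoodieAvroUtils.addMetadataFields(dataSchema, false);
    // Prints the meta columns (_hoodie_commit_time, _hoodie_commit_seqno, _hoodie_record_key,
    // _hoodie_partition_path, _hoodie_file_name) followed by the data field uuid.
    withMeta.getFields().forEach(f -> System.out.println(f.name()));
  }
}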
hudi_HoodieAvroUtils_getNestedFieldVal_rdh
|
/**
* Obtain value of the provided field, denoted by dot notation. e.g: a.b.c
*/
public static Object getNestedFieldVal(GenericRecord record, String fieldName, boolean returnNullIfNotFound, boolean consistentLogicalTimestampEnabled) {
String[] parts = fieldName.split("\\.");
GenericRecord valueNode = record;
for (int i = 0; i < parts.length; i++) {
String part = parts[i];
Object val;
try {
val = HoodieAvroUtils.getFieldVal(valueNode, part, returnNullIfNotFound);
} catch (AvroRuntimeException e) {
if (returnNullIfNotFound) {
return null;
} else {
throw new HoodieException((((fieldName + "(Part -") + parts[i]) + ") field not found in record. Acceptable fields were :")
+ valueNode.getSchema().getFields().stream().map(Field::name).collect(Collectors.toList()));
}
}
if (i == (parts.length - 1)) {
// return, if last part of name
if (val == null) {
return null;
} else {
Schema fieldSchema = valueNode.getSchema().getField(part).schema();
return convertValueForSpecificDataTypes(fieldSchema, val, consistentLogicalTimestampEnabled);
}
} else if (!(val instanceof GenericRecord)) {
if (returnNullIfNotFound) {
return null;
} else {
throw new HoodieException("Cannot find a record at part value :" + part);
}
} else {
valueNode = ((GenericRecord) (val));
}
}
// This can only be reached if the length of parts is 0
if (returnNullIfNotFound) {
return null;
} else {
throw new HoodieException((fieldName + " field not found in record. Acceptable fields were :") + valueNode.getSchema().getFields().stream().map(Field::name).collect(Collectors.toList()));
}
}
| 3.26 |
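A hedged sketch of dot-notation access into a nested record; the User/Address schemas are invented for the example.

import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.hudi.avro.HoodieAvroUtils;

public class NestedFieldValSketch {
  public static void main(String[] args) {
    Schema inner = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"Address\",\"fields\":[{\"name\":\"city\",\"type\":\"string\"}]}");
    Schema outer = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"User\",\"fields\":["
            + "{\"name\":\"name\",\"type\":\"string\"},"
            + "{\"name\":\"address\",\"type\":" + inner.toString() + "}]}");
    GenericRecord address = new GenericData.Record(inner);
    address.put("city", "Berlin");
    GenericRecord user = new GenericData.Record(outer);
    user.put("name", "alice");
    user.put("address", address);
    // Dot notation walks into the nested record; with returnNullIfNotFound=true missing fields yield null.
    System.out.println(HoodieAvroUtils.getNestedFieldVal(user, "address.city", true, false)); // Berlin
  }
}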
hudi_HoodieAvroUtils_recordNeedsRewriteForExtendedAvroTypePromotion_rdh
|
/**
* Avro does not support type promotion from numbers to string. This function returns true if
* it will be necessary to rewrite the record to support this promotion.
* NOTE: this does not determine whether the writerSchema and readerSchema are compatible.
* It is just trying to determine, as quickly as possible, whether the reader expects a number to be promoted to string.
*/
public static boolean recordNeedsRewriteForExtendedAvroTypePromotion(Schema writerSchema, Schema readerSchema) {
if (writerSchema.equals(readerSchema)) {
return false;
}
switch (readerSchema.getType()) {
case RECORD :
Map<String, Schema.Field> writerFields = new HashMap<>();
for (Schema.Field field : writerSchema.getFields()) {
writerFields.put(field.name(), field);
}
for (Schema.Field field : readerSchema.getFields()) {
if (writerFields.containsKey(field.name())) {
if (recordNeedsRewriteForExtendedAvroTypePromotion(writerFields.get(field.name()).schema(), field.schema())) {
return true;
}
}
}
return false;
case ARRAY :
if (writerSchema.getType().equals(ARRAY)) {
return recordNeedsRewriteForExtendedAvroTypePromotion(writerSchema.getElementType(), readerSchema.getElementType());
}
return false;
case MAP :
if (writerSchema.getType().equals(MAP)) {
return recordNeedsRewriteForExtendedAvroTypePromotion(writerSchema.getValueType(), readerSchema.getValueType());
}
return false;
case UNION :
return recordNeedsRewriteForExtendedAvroTypePromotion(getActualSchemaFromUnion(writerSchema, null), getActualSchemaFromUnion(readerSchema, null));
case ENUM :
case STRING :
case BYTES :
return needsRewriteToString(writerSchema);
default :
return false;
}
}
| 3.26 |
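A quick sketch of the check above for a long column that the reader expects as a string, which Avro cannot promote natively.

import org.apache.avro.Schema;
import org.apache.hudi.avro.HoodieAvroUtils;

public class TypePromotionCheckSketch {
  public static void main(String[] args) {
    Schema writer = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"Rec\",\"fields\":[{\"name\":\"id\",\"type\":\"long\"}]}");
    Schema reader = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"Rec\",\"fields\":[{\"name\":\"id\",\"type\":\"string\"}]}");
    // long -> string is not a promotion Avro handles on its own, so a rewrite is needed.
    System.out.println(HoodieAvroUtils.recordNeedsRewriteForExtendedAvroTypePromotion(writer, reader)); // true
  }
}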
hudi_HoodieAvroUtils_getNestedFieldSchemaFromRecord_rdh
|
/**
* Get schema for the given field and record. Field can be nested, denoted by dot notation. e.g: a.b.c
*
* @param record
* - record containing the value of the given field
* @param fieldName
* - name of the field
* @return the resolved (non-null) schema of the nested field
*/
public static Schema getNestedFieldSchemaFromRecord(GenericRecord record, String fieldName) {
String[] parts = fieldName.split("\\.");
GenericRecord valueNode = record;
int i = 0;
for (; i < parts.length; i++) {
String part = parts[i];
Object val = valueNode.get(part);
if (i == (parts.length - 1)) {
return resolveNullableSchema(valueNode.getSchema().getField(part).schema());
} else {
if (!(val instanceof GenericRecord)) {
throw new HoodieException("Cannot find a record at part value :" + part);
}
valueNode = ((GenericRecord) (val));
}
}
throw new HoodieException("Failed to get schema. Not a valid field name: " + fieldName);
}
| 3.26 |
hudi_HoodieAvroUtils_getNestedFieldSchemaFromWriteSchema_rdh
|
/**
* Get schema for the given field and write schema. Field can be nested, denoted by dot notation. e.g: a.b.c
* Use this method when record is not available. Otherwise, prefer to use {@link #getNestedFieldSchemaFromRecord(GenericRecord, String)}
*
* @param writeSchema
* - write schema of the record
* @param fieldName
* - name of the field
* @return the resolved (non-null) schema of the nested field
*/
public static Schema getNestedFieldSchemaFromWriteSchema(Schema writeSchema, String fieldName) {
String[] parts = fieldName.split("\\.");
int i = 0;
for (; i < parts.length; i++) {
String part = parts[i];
Schema fieldSchema = writeSchema.getField(part).schema();
if (i == (parts.length - 1)) {
return resolveNullableSchema(fieldSchema);
}
}
throw new HoodieException("Failed to get schema. Not a valid field name: " + fieldName);
}
/**
* Returns the string value of the given record {@code rec} and field {@code fieldName}
| 3.26 |
hudi_HoodieAvroUtils_getRootLevelFieldName_rdh
|
/**
* Obtain the root-level field name of a full field name, possibly a nested field.
* For example, given "a.b.c", the output is "a"; given "a", the output is "a".
*
* @param fieldName
* The field name.
* @return Root-level field name
*/
public static String getRootLevelFieldName(String fieldName) {
return fieldName.split("\\.")[0];
}
| 3.26 |
hudi_HoodieAvroUtils_addCommitMetadataToRecord_rdh
|
/**
* Adds the Hoodie commit metadata into the provided Generic Record.
*/
public static GenericRecord addCommitMetadataToRecord(GenericRecord record, String instantTime, String commitSeqno) {
record.put(HoodieRecord.COMMIT_TIME_METADATA_FIELD, instantTime);
record.put(HoodieRecord.COMMIT_SEQNO_METADATA_FIELD, commitSeqno);
return record;
}
| 3.26 |
hudi_HoodieAvroUtils_rewriteRecords_rdh
|
/**
* Converts the provided list of {@link GenericRecord}s into records adhering to the
* provided {@code newSchema}.
* <p>
* To better understand conversion rules please check {@link #rewriteRecord(GenericRecord, Schema)}
*/
public static List<GenericRecord> rewriteRecords(List<GenericRecord> records, Schema newSchema) {
return records.stream().map(r -> rewriteRecord(r, newSchema)).collect(Collectors.toList());
}
| 3.26 |
hudi_HoodieAvroUtils_getRecordColumnValues_rdh
|
/**
* Gets record column values into one object.
*
* @param record
* Hoodie record.
* @param columns
* Names of the columns to get values.
* @param schema
* {@link SerializableSchema} instance.
* @return Column value if a single column, or concatenated String values by comma.
*/
public static Object getRecordColumnValues(HoodieRecord record, String[] columns, SerializableSchema schema, boolean consistentLogicalTimestampEnabled) {
return getRecordColumnValues(record, columns, schema.get(), consistentLogicalTimestampEnabled);
}
| 3.26 |
hudi_HoodieAvroUtils_getFieldVal_rdh
|
/**
* Obtains the value of the provided key; when returnNullIfNotFound is set to false,
* the behavior is consistent with Avro 1.10 and later.
*/
public static Object getFieldVal(GenericRecord record, String key, boolean returnNullIfNotFound) {
Schema.Field field = record.getSchema().getField(key);
if (field == null) {
if (returnNullIfNotFound) {
return null;
} else {
// Since avro 1.10, avro will throw AvroRuntimeException("Not a valid schema field: " + key)
// rather than return null like the previous version if record doesn't contain this key.
// Here we simulate this behavior.
throw new AvroRuntimeException("Not a valid schema field: " + key);
}
} else {
return record.get(field.pos());
}
}
| 3.26 |
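A brief sketch contrasting the two lookup modes of getFieldVal for a present and a missing field.

import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.hudi.avro.HoodieAvroUtils;

public class GetFieldValSketch {
  public static void main(String[] args) {
    Schema schema = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"Rec\",\"fields\":[{\"name\":\"id\",\"type\":\"long\"}]}");
    GenericRecord record = new GenericData.Record(schema);
    record.put("id", 5L);
    System.out.println(HoodieAvroUtils.getFieldVal(record, "id", false));     // 5
    System.out.println(HoodieAvroUtils.getFieldVal(record, "missing", true)); // null
    // getFieldVal(record, "missing", false) would throw AvroRuntimeException instead.
  }
}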
hudi_HoodieAvroUtils_unwrapAvroValueWrapper_rdh
|
/**
* Unwraps Avro value wrapper into Java value.
*
* @param avroValueWrapper
* A wrapped value with Avro type wrapper.
* @return Java value.
*/
public static Comparable<?> unwrapAvroValueWrapper(Object avroValueWrapper) {
if (avroValueWrapper == null) {
return null;
} else if (avroValueWrapper instanceof DateWrapper) {
return LocalDate.ofEpochDay(((DateWrapper) (avroValueWrapper)).getValue());
} else if (avroValueWrapper instanceof DecimalWrapper) {
Schema valueSchema = DecimalWrapper.SCHEMA$.getField("value").schema();
return AVRO_DECIMAL_CONVERSION.fromBytes(((DecimalWrapper) (avroValueWrapper)).getValue(), valueSchema, valueSchema.getLogicalType());
} else if (avroValueWrapper instanceof TimestampMicrosWrapper) {
return microsToInstant(((TimestampMicrosWrapper) (avroValueWrapper)).getValue());
} else if (avroValueWrapper instanceof BooleanWrapper) {
return ((BooleanWrapper) (avroValueWrapper)).getValue();
} else if (avroValueWrapper instanceof IntWrapper) {
return ((IntWrapper) (avroValueWrapper)).getValue();
} else if (avroValueWrapper instanceof LongWrapper) {
return ((LongWrapper) (avroValueWrapper)).getValue();
} else if (avroValueWrapper instanceof FloatWrapper) {
return ((FloatWrapper) (avroValueWrapper)).getValue();
} else if (avroValueWrapper instanceof DoubleWrapper) {
return ((DoubleWrapper) (avroValueWrapper)).getValue();
} else if (avroValueWrapper instanceof BytesWrapper) {
return ((BytesWrapper) (avroValueWrapper)).getValue();
} else if (avroValueWrapper instanceof StringWrapper) {
return ((StringWrapper) (avroValueWrapper)).getValue();
} else if (avroValueWrapper instanceof GenericRecord) {
// NOTE: This branch could be hit b/c Avro records could be reconstructed
// as {@code GenericRecord}
// TODO add logical type decoding
GenericRecord genericRecord = ((GenericRecord) (avroValueWrapper));
return ((Comparable<?>) (genericRecord.get("value")));
} else {
throw new UnsupportedOperationException(String.format("Unsupported type of the value (%s)", avroValueWrapper.getClass()));
}
}
}
| 3.26 |
hudi_HoodieAvroUtils_removeFields_rdh
|
/**
* Given an Avro record and list of columns to remove, this method removes the list of columns from
* the given avro record using rewriteRecord method.
* <p>
* To better understand how it removes please check {@link #rewriteRecord(GenericRecord, Schema)}
*/
public static GenericRecord removeFields(GenericRecord record, Set<String> fieldsToRemove) {
Schema newSchema = removeFields(record.getSchema(), fieldsToRemove);
return rewriteRecord(record, newSchema);
}
| 3.26 |
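An illustrative sketch of dropping a column from a record; the two-field schema is invented for the example.

import java.util.Collections;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.hudi.avro.HoodieAvroUtils;

public class RemoveFieldsSketch {
  public static void main(String[] args) {
    Schema schema = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"Rec\",\"fields\":["
            + "{\"name\":\"id\",\"type\":\"long\"},{\"name\":\"tmp\",\"type\":\"string\"}]}");
    GenericRecord record = new GenericData.Record(schema);
    record.put("id", 1L);
    record.put("tmp", "drop-me");
    GenericRecord slim = HoodieAvroUtils.removeFields(record, Collections.singleton("tmp"));
    System.out.println(slim.getSchema().getFields().size()); // 1
  }
}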
hudi_HoodieAvroUtils_rewriteRecordWithNewSchema_rdh
|
/**
* Given avro records, rewrites them with new schema.
*
* @param oldRecords
* oldRecords to be rewritten
* @param newSchema
* newSchema used to rewrite oldRecord
* @param renameCols
* a map storing all renamed cols, (k, v) -> (colNameFromNewSchema, colNameFromOldSchema)
* @return an iterator of rewritten GenericRecords
*/
public static Iterator<GenericRecord> rewriteRecordWithNewSchema(Iterator<GenericRecord> oldRecords, Schema newSchema, Map<String, String> renameCols, boolean validate) {
if ((oldRecords == null) || (newSchema == null)) {
return Collections.emptyIterator();
}
return new Iterator<GenericRecord>() {
@Override
public boolean hasNext() {
return oldRecords.hasNext();
}
@Override
public GenericRecord next() {
return rewriteRecordWithNewSchema(oldRecords.next(), newSchema, renameCols, validate);
}
};
}
| 3.26 |
hudi_HoodieAvroUtils_convertValueForAvroLogicalTypes_rdh
|
/**
* This method converts values for fields with certain Avro Logical data types that require special handling.
* <p>
* Logical Date Type is converted to actual Date value instead of Epoch Integer which is how it is
* represented/stored in parquet.
* <p>
* Decimal Data Type is converted to actual decimal value instead of bytes/fixed which is how it is
* represented/stored in parquet.
*
* @param fieldSchema
* avro field schema
* @param fieldValue
* avro field value
* @return field value either converted (for certain data types) or as it is.
*/
private static Object convertValueForAvroLogicalTypes(Schema fieldSchema, Object fieldValue, boolean consistentLogicalTimestampEnabled) {
if (fieldSchema.getLogicalType() == LogicalTypes.date()) {
return LocalDate.ofEpochDay(Long.parseLong(fieldValue.toString()));
} else if ((fieldSchema.getLogicalType() == LogicalTypes.timestampMillis()) && consistentLogicalTimestampEnabled) {
return new Timestamp(Long.parseLong(fieldValue.toString()));
} else if ((fieldSchema.getLogicalType() == LogicalTypes.timestampMicros()) && consistentLogicalTimestampEnabled) {
return new Timestamp(Long.parseLong(fieldValue.toString()) / 1000);
} else if (fieldSchema.getLogicalType() instanceof LogicalTypes.Decimal) {
Decimal dc = ((Decimal) (fieldSchema.getLogicalType()));
DecimalConversion decimalConversion = new DecimalConversion();
if (fieldSchema.getType() == Type.FIXED) {
return decimalConversion.fromFixed(((GenericFixed) (fieldValue)), fieldSchema, LogicalTypes.decimal(dc.getPrecision(), dc.getScale()));
} else if (fieldSchema.getType() == Type.BYTES) {
ByteBuffer byteBuffer = ((ByteBuffer) (fieldValue));
BigDecimal convertedValue = decimalConversion.fromBytes(byteBuffer, fieldSchema, LogicalTypes.decimal(dc.getPrecision(), dc.getScale()));
byteBuffer.rewind();
return convertedValue;
}
}
return fieldValue;
}
| 3.26 |
hudi_HoodieAvroUtils_sanitizeName_rdh
|
/**
* Sanitizes Name according to Avro rule for names.
* Removes characters other than the ones mentioned in https://avro.apache.org/docs/current/spec.html#names .
*
* @param name
* input name
* @param invalidCharMask
* replacement for invalid characters.
* @return sanitized name
*/
public static String sanitizeName(String name, String invalidCharMask) {
if (INVALID_AVRO_FIRST_CHAR_IN_NAMES_PATTERN.matcher(name.substring(0, 1)).matches()) {
name = INVALID_AVRO_FIRST_CHAR_IN_NAMES_PATTERN.matcher(name).replaceFirst(invalidCharMask);
}
return INVALID_AVRO_CHARS_IN_NAMES_PATTERN.matcher(name).replaceAll(invalidCharMask);
}
| 3.26 |
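A small sketch of the sanitization rules: an invalid leading character and any character outside Avro's allowed name set are replaced by the mask.

import org.apache.hudi.avro.HoodieAvroUtils;

public class SanitizeNameSketch {
  public static void main(String[] args) {
    // A digit is invalid as the first character, and '-' / '.' are invalid anywhere.
    System.out.println(HoodieAvroUtils.sanitizeName("1st-col.name", "__")); // expected: __st__col__name
  }
}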