name (stringlengths 12-178) | code_snippet (stringlengths 8-36.5k) | score (float64 3.26-3.68) |
---|---|---|
hudi_BaseHoodieTableServiceClient_rollbackFailedWrites_rdh
|
/**
* Rollback all failed writes.
*
* @return true if rollback was triggered. false otherwise.
*/
protected Boolean rollbackFailedWrites() {
HoodieTable table = createTable(config, hadoopConf);
List<String> instantsToRollback = getInstantsToRollback(table.getMetaClient(), config.getFailedWritesCleanPolicy(), Option.empty());
Map<String, Option<HoodiePendingRollbackInfo>> pendingRollbacks = getPendingRollbackInfos(table.getMetaClient());
instantsToRollback.forEach(entry -> pendingRollbacks.putIfAbsent(entry, Option.empty()));
rollbackFailedWrites(pendingRollbacks);
return !pendingRollbacks.isEmpty();
}
| 3.26 |
hudi_BaseHoodieTableServiceClient_scheduleLogCompaction_rdh
|
/**
* Schedules a new log compaction instant.
*
* @param extraMetadata
* Extra Metadata to be stored
*/
public Option<String> scheduleLogCompaction(Option<Map<String, String>> extraMetadata) throws HoodieIOException {
String instantTime = createNewInstantTime();
return scheduleLogCompactionAtInstant(instantTime, extraMetadata) ? Option.of(instantTime) : Option.empty();
}
| 3.26 |
hudi_BaseHoodieTableServiceClient_compact_rdh
|
/**
* Ensures compaction instant is in expected state and performs Compaction for the workload stored in instant-time.
*
* @param compactionInstantTime
* Compaction Instant Time
* @return Collection of Write Status
*/
protected HoodieWriteMetadata<O> compact(String compactionInstantTime, boolean shouldComplete) {
HoodieTable<?, I, ?, T> table = createTable(config, context.getHadoopConf().get());
HoodieTimeline pendingCompactionTimeline = table.getActiveTimeline().filterPendingCompactionTimeline();
HoodieInstant inflightInstant = HoodieTimeline.getCompactionInflightInstant(compactionInstantTime);
if (pendingCompactionTimeline.containsInstant(inflightInstant)) {
table.rollbackInflightCompaction(inflightInstant, commitToRollback -> getPendingRollbackInfo(table.getMetaClient(), commitToRollback, false));
table.getMetaClient().reloadActiveTimeline();
}
compactionTimer = metrics.getCompactionCtx();
HoodieWriteMetadata<T> writeMetadata = table.compact(context, compactionInstantTime);
HoodieWriteMetadata<O> compactionMetadata = convertToOutputMetadata(writeMetadata);
if (shouldComplete && compactionMetadata.getCommitMetadata().isPresent()) {
completeCompaction(compactionMetadata.getCommitMetadata().get(), table, compactionInstantTime);
}
return compactionMetadata;
}
| 3.26 |
hudi_BaseHoodieTableServiceClient_scheduleLogCompactionAtInstant_rdh
|
/**
* Schedules a new log compaction instant with passed-in instant time.
*
* @param instantTime
* Log Compaction Instant Time
* @param extraMetadata
* Extra Metadata to be stored
*/
public boolean scheduleLogCompactionAtInstant(String instantTime, Option<Map<String, String>> extraMetadata) throws HoodieIOException {
return scheduleTableService(instantTime, extraMetadata, TableServiceType.LOG_COMPACT).isPresent();
}
| 3.26 |
hudi_BaseHoodieTableServiceClient_writeTableMetadata_rdh
|
/**
* Write the HoodieCommitMetadata to metadata table if available.
*
* @param table
* {@link HoodieTable} of interest.
* @param instantTime
* instant time of the commit.
* @param metadata
* instance of {@link HoodieCommitMetadata}.
* @param writeStatuses
* Write statuses of the commit
*/
protected void writeTableMetadata(HoodieTable table, String instantTime, HoodieCommitMetadata metadata, HoodieData<WriteStatus> writeStatuses) {
context.setJobStatus(this.getClass().getSimpleName(), "Committing to metadata table: " + config.getTableName());
Option<HoodieTableMetadataWriter> metadataWriterOpt = table.getMetadataWriter(instantTime);
if (metadataWriterOpt.isPresent()) {
try (HoodieTableMetadataWriter metadataWriter = metadataWriterOpt.get()) {
metadataWriter.updateFromWriteStatuses(metadata, writeStatuses, instantTime);
} catch (Exception e) {
if (e instanceof HoodieException) {
throw ((HoodieException) (e));
} else {
throw new HoodieException("Failed to update metadata", e);
}
}
}
}
| 3.26 |
hudi_BaseHoodieTableServiceClient_logCompact_rdh
|
/**
* Performs Log Compaction for the workload stored in instant-time.
*
* @param logCompactionInstantTime
* Log Compaction Instant Time
* @return Collection of WriteStatus to inspect errors and counts
*/
public HoodieWriteMetadata<O> logCompact(String logCompactionInstantTime) {
return logCompact(logCompactionInstantTime, config.shouldAutoCommit());
}
| 3.26 |
hudi_BaseHoodieTableServiceClient_getPendingRollbackInfos_rdh
|
/**
* Fetch map of pending commits to be rolled-back to {@link HoodiePendingRollbackInfo}.
*
* @param metaClient
* instance of {@link HoodieTableMetaClient} to use.
* @return map of pending commits to be rolled-back instants to Rollback Instant and Rollback plan Pair.
*/
protected Map<String, Option<HoodiePendingRollbackInfo>> getPendingRollbackInfos(HoodieTableMetaClient metaClient, boolean ignoreCompactionAndClusteringInstants) {
List<HoodieInstant> instants = metaClient.getActiveTimeline().filterPendingRollbackTimeline().getInstants();
Map<String, Option<HoodiePendingRollbackInfo>> infoMap = new HashMap<>();
for (HoodieInstant rollbackInstant : instants) {
HoodieRollbackPlan rollbackPlan;
try {
rollbackPlan = RollbackUtils.getRollbackPlan(metaClient, rollbackInstant);
} catch (Exception e) {
if (rollbackInstant.isRequested()) {
LOG.warn(("Fetching rollback plan failed for " + rollbackInstant) + ", deleting the plan since it's in REQUESTED state", e);
try {
metaClient.getActiveTimeline().deletePending(rollbackInstant);
} catch (HoodieIOException he) {
LOG.warn("Cannot delete " + rollbackInstant, he);
continue;
}
} else {
// Here we assume that if the rollback is inflight, the rollback plan is intact
// in instant.rollback.requested. The exception here can be due to other reasons.
LOG.warn(("Fetching rollback plan failed for " + rollbackInstant) + ", skip the plan", e);
}
continue;
}
try {
String action = rollbackPlan.getInstantToRollback().getAction();
String instantToRollback = rollbackPlan.getInstantToRollback().getCommitTime();
if (ignoreCompactionAndClusteringInstants) {
if (!HoodieTimeline.COMPACTION_ACTION.equals(action)) {
boolean isClustering = HoodieTimeline.REPLACE_COMMIT_ACTION.equals(action) && ClusteringUtils.getClusteringPlan(metaClient, new HoodieInstant(true, action, instantToRollback)).isPresent();
if (!isClustering) {
infoMap.putIfAbsent(instantToRollback, Option.of(new HoodiePendingRollbackInfo(rollbackInstant, rollbackPlan)));
}
}
} else {
infoMap.putIfAbsent(instantToRollback, Option.of(new HoodiePendingRollbackInfo(rollbackInstant, rollbackPlan)));
}
} catch (Exception e) {
LOG.warn(("Processing rollback plan failed for " + rollbackInstant) + ", skip the plan", e);
}
}
return infoMap;
}
| 3.26 |
hudi_BaseHoodieTableServiceClient_inlineCompaction_rdh
|
/**
* Performs a compaction operation on a table, serially before or after an insert/upsert action.
* Scheduling and execution is done inline.
*/
protected Option<String> inlineCompaction(Option<Map<String, String>> extraMetadata) {
Option<String> compactionInstantTimeOpt = inlineScheduleCompaction(extraMetadata);
compactionInstantTimeOpt.ifPresent(compactInstantTime -> {
// inline compaction should auto commit as the user is never given control
compact(compactInstantTime, true);
});
return compactionInstantTimeOpt;
}
| 3.26 |
hudi_BaseHoodieTableServiceClient_rollbackFailedBootstrap_rdh
|
/**
* Main API to rollback failed bootstrap.
*/
public void rollbackFailedBootstrap() {
LOG.info("Rolling back pending bootstrap if present");
HoodieTable table = createTable(config, hadoopConf);
HoodieTimeline inflightTimeline = table.getMetaClient().getCommitsTimeline().filterPendingExcludingMajorAndMinorCompaction();
Option<String> instant = Option.fromJavaOptional(inflightTimeline.getReverseOrderedInstants().map(HoodieInstant::getTimestamp).findFirst());
if (instant.isPresent() && HoodieTimeline.compareTimestamps(instant.get(), HoodieTimeline.LESSER_THAN_OR_EQUALS, HoodieTimeline.FULL_BOOTSTRAP_INSTANT_TS)) {
LOG.info("Found pending bootstrap instants. Rolling them back");
table.rollbackBootstrap(context, createNewInstantTime());
LOG.info("Finished rolling back pending bootstrap");
}
// if bootstrap failed, lets delete metadata and restart from scratch
HoodieTableMetadataUtil.deleteMetadataTable(config.getBasePath(), context);
}
| 3.26 |
hudi_BaseHoodieTableServiceClient_scheduleClustering_rdh
|
/**
* Schedules a new clustering instant.
*
* @param extraMetadata
* Extra Metadata to be stored
*/
public Option<String> scheduleClustering(Option<Map<String, String>> extraMetadata) throws HoodieIOException {
String instantTime = createNewInstantTime();
return scheduleClusteringAtInstant(instantTime, extraMetadata) ? Option.of(instantTime) : Option.empty();
}
| 3.26 |
hudi_BaseHoodieTableServiceClient_scheduleCompactionAtInstant_rdh
|
/**
* Schedules a new compaction instant with passed-in instant time.
*
* @param instantTime
* Compaction Instant Time
* @param extraMetadata
* Extra Metadata to be stored
*/
public boolean scheduleCompactionAtInstant(String instantTime, Option<Map<String, String>> extraMetadata) throws HoodieIOException {
return scheduleTableService(instantTime, extraMetadata, TableServiceType.COMPACT).isPresent();
}
| 3.26 |
hudi_BaseHoodieTableServiceClient_rollbackFailedIndexingCommits_rdh
|
/**
* Rolls back the failed delta commits corresponding to the indexing action.
* Such delta commits are identified based on the suffix `METADATA_INDEXER_TIME_SUFFIX` ("004").
* <p>
* TODO(HUDI-5733): This should be cleaned up once the proper fix of rollbacks
* in the metadata table is landed.
*
* @return {@code true} if rollback happens; {@code false} otherwise.
*/
protected boolean rollbackFailedIndexingCommits() {
HoodieTable table = createTable(config, hadoopConf);
List<String> instantsToRollback = getFailedIndexingCommitsToRollback(table.getMetaClient());
Map<String, Option<HoodiePendingRollbackInfo>> pendingRollbacks = getPendingRollbackInfos(table.getMetaClient());
instantsToRollback.forEach(entry -> pendingRollbacks.putIfAbsent(entry, Option.empty()));
rollbackFailedWrites(pendingRollbacks);
return !pendingRollbacks.isEmpty();
}
| 3.26 |
hudi_BaseHoodieTableServiceClient_cluster_rdh
|
/**
* Ensures clustering instant is in expected state and performs clustering for the plan stored in metadata.
*
* @param clusteringInstant
* Clustering Instant Time
* @return Collection of Write Status
*/
public HoodieWriteMetadata<O> cluster(String clusteringInstant, boolean shouldComplete) {
HoodieTable<?, I, ?, T> table = createTable(config, context.getHadoopConf().get());
HoodieTimeline pendingClusteringTimeline = table.getActiveTimeline().filterPendingReplaceTimeline();
HoodieInstant inflightInstant = HoodieTimeline.getReplaceCommitInflightInstant(clusteringInstant);
if (pendingClusteringTimeline.containsInstant(inflightInstant)) {
table.rollbackInflightClustering(inflightInstant, commitToRollback -> getPendingRollbackInfo(table.getMetaClient(), commitToRollback, false));
table.getMetaClient().reloadActiveTimeline();
}
clusteringTimer = metrics.getClusteringCtx();
LOG.info("Starting clustering at " + clusteringInstant);
HoodieWriteMetadata<T> writeMetadata = table.cluster(context, clusteringInstant);
HoodieWriteMetadata<O> clusteringMetadata = convertToOutputMetadata(writeMetadata);
// Validation has to be done after cloning. if not, it could result in referencing the write status twice which means clustering could get executed twice.
validateClusteringCommit(clusteringMetadata, clusteringInstant, table);
// Publish file creation metrics for clustering.
if (config.isMetricsOn()) {
clusteringMetadata.getWriteStats().ifPresent(hoodieWriteStats -> hoodieWriteStats.stream().filter(hoodieWriteStat -> hoodieWriteStat.getRuntimeStats() != null).map(hoodieWriteStat -> hoodieWriteStat.getRuntimeStats().getTotalCreateTime()).forEach(metrics::updateClusteringFileCreationMetrics));
}
// TODO : Where is shouldComplete used ?
if (shouldComplete && clusteringMetadata.getCommitMetadata().isPresent()) {
completeClustering(((HoodieReplaceCommitMetadata) (clusteringMetadata.getCommitMetadata().get())), table, clusteringInstant, Option.ofNullable(convertToWriteStatus(writeMetadata)));
}
return clusteringMetadata;
}
| 3.26 |
hudi_BaseHoodieTableServiceClient_scheduleTableService_rdh
|
/**
* Schedule table services such as clustering, compaction & cleaning.
*
* @param extraMetadata
* Metadata to pass onto the scheduled service instant
* @param tableServiceType
* Type of table service to schedule
* @return the instant time if the table service was scheduled, empty otherwise.
*/
public Option<String> scheduleTableService(String instantTime, Option<Map<String, String>> extraMetadata, TableServiceType tableServiceType) {
// A lock is required to guard against race conditions between an ongoing writer and scheduling a table service.
final Option<HoodieInstant> inflightInstant = Option.of(new HoodieInstant(State.REQUESTED, tableServiceType.getAction(), instantTime));
try {
this.txnManager.beginTransaction(inflightInstant, Option.empty());
LOG.info("Scheduling table service " + tableServiceType);
return scheduleTableServiceInternal(instantTime, extraMetadata, tableServiceType);
} finally {
this.txnManager.endTransaction(inflightInstant);
}
}
| 3.26 |
hudi_BaseHoodieTableServiceClient_inlineScheduleClustering_rdh
|
/**
* Schedules clustering inline.
*
* @param extraMetadata
* extra metadata to use.
* @return clustering instant if scheduled.
*/
protected Option<String> inlineScheduleClustering(Option<Map<String, String>> extraMetadata) {
return scheduleClustering(extraMetadata);
}
| 3.26 |
hudi_BaseHoodieTableServiceClient_commitCompaction_rdh
|
/**
* Commit a compaction operation. Allow passing additional meta-data to be stored in commit instant file.
*
* @param compactionInstantTime
* Compaction Instant Time
* @param metadata
* All the metadata that gets stored along with a commit
* @param extraMetadata
* Extra Metadata to be stored
*/
public void commitCompaction(String compactionInstantTime, HoodieCommitMetadata metadata, Option<Map<String, String>> extraMetadata) {
extraMetadata.ifPresent(m -> m.forEach(metadata::addMetadata));
completeCompaction(metadata, createTable(config, context.getHadoopConf().get()), compactionInstantTime);
}
| 3.26 |
hudi_BaseHoodieTableServiceClient_inlineClustering_rdh
|
/**
* Executes a clustering plan on a table, serially before or after an insert/upsert action.
* Schedules and executes clustering inline.
*/
protected Option<String> inlineClustering(Option<Map<String, String>> extraMetadata) {
Option<String> clusteringInstantOpt = inlineScheduleClustering(extraMetadata);
clusteringInstantOpt.ifPresent(clusteringInstant -> {
// inline cluster should auto commit as the user is never given control
cluster(clusteringInstant, true);
});
return clusteringInstantOpt;
}
| 3.26 |
hudi_BaseHoodieTableServiceClient_inlineLogCompact_rdh
|
/**
* Performs a log compaction operation on a table, serially before or after an insert/upsert action.
*/
protected Option<String> inlineLogCompact(Option<Map<String, String>> extraMetadata) {
Option<String> logCompactionInstantTimeOpt = scheduleLogCompaction(extraMetadata);
logCompactionInstantTimeOpt.ifPresent(logCompactInstantTime -> {
// inline log compaction should auto commit as the user is never given control
logCompact(logCompactInstantTime, true);
});
return logCompactionInstantTimeOpt;
}
| 3.26 |
hudi_BaseHoodieTableServiceClient_preCommit_rdh
|
/**
* Any pre-commit actions like conflict resolution goes here.
*
* @param metadata
* commit metadata for which pre commit is being invoked.
*/
protected void preCommit(HoodieCommitMetadata metadata) {
// Create a Hoodie table after startTxn which encapsulated the commits and files visible.
// Important to create this after the lock to ensure the latest commits show up in the timeline without need for reload
HoodieTable table = createTable(config, hadoopConf);
resolveWriteConflict(table, metadata, this.pendingInflightAndRequestedInstants);
}
| 3.26 |
hudi_BaseHoodieTableServiceClient_isPreCommitRequired_rdh
|
/**
* Some writers use SparkAllowUpdateStrategy and treat replacecommit plan as revocable plan.
* In those cases, their ConflictResolutionStrategy implementation should run conflict resolution
* even for clustering operations.
*
* @return boolean
*/
protected boolean isPreCommitRequired() {
return this.config.getWriteConflictResolutionStrategy().isPreCommitRequired();
}
| 3.26 |
hudi_HoodieInternalConfig_getBulkInsertIsPartitionRecordsSorted_rdh
|
/**
* Returns if partition records are sorted or not.
*
* @param propertyValue
* value for property BULKINSERT_ARE_PARTITIONER_RECORDS_SORTED.
* @return the property value.
*/
public static Boolean getBulkInsertIsPartitionRecordsSorted(String propertyValue) {
return propertyValue != null ? Boolean.parseBoolean(propertyValue)
: DEFAULT_BULKINSERT_ARE_PARTITIONER_RECORDS_SORTED;
}
| 3.26 |
hudi_HoodieColumnRangeMetadata_merge_rdh
|
/**
* Merges the given two column range metadata.
*/
public static HoodieColumnRangeMetadata<Comparable> merge(HoodieColumnRangeMetadata<Comparable> left, HoodieColumnRangeMetadata<Comparable> right) {
String filePath = left.getFilePath();
String columnName = left.getColumnName();
Comparable min = minVal(left.getMinValue(), right.getMinValue());
Comparable max = maxVal(left.getMaxValue(), right.getMaxValue());
long nullCount = left.getNullCount() + right.getNullCount();
long valueCount = left.getValueCount() + right.getValueCount();
long totalSize = left.getTotalSize() + right.getTotalSize();
long totalUncompressedSize = left.getTotalUncompressedSize() + right.getTotalUncompressedSize();
return create(filePath, columnName, min, max, nullCount, valueCount, totalSize, totalUncompressedSize);
}
| 3.26 |
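To make the merge semantics above concrete, here is a minimal standalone sketch (plain Java with a hypothetical ColumnRange type, not the Hudi API): the per-column min/max are combined and the counters are summed; the null handling of the range bounds is an assumption of this sketch.

// Hypothetical sketch mirroring HoodieColumnRangeMetadata.merge above.
static final class ColumnRange {
final Comparable min;
final Comparable max;
final long nullCount;
final long valueCount;
ColumnRange(Comparable min, Comparable max, long nullCount, long valueCount) {
this.min = min;
this.max = max;
this.nullCount = nullCount;
this.valueCount = valueCount;
}
}

static ColumnRange merge(ColumnRange left, ColumnRange right) {
// smaller of the two mins, larger of the two maxes; a null bound is treated as "unknown" here
Comparable min = (left.min == null) ? right.min : (right.min == null ? left.min : (left.min.compareTo(right.min) <= 0 ? left.min : right.min));
Comparable max = (left.max == null) ? right.max : (right.max == null ? left.max : (left.max.compareTo(right.max) >= 0 ? left.max : right.max));
return new ColumnRange(min, max, left.nullCount + right.nullCount, left.valueCount + right.valueCount);
}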
hudi_InternalSchemaBuilder_refreshNewId_rdh
|
/**
* Assigns new ids for all fields in a Type, based on initial id.
*
* @param type
* a type.
* @param nextId
* initial id used to refresh the ids of all fields in the type
* @return a new type with new ids
*/
public Type refreshNewId(Type type, AtomicInteger nextId) {
switch (type.typeId()) {
case RECORD :
Types.RecordType record = ((Types.RecordType) (type));
List<Types.Field> oldFields = record.fields();
int currentId = nextId.get();
nextId.set(currentId + record.fields().size());
List<Types.Field> internalFields = new ArrayList<>();
for (int i = 0; i < oldFields.size(); i++) {
Types.Field oldField = oldFields.get(i);
Type fieldType = refreshNewId(oldField.type(), nextId);
internalFields.add(Types.Field.get(currentId++, oldField.isOptional(), oldField.name(), fieldType, oldField.doc()));
}
return Types.RecordType.get(internalFields);
case ARRAY :
Types.ArrayType array = ((Types.ArrayType) (type));
int elementId = nextId.get();
nextId.set(elementId + 1);
Type elementType = refreshNewId(array.elementType(), nextId);
return Types.ArrayType.get(elementId, array.isElementOptional(), elementType);
case MAP :
Types.MapType map = ((Types.MapType) (type));
int v42 = nextId.get();
int valueId = v42 + 1;
nextId.set(v42 + 2);
Type keyType = refreshNewId(map.keyType(), nextId);
Type valueType = refreshNewId(map.valueType(), nextId);
return Types.MapType.get(v42, valueId, keyType, valueType, map.isValueOptional());
default :
return type;
}
}
| 3.26 |
hudi_InternalSchemaBuilder_m0_rdh
|
/**
* Build a mapping from id to field for an internal Type.
*
* @param type
* hoodie internal type
* @return a mapping from id to field
*/
public Map<Integer, Types.Field> m0(Type type) {
Map<Integer, Types.Field> idToField = new HashMap<>();
visitIdToField(type, idToField);
return idToField;
}
| 3.26 |
hudi_InternalSchemaBuilder_index2Parents_rdh
|
/**
* Build a mapping which maintains the relation between a child field id and its parent field id.
* If a child field y (with id 9) belongs to a nested field x (with id 6), then (9 -> 6) will be added to the result map.
* If a field has no parent field, nothing will be added.
*
* @param record
* hoodie record type.
* @return a mapping from id to parentId for a record Type
*/
public Map<Integer, Integer> index2Parents(Types.RecordType record) {
Map<Integer, Integer> result = new HashMap<>();
Deque<Integer> parentIds = new LinkedList<>();
index2Parents(record, parentIds, result);
return result;
}
| 3.26 |
hudi_InternalSchemaBuilder_buildIdToName_rdh
|
/**
* Build a mapping from id to full field name for an internal Type.
* If a field y belongs to a struct field x, then the full name of y is x.y.
*
* @param type
* hoodie internal type
* @return a mapping from id to full field name
*/
public Map<Integer, String> buildIdToName(Type type) {
Map<Integer, String> result = new HashMap<>();
buildNameToId(type).forEach((k, v) -> result.put(v, k));
return result;
}
| 3.26 |
hudi_InternalSchemaBuilder_buildNameToId_rdh
|
/**
* Build a mapping from full field name to id for an internal Type.
* If a field y belongs to a struct field x, then the full name of y is x.y.
*
* @param type
* hoodie internal type
* @return a mapping from full field name to id
*/
public Map<String, Integer> buildNameToId(Type type) {
return visit(type, new NameToIDVisitor());
}
| 3.26 |
hudi_InternalSchemaBuilder_visit_rdh
|
/**
* Used to traverse all types in an internalSchema with a visitor.
*
* @param schema
* hoodie internal schema
* @return visitor expected result.
*/
public <T> T visit(InternalSchema schema, InternalSchemaVisitor<T> visitor) {
return visitor.schema(schema, visit(schema.getRecord(), visitor));
}
| 3.26 |
hudi_ClusteringCommand_runClustering_rdh
|
/**
* Run clustering table service.
* <p>
* Example:
* > connect --path {path to hudi table}
* > clustering scheduleAndExecute --sparkMaster local --sparkMemory 2g
*/
@ShellMethod(key = "clustering scheduleAndExecute", value = "Run Clustering. Make a cluster plan first and execute that plan immediately")
public String runClustering(
@ShellOption(value = "--sparkMaster", defaultValue = SparkUtil.DEFAULT_SPARK_MASTER, help = "Spark master") final String master,
@ShellOption(value = "--sparkMemory", help = "Spark executor memory", defaultValue = "4g") final String sparkMemory,
@ShellOption(value = "--parallelism", help = "Parallelism for hoodie clustering", defaultValue = "1") final String parallelism,
@ShellOption(value = "--retry", help = "Number of retries", defaultValue = "1") final String retry,
@ShellOption(value = "--propsFilePath", help = "path to properties file on localfs or dfs with configurations for hoodie client for compacting", defaultValue = "") final String propsFilePath,
@ShellOption(value = "--hoodieConfigs", help = "Any configuration that can be set in the properties file can be passed here in the form of an array", defaultValue = "") final String[] configs) throws Exception {
HoodieTableMetaClient client = HoodieCLI.getTableMetaClient();
boolean initialized = HoodieCLI.initConf();
HoodieCLI.initFS(initialized);
String sparkPropertiesPath = Utils.getDefaultPropertiesFile(JavaConverters.mapAsScalaMapConverter(System.getenv()).asScala());
SparkLauncher sparkLauncher = SparkUtil.initLauncher(sparkPropertiesPath);
sparkLauncher.addAppArgs(SparkCommand.CLUSTERING_SCHEDULE_AND_EXECUTE.toString(), master, sparkMemory, client.getBasePath(), client.getTableConfig().getTableName(), parallelism, retry, propsFilePath);
UtilHelpers.validateAndAddProperties(configs, sparkLauncher);
Process process = sparkLauncher.launch();
InputStreamConsumer.captureOutput(process);
int exitCode = process.waitFor();
if (exitCode != 0) {
return "Failed to run clustering for scheduleAndExecute.";
}
return "Succeeded to run clustering for scheduleAndExecute";
}
| 3.26 |
hudi_ClusteringCommand_scheduleClustering_rdh
|
/**
* Schedule clustering table service.
* <p>
* Example:
* > connect --path {path to hudi table}
* > clustering schedule --sparkMaster local --sparkMemory 2g
*/
@ShellMethod(key = "clustering schedule", value = "Schedule Clustering")
public String scheduleClustering(
@ShellOption(value = "--sparkMaster", defaultValue = SparkUtil.DEFAULT_SPARK_MASTER, help = "Spark master") final String master,
@ShellOption(value = "--sparkMemory", defaultValue = "1g", help = "Spark executor memory") final String sparkMemory,
@ShellOption(value = "--propsFilePath", help = "path to properties file on localfs or dfs with configurations for hoodie client for clustering", defaultValue = "") final String propsFilePath,
@ShellOption(value = "--hoodieConfigs", help = "Any configuration that can be set in the properties file can be passed here in the form of an array", defaultValue = "") final String[] configs) throws Exception {
HoodieTableMetaClient client = HoodieCLI.getTableMetaClient();
boolean initialized = HoodieCLI.initConf();
HoodieCLI.initFS(initialized);
String sparkPropertiesPath = Utils.getDefaultPropertiesFile(JavaConverters.mapAsScalaMapConverter(System.getenv()).asScala());
SparkLauncher sparkLauncher = SparkUtil.initLauncher(sparkPropertiesPath);
// First get a clustering instant time and pass it to spark launcher for scheduling clustering
String clusteringInstantTime = client.createNewInstantTime();
sparkLauncher.addAppArgs(SparkCommand.CLUSTERING_SCHEDULE.toString(), master, sparkMemory, client.getBasePath(), client.getTableConfig().getTableName(), clusteringInstantTime, propsFilePath);
UtilHelpers.validateAndAddProperties(configs, sparkLauncher);
Process process = sparkLauncher.launch();
InputStreamConsumer.captureOutput(process);
int exitCode = process.waitFor();
if (exitCode != 0) {
return "Failed to schedule clustering for " + clusteringInstantTime;
}
return "Succeeded to schedule clustering for " + clusteringInstantTime;
}
| 3.26 |
hudi_HoodieBloomIndex_tagLocationBacktoRecords_rdh
|
/**
* Tag the <rowKey, filename> back to the original HoodieRecord List.
*/
protected <R> HoodieData<HoodieRecord<R>> tagLocationBacktoRecords(HoodiePairData<HoodieKey, HoodieRecordLocation> keyFilenamePair, HoodieData<HoodieRecord<R>> records, HoodieTable hoodieTable) {
HoodiePairData<HoodieKey, HoodieRecord<R>> keyRecordPairs = records.mapToPair(record -> new ImmutablePair<>(record.getKey(), record));
// Here as the records might have more data than keyFilenamePairs (some row keys' fileId is null),
// so we do left outer join.
return keyRecordPairs.leftOuterJoin(keyFilenamePair).values().map(v -> HoodieIndexUtils.tagAsNewRecordIfNeeded(v.getLeft(), Option.ofNullable(v.getRight().orElse(null))));
}
| 3.26 |
hudi_HoodieBloomIndex_lookupIndex_rdh
|
/**
* Lookup the location for each record key and return the pair<record_key,location> for all record keys already
* present and drop the record keys if not present.
*/
private HoodiePairData<HoodieKey, HoodieRecordLocation> lookupIndex(HoodiePairData<String, String> partitionRecordKeyPairs, final HoodieEngineContext context, final HoodieTable hoodieTable) {
// Step 1: Obtain records per partition, in the incoming records
Map<String, Long> recordsPerPartition = partitionRecordKeyPairs.countByKey();
List<String> affectedPartitionPathList = new ArrayList<>(recordsPerPartition.keySet());
// Step 2: Load all involved files as <Partition, filename> pairs
List<Pair<String, BloomIndexFileInfo>> fileInfoList = getBloomIndexFileInfoForPartitions(context, hoodieTable, affectedPartitionPathList);
final Map<String, List<BloomIndexFileInfo>> partitionToFileInfo = fileInfoList.stream().collect(groupingBy(Pair::getLeft, mapping(Pair::getRight, toList())));
// Step 3: Obtain a HoodieData, for each incoming record, that already exists, with the file id,
// that contains it.
HoodiePairData<HoodieFileGroupId, String> fileComparisonPairs = explodeRecordsWithFileComparisons(partitionToFileInfo, partitionRecordKeyPairs);
return bloomIndexHelper.findMatchingFilesForRecordKeys(config, context, hoodieTable, partitionRecordKeyPairs, fileComparisonPairs, partitionToFileInfo, recordsPerPartition);
}
| 3.26 |
hudi_HoodieBloomIndex_explodeRecordsWithFileComparisons_rdh
|
/**
* For each incoming record, produce N output records, 1 each for each file against which the record's key needs to be
* checked. For tables, where the keys have a definite insert order (e.g: timestamp as prefix), the number of files
* to be compared gets cut down a lot from range pruning.
* <p>
* Sub-partition to ensure the records can be looked up against files & also prune file<=>record comparisons based on
* recordKey ranges in the index info.
*/
HoodiePairData<HoodieFileGroupId, String> explodeRecordsWithFileComparisons(final Map<String, List<BloomIndexFileInfo>> partitionToFileIndexInfo, HoodiePairData<String, String> partitionRecordKeyPairs) {
IndexFileFilter indexFileFilter = config.useBloomIndexTreebasedFilter() ? new IntervalTreeBasedIndexFileFilter(partitionToFileIndexInfo) : new ListBasedIndexFileFilter(partitionToFileIndexInfo);
return partitionRecordKeyPairs.map(partitionRecordKeyPair -> {
String recordKey = partitionRecordKeyPair.getRight();
String partitionPath = partitionRecordKeyPair.getLeft();
return indexFileFilter.getMatchingFilesAndPartition(partitionPath, recordKey).stream().map(partitionFileIdPair -> new ImmutablePair<>(new HoodieFileGroupId(partitionFileIdPair.getLeft(), partitionFileIdPair.getRight()), recordKey));
}).flatMapToPair(Stream::iterator);
}
| 3.26 |
hudi_HoodieBloomIndex_isGlobal_rdh
|
/**
* This is not global, since we depend on the partitionPath to do the lookup.
*/
@Override
public boolean isGlobal() {
return false;
}
| 3.26 |
hudi_HoodieBloomIndex_canIndexLogFiles_rdh
|
/**
* No indexes into log files yet.
*/
@Override
public boolean canIndexLogFiles() {
return false;
}
| 3.26 |
hudi_HoodieBloomIndex_getFileInfoForLatestBaseFiles_rdh
|
/**
* Get BloomIndexFileInfo for all the latest base files for the requested partitions.
*
* @param partitions
* - List of partitions to get the base files for
* @param context
* - Engine context
* @param hoodieTable
* - Hoodie Table
* @return List of partition and file column range info pairs
*/
private List<Pair<String, BloomIndexFileInfo>> getFileInfoForLatestBaseFiles(List<String> partitions, final HoodieEngineContext context, final HoodieTable hoodieTable) {
List<Pair<String, String>> partitionPathFileIDList = getLatestBaseFilesForAllPartitions(partitions, context, hoodieTable).stream().map(pair -> Pair.of(pair.getKey(), pair.getValue().getFileId())).collect(toList());
return partitionPathFileIDList.stream().map(pf -> Pair.of(pf.getKey(), new BloomIndexFileInfo(pf.getValue()))).collect(toList());
}
| 3.26 |
hudi_HoodieBloomIndex_loadColumnRangesFromMetaIndex_rdh
|
/**
* Load the column stats index as BloomIndexFileInfo for all the involved files in the partition.
*
* @param partitions
* - List of partitions for which column stats need to be loaded
* @param context
* - Engine context
* @param hoodieTable
* - Hoodie table
* @return List of partition and file column range info pairs
*/
protected List<Pair<String, BloomIndexFileInfo>> loadColumnRangesFromMetaIndex(List<String> partitions, final HoodieEngineContext context, final HoodieTable<?, ?, ?, ?> hoodieTable) {
// also obtain file ranges, if range pruning is enabled
context.setJobStatus(this.getClass().getName(), "Load meta index key ranges for file slices: " + config.getTableName());
String keyField = HoodieMetadataField.RECORD_KEY_METADATA_FIELD.getFieldName();
List<Pair<String, HoodieBaseFile>> baseFilesForAllPartitions = HoodieIndexUtils.getLatestBaseFilesForAllPartitions(partitions, context, hoodieTable);
// Partition and file name pairs
List<Pair<String, String>> partitionFileNameList = new ArrayList<>(baseFilesForAllPartitions.size());
Map<Pair<String, String>, String> partitionAndFileNameToFileId = new HashMap<>(baseFilesForAllPartitions.size(), 1);
baseFilesForAllPartitions.forEach(pair -> {
Pair<String, String> partitionAndFileName = Pair.of(pair.getKey(), pair.getValue().getFileName());
partitionFileNameList.add(partitionAndFileName);
partitionAndFileNameToFileId.put(partitionAndFileName, pair.getValue().getFileId());
});
if (partitionFileNameList.isEmpty()) {
return Collections.emptyList();
}
Map<Pair<String, String>, HoodieMetadataColumnStats> fileToColumnStatsMap = hoodieTable.getMetadataTable().getColumnStats(partitionFileNameList, keyField);
List<Pair<String, BloomIndexFileInfo>> result = new ArrayList<>(fileToColumnStatsMap.size());
for (Map.Entry<Pair<String, String>, HoodieMetadataColumnStats> entry : fileToColumnStatsMap.entrySet()) {
// NOTE: Here we assume that the type of the primary key field is string
result.add(Pair.of(entry.getKey().getLeft(), new BloomIndexFileInfo(partitionAndFileNameToFileId.get(entry.getKey()), ((String) (unwrapAvroValueWrapper(entry.getValue().getMinValue()))), ((String) (unwrapAvroValueWrapper(entry.getValue().getMaxValue()))))));
}
return result;
}
| 3.26 |
hudi_HoodieBloomIndex_isImplicitWithStorage_rdh
|
/**
* Bloom filters are stored in the same data files.
*/
@Override
public boolean isImplicitWithStorage() {
return true;
}
| 3.26 |
hudi_HoodieBloomIndex_loadColumnRangesFromFiles_rdh
|
/**
* Load all involved files as <Partition, filename> pair List.
*/
List<Pair<String, BloomIndexFileInfo>> loadColumnRangesFromFiles(List<String> partitions, final HoodieEngineContext context, final HoodieTable hoodieTable) {
// Obtain the latest data files from all the partitions.
List<Pair<String, Pair<String, HoodieBaseFile>>> partitionPathFileIDList = getLatestBaseFilesForAllPartitions(partitions, context, hoodieTable).stream().map(pair -> Pair.of(pair.getKey(), Pair.of(pair.getValue().getFileId(), pair.getValue()))).collect(toList());
context.setJobStatus(this.getClass().getName(), "Obtain key ranges for file slices (range pruning=on): " + config.getTableName());
return context.map(partitionPathFileIDList, pf -> {
try {
HoodieRangeInfoHandle rangeInfoHandle = new HoodieRangeInfoHandle(config, hoodieTable, Pair.of(pf.getKey(), pf.getValue().getKey()));
String[] minMaxKeys = rangeInfoHandle.getMinMaxKeys(pf.getValue().getValue());
return Pair.of(pf.getKey(), new BloomIndexFileInfo(pf.getValue().getKey(), minMaxKeys[0], minMaxKeys[1]));
} catch (MetadataNotFoundException me) {
LOG.warn("Unable to find range metadata in file :" + pf);
return Pair.of(pf.getKey(), new BloomIndexFileInfo(pf.getValue().getKey()));
}
}, Math.max(partitionPathFileIDList.size(), 1));
}
| 3.26 |
hudi_FlinkConcatAndReplaceHandle_write_rdh
|
/**
* Write old record as is w/o merging with incoming record.
*/
@Override
public void write(HoodieRecord oldRecord) {
Schema oldSchema = (config.populateMetaFields()) ? writeSchemaWithMetaFields : writeSchema;
String v1 = oldRecord.getRecordKey(oldSchema, keyGeneratorOpt);
try {
fileWriter.write(v1, oldRecord, writeSchema);
} catch (IOException | RuntimeException e) {
String errMsg = String.format("Failed to write old record into new file for key %s from old file %s to new file %s with writerSchema %s", v1, getOldFilePath(), newFilePath, writeSchemaWithMetaFields.toString(true));
LOG.debug("Old record is " + oldRecord);
throw new HoodieUpsertException(errMsg, e);
}
recordsWritten++;
}
| 3.26 |
hudi_HoodieTimer_start_rdh
|
/**
* Creates an instance of {@link HoodieTimer} already started
*/
public static HoodieTimer start() {
return new HoodieTimer(true);
}
| 3.26 |
hudi_HoodieTimer_create_rdh
|
/**
* Creates an instance of {@link HoodieTimer} that is NOT started
*/
public static HoodieTimer create() {
return new HoodieTimer(false);
}
| 3.26 |
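The two factories above differ only in whether the returned timer is already running. A short usage sketch (hedged: it assumes HoodieTimer's usual startTimer()/endTimer() accessors, with endTimer() returning elapsed milliseconds):

// Sketch only: timing a piece of work with HoodieTimer.
long timedWork(Runnable work) {
HoodieTimer timer = HoodieTimer.start(); // factory above: timer is already running
work.run();
return timer.endTimer(); // assumed accessor: elapsed time in milliseconds
}
// HoodieTimer.create() instead returns a stopped timer; call startTimer() before measuring.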
hudi_HoodieGlobalBloomIndex_loadColumnRangesFromFiles_rdh
|
/**
* Load all involved files as <Partition, filename> pairs from all partitions in the table.
*/
@Override
List<Pair<String, BloomIndexFileInfo>> loadColumnRangesFromFiles(List<String> partitions, final HoodieEngineContext context, final HoodieTable hoodieTable) {
HoodieTableMetaClient metaClient = hoodieTable.getMetaClient();
List<String> allPartitionPaths = FSUtils.getAllPartitionPaths(context, config.getMetadataConfig(), metaClient.getBasePath());
return super.loadColumnRangesFromFiles(allPartitionPaths,
context, hoodieTable);
}
| 3.26 |
hudi_HoodieGlobalBloomIndex_tagLocationBacktoRecords_rdh
|
/**
* Tagging for global index should only consider the record key.
*/
@Override
protected <R> HoodieData<HoodieRecord<R>> tagLocationBacktoRecords(HoodiePairData<HoodieKey, HoodieRecordLocation> keyLocationPairs, HoodieData<HoodieRecord<R>> records, HoodieTable hoodieTable) {
HoodiePairData<String, HoodieRecordGlobalLocation> keyAndExistingLocations = keyLocationPairs.mapToPair(p -> Pair.of(p.getLeft().getRecordKey(),
HoodieRecordGlobalLocation.fromLocal(p.getLeft().getPartitionPath(), p.getRight())));
boolean mayContainDuplicateLookup = hoodieTable.getMetaClient().getTableType() == MERGE_ON_READ;
boolean shouldUpdatePartitionPath = config.getGlobalBloomIndexUpdatePartitionPath() && hoodieTable.isPartitioned();
return tagGlobalLocationBackToRecords(records, keyAndExistingLocations, mayContainDuplicateLookup, shouldUpdatePartitionPath, config, hoodieTable);
}
| 3.26 |
hudi_HoodieGlobalBloomIndex_explodeRecordsWithFileComparisons_rdh
|
/**
* For each incoming record, produce N output records, 1 each for each file against which the record's key needs to be
* checked. For tables, where the keys have a definite insert order (e.g: timestamp as prefix), the number of files
* to be compared gets cut down a lot from range pruning.
* <p>
* Sub-partition to ensure the records can be looked up against files & also prune file<=>record comparisons based on
* recordKey ranges in the index info. the partition path of the incoming record (partitionRecordKeyPairs._2()) will
* be ignored since the search scope should be bigger than that
*/
@Override
HoodiePairData<HoodieFileGroupId, String> explodeRecordsWithFileComparisons(final Map<String, List<BloomIndexFileInfo>> partitionToFileIndexInfo, HoodiePairData<String, String> partitionRecordKeyPairs) {
IndexFileFilter indexFileFilter = config.useBloomIndexTreebasedFilter() ? new IntervalTreeBasedGlobalIndexFileFilter(partitionToFileIndexInfo) : new ListBasedGlobalIndexFileFilter(partitionToFileIndexInfo);
return partitionRecordKeyPairs.map(partitionRecordKeyPair -> {
String recordKey = partitionRecordKeyPair.getRight();
String partitionPath = partitionRecordKeyPair.getLeft();
return indexFileFilter.getMatchingFilesAndPartition(partitionPath, recordKey).stream().map(partitionFileIdPair -> new ImmutablePair<>(new HoodieFileGroupId(partitionFileIdPair.getLeft(), partitionFileIdPair.getRight()), recordKey));
}).flatMapToPair(Stream::iterator);
}
| 3.26 |
hudi_PostgresDebeziumSource_processDataset_rdh
|
/**
* Debezium Kafka Payload has a nested structure (see https://debezium.io/documentation/reference/1.4/connectors/postgresql.html#postgresql-create-events).
* This function flattens this nested structure for the Postgres data, and also extracts a subset of Debezium metadata fields.
*
* @param rowDataset
* Dataset containing Debezium Payloads
* @return New dataset with flattened columns
*/
@Override
protected Dataset<Row> processDataset(Dataset<Row> rowDataset) {
if (rowDataset.columns().length > 0) {
// Pick selective debezium and postgres meta fields: pick the row values from before field for delete record
// and row values from after field for insert or update records.
Dataset<Row> insertedOrUpdatedData =
rowDataset.selectExpr(String.format("%s as %s", DebeziumConstants.INCOMING_OP_FIELD, DebeziumConstants.FLATTENED_OP_COL_NAME), String.format("%s as %s", DebeziumConstants.INCOMING_TS_MS_FIELD, DebeziumConstants.UPSTREAM_PROCESSING_TS_COL_NAME), String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_NAME_FIELD, DebeziumConstants.FLATTENED_SHARD_NAME), String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_SCHEMA_FIELD, DebeziumConstants.FLATTENED_SCHEMA_NAME), String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_TS_MS_FIELD, DebeziumConstants.FLATTENED_TS_COL_NAME), String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_TXID_FIELD, DebeziumConstants.FLATTENED_TX_ID_COL_NAME), String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_LSN_FIELD, DebeziumConstants.FLATTENED_LSN_COL_NAME), String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_XMIN_FIELD, DebeziumConstants.FLATTENED_XMIN_COL_NAME), String.format("%s.*", DebeziumConstants.INCOMING_AFTER_FIELD)).filter(rowDataset.col(DebeziumConstants.INCOMING_OP_FIELD).notEqual(DebeziumConstants.DELETE_OP));
Dataset<Row> deletedData = rowDataset.selectExpr(String.format("%s as %s", DebeziumConstants.INCOMING_OP_FIELD, DebeziumConstants.FLATTENED_OP_COL_NAME), String.format("%s as %s", DebeziumConstants.INCOMING_TS_MS_FIELD, DebeziumConstants.UPSTREAM_PROCESSING_TS_COL_NAME), String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_NAME_FIELD, DebeziumConstants.FLATTENED_SHARD_NAME), String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_SCHEMA_FIELD, DebeziumConstants.FLATTENED_SCHEMA_NAME), String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_TS_MS_FIELD, DebeziumConstants.FLATTENED_TS_COL_NAME), String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_TXID_FIELD, DebeziumConstants.FLATTENED_TX_ID_COL_NAME), String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_LSN_FIELD, DebeziumConstants.FLATTENED_LSN_COL_NAME), String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_XMIN_FIELD, DebeziumConstants.FLATTENED_XMIN_COL_NAME), String.format("%s.*", DebeziumConstants.INCOMING_BEFORE_FIELD)).filter(rowDataset.col(DebeziumConstants.INCOMING_OP_FIELD).equalTo(DebeziumConstants.DELETE_OP));
return insertedOrUpdatedData.union(deletedData);
} else {
return rowDataset;
}
}
| 3.26 |
hudi_HoodieJavaWriteClient_initializeMetadataTable_rdh
|
/**
* Initialize the metadata table if needed. Creating the metadata table writer
* will trigger the initial bootstrapping from the data table.
*
* @param inFlightInstantTimestamp
* - The in-flight action responsible for the metadata table initialization
*/
private void initializeMetadataTable(Option<String> inFlightInstantTimestamp) {
if (!config.isMetadataTableEnabled()) {
return;
}
try (HoodieTableMetadataWriter writer = JavaHoodieBackedTableMetadataWriter.create(context.getHadoopConf().get(), config, context, inFlightInstantTimestamp)) {
if (writer.isInitialized()) {
writer.performTableServices(inFlightInstantTimestamp);
}
} catch (Exception e) {
throw new HoodieException("Failed to instantiate Metadata table ", e);
}
}
| 3.26 |
hudi_HoodieCLI_getTableMetaClient_rdh
|
/**
* Get tableMetadata, throw NullPointerException when it is null.
*
* @return tableMetadata which is instance of HoodieTableMetaClient
*/
public static HoodieTableMetaClient getTableMetaClient() {
if (tableMetadata == null) {
throw new NullPointerException("There is no hudi table. Please use connect command to set table first");
}
return tableMetadata;
}
| 3.26 |
hudi_WriteProfile_reload_rdh
|
/**
* Reload the write profile, should do once for each checkpoint.
*
* <p>We do these things: i). reload the timeline; ii). re-construct the record profile;
* iii) clean the small files cache.
*
* <p>Note: This method should be thread safe.
*/
public synchronized void reload(long checkpointId) {
if (this.reloadedCheckpointId >= checkpointId) {
// already reloaded
return;
}
this.metaClient.reloadActiveTimeline();
// release the old fs view and create a new one
SyncableFileSystemView v16 = this.fsView;
this.fsView = getFileSystemView();
v16.close();
recordProfile();
cleanMetadataCache(this.metaClient.getCommitsTimeline().filterCompletedInstants().getInstantsAsStream());
this.smallFilesMap.clear();
this.reloadedCheckpointId = checkpointId;
}
| 3.26 |
hudi_WriteProfile_cleanMetadataCache_rdh
|
/**
* Remove the overdue metadata from the cache
* whose instant does not belong to the given instants {@code instants}.
*/
private void cleanMetadataCache(Stream<HoodieInstant> instants) {
Set<String> timestampSet = instants.map(HoodieInstant::getTimestamp).collect(Collectors.toSet());
this.metadataCache.keySet().retainAll(timestampSet);
}
| 3.26 |
hudi_WriteProfile_getSmallFiles_rdh
|
/**
* Returns a list of small files in the given partition path.
*
* <p>Note: This method should be thread safe.
*/
public synchronized List<SmallFile> getSmallFiles(String partitionPath) {
// lookup the cache first
if (smallFilesMap.containsKey(partitionPath)) {
return smallFilesMap.get(partitionPath);
}
List<SmallFile> smallFiles = new ArrayList<>();
if (config.getParquetSmallFileLimit() <= 0) {
this.smallFilesMap.put(partitionPath, smallFiles);
return smallFiles;
}
smallFiles = smallFilesProfile(partitionPath);
this.smallFilesMap.put(partitionPath, smallFiles);
return smallFiles;
}
| 3.26 |
hudi_WriteProfile_averageBytesPerRecord_rdh
|
/**
* Obtains the average record size based on records written during previous commits. Used for estimating how many
* records pack into one file.
*/
private long averageBytesPerRecord() {
long avgSize = config.getCopyOnWriteRecordSizeEstimate();
long fileSizeThreshold = ((long) (config.getRecordSizeEstimationThreshold() * config.getParquetSmallFileLimit()));
HoodieTimeline commitTimeline = metaClient.getCommitsTimeline().filterCompletedInstants();
if (!commitTimeline.empty()) {
// Go over the reverse ordered commits to get a more recent estimate of average record size.
Iterator<HoodieInstant> instants = commitTimeline.getReverseOrderedInstants().iterator();
while (instants.hasNext()) {
HoodieInstant instant = instants.next();
final HoodieCommitMetadata commitMetadata = this.metadataCache.computeIfAbsent(instant.getTimestamp(), k -> WriteProfiles.getCommitMetadataSafely(config.getTableName(), basePath, instant, commitTimeline).orElse(null));
if (commitMetadata == null) {
continue;
}
long totalBytesWritten = commitMetadata.fetchTotalBytesWritten();
long totalRecordsWritten = commitMetadata.fetchTotalRecordsWritten();
if ((totalBytesWritten > fileSizeThreshold) && (totalRecordsWritten > 0)) {
avgSize = ((long) (Math.ceil((1.0 * totalBytesWritten) / totalRecordsWritten)));
break;
}
}
}
LOG.info("Refresh average bytes per record => " + avgSize);
return avgSize;
}
| 3.26 |
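A standalone sketch of the estimate above (hypothetical helper, not the Hudi API): walking completed commits from newest to oldest, the first one that wrote more bytes than the threshold and at least one record yields ceil(totalBytes / totalRecords); otherwise the configured default estimate is kept.

// Hypothetical sketch of the average record size estimate; commits are ordered newest first,
// each entry holding {totalBytesWritten, totalRecordsWritten}.
static long estimateAvgRecordSize(long defaultEstimate, long fileSizeThreshold, long[][] commits) {
for (long[] commit : commits) {
long totalBytesWritten = commit[0];
long totalRecordsWritten = commit[1];
if (totalBytesWritten > fileSizeThreshold && totalRecordsWritten > 0) {
return (long) Math.ceil((1.0 * totalBytesWritten) / totalRecordsWritten);
}
}
return defaultEstimate;
}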
hudi_WriteProfile_smallFilesProfile_rdh
|
/**
* Returns a list of small files in the given partition path from the latest filesystem view.
*/
protected List<SmallFile> smallFilesProfile(String partitionPath) {
// smallFiles only for partitionPath
List<SmallFile> smallFileLocations = new ArrayList<>();
HoodieTimeline commitTimeline = metaClient.getCommitsTimeline().filterCompletedInstants();
if (!commitTimeline.empty()) {
// if we have some commits
HoodieInstant latestCommitTime = commitTimeline.lastInstant().get();
List<HoodieBaseFile> allFiles = fsView.getLatestBaseFilesBeforeOrOn(partitionPath, latestCommitTime.getTimestamp()).collect(Collectors.toList());
for (HoodieBaseFile file : allFiles) {
// filter out the corrupted files.
if ((file.getFileSize() < config.getParquetSmallFileLimit()) && (file.getFileSize() > 0)) {
SmallFile sf = new SmallFile();
sf.location = new HoodieRecordLocation(file.getCommitTime(), file.getFileId());
sf.sizeBytes = file.getFileSize();
smallFileLocations.add(sf);
}
}
}
return smallFileLocations;
}
| 3.26 |
hudi_Hive3Shim_getTimestampWriteable_rdh
|
/**
* Get timestamp writeable object from long value.
* Hive3 uses TimestampWritableV2 to build timestamp objects while Hive2 uses TimestampWritable,
* so we need to initialize the timestamp according to the Hive version.
*/
public Writable getTimestampWriteable(long value, boolean timestampMillis) {
try {
Object timestamp = TIMESTAMP_CLASS.newInstance();
SET_TIME_IN_MILLIS.invoke(timestamp, timestampMillis ? value : value / 1000);
return ((Writable) (TIMESTAMP_WRITEABLE_V2_CONSTRUCTOR.newInstance(timestamp)));
} catch (IllegalAccessException | InstantiationException | InvocationTargetException e) {
throw new HoodieException("can not create writable v2 class!", e);
}
}
| 3.26 |
hudi_Hive3Shim_m0_rdh
|
/**
* Get date writeable object from int value.
* Hive3 uses DateWritableV2 to build date objects while Hive2 uses DateWritable,
* so we need to initialize the date according to the Hive version.
*/
public Writable m0(int value) {
try {
return ((Writable) (DATE_WRITEABLE_V2_CONSTRUCTOR.newInstance(value)));
} catch (IllegalAccessException | InstantiationException | InvocationTargetException e) {
throw new HoodieException("can not create writable v2 class!", e);
}
}
| 3.26 |
hudi_HashFunction_hash_rdh
|
/**
* Hashes a specified key into several integers.
*
* @param k
* The specified key.
* @return The array of hashed values.
*/
public int[] hash(Key k) {
byte[] b = k.getBytes();
if (b == null) {
throw new NullPointerException("buffer reference is null");
}
if (b.length == 0) {
throw new IllegalArgumentException("key length must be > 0");
}
int[] result = new int[nbHash];
for (int i = 0, initval = 0; i < nbHash; i++) {
initval = hashFunction.hash(b, initval);
result[i] = Math.abs(initval % maxValue);
}
return result;
}
| 3.26 |
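The loop above chains the hash rounds: each iteration re-hashes the key bytes seeded with the previous round's value and maps the result into [0, maxValue). A self-contained sketch of the same pattern, with a simple FNV-1a-style hash standing in for the pluggable hash function (hypothetical names, not the Hudi API):

// Standalone sketch: derive k bucket indices from one key, as in hash(Key) above.
static int[] hashKey(byte[] key, int k, int maxValue) {
int[] result = new int[k];
int seed = 0;
for (int i = 0; i < k; i++) {
seed = fnv1a(key, seed); // re-seed with the previous round's value
result[i] = Math.abs(seed % maxValue); // map into [0, maxValue)
}
return result;
}

static int fnv1a(byte[] data, int seed) {
int hash = 0x811C9DC5 ^ seed;
for (byte b : data) {
hash ^= (b & 0xFF);
hash *= 0x01000193;
}
return hash;
}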
hudi_HashFunction_clear_rdh
|
/**
* Clears <i>this</i> hash function. A NOOP
*/
public void clear() {
}
| 3.26 |
hudi_BaseHoodieQueueBasedExecutor_execute_rdh
|
/**
* Main API to run both production and consumption.
*/
@Override
public E execute() {
try {
checkState(this.consumer.isPresent());
setUp();
// Start consuming/producing asynchronously
this.consumingFuture = startConsumingAsync();
this.producingFuture = startProducingAsync();
// NOTE: To properly support mode when there's no consumer, we have to fall back
// to producing future as the trigger for us to shut down the queue
// Block until producing and consuming both finish
return allOf(Arrays.asList(producingFuture, consumingFuture)).whenComplete((ignored, throwable) -> {
// Close the queue to release the resources
queue.close();
}).thenApply(ignored -> consumer.get().finish()).get();
} catch (Exception e) {
if (e instanceof InterruptedException) {
// In case {@code InterruptedException} was thrown, resetting the interrupted flag
// of the thread, we reset it (to true) again to permit subsequent handlers
// to be interrupted as well
Thread.currentThread().interrupt();
}
// Throw if we have already seen any other exception. There is a chance that cancellation/closing of producers
// with CompletableFuture wins before the actual exception is thrown.
if (this.queue.getThrowable() != null) {
throw new HoodieException(queue.getThrowable());
}
throw new HoodieException(e);
}
}
| 3.26 |
hudi_BaseHoodieQueueBasedExecutor_startConsumingAsync_rdh
|
/**
* Start consumer
*/
private CompletableFuture<Void> startConsumingAsync() {
return consumer.map(consumer -> CompletableFuture.supplyAsync(() -> {
doConsume(queue, consumer);
return ((Void) (null));
}, consumerExecutorService)).orElse(CompletableFuture.completedFuture(null));
}
| 3.26 |
hudi_BaseHoodieQueueBasedExecutor_startProducingAsync_rdh
|
/**
* Start producing
*/
public final CompletableFuture<Void> startProducingAsync() {
return allOf(producers.stream().map(producer -> CompletableFuture.supplyAsync(() -> {
doProduce(queue, producer);
return ((Void) (null));
}, producerExecutorService)).collect(Collectors.toList())).thenApply(ignored -> ((Void) (null))).whenComplete((result, throwable) -> {
// Regardless of how producing has completed, we have to close producers
// to make sure resources are properly cleaned up
producers.forEach(HoodieProducer::close);
// Mark production as done so that consumer will be able to exit
queue.seal();
});
}
| 3.26 |
hudi_ParquetSchemaConverter_toParquetType_rdh
|
/**
* Converts Flink Internal Type to Parquet schema.
*
* @param typeInformation
* Flink type information
* @param legacyMode
* is standard LIST and MAP schema or back-compatible schema
* @return Parquet schema
*/
public static MessageType toParquetType(TypeInformation<?> typeInformation, boolean legacyMode) {
return ((MessageType) (convertField(null, typeInformation, Repetition.OPTIONAL, legacyMode)));
}
| 3.26 |
hudi_ParquetSchemaConverter_fromParquetType_rdh
|
/**
* Converts Parquet schema to Flink Internal Type.
*
* @param type
* Parquet schema
* @return Flink type information
*/
public static TypeInformation<?> fromParquetType(MessageType type) {
return convertFields(type.getFields());
}
| 3.26 |
hudi_TableChangesHelper_applyAddChange2Fields_rdh
|
/**
* Apply add operation and column position change operation.
*
* @param fields
* origin column fields.
* @param adds
* column fields to be added.
* @param pchanges
* a wrapper class hold all the position change operations.
* @return column fields after adjusting the position.
*/
public static List<Types.Field> applyAddChange2Fields(List<Types.Field> fields, ArrayList<Types.Field> adds, ArrayList<TableChange.ColumnPositionChange> pchanges) {
if ((adds == null) && (pchanges == null)) {
return fields;
}
LinkedList<Types.Field> result = new LinkedList<>(fields);
// apply add columns
if ((adds != null) && (!adds.isEmpty())) {
result.addAll(adds);
}
// apply position change
if ((pchanges != null) && (!pchanges.isEmpty())) {
for (TableChange.ColumnPositionChange pchange : pchanges) {
Types.Field srcField = result.stream().filter(f -> f.fieldId() == pchange.getSrcId()).findFirst().get();
Types.Field dsrField = result.stream().filter(f -> f.fieldId() == pchange.getDsrId()).findFirst().orElse(null);
// we remove srcField first
result.remove(srcField);
switch (pchange.type()) {
case AFTER :
// add srcField after dsrField
result.add(result.indexOf(dsrField) + 1, srcField);
break;
case BEFORE :
// add srcField before dsrField
result.add(result.indexOf(dsrField), srcField);
break;
case FIRST :
result.addFirst(srcField);
break;
default :
// should not reach here
}
}
}
return result;
}
| 3.26 |
hudi_AvroSchemaEvolutionUtils_reconcileSchema_rdh
|
/**
* Support reconcile from a new avroSchema.
* 1) incoming data has missing columns that were already defined in the table -> null values will be injected into missing columns
* 2) incoming data contains new columns not defined yet in the table -> columns will be added to the table schema (incoming dataframe?)
* 3) incoming data has missing columns that are already defined in the table and new columns not yet defined in the table ->
* new columns will be added to the table schema, missing columns will be injected with null values
* 4) support type change
* 5) support nested schema change.
* Notice:
* the incoming schema should not have delete/rename semantics.
* for example: incoming schema: int a, int b, int d; oldTableSchema int a, int b, int c, int d
* we must guarantee the column c is missing semantic, instead of delete semantic.
*
* @param incomingSchema
* implicitly evolution of avro when hoodie write operation
* @param oldTableSchema
* old internalSchema
* @return reconcile Schema
*/
public static InternalSchema reconcileSchema(Schema incomingSchema, InternalSchema oldTableSchema) {
/* If incoming schema is null, we fall back on table schema. */
if (incomingSchema.getType() == Type.NULL) {
return oldTableSchema;
}
InternalSchema inComingInternalSchema = convert(incomingSchema);
// check column add/missing
List<String> colNamesFromIncoming = inComingInternalSchema.getAllColsFullName();
List<String> colNamesFromOldSchema = oldTableSchema.getAllColsFullName();
List<String> diffFromOldSchema = colNamesFromOldSchema.stream().filter(f -> !colNamesFromIncoming.contains(f)).collect(Collectors.toList());
List<String> diffFromEvolutionColumns = colNamesFromIncoming.stream().filter(f -> !colNamesFromOldSchema.contains(f)).collect(Collectors.toList());
// check type change.
List<String> typeChangeColumns = colNamesFromIncoming.stream().filter(f -> colNamesFromOldSchema.contains(f) && (!inComingInternalSchema.findType(f).equals(oldTableSchema.findType(f)))).collect(Collectors.toList());
if (((colNamesFromIncoming.size() == colNamesFromOldSchema.size()) && (diffFromOldSchema.size() == 0)) && typeChangeColumns.isEmpty()) {
return oldTableSchema;
}
// Remove redundancy from diffFromEvolutionSchema.
// for example, now we add a struct col in evolvedSchema, the struct col is " user struct<name:string, age:int> "
// when we do diff operation: user, user.name, user.age will appeared in the resultSet which is redundancy, user.name and user.age should be excluded.
// deal with add operation
TreeMap<Integer, String> finalAddAction = new TreeMap<>();
for (int i = 0; i < diffFromEvolutionColumns.size(); i++) {
String name = diffFromEvolutionColumns.get(i);
int splitPoint = name.lastIndexOf(".");
String parentName = (splitPoint > 0) ? name.substring(0, splitPoint) : "";
if ((!parentName.isEmpty()) && diffFromEvolutionColumns.contains(parentName)) {
// find redundancy, skip it
continue;
}
finalAddAction.put(inComingInternalSchema.findIdByName(name), name);
}
TableChanges.ColumnAddChange addChange = TableChanges.ColumnAddChange.get(oldTableSchema);
finalAddAction.entrySet().stream().forEach(f -> {
String name = f.getValue();
int splitPoint = name.lastIndexOf(".");
String parentName = (splitPoint > 0) ? name.substring(0, splitPoint) : "";
String rawName = (splitPoint > 0) ? name.substring(splitPoint + 1) : name;
// try to infer add position.
Optional<String> inferPosition = colNamesFromIncoming.stream().filter(c -> (((c.lastIndexOf(".") == splitPoint) && c.startsWith(parentName)) && (inComingInternalSchema.findIdByName(c) > inComingInternalSchema.findIdByName(name))) && (oldTableSchema.findIdByName(c) > 0)).sorted((s1, s2) -> oldTableSchema.findIdByName(s1) - oldTableSchema.findIdByName(s2)).findFirst();
addChange.addColumns(parentName, rawName, inComingInternalSchema.findType(name), null);
inferPosition.map(i -> addChange.addPositionChange(name, i, "before"));
});
// do type evolution.
InternalSchema internalSchemaAfterAddColumns = SchemaChangeUtils.applyTableChanges2Schema(oldTableSchema, addChange);
TableChanges.ColumnUpdateChange typeChange = TableChanges.ColumnUpdateChange.get(internalSchemaAfterAddColumns);
typeChangeColumns.stream().filter(f -> !inComingInternalSchema.findType(f).isNestedType()).forEach(col -> {
typeChange.updateColumnType(col, inComingInternalSchema.findType(col));
});
return SchemaChangeUtils.applyTableChanges2Schema(internalSchemaAfterAddColumns, typeChange);
}
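A hedged sketch of how this reconciliation could be exercised, mirroring the Javadoc example above (column c is missing from the incoming data, column d is new); the statically imported convert(...) helper used inside reconcileSchema is assumed to turn an Avro Schema into an InternalSchema, and the schema strings are illustrative only.

// Hedged sketch: the incoming schema drops c (missing, not deleted) and adds d.
Schema oldAvro = new Schema.Parser().parse(
    "{\"type\":\"record\",\"name\":\"rec\",\"fields\":["
        + "{\"name\":\"a\",\"type\":\"int\"},{\"name\":\"b\",\"type\":\"int\"},{\"name\":\"c\",\"type\":\"int\"}]}");
Schema incomingAvro = new Schema.Parser().parse(
    "{\"type\":\"record\",\"name\":\"rec\",\"fields\":["
        + "{\"name\":\"a\",\"type\":\"int\"},{\"name\":\"b\",\"type\":\"int\"},{\"name\":\"d\",\"type\":\"long\"}]}");
InternalSchema oldTableSchema = convert(oldAvro); // same convert(...) helper as used in reconcileSchema
InternalSchema reconciled = AvroSchemaEvolutionUtils.reconcileSchema(incomingAvro, oldTableSchema);
// Expected: reconciled still contains a, b and c, and gains the new column d.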
| 3.26 |
hudi_AvroSchemaEvolutionUtils_reconcileSchemaRequirements_rdh
|
/**
* Reconciles nullability and datatype requirements b/w {@code source} and {@code target} schemas,
* by adjusting these of the {@code source} schema to be in-line with the ones of the
* {@code target} one. Source is considered to be new incoming schema, while target could refer to prev table schema.
* For example,
* if colA in source is non-nullable, but is nullable in target, output schema will have colA as nullable.
* if "hoodie.datasource.write.new.columns.nullable" is set to true and if colB is not present in source, but
* is present in target, output schema will have colB as nullable.
 * if colC has a different data type in the source schema compared to the target schema and it is promotable (say source is int
 * and target is long; since int can be promoted to long), colC will have the long data type in the output schema.
*
* @param sourceSchema
* source schema that needs reconciliation
* @param targetSchema
* target schema that source schema will be reconciled against
* @param opts
* config options
* @return schema (based off {@code source} one) that has nullability constraints and datatypes reconciled
*/
  public static Schema reconcileSchemaRequirements(Schema sourceSchema, Schema targetSchema, Map<String, String> opts) {
    if ((sourceSchema.getType() == Type.NULL) || sourceSchema.getFields().isEmpty() || targetSchema.getFields().isEmpty()) {
      return sourceSchema;
    }
InternalSchema sourceInternalSchema = convert(sourceSchema);
InternalSchema targetInternalSchema = convert(targetSchema);
List<String> colNamesSourceSchema = sourceInternalSchema.getAllColsFullName();
List<String> colNamesTargetSchema = targetInternalSchema.getAllColsFullName();
boolean makeNewColsNullable = "true".equals(opts.get(MAKE_NEW_COLUMNS_NULLABLE.key()));
List<String> nullableUpdateColsInSource = new ArrayList<>();
List<String> typeUpdateColsInSource = new ArrayList<>();
colNamesSourceSchema.forEach(field -> {
// handle columns that needs to be made nullable
if ((makeNewColsNullable && (!colNamesTargetSchema.contains(field))) || (colNamesTargetSchema.contains(field) && (sourceInternalSchema.findField(field).isOptional() != targetInternalSchema.findField(field).isOptional()))) {
nullableUpdateColsInSource.add(field);
}
// handle columns that needs type to be updated
if (colNamesTargetSchema.contains(field) && SchemaChangeUtils.shouldPromoteType(sourceInternalSchema.findType(field), targetInternalSchema.findType(field))) {
typeUpdateColsInSource.add(field);
}
});
if (nullableUpdateColsInSource.isEmpty() && typeUpdateColsInSource.isEmpty()) {
// standardize order of unions
return convert(sourceInternalSchema, sourceSchema.getFullName());
}
TableChanges.ColumnUpdateChange schemaChange = TableChanges.ColumnUpdateChange.get(sourceInternalSchema);
// Reconcile nullability constraints (by executing phony schema change)
if (!nullableUpdateColsInSource.isEmpty()) {
schemaChange = reduce(nullableUpdateColsInSource, schemaChange, (change, field) -> change.updateColumnNullability(field, true));
}
// Reconcile type promotions
if (!typeUpdateColsInSource.isEmpty()) {
schemaChange = reduce(typeUpdateColsInSource, schemaChange, (change, field) -> change.updateColumnType(field, targetInternalSchema.findType(field)));
}
return convert(SchemaChangeUtils.applyTableChanges2Schema(sourceInternalSchema, schemaChange), sourceSchema.getFullName());
}
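A hedged sketch of driving the reconciliation above; sourceSchema and targetSchema are placeholder Avro record schemas, and MAKE_NEW_COLUMNS_NULLABLE is the config constant referenced in the method body.

// Hedged sketch: sourceSchema/targetSchema are placeholders for the incoming and previous table schemas.
Map<String, String> opts = new HashMap<>();
opts.put(MAKE_NEW_COLUMNS_NULLABLE.key(), "true"); // columns present only in the target become nullable
Schema reconciled = AvroSchemaEvolutionUtils.reconcileSchemaRequirements(sourceSchema, targetSchema, opts);
// e.g. a non-nullable int column in sourceSchema that is a nullable long in targetSchema
// is expected to come out of reconciliation as a nullable long.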
| 3.26 |
hudi_BinaryUtil_compareTo_rdh
|
/**
* Lexicographically compare two arrays.
* copy from hbase
*
* @param buffer1
* left operand
* @param buffer2
* right operand
* @param offset1
* Where to start comparing in the left buffer
* @param offset2
* Where to start comparing in the right buffer
* @param length1
* How much to compare from the left buffer
* @param length2
* How much to compare from the right buffer
* @return 0 if equal, < 0 if left is less than right, etc.
*/
public static int compareTo(byte[] buffer1, int offset1, int length1, byte[] buffer2, int offset2, int length2) {
// Short circuit equal case
if (((buffer1 == buffer2) && (offset1 == offset2)) && (length1 == length2)) {
return 0;
    }
    // Bring WritableComparator code local
    int end1 = offset1 + length1;
    int end2 = offset2 + length2;
    for (int i = offset1, j = offset2; (i < end1) && (j < end2); i++, j++) {
int a = buffer1[i] & 0xff;
int b = buffer2[j] & 0xff;
if (a != b) {
return a - b;
}
}
return length1 - length2;
}
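A quick illustration of the unsigned, lexicographic ordering implemented above, using plain byte arrays.

// Bytes compare as unsigned values: 0x80 (128) sorts after 0x7F (127), unlike a signed byte comparison.
byte[] left = new byte[] {0x01, (byte) 0x80};
byte[] right = new byte[] {0x01, 0x7F};
int cmp = BinaryUtil.compareTo(left, 0, left.length, right, 0, right.length); // cmp > 0
// Equal prefixes fall through to the trailing length comparison, so shorter arrays sort first.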
| 3.26 |
hudi_BinaryUtil_toBytes_rdh
|
/**
* Copies {@link ByteBuffer} into allocated {@code byte[]} array
   */
  public static byte[] toBytes(ByteBuffer buffer) {
    byte[] bytes = new byte[buffer.remaining()];
buffer.get(bytes);
return bytes;
}
| 3.26 |
hudi_BinaryUtil_generateChecksum_rdh
|
/**
* Generate a checksum for a given set of bytes.
*/
public static long generateChecksum(byte[] data) {
CRC32 crc = new CRC32();
crc.update(data);
return crc.getValue();
}
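A small usage note for the CRC32 helper above.

// The same byte sequence always yields the same CRC32 value, so a stored checksum can be
// compared against one recomputed at read time to detect corruption.
byte[] payload = "hoodie".getBytes(java.nio.charset.StandardCharsets.UTF_8);
long checksum = BinaryUtil.generateChecksum(payload);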
| 3.26 |
hudi_BinaryUtil_interleaving_rdh
|
/**
* Interleaving array bytes.
* Interleaving means take one bit from the first matrix element, one bit
* from the next, etc, then take the second bit from the first matrix
* element, second bit from the second, all the way to the last bit of the
 * last element. Combine those bits in that order into a single byte array, which can then be sorted lexicographically.
*
 * @param buffer
 * candidate elements to interleave
 * @param size
 * byte size of each candidate element
 * @return the interleaved bytes
*/
public static byte[] interleaving(byte[][] buffer, int size) {
int candidateSize = buffer.length;
byte[] result = new byte[size * candidateSize];
int resBitPos = 0;
int totalBits = size * 8;
for (int bitStep = 0; bitStep < totalBits; bitStep++) {
int currentBytePos = ((int) (Math.floor(bitStep / 8)));
int currentBitPos = bitStep % 8;
for (int i = 0; i < candidateSize; i++) {
int tempResBytePos = ((int) (Math.floor(resBitPos / 8)));
int tempResBitPos = resBitPos % 8;
        result[tempResBytePos] = updatePos(result[tempResBytePos], tempResBitPos, buffer[i][currentBytePos], currentBitPos);
resBitPos++;
}
}
return result;
}
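Bit interleaving like this underpins z-order style sort keys; a small worked sketch with plain arrays follows (no Hudi state is needed).

// Worked sketch: interleave two 1-byte candidates bit by bit.
byte[][] candidates = new byte[][] {
    {(byte) 0b10101010},   // candidate 0
    {(byte) 0b11110000}    // candidate 1
};
// One bit of candidate 0, then one bit of candidate 1, alternating through all 8 bit positions.
byte[] interleaved = BinaryUtil.interleaving(candidates, 1); // size = 1 byte per candidate
// The result is size * candidates.length = 2 bytes long; such keys can then be ordered with compareTo(...) above.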
| 3.26 |
hudi_HFileBootstrapIndex_writeNextSourceFileMapping_rdh
|
/**
* Write next source file to hudi file-id. Entries are expected to be appended in hudi file-group id
* order.
*
* @param mapping
* bootstrap source file mapping.
*/
private void writeNextSourceFileMapping(BootstrapFileMapping mapping) {
try {
      HoodieBootstrapFilePartitionInfo srcFilePartitionInfo = new HoodieBootstrapFilePartitionInfo();
srcFilePartitionInfo.setPartitionPath(mapping.getPartitionPath());
srcFilePartitionInfo.setBootstrapPartitionPath(mapping.getBootstrapPartitionPath());
srcFilePartitionInfo.setBootstrapFileStatus(mapping.getBootstrapFileStatus());
KeyValue kv = new KeyValue(getUTF8Bytes(getFileGroupKey(mapping.getFileGroupId())), new byte[0], new byte[0], HConstants.LATEST_TIMESTAMP, Type.Put, TimelineMetadataUtils.serializeAvroMetadata(srcFilePartitionInfo, HoodieBootstrapFilePartitionInfo.class).get());
indexByFileIdWriter.append(kv);
numFileIdKeysAdded++;
} catch (IOException e) {
throw new HoodieIOException(e.getMessage(), e);
}
}
| 3.26 |
hudi_HFileBootstrapIndex_m2_rdh
|
/**
* Commit bootstrap index entries. Appends Metadata and closes write handles.
*/
private void m2() {
try {
if (!closed) {
HoodieBootstrapIndexInfo partitionIndexInfo = HoodieBootstrapIndexInfo.newBuilder().setCreatedTimestamp(new Date().getTime()).setNumKeys(numPartitionKeysAdded).setBootstrapBasePath(bootstrapBasePath).build();
LOG.info("Adding Partition FileInfo :" + partitionIndexInfo);
HoodieBootstrapIndexInfo fileIdIndexInfo = HoodieBootstrapIndexInfo.newBuilder().setCreatedTimestamp(new Date().getTime()).setNumKeys(numFileIdKeysAdded).setBootstrapBasePath(bootstrapBasePath).build();
LOG.info("Appending FileId FileInfo :" + fileIdIndexInfo);
indexByPartitionWriter.appendFileInfo(INDEX_INFO_KEY, TimelineMetadataUtils.serializeAvroMetadata(partitionIndexInfo, HoodieBootstrapIndexInfo.class).get());
indexByFileIdWriter.appendFileInfo(INDEX_INFO_KEY, TimelineMetadataUtils.serializeAvroMetadata(fileIdIndexInfo, HoodieBootstrapIndexInfo.class).get());
close();
}
} catch (IOException ioe) {
throw new HoodieIOException(ioe.getMessage(), ioe);
}
}
| 3.26 |
hudi_HFileBootstrapIndex_getUserKeyFromCellKey_rdh
|
/**
 * HFile stores the cell key in a format like "2020/03/18//LATEST_TIMESTAMP/Put/vlen=3692/seqid=0".
 * This API returns only the user key part of it.
*
* @param cellKey
 * HFile Cell Key
 * @return the user key portion of the cell key
 */
private static String getUserKeyFromCellKey(String cellKey) {
  int hfileSuffixBeginIndex = cellKey.lastIndexOf(HFILE_CELL_KEY_SUFFIX_PART);
  return cellKey.substring(0, hfileSuffixBeginIndex);
}
| 3.26 |
hudi_HFileBootstrapIndex_getFileGroupKey_rdh
|
/**
* Returns file group key to be used in HFile.
*
* @param fileGroupId
* File Group Id.
 * @return the file group key to use in the HFile
 */
private static String getFileGroupKey(HoodieFileGroupId fileGroupId) {
return (getPartitionKey(fileGroupId.getPartitionPath()) + KEY_PARTS_SEPARATOR) + getKeyValueString(FILE_ID_KEY_PREFIX, fileGroupId.getFileId());
}
| 3.26 |
hudi_HFileBootstrapIndex_getPartitionKey_rdh
|
/**
* Returns partition-key to be used in HFile.
*
* @param partition
* Partition-Path
 * @return the partition key to use in the HFile
 */
private static String getPartitionKey(String partition) {
return getKeyValueString(PARTITION_KEY_PREFIX, partition);
}
| 3.26 |
hudi_HFileBootstrapIndex_writeNextPartition_rdh
|
/**
* Append bootstrap index entries for next partitions in sorted order.
*
* @param partitionPath
* Hudi Partition Path
* @param bootstrapPartitionPath
* Source Partition Path
* @param bootstrapFileMappings
* Bootstrap Source File to Hudi File Id mapping
 */
private void writeNextPartition(String partitionPath, String bootstrapPartitionPath, List<BootstrapFileMapping> bootstrapFileMappings) {
try {
LOG.info((((("Adding bootstrap partition Index entry for partition :" + partitionPath) + ", bootstrap Partition :") + bootstrapPartitionPath) + ", Num Entries :") + bootstrapFileMappings.size());
LOG.info("ADDING entries :" + bootstrapFileMappings);HoodieBootstrapPartitionMetadata bootstrapPartitionMetadata = new HoodieBootstrapPartitionMetadata();
bootstrapPartitionMetadata.setBootstrapPartitionPath(bootstrapPartitionPath);
bootstrapPartitionMetadata.setPartitionPath(partitionPath);
bootstrapPartitionMetadata.setFileIdToBootstrapFile(bootstrapFileMappings.stream().map(m -> Pair.of(m.getFileId(), m.getBootstrapFileStatus())).collect(Collectors.toMap(Pair::getKey, Pair::getValue)));
      Option<byte[]> bytes = TimelineMetadataUtils.serializeAvroMetadata(bootstrapPartitionMetadata, HoodieBootstrapPartitionMetadata.class);
      if (bytes.isPresent()) {
indexByPartitionWriter.append(new KeyValue(Bytes.toBytes(getPartitionKey(partitionPath)), new byte[0], new byte[0], HConstants.LATEST_TIMESTAMP, Type.Put, bytes.get()));
numPartitionKeysAdded++;
}
    } catch (IOException e) {
      throw new HoodieIOException(e.getMessage(), e);
    }
}
| 3.26 |
hudi_HFileBootstrapIndex_close_rdh
|
/**
* Close Writer Handles.
*/
public void close() {
try {
if (!closed) {
indexByPartitionWriter.close();
indexByFileIdWriter.close();
        closed = true;
      }
    } catch (IOException ioe) {
      throw new HoodieIOException(ioe.getMessage(), ioe);
    }
}
| 3.26 |
hudi_HFileBootstrapIndex_createReader_rdh
|
/**
* Helper method to create HFile Reader.
*
* @param hFilePath
* File Path
* @param conf
* Configuration
* @param fileSystem
* File System
*/
private static Reader createReader(String hFilePath, Configuration conf, FileSystem fileSystem) {
LOG.info("Opening HFile for reading :" + hFilePath);
return HoodieHFileUtils.createHFileReader(fileSystem, new HFilePathForReader(hFilePath), new CacheConfig(conf), conf);
}
| 3.26 |
hudi_OverwriteWithLatestAvroPayload_overwriteField_rdh
|
/**
* Return true if value equals defaultValue otherwise false.
*/
  public Boolean overwriteField(Object value, Object defaultValue) {
    if (JsonProperties.NULL_VALUE.equals(defaultValue)) {
      return value == null;
    }
    return Objects.equals(value, defaultValue);
  }
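A small illustration of the rule above; payload stands for an already constructed OverwriteWithLatestAvroPayload instance (construction omitted), and JsonProperties.NULL_VALUE is Avro's marker for a JSON null default.

// Illustration only; payload is an existing OverwriteWithLatestAvroPayload instance.
payload.overwriteField(null, JsonProperties.NULL_VALUE); // true: null matches a JSON-null default
payload.overwriteField("x", JsonProperties.NULL_VALUE);  // false: a concrete value never matches a null default
payload.overwriteField(0, 0);                            // true: value equals the declared default
payload.overwriteField(1, 0);                            // false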
| 3.26 |
hudi_TableOptionProperties_m0_rdh
|
/**
* Initialize the {@link #FILE_NAME} meta file.
*/
  public static void m0(String basePath, Configuration hadoopConf, Map<String, String> options) throws IOException {
Path propertiesFilePath = getPropertiesFilePath(basePath);
FileSystem fs = FSUtils.getFs(basePath, hadoopConf);
try (FSDataOutputStream outputStream = fs.create(propertiesFilePath)) {
Properties properties = new Properties();
properties.putAll(options);
properties.store(outputStream, "Table option properties saved on " + new Date(System.currentTimeMillis()));
}
LOG.info(String.format("Create file %s success.", propertiesFilePath));
}
| 3.26 |
hudi_TableOptionProperties_loadFromProperties_rdh
|
/**
* Read table options map from the given table base path.
*/
public static Map<String, String> loadFromProperties(String basePath, Configuration hadoopConf) {
Path propertiesFilePath = getPropertiesFilePath(basePath);
Map<String, String> options = new HashMap<>();
Properties props = new Properties();
FileSystem fs = FSUtils.getFs(basePath, hadoopConf);
try (FSDataInputStream inputStream = fs.open(propertiesFilePath)) {
props.load(inputStream);
      for (final String propertyName : props.stringPropertyNames()) {
        options.put(propertyName, props.getProperty(propertyName));
}
} catch (IOException e) {
throw new HoodieIOException(String.format("Could not load table option properties from %s", propertiesFilePath), e);
}
LOG.info(String.format("Loading table option properties from %s success.", propertiesFilePath));
return options;
}
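A hedged round-trip sketch for the two helpers above (m0 persists the options file, loadFromProperties reads it back); basePath and hadoopConf are placeholders, and m0 declares IOException, so call it where that can be handled.

// Hedged round-trip sketch; basePath (table base path) and hadoopConf are placeholders.
Map<String, String> options = new HashMap<>();
options.put("hoodie.table.name", "demo_table");
TableOptionProperties.m0(basePath, hadoopConf, options);
Map<String, String> reloaded = TableOptionProperties.loadFromProperties(basePath, hadoopConf);
// reloaded.get("hoodie.table.name") is expected to be "demo_table"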
| 3.26 |
hudi_SparkHoodieBackedTableMetadataWriter_create_rdh
|
/**
* Return a Spark based implementation of {@code HoodieTableMetadataWriter} which can be used to
* write to the metadata table.
* <p>
 * If the metadata table does not exist, an attempt is made to bootstrap it, but there is no guarantee that the
 * table will actually be bootstrapped at this time.
*
 * @param conf
 * Hadoop configuration
 * @param writeConfig
 * Hudi write config
 * @param context
 * Hudi engine context
* @param inflightInstantTimestamp
* Timestamp of an instant which is in-progress. This instant is ignored while
* attempting to bootstrap the table.
* @return An instance of the {@code HoodieTableMetadataWriter}
*/
public static HoodieTableMetadataWriter create(Configuration conf, HoodieWriteConfig writeConfig,
HoodieEngineContext context, Option<String> inflightInstantTimestamp) {
return new SparkHoodieBackedTableMetadataWriter(conf, writeConfig, EAGER, context, inflightInstantTimestamp);
}
| 3.26 |
hudi_SixToFiveDowngradeHandler_runCompaction_rdh
|
/**
* Utility method to run compaction for MOR table as part of downgrade step.
*/
private void runCompaction(HoodieTable table, HoodieEngineContext context, HoodieWriteConfig config, SupportsUpgradeDowngrade upgradeDowngradeHelper) {
    try {
      if (table.getMetaClient().getTableType() == HoodieTableType.MERGE_ON_READ) {
// set required configs for scheduling compaction.
HoodieInstantTimeGenerator.setCommitTimeZone(table.getMetaClient().getTableConfig().getTimelineTimezone());
        HoodieWriteConfig compactionConfig = HoodieWriteConfig.newBuilder().withProps(config.getProps()).build();
compactionConfig.setValue(HoodieCompactionConfig.INLINE_COMPACT.key(), "true");
compactionConfig.setValue(HoodieCompactionConfig.INLINE_COMPACT_NUM_DELTA_COMMITS.key(), "1");
compactionConfig.setValue(HoodieCompactionConfig.INLINE_COMPACT_TRIGGER_STRATEGY.key(), CompactionTriggerStrategy.NUM_COMMITS.name());
compactionConfig.setValue(HoodieCompactionConfig.COMPACTION_STRATEGY.key(), UnBoundedCompactionStrategy.class.getName());
compactionConfig.setValue(HoodieMetadataConfig.ENABLE.key(), "false");
try (BaseHoodieWriteClient writeClient = upgradeDowngradeHelper.getWriteClient(compactionConfig, context)) {
Option<String> compactionInstantOpt = writeClient.scheduleCompaction(Option.empty());
if (compactionInstantOpt.isPresent()) {
writeClient.compact(compactionInstantOpt.get());
}
        }
      }
    } catch (Exception e) {
throw new HoodieException(e);
}
}
| 3.26 |
hudi_SixToFiveDowngradeHandler_syncCompactionRequestedFileToAuxiliaryFolder_rdh
|
/**
* See HUDI-6040.
*/
private static void syncCompactionRequestedFileToAuxiliaryFolder(HoodieTable table) {
HoodieTableMetaClient metaClient = table.getMetaClient();
    HoodieTimeline compactionTimeline = new HoodieActiveTimeline(metaClient, false)
        .filterPendingCompactionTimeline()
        .filter(instant -> instant.getState() == HoodieInstant.State.REQUESTED);
compactionTimeline.getInstantsAsStream().forEach(instant -> {
String fileName = instant.getFileName();
FileIOUtils.copy(metaClient.getFs(), new Path(metaClient.getMetaPath(), fileName), new Path(metaClient.getMetaAuxiliaryPath(), fileName));
});
}
| 3.26 |
hudi_ExpressionPredicates_fromExpression_rdh
|
/**
* Converts specific call expression to the predicate.
*
* <p>Two steps to bind the call:
* 1. map the predicate instance;
* 2. bind the field reference;
*
* <p>Normalize the expression to simplify the subsequent decision logic:
* always put the literal expression in the RHS.
*
* @param callExpression
* The call expression to convert.
* @return The converted predicate.
*/
public static Predicate fromExpression(CallExpression callExpression) {
FunctionDefinition functionDefinition = callExpression.getFunctionDefinition();
List<Expression> childExpressions = callExpression.getChildren();
boolean normalized = childExpressions.get(0) instanceof FieldReferenceExpression;
if (BuiltInFunctionDefinitions.NOT.equals(functionDefinition)) {
Not predicate = Not.getInstance();
      Predicate childPredicate = fromExpression(((CallExpression) (childExpressions.get(0))));
      return predicate.bindPredicate(childPredicate);
}
if (BuiltInFunctionDefinitions.AND.equals(functionDefinition)) {
And predicate = And.getInstance();
Predicate predicate1 = fromExpression(((CallExpression) (childExpressions.get(0))));
Predicate predicate2 = fromExpression(((CallExpression) (childExpressions.get(1))));
return predicate.m0(predicate1, predicate2);
}
if (BuiltInFunctionDefinitions.OR.equals(functionDefinition)) {
Or predicate = Or.getInstance();
Predicate predicate1 = fromExpression(((CallExpression) (childExpressions.get(0))));
Predicate predicate2 = fromExpression(((CallExpression) (childExpressions.get(1))));
return predicate.bindPredicates(predicate1, predicate2);
}
if ((BuiltInFunctionDefinitions.IS_NULL.equals(functionDefinition) || BuiltInFunctionDefinitions.IS_NOT_NULL.equals(functionDefinition)) || childExpressions.stream().anyMatch(e -> (e instanceof ValueLiteralExpression) && (getValueFromLiteral(((ValueLiteralExpression) (e))) == null))) {
return AlwaysNull.getInstance();
}
// handle IN specifically
if (BuiltInFunctionDefinitions.IN.equals(functionDefinition)) {
checkState(normalized, "The IN expression expects to be normalized");
In in = In.getInstance();
FieldReferenceExpression fieldReference = ((FieldReferenceExpression) (childExpressions.get(0)));
List<ValueLiteralExpression> valueLiterals = IntStream.range(1, childExpressions.size()).mapToObj(index -> ((ValueLiteralExpression) (childExpressions.get(index)))).collect(Collectors.toList());
return in.bindValueLiterals(valueLiterals).bindFieldReference(fieldReference);
}
ColumnPredicate predicate;
// handle binary operators
if (BuiltInFunctionDefinitions.EQUALS.equals(functionDefinition)) {
predicate = Equals.getInstance();
} else if (BuiltInFunctionDefinitions.NOT_EQUALS.equals(functionDefinition)) {
predicate = NotEquals.getInstance();
    } else if (BuiltInFunctionDefinitions.LESS_THAN.equals(functionDefinition)) {
predicate = (normalized) ? LessThan.getInstance() : GreaterThan.getInstance();
} else if (BuiltInFunctionDefinitions.GREATER_THAN.equals(functionDefinition)) {
      predicate = (normalized) ? GreaterThan.getInstance() : LessThan.getInstance();
    } else if (BuiltInFunctionDefinitions.LESS_THAN_OR_EQUAL.equals(functionDefinition)) {
predicate = (normalized) ? LessThanOrEqual.getInstance() : GreaterThanOrEqual.getInstance();
} else if (BuiltInFunctionDefinitions.GREATER_THAN_OR_EQUAL.equals(functionDefinition)) {
predicate = (normalized) ? GreaterThanOrEqual.getInstance() : LessThanOrEqual.getInstance();
} else {
throw new AssertionError("Unexpected function definition " + functionDefinition);
}
    FieldReferenceExpression fieldReference = (normalized) ? ((FieldReferenceExpression) (childExpressions.get(0))) : ((FieldReferenceExpression) (childExpressions.get(1)));
    ValueLiteralExpression valueLiteral = (normalized) ? ((ValueLiteralExpression) (childExpressions.get(1))) : ((ValueLiteralExpression) (childExpressions.get(0)));
    return predicate.bindValueLiteral(valueLiteral).bindFieldReference(fieldReference);
}
| 3.26 |
hudi_ExpressionPredicates_bindValueLiteral_rdh
|
/**
* Binds value literal to create a column predicate.
*
 * @param valueLiteral
 * The value literal to bind.
* @return A column predicate.
*/
  public ColumnPredicate bindValueLiteral(ValueLiteralExpression valueLiteral) {
    Object literalObject = getValueFromLiteral(valueLiteral);
// validate that literal is serializable
if (literalObject instanceof Serializable) {
this.literal = ((Serializable) (literalObject));
} else {
LOG.warn(("Encountered a non-serializable literal. " + "Cannot push predicate with value literal [{}] into FileInputFormat. ") + "This is a bug and should be reported.", valueLiteral);
this.literal = null;
}
return this;
}
| 3.26 |
hudi_ExpressionPredicates_bindPredicate_rdh
|
/**
* Binds predicate to create a NOT predicate.
*
* @param predicate
* The predicate to negate.
* @return A NOT predicate.
*/
public Predicate bindPredicate(Predicate predicate) {
this.predicate = predicate;
return this;
}
| 3.26 |
hudi_ExpressionPredicates_bindFieldReference_rdh
|
/**
* Binds field reference to create a column predicate.
*
 * @param fieldReference
 * The field reference to bind.
* @return A column predicate.
*/
public ColumnPredicate bindFieldReference(FieldReferenceExpression fieldReference) {
this.f0 = fieldReference.getOutputDataType().getLogicalType();
    this.columnName = fieldReference.getName();
    return this;
}
| 3.26 |
hudi_ExpressionPredicates_bindPredicates_rdh
|
/**
* Binds predicates to create an OR predicate.
*
* @param predicates
* The disjunctive predicates.
* @return An OR predicate.
*/
public Predicate bindPredicates(Predicate... predicates) {
this.predicates = predicates;
return this;
}
| 3.26 |
hudi_ExpressionPredicates_bindValueLiterals_rdh
|
/**
* Binds value literals to create an IN predicate.
*
 * @param valueLiterals
 * The value literals to bind.
* @return An IN predicate.
*/
public ColumnPredicate bindValueLiterals(List<ValueLiteralExpression> valueLiterals) {
this.literals = valueLiterals.stream().map(valueLiteral -> {
      Object literalObject = getValueFromLiteral(valueLiteral);
      // validate that literal is serializable
if (literalObject instanceof Serializable) {
return ((Serializable) (literalObject));
} else {
In.LOG.warn(("Encountered a non-serializable literal. " + "Cannot push predicate with value literal [{}] into FileInputFormat. ") + "This is a bug and should be reported.", valueLiteral);
return null;
}
}).collect(Collectors.toList());
return this;
}
| 3.26 |
hudi_ExpressionPredicates_m0_rdh
|
/**
* Binds predicates to create an AND predicate.
*
 * @param predicates
 * The conjunctive predicates.
* @return An AND predicate.
*/
public Predicate m0(Predicate... predicates) {
this.predicates = predicates;
return this;
}
| 3.26 |
hudi_ExpressionPredicates_getInstance_rdh
|
/**
* Returns an OR predicate.
*/
public static Or getInstance() {
return new Or();
}
| 3.26 |
hudi_ExpressionPredicates_getFunctionDefinition_rdh
|
/**
* Returns function definition of predicate.
*
* @return A function definition of predicate.
*/
public FunctionDefinition getFunctionDefinition() {
return null;
}
| 3.26 |
hudi_HoodieBaseFile_getFileIdAndCommitTimeFromFileName_rdh
|
/**
* Parses the file ID and commit time from the fileName.
*
* @param fileName
* Name of the file
* @return String array of size 2 with fileId as the first and commitTime as the second element.
*/
  private static String[] getFileIdAndCommitTimeFromFileName(String fileName) {
return ExternalFilePathUtil.isExternallyCreatedFile(fileName) ? handleExternallyGeneratedFile(fileName) : handleHudiGeneratedFile(fileName);
}
| 3.26 |
hudi_HoodieBaseFile_maybeHandleExternallyGeneratedFileName_rdh
|
/**
* If the file was created externally, the original file path will have a '_[commitTime]_hudiext' suffix when stored in the metadata table. That suffix needs to be removed from the FileStatus so
* that the actual file can be found and read.
*
* @param fileStatus
* an input file status that may require updating
* @param fileId
* the fileId for the file
* @return the original file status if it was not externally created, or a new FileStatus with the original file name if it was externally created
*/
private static FileStatus maybeHandleExternallyGeneratedFileName(FileStatus fileStatus, String fileId) {
    if (fileStatus == null) {
return null;
}
    if (ExternalFilePathUtil.isExternallyCreatedFile(fileStatus.getPath().getName())) {
// fileId is the same as the original file name for externally created files
      Path parentPath = fileStatus.getPath().getParent();
      return new FileStatus(fileStatus.getLen(), fileStatus.isDirectory(), fileStatus.getReplication(), fileStatus.getBlockSize(), fileStatus.getModificationTime(), fileStatus.getAccessTime(), fileStatus.getPermission(), fileStatus.getOwner(), fileStatus.getGroup(), new CachingPath(parentPath, createRelativePathUnsafe(fileId)));
} else {
return fileStatus;
}
}
| 3.26 |
hudi_ObjectSizeCalculator_getObjectSize_rdh
|
/**
* Given an object, returns the total allocated size, in bytes, of the object and all other objects reachable from it.
* Attempts to detect the current JVM memory layout, but may fail with {@link UnsupportedOperationException};
*
* @param obj
* the object; can be null. Passing in a {@link java.lang.Class} object doesn't do anything special, it
* measures the size of all objects reachable through it (which will include its class loader, and by
* extension, all other Class objects loaded by the same loader, and all the parent class loaders). It doesn't
* provide the size of the static fields in the JVM class that the Class object represents.
* @return the total allocated size of the object and all other objects it retains.
* @throws UnsupportedOperationException
* if the current vm memory layout cannot be detected.
*/
public static long getObjectSize(Object obj) throws UnsupportedOperationException {
// JDK versions 16 or later enforce strong encapsulation and block illegal reflective access.
// In effect, we cannot calculate object size by deep reflection and invoking `setAccessible` on a field,
// especially when the `isAccessible` is false. More details in JEP 403. While integrating Hudi with other
// software packages that compile against JDK 16 or later (e.g. Trino), the IllegalAccessException will be thrown.
// In that case, we use Java Object Layout (JOL) to estimate the object size.
//
// NOTE: We cannot get the object size base on the amount of byte serialized because there is no guarantee
// that the incoming object is serializable. We could have used Java's Instrumentation API, but it
// needs an instrumentation agent that can be hooked to the JVM. In lieu of that, we are using JOL.
// GraphLayout gives the deep size of an object, including the size of objects that are referenced from the given object.
    return obj == null ? 0 : GraphLayout.parseInstance(obj).totalSize();
}
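A quick usage note for the estimator above; it assumes JOL (org.openjdk.jol) is on the classpath, as the method itself does, and the reported sizes are JVM and layout dependent.

// Usage sketch; treat the returned values as estimates.
long nullSize = ObjectSizeCalculator.getObjectSize(null);                // 0 by contract
long arraySize = ObjectSizeCalculator.getObjectSize(new long[1024]);     // ~8 KB of payload plus the array header
long mapSize = ObjectSizeCalculator.getObjectSize(new java.util.HashMap<String, String>());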
| 3.26 |
hudi_HoodieArchivedTimeline_loadInstants_rdh
|
/**
* Loads the instants from the timeline.
*
* @param metaClient
* The meta client.
* @param filter
* The time range filter where the target instant belongs to.
* @param loadMode
* The load mode.
* @param commitsFilter
* Filter of the instant type.
* @param recordConsumer
* Consumer of the instant record payload.
   */
  public static void loadInstants(HoodieTableMetaClient metaClient, @Nullable TimeRangeFilter filter, LoadMode loadMode,
      Function<GenericRecord, Boolean> commitsFilter, BiConsumer<String, GenericRecord> recordConsumer) {
    try {
      // List all files
List<String> fileNames = LSMTimeline.latestSnapshotManifest(metaClient).getFileNames();
Schema readSchema = LSMTimeline.getReadSchema(loadMode);
fileNames.stream().filter(fileName -> (filter == null) || LSMTimeline.isFileInRange(filter, fileName)).parallel().forEach(fileName -> {
// Read the archived file
        try (HoodieAvroParquetReader reader = ((HoodieAvroParquetReader) (HoodieFileReaderFactory.getReaderFactory(HoodieRecordType.AVRO)
            .getFileReader(metaClient.getHadoopConf(), new Path(metaClient.getArchivePath(), fileName))))) {
try (ClosableIterator<IndexedRecord> iterator = reader.getIndexedRecordIterator(HoodieLSMTimelineInstant.getClassSchema(), readSchema)) {
while (iterator.hasNext()) {
GenericRecord record = ((GenericRecord) (iterator.next()));
String instantTime = record.get(INSTANT_TIME_ARCHIVED_META_FIELD).toString();
if (((filter == null) || filter.isInRange(instantTime)) && commitsFilter.apply(record)) {
recordConsumer.accept(instantTime, record);
}
}
}
} catch (IOException ioException) {
throw new HoodieIOException("Error open file reader for path: " + new Path(metaClient.getArchivePath(), fileName));
}
});
    } catch (IOException e) {
throw new HoodieIOException("Could not load archived commit timeline from path " + metaClient.getArchivePath(), e);
}
}
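A hedged sketch of driving the loader above; metaClient is a placeholder, the LoadMode constant name is assumed rather than confirmed, and a concurrent map is used because the record consumer can be invoked from parallel streams.

// Hedged sketch; metaClient is a placeholder and LoadMode.METADATA is an assumed constant name.
Map<String, GenericRecord> archivedInstants = new java.util.concurrent.ConcurrentHashMap<>();
HoodieArchivedTimeline.loadInstants(
    metaClient,
    null,                 // no time-range filter: scan the whole latest snapshot
    LoadMode.METADATA,
    record -> true,       // accept every instant type
    (instantTime, record) -> archivedInstants.put(instantTime, record));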
| 3.26 |
hudi_HoodieArchivedTimeline_readObject_rdh
|
/**
* This method is only used when this object is deserialized in a spark executor.
*
* @deprecated */
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
in.defaultReadObject();
}
| 3.26 |
hudi_ParquetUtils_readAvroRecords_rdh
|
/**
* NOTE: This literally reads the entire file contents, thus should be used with caution.
*/
@Override
public List<GenericRecord> readAvroRecords(Configuration configuration, Path filePath) {
    List<GenericRecord> records = new ArrayList<>();
try (ParquetReader reader = AvroParquetReader.builder(filePath).withConf(configuration).build()) {
Object obj = reader.read();
while (obj != null) {
if (obj instanceof GenericRecord) {
records.add(((GenericRecord) (obj)));
        }
        obj = reader.read();
}
} catch (IOException e) {
throw new HoodieIOException("Failed to read avro records from Parquet " + filePath, e);
}
return records;
}
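A hedged usage sketch for the reader above; the path is a placeholder, the meta field lookup assumes a Hudi-written base file, and, per the Javadoc caveat, the whole file is loaded into memory.

// Hedged sketch; suitable only for small files or tests.
List<GenericRecord> records = new ParquetUtils().readAvroRecords(new Configuration(), new Path("/tmp/sample.parquet"));
records.forEach(record -> System.out.println(record.get("_hoodie_record_key")));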
| 3.26 |
hudi_ParquetUtils_m0_rdh
|
/**
* Returns a closable iterator for reading the given parquet file.
*
* @param configuration
* configuration to build fs object
* @param filePath
* The parquet file path
* @param keyGeneratorOpt
* instance of KeyGenerator
* @return {@link ClosableIterator} of {@link HoodieKey}s for reading the parquet file
*/
@Override
public ClosableIterator<HoodieKey> m0(Configuration configuration, Path filePath, Option<BaseKeyGenerator> keyGeneratorOpt) {
try {
      Configuration conf = new Configuration(configuration);
conf.addResource(FSUtils.getFs(filePath.toString(), conf).getConf());
Schema readSchema = keyGeneratorOpt.map(keyGenerator -> {
List<String> fields = new ArrayList<>();
fields.addAll(keyGenerator.getRecordKeyFieldNames());
fields.addAll(keyGenerator.getPartitionPathFields());
return HoodieAvroUtils.getSchemaForFields(readAvroSchema(conf, filePath), fields);
}).orElse(HoodieAvroUtils.getRecordKeyPartitionPathSchema());
AvroReadSupport.setAvroReadSchema(conf, readSchema);
AvroReadSupport.setRequestedProjection(conf, readSchema);
ParquetReader<GenericRecord> reader = AvroParquetReader.<GenericRecord>builder(filePath).withConf(conf).build();
return HoodieKeyIterator.getInstance(new ParquetReaderIterator<>(reader), keyGeneratorOpt);
} catch (IOException e) {
throw new HoodieIOException("Failed to read from Parquet file " + filePath, e);
}
}
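A hedged sketch of consuming the key iterator above without a custom key generator; the path is a placeholder, and try-with-resources closes the iterator so the underlying Parquet reader is released.

// Hedged sketch; Option.empty() means no key generator, so the default record key/partition path fields are read.
try (ClosableIterator<HoodieKey> keys =
         new ParquetUtils().m0(new Configuration(), new Path("/tmp/sample.parquet"), Option.empty())) {
  while (keys.hasNext()) {
    HoodieKey key = keys.next();
    System.out.println(key.getRecordKey() + " -> " + key.getPartitionPath());
  }
}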
| 3.26 |
hudi_ParquetUtils_readSchema_rdh
|
/**
* Get the schema of the given parquet file.
*/
public MessageType readSchema(Configuration configuration, Path parquetFilePath) {
return readMetadata(configuration, parquetFilePath).getFileMetaData().getSchema();
}
| 3.26 |