name | code_snippet | score
---|---|---|
hudi_CompactionUtils_m0_rdh
|
/**
* Generate compaction operation from file-slice.
*
* @param partitionPath
* Partition path
* @param fileSlice
* File Slice
* @param metricsCaptureFunction
* Metrics Capture function
* @return Compaction Operation
*/
public static HoodieCompactionOperation m0(String partitionPath, FileSlice fileSlice, Option<Function<Pair<String, FileSlice>, Map<String, Double>>> metricsCaptureFunction) {
HoodieCompactionOperation.Builder builder = HoodieCompactionOperation.newBuilder();
builder.setPartitionPath(partitionPath);
builder.setFileId(fileSlice.getFileId());
builder.setBaseInstantTime(fileSlice.getBaseInstantTime());
builder.setDeltaFilePaths(fileSlice.getLogFiles().map(lf -> lf.getPath().getName()).collect(Collectors.toList()));
if (fileSlice.getBaseFile().isPresent()) {
builder.setDataFilePath(fileSlice.getBaseFile().get().getFileName());
builder.setBootstrapFilePath(fileSlice.getBaseFile().get().getBootstrapBaseFile().map(BaseFile::getPath).orElse(null));
}
if (metricsCaptureFunction.isPresent()) {
builder.setMetrics(metricsCaptureFunction.get().apply(Pair.of(partitionPath, fileSlice)));
}
return builder.build();
}
| 3.26 |
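A hedged usage sketch for the builder above; `partitionPath` and `fileSlice` are assumed to be in scope, the metric shown is purely illustrative, and imports are elided as in the surrounding snippets.

// Capture one illustrative metric (log-file count) while building the compaction operation.
Option<Function<Pair<String, FileSlice>, Map<String, Double>>> metricsFn =
    Option.of(pair -> Collections.singletonMap("TOTAL_LOG_FILES", (double) pair.getRight().getLogFiles().count()));
HoodieCompactionOperation op = CompactionUtils.m0(partitionPath, fileSlice, metricsFn);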
hudi_CompactionUtils_getLogCompactionPlan_rdh
|
/**
 * This method serves only log compaction instants,
 * because the same HoodieCompactionPlan is used for both operations.
*/
public static HoodieCompactionPlan getLogCompactionPlan(HoodieTableMetaClient metaClient, String logCompactionInstant) {
HoodieInstant logCompactionRequestedInstant = HoodieTimeline.getLogCompactionRequestedInstant(logCompactionInstant);
return getCompactionPlan(metaClient, logCompactionRequestedInstant);
}
| 3.26 |
hudi_CompactionUtils_getCompactionPlan_rdh
|
/**
 * Util method to fetch either a compaction or a log compaction plan from the serialized plan content of a requested instant.
*/
public static HoodieCompactionPlan getCompactionPlan(HoodieTableMetaClient metaClient, Option<byte[]> planContent) {
CompactionPlanMigrator migrator = new CompactionPlanMigrator(metaClient);
try {
HoodieCompactionPlan compactionPlan = TimelineMetadataUtils.deserializeCompactionPlan(planContent.get());
return migrator.upgradeToLatest(compactionPlan, compactionPlan.getVersion());
} catch (IOException e) {
throw new HoodieException(e);
}
}
| 3.26 |
hudi_CompactionUtils_getAllPendingLogCompactionPlans_rdh
|
/**
* Get all pending logcompaction plans along with their instants.
*
* @param metaClient
* Hoodie Meta Client
*/
public static List<Pair<HoodieInstant, HoodieCompactionPlan>> getAllPendingLogCompactionPlans(HoodieTableMetaClient metaClient) {
// This function returns pending logcompaction timeline.
Function<HoodieTableMetaClient, HoodieTimeline> filteredTimelineSupplier = hoodieTableMetaClient -> hoodieTableMetaClient.getActiveTimeline().filterPendingLogCompactionTimeline();
// Hoodie requested instant supplier
Function<String, HoodieInstant> requestedInstantSupplier = HoodieTimeline::getLogCompactionRequestedInstant;
return getCompactionPlansByTimeline(metaClient, filteredTimelineSupplier, requestedInstantSupplier);
}
| 3.26 |
hudi_CompactionUtils_getPendingCompactionOperations_rdh
|
/**
* Get pending compaction operations for both major and minor compaction.
*/
public static Stream<Pair<HoodieFileGroupId, Pair<String, HoodieCompactionOperation>>> getPendingCompactionOperations(HoodieInstant instant, HoodieCompactionPlan compactionPlan) {
List<HoodieCompactionOperation> ops = compactionPlan.getOperations();
if (null != ops) {
return ops.stream().map(op -> Pair.of(new HoodieFileGroupId(op.getPartitionPath(), op.getFileId()), Pair.of(instant.getTimestamp(), op)));
} else {
return Stream.empty();
}
}
| 3.26 |
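A small, hedged sketch of how the stream above is typically materialized; `instant` and `compactionPlan` are assumed to be in scope.

// Collect (fileGroupId -> (instantTime, operation)) into a lookup map.
Map<HoodieFileGroupId, Pair<String, HoodieCompactionOperation>> pendingOps =
    CompactionUtils.getPendingCompactionOperations(instant, compactionPlan)
        .collect(Collectors.toMap(Pair::getKey, Pair::getValue));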
hudi_HoodieRealtimeRecordReader_constructRecordReader_rdh
|
/**
* Construct record reader based on job configuration.
*
* @param split
* File Split
* @param jobConf
* Job Configuration
* @param realReader
* Parquet Record Reader
* @return Realtime Reader
*/
private static RecordReader<NullWritable, ArrayWritable> constructRecordReader(RealtimeSplit split, JobConf jobConf, RecordReader<NullWritable, ArrayWritable> realReader) {
try {
if (canSkipMerging(jobConf)) {
LOG.info("Enabling un-merged reading of realtime records");
return new RealtimeUnmergedRecordReader(split, jobConf, realReader);
}
LOG.info("Enabling merged reading of realtime records for split " + split);
return new RealtimeCompactedRecordReader(split, jobConf, realReader);
} catch (Exception e) {
LOG.error("Got exception when constructing record reader", e);
try {
if (null != realReader) {
realReader.close();
}
} catch (IOException ioe) {
LOG.error("Unable to close real reader", ioe);
}
throw new HoodieException("Exception when constructing record reader ", e);
}
}
| 3.26 |
hudi_HoodieFlinkTableServiceClient_initMetadataWriter_rdh
|
/**
 * Initialize the table metadata writer, e.g., bootstrap the metadata table
* from the filesystem if it does not exist.
*/
private HoodieBackedTableMetadataWriter initMetadataWriter(Option<String> latestPendingInstant) {
return (HoodieBackedTableMetadataWriter) FlinkHoodieBackedTableMetadataWriter.create(FlinkClientUtil.getHadoopConf(), this.config, HoodieFlinkEngineContext.DEFAULT, latestPendingInstant);
}
| 3.26 |
hudi_HoodieROTablePathFilter_safeGetParentsParent_rdh
|
/**
 * Obtain the path three levels up from the provided path (i.e. the parent's parent's parent).
*
* @return said path if available, null otherwise
*/
private Path safeGetParentsParent(Path path) {
if (path.getParent() != null && path.getParent().getParent() != null && path.getParent().getParent().getParent() != null) {
return path.getParent().getParent().getParent();
}
return null;
}
| 3.26 |
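For illustration only, a hypothetical partition-style path shows what the three getParent() calls resolve to.

Path p = new Path("/tmp/hoodie_table/2021/03/15/abc123_1-0-1_20210315.parquet");
// p.getParent()                          -> /tmp/hoodie_table/2021/03/15
// p.getParent().getParent()              -> /tmp/hoodie_table/2021/03
// p.getParent().getParent().getParent()  -> /tmp/hoodie_table/2021   (the value returned above)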
hudi_FormatUtils_getRowKindSafely_rdh
|
/**
 * Returns the RowKind of the given record, never null.
 * Returns RowKind.INSERT when the given field value is not found.
*/
public static RowKind getRowKindSafely(IndexedRecord record, int index) {
if (index == (-1)) {
return RowKind.INSERT;
}
return getRowKind(record, index);
}
| 3.26 |
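A hedged usage sketch: the operation field position is resolved from the record's Avro schema and -1 is passed when the field is absent; `record` is an assumed IndexedRecord in scope.

// Resolve the position of the optional operation metadata field, then map it to a Flink RowKind.
Schema.Field opField = record.getSchema().getField(HoodieRecord.OPERATION_METADATA_FIELD);
int opFieldPos = opField == null ? -1 : opField.pos();
RowKind kind = FormatUtils.getRowKindSafely(record, opFieldPos);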
hudi_FormatUtils_getParallelProducers_rdh
|
/**
 * Sets up log and parquet reading in parallel. Both write to a central buffer.
*/
private List<HoodieProducer<HoodieRecord<?>>> getParallelProducers(HoodieUnMergedLogRecordScanner.Builder scannerBuilder) {
List<HoodieProducer<HoodieRecord<?>>> producers = new ArrayList<>();
producers.add(new FunctionBasedQueueProducer<>(queue -> {
// Scan all the delta-log files, filling in the queue
HoodieUnMergedLogRecordScanner scanner = scannerBuilder.withLogRecordScannerCallback(queue::insertRecord).build();
scanner.scan();
return null;
}));
return producers;
}
| 3.26 |
hudi_FormatUtils_setRowKind_rdh
|
/**
* Sets up the row kind to the row data {@code rowData} from the resolved operation.
*/
public static void setRowKind(RowData rowData, IndexedRecord record, int index) {
if (index == -1) {
return;
}
rowData.setRowKind(getRowKind(record, index));
}
| 3.26 |
hudi_FormatUtils_getRawValueWithAltKeys_rdh
|
/**
* Gets the raw value for a {@link ConfigProperty} config from Flink configuration. The key and
* alternative keys are used to fetch the config.
*
* @param flinkConf
* Configs in Flink {@link org.apache.flink.configuration.Configuration}.
* @param configProperty
* {@link ConfigProperty} config to fetch.
* @return {@link Option} of value if the config exists; empty {@link Option} otherwise.
*/
public static Option<String> getRawValueWithAltKeys(Configuration flinkConf, ConfigProperty<?> configProperty) {
if (flinkConf.containsKey(configProperty.key())) {
return Option.ofNullable(flinkConf.getString(configProperty.key(), ""));
}
for (String alternative : configProperty.getAlternatives()) {
if (flinkConf.containsKey(alternative)) {
return Option.ofNullable(flinkConf.getString(alternative, ""));
}
}
return Option.empty();
}
/**
* Gets the boolean value for a {@link ConfigProperty} config from Flink configuration. The key and
* alternative keys are used to fetch the config. The default value of {@link ConfigProperty}
* config, if exists, is returned if the config is not found in the configuration.
*
* @param conf
* Configs in Flink {@link Configuration}.
* @param configProperty
* {@link ConfigProperty} config to fetch.
* @return boolean value if the config exists; default boolean value if the config does not exist
 *         and there is a default value defined in the {@link ConfigProperty} config; {@code false}
| 3.26 |
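A hedged sketch of the alt-key fallback; the ConfigProperty builder chain and both key names are assumptions for illustration only.

// Hypothetical config declared with a legacy alternative key.
ConfigProperty<String> prop = ConfigProperty
    .key("hoodie.example.new.key")
    .defaultValue("none")
    .withAlternatives("hoodie.example.old.key");
Configuration flinkConf = new Configuration();
flinkConf.setString("hoodie.example.old.key", "value-from-legacy-key");
// The primary key is missing, so the lookup falls back to the alternative key.
Option<String> value = FormatUtils.getRawValueWithAltKeys(flinkConf, prop); // Option.of("value-from-legacy-key")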
hudi_FormatUtils_getRowKind_rdh
|
/**
 * Returns the RowKind of the given record, never null.
 * Returns RowKind.INSERT when the given field value is not found.
*/
private static RowKind getRowKind(IndexedRecord record, int index) {
Object val = record.get(index);
if (val == null) {
return RowKind.INSERT;
}
final HoodieOperation operation = HoodieOperation.fromName(val.toString());
if (HoodieOperation.isInsert(operation)) {
return RowKind.INSERT;
} else if (HoodieOperation.isUpdateBefore(operation)) {
return RowKind.UPDATE_BEFORE;
} else if (HoodieOperation.isUpdateAfter(operation)) {
return RowKind.UPDATE_AFTER;
} else if (HoodieOperation.isDelete(operation)) {
return RowKind.DELETE;
} else {
throw new AssertionError();
}
}
| 3.26 |
hudi_CompactionUtil_scheduleCompaction_rdh
|
/**
* Schedules a new compaction instant.
*
* @param writeClient
* The write client
* @param deltaTimeCompaction
 * Whether the compaction is triggered by elapsed delta time
* @param committed
* Whether the last instant was committed successfully
*/
public static void scheduleCompaction(HoodieFlinkWriteClient<?> writeClient, boolean deltaTimeCompaction, boolean committed) {
if (committed) {
writeClient.scheduleCompaction(Option.empty());
} else if (deltaTimeCompaction) {
// if there are no new commits and the compaction trigger strategy is based on elapsed delta time,
// schedules the compaction anyway.
writeClient.scheduleCompaction(Option.empty());
}
}
| 3.26 |
hudi_CompactionUtil_setPreCombineField_rdh
|
/**
* Sets up the preCombine field into the given configuration {@code conf}
* through reading from the hoodie table metadata.
* <p>
 * This value is non-null because compaction can only be performed on MOR tables,
 * and MOR tables always have a non-null preCombine field.
*
* @param conf
* The configuration
*/
public static void setPreCombineField(Configuration conf, HoodieTableMetaClient metaClient) {
String preCombineField = metaClient.getTableConfig().getPreCombineField();
if (preCombineField != null) {
conf.setString(FlinkOptions.PRECOMBINE_FIELD, preCombineField);
}
}
| 3.26 |
hudi_CompactionUtil_inferChangelogMode_rdh
|
/**
 * Infers the changelog mode based on the data file schema (including metadata fields).
*
* <p>We can improve the code if the changelog mode is set up as table config.
*
* @param conf
* The configuration
* @param metaClient
* The meta client
*/
public static void inferChangelogMode(Configuration conf, HoodieTableMetaClient metaClient) throws Exception {
TableSchemaResolver tableSchemaResolver = new TableSchemaResolver(metaClient);
Schema tableAvroSchema = tableSchemaResolver.getTableAvroSchemaFromDataFile();
if (tableAvroSchema.getField(HoodieRecord.OPERATION_METADATA_FIELD) != null) {
conf.setBoolean(FlinkOptions.CHANGELOG_ENABLED, true);
}
}
| 3.26 |
hudi_CompactionUtil_inferMetadataConf_rdh
|
/**
* Infers the metadata config based on the existence of metadata folder.
*
* <p>We can improve the code if the metadata config is set up as table config.
*
* @param conf
* The configuration
* @param metaClient
* The meta client
*/
public static void inferMetadataConf(Configuration conf, HoodieTableMetaClient metaClient) {
String path = HoodieTableMetadata.getMetadataTableBasePath(conf.getString(FlinkOptions.PATH));
if (!StreamerUtil.tableExists(path, metaClient.getHadoopConf())) {
conf.setBoolean(FlinkOptions.METADATA_ENABLED, false);
}
}
| 3.26 |
hudi_CompactionUtil_rollbackCompaction_rdh
|
/**
* Force rolls back all the inflight compaction instants, especially for job failover restart.
*
* @param table
* The hoodie table
*/
public static void rollbackCompaction(HoodieFlinkTable<?> table) {
HoodieTimeline inflightCompactionTimeline = table.getActiveTimeline().filterPendingCompactionTimeline().filter(instant -> instant.getState() == HoodieInstant.State.INFLIGHT);
inflightCompactionTimeline.getInstants().forEach(inflightInstant -> {
LOG.info(("Rollback the inflight compaction instant: "
+ inflightInstant) + " for failover");
table.rollbackInflightCompaction(inflightInstant);
table.getMetaClient().reloadActiveTimeline();
});
}
| 3.26 |
hudi_CompactionUtil_setAvroSchema_rdh
|
/**
 * Sets up the Avro schema string in the given {@code HoodieWriteConfig}
* through reading from the hoodie table metadata.
*
* @param writeConfig
* The HoodieWriteConfig
*/
public static void setAvroSchema(HoodieWriteConfig writeConfig, HoodieTableMetaClient metaClient) throws Exception {
TableSchemaResolver tableSchemaResolver = new TableSchemaResolver(metaClient);
Schema tableAvroSchema = tableSchemaResolver.getTableAvroSchema(false);
writeConfig.setSchema(tableAvroSchema.toString());
}
| 3.26 |
hudi_CompactionUtil_rollbackEarliestCompaction_rdh
|
/**
 * Rolls back the earliest compaction if one exists.
 *
 * <p>Makes the strategy less radical: first check whether there are inflight compaction instants,
 * then roll back the earliest inflight instant only if it has timed out. That means, if there are
 * multiple timed-out instants on the timeline, we only roll back the first one at a time.
*/
public static void rollbackEarliestCompaction(HoodieFlinkTable<?> table, Configuration conf) {
Option<HoodieInstant> earliestInflight = table.getActiveTimeline().filterPendingCompactionTimeline().filter(instant -> instant.getState() == HoodieInstant.State.INFLIGHT).firstInstant();
if (earliestInflight.isPresent()) {
HoodieInstant instant = earliestInflight.get();
String currentTime = table.getMetaClient().createNewInstantTime();
int timeout = conf.getInteger(FlinkOptions.COMPACTION_TIMEOUT_SECONDS);
if (StreamerUtil.instantTimeDiffSeconds(currentTime, instant.getTimestamp()) >= timeout) {
LOG.info(((("Rollback the inflight compaction instant: " + instant) + " for timeout(") + timeout) + "s)");
table.rollbackInflightCompaction(instant);
table.getMetaClient().reloadActiveTimeline();
}
}
}
| 3.26 |
hudi_HoodieActiveTimeline_saveToPendingIndexAction_rdh
|
/**
* Save content for inflight/requested index instant.
*/
public void saveToPendingIndexAction(HoodieInstant instant, Option<byte[]> content) {
ValidationUtils.checkArgument(instant.getAction().equals(HoodieTimeline.INDEXING_ACTION), String.format("%s is not equal to %s action", instant.getAction(), INDEXING_ACTION));
createFileInMetaPath(instant.getFileName(), content, false);
}
| 3.26 |
hudi_HoodieActiveTimeline_transitionCompactionInflightToComplete_rdh
|
/**
* Transition Compaction State from inflight to Committed.
*
* @param shouldLock
* Whether to hold the lock when performing transition
* @param inflightInstant
* Inflight instant
* @param data
* Extra Metadata
* @return commit instant
*/
public HoodieInstant transitionCompactionInflightToComplete(boolean shouldLock, HoodieInstant inflightInstant, Option<byte[]> data) {
ValidationUtils.checkArgument(inflightInstant.getAction().equals(HoodieTimeline.COMPACTION_ACTION));
ValidationUtils.checkArgument(inflightInstant.isInflight());
HoodieInstant commitInstant = new HoodieInstant(State.COMPLETED, COMMIT_ACTION, inflightInstant.getTimestamp());
transitionStateToComplete(shouldLock, inflightInstant, commitInstant, data);
return commitInstant;
}
| 3.26 |
hudi_HoodieActiveTimeline_createNewInstant_rdh
|
/**
* Create a pending instant and save to storage.
*
* @param instant
* the pending instant.
*/
public void createNewInstant(HoodieInstant instant) {
LOG.info("Creating a new instant " + instant);
ValidationUtils.checkArgument(!instant.isCompleted());
// Create the in-flight file
createFileInMetaPath(instant.getFileName(), Option.empty(), false);
}
| 3.26 |
hudi_HoodieActiveTimeline_createCompleteInstant_rdh
|
/**
* Create a complete instant and save to storage with a completion time.
*
* @param instant
* the complete instant.
*/
public void createCompleteInstant(HoodieInstant instant) {
LOG.info("Creating a new complete instant " + instant);
createCompleteFileInMetaPath(true, instant, Option.empty());
}
| 3.26 |
hudi_HoodieActiveTimeline_parseDateFromInstantTimeSafely_rdh
|
/**
 * The same parsing method as {@link #parseDateFromInstantTime}, but this method mutes the ParseException.
 * If the given timestamp is invalid, returns {@code Option.empty},
 * or a corresponding Date value if one of these reserved timestamp strings is provided:
 * {@link org.apache.hudi.common.table.timeline.HoodieTimeline#INIT_INSTANT_TS},
 * {@link org.apache.hudi.common.table.timeline.HoodieTimeline#METADATA_BOOTSTRAP_INSTANT_TS},
 * {@link org.apache.hudi.common.table.timeline.HoodieTimeline#FULL_BOOTSTRAP_INSTANT_TS}.
 * This method is useful when parsing timestamps for metrics.
*
* @param timestamp
* a timestamp String which follow pattern as
* {@link org.apache.hudi.common.table.timeline.HoodieInstantTimeGenerator#SECS_INSTANT_TIMESTAMP_FORMAT}.
* @return {@code Option<Date>} of instant timestamp, {@code Option.empty} if invalid timestamp
*/
public static Option<Date> parseDateFromInstantTimeSafely(String timestamp) {
Option<Date> v0;
try {
v0 = Option.of(HoodieInstantTimeGenerator.parseDateFromInstantTime(timestamp));
} catch (ParseException e) {
if (NOT_PARSABLE_TIMESTAMPS.contains(timestamp)) {
v0 = Option.of(new Date(Integer.parseInt(timestamp)));
} else {
LOG.warn((("Failed to parse timestamp " + timestamp) + ": ") + e.getMessage());
v0 = Option.empty();
}
}
return v0;
}
| 3.26 |
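A brief, hedged usage sketch with hypothetical timestamps.

// A well-formed instant timestamp parses to a Date.
Option<Date> parsed = HoodieActiveTimeline.parseDateFromInstantTimeSafely("20231105123045");
// Garbage input returns Option.empty() and only logs a warning instead of throwing.
Option<Date> invalid = HoodieActiveTimeline.parseDateFromInstantTimeSafely("not-a-timestamp");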
hudi_HoodieActiveTimeline_saveToPendingReplaceCommit_rdh
|
/**
* Saves content for requested REPLACE instant.
*/
public void saveToPendingReplaceCommit(HoodieInstant instant, Option<byte[]> content) {
ValidationUtils.checkArgument(instant.getAction().equals(HoodieTimeline.REPLACE_COMMIT_ACTION));
createFileInMetaPath(instant.getFileName(), content, false);
}
| 3.26 |
hudi_HoodieActiveTimeline_revertInstantFromInflightToRequested_rdh
|
/**
* Revert instant state from inflight to requested.
*
* @param inflightInstant
* Inflight Instant
* @return requested instant
*/
public HoodieInstant revertInstantFromInflightToRequested(HoodieInstant inflightInstant) {
ValidationUtils.checkArgument(inflightInstant.isInflight());
HoodieInstant requestedInstant = new HoodieInstant(State.REQUESTED, inflightInstant.getAction(), inflightInstant.getTimestamp());
if (metaClient.getTimelineLayoutVersion().isNullVersion()) {
// Pass empty data since it is read from the corresponding .aux/.compaction instant file
transitionPendingState(inflightInstant, requestedInstant, Option.empty());
} else {
m1(inflightInstant);
}
return requestedInstant;
}
| 3.26 |
hudi_HoodieActiveTimeline_getCommitMetadataStream_rdh
|
/**
 * Returns a stream of {@link HoodieCommitMetadata} in reverse chronological order (i.e. the most
 * recent metadata is the first element).
*/
private Stream<Pair<HoodieInstant, HoodieCommitMetadata>> getCommitMetadataStream() {
// NOTE: Streams are lazy
return getCommitsTimeline().filterCompletedInstants().getInstantsAsStream().sorted(Comparator.comparing(HoodieInstant::getTimestamp).reversed()).map(instant -> {
try {
HoodieCommitMetadata commitMetadata = HoodieCommitMetadata.fromBytes(getInstantDetails(instant).get(), HoodieCommitMetadata.class);
return Pair.of(instant, commitMetadata);
} catch (IOException e) {
throw new HoodieIOException(String.format("Failed to fetch HoodieCommitMetadata for instant (%s)", instant), e);
}
});
}
| 3.26 |
hudi_HoodieActiveTimeline_m4_rdh
|
/**
* Transition Rollback State from inflight to Committed.
*
* @param shouldLock
* Whether to hold the lock when performing transition
* @param inflightInstant
* Inflight instant
* @param data
* Extra Metadata
* @return commit instant
*/
public HoodieInstant m4(boolean shouldLock, HoodieInstant inflightInstant, Option<byte[]> data) {
ValidationUtils.checkArgument(inflightInstant.getAction().equals(HoodieTimeline.ROLLBACK_ACTION));
ValidationUtils.checkArgument(inflightInstant.isInflight());
HoodieInstant commitInstant = new HoodieInstant(State.COMPLETED, ROLLBACK_ACTION, inflightInstant.getTimestamp());
// Then write to timeline
transitionStateToComplete(shouldLock, inflightInstant, commitInstant, data);
return commitInstant;
}
| 3.26 |
hudi_HoodieActiveTimeline_createNewInstantTime_rdh
|
/**
* Returns next instant time in the correct format.
* Ensures each instant time is at least 1 millisecond apart since we create instant times at millisecond granularity.
*
* @param shouldLock
* whether the lock should be enabled to get the instant time.
* @param timeGenerator
* TimeGenerator used to generate the instant time.
* @param milliseconds
* Milliseconds to add to current time while generating the new instant time
*/
public static String createNewInstantTime(boolean shouldLock, TimeGenerator timeGenerator, long milliseconds) {
return HoodieInstantTimeGenerator.createNewInstantTime(shouldLock, timeGenerator, milliseconds);
}
| 3.26 |
hudi_HoodieActiveTimeline_revertLogCompactionInflightToRequested_rdh
|
/**
 * TODO: This method is not needed, since the log compaction plan is not an immutable plan.
* Revert logcompaction State from inflight to requested.
*
* @param inflightInstant
* Inflight Instant
* @return requested instant
 */
public HoodieInstant revertLogCompactionInflightToRequested(HoodieInstant inflightInstant) {
ValidationUtils.checkArgument(inflightInstant.getAction().equals(HoodieTimeline.LOG_COMPACTION_ACTION));
ValidationUtils.checkArgument(inflightInstant.isInflight());
HoodieInstant v11 = new HoodieInstant(State.REQUESTED, LOG_COMPACTION_ACTION, inflightInstant.getTimestamp());
if (metaClient.getTimelineLayoutVersion().isNullVersion()) {
// Pass empty data since it is read from the corresponding .aux/.compaction instant file
transitionPendingState(inflightInstant, v11, Option.empty());
} else {
m1(inflightInstant);
}
return v11;
}
| 3.26 |
hudi_HoodieActiveTimeline_transitionCleanInflightToComplete_rdh
|
// -----------------------------------------------------------------
// END - COMPACTION RELATED META-DATA MANAGEMENT
// -----------------------------------------------------------------
/**
* Transition Clean State from inflight to Committed.
*
* @param shouldLock
* Whether to hold the lock when performing transition
* @param inflightInstant
* Inflight instant
* @param data
* Extra Metadata
* @return commit instant
*/
public HoodieInstant transitionCleanInflightToComplete(boolean shouldLock, HoodieInstant inflightInstant, Option<byte[]> data) {
ValidationUtils.checkArgument(inflightInstant.getAction().equals(HoodieTimeline.CLEAN_ACTION));
ValidationUtils.checkArgument(inflightInstant.isInflight());
HoodieInstant commitInstant = new HoodieInstant(State.COMPLETED, CLEAN_ACTION, inflightInstant.getTimestamp());
// Then write to timeline
transitionStateToComplete(shouldLock, inflightInstant, commitInstant, data);
return commitInstant;
}
| 3.26 |
hudi_HoodieActiveTimeline_transitionReplaceRequestedToInflight_rdh
|
/**
* Transition replace requested file to replace inflight.
*
* @param requestedInstant
* Requested instant
* @param data
* Extra Metadata
* @return inflight instant
*/
public HoodieInstant transitionReplaceRequestedToInflight(HoodieInstant requestedInstant, Option<byte[]> data) {
ValidationUtils.checkArgument(requestedInstant.getAction().equals(HoodieTimeline.REPLACE_COMMIT_ACTION));
ValidationUtils.checkArgument(requestedInstant.isRequested());
HoodieInstant v21 = new HoodieInstant(State.INFLIGHT, REPLACE_COMMIT_ACTION, requestedInstant.getTimestamp());
// Then write to timeline
transitionPendingState(requestedInstant, v21, data);
return v21;
}
| 3.26 |
hudi_HoodieActiveTimeline_revertIndexInflightToRequested_rdh
|
/**
* Revert index instant state from inflight to requested.
*
* @param inflightInstant
* Inflight Instant
* @return requested instant
*/
public HoodieInstant revertIndexInflightToRequested(HoodieInstant inflightInstant) {
ValidationUtils.checkArgument(inflightInstant.getAction().equals(HoodieTimeline.INDEXING_ACTION), String.format("%s is not equal to %s action", inflightInstant.getAction(), INDEXING_ACTION));
ValidationUtils.checkArgument(inflightInstant.isInflight(), String.format("Instant %s not inflight", inflightInstant.getTimestamp()));
HoodieInstant requestedInstant = new HoodieInstant(State.REQUESTED, INDEXING_ACTION, inflightInstant.getTimestamp());
if (metaClient.getTimelineLayoutVersion().isNullVersion()) {
transitionPendingState(inflightInstant, requestedInstant, Option.empty());
} else {
m1(inflightInstant);
}
return requestedInstant;
}
| 3.26 |
hudi_HoodieActiveTimeline_deleteInstantFileIfExists_rdh
|
/**
 * Note: This method should only be used to delete requested/inflight instants, empty clean instants,
 * or completed commit instants as part of an archive operation.
*/
public void deleteInstantFileIfExists(HoodieInstant instant) {
LOG.info("Deleting instant " + instant);
Path commitFilePath = getInstantFileNamePath(instant.getFileName());
try {
if (metaClient.getFs().exists(commitFilePath)) {
boolean result = metaClient.getFs().delete(commitFilePath, false);
if (result) {
LOG.info("Removed instant " + instant);
} else {
throw new HoodieIOException("Could not delete instant " + instant + " with path " + commitFilePath);
}
} else {
LOG.warn("The commit " + commitFilePath + " to remove does not exist");
}
} catch (IOException e) {
throw new HoodieIOException("Could not remove commit " + commitFilePath, e);
}
}
| 3.26 |
hudi_HoodieActiveTimeline_getInstantFileName_rdh
|
/**
 * Many callers might not pass completionTime, so here we have to search the
 * timeline to get the completionTime. The impact should be minor since
 * 1. it only happens when tests pass an instant without a completion time;
 * 2. we already hold all instants in memory, so the cost should be minor.
*
* <p>TODO: [HUDI-6885] Depreciate HoodieActiveTimeline#getInstantFileName and fix related tests.
*/
protected String getInstantFileName(HoodieInstant instant) {
if (instant.isCompleted() && (instant.getCompletionTime() == null)) {
return getInstantsAsStream().filter(s -> s.equals(instant)).findFirst().orElseThrow(() -> new HoodieIOException("Cannot find the instant " + instant)).getFileName();
}
return instant.getFileName();
}
| 3.26 |
hudi_HoodieActiveTimeline_transitionCleanRequestedToInflight_rdh
|
/**
* Transition Clean State from requested to inflight.
*
* @param requestedInstant
* requested instant
* @param data
* Optional data to be stored
 * @return inflight instant
*/
public HoodieInstant transitionCleanRequestedToInflight(HoodieInstant requestedInstant, Option<byte[]> data) {
ValidationUtils.checkArgument(requestedInstant.getAction().equals(HoodieTimeline.CLEAN_ACTION));
ValidationUtils.checkArgument(requestedInstant.isRequested());
HoodieInstant inflight = new HoodieInstant(State.INFLIGHT, CLEAN_ACTION, requestedInstant.getTimestamp());
transitionPendingState(requestedInstant, inflight, data);
return inflight;
}
| 3.26 |
hudi_HoodieActiveTimeline_parseDateFromInstantTime_rdh
|
/**
* Parse the timestamp of an Instant and return a {@code Date}.
 * Throws ParseException if the timestamp is not in the valid format of
* {@link org.apache.hudi.common.table.timeline.HoodieInstantTimeGenerator#SECS_INSTANT_TIMESTAMP_FORMAT}.
*
* @param timestamp
* a timestamp String which follow pattern as
* {@link org.apache.hudi.common.table.timeline.HoodieInstantTimeGenerator#SECS_INSTANT_TIMESTAMP_FORMAT}.
* @return Date of instant timestamp
*/
public static Date parseDateFromInstantTime(String timestamp) throws ParseException {
return HoodieInstantTimeGenerator.parseDateFromInstantTime(timestamp);
}
| 3.26 |
hudi_HoodieActiveTimeline_transitionRestoreRequestedToInflight_rdh
|
/**
* Transition Restore State from requested to inflight.
*
* @param requestedInstant
* requested instant
 * @return inflight instant
*/
public HoodieInstant transitionRestoreRequestedToInflight(HoodieInstant requestedInstant) {
ValidationUtils.checkArgument(requestedInstant.getAction().equals(HoodieTimeline.RESTORE_ACTION), "Transition to inflight requested for a restore instant with diff action " + requestedInstant);
ValidationUtils.checkArgument(requestedInstant.isRequested(), "Transition to inflight requested for an instant not in requested state " + requestedInstant.toString());
HoodieInstant inflight = new HoodieInstant(State.INFLIGHT, RESTORE_ACTION, requestedInstant.getTimestamp());
transitionPendingState(requestedInstant, inflight, Option.empty());
return inflight;
}
| 3.26 |
hudi_HoodieActiveTimeline_m3_rdh
|
// -----------------------------------------------------------------
// BEGIN - COMPACTION RELATED META-DATA MANAGEMENT.
// -----------------------------------------------------------------
public Option<byte[]> m3(HoodieInstant instant) {
return readDataFromPath(new Path(metaClient.getMetaPath(), getInstantFileName(instant)));
}
| 3.26 |
hudi_HoodieActiveTimeline_transitionIndexInflightToComplete_rdh
|
/**
* Transition index instant state from inflight to completed.
*
* @param shouldLock
* Whether to hold the lock when performing transition
* @param inflightInstant
* Inflight Instant
* @return completed instant
*/
public HoodieInstant transitionIndexInflightToComplete(boolean shouldLock, HoodieInstant inflightInstant, Option<byte[]> data) {
ValidationUtils.checkArgument(inflightInstant.getAction().equals(HoodieTimeline.INDEXING_ACTION), String.format("%s is not equal to %s action", inflightInstant.getAction(), INDEXING_ACTION));
ValidationUtils.checkArgument(inflightInstant.isInflight(), String.format("Instant %s not inflight", inflightInstant.getTimestamp()));
HoodieInstant commitInstant = new HoodieInstant(State.COMPLETED, INDEXING_ACTION, inflightInstant.getTimestamp());
transitionStateToComplete(shouldLock, inflightInstant, commitInstant, data);
return commitInstant;
}
| 3.26 |
hudi_HoodieActiveTimeline_formatDate_rdh
|
/**
* Format the Date to a String representing the timestamp of a Hoodie Instant.
*/
public static String formatDate(Date timestamp) {
return HoodieInstantTimeGenerator.formatDate(timestamp);
}
| 3.26 |
hudi_HoodieActiveTimeline_transitionCompactionRequestedToInflight_rdh
|
/**
* Transition Compaction State from requested to inflight.
*
* @param requestedInstant
* Requested instant
* @return inflight instant
*/
public HoodieInstant transitionCompactionRequestedToInflight(HoodieInstant requestedInstant) {
ValidationUtils.checkArgument(requestedInstant.getAction().equals(HoodieTimeline.COMPACTION_ACTION));
ValidationUtils.checkArgument(requestedInstant.isRequested());
HoodieInstant inflightInstant = new HoodieInstant(State.INFLIGHT, COMPACTION_ACTION, requestedInstant.getTimestamp());
transitionPendingState(requestedInstant, inflightInstant, Option.empty());
return inflightInstant;
}
| 3.26 |
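A hedged sketch of the requested -> inflight -> completed flow using the two compaction transition helpers shown in this section; `timeline`, `requestedInstant`, and `commitMetadataBytes` (an Option<byte[]>) are assumed to exist.

HoodieInstant inflightInstant = timeline.transitionCompactionRequestedToInflight(requestedInstant);
// ... execute the compaction plan and build the commit metadata bytes ...
HoodieInstant completedInstant = timeline.transitionCompactionInflightToComplete(true, inflightInstant, commitMetadataBytes);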
hudi_HoodieActiveTimeline_transitionRollbackRequestedToInflight_rdh
|
/**
* Transition Rollback State from requested to inflight.
*
* @param requestedInstant
* requested instant
 * @return inflight instant
 */
public HoodieInstant transitionRollbackRequestedToInflight(HoodieInstant requestedInstant) {
ValidationUtils.checkArgument(requestedInstant.getAction().equals(HoodieTimeline.ROLLBACK_ACTION));
ValidationUtils.checkArgument(requestedInstant.isRequested());
HoodieInstant inflight = new HoodieInstant(State.INFLIGHT, ROLLBACK_ACTION, requestedInstant.getTimestamp());
transitionPendingState(requestedInstant, inflight, Option.empty());
return inflight;
}
| 3.26 |
hudi_HoodieActiveTimeline_transitionLogCompactionRequestedToInflight_rdh
|
/**
* Transition LogCompaction State from requested to inflight.
*
* @param requestedInstant
* Requested instant
* @return inflight instant
 */
public HoodieInstant transitionLogCompactionRequestedToInflight(HoodieInstant requestedInstant) {
ValidationUtils.checkArgument(requestedInstant.getAction().equals(HoodieTimeline.LOG_COMPACTION_ACTION));
ValidationUtils.checkArgument(requestedInstant.isRequested());
HoodieInstant inflightInstant = new HoodieInstant(State.INFLIGHT, LOG_COMPACTION_ACTION, requestedInstant.getTimestamp());
transitionPendingState(requestedInstant, inflightInstant, Option.empty());
return inflightInstant;
}
| 3.26 |
hudi_HoodieActiveTimeline_transitionLogCompactionInflightToComplete_rdh
|
/**
* Transition Log Compaction State from inflight to Committed.
*
* @param shouldLock
* Whether to hold the lock when performing transition
* @param inflightInstant
* Inflight instant
* @param data
* Extra Metadata
* @return commit instant
*/
public HoodieInstant transitionLogCompactionInflightToComplete(boolean shouldLock, HoodieInstant inflightInstant, Option<byte[]> data) {
ValidationUtils.checkArgument(inflightInstant.getAction().equals(HoodieTimeline.LOG_COMPACTION_ACTION));
ValidationUtils.checkArgument(inflightInstant.isInflight());
HoodieInstant commitInstant = new HoodieInstant(State.COMPLETED, DELTA_COMMIT_ACTION, inflightInstant.getTimestamp());
transitionStateToComplete(shouldLock, inflightInstant, commitInstant, data);
return commitInstant;
}
| 3.26 |
hudi_HoodieActiveTimeline_transitionIndexRequestedToInflight_rdh
|
/**
* Transition index instant state from requested to inflight.
*
* @param requestedInstant
 * Requested instant
* @return inflight instant
*/
public HoodieInstant transitionIndexRequestedToInflight(HoodieInstant requestedInstant, Option<byte[]> data) {
ValidationUtils.checkArgument(requestedInstant.getAction().equals(HoodieTimeline.INDEXING_ACTION), String.format("%s is not equal to %s action", requestedInstant.getAction(), INDEXING_ACTION));
ValidationUtils.checkArgument(requestedInstant.isRequested(), String.format("Instant %s not in requested state", requestedInstant.getTimestamp()));
HoodieInstant inflightInstant = new HoodieInstant(State.INFLIGHT, INDEXING_ACTION, requestedInstant.getTimestamp());
transitionPendingState(requestedInstant, inflightInstant, data);
return inflightInstant;
}
| 3.26 |
hudi_HoodieActiveTimeline_getLastCommitMetadataWithValidSchema_rdh
|
/**
* Returns most recent instant having valid schema in its {@link HoodieCommitMetadata}
*/
public Option<Pair<HoodieInstant, HoodieCommitMetadata>> getLastCommitMetadataWithValidSchema() {
return Option.fromJavaOptional(getCommitMetadataStream().filter(instantCommitMetadataPair -> WriteOperationType.canUpdateSchema(instantCommitMetadataPair.getRight().getOperationType()) && (!StringUtils.isNullOrEmpty(instantCommitMetadataPair.getValue().getMetadata(HoodieCommitMetadata.SCHEMA_KEY)))).findFirst());
}
| 3.26 |
hudi_HoodieActiveTimeline_readObject_rdh
|
/**
* This method is only used when this object is deserialized in a spark executor.
*
* @deprecated */
@Deprecated
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
in.defaultReadObject();
}
| 3.26 |
hudi_HoodieActiveTimeline_transitionReplaceInflightToComplete_rdh
|
/**
* Transition replace inflight to Committed.
*
* @param shouldLock
* Whether to hold the lock when performing transition
* @param inflightInstant
* Inflight instant
* @param data
* Extra Metadata
* @return commit instant
*/
public HoodieInstant transitionReplaceInflightToComplete(boolean shouldLock, HoodieInstant inflightInstant, Option<byte[]> data) {
ValidationUtils.checkArgument(inflightInstant.getAction().equals(HoodieTimeline.REPLACE_COMMIT_ACTION));
ValidationUtils.checkArgument(inflightInstant.isInflight());
HoodieInstant commitInstant = new HoodieInstant(State.COMPLETED, REPLACE_COMMIT_ACTION, inflightInstant.getTimestamp());
// Then write to timeline
transitionStateToComplete(shouldLock, inflightInstant, commitInstant, data);
return commitInstant;
}
| 3.26 |
hudi_ArchivalUtils_getMinAndMaxInstantsToKeep_rdh
|
/**
 * getMinAndMaxInstantsToKeep is used by the archival service to find the
 * minimum and maximum number of instants to keep in the active timeline.
*
* @param table
* table implementation extending org.apache.hudi.table.HoodieTable
* @param metaClient
* meta client
* @return Pair containing min instants and max instants to keep.
*/
public static Pair<Integer, Integer> getMinAndMaxInstantsToKeep(HoodieTable<?, ?, ?, ?> table, HoodieTableMetaClient metaClient) {
HoodieWriteConfig config = table.getConfig();
HoodieTimeline completedCommitsTimeline = table.getCompletedCommitsTimeline();
Option<HoodieInstant> latestCommit = completedCommitsTimeline.lastInstant();
HoodieCleaningPolicy cleanerPolicy = config.getCleanerPolicy();
int cleanerCommitsRetained = config.getCleanerCommitsRetained();
int cleanerHoursRetained = config.getCleanerHoursRetained();
int maxInstantsToKeep;
int minInstantsToKeep;
Option<HoodieInstant> earliestCommitToRetain = getEarliestCommitToRetain(metaClient, latestCommit, cleanerPolicy, cleanerCommitsRetained, cleanerHoursRetained);
int configuredMinInstantsToKeep = config.getMinCommitsToKeep();
int configuredMaxInstantsToKeep = config.getMaxCommitsToKeep();
if (earliestCommitToRetain.isPresent()) {
int minInstantsToKeepBasedOnCleaning = completedCommitsTimeline.findInstantsAfter(earliestCommitToRetain.get().getTimestamp()).countInstants() + 2;
if (configuredMinInstantsToKeep < minInstantsToKeepBasedOnCleaning) {
maxInstantsToKeep = (minInstantsToKeepBasedOnCleaning + configuredMaxInstantsToKeep) - configuredMinInstantsToKeep;
minInstantsToKeep = minInstantsToKeepBasedOnCleaning;
LOG.warn(("The configured archival configs {}={} is more aggressive than the cleaning " + "configs as the earliest commit to retain is {}. Adjusted the archival configs ") + "to be {}={} and {}={}", MIN_COMMITS_TO_KEEP.key(), configuredMinInstantsToKeep, earliestCommitToRetain.get(), MIN_COMMITS_TO_KEEP.key(), minInstantsToKeep, MAX_COMMITS_TO_KEEP.key(), maxInstantsToKeep);
switch (cleanerPolicy) {
case KEEP_LATEST_COMMITS :
LOG.warn("Cleaning configs: {}=KEEP_LATEST_COMMITS {}={}", CLEANER_POLICY.key(), CLEANER_COMMITS_RETAINED.key(), cleanerCommitsRetained);
break;
case KEEP_LATEST_BY_HOURS :
LOG.warn("Cleaning configs: {}=KEEP_LATEST_BY_HOURS {}={}", CLEANER_POLICY.key(), CLEANER_HOURS_RETAINED.key(), cleanerHoursRetained);
break;
case KEEP_LATEST_FILE_VERSIONS :
LOG.warn("Cleaning configs: {}=CLEANER_FILE_VERSIONS_RETAINED {}={}", CLEANER_POLICY.key(), CLEANER_FILE_VERSIONS_RETAINED.key(), config.getCleanerFileVersionsRetained());
break;
default :
break;
}
} else {
maxInstantsToKeep = configuredMaxInstantsToKeep;
minInstantsToKeep = configuredMinInstantsToKeep;
}
} else {
maxInstantsToKeep = configuredMaxInstantsToKeep;
minInstantsToKeep = configuredMinInstantsToKeep;
}
return Pair.of(minInstantsToKeep, maxInstantsToKeep);
}
| 3.26 |
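A small worked example of the adjustment above, with purely hypothetical numbers (illustrative arithmetic only, not Hudi API).

int configuredMin = 20;
int configuredMax = 30;
int commitsAfterEarliestToRetain = 40;                      // commits newer than the cleaner's retention boundary
int minBasedOnCleaning = commitsAfterEarliestToRetain + 2;  // 42
// configuredMin (20) < minBasedOnCleaning (42), so archival is relaxed to match cleaning:
int minInstantsToKeep = minBasedOnCleaning;                                  // 42
int maxInstantsToKeep = minBasedOnCleaning + configuredMax - configuredMin;  // 52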
hudi_BoundedInMemoryQueue_expectMoreRecords_rdh
|
/**
* Checks if records are either available in the queue or expected to be written in future.
*/
private boolean expectMoreRecords() {
return (!isWriteDone.get()) || (isWriteDone.get() && (!queue.isEmpty()));
}
| 3.26 |
hudi_BoundedInMemoryQueue_markAsFailed_rdh
|
/**
* API to allow producers and consumer to communicate termination due to failure.
*/
@Override
public void markAsFailed(Throwable e) {
this.hasFailed.set(e);
// release the permits so that if the queueing thread is waiting for permits then it will
// get it.
this.rateLimiter.release(f0 + 1);
}
| 3.26 |
hudi_BoundedInMemoryQueue_adjustBufferSizeIfNeeded_rdh
|
/**
 * Samples records with "RECORD_SAMPLING_RATE" frequency and computes the average record size in bytes. It is used for
 * determining the maximum number of records to queue. Based on the change in average size it may increase or decrease
 * the available permits.
*
* @param payload
* Payload to size
*/
private void adjustBufferSizeIfNeeded(final O payload) throws InterruptedException {
if ((this.samplingRecordCounter.incrementAndGet() % RECORD_SAMPLING_RATE) != 0) {
return;
}
final long recordSizeInBytes = payloadSizeEstimator.sizeEstimate(payload);
final long newAvgRecordSizeInBytes = Math.max(1, ((avgRecordSizeInBytes * numSamples) + recordSizeInBytes) / (numSamples + 1));
final int v2 = ((int) (Math.min(f0, Math.max(1, this.memoryLimit / newAvgRecordSizeInBytes))));
// If there is any change in number of records to cache then we will either release (if it increased) or acquire
// (if it decreased) to adjust rate limiting to newly computed value.
if (v2 > currentRateLimit) {
rateLimiter.release(v2 - currentRateLimit);
} else if (v2 < currentRateLimit) {
rateLimiter.acquire(currentRateLimit - v2);
}
currentRateLimit = v2;
avgRecordSizeInBytes = newAvgRecordSizeInBytes;
numSamples++;
}
| 3.26 |
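A worked example of the sampling math above, with hypothetical numbers (illustrative arithmetic only, not Hudi API).

long memoryLimit = 1L << 30;        // 1 GiB queue memory budget
long avgRecordSizeInBytes = 1024;   // current running average over 3 samples
long numSamples = 3;
long sampledPayloadSize = 9 * 1024; // newly sampled record is 9 KiB
long newAvg = Math.max(1, (avgRecordSizeInBytes * numSamples + sampledPayloadSize) / (numSamples + 1)); // 3072
long newPermits = Math.max(1, memoryLimit / newAvg); // ~349,525 queued records allowed
// If newPermits exceeds the current limit, the semaphore releases the difference; otherwise it acquires it.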
hudi_BoundedInMemoryQueue_seal_rdh
|
/**
* Puts an empty entry to queue to denote termination.
*/
@Override
public void seal() {
// done queueing records notifying queue-reader.
isWriteDone.set(true);
}
| 3.26 |
hudi_BoundedInMemoryQueue_readNextRecord_rdh
|
/**
* Reader interface but never exposed to outside world as this is a single consumer queue. Reading is done through a
* singleton iterator for this queue.
*/
@Override
public Option<O> readNextRecord() {
if (this.isReadDone.get()) {
return Option.empty();
}
rateLimiter.release();
Option<O> newRecord = Option.empty();
while (expectMoreRecords()) {
try {
throwExceptionIfFailed();
newRecord = queue.poll(RECORD_POLL_INTERVAL_SEC, TimeUnit.SECONDS);
if (newRecord != null) {
break;
}
} catch (InterruptedException e) {
LOG.error("error reading records from queue", e);
throw new HoodieException(e);
}
}
// Check one more time here as it is possible producer erred out and closed immediately
throwExceptionIfFailed();
if ((newRecord != null) && newRecord.isPresent()) {
return newRecord;
} else {
// We are done reading all the records from internal iterator.
this.isReadDone.set(true);
return Option.empty();
}
}
| 3.26 |
hudi_BoundedInMemoryQueue_insertRecord_rdh
|
/**
* Inserts record into queue after applying transformation.
*
* @param t
* Item to be queued
*/
@Override
public void insertRecord(I t) throws Exception {
// If already closed, throw exception
if (isWriteDone.get()) {
throw new IllegalStateException("Queue closed for enqueueing new entries");
}
// We need to stop queueing if queue-reader has failed and exited.
throwExceptionIfFailed();
rateLimiter.acquire();
// We are retrieving insert value in the record queueing thread to offload computation
// around schema validation
// and record creation to it.
final O payload = transformFunction.apply(t);
adjustBufferSizeIfNeeded(payload);
queue.put(Option.of(payload));
}
| 3.26 |
hudi_SparkValidatorUtils_readRecordsForBaseFiles_rdh
|
/**
* Get records from specified list of data files.
*/
public static Dataset<Row> readRecordsForBaseFiles(SQLContext sqlContext, List<String> baseFilePaths) {
return sqlContext.read().parquet(JavaConverters.asScalaBufferConverter(baseFilePaths).asScala());
}
| 3.26 |
hudi_SparkValidatorUtils_getRecordsFromPendingCommits_rdh
|
/**
 * Get records from modified partitions, including any inflight commits.
 * Note that this only works for COW tables.
*/
public static Dataset<Row> getRecordsFromPendingCommits(SQLContext sqlContext, Set<String> partitionsAffected, HoodieWriteMetadata<HoodieData<WriteStatus>> writeMetadata, HoodieTable table, String instantTime) {
// build file system view with pending commits
HoodieTablePreCommitFileSystemView fsView = new HoodieTablePreCommitFileSystemView(table.getMetaClient(), table.getHoodieView(), writeMetadata.getWriteStats().get(), writeMetadata.getPartitionToReplaceFileIds(), instantTime);
List<String> newFiles = partitionsAffected.stream().flatMap(partition -> fsView.getLatestBaseFiles(partition).map(BaseFile::getPath)).collect(Collectors.toList());
if (newFiles.isEmpty()) {
return sqlContext.emptyDataFrame();
}
return readRecordsForBaseFiles(sqlContext, newFiles);
}
| 3.26 |
hudi_SparkValidatorUtils_getRecordsFromCommittedFiles_rdh
|
/**
* Get records from partitions modified as a dataset.
* Note that this only works for COW tables.
*
* @param sqlContext
* Spark {@link SQLContext} instance.
* @param partitionsAffected
* A set of affected partitions.
* @param table
* {@link HoodieTable} instance.
* @param newStructTypeSchema
* The {@link StructType} schema from after state.
* @return The records in Dataframe from committed files.
*/
public static Dataset<Row> getRecordsFromCommittedFiles(SQLContext sqlContext, Set<String> partitionsAffected, HoodieTable table, StructType newStructTypeSchema) {
List<String> committedFiles = partitionsAffected.stream().flatMap(partition -> table.getBaseFileOnlyView().getLatestBaseFiles(partition).map(BaseFile::getPath)).collect(Collectors.toList());
if (committedFiles.isEmpty()) {
try {
return sqlContext.createDataFrame(sqlContext.emptyDataFrame().rdd(), AvroConversionUtils.convertAvroSchemaToStructType(new TableSchemaResolver(table.getMetaClient()).getTableAvroSchema()));
} catch (Exception e) {
LOG.warn("Cannot get table schema from before state.", e);
LOG.warn(("Use the schema from after state (current transaction) to create the empty Spark " + "dataframe: ") + newStructTypeSchema);
return sqlContext.createDataFrame(sqlContext.emptyDataFrame().rdd(), newStructTypeSchema);
}
}
return readRecordsForBaseFiles(sqlContext, committedFiles);
}
| 3.26 |
hudi_SparkValidatorUtils_runValidatorAsync_rdh
|
/**
* Run validators in a separate thread pool for parallelism. Each of validator can submit a distributed spark job if needed.
*/
private static CompletableFuture<Boolean> runValidatorAsync(SparkPreCommitValidator validator, HoodieWriteMetadata<HoodieData<WriteStatus>> writeMetadata, Dataset<Row> beforeState, Dataset<Row> afterState, String instantTime) {
return CompletableFuture.supplyAsync(() -> {
try {
validator.validate(instantTime, writeMetadata, beforeState, afterState);
LOG.info("validation complete for " + validator.getClass().getName());
return true;
} catch (HoodieValidationException e) {
LOG.error("validation failed for " + validator.getClass().getName(), e);
return false;
}
});
}
| 3.26 |
hudi_CommitsCommand_getCommitForInstant_rdh
|
/* Checks whether a commit, replacecommit, or deltacommit action exists in the timeline. */
private Option<HoodieInstant> getCommitForInstant(HoodieTimeline timeline, String instantTime) {
List<HoodieInstant> instants = Arrays.asList(new HoodieInstant(false, HoodieTimeline.COMMIT_ACTION, instantTime), new HoodieInstant(false, HoodieTimeline.REPLACE_COMMIT_ACTION, instantTime), new HoodieInstant(false, HoodieTimeline.DELTA_COMMIT_ACTION, instantTime));
return Option.fromJavaOptional(instants.stream().filter(timeline::containsInstant).findAny());
}
| 3.26 |
hudi_CommitsCommand_printCommits_rdh
|
/**
* CLI command to display commits options.
*/
@ShellComponent
public class CommitsCommand {
private String printCommits(HoodieDefaultTimeline timeline, final Integer limit, final String sortByField, final boolean descending, final boolean headerOnly, final String tempTableName) throws IOException {
final List<Comparable[]> rows = new ArrayList<>();
final List<HoodieInstant> commits = timeline.getCommitsTimeline().filterCompletedInstants().getInstantsAsStream().sorted(HoodieInstant.COMPARATOR.reversed()).collect(Collectors.toList());
for (final HoodieInstant commit : commits) {
if (timeline.getInstantDetails(commit).isPresent()) {
final HoodieCommitMetadata commitMetadata = HoodieCommitMetadata.fromBytes(timeline.getInstantDetails(commit).get(), HoodieCommitMetadata.class);
rows.add(new Comparable[]{ commit.getTimestamp(), commitMetadata.fetchTotalBytesWritten(), commitMetadata.fetchTotalFilesInsert(), commitMetadata.fetchTotalFilesUpdated(), commitMetadata.fetchTotalPartitionsWritten(), commitMetadata.fetchTotalRecordsWritten(), commitMetadata.fetchTotalUpdateRecordsWritten(), commitMetadata.fetchTotalWriteErrors() });
}
}
final Map<String, Function<Object, String>> fieldNameToConverterMap = new HashMap<>();
fieldNameToConverterMap.put(HoodieTableHeaderFields.HEADER_TOTAL_BYTES_WRITTEN, entry ->
NumericUtils.humanReadableByteCount(Double.parseDouble(entry.toString())));
final TableHeader header = HoodieTableHeaderFields.getTableHeader();
return HoodiePrintHelper.print(header, fieldNameToConverterMap, sortByField, descending, limit, headerOnly, rows, tempTableName);
}
| 3.26 |
hudi_HoodieInMemoryHashIndex_canIndexLogFiles_rdh
|
/**
 * The in-memory mapping can also track records written to log files.
*/
@Override
public boolean canIndexLogFiles() {
return true;
}
| 3.26 |
hudi_HoodieInMemoryHashIndex_isGlobal_rdh
|
/**
* Only looks up by recordKey.
*/
@Override
public boolean isGlobal() {
return true;
}
| 3.26 |
hudi_CleanActionExecutor_runPendingClean_rdh
|
/**
* Executes the Cleaner plan stored in the instant metadata.
*/
HoodieCleanMetadata runPendingClean(HoodieTable<T, I, K, O> table, HoodieInstant cleanInstant) {
try {
HoodieCleanerPlan cleanerPlan = CleanerUtils.getCleanerPlan(table.getMetaClient(), cleanInstant);
return runClean(table, cleanInstant, cleanerPlan);
} catch (IOException e) {
throw new HoodieIOException(e.getMessage(), e);
}
}
| 3.26 |
hudi_CleanActionExecutor_clean_rdh
|
/**
* Performs cleaning of partition paths according to cleaning policy and returns the number of files cleaned. Handles
* skews in partitions to clean by making files to clean as the unit of task distribution.
*
* @throws IllegalArgumentException
* if unknown cleaning policy is provided
*/
List<HoodieCleanStat> clean(HoodieEngineContext context, HoodieCleanerPlan cleanerPlan) {
int cleanerParallelism = Math.min(cleanerPlan.getFilePathsToBeDeletedPerPartition().values().stream().mapToInt(List::size).sum(), config.getCleanerParallelism());
LOG.info("Using cleanerParallelism: " + cleanerParallelism);
context.setJobStatus(this.getClass().getSimpleName(), "Perform cleaning of table: " + config.getTableName());
Stream<Pair<String, CleanFileInfo>> v12 = cleanerPlan.getFilePathsToBeDeletedPerPartition().entrySet().stream().flatMap(x -> x.getValue().stream().map(y -> new ImmutablePair<>(x.getKey(), new CleanFileInfo(y.getFilePath(), y.getIsBootstrapBaseFile()))));
Stream<ImmutablePair<String, PartitionCleanStat>> partitionCleanStats = context.mapPartitionsToPairAndReduceByKey(v12, iterator -> deleteFilesFunc(iterator, table), PartitionCleanStat::merge, cleanerParallelism);
Map<String, PartitionCleanStat> partitionCleanStatsMap = partitionCleanStats.collect(Collectors.toMap(Pair::getKey, Pair::getValue));
List<String> partitionsToBeDeleted = (table.getMetaClient().getTableConfig().isTablePartitioned() && (cleanerPlan.getPartitionsToBeDeleted() != null)) ? cleanerPlan.getPartitionsToBeDeleted() : new ArrayList<>();
partitionsToBeDeleted.forEach(entry -> {
try {
if (!isNullOrEmpty(entry)) {
deleteFileAndGetResult(table.getMetaClient().getFs(), (table.getMetaClient().getBasePath() + "/") + entry);
}
} catch (IOException e) {
LOG.warn("Partition deletion failed " + entry);
}
});
// Return PartitionCleanStat for each partition passed.
return cleanerPlan.getFilePathsToBeDeletedPerPartition().keySet().stream().map(partitionPath -> {
PartitionCleanStat partitionCleanStat = (partitionCleanStatsMap.containsKey(partitionPath)) ? partitionCleanStatsMap.get(partitionPath) : new PartitionCleanStat(partitionPath);
HoodieActionInstant actionInstant = cleanerPlan.getEarliestInstantToRetain();
return HoodieCleanStat.newBuilder().withPolicy(config.getCleanerPolicy()).withPartitionPath(partitionPath).withEarliestCommitRetained(Option.ofNullable(actionInstant != null ? new HoodieInstant(HoodieInstant.State.valueOf(actionInstant.getState()), actionInstant.getAction(), actionInstant.getTimestamp()) : null)).withLastCompletedCommitTimestamp(cleanerPlan.getLastCompletedCommitTimestamp()).withDeletePathPattern(partitionCleanStat.deletePathPatterns()).withSuccessfulDeletes(partitionCleanStat.successDeleteFiles()).withFailedDeletes(partitionCleanStat.failedDeleteFiles()).withDeleteBootstrapBasePathPatterns(partitionCleanStat.getDeleteBootstrapBasePathPatterns()).withSuccessfulDeleteBootstrapBaseFiles(partitionCleanStat.getSuccessfulDeleteBootstrapBaseFiles()).withFailedDeleteBootstrapBaseFiles(partitionCleanStat.getFailedDeleteBootstrapBaseFiles()).isPartitionDeleted(partitionsToBeDeleted.contains(partitionPath)).build();
}).collect(Collectors.toList());
}
| 3.26 |
hudi_JavaExecutionStrategy_readRecordsForGroup_rdh
|
/**
* Get a list of all records for the group. This includes all records from file slice
* (Apply updates from log files, if any).
*/
private List<HoodieRecord<T>> readRecordsForGroup(HoodieClusteringGroup clusteringGroup, String instantTime) {
List<ClusteringOperation> clusteringOps = clusteringGroup.getSlices().stream().map(ClusteringOperation::create).collect(Collectors.toList());
boolean hasLogFiles = clusteringOps.stream().anyMatch(op -> op.getDeltaFilePaths().size() > 0);
if (hasLogFiles) {
// if there are log files, we read all records into memory for a file group and apply updates.
return readRecordsForGroupWithLogs(clusteringOps, instantTime);
} else {
// We want to optimize reading records for case there are no log files.
return readRecordsForGroupBaseFiles(clusteringOps);
}
}
| 3.26 |
hudi_JavaExecutionStrategy_runClusteringForGroup_rdh
|
/**
* Executes clustering for the group.
 */
private List<WriteStatus> runClusteringForGroup(HoodieClusteringGroup clusteringGroup, Map<String, String> strategyParams, boolean preserveHoodieMetadata, String instantTime) {
List<HoodieRecord<T>> inputRecords = readRecordsForGroup(clusteringGroup, instantTime);
Schema readerSchema = HoodieAvroUtils.addMetadataFields(new Schema.Parser().parse(getWriteConfig().getSchema()));
List<HoodieFileGroupId> inputFileIds = clusteringGroup.getSlices().stream().map(info -> new HoodieFileGroupId(info.getPartitionPath(), info.getFileId())).collect(Collectors.toList());
return performClusteringWithRecordList(inputRecords, clusteringGroup.getNumOutputFileGroups(), instantTime, strategyParams, readerSchema, inputFileIds, preserveHoodieMetadata);
}
| 3.26 |
hudi_JavaExecutionStrategy_getPartitioner_rdh
|
/**
* Create {@link BulkInsertPartitioner} based on strategy params.
*
* @param strategyParams
* Strategy parameters containing columns to sort the data by when clustering.
* @param schema
* Schema of the data including metadata fields.
* @return partitioner for the java engine
*/
protected BulkInsertPartitioner<List<HoodieRecord<T>>> getPartitioner(Map<String, String> strategyParams, Schema schema) {
if (strategyParams.containsKey(PLAN_STRATEGY_SORT_COLUMNS.key())) {
return new JavaCustomColumnsSortPartitioner(strategyParams.get(PLAN_STRATEGY_SORT_COLUMNS.key()).split(","), HoodieAvroUtils.addMetadataFields(schema), getWriteConfig());
} else {
return JavaBulkInsertInternalPartitionerFactory.get(getWriteConfig().getBulkInsertSortMode());
}
}
| 3.26 |
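A hedged sketch of selecting the sort-based partitioner via strategy params, as if called from inside the strategy; the column names are hypothetical and `schema` is assumed to be in scope.

// Sort columns present -> custom-columns sort partitioner; otherwise the default bulk-insert partitioner is used.
Map<String, String> strategyParams = new HashMap<>();
strategyParams.put(PLAN_STRATEGY_SORT_COLUMNS.key(), "ts,rider");
BulkInsertPartitioner<List<HoodieRecord<T>>> partitioner = getPartitioner(strategyParams, schema);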
hudi_JavaExecutionStrategy_readRecordsForGroupBaseFiles_rdh
|
/**
* Read records from baseFiles.
*/
private List<HoodieRecord<T>> readRecordsForGroupBaseFiles(List<ClusteringOperation> clusteringOps) {
List<HoodieRecord<T>> records = new ArrayList<>();
clusteringOps.forEach(clusteringOp -> {
try (HoodieFileReader baseFileReader = HoodieFileReaderFactory.getReaderFactory(recordType).getFileReader(getHoodieTable().getHadoopConf(), new Path(clusteringOp.getDataFilePath()))) {
Schema readerSchema = HoodieAvroUtils.addMetadataFields(new Schema.Parser().parse(getWriteConfig().getSchema()));
Iterator<HoodieRecord> recordIterator = baseFileReader.getRecordIterator(readerSchema);
// NOTE: Record have to be cloned here to make sure if it holds low-level engine-specific
// payload pointing into a shared, mutable (underlying) buffer we get a clean copy of
// it since these records will be put into the records(List).
recordIterator.forEachRemaining(record -> records.add(record.copy().wrapIntoHoodieRecordPayloadWithKeyGen(readerSchema, new Properties(), Option.empty())));
} catch (IOException e) {
throw new HoodieClusteringException("Error reading input data for " + clusteringOp.getDataFilePath() + " and " + clusteringOp.getDeltaFilePaths(), e);
}
});
return records;
}
| 3.26 |
hudi_JavaExecutionStrategy_readRecordsForGroupWithLogs_rdh
|
/**
* Read records from baseFiles and apply updates.
*/
private List<HoodieRecord<T>> readRecordsForGroupWithLogs(List<ClusteringOperation> clusteringOps, String instantTime) {
HoodieWriteConfig config = getWriteConfig();
HoodieTable table = getHoodieTable();
List<HoodieRecord<T>> v9 = new ArrayList<>();
clusteringOps.forEach(clusteringOp -> {
long maxMemoryPerCompaction = IOUtils.getMaxMemoryPerCompaction(new JavaTaskContextSupplier(), config);
LOG.info("MaxMemoryPerCompaction run as part of clustering => " + maxMemoryPerCompaction);
Option<HoodieFileReader> baseFileReader = Option.empty();
HoodieMergedLogRecordScanner scanner = null;
try {
Schema readerSchema = HoodieAvroUtils.addMetadataFields(new Schema.Parser().parse(config.getSchema()));
scanner = HoodieMergedLogRecordScanner.newBuilder().withFileSystem(table.getMetaClient().getFs()).withBasePath(table.getMetaClient().getBasePath()).withLogFilePaths(clusteringOp.getDeltaFilePaths()).withReaderSchema(readerSchema).withLatestInstantTime(instantTime).withMaxMemorySizeInBytes(maxMemoryPerCompaction).withReadBlocksLazily(config.getCompactionLazyBlockReadEnabled()).withReverseReader(config.getCompactionReverseLogReadEnabled()).withBufferSize(config.getMaxDFSStreamBufferSize()).withSpillableMapBasePath(config.getSpillableMapBasePath()).withPartition(clusteringOp.getPartitionPath()).withDiskMapType(config.getCommonConfig().getSpillableDiskMapType()).withBitCaskDiskMapCompressionEnabled(config.getCommonConfig().isBitCaskDiskMapCompressionEnabled()).withRecordMerger(config.getRecordMerger()).build();
baseFileReader = (StringUtils.isNullOrEmpty(clusteringOp.getDataFilePath())) ? Option.empty() : Option.of(HoodieFileReaderFactory.getReaderFactory(recordType).getFileReader(table.getHadoopConf(), new Path(clusteringOp.getDataFilePath())));
HoodieTableConfig tableConfig = table.getMetaClient().getTableConfig();
Iterator<HoodieRecord<T>> fileSliceReader = new HoodieFileSliceReader(baseFileReader,
scanner, readerSchema,
tableConfig.getPreCombineField(), writeConfig.getRecordMerger(), tableConfig.getProps(), tableConfig.populateMetaFields() ? Option.empty() : Option.of(Pair.of(tableConfig.getRecordKeyFieldProp(), tableConfig.getPartitionFieldProp())));
fileSliceReader.forEachRemaining(v9::add);
} catch (IOException e) {
throw new HoodieClusteringException("Error reading input data for " + clusteringOp.getDataFilePath()
+ " and " + clusteringOp.getDeltaFilePaths(), e);
} finally {
if (scanner != null) {
scanner.close();
}
if (baseFileReader.isPresent()) {
baseFileReader.get().close();
}
}
});
return v9;
}
| 3.26 |
hudi_AbstractColumnReader_supportLazyDecode_rdh
|
/**
* Support lazy dictionary ids decode. See more in {@link ParquetDictionary}.
* If return false, we will decode all the data first.
*/
protected boolean supportLazyDecode() {
return true;
}
| 3.26 |
hudi_AbstractColumnReader_readToVector_rdh
|
/**
* Reads `total` values from this columnReader into column.
*/
@Override
public final void readToVector(int readNumber, V vector) throws IOException {
int rowId = 0;
WritableIntVector dictionaryIds = null;
if (dictionary != null) {
dictionaryIds = vector.reserveDictionaryIds(readNumber);
}
while (readNumber > 0) {
// Compute the number of values we want to read in this page.
int leftInPage = ((int) (endOfPageValueCount - valuesRead));
if (leftInPage == 0) {
DataPage page = pageReader.readPage();
if (page instanceof DataPageV1) {
readPageV1(((DataPageV1) (page)));
} else if (page instanceof DataPageV2) {
readPageV2(((DataPageV2) (page)));
} else {
throw new RuntimeException("Unsupported page type: " + page.getClass());
}
leftInPage = ((int) (endOfPageValueCount - valuesRead));
}
int num = Math.min(readNumber, leftInPage);
if (isCurrentPageDictionaryEncoded) {
// Read and decode dictionary ids.
runLenDecoder.readDictionaryIds(num, dictionaryIds, vector, rowId, maxDefLevel, this.dictionaryIdsDecoder);
if (vector.hasDictionary() || ((rowId == 0) && supportLazyDecode())) {
// Column vector supports lazy decoding of dictionary values so just set the dictionary.
// We can't do this if rowId != 0 AND the column doesn't have a dictionary (i.e. some
// non-dictionary encoded values have already been added).
vector.setDictionary(new ParquetDictionary(dictionary));
} else {
readBatchFromDictionaryIds(rowId, num, vector, dictionaryIds);
}
} else {
if (vector.hasDictionary() && (rowId != 0)) {
// This batch already has dictionary encoded values but this new page is not. The batch
// does not support a mix of dictionary and not so we will decode the dictionary.
readBatchFromDictionaryIds(0, rowId, vector, vector.getDictionaryIds());
}
vector.setDictionary(null);
readBatch(rowId, num, vector);
}
valuesRead += num;
rowId += num;
readNumber -= num;
}
}
| 3.26 |
hudi_HoodieRecordGlobalLocation_copy_rdh
|
/**
* Copy the location with given partition path.
*/
public HoodieRecordGlobalLocation copy(String partitionPath) {
return new HoodieRecordGlobalLocation(partitionPath, instantTime, fileId, position);
}
| 3.26 |
hudi_HoodieRecordGlobalLocation_fromLocal_rdh
|
/**
* Returns the global record location from local.
*/
public static HoodieRecordGlobalLocation fromLocal(String partitionPath, HoodieRecordLocation localLoc) {
return new HoodieRecordGlobalLocation(partitionPath, localLoc.getInstantTime(), localLoc.getFileId());
}
| 3.26 |
hudi_HoodieRecordGlobalLocation_toLocal_rdh
|
/**
* Returns the record location as local.
*/
public HoodieRecordLocation toLocal(String instantTime) {
return new HoodieRecordLocation(instantTime, fileId, position);
}
| 3.26 |
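A short usage sketch for the conversions above. Only the calls visible in these snippets are relied on; the package locations in the imports and the long-valued third constructor argument (the position seen in toLocal) are assumptions.

import org.apache.hudi.common.model.HoodieRecordGlobalLocation;
import org.apache.hudi.common.model.HoodieRecordLocation;

public class RecordLocationRoundTrip {
  public static void main(String[] args) {
    // Assumes HoodieRecordLocation(instantTime, fileId, position) with a long position,
    // matching the constructor used in toLocal(...) above.
    HoodieRecordLocation local = new HoodieRecordLocation("20240101120000", "file-group-1", 0L);
    // Promote to a global location by attaching the partition path.
    HoodieRecordGlobalLocation global = HoodieRecordGlobalLocation.fromLocal("2024/01/01", local);
    // Re-scope back to a partition-local location under a newer instant time.
    HoodieRecordLocation rescoped = global.toLocal("20240102120000");
    System.out.println(global + " -> " + rescoped);
  }
}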
hudi_PostgresDebeziumAvroPayload_containsBytesToastedValues_rdh
|
/**
* Returns true if a column is either of type bytes or a union of one or more bytes that contain a debezium toasted value.
*
* @param incomingRecord
* The incoming avro record
* @param field
* the column of interest
* @return true if the column is a bytes-typed field (or a union containing bytes) whose value equals the Debezium toasted-value placeholder.
*/
private boolean containsBytesToastedValues(IndexedRecord incomingRecord, Schema.Field field) {
return ((field.schema().getType() == Type.BYTES
|| (field.schema().getType() == Type.UNION && field.schema().getTypes().stream().anyMatch(s -> s.getType() == Schema.Type.BYTES)))
// Check length first as an optimization
&& ((ByteBuffer) ((GenericData.Record) incomingRecord).get(field.name())).array().length == DEBEZIUM_TOASTED_VALUE.length())
&& DEBEZIUM_TOASTED_VALUE.equals(new String(((ByteBuffer) ((GenericData.Record) incomingRecord).get(field.name())).array(), StandardCharsets.UTF_8));
}
| 3.26 |
hudi_PostgresDebeziumAvroPayload_containsStringToastedValues_rdh
|
/**
* Returns true if a column is either of type string or a union of one or more strings that contain a debezium toasted value.
*
* @param incomingRecord
* The incoming avro record
* @param field
* the column of interest
* @return true if the column is a string-typed field (or a union containing string) whose value equals the Debezium toasted-value placeholder.
*/
private boolean containsStringToastedValues(IndexedRecord incomingRecord, Schema.Field field) {
return ((field.schema().getType() == Type.STRING
|| (field.schema().getType() == Type.UNION && field.schema().getTypes().stream().anyMatch(s -> s.getType() == Schema.Type.STRING)))
// Check length first as an optimization
&& ((CharSequence) ((GenericData.Record) incomingRecord).get(field.name())).length() == DEBEZIUM_TOASTED_VALUE.length())
&& DEBEZIUM_TOASTED_VALUE.equals(((CharSequence) ((GenericData.Record) incomingRecord).get(field.name())).toString());
}
| 3.26 |
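For context, Debezium replaces unchanged Postgres TOAST columns with a sentinel string instead of re-emitting the large value, and the payload above skips such fields so the value already stored in Hudi is retained. Below is a standalone sketch of the same length-then-equals check on a plain string; the sentinel literal is an assumption (Debezium's default placeholder), not taken from the snippet.

import java.nio.charset.StandardCharsets;

public class ToastedValueCheckSketch {
  // Assumed sentinel; Debezium's default "unavailable value" placeholder.
  static final String DEBEZIUM_TOASTED_VALUE = "__debezium_unavailable_value";

  // Mirrors the cheap length check followed by an equality check used above for string columns.
  static boolean isToasted(CharSequence value) {
    return value != null
        && value.length() == DEBEZIUM_TOASTED_VALUE.length()
        && DEBEZIUM_TOASTED_VALUE.contentEquals(value);
  }

  public static void main(String[] args) {
    System.out.println(isToasted("__debezium_unavailable_value")); // true -> keep the existing column value
    System.out.println(isToasted("regular payload"));              // false -> take the incoming value
    // The bytes variant above applies the same idea to a ByteBuffer decoded as UTF-8.
    byte[] bytes = "__debezium_unavailable_value".getBytes(StandardCharsets.UTF_8);
    System.out.println(isToasted(new String(bytes, StandardCharsets.UTF_8)));
  }
}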
hudi_ClusteringOperator_endInput_rdh
|
/**
* End input action for batch source.
*/
public void endInput() {
// no operation
}
| 3.26 |
hudi_ClusteringOperator_doClustering_rdh
|
// -------------------------------------------------------------------------
// Utilities
// -------------------------------------------------------------------------
private void doClustering(String instantTime, List<ClusteringOperation> clusteringOperations)
throws Exception {
BulkInsertWriterHelper writerHelper = new BulkInsertWriterHelper(this.conf, this.table, this.writeConfig, instantTime, this.f0, getRuntimeContext().getNumberOfParallelSubtasks(), getRuntimeContext().getAttemptNumber(), this.rowType, true);
Iterator<RowData> iterator;
if (clusteringOperations.stream().anyMatch(operation -> CollectionUtils.nonEmpty(operation.getDeltaFilePaths()))) {
// if there are log files, we read all records into memory for a file group and apply updates.
iterator = readRecordsForGroupWithLogs(clusteringOperations, instantTime);
} else {
// We want to optimize reading records for case there are no log files.
iterator = readRecordsForGroupBaseFiles(clusteringOperations);
}
if (this.sortClusteringEnabled) {
RowDataSerializer rowDataSerializer = new RowDataSerializer(rowType);
BinaryExternalSorter sorter = initSorter();
while (iterator.hasNext()) {
RowData rowData = iterator.next();
BinaryRowData binaryRowData = rowDataSerializer.toBinaryRow(rowData).copy();
sorter.write(binaryRowData);
}
BinaryRowData row = binarySerializer.createInstance();
while ((row = sorter.getIterator().next(row)) != null) {
writerHelper.write(row);
}
sorter.close();
} else {
while (iterator.hasNext()) {
writerHelper.write(iterator.next());
}
}
List<WriteStatus> v10 = writerHelper.getWriteStatuses(this.f0);
collector.collect(new ClusteringCommitEvent(instantTime, getFileIds(clusteringOperations), v10, this.f0));
writerHelper.close();
}
| 3.26 |
hudi_ClusteringOperator_readRecordsForGroupWithLogs_rdh
|
/**
* Read records from baseFiles, apply updates and convert to Iterator.
*/
@SuppressWarnings("unchecked")
private Iterator<RowData> readRecordsForGroupWithLogs(List<ClusteringOperation> clusteringOps, String instantTime) {
List<Iterator<RowData>> recordIterators = new ArrayList<>();
long maxMemoryPerCompaction = IOUtils.getMaxMemoryPerCompaction(new FlinkTaskContextSupplier(null), writeConfig);
LOG.info("MaxMemoryPerCompaction run as part of clustering => " + maxMemoryPerCompaction);
for (ClusteringOperation clusteringOp : clusteringOps) {
try {
Option<HoodieFileReader> v14 = StringUtils.isNullOrEmpty(clusteringOp.getDataFilePath())
? Option.empty()
: Option.of(HoodieFileReaderFactory.getReaderFactory(table.getConfig().getRecordMerger().getRecordType()).getFileReader(table.getHadoopConf(), new Path(clusteringOp.getDataFilePath())));
HoodieMergedLogRecordScanner scanner = HoodieMergedLogRecordScanner.newBuilder().withFileSystem(table.getMetaClient().getFs()).withBasePath(table.getMetaClient().getBasePath()).withLogFilePaths(clusteringOp.getDeltaFilePaths()).withReaderSchema(readerSchema).withLatestInstantTime(instantTime).withMaxMemorySizeInBytes(maxMemoryPerCompaction).withReadBlocksLazily(writeConfig.getCompactionLazyBlockReadEnabled()).withReverseReader(writeConfig.getCompactionReverseLogReadEnabled()).withBufferSize(writeConfig.getMaxDFSStreamBufferSize()).withSpillableMapBasePath(writeConfig.getSpillableMapBasePath()).withDiskMapType(writeConfig.getCommonConfig().getSpillableDiskMapType()).withBitCaskDiskMapCompressionEnabled(writeConfig.getCommonConfig().isBitCaskDiskMapCompressionEnabled()).withRecordMerger(writeConfig.getRecordMerger()).build();
HoodieTableConfig tableConfig = table.getMetaClient().getTableConfig();
HoodieFileSliceReader<? extends IndexedRecord> hoodieFileSliceReader = new HoodieFileSliceReader(v14, scanner, readerSchema, tableConfig.getPreCombineField(), writeConfig.getRecordMerger(), tableConfig.getProps(), tableConfig.populateMetaFields() ? Option.empty() : Option.of(Pair.of(tableConfig.getRecordKeyFieldProp(), tableConfig.getPartitionFieldProp())));
recordIterators.add(StreamSupport.stream(Spliterators.spliteratorUnknownSize(hoodieFileSliceReader, Spliterator.NONNULL), false).map(hoodieRecord -> {
try {
return this.transform(hoodieRecord.toIndexedRecord(readerSchema, new Properties()).get().getData());
} catch (IOException e) {
throw new HoodieIOException("Failed to read next record", e);
}
}).iterator());
} catch (IOException e) {
throw new HoodieClusteringException((("Error reading input data for " + clusteringOp.getDataFilePath())
+ " and ") + clusteringOp.getDeltaFilePaths(), e);
}
}
return new ConcatenatingIterator<>(recordIterators);
}
| 3.26 |
hudi_ClusteringOperator_m0_rdh
|
/**
* Transforms an Avro IndexedRecord into a Flink RowData using the avro-to-rowdata converter.
*/
private RowData m0(IndexedRecord indexedRecord) {
GenericRecord record = ((GenericRecord) (indexedRecord));
return ((RowData) (avroToRowDataConverter.convert(record)));
}
| 3.26 |
hudi_ClusteringOperator_readRecordsForGroupBaseFiles_rdh
|
/**
* Read records from baseFiles and get iterator.
*/
private Iterator<RowData> readRecordsForGroupBaseFiles(List<ClusteringOperation> clusteringOps) {
List<Iterator<RowData>> iteratorsForPartition = clusteringOps.stream().map(clusteringOp -> {
Iterable<IndexedRecord> indexedRecords = () -> {
try {
HoodieFileReaderFactory fileReaderFactory = HoodieFileReaderFactory.getReaderFactory(table.getConfig().getRecordMerger().getRecordType());
HoodieAvroFileReader v21 = (HoodieAvroFileReader) fileReaderFactory.getFileReader(table.getHadoopConf(), new Path(clusteringOp.getDataFilePath()));
return new CloseableMappingIterator<>(v21.getRecordIterator(readerSchema), HoodieRecord::getData);
} catch (IOException e) {
throw new HoodieClusteringException("Error reading input data for " + clusteringOp.getDataFilePath()
+ " and " + clusteringOp.getDeltaFilePaths(), e);
}
};
return StreamSupport.stream(indexedRecords.spliterator(), false).map(this::transform).iterator();
}).collect(Collectors.toList());
return new ConcatenatingIterator<>(iteratorsForPartition);
}
| 3.26 |
hudi_FileSystemBasedLockProvider_defaultLockPath_rdh
|
/**
* Returns the default lock file root path.
*
* <p>IMPORTANT: this path should be shared especially when there is engine cooperation.
*/
private static String defaultLockPath(String tablePath) {
return (tablePath + Path.SEPARATOR) + AUXILIARYFOLDER_NAME;
}
| 3.26 |
hudi_FileSystemBasedLockProvider_getLockConfig_rdh
|
/**
* Returns a filesystem based lock config with given table path.
*/
public static TypedProperties getLockConfig(String tablePath) {
TypedProperties props = new TypedProperties();
props.put(HoodieLockConfig.LOCK_PROVIDER_CLASS_NAME.key(), FileSystemBasedLockProvider.class.getName());
props.put(HoodieLockConfig.LOCK_ACQUIRE_WAIT_TIMEOUT_MS.key(), "2000");
props.put(HoodieLockConfig.FILESYSTEM_LOCK_EXPIRE.key(), "1");
props.put(HoodieLockConfig.LOCK_ACQUIRE_CLIENT_NUM_RETRIES.key(), "30");
props.put(HoodieLockConfig.FILESYSTEM_LOCK_PATH.key(), defaultLockPath(tablePath));
return props;
}
| 3.26 |
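A hedged usage sketch for the helper above: it only calls the static method shown and prints the resulting properties. The package names in the imports are assumptions about where these classes live; wiring the properties into a writer config is left as a comment because those APIs are not shown here.

import org.apache.hudi.common.config.TypedProperties;

public class FileSystemLockConfigExample {
  public static void main(String[] args) {
    // Builds filesystem-based lock properties rooted under the table's auxiliary folder.
    TypedProperties lockProps =
        org.apache.hudi.client.transaction.lock.FileSystemBasedLockProvider.getLockConfig("/tmp/hoodie/my_table");
    lockProps.forEach((k, v) -> System.out.println(k + " = " + v));
    // These properties would typically be merged into the writer's configuration
    // when multiple writers (or engines) share the same table path.
  }
}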
hudi_EmbeddedTimelineService_getOrStartEmbeddedTimelineService_rdh
|
/**
* Returns an existing embedded timeline service if one is running for the given configuration and reuse is enabled, or starts a new one.
*
* @param context
* The {@link HoodieEngineContext} for the client
* @param embeddedTimelineServiceHostAddr
* The host address to use for the service (nullable)
* @param writeConfig
* The {@link HoodieWriteConfig} for the client
* @return A running {@link EmbeddedTimelineService}
* @throws IOException
* if an error occurs while starting the service
*/
public static EmbeddedTimelineService getOrStartEmbeddedTimelineService(HoodieEngineContext context, String embeddedTimelineServiceHostAddr, HoodieWriteConfig writeConfig) throws IOException {
return getOrStartEmbeddedTimelineService(context, embeddedTimelineServiceHostAddr, writeConfig, TimelineService::new);
}
| 3.26 |
hudi_EmbeddedTimelineService_stopForBasePath_rdh
|
/**
* Stops the embedded timeline service for the given base path. If a timeline service is managing multiple tables, it will only be shutdown once all tables have been stopped.
*
* @param basePath
* For the table to stop the service for
*/
public void stopForBasePath(String basePath) {
synchronized(SERVICE_LOCK) {
basePaths.remove(basePath);
if (basePaths.isEmpty()) {
RUNNING_SERVICES.remove(timelineServiceIdentifier);
}
}
if (this.server != null) {
this.server.unregisterBasePath(basePath);
}
// continue rest of shutdown outside of the synchronized block to avoid excess blocking
if (basePaths.isEmpty() && (null != server)) {
LOG.info("Closing Timeline server");
this.server.close();
METRICS_REGISTRY.set(NUM_EMBEDDED_TIMELINE_SERVERS, NUM_SERVERS_RUNNING.decrementAndGet());
this.server = null;
this.viewManager = null;
LOG.info("Closed Timeline server");
}
}
| 3.26 |
hudi_EmbeddedTimelineService_addBasePath_rdh
|
/**
* Adds a new base path to the set that are managed by this instance.
*
* @param basePath
* the new base path to add
*/
private void addBasePath(String basePath) {
basePaths.add(basePath);
}
| 3.26 |
hudi_EmbeddedTimelineService_getRemoteFileSystemViewConfig_rdh
|
/**
* Retrieves proper view storage configs for remote clients to access this service.
*/
public FileSystemViewStorageConfig getRemoteFileSystemViewConfig() {
FileSystemViewStorageType viewStorageType = (writeConfig.getClientSpecifiedViewStorageConfig().shouldEnableBackupForRemoteFileSystemView()) ? FileSystemViewStorageType.REMOTE_FIRST : FileSystemViewStorageType.REMOTE_ONLY;
return FileSystemViewStorageConfig.newBuilder().withStorageType(viewStorageType).withRemoteServerHost(hostAddr).withRemoteServerPort(serverPort).withRemoteTimelineClientTimeoutSecs(writeConfig.getClientSpecifiedViewStorageConfig().getRemoteTimelineClientTimeoutSecs()).withRemoteTimelineClientRetry(writeConfig.getClientSpecifiedViewStorageConfig().isRemoteTimelineClientRetryEnabled()).withRemoteTimelineClientMaxRetryNumbers(writeConfig.getClientSpecifiedViewStorageConfig().getRemoteTimelineClientMaxRetryNumbers()).withRemoteTimelineInitialRetryIntervalMs(writeConfig.getClientSpecifiedViewStorageConfig().getRemoteTimelineInitialRetryIntervalMs()).withRemoteTimelineClientMaxRetryIntervalMs(writeConfig.getClientSpecifiedViewStorageConfig().getRemoteTimelineClientMaxRetryIntervalMs()).withRemoteTimelineClientRetryExceptions(writeConfig.getClientSpecifiedViewStorageConfig().getRemoteTimelineClientRetryExceptions()).build();
}
| 3.26 |
hudi_HoodieRecordMerger_partialMerge_rdh
|
/**
* Merges records which can contain partial updates, i.e., only subset of fields and values are
* present in the record representing the updates, and absent fields are not updated. The fields
* exist in older and newer records indicate the fields with changed values. When merging, only
* the changed fields should be included in the merging results.
* <p>
* For example, the reader schema is
* {[
* {"name":"id", "type":"string"},
* {"name":"ts", "type":"long"},
* {"name":"name", "type":"string"},
* {"name":"price", "type":"double"},
* {"name":"tags", "type":"string"}
* ]}
* The older and newer records can be (omitting Hudi meta fields):
* <p>
* (1) older (complete record update):
* id | ts | name | price | tags
* 1 | 10 | apple | 2.3 | fruit
* <p>
* newer (partial record update):
* ts | price
* 16 | 2.8
* <p>
* In this case, in the newer record, only "ts" and "price" fields are updated. With the default
* merging strategy, the newer record updates the older record and the merging result is
* <p>
* id | ts | name | price | tags
* 1 | 16 | apple | 2.8 | fruit
* <p>
* (2) older (partial record update):
* ts | price
* 10 | 2.8
* <p>
* newer (partial record update):
* ts | tags
* 16 | fruit,juicy
* <p>
* In this case, in the older record, only "ts" and "price" fields are updated. In the newer
* record, only "ts" and "tag" fields are updated. With the default merging strategy, all the
* changed fields should be included in the merging results.
* <p>
* ts | price | tags
* 16 | 2.8 | fruit,juicy
*
* @param older
* Older record.
* @param oldSchema
* Schema of the older record.
* @param newer
* Newer record.
* @param newSchema
* Schema of the newer record.
* @param readerSchema
* Reader schema containing all the fields to read. This is used to maintain
* the ordering of the fields of the merged record.
* @param props
* Configuration in {@link TypedProperties}.
* @return The merged record and schema.
* @throws IOException
* upon merging error.
*/
default Option<Pair<HoodieRecord, Schema>> partialMerge(HoodieRecord older, Schema oldSchema, HoodieRecord newer, Schema newSchema, Schema readerSchema, TypedProperties props) throws IOException {
throw new UnsupportedOperationException("Partial merging logic is not implemented.");
}
| 3.26 |
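The contract above is easiest to see on plain maps. The sketch below is not Hudi's merger; it is a standalone illustration of the documented semantics: fields present in the newer partial record win, and the result carries the union of changed fields ordered by the reader schema.

import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class PartialMergeSketch {
  // Merges two partial records: fields present in `newer` override `older`,
  // and the output keeps the reader-schema field order, as described above.
  static Map<String, Object> partialMerge(Map<String, Object> older,
                                          Map<String, Object> newer,
                                          List<String> readerFields) {
    Map<String, Object> merged = new LinkedHashMap<>();
    for (String field : readerFields) {
      if (newer.containsKey(field)) {
        merged.put(field, newer.get(field));
      } else if (older.containsKey(field)) {
        merged.put(field, older.get(field));
      }
      // Fields absent from both stay absent (they were never updated).
    }
    return merged;
  }

  public static void main(String[] args) {
    List<String> readerFields = Arrays.asList("id", "ts", "name", "price", "tags");
    Map<String, Object> older = new HashMap<>();
    older.put("ts", 10L);
    older.put("price", 2.8);
    Map<String, Object> newer = new HashMap<>();
    newer.put("ts", 16L);
    newer.put("tags", "fruit,juicy");
    // Expected, matching example (2) above: {ts=16, price=2.8, tags=fruit,juicy}
    System.out.println(partialMerge(older, newer, readerFields));
  }
}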
hudi_HoodieExampleDataGenerator_generateInserts_rdh
|
/**
* Generates new inserts, uniformly across the partition paths above. It also updates the list of existing keys.
*/
public List<HoodieRecord<T>> generateInserts(String commitTime, Integer n) {
return generateInsertsStream(commitTime, n).collect(Collectors.toList());
}
| 3.26 |
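A hedged usage sketch for the generator: the no-argument constructor, the HoodieAvroPayload type parameter, and the import package are assumptions about the examples module; the two method calls are the ones shown in these snippets.

import java.util.List;
import org.apache.hudi.common.model.HoodieAvroPayload;
import org.apache.hudi.common.model.HoodieRecord;
import org.apache.hudi.examples.common.HoodieExampleDataGenerator;

public class DataGeneratorExample {
  public static void main(String[] args) {
    // Assumed no-arg constructor; partition paths default to the generator's built-in set.
    HoodieExampleDataGenerator<HoodieAvroPayload> dataGen = new HoodieExampleDataGenerator<>();
    // 10 inserts spread uniformly across the default partition paths.
    List<HoodieRecord<HoodieAvroPayload>> inserts = dataGen.generateInserts("20240101120000", 10);
    // One update per previously generated key.
    List<HoodieRecord<HoodieAvroPayload>> updates = dataGen.generateUniqueUpdates("20240101130000");
    System.out.println(inserts.size() + " inserts, " + updates.size() + " updates");
  }
}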
hudi_HoodieExampleDataGenerator_generateInsertsOnPartition_rdh
|
/**
* Generates new inserts, across a single partition path. It also updates the list of existing keys.
*/
public List<HoodieRecord<T>> generateInsertsOnPartition(String commitTime, Integer n, String partitionPath) {
return generateInsertsStreamOnPartition(commitTime, n, partitionPath).collect(Collectors.toList());
}
| 3.26 |
hudi_HoodieExampleDataGenerator_generateRandomValue_rdh
|
/**
* Generates a new avro record of the above schema format, retaining the key if optionally provided.
*/
@SuppressWarnings("unchecked")
public T generateRandomValue(HoodieKey key, String commitTime) {
GenericRecord rec = generateGenericRecord(key.getRecordKey(), "rider-" + commitTime, "driver-" + commitTime, 0);
return ((T) (new HoodieAvroPayload(Option.of(rec))));
}
| 3.26 |
hudi_HoodieExampleDataGenerator_generateUniqueUpdates_rdh
|
/**
* Generates new updates, one for each of the existing keys tracked by this generator.
*
* @param commitTime
* Commit Timestamp
* @return list of hoodie record updates
*/
public List<HoodieRecord<T>> generateUniqueUpdates(String commitTime) {
List<HoodieRecord<T>> updates = new ArrayList<>();
for (int i = 0; i < numExistingKeys; i++) {
KeyPartition kp = existingKeys.get(i);
HoodieRecord<T> record = generateUpdateRecord(kp.key, commitTime);
updates.add(record);
}
return updates;
}
| 3.26 |
hudi_HoodieExampleDataGenerator_generateUpdates_rdh
|
/**
* Generates new updates, randomly distributed across the keys above. There can be duplicates within the returned
* list
*
* @param commitTime
* Commit Timestamp
* @param n
* Number of updates (including dups)
* @return list of hoodie record updates
*/
public List<HoodieRecord<T>> generateUpdates(String commitTime, Integer n) {
List<HoodieRecord<T>> updates = new ArrayList<>();
for (int i = 0; i < n; i++) {
KeyPartition kp = existingKeys.get(RAND.nextInt(numExistingKeys - 1));
HoodieRecord<T> record = generateUpdateRecord(kp.key, commitTime);
updates.add(record);
}
return updates;
}
| 3.26 |
hudi_HoodieExampleDataGenerator_generateInsertsStream_rdh
|
/**
* Generates new inserts, uniformly across the partition paths above. It also updates the list of existing keys.
*/
public Stream<HoodieRecord<T>> generateInsertsStream(String commitTime, Integer n) {
int currSize = getNumExistingKeys();
return IntStream.range(0, n).boxed().map(i -> {
String partitionPath = partitionPaths[RAND.nextInt(partitionPaths.length)];
HoodieKey key = new HoodieKey(UUID.randomUUID().toString(), partitionPath);
KeyPartition kp = new KeyPartition();
kp.key = key;
kp.partitionPath = partitionPath;
existingKeys.put(currSize + i, kp);
numExistingKeys++;
return new HoodieAvroRecord<>(key, generateRandomValue(key, commitTime));
});
}
| 3.26 |
hudi_HoodieExampleDataGenerator_generateInsertsStreamOnPartition_rdh
|
/**
* Generates new inserts, across a single partition path. It also updates the list of existing keys.
*/
public Stream<HoodieRecord<T>> generateInsertsStreamOnPartition(String commitTime, Integer n, String partitionPath) {
int currSize = getNumExistingKeys();
return IntStream.range(0, n).boxed().map(i -> {
HoodieKey key = new HoodieKey(UUID.randomUUID().toString(), partitionPath);
KeyPartition v8 = new KeyPartition();
v8.key = key;
v8.partitionPath = partitionPath;
existingKeys.put(currSize + i, v8);
numExistingKeys++;
return new HoodieAvroRecord<>(key, generateRandomValue(key, commitTime));
});
}
| 3.26 |
hudi_HoodieRepairTool_undoRepair_rdh
|
/**
* Undoes repair for UNDO mode.
*
* @throws IOException
* upon errors.
*/
boolean undoRepair() throws IOException {
FileSystem fs = metaClient.getFs();
String v28 = f1.backupPath;
Path backupPath = new Path(v28);
if (!fs.exists(backupPath)) {
f0.error("Cannot find backup path: " + backupPath);
return false;
}
List<String> allPartitionPaths = tableMetadata.getAllPartitionPaths();
if (allPartitionPaths.isEmpty()) {
f0.error("Cannot get one partition path since there is no partition available");
return false;
}
int partitionLevels = getExpectedLevelBasedOnPartitionPath(allPartitionPaths.get(0));
List<String> relativeFilePaths = listFilesFromBasePath(context, v28, partitionLevels, f1.parallelism).stream().map(filePath -> FSUtils.getRelativePartitionPath(new Path(v28), new Path(filePath))).collect(Collectors.toList());
return restoreFiles(relativeFilePaths);
}
| 3.26 |
hudi_HoodieRepairTool_deleteFiles_rdh
|
/**
* Deletes files from table base path.
*
* @param context
* {@link HoodieEngineContext} instance.
* @param basePath
* Base path of the table.
* @param relativeFilePaths
* A {@link List} of relative file paths for deleting.
*/
static boolean deleteFiles(HoodieEngineContext context, String basePath, List<String> relativeFilePaths) {
SerializableConfiguration conf = context.getHadoopConf();
return context.parallelize(relativeFilePaths).mapPartitions(iterator -> {
FileSystem fs = FSUtils.getFs(basePath, conf.get());
List<Boolean> results = new ArrayList<>();
iterator.forEachRemaining(relativeFilePath -> {
boolean success = false;
try {
success = fs.delete(new Path(basePath, relativeFilePath), false);
} catch (IOException e) {
f0.warn("Failed to delete file " + relativeFilePath);
} finally {
results.add(success);
}
});
return results.iterator();
}, true).collectAsList().stream().reduce((a, b) -> a && b).orElse(true);
}
| 3.26 |
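The aggregation at the end of the method above reduces the per-file outcomes with logical AND, so a single failed delete marks the whole batch as unsuccessful. A standalone sketch of that reduction using plain streams (no engine context involved):

import java.util.Arrays;
import java.util.List;

public class DeleteResultAggregationSketch {
  public static void main(String[] args) {
    // Per-file delete outcomes, e.g. collected from parallel workers.
    List<Boolean> results = Arrays.asList(true, true, false, true);
    // Same reduction as above: AND everything together, defaulting to true for an empty batch.
    boolean allDeleted = results.stream().reduce((a, b) -> a && b).orElse(true);
    System.out.println("all files deleted: " + allDeleted); // false
  }
}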