name | code_snippet | score
---|---|---|
hudi_ParquetUtils_filterParquetRowKeys_rdh
|
/**
* Read the rowKey list matching the given filter, from the given parquet file. If the filter is empty, then this will
* return all the rowkeys.
*
* @param filePath
* The parquet file path.
* @param configuration
* configuration to build fs object
* @param filter
* record keys filter
* @param readSchema
* schema of columns to be read
 * @return Set of pairs of row key and row position matching the record key filter
*/
private static Set<Pair<String, Long>> filterParquetRowKeys(Configuration configuration, Path filePath, Set<String> filter, Schema readSchema) {
Option<RecordKeysFilterFunction> v1 = Option.empty();
if ((filter != null) && (!filter.isEmpty())) {
v1 = Option.of(new RecordKeysFilterFunction(filter));
}
Configuration conf = new Configuration(configuration);
conf.addResource(FSUtils.getFs(filePath.toString(), conf).getConf());
AvroReadSupport.setAvroReadSchema(conf, readSchema);
AvroReadSupport.setRequestedProjection(conf, readSchema);
Set<Pair<String, Long>> rowKeys = new HashSet<>();
    long rowPosition = 0;
    try (ParquetReader reader = AvroParquetReader.builder(filePath).withConf(conf).build()) {
Object obj = reader.read();
while (obj != null) {
if (obj instanceof GenericRecord) {
String recordKey = ((GenericRecord) (obj)).get(HoodieRecord.RECORD_KEY_METADATA_FIELD).toString();
if ((!v1.isPresent()) || v1.get().apply(recordKey)) {
rowKeys.add(Pair.of(recordKey, rowPosition));
}
obj = reader.read();
rowPosition++;
}
}
} catch (IOException e) {
throw new HoodieIOException("Failed to read row keys from Parquet " + filePath, e);
}
return rowKeys;
}
| 3.26 |
hudi_ParquetUtils_readRangeFromParquetMetadata_rdh
|
/**
* Parse min/max statistics stored in parquet footers for all columns.
*/
@SuppressWarnings("rawtype")
  public List<HoodieColumnRangeMetadata<Comparable>> readRangeFromParquetMetadata(@Nonnull Configuration conf,
                                                                                  @Nonnull Path parquetFilePath,
                                                                                  @Nonnull List<String> cols) {
ParquetMetadata metadata = readMetadata(conf, parquetFilePath);
// NOTE: This collector has to have fully specialized generic type params since
// Java 1.8 struggles to infer them
Collector<HoodieColumnRangeMetadata<Comparable>, ?, Map<String, List<HoodieColumnRangeMetadata<Comparable>>>> groupingByCollector = Collectors.groupingBy(HoodieColumnRangeMetadata::getColumnName);
// Collect stats from all individual Parquet blocks
    Map<String, List<HoodieColumnRangeMetadata<Comparable>>> columnToStatsListMap =
        (Map<String, List<HoodieColumnRangeMetadata<Comparable>>>) metadata.getBlocks().stream().sequential()
            .flatMap(blockMetaData -> blockMetaData.getColumns().stream()
                .filter(f -> cols.contains(f.getPath().toDotString()))
                .map(columnChunkMetaData -> {
                  Statistics stats = columnChunkMetaData.getStatistics();
                  // NOTE: When a column contains only nulls, Parquet does not create stats for it and instead
                  //       returns a stubbed (empty) object. In that case we have to equate the number of nulls
                  //       to the value count ourselves.
                  return HoodieColumnRangeMetadata.<Comparable>create(
                      parquetFilePath.getName(),
                      columnChunkMetaData.getPath().toDotString(),
                      convertToNativeJavaType(columnChunkMetaData.getPrimitiveType(), stats.genericGetMin()),
                      convertToNativeJavaType(columnChunkMetaData.getPrimitiveType(), stats.genericGetMax()),
                      stats.isEmpty() ? columnChunkMetaData.getValueCount() : stats.getNumNulls(),
                      columnChunkMetaData.getValueCount(),
                      columnChunkMetaData.getTotalSize(),
                      columnChunkMetaData.getTotalUncompressedSize());
                }))
            .collect(groupingByCollector);
// Combine those into file-level statistics
// NOTE: Inlining this var makes javac (1.8) upset (due to its inability to infer
// expression type correctly)
Stream<HoodieColumnRangeMetadata<Comparable>> stream = columnToStatsListMap.values().stream().map(this::getColumnRangeInFile);
return stream.collect(Collectors.toList());
}
| 3.26 |
hudi_ParquetUtils_fetchRecordKeysWithPositions_rdh
|
/**
* Fetch {@link HoodieKey}s with row positions from the given parquet file.
*
* @param configuration
* configuration to build fs object
* @param filePath
* The parquet file path.
* @param keyGeneratorOpt
* instance of KeyGenerator.
* @return {@link List} of pairs of {@link HoodieKey} and row position fetched from the parquet file
*/
@Override
public List<Pair<HoodieKey, Long>> fetchRecordKeysWithPositions(Configuration configuration, Path filePath, Option<BaseKeyGenerator> keyGeneratorOpt) {
List<Pair<HoodieKey, Long>> hoodieKeysAndPositions = new ArrayList<>();
long position = 0;
try (ClosableIterator<HoodieKey> iterator = m0(configuration, filePath, keyGeneratorOpt)) {
      while (iterator.hasNext()) {
        hoodieKeysAndPositions.add(Pair.of(iterator.next(), position));
position++;
}
return hoodieKeysAndPositions;
}
}
| 3.26 |
hudi_ParquetUtils_getRowCount_rdh
|
/**
* Returns the number of records in the parquet file.
*
* @param conf
* Configuration
* @param parquetFilePath
* path of the file
*/
@Override
public long getRowCount(Configuration conf, Path parquetFilePath) {
ParquetMetadata footer;
long rowCount = 0;
footer = readMetadata(conf, parquetFilePath);
for (BlockMetaData b : footer.getBlocks()) {
rowCount += b.getRowCount();
}
return rowCount;
}
| 3.26 |
hudi_FlinkTables_createTable_rdh
|
/**
* Creates the hoodie flink table.
*
* <p>This expects to be used by driver.
*/
  public static HoodieFlinkTable<?> createTable(Configuration conf) {
HoodieWriteConfig writeConfig = FlinkWriteClients.getHoodieClientConfig(conf, true, false);
return HoodieFlinkTable.create(writeConfig, HoodieFlinkEngineContext.DEFAULT);
}
| 3.26 |
hudi_HoodieLSMTimelineManifest_toJsonString_rdh
|
// -------------------------------------------------------------------------
// Utilities
// -------------------------------------------------------------------------
public String toJsonString() throws IOException {
return JsonUtils.getObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(this);
}
| 3.26 |
hudi_HoodieWriteStat_setPath_rdh
|
/**
* Set path and tempPath relative to the given basePath.
*/
public void setPath(Path basePath, Path path) {
    this.path = path.toString().replace(basePath + "/", "");
}
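A minimal usage sketch of the relativization above (the stat instance, base path, and file path are hypothetical; getPath() is assumed to return the stored value):

// Hypothetical example: strip the base path prefix so only the relative path is stored.
HoodieWriteStat stat = new HoodieWriteStat();
Path basePath = new Path("/tmp/hoodie/trips");
Path filePath = new Path("/tmp/hoodie/trips/2021/08/13/file1.parquet");
stat.setPath(basePath, filePath);
// stat.getPath() would now return "2021/08/13/file1.parquet"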
| 3.26 |
hudi_EmbeddedTimelineServerHelper_createEmbeddedTimelineService_rdh
|
/**
* Instantiate Embedded Timeline Server.
*
* @param context
* Hoodie Engine Context
* @param config
* Hoodie Write Config
* @return TimelineServer if configured to run
* @throws IOException
*/
  public static Option<EmbeddedTimelineService> createEmbeddedTimelineService(HoodieEngineContext context, HoodieWriteConfig config) throws IOException {
    if (config.isEmbeddedTimelineServerEnabled()) {
Option<String> hostAddr = context.getProperty(EngineProperty.EMBEDDED_SERVER_HOST);
EmbeddedTimelineService timelineService = EmbeddedTimelineService.getOrStartEmbeddedTimelineService(context, hostAddr.orElse(null), config);
updateWriteConfigWithTimelineServer(timelineService, config);
return Option.of(timelineService);
} else {
return Option.empty();
}
}
| 3.26 |
hudi_SparkInsertOverwritePartitioner_getSmallFiles_rdh
|
/**
* Returns a list of small files in the given partition path.
*/
@Override
protected List<SmallFile> getSmallFiles(String partitionPath) {
// for overwrite, we ignore all existing files. So do not consider any file to be smallFiles
return Collections.emptyList();
}
| 3.26 |
hudi_CleanPlanActionExecutor_requestClean_rdh
|
/**
 * Creates a Cleaner plan if there are files to be cleaned and stores it in the instant file.
* Cleaner Plan contains absolute file paths.
*
* @param startCleanTime
* Cleaner Instant Time
* @return Cleaner Plan if generated
*/
protected Option<HoodieCleanerPlan> requestClean(String startCleanTime) {
final HoodieCleanerPlan cleanerPlan = requestClean(context);
Option<HoodieCleanerPlan> option = Option.empty();
if (nonEmpty(cleanerPlan.getFilePathsToBeDeletedPerPartition()) && (cleanerPlan.getFilePathsToBeDeletedPerPartition().values().stream().mapToInt(List::size).sum() > 0)) {
// Only create cleaner plan which does some work
final HoodieInstant cleanInstant = new HoodieInstant(State.REQUESTED, HoodieTimeline.CLEAN_ACTION, startCleanTime);
// Save to both aux and timeline folder
try {
table.getActiveTimeline().saveToCleanRequested(cleanInstant, TimelineMetadataUtils.serializeCleanerPlan(cleanerPlan));
LOG.info("Requesting Cleaning with instant time " + cleanInstant);
} catch (IOException e) {
LOG.error("Got exception when saving cleaner requested file", e);
throw new HoodieIOException(e.getMessage(), e);
}
option = Option.of(cleanerPlan);
}
return option;
}
| 3.26 |
hudi_HoodieDataBlock_list2Iterator_rdh
|
/**
* Converts the given list to closable iterator.
*/
static <T> ClosableIterator<T> list2Iterator(List<T> list) {
Iterator<T> iterator = list.iterator();
return new ClosableIterator<T>() {
@Override
public void close() {
// ignored
}
@Override
public boolean hasNext() {
return iterator.hasNext();
}
@Override
public T next() {
return iterator.next();
}
};
}
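A short usage sketch for the helper above, assuming it is invoked from code with access to this package-private method and the usual java.util imports:

// Wrap an in-memory list so it can be consumed through the ClosableIterator API.
List<String> keys = Arrays.asList("key1", "key2", "key3");
try (ClosableIterator<String> iterator = list2Iterator(keys)) {
  while (iterator.hasNext()) {
    System.out.println(iterator.next());
  }
}
// close() is a no-op here, so try-with-resources is safe but not strictly required.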
| 3.26 |
hudi_HoodieDataBlock_getRecordIterator_rdh
|
/**
* Batch get of keys of interest. Implementation can choose to either do full scan and return matched entries or
* do a seek based parsing and return matched entries.
*
* @param keys
* keys of interest.
* @return List of IndexedRecords for the keys of interest.
* @throws IOException
* in case of failures encountered when reading/parsing records
*/
public final <T> ClosableIterator<HoodieRecord<T>> getRecordIterator(List<String> keys, boolean fullKey, HoodieRecordType type) throws IOException {
boolean fullScan = keys.isEmpty();
if (enablePointLookups && (!fullScan)) {
return lookupRecords(keys, fullKey);
}
    // Otherwise, we fetch all the records and filter out everything except the requested keys
ClosableIterator<HoodieRecord<T>> allRecords = getRecordIterator(type);
if (fullScan) {
return allRecords;
}
HashSet<String> keySet = new HashSet<>(keys);
return FilteringIterator.getInstance(allRecords, keySet, fullKey, this::getRecordKey);
}
| 3.26 |
hudi_HoodieDataBlock_getEngineRecordIterator_rdh
|
/**
* Batch get of keys of interest. Implementation can choose to either do full scan and return matched entries or
* do a seek based parsing and return matched entries.
*
* @param readerContext
* {@link HoodieReaderContext} instance with type T.
* @param keys
* Keys of interest.
* @param fullKey
* Whether the key is full or not.
* @param <T>
* The type of engine-specific record representation to return.
* @return An iterator containing the records of interest in specified type.
*/
  public final <T> ClosableIterator<T> getEngineRecordIterator(HoodieReaderContext<T> readerContext, List<String> keys, boolean fullKey) {
boolean fullScan = keys.isEmpty();
    // Otherwise, we fetch all the records and filter out everything except the requested keys
ClosableIterator<T> allRecords = getEngineRecordIterator(readerContext);
if (fullScan) {
return allRecords;
}
    HashSet<String> keySet = new HashSet<>(keys);
return FilteringEngineRecordIterator.getInstance(allRecords, keySet, fullKey, record -> Option.of(readerContext.getValue(record, readerSchema, keyFieldName).toString()));
}
| 3.26 |
hudi_HoodieFileGroup_getLatestFileSlice_rdh
|
/**
 * Gets the latest file slice. This can contain either:
 * <p>
 * - just the log files without a data file, or
 * - a data file with 0 or more log files
*/
public Option<FileSlice> getLatestFileSlice() {
// there should always be one
return Option.fromJavaOptional(getAllFileSlices().findFirst());
}
| 3.26 |
hudi_HoodieFileGroup_addLogFile_rdh
|
/**
* Add a new log file into the group.
*
* <p>CAUTION: the log file must be added in sequence of the delta commit time.
*/
public void addLogFile(CompletionTimeQueryView completionTimeQueryView, HoodieLogFile logFile) {
String baseInstantTime = getBaseInstantTime(completionTimeQueryView, logFile);
if (!fileSlices.containsKey(baseInstantTime)) {
fileSlices.put(baseInstantTime, new FileSlice(fileGroupId, baseInstantTime));
}
fileSlices.get(baseInstantTime).addLogFile(logFile);
}
| 3.26 |
hudi_HoodieFileGroup_addNewFileSliceAtInstant_rdh
|
/**
 * Potentially add a new file-slice by adding a base-instant time. A file-slice without any data-file and log-files
 * can exist (if a compaction just got requested).
*/
public void addNewFileSliceAtInstant(String baseInstantTime) {
if (!fileSlices.containsKey(baseInstantTime)) {
fileSlices.put(baseInstantTime, new FileSlice(fileGroupId, baseInstantTime));
}
}
| 3.26 |
hudi_HoodieFileGroup_getAllFileSlices_rdh
|
/**
* Provides a stream of committed file slices, sorted reverse base commit time.
*/
public Stream<FileSlice> getAllFileSlices() {
if (!timeline.empty()) {
return fileSlices.values().stream().filter(this::isFileSliceCommitted);
}
return Stream.empty();
}
| 3.26 |
hudi_HoodieFileGroup_getAllFileSlicesIncludingInflight_rdh
|
/**
* Get all the file slices including in-flight ones as seen in underlying file system.
*/
  public Stream<FileSlice> getAllFileSlicesIncludingInflight() {
return fileSlices.values().stream();
}
| 3.26 |
hudi_HoodieFileGroup_getLatestFileSlicesIncludingInflight_rdh
|
/**
* Get the latest file slices including inflight ones.
 */
  public Option<FileSlice> getLatestFileSlicesIncludingInflight() {
return Option.fromJavaOptional(getAllFileSlicesIncludingInflight().findFirst());
}
| 3.26 |
hudi_HoodieFileGroup_isFileSliceCommitted_rdh
|
/**
* A FileSlice is considered committed, if one of the following is true - There is a committed data file - There are
* some log files, that are based off a commit or delta commit.
*/
private boolean isFileSliceCommitted(FileSlice slice) {
if (!compareTimestamps(slice.getBaseInstantTime(), LESSER_THAN_OR_EQUALS, lastInstant.get().getTimestamp())) {
return false;
}
return timeline.containsOrBeforeTimelineStarts(slice.getBaseInstantTime());
}
| 3.26 |
hudi_HoodieFileGroup_addBaseFile_rdh
|
/**
* Add a new datafile into the file group.
*/
public void addBaseFile(HoodieBaseFile dataFile) {
if (!fileSlices.containsKey(dataFile.getCommitTime())) {
fileSlices.put(dataFile.getCommitTime(), new FileSlice(fileGroupId, dataFile.getCommitTime()));
}
    fileSlices.get(dataFile.getCommitTime()).setBaseFile(dataFile);
  }
| 3.26 |
hudi_HoodieFileGroup_getLatestFileSliceBefore_rdh
|
/**
 * Obtain the latest file slice, up to an instantTime, i.e., < maxInstantTime.
*
* @param maxInstantTime
* Max Instant Time
* @return the latest file slice
 */
  public Option<FileSlice> getLatestFileSliceBefore(String maxInstantTime) {
return Option.fromJavaOptional(getAllFileSlices().filter(slice -> compareTimestamps(slice.getBaseInstantTime(), LESSER_THAN, maxInstantTime)).findFirst());
}
| 3.26 |
hudi_HoodieFileGroup_getAllBaseFiles_rdh
|
/**
* Stream of committed data files, sorted reverse commit time.
*/
public Stream<HoodieBaseFile> getAllBaseFiles() {
return getAllFileSlices().filter(slice -> slice.getBaseFile().isPresent()).map(slice -> slice.getBaseFile().get());
}
| 3.26 |
hudi_HoodieFileGroup_getLatestFileSliceBeforeOrOn_rdh
|
/**
 * Obtain the latest file slice, up to an instantTime, i.e., <= maxInstantTime.
*/
public Option<FileSlice> getLatestFileSliceBeforeOrOn(String maxInstantTime) {
return Option.fromJavaOptional(getAllFileSlices().filter(slice -> compareTimestamps(slice.getBaseInstantTime(), LESSER_THAN_OR_EQUALS, maxInstantTime)).findFirst());
}
| 3.26 |
hudi_HoodieIndexID_isPartition_rdh
|
/**
* Is this ID a Partition type ?
*
 * @return True if this ID is of PartitionID type
*/
public final boolean isPartition() {
return getType() == Type.PARTITION;
}
| 3.26 |
hudi_HoodieIndexID_isFileID_rdh
|
/**
* Is this ID a FileID type ?
*
 * @return True if this ID is of FileID type
*/
public final boolean isFileID() {
return getType() == Type.FILE;
}
| 3.26 |
hudi_HoodieIndexID_asBase64EncodedString_rdh
|
/**
* Get the Base64 encoded version of the ID.
*/
public String asBase64EncodedString() {
throw new HoodieNotSupportedException("Unsupported hash for " + getType());
}
| 3.26 |
hudi_HoodieIndexID_isColumnID_rdh
|
/**
* Is this ID a ColumnID type ?
*
 * @return True if this ID is of ColumnID type
*/
public final boolean isColumnID() {
return getType() == Type.COLUMN;
}
| 3.26 |
hudi_HoodieMultiTableStreamer_populateTableExecutionContextList_rdh
|
// commonProps are passed as parameter which contain table to config file mapping
private void populateTableExecutionContextList(TypedProperties properties, String configFolder, FileSystem fs, Config config)
throws IOException {
    List<String> tablesToBeIngested = getTablesToBeIngested(properties);
    logger.info("tables to be ingested via MultiTableDeltaStreamer : " + tablesToBeIngested);
    TableExecutionContext executionContext;
    for (String table : tablesToBeIngested) {
      String[] tableWithDatabase = table.split("\\.");
      String database = (tableWithDatabase.length > 1) ? tableWithDatabase[0] : "default";
      String currentTable = (tableWithDatabase.length > 1) ? tableWithDatabase[1] : table;
      String configProp = HoodieStreamerConfig.INGESTION_PREFIX + database + Constants.DELIMITER + currentTable + Constants.INGESTION_CONFIG_SUFFIX;
      String oldConfigProp = HoodieStreamerConfig.OLD_INGESTION_PREFIX + database + Constants.DELIMITER + currentTable + Constants.INGESTION_CONFIG_SUFFIX;
      String configFilePath = getStringWithAltKeys(properties, configProp, oldConfigProp, Helpers.getDefaultConfigFilePath(configFolder, database, currentTable));
      checkIfTableConfigFileExists(configFolder, fs, configFilePath);
      TypedProperties tableProperties = UtilHelpers.readConfig(fs.getConf(), new Path(configFilePath), new ArrayList<>()).getProps();
      properties.forEach((k, v) -> {
        if (tableProperties.get(k) == null) {
          tableProperties.setProperty(k.toString(), v.toString());
        }
      });
      final HoodieStreamer.Config cfg = new HoodieStreamer.Config();
      // copy all the values from config to cfg
      String targetBasePath = resetTarget(config, database, currentTable);
      Helpers.deepCopyConfigs(config, cfg);
      String overriddenTargetBasePath = getStringWithAltKeys(tableProperties, HoodieStreamerConfig.TARGET_BASE_PATH, true);
      cfg.targetBasePath = StringUtils.isNullOrEmpty(overriddenTargetBasePath) ? targetBasePath : overriddenTargetBasePath;
      if (cfg.enableMetaSync && StringUtils.isNullOrEmpty(tableProperties.getString(HoodieSyncConfig.META_SYNC_TABLE_NAME.key(), ""))) {
        throw new HoodieException("Meta sync table field not provided!");
      }
      populateTransformerProps(cfg, tableProperties);
populateSchemaProviderProps(cfg, tableProperties);
executionContext = new TableExecutionContext();
executionContext.setProperties(tableProperties);
executionContext.setConfig(cfg);
executionContext.setDatabase(database);
executionContext.setTableName(currentTable);
this.tableExecutionContexts.add(executionContext);
}
}
| 3.26 |
hudi_HoodieMultiTableStreamer_sync_rdh
|
/**
* Creates actual HoodieDeltaStreamer objects for every table/topic and does incremental sync.
*/
public void sync() {
    for (TableExecutionContext context : tableExecutionContexts) {
      try {
        new HoodieStreamer(context.getConfig(), jssc, Option.ofNullable(context.getProperties())).sync();
        successTables.add(Helpers.getTableWithDatabase(context));
      } catch (Exception e) {
        logger.error("error while running MultiTableDeltaStreamer for table: " + context.getTableName(), e);
        failedTables.add(Helpers.getTableWithDatabase(context));
      }
    }
    logger.info("Ingestion was successful for topics: " + successTables);
    if (!failedTables.isEmpty()) {
      logger.info("Ingestion failed for topics: " + failedTables);
    }
}
| 3.26 |
hudi_HoodieAvroHFileReader_getSharedHFileReader_rdh
|
/**
* Instantiates the shared HFile reader if not instantiated
*
* @return the shared HFile reader
*/
private Reader getSharedHFileReader() {
if (!sharedReader.isPresent()) {
synchronized(sharedLock) {
if (!sharedReader.isPresent()) {
sharedReader = Option.of(getHFileReader());
}
}
}
return sharedReader.get();
}
| 3.26 |
hudi_HoodieAvroHFileReader_filterRowKeys_rdh
|
/**
* Filter keys by availability.
* <p>
* Note: This method is performant when the caller passes in a sorted candidate keys.
*
* @param candidateRowKeys
* - Keys to check for the availability
* @return Subset of candidate keys that are available
*/
@Override
public Set<Pair<String, Long>> filterRowKeys(Set<String> candidateRowKeys) {
// candidateRowKeys must be sorted
SortedSet<String> sortedCandidateRowKeys = new TreeSet<>(candidateRowKeys);
synchronized(sharedLock) {
if (!sharedScanner.isPresent()) {
// For shared scanner, which is primarily used for point-lookups, we're caching blocks
// by default, to minimize amount of traffic to the underlying storage
sharedScanner = Option.of(getHFileScanner(getSharedHFileReader(), true));
}
      // Record position is not supported for HFile
      return sortedCandidateRowKeys.stream()
          .filter(k -> {
            try {
              return isKeyAvailable(k, sharedScanner.get());
            } catch (IOException e) {
              LOG.error("Failed to check key availability: " + k);
              return false;
            }
          })
          .map(key -> Pair.of(key, HoodieRecordLocation.INVALID_POSITION))
          .collect(Collectors.toSet());
}
}
| 3.26 |
hudi_HoodieAvroHFileReader_getHFileReader_rdh
|
/**
* Instantiate a new reader for HFile files.
*
* @return an instance of {@link HFile.Reader}
*/
private Reader getHFileReader() {
if (content.isPresent()) {
return HoodieHFileUtils.createHFileReader(fs, path, content.get());
}
return HoodieHFileUtils.createHFileReader(fs, path, config, hadoopConf);
}
| 3.26 |
hudi_HoodieAvroHFileReader_readRecords_rdh
|
/**
* NOTE: THIS SHOULD ONLY BE USED FOR TESTING, RECORDS ARE MATERIALIZED EAGERLY
* <p>
* Reads all the records with given schema and filtering keys.
*/
public static List<IndexedRecord> readRecords(HoodieAvroHFileReader reader, List<String> keys, Schema schema) throws IOException {
Collections.sort(keys);
return toStream(reader.getIndexedRecordsByKeysIterator(keys, schema)).collect(Collectors.toList());
}
| 3.26 |
hudi_SparkRDDReadClient_tagLocation_rdh
|
/**
* Looks up the index and tags each incoming record with a location of a file that contains the row (if it is actually
* present). Input RDD should contain no duplicates if needed.
*
* @param hoodieRecords
* Input RDD of Hoodie records
* @return Tagged RDD of Hoodie records
 */
  public JavaRDD<HoodieRecord<T>> tagLocation(JavaRDD<HoodieRecord<T>> hoodieRecords) throws HoodieIndexException {
    return HoodieJavaRDD.getJavaRDD(index.tagLocation(HoodieJavaRDD.of(hoodieRecords), context, hoodieTable));
}
| 3.26 |
hudi_SparkRDDReadClient_readROView_rdh
|
/**
* Given a bunch of hoodie keys, fetches all the individual records out as a data frame.
*
* @return a dataframe
*/
public Dataset<Row> readROView(JavaRDD<HoodieKey> hoodieKeys, int parallelism) {
assertSqlContext();
JavaPairRDD<HoodieKey, Option<Pair<String, String>>> lookupResultRDD = checkExists(hoodieKeys);
JavaPairRDD<HoodieKey, Option<String>> keyToFileRDD = lookupResultRDD.mapToPair(r -> new Tuple2<>(r._1, convertToDataFilePath(r._2)));
List<String> paths = keyToFileRDD.filter(keyFileTuple -> keyFileTuple._2().isPresent()).map(keyFileTuple -> keyFileTuple._2().get()).collect();
// record locations might be same for multiple keys, so need a unique list
Set<String> uniquePaths = new HashSet<>(paths);
Dataset<Row> originalDF = null;
// read files based on the file extension name
if ((paths.size() == 0) || paths.get(0).endsWith(HoodieFileFormat.PARQUET.getFileExtension())) {
      originalDF = sqlContextOpt.get().read().parquet(uniquePaths.toArray(new String[uniquePaths.size()]));
} else if (paths.get(0).endsWith(HoodieFileFormat.ORC.getFileExtension())) {
originalDF = sqlContextOpt.get().read().orc(uniquePaths.toArray(new String[uniquePaths.size()]));
}
StructType schema = originalDF.schema();
JavaPairRDD<HoodieKey, Row> keyRowRDD = originalDF.javaRDD().mapToPair(row -> {
HoodieKey key = new HoodieKey(row.getAs(HoodieRecord.RECORD_KEY_METADATA_FIELD), row.getAs(HoodieRecord.PARTITION_PATH_METADATA_FIELD));
return new Tuple2<>(key, row);
});
// Now, we need to further filter out, for only rows that match the supplied hoodie keys
JavaRDD<Row> rowRDD = keyRowRDD.join(keyToFileRDD, parallelism).map(tuple -> tuple._2()._1());
return sqlContextOpt.get().createDataFrame(rowRDD, schema);
}
| 3.26 |
hudi_SparkRDDReadClient_checkExists_rdh
|
/**
 * Checks if the given [Keys] exist in the hoodie table and returns [Key, Option[FullFilePath]]. If the optional
 * FullFilePath value is not present, then the key is not found. If the FullFilePath value is present, it is the
 * path component (without scheme) of the URI of the underlying file.
*/
public JavaPairRDD<HoodieKey, Option<Pair<String, String>>> checkExists(JavaRDD<HoodieKey> hoodieKeys) {
return HoodieJavaRDD.getJavaRDD(index.tagLocation(HoodieJavaRDD.of(hoodieKeys.map(k -> new HoodieAvroRecord<>(k, null))), context, hoodieTable)).mapToPair(hr -> new Tuple2<>(hr.getKey(), hr.isCurrentLocationKnown() ? Option.of(Pair.of(hr.getPartitionPath(), hr.getCurrentLocation().getFileId())) : Option.empty()));
}
| 3.26 |
hudi_SparkRDDReadClient_m0_rdh
|
/**
* Return all pending compactions with instant time for clients to decide what to compact next.
*
 * @return list of pending compaction plans paired with their instant times
 */
public List<Pair<String, HoodieCompactionPlan>> m0() {
HoodieTableMetaClient metaClient = HoodieTableMetaClient.builder().setConf(hadoopConf).setBasePath(hoodieTable.getMetaClient().getBasePath()).setLoadActiveTimelineOnLoad(true).build();
return CompactionUtils.getAllPendingCompactionPlans(metaClient).stream().map(instantWorkloadPair -> Pair.of(instantWorkloadPair.getKey().getTimestamp(), instantWorkloadPair.getValue())).collect(Collectors.toList());
}
| 3.26 |
hudi_SparkRDDReadClient_filterExists_rdh
|
/**
* Filter out HoodieRecords that already exists in the output folder. This is useful in deduplication.
*
* @param hoodieRecords
* Input RDD of Hoodie records.
* @return A subset of hoodieRecords RDD, with existing records filtered out.
*/
public JavaRDD<HoodieRecord<T>> filterExists(JavaRDD<HoodieRecord<T>> hoodieRecords) {
JavaRDD<HoodieRecord<T>> recordsWithLocation = tagLocation(hoodieRecords);
return recordsWithLocation.filter(v1 -> !v1.isCurrentLocationKnown());
}
| 3.26 |
hudi_SparkRDDReadClient_addHoodieSupport_rdh
|
/**
* Adds support for accessing Hoodie built tables from SparkSQL, as you normally would.
*
* @return SparkConf object to be used to construct the SparkContext by caller
*/
public static SparkConf addHoodieSupport(SparkConf conf) {
conf.set("spark.sql.hive.convertMetastoreParquet", "false");
    return conf;
  }
| 3.26 |
hudi_StringUtils_join_rdh
|
/**
* <p>
* Joins the elements of the provided array into a single String containing the provided list of elements.
* </p>
*
* <p>
* No separator is added to the joined String. Null objects or empty strings within the array are represented by empty
* strings.
* </p>
*
* <pre>
* StringUtils.join(null) = null
* StringUtils.join([]) = ""
* StringUtils.join([null]) = ""
* StringUtils.join(["a", "b", "c"]) = "abc"
* StringUtils.join([null, "", "a"]) = "a"
* </pre>
*/
public static <T> String join(final String... elements) {
return join(elements, EMPTY_STRING);
}
| 3.26 |
hudi_StringUtils_nullToEmpty_rdh
|
/**
* Returns the given string if it is non-null; the empty string otherwise.
*
* @param string
* the string to test and possibly return
* @return {@code string} itself if it is non-null; {@code ""} if it is null
*/
  public static String nullToEmpty(@Nullable String string) {
return string == null ? "" : string;
}
| 3.26 |
hudi_StringUtils_emptyToNull_rdh
|
/**
* Returns the given string if it is nonempty; {@code null} otherwise.
*
* @param string
* the string to test and possibly return
* @return {@code string} itself if it is nonempty; {@code null} if it is empty or null
*/
@Nullable
  public static String emptyToNull(@Nullable String string) {
    return stringIsNullOrEmpty(string) ? null : string;
}
| 3.26 |
hudi_StringUtils_split_rdh
|
/**
 * Splits the input string, delimited by {@code delimiter}, into a list of non-empty strings
* (skipping any empty string produced during splitting)
*/
  public static List<String> split(@Nullable String input, String delimiter) {
if (isNullOrEmpty(input)) {
return Collections.emptyList();
}
return Stream.of(input.split(delimiter)).map(String::trim).filter(s -> !s.isEmpty()).collect(Collectors.toList());
}
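A brief illustration of the trimming, empty-token skipping, and null handling implemented above (input values are illustrative):

StringUtils.split("2021/08/13, ,2021/08/14", ","); // -> ["2021/08/13", "2021/08/14"]
StringUtils.split(null, ",");                      // -> []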
| 3.26 |
hudi_HoodieDataTableValidator_readConfigFromFileSystem_rdh
|
/**
* Reads config from the file system.
*
* @param jsc
* {@link JavaSparkContext} instance.
* @param cfg
* {@link Config} instance.
* @return the {@link TypedProperties} instance.
 */
  private TypedProperties readConfigFromFileSystem(JavaSparkContext jsc, Config cfg) {
return UtilHelpers.readConfig(jsc.hadoopConfiguration(), new Path(cfg.propsFilePath), cfg.configs).getProps(true);
}
| 3.26 |
hudi_RDDBucketIndexPartitioner_doPartitionAndCustomColumnSort_rdh
|
/**
* Sort by specified column value. The behaviour is the same as `RDDCustomColumnsSortPartitioner`
*
* @param records
* @param partitioner
 * @return records partitioned by the given partitioner and sorted by the specified columns within each partition
 */
  private JavaRDD<HoodieRecord<T>> doPartitionAndCustomColumnSort(JavaRDD<HoodieRecord<T>> records, Partitioner partitioner) {
final String[] sortColumns = sortColumnNames;
final SerializableSchema schema = new SerializableSchema(HoodieAvroUtils.addMetadataFields(new Schema.Parser().parse(table.getConfig().getSchema())));
Comparator<HoodieRecord<T>> v2 = ((Comparator) ((t1, t2) -> {
FlatLists.ComparableList obj1 = FlatLists.ofComparableArray(t1.getColumnValues(schema.get(), sortColumns, consistentLogicalTimestampEnabled));
FlatLists.ComparableList obj2 = FlatLists.ofComparableArray(t2.getColumnValues(schema.get(), sortColumns, consistentLogicalTimestampEnabled));
return obj1.compareTo(obj2);
}));
return records.mapToPair(record -> new Tuple2<>(record, record)).repartitionAndSortWithinPartitions(new Partitioner() {
@Override
public int numPartitions() {
return partitioner.numPartitions();
}
@Override
public int m0(Object key) {
return partitioner.getPartition(((HoodieRecord) (key)).getKey());
}
}, v2).map(Tuple2::_2);
}
| 3.26 |
hudi_RDDBucketIndexPartitioner_doPartition_rdh
|
/**
* Execute partition using the given partitioner.
* If sorting is required, will do it within each data partition:
* - if sortColumnNames is specified, apply sort to the column (the behaviour is the same as `RDDCustomColumnsSortPartitioner`)
* - if table requires sort or BulkInsertSortMode is not None, then sort by record key within partition.
* By default, do partition only.
*
* @param records
* @param partitioner
 * a default partitioner that accepts `HoodieKey` as the partition key
 * @return the repartitioned (and, if required, sorted) records
 */
public JavaRDD<HoodieRecord<T>> doPartition(JavaRDD<HoodieRecord<T>> records, Partitioner partitioner) {
if ((sortColumnNames != null) && (sortColumnNames.length > 0)) {
return doPartitionAndCustomColumnSort(records, partitioner);
} else if (table.requireSortedRecords() || (table.getConfig().getBulkInsertSortMode() != BulkInsertSortMode.NONE)) {
return doPartitionAndSortByRecordKey(records, partitioner);
} else {
// By default, do partition only
return records.mapToPair(record -> new Tuple2<>(record.getKey(), record)).partitionBy(partitioner).map(Tuple2::_2);
}
}
| 3.26 |
hudi_RDDBucketIndexPartitioner_doPartitionAndSortByRecordKey_rdh
|
/**
* Sort by record key within each partition. The behaviour is the same as BulkInsertSortMode.PARTITION_SORT.
*
* @param records
* @param partitioner
 * @return records partitioned by the given partitioner and sorted by record key within each partition
 */
private JavaRDD<HoodieRecord<T>> doPartitionAndSortByRecordKey(JavaRDD<HoodieRecord<T>> records, Partitioner partitioner) {
if (table.getConfig().getBulkInsertSortMode() == BulkInsertSortMode.GLOBAL_SORT) {
LOG.warn("Bucket index does not support global sort mode, the sort will only be done within each data partition");
}
Comparator<HoodieKey> v5 = ((Comparator) ((t1, t2) -> t1.getRecordKey().compareTo(t2.getRecordKey())));
    return records.mapToPair(record -> new Tuple2<>(record.getKey(), record)).repartitionAndSortWithinPartitions(partitioner, v5).map(Tuple2::_2);
}
| 3.26 |
hudi_HoodieHeartbeatClient_stopHeartbeatTimer_rdh
|
/**
* Stops the timer of the given heartbeat.
*
* @param heartbeat
* The heartbeat to stop.
*/
  private void stopHeartbeatTimer(Heartbeat heartbeat) {
LOG.info("Stopping heartbeat for instant " + heartbeat.getInstantTime());
heartbeat.getTimer().cancel();
heartbeat.setHeartbeatStopped(true);
LOG.info("Stopped heartbeat for instant " + heartbeat.getInstantTime());
}
| 3.26 |
hudi_HoodieHeartbeatClient_stopHeartbeatTimers_rdh
|
/**
* Stops all timers of heartbeats started via this instance of the client.
*
* @throws HoodieException
*/
public void stopHeartbeatTimers() throws HoodieException {
instantToHeartbeatMap.values().stream().filter(this::isHeartbeatStarted).forEach(this::stopHeartbeatTimer);
}
| 3.26 |
hudi_HoodieHeartbeatClient_start_rdh
|
/**
* Start a new heartbeat for the specified instant. If there is already one running, this will be a NO_OP
*
* @param instantTime
* The instant time for the heartbeat.
*/
public void start(String instantTime) {
LOG.info("Received request to start heartbeat for instant time " + instantTime);Heartbeat heartbeat = instantToHeartbeatMap.get(instantTime);
ValidationUtils.checkArgument((heartbeat == null) || (!heartbeat.isHeartbeatStopped()), "Cannot restart a stopped heartbeat for " + instantTime);
if ((heartbeat != null) && heartbeat.isHeartbeatStarted()) {
// heartbeat already started, NO_OP
} else {
Heartbeat newHeartbeat = new Heartbeat();
newHeartbeat.setHeartbeatStarted(true);
instantToHeartbeatMap.put(instantTime, newHeartbeat);
// Ensure heartbeat is generated for the first time with this blocking call.
// Since timer submits the task to a thread, no guarantee when that thread will get CPU
// cycles to generate the first heartbeat.
updateHeartbeat(instantTime);
newHeartbeat.getTimer().scheduleAtFixedRate(new HeartbeatTask(instantTime), this.heartbeatIntervalInMs, this.heartbeatIntervalInMs);
}
}
| 3.26 |
hudi_HoodieHeartbeatClient_stop_rdh
|
/**
* Stops the heartbeat and deletes the heartbeat file for the specified instant.
*
* @param instantTime
* The instant time for the heartbeat.
* @throws HoodieException
*/
public void stop(String instantTime) throws HoodieException {
Heartbeat heartbeat = instantToHeartbeatMap.get(instantTime);
if (isHeartbeatStarted(heartbeat)) {
stopHeartbeatTimer(heartbeat);
HeartbeatUtils.deleteHeartbeatFile(fs, basePath, instantTime);
LOG.info("Deleted heartbeat file for instant " + instantTime);
}
}
| 3.26 |
hudi_HoodieHeartbeatClient_isHeartbeatStarted_rdh
|
/**
* Whether the given heartbeat is started.
*
* @param heartbeat
* The heartbeat to check whether is started.
* @return Whether the heartbeat is started.
* @throws IOException
*/
private boolean isHeartbeatStarted(Heartbeat heartbeat) {
return ((heartbeat != null) && heartbeat.isHeartbeatStarted()) && (!heartbeat.isHeartbeatStopped());
}
| 3.26 |
hudi_BaseVectorizedColumnReader_nextInt_rdh
|
/**
 * IntIterator that always returns zero.
 */
protected static final class NullIntIterator extends IntIterator {
@Override
int nextInt() {
return 0;
}
| 3.26 |
hudi_SparkHoodieHBaseIndex_canIndexLogFiles_rdh
|
/**
* Mapping is available in HBase already.
*/
@Override
public boolean canIndexLogFiles() {
return true;
}
| 3.26 |
hudi_SparkHoodieHBaseIndex_locationTagFunction_rdh
|
/**
* Function that tags each HoodieRecord with an existing location, if known.
*/
private <R> Function2<Integer, Iterator<HoodieRecord<R>>, Iterator<HoodieRecord<R>>> locationTagFunction(HoodieTableMetaClient metaClient) {
// `multiGetBatchSize` is intended to be a batch per 100ms. To create a rate limiter that measures
// operations per second, we need to multiply `multiGetBatchSize` by 10.
Integer multiGetBatchSize = config.getHbaseIndexGetBatchSize();
return (partitionNum, hoodieRecordIterator) -> {
boolean updatePartitionPath = config.getHbaseIndexUpdatePartitionPath();
RateLimiter limiter = RateLimiter.create(multiGetBatchSize * 10, TimeUnit.SECONDS);
// Grab the global HBase connection
synchronized(SparkHoodieHBaseIndex.class) {
if ((SparkHoodieHBaseIndex.hbaseConnection == null) || SparkHoodieHBaseIndex.hbaseConnection.isClosed()) {
SparkHoodieHBaseIndex.hbaseConnection = getHBaseConnection();
}
}
List<HoodieRecord<R>> taggedRecords = new ArrayList<>();
try (HTable hTable = ((HTable) (SparkHoodieHBaseIndex.hbaseConnection.getTable(TableName.valueOf(tableName))))) {
List<Get> statements = new ArrayList<>();
List<HoodieRecord> currentBatchOfRecords = new LinkedList<>();
// Do the tagging.
HoodieTimeline completedCommitsTimeline = metaClient.getCommitsTimeline().filterCompletedInstants();
while (hoodieRecordIterator.hasNext()) {
HoodieRecord rec = hoodieRecordIterator.next();
statements.add(generateStatement(rec.getRecordKey()));
currentBatchOfRecords.add(rec);
// iterator till we reach batch size
if (hoodieRecordIterator.hasNext() && (statements.size() < multiGetBatchSize)) {
continue;
}
// get results for batch from Hbase
Result[] results = doGet(hTable, statements, limiter);
// clear statements to be GC'd
statements.clear();
for (Result result : results) {
// first, attempt to grab location from HBase
HoodieRecord currentRecord = currentBatchOfRecords.remove(0);
if (result.getRow() == null) {
taggedRecords.add(currentRecord);
continue;
}
String keyFromResult = Bytes.toString(result.getRow());
            String commitTs = Bytes.toString(result.getValue(SYSTEM_COLUMN_FAMILY, COMMIT_TS_COLUMN));
String fileId = Bytes.toString(result.getValue(SYSTEM_COLUMN_FAMILY, FILE_NAME_COLUMN));
String partitionPath = Bytes.toString(result.getValue(SYSTEM_COLUMN_FAMILY, PARTITION_PATH_COLUMN));
if (!HoodieIndexUtils.checkIfValidCommit(completedCommitsTimeline, commitTs)) {
// if commit is invalid, treat this as a new taggedRecord
taggedRecords.add(currentRecord);
continue;
}
// check whether to do partition change processing
            if (updatePartitionPath && (!partitionPath.equals(currentRecord.getPartitionPath()))) {
              // delete partition old data record
HoodieRecord emptyRecord = new HoodieAvroRecord(new HoodieKey(currentRecord.getRecordKey(), partitionPath), new EmptyHoodieRecordPayload());
emptyRecord.unseal();
emptyRecord.setCurrentLocation(new HoodieRecordLocation(commitTs, fileId));
emptyRecord.seal();
// insert partition new data record
currentRecord = new HoodieAvroRecord(new HoodieKey(currentRecord.getRecordKey(), currentRecord.getPartitionPath()), ((HoodieRecordPayload) (currentRecord.getData())));
taggedRecords.add(emptyRecord);
taggedRecords.add(currentRecord);
} else {
currentRecord = new HoodieAvroRecord(new HoodieKey(currentRecord.getRecordKey(), partitionPath), ((HoodieRecordPayload) (currentRecord.getData())));
currentRecord.unseal();
currentRecord.setCurrentLocation(new HoodieRecordLocation(commitTs, fileId));
currentRecord.seal();
taggedRecords.add(currentRecord);
// the key from Result and the key being processed should be same
assert currentRecord.getRecordKey().contentEquals(keyFromResult);
}
}
}
} catch (IOException e) {
        throw new HoodieIndexException("Failed to Tag indexed locations because of exception with HBase Client", e);
      } finally {
limiter.stop();
}
return taggedRecords.iterator();
};
}
| 3.26 |
hudi_SparkHoodieHBaseIndex_getBatchSize_rdh
|
/**
* Calculate putBatch size so that sum of requests across multiple jobs in a second does not exceed
* maxQpsPerRegionServer for each Region Server. Multiplying qpsFraction to reduce the aggregate load on common RS
* across topics. Assumption here is that all tables have regions across all RS, which is not necessarily true for
* smaller tables. So, they end up getting a smaller share of QPS than they deserve, but it might be ok.
* <p>
* Example: int putBatchSize = batchSizeCalculator.getBatchSize(10, 16667, 1200, 200, 100, 0.1f)
* </p>
* <p>
 * Expected batchSize is 8 because in that case, the total number of requests sent to the Region Servers in one second is:
 *
 * 8 (batchSize) * 200 (parallelism) * 10 (maxReqsInOneSecond) * 10 (numRegionServers) * 0.1 (qpsFraction) =>
 * 16000. We assume requests get distributed to Region Servers uniformly, so each RS gets 1600 requests, which
 * happens to be 10% of 16667 (maxQPSPerRegionServer), as expected.
* </p>
* <p>
* Assumptions made here
* <li>In a batch, writes get evenly distributed to each RS for that table. Since we do writes only in the case of
* inserts and not updates, for this assumption to fail, inserts would have to be skewed towards few RS, likelihood
* of which is less if Hbase table is pre-split and rowKeys are UUIDs (random strings). If this assumption fails,
* then it is possible for some RS to receive more than maxQpsPerRegionServer QPS, but for simplicity, we are going
* ahead with this model, since this is meant to be a lightweight distributed throttling mechanism without
* maintaining a global context. So if this assumption breaks, we are hoping the HBase Master relocates hot-spot
* regions to new Region Servers.
*
* </li>
* <li>For Region Server stability, throttling at a second level granularity is fine. Although, within a second, the
* sum of queries might be within maxQpsPerRegionServer, there could be peaks at some sub second intervals. So, the
* assumption is that these peaks are tolerated by the Region Server (which at max can be maxQpsPerRegionServer).
* </li>
* </p>
*/
  public int getBatchSize(int numRegionServersForTable, int maxQpsPerRegionServer, int numTasksDuringPut,
                          int maxExecutors, float qpsFraction) {
    int numRSAlive = numRegionServersForTable;
    int maxReqPerSec = getMaxReqPerSec(numRSAlive, maxQpsPerRegionServer, qpsFraction);
    int numTasks = numTasksDuringPut;
    int maxParallelPutsTask = Math.max(1, Math.min(numTasks, maxExecutors));
    int multiPutBatchSizePerSecPerTask = Math.max(1, (int) Math.ceil(maxReqPerSec / maxParallelPutsTask));
    LOG.info("HbaseIndexThrottling: qpsFraction :" + qpsFraction);
    LOG.info("HbaseIndexThrottling: numRSAlive :" + numRSAlive);
    LOG.info("HbaseIndexThrottling: maxReqPerSec :" + maxReqPerSec);
    LOG.info("HbaseIndexThrottling: numTasks :" + numTasks);
    LOG.info("HbaseIndexThrottling: maxExecutors :" + maxExecutors);
    LOG.info("HbaseIndexThrottling: maxParallelPuts :" + maxParallelPutsTask);
    LOG.info("HbaseIndexThrottling: numRegionServersForTable :" + numRegionServersForTable);
LOG.info("HbaseIndexThrottling: multiPutBatchSizePerSecPerTask :" + multiPutBatchSizePerSecPerTask);
return multiPutBatchSizePerSecPerTask;
}
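Re-deriving the totals quoted in the javadoc example above (illustrative arithmetic only; the six-argument call shown in the javadoc does not match the five-parameter signature here and is kept purely as documentation):

// Numbers taken from the javadoc example, not from a real deployment.
int batchSize = 8;          // expected batch size
int parallelism = 200;      // parallel put tasks
int batchesPerSecond = 10;  // one batch per 100ms
int numRegionServers = 10;
double qpsFraction = 0.1;
double totalReqPerSec = batchSize * parallelism * batchesPerSecond * numRegionServers * qpsFraction; // = 16000
double reqPerRegionServer = totalReqPerSec / numRegionServers; // = 1600, roughly 10% of 16667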
| 3.26 |
hudi_SparkHoodieHBaseIndex_doMutations_rdh
|
/**
* Helper method to facilitate performing mutations (including puts and deletes) in Hbase.
 */
  private void doMutations(BufferedMutator mutator, List<Mutation> mutations, RateLimiter limiter) throws IOException {
if (mutations.isEmpty()) {
return;
}
// report number of operations to account per second with rate limiter.
// If #limiter.getRate() operations are acquired within 1 second, ratelimiter will limit the rest of calls
// for within that second
limiter.tryAcquire(mutations.size());
mutator.mutate(mutations);
mutator.flush();
mutations.clear();
}
| 3.26 |
hudi_SparkHoodieHBaseIndex_addShutDownHook_rdh
|
/**
* Since we are sharing the HBaseConnection across tasks in a JVM, make sure the HBaseConnection is closed when JVM
* exits.
*/
  private void addShutDownHook() {
    if (null == shutdownThread) {
shutdownThread = new Thread(() -> {
try {
hbaseConnection.close();
      } catch (Exception e) {
        // fail silently for any sort of exception
}
});
Runtime.getRuntime().addShutdownHook(shutdownThread);
}
}
| 3.26 |
hudi_SparkHoodieHBaseIndex_m0_rdh
|
/**
* Only looks up by recordKey.
 */
  @Override
public boolean m0() {
return true;
}
| 3.26 |
hudi_SparkHoodieHBaseIndex_close_rdh
|
/**
* Ensure that any resources used for indexing are released here.
*/
@Override
public void close() {
LOG.info("No resources to release from Hbase index");
}
| 3.26 |
hudi_DirectWriteMarkers_create_rdh
|
/**
* Creates a marker file based on the full marker name excluding the base path and instant.
*
* @param markerName
* the full marker name, e.g., "2021/08/13/file1.marker.CREATE"
* @return path of the marker file
*/
public Option<Path> create(String markerName) {
return create(new Path(markerDirPath, markerName), true);
}
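A hypothetical call using the marker name from the javadoc above, assuming a DirectWriteMarkers instance named writeMarkers has already been constructed for the target instant:

// Creates <markerDirPath>/2021/08/13/file1.marker.CREATE and returns its path on success.
Option<Path> markerPath = writeMarkers.create("2021/08/13/file1.marker.CREATE");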
| 3.26 |
hudi_DirectWriteMarkers_deleteMarkerDir_rdh
|
/**
* Deletes Marker directory corresponding to an instant.
*
* @param context
* HoodieEngineContext.
* @param parallelism
* parallelism for deletion.
*/
  public boolean deleteMarkerDir(HoodieEngineContext context, int parallelism) {
return FSUtils.deleteDir(context, fs, markerDirPath, parallelism);
}
/**
*
 * @return {@code true} if marker directory exists; {@code false} otherwise
 */
| 3.26 |
hudi_HoodieBackedTableMetadataWriter_updateFromWriteStatuses_rdh
|
/**
* Update from {@code HoodieCommitMetadata}.
*
* @param commitMetadata
* {@code HoodieCommitMetadata}
* @param instantTime
* Timestamp at which the commit was performed
*/
@Override
public void updateFromWriteStatuses(HoodieCommitMetadata commitMetadata, HoodieData<WriteStatus> writeStatus, String instantTime) {
processAndCommit(instantTime, () -> {
Map<MetadataPartitionType, HoodieData<HoodieRecord>> partitionToRecordMap = HoodieTableMetadataUtil.convertMetadataToRecords(engineContext, commitMetadata, instantTime, getRecordsGenerationParams());
// Updates for record index are created by parsing the WriteStatus which is a hudi-client object. Hence, we cannot yet move this code
// to the HoodieTableMetadataUtil class in hudi-common.
HoodieData<HoodieRecord> updatesFromWriteStatuses = getRecordIndexUpdates(writeStatus);
HoodieData<HoodieRecord> additionalUpdates = getRecordIndexAdditionalUpdates(updatesFromWriteStatuses, commitMetadata);
partitionToRecordMap.put(RECORD_INDEX, updatesFromWriteStatuses.union(additionalUpdates));
updateFunctionalIndexIfPresent(commitMetadata, instantTime, partitionToRecordMap);
return partitionToRecordMap;
});
closeInternal();
}
| 3.26 |
hudi_HoodieBackedTableMetadataWriter_prepRecords_rdh
|
/**
* Tag each record with the location in the given partition.
* The record is tagged with respective file slice's location based on its record key.
*/
protected HoodieData<HoodieRecord> prepRecords(Map<MetadataPartitionType, HoodieData<HoodieRecord>> partitionRecordsMap) {
// The result set
HoodieData<HoodieRecord> allPartitionRecords = engineContext.emptyHoodieData();
HoodieTableFileSystemView fsView = HoodieTableMetadataUtil.getFileSystemView(metadataMetaClient);
for (Map.Entry<MetadataPartitionType, HoodieData<HoodieRecord>> entry : partitionRecordsMap.entrySet()) {
final String partitionName = entry.getKey().getPartitionPath();
HoodieData<HoodieRecord> records = entry.getValue();
List<FileSlice> fileSlices = HoodieTableMetadataUtil.getPartitionLatestFileSlices(metadataMetaClient, Option.ofNullable(fsView), partitionName);
if (fileSlices.isEmpty()) {
// scheduling of INDEX only initializes the file group and not add commit
// so if there are no committed file slices, look for inflight slices
        fileSlices = getPartitionLatestFileSlicesIncludingInflight(metadataMetaClient, Option.ofNullable(fsView), partitionName);
      }
final int v134 = fileSlices.size();
ValidationUtils.checkArgument(v134 > 0, ("FileGroup count for MDT partition " + partitionName) + " should be >0");
List<FileSlice> finalFileSlices = fileSlices;
HoodieData<HoodieRecord> rddSinglePartitionRecords = records.map(r -> {
FileSlice slice = finalFileSlices.get(HoodieTableMetadataUtil.mapRecordKeyToFileGroupIndex(r.getRecordKey(), v134));
r.unseal();
r.setCurrentLocation(new HoodieRecordLocation(slice.getBaseInstantTime(), slice.getFileId()));
r.seal();
return r;
});
allPartitionRecords = allPartitionRecords.union(rddSinglePartitionRecords);
}
return allPartitionRecords;
}
| 3.26 |
hudi_HoodieBackedTableMetadataWriter_m3_rdh
|
/**
* Validates the timeline for both main and metadata tables to ensure compaction on MDT can be scheduled.
*/
protected boolean m3(Option<String> inFlightInstantTimestamp, String latestDeltaCommitTimeInMetadataTable) {
// we need to find if there are any inflights in data table timeline before or equal to the latest delta commit in metadata table.
// Whenever you want to change this logic, please ensure all below scenarios are considered.
// a. There could be a chance that latest delta commit in MDT is committed in MDT, but failed in DT. And so findInstantsBeforeOrEquals() should be employed
// b. There could be DT inflights after latest delta commit in MDT and we are ok with it. bcoz, the contract is, the latest compaction instant time in MDT represents
// any instants before that is already synced with metadata table.
// c. Do consider out of order commits. For eg, c4 from DT could complete before c3. and we can't trigger compaction in MDT with c4 as base instant time, until every
// instant before c4 is synced with metadata table.
List<HoodieInstant> pendingInstants = f1.reloadActiveTimeline().filterInflightsAndRequested().findInstantsBeforeOrEquals(latestDeltaCommitTimeInMetadataTable).getInstants();
if (!pendingInstants.isEmpty()) {
checkNumDeltaCommits(metadataMetaClient, dataWriteConfig.getMetadataConfig().getMaxNumDeltacommitsWhenPending());
LOG.info(String.format("Cannot compact metadata table as there are %d inflight instants in data table before latest deltacommit in metadata table: %s. Inflight instants in data table: %s", pendingInstants.size(), latestDeltaCommitTimeInMetadataTable, Arrays.toString(pendingInstants.toArray())));
return false;
}
// Check if there are any pending compaction or log compaction instants in the timeline.
// If pending compact/logCompaction operations are found abort scheduling new compaction/logCompaction operations.
Option<HoodieInstant> pendingLogCompactionInstant = metadataMetaClient.getActiveTimeline().filterPendingLogCompactionTimeline().firstInstant();
Option<HoodieInstant> v149 = metadataMetaClient.getActiveTimeline().filterPendingCompactionTimeline().firstInstant();
if (pendingLogCompactionInstant.isPresent() || v149.isPresent()) {
LOG.warn(String.format("Not scheduling compaction or logCompaction, since a pending compaction instant %s or logCompaction %s instant is present", v149, pendingLogCompactionInstant));
return false;
}
return true;
}
| 3.26 |
hudi_HoodieBackedTableMetadataWriter_compactIfNecessary_rdh
|
/**
* Perform a compaction on the Metadata Table.
* <p>
* Cases to be handled:
* 1. We cannot perform compaction if there are previous inflight operations on the dataset. This is because
* a compacted metadata base file at time Tx should represent all the actions on the dataset till time Tx.
* <p>
* 2. In multi-writer scenario, a parallel operation with a greater instantTime may have completed creating a
* deltacommit.
*/
protected void compactIfNecessary(BaseHoodieWriteClient writeClient, String latestDeltacommitTime) {
// Trigger compaction with suffixes based on the same instant time. This ensures that any future
// delta commits synced over will not have an instant time lesser than the last completed instant on the
// metadata table.
final String v144 = writeClient.createNewInstantTime(false);
// we need to avoid checking compaction w/ same instant again.
// let's say we trigger compaction after C5 in MDT and so compaction completes with C4001. but C5 crashed before completing in MDT.
// and again w/ C6, we will re-attempt compaction at which point latest delta commit is C4 in MDT.
// and so we try compaction w/ instant C4001. So, we can avoid compaction if we already have compaction w/ same instant time.
if (metadataMetaClient.getActiveTimeline().filterCompletedInstants().containsInstant(v144)) {
LOG.info(String.format("Compaction with same %s time is already present in the timeline.", v144));
} else if (writeClient.scheduleCompactionAtInstant(v144, Option.empty())) {
LOG.info("Compaction is scheduled for timestamp " + v144);
writeClient.compact(v144);
} else if (metadataWriteConfig.isLogCompactionEnabled()) {
// Schedule and execute log compaction with suffixes based on the same instant time. This ensures that any future
// delta commits synced over will not have an instant time lesser than the last completed instant on the
// metadata table.
final String logCompactionInstantTime = HoodieTableMetadataUtil.createLogCompactionTimestamp(latestDeltacommitTime);
if (metadataMetaClient.getActiveTimeline().filterCompletedInstants().containsInstant(logCompactionInstantTime)) {
LOG.info(String.format("Log compaction with same %s time is already present in the timeline.", logCompactionInstantTime));} else if (writeClient.scheduleLogCompactionAtInstant(logCompactionInstantTime, Option.empty())) {
LOG.info("Log compaction is scheduled for timestamp " + logCompactionInstantTime);
writeClient.logCompact(logCompactionInstantTime);
}
}
}
| 3.26 |
hudi_HoodieBackedTableMetadataWriter_isBootstrapNeeded_rdh
|
/**
* Whether initialize operation needed for this metadata table.
* <p>
* Rollback of the first commit would look like un-synced instants in the metadata table.
* Action metadata is needed to verify the instant time and avoid erroneous initializing.
* <p>
* TODO: Revisit this logic and validate that filtering for all
* commits timeline is the right thing to do
*
* @return True if the initialization is not needed, False otherwise
*/
private boolean isBootstrapNeeded(Option<HoodieInstant> latestMetadataInstant) {
if (!latestMetadataInstant.isPresent()) {
LOG.warn("Metadata Table will need to be re-initialized as no instants were found");
return true;
}
final String latestMetadataInstantTimestamp = latestMetadataInstant.get().getTimestamp();
if (latestMetadataInstantTimestamp.startsWith(SOLO_COMMIT_TIMESTAMP)) {
// the initialization timestamp is SOLO_COMMIT_TIMESTAMP + offset
return false;
}
return false;
}
| 3.26 |
hudi_HoodieBackedTableMetadataWriter_update_rdh
|
/**
* Update from {@code HoodieRollbackMetadata}.
*
* @param rollbackMetadata
* {@code HoodieRollbackMetadata}
* @param instantTime
* Timestamp at which the rollback was performed
*/
@Override
public void update(HoodieRollbackMetadata rollbackMetadata, String instantTime) {
if (initialized && (f0 != null)) {
// The commit which is being rolled back on the dataset
final String commitToRollbackInstantTime = rollbackMetadata.getCommitsRollback().get(0);
// Find the deltacommits since the last compaction
Option<Pair<HoodieTimeline, HoodieInstant>> deltaCommitsInfo = CompactionUtils.getDeltaCommitsSinceLatestCompaction(metadataMetaClient.getActiveTimeline());
// This could be a compaction or deltacommit instant (See CompactionUtils.getDeltaCommitsSinceLatestCompaction)
HoodieInstant compactionInstant = deltaCommitsInfo.get().getValue();
HoodieTimeline deltacommitsSinceCompaction = deltaCommitsInfo.get().getKey();
// The deltacommit that will be rolled back
HoodieInstant deltaCommitInstant = new HoodieInstant(false, HoodieTimeline.DELTA_COMMIT_ACTION, commitToRollbackInstantTime);
validateRollback(commitToRollbackInstantTime, compactionInstant, deltacommitsSinceCompaction);
// lets apply a delta commit with DT's rb instant(with special suffix) containing following records:
// a. any log files as part of RB commit metadata that was added
// b. log files added by the commit in DT being rolled back. By rolled back, we mean, a rollback block will be added and does not mean it will be deleted.
// both above list should only be added to FILES partition.
String rollbackInstantTime = createRollbackTimestamp(instantTime);
      processAndCommit(instantTime, () -> HoodieTableMetadataUtil.convertMetadataToRecords(engineContext, f1, rollbackMetadata, instantTime));
if (deltacommitsSinceCompaction.containsInstant(deltaCommitInstant)) {
LOG.info("Rolling back MDT deltacommit " + commitToRollbackInstantTime);
if (!getWriteClient().rollback(commitToRollbackInstantTime, rollbackInstantTime)) {
throw new HoodieMetadataException("Failed to rollback deltacommit at "
+ commitToRollbackInstantTime);
}
} else {
LOG.info(String.format("Ignoring rollback of instant %s at %s. The commit to rollback is not found in MDT", commitToRollbackInstantTime, instantTime));
}
closeInternal();
}
}
| 3.26 |
hudi_HoodieBackedTableMetadataWriter_preWrite_rdh
|
/**
* Allows the implementation to perform any pre-commit operations like transitioning a commit to inflight if required.
*
* @param instantTime
* time of commit
*/
protected void preWrite(String instantTime) {
// Default is No-Op
}
| 3.26 |
hudi_HoodieBackedTableMetadataWriter_enablePartitions_rdh
|
/**
* Enable metadata table partitions based on config.
*/
private void enablePartitions() {
final HoodieMetadataConfig v0 = dataWriteConfig.getMetadataConfig();
    if (dataWriteConfig.isMetadataTableEnabled() || f1.getTableConfig().isMetadataPartitionAvailable(FILES)) {
      this.enabledPartitionTypes.add(FILES);
    }
    if (v0.isBloomFilterIndexEnabled() || f1.getTableConfig().isMetadataPartitionAvailable(BLOOM_FILTERS)) {
      this.enabledPartitionTypes.add(BLOOM_FILTERS);
    }
    if (v0.isColumnStatsIndexEnabled() || f1.getTableConfig().isMetadataPartitionAvailable(COLUMN_STATS)) {
this.enabledPartitionTypes.add(COLUMN_STATS);
}
if (dataWriteConfig.isRecordIndexEnabled() || f1.getTableConfig().isMetadataPartitionAvailable(RECORD_INDEX)) {
this.enabledPartitionTypes.add(RECORD_INDEX);
}
if (f1.getFunctionalIndexMetadata().isPresent()) {
this.enabledPartitionTypes.add(FUNCTIONAL_INDEX);
}
}
| 3.26 |
hudi_HoodieBackedTableMetadataWriter_updateFunctionalIndexIfPresent_rdh
|
/**
* Update functional index from {@link HoodieCommitMetadata}.
*/
private void updateFunctionalIndexIfPresent(HoodieCommitMetadata
commitMetadata, String instantTime,
Map<MetadataPartitionType, HoodieData<HoodieRecord>> partitionToRecordMap) {
f1.getTableConfig().getMetadataPartitions().stream().filter(partition -> partition.startsWith(HoodieTableMetadataUtil.PARTITION_NAME_FUNCTIONAL_INDEX_PREFIX)).forEach(partition -> {
HoodieData<HoodieRecord> functionalIndexRecords;
try {
functionalIndexRecords = getFunctionalIndexUpdates(commitMetadata, partition, instantTime);
} catch (Exception e) {
throw new HoodieMetadataException("Failed to get functional index updates for partition " + partition, e);
}
partitionToRecordMap.put(FUNCTIONAL_INDEX, functionalIndexRecords);
});
}
| 3.26 |
hudi_HoodieBackedTableMetadataWriter_m2_rdh
|
/**
* Update from {@code HoodieRestoreMetadata}.
*
* @param restoreMetadata
* {@code HoodieRestoreMetadata}
* @param instantTime
* Timestamp at which the restore was performed
*/
@Override
public void m2(HoodieRestoreMetadata restoreMetadata, String instantTime) {
f1.reloadActiveTimeline();
// Fetch the commit to restore to (savepointed commit time)
HoodieInstant restoreInstant = new HoodieInstant(State.REQUESTED, HoodieTimeline.RESTORE_ACTION, instantTime);
HoodieInstant requested = HoodieTimeline.getRestoreRequestedInstant(restoreInstant);
HoodieRestorePlan restorePlan = null;
try {
restorePlan = TimelineMetadataUtils.deserializeAvroMetadata(f1.getActiveTimeline().readRestoreInfoAsBytes(requested).get(), HoodieRestorePlan.class);
} catch (IOException e) {
throw new HoodieIOException(("Deserialization of restore plan failed whose restore instant time is " + instantTime) + " in data table",
e);
}
final String restoreToInstantTime = restorePlan.getSavepointToRestoreTimestamp();
LOG.info(("Triggering restore to "
+ restoreToInstantTime) + " in metadata table");
// fetch the earliest commit to retain and ensure the base file prior to the time to restore is present
List<HoodieFileGroup> filesGroups =
f0.getMetadataFileSystemView().getAllFileGroups(FILES.getPartitionPath()).collect(Collectors.toList());
boolean cannotRestore = filesGroups.stream().map(fileGroup -> fileGroup.getAllFileSlices().map(fileSlice -> fileSlice.getBaseInstantTime()).anyMatch(instantTime1 -> HoodieTimeline.compareTimestamps(instantTime1, LESSER_THAN_OR_EQUALS, restoreToInstantTime))).anyMatch(canRestore -> !canRestore);
if (cannotRestore) {
throw new HoodieMetadataException(String.format("Can't restore to %s since there is no base file in MDT lesser than the commit to restore to. " + "Please delete metadata table and retry", restoreToInstantTime));
}
// Restore requires the existing pipelines to be shutdown. So we can safely scan the dataset to find the current
// list of files in the filesystem.
List<DirectoryInfo> dirInfoList = listAllPartitionsFromFilesystem(instantTime);
Map<String, DirectoryInfo> dirInfoMap = dirInfoList.stream().collect(Collectors.toMap(DirectoryInfo::getRelativePath, Function.identity()));
dirInfoList.clear();
BaseHoodieWriteClient<?, I, ?, ?> writeClient = getWriteClient();
writeClient.restoreToInstant(restoreToInstantTime, false);
// At this point we have also reverted the cleans which have occurred after the restoreToInstantTime. Hence, a sync
// is required to bring back those cleans.
try {
initMetadataReader();
Map<String, Map<String, Long>> partitionFilesToAdd = new HashMap<>();
Map<String, List<String>> partitionFilesToDelete = new HashMap<>();
List<String> partitionsToDelete = new ArrayList<>();
fetchOutofSyncFilesRecordsFromMetadataTable(dirInfoMap, partitionFilesToAdd, partitionFilesToDelete, partitionsToDelete);
// Even if we don't have any deleted files to sync, we still create an empty commit so that we can track the restore has completed.
// We cannot create a deltaCommit at instantTime now because a future (rollback) block has already been written to the logFiles.
// We need to choose a timestamp which would be a validInstantTime for MDT. This is either a commit timestamp completed on the dataset
// or a timestamp with suffix which we use for MDT clean, compaction etc.
String v116 = HoodieTableMetadataUtil.createRestoreTimestamp(writeClient.createNewInstantTime(false));
processAndCommit(v116, () -> HoodieTableMetadataUtil.convertMissingPartitionRecords(engineContext, partitionsToDelete, partitionFilesToAdd, partitionFilesToDelete, v116));
closeInternal();
} catch (IOException e) {
throw new HoodieMetadataException("IOException during MDT restore sync", e);
}
}
| 3.26 |
hudi_HoodieBackedTableMetadataWriter_initializeFromFilesystem_rdh
|
/**
* Initialize the Metadata Table by listing files and partitions from the file system.
*
* @param initializationTime
* - Timestamp to use for the commit
* @param partitionsToInit
* - List of MDT partitions to initialize
* @param inflightInstantTimestamp
* - Current action instant responsible for this initialization
*/
private boolean initializeFromFilesystem(String initializationTime, List<MetadataPartitionType> partitionsToInit, Option<String> inflightInstantTimestamp) throws IOException {
if (anyPendingDataInstant(f1, inflightInstantTimestamp)) {
return false;
}
// FILES partition is always required and is initialized first
boolean filesPartitionAvailable = f1.getTableConfig().isMetadataPartitionAvailable(FILES);
if (!filesPartitionAvailable) {
partitionsToInit.remove(FILES);
partitionsToInit.add(0, FILES);
// Initialize the metadata table for the first time
metadataMetaClient = initializeMetaClient();
} else {
// Check and then open the metadata table reader so FILES partition can be read during initialization of other partitions
initMetadataReader();
// Load the metadata table metaclient if required
if (metadataMetaClient == null) {
metadataMetaClient = HoodieTableMetaClient.builder().setConf(hadoopConf.get()).setBasePath(metadataWriteConfig.getBasePath()).setTimeGeneratorConfig(dataWriteConfig.getTimeGeneratorConfig()).build();
}
}
// Already initialized partitions can be ignored
partitionsToInit.removeIf(metadataPartition ->
f1.getTableConfig().isMetadataPartitionAvailable(metadataPartition));
// Get a complete list of files and partitions from the file system or from already initialized FILES partition of MDT
List<DirectoryInfo> partitionInfoList;
if (filesPartitionAvailable) {
partitionInfoList = listAllPartitionsFromMDT(initializationTime);
} else if (dataWriteConfig.getMetadataConfig().shouldAutoInitialize()) {
// if auto initialization is enabled, then we need to list all partitions from the file system
partitionInfoList = listAllPartitionsFromFilesystem(initializationTime);
} else {
// if auto initialization is disabled, we can return an empty list
partitionInfoList = Collections.emptyList();
}
Map<String, Map<String, Long>> partitionToFilesMap = partitionInfoList.stream().map(p -> {
String partitionName = HoodieTableMetadataUtil.getPartitionIdentifier(p.getRelativePath());
return Pair.of(partitionName, p.getFileNameToSizeMap());
}).collect(Collectors.toMap(Pair::getKey,
Pair::getValue));
for (MetadataPartitionType partitionType : partitionsToInit) {
// Find the commit timestamp to use for this partition. Each initialization should use its own unique commit time.
String commitTimeForPartition = generateUniqueCommitInstantTime(initializationTime);
LOG.info((("Initializing MDT partition " + partitionType.name()) + " at instant ") + commitTimeForPartition);
Pair<Integer, HoodieData<HoodieRecord>> fileGroupCountAndRecordsPair;
try {
switch (partitionType) {
case FILES :
fileGroupCountAndRecordsPair = initializeFilesPartition(partitionInfoList);
break;
case BLOOM_FILTERS :
fileGroupCountAndRecordsPair = initializeBloomFiltersPartition(initializationTime, partitionToFilesMap);
break;
case COLUMN_STATS :
fileGroupCountAndRecordsPair = initializeColumnStatsPartition(partitionToFilesMap);
break;
case RECORD_INDEX :
fileGroupCountAndRecordsPair = initializeRecordIndexPartition();
break;
case FUNCTIONAL_INDEX :
fileGroupCountAndRecordsPair = initializeFunctionalIndexPartition();
break;
default :
throw new HoodieMetadataException("Unsupported MDT partition type: " + partitionType);
}
} catch (Exception e) {
String metricKey = (partitionType.getPartitionPath() + "_") + HoodieMetadataMetrics.BOOTSTRAP_ERR_STR;
metrics.ifPresent(m -> m.setMetric(metricKey, 1));
LOG.error((("Bootstrap on " + partitionType.getPartitionPath()) + " partition failed for ") + metadataMetaClient.getBasePath(), e);
throw new HoodieMetadataException((partitionType.getPartitionPath() + " bootstrap failed for ") + metadataMetaClient.getBasePath(), e);
}
LOG.info(String.format("Initializing %s index with %d mappings and %d file groups.", partitionType.name(), fileGroupCountAndRecordsPair.getKey(), fileGroupCountAndRecordsPair.getValue().count()));
HoodieTimer partitionInitTimer = HoodieTimer.start();
// Generate the file groups
final int fileGroupCount = fileGroupCountAndRecordsPair.getKey();
ValidationUtils.checkArgument(fileGroupCount > 0, ("FileGroup count for MDT partition " + partitionType.name()) + " should be > 0");
initializeFileGroups(f1, partitionType, commitTimeForPartition, fileGroupCount);
// Perform the commit using bulkCommit
HoodieData<HoodieRecord> records = fileGroupCountAndRecordsPair.getValue();
bulkCommit(commitTimeForPartition, partitionType, records, fileGroupCount);
metadataMetaClient.reloadActiveTimeline();
f1.getTableConfig().setMetadataPartitionState(f1, partitionType, true);
// initialize the metadata reader again so the MDT partition can be read after initialization
initMetadataReader();
long totalInitTime = partitionInitTimer.endTimer();
LOG.info(String.format(("Initializing %s index in metadata table took " + totalInitTime) + " in ms", partitionType.name()));
}
return true;
}
| 3.26 |
hudi_HoodieBackedTableMetadataWriter_listAllPartitionsFromMDT_rdh
|
/**
* Function to find hoodie partitions and list files in them in parallel from MDT.
*
* @param initializationTime
* Files with a timestamp after this are ignored
* @return List consisting of {@code DirectoryInfo} for each partition found.
*/
private List<DirectoryInfo> listAllPartitionsFromMDT(String initializationTime) throws IOException {
List<DirectoryInfo> dirinfoList = new LinkedList<>();
List<String> allPartitionPaths = f0.getAllPartitionPaths().stream().map(partitionPath -> (dataWriteConfig.getBasePath() + "/") + partitionPath).collect(Collectors.toList());
Map<String, FileStatus[]> partitionFileMap = f0.getAllFilesInPartitions(allPartitionPaths);
for (Map.Entry<String, FileStatus[]> entry : partitionFileMap.entrySet()) {
dirinfoList.add(new DirectoryInfo(entry.getKey(), entry.getValue(), initializationTime));
}
return dirinfoList;
}
| 3.26 |
hudi_HoodieBackedTableMetadataWriter_processAndCommit_rdh
|
/**
* Processes commit metadata from data table and commits to metadata table.
*
* @param instantTime
* instant time of interest.
* @param convertMetadataFunction
* converter function to convert the respective metadata to List of HoodieRecords to be written to metadata table.
*/
private void processAndCommit(String instantTime, ConvertMetadataFunction convertMetadataFunction) {
Set<String> partitionsToUpdate = m1();
if (initialized && (f0 != null)) {
// convert metadata and filter only the entries whose partition path are in partitionsToUpdate
Map<MetadataPartitionType, HoodieData<HoodieRecord>> partitionRecordsMap = convertMetadataFunction.convertMetadata().entrySet().stream().filter(entry -> partitionsToUpdate.contains(entry.getKey().getPartitionPath())).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
commit(instantTime, partitionRecordsMap);
}
}
| 3.26 |
hudi_HoodieBackedTableMetadataWriter_generateUniqueCommitInstantTime_rdh
|
/**
* Returns a unique timestamp to use for initializing a MDT partition.
* <p>
* Since commits are immutable, we should use unique timestamps to initialize each partition. For this, we will add a suffix to the given initializationTime
* until we find a unique timestamp.
*
* @param initializationTime
* Timestamp from dataset to use for initialization
* @return a unique timestamp for MDT
*/
private String generateUniqueCommitInstantTime(String initializationTime) {
// if it is initialized via the async indexer, we don't need to alter the init time
if (HoodieTableMetadataUtil.isIndexingCommit(initializationTime)) {
return initializationTime;
}
// Add suffix to initializationTime to find an unused instant time for the next index initialization.
// This function would be called multiple times in a single application if multiple indexes are being
// initialized one after the other.
for (int offset = 0; ; ++offset) {
final String commitInstantTime = HoodieTableMetadataUtil.createIndexInitTimestamp(initializationTime, offset);
if (!metadataMetaClient.getCommitsTimeline().containsInstant(commitInstantTime)) {
return commitInstantTime;
}
}
}
| 3.26 |
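The suffix-probing loop above is easy to reproduce outside Hudi: keep appending an increasing offset to the base timestamp until the candidate is not already in use. A small sketch, assuming the suffix is simply a zero-padded counter appended to the base time (Hudi's actual suffix format lives in HoodieTableMetadataUtil and may differ):

import java.util.Set;

public class UniqueInstantTime {

  /** Probe base + zero-padded offset until an unused candidate is found. */
  static String generateUnique(String baseTime, Set<String> existingInstants) {
    for (int offset = 0; ; offset++) {
      String candidate = baseTime + String.format("%03d", offset);
      if (!existingInstants.contains(candidate)) {
        return candidate;
      }
    }
  }

  public static void main(String[] args) {
    Set<String> existing = Set.of("20240101101010000", "20240101101010001");
    // the first two suffixes are taken, so the third candidate is returned
    System.out.println(generateUnique("20240101101010", existing)); // 20240101101010002
  }
}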
hudi_HoodieBackedTableMetadataWriter_m0_rdh
|
/**
* Initialize the metadata table if needed.
*
* @param dataMetaClient
* - meta client for the data table
* @param inflightInstantTimestamp
* - timestamp of an instant in progress on the dataset
* @throws IOException
* on errors
*/
protected boolean m0(HoodieTableMetaClient dataMetaClient, Option<String> inflightInstantTimestamp) throws IOException {
HoodieTimer timer = HoodieTimer.start();
List<MetadataPartitionType> partitionsToInit = new ArrayList<>(MetadataPartitionType.values().length);
try {
boolean exists = metadataTableExists(dataMetaClient);
if (!exists) {
// FILES partition is always required
partitionsToInit.add(FILES);
}
// check if any of the enabled partition types needs to be initialized
// NOTE: It needs to be guarded by async index config because if that is enabled then initialization happens through the index scheduler.
if (!dataWriteConfig.isMetadataAsyncIndex()) {
Set<String> completedPartitions = dataMetaClient.getTableConfig().getMetadataPartitions();
LOG.info("Async metadata indexing disabled and following partitions already initialized: " + completedPartitions);
// TODO: fix the filter to check for exact partition name, e.g. completedPartitions could have func_index_datestr,
// but now the user is trying to initialize the func_index_dayhour partition.
this.enabledPartitionTypes.stream().filter(p -> (!completedPartitions.contains(p.getPartitionPath())) && (!FILES.equals(p))).forEach(partitionsToInit::add);
}
if (partitionsToInit.isEmpty()) {
// No partitions left to initialize, since all the metadata-enabled partitions are either already initialized
// or currently in the process of initialization.
initMetadataReader();
return true;
}
// If there is no commit on the dataset yet, use the SOLO_COMMIT_TIMESTAMP as the instant time for initial commit
// Otherwise, we use the timestamp of the latest completed action.
String initializationTime = dataMetaClient.getActiveTimeline().filterCompletedInstants().lastInstant().map(HoodieInstant::getTimestamp).orElse(SOLO_COMMIT_TIMESTAMP);
// Initialize partitions for the first time using data from the files on the file system
if (!initializeFromFilesystem(initializationTime, partitionsToInit, inflightInstantTimestamp)) {
LOG.error("Failed to initialize MDT from filesystem");return false;
}
metrics.ifPresent(m -> m.updateMetrics(HoodieMetadataMetrics.INITIALIZE_STR, timer.endTimer()));
return true;
} catch (IOException e) {
LOG.error("Failed to initialize metadata table. Disabling the writer.", e);
return false;
}
}
| 3.26 |
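Picking the initialization timestamp follows a simple rule: the latest completed instant on the data timeline, or a fixed placeholder when the table has no commits yet. The same fallback pattern in plain Java, with an illustrative placeholder constant standing in for SOLO_COMMIT_TIMESTAMP:

import java.util.List;

public class InitializationTime {

  // Illustrative stand-in for Hudi's SOLO_COMMIT_TIMESTAMP placeholder.
  static final String SOLO_COMMIT_TIMESTAMP = "00000000000000";

  /** Latest completed instant timestamp, or the placeholder if the timeline is empty. */
  static String pickInitializationTime(List<String> completedInstantsOldestFirst) {
    return completedInstantsOldestFirst.stream()
        .reduce((first, second) -> second)      // last element of an ordered timeline
        .orElse(SOLO_COMMIT_TIMESTAMP);
  }

  public static void main(String[] args) {
    System.out.println(pickInitializationTime(List.of()));                   // placeholder for an empty table
    System.out.println(pickInitializationTime(List.of("202401", "202402"))); // 202402
  }
}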
hudi_HoodieBackedTableMetadataWriter_getFileNameToSizeMap_rdh
|
// Returns a map of filenames mapped to their lengths
Map<String, Long> getFileNameToSizeMap() {
return filenameToSizeMap;
}
| 3.26 |
hudi_HoodieBackedTableMetadataWriter_getFunctionalIndexUpdates_rdh
|
/**
* Loads the file slices touched by the commit due to given instant time and returns the records for the functional index.
*
* @param commitMetadata
* {@code HoodieCommitMetadata}
* @param indexPartition
* partition name of the functional index
* @param instantTime
* timestamp of the current update commit
*/
private HoodieData<HoodieRecord> getFunctionalIndexUpdates(HoodieCommitMetadata commitMetadata, String indexPartition, String instantTime) throws Exception {
HoodieFunctionalIndexDefinition indexDefinition = getFunctionalIndexDefinition(indexPartition);
List<Pair<String, FileSlice>> partitionFileSlicePairs = new ArrayList<>();
HoodieTableFileSystemView fsView = HoodieTableMetadataUtil.getFileSystemView(metadataMetaClient);
commitMetadata.getPartitionToWriteStats().forEach((dataPartition, value) -> {
List<FileSlice> fileSlices = getPartitionLatestFileSlicesIncludingInflight(metadataMetaClient, Option.ofNullable(fsView), dataPartition);
fileSlices.forEach(fileSlice -> {
// Filter log files for the instant time and add to this partition fileSlice pairs
List<HoodieLogFile> logFilesForInstant = fileSlice.getLogFiles().filter(logFile -> logFile.getDeltaCommitTime().equals(instantTime)).collect(Collectors.toList());
partitionFileSlicePairs.add(Pair.of(dataPartition, new FileSlice(fileSlice.getFileGroupId(), fileSlice.getBaseInstantTime(), fileSlice.getBaseFile().orElse(null), logFilesForInstant)));
});
});
int parallelism = Math.min(partitionFileSlicePairs.size(), dataWriteConfig.getMetadataConfig().getFunctionalIndexParallelism());
Schema v103 = getProjectedSchemaForFunctionalIndex(indexDefinition, f1);
return getFunctionalIndexRecords(partitionFileSlicePairs, indexDefinition,
f1, parallelism, v103, hadoopConf);
}
| 3.26 |
hudi_HoodieBackedTableMetadataWriter_deletePendingIndexingInstant_rdh
|
/**
* Deletes any pending indexing instant, if it exists.
* It reads the plan from indexing.requested file and deletes both requested and inflight instants,
* if the partition path in the plan matches with the given partition path.
*/
private static void deletePendingIndexingInstant(HoodieTableMetaClient metaClient, String partitionPath) {
metaClient.reloadActiveTimeline().filterPendingIndexTimeline().getInstantsAsStream().filter(instant -> REQUESTED.equals(instant.getState())).forEach(instant -> {
try {
HoodieIndexPlan indexPlan = deserializeIndexPlan(metaClient.getActiveTimeline().readIndexPlanAsBytes(instant).get());
if (indexPlan.getIndexPartitionInfos().stream().anyMatch(indexPartitionInfo -> indexPartitionInfo.getMetadataPartitionPath().equals(partitionPath))) {
metaClient.getActiveTimeline().deleteInstantFileIfExists(instant);
metaClient.getActiveTimeline().deleteInstantFileIfExists(getIndexInflightInstant(instant.getTimestamp()));
}
} catch (IOException e) {
LOG.error("Failed to delete the instant file corresponding to " + instant);
} });
}
| 3.26 |
hudi_HoodieBackedTableMetadataWriter_listAllPartitionsFromFilesystem_rdh
|
/**
* Function to find hoodie partitions and list files in them in parallel.
*
* @param initializationTime
* Files with a timestamp after this are ignored
* @return List consisting of {@code DirectoryInfo} for each partition found.
*/
private List<DirectoryInfo> listAllPartitionsFromFilesystem(String initializationTime) {
List<SerializablePath> pathsToList = new LinkedList<>();
pathsToList.add(new SerializablePath(new CachingPath(dataWriteConfig.getBasePath())));
List<DirectoryInfo> v54 = new LinkedList<>();
final int fileListingParallelism = metadataWriteConfig.getFileListingParallelism();
SerializableConfiguration conf = new SerializableConfiguration(f1.getHadoopConf());
final String dirFilterRegex = dataWriteConfig.getMetadataConfig().getDirectoryFilterRegex();
final String datasetBasePath = f1.getBasePath();
SerializablePath serializableBasePath = new SerializablePath(new CachingPath(datasetBasePath));
while (!pathsToList.isEmpty()) {
// In each round we will list a section of directories
int numDirsToList = Math.min(fileListingParallelism, pathsToList.size());
// List all directories in parallel
engineContext.setJobStatus(this.getClass().getSimpleName(), ("Listing " + numDirsToList) + " partitions from filesystem");
List<DirectoryInfo> processedDirectories = engineContext.map(pathsToList.subList(0, numDirsToList), path -> {
FileSystem fs = path.get().getFileSystem(conf.get());
String relativeDirPath = FSUtils.getRelativePartitionPath(serializableBasePath.get(), path.get());
return new DirectoryInfo(relativeDirPath, fs.listStatus(path.get()), initializationTime);
}, numDirsToList);
pathsToList = new LinkedList<>(pathsToList.subList(numDirsToList, pathsToList.size()));
// If the listing reveals a directory, add it to queue. If the listing reveals a hoodie partition, add it to
// the results.
for (DirectoryInfo dirInfo : processedDirectories) {
if (!dirFilterRegex.isEmpty()) {
final String relativePath = dirInfo.getRelativePath();
if ((!relativePath.isEmpty()) && relativePath.matches(dirFilterRegex)) {
LOG.info((("Ignoring directory " + relativePath) + " which matches the filter regex ") + dirFilterRegex); continue;
}
}
if (dirInfo.isHoodiePartition()) {
// Add to result
v54.add(dirInfo);
} else {
// Add sub-dirs to the queue
pathsToList.addAll(dirInfo.getSubDirectories().stream().map(path -> new SerializablePath(new CachingPath(path.toUri()))).collect(Collectors.toList()));
}
}
}
return v54;
}
| 3.26 |
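The listing loop above is essentially a breadth-first traversal: directories are pulled off a queue in batches, partition directories are collected, and plain directories are pushed back for the next round (the real method lists each batch in parallel through the engine context, which is omitted here). A sketch of the same traversal with java.nio, treating a directory as a "partition" when it contains a marker file; the marker name is an assumption used for illustration:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;
import java.util.stream.Stream;

public class PartitionLister {

  // Assumed marker file name identifying a hoodie partition directory.
  private static final String PARTITION_MARKER = ".hoodie_partition_metadata";

  /** Breadth-first walk: collect marker-bearing directories, enqueue the rest for further listing. */
  static List<Path> listPartitions(Path basePath) throws IOException {
    List<Path> partitions = new ArrayList<>();
    Deque<Path> toList = new ArrayDeque<>();
    toList.add(basePath);
    while (!toList.isEmpty()) {
      Path dir = toList.poll();
      if (Files.exists(dir.resolve(PARTITION_MARKER))) {
        partitions.add(dir);          // a hoodie partition: record it, do not descend further
        continue;
      }
      try (Stream<Path> children = Files.list(dir)) {
        children.filter(Files::isDirectory).forEach(toList::add); // plain directory: descend into it
      }
    }
    return partitions;
  }

  public static void main(String[] args) throws IOException {
    System.out.println(listPartitions(Path.of(".")));
  }
}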
hudi_HoodieBackedTableMetadataWriter_getRecordIndexUpdates_rdh
|
/**
* Return records that represent update to the record index due to write operation on the dataset.
*
* @param writeStatuses
* {@code WriteStatus} from the write operation
*/
private HoodieData<HoodieRecord> getRecordIndexUpdates(HoodieData<WriteStatus> writeStatuses) {
HoodiePairData<String, HoodieRecordDelegate> recordKeyDelegatePairs = null;
// if update partition path is true, we might get two records per key (a delete in the older partition and an insert into the new partition)
// and hence we might have to reduce by key before ingesting into the RLI partition.
if (dataWriteConfig.getRecordIndexUpdatePartitionPath()) {
recordKeyDelegatePairs = writeStatuses.map(writeStatus -> writeStatus.getWrittenRecordDelegates().stream().map(recordDelegate -> Pair.of(recordDelegate.getRecordKey(), recordDelegate))).flatMapToPair(Stream::iterator).reduceByKey((recordDelegate1, recordDelegate2) -> {
if (recordDelegate1.getRecordKey().equals(recordDelegate2.getRecordKey())) {
if ((!recordDelegate1.getNewLocation().isPresent()) && (!recordDelegate2.getNewLocation().isPresent())) {
throw new HoodieIOException((("Both version of records do not have location set. Record V1 " + recordDelegate1.toString()) + ", Record V2 ") + recordDelegate2.toString());
}
if (recordDelegate1.getNewLocation().isPresent()) {
return recordDelegate1;
} else {
// if record delegate 1 does not have location set, record delegate 2 should have location set.
return recordDelegate2;
}
} else {
return recordDelegate1;
}
}, Math.max(1, writeStatuses.getNumPartitions()));
} else {
// if update partition path = false, we should get only one entry per record key.
recordKeyDelegatePairs = writeStatuses.flatMapToPair(((SerializableFunction<WriteStatus, Iterator<? extends Pair<String, HoodieRecordDelegate>>>) (writeStatus -> writeStatus.getWrittenRecordDelegates().stream().map(rec -> Pair.of(rec.getRecordKey(), rec)).iterator())));
}
return recordKeyDelegatePairs.map(writeStatusRecordDelegate -> {
HoodieRecordDelegate recordDelegate = writeStatusRecordDelegate.getValue();
HoodieRecord hoodieRecord = null;
Option<HoodieRecordLocation> newLocation = recordDelegate.getNewLocation();
if (newLocation.isPresent()) {
if (recordDelegate.getCurrentLocation().isPresent()) {
// This is an update, no need to update index if the location has not changed
// newLocation should have the same fileID as currentLocation. The instantTimes differ as newLocation's
// instantTime refers to the current commit which was completed.
if (!recordDelegate.getCurrentLocation().get().getFileId().equals(newLocation.get().getFileId())) {
final String msg = String.format("Detected update in location of record with key %s from %s " + " to %s. The fileID should not change.", recordDelegate, recordDelegate.getCurrentLocation().get(), newLocation.get());
LOG.error(msg);
throw new HoodieMetadataException(msg);
}
// for updates, we can skip updating RLI partition in MDT
} else {
hoodieRecord = HoodieMetadataPayload.createRecordIndexUpdate(recordDelegate.getRecordKey(), recordDelegate.getPartitionPath(), newLocation.get().getFileId(), newLocation.get().getInstantTime(), dataWriteConfig.getWritesFileIdEncoding());
}
} else {
// Delete existing index for a deleted record
hoodieRecord = HoodieMetadataPayload.createRecordIndexDelete(recordDelegate.getRecordKey());
}
return hoodieRecord;
}).filter(Objects::nonNull);
}
| 3.26 |
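When update-partition-path is enabled, two index entries can be emitted for one record key (a delete in the old partition and an insert in the new one), and the reduce step keeps the one that carries a new location. A plain-Java sketch of that merge rule using a simple value class instead of HoodieRecordDelegate; all names are illustrative:

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;

public class RecordIndexMerge {

  /** Minimal stand-in for a written-record delegate: record key plus optional new file location. */
  record Delegate(String recordKey, Optional<String> newFileId) {}

  /** Prefer the delegate that has a new location; fail if neither does. */
  static Delegate merge(Delegate a, Delegate b) {
    if (a.newFileId().isEmpty() && b.newFileId().isEmpty()) {
      throw new IllegalStateException("Neither version of record " + a.recordKey() + " has a location set");
    }
    return a.newFileId().isPresent() ? a : b;
  }

  public static void main(String[] args) {
    List<Delegate> written = List.of(
        new Delegate("key-1", Optional.empty()),          // delete from the old partition
        new Delegate("key-1", Optional.of("file-42")));   // insert into the new partition
    Map<String, Delegate> reduced = new HashMap<>();
    written.forEach(d -> reduced.merge(d.recordKey(), d, RecordIndexMerge::merge));
    System.out.println(reduced.get("key-1").newFileId().get()); // file-42 wins the reduce
  }
}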
hudi_HoodieBackedTableMetadataWriter_performTableServices_rdh
|
/**
* Optimize the metadata table by running compaction, clean and archive as required.
* <p>
* Don't perform optimization if there are inflight operations on the dataset. This is for two reasons:
* - The compaction will contain the correct data as all failed operations have been rolled back.
* - Clean/compaction etc. will have the highest timestamp on the MDT and we won't be adding new operations
* with smaller timestamps to metadata table (makes for easier debugging)
* <p>
* This adds the limitations that long-running async operations (clustering, etc.) may cause delay in such MDT
* optimizations. We will relax this after MDT code has been hardened.
*/
@Override
public void performTableServices(Option<String> inFlightInstantTimestamp) {
HoodieTimer metadataTableServicesTimer = HoodieTimer.start();
boolean allTableServicesExecutedSuccessfullyOrSkipped = true;
BaseHoodieWriteClient<?, I, ?, ?> writeClient = getWriteClient();
try {
// Run any pending table services operations.
runPendingTableServicesOperations(writeClient);
Option<HoodieInstant> lastInstant = metadataMetaClient.reloadActiveTimeline().getDeltaCommitTimeline().filterCompletedInstants().lastInstant();
if (!lastInstant.isPresent()) {
return;
}
// Check and run clean operations.
String latestDeltacommitTime = lastInstant.get().getTimestamp();
LOG.info(("Latest deltacommit time found is " + latestDeltacommitTime) + ", running clean operations.");
cleanIfNecessary(writeClient, latestDeltacommitTime);
// Do timeline validation before scheduling compaction/logCompaction operations.
if (m3(inFlightInstantTimestamp, latestDeltacommitTime)) {
compactIfNecessary(writeClient, latestDeltacommitTime);
}
writeClient.archive();
LOG.info("All the table services operations on MDT completed successfully");
} catch (Exception e) {
LOG.error("Exception in running table services on metadata table", e);
allTableServicesExecutedSuccessfullyOrSkipped = false;
throw e;
} finally {
long timeSpent = metadataTableServicesTimer.endTimer();
metrics.ifPresent(m -> m.updateMetrics(HoodieMetadataMetrics.TABLE_SERVICE_EXECUTION_DURATION, timeSpent));
if (allTableServicesExecutedSuccessfullyOrSkipped) {
metrics.ifPresent(m -> m.incrementMetric(HoodieMetadataMetrics.TABLE_SERVICE_EXECUTION_STATUS, 1));
} else {
metrics.ifPresent(m -> m.incrementMetric(HoodieMetadataMetrics.TABLE_SERVICE_EXECUTION_STATUS, -1));
}
}
}
| 3.26 |
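The try/finally shape in performTableServices — time the whole block, flip a success flag on failure, and always publish the duration plus a +1/-1 status metric — is a generic pattern. A compact sketch with System.nanoTime standing in for HoodieTimer and a map standing in for the metrics registry; names and metric keys are illustrative:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

public class TableServiceRunner {

  private final Map<String, Long> metrics = new HashMap<>();

  /** Run a maintenance task, always recording its duration and a +1/-1 success status. */
  void runWithMetrics(Runnable tableServices) {
    long start = System.nanoTime();
    boolean succeeded = true;
    try {
      tableServices.run();
    } catch (RuntimeException e) {
      succeeded = false;
      throw e;                                   // surface the failure after marking status
    } finally {
      long elapsedMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
      metrics.put("table_service.duration_ms", elapsedMs);
      metrics.merge("table_service.status", succeeded ? 1L : -1L, Long::sum);
    }
  }

  public static void main(String[] args) {
    TableServiceRunner runner = new TableServiceRunner();
    runner.runWithMetrics(() -> { /* compaction, clean, archive would run here */ });
    System.out.println(runner.metrics);
  }
}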
hudi_HoodieBackedTableMetadataWriter_initializeFileGroups_rdh
|
/**
* Initialize file groups for a partition. For file listing, we just have one file group.
* <p>
* All FileGroups for a given metadata partition has a fixed prefix as per the {@link MetadataPartitionType#getFileIdPrefix()}.
* Each file group is suffixed with 4 digits with increments of 1 starting with 0000.
* <p>
* Let's say we configure 10 file groups for record level index partition, and prefix as "record-index-bucket-"
* File groups will be named as :
* record-index-bucket-0000, record-index-bucket-0001, ..., record-index-bucket-0009
*/
private void initializeFileGroups(HoodieTableMetaClient dataMetaClient, MetadataPartitionType metadataPartition, String instantTime, int fileGroupCount) throws IOException {
// Remove all existing file groups or leftover files in the partition
final Path partitionPath = new Path(metadataWriteConfig.getBasePath(), metadataPartition.getPartitionPath());
FileSystem fs = metadataMetaClient.getFs();
try {
final FileStatus[] existingFiles = fs.listStatus(partitionPath);
if (existingFiles.length > 0) {
LOG.warn("Deleting all existing files found in MDT partition " + metadataPartition.getPartitionPath());
fs.delete(partitionPath, true);
ValidationUtils.checkState(!fs.exists(partitionPath), "Failed to delete MDT partition " + metadataPartition);
}
} catch (FileNotFoundException ignored) {
// If the partition did not exist yet, it will be created below
}
// Archival of data table has a dependency on compaction(base files) in metadata table.
// It is assumed that as of time Tx of base instant (/compaction time) in metadata table,
// all commits in data table is in sync with metadata table. So, we always start with log file for any fileGroup.
// Even though the initial commit is a bulkInsert which creates the first baseFiles directly, we still
// create a log file first. This ensures that if any fileGroups of the MDT index do not receive any records
// during initial commit, then the fileGroup would still be recognized (as a FileSlice with no baseFiles but a
// valid logFile). Since these log files being created have no content, it is safe to add them here before
// the bulkInsert.
final String msg = String.format("Creating %d file groups for partition %s with base fileId %s at instant time %s", fileGroupCount,
metadataPartition.getPartitionPath(),
metadataPartition.getFileIdPrefix(), instantTime);
LOG.info(msg);
final List<String> fileGroupFileIds = IntStream.range(0, fileGroupCount).mapToObj(i -> HoodieTableMetadataUtil.getFileIDForFileGroup(metadataPartition, i)).collect(Collectors.toList());
ValidationUtils.checkArgument(fileGroupFileIds.size() == fileGroupCount);
engineContext.setJobStatus(this.getClass().getSimpleName(), msg);
engineContext.foreach(fileGroupFileIds, fileGroupFileId -> {
try {
final Map<HeaderMetadataType, String> blockHeader = Collections.singletonMap(HeaderMetadataType.INSTANT_TIME, instantTime);
final HoodieDeleteBlock block = new HoodieDeleteBlock(Collections.emptyList(), false,
blockHeader);
HoodieLogFormat.Writer writer = HoodieLogFormat.newWriterBuilder().onParentPath(FSUtils.getPartitionPath(metadataWriteConfig.getBasePath(), metadataPartition.getPartitionPath())).withFileId(fileGroupFileId).withDeltaCommit(instantTime).withLogVersion(HoodieLogFile.LOGFILE_BASE_VERSION).withFileSize(0L).withSizeThreshold(metadataWriteConfig.getLogFileMaxSize()).withFs(dataMetaClient.getFs()).withRolloverLogWriteToken(HoodieLogFormat.DEFAULT_WRITE_TOKEN).withLogWriteToken(HoodieLogFormat.DEFAULT_WRITE_TOKEN).withFileExtension(HoodieLogFile.DELTA_EXTENSION).build();
writer.appendBlock(block);
writer.close();
} catch (InterruptedException e) {
throw new HoodieException("Failed to create fileGroup " + fileGroupFileId + " for partition " + metadataPartition.getPartitionPath(), e);
}
}, fileGroupFileIds.size());
}
| 3.26 |
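The javadoc above describes the naming scheme: every file group of an MDT partition shares the partition's fileId prefix and gets a fixed-width, zero-padded 4-digit suffix starting at 0000. A small sketch of generating those IDs; the prefix string here is illustrative, while Hudi derives it from MetadataPartitionType#getFileIdPrefix:

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class FileGroupIds {

  /** Build fileGroupCount IDs: prefix + zero-padded 4-digit index. */
  static List<String> fileGroupIds(String fileIdPrefix, int fileGroupCount) {
    return IntStream.range(0, fileGroupCount)
        .mapToObj(i -> String.format("%s%04d", fileIdPrefix, i))
        .collect(Collectors.toList());
  }

  public static void main(String[] args) {
    // e.g. record-index-bucket-0000 ... record-index-bucket-0009
    fileGroupIds("record-index-bucket-", 10).forEach(System.out::println);
  }
}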
hudi_CleanerUtils_rollbackFailedWrites_rdh
|
/**
* Execute {@link HoodieFailedWritesCleaningPolicy} to rollback failed writes for different actions.
*
* @param cleaningPolicy
* @param actionType
* @param rollbackFailedWritesFunc
*/
public static void rollbackFailedWrites(HoodieFailedWritesCleaningPolicy cleaningPolicy, String actionType, Functions.Function0<Boolean> rollbackFailedWritesFunc) {
switch (actionType) {
case HoodieTimeline.CLEAN_ACTION :
if (cleaningPolicy.isEager()) {
// No need to do any special cleanup for failed operations during clean
return;
} else if (cleaningPolicy.isLazy()) {
LOG.info("Cleaned failed attempts if any");
// Perform rollback of failed operations for all types of actions during clean
rollbackFailedWritesFunc.apply();
return;
}
// No action needed for cleaning policy NEVER
break;
case COMMIT_ACTION :
// For any other actions, perform rollback of failed writes
if (cleaningPolicy.isEager()) {
LOG.info("Cleaned failed attempts if any");
rollbackFailedWritesFunc.apply();
return;
}
break;
default:
throw new IllegalArgumentException("Unsupported action type " + actionType);
}
}
| 3.26 |
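The dispatch in rollbackFailedWrites boils down to: for a clean action, roll back failed writes only under the lazy policy; for a commit action, only under the eager policy; otherwise do nothing. A compact sketch of that decision table with an enum and a Supplier callback; the names are illustrative stand-ins for Hudi's policy and function types:

import java.util.function.Supplier;

public class FailedWritesRollback {

  enum CleaningPolicy { EAGER, LAZY, NEVER }

  /** Apply the policy/action decision table; returns true if a rollback was attempted. */
  static boolean maybeRollback(CleaningPolicy policy, String actionType, Supplier<Boolean> rollbackFunc) {
    switch (actionType) {
      case "clean":
        if (policy == CleaningPolicy.LAZY) {
          return rollbackFunc.get();   // lazy cleaning: failed writes are cleaned up during clean
        }
        return false;                  // eager/never: nothing special to do during clean
      case "commit":
        if (policy == CleaningPolicy.EAGER) {
          return rollbackFunc.get();   // eager cleaning: failed writes are rolled back before commit
        }
        return false;
      default:
        throw new IllegalArgumentException("Unsupported action type " + actionType);
    }
  }

  public static void main(String[] args) {
    System.out.println(maybeRollback(CleaningPolicy.EAGER, "commit", () -> true));  // true
    System.out.println(maybeRollback(CleaningPolicy.EAGER, "clean", () -> true));   // false
  }
}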
hudi_CleanerUtils_getCleanerMetadata_rdh
|
/**
* Get Latest Version of Hoodie Cleaner Metadata - Output of cleaner operation.
*
* @return Latest version of Clean metadata corresponding to clean instant
* @throws IOException
*/
public static HoodieCleanMetadata getCleanerMetadata(HoodieTableMetaClient metaClient, byte[] details) throws IOException {
CleanMetadataMigrator metadataMigrator = new CleanMetadataMigrator(metaClient);
HoodieCleanMetadata cleanMetadata = TimelineMetadataUtils.deserializeHoodieCleanMetadata(details);
return metadataMigrator.upgradeToLatest(cleanMetadata, cleanMetadata.getVersion());
}
| 3.26 |
hudi_CleanerUtils_convertToHoodieCleanFileInfoList_rdh
|
/**
* Convert list of cleanFileInfo instances to list of avro-generated HoodieCleanFileInfo instances.
*
* @param cleanFileInfoList
* @return list of avro-generated {@link HoodieCleanFileInfo} instances
*/
public static List<HoodieCleanFileInfo> convertToHoodieCleanFileInfoList(List<CleanFileInfo> cleanFileInfoList) {
return cleanFileInfoList.stream().map(CleanFileInfo::toHoodieFileCleanInfo).collect(Collectors.toList());
}
| 3.26 |
hudi_CleanerUtils_getCleanerPlan_rdh
|
/**
* Get Latest version of cleaner plan corresponding to a clean instant.
*
* @param metaClient
* Hoodie Table Meta Client
* @return Cleaner plan corresponding to clean instant
* @throws IOException
*/
public static HoodieCleanerPlan getCleanerPlan(HoodieTableMetaClient metaClient, byte[] details) throws IOException {
CleanPlanMigrator cleanPlanMigrator = new CleanPlanMigrator(metaClient);
HoodieCleanerPlan cleanerPlan = TimelineMetadataUtils.deserializeAvroMetadata(details, HoodieCleanerPlan.class);
return cleanPlanMigrator.upgradeToLatest(cleanerPlan, cleanerPlan.getVersion());
}
| 3.26 |
hudi_ActiveAction_getPendingAction_rdh
|
/**
* A COMPACTION action eventually becomes COMMIT when completed.
*/
public String getPendingAction() {
return getPendingInstant().getAction();
}
| 3.26 |
hudi_HoodieCreateHandle_close_rdh
|
/**
* Performs actions to durably persist the current changes and returns a WriteStatus object.
*/
@Override
public List<WriteStatus> close() {
LOG.info((("Closing the file " + writeStatus.getFileId()) + " as we are done with all the records ") + recordsWritten);
try {
if (isClosed()) {
// Handle has already been closed
return Collections.emptyList();
}
markClosed();
if (fileWriter != null) {
fileWriter.close();
fileWriter = null;
}
setupWriteStatus();
LOG.info(String.format("CreateHandle for partitionPath %s fileID %s, took %d ms.", writeStatus.getStat().getPartitionPath(), writeStatus.getStat().getFileId(), writeStatus.getStat().getRuntimeStats().getTotalCreateTime()));
return Collections.singletonList(writeStatus);
} catch (IOException e) {
throw new HoodieInsertException("Failed to close the Insert Handle for path " + path, e);
}
}
| 3.26 |
hudi_HoodieCreateHandle_setupWriteStatus_rdh
|
/**
* Set up the write status.
*
* @throws IOException
* if error occurs
*/
protected void setupWriteStatus() throws IOException {
HoodieWriteStat stat = writeStatus.getStat();
stat.setPartitionPath(writeStatus.getPartitionPath());
stat.setNumWrites(recordsWritten);
stat.setNumDeletes(recordsDeleted);
stat.setNumInserts(insertRecordsWritten);
stat.setPrevCommit(HoodieWriteStat.NULL_COMMIT);
stat.setFileId(writeStatus.getFileId());
stat.setPath(new Path(config.getBasePath()), path);
stat.setTotalWriteErrors(writeStatus.getTotalErrorRecords());
long fileSize = FSUtils.getFileSize(fs, path);
stat.setTotalWriteBytes(fileSize);
stat.setFileSizeInBytes(fileSize);
RuntimeStats runtimeStats = new RuntimeStats();
runtimeStats.setTotalCreateTime(timer.endTimer());
stat.setRuntimeStats(runtimeStats);
}
| 3.26 |
hudi_HoodieCreateHandle_doWrite_rdh
|
/**
* Perform the actual writing of the given record into the backing file.
*/
@Override
protected void doWrite(HoodieRecord record, Schema schema, TypedProperties props) {
Option<Map<String, String>> recordMetadata = record.getMetadata();
try {
if ((!HoodieOperation.isDelete(record.getOperation())) && (!record.isDelete(schema, config.getProps()))) {
if (record.shouldIgnore(schema, config.getProps())) {
return;
}
MetadataValues metadataValues = new MetadataValues().setFileName(path.getName());
HoodieRecord populatedRecord = record.prependMetaFields(schema, writeSchemaWithMetaFields, metadataValues, config.getProps());
if (preserveMetadata) {
fileWriter.write(record.getRecordKey(), populatedRecord, writeSchemaWithMetaFields);
} else {
fileWriter.writeWithMetadata(record.getKey(),
populatedRecord, writeSchemaWithMetaFields);
}
// Update the new location of record, so we know where to find it next
record.unseal();
record.setNewLocation(newRecordLocation);
record.seal();
recordsWritten++;
insertRecordsWritten++;
} else {
recordsDeleted++;
}
writeStatus.markSuccess(record, recordMetadata);
// Deflate the record payload after recording success. This lets users access the payload while
// marking the record as successful.
record.deflate();
} catch (Throwable t) {
// Not throwing exception from here, since we don't want to fail the entire job
// for a single record
writeStatus.markFailure(record, t, recordMetadata);
LOG.error("Error writing record " + record, t);
}
}
| 3.26 |
hudi_HoodieCreateHandle_write_rdh
|
/**
* Writes all records passed.
*/
public void write() {
Iterator<String> keyIterator;
if (hoodieTable.requireSortedRecords()) {
// Sorting the keys limits the amount of extra memory required for writing sorted records
keyIterator = recordMap.keySet().stream().sorted().iterator();
} else {
keyIterator = recordMap.keySet().stream().iterator();
}
while (keyIterator.hasNext()) {
final String key = keyIterator.next();
HoodieRecord<T> record = recordMap.get(key);
write(record, useWriterSchema ? writeSchemaWithMetaFields : writeSchema, config.getProps());
}
}
| 3.26 |
hudi_DeletePartitionUtils_m0_rdh
|
/**
* Check if there are any pending table service actions (requested + inflight) on a table affecting the partitions to
* be dropped.
* <p>
* This check is to prevent a drop-partition from proceeding should a partition have a table service action in
* the pending stage. If this is allowed to happen, the filegroup that is an input for a table service action, might
* also be a candidate for being replaced. As such, when the table service action and drop-partition commits are
* committed, there will be two commits replacing a single filegroup.
* <p>
* For example, a timeline might have an execution order as such:
* 000.replacecommit.requested (clustering filegroup_1 + filegroup_2 -> filegroup_3)
* 001.replacecommit.requested, 001.replacecommit.inflight, 0001.replacecommit (drop_partition to replace filegroup_1)
* 000.replacecommit.inflight (clustering is executed now)
* 000.replacecommit (clustering completed)
* For an execution order as shown above, 000.replacecommit and 001.replacecommit will both flag filegroup_1 to be replaced.
* This will cause downstream duplicate key errors when a map is being constructed.
*
* @param table
* Table to perform validation on
* @param partitionsToDrop
* List of partitions to drop
*/
public static void m0(HoodieTable table, List<String> partitionsToDrop) {
List<String> instantsOfOffendingPendingTableServiceAction = new ArrayList<>();
// ensure that there are no pending inflight clustering/compaction operations involving this partition
SyncableFileSystemView fileSystemView = ((SyncableFileSystemView) (table.getSliceView()));
// separating the iteration of pending compaction operations from clustering as they return different stream types
Stream.concat(fileSystemView.getPendingCompactionOperations(), fileSystemView.getPendingLogCompactionOperations()).filter(op -> partitionsToDrop.contains(op.getRight().getPartitionPath())).forEach(op -> instantsOfOffendingPendingTableServiceAction.add(op.getLeft()));
fileSystemView.getFileGroupsInPendingClustering().filter(fgIdInstantPair -> partitionsToDrop.contains(fgIdInstantPair.getLeft().getPartitionPath())).forEach(x -> instantsOfOffendingPendingTableServiceAction.add(x.getRight().getTimestamp()));
if (instantsOfOffendingPendingTableServiceAction.size() > 0) {
throw new HoodieDeletePartitionException((((("Failed to drop partitions. " + "Please ensure that there are no pending table service actions (clustering/compaction) for the partitions to be deleted: ") + partitionsToDrop) + ". ") + "Instant(s) of offending pending table service action: ") + instantsOfOffendingPendingTableServiceAction.stream().distinct().collect(Collectors.toList()));
}
}
| 3.26 |
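The guard above can be phrased as: collect the instants of any pending compaction/clustering operation whose partition is in the drop list, and fail if that collection is non-empty. A plain-Java sketch of the same check over a simple list of (instant, partition) pairs; the types are illustrative, not Hudi's view APIs:

import java.util.List;
import java.util.stream.Collectors;

public class DropPartitionGuard {

  /** A pending table-service operation: the instant that scheduled it and the partition it touches. */
  record PendingOp(String instant, String partitionPath) {}

  /** Throw if any pending operation touches a partition that is about to be dropped. */
  static void checkNoPendingServicesOnDroppedPartitions(List<PendingOp> pendingOps, List<String> partitionsToDrop) {
    List<String> offending = pendingOps.stream()
        .filter(op -> partitionsToDrop.contains(op.partitionPath()))
        .map(PendingOp::instant)
        .distinct()
        .collect(Collectors.toList());
    if (!offending.isEmpty()) {
      throw new IllegalStateException("Cannot drop partitions " + partitionsToDrop
          + "; pending table service instants: " + offending);
    }
  }

  public static void main(String[] args) {
    List<PendingOp> pending = List.of(new PendingOp("000", "2024/01/01"));
    checkNoPendingServicesOnDroppedPartitions(pending, List.of("2024/02/01")); // passes
    try {
      checkNoPendingServicesOnDroppedPartitions(pending, List.of("2024/01/01"));
    } catch (IllegalStateException e) {
      System.out.println(e.getMessage());                                      // guard rejects the drop
    }
  }
}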
hudi_ViewStorageProperties_createProperties_rdh
|
/**
* Initialize the {@link #FILE_NAME} meta file.
*/
public static void createProperties(String basePath, FileSystemViewStorageConfig config, Configuration flinkConf) throws IOException {
Path propertyPath = getPropertiesFilePath(basePath, flinkConf.getString(FlinkOptions.WRITE_CLIENT_ID));
FileSystem fs = FSUtils.getFs(basePath, HadoopConfigurations.getHadoopConf(flinkConf));
fs.delete(propertyPath, false);
try (FSDataOutputStream outputStream = fs.create(propertyPath)) {
config.getProps().store(outputStream, "Filesystem view storage properties saved on " + new Date(System.currentTimeMillis()));
}
}
| 3.26 |
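createProperties and loadFromProperties amount to a java.util.Properties round-trip over a small file: overwrite on write, parse on read. A local-filesystem sketch of the same round-trip without the Hadoop FileSystem layer; the file name and property key are illustrative:

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Date;
import java.util.Properties;

public class ViewStoragePropsRoundTrip {

  /** Overwrite any stale copy, then persist the properties with a timestamped comment. */
  static void store(Path file, Properties props) throws IOException {
    Files.deleteIfExists(file);
    try (OutputStream out = Files.newOutputStream(file)) {
      props.store(out, "Filesystem view storage properties saved on " + new Date());
    }
  }

  /** Read the properties file back into a Properties object. */
  static Properties load(Path file) throws IOException {
    Properties props = new Properties();
    try (InputStream in = Files.newInputStream(file)) {
      props.load(in);
    }
    return props;
  }

  public static void main(String[] args) throws IOException {
    Path file = Files.createTempFile("view_storage_", ".properties");
    Properties props = new Properties();
    props.setProperty("hoodie.filesystem.view.type", "MEMORY"); // illustrative key/value
    store(file, props);
    System.out.println(load(file).getProperty("hoodie.filesystem.view.type"));
  }
}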
hudi_ViewStorageProperties_loadFromProperties_rdh
|
/**
* Read the {@link FileSystemViewStorageConfig} with given table base path.
*/
public static FileSystemViewStorageConfig loadFromProperties(String basePath, Configuration conf) {
Path propertyPath = getPropertiesFilePath(basePath, conf.getString(FlinkOptions.WRITE_CLIENT_ID));
LOG.info("Loading filesystem view storage properties from " + propertyPath);
FileSystem fs = FSUtils.getFs(basePath, HadoopConfigurations.getHadoopConf(conf));
Properties props = new Properties();
try {
try (FSDataInputStream v6 = fs.open(propertyPath)) {
props.load(v6);
}
return FileSystemViewStorageConfig.newBuilder().fromProperties(props).build();
} catch (IOException e) {
throw new HoodieIOException("Could not load filesystem view storage properties from " + propertyPath, e);
}
}
| 3.26 |
hudi_ExecutionStrategyUtil_transform_rdh
|
/**
* Transform IndexedRecord into HoodieRecord.
*
* @param indexedRecord
* indexedRecord.
* @param writeConfig
* writeConfig.
* @return hoodieRecord.
* @param <T>
*/
public static <T> HoodieRecord<T> transform(IndexedRecord indexedRecord, HoodieWriteConfig writeConfig) {
GenericRecord record = ((GenericRecord) (indexedRecord));
Option<BaseKeyGenerator> keyGeneratorOpt = Option.empty();
if (!writeConfig.populateMetaFields()) {
try {
TypedProperties typedProperties = new TypedProperties(writeConfig.getProps());
keyGeneratorOpt = Option.of((BaseKeyGenerator) HoodieSparkKeyGeneratorFactory.createKeyGenerator(typedProperties));
} catch (IOException e) {
throw new HoodieIOException("Only BaseKeyGenerators are supported when meta columns are disabled ", e);
}
}
String key = KeyGenUtils.getRecordKeyFromGenericRecord(record, keyGeneratorOpt);
String partition = KeyGenUtils.getPartitionPathFromGenericRecord(record, keyGeneratorOpt);
HoodieKey hoodieKey = new HoodieKey(key, partition);
HoodieRecordPayload avroPayload = new RewriteAvroPayload(record);
HoodieRecord hoodieRecord = new HoodieAvroRecord(hoodieKey, avroPayload);
return hoodieRecord;
}
| 3.26 |
hudi_CommitUtils_getCommitActionType_rdh
|
/**
* Gets the commit action type for given table type.
* Note: Use this API only when the commit action type is not dependent on the write operation type.
* See {@link CommitUtils#getCommitActionType(WriteOperationType, HoodieTableType)} for more details.
*/
public static String getCommitActionType(HoodieTableType tableType) {
switch (tableType) {
case COPY_ON_WRITE :
return HoodieActiveTimeline.COMMIT_ACTION;
case MERGE_ON_READ :
return HoodieActiveTimeline.DELTA_COMMIT_ACTION;
default :
throw new HoodieException("Could not commit on unknown table type " + tableType);
}
}
| 3.26 |
hudi_CommitUtils_getValidCheckpointForCurrentWriter_rdh
|
/**
* Process previous commits metadata in the timeline to determine the checkpoint given a checkpoint key.
* NOTE: This is very similar in intent to DeltaSync#getLatestCommitMetadataWithValidCheckpointInfo except that
* different deployment models (deltastreamer or spark structured streaming) could have different checkpoint keys.
*
* @param timeline
* completed commits in active timeline.
* @param checkpointKey
* the checkpoint key in the extra metadata of the commit.
* @param keyToLookup
* key of interest for which checkpoint is looked up for.
* @return An optional commit metadata with latest checkpoint.
*/
public static Option<String> getValidCheckpointForCurrentWriter(HoodieTimeline timeline, String checkpointKey, String keyToLookup) {
return ((Option<String>) (timeline.getWriteTimeline().filterCompletedInstants().getReverseOrderedInstants().map(instant -> {
try {
HoodieCommitMetadata commitMetadata = HoodieCommitMetadata.fromBytes(timeline.getInstantDetails(instant).get(), HoodieCommitMetadata.class);
// process commits only with checkpoint entries
String v12 = commitMetadata.getMetadata(checkpointKey);
if (StringUtils.nonEmpty(v12)) {
// return if a checkpoint for "keyToLookup" exists.
return readCheckpointValue(v12, keyToLookup);
} else {
return Option.empty();
}
} catch (IOException e) {
throw new HoodieIOException("Failed to parse HoodieCommitMetadata for " + instant.toString(), e);
}
}).filter(Option::isPresent).findFirst().orElse(Option.empty())));
}
| 3.26 |
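The checkpoint lookup walks completed commits newest-first and returns the first one that carries a non-empty value for the checkpoint key in its extra metadata. The same reverse scan over a list of metadata maps in plain Java; the types and the checkpoint key shown here are illustrative, while Hudi reads these maps out of the commit files on the timeline:

import java.util.List;
import java.util.Map;
import java.util.Optional;

public class CheckpointLookup {

  /** Scan commits newest-first and return the first non-empty value for checkpointKey. */
  static Optional<String> latestCheckpoint(List<Map<String, String>> commitsOldestFirst, String checkpointKey) {
    for (int i = commitsOldestFirst.size() - 1; i >= 0; i--) {      // reverse order: newest commit first
      String value = commitsOldestFirst.get(i).get(checkpointKey);
      if (value != null && !value.isEmpty()) {
        return Optional.of(value);
      }
    }
    return Optional.empty();
  }

  public static void main(String[] args) {
    List<Map<String, String>> commits = List.of(
        Map.of("deltastreamer.checkpoint.key", "offset-100"),
        Map.of(),                                                   // a commit without checkpoint info
        Map.of("deltastreamer.checkpoint.key", "offset-250"));
    System.out.println(latestCheckpoint(commits, "deltastreamer.checkpoint.key")); // Optional[offset-250]
  }
}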