name | code_snippet | score
---|---|---|
hudi_SparkPreCommitValidator_m0_rdh
|
/**
 * Publish pre-commit validator run stats for a given commit action.
 */
private void m0(String instantTime, long duration) {
// Record validator duration metrics.
if (getWriteConfig().isMetricsOn()) {
HoodieTableMetaClient metaClient = getHoodieTable().getMetaClient();
Option<HoodieInstant> currentInstant = metaClient.getActiveTimeline().findInstantsAfterOrEquals(instantTime, 1).firstInstant();
metrics.reportMetrics(currentInstant.get().getAction(), getClass().getSimpleName(), duration);
}
}
| 3.26 |
hudi_SparkPreCommitValidator_validate_rdh
|
/**
 * Verify the data written as part of the specified instant.
 * Throws HoodieValidationException if any unexpected data is written (for example, data files are not readable for some reason).
*/
public void validate(String instantTime, HoodieWriteMetadata<O> writeResult, Dataset<Row> before, Dataset<Row> after) throws HoodieValidationException {
HoodieTimer timer = HoodieTimer.start();
try {
validateRecordsBeforeAndAfter(before, after, getPartitionsModified(writeResult));
} finally {
long duration = timer.endTimer();
LOG.info(((((getClass() + " validator took ") + duration) + " ms") + ", metrics on? ") + getWriteConfig().isMetricsOn());
m0(instantTime, duration);
}
}
| 3.26 |
hudi_UpsertPartitioner_getPartitionPathToPendingClusteringFileGroupsId_rdh
|
/**
 * Get the pending clustering file group IDs for each partition path.
 *
 * @return mapping of partition path to pending clustering file group IDs
*/
private Map<String, Set<String>> getPartitionPathToPendingClusteringFileGroupsId() {
Map<String, Set<String>> partitionPathToInPendingClusteringFileId = table.getFileSystemView().getFileGroupsInPendingClustering()
    .map(fileGroupIdAndInstantPair -> Pair.of(fileGroupIdAndInstantPair.getKey().getPartitionPath(), fileGroupIdAndInstantPair.getKey().getFileId()))
    .collect(Collectors.groupingBy(Pair::getKey, Collectors.mapping(Pair::getValue, Collectors.toSet())));
return partitionPathToInPendingClusteringFileId;
}
| 3.26 |
hudi_UpsertPartitioner_getSmallFiles_rdh
|
/**
* Returns a list of small files in the given partition path.
*/
protected List<SmallFile> getSmallFiles(String partitionPath) {
// smallFiles only for partitionPath
List<SmallFile> smallFileLocations = new ArrayList<>();
HoodieTimeline commitTimeline = table.getMetaClient().getCommitsTimeline().filterCompletedInstants();
if (!commitTimeline.empty()) {
  // if we have some commits
  HoodieInstant latestCommitTime = commitTimeline.lastInstant().get();
  List<HoodieBaseFile> allFiles = table.getBaseFileOnlyView().getLatestBaseFilesBeforeOrOn(partitionPath, latestCommitTime.getTimestamp()).collect(Collectors.toList());
for (HoodieBaseFile file : allFiles) {
if (file.getFileSize() < config.getParquetSmallFileLimit()) {
SmallFile sf = new SmallFile();
sf.location = new HoodieRecordLocation(file.getCommitTime(), file.getFileId());
sf.sizeBytes = file.getFileSize();
smallFileLocations.add(sf);
}
}
}
return smallFileLocations;
}
| 3.26 |
hudi_UpsertPartitioner_filterSmallFilesInClustering_rdh
|
/**
 * Exclude small files that belong to file groups under pending clustering, since the update path is not supported for them.
*
* @param pendingClusteringFileGroupsId
* pending clustering file groups id of partition
* @param smallFiles
* small files of partition
* @return smallFiles not in clustering
*/
private List<SmallFile> filterSmallFilesInClustering(final Set<String> pendingClusteringFileGroupsId, final List<SmallFile> smallFiles) {
if (!pendingClusteringFileGroupsId.isEmpty()) {
return smallFiles.stream().filter(smallFile -> !pendingClusteringFileGroupsId.contains(smallFile.location.getFileId())).collect(Collectors.toList());
} else {
return smallFiles;
}
}
| 3.26 |
hudi_UpsertPartitioner_averageBytesPerRecord_rdh
|
/**
* Obtains the average record size based on records written during previous commits. Used for estimating how many
* records pack into one file.
*/
protected static long averageBytesPerRecord(HoodieTimeline commitTimeline, HoodieWriteConfig hoodieWriteConfig) {
long avgSize = hoodieWriteConfig.getCopyOnWriteRecordSizeEstimate();
long fileSizeThreshold = ((long) (hoodieWriteConfig.getRecordSizeEstimationThreshold() * hoodieWriteConfig.getParquetSmallFileLimit()));
try {
if (!commitTimeline.empty()) {
// Go over the reverse ordered commits to get a more recent estimate of average record size.
Iterator<HoodieInstant> instants = commitTimeline.getReverseOrderedInstants().iterator();
while (instants.hasNext()) {
HoodieInstant instant = instants.next();
HoodieCommitMetadata commitMetadata = HoodieCommitMetadata.fromBytes(commitTimeline.getInstantDetails(instant).get(), HoodieCommitMetadata.class);
long totalBytesWritten = commitMetadata.fetchTotalBytesWritten();
long totalRecordsWritten = commitMetadata.fetchTotalRecordsWritten();
if ((totalBytesWritten > fileSizeThreshold) && (totalRecordsWritten > 0)) {
avgSize = ((long) (Math.ceil((1.0 * totalBytesWritten) / totalRecordsWritten)));
break;
}
}
}
} catch (Throwable t) {
// make this fail safe.
LOG.error("Error trying to compute average bytes/record ", t);
}
return avgSize;
}
| 3.26 |
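For intuition, here is a tiny worked example of the estimate above. The commit totals are made-up numbers; only the arithmetic mirrors the snippet: a commit that wrote about 1 GiB across 8 million records yields an estimate of 135 bytes per record.

```java
// Worked example of the per-record size estimate above (inputs are made up).
public class AvgRecordSizeExample {
  public static void main(String[] args) {
    long totalBytesWritten = 1L << 30;      // ~1 GiB written by the chosen commit
    long totalRecordsWritten = 8_000_000L;
    long avgSize = (long) Math.ceil((1.0 * totalBytesWritten) / totalRecordsWritten);
    System.out.println(avgSize);            // 135 bytes per record
  }
}
```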
hudi_BaseClusterer_updateWriteClient_rdh
|
/**
* Update the write client used by async clustering.
*
* @param writeClient
*/
public void updateWriteClient(BaseHoodieWriteClient<T, I, K, O> writeClient) {
  this.clusteringClient = writeClient;
}
| 3.26 |
hudi_HoodieBigQuerySyncClient_updateTableSchema_rdh
|
/**
* Updates the schema for the given table if the schema has changed. The schema passed in will not have the partition columns defined,
* so we add them back to the schema with the values read from the existing BigQuery table. This allows us to keep the partition
* field type in sync with how it is registered in BigQuery.
*
* @param tableName
* name of the table in BigQuery
* @param schema
* latest schema for the table
*/
public void updateTableSchema(String tableName, Schema schema, List<String> partitionFields) {
  Table existingTable = bigquery.getTable(TableId.of(projectId, datasetName, tableName));
ExternalTableDefinition definition = existingTable.getDefinition();
Schema remoteTableSchema = definition.getSchema();
// Add the partition fields into the schema to avoid conflicts while updating
List<Field> updatedTableFields = remoteTableSchema.getFields().stream().filter(field -> partitionFields.contains(field.getName())).collect(Collectors.toList());
updatedTableFields.addAll(schema.getFields());
Schema finalSchema = Schema.of(updatedTableFields);
boolean sameSchema = (definition.getSchema() != null) && definition.getSchema().equals(finalSchema);
boolean samePartitionFilter = partitionFields.isEmpty() || (requirePartitionFilter == ((definition.getHivePartitioningOptions().getRequirePartitionFilter() != null) && definition.getHivePartitioningOptions().getRequirePartitionFilter()));
if (sameSchema && samePartitionFilter) {
return;// No need to update schema.
}
ExternalTableDefinition.Builder builder = definition.toBuilder();
builder.setSchema(finalSchema);
builder.setAutodetect(false);
if (definition.getHivePartitioningOptions() != null) {
builder.setHivePartitioningOptions(definition.getHivePartitioningOptions().toBuilder().setRequirePartitionFilter(requirePartitionFilter).build());
}
Table updatedTable = existingTable.toBuilder().setDefinition(builder.build()).build();
bigquery.update(updatedTable);
}
| 3.26 |
hudi_HoodieBigQuerySyncClient_tableNotExistsOrDoesNotMatchSpecification_rdh
|
/**
* Checks for the existence of a table that uses the manifest file approach and matches other requirements.
*
* @param tableName
* name of the table
* @return Returns true if the table does not exist or if the table does exist but does not use the manifest file. False otherwise.
*/
public boolean tableNotExistsOrDoesNotMatchSpecification(String tableName) {
TableId tableId = TableId.of(projectId, datasetName, tableName);
Table table = bigquery.getTable(tableId);
if ((table == null) || (!table.exists())) {
  return true;
}
ExternalTableDefinition externalTableDefinition = table.getDefinition();
boolean manifestDoesNotExist = (externalTableDefinition.getSourceUris() == null)
    || externalTableDefinition.getSourceUris().stream().noneMatch(uri -> uri.contains(ManifestFileWriter.ABSOLUTE_PATH_MANIFEST_FOLDER_NAME));
if (!StringUtils.isNullOrEmpty(config.getString(BIGQUERY_SYNC_BIG_LAKE_CONNECTION_ID))) {
// If bigLakeConnectionId is present and connectionId is not present in table definition, we need to replace the table.
return manifestDoesNotExist || (externalTableDefinition.getConnectionId() == null);
}
return manifestDoesNotExist;
}
| 3.26 |
hudi_HoodieTableMetadataUtil_metadataPartitionExists_rdh
|
/**
* Check if the given metadata partition exists.
*
* @param basePath
* base path of the dataset
* @param context
* instance of {@link HoodieEngineContext}.
*/
public static boolean metadataPartitionExists(String basePath, HoodieEngineContext context, MetadataPartitionType partitionType) {
final String metadataTablePath = HoodieTableMetadata.getMetadataTableBasePath(basePath);
FileSystem fs = FSUtils.getFs(metadataTablePath, context.getHadoopConf().get());
try {
return fs.exists(new Path(metadataTablePath, partitionType.getPartitionPath()));
} catch (Exception e) {
throw new HoodieIOException(String.format("Failed to check metadata partition %s exists.", partitionType.getPartitionPath()));
}
}
| 3.26 |
hudi_HoodieTableMetadataUtil_isIndexingCommit_rdh
|
/**
 * Checks if a delta commit in the metadata table was written by the async indexer.
* <p>
* TODO(HUDI-5733): This should be cleaned up once the proper fix of rollbacks in the
* metadata table is landed.
*
* @param instantTime
* Instant time to check.
* @return {@code true} if from async indexer; {@code false} otherwise.
*/
public static boolean isIndexingCommit(String instantTime) {
return (instantTime.length() == (MILLIS_INSTANT_ID_LENGTH + OperationSuffix.METADATA_INDEXER.getSuffix().length())) && instantTime.endsWith(OperationSuffix.METADATA_INDEXER.getSuffix());
}
| 3.26 |
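A minimal standalone sketch of the suffix check above. The constant values used here (17-character millisecond instant timestamps and "004" as the metadata-indexer suffix) are assumptions for illustration only; the real values come from MILLIS_INSTANT_ID_LENGTH and OperationSuffix.METADATA_INDEXER.

```java
// Standalone sketch of the length + suffix check; constants are assumed values.
public class IndexingCommitCheckSketch {
  private static final int MILLIS_INSTANT_ID_LENGTH = 17;      // yyyyMMddHHmmssSSS (assumed)
  private static final String METADATA_INDEXER_SUFFIX = "004"; // assumed suffix value

  static boolean isIndexingCommit(String instantTime) {
    return instantTime.length() == MILLIS_INSTANT_ID_LENGTH + METADATA_INDEXER_SUFFIX.length()
        && instantTime.endsWith(METADATA_INDEXER_SUFFIX);
  }

  public static void main(String[] args) {
    System.out.println(isIndexingCommit("20240101123045678004")); // true
    System.out.println(isIndexingCommit("20240101123045678"));    // false: no indexer suffix
  }
}
```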
hudi_HoodieTableMetadataUtil_convertMetadataToBloomFilterRecords_rdh
|
/**
* Convert clean metadata to bloom filter index records.
*
* @param cleanMetadata
* - Clean action metadata
* @param engineContext
* - Engine context
* @param instantTime
* - Clean action instant time
* @param recordsGenerationParams
* - Parameters for bloom filter record generation
* @return List of bloom filter index records for the clean metadata
*/
public static HoodieData<HoodieRecord> convertMetadataToBloomFilterRecords(HoodieCleanMetadata cleanMetadata, HoodieEngineContext engineContext, String instantTime, MetadataRecordsGenerationParams recordsGenerationParams) {
List<Pair<String, String>> deleteFileList = new ArrayList<>();
cleanMetadata.getPartitionMetadata().forEach((partition, partitionMetadata) -> {
// Files deleted from a partition
List<String> deletedFiles = partitionMetadata.getDeletePathPatterns();
deletedFiles.forEach(entry -> {
final Path deletedFilePath = new Path(entry);
if (FSUtils.isBaseFile(deletedFilePath)) {
deleteFileList.add(Pair.of(partition, deletedFilePath.getName()));
}
});
});
final int parallelism = Math.max(Math.min(deleteFileList.size(), recordsGenerationParams.getBloomIndexParallelism()), 1);
HoodieData<Pair<String, String>> deleteFileListRDD = engineContext.parallelize(deleteFileList, parallelism);
return deleteFileListRDD.map(deleteFileInfoPair -> HoodieMetadataPayload.createBloomFilterMetadataRecord(deleteFileInfoPair.getLeft(), deleteFileInfoPair.getRight(), instantTime, StringUtils.EMPTY_STRING, ByteBuffer.allocate(0), true));
}
| 3.26 |
hudi_HoodieTableMetadataUtil_getPartitionLatestMergedFileSlices_rdh
|
/**
 * Get the latest file slices for a Metadata Table partition. If the latest file slice
 * is part of a pending compaction instant, then merge it with the file slice
 * just before the compaction instant time. The list of file slices returned is
* sorted in the correct order of file group name.
*
* @param metaClient
* Instance of {@link HoodieTableMetaClient}.
* @param fsView
* Metadata table filesystem view.
* @param partition
* The name of the partition whose file groups are to be loaded.
* @return List of latest file slices for all file groups in a given partition.
*/
public static List<FileSlice> getPartitionLatestMergedFileSlices(HoodieTableMetaClient metaClient, HoodieTableFileSystemView fsView, String partition) {
LOG.info("Loading latest merged file slices for metadata table partition " + partition);
return getPartitionFileSlices(metaClient, Option.of(fsView), partition, true);
}
/**
* Get the latest file slices for a Metadata Table partition. The list of file slices
* returned is sorted in the correct order of file group name.
*
* @param metaClient
* - Instance of {@link HoodieTableMetaClient}
| 3.26 |
hudi_HoodieTableMetadataUtil_convertFilesToColumnStatsRecords_rdh
|
/**
 * Convert added and deleted action metadata to column stats index records.
 */
public static HoodieData<HoodieRecord> convertFilesToColumnStatsRecords(HoodieEngineContext engineContext, Map<String, List<String>> partitionToDeletedFiles, Map<String, Map<String, Long>> partitionToAppendedFiles, MetadataRecordsGenerationParams recordsGenerationParams) {
// Find the columns to index
HoodieTableMetaClient dataTableMetaClient = recordsGenerationParams.getDataMetaClient();
final List<String> columnsToIndex = getColumnsToIndex(recordsGenerationParams, Lazy.lazily(() -> tryResolveSchemaForTable(dataTableMetaClient)));
if (columnsToIndex.isEmpty()) {
// In case there are no columns to index, bail
return engineContext.emptyHoodieData();
}
LOG.info(String.format("Indexing %d columns for column stats index", columnsToIndex.size()));
// Create the tuple (partition, filename, isDeleted) to handle both deletes and appends
final List<Tuple3<String, String, Boolean>> partitionFileFlagTupleList = fetchPartitionFileInfoTriplets(partitionToDeletedFiles, partitionToAppendedFiles);
// Create records MDT
int parallelism = Math.max(Math.min(partitionFileFlagTupleList.size(), recordsGenerationParams.getColumnStatsIndexParallelism()), 1);
return engineContext.parallelize(partitionFileFlagTupleList, parallelism).flatMap(partitionFileFlagTuple -> {
final String partitionName = partitionFileFlagTuple.f0;
final String filename = partitionFileFlagTuple.f1;
final boolean isDeleted = partitionFileFlagTuple.f2;
if ((!FSUtils.isBaseFile(new Path(filename))) || (!filename.endsWith(HoodieFileFormat.PARQUET.getFileExtension()))) {
  LOG.warn(String.format("Ignoring file %s as it is not a PARQUET file", filename));
return Stream.<HoodieRecord>empty().iterator();
}
final String filePathWithPartition = (partitionName + "/") + filename;
final String partitionIdentifier = getPartitionIdentifier(partitionName);
return getColumnStatsRecords(partitionIdentifier, filePathWithPartition, dataTableMetaClient, columnsToIndex, isDeleted).iterator();
});
}
| 3.26 |
hudi_HoodieTableMetadataUtil_convertMetadataToColumnStatsRecords_rdh
|
/**
* Convert clean metadata to column stats index records.
*
* @param cleanMetadata
* - Clean action metadata
* @param engineContext
* - Engine context
* @param recordsGenerationParams
 * - Parameters for column stats index record generation
* @return List of column stats index records for the clean metadata
*/
public static HoodieData<HoodieRecord> convertMetadataToColumnStatsRecords(HoodieCleanMetadata cleanMetadata, HoodieEngineContext engineContext, MetadataRecordsGenerationParams recordsGenerationParams) {
List<Pair<String, String>> deleteFileList = new ArrayList<>();
cleanMetadata.getPartitionMetadata().forEach((partition, partitionMetadata) -> {
// Files deleted from a partition
List<String> deletedFiles = partitionMetadata.getDeletePathPatterns();
deletedFiles.forEach(entry -> deleteFileList.add(Pair.of(partition, entry)));
});
HoodieTableMetaClient dataTableMetaClient = recordsGenerationParams.getDataMetaClient();
List<String> columnsToIndex = getColumnsToIndex(recordsGenerationParams, Lazy.lazily(() -> tryResolveSchemaForTable(dataTableMetaClient)));
if (columnsToIndex.isEmpty()) {
// In case there are no columns to index, bail
return engineContext.emptyHoodieData();
}
int parallelism = Math.max(Math.min(deleteFileList.size(), recordsGenerationParams.getColumnStatsIndexParallelism()), 1);
return engineContext.parallelize(deleteFileList, parallelism).flatMap(deleteFileInfoPair -> {
String partitionPath = deleteFileInfoPair.getLeft();
String filePath = deleteFileInfoPair.getRight();
if (filePath.endsWith(HoodieFileFormat.PARQUET.getFileExtension())) {
return getColumnStatsRecords(partitionPath, filePath, dataTableMetaClient, columnsToIndex, true).iterator();
}
return Collections.emptyListIterator();
});
}
| 3.26 |
hudi_HoodieTableMetadataUtil_deleteMetadataTable_rdh
|
/**
 * Delete the metadata table for the dataset, backing it up first if required.
*
* @param dataMetaClient
* {@code HoodieTableMetaClient} of the dataset for which metadata table is to be deleted
* @param context
* instance of {@link HoodieEngineContext}.
* @param backup
* Whether metadata table should be backed up before deletion. If true, the table is backed up to the
* directory with name metadata_<current_timestamp>.
* @return The backup directory if backup was requested
*/
public static String deleteMetadataTable(HoodieTableMetaClient dataMetaClient, HoodieEngineContext context, boolean backup) {
final Path metadataTablePath = HoodieTableMetadata.getMetadataTableBasePath(dataMetaClient.getBasePathV2());
FileSystem fs = FSUtils.getFs(metadataTablePath.toString(), context.getHadoopConf().get());
dataMetaClient.getTableConfig().clearMetadataPartitions(dataMetaClient);
try {
if (!fs.exists(metadataTablePath)) {
return null;
}
} catch (FileNotFoundException e) {
// Ignoring exception as metadata table already does not exist
return null;
} catch (IOException e) {
  throw new HoodieMetadataException("Failed to check metadata table existence", e);
}
if (backup) {
final Path metadataBackupPath = new Path(metadataTablePath.getParent(), ".metadata_" + dataMetaClient.createNewInstantTime(false));
LOG.info(("Backing up metadata directory to " + metadataBackupPath) + " before deletion");
try {
if (fs.rename(metadataTablePath, metadataBackupPath)) {
return metadataBackupPath.toString();
}
} catch (Exception e) {
// If rename fails, we will ignore the backup and still delete the MDT
LOG.error("Failed to backup metadata table using rename", e);
}
}
LOG.info("Deleting metadata table from " + metadataTablePath);
try {
fs.delete(metadataTablePath, true);
} catch (Exception e) {
  throw new HoodieMetadataException("Failed to delete metadata table from path " + metadataTablePath, e);
}
return null;
}
| 3.26 |
hudi_HoodieTableMetadataUtil_deleteMetadataTablePartition_rdh
|
/**
* Delete a partition within the metadata table.
* <p>
* This can be used to delete a partition so that it can be re-bootstrapped.
*
* @param dataMetaClient
* {@code HoodieTableMetaClient} of the dataset for which metadata table is to be deleted
* @param context
* instance of {@code HoodieEngineContext}.
* @param backup
* Whether metadata table should be backed up before deletion. If true, the table is backed up to the
* directory with name metadata_<current_timestamp>.
* @param partitionType
* The partition to delete
* @return The backup directory if backup was requested, null otherwise
*/
public static String deleteMetadataTablePartition(HoodieTableMetaClient dataMetaClient, HoodieEngineContext context, MetadataPartitionType partitionType, boolean backup) {
if (partitionType.equals(MetadataPartitionType.FILES)) {
return deleteMetadataTable(dataMetaClient, context, backup);
}
final Path metadataTablePartitionPath = new Path(HoodieTableMetadata.getMetadataTableBasePath(dataMetaClient.getBasePath()), partitionType.getPartitionPath());
FileSystem fs = FSUtils.getFs(metadataTablePartitionPath.toString(), context.getHadoopConf().get());
dataMetaClient.getTableConfig().setMetadataPartitionState(dataMetaClient, partitionType, false);
try {
if (!fs.exists(metadataTablePartitionPath)) {
return null;
}
} catch (FileNotFoundException e) {
// Ignoring exception as metadata table already does not exist
LOG.debug((("Metadata table partition " + partitionType) + " not found at path ") + metadataTablePartitionPath);
return null;
} catch (Exception e) {
throw new HoodieMetadataException(String.format("Failed to check existence of MDT partition %s at path %s: ", partitionType, metadataTablePartitionPath), e);
}
if (backup) {
final Path metadataPartitionBackupPath = new Path(metadataTablePartitionPath.getParent().getParent(), String.format(".metadata_%s_%s", partitionType.getPartitionPath(), dataMetaClient.createNewInstantTime(false)));
LOG.info(String.format("Backing up MDT partition %s to %s before deletion",
partitionType, metadataPartitionBackupPath));
try {
if (fs.rename(metadataTablePartitionPath, metadataPartitionBackupPath)) {
return metadataPartitionBackupPath.toString();
}
} catch (Exception e) {
// If rename fails, we will try to delete the table instead
LOG.error(String.format("Failed to backup MDT partition %s using rename", partitionType), e);
}
} else {
LOG.info("Deleting metadata table partition from " + metadataTablePartitionPath);
try {
fs.delete(metadataTablePartitionPath, true);
} catch (Exception e) {
throw new HoodieMetadataException("Failed to delete metadata table partition from path " + metadataTablePartitionPath, e);
}
}
return null;
}
| 3.26 |
hudi_HoodieTableMetadataUtil_convertMetadataToRecords_rdh
|
/**
* Convert rollback action metadata to metadata table records.
* <p>
* We only need to handle FILES partition here as HUDI rollbacks on MOR table may end up adding a new log file. All other partitions
* are handled by actual rollback of the deltacommit which added records to those partitions.
*/
public static Map<MetadataPartitionType, HoodieData<HoodieRecord>> convertMetadataToRecords(HoodieEngineContext engineContext, HoodieTableMetaClient dataTableMetaClient, HoodieRollbackMetadata rollbackMetadata, String instantTime) {
List<HoodieRecord> filesPartitionRecords = convertMetadataToRollbackRecords(rollbackMetadata, instantTime, dataTableMetaClient);
final HoodieData<HoodieRecord> rollbackRecordsRDD = (filesPartitionRecords.isEmpty()) ? engineContext.emptyHoodieData() : engineContext.parallelize(filesPartitionRecords, filesPartitionRecords.size());
return Collections.singletonMap(MetadataPartitionType.FILES, rollbackRecordsRDD);
}
| 3.26 |
hudi_HoodieTableMetadataUtil_createLogCompactionTimestamp_rdh
|
/**
* Create the timestamp for a compaction operation on the metadata table.
*/
public static String createLogCompactionTimestamp(String timestamp) {
return timestamp + OperationSuffix.LOG_COMPACTION.getSuffix();
}
| 3.26 |
hudi_HoodieTableMetadataUtil_coerceToComparable_rdh
|
/**
 * Given a schema, coerces the provided value to an instance of {@link Comparable<?>} such that
 * it can subsequently be used in column stats.
*
* NOTE: This method has to stay compatible with the semantic of
* {@link ParquetUtils#readRangeFromParquetMetadata} as they are used in tandem
*/
private static Comparable<?> coerceToComparable(Schema schema, Object val) {
if (val == null) {
return null;
}
switch (schema.getType()) {
case UNION :// TODO we need to handle unions in general case as well
return coerceToComparable(resolveNullableSchema(schema), val);
case FIXED :
case BYTES :
if (schema.getLogicalType() instanceof LogicalTypes.Decimal) {
return ((Comparable<?>) (val));
}
return ((ByteBuffer) (val));
case INT :
if ((schema.getLogicalType() == LogicalTypes.date()) || (schema.getLogicalType() == LogicalTypes.timeMillis())) {
// NOTE: This type will be either {@code java.sql.Date} or {org.joda.LocalDate}
// depending on the Avro version. Hence, we simply cast it to {@code Comparable<?>}
return ((Comparable<?>) (val));
}
return ((Integer) (val));
case LONG :
if ((schema.getLogicalType() == LogicalTypes.timeMicros()) || (schema.getLogicalType() == LogicalTypes.timestampMicros()) || (schema.getLogicalType() == LogicalTypes.timestampMillis())) {
  // NOTE: This type will be either {@code java.sql.Date} or {org.joda.LocalDate}
  // depending on the Avro version. Hence, we simply cast it to {@code Comparable<?>}
  return ((Comparable<?>) (val));
}
return ((Long) (val));
case STRING :
// unpack the avro Utf8 if possible
return val.toString();
case FLOAT :
case DOUBLE :
case BOOLEAN :
return ((Comparable<?>) (val));
// TODO add support for those types
case ENUM :
case MAP :
case NULL :
case RECORD :
case ARRAY :
  return null;
default :
throw new IllegalStateException("Unexpected type: " + schema.getType());
}
}
| 3.26 |
hudi_HoodieTableMetadataUtil_convertFilesToBloomFilterRecords_rdh
|
/**
* Convert added and deleted files metadata to bloom filter index records.
*/
public static HoodieData<HoodieRecord> convertFilesToBloomFilterRecords(HoodieEngineContext engineContext, Map<String, List<String>> partitionToDeletedFiles, Map<String, Map<String, Long>> partitionToAppendedFiles, MetadataRecordsGenerationParams recordsGenerationParams, String instantTime) {
// Create the tuple (partition, filename, isDeleted) to handle both deletes and appends
final List<Tuple3<String, String, Boolean>> partitionFileFlagTupleList = fetchPartitionFileInfoTriplets(partitionToDeletedFiles, partitionToAppendedFiles);
// Create records MDT
int parallelism = Math.max(Math.min(partitionFileFlagTupleList.size(), recordsGenerationParams.getBloomIndexParallelism()), 1);
return engineContext.parallelize(partitionFileFlagTupleList, parallelism).flatMap(partitionFileFlagTuple -> {
final String partitionName = partitionFileFlagTuple.f0;
final String filename = partitionFileFlagTuple.f1;
final boolean isDeleted = partitionFileFlagTuple.f2;
if (!FSUtils.isBaseFile(new Path(filename))) {
LOG.warn(String.format("Ignoring file %s as it is not a base file", filename));
return Stream.<HoodieRecord>empty().iterator();
}
// Read the bloom filter from the base file if the file is being added
ByteBuffer bloomFilterBuffer = ByteBuffer.allocate(0);
if (!isDeleted) {
final String pathWithPartition = (partitionName + "/") + filename;
final Path addedFilePath = new Path(recordsGenerationParams.getDataMetaClient().getBasePath(), pathWithPartition);
bloomFilterBuffer = readBloomFilter(recordsGenerationParams.getDataMetaClient().getHadoopConf(), addedFilePath);
// If reading the bloom filter failed then do not add a record for this file
if (bloomFilterBuffer == null) {
LOG.error("Failed to read bloom filter from " + addedFilePath);
return Stream.<HoodieRecord>empty().iterator();
}
}
final String partition = getPartitionIdentifier(partitionName);
return Stream.<HoodieRecord>of(HoodieMetadataPayload.createBloomFilterMetadataRecord(partition, filename, instantTime, recordsGenerationParams.getBloomFilterType(), bloomFilterBuffer, partitionFileFlagTuple.f2)).iterator();
});
}
| 3.26 |
hudi_HoodieTableMetadataUtil_convertColumnStatsRecordToColumnRangeMetadata_rdh
|
/**
* Converts instance of {@link HoodieMetadataColumnStats} to {@link HoodieColumnRangeMetadata}
*/
public static HoodieColumnRangeMetadata<Comparable> convertColumnStatsRecordToColumnRangeMetadata(HoodieMetadataColumnStats columnStats) {
return HoodieColumnRangeMetadata.<Comparable>create(columnStats.getFileName(), columnStats.getColumnName(), unwrapAvroValueWrapper(columnStats.getMinValue()), unwrapAvroValueWrapper(columnStats.getMaxValue()), columnStats.getNullCount(), columnStats.getValueCount(), columnStats.getTotalSize(), columnStats.getTotalUncompressedSize());
}
| 3.26 |
hudi_HoodieTableMetadataUtil_convertMetadataToRollbackRecords_rdh
|
/**
* Convert rollback action metadata to files partition records.
* Consider only new log files added.
*/
private static List<HoodieRecord> convertMetadataToRollbackRecords(HoodieRollbackMetadata rollbackMetadata, String instantTime, HoodieTableMetaClient dataTableMetaClient) {
Map<String, Map<String, Long>> partitionToAppendedFiles = new HashMap<>();
processRollbackMetadata(rollbackMetadata, partitionToAppendedFiles);
reAddLogFilesFromRollbackPlan(dataTableMetaClient, instantTime, partitionToAppendedFiles);
return convertFilesToFilesPartitionRecords(Collections.emptyMap(), partitionToAppendedFiles, instantTime, "Rollback");
}
| 3.26 |
hudi_HoodieTableMetadataUtil_getRollbackedCommits_rdh
|
/**
* Returns a list of commits which were rolled back as part of a Rollback or Restore operation.
*
* @param instant
* The Rollback operation to read
 * @param timeline
 * 		active timeline of the dataset.
*/
private static List<String> getRollbackedCommits(HoodieInstant instant, HoodieActiveTimeline timeline) {
try {
List<String> commitsToRollback;
if (instant.getAction().equals(HoodieTimeline.ROLLBACK_ACTION)) {
try {
HoodieRollbackMetadata rollbackMetadata = TimelineMetadataUtils.deserializeHoodieRollbackMetadata(timeline.getInstantDetails(instant).get());
commitsToRollback = rollbackMetadata.getCommitsRollback();
} catch (IOException e) {
// if file is empty, fetch the commits to rollback from rollback.requested file
HoodieRollbackPlan rollbackPlan = TimelineMetadataUtils.deserializeAvroMetadata(timeline.readRollbackInfoAsBytes(new HoodieInstant(State.REQUESTED, HoodieTimeline.ROLLBACK_ACTION, instant.getTimestamp())).get(), HoodieRollbackPlan.class);
commitsToRollback = Collections.singletonList(rollbackPlan.getInstantToRollback().getCommitTime());
LOG.warn("Had to fetch rollback info from requested instant since completed file is empty " + instant.toString());
}
return commitsToRollback;
}
List<String> rollbackedCommits = new LinkedList<>();
if (instant.getAction().equals(HoodieTimeline.RESTORE_ACTION)) {
// Restore is made up of several rollbacks
HoodieRestoreMetadata restoreMetadata = TimelineMetadataUtils.deserializeHoodieRestoreMetadata(timeline.getInstantDetails(instant).get());
restoreMetadata.getHoodieRestoreMetadata().values().forEach(rms -> {
rms.forEach(rm -> rollbackedCommits.addAll(rm.getCommitsRollback()));
});
}
return rollbackedCommits;
} catch (IOException e) {
throw new HoodieMetadataException("Error retrieving rollback commits for instant " + instant, e);
}
}
| 3.26 |
hudi_HoodieTableMetadataUtil_readRecordKeysFromFileSlices_rdh
|
/**
* Reads the record keys from the given file slices and returns a {@link HoodieData} of {@link HoodieRecord} to be updated in the metadata table.
* If file slice does not have any base file, then iterates over the log files to get the record keys.
*/
public static HoodieData<HoodieRecord> readRecordKeysFromFileSlices(HoodieEngineContext engineContext, List<Pair<String, FileSlice>> partitionFileSlicePairs, boolean forDelete, int recordIndexMaxParallelism, String activeModule, HoodieTableMetaClient metaClient, EngineType engineType) {
if (partitionFileSlicePairs.isEmpty()) {
return engineContext.emptyHoodieData();
}
engineContext.setJobStatus(activeModule, ("Record Index: reading record keys from " + partitionFileSlicePairs.size()) + " file slices");
final int parallelism = Math.min(partitionFileSlicePairs.size(), recordIndexMaxParallelism);
final String basePath = metaClient.getBasePathV2().toString();
final SerializableConfiguration configuration = new SerializableConfiguration(metaClient.getHadoopConf());
return engineContext.parallelize(partitionFileSlicePairs, parallelism).flatMap(partitionAndBaseFile -> {
final String partition = partitionAndBaseFile.getKey();
final FileSlice fileSlice = partitionAndBaseFile.getValue();
if (!fileSlice.getBaseFile().isPresent()) {
List<String> logFilePaths = fileSlice.getLogFiles().sorted(HoodieLogFile.getLogFileComparator()).map(l -> l.getPath().toString()).collect(toList());
HoodieMergedLogRecordScanner mergedLogRecordScanner = HoodieMergedLogRecordScanner.newBuilder().withFileSystem(metaClient.getFs()).withBasePath(basePath).withLogFilePaths(logFilePaths).withReaderSchema(HoodieAvroUtils.getRecordKeySchema()).withLatestInstantTime(metaClient.getActiveTimeline().filterCompletedInstants().lastInstant().map(HoodieInstant::getTimestamp).orElse("")).withReadBlocksLazily(configuration.get().getBoolean("", true)).withReverseReader(false).withMaxMemorySizeInBytes(configuration.get().getLongBytes(MAX_MEMORY_FOR_COMPACTION.key(), DEFAULT_MAX_MEMORY_FOR_SPILLABLE_MAP_IN_BYTES)).withSpillableMapBasePath(FileIOUtils.getDefaultSpillableMapBasePath()).withPartition(fileSlice.getPartitionPath()).withOptimizedLogBlocksScan(configuration.get().getBoolean("hoodie" + HoodieMetadataConfig.OPTIMIZED_LOG_BLOCKS_SCAN, false)).withDiskMapType(configuration.get().getEnum(SPILLABLE_DISK_MAP_TYPE.key(), SPILLABLE_DISK_MAP_TYPE.defaultValue())).withBitCaskDiskMapCompressionEnabled(configuration.get().getBoolean(DISK_MAP_BITCASK_COMPRESSION_ENABLED.key(), DISK_MAP_BITCASK_COMPRESSION_ENABLED.defaultValue())).withRecordMerger(// TODO: support different merger classes, which is currently only known to write config
HoodieRecordUtils.createRecordMerger(metaClient.getBasePathV2().toString(), engineType, Collections.emptyList(), metaClient.getTableConfig().getRecordMergerStrategy())).build();
ClosableIterator<String> recordKeyIterator = ClosableIterator.wrap(mergedLogRecordScanner.getRecords().keySet().iterator());
return new ClosableIterator<HoodieRecord>() {
@Override
public void close() {
recordKeyIterator.close();
}
@Override
public boolean hasNext() {
return recordKeyIterator.hasNext();
}
@Override
public HoodieRecord next() {
return forDelete ? HoodieMetadataPayload.createRecordIndexDelete(recordKeyIterator.next()) : HoodieMetadataPayload.createRecordIndexUpdate(recordKeyIterator.next(), partition, fileSlice.getFileId(), fileSlice.getBaseInstantTime(), 0);
}
};
}
final HoodieBaseFile baseFile = fileSlice.getBaseFile().get();
final String filename = baseFile.getFileName();
Path dataFilePath = new Path(basePath, (partition + Path.SEPARATOR) + filename);
final String fileId = baseFile.getFileId();
final String instantTime = baseFile.getCommitTime();
HoodieFileReader reader = HoodieFileReaderFactory.getReaderFactory(HoodieRecord.HoodieRecordType.AVRO).getFileReader(configuration.get(), dataFilePath);
ClosableIterator<String> recordKeyIterator = reader.getRecordKeyIterator();
return new ClosableIterator<HoodieRecord>() {
@Override
public void close() {
recordKeyIterator.close();
}
@Override
public boolean hasNext() {
return recordKeyIterator.hasNext();
}
@Override
public HoodieRecord next() {
return forDelete ? HoodieMetadataPayload.createRecordIndexDelete(recordKeyIterator.next()) : HoodieMetadataPayload.createRecordIndexUpdate(recordKeyIterator.next(), partition, fileId, instantTime, 0);
}
};
});
}
| 3.26 |
hudi_HoodieTableMetadataUtil_estimateFileGroupCount_rdh
|
/**
* Estimates the file group count to use for a MDT partition.
*
* @param partitionType
* Type of the partition for which the file group count is to be estimated.
* @param recordCount
* The number of records expected to be written.
* @param averageRecordSize
 * Average size of each record to be written.
* @param minFileGroupCount
* Minimum number of file groups to use.
* @param maxFileGroupCount
* Maximum number of file groups to use.
* @param growthFactor
* By what factor are the records (recordCount) expected to grow?
* @param maxFileGroupSizeBytes
* Maximum size of the file group.
* @return The estimated number of file groups.
*/
public static int estimateFileGroupCount(MetadataPartitionType partitionType, long recordCount, int averageRecordSize, int minFileGroupCount, int maxFileGroupCount, float growthFactor, int maxFileGroupSizeBytes) {
int fileGroupCount;
// If a fixed number of file groups are desired
if ((minFileGroupCount == maxFileGroupCount) && (minFileGroupCount != 0)) {
fileGroupCount = minFileGroupCount;
} else {
  // Number of records to estimate for
final long expectedNumRecords = ((long) (Math.ceil(((float) (recordCount)) * growthFactor)));
// Maximum records that should be written to each file group so that it does not go over the size limit required
final long maxRecordsPerFileGroup = maxFileGroupSizeBytes / Math.max(averageRecordSize, 1L);
final long estimatedFileGroupCount = expectedNumRecords / maxRecordsPerFileGroup;
if (estimatedFileGroupCount >= maxFileGroupCount) {
fileGroupCount = maxFileGroupCount;
} else if (estimatedFileGroupCount <= minFileGroupCount) {
fileGroupCount = minFileGroupCount;
} else {
fileGroupCount = Math.max(1, ((int) (estimatedFileGroupCount)));
}
}
LOG.info(String.format(("Estimated file group count for MDT partition %s is %d " + "[recordCount=%d, avgRecordSize=%d, minFileGroupCount=%d, maxFileGroupCount=%d, growthFactor=%f, ") + "maxFileGroupSizeBytes=%d]", partitionType.name(), fileGroupCount, recordCount, averageRecordSize, minFileGroupCount, maxFileGroupCount, growthFactor, maxFileGroupSizeBytes));
return fileGroupCount;
}
| 3.26 |
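To make the sizing math above concrete, a worked example with made-up inputs: 100 million records expected to double (growthFactor = 2), a 1 GiB maximum file group size, and 128-byte records give roughly 8.4 million records per file group and therefore an estimate of 23 file groups before clamping to the configured min/max.

```java
// Worked example of the file group estimate above (all inputs are made up).
public class FileGroupCountExample {
  public static void main(String[] args) {
    long recordCount = 100_000_000L;
    float growthFactor = 2.0f;
    int averageRecordSize = 128;
    long maxFileGroupSizeBytes = 1L << 30; // 1 GiB

    long expectedNumRecords = (long) Math.ceil((float) recordCount * growthFactor); // 200,000,000
    long maxRecordsPerFileGroup = maxFileGroupSizeBytes / Math.max(averageRecordSize, 1L); // 8,388,608
    long estimatedFileGroupCount = expectedNumRecords / maxRecordsPerFileGroup; // 23
    System.out.println(estimatedFileGroupCount); // then clamped to [minFileGroupCount, maxFileGroupCount]
  }
}
```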
hudi_HoodieTableMetadataUtil_deleteMetadataPartition_rdh
|
/**
* Deletes the metadata partition from the file system.
*
* @param basePath
* - base path of the dataset
* @param context
* - instance of {@link HoodieEngineContext}
* @param partitionType
* - {@link MetadataPartitionType} of the partition to delete
 */
public static void deleteMetadataPartition(String basePath, HoodieEngineContext context, MetadataPartitionType partitionType) {
HoodieTableMetaClient dataMetaClient = HoodieTableMetaClient.builder().setBasePath(basePath).setConf(context.getHadoopConf().get()).build();
deleteMetadataTablePartition(dataMetaClient, context, partitionType, false);
}
| 3.26 |
hudi_HoodieTableMetadataUtil_getLocationFromRecordIndexInfo_rdh
|
/**
* Gets the location from record index content.
* Note that, a UUID based fileId is stored as 3 pieces in record index (fileIdHighBits,
* fileIdLowBits and fileIndex). FileID format is {UUID}-{fileIndex}.
* The arguments are consistent with what {@link HoodieRecordIndexInfo} contains.
*
* @param partition
* The partition name the record belongs to.
* @param fileIdEncoding
 * FileId encoding. Possible values are 0 and 1. 0 represents a UUID-based
 * fileId, and 1 represents the raw string format of the fileId.
* @param fileIdHighBits
* High 64 bits if the fileId is based on UUID format.
* @param fileIdLowBits
* Low 64 bits if the fileId is based on UUID format.
* @param fileIndex
* Index representing file index which is used to re-construct UUID based fileID.
* @param originalFileId
* FileId of the location where record belongs to.
* When the encoding is 1, fileID is stored in raw string format.
* @param instantTime
 * Epoch time in milliseconds representing the commit time at which the record was added.
* @return {@link HoodieRecordGlobalLocation} containing the location.
*/
public static HoodieRecordGlobalLocation getLocationFromRecordIndexInfo(String partition, int fileIdEncoding, long fileIdHighBits, long fileIdLowBits, int fileIndex, String originalFileId, Long instantTime) {
String fileId = null;
if (fileIdEncoding == 0) {
// encoding 0 refers to UUID based fileID
final UUID uuid = new UUID(fileIdHighBits, fileIdLowBits);
fileId = uuid.toString();
if (fileIndex != RECORD_INDEX_MISSING_FILEINDEX_FALLBACK) {
fileId += "-" + fileIndex;
}
} else {
// encoding 1 refers to no encoding. fileID as is.
fileId = originalFileId;
}
final Date instantDate = new Date(instantTime);
return new HoodieRecordGlobalLocation(partition, HoodieActiveTimeline.formatDate(instantDate), fileId);
}
/**
* Reads the record keys from the base files and returns a {@link HoodieData} of {@link HoodieRecord} to be updated in the metadata table.
* Use {@link #readRecordKeysFromFileSlices(HoodieEngineContext, List, boolean, int, String, HoodieTableMetaClient, EngineType)}
| 3.26 |
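A minimal sketch of the encoding-0 fileId reconstruction described above: the UUID is rebuilt from its high/low 64-bit halves and the file index is appended as "-{fileIndex}". The sentinel used here for a missing file index (-1) is an assumption for illustration; the snippet itself uses RECORD_INDEX_MISSING_FILEINDEX_FALLBACK.

```java
import java.util.UUID;

// Sketch of rebuilding a UUID-based fileId from record index pieces (encoding 0).
public class RecordIndexFileIdSketch {
  private static final int MISSING_FILE_INDEX = -1; // assumed sentinel for illustration

  static String rebuildFileId(long fileIdHighBits, long fileIdLowBits, int fileIndex) {
    String fileId = new UUID(fileIdHighBits, fileIdLowBits).toString();
    if (fileIndex != MISSING_FILE_INDEX) {
      fileId += "-" + fileIndex;
    }
    return fileId;
  }

  public static void main(String[] args) {
    UUID uuid = UUID.randomUUID();
    // Prints something like "3f8e6c2a-...-0" for fileIndex 0.
    System.out.println(rebuildFileId(uuid.getMostSignificantBits(), uuid.getLeastSignificantBits(), 0));
  }
}
```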
hudi_HoodieTableMetadataUtil_getFileSystemView_rdh
|
/**
* Get metadata table file system view.
*
* @param metaClient
* - Metadata table meta client
* @return Filesystem view for the metadata table
*/
public static HoodieTableFileSystemView getFileSystemView(HoodieTableMetaClient metaClient) {
// If there are no commits on the metadata table then the table's
// default FileSystemView will not return any file slices even
// though we may have initialized them.
HoodieTimeline timeline = metaClient.getActiveTimeline();
if (timeline.empty()) {
  final HoodieInstant instant = new HoodieInstant(false, HoodieTimeline.DELTA_COMMIT_ACTION, metaClient.createNewInstantTime(false));
timeline = new HoodieDefaultTimeline(Stream.of(instant), metaClient.getActiveTimeline()::getInstantDetails);
}
return new HoodieTableFileSystemView(metaClient, timeline);
}
/**
* Get the latest file slices for a given partition.
*
* @param metaClient
* - Instance of {@link HoodieTableMetaClient}
| 3.26 |
hudi_HoodieTableMetadataUtil_processRollbackMetadata_rdh
|
/**
 * Extracts information about the deleted and appended files from the {@code HoodieRollbackMetadata}.
 * <p>
 * During a rollback, files may be deleted (COW, MOR) or rollback blocks may be appended to files (MOR only). This
 * function extracts these file changes for each partition.
*
* @param rollbackMetadata
* {@code HoodieRollbackMetadata}
* @param partitionToAppendedFiles
* The {@code Map} to fill with files appended per partition and their sizes.
*/
private static void processRollbackMetadata(HoodieRollbackMetadata rollbackMetadata, Map<String, Map<String, Long>> partitionToAppendedFiles) {
rollbackMetadata.getPartitionMetadata().values().forEach(pm -> {
// Has this rollback produced new files?
boolean hasRollbackLogFiles = (pm.getRollbackLogFiles() != null) && (!pm.getRollbackLogFiles().isEmpty());
final String partition = pm.getPartitionPath();
final String partitionId = getPartitionIdentifier(partition);
BiFunction<Long, Long, Long> fileMergeFn = (oldSize, newSizeCopy) -> {
  // if a file exists in both written log files and rollback log files, we want to pick the one that is higher
  // as rollback file could have been updated after written log files are computed.
  return oldSize > newSizeCopy ? oldSize : newSizeCopy;
};
if (hasRollbackLogFiles) {
if (!partitionToAppendedFiles.containsKey(partitionId)) {
partitionToAppendedFiles.put(partitionId, new HashMap<>());
}
// Extract appended file name from the absolute paths saved in getAppendFiles()
pm.getRollbackLogFiles().forEach((path, size) -> {
String fileName = new Path(path).getName();
partitionToAppendedFiles.get(partitionId).merge(fileName, size, fileMergeFn);
});
}
});
}
| 3.26 |
hudi_HoodieTableMetadataUtil_createIndexInitTimestamp_rdh
|
/**
* Create the timestamp for an index initialization operation on the metadata table.
* <p>
 * Since many MDT partitions can be initialized one after another, the offset parameter is used to generate a
 * unique timestamp for each.
*/
public static String createIndexInitTimestamp(String timestamp, int offset) {
return String.format("%s%03d", timestamp, PARTITION_INITIALIZATION_TIME_SUFFIX + offset);
}
| 3.26 |
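An illustration of the "%s%03d" format above; the base value of PARTITION_INITIALIZATION_TIME_SUFFIX used here (10) is an assumption for the example, the point being the zero-padded three-digit offset appended to the shared timestamp.

```java
// Zero-padded index-initialization suffix example (base suffix value is assumed).
public class IndexInitTimestampExample {
  private static final int PARTITION_INITIALIZATION_TIME_SUFFIX = 10; // assumed value

  public static void main(String[] args) {
    String base = "20240101123045678";
    for (int offset = 0; offset < 3; offset++) {
      // Prints 20240101123045678010, ...011, ...012
      System.out.println(String.format("%s%03d", base, PARTITION_INITIALIZATION_TIME_SUFFIX + offset));
    }
  }
}
```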
hudi_HoodieTableMetadataUtil_getColumnsToIndex_rdh
|
/**
* Get the list of columns for the table for column stats indexing
*/
private static List<String> getColumnsToIndex(MetadataRecordsGenerationParams recordsGenParams, Lazy<Option<Schema>> lazyWriterSchemaOpt) {
checkState(recordsGenParams.isColumnStatsIndexEnabled());
List<String> targetColumns = recordsGenParams.getTargetColumnsForColumnStatsIndex();
if (!targetColumns.isEmpty()) {
return targetColumns;
}
Option<Schema> writerSchemaOpt = lazyWriterSchemaOpt.get();
return writerSchemaOpt.map(writerSchema -> writerSchema.getFields().stream().map(Schema.Field::name).collect(Collectors.toList())).orElse(Collections.emptyList());
}
| 3.26 |
hudi_HoodieTableMetadataUtil_createCleanTimestamp_rdh
|
/**
* Create the timestamp for a clean operation on the metadata table.
*/
public static String createCleanTimestamp(String timestamp) {
return timestamp + OperationSuffix.CLEAN.getSuffix();
}
| 3.26 |
hudi_HoodieTableMetadataUtil_convertMetadataToFilesPartitionRecords_rdh
|
/**
* Finds all files that were deleted as part of a clean and creates metadata table records for them.
*
* @param cleanMetadata
* @param instantTime
* @return a list of metadata table records
*/
public static List<HoodieRecord> convertMetadataToFilesPartitionRecords(HoodieCleanMetadata cleanMetadata, String instantTime) {
List<HoodieRecord> records = new LinkedList<>();
int[] fileDeleteCount = new int[]{ 0 };
List<String> deletedPartitions = new ArrayList<>();
cleanMetadata.getPartitionMetadata().forEach((partitionName, partitionMetadata) -> {
final String partition = getPartitionIdentifier(partitionName);
// Files deleted from a partition
List<String> deletedFiles = partitionMetadata.getDeletePathPatterns();
HoodieRecord record = HoodieMetadataPayload.createPartitionFilesRecord(partition, Collections.emptyMap(), deletedFiles);
records.add(record);
fileDeleteCount[0] += deletedFiles.size();
boolean isPartitionDeleted = partitionMetadata.getIsPartitionDeleted();
if (isPartitionDeleted) {
deletedPartitions.add(partitionName);
}
});
if (!deletedPartitions.isEmpty()) {
// if there are partitions to be deleted, add them to delete list
records.add(HoodieMetadataPayload.createPartitionListRecord(deletedPartitions, true));
}
LOG.info((((((("Updating at " + instantTime) + " from Clean. #partitions_updated=") + records.size()) + ", #files_deleted=") + fileDeleteCount[0]) + ", #partitions_deleted=") + deletedPartitions.size());
return records;
}
| 3.26 |
hudi_HoodieTableMetadataUtil_m0_rdh
|
/**
 * Delete the metadata table for the dataset. This will be invoked during an upgrade/downgrade operation,
 * during which no other process should be running.
*
* @param basePath
* base path of the dataset
* @param context
* instance of {@link HoodieEngineContext}.
 */
public static void m0(String basePath, HoodieEngineContext context) {
  HoodieTableMetaClient dataMetaClient = HoodieTableMetaClient.builder().setBasePath(basePath).setConf(context.getHadoopConf().get()).build();
deleteMetadataTable(dataMetaClient, context, false);
}
| 3.26 |
hudi_HoodieTableMetadataUtil_getFileGroupIndexFromFileId_rdh
|
/**
* Extract the index from the fileID of a file group in the MDT partition. See {@code getFileIDForFileGroup} for the format of the fileID.
*
* @param fileId
* fileID of a file group.
* @return The index of file group
*/
public static int getFileGroupIndexFromFileId(String fileId) {
final int endIndex = m1(fileId);
final int fromIndex = fileId.lastIndexOf("-", endIndex - 1);
return Integer.parseInt(fileId.substring(fromIndex + 1, endIndex));
}
| 3.26 |
hudi_HoodieTableMetadataUtil_mapRecordKeyToFileGroupIndex_rdh
|
/**
* Map a record key to a file group in partition of interest.
* <p>
 * Note: For hashing, the algorithm is the same as String.hashCode(), but it is defined here because the hashCode()
 * implementation is not guaranteed by the JVM to be consistent across JVM versions and implementations.
*
* @param recordKey
* record key for which the file group index is looked up for.
* @return An integer hash of the given string
*/
public static int mapRecordKeyToFileGroupIndex(String recordKey, int numFileGroups) {
int h = 0;
for (int i = 0; i < recordKey.length(); ++i) {
h = (31 * h) + recordKey.charAt(i);
}
return Math.abs(Math.abs(h) % numFileGroups);
}
| 3.26 |
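A quick, illustrative check that the rolling hash above matches String.hashCode(), and why the result is wrapped in two Math.abs calls: Math.abs(Integer.MIN_VALUE) is still negative, so the inner abs alone cannot guarantee a non-negative remainder.

```java
// Illustration of the record-key-to-file-group hashing above.
public class FileGroupHashSketch {
  static int mapRecordKeyToFileGroupIndex(String recordKey, int numFileGroups) {
    int h = 0;
    for (int i = 0; i < recordKey.length(); ++i) {
      h = (31 * h) + recordKey.charAt(i); // same rolling hash as String.hashCode()
    }
    return Math.abs(Math.abs(h) % numFileGroups);
  }

  public static void main(String[] args) {
    String key = "2023-10-01/record-0001";
    System.out.println(key.hashCode());                        // matches the rolling hash
    System.out.println(mapRecordKeyToFileGroupIndex(key, 16)); // always within [0, 16)
  }
}
```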
hudi_HoodieTableMetadataUtil_getPartitionLatestFileSlicesIncludingInflight_rdh
|
/**
* Get the latest file slices for a given partition including the inflight ones.
*
* @param metaClient
* - instance of {@link HoodieTableMetaClient}
* @param fileSystemView
* - hoodie table file system view, which will be fetched from meta client if not already present
* @param partition
* - name of the partition whose file groups are to be loaded
 * @return list of the latest file slices for the partition, including inflight ones
 */
public static List<FileSlice> getPartitionLatestFileSlicesIncludingInflight(HoodieTableMetaClient metaClient, Option<HoodieTableFileSystemView> fileSystemView, String partition) {
HoodieTableFileSystemView fsView = fileSystemView.orElse(getFileSystemView(metaClient));
Stream<FileSlice> fileSliceStream = fsView.fetchLatestFileSlicesIncludingInflight(partition);
return fileSliceStream.sorted(Comparator.comparing(FileSlice::getFileId)).collect(Collectors.toList());
}
| 3.26 |
hudi_HoodieTableMetadataUtil_getPartitionIdentifier_rdh
|
/**
* Returns partition name for the given path.
*/
public static String getPartitionIdentifier(@Nonnull String relativePartitionPath) {
return EMPTY_PARTITION_NAME.equals(relativePartitionPath) ? NON_PARTITIONED_NAME : relativePartitionPath;
}
| 3.26 |
hudi_HoodieTableMetadataUtil_m1_rdh
|
/**
* Returns the length of the fileID ignoring the fileIndex suffix
* <p>
* 0.10 version MDT code added -0 (0th fileIndex) to the fileID. This was removed later.
* <p>
* Examples:
* 0.11+ version: fileID: files-0000 returns 10
* 0.10 version: fileID: files-0000-0 returns 10
*
* @param fileId
* The fileID
* @return The length of the fileID ignoring the fileIndex suffix
 */
private static int m1(String fileId) {
  return fileId.endsWith("-0") ? fileId.length() - 2 : fileId.length();
}
| 3.26 |
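A standalone illustration of the fileID parsing used by m1, getFileGroupIndexFromFileId, and getFileGroupPrefix in this table; the helper is renamed to fileIdLengthWithoutFileIndex purely for readability (a local sketch, not the library API).

```java
// Sketch of stripping the legacy "-0" fileIndex suffix and extracting the prefix.
public class FileIdParsingSketch {
  static int fileIdLengthWithoutFileIndex(String fileId) {
    return fileId.endsWith("-0") ? fileId.length() - 2 : fileId.length();
  }

  static String fileGroupPrefix(String fileId) {
    return fileId.substring(0, fileIdLengthWithoutFileIndex(fileId));
  }

  public static void main(String[] args) {
    System.out.println(fileIdLengthWithoutFileIndex("files-0000"));   // 10 (0.11+ format)
    System.out.println(fileIdLengthWithoutFileIndex("files-0000-0")); // 10 (0.10 format)
    System.out.println(fileGroupPrefix("files-0000-0"));              // files-0000
  }
}
```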
hudi_HoodieTableMetadataUtil_isValidInstant_rdh
|
/**
* Checks if the Instant is a delta commit and has a valid suffix for operations on MDT.
*
* @param instant
* {@code HoodieInstant} to check.
* @return {@code true} if the instant is valid.
*/
public static boolean isValidInstant(HoodieInstant instant) {
  // Should be a deltacommit
  if (!instant.getAction().equals(HoodieTimeline.DELTA_COMMIT_ACTION)) {
    return false;
  }
// Check correct length. The timestamp should have a suffix over the timeline's timestamp format.
final String instantTimestamp = instant.getTimestamp();
if (!(instantTimestamp.length() == (MILLIS_INSTANT_ID_LENGTH + OperationSuffix.METADATA_INDEXER.getSuffix().length()))) {
return false;
}
// Is this a fixed operations suffix
final String suffix = instantTimestamp.substring(instantTimestamp.length() - 3);
if (OperationSuffix.isValidSuffix(suffix)) {
return true;
}
// Is this a index init suffix?
if (suffix.compareTo(String.format("%03d", PARTITION_INITIALIZATION_TIME_SUFFIX)) >= 0) {
return true;
}
return false;
}
| 3.26 |
hudi_HoodieTableMetadataUtil_tryUpcastDecimal_rdh
|
/**
* Does an upcast for {@link BigDecimal} instance to align it with scale/precision expected by
* the {@link org.apache.avro.LogicalTypes.Decimal} Avro logical type
*/
public static BigDecimal tryUpcastDecimal(BigDecimal value, final LogicalTypes.Decimal decimal) {
final int scale = decimal.getScale();
final int valueScale = value.scale();
boolean scaleAdjusted = false;
if (valueScale != scale) {
  try {
value = value.setScale(scale, RoundingMode.UNNECESSARY);
scaleAdjusted = true;
} catch (ArithmeticException aex) {
throw new AvroTypeException(((("Cannot encode decimal with scale " + valueScale) + " as scale ") +
scale) + " without rounding");
}
}
int precision = decimal.getPrecision();
int valuePrecision = value.precision();
if (valuePrecision > precision) {
if (scaleAdjusted) {
throw new AvroTypeException((((((("Cannot encode decimal with precision " +
valuePrecision) +
" as max precision ") + precision) + ". This is after safely adjusting scale from ") + valueScale) + " to required ") + scale);
} else {
throw new AvroTypeException((("Cannot encode decimal with precision " + valuePrecision) + " as max precision ") +
precision);
}
}
return value;
}
| 3.26 |
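A hedged usage sketch for the upcast above: a value with scale 2 widened to a decimal(10, 4) logical type only gains trailing zeros, while digits that do not fit raise AvroTypeException instead of being silently rounded. The import of HoodieTableMetadataUtil assumes the usual org.apache.hudi.metadata package for the utility class shown in this table.

```java
import java.math.BigDecimal;
import org.apache.avro.LogicalTypes;
import org.apache.hudi.metadata.HoodieTableMetadataUtil; // package name assumed

public class DecimalUpcastExample {
  public static void main(String[] args) {
    LogicalTypes.Decimal decimalType = LogicalTypes.decimal(10, 4); // precision 10, scale 4
    BigDecimal upcast = HoodieTableMetadataUtil.tryUpcastDecimal(new BigDecimal("12.34"), decimalType);
    System.out.println(upcast); // 12.3400
  }
}
```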
hudi_HoodieTableMetadataUtil_getMetadataPartitionsNeedingWriteStatusTracking_rdh
|
/**
* Returns true if any enabled metadata partition in the given hoodie table requires WriteStatus to track the written records.
*
* @param config
* MDT config
* @param metaClient
* {@code HoodieTableMetaClient} of the data table
* @return true if WriteStatus should track the written records else false.
*/
public static boolean getMetadataPartitionsNeedingWriteStatusTracking(HoodieMetadataConfig config, HoodieTableMetaClient metaClient) {
// Does any enabled partition need to track the written records
if (MetadataPartitionType.getMetadataPartitionsNeedingWriteStatusTracking().stream().anyMatch(p -> metaClient.getTableConfig().isMetadataPartitionAvailable(p))) {
return true;
}
// Does any inflight partitions need to track the written records
Set<String> metadataPartitionsInflight = metaClient.getTableConfig().getMetadataPartitionsInflight();
if (MetadataPartitionType.getMetadataPartitionsNeedingWriteStatusTracking().stream().anyMatch(p -> metadataPartitionsInflight.contains(p.getPartitionPath()))) {
  return true;
}
// Does any partition being enabled need to track the written records
if (config.enableRecordIndex()) {
return true;
}
return false;
}
| 3.26 |
hudi_HoodieTableMetadataUtil_getFileGroupPrefix_rdh
|
/**
* Extract the fileID prefix from the fileID of a file group in the MDT partition. See {@code getFileIDForFileGroup} for the format of the fileID.
*
* @param fileId
* fileID of a file group.
* @return The fileID without the file index
*/
public static String getFileGroupPrefix(String fileId) {
return fileId.substring(0, m1(fileId));
}
| 3.26 |
hudi_AppendWriteFunction_initWriterHelper_rdh
|
// -------------------------------------------------------------------------
// Utilities
// -------------------------------------------------------------------------
private void initWriterHelper() {
final String instant = instantToWrite(true);
if (instant == null) {
// in case there are empty checkpoints that have no input data
throw new HoodieException("No inflight instant when flushing data!");
}
this.writerHelper = new BulkInsertWriterHelper(this.config, this.writeClient.getHoodieTable(), this.writeClient.getConfig(), instant, this.taskID, getRuntimeContext().getNumberOfParallelSubtasks(), getRuntimeContext().getAttemptNumber(), this.rowType, false, Option.of(writeMetrics));
}
| 3.26 |
hudi_AppendWriteFunction_endInput_rdh
|
/**
* End input action for batch source.
*/
public void endInput() {
super.endInput();
flushData(true);
this.writeStatuses.clear();
}
| 3.26 |
hudi_AppendWriteFunction_m0_rdh
|
// -------------------------------------------------------------------------
// GetterSetter
// -------------------------------------------------------------------------
@VisibleForTesting
public BulkInsertWriterHelper m0() {
return this.writerHelper;
}
| 3.26 |
hudi_BaseSparkCommitActionExecutor_buildProfile_rdh
|
/**
* Count the number of updates/inserts for each file in each partition.
*/
private Pair<HashMap<String, WorkloadStat>, WorkloadStat> buildProfile(HoodieData<HoodieRecord<T>> inputRecords) {
HashMap<String, WorkloadStat> partitionPathStatMap = new HashMap<>();
WorkloadStat globalStat = new WorkloadStat();
// group the records by partitionPath + currentLocation combination, count the number of
// records in each partition
Map<Tuple2<String, Option<HoodieRecordLocation>>, Long> partitionLocationCounts = inputRecords.mapToPair(record -> Pair.of(new Tuple2<>(record.getPartitionPath(), Option.ofNullable(record.getCurrentLocation())), record)).countByKey();
// count the number of both inserts and updates in each partition, update the counts to workLoadStats
for (Map.Entry<Tuple2<String, Option<HoodieRecordLocation>>, Long> e : partitionLocationCounts.entrySet()) {
String partitionPath = e.getKey()._1();
Long count = e.getValue();
Option<HoodieRecordLocation> locOpt = e.getKey()._2();
if (!partitionPathStatMap.containsKey(partitionPath)) {
  partitionPathStatMap.put(partitionPath, new WorkloadStat());
}
if (locOpt.isPresent()) {
  // update
  partitionPathStatMap.get(partitionPath).addUpdates(locOpt.get(), count);
  globalStat.addUpdates(locOpt.get(), count);
} else {
  // insert
  partitionPathStatMap.get(partitionPath).addInserts(count);
  globalStat.addInserts(count);
}
}
return Pair.of(partitionPathStatMap, globalStat);
}
| 3.26 |
hudi_HoodieTableConfig_getTableVersion_rdh
|
/**
*
* @return the hoodie.table.version from hoodie.properties file.
*/
public HoodieTableVersion getTableVersion() {
return contains(VERSION) ? HoodieTableVersion.versionFromCode(getInt(VERSION)) : VERSION.defaultValue();
}
| 3.26 |
hudi_HoodieTableConfig_getIndexDefinitionPath_rdh
|
/**
*
 * @return the index definition path.
*/
public Option<String> getIndexDefinitionPath() {
return Option.ofNullable(getString(INDEX_DEFINITION_PATH));
}
| 3.26 |
hudi_HoodieTableConfig_getLogFileFormat_rdh
|
/**
* Get the log Storage Format.
*
* @return HoodieFileFormat for the log Storage format
*/
public HoodieFileFormat getLogFileFormat() {
return HoodieFileFormat.valueOf(getStringOrDefault(LOG_FILE_FORMAT));
}
| 3.26 |
hudi_HoodieTableConfig_getTableType_rdh
|
/**
* Read the table type from the table properties and if not found, return the default.
*/
public HoodieTableType getTableType() {
return HoodieTableType.valueOf(getStringOrDefault(TYPE));
}
| 3.26 |
hudi_HoodieTableConfig_setMetadataPartitionsInflight_rdh
|
/**
* Enables the specified metadata table partition as inflight.
*
* @param partitionTypes
* The list of partitions to enable as inflight.
*/
public void setMetadataPartitionsInflight(HoodieTableMetaClient metaClient, List<MetadataPartitionType> partitionTypes) {
Set<String> partitionsInflight = getMetadataPartitionsInflight();
partitionTypes.forEach(t -> {
ValidationUtils.checkArgument(!t.getPartitionPath().contains(CONFIG_VALUES_DELIMITER), "Metadata Table partition path cannot contain a comma: " + t.getPartitionPath());
partitionsInflight.add(t.getPartitionPath());
});
setValue(TABLE_METADATA_PARTITIONS_INFLIGHT, partitionsInflight.stream().sorted().collect(Collectors.joining(CONFIG_VALUES_DELIMITER)));
update(metaClient.getFs(), new Path(metaClient.getMetaPath()), getProps());
LOG.info(String.format("MDT %s partitions %s have been set to inflight", metaClient.getBasePathV2(), partitionTypes));
}
| 3.26 |
hudi_HoodieTableConfig_getTableChecksum_rdh
|
/**
* Read the table checksum.
*/
private Long getTableChecksum() {
return getLong(TABLE_CHECKSUM);
}
| 3.26 |
hudi_HoodieTableConfig_create_rdh
|
/**
* Initialize the hoodie meta directory and any necessary files inside the meta (including the hoodie.properties).
*/
public static void create(FileSystem fs, Path metadataFolder, Properties properties) throws IOException {
if (!fs.exists(metadataFolder)) {
fs.mkdirs(metadataFolder);
}
HoodieConfig hoodieConfig = new HoodieConfig(properties);
Path propertyPath = new Path(metadataFolder, HOODIE_PROPERTIES_FILE);
try (FSDataOutputStream outputStream = fs.create(propertyPath)) {
if (!hoodieConfig.contains(NAME)) {
throw new IllegalArgumentException(NAME.key() + " property needs to be specified");
}
hoodieConfig.setDefaultValue(TYPE);
if (hoodieConfig.getString(TYPE).equals(HoodieTableType.MERGE_ON_READ.name())) {
hoodieConfig.setDefaultValue(PAYLOAD_TYPE);
hoodieConfig.setDefaultValue(RECORD_MERGER_STRATEGY);
}
hoodieConfig.setDefaultValue(ARCHIVELOG_FOLDER);
if (!hoodieConfig.contains(TIMELINE_LAYOUT_VERSION)) {
// Use latest Version as default unless forced by client
hoodieConfig.setValue(TIMELINE_LAYOUT_VERSION, TimelineLayoutVersion.CURR_VERSION.toString());
}
      if (hoodieConfig.contains(BOOTSTRAP_BASE_PATH)) {
        // Use the default bootstrap index class.
        hoodieConfig.setDefaultValue(BOOTSTRAP_INDEX_CLASS_NAME, BootstrapIndexType.getDefaultBootstrapIndexClassName(hoodieConfig));
      }
      if (hoodieConfig.contains(TIMELINE_TIMEZONE)) {
        HoodieInstantTimeGenerator.setCommitTimeZone(HoodieTimelineTimeZone.valueOf(hoodieConfig.getString(TIMELINE_TIMEZONE)));
      }
hoodieConfig.setDefaultValue(DROP_PARTITION_COLUMNS);
storeProperties(hoodieConfig.getProps(), outputStream);
}
}
| 3.26 |
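As a usage illustration, the following sketch initializes hoodie.properties for a local table path. It assumes, based on the snippet above, that NAME and TYPE are public ConfigProperty fields of HoodieTableConfig; the package names in the imports and the /tmp path are assumptions as well.

import java.util.Properties;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hudi.common.model.HoodieTableType;
import org.apache.hudi.common.table.HoodieTableConfig;

public class CreateTableConfigSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative local path; in practice this is <basePath>/.hoodie of the table.
    Path metaFolder = new Path("/tmp/hudi_demo_table/.hoodie");
    FileSystem fs = metaFolder.getFileSystem(new Configuration());

    Properties props = new Properties();
    // NAME is mandatory -- create() throws IllegalArgumentException without it.
    props.setProperty(HoodieTableConfig.NAME.key(), "demo_table");
    props.setProperty(HoodieTableConfig.TYPE.key(), HoodieTableType.COPY_ON_WRITE.name());

    // Initializes the meta folder and writes hoodie.properties with defaults filled in.
    HoodieTableConfig.create(fs, metaFolder, props);
  }
}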
hudi_HoodieTableConfig_m0_rdh
|
/**
 * Read the record merger strategy from the table properties.
*/
public String m0() {
return getStringOrDefault(RECORD_MERGER_STRATEGY);
}
| 3.26 |
hudi_HoodieTableConfig_storeProperties_rdh
|
/**
* Write the properties to the given output stream and return the table checksum.
*
* @param props
* - properties to be written
* @param outputStream
* - output stream to which properties will be written
* @return return the table checksum
* @throws IOException
*/
private static String storeProperties(Properties props, FSDataOutputStream outputStream) throws IOException {
final String checksum;
if (isValidChecksum(props)) {
checksum = props.getProperty(TABLE_CHECKSUM.key());
props.store(outputStream, "Updated at " + Instant.now());
} else {
Properties propsWithChecksum = getOrderedPropertiesWithTableChecksum(props);
propsWithChecksum.store(outputStream, "Properties saved on " + Instant.now());
checksum = propsWithChecksum.getProperty(TABLE_CHECKSUM.key());
props.setProperty(TABLE_CHECKSUM.key(), checksum);
}
return checksum;
}
| 3.26 |
hudi_HoodieTableConfig_update_rdh
|
/**
* Upserts the table config with the set of properties passed in. We implement a fail-safe backup protocol
* here for safely updating with recovery and also ensuring the table config continues to be readable.
*/
public static void update(FileSystem fs, Path metadataFolder, Properties updatedProps) {
modify(fs, metadataFolder, updatedProps, ConfigUtils::upsertProperties);
}
| 3.26 |
hudi_HoodieTableConfig_clearMetadataPartitions_rdh
|
/**
 * Clear {@link HoodieTableConfig#TABLE_METADATA_PARTITIONS} and
 * {@link HoodieTableConfig#TABLE_METADATA_PARTITIONS_INFLIGHT}.
*/
public void clearMetadataPartitions(HoodieTableMetaClient metaClient) {
setMetadataPartitionState(metaClient, MetadataPartitionType.FILES, false);
}
| 3.26 |
hudi_HoodieTableConfig_getBaseFileFormat_rdh
|
/**
* Get the base file storage format.
*
 * @return HoodieFileFormat for the base file storage format
*/
public HoodieFileFormat getBaseFileFormat() {
return HoodieFileFormat.valueOf(getStringOrDefault(BASE_FILE_FORMAT));
}
| 3.26 |
hudi_HoodieTableConfig_populateMetaFields_rdh
|
/**
*
 * @return true if meta fields need to be populated, else false.
*/
public boolean populateMetaFields() {
return Boolean.parseBoolean(getStringOrDefault(POPULATE_META_FIELDS));
}
| 3.26 |
hudi_HoodieTableConfig_getRawRecordKeyFieldProp_rdh
|
/**
*
 * @return the record key field prop.
 */
public String getRawRecordKeyFieldProp() {
  return getStringOrDefault(RECORDKEY_FIELDS, null);
}
| 3.26 |
hudi_HoodieTableConfig_setMetadataPartitionState_rdh
|
/**
* Enables or disables the specified metadata table partition.
*
* @param partitionType
* The partition
* @param enabled
* If true, the partition is enabled, else disabled
*/
public void setMetadataPartitionState(HoodieTableMetaClient metaClient, MetadataPartitionType partitionType, boolean enabled) {
  ValidationUtils.checkArgument(!partitionType.getPartitionPath().contains(CONFIG_VALUES_DELIMITER), "Metadata Table partition path cannot contain a comma: " + partitionType.getPartitionPath());
  Set<String> partitions = getMetadataPartitions();
  Set<String> partitionsInflight = getMetadataPartitionsInflight();
  if (enabled) {
    partitions.add(partitionType.getPartitionPath());
    partitionsInflight.remove(partitionType.getPartitionPath());
  } else if (partitionType.equals(MetadataPartitionType.FILES)) {
    // file listing partition is required for all other partitions to work
    // Disabling file partition will also disable all partitions
    partitions.clear();
    partitionsInflight.clear();
  } else {
    partitions.remove(partitionType.getPartitionPath());
    partitionsInflight.remove(partitionType.getPartitionPath());
  }
  setValue(TABLE_METADATA_PARTITIONS, partitions.stream().sorted().collect(Collectors.joining(CONFIG_VALUES_DELIMITER)));
  setValue(TABLE_METADATA_PARTITIONS_INFLIGHT, partitionsInflight.stream().sorted().collect(Collectors.joining(CONFIG_VALUES_DELIMITER)));
  update(metaClient.getFs(), new Path(metaClient.getMetaPath()), getProps());
  LOG.info(String.format("MDT %s partition %s has been %s", metaClient.getBasePathV2(), partitionType.name(), enabled ? "enabled" : "disabled"));
}
| 3.26 |
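A usage sketch, assuming an already-built HoodieTableMetaClient (construction elided) and the package locations shown in the imports:

import org.apache.hudi.common.table.HoodieTableConfig;
import org.apache.hudi.common.table.HoodieTableMetaClient;
import org.apache.hudi.metadata.MetadataPartitionType;

public class MetadataPartitionStateSketch {
  // Given an already-built meta client, flip the FILES metadata partition on or off.
  static void toggleFilesPartition(HoodieTableMetaClient metaClient, boolean enabled) {
    HoodieTableConfig tableConfig = metaClient.getTableConfig();
    // Note: disabling FILES clears every metadata partition, since all of them depend on file listings.
    tableConfig.setMetadataPartitionState(metaClient, MetadataPartitionType.FILES, enabled);
  }
}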
hudi_HoodieTableConfig_getDatabaseName_rdh
|
/**
* Read the database name.
*/
public String getDatabaseName() {
return getString(DATABASE_NAME);
}
| 3.26 |
hudi_HoodieTableConfig_getPayloadClass_rdh
|
/**
* Read the payload class for HoodieRecords from the table properties.
*/
public String getPayloadClass() {
return RecordPayloadType.getPayloadClassName(this);
}
| 3.26 |
hudi_HoodieTableConfig_getBootstrapIndexClass_rdh
|
/**
 * Read the bootstrap index class name from the table properties.
*/
public String getBootstrapIndexClass() {
if (!props.getBoolean(BOOTSTRAP_INDEX_ENABLE.key(), BOOTSTRAP_INDEX_ENABLE.defaultValue())) {
return BootstrapIndexType.NO_OP.getClassName();
}
String bootstrapIndexClassName;
if (contains(BOOTSTRAP_INDEX_TYPE)) {
bootstrapIndexClassName = BootstrapIndexType.valueOf(getString(BOOTSTRAP_INDEX_TYPE)).getClassName();
} else if (contains(BOOTSTRAP_INDEX_CLASS_NAME)) {
bootstrapIndexClassName = getString(BOOTSTRAP_INDEX_CLASS_NAME);
} else {
bootstrapIndexClassName = BootstrapIndexType.valueOf(BOOTSTRAP_INDEX_TYPE.defaultValue()).getClassName();
}
  return bootstrapIndexClassName;
}
| 3.26 |
hudi_HoodieTableConfig_getPartitionMetafileFormat_rdh
|
/**
* Returns the format to use for partition meta files.
 */
public Option<HoodieFileFormat> getPartitionMetafileFormat() {
if (getBooleanOrDefault(PARTITION_METAFILE_USE_BASE_FORMAT)) {
return Option.of(getBaseFileFormat());
}
return Option.empty();
}
| 3.26 |
hudi_HoodieTableConfig_getArchivelogFolder_rdh
|
/**
* Get the relative path of archive log folder under metafolder, for this table.
*/
public String getArchivelogFolder() {
  return getStringOrDefault(ARCHIVELOG_FOLDER);
}
| 3.26 |
hudi_HoodieTableConfig_isMetadataPartitionAvailable_rdh
|
/**
* Checks if metadata table is enabled and the specified partition has been initialized.
*
* @param partition
* The partition to check
 * @return true if the specified partition has been initialized, else false.
 */
public boolean isMetadataPartitionAvailable(MetadataPartitionType partition) {
  return getMetadataPartitions().contains(partition.getPartitionPath());
}
| 3.26 |
hudi_HoodieTableConfig_m2_rdh
|
/**
*
 * @return the partition field prop.
* @deprecated please use {@link #getPartitionFields()} instead
*/
@Deprecated
public String m2() {
// NOTE: We're adding a stub returning empty string to stay compatible w/ pre-existing
// behavior until this method is fully deprecated
return Option.ofNullable(getString(PARTITION_FIELDS)).orElse("");
}
| 3.26 |
hudi_HoodieTableConfig_getRecordKeyFieldProp_rdh
|
/**
*
 * @return the record key field prop.
*/
public String getRecordKeyFieldProp() {
return getStringOrDefault(RECORDKEY_FIELDS, HoodieRecord.RECORD_KEY_METADATA_FIELD);
}
| 3.26 |
hudi_HoodieRecord_setNewLocation_rdh
|
/**
* Sets the new currentLocation of the record, after being written. This again should happen exactly-once.
*/
public void setNewLocation(HoodieRecordLocation location) {
checkState();
assert newLocation == null;
this.newLocation = location;
}
| 3.26 |
hudi_HoodieRecord_read_rdh
|
/**
* NOTE: This method is declared final to make sure there's no polymorphism and therefore
* JIT compiler could perform more aggressive optimizations
*/
@Override
public final void read(Kryo kryo, Input input) {
this.key = kryo.readObjectOrNull(input, HoodieKey.class);
this.operation = kryo.readObjectOrNull(input, HoodieOperation.class);
this.currentLocation = ((HoodieRecordLocation) (kryo.readClassAndObject(input)));
this.newLocation = ((HoodieRecordLocation) (kryo.readClassAndObject(input)));
// NOTE: Reading out actual record payload is relegated to the actual
// implementation
this.data = readRecordPayload(kryo, input);
// NOTE: We always seal the object after deserialization
this.sealed = true;
}
| 3.26 |
hudi_HoodieRecord_write_rdh
|
/**
* NOTE: This method is declared final to make sure there's no polymorphism and therefore
* JIT compiler could perform more aggressive optimizations
*/
@Override
public final void write(Kryo kryo, Output output) {
  kryo.writeObjectOrNull(output, key, HoodieKey.class);
  kryo.writeObjectOrNull(output, operation, HoodieOperation.class);
// NOTE: We have to write actual class along with the object here,
// since [[HoodieRecordLocation]] has inheritors
kryo.writeClassAndObject(output, currentLocation);
kryo.writeClassAndObject(output, newLocation);
// NOTE: Writing out actual record payload is relegated to the actual
// implementation
writeRecordPayload(data, kryo, output);
}
| 3.26 |
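The read/write pair above follows Kryo's KryoSerializable contract (write mirrors read field-for-field, nullable fields go through writeObjectOrNull, polymorphic ones through writeClassAndObject). A standalone, minimal illustration of that contract, with a hypothetical Record class unrelated to HoodieRecord:

import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.KryoSerializable;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;

public class KryoContractSketch {
  // Hypothetical class mirroring the pattern above: nullable field via writeObjectOrNull,
  // polymorphic field via writeClassAndObject.
  public static class Record implements KryoSerializable {
    String key;
    Object location; // may hold subclasses, hence writeClassAndObject

    @Override
    public void write(Kryo kryo, Output output) {
      kryo.writeObjectOrNull(output, key, String.class);
      kryo.writeClassAndObject(output, location);
    }

    @Override
    public void read(Kryo kryo, Input input) {
      this.key = kryo.readObjectOrNull(input, String.class);
      this.location = kryo.readClassAndObject(input);
    }
  }

  public static void main(String[] args) {
    Kryo kryo = new Kryo();
    kryo.setRegistrationRequired(false);

    Record original = new Record();
    original.key = "key-1";
    original.location = "partition/file-1";

    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    try (Output out = new Output(bytes)) {
      kryo.writeObject(out, original); // invokes Record.write(...)
    }
    try (Input in = new Input(new ByteArrayInputStream(bytes.toByteArray()))) {
      Record copy = kryo.readObject(in, Record.class); // invokes Record.read(...)
      System.out.println(copy.key + " @ " + copy.location);
    }
  }
}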
hudi_HoodieRecord_clearNewLocation_rdh
|
/**
* Clears the new currentLocation of the record.
*
* This is required in the delete path so that Index can track that this record was deleted.
*/
public void clearNewLocation() {
checkState();
this.newLocation = null;
}
| 3.26 |
hudi_HoodieRecord_deflate_rdh
|
/**
* Release the actual payload, to ease memory pressure. To be called after the record has been written to storage.
* Once deflated, cannot be inflated.
*/
public void deflate() {
  this.data = null;
}
| 3.26 |
hudi_HoodieFlinkCopyOnWriteTable_bulkInsertPrepped_rdh
|
/**
* Bulk inserts the given prepared records into the Hoodie table, at the supplied instantTime.
*
* <p>This implementation requires that the input records are already tagged, and de-duped if needed.
*
* <p>Specifies the write handle explicitly in order to have fine-grained control with
* the underneath file.
*
* @param context
* HoodieEngineContext
* @param instantTime
* Instant Time for the action
* @param preppedRecords
* Hoodie records to bulk_insert
* @return HoodieWriteMetadata
*/
public HoodieWriteMetadata<List<WriteStatus>> bulkInsertPrepped(HoodieEngineContext context, HoodieWriteHandle<?, ?, ?, ?> writeHandle, String instantTime, List<HoodieRecord<T>> preppedRecords) {
return new FlinkBulkInsertPreppedCommitActionExecutor<>(context, writeHandle, config, this, instantTime, preppedRecords).execute();
}
| 3.26 |
hudi_HoodieFlinkCopyOnWriteTable_handleUpdate_rdh
|
// -------------------------------------------------------------------------
// Used for compaction
// -------------------------------------------------------------------------
@Override
public Iterator<List<WriteStatus>> handleUpdate(String instantTime, String partitionPath, String fileId, Map<String, HoodieRecord<T>> keyToNewRecords, HoodieBaseFile oldDataFile) throws IOException {
// these are updates
HoodieMergeHandle upsertHandle = getUpdateHandle(instantTime, partitionPath, fileId, keyToNewRecords, oldDataFile);
return handleUpdateInternal(upsertHandle, instantTime, fileId);
}
| 3.26 |
hudi_HoodieFlinkCopyOnWriteTable_deletePrepped_rdh
|
/**
* Delete the given prepared records from the Hoodie table, at the supplied instantTime.
*
* <p>This implementation requires that the input records are already tagged, and de-duped if needed.
*
* <p>Specifies the write handle explicitly in order to have fine-grained control with
* the underneath file.
*
* @param context
* {@link HoodieEngineContext}
* @param instantTime
* Instant Time for the action
* @param preppedRecords
* Hoodie records to delete
* @return {@link HoodieWriteMetadata}
*/
public HoodieWriteMetadata<List<WriteStatus>> deletePrepped(HoodieEngineContext context, HoodieWriteHandle<?, ?, ?, ?> writeHandle, String instantTime, List<HoodieRecord<T>> preppedRecords) {
return new FlinkDeletePreppedCommitActionExecutor<>(context, writeHandle, config, this, instantTime, preppedRecords).execute();
}
| 3.26 |
hudi_HoodieFlinkCopyOnWriteTable_upsert_rdh
|
/**
* Upsert a batch of new records into Hoodie table at the supplied instantTime.
*
* <p>Specifies the write handle explicitly in order to have fine-grained control with
* the underneath file.
*
* @param context
* HoodieEngineContext
* @param writeHandle
* The write handle
* @param instantTime
* Instant Time for the action
* @param records
* hoodieRecords to upsert
* @return HoodieWriteMetadata
*/
public HoodieWriteMetadata<List<WriteStatus>> upsert(HoodieEngineContext context, HoodieWriteHandle<?, ?, ?, ?> writeHandle, String instantTime, List<HoodieRecord<T>> records) {
return new FlinkUpsertCommitActionExecutor<>(context, writeHandle, config, this, instantTime, records).execute();
}
| 3.26 |
hudi_HoodieFlinkCopyOnWriteTable_delete_rdh
|
/**
 * Deletes a list of {@link HoodieKey}s from the Hoodie table at the supplied instantTime. The {@link HoodieKey}s will be
 * de-duped and non-existent keys will be removed before deleting.
*
* <p>Specifies the write handle explicitly in order to have fine-grained control with
* the underneath file.
*
* @param context
* HoodieEngineContext
* @param writeHandle
* The write handle
* @param instantTime
* Instant Time for the action
* @param keys
* {@link List} of {@link HoodieKey}s to be deleted
* @return HoodieWriteMetadata
*/
public HoodieWriteMetadata<List<WriteStatus>> delete(HoodieEngineContext context, HoodieWriteHandle<?, ?, ?, ?> writeHandle, String instantTime, List<HoodieKey> keys) {
return new FlinkDeleteCommitActionExecutor<>(context, writeHandle, config, this, instantTime, keys).execute();
}
| 3.26 |
hudi_HoodieFlinkCopyOnWriteTable_upsertPrepped_rdh
|
/**
* Upserts the given prepared records into the Hoodie table, at the supplied instantTime.
*
* <p>This implementation requires that the input records are already tagged, and de-duped if needed.
*
* <p>Specifies the write handle explicitly in order to have fine-grained control with
* the underneath file.
*
* @param context
* HoodieEngineContext
* @param instantTime
* Instant Time for the action
* @param preppedRecords
* Hoodie records to upsert
* @return HoodieWriteMetadata
*/
public HoodieWriteMetadata<List<WriteStatus>> upsertPrepped(HoodieEngineContext context, HoodieWriteHandle<?, ?, ?, ?> writeHandle, String instantTime, List<HoodieRecord<T>> preppedRecords) {
return new FlinkUpsertPreppedCommitActionExecutor<>(context, writeHandle, config, this, instantTime, preppedRecords).execute();
}
| 3.26 |
hudi_HoodieFlinkCopyOnWriteTable_insert_rdh
|
/**
* Insert a batch of new records into Hoodie table at the supplied instantTime.
*
* <p>Specifies the write handle explicitly in order to have fine-grained control with
* the underneath file.
*
* @param context
* HoodieEngineContext
* @param writeHandle
* The write handle
* @param instantTime
* Instant Time for the action
* @param records
 * hoodieRecords to insert
* @return HoodieWriteMetadata
*/
public HoodieWriteMetadata<List<WriteStatus>> insert(HoodieEngineContext context, HoodieWriteHandle<?, ?, ?, ?> writeHandle, String instantTime, List<HoodieRecord<T>> records) {
return new FlinkInsertCommitActionExecutor<>(context, writeHandle, config, this, instantTime, records).execute();
}
| 3.26 |
hudi_HoodieFlinkCopyOnWriteTable_insertPrepped_rdh
|
/**
* Inserts the given prepared records into the Hoodie table, at the supplied instantTime.
*
* <p>This implementation requires that the input records are already tagged, and de-duped if needed.
*
* <p>Specifies the write handle explicitly in order to have fine-grained control with
* the underneath file.
*
* @param context
* HoodieEngineContext
* @param instantTime
* Instant Time for the action
* @param preppedRecords
* Hoodie records to insert
* @return HoodieWriteMetadata
*/
public HoodieWriteMetadata<List<WriteStatus>> insertPrepped(HoodieEngineContext context, HoodieWriteHandle<?, ?, ?, ?> writeHandle, String instantTime, List<HoodieRecord<T>> preppedRecords) {
return new FlinkInsertPreppedCommitActionExecutor<>(context, writeHandle, config, this, instantTime, preppedRecords).execute();
}
| 3.26 |
hudi_HoodieFlinkCopyOnWriteTable_scheduleCleaning_rdh
|
/**
*
* @param context
* HoodieEngineContext
* @param instantTime
* Instant Time for scheduling cleaning
* @param extraMetadata
* additional metadata to write into plan
 * @return the generated cleaner plan, if cleaning is scheduled
 */
@Override
public Option<HoodieCleanerPlan> scheduleCleaning(HoodieEngineContext context, String instantTime, Option<Map<String, String>> extraMetadata) {
  return new CleanPlanActionExecutor(context, config, this, instantTime, extraMetadata).execute();
}
| 3.26 |
hudi_SparkUtil_initLauncher_rdh
|
/**
 * TODO: Need to fix a bunch of hardcoded stuff here, e.g. history server, spark distro.
 */
public static SparkLauncher initLauncher(String propertiesFile) throws URISyntaxException {
String currentJar = new File(SparkUtil.class.getProtectionDomain().getCodeSource().getLocation().toURI().getPath()).getAbsolutePath();
Map<String, String> env = SparkEnvCommand.env;
SparkLauncher sparkLauncher = new SparkLauncher(env).setAppResource(currentJar).setMainClass(SparkMain.class.getName());
if (!StringUtils.isNullOrEmpty(propertiesFile)) {
sparkLauncher.setPropertiesFile(propertiesFile);
}
File libDirectory = new File(new File(currentJar).getParent(), "lib");
if (libDirectory.exists()) {// When directly using hudi-cli module, the jars under the lib directory
// generated by the compilation is required
Arrays.stream(libDirectory.list()).forEach(library -> sparkLauncher.addJar(new File(libDirectory, library).getAbsolutePath()));
} else {
// When using hudi-cli-bundle, we also need to add the hudi-spark*-bundle
// so that the Hudi Spark job can be launched
String sparkBundleJarPath = System.getenv("SPARK_BUNDLE_JAR");
if (!StringUtils.isNullOrEmpty(sparkBundleJarPath)) {
sparkLauncher.addJar(sparkBundleJarPath);
}
}
return sparkLauncher;
}
| 3.26 |
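A hedged usage sketch: the SparkUtil package in the import, the null properties file, and the application arguments below are assumptions; only standard SparkLauncher calls are used.

import org.apache.spark.launcher.SparkLauncher;
import org.apache.hudi.cli.utils.SparkUtil; // assumed package of the class above

public class CliLauncherSketch {
  public static void main(String[] args) throws Exception {
    // null properties file -> rely on defaults; a real path could be passed instead.
    SparkLauncher launcher = SparkUtil.initLauncher(null);

    // Illustrative arguments forwarded to SparkMain; the actual argument set depends on the CLI command.
    launcher.addAppArgs("COMPACT_SCHEDULE", "/tmp/hudi_demo_table");

    // Launch as a child process and wait for it to finish.
    Process process = launcher.launch();
    int exitCode = process.waitFor();
    System.out.println("Spark job exited with code " + exitCode);
  }
}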
hudi_SparkUtil_getDefaultConf_rdh
|
/**
* Get the default spark configuration.
*
* @param appName
* - Spark application name
* @param sparkMaster
* - Spark master node name
* @return Spark configuration
*/
public static SparkConf getDefaultConf(final String appName, final Option<String> sparkMaster) {
final Properties properties = System.getProperties();
SparkConf sparkConf = new SparkConf().setAppName(appName);
// Configure the sparkMaster
String sparkMasterNode = DEFAULT_SPARK_MASTER;
if (properties.getProperty(HoodieCliSparkConfig.CLI_SPARK_MASTER) != null) {
sparkMasterNode = properties.getProperty(HoodieCliSparkConfig.CLI_SPARK_MASTER);
}
if (sparkMaster.isPresent() && (!sparkMaster.get().trim().isEmpty())) {
sparkMasterNode = sparkMaster.orElse(sparkMasterNode);
}
sparkConf.setMaster(sparkMasterNode);
// Configure driver
sparkConf.set(HoodieCliSparkConfig.CLI_DRIVER_MAX_RESULT_SIZE, "2g");
sparkConf.set(HoodieCliSparkConfig.CLI_EVENT_LOG_OVERWRITE, "true");
sparkConf.set(HoodieCliSparkConfig.CLI_EVENT_LOG_ENABLED, "false");
sparkConf.set(HoodieCliSparkConfig.CLI_SERIALIZER, "org.apache.spark.serializer.KryoSerializer"); sparkConf.set("spark.kryo.registrator", "org.apache.spark.HoodieSparkKryoRegistrar");
// Configure hadoop conf
sparkConf.set(HoodieCliSparkConfig.CLI_MAPRED_OUTPUT_COMPRESS, "true");
sparkConf.set(HoodieCliSparkConfig.CLI_MAPRED_OUTPUT_COMPRESSION_CODEC, "true");
sparkConf.set(HoodieCliSparkConfig.CLI_MAPRED_OUTPUT_COMPRESSION_CODEC, "org.apache.hadoop.io.compress.GzipCodec");
sparkConf.set(HoodieCliSparkConfig.CLI_MAPRED_OUTPUT_COMPRESSION_TYPE, "BLOCK");
return sparkConf;
}
| 3.26 |
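For example (a sketch; the app name, the local master, and the package names in the imports are assumptions):

import org.apache.hudi.cli.utils.SparkUtil;
import org.apache.hudi.common.util.Option;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;

public class DefaultConfSketch {
  public static void main(String[] args) {
    // Master resolution order: explicit argument > CLI_SPARK_MASTER system property > built-in default.
    SparkConf conf = SparkUtil.getDefaultConf("hoodie-cli-demo", Option.of("local[2]"));
    try (JavaSparkContext jsc = new JavaSparkContext(conf)) {
      System.out.println("Spark master in use: " + conf.get("spark.master"));
    }
  }
}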
hudi_HoodieWrapperFileSystem_createImmutableFileInPath_rdh
|
/**
* Creates a new file with overwrite set to false. This ensures files are created
* only once and never rewritten, also, here we take care if the content is not
* empty, will first write the content to a temp file if {needCreateTempFile} is
* true, and then rename it back after the content is written.
*
* @param fullPath
* File Path
* @param content
* Content to be stored
*/
public void createImmutableFileInPath(Path fullPath, Option<byte[]> content) throws HoodieIOException {
  FSDataOutputStream fsout = null;
  Path tmpPath = null;
  boolean needTempFile = needCreateTempFile();
  try {
    if (!content.isPresent()) {
      fsout = fileSystem.create(fullPath, false);
    }
    if (content.isPresent() && needTempFile) {
      Path parent = fullPath.getParent();
      tmpPath = new Path(parent, fullPath.getName() + TMP_PATH_POSTFIX);
      fsout = fileSystem.create(tmpPath, false);
      fsout.write(content.get());
    }
    if (content.isPresent() && (!needTempFile)) {
      fsout = fileSystem.create(fullPath, false);
      fsout.write(content.get());
    }
  } catch (IOException e) {
    String errorMsg = "Failed to create file " + (tmpPath != null ? tmpPath : fullPath);
    throw new HoodieIOException(errorMsg, e);
  } finally {
    try {
      if (null != fsout) {
        fsout.close();
      }
    } catch (IOException e) {
      String errorMsg = "Failed to close file " + (needTempFile ? tmpPath : fullPath);
      throw new HoodieIOException(errorMsg, e);
    }
    boolean renameSuccess = false;
    try {
      if (null != tmpPath) {
        renameSuccess = fileSystem.rename(tmpPath, fullPath);
      }
    } catch (IOException e) {
      throw new HoodieIOException("Failed to rename " + tmpPath + " to the target " + fullPath, e);
    } finally {
      if ((!renameSuccess) && (null != tmpPath)) {
        try {
          fileSystem.delete(tmpPath, false);
          LOG.warn("Fail to rename " + tmpPath + " to " + fullPath + ", target file exists: " + fileSystem.exists(fullPath));
        } catch (IOException e) {
          throw new HoodieIOException("Failed to delete tmp file " + tmpPath, e);
        }
      }
    }
  }
}
| 3.26 |
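A caller-side sketch; how the HoodieWrapperFileSystem instance is obtained is elided, and the target path and payload are made up:

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.fs.Path;
import org.apache.hudi.common.fs.HoodieWrapperFileSystem;
import org.apache.hudi.common.util.Option;

public class ImmutableFileSketch {
  // Writes a small metadata file exactly once; a second call against the same path fails
  // because the underlying create() is invoked with overwrite=false.
  static void writeOnce(HoodieWrapperFileSystem wrapperFs, String basePath) {
    Path target = new Path(basePath + "/.hoodie/demo.commit.requested"); // illustrative path
    byte[] payload = "{}".getBytes(StandardCharsets.UTF_8);
    wrapperFs.createImmutableFileInPath(target, Option.of(payload));
  }
}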
hudi_HoodieColumnProjectionUtils_supportTimestamp_rdh
|
/**
 * If the schema contains timestamp columns, this method is used for compatibility when there are no timestamp fields.
*
* <p>We expect to use parquet-avro reader {@link org.apache.hudi.hadoop.avro.HoodieAvroParquetReader} to read
* timestamp column when read columns contain timestamp type.
*/
public static boolean supportTimestamp(Configuration conf) {
List<String> readCols = Arrays.asList(getReadColumnNames(conf));
if (readCols.isEmpty()) {
return false;
}
String colTypes = conf.get(IOConstants.COLUMNS_TYPES, "");
if ((colTypes == null) || colTypes.isEmpty()) {
return false;
}
ArrayList<TypeInfo> types = TypeInfoUtils.getTypeInfosFromTypeString(colTypes);
List<String> names = getIOColumns(conf);
  return IntStream.range(0, names.size()).filter(i -> readCols.contains(names.get(i))).anyMatch(i -> typeContainsTimestamp(types.get(i)));
}
| 3.26 |
hudi_HoodieColumnProjectionUtils_getReadColumnIDs_rdh
|
/**
* Returns an array of column ids(start from zero) which is set in the given
* parameter <tt>conf</tt>.
*/
public static List<Integer> getReadColumnIDs(Configuration conf) {
String skips = conf.get(READ_COLUMN_IDS_CONF_STR, READ_COLUMN_IDS_CONF_STR_DEFAULT);
String[] list = StringUtils.split(skips);
List<Integer> v2 = new ArrayList<Integer>(list.length);
for (String element : list) {
// it may contain duplicates, remove duplicates
Integer toAdd = Integer.parseInt(element);
if (!v2.contains(toAdd)) {
v2.add(toAdd);
}
// NOTE: some code uses this list to correlate with column names, and yet these lists may
// contain duplicates, which this call will remove and the other won't. As far as I can
// tell, no code will actually use these two methods together; all is good if the code
// gets the ID list without relying on this method. Or maybe it just works by magic.
}
return v2;
}
| 3.26 |
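The parse-and-deduplicate behavior boils down to the following plain-Java equivalent, assuming the stored value is a comma-separated list of ids (the helper name here is made up):

import java.util.ArrayList;
import java.util.List;

public class ReadColumnIdsSketch {
  // Mirrors the loop above: parse a comma-separated id list and drop duplicates,
  // preserving first-occurrence order.
  static List<Integer> parseColumnIds(String skips) {
    List<Integer> ids = new ArrayList<>();
    for (String element : skips.split(",")) {
      if (element.isEmpty()) {
        continue;
      }
      Integer toAdd = Integer.parseInt(element.trim());
      if (!ids.contains(toAdd)) {
        ids.add(toAdd);
      }
    }
    return ids;
  }

  public static void main(String[] args) {
    System.out.println(parseColumnIds("0,2,2,5")); // prints [0, 2, 5]
  }
}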
hudi_HoodieDeltaWriteStat_setRecordsStats_rdh
|
// keep for serialization efficiency
public void setRecordsStats(Map<String, HoodieColumnRangeMetadata<Comparable>> stats) {
recordsStats = Option.of(stats);
}
| 3.26 |
hudi_MetadataCommand_setMetadataBaseDirectory_rdh
|
/**
* Sets the directory to store/read Metadata Table.
* <p>
* This can be used to store the metadata table away from the dataset directory.
* - Useful for testing as well as for using via the HUDI CLI so that the actual dataset is not written to.
* - Useful for testing Metadata Table performance and operations on existing datasets before enabling.
*/
public static void setMetadataBaseDirectory(String metadataDir) {
ValidationUtils.checkState(metadataBaseDirectory == null, "metadataBaseDirectory is already set to " + metadataBaseDirectory);
  metadataBaseDirectory = metadataDir;
}
| 3.26 |
hudi_BaseHoodieTableServiceClient_scheduleClusteringAtInstant_rdh
|
/**
* Schedules a new clustering instant with passed-in instant time.
*
* @param instantTime
* clustering Instant Time
* @param extraMetadata
* Extra Metadata to be stored
*/
public boolean scheduleClusteringAtInstant(String instantTime, Option<Map<String, String>> extraMetadata) throws HoodieIOException {
return scheduleTableService(instantTime, extraMetadata, TableServiceType.CLUSTER).isPresent();
}
| 3.26 |
hudi_BaseHoodieTableServiceClient_inlineScheduleCompaction_rdh
|
/**
* Schedules compaction inline.
*
* @param extraMetadata
* extra metadata to be used.
* @return compaction instant if scheduled.
*/
protected Option<String> inlineScheduleCompaction(Option<Map<String, String>> extraMetadata) {
return scheduleCompaction(extraMetadata);
}
| 3.26 |
hudi_BaseHoodieTableServiceClient_getInflightTimelineExcludeCompactionAndClustering_rdh
|
/**
* Get inflight timeline excluding compaction and clustering.
*
 * @param metaClient the table meta client
 * @return the inflight timeline excluding compaction and clustering
 */
private HoodieTimeline getInflightTimelineExcludeCompactionAndClustering(HoodieTableMetaClient metaClient) {
  HoodieTimeline inflightTimelineWithReplaceCommit = metaClient.getCommitsTimeline().filterPendingExcludingCompaction();
  HoodieTimeline inflightTimelineExcludeClusteringCommit = inflightTimelineWithReplaceCommit.filter(instant -> {
    if (instant.getAction().equals(HoodieTimeline.REPLACE_COMMIT_ACTION)) {
      Option<Pair<HoodieInstant, HoodieClusteringPlan>> instantPlan = ClusteringUtils.getClusteringPlan(metaClient, instant);
      return !instantPlan.isPresent();
    } else {
      return true;
    }
  });
  return inflightTimelineExcludeClusteringCommit;
}
| 3.26 |
hudi_BaseHoodieTableServiceClient_clean_rdh
|
/**
* Clean up any stale/old files/data lying around (either on file storage or index storage) based on the
* configurations and CleaningPolicy used. (typically files that no longer can be used by a running query can be
* cleaned). This API provides the flexibility to schedule clean instant asynchronously via
* {@link BaseHoodieTableServiceClient#scheduleTableService(String, Option, TableServiceType)} and disable inline scheduling
* of clean.
*
* @param cleanInstantTime
* instant time for clean.
* @param scheduleInline
* true if needs to be scheduled inline. false otherwise.
*/
@Nullable
public HoodieCleanMetadata clean(String cleanInstantTime, boolean scheduleInline) throws HoodieIOException {
if (!tableServicesEnabled(config)) {
return null;
}
final Timer.Context timerContext = metrics.getCleanCtx();
CleanerUtils.rollbackFailedWrites(config.getFailedWritesCleanPolicy(), HoodieTimeline.CLEAN_ACTION, () -> rollbackFailedWrites());
HoodieTable table = createTable(config, hadoopConf);
if (config.allowMultipleCleans() || (!table.getActiveTimeline().getCleanerTimeline().filterInflightsAndRequested().firstInstant().isPresent())) {
LOG.info("Cleaner started");
// proceed only if multiple clean schedules are enabled or if there are no pending cleans.
if (scheduleInline) {
scheduleTableServiceInternal(cleanInstantTime, Option.empty(), TableServiceType.CLEAN);
table.getMetaClient().reloadActiveTimeline();
}
if (shouldDelegateToTableServiceManager(config, ActionType.clean)) {
LOG.warn("Cleaning is not yet supported with Table Service Manager.");
return null;
}
}
// Proceeds to execute any requested or inflight clean instances in the timeline
HoodieCleanMetadata metadata = table.clean(context, cleanInstantTime);
if ((timerContext != null) && (metadata != null)) {
long durationMs = metrics.getDurationInMs(timerContext.stop());
metrics.updateCleanMetrics(durationMs, metadata.getTotalFilesDeleted());
LOG.info(((((("Cleaned " + metadata.getTotalFilesDeleted()) + " files") + " Earliest Retained Instant :") + metadata.getEarliestCommitToRetain()) + " cleanerElapsedMs") + durationMs);
}
return metadata;
}
| 3.26 |
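A usage sketch; the client is assumed to be a fully constructed concrete subclass (the raw type is used here only to stay agnostic of its generic signature), and the instant time is supplied by the caller:

import org.apache.hudi.avro.model.HoodieCleanMetadata;
import org.apache.hudi.client.BaseHoodieTableServiceClient;

public class CleanSketch {
  // 'client' is a fully constructed concrete table service client; 'cleanInstantTime' is a valid new instant time.
  @SuppressWarnings("rawtypes")
  static void runClean(BaseHoodieTableServiceClient client, String cleanInstantTime) {
    HoodieCleanMetadata metadata = client.clean(cleanInstantTime, true); // schedule inline, then execute
    if (metadata != null) { // null when table services are disabled or cleaning is delegated
      System.out.println("Deleted " + metadata.getTotalFilesDeleted()
          + " files, earliest retained instant: " + metadata.getEarliestCommitToRetain());
    }
  }
}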
hudi_BaseHoodieTableServiceClient_completeLogCompaction_rdh
|
/**
* Commit Log Compaction and track metrics.
*/
protected void completeLogCompaction(HoodieCommitMetadata metadata, HoodieTable table, String logCompactionCommitTime) {
this.context.setJobStatus(this.getClass().getSimpleName(), "Collect log compaction write status and commit compaction");
List<HoodieWriteStat> writeStats = metadata.getWriteStats();
handleWriteErrors(writeStats, TableServiceType.LOG_COMPACT);
final HoodieInstant logCompactionInstant = new HoodieInstant(State.INFLIGHT, HoodieTimeline.LOG_COMPACTION_ACTION, logCompactionCommitTime);
try {
this.txnManager.beginTransaction(Option.of(logCompactionInstant), Option.empty());
preCommit(metadata);
finalizeWrite(table, logCompactionCommitTime, writeStats);
// commit to data table after committing to metadata table.
writeTableMetadata(table, logCompactionCommitTime, metadata, context.emptyHoodieData());
LOG.info((("Committing Log Compaction " + logCompactionCommitTime) + ". Finished with result ") + metadata);
CompactHelpers.getInstance().completeInflightLogCompaction(table, logCompactionCommitTime, metadata);
    } finally {
this.txnManager.endTransaction(Option.of(logCompactionInstant));
}
WriteMarkersFactory.get(config.getMarkersType(), table, logCompactionCommitTime).quietDeleteMarkerDir(context, config.getMarkersDeleteParallelism());
if (logCompactionTimer != null) {
long durationInMs = metrics.getDurationInMs(logCompactionTimer.stop());
      HoodieActiveTimeline.parseDateFromInstantTimeSafely(logCompactionCommitTime).ifPresent(parsedInstant -> metrics.updateCommitMetrics(parsedInstant.getTime(), durationInMs, metadata, HoodieActiveTimeline.LOG_COMPACTION_ACTION));
}
LOG.info("Log Compacted successfully on commit " + logCompactionCommitTime);
}
| 3.26 |
hudi_BaseHoodieTableServiceClient_archive_rdh
|
/**
 * Trigger archival for the table. This ensures that the number of commits does not explode
 * and keep increasing unbounded over time.
*
* @param table
* table to commit on.
*/
protected void archive(HoodieTable table) {
if (!tableServicesEnabled(config)) {
return;
}
try {
final Timer.Context timerContext = metrics.getArchiveCtx();
// We cannot have unbounded commit files. Archive commits if we have to archive
      HoodieTimelineArchiver archiver = new HoodieTimelineArchiver(config, table);
      int instantsToArchive = archiver.archiveIfRequired(context, true);
if (timerContext != null) {
long durationMs = metrics.getDurationInMs(timerContext.stop());
this.metrics.updateArchiveMetrics(durationMs, instantsToArchive);
}
} catch (IOException ioe) {
throw new HoodieIOException("Failed to archive", ioe);
}
}
| 3.26 |
hudi_BaseHoodieTableServiceClient_scheduleCompaction_rdh
|
/**
* Schedules a new compaction instant.
*
* @param extraMetadata
* Extra Metadata to be stored
*/
public Option<String> scheduleCompaction(Option<Map<String, String>> extraMetadata) throws HoodieIOException {
String instantTime = createNewInstantTime();
return scheduleCompactionAtInstant(instantTime, extraMetadata) ? Option.of(instantTime) : Option.empty();
}
| 3.26 |
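A caller-side sketch, assuming an already-constructed concrete client (raw type used for the same reason as above):

import org.apache.hudi.client.BaseHoodieTableServiceClient;
import org.apache.hudi.common.util.Option;

public class ScheduleCompactionSketch {
  @SuppressWarnings("rawtypes")
  static void maybeScheduleCompaction(BaseHoodieTableServiceClient client) {
    // No extra metadata is attached to the compaction plan in this sketch.
    Option<String> instant = client.scheduleCompaction(Option.empty());
    if (instant.isPresent()) {
      System.out.println("Scheduled compaction at instant " + instant.get());
    } else {
      System.out.println("No compaction was scheduled");
    }
  }
}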
hudi_BaseHoodieTableServiceClient_completeCompaction_rdh
|
/**
* Commit Compaction and track metrics.
*/
protected void completeCompaction(HoodieCommitMetadata metadata, HoodieTable table, String compactionCommitTime) {
this.context.setJobStatus(this.getClass().getSimpleName(), "Collect compaction write status and commit compaction: " + config.getTableName());
List<HoodieWriteStat> writeStats = metadata.getWriteStats();
handleWriteErrors(writeStats, TableServiceType.COMPACT);
    final HoodieInstant compactionInstant = HoodieTimeline.getCompactionInflightInstant(compactionCommitTime);
try {
this.txnManager.beginTransaction(Option.of(compactionInstant), Option.empty());
finalizeWrite(table, compactionCommitTime, writeStats);
// commit to data table after committing to metadata table.
writeTableMetadata(table, compactionCommitTime, metadata, context.emptyHoodieData());
LOG.info((("Committing Compaction " + compactionCommitTime) + ". Finished with result ") + metadata);
CompactHelpers.getInstance().completeInflightCompaction(table, compactionCommitTime, metadata);
} finally {
this.txnManager.endTransaction(Option.of(compactionInstant));
}
WriteMarkersFactory.get(config.getMarkersType(), table, compactionCommitTime).quietDeleteMarkerDir(context, config.getMarkersDeleteParallelism());
if (compactionTimer != null) {
long v18 = metrics.getDurationInMs(compactionTimer.stop());
HoodieActiveTimeline.parseDateFromInstantTimeSafely(compactionCommitTime).ifPresent(parsedInstant -> metrics.updateCommitMetrics(parsedInstant.getTime(), v18, metadata, COMPACTION_ACTION));
}
LOG.info("Compacted successfully on commit " + compactionCommitTime);
}
| 3.26 |