name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68)
---|---|---|
hudi_HoodieRepairTool_copyFiles_rdh
|
/**
* Copies the list of files from source base path to destination base path.
* The destination file path (base + relative) should not already exist.
*
* @param context
* {@link HoodieEngineContext} instance.
* @param relativeFilePaths
* A {@link List} of relative file paths for copying.
* @param sourceBasePath
* Source base path.
* @param destBasePath
* Destination base path.
* @return {@code true} if all successful; {@code false} otherwise.
*/
static boolean copyFiles(HoodieEngineContext context, List<String> relativeFilePaths, String sourceBasePath, String destBasePath) {
SerializableConfiguration conf = context.getHadoopConf();
List<Boolean> allResults = context.parallelize(relativeFilePaths).mapPartitions(iterator -> {
List<Boolean> results = new ArrayList<>();
FileSystem fs = FSUtils.getFs(destBasePath, conf.get());
iterator.forEachRemaining(filePath -> {
boolean success = false;
Path v11 = new Path(sourceBasePath, filePath);
Path destPath = new Path(destBasePath, filePath);
try {
if (!fs.exists(destPath)) {
FileIOUtils.copy(fs, v11, destPath);
success = true;
}
} catch (IOException e) {
// Copy Fail
f0.error(String.format("Copying file fails: source [%s], destination [%s]", v11, destPath));
} finally {
results.add(success);
}
});
return results.iterator();
}, true).collectAsList();
return allResults.stream().reduce((r1, r2) -> r1 && r2).orElse(false);
}
/**
* Lists all Hoodie files from the table base path.
*
* @param context
* {@link HoodieEngineContext}
| 3.26 |
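The snippet above parallelizes the per-file copies and then AND-reduces the per-file booleans into a single success flag. Below is a minimal, dependency-free sketch of the same copy-and-aggregate pattern using `java.nio` in place of the Hudi/Hadoop types; the class and method names are illustrative, not Hudi APIs.

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;

public class CopyFilesSketch {
  // Copies each relative path from sourceBase to destBase, treating an already-existing
  // destination as a failure (mirroring the contract above), and returns true only if
  // every copy attempt succeeded.
  static boolean copyFiles(List<String> relativePaths, String sourceBase, String destBase) {
    return relativePaths.parallelStream()
        .map(rel -> {
          Path src = Paths.get(sourceBase, rel);
          Path dst = Paths.get(destBase, rel);
          try {
            if (Files.exists(dst)) {
              return false; // destination file path must not already exist
            }
            if (dst.getParent() != null) {
              Files.createDirectories(dst.getParent());
            }
            Files.copy(src, dst);
            return true;
          } catch (IOException e) {
            System.err.printf("Copying file fails: source [%s], destination [%s]%n", src, dst);
            return false;
          }
        })
        // same reduction as allResults.stream().reduce((r1, r2) -> r1 && r2).orElse(false)
        .reduce(Boolean::logicalAnd)
        .orElse(false);
  }
}
```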
hudi_HoodieRepairTool_printRepairInfo_rdh
|
/**
* Prints the repair info.
*
* @param instantTimesToRepair
* A list of instant times in consideration for repair.
* @param instantsWithDanglingFiles
* A list of instants with dangling files.
*/
private void printRepairInfo(List<String> instantTimesToRepair, List<ImmutablePair<String, List<String>>> instantsWithDanglingFiles) {
int numInstantsToRepair = instantsWithDanglingFiles.size();
f0.warn("Number of instants verified based on the base and log files: " + instantTimesToRepair.size());
f0.warn("Instant timestamps: " + instantTimesToRepair);
f0.warn("Number of instants to repair: " + numInstantsToRepair);
if (numInstantsToRepair > 0) {
instantsWithDanglingFiles.forEach(e -> f0.warn(" ** Removing files: " +
e.getValue()));
}
}
| 3.26 |
hudi_HoodieRepairTool_doRepair_rdh
|
/**
* Does repair, either in REPAIR or DRY_RUN mode.
*
* @param startingInstantOption
* {@link Option} of starting instant for scanning, can be empty.
* @param endingInstantOption
* {@link Option} of ending instant for scanning, can be empty.
* @param isDryRun
* Is dry run.
* @throws IOException
* upon errors.
*/
boolean doRepair(Option<String> startingInstantOption, Option<String> endingInstantOption, boolean isDryRun) throws IOException {
// Scans all partitions to find base and log files in the base path
List<Path> allFilesInPartitions = HoodieDataTableUtils.getBaseAndLogFilePathsFromFileSystem(tableMetadata, f1.basePath);
// Buckets the files based on instant time
// instant time -> relative paths of base and log files to base path
Map<String, List<String>> instantToFilesMap = RepairUtils.tagInstantsOfBaseAndLogFiles(metaClient.getBasePath(), allFilesInPartitions);
List<String> instantTimesToRepair = instantToFilesMap.keySet().stream().filter(instant -> ((!startingInstantOption.isPresent()) || (instant.compareTo(startingInstantOption.get()) >= 0)) && ((!endingInstantOption.isPresent()) || (instant.compareTo(endingInstantOption.get()) <= 0))).collect(Collectors.toList());
HoodieActiveTimeline activeTimeline = metaClient.getActiveTimeline();
HoodieArchivedTimeline archivedTimeline = metaClient.getArchivedTimeline();
// This assumes that the archived timeline only has completed instants so this is safe
archivedTimeline.loadCompletedInstantDetailsInMemory();
List<ImmutablePair<String, List<String>>> instantFilesToRemove = context.parallelize(instantTimesToRepair).map(instantToRepair -> new ImmutablePair<>(instantToRepair, RepairUtils.findInstantFilesToRemove(instantToRepair, instantToFilesMap.get(instantToRepair), activeTimeline, archivedTimeline))).collectAsList();
List<ImmutablePair<String, List<String>>> instantsWithDanglingFiles = instantFilesToRemove.stream().filter(e -> !e.getValue().isEmpty()).collect(Collectors.toList());
printRepairInfo(instantTimesToRepair, instantsWithDanglingFiles);
if (!isDryRun) {
List<String> relativeFilePathsToDelete = instantsWithDanglingFiles.stream().flatMap(e -> e.getValue().stream()).collect(Collectors.toList());
if (relativeFilePathsToDelete.size() > 0) {
if (!backupFiles(relativeFilePathsToDelete)) {
f0.error("Error backing up dangling files. Exiting...");
return false;
}
return deleteFiles(context, f1.basePath, relativeFilePathsToDelete);
}
f0.info(String.format("Table repair on %s is successful", f1.basePath));
}
return true;
}
| 3.26 |
hudi_HoodieRepairTool_m0_rdh
|
/**
* Reads config from the file system.
*
* @param jsc
* {@link JavaSparkContext} instance.
* @param cfg
* {@link Config} instance.
* @return the {@link TypedProperties} instance.
*/
private TypedProperties m0(JavaSparkContext jsc, Config cfg) {
return UtilHelpers.readConfig(jsc.hadoopConfiguration(), new Path(cfg.propsFilePath), cfg.configs).getProps(true);
}
| 3.26 |
hudi_HoodieRepairTool_checkBackupPathAgainstBasePath_rdh
|
/**
* Verifies the backup path against table base path.
* If the backup path is within the table base path, throws an error.
*
* @return {@code 0} if successful; {@code -1} otherwise.
*/
int checkBackupPathAgainstBasePath() {
if (f1.backupPath == null) {
f0.error("Backup path is not configured");
return -1;
}
if (f1.backupPath.contains(f1.basePath)) {
f0.error(String.format("Cannot use backup path %s: it resides in the base path %s", f1.backupPath, f1.basePath));
return -1;
}
return 0;
}
| 3.26 |
hudi_HoodieRepairTool_restoreFiles_rdh
|
/**
* Restores dangling files from backup path to table base path.
*
* @param relativeFilePaths
* A {@link List} of relative file paths for restoring.
* @return {@code true} if all successful; {@code false} otherwise.
*/
boolean restoreFiles(List<String> relativeFilePaths) {
return copyFiles(context, relativeFilePaths, f1.backupPath, f1.basePath);
}
| 3.26 |
hudi_HoodieRepairTool_backupFiles_rdh
|
/**
* Backs up dangling files from table base path to backup path.
*
* @param relativeFilePaths
* A {@link List} of relative file paths for backup.
* @return {@code true} if all successful; {@code false} otherwise.
*/
boolean backupFiles(List<String> relativeFilePaths) {
return copyFiles(context, relativeFilePaths, f1.basePath, f1.backupPath);
}
| 3.26 |
hudi_HoodieRepairTool_checkBackupPathForRepair_rdh
|
/**
* Verifies the backup path for repair.
* If there is no backup path configured, creates a new one in temp folder.
* If the backup path already has files, throws an error to the user.
* If the backup path is within the table base path, throws an error too.
*
* @return {@code 0} if successful; {@code -1} otherwise.
* @throws IOException
* upon errors.
*/
int checkBackupPathForRepair() throws IOException {
if (f1.backupPath == null) {
SecureRandom random = new SecureRandom();
long randomLong = random.nextLong();
f1.backupPath = ("/tmp/" + BACKUP_DIR_PREFIX) + randomLong;
}
Path backupPath = new Path(f1.backupPath);
if (metaClient.getFs().exists(backupPath) && (metaClient.getFs().listStatus(backupPath).length > 0)) {
f0.error(String.format("Cannot use backup path %s: it is not empty", f1.backupPath));return -1;
}
return checkBackupPathAgainstBasePath();
}
| 3.26 |
hudi_HoodiePartitionMetadata_hasPartitionMetadata_rdh
|
// methods related to partition meta data
public static boolean hasPartitionMetadata(FileSystem fs, Path partitionPath) {
try {
return textFormatMetaPathIfExists(fs, partitionPath).isPresent() || baseFormatMetaPathIfExists(fs, partitionPath).isPresent();
} catch (IOException ioe) {
throw new HoodieIOException("Error checking presence of partition meta file for " + partitionPath, ioe);
}
}
| 3.26 |
hudi_HoodiePartitionMetadata_readPartitionCreatedCommitTime_rdh
|
/**
* Read out the COMMIT_TIME_KEY metadata for this partition.
*/
public Option<String> readPartitionCreatedCommitTime() {
try {
if (!props.containsKey(COMMIT_TIME_KEY)) {
readFromFS();
}
return Option.of(props.getProperty(COMMIT_TIME_KEY));
} catch (IOException ioe) {
LOG.warn("Error fetching Hoodie partition metadata for " + partitionPath, ioe);
return Option.empty();
}
}
| 3.26 |
hudi_HoodiePartitionMetadata_writeMetafile_rdh
|
/**
* Write the partition metadata in the correct format in the given file path.
*
* @param filePath
* Path of the file to write
* @throws IOException
*/
private void writeMetafile(Path filePath) throws IOException {
if (format.isPresent()) {
Schema schema = HoodieAvroUtils.getRecordKeySchema();
switch (format.get()) {
case PARQUET :
// Since we are only interested in saving metadata to the footer, the schema, blocksizes and other
// parameters are not important.
MessageType type = Types.buildMessage().optional(PrimitiveTypeName.INT64).named("dummyint").named("dummy");
HoodieAvroWriteSupport writeSupport = new HoodieAvroWriteSupport(type, schema, Option.empty(), new Properties());
try (ParquetWriter writer = new ParquetWriter(filePath, writeSupport, CompressionCodecName.UNCOMPRESSED, 1024, 1024)) {
for (String key : props.stringPropertyNames()) {
writeSupport.addFooterMetadata(key, props.getProperty(key));
}
}
break;
case ORC :
// Since we are only interested in saving metadata to the footer, the schema, blocksizes and other
// parameters are not important.
OrcFile.WriterOptions writerOptions = OrcFile.writerOptions(fs.getConf()).fileSystem(fs).setSchema(AvroOrcUtils.createOrcSchema(schema));
try (Writer writer = OrcFile.createWriter(filePath, writerOptions)) {
for (String key : props.stringPropertyNames()) {
writer.addUserMetadata(key, ByteBuffer.wrap(getUTF8Bytes(props.getProperty(key))));
}
}
break;
default :
throw new HoodieException("Unsupported format for partition metafiles: " + format.get());
}
} else {
// Backwards compatible properties file format
FSDataOutputStream os = fs.create(filePath, true);
props.store(os, "partition metadata");
os.hsync();
os.hflush();
os.close();
}
}
| 3.26 |
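For the legacy branch above (no base-file format), the metafile is just a `java.util.Properties` text file. A small, self-contained round-trip sketch, with illustrative keys rather than Hudi's actual property constants:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Properties;

public class PartitionMetafilePropsSketch {
  public static void main(String[] args) throws IOException {
    // Write: same java.util.Properties text format the legacy branch produces.
    Properties props = new Properties();
    props.setProperty("commitTime", "20240101000000"); // illustrative key/value
    props.setProperty("partitionDepth", "3");           // illustrative key/value
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    props.store(out, "partition metadata");

    // Read back: any Properties reader can recover the key/value pairs.
    Properties loaded = new Properties();
    loaded.load(new ByteArrayInputStream(out.toByteArray()));
    System.out.println(loaded.getProperty("commitTime")); // 20240101000000
  }
}
```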
hudi_HoodiePartitionMetadata_readFromFS_rdh
|
/**
* Read out the metadata for this partition.
*/
public void readFromFS() throws IOException {
// first try reading the text format (legacy, currently widespread)
boolean readFile = readTextFormatMetaFile();
if (!readFile) {
// now try reading the base file formats.
readFile = readBaseFormatMetaFile();
}
// throw exception.
if (!readFile) {
throw new HoodieException("Unable to read any partition meta file to locate the table timeline.");
}
}
| 3.26 |
hudi_HoodiePartitionMetadata_getPartitionMetafilePath_rdh
|
/**
* Returns the path of the partition metafile, if present.
*
* @return Name of the partition metafile or empty option
*/
public static Option<Path> getPartitionMetafilePath(FileSystem fs, Path partitionPath) {
// The partition listing is a costly operation so instead we are searching for existence of the files instead.
// This is in expected order as properties file based partition metafiles should be the most common.
try {
Option<Path> textFormatPath = textFormatMetaPathIfExists(fs, partitionPath);
if (textFormatPath.isPresent()) {
return textFormatPath;
} else {
return baseFormatMetaPathIfExists(fs, partitionPath);
}
} catch (IOException ioe) {
throw new HoodieException("Error checking Hoodie partition metadata for " + partitionPath, ioe);
}
}
| 3.26 |
hudi_JavaUpsertPartitioner_getSmallFiles_rdh
|
/**
* Returns a list of small files in the given partition path.
*/
protected List<SmallFile> getSmallFiles(String partitionPath) {
// smallFiles only for partitionPath
List<SmallFile> v29 = new ArrayList<>();
HoodieTimeline commitTimeline = table.getMetaClient().getCommitsTimeline().filterCompletedInstants();
if (!commitTimeline.empty()) {
// if we have some commits
HoodieInstant latestCommitTime = commitTimeline.lastInstant().get();
List<HoodieBaseFile> allFiles = table.getBaseFileOnlyView().getLatestBaseFilesBeforeOrOn(partitionPath, latestCommitTime.getTimestamp()).collect(Collectors.toList());
for (HoodieBaseFile file : allFiles) {
if (file.getFileSize() < config.getParquetSmallFileLimit()) {
SmallFile sf = new SmallFile();
sf.location = new HoodieRecordLocation(file.getCommitTime(), file.getFileId());
sf.sizeBytes = file.getFileSize();
v29.add(sf);
}
}
}
return v29;
}
| 3.26 |
hudi_JavaUpsertPartitioner_averageBytesPerRecord_rdh
|
/**
* Obtains the average record size based on records written during previous commits. Used for estimating how many
* records pack into one file.
*/
protected static long averageBytesPerRecord(HoodieTimeline commitTimeline, HoodieWriteConfig hoodieWriteConfig) {
long avgSize = hoodieWriteConfig.getCopyOnWriteRecordSizeEstimate();
long fileSizeThreshold = ((long) (hoodieWriteConfig.getRecordSizeEstimationThreshold() * hoodieWriteConfig.getParquetSmallFileLimit()));
try {
if (!commitTimeline.empty()) {
// Go over the reverse ordered commits to get a more recent estimate of average record size.
Iterator<HoodieInstant> instants = commitTimeline.getReverseOrderedInstants().iterator();
while (instants.hasNext()) {
HoodieInstant instant = instants.next();
HoodieCommitMetadata commitMetadata = HoodieCommitMetadata.fromBytes(commitTimeline.getInstantDetails(instant).get(), HoodieCommitMetadata.class);
long totalBytesWritten = commitMetadata.fetchTotalBytesWritten();
long totalRecordsWritten = commitMetadata.fetchTotalRecordsWritten();
if ((totalBytesWritten > fileSizeThreshold) && (totalRecordsWritten > 0)) {
avgSize = ((long) (Math.ceil((1.0 * totalBytesWritten) / totalRecordsWritten)));
break;
}
}
}
} catch (Throwable t) {
// make this fail safe.
LOG.error("Error trying to compute average bytes/record ", t);
}
return avgSize;
}
| 3.26 |
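A quick worked example of the size estimate above, with made-up totals: the average is the ceiling of total bytes over total records from the most recent sufficiently large commit.

```java
public class AvgRecordSizeSketch {
  public static void main(String[] args) {
    // Illustrative numbers: one commit wrote 120 MB across 1.5 million records.
    long totalBytesWritten = 120L * 1024 * 1024;   // 125,829,120 bytes
    long totalRecordsWritten = 1_500_000L;
    // Same rounding as the method above: ceil(totalBytes / totalRecords).
    long avgSize = (long) Math.ceil((1.0 * totalBytesWritten) / totalRecordsWritten);
    System.out.println(avgSize); // 84 bytes per record
  }
}
```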
hudi_HoodieInternalRowFileWriterFactory_getInternalRowFileWriter_rdh
|
/**
* Factory method to assist in instantiating an instance of {@link HoodieInternalRowFileWriter}.
*
* @param path
* path of the RowFileWriter.
* @param hoodieTable
* instance of {@link HoodieTable} in use.
* @param writeConfig
* instance of {@link HoodieWriteConfig} to use.
* @param schema
* schema of the dataset in use.
* @return the instantiated {@link HoodieInternalRowFileWriter}.
* @throws IOException
* if format is not supported or if any exception during instantiating the RowFileWriter.
*/
public static HoodieInternalRowFileWriter getInternalRowFileWriter(Path path, HoodieTable hoodieTable, HoodieWriteConfig writeConfig, StructType schema) throws IOException {
final String extension = FSUtils.getFileExtension(path.getName());
if (PARQUET.getFileExtension().equals(extension)) {
return newParquetInternalRowFileWriter(path, hoodieTable, writeConfig, schema, tryInstantiateBloomFilter(writeConfig));
}
throw new UnsupportedOperationException(extension + " format not supported yet.");
}
| 3.26 |
hudi_AWSDmsAvroPayload_handleDeleteOperation_rdh
|
/**
* Handle a possible delete - check for "D" in Op column and return empty row if found.
*
* @param insertValue
* The new row that is being "inserted".
*/
private Option<IndexedRecord> handleDeleteOperation(IndexedRecord insertValue)
throws IOException {
boolean delete = false;
if (insertValue instanceof GenericRecord) {
GenericRecord record = ((GenericRecord) (insertValue));
delete = isDMSDeleteRecord(record);
}
return delete ? Option.empty() : Option.of(insertValue);
}
| 3.26 |
hudi_InternalDynamicBloomFilter_write_rdh
|
// Writable
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
out.writeInt(nr);
out.writeInt(currentNbRecord);
out.writeInt(matrix.length);
for (InternalBloomFilter bloomFilter : matrix) {
bloomFilter.write(out);
}
}
| 3.26 |
hudi_InternalDynamicBloomFilter_addRow_rdh
|
/**
* Adds a new row to <i>this</i> dynamic Bloom filter.
*/
private void addRow() {
InternalBloomFilter[] tmp = new InternalBloomFilter[matrix.length + 1];
System.arraycopy(matrix, 0, tmp, 0, matrix.length);
tmp[tmp.length - 1] = new InternalBloomFilter(vectorSize, nbHash, hashType);
matrix = tmp;
}
| 3.26 |
hudi_CompactionCommitEvent_setInstant_rdh
|
// -------------------------------------------------------------------------
// Getter/Setter
// -------------------------------------------------------------------------
public void setInstant(String instant) {
this.instant = instant;
}
| 3.26 |
hudi_HoodieStreamerUtils_isDropPartitionColumns_rdh
|
/**
* Set based on hoodie.datasource.write.drop.partition.columns config.
* When set to true, will not write the partition columns into the table.
*/
static Boolean isDropPartitionColumns(TypedProperties props) {
return props.getBoolean(DROP_PARTITION_COLUMNS.key(), DROP_PARTITION_COLUMNS.defaultValue());
}
| 3.26 |
hudi_HoodieStreamerUtils_getPartitionColumns_rdh
|
/**
* Get the partition columns as a set of strings.
*
* @param props
* TypedProperties
* @return Set of partition columns.
*/
static Set<String> getPartitionColumns(TypedProperties props) {
String partitionColumns = SparkKeyGenUtils.getPartitionColumns(props);
return Arrays.stream(partitionColumns.split(",")).collect(Collectors.toSet());
}
| 3.26 |
hudi_IMetaStoreClientUtil_getMSC_rdh
|
/**
* Returns the Hive metastore client with given Hive conf.
*/
public static IMetaStoreClient getMSC(HiveConf hiveConf) throws HiveException, MetaException {
IMetaStoreClient metaStoreClient;
try {
metaStoreClient = ((Hive) (Hive.class.getMethod("getWithoutRegisterFns", HiveConf.class).invoke(null, hiveConf))).getMSC();
} catch (NoSuchMethodException | IllegalAccessException | IllegalArgumentException | InvocationTargetException ex) {
metaStoreClient = Hive.get(hiveConf).getMSC();
}
return metaStoreClient;
}
| 3.26 |
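The method above tries a newer `Hive` factory method via reflection and falls back to the older API when it is absent. A tiny standalone sketch of that reflect-then-fall-back pattern, using a JDK method as a stand-in for the Hive call:

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveFallbackSketch {
  // Illustrative helper mirroring the pattern above: invoke a method if the running
  // library version provides it, otherwise fall back to an older code path.
  static String describe(Class<?> clazz) {
    try {
      Method m = clazz.getMethod("getName"); // stand-in for Hive.getWithoutRegisterFns
      return (String) m.invoke(clazz);
    } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException ex) {
      return clazz.toString(); // fallback, analogous to Hive.get(hiveConf).getMSC()
    }
  }

  public static void main(String[] args) {
    System.out.println(describe(String.class)); // java.lang.String
  }
}
```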
hudi_ExceptionUtil_getRootCause_rdh
|
/**
* Fetches inner-most cause of the provided {@link Throwable}
*/
@Nonnull
public static Throwable getRootCause(@Nonnull
Throwable t) {
Throwable cause = t;
while (cause.getCause() != null) {cause = cause.getCause();
}
return cause;
}
| 3.26 |
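A short usage sketch of the unwrapping loop above on a hand-built exception chain:

```java
public class RootCauseSketch {
  public static void main(String[] args) {
    // Build a small exception chain and unwrap it the same way as getRootCause above.
    Throwable root = new java.io.IOException("disk full");
    Throwable wrapped = new RuntimeException("write failed",
        new IllegalStateException("flush failed", root));
    Throwable cause = wrapped;
    while (cause.getCause() != null) {
      cause = cause.getCause();
    }
    System.out.println(cause.getMessage()); // disk full
  }
}
```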
hudi_SchedulerConfGenerator_getSparkSchedulingConfigs_rdh
|
/**
* Helper to set Spark Scheduling Configs dynamically.
*
* @param cfg
* Config for HoodieDeltaStreamer
*/
public static Map<String, String> getSparkSchedulingConfigs(HoodieStreamer.Config cfg) throws Exception {
Option<String> scheduleModeKeyOption = new SparkConf().getOption(SPARK_SCHEDULER_MODE_KEY);
final Option<String> sparkSchedulerMode = (scheduleModeKeyOption.isDefined()) ? Option.of(scheduleModeKeyOption.get()) : Option.empty();
Map<String, String> additionalSparkConfigs = new HashMap<>(1);
if (((sparkSchedulerMode.isPresent() && SPARK_SCHEDULER_FAIR_MODE.equals(sparkSchedulerMode.get())) && cfg.continuousMode) && cfg.tableType.equals(HoodieTableType.MERGE_ON_READ.name())) {
String sparkSchedulingConfFile = generateAndStoreConfig(cfg.deltaSyncSchedulingWeight, cfg.compactSchedulingWeight, cfg.deltaSyncSchedulingMinShare, cfg.compactSchedulingMinShare, cfg.clusterSchedulingWeight, cfg.clusterSchedulingMinShare);
LOG.warn("Spark scheduling config file "
+ sparkSchedulingConfFile);
additionalSparkConfigs.put(SparkConfigs.SPARK_SCHEDULER_ALLOCATION_FILE_KEY(), sparkSchedulingConfFile);
} else {
LOG.warn("Job Scheduling Configs will not be in effect as spark.scheduler.mode "
+ "is not set to FAIR at instantiation time. Continuing without scheduling configs");
}
return additionalSparkConfigs;
}
| 3.26 |
hudi_SchedulerConfGenerator_generateAndStoreConfig_rdh
|
/**
* Generate spark scheduling configs and store it to a randomly generated tmp file.
*
* @param deltaSyncWeight
* Scheduling weight for delta sync
* @param compactionWeight
* Scheduling weight for compaction
* @param deltaSyncMinShare
* Minshare for delta sync
* @param compactionMinShare
* Minshare for compaction
* @param clusteringWeight
* Scheduling weight for clustering
* @param clusteringMinShare
* Minshare for clustering
* @return Return the absolute path of the tmp file which stores the spark schedule configs
* @throws IOException
* Throws an IOException when write configs to file failed
*/
private static String generateAndStoreConfig(Integer deltaSyncWeight, Integer compactionWeight, Integer deltaSyncMinShare, Integer compactionMinShare, Integer clusteringWeight, Integer clusteringMinShare) throws IOException {
File v4 = File.createTempFile(UUID.randomUUID().toString(), ".xml");
BufferedWriter bw = new BufferedWriter(new FileWriter(v4));
bw.write(generateConfig(deltaSyncWeight, compactionWeight, deltaSyncMinShare, compactionMinShare, clusteringWeight, clusteringMinShare));
bw.close();
// SPARK-35083 introduces remote scheduler pool files, so the file must include scheme since Spark 3.2
String path = (HoodieSparkUtils.gteqSpark3_2()) ? v4.toURI().toString() : v4.getAbsolutePath();
LOG.info("Configs written to file " + path);
return path;
}
| 3.26 |
hudi_SchedulerConfGenerator_generateConfig_rdh
|
/**
* Helper to generate spark scheduling configs in XML format with input params.
*
* @param deltaSyncWeight
* Scheduling weight for delta sync
* @param compactionWeight
* Scheduling weight for compaction
* @param deltaSyncMinShare
* Minshare for delta sync
* @param compactionMinShare
* Minshare for compaction
* @param clusteringWeight
* Scheduling weight for clustering
* @param clusteringMinShare
* Minshare for clustering
* @return Spark scheduling configs
*/
public static String generateConfig(Integer deltaSyncWeight, Integer compactionWeight, Integer deltaSyncMinShare, Integer compactionMinShare, Integer clusteringWeight, Integer clusteringMinShare) {
return String.format(SPARK_SCHEDULING_PATTERN, f0, SPARK_SCHEDULER_FAIR_MODE, deltaSyncWeight.toString(), deltaSyncMinShare.toString(), COMPACT_POOL_NAME, SPARK_SCHEDULER_FAIR_MODE, compactionWeight.toString(), compactionMinShare.toString(), CLUSTERING_POOL_NAME, SPARK_SCHEDULER_FAIR_MODE, clusteringWeight.toString(), clusteringMinShare.toString());
}
| 3.26 |
hudi_MarkerBasedRollbackUtils_getAllMarkerPaths_rdh
|
/**
* Gets all marker paths.
*
* @param table
* instance of {@code HoodieTable} to use
* @param context
* instance of {@code HoodieEngineContext} to use
* @param instant
* instant of interest to rollback
* @param parallelism
* parallelism to use
* @return a list of all markers
* @throws IOException
*/
public static List<String> getAllMarkerPaths(HoodieTable table, HoodieEngineContext context, String instant, int parallelism) throws IOException {
String markerDir = table.getMetaClient().getMarkerFolderPath(instant);
FileSystem v1 = table.getMetaClient().getFs();
Option<MarkerType> markerTypeOption = readMarkerType(v1, markerDir);
// If there is no marker type file "MARKERS.type", first assume "DIRECT" markers are used.
// If not, then fallback to "TIMELINE_SERVER_BASED" markers.
if (!markerTypeOption.isPresent()) {
WriteMarkers writeMarkers = WriteMarkersFactory.get(DIRECT, table, instant);
try {
return new ArrayList<>(writeMarkers.allMarkerFilePaths());
} catch (IOException | IllegalArgumentException e) {
LOG.warn(String.format("%s not present and %s marker failed with error: %s. So, falling back to %s marker", MARKER_TYPE_FILENAME, DIRECT, e.getMessage(), TIMELINE_SERVER_BASED));
return getTimelineServerBasedMarkers(context, parallelism, markerDir, v1);
}
}
switch (markerTypeOption.get()) {
case TIMELINE_SERVER_BASED :
// Reads all markers written by the timeline server
return getTimelineServerBasedMarkers(context, parallelism, markerDir, v1);
default :
throw new HoodieException(("The marker type \"" + markerTypeOption.get().name()) + "\" is not supported.");
}
}
| 3.26 |
hudi_CloudObjectsSelector_m0_rdh
|
/**
* Amazon SQS Client Builder.
*/
public SqsClient m0() {
return SqsClient.builder().region(Region.of(regionName)).build();
}
| 3.26 |
hudi_CloudObjectsSelector_createListPartitions_rdh
|
/**
* Create partitions of a list using a specific batch size. We can't use a third-party API for this
* functionality, due to https://github.com/apache/hudi/blob/master/style/checkstyle.xml#L270
*/
protected List<List<Message>> createListPartitions(List<Message> singleList, int eachBatchSize) {
List<List<Message>> listPartitions = new ArrayList<>();
if ((singleList.size() == 0) || (eachBatchSize < 1)) {
return listPartitions;
}
for (int start = 0; start < singleList.size(); start += eachBatchSize) {
int end = Math.min(start + eachBatchSize, singleList.size());
if (start > end) {
throw new IndexOutOfBoundsException(((("Index " + start) + " is out of the list range <0,") + (singleList.size() - 1)) + ">");
}
listPartitions.add(new ArrayList<>(singleList.subList(start, end)));
}
return listPartitions;
}
| 3.26 |
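A runnable sketch of the same batching logic written against plain strings, so it needs none of the SQS types; 25 items with a batch size of 10 yield batches of 10, 10 and 5.

```java
import java.util.ArrayList;
import java.util.List;

public class ListPartitionSketch {
  // Same batching loop as above, with illustrative String elements instead of Message.
  static List<List<String>> partition(List<String> singleList, int eachBatchSize) {
    List<List<String>> partitions = new ArrayList<>();
    if (singleList.isEmpty() || eachBatchSize < 1) {
      return partitions;
    }
    for (int start = 0; start < singleList.size(); start += eachBatchSize) {
      int end = Math.min(start + eachBatchSize, singleList.size());
      partitions.add(new ArrayList<>(singleList.subList(start, end)));
    }
    return partitions;
  }

  public static void main(String[] args) {
    List<String> messages = new ArrayList<>();
    for (int i = 0; i < 25; i++) {
      messages.add("msg-" + i);
    }
    // 25 messages in batches of 10 -> sizes 10, 10, 5 (matches the SQS delete-batch limit of 10).
    partition(messages, 10).forEach(batch -> System.out.println(batch.size()));
  }
}
```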
hudi_CloudObjectsSelector_getFileAttributesFromRecord_rdh
|
/**
* Get the file attributes filePath, eventTime and size from JSONObject record.
*
* @param record
* of object event
* @return map of file attribute
*/
protected Map<String, Object> getFileAttributesFromRecord(JSONObject record) throws UnsupportedEncodingException {
Map<String, Object> fileRecord = new HashMap<>();
String eventTimeStr = record.getString(f0);
long eventTime = Date.from(Instant.from(DateTimeFormatter.ISO_INSTANT.parse(eventTimeStr))).getTime();
JSONObject s3Object = record.getJSONObject("s3").getJSONObject("object");
String bucket = URLDecoder.decode(record.getJSONObject("s3").getJSONObject("bucket").getString("name"), "UTF-8");
String key = URLDecoder.decode(s3Object.getString("key"), "UTF-8");
String filePath = (((this.fsName + "://") + bucket) + "/") + key;
fileRecord.put(f0, eventTime);
fileRecord.put(S3_FILE_SIZE, s3Object.getLong("size"));
fileRecord.put(S3_FILE_PATH, filePath);
return fileRecord;
}
| 3.26 |
hudi_CloudObjectsSelector_deleteProcessedMessages_rdh
|
/**
* Delete Queue Messages after hudi commit. This method will be invoked by source.onCommit.
*/
public void deleteProcessedMessages(SqsClient sqs, String queueUrl, List<Message> processedMessages) {
if (!processedMessages.isEmpty()) {
// create batch for deletion; SQS DeleteMessageBatchRequest only accepts max 10 entries
List<List<Message>> deleteBatches = createListPartitions(processedMessages, 10);
for (List<Message> deleteBatch : deleteBatches) {
m1(sqs, queueUrl, deleteBatch);
}
}
}
| 3.26 |
hudi_CloudObjectsSelector_getMessagesToProcess_rdh
|
/**
* List messages from queue.
*/
protected List<Message> getMessagesToProcess(SqsClient sqsClient, String queueUrl, int longPollWait, int visibilityTimeout, int maxMessagePerBatch, int maxMessagesPerRequest) {
List<Message> messagesToProcess = new ArrayList<>();
ReceiveMessageRequest receiveMessageRequest = ReceiveMessageRequest.builder().queueUrl(queueUrl).waitTimeSeconds(longPollWait).visibilityTimeout(visibilityTimeout).maxNumberOfMessages(maxMessagesPerRequest).build();
// Get count for available messages
Map<String, String> queueAttributesResult = getSqsQueueAttributes(sqsClient, queueUrl);
long approxMessagesAvailable = Long.parseLong(queueAttributesResult.get(SQS_ATTR_APPROX_MESSAGES));
log.info(("Approximately " + approxMessagesAvailable) + " messages available in queue.");
long numMessagesToProcess = Math.min(approxMessagesAvailable, maxMessagePerBatch);
for (int i = 0; i < ((int) (Math.ceil(((double) (numMessagesToProcess)) / maxMessagesPerRequest))); ++i) {
List<Message> messages = sqsClient.receiveMessage(receiveMessageRequest).messages();
log.debug("Number of messages: " + messages.size());
messagesToProcess.addAll(messages);
if (messages.isEmpty()) {
// ApproximateNumberOfMessages value is eventually consistent.
// So, we still need to check and break if there are no messages.
break;
}
}
return messagesToProcess;
}
| 3.26 |
hudi_HotSpotMemoryLayoutSpecification64bit_getArrayHeaderSize_rdh
|
/**
* Implementation of {@link MemoryLayoutSpecification} based on
* Hot Spot Memory Layout Specification on 64-bit.
*/
public class HotSpotMemoryLayoutSpecification64bit implements MemoryLayoutSpecification {
@Override
public int getArrayHeaderSize() {
return 24;
}
| 3.26 |
hudi_BaseConsistentHashingBucketClusteringPlanStrategy_isBucketClusteringSortEnabled_rdh
|
/**
* Whether to generate regular sort clustering plans for buckets that are not involved in merge or split.
*
* @return true if generate regular sort clustering plans for buckets that are not involved in merge or split, false otherwise.
*/
protected boolean isBucketClusteringSortEnabled() {
return true;
}
| 3.26 |
hudi_BaseConsistentHashingBucketClusteringPlanStrategy_getFileSlicesEligibleForClustering_rdh
|
/**
* Generate candidate clustering file slices of the given partition.
* If there is inflight / requested clustering working on the partition, then return empty list
* to ensure serialized update to the hashing metadata.
*
* @return candidate file slices to be clustered (i.e., sort, bucket split or merge)
*/
@Override
protected Stream<FileSlice> getFileSlicesEligibleForClustering(String partition) {
TableFileSystemView fileSystemView = getHoodieTable().getFileSystemView();
boolean v12 = fileSystemView.getFileGroupsInPendingClustering().anyMatch(p -> p.getLeft().getPartitionPath().equals(partition));
if (v12) {
LOG.info("Partition {} is already in clustering, skip.", partition);
return Stream.empty();
}
return super.getFileSlicesEligibleForClustering(partition);
}
| 3.26 |
hudi_BaseConsistentHashingBucketClusteringPlanStrategy_buildClusteringGroupsForPartition_rdh
|
/**
* Generate cluster group based on split, merge and sort rules
*/
@Override
protected Stream<HoodieClusteringGroup> buildClusteringGroupsForPartition(String partitionPath, List<FileSlice> fileSlices) {
Option<HoodieConsistentHashingMetadata> metadata = ConsistentBucketIndexUtils.loadMetadata(getHoodieTable(), partitionPath);
ValidationUtils.checkArgument(metadata.isPresent(), "Metadata is empty for partition: " + partitionPath);
ConsistentBucketIdentifier identifier = new ConsistentBucketIdentifier(metadata.get());
// Apply split rule
int splitSlot = getWriteConfig().getBucketIndexMaxNumBuckets() - identifier.getNumBuckets();
Triple<List<HoodieClusteringGroup>, Integer, List<FileSlice>> splitResult = buildSplitClusteringGroups(identifier, fileSlices, splitSlot);
List<HoodieClusteringGroup> ret = new ArrayList<>(splitResult.getLeft());
List<FileSlice> remainedSlices = splitResult.getRight();
if (isBucketClusteringMergeEnabled()) {
// Apply merge rule
int mergeSlot = (identifier.getNumBuckets() - getWriteConfig().getBucketIndexMinNumBuckets()) + splitResult.getMiddle();
Triple<List<HoodieClusteringGroup>, Integer, List<FileSlice>> mergeResult = buildMergeClusteringGroup(identifier, remainedSlices, mergeSlot);
ret.addAll(mergeResult.getLeft());
remainedSlices = mergeResult.getRight();
}
if (isBucketClusteringSortEnabled()) {
// Apply sort only to the remaining file groups
ret.addAll(remainedSlices.stream().map(fs -> {
ConsistentHashingNode oldNode = identifier.getBucketByFileId(fs.getFileId());
ConsistentHashingNode newNode = new ConsistentHashingNode(oldNode.getValue(),
FSUtils.createNewFileIdPfx(), ConsistentHashingNode.NodeTag.REPLACE);
return HoodieClusteringGroup.newBuilder().setSlices(getFileSliceInfo(Collections.singletonList(fs))).setNumOutputFileGroups(1).setMetrics(buildMetrics(Collections.singletonList(fs))).setExtraMetadata(constructExtraMetadata(fs.getPartitionPath(), Collections.singletonList(newNode), identifier.getMetadata().getSeqNo())).build();
}).collect(Collectors.toList()));
}
return ret.stream();
}
| 3.26 |
hudi_BaseConsistentHashingBucketClusteringPlanStrategy_isBucketClusteringMergeEnabled_rdh
|
/**
* Whether to enable bucket merging when using the consistent hashing bucket index.
*
* @return true if bucket merge is enabled, false otherwise.
*/
protected boolean isBucketClusteringMergeEnabled() {
return true;
}
| 3.26 |
hudi_BaseConsistentHashingBucketClusteringPlanStrategy_checkPrecondition_rdh
|
/**
* TODO: maybe add a force config to schedule the clustering. It could allow clustering on partitions that have no ongoing write operations.
* Block clustering if there are any ongoing concurrent writers.
*
* @return true if the schedule can proceed
*/
@Override
public boolean checkPrecondition() {
HoodieTimeline v0 = getHoodieTable().getActiveTimeline().getDeltaCommitTimeline().filterInflightsAndRequested();
if (!v0.empty()) {
LOG.warn("When using consistent bucket, clustering cannot be scheduled async if there are concurrent writers. " + "Writer instant: {}.", v0.getInstants());return false;
}
return true;
}
| 3.26 |
hudi_BaseConsistentHashingBucketClusteringPlanStrategy_buildMergeClusteringGroup_rdh
|
/**
* Generate clustering group according to merge rules
*
* @param identifier
* bucket identifier
* @param fileSlices
* file slice candidates to be built as merge clustering groups
* @param mergeSlot
* number of bucket allowed to be merged, in order to guarantee the lower bound of the total number of bucket
* @return list of clustering group, number of buckets merged (removed), remaining file slice (that does not be merged)
*/
protected Triple<List<HoodieClusteringGroup>, Integer, List<FileSlice>> buildMergeClusteringGroup(ConsistentBucketIdentifier identifier, List<FileSlice> fileSlices, int mergeSlot) {
if (fileSlices.size() <= 1) {
return Triple.of(Collections.emptyList(), 0, fileSlices);
}
long mergeSize = getMergeSize();
int remainingMergeSlot = mergeSlot;
List<HoodieClusteringGroup> groups = new ArrayList<>();
boolean[] added = new boolean[fileSlices.size()];
fileSlices.sort(Comparator.comparingInt(a -> identifier.getBucketByFileId(a.getFileId()).getValue()));
// In each round, we check if the ith file slice can be merged with its predecessors and successors
for (int i = 0; i < fileSlices.size(); ++i) {
if (added[i] || (fileSlices.get(i).getTotalFileSize() > mergeSize)) {
continue;
}
// 0: startIdx, 1: endIdx
int[] rangeIdx = new int[]{ i, i };
long v28 = fileSlices.get(i).getTotalFileSize();
// Do backward check first (k == 0), and then forward check (k == 1)
for (int k = 0; k < 2; ++k) {
boolean forward = k == 1;
do {
int nextIdx = (forward) ? (rangeIdx[k] + 1) < fileSlices.size() ? rangeIdx[k] + 1 : 0 : rangeIdx[k] >= 1 ? rangeIdx[k] - 1 : fileSlices.size() - 1;
ConsistentHashingNode bucketOfNextFile = identifier.getBucketByFileId(fileSlices.get(nextIdx).getFileId());
ConsistentHashingNode nextBucket = (forward) ? identifier.getLatterBucket(fileSlices.get(rangeIdx[k]).getFileId()) : identifier.getFormerBucket(fileSlices.get(rangeIdx[k]).getFileId());
boolean isNeighbour = bucketOfNextFile == nextBucket;
/**
* Merge condition:
* 1. there is still slot to merge bucket
* 2. the previous file slices is not merged
* 3. the previous file slice and current file slice are neighbour in the hash ring
* 4. Both the total file size up to now and the previous file slice size are smaller than merge size threshold
*/
// if start equal to end after update range
if ((((((remainingMergeSlot == 0) || added[nextIdx]) || (!isNeighbour)) || (v28 > mergeSize)) || (fileSlices.get(nextIdx).getTotalFileSize() > mergeSize)) || (nextIdx == rangeIdx[1 - k])) {
break;
}
// Mark nextIdx as merge candidate
v28 += fileSlices.get(nextIdx).getTotalFileSize();
rangeIdx[k] = nextIdx;
remainingMergeSlot--;
} while (rangeIdx[k] != i);
}
int startIdx = rangeIdx[0];
int endIdx = rangeIdx[1];
if ((endIdx == i) && (startIdx == i)) {
continue;
}
// Construct merge group if there is at least two file slices
List<FileSlice> fs = new ArrayList<>();
while (true) {
added[startIdx] = true;
fs.add(fileSlices.get(startIdx));
if (startIdx == endIdx) {
break;
}
startIdx = ((startIdx + 1) < fileSlices.size()) ? startIdx + 1 : 0;
}
groups.add(HoodieClusteringGroup.newBuilder().setSlices(getFileSliceInfo(fs)).setNumOutputFileGroups(1).setMetrics(buildMetrics(fs)).setExtraMetadata(constructExtraMetadata(fs.get(0).getPartitionPath(), identifier.mergeBucket(fs.stream().map(FileSlice::getFileId).collect(Collectors.toList())), identifier.getMetadata().getSeqNo())).build());
}
// Collect file slices that are not involved in merge
List<FileSlice> fsUntouched = IntStream.range(0, fileSlices.size()).filter(i -> !added[i]).mapToObj(fileSlices::get).collect(Collectors.toList());
return Triple.of(groups, mergeSlot - remainingMergeSlot, fsUntouched);
}
| 3.26 |
hudi_BaseConsistentHashingBucketClusteringPlanStrategy_constructExtraMetadata_rdh
|
/**
* Construct extra metadata for clustering group
*/
private Map<String, String> constructExtraMetadata(String partition, List<ConsistentHashingNode> nodes, int seqNo) {
Map<String, String> extraMetadata = new HashMap<>();
try {
extraMetadata.put(METADATA_PARTITION_KEY, partition);
extraMetadata.put(METADATA_CHILD_NODE_KEY, ConsistentHashingNode.toJsonString(nodes));
extraMetadata.put(METADATA_SEQUENCE_NUMBER_KEY, Integer.toString(seqNo));
} catch (IOException e) {
LOG.error("Failed to construct extra metadata, partition: {}, nodes:{}", partition, nodes);
throw new HoodieClusteringException((("Failed to construct extra metadata, partition: " + partition) + ", nodes:") + nodes);
}
return extraMetadata;
}
| 3.26 |
hudi_DynamoTableUtils_deleteTableIfExists_rdh
|
/**
* Deletes the table and ignores any errors if it doesn't exist.
*
* @param dynamo
* The Dynamo client to use.
* @param deleteTableRequest
* The delete table request.
* @return True if deleted, false otherwise.
*/
public static boolean deleteTableIfExists(final DynamoDbClient dynamo, final DeleteTableRequest deleteTableRequest) {
try {
dynamo.deleteTable(deleteTableRequest);
return true;
} catch (final ResourceNotFoundException e) {
if (LOGGER.isTraceEnabled()) {
LOGGER.trace(("Table " + deleteTableRequest.tableName()) + " does not exist", e);
}
}
return false;
}
| 3.26 |
hudi_DynamoTableUtils_m0_rdh
|
/**
* Waits up to 10 minutes for a specified DynamoDB table to resolve,
* indicating that it exists. If the table doesn't return a result after
* this time, a SdkClientException is thrown.
*
* @param dynamo
* The DynamoDB client to use to make requests.
* @param tableName
* The name of the table being resolved.
* @throws SdkClientException
* If the specified table does not resolve before this method
* times out and stops polling.
* @throws InterruptedException
* If the thread is interrupted while waiting for the table to
* resolve.
*/
public static void m0(final DynamoDbClient dynamo, final String tableName) throws InterruptedException {
waitUntilExists(dynamo, tableName, f0, DEFAULT_WAIT_INTERVAL);
}
| 3.26 |
hudi_DynamoTableUtils_createTableIfNotExists_rdh
|
/**
* Creates the table and ignores any errors if it already exists.
*
* @param dynamo
* The Dynamo client to use.
* @param createTableRequest
* The create table request.
* @return True if created, false otherwise.
*/
public static boolean createTableIfNotExists(final DynamoDbClient dynamo, final CreateTableRequest createTableRequest) {
try {
dynamo.createTable(createTableRequest);
return true;
} catch (final ResourceInUseException e) {
if (LOGGER.isTraceEnabled()) {
LOGGER.trace(("Table " + createTableRequest.tableName()) + " already exists", e);
}
}
return false;
}
| 3.26 |
hudi_DynamoTableUtils_waitUntilActive_rdh
|
/**
* Waits up to a specified amount of time for a specified DynamoDB table to
* move into the <code>ACTIVE</code> state. If the table does not exist or
* does not transition to the <code>ACTIVE</code> state after this time,
* then a SdkClientException is thrown.
*
* @param dynamo
* The DynamoDB client to use to make requests.
* @param tableName
* The name of the table whose status is being checked.
* @param timeout
* The maximum number of milliseconds to wait.
* @param interval
* The poll interval in milliseconds.
* @throws TableNeverTransitionedToStateException
* If the specified table does not exist or does not transition
* into the <code>ACTIVE</code> state before this method times
* out and stops polling.
* @throws InterruptedException
* If the thread is interrupted while waiting for the table to
* transition into the <code>ACTIVE</code> state.
*/
public static void waitUntilActive(final DynamoDbClient dynamo, final String tableName, final int timeout,
final int interval) throws InterruptedException, TableNeverTransitionedToStateException {
TableDescription table = waitForTableDescription(dynamo, tableName, TableStatus.ACTIVE, timeout, interval);
if ((table == null) || (!table.tableStatus().equals(TableStatus.ACTIVE))) {
throw new TableNeverTransitionedToStateException(tableName, TableStatus.ACTIVE);
}
}
/**
* Wait for the table to reach the desired status and returns the table
* description
*
* @param dynamo
* Dynamo client to use
* @param tableName
* Table name to poll status of
* @param desiredStatus
* Desired {@link TableStatus}
| 3.26 |
hudi_FlinkCreateHandle_deleteInvalidDataFile_rdh
|
/**
* Flink checkpoints start in sequence and asynchronously. When one write task finishes checkpoint (A)
* (so the file system view sees the written data files, some of which may be invalid),
* it immediately proceeds with the write for the next checkpoint (B).
* If checkpoint (B) tries to reuse the last small data bucket (small file) of an invalid data file,
* then, once the coordinator receives the checkpoint success event of checkpoint (A),
* the invalid data file is cleaned up,
* and this merger gets a FileNotFoundException when it closes the write file handle.
*
* <p> To solve, deletes the invalid data file eagerly
* so that the invalid file small bucket would never be reused.
*
* @param lastAttemptId
* The last attempt ID
*/
private void deleteInvalidDataFile(long lastAttemptId) {
final String lastWriteToken = FSUtils.makeWriteToken(getPartitionId(), getStageId(), lastAttemptId);
final String lastDataFileName = FSUtils.makeBaseFileName(instantTime, lastWriteToken, this.fileId, hoodieTable.getBaseFileExtension());
final Path path = makeNewFilePath(partitionPath, lastDataFileName);
try {
if (fs.exists(path)) {
f0.info("Deleting invalid INSERT file due to task retry: " + lastDataFileName);
fs.delete(path, false);
}
} catch (IOException e) {
throw new HoodieException("Error while deleting the INSERT file due to task retry: " + lastDataFileName, e);
}
}
| 3.26 |
hudi_FlinkCreateHandle_newFilePathWithRollover_rdh
|
/**
* Use the writeToken + "-" + rollNumber as the new writeToken of a mini-batch write.
*/
private Path newFilePathWithRollover(int rollNumber) {
final String dataFileName = FSUtils.makeBaseFileName(instantTime, (writeToken + "-") + rollNumber, fileId, hoodieTable.getBaseFileExtension());
return makeNewFilePath(partitionPath, dataFileName);
}
| 3.26 |
hudi_OverwriteNonDefaultsWithLatestAvroPayload_mergeRecords_rdh
|
/**
* Merges the given records into one.
* The fields in {@code baseRecord} have higher priority:
* a field is set into the merged record if it is neither null nor equal to its default value.
*
* @param schema
* The record schema
* @param baseRecord
* The base record to merge with
* @param mergedRecord
* The record to be merged
* @return the merged record option
*/
protected Option<IndexedRecord> mergeRecords(Schema schema, GenericRecord baseRecord, GenericRecord mergedRecord) {
if (isDeleteRecord(baseRecord)) {
return Option.empty();
} else {
final GenericRecordBuilder builder = new GenericRecordBuilder(schema);
List<Schema.Field> fields = schema.getFields();
fields.forEach(field -> setField(baseRecord, mergedRecord, builder, field));
return Option.of(builder.build());
}
}
| 3.26 |
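The `setField` helper is not shown in this snippet; the sketch below approximates the documented behavior (the base record's value wins unless it is null/default) on a tiny illustrative Avro schema, checking only for null for brevity.

```java
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.generic.GenericRecordBuilder;

public class MergeRecordsSketch {
  public static void main(String[] args) {
    // Illustrative two-field schema; the real payload works with the table's schema.
    Schema schema = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"Row\",\"fields\":["
            + "{\"name\":\"id\",\"type\":\"string\"},"
            + "{\"name\":\"note\",\"type\":[\"null\",\"string\"],\"default\":null}]}");

    GenericRecord baseRecord = new GenericData.Record(schema);
    baseRecord.put("id", "k1");
    baseRecord.put("note", null); // default value -> lower priority than the merged record

    GenericRecord mergedRecord = new GenericData.Record(schema);
    mergedRecord.put("id", "k1");
    mergedRecord.put("note", "existing note");

    // Field-by-field merge: keep the base value unless it is null, then fall back.
    GenericRecordBuilder builder = new GenericRecordBuilder(schema);
    schema.getFields().forEach(field -> {
      Object baseValue = baseRecord.get(field.name());
      builder.set(field, baseValue != null ? baseValue : mergedRecord.get(field.name()));
    });
    System.out.println(builder.build()); // {"id": "k1", "note": "existing note"}
  }
}
```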
hudi_FutureUtils_allOf_rdh
|
/**
* Similar to {@link CompletableFuture#allOf(CompletableFuture[])} with a few important
* differences:
*
* <ol>
* <li>Completes successfully as soon as *all* of the futures complete successfully</li>
* <li>Completes exceptionally as soon as *any* of the futures complete exceptionally</li>
* <li>In case it's completed exceptionally all the other futures not completed yet, will be
* cancelled</li>
* </ol>
*
* @param futures
* list of {@link CompletableFuture}s
*/
public static <T> CompletableFuture<List<T>> allOf(List<CompletableFuture<T>> futures) {
CompletableFuture<Void> union = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
futures.forEach(future -> {
// NOTE: We add a callback to every future, to cancel all the other not yet completed futures,
// which will be providing for an early termination semantic: whenever any of the futures
// fail other futures will be cancelled and the exception will be returned as a result
future.whenComplete((ignored, throwable) -> {
if (throwable != null) {
futures.forEach(f -> f.cancel(true));
union.completeExceptionally(throwable); }
});
});
// NOTE: This join wouldn't block, since all the futures are completed at this point.
return union.thenApply(aVoid -> futures.stream().map(CompletableFuture::join).collect(Collectors.toList()));
}
| 3.26 |
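A runnable demonstration of the early-termination behavior described above; the aggregation is copied into a local helper so the example has no Hudi dependency.

```java
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;

public class AllOfSketch {
  // Local copy of the aggregation pattern above, for a self-contained demonstration.
  static <T> CompletableFuture<List<T>> allOf(List<CompletableFuture<T>> futures) {
    CompletableFuture<Void> union = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
    futures.forEach(future -> future.whenComplete((ignored, throwable) -> {
      if (throwable != null) {
        futures.forEach(f -> f.cancel(true)); // early termination: cancel the siblings
        union.completeExceptionally(throwable);
      }
    }));
    return union.thenApply(aVoid ->
        futures.stream().map(CompletableFuture::join).collect(Collectors.toList()));
  }

  public static void main(String[] args) {
    CompletableFuture<Integer> ok = new CompletableFuture<>();
    CompletableFuture<Integer> failing = new CompletableFuture<>();
    CompletableFuture<List<Integer>> combined = allOf(Arrays.asList(ok, failing));

    failing.completeExceptionally(new IllegalStateException("boom"));
    System.out.println(ok.isCancelled());                    // true: the sibling was cancelled
    System.out.println(combined.isCompletedExceptionally()); // true
  }
}
```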
hudi_BaseActionExecutor_writeTableMetadata_rdh
|
/**
* Writes restore metadata to table metadata.
*
* @param metadata
* restore metadata of interest.
*/
protected final void writeTableMetadata(HoodieRestoreMetadata metadata) {
Option<HoodieTableMetadataWriter> v6 = table.getMetadataWriter(instantTime);
if (v6.isPresent()) {
try (HoodieTableMetadataWriter metadataWriter = v6.get()) {
metadataWriter.update(metadata, instantTime);
} catch (Exception e) {
if (e instanceof HoodieException) {
throw ((HoodieException) (e));
} else {
throw new HoodieException("Failed to apply restore to metadata", e);
}
}
}
}
| 3.26 |
hudi_ExternalFilePathUtil_appendCommitTimeAndExternalFileMarker_rdh
|
/**
* Appends the commit time and external file marker to the file path. Hudi relies on the commit time in the file name for properly generating views of the files in a table.
*
* @param filePath
* The original file path
* @param commitTime
* The time of the commit that added this file to the table
* @return The file path with this additional information appended
*/
public static String appendCommitTimeAndExternalFileMarker(String filePath, String commitTime) {
return ((filePath + "_") + commitTime) + EXTERNAL_FILE_SUFFIX;
}
| 3.26 |
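A hypothetical example of the resulting file name; the real `EXTERNAL_FILE_SUFFIX` constant is not shown in this snippet, so the suffix below is only a placeholder.

```java
public class ExternalFilePathSketch {
  // The actual suffix constant is not shown in the snippet above; "_hudiext" is a placeholder.
  private static final String EXTERNAL_FILE_SUFFIX = "_hudiext";

  static String appendCommitTimeAndExternalFileMarker(String filePath, String commitTime) {
    return filePath + "_" + commitTime + EXTERNAL_FILE_SUFFIX;
  }

  public static void main(String[] args) {
    // An externally written parquet file tagged with the commit time that registered it.
    System.out.println(appendCommitTimeAndExternalFileMarker("partition/data_file.parquet", "20240101093000000"));
    // partition/data_file.parquet_20240101093000000_hudiext
  }
}
```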
hudi_ExternalFilePathUtil_isExternallyCreatedFile_rdh
|
/**
* Checks if the file name was created by an external system by checking for the external file marker at the end of the file name.
*
* @param fileName
* The file name
* @return True if the file was created by an external system, false otherwise
*/
public static boolean isExternallyCreatedFile(String fileName) {
return fileName.endsWith(EXTERNAL_FILE_SUFFIX);
}
| 3.26 |
hudi_HoodieMetadataTableValidator_validatePartitions_rdh
|
/**
* Compare the listing partitions result between metadata table and fileSystem.
*/
private List<String> validatePartitions(HoodieSparkEngineContext engineContext, String basePath) {
// compare partitions
List<String> allPartitionPathsFromFS = FSUtils.getAllPartitionPaths(engineContext, basePath, false);
HoodieTimeline completedTimeline = metaClient.getCommitsTimeline().filterCompletedInstants();
// ignore partitions created by uncommitted ingestion.
allPartitionPathsFromFS = allPartitionPathsFromFS.stream().parallel().filter(part -> {
HoodiePartitionMetadata hoodiePartitionMetadata = new HoodiePartitionMetadata(metaClient.getFs(), FSUtils.getPartitionPath(basePath, part));
Option<String> instantOption = hoodiePartitionMetadata.readPartitionCreatedCommitTime();
if (instantOption.isPresent()) {
String instantTime = instantOption.get();
// There are two cases where the created commit time is written to the partition metadata:
// (1) Commit C1 creates the partition and C1 succeeds, the partition metadata has C1 as
// the created commit time.
// (2) Commit C1 creates the partition, the partition metadata is written, and C1 fails
// during writing data files. Next time, C2 adds new data to the same partition after C1
// is rolled back. In this case, the partition metadata still has C1 as the created commit
// time, since Hudi does not rewrite the partition metadata in C2.
if (!completedTimeline.containsOrBeforeTimelineStarts(instantTime)) {
Option<HoodieInstant> lastInstant = completedTimeline.lastInstant();
return lastInstant.isPresent() && HoodieTimeline.compareTimestamps(instantTime, LESSER_THAN_OR_EQUALS, lastInstant.get().getTimestamp());
}
return true;
} else {
return false;
}
}).collect(Collectors.toList());
List<String> allPartitionPathsMeta = FSUtils.getAllPartitionPaths(engineContext, basePath, true);
Collections.sort(allPartitionPathsFromFS);
Collections.sort(allPartitionPathsMeta);
if ((allPartitionPathsFromFS.size() != allPartitionPathsMeta.size()) ||
(!allPartitionPathsFromFS.equals(allPartitionPathsMeta))) {
String message = ((("Compare Partitions Failed! " + "AllPartitionPathsFromFS : ") + allPartitionPathsFromFS) + " and allPartitionPathsMeta : ") + allPartitionPathsMeta;
LOG.error(message);
throw new HoodieValidationException(message);
}
return allPartitionPathsMeta;
}
| 3.26 |
hudi_HoodieMetadataTableValidator_validateLatestFileSlices_rdh
|
/**
* Compare getLatestFileSlices between metadata table and fileSystem.
*/
private void validateLatestFileSlices(HoodieMetadataValidationContext metadataTableBasedContext, HoodieMetadataValidationContext fsBasedContext, String partitionPath, Set<String> baseDataFilesForCleaning) {
List<FileSlice> latestFileSlicesFromMetadataTable;
List<FileSlice> latestFileSlicesFromFS;
if (!baseDataFilesForCleaning.isEmpty()) {
latestFileSlicesFromMetadataTable = filterFileSliceBasedOnInflightCleaning(metadataTableBasedContext.getSortedLatestFileSliceList(partitionPath), baseDataFilesForCleaning);
latestFileSlicesFromFS = filterFileSliceBasedOnInflightCleaning(fsBasedContext.getSortedLatestFileSliceList(partitionPath), baseDataFilesForCleaning);
} else {
latestFileSlicesFromMetadataTable = metadataTableBasedContext.getSortedLatestFileSliceList(partitionPath);
latestFileSlicesFromFS = fsBasedContext.getSortedLatestFileSliceList(partitionPath);
}
LOG.debug((("Latest file list from metadata: " + latestFileSlicesFromMetadataTable) + ". For partition ") + partitionPath);
LOG.debug((("Latest file list from direct listing: " + latestFileSlicesFromFS) + ". For partition ") + partitionPath);
validateFileSlices(latestFileSlicesFromMetadataTable, latestFileSlicesFromFS, partitionPath, fsBasedContext.getMetaClient(), "latest file slices");
}
| 3.26 |
hudi_HoodieMetadataTableValidator_validateFilesInPartition_rdh
|
/**
* Compare the file listing and index data between metadata table and fileSystem.
* For now, validate five kinds of apis:
* 1. HoodieMetadataFileSystemView::getLatestFileSlices
* 2. HoodieMetadataFileSystemView::getLatestBaseFiles
* 3. HoodieMetadataFileSystemView::getAllFileGroups and HoodieMetadataFileSystemView::getAllFileSlices
* 4. HoodieBackedTableMetadata::getColumnStats
* 5. HoodieBackedTableMetadata::getBloomFilters
*
* @param metadataTableBasedContext
* Validation context containing information based on metadata table
* @param fsBasedContext
* Validation context containing information based on the file system
* @param partitionPath
* Partition path String
* @param baseDataFilesForCleaning
* Base files for un-complete cleaner action
*/
private void validateFilesInPartition(HoodieMetadataValidationContext metadataTableBasedContext, HoodieMetadataValidationContext fsBasedContext, String partitionPath, Set<String> baseDataFilesForCleaning) {
if (cfg.f0) {
validateLatestFileSlices(metadataTableBasedContext, fsBasedContext, partitionPath, baseDataFilesForCleaning);
}
if (cfg.validateLatestBaseFiles) {
validateLatestBaseFiles(metadataTableBasedContext, fsBasedContext, partitionPath, baseDataFilesForCleaning);
}
if (cfg.validateAllFileGroups) {
validateAllFileGroups(metadataTableBasedContext, fsBasedContext, partitionPath, baseDataFilesForCleaning);
}
if (cfg.validateAllColumnStats) {
validateAllColumnStats(metadataTableBasedContext, fsBasedContext, partitionPath,
baseDataFilesForCleaning);
}
if (cfg.validateBloomFilters) {
validateBloomFilters(metadataTableBasedContext, fsBasedContext, partitionPath, baseDataFilesForCleaning);
}
}
| 3.26 |
hudi_HoodieMetadataTableValidator_checkMetadataTableIsAvailable_rdh
|
/**
* Check that the metadata table is initialized and available to read.
* If not, we log a warning and skip the current validation.
*/
private boolean checkMetadataTableIsAvailable() {
try {
HoodieTableMetaClient mdtMetaClient = HoodieTableMetaClient.builder().setConf(jsc.hadoopConfiguration()).setBasePath(new Path(cfg.basePath, HoodieTableMetaClient.METADATA_TABLE_FOLDER_PATH).toString()).setLoadActiveTimelineOnLoad(true).build();
int finishedInstants = mdtMetaClient.getCommitsTimeline().filterCompletedInstants().countInstants();
if (finishedInstants == 0) {
if (metaClient.getCommitsTimeline().filterCompletedInstants().countInstants() == 0) {
LOG.info("There is no completed commit in both metadata table and corresponding data table.");
return false;
} else {
throw new HoodieValidationException("There is no completed instant for metadata table.");
}
}
return true;
} catch (TableNotFoundException tbe) {
// Suppress the TableNotFound exception if Metadata table is not available to read for now
LOG.warn("Metadata table is not found. Skip current validation.");
return false;
} catch (Exception ex) {
LOG.warn("Metadata table is not available to read for now, ", ex);
return false;
}
}
| 3.26 |
hudi_HoodieMetadataTableValidator_validateLatestBaseFiles_rdh
|
/**
* Compare getLatestBaseFiles between metadata table and fileSystem.
*/
private void validateLatestBaseFiles(HoodieMetadataValidationContext metadataTableBasedContext, HoodieMetadataValidationContext fsBasedContext, String partitionPath, Set<String> baseDataFilesForCleaning) {
List<HoodieBaseFile> v35;
List<HoodieBaseFile> latestFilesFromFS;
if (!baseDataFilesForCleaning.isEmpty()) {
v35 = filterBaseFileBasedOnInflightCleaning(metadataTableBasedContext.getSortedLatestBaseFileList(partitionPath), baseDataFilesForCleaning);
latestFilesFromFS = filterBaseFileBasedOnInflightCleaning(fsBasedContext.getSortedLatestBaseFileList(partitionPath), baseDataFilesForCleaning);
} else {
v35 = metadataTableBasedContext.getSortedLatestBaseFileList(partitionPath);
latestFilesFromFS = fsBasedContext.getSortedLatestBaseFileList(partitionPath);
}
LOG.debug((("Latest base file from metadata: " + v35) + ". For partitions ") + partitionPath);LOG.debug((("Latest base file from direct listing: " + latestFilesFromFS) + ". For partitions ") + partitionPath);
validate(v35, latestFilesFromFS, partitionPath, "latest base files");}
| 3.26 |
hudi_HoodieMetadataTableValidator_readConfigFromFileSystem_rdh
|
/**
* Reads config from the file system.
*
* @param jsc
* {@link JavaSparkContext} instance.
* @param cfg
* {@link Config} instance.
* @return the {@link TypedProperties} instance.
*/
private TypedProperties readConfigFromFileSystem(JavaSparkContext jsc, Config cfg) {
return UtilHelpers.readConfig(jsc.hadoopConfiguration(), new Path(cfg.f1), cfg.f2).getProps(true);
}
| 3.26 |
hudi_HoodieMetadataTableValidator_areFileSliceCommittedLogFilesMatching_rdh
|
/**
* Compares committed log files from two file slices.
*
* @param fs1
* File slice 1
* @param fs2
* File slice 2
* @param metaClient
* {@link HoodieTableMetaClient} instance
* @param committedFilesMap
* In-memory map for caching committed files of commits
* @return {@code true} if matching; {@code false} otherwise.
*/
private boolean areFileSliceCommittedLogFilesMatching(FileSlice fs1, FileSlice fs2, HoodieTableMetaClient metaClient, Map<String, Set<String>> committedFilesMap) {
Set<String> fs1LogPathSet = fs1.getLogFiles().map(f -> f.getPath().toString()).collect(Collectors.toSet());
Set<String> fs2LogPathSet = fs2.getLogFiles().map(f -> f.getPath().toString()).collect(Collectors.toSet());
Set<String> commonLogPathSet = new HashSet<>(fs1LogPathSet);
commonLogPathSet.retainAll(fs2LogPathSet);
// Only keep log file paths that differ
fs1LogPathSet.removeAll(commonLogPathSet);
fs2LogPathSet.removeAll(commonLogPathSet);
// Check if the remaining log files are uncommitted. If there is any log file
// that is committed, the committed log files of two file slices are different
FileSystem fileSystem = metaClient.getFs();
if (hasCommittedLogFiles(fileSystem, fs1LogPathSet, metaClient, committedFilesMap)) {
LOG.error((("The first file slice has committed log files that cause mismatching: " + fs1) + "; Different log files are: ") + fs1LogPathSet);
return false;
}
if (hasCommittedLogFiles(fileSystem, fs2LogPathSet, metaClient, committedFilesMap)) {
LOG.error((("The second file slice has committed log files that cause mismatching: " + fs2) + "; Different log files are: ") + fs2LogPathSet);
return false;
}
return true;
}
| 3.26 |
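The comparison above first removes the log files the two slices share and only then checks whether any remaining path is committed. A small sketch of that set arithmetic with illustrative file names:

```java
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class LogFileDiffSketch {
  public static void main(String[] args) {
    // Two file slices' log file paths (illustrative names).
    Set<String> fs1LogPathSet = new HashSet<>(Arrays.asList("log.1", "log.2", "log.3"));
    Set<String> fs2LogPathSet = new HashSet<>(Arrays.asList("log.1", "log.2", "log.4"));

    // Keep only the paths that differ between the two slices, as in the method above.
    Set<String> commonLogPathSet = new HashSet<>(fs1LogPathSet);
    commonLogPathSet.retainAll(fs2LogPathSet);
    fs1LogPathSet.removeAll(commonLogPathSet);
    fs2LogPathSet.removeAll(commonLogPathSet);

    System.out.println(fs1LogPathSet); // [log.3] -> still needs the committed-or-not check
    System.out.println(fs2LogPathSet); // [log.4]
  }
}
```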
hudi_HiveSyncTool_getTablePartitions_rdh
|
/**
* Fetch partitions from meta service, will try to push down more filters to avoid fetching
* too many unnecessary partitions.
*
* @param writtenPartitions
* partitions that have been added, updated, or dropped since the last sync.
*/
private List<Partition> getTablePartitions(String tableName, List<String> writtenPartitions) {
if (!config.getBooleanOrDefault(HIVE_SYNC_FILTER_PUSHDOWN_ENABLED)) {
return syncClient.getAllPartitions(tableName);
}
List<String> partitionKeys = config.getSplitStrings(META_SYNC_PARTITION_FIELDS).stream().map(String::toLowerCase).collect(Collectors.toList());
List<FieldSchema> partitionFields = syncClient.getMetastoreFieldSchemas(tableName).stream().filter(f -> partitionKeys.contains(f.getName())).collect(Collectors.toList());
return syncClient.getPartitionsByFilter(tableName, PartitionFilterGenerator.generatePushDownFilter(writtenPartitions, partitionFields, config));
}
/**
* Syncs all partitions on storage to the metastore, by only making incremental changes.
*
* @param tableName
* The table name in the metastore.
* @return {@code true} if one or more partition(s) are changed in the metastore;
 {@code false} otherwise.
| 3.26 |
hudi_BaseHoodieWriteClient_scheduleTableService_rdh
|
/**
* Schedule table services such as clustering, compaction & cleaning.
*
* @param extraMetadata
* Metadata to pass onto the scheduled service instant
* @param tableServiceType
* Type of table service to schedule
* @return The given instant time option or empty if no table service plan is scheduled
*/
public Option<String> scheduleTableService(String instantTime, Option<Map<String, String>> extraMetadata, TableServiceType tableServiceType) {
return tableServiceClient.scheduleTableService(instantTime, extraMetadata, tableServiceType);
}
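For illustration, a minimal sketch of how a caller might drive this API is shown below. The `client` instance, the `HoodieActiveTimeline.createNewInstantTime()` helper, and the exact package of `TableServiceType` are assumptions about the surrounding setup, not part of this snippet.

// Hypothetical usage sketch: explicitly schedule a cleaning plan.
String cleanInstant = HoodieActiveTimeline.createNewInstantTime();
Option<String> scheduled = client.scheduleTableService(cleanInstant, Option.empty(), TableServiceType.CLEAN);
if (scheduled.isPresent()) {
  // A clean plan was written to the timeline at `cleanInstant`; a later clean call
  // (or an async cleaner) can execute it.
}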
| 3.26 |
hudi_BaseHoodieWriteClient_postWrite_rdh
|
/**
* Common method containing steps to be performed after write (upsert/insert/..) operations including auto-commit.
*
* @param result
* Commit Action Result
* @param instantTime
* Instant Time
* @param hoodieTable
* Hoodie Table
* @return Write Status
*/
public O postWrite(HoodieWriteMetadata<O> result, String instantTime, HoodieTable hoodieTable) {
if (result.getIndexLookupDuration().isPresent()) {
metrics.updateIndexMetrics(getOperationType().name(), result.getIndexUpdateDuration().get().toMillis());
}
if (result.isCommitted()) {
// Perform post commit operations.
if (result.getFinalizeDuration().isPresent()) {
metrics.updateFinalizeWriteMetrics(result.getFinalizeDuration().get().toMillis(), result.getWriteStats().get().size());
}
postCommit(hoodieTable, result.getCommitMetadata().get(), instantTime, Option.empty());
m2(hoodieTable);
emitCommitMetrics(instantTime, result.getCommitMetadata().get(), hoodieTable.getMetaClient().getCommitActionType());
}
return result.getWriteStatuses();
}
| 3.26 |
hudi_BaseHoodieWriteClient_startCommit_rdh
|
/**
* Provides a new commit time for a write operation (insert/update/delete/insert_overwrite/insert_overwrite_table) with specified action.
*/
public String startCommit(String actionType, HoodieTableMetaClient metaClient) {
CleanerUtils.rollbackFailedWrites(config.getFailedWritesCleanPolicy(), HoodieTimeline.COMMIT_ACTION, () -> tableServiceClient.rollbackFailedWrites());
String instantTime = createNewInstantTime();
startCommit(instantTime, actionType, metaClient);
return instantTime;
}
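A hedged end-to-end sketch of how this pairs with a write and an explicit commit on a Spark-based client; `client`, `metaClient`, and `records` are assumed to already exist, and the JavaRDD-based generics assume the Spark engine.

// Hypothetical write round-trip: start a commit, upsert, then complete it explicitly.
String instantTime = client.startCommit(HoodieTimeline.COMMIT_ACTION, metaClient);
JavaRDD<WriteStatus> writeStatuses = client.upsert(records, instantTime);
boolean committed = client.commit(instantTime, writeStatuses);

Many callers instead use the simpler no-argument startCommit() variant (where available) and rely on auto-commit.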
| 3.26 |
hudi_BaseHoodieWriteClient_scheduleLogCompactionAtInstant_rdh
|
/**
* Schedules a new log compaction instant with passed-in instant time.
*
* @param instantTime
* Log Compaction Instant Time
* @param extraMetadata
* Extra Metadata to be stored
*/
public boolean scheduleLogCompactionAtInstant(String instantTime, Option<Map<String, String>> extraMetadata) throws HoodieIOException {
return scheduleTableService(instantTime, extraMetadata, TableServiceType.LOG_COMPACT).isPresent();
}
| 3.26 |
hudi_BaseHoodieWriteClient_cluster_rdh
|
/**
* Ensures clustering instant is in expected state and performs clustering for the plan stored in metadata.
*
* @param clusteringInstant
* Clustering Instant Time
* @return Collection of Write Status
*/
public HoodieWriteMetadata<O> cluster(String clusteringInstant, boolean shouldComplete) {
HoodieTable table = createTable(config, context.getHadoopConf().get());
preWrite(clusteringInstant, WriteOperationType.CLUSTER, table.getMetaClient());
return tableServiceClient.cluster(clusteringInstant, shouldComplete);
}
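A hedged sketch of the usual schedule-then-execute clustering flow built on this method; the instant-time helper and the `client` setup (including clustering configs) are assumptions.

// Hypothetical clustering flow: schedule a plan, then execute and complete it.
String clusteringInstant = HoodieActiveTimeline.createNewInstantTime();
if (client.scheduleClusteringAtInstant(clusteringInstant, Option.empty())) {
  HoodieWriteMetadata<JavaRDD<WriteStatus>> result = client.cluster(clusteringInstant, true);
  // result.getWriteStatuses() holds the statuses of the replacecommit produced by clustering.
}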
| 3.26 |
hudi_BaseHoodieWriteClient_scheduleCompaction_rdh
|
/**
* Schedules a new compaction instant.
*
* @param extraMetadata
* Extra Metadata to be stored
*/
public Option<String> scheduleCompaction(Option<Map<String, String>> extraMetadata) throws HoodieIOException {
String instantTime = createNewInstantTime();
return scheduleCompactionAtInstant(instantTime, extraMetadata) ? Option.of(instantTime) : Option.empty();
}
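A hedged sketch of scheduling and running compaction on a MERGE_ON_READ table; `client` is assumed to be a SparkRDDWriteClient, and the compact call shown is an assumption about the engine-specific client, not part of this snippet.

// Hypothetical inline compaction of a MOR table.
Option<String> compactionInstant = client.scheduleCompaction(Option.empty());
if (compactionInstant.isPresent()) {
  client.compact(compactionInstant.get());   // executes the scheduled compaction plan
}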
| 3.26 |
hudi_BaseHoodieWriteClient_scheduleIndexing_rdh
|
/**
* Schedules INDEX action.
*
* @param partitionTypes
* - list of {@link MetadataPartitionType} which needs to be indexed
* @return instant time for the requested INDEX action
*/
public Option<String> scheduleIndexing(List<MetadataPartitionType> partitionTypes) {
String instantTime = createNewInstantTime();
Option<HoodieIndexPlan> indexPlan = createTable(config, hadoopConf).scheduleIndexing(context, instantTime, partitionTypes);
return indexPlan.isPresent() ? Option.of(instantTime) : Option.empty();
}
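A hedged sketch of the async metadata indexing flow, combining this method with the index(...) call shown further below; the `client` instance and the chosen partition type are assumptions.

// Hypothetical indexing flow: schedule a column-stats index build, then run it.
Option<String> indexInstant = client.scheduleIndexing(Collections.singletonList(MetadataPartitionType.COLUMN_STATS));
if (indexInstant.isPresent()) {
  client.index(indexInstant.get());
}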
| 3.26 |
hudi_BaseHoodieWriteClient_updateColumnType_rdh
|
/**
 * Updates the type of a column in the Hudi table.
 * Only primitive-to-primitive type changes are supported;
 * changing a nested type to another nested type or to a primitive type (e.g. RecordType -> MapType, MapType -> LongType) is not allowed.
 *
 * @param colName
 * column to be changed. To change a column inside a nested field, the full name should be specified.
 * @param newType
 * the new type for the column.
*/
public void updateColumnType(String colName, Type newType) {
Pair<InternalSchema, HoodieTableMetaClient> pair = getInternalSchemaAndMetaClient();
InternalSchema newSchema = new InternalSchemaChangeApplier(pair.getLeft()).applyColumnTypeChange(colName, newType);
commitTableChange(newSchema, pair.getRight());
}
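A hedged sketch of a type-widening call; the column names are hypothetical, and `Types` refers to Hudi's internal-schema type factory (an assumption about the available API).

// Hypothetical type widening; nested fields use their full dotted name.
client.updateColumnType("fare_cents", Types.LongType.get());   // e.g. int -> long
client.updateColumnType("rider.age", Types.LongType.get());    // nested field via full name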
| 3.26 |
hudi_BaseHoodieWriteClient_commitLogCompaction_rdh
|
/**
* Commit a log compaction operation. Allow passing additional meta-data to be stored in commit instant file.
*
* @param logCompactionInstantTime
* Log Compaction Instant Time
* @param metadata
* All the metadata that gets stored along with a commit
* @param extraMetadata
* Extra Metadata to be stored
*/
public void commitLogCompaction(String logCompactionInstantTime, HoodieCommitMetadata metadata, Option<Map<String, String>> extraMetadata) {
HoodieTable table = createTable(config, context.getHadoopConf().get());
extraMetadata.ifPresent(m -> m.forEach(metadata::addMetadata));
completeLogCompaction(metadata, table, logCompactionInstantTime);
}
| 3.26 |
hudi_BaseHoodieWriteClient_m2_rdh
|
/**
 * Triggers cleaning and archival for the table of interest. This method is called outside of locks, so internal callers should ensure they acquire a lock wherever applicable.
*
* @param table
* instance of {@link HoodieTable} of interest.
*/
protected void m2(HoodieTable table) {
autoCleanOnCommit();
// reload table so that the timeline reflects the clean commit
autoArchiveOnCommit(createTable(config, hadoopConf));
}
| 3.26 |
hudi_BaseHoodieWriteClient_index_rdh
|
/**
* Runs INDEX action to build out the metadata partitions as planned for the given instant time.
*
* @param indexInstantTime
* - instant time for the requested INDEX action
* @return {@link Option<HoodieIndexCommitMetadata>} after successful indexing.
*/
public Option<HoodieIndexCommitMetadata> index(String indexInstantTime) {
return createTable(config, hadoopConf).index(context, indexInstantTime);
}
| 3.26 |
hudi_BaseHoodieWriteClient_reOrderColPosition_rdh
|
/**
* reorder the position of col.
*
* @param colName
 * column which needs to be reordered. To reorder a column inside a nested field, the full name should be specified.
* @param referColName
* reference position.
* @param orderType
* col position change type. now support three change types: first/after/before
*/
public void reOrderColPosition(String colName, String referColName, TableChange.ColumnPositionChange.ColumnPositionType orderType) {
if (((colName == null) || (orderType == null)) || (referColName == null)) {
return;
}
// get internalSchema
Pair<InternalSchema, HoodieTableMetaClient> pair = getInternalSchemaAndMetaClient();
InternalSchema newSchema = new InternalSchemaChangeApplier(pair.getLeft()).applyReOrderColPositionChange(colName, referColName, orderType);
commitTableChange(newSchema, pair.getRight());
}
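A hedged one-line usage sketch; the column names are hypothetical.

// Hypothetical reordering: move column `city` so that it sits immediately after `country`.
client.reOrderColPosition("city", "country", TableChange.ColumnPositionChange.ColumnPositionType.AFTER);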
| 3.26 |
hudi_BaseHoodieWriteClient_completeLogCompaction_rdh
|
/**
* Commit Log Compaction and track metrics.
*/
protected void completeLogCompaction(HoodieCommitMetadata metadata, HoodieTable table, String logCompactionCommitTime) {
tableServiceClient.completeLogCompaction(metadata, table, logCompactionCommitTime);
}
| 3.26 |
hudi_BaseHoodieWriteClient_scheduleCompactionAtInstant_rdh
|
/**
* Schedules a new compaction instant with passed-in instant time.
*
* @param instantTime
* Compaction Instant Time
* @param extraMetadata
* Extra Metadata to be stored
*/
public boolean scheduleCompactionAtInstant(String instantTime, Option<Map<String, String>> extraMetadata) throws HoodieIOException {
return scheduleTableService(instantTime, extraMetadata, TableServiceType.COMPACT).isPresent();
}
| 3.26 |
hudi_BaseHoodieWriteClient_deleteSavepoint_rdh
|
/**
 * Delete a savepoint that was created. Once the savepoint is deleted, the commit can be rolled back and the cleaner may
* clean up data files.
*
* @param savepointTime
* Savepoint time to delete
*/
public void deleteSavepoint(String savepointTime) {
HoodieTable<T, I, K, O> table = createTable(config, hadoopConf);
SavepointHelpers.deleteSavepoint(table, savepointTime);
}
| 3.26 |
hudi_BaseHoodieWriteClient_restoreToSavepoint_rdh
|
/**
* Restore the data to the savepoint.
*
 * WARNING: This rolls back recent commits, deletes data files, and removes pending compactions after the savepoint time.
* Queries accessing the files will mostly fail. This is expected to be a manual operation and no concurrent write or
* compaction is expected to be running
*
* @param savepointTime
* Savepoint time to rollback to
*/
public void restoreToSavepoint(String savepointTime) {
boolean initializeMetadataTableIfNecessary = config.isMetadataTableEnabled();
if (initializeMetadataTableIfNecessary) {
try {
// Delete metadata table directly when users trigger savepoint rollback if mdt existed and if the savePointTime is beforeTimelineStarts
// or before the oldest compaction on MDT.
// We cannot restore to before the oldest compaction on MDT as we don't have the basefiles before that time.
HoodieTableMetaClient mdtMetaClient = HoodieTableMetaClient.builder().setConf(hadoopConf).setBasePath(getMetadataTableBasePath(config.getBasePath())).build();
Option<HoodieInstant> oldestMdtCompaction = mdtMetaClient.getCommitTimeline().filterCompletedInstants().firstInstant();
boolean deleteMDT = false;
if (oldestMdtCompaction.isPresent()) {
if (HoodieTimeline.LESSER_THAN_OR_EQUALS.test(savepointTime, oldestMdtCompaction.get().getTimestamp())) {
LOG.warn(String.format("Deleting MDT during restore to %s as the savepoint is older than oldest compaction %s on MDT", savepointTime, oldestMdtCompaction.get().getTimestamp()));deleteMDT =
true;
}
}
// The instant required to sync rollback to MDT has been archived and the mdt syncing will be failed
// So that we need to delete the whole MDT here.
if (!deleteMDT) {
HoodieInstant syncedInstant = new HoodieInstant(false, HoodieTimeline.DELTA_COMMIT_ACTION, savepointTime);
if (mdtMetaClient.getCommitsTimeline().isBeforeTimelineStarts(syncedInstant.getTimestamp())) {
LOG.warn(String.format("Deleting MDT during restore to %s as the savepoint is older than the MDT timeline %s", savepointTime, mdtMetaClient.getCommitsTimeline().firstInstant().get().getTimestamp()));
deleteMDT = true;
}
}
if (deleteMDT) {
HoodieTableMetadataUtil.deleteMetadataTable(config.getBasePath(), context);
// rollbackToSavepoint action will try to bootstrap MDT at first but sync to MDT will fail at the current scenario.
// so that we need to disable metadata initialized here.
initializeMetadataTableIfNecessary = false;
}
} catch (Exception e) {
// Metadata directory does not exist
}
}
HoodieTable<T, I, K, O> table = initTable(WriteOperationType.UNKNOWN, Option.empty(), initializeMetadataTableIfNecessary);
SavepointHelpers.validateSavepointPresence(table, savepointTime);
ValidationUtils.checkArgument(!config.shouldArchiveBeyondSavepoint(), "Restore is not supported when " + HoodieArchivalConfig.ARCHIVE_BEYOND_SAVEPOINT.key() + " is enabled");
m4(savepointTime, initializeMetadataTableIfNecessary);
SavepointHelpers.validateSavepointRestore(table, savepointTime);
}
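A hedged sketch of the manual savepoint/restore workflow this method belongs to; the instant time and `client` are assumptions, and the flow should only be run when no concurrent writers or table services are active.

// Hypothetical savepoint-and-restore flow.
String goodCommit = "20240101120000000";                        // an existing completed commit (hypothetical)
client.savepoint(goodCommit, "ops-user", "before risky backfill");
// ... risky writes happen here ...
client.restoreToSavepoint(goodCommit);                          // rolls the table back to the savepoint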
| 3.26 |
hudi_BaseHoodieWriteClient_logCompact_rdh
|
/**
 * Ensures the log compaction instant is in the expected state and performs log compaction for the workload stored at the instant time.
*
* @param logCompactionInstantTime
* Compaction Instant Time
* @return Collection of Write Status
*/
protected HoodieWriteMetadata<O> logCompact(String logCompactionInstantTime, boolean shouldComplete) {
HoodieTable table = createTable(config, context.getHadoopConf().get());
preWrite(logCompactionInstantTime, WriteOperationType.LOG_COMPACT, table.getMetaClient());
return tableServiceClient.logCompact(logCompactionInstantTime, shouldComplete);
}
| 3.26 |
hudi_BaseHoodieWriteClient_saveInternalSchema_rdh
|
// Save internal schema
private void saveInternalSchema(HoodieTable table, String instantTime, HoodieCommitMetadata metadata) {
TableSchemaResolver schemaUtil = new TableSchemaResolver(table.getMetaClient());
String historySchemaStr = schemaUtil.getTableHistorySchemaStrFromCommitMetadata().orElse("");
FileBasedInternalSchemaStorageManager schemasManager = new FileBasedInternalSchemaStorageManager(table.getMetaClient());
if ((!historySchemaStr.isEmpty()) || Boolean.parseBoolean(config.getString(HoodieCommonConfig.RECONCILE_SCHEMA.key()))) {
InternalSchema internalSchema;
Schema avroSchema = HoodieAvroUtils.createHoodieWriteSchema(config.getSchema(), config.allowOperationMetadataField());
if (historySchemaStr.isEmpty()) {
internalSchema = SerDeHelper.fromJson(config.getInternalSchema()).orElse(AvroInternalSchemaConverter.convert(avroSchema));
internalSchema.setSchemaId(Long.parseLong(instantTime));
} else {
internalSchema = InternalSchemaUtils.searchSchema(Long.parseLong(instantTime), SerDeHelper.parseSchemas(historySchemaStr));
}
InternalSchema evolvedSchema = AvroSchemaEvolutionUtils.reconcileSchema(avroSchema, internalSchema);
if (evolvedSchema.equals(internalSchema)) {
metadata.addMetadata(SerDeHelper.LATEST_SCHEMA, SerDeHelper.toJson(evolvedSchema));
// TODO save history schema by metaTable
schemasManager.persistHistorySchemaStr(instantTime, historySchemaStr.isEmpty() ? SerDeHelper.inheritSchemas(evolvedSchema, "") : historySchemaStr);
} else {
evolvedSchema.setSchemaId(Long.parseLong(instantTime));
String newSchemaStr = SerDeHelper.toJson(evolvedSchema);
metadata.addMetadata(SerDeHelper.LATEST_SCHEMA, newSchemaStr);
schemasManager.persistHistorySchemaStr(instantTime, SerDeHelper.inheritSchemas(evolvedSchema, historySchemaStr));
}
// update SCHEMA_KEY
metadata.addMetadata(SCHEMA_KEY, AvroInternalSchemaConverter.convert(evolvedSchema, avroSchema.getFullName()).toString());
}
}
| 3.26 |
hudi_BaseHoodieWriteClient_m3_rdh
|
/**
* Delete a savepoint based on the latest commit action on the savepoint timeline.
*/
public void m3() {
HoodieTable<T, I, K, O> table = createTable(config, hadoopConf);
HoodieTimeline savePointTimeline = table.getActiveTimeline().getSavePointTimeline();
if (savePointTimeline.empty()) {
throw new HoodieSavepointException("Could not delete savepoint. Savepoint timeline is empty");
}
String savepointTime = savePointTimeline.lastInstant().get().getTimestamp();
LOG.info("Deleting latest savepoint time " + savepointTime);
deleteSavepoint(savepointTime);
}
| 3.26 |
hudi_BaseHoodieWriteClient_writeTableMetadata_rdh
|
/**
* Write the HoodieCommitMetadata to metadata table if available.
*
* @param table
* {@link HoodieTable} of interest.
* @param instantTime
* instant time of the commit.
* @param metadata
* instance of {@link HoodieCommitMetadata}.
* @param writeStatuses
* WriteStatuses for the completed action.
*/
protected void writeTableMetadata(HoodieTable table, String instantTime, HoodieCommitMetadata metadata, HoodieData<WriteStatus> writeStatuses) {
context.setJobStatus(this.getClass().getSimpleName(), "Committing to metadata table: " + config.getTableName());
Option<HoodieTableMetadataWriter> metadataWriterOpt = table.getMetadataWriter(instantTime);
if (metadataWriterOpt.isPresent()) {
try (HoodieTableMetadataWriter metadataWriter = metadataWriterOpt.get()) {
metadataWriter.updateFromWriteStatuses(metadata, writeStatuses, instantTime);
} catch (Exception e) {
if (e instanceof HoodieException) {
throw ((HoodieException) (e));
} else {
throw new HoodieException("Failed to update metadata", e);
}
}
}
}
| 3.26 |
hudi_BaseHoodieWriteClient_updateColumnNullability_rdh
|
/**
* update col nullable attribute for hudi table.
*
* @param colName
 * column to be changed. To change a column inside a nested field, the full name should be specified.
 * @param nullable
 * whether the column should be nullable.
*/
public void updateColumnNullability(String colName, boolean nullable) {
Pair<InternalSchema, HoodieTableMetaClient> pair = getInternalSchemaAndMetaClient();
InternalSchema newSchema = new InternalSchemaChangeApplier(pair.getLeft()).applyColumnNullabilityChange(colName, nullable);
commitTableChange(newSchema, pair.getRight());
}
| 3.26 |
hudi_BaseHoodieWriteClient_dropIndex_rdh
|
/**
* Drops the index and removes the metadata partitions.
*
* @param partitionTypes
* - list of {@link MetadataPartitionType} which needs to be indexed
*/
public void dropIndex(List<MetadataPartitionType> partitionTypes) {
HoodieTable table = createTable(config, hadoopConf);
String dropInstant = createNewInstantTime();
HoodieInstant ownerInstant = new HoodieInstant(true, HoodieTimeline.INDEXING_ACTION, dropInstant);
this.txnManager.beginTransaction(Option.of(ownerInstant), Option.empty());
try {
context.setJobStatus(this.getClass().getSimpleName(), "Dropping partitions from metadata table: " + config.getTableName());
Option<HoodieTableMetadataWriter> metadataWriterOpt = table.getMetadataWriter(dropInstant);
if (metadataWriterOpt.isPresent()) {
try (HoodieTableMetadataWriter metadataWriter = metadataWriterOpt.get()) {
metadataWriter.dropMetadataPartitions(partitionTypes);
} catch (Exception e) {
if (e instanceof HoodieException) {
throw ((HoodieException) (e));
} else {
throw new HoodieException("Failed to drop partitions from metadata", e);}
}
}
} finally {
this.txnManager.endTransaction(Option.of(ownerInstant));
}
}
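A hedged sketch of dropping a metadata index partition; the chosen partition type and the `client` instance are only examples.

// Hypothetical usage: drop the column-stats index from the metadata table.
client.dropIndex(Collections.singletonList(MetadataPartitionType.COLUMN_STATS));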
| 3.26 |
hudi_BaseHoodieWriteClient_postCommit_rdh
|
/**
* Post Commit Hook. Derived classes use this method to perform post-commit processing
*
* @param table
* table to commit on
* @param metadata
* Commit Metadata corresponding to committed instant
* @param instantTime
* Instant Time
* @param extraMetadata
* Additional Metadata passed by user
*/
protected void postCommit(HoodieTable table, HoodieCommitMetadata metadata, String instantTime, Option<Map<String, String>> extraMetadata) {
try {
context.setJobStatus(this.getClass().getSimpleName(), (("Cleaning up marker directories for commit " + instantTime)
+ " in table ") + config.getTableName());
// Delete the marker directory for the instant.
WriteMarkersFactory.get(config.getMarkersType(), table, instantTime).quietDeleteMarkerDir(context, config.getMarkersDeleteParallelism());
} finally {
this.heartbeatClient.stop(instantTime);
}
}
| 3.26 |
hudi_BaseHoodieWriteClient_initTable_rdh
|
/**
* Instantiates and initializes instance of {@link HoodieTable}, performing crucial bootstrapping
* operations such as:
*
* NOTE: This method is engine-agnostic and SHOULD NOT be overloaded, please check on
* {@link #doInitTable(WriteOperationType, HoodieTableMetaClient, Option)} instead
*
* <ul>
* <li>Checking whether upgrade/downgrade is required</li>
* <li>Bootstrapping Metadata Table (if required)</li>
* <li>Initializing metrics contexts</li>
* </ul>
*/
public final HoodieTable initTable(WriteOperationType operationType, Option<String> instantTime) {
HoodieTableMetaClient metaClient = createMetaClient(true);
// Setup write schemas for deletes
if (WriteOperationType.isDelete(operationType)) {
setWriteSchemaForDeletes(metaClient);
}
doInitTable(operationType, metaClient, instantTime);
HoodieTable table = createTable(config, hadoopConf, metaClient);
// Validate table properties
metaClient.validateTableProperties(config.getProps());
switch (operationType) {
case INSERT :
case INSERT_PREPPED :
case UPSERT :
case UPSERT_PREPPED :
case BULK_INSERT :
case BULK_INSERT_PREPPED :
case INSERT_OVERWRITE :
case INSERT_OVERWRITE_TABLE :
setWriteTimer(table.getMetaClient().getCommitActionType());
break;
case CLUSTER :
case COMPACT :
case LOG_COMPACT :
tableServiceClient.setTableServiceTimer(operationType);
break;
default :
}
return table;
}
| 3.26 |
hudi_BaseHoodieWriteClient_scheduleLogCompaction_rdh
|
/**
* Schedules a new log compaction instant.
*
* @param extraMetadata
* Extra Metadata to be stored
*/
public Option<String> scheduleLogCompaction(Option<Map<String, String>> extraMetadata) throws HoodieIOException {
String instantTime = createNewInstantTime();
return scheduleLogCompactionAtInstant(instantTime, extraMetadata) ? Option.of(instantTime) : Option.empty();
}
| 3.26 |
hudi_BaseHoodieWriteClient_completeCompaction_rdh
|
/**
* Commit Compaction and track metrics.
*/
protected void completeCompaction(HoodieCommitMetadata metadata, HoodieTable table, String compactionCommitTime) {
tableServiceClient.completeCompaction(metadata, table, compactionCommitTime);
}
| 3.26 |
hudi_BaseHoodieWriteClient_tryUpgrade_rdh
|
/**
* Upgrades the hoodie table if need be when moving to a new Hudi version.
* This method is called within a lock. Try to avoid double locking from within this method.
*
* @param metaClient
* instance of {@link HoodieTableMetaClient} to use.
* @param instantTime
* instant time of interest if we have one.
*/
protected void tryUpgrade(HoodieTableMetaClient metaClient, Option<String> instantTime) {
UpgradeDowngrade upgradeDowngrade = new UpgradeDowngrade(metaClient, config, context, upgradeDowngradeHelper);
if (upgradeDowngrade.needsUpgradeOrDowngrade(HoodieTableVersion.current())) {
metaClient = HoodieTableMetaClient.reload(metaClient);
// Ensure no inflight commits by setting EAGER policy and explicitly cleaning all failed commits
List<String> instantsToRollback = tableServiceClient.getInstantsToRollback(metaClient, HoodieFailedWritesCleaningPolicy.EAGER, instantTime);
if (!instantsToRollback.isEmpty()) {
Map<String, Option<HoodiePendingRollbackInfo>> pendingRollbacks = tableServiceClient.getPendingRollbackInfos(metaClient);
instantsToRollback.forEach(entry -> pendingRollbacks.putIfAbsent(entry, Option.empty()));
tableServiceClient.rollbackFailedWrites(pendingRollbacks, true);
}
new UpgradeDowngrade(metaClient, config, context, upgradeDowngradeHelper).run(HoodieTableVersion.current(), instantTime.orElse(null));
metaClient.reloadActiveTimeline();
}
}
| 3.26 |
hudi_BaseHoodieWriteClient_bootstrap_rdh
|
/**
* Main API to run bootstrap to hudi.
*/
public void bootstrap(Option<Map<String, String>> extraMetadata) {
// TODO : MULTIWRITER -> check if failed bootstrap files can be cleaned later
if (config.getWriteConcurrencyMode().supportsMultiWriter()) {
throw new HoodieException("Cannot bootstrap the table in multi-writer mode");
}
HoodieTable<T, I, K, O> table = initTable(WriteOperationType.UPSERT, Option.ofNullable(HoodieTimeline.METADATA_BOOTSTRAP_INSTANT_TS));
tableServiceClient.rollbackFailedBootstrap();
table.bootstrap(context, extraMetadata);
}
| 3.26 |
hudi_BaseHoodieWriteClient_commitCompaction_rdh
|
/**
* Commit a compaction operation. Allow passing additional meta-data to be stored in commit instant file.
*
* @param compactionInstantTime
* Compaction Instant Time
* @param metadata
* All the metadata that gets stored along with a commit
* @param extraMetadata
* Extra Metadata to be stored
*/
public void commitCompaction(String compactionInstantTime, HoodieCommitMetadata metadata, Option<Map<String, String>> extraMetadata) {
tableServiceClient.commitCompaction(compactionInstantTime, metadata, extraMetadata);
}
| 3.26 |
hudi_BaseHoodieWriteClient_preCommit_rdh
|
/**
* Any pre-commit actions like conflict resolution goes here.
*
* @param inflightInstant
* instant of inflight operation.
* @param metadata
* commit metadata for which pre commit is being invoked.
*/
protected void preCommit(HoodieInstant inflightInstant, HoodieCommitMetadata metadata) {
// Create a Hoodie table after startTxn which encapsulated the commits and files visible.
// Important to create this after the lock to ensure the latest commits show up in the timeline without need for reload
HoodieTable table = createTable(config, hadoopConf);
resolveWriteConflict(table, metadata, this.pendingInflightAndRequestedInstants);
}
| 3.26 |
hudi_BaseHoodieWriteClient_inlineScheduleCompaction_rdh
|
/**
* Schedules compaction inline.
*
* @param extraMetadata
* extra metadata to be used.
* @return compaction instant if scheduled.
 */
protected Option<String> inlineScheduleCompaction(Option<Map<String, String>> extraMetadata) {
return scheduleCompaction(extraMetadata);
}
| 3.26 |
hudi_BaseHoodieWriteClient_archive_rdh
|
/**
 * Trigger archival for the table. This ensures that the number of commits does not
 * grow unbounded over time.
*/
public void archive() {
// Create a Hoodie table which encapsulated the commits and files visible
HoodieTable table = createTable(config, hadoopConf);
archive(table);
}
| 3.26 |
hudi_BaseHoodieWriteClient_runAnyPendingLogCompactions_rdh
|
/**
* Run any pending log compactions.
*/
public void runAnyPendingLogCompactions() {
tableServiceClient.runAnyPendingLogCompactions(createTable(config, hadoopConf));
}
| 3.26 |
hudi_BaseHoodieWriteClient_updateColumnComment_rdh
|
/**
* update col comment for hudi table.
*
* @param colName
 * column to be changed. To change a column inside a nested field, the full name should be specified.
 * @param doc
 * the new comment for the column.
*/
public void updateColumnComment(String colName, String doc) {
Pair<InternalSchema, HoodieTableMetaClient> pair = getInternalSchemaAndMetaClient();
InternalSchema newSchema = new InternalSchemaChangeApplier(pair.getLeft()).applyColumnCommentChange(colName, doc);
commitTableChange(newSchema, pair.getRight());
}
| 3.26 |
hudi_BaseHoodieWriteClient_m1_rdh
|
/**
* Commit changes performed at the given instantTime marker.
*/
public boolean m1(String instantTime, O writeStatuses) {
return commit(instantTime, writeStatuses, Option.empty());
}
| 3.26 |
hudi_BaseHoodieWriteClient_addColumn_rdh
|
/**
* add columns to table.
*
* @param colName
 * column to be added. To add a column to a nested field, the full name should be specified.
* @param schema
* col type to be added.
* @param doc
* col doc to be added.
* @param position
* col position to be added
* @param positionType
* col position change type. now support three change types: first/after/before
 */
public void addColumn(String colName, Schema schema, String doc, String position, TableChange.ColumnPositionChange.ColumnPositionType positionType) {
Pair<InternalSchema, HoodieTableMetaClient> pair = getInternalSchemaAndMetaClient();
InternalSchema newSchema = new InternalSchemaChangeApplier(pair.getLeft()).applyAddChange(colName, AvroInternalSchemaConverter.convertToField(schema), doc, position, positionType);
commitTableChange(newSchema, pair.getRight());
}
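A hedged sketch of adding a column; the Avro Schema factory call is standard Avro, but the column names and position are hypothetical.

// Hypothetical usage: add a string column `note` positioned right after the existing `fare` column.
Schema noteSchema = Schema.create(Schema.Type.STRING);
client.addColumn("note", noteSchema, "free-form note", "fare", TableChange.ColumnPositionChange.ColumnPositionType.AFTER);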
| 3.26 |
hudi_BaseHoodieWriteClient_scheduleClusteringAtInstant_rdh
|
/**
* Schedules a new clustering instant with passed-in instant time.
*
* @param instantTime
* clustering Instant Time
* @param extraMetadata
* Extra Metadata to be stored
*/
public boolean scheduleClusteringAtInstant(String instantTime, Option<Map<String, String>> extraMetadata) throws HoodieIOException {
return scheduleTableService(instantTime, extraMetadata, TableServiceType.CLUSTER).isPresent();
}
| 3.26 |
hudi_BaseHoodieWriteClient_scheduleCleaningAtInstant_rdh
|
/**
* Schedules a new cleaning instant with passed-in instant time.
*
* @param instantTime
* cleaning Instant Time
* @param extraMetadata
* Extra Metadata to be stored
*/
protected boolean scheduleCleaningAtInstant(String instantTime, Option<Map<String, String>> extraMetadata) throws HoodieIOException {
return scheduleTableService(instantTime, extraMetadata, TableServiceType.CLEAN).isPresent();
}
| 3.26 |
hudi_BaseHoodieWriteClient_doInitTable_rdh
|
/**
* Performs necessary bootstrapping operations (for ex, validating whether Metadata Table has to be bootstrapped).
*
* <p>NOTE: THIS OPERATION IS EXECUTED UNDER LOCK, THEREFORE SHOULD AVOID ANY OPERATIONS
* NOT REQUIRING EXTERNAL SYNCHRONIZATION
*
* @param metaClient
* instance of {@link HoodieTableMetaClient}
* @param instantTime
* current inflight instant time
*/
protected void doInitTable(WriteOperationType operationType, HoodieTableMetaClient metaClient, Option<String> instantTime) {
Option<HoodieInstant> ownerInstant = Option.empty();
if (instantTime.isPresent()) {
ownerInstant = Option.of(new HoodieInstant(true, CommitUtils.getCommitActionType(operationType, metaClient.getTableType()), instantTime.get()));
}
this.txnManager.beginTransaction(ownerInstant, Option.empty());
try {
tryUpgrade(metaClient, instantTime);
initMetadataTable(instantTime);
} finally {
this.txnManager.endTransaction(ownerInstant);
}
}
| 3.26 |
hudi_BaseHoodieWriteClient_savepoint_rdh
|
/**
* Savepoint a specific commit instant time. Latest version of data files as of the passed in instantTime
* will be referenced in the savepoint and will never be cleaned. The savepointed commit will never be rolledback or archived.
* <p>
* This gives an option to rollback the state to the savepoint anytime. Savepoint needs to be manually created and
* deleted.
* <p>
* Savepoint should be on a commit that could not have been cleaned.
*
* @param instantTime
* Commit that should be savepointed
* @param user
* User creating the savepoint
* @param comment
* Comment for the savepoint
*/
public void savepoint(String instantTime, String user, String comment) {
HoodieTable<T, I, K, O> table = createTable(config, hadoopConf);
table.savepoint(context, instantTime, user, comment);
}
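A hedged sketch that savepoints the latest completed commit so the cleaner and archiver will never remove its files; `metaClient` is an assumed HoodieTableMetaClient for the same table, and the user/comment values are placeholders.

// Hypothetical usage: savepoint the most recent completed commit on the timeline.
String latestCommit = metaClient.getActiveTimeline().getCommitsTimeline()
    .filterCompletedInstants().lastInstant().get().getTimestamp();
client.savepoint(latestCommit, "ops-user", "nightly checkpoint");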
| 3.26 |