name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
hudi_CleanPlanner_getFilesToCleanKeepingLatestVersions_rdh
|
/**
* Selects the older versions of files for cleaning, such that it bounds the number of versions of each file. This
* policy is useful, if you are simply interested in querying the table, and you don't want too many versions for a
* single file (i.e., run it with versionsRetained = 1)
*/
private Pair<Boolean, List<CleanFileInfo>> getFilesToCleanKeepingLatestVersions(String partitionPath) {
LOG.info(((("Cleaning " + partitionPath) + ", retaining latest ") + config.getCleanerFileVersionsRetained()) + " file versions. ");
List<CleanFileInfo> deletePaths = new ArrayList<>();
// Collect all the datafiles savepointed by all the savepoints
List<String> savepointedFiles = hoodieTable.getSavepointTimestamps().stream().flatMap(this::getSavepointedDataFiles).collect(Collectors.toList());
// In this scenario, we will assume that once replaced a file group automatically becomes eligible for cleaning completely
// In other words, the file versions only apply to the active file groups.
deletePaths.addAll(getReplacedFilesEligibleToClean(savepointedFiles, partitionPath, Option.empty()));
boolean toDeletePartition = false;
List<HoodieFileGroup> fileGroups = fileSystemView.getAllFileGroupsStateless(partitionPath).collect(Collectors.toList());
for (HoodieFileGroup fileGroup : fileGroups) {
int keepVersions = config.getCleanerFileVersionsRetained();
// do not cleanup slice required for pending compaction
Iterator<FileSlice> fileSliceIterator = fileGroup.getAllFileSlices().filter(fs -> !isFileSliceNeededForPendingMajorOrMinorCompaction(fs)).iterator();
if (isFileGroupInPendingMajorOrMinorCompaction(fileGroup)) {
// We have already saved the last version of file-groups for pending compaction Id
keepVersions--;
}
while (fileSliceIterator.hasNext() && (keepVersions > 0)) {
// Skip this most recent version
fileSliceIterator.next();
keepVersions--;
}
// Delete the remaining files
while (fileSliceIterator.hasNext()) {
FileSlice nextSlice = fileSliceIterator.next();
if (isFileSliceExistInSavepointedFiles(nextSlice, savepointedFiles)) {
// do not clean up a savepoint data file
continue;
}
deletePaths.addAll(getCleanFileInfoForSlice(nextSlice));
}
}
// if there are no valid file groups
// and no pending data files under the partition [IMPORTANT],
// mark it to be deleted
if (fileGroups.isEmpty() && (!hasPendingFiles(partitionPath))) {
toDeletePartition = true;
}
return Pair.of(toDeletePartition, deletePaths);
}
| 3.26 |
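A minimal standalone sketch (with made-up slice labels, not Hudi types) of the retention walk above: skip the newest keepVersions entries of a newest-first list, and everything after the skipped prefix becomes a delete candidate unless it is savepointed or needed for compaction.

```java
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

public class KeepLatestVersionsSketch {
  public static void main(String[] args) {
    // hypothetical file-slice labels, newest first, matching the "Skip this most recent version" walk above
    List<String> slicesNewestFirst = List.of("t50", "t40", "t30", "t20", "t10");
    int keepVersions = 2;

    Iterator<String> it = slicesNewestFirst.iterator();
    while (it.hasNext() && keepVersions > 0) { // skip the versions that are retained
      it.next();
      keepVersions--;
    }
    List<String> deleteCandidates = new ArrayList<>();
    while (it.hasNext()) {                     // everything older is a clean candidate
      deleteCandidates.add(it.next());
    }
    System.out.println(deleteCandidates);      // [t30, t20, t10]
  }
}
```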
hudi_CleanPlanner_getLastCompletedCommitTimestamp_rdh
|
/**
* Returns the last completed commit timestamp before clean.
*/
public String getLastCompletedCommitTimestamp() {
if (commitTimeline.lastInstant().isPresent()) {
return commitTimeline.lastInstant().get().getTimestamp();
} else {
return "";
}
}
| 3.26 |
hudi_CleanPlanner_m0_rdh
|
/**
* Gets the latest version < instantTime. This version file could still be used by queries.
*/
private String m0(List<FileSlice> fileSliceList, HoodieInstant instantTime) {
for (FileSlice file : fileSliceList) {
String fileCommitTime = file.getBaseInstantTime();
if (HoodieTimeline.compareTimestamps(instantTime.getTimestamp(), HoodieTimeline.GREATER_THAN, fileCommitTime))
{
// fileSliceList is sorted in reverse order, so the first commit we find that is < instantTime is the
// one we want
return fileCommitTime;
}
}
// There is no version of this file which is <= instantTime
return null;
}
| 3.26 |
hudi_CleanPlanner_getPartitionPathsForFullCleaning_rdh
|
/**
* Scan and list all partitions for cleaning.
*
* @return all partitions paths for the dataset.
*/
private List<String> getPartitionPathsForFullCleaning() {
// Go to brute force mode of scanning all partitions
return FSUtils.getAllPartitionPaths(context, config.getMetadataConfig(), config.getBasePath());
}
| 3.26 |
hudi_CleanPlanner_isFileSliceNeededForPendingLogCompaction_rdh
|
/**
* Determine if a file slice needs to be preserved for pending log compaction.
*
* @param fileSlice
* File Slice
* @return true if file slice needs to be preserved, false otherwise.
*/
private boolean isFileSliceNeededForPendingLogCompaction(FileSlice fileSlice) {
CompactionOperation op = fgIdToPendingLogCompactionOperations.get(fileSlice.getFileGroupId());
if (null != op) {
// If file slice's instant time is newer or same as that of operation, do not clean
return HoodieTimeline.compareTimestamps(fileSlice.getBaseInstantTime(), HoodieTimeline.GREATER_THAN_OR_EQUALS, op.getBaseInstantTime());
}
return false;
}
| 3.26 |
hudi_CleanPlanner_getFilesToCleanKeepingLatestHours_rdh
|
/**
* This method finds the files to be cleaned based on the number of hours. If {@code config.getCleanerHoursRetained()} is set to 5,
* all the files with commit times earlier than 5 hours ago will be removed. Also, the latest file for any file group is retained.
* This policy gives much more flexibility to users for retaining data for running incremental queries as compared to
* KEEP_LATEST_COMMITS cleaning policy. The default number of hours is 5.
*
* @param partitionPath
* partition path to check
* @param earliestCommitToRetain
* earliest commit to retain
* @return list of files to clean
*/
private Pair<Boolean, List<CleanFileInfo>> getFilesToCleanKeepingLatestHours(String partitionPath, Option<HoodieInstant> earliestCommitToRetain) {
return getFilesToCleanKeepingLatestCommits(partitionPath, 0, earliestCommitToRetain, HoodieCleaningPolicy.KEEP_LATEST_BY_HOURS);
}
| 3.26 |
hudi_CleanPlanner_isFileSliceNeededForPendingMajorOrMinorCompaction_rdh
|
/**
* Determine if file slice needed to be preserved for pending compaction or log compaction.
*
* @param fileSlice
* File slice
* @return true if file slice needs to be preserved, false otherwise.
*/
private boolean isFileSliceNeededForPendingMajorOrMinorCompaction(FileSlice fileSlice) {
return isFileSliceNeededForPendingCompaction(fileSlice) || isFileSliceNeededForPendingLogCompaction(fileSlice);
}
| 3.26 |
hudi_CleanPlanner_getPartitionPathsForCleanByCommits_rdh
|
/**
* Return partition paths for cleaning by commits mode.
*
* @param instantToRetain
* Earliest Instant to retain
* @return list of partitions
* @throws IOException
*/
private List<String> getPartitionPathsForCleanByCommits(Option<HoodieInstant> instantToRetain) throws IOException {
if (!instantToRetain.isPresent()) {
LOG.info("No earliest commit to retain. No need to scan partitions !!");
return Collections.emptyList();
}
if (config.incrementalCleanerModeEnabled()) {
Option<HoodieInstant> lastClean = hoodieTable.getCleanTimeline().filterCompletedInstants().lastInstant();
if (lastClean.isPresent()) {
if (hoodieTable.getActiveTimeline().isEmpty(lastClean.get())) {
hoodieTable.getActiveTimeline().deleteEmptyInstantIfExists(lastClean.get());
} else {
HoodieCleanMetadata cleanMetadata = TimelineMetadataUtils.deserializeHoodieCleanMetadata(hoodieTable.getActiveTimeline().getInstantDetails(lastClean.get()).get());
if (((cleanMetadata.getEarliestCommitToRetain() != null) && (cleanMetadata.getEarliestCommitToRetain().length() > 0)) && (!hoodieTable.getActiveTimeline().getCommitsTimeline().isBeforeTimelineStarts(cleanMetadata.getEarliestCommitToRetain()))) {
return getPartitionPathsForIncrementalCleaning(cleanMetadata, instantToRetain);
}
}
}
}
return getPartitionPathsForFullCleaning();
}
| 3.26 |
hudi_CleanPlanner_getPartitionPathsForIncrementalCleaning_rdh
|
/**
* Use Incremental Mode for finding partition paths.
*
* @param cleanMetadata
* @param newInstantToRetain
* @return
*/
private List<String> getPartitionPathsForIncrementalCleaning(HoodieCleanMetadata cleanMetadata, Option<HoodieInstant> newInstantToRetain) {
LOG.info(((("Incremental Cleaning mode is enabled. Looking up partition-paths that have since changed " + "since last cleaned at ") + cleanMetadata.getEarliestCommitToRetain()) + ". New Instant to retain : ") + newInstantToRetain);
return hoodieTable.getCompletedCommitsTimeline().getInstantsAsStream().filter(instant -> HoodieTimeline.compareTimestamps(instant.getTimestamp(), HoodieTimeline.GREATER_THAN_OR_EQUALS, cleanMetadata.getEarliestCommitToRetain()) && HoodieTimeline.compareTimestamps(instant.getTimestamp(), HoodieTimeline.LESSER_THAN, newInstantToRetain.get().getTimestamp())).flatMap(instant -> {
try {
if (HoodieTimeline.REPLACE_COMMIT_ACTION.equals(instant.getAction())) {
HoodieReplaceCommitMetadata replaceCommitMetadata = HoodieReplaceCommitMetadata.fromBytes(hoodieTable.getActiveTimeline().getInstantDetails(instant).get(), HoodieReplaceCommitMetadata.class);
return Stream.concat(replaceCommitMetadata.getPartitionToReplaceFileIds().keySet().stream(), replaceCommitMetadata.getPartitionToWriteStats().keySet().stream());
} else {
HoodieCommitMetadata commitMetadata = HoodieCommitMetadata.fromBytes(hoodieTable.getActiveTimeline().getInstantDetails(instant).get(), HoodieCommitMetadata.class);
return commitMetadata.getPartitionToWriteStats().keySet().stream();
}
} catch (IOException e) {
throw new HoodieIOException(e.getMessage(), e);
}
}).distinct().collect(Collectors.toList());
}
| 3.26 |
hudi_CleanPlanner_getPartitionPathsToClean_rdh
|
/**
* Returns list of partitions where clean operations needs to be performed.
*
* @param earliestRetainedInstant
* New instant to be retained after this cleanup operation
* @return list of partitions to scan for cleaning
* @throws IOException
* when underlying file-system throws this exception
*/
public List<String> getPartitionPathsToClean(Option<HoodieInstant> earliestRetainedInstant) throws IOException {
switch (config.getCleanerPolicy()) {
case KEEP_LATEST_COMMITS :
case KEEP_LATEST_BY_HOURS :
return getPartitionPathsForCleanByCommits(earliestRetainedInstant);
case KEEP_LATEST_FILE_VERSIONS :
return getPartitionPathsForFullCleaning();
default :
throw new IllegalStateException("Unknown Cleaner Policy");
}
}
| 3.26 |
hudi_CleanPlanner_isFileSliceExistInSavepointedFiles_rdh
|
/**
* Verify whether file slice exists in savepointedFiles, check both base file and log files
*/
private boolean isFileSliceExistInSavepointedFiles(FileSlice fs, List<String> savepointedFiles) {
if (fs.getBaseFile().isPresent() && savepointedFiles.contains(fs.getBaseFile().get().getFileName())) {
return true;
}
for (HoodieLogFile hoodieLogFile : fs.getLogFiles().collect(Collectors.toList())) {
if (savepointedFiles.contains(hoodieLogFile.getFileName())) {
return true;
}
}
return false;
}
| 3.26 |
hudi_CleanPlanner_hasPendingFiles_rdh
|
/**
* Returns whether there are uncommitted data files under the given partition,
* the pending files are generated by the inflight instants and may be ready to commit,
* the partition cannot be deleted as a whole if any pending file exists.
*
* <p>IMPORTANT: {@code fsView.getAllFileGroups} does not return pending file groups for metadata table,
* file listing must be used instead.
*/
private boolean hasPendingFiles(String partitionPath) {
try {
HoodieTableFileSystemView fsView = new HoodieTableFileSystemView(hoodieTable.getMetaClient(), hoodieTable.getActiveTimeline());
Path v29 = new Path(hoodieTable.getMetaClient().getBasePathV2(), partitionPath);
fsView.addFilesToView(FSUtils.getAllDataFilesInPartition(hoodieTable.getMetaClient().getFs(), v29));
// use #getAllFileGroups(partitionPath) instead of #getAllFileGroups() to exclude the replaced file groups.
return fsView.getAllFileGroups(partitionPath).findAny().isPresent();
} catch (Exception ex) {
// if any exception throws, assume there are existing pending files
LOG.warn(("Error while checking the pending files under partition: " + partitionPath) + ", assumes the files exist", ex);
return true;
}
}
| 3.26 |
hudi_CleanPlanner_getDeletePaths_rdh
|
/**
* Returns files to be cleaned for the given partitionPath based on cleaning policy.
*/
public Pair<Boolean, List<CleanFileInfo>> getDeletePaths(String partitionPath, Option<HoodieInstant> earliestCommitToRetain) {
HoodieCleaningPolicy policy = config.getCleanerPolicy();
Pair<Boolean, List<CleanFileInfo>> deletePaths;
if (policy == HoodieCleaningPolicy.KEEP_LATEST_COMMITS) {
deletePaths = getFilesToCleanKeepingLatestCommits(partitionPath, earliestCommitToRetain);
} else if (policy == HoodieCleaningPolicy.KEEP_LATEST_FILE_VERSIONS) {
deletePaths = getFilesToCleanKeepingLatestVersions(partitionPath);
} else if (policy == HoodieCleaningPolicy.KEEP_LATEST_BY_HOURS) {
deletePaths = getFilesToCleanKeepingLatestHours(partitionPath, earliestCommitToRetain);
} else {
throw new IllegalArgumentException("Unknown cleaning policy : " + policy.name());
}
LOG.info((deletePaths.getValue().size() + " patterns used to delete in partition path:") + partitionPath);
if (deletePaths.getKey()) {
LOG.info(("Partition " + partitionPath) + " to be deleted");
}
return deletePaths;
}
| 3.26 |
hudi_HeartbeatUtils_deleteHeartbeatFile_rdh
|
/**
* Deletes the heartbeat file for the specified instant.
*
* @param fs
* Hadoop FileSystem instance
* @param basePath
* Hoodie table base path
* @param instantTime
* Commit instant time
* @param config
* HoodieWriteConfig instance
* @return Boolean indicating whether heartbeat file was deleted or not
*/
public static boolean deleteHeartbeatFile(FileSystem fs, String basePath, String instantTime, HoodieWriteConfig config) {
if (config.getFailedWritesCleanPolicy().isLazy()) {
return deleteHeartbeatFile(fs, basePath, instantTime);
}
return false;
}
| 3.26 |
hudi_HeartbeatUtils_abortIfHeartbeatExpired_rdh
|
/**
* Check if the heartbeat corresponding to instantTime has expired. If yes, abort by throwing an exception.
*
* @param instantTime
* @param table
* @param heartbeatClient
* @param config
*/
public static void abortIfHeartbeatExpired(String instantTime, HoodieTable table, HoodieHeartbeatClient heartbeatClient, HoodieWriteConfig config) {
ValidationUtils.checkArgument(heartbeatClient != null);
try {
if (config.getFailedWritesCleanPolicy().isLazy() && heartbeatClient.isHeartbeatExpired(instantTime)) {
throw new HoodieException((("Heartbeat for instant " + instantTime) + " has expired, last heartbeat ") + getLastHeartbeatTime(table.getMetaClient().getFs(), config.getBasePath(), instantTime));
}
} catch (IOException io) {
throw new HoodieException("Unable to read heartbeat", io);
}
}
| 3.26 |
hudi_MetadataMigrator_migrateToVersion_rdh
|
/**
* Migrate metadata to a specific version.
*
* @param metadata
* Hoodie Table Meta Client
* @param metadataVersion
* Metadata Version
* @param targetVersion
* Target Version
* @return Metadata conforming to the target version
*/
public T migrateToVersion(T metadata, int metadataVersion, int targetVersion) {
ValidationUtils.checkArgument(targetVersion >= oldestVersion);
ValidationUtils.checkArgument(targetVersion <= latestVersion);
if (metadataVersion == targetVersion) {
return metadata;
} else if (metadataVersion > targetVersion) {
return downgradeToVersion(metadata, metadataVersion, targetVersion);
} else {
return upgradeToVersion(metadata, metadataVersion, targetVersion);
}
}
| 3.26 |
hudi_MetadataMigrator_upgradeToLatest_rdh
|
/**
* Upgrade Metadata version to its latest.
*
* @param metadata
* Metadata
* @param metadataVersion
* Current version of metadata
* @return Metadata conforming to the latest version of this metadata
*/
public T upgradeToLatest(T metadata, int metadataVersion) {
if (metadataVersion == latestVersion) {
return metadata;
}
int newVersion = metadataVersion + 1;
while (newVersion <= latestVersion) {
VersionMigrator<T> upgrader = migrators.get(newVersion);
metadata = upgrader.upgradeFrom(metadata);
newVersion += 1;
}
return metadata;
}
| 3.26 |
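A standalone sketch of the chained-migrator pattern that upgradeToLatest implements: apply each per-version upgrader in order, from the current version plus one up to the latest. The upgrader map and string payload here are hypothetical stand-ins for Hudi's VersionMigrator instances.

```java
import java.util.Map;
import java.util.TreeMap;
import java.util.function.UnaryOperator;

public class MigrationChainSketch {
  public static void main(String[] args) {
    // hypothetical per-version upgraders, keyed by the version they upgrade TO
    Map<Integer, UnaryOperator<String>> upgraders = new TreeMap<>();
    upgraders.put(2, m -> m + "->v2");
    upgraders.put(3, m -> m + "->v3");

    String metadata = "v1-payload";
    int currentVersion = 1;
    int latestVersion = 3;

    // walk from currentVersion + 1 up to latestVersion, applying each upgrader in order
    for (int v = currentVersion + 1; v <= latestVersion; v++) {
      metadata = upgraders.get(v).apply(metadata);
    }
    System.out.println(metadata); // v1-payload->v2->v3
  }
}
```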
hudi_CsvDFSSource_fromFiles_rdh
|
/**
* Reads the CSV files and parses the lines into {@link Dataset} of {@link Row}.
*
* @param pathStr
* The list of file paths, separated by ','.
* @return {@link Dataset} of {@link Row} containing the records.
*/
private Option<Dataset<Row>> fromFiles(Option<String> pathStr) {
if (pathStr.isPresent()) {
DataFrameReader dataFrameReader = sparkSession.read().format("csv");
CSV_CONFIG_KEYS.forEach(optionKey -> {
String configPropName = CSV_SRC_CONFIG_PREFIX + optionKey;
String oldConfigPropName = OLD_CSV_SRC_CONFIG_PREFIX + optionKey;
String value = props.getString(configPropName, props.getString(oldConfigPropName, null));
// Pass down the Hudi CSV configs to Spark DataFrameReader
if (value != null) {
dataFrameReader.option(optionKey, value);
}
});
if (sourceSchema != null) {
// Source schema is specified, pass it to the reader
dataFrameReader.schema(sourceSchema);
}
dataFrameReader.option("inferSchema", Boolean.toString(sourceSchema == null));
return Option.of(dataFrameReader.load(pathStr.get().split(",")));
} else {
return Option.empty();
}
}
| 3.26 |
hudi_HoodieCombineHiveInputFormat_getPaths_rdh
|
/**
* Returns all the Paths in the split.
*/
@Override
public Path[] getPaths() {
return inputSplitShim.getPaths();
}
| 3.26 |
hudi_HoodieCombineHiveInputFormat_getPath_rdh
|
/**
* Returns the i<sup>th</sup> Path.
*/
@Override
public Path getPath(int i) {
return inputSplitShim.getPath(i);
}
| 3.26 |
hudi_HoodieCombineHiveInputFormat_getLengths_rdh
|
/**
* Returns an array containing the lengths of the files in the split.
*/
@Override
public long[] getLengths() {
return inputSplitShim.getLengths();
}
| 3.26 |
hudi_HoodieCombineHiveInputFormat_sampleSplits_rdh
|
/**
* This function is used to sample inputs for clauses like "TABLESAMPLE(1 PERCENT)"
* <p>
* First, splits are grouped by the alias they are for. If one split serves more than one alias, or serves an alias that is
* not sampled, we just add it directly to the returned list. Then we find a list of exclusive splits for every alias to be
* sampled. For each alias, we start from position seedNumber % totalNumber, and keep adding splits until the total size
* hits the target percentage.
*
* @return the sampled splits
*/
private List<CombineFileSplit> sampleSplits(List<CombineFileSplit> splits) {
HashMap<String, SplitSample> nameToSamples = mrwork.getNameToSplitSample();
List<CombineFileSplit> retLists = new ArrayList<>();
Map<String, ArrayList<CombineFileSplit>> aliasToSplitList = new HashMap<>();
Map<Path, ArrayList<String>> pathToAliases = mrwork.getPathToAliases();
Map<Path, ArrayList<String>> pathToAliasesNoScheme = removeScheme(pathToAliases);
// Populate list of exclusive splits for every sampled alias
//
for (CombineFileSplit split : splits) {
String v59 = null;
for (Path v60 : split.getPaths()) {
boolean schemeless = v60.toUri().getScheme() == null;
List<String> l = HiveFileFormatUtils.doGetAliasesFromPath(schemeless ? pathToAliasesNoScheme : pathToAliases, v60);
// a path for a split unqualified the split from being sampled if:
// 1. it serves more than one alias
// 2. the alias it serves is not sampled
// 3. it serves different alias than another path for the same split
if (((l.size() != 1) || (!nameToSamples.containsKey(l.get(0)))) || ((v59 != null) && (!Objects.equals(l.get(0), v59)))) {
v59 = null;
break;
}
v59 = l.get(0);
}
if (v59 != null) {
// split exclusively serves alias, which needs to be sampled
// add it to the split list of the alias.
if (!aliasToSplitList.containsKey(v59)) {
aliasToSplitList.put(v59, new ArrayList<>());
}
aliasToSplitList.get(v59).add(split);
} else {
// The split doesn't exclusively serve one alias
retLists.add(split);
}
}
// for every sampled alias, we figure out splits to be sampled and add
// them to return list
//
for (Map.Entry<String, ArrayList<CombineFileSplit>> entry : aliasToSplitList.entrySet()) {
ArrayList<CombineFileSplit> splitList = entry.getValue();
long totalSize = 0;
for (CombineFileSplit split : splitList) {
totalSize += split.getLength();
}
SplitSample splitSample = nameToSamples.get(entry.getKey());
long targetSize = splitSample.getTargetSize(totalSize);
int startIndex = splitSample.getSeedNum() % splitList.size();
long size = 0;
for (int i = 0; i < splitList.size(); i++) {
CombineFileSplit split = splitList.get((startIndex + i) % splitList.size());
retLists.add(split);
long splitgLength = split.getLength();
if ((size + splitgLength) >= targetSize) {
LOG.info(((("Sample alias " + entry.getValue()) + " using ") + (i + 1)) + "splits");
if ((size + splitgLength) > targetSize) {
((InputSplitShim) (split)).shrinkSplit(targetSize - size);
}
break;
}
size += splitgLength;
}
}
return retLists;
}
| 3.26 |
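A minimal standalone sketch of the per-alias sampling loop described above: start at seedNumber % totalNumber and keep adding splits until the accumulated size reaches the target. The split sizes, seed, and target here are made-up values, not Hive objects.

```java
import java.util.ArrayList;
import java.util.List;

public class SampleSplitsSketch {
  public static void main(String[] args) {
    long[] splitLengths = {100, 200, 300, 400}; // hypothetical split sizes for one sampled alias
    int seedNum = 6;
    long targetSize = 450;

    List<Integer> sampledIndices = new ArrayList<>();
    int startIndex = seedNum % splitLengths.length; // 6 % 4 = 2
    long size = 0;
    for (int i = 0; i < splitLengths.length; i++) {
      int idx = (startIndex + i) % splitLengths.length;
      sampledIndices.add(idx);
      size += splitLengths[idx];
      if (size >= targetSize) {
        break; // the real code shrinks the last split if it overshoots the target
      }
    }
    System.out.println(sampledIndices); // [2, 3] -> 300 + 400 = 700 >= 450
  }
}
```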
hudi_HoodieCombineHiveInputFormat_getInputPaths_rdh
|
/**
* MOD - Just added this for visibility.
*/
Path[] getInputPaths(JobConf job) throws IOException {
Path[] dirs = FileInputFormat.getInputPaths(job);
if (dirs.length == 0) {
// on tez we're avoiding to duplicate the file info in FileInputFormat.
if (HiveConf.getVar(job, ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
try {
List<Path> paths = Utilities.getInputPathsTez(job, mrwork);
dirs = paths.toArray(new Path[paths.size()]);
} catch (Exception e) {
throw new IOException("Could not create input files", e);
}
} else {
throw new IOException("No input paths specified in job");
}
}
return dirs;
}
| 3.26 |
hudi_HoodieCombineHiveInputFormat_readFields_rdh
|
/**
* Writable interface.
*/
@Override
public void readFields(DataInput in) throws IOException {
inputFormatClassName = Text.readString(in);
if (HoodieParquetRealtimeInputFormat.class.getName().equals(inputFormatClassName)) {
String inputShimClassName = Text.readString(in);
inputSplitShim = ReflectionUtils.loadClass(inputShimClassName);
inputSplitShim.readFields(in);
} else {
inputSplitShim.readFields(in);
}
}
| 3.26 |
hudi_HoodieCombineHiveInputFormat_getNumPaths_rdh
|
/**
* Returns the number of Paths in the split.
*/
@Override
public int getNumPaths() {
return inputSplitShim.getNumPaths();
}
| 3.26 |
hudi_HoodieCombineHiveInputFormat_accept_rdh
|
// returns true if the specified path matches the prefix stored
// in this TestFilter.
@Override
public boolean accept(Path path) {
boolean find = false;
while (path != null) {
if (pStrings.contains(path.toUri().getPath())) {
find = true;
break;
}
path = path.getParent();
}
return find;
}
| 3.26 |
hudi_HoodieCombineHiveInputFormat_getRecordReader_rdh
|
/**
* Create a generic Hive RecordReader than can iterate over all chunks in a CombinedFileSplit.
*/
@Override
public RecordReader getRecordReader(InputSplit split, JobConf job, Reporter reporter) throws IOException {
if (!(split instanceof HoodieCombineHiveInputFormat.CombineHiveInputSplit)) {
return super.getRecordReader(split, job, reporter);
}
CombineHiveInputSplit hsplit = ((CombineHiveInputSplit) (split));
String inputFormatClassName = null;
Class<?> inputFormatClass;
try {
inputFormatClassName = hsplit.inputFormatClassName();
inputFormatClass = job.getClassByName(inputFormatClassName);
} catch (Exception e) {
throw new IOException("cannot find class " + inputFormatClassName);
}
pushProjectionsAndFilters(job, inputFormatClass, hsplit.getPath(0));
if (inputFormatClass.getName().equals(getParquetRealtimeInputFormatClassName())) {
HoodieCombineFileInputFormatShim shims = createInputFormatShim();
IOContextMap.get(job).setInputPath(((CombineHiveInputSplit) (split)).getPath(0));
return shims.getRecordReader(job, ((CombineHiveInputSplit) (split)).getInputSplitShim(), reporter, CombineHiveRecordReader.class);
} else {
return ShimLoader.getHadoopShims().getCombineFileInputFormat().getRecordReader(job, ((CombineFileSplit) (split)), reporter, CombineHiveRecordReader.class);
}
}
| 3.26 |
hudi_HoodieCombineHiveInputFormat_getStartOffsets_rdh
|
/**
* Returns an array containing the startoffsets of the files in the split.
*/
@Override
public long[] getStartOffsets() {
return inputSplitShim.getStartOffsets();
}
| 3.26 |
hudi_HoodieCombineHiveInputFormat_getLocations_rdh
|
/**
* Returns all the Paths where this input-split resides.
*/
@Override
public String[] getLocations() throws IOException {
return inputSplitShim.getLocations();
}
| 3.26 |
hudi_HoodieCombineHiveInputFormat_inputFormatClassName_rdh
|
/**
* Returns the inputFormat class name for the i-th chunk.
*/
public String inputFormatClassName() {
return inputFormatClassName;
}
| 3.26 |
hudi_HoodieCombineHiveInputFormat_getSplits_rdh
|
/**
* Create Hive splits based on CombineFileSplit.
*/
@Override
public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
PerfLogger perfLogger = SessionState.getPerfLogger();
perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.GET_SPLITS);
init(job);
List<InputSplit> result = new ArrayList<>();
Path[] paths = getInputPaths(job);
List<Path> nonCombinablePaths = new ArrayList<>(paths.length / 2);
List<Path> combinablePaths = new ArrayList<>(paths.length / 2);
int numThreads = Math.min(MAX_CHECK_NONCOMBINABLE_THREAD_NUM, ((int) (Math.ceil(((double) (paths.length)) / DEFAULT_NUM_PATH_PER_THREAD))));
// This check is necessary because for Spark branch, the result array from
// getInputPaths() above could be empty, and therefore numThreads could be 0.
// In that case, Executors.newFixedThreadPool will fail.
if (numThreads > 0) {
try {
Set<Integer> nonCombinablePathIndices = getNonCombinablePathIndices(job, paths, numThreads);
for (int i = 0; i < paths.length; i++) {
if (nonCombinablePathIndices.contains(i)) {
nonCombinablePaths.add(paths[i]);
} else {
combinablePaths.add(paths[i]);
}
}
} catch (Exception e) {
LOG.error("Error checking non-combinable path", e);
perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.GET_SPLITS);
throw new IOException(e);
}
}
// Store the previous value for the path specification
String oldPaths = job.get(FileInputFormat.INPUT_DIR);
if (LOG.isDebugEnabled()) {
LOG.debug((("The received input paths are: [" + oldPaths) + "] against the property ") + FileInputFormat.INPUT_DIR);
}
// Process the normal splits
if (nonCombinablePaths.size() > 0) {
FileInputFormat.setInputPaths(job, nonCombinablePaths.toArray(new Path[0]));
InputSplit[] splits = super.getSplits(job, numSplits);
Collections.addAll(result, splits);
}
// Process the combine splits
if (combinablePaths.size() > 0) {
FileInputFormat.setInputPaths(job, combinablePaths.toArray(new Path[0]));
Map<Path, PartitionDesc> pathToPartitionInfo = (this.pathToPartitionInfo != null) ? this.pathToPartitionInfo : Utilities.getMapWork(job).getPathToPartitionInfo();
InputSplit[] splits = getCombineSplits(job, numSplits, pathToPartitionInfo);
Collections.addAll(result, splits);
}
// Restore the old path information back
// This is just to prevent incompatibilities with previous versions Hive
// if some application depends on the original value being set.
if (oldPaths != null) {
job.set(FileInputFormat.INPUT_DIR, oldPaths);
}
// clear work from ThreadLocal after splits generated in case of thread is reused in pool.
Utilities.clearWorkMapForConf(job);
LOG.info("Number of all splits "
+
result.size());
perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.GET_SPLITS);
return result.toArray(new InputSplit[result.size()]);
}
| 3.26 |
hudi_HoodieCombineHiveInputFormat_getOffset_rdh
|
/**
* Returns the start offset of the i<sup>th</sup> Path.
*/
@Override
public long getOffset(int i) {
return inputSplitShim.getOffset(i);
}
| 3.26 |
hudi_HoodieCombineHiveInputFormat_toString_rdh
|
/**
* Prints this object as a string.
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(inputSplitShim.toString());
sb.append("InputFormatClass: " + inputFormatClassName);
sb.append("\n");
return sb.toString();
}
| 3.26 |
hudi_HoodieCombineHiveInputFormat_getLength_rdh
|
/**
* Returns the length of the i<sup>th</sup> Path.
*/
@Override
public long getLength(int i) {
return inputSplitShim.getLength(i);
}
| 3.26 |
hudi_HoodieCombineHiveInputFormat_getCombineSplits_rdh
|
/**
* Create Hive splits based on CombineFileSplit.
*/
private InputSplit[] getCombineSplits(JobConf job, int numSplits, Map<Path, PartitionDesc> pathToPartitionInfo) throws IOException {
init(job);
Map<Path, ArrayList<String>> v0 = mrwork.getPathToAliases();
Map<String, Operator<? extends OperatorDesc>> aliasToWork = mrwork.getAliasToWork();
/* MOD - Initialize a custom combine input format shim that will call listStatus on the custom inputFormat. */
HoodieCombineHiveInputFormat.HoodieCombineFileInputFormatShim combine = createInputFormatShim();
InputSplit[] splits;
if (combine.getInputPathsShim(job).length == 0) {
throw new IOException("No input paths specified in job");
}
List<InputSplit> result = new ArrayList<>();
// combine splits only from same tables and same partitions. Do not combine splits from multiple
// tables or multiple partitions.
Path[] paths = StringInternUtils.internUriStringsInPathArray(combine.getInputPathsShim(job));
List<Path> inpDirs = new ArrayList<>();
List<Path> inpFiles = new ArrayList<>();
Map<CombinePathInputFormat, CombineFilter> poolMap = new HashMap<>();
Set<Path> poolSet = new HashSet<>();
for (Path path : paths) {
PartitionDesc part = getPartitionFromPath(pathToPartitionInfo, path, IOPrepareCache.get().allocatePartitionDescMap());
TableDesc tableDesc = part.getTableDesc();
if ((tableDesc != null) && tableDesc.isNonNative()) {
return super.getSplits(job, numSplits);
}
// Use HiveInputFormat if any of the paths is not splittable
Class<?> inputFormatClass = part.getInputFileFormatClass();
String inputFormatClassName = inputFormatClass.getName();
InputFormat inputFormat = getInputFormatFromCache(inputFormatClass, job);
LOG.info("Input Format => " + inputFormatClass.getName()); // **MOD** Set the hoodie filter in the combine
if (inputFormatClass.getName().equals(getParquetInputFormatClassName())) {
combine.setHoodieFilter(true);
} else if (inputFormatClass.getName().equals(getParquetRealtimeInputFormatClassName())) {
LOG.info("Setting hoodie filter and realtime input format");
combine.setHoodieFilter(true);
combine.setRealTime(true);
if (job.get(hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS, "").isEmpty()) {
List<String> partitions = new ArrayList<>(part.getPartSpec().keySet());
if (!partitions.isEmpty()) {
String partitionStr = String.join("/", partitions);
LOG.info((("Setting Partitions in jobConf - Partition Keys for Path : " + path) + " is :") + partitionStr);
job.set(hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS, partitionStr);
} else {
job.set(hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS, "");
}
}
}
String deserializerClassName = null;
try {
deserializerClassName = part.getDeserializer(job).getClass().getName();
} catch (Exception e) {
// ignore
LOG.error("Getting deserializer class name error ", e);
}
// don't combine if inputformat is a SymlinkTextInputFormat
if (inputFormat instanceof SymlinkTextInputFormat) {
splits = super.getSplits(job, numSplits);
return splits;
}
Path filterPath = path;
// Does a pool exist for this path already
CombineFilter f;
List<Operator<? extends OperatorDesc>> v21;
if (!mrwork.isMapperCannotSpanPartns()) {
// if mapper can span partitions, make sure a splits does not contain multiple
// opList + inputFormatClassName + deserializerClassName combination
// This is done using the Map of CombinePathInputFormat to PathFilter
v21 = HiveFileFormatUtils.doGetWorksFromPath(v0, aliasToWork, filterPath);
CombinePathInputFormat combinePathInputFormat = new CombinePathInputFormat(v21, inputFormatClassName, deserializerClassName);
f = poolMap.get(combinePathInputFormat);
if (f == null) {
f = new CombineFilter(filterPath);
LOG.info((("CombineHiveInputSplit creating pool for " + path) + "; using filter path ") + filterPath);
combine.createPool(job, f);
poolMap.put(combinePathInputFormat, f);
} else {
LOG.info((("CombineHiveInputSplit: pool is already created for " + path) + "; using filter path ") + filterPath);
f.addPath(filterPath);
}
} else // In the case of tablesample, the input paths are pointing to files rather than directories.
// We need to get the parent directory as the filtering path so that all files in the same
// parent directory will be grouped into one pool but not files from different parent
// directories. This guarantees that a split will combine all files in the same partition
// but won't cross multiple partitions if the user has asked so.
if (!path.getFileSystem(job).getFileStatus(path).isDirectory()) {
// path is not directory
filterPath = path.getParent();
inpFiles.add(path);
poolSet.add(filterPath);
} else {
inpDirs.add(path);
}
}
// Processing directories
List<CombineFileSplit> iss = new ArrayList<>();
if (!mrwork.isMapperCannotSpanPartns()) {
// mapper can span partitions
// combine into as few as one split, subject to the PathFilters set
// using combine.createPool.
iss = Arrays.asList(combine.getSplits(job, 1));
} else {
for (Path path : inpDirs) {
processPaths(job, combine, iss, path);
}
if (inpFiles.size() > 0) {
// Processing files
for (Path filterPath : poolSet) {
combine.createPool(job, new CombineFilter(filterPath));
}
processPaths(job, combine, iss, inpFiles.toArray(new Path[0]));
}
}
if ((mrwork.getNameToSplitSample() != null) && (!mrwork.getNameToSplitSample().isEmpty())) {
iss = sampleSplits(iss);
}
for (CombineFileSplit is : iss) {
final InputSplit csplit;
if (combine.isRealTime) {
if (is instanceof HoodieCombineRealtimeHiveSplit) {
csplit = is;
} else {
csplit = new HoodieCombineRealtimeHiveSplit(job, is, pathToPartitionInfo);
}
} else {
csplit = new CombineHiveInputSplit(job, is, pathToPartitionInfo);
}
result.add(csplit);
}
LOG.info("number of splits " + result.size());
return result.toArray(new CombineHiveInputSplit[result.size()]);
}
| 3.26 |
hudi_HoodieCombineHiveInputFormat_write_rdh
|
/**
* Writable interface.
*/
@Override
public void write(DataOutput out) throws IOException {
if (inputFormatClassName == null) {
if (pathToPartitionInfo == null) {
pathToPartitionInfo = Utilities.getMapWork(getJob()).getPathToPartitionInfo();
}
// extract all the inputFormatClass names for each chunk in the
// CombinedSplit.
PartitionDesc part = getPartitionFromPath(pathToPartitionInfo, inputSplitShim.getPath(0), IOPrepareCache.get().getPartitionDescMap());
// create a new InputFormat instance if this is the first time to see
// this class
inputFormatClassName = part.getInputFileFormatClass().getName();
}
Text.writeString(out, inputFormatClassName);
if (HoodieParquetRealtimeInputFormat.class.getName().equals(inputFormatClassName)) {
// Write Shim Class Name
Text.writeString(out, inputSplitShim.getClass().getName());
}
inputSplitShim.write(out);
}
| 3.26 |
hudi_HoodieCombineHiveInputFormat_getNonCombinablePathIndices_rdh
|
/**
* Gets all the path indices that should not be combined.
*/
public Set<Integer> getNonCombinablePathIndices(JobConf job, Path[] paths, int numThreads) throws ExecutionException, InterruptedException {
LOG.info(((("Total number of paths: " + paths.length) + ", launching ") + numThreads) + " threads to check non-combinable ones.");
int numPathPerThread = ((int) (Math.ceil(((double) (paths.length)) / numThreads)));
ExecutorService v29 = Executors.newFixedThreadPool(numThreads);
List<Future<Set<Integer>>> futureList = new ArrayList<>(numThreads);
try {
for (int i = 0; i < numThreads; i++) {
int start = i * numPathPerThread;
int length = (i != (numThreads - 1)) ? numPathPerThread : paths.length - start;
futureList.add(v29.submit(new CheckNonCombinablePathCallable(paths, start, length, job)));
}
Set<Integer> nonCombinablePathIndices = new HashSet<>();
for (Future<Set<Integer>> future : futureList) {
nonCombinablePathIndices.addAll(future.get());
}
return nonCombinablePathIndices;
} finally {
v29.shutdownNow();
}
}
| 3.26 |
hudi_CatalogOptions_allOptions_rdh
|
/**
* Returns all the config options.
*/
public static List<ConfigOption<?>> allOptions() {
return OptionsResolver.allOptions(CatalogOptions.class);
}
| 3.26 |
hudi_CatalogOptions_tableCommonOptions_rdh
|
/**
* Returns all the common table options that can be shared.
*
* @param catalogOptions
* The catalog options
*/
public static Map<String, String> tableCommonOptions(Configuration catalogOptions) {
Configuration copied = new Configuration(catalogOptions);
copied.removeConfig(DEFAULT_DATABASE);
copied.removeConfig(CATALOG_PATH);
return copied.toMap();
}
| 3.26 |
hudi_WaitStrategyFactory_build_rdh
|
/**
* Build WaitStrategy for disruptor
*/
public static WaitStrategy build(String name) {
DisruptorWaitStrategyType strategyType = DisruptorWaitStrategyType.valueOf(name);
switch (strategyType) {
case BLOCKING_WAIT :
return new BlockingWaitStrategy();
case SLEEPING_WAIT :
return new SleepingWaitStrategy();
case YIELDING_WAIT :
return new YieldingWaitStrategy();
case BUSY_SPIN_WAIT :
return new BusySpinWaitStrategy();
default :
throw new HoodieException("Unsupported Executor Type " + name);
}
}
| 3.26 |
hudi_HoodieSparkTable_getMetadataWriter_rdh
|
/**
* Fetch instance of {@link HoodieTableMetadataWriter}.
*
* @return instance of {@link HoodieTableMetadataWriter}
*/
@Override
protected Option<HoodieTableMetadataWriter> getMetadataWriter(String triggeringInstantTimestamp, HoodieFailedWritesCleaningPolicy failedWritesCleaningPolicy) {
if (config.isMetadataTableEnabled()) {
// if any partition is deleted, we need to reload the metadata table writer so that new table configs are picked up
// to reflect the delete mdt partitions.
deleteMetadataIndexIfNecessary();
// Create the metadata table writer. First time after the upgrade this creation might trigger
// metadata table bootstrapping. Bootstrapping process could fail and checking the table
// existence after the creation is needed.
HoodieTableMetadataWriter metadataWriter = SparkHoodieBackedTableMetadataWriter.create(context.getHadoopConf().get(), config, failedWritesCleaningPolicy, context, Option.of(triggeringInstantTimestamp));
try {
if (isMetadataTableExists || metaClient.getFs().exists(new Path(HoodieTableMetadata.getMetadataTableBasePath(metaClient.getBasePath())))) {
isMetadataTableExists = true;
return Option.of(metadataWriter);
}
} catch (IOException e) {
throw new HoodieMetadataException("Checking existence of metadata table failed", e);
}
} else {
// if metadata is not enabled in the write config, we should try and delete it (if present)
maybeDeleteMetadataTable();
}
return Option.empty();
}
| 3.26 |
hudi_HashID_byteSize_rdh
|
/**
* Get this Hash size in bytes.
*
* @return Bytes needed to represent this size
*/
public int byteSize() {
return ((this.bits - 1) / Byte.SIZE) + 1;
}
| 3.26 |
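A quick worked check of the ceiling division used by byteSize() above, ((bits - 1) / Byte.SIZE) + 1: 32 bits fit exactly in 4 bytes, while a 33rd bit forces a fifth byte. The class name is just a scratch harness.

```java
public class ByteSizeCheck {
  static int byteSize(int bits) {
    return ((bits - 1) / Byte.SIZE) + 1; // same expression as the snippet above
  }

  public static void main(String[] args) {
    System.out.println(byteSize(32));  // 4  (exactly four bytes)
    System.out.println(byteSize(33));  // 5  (one extra bit still needs a fifth byte)
    System.out.println(byteSize(128)); // 16
  }
}
```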
hudi_HashID_bits_rdh
|
/**
* Get this Hash size in bits.
*
* @return bits needed to represent the size
*/
public int bits() {
return this.bits;
}
| 3.26 |
hudi_HashID_hash_rdh
|
/**
* Get the hash value for a byte array and for the desired @{@link Size}.
*
* @param messageBytes
* - Byte array message to get the hash value for
* @param bits
* - @{@link Size} of the hash value
* @return Hash value for the message as byte array
*/
public static byte[] hash(final byte[] messageBytes, final Size bits) {
switch (bits) {
case BITS_32 :
case BITS_64 :
return getXXHash(messageBytes, bits);
case BITS_128 :
return getMD5Hash(messageBytes);
default :
throw new IllegalArgumentException("Unexpected Hash size bits: " + bits);
}
}
| 3.26 |
hudi_TerminationStrategyUtils_createPostWriteTerminationStrategy_rdh
|
/**
* Create a PostWriteTerminationStrategy class via reflection,
* <br>
* if the class name of PostWriteTerminationStrategy is configured through the {@link HoodieStreamer.Config#postWriteTerminationStrategyClass}.
*/
public static Option<PostWriteTerminationStrategy> createPostWriteTerminationStrategy(TypedProperties properties, String postWriteTerminationStrategyClass) throws HoodieException {
try {
return StringUtils.isNullOrEmpty(postWriteTerminationStrategyClass) ? Option.empty() : Option.of(((PostWriteTerminationStrategy) (ReflectionUtils.loadClass(postWriteTerminationStrategyClass, properties))));
} catch (Throwable e) {
throw new HoodieException("Could not create PostWritTerminationStrategy class " + postWriteTerminationStrategyClass, e);
}
}
| 3.26 |
hudi_HoodieIndexUtils_tagAsNewRecordIfNeeded_rdh
|
/**
* Get tagged record for the passed in {@link HoodieRecord}.
*
* @param record
* instance of {@link HoodieRecord} for which tagging is requested
* @param location
* {@link HoodieRecordLocation} for the passed in {@link HoodieRecord}
* @return the tagged {@link HoodieRecord}
*/
public static <R> HoodieRecord<R> tagAsNewRecordIfNeeded(HoodieRecord<R> record, Option<HoodieRecordLocation> location) {
if (location.isPresent()) {
// When you have a record in multiple files in the same partition, then <row key, record> collection
// will have 2 entries with the same exact in memory copy of the HoodieRecord and the 2
// separate filenames that the record is found in. This will result in setting
// currentLocation 2 times and it will fail the second time. So creating a new in memory
// copy of the hoodie record.
HoodieRecord<R> newRecord = record.newInstance();
newRecord.unseal();
newRecord.setCurrentLocation(location.get());
newRecord.seal();
return newRecord;
} else {
return record;
}
}
| 3.26 |
hudi_HoodieIndexUtils_mergeIncomingWithExistingRecord_rdh
|
/**
* Merge the incoming record with the matching existing record loaded via {@link HoodieMergedReadHandle}. The existing record is the latest version in the table.
*/
private static <R> Option<HoodieRecord<R>> mergeIncomingWithExistingRecord(HoodieRecord<R> incoming, HoodieRecord<R> existing, Schema writeSchema, HoodieWriteConfig config, HoodieRecordMerger recordMerger) throws IOException {
Schema existingSchema = HoodieAvroUtils.addMetadataFields(new Schema.Parser().parse(config.getSchema()), config.allowOperationMetadataField());
Schema writeSchemaWithMetaFields = HoodieAvroUtils.addMetadataFields(writeSchema, config.allowOperationMetadataField());
// prepend the hoodie meta fields as the incoming record does not have them
HoodieRecord incomingPrepended = incoming.prependMetaFields(writeSchema, writeSchemaWithMetaFields, new MetadataValues().setRecordKey(incoming.getRecordKey()).setPartitionPath(incoming.getPartitionPath()), config.getProps());
// after prepend the meta fields, convert the record back to the original payload
HoodieRecord incomingWithMetaFields = incomingPrepended.wrapIntoHoodieRecordPayloadWithParams(writeSchema, config.getProps(), Option.empty(), config.allowOperationMetadataField(), Option.empty(), false, Option.empty());
Option<Pair<HoodieRecord, Schema>> mergeResult = recordMerger.merge(existing, existingSchema, incomingWithMetaFields, writeSchemaWithMetaFields, config.getProps());
if (mergeResult.isPresent()) {
// the merged record needs to be converted back to the original payload
HoodieRecord<R> merged = mergeResult.get().getLeft().wrapIntoHoodieRecordPayloadWithParams(writeSchemaWithMetaFields, config.getProps(), Option.empty(), config.allowOperationMetadataField(), Option.empty(), false, Option.of(writeSchema));
return Option.of(merged);
} else {
return Option.empty();
}
}
| 3.26 |
hudi_HoodieIndexUtils_mergeForPartitionUpdatesIfNeeded_rdh
|
/**
* Merge tagged incoming records with existing records in case of partition path updated.
*/
public static <R> HoodieData<HoodieRecord<R>> mergeForPartitionUpdatesIfNeeded(HoodieData<Pair<HoodieRecord<R>, Option<HoodieRecordGlobalLocation>>> incomingRecordsAndLocations, HoodieWriteConfig config, HoodieTable hoodieTable) {
// completely new records
HoodieData<HoodieRecord<R>> taggedNewRecords = incomingRecordsAndLocations.filter(p -> !p.getRight().isPresent()).map(Pair::getLeft);
// the records found in existing base files
HoodieData<HoodieRecord<R>> untaggedUpdatingRecords = incomingRecordsAndLocations.filter(p -> p.getRight().isPresent()).map(Pair::getLeft).distinctWithKey(HoodieRecord::getRecordKey, config.getGlobalIndexReconcileParallelism());
// the tagging partitions and locations
// NOTE: The incoming records may only differ in record position, however, for the purpose of
// merging in case of partition updates, it is safe to ignore the record positions.
HoodieData<HoodieRecordGlobalLocation> globalLocations = incomingRecordsAndLocations.filter(p -> p.getRight().isPresent()).map(p -> new HoodieRecordGlobalLocation(p.getRight().get().getPartitionPath(), p.getRight().get().getInstantTime(), p.getRight().get().getFileId())).distinct(config.getGlobalIndexReconcileParallelism());
// merged existing records with current locations being set
HoodieData<HoodieRecord<R>> existingRecords = getExistingRecords(globalLocations, config, hoodieTable);
final HoodieRecordMerger recordMerger = config.getRecordMerger();
HoodieData<HoodieRecord<R>> taggedUpdatingRecords = untaggedUpdatingRecords.mapToPair(r -> Pair.of(r.getRecordKey(), r)).leftOuterJoin(existingRecords.mapToPair(r -> Pair.of(r.getRecordKey(), r))).values().flatMap(entry -> {
HoodieRecord<R> incoming = entry.getLeft();
Option<HoodieRecord<R>> v23 = entry.getRight();
if (!v23.isPresent()) {
// existing record not found (e.g., due to delete log not merged to base file): tag as a new record
return Collections.singletonList(incoming).iterator();
}
HoodieRecord<R> existing = v23.get();
Schema writeSchema = new Schema.Parser().parse(config.getWriteSchema());
if (incoming.isDelete(writeSchema, config.getProps())) {
// incoming is a delete: force tag the incoming to the old partition
return Collections.singletonList(tagRecord(incoming.newInstance(existing.getKey()), existing.getCurrentLocation())).iterator();
}
Option<HoodieRecord<R>> mergedOpt = mergeIncomingWithExistingRecord(incoming, existing, writeSchema, config, recordMerger);
if (!mergedOpt.isPresent()) {
// merge resulted in delete: force tag the incoming to the old partition
return Collections.singletonList(tagRecord(incoming.newInstance(existing.getKey()), existing.getCurrentLocation())).iterator();
}
HoodieRecord<R> merged = mergedOpt.get();
if (Objects.equals(merged.getPartitionPath(), existing.getPartitionPath())) {
// merged record has the same partition: route the merged result to the current location as an update
return Collections.singletonList(tagRecord(merged, existing.getCurrentLocation())).iterator();
} else {
// merged record has a different partition: issue a delete to the old partition and insert the merged record to the new partition
HoodieRecord<R> deleteRecord = createDeleteRecord(config, existing.getKey());
return Arrays.asList(tagRecord(deleteRecord, existing.getCurrentLocation()), merged).iterator();
}
});
return taggedUpdatingRecords.union(taggedNewRecords);
}
| 3.26 |
hudi_HoodieIndexUtils_tagRecord_rdh
|
/**
* Tag the record to an existing location. Not creating any new instance.
*/
public static <R> HoodieRecord<R> tagRecord(HoodieRecord<R> record, HoodieRecordLocation location) {
record.unseal();
record.setCurrentLocation(location);
record.seal();
return record;
}
| 3.26 |
hudi_HoodieIndexUtils_checkIfValidCommit_rdh
|
/**
* Check if the given commit timestamp is valid for the timeline.
*
* The commit timestamp is considered to be valid if:
* 1. the commit timestamp is present in the timeline, or
* 2. the commit timestamp is less than the first commit timestamp in the timeline
*
* @param commitTimeline
* The timeline
* @param commitTs
* The commit timestamp to check
* @return true if the commit timestamp is valid for the timeline
*/
public static boolean checkIfValidCommit(HoodieTimeline commitTimeline, String commitTs) {
return (!commitTimeline.empty()) && commitTimeline.containsOrBeforeTimelineStarts(commitTs);
}
| 3.26 |
hudi_HoodieIndexUtils_getLatestBaseFilesForPartition_rdh
|
/**
* Fetches the latest {@link HoodieBaseFile}s for the partition of interest.
*
* @param partition
* Partition of interest
* @param hoodieTable
* Instance of {@link HoodieTable} of interest
* @return the list of {@link HoodieBaseFile}
*/
public static List<HoodieBaseFile> getLatestBaseFilesForPartition(String partition, HoodieTable hoodieTable) {
Option<HoodieInstant> latestCommitTime = hoodieTable.getMetaClient().getCommitsTimeline().filterCompletedInstants().lastInstant();
if (latestCommitTime.isPresent()) {
return hoodieTable.getBaseFileOnlyView().getLatestBaseFilesBeforeOrOn(partition, latestCommitTime.get().getTimestamp()).collect(toList());
}
return Collections.emptyList();
}
| 3.26 |
hudi_HoodieIndexUtils_getLatestBaseFilesForAllPartitions_rdh
|
/**
* Fetches Pair of partition path and {@link HoodieBaseFile}s for interested partitions.
*
* @param partitions
* list of partitions of interest
* @param context
* instance of {@link HoodieEngineContext} to use
* @param hoodieTable
* instance of {@link HoodieTable} of interest
* @return the list of Pairs of partition path and {@link HoodieBaseFile}
*/
public static List<Pair<String, HoodieBaseFile>> getLatestBaseFilesForAllPartitions(final List<String> partitions, final HoodieEngineContext context, final HoodieTable hoodieTable) {
context.setJobStatus(HoodieIndexUtils.class.getSimpleName(), "Load latest base files from all partitions: " + hoodieTable.getConfig().getTableName());
return context.flatMap(partitions, partitionPath -> {
List<Pair<String, HoodieBaseFile>> filteredFiles = getLatestBaseFilesForPartition(partitionPath, hoodieTable).stream().map(baseFile -> Pair.of(partitionPath, baseFile)).collect(toList());
return filteredFiles.stream();
}, Math.max(partitions.size(), 1));
}
| 3.26 |
hudi_HoodieIndexUtils_getLatestFileSlicesForPartition_rdh
|
/**
* Fetches the latest {@link FileSlice}s for the partition of interest.
*
* @param partition
* Partition of interest
* @param hoodieTable
* Instance of {@link HoodieTable} of interest
* @return the list of {@link FileSlice}
*/
public static List<FileSlice> getLatestFileSlicesForPartition(final String partition, final HoodieTable hoodieTable) {
Option<HoodieInstant> latestCommitTime = hoodieTable.getMetaClient().getCommitsTimeline().filterCompletedInstants().lastInstant();
if (latestCommitTime.isPresent()) {
return hoodieTable.getHoodieView().getLatestFileSlicesBeforeOrOn(partition, latestCommitTime.get().getTimestamp(), true).collect(toList());
}
return Collections.emptyList();
}
| 3.26 |
hudi_InternalSchemaUtils_collectTypeChangedCols_rdh
|
/**
* Collect all type changed cols to build a colPosition -> (newColType, oldColType) map.
* Only top-level col changes are collected. E.g. if a is a nested field (record(b int, d long)) and a.b is changed from int to long,
* only a will be collected; a.b will be excluded.
*
* @param schema
* a type changed internalSchema
* @param oldSchema
* an old internalSchema.
* @return a map.
*/
public static Map<Integer, Pair<Type, Type>> collectTypeChangedCols(InternalSchema schema, InternalSchema oldSchema) {
Set<Integer> ids = schema.getAllIds();
Set<Integer> otherIds = oldSchema.getAllIds();
Map<Integer, Pair<Type, Type>> result = new HashMap<>();
ids.stream().filter(f -> otherIds.contains(f)).forEach(f -> {
if (!schema.findType(f).equals(oldSchema.findType(f))) {
String[] fieldNameParts = schema.findFullName(f).split("\\.");
String[] v27 = oldSchema.findFullName(f).split("\\.");
String parentName = fieldNameParts[0];
String otherParentName = v27[0];
if ((fieldNameParts.length == v27.length) && (schema.findIdByName(parentName) == oldSchema.findIdByName(otherParentName))) {
int index = schema.findIdByName(parentName);
int position = schema.getRecord().fields().stream().map(s -> s.fieldId()).collect(Collectors.toList()).indexOf(index);
if (!result.containsKey(position)) {
result.put(position, Pair.of(schema.findType(parentName), oldSchema.findType(otherParentName)));
}
}
}
});
return result;
}
| 3.26 |
hudi_InternalSchemaUtils_pruneInternalSchemaByID_rdh
|
/**
* Create a projected internalSchema.
* Supports nested projection.
*
* @param schema
* an internal schema.
* @param fieldIds
* project col field_ids.
* @return a project internalSchema.
*/
public static InternalSchema pruneInternalSchemaByID(InternalSchema schema, List<Integer> fieldIds, List<Integer> topParentFieldIds) {
Types.RecordType recordType = ((Types.RecordType) (m0(schema.getRecord(), fieldIds)));
// reorder top parent fields, since the recordType.fields() produced by pruneType maybe out of order.
List<Types.Field> newFields = new ArrayList<>();
if ((topParentFieldIds != null) && (!topParentFieldIds.isEmpty())) {
for (int id : topParentFieldIds) {
Types.Field f = recordType.field(id);
if (f != null) {
newFields.add(f);
} else {
throw new HoodieSchemaException(String.format("cannot find pruned id %s in currentSchema %s",
id, schema.toString()));}
}
}
return new InternalSchema(newFields.isEmpty() ? recordType : Types.RecordType.get(newFields));
}
| 3.26 |
hudi_InternalSchemaUtils_reBuildFilterName_rdh
|
/**
* A helper function to help correct the colName of pushed filters.
*
* @param name
* origin col name from pushed filters.
* @param fileSchema
* the real schema of avro/parquet file.
* @param querySchema
* the query schema which query engine produced.
* @return a corrected name.
*/
public static String reBuildFilterName(String name, InternalSchema fileSchema, InternalSchema querySchema) {
int nameId = querySchema.findIdByName(name);
if (nameId == (-1)) {
throw new IllegalArgumentException(String.format("cannot find filter col name:%s from querySchema: %s", name, querySchema));
}
if (fileSchema.findField(nameId) == null) {
// added operation found
// the read file does not contain current col, so current colFilter is invalid
return "";
} else if (name.equals(fileSchema.findFullName(nameId))) {
// no change happened on current col
return name;
} else {
// find rename operation on current col
// return the name from fileSchema
return fileSchema.findFullName(nameId);
}
}
| 3.26 |
hudi_InternalSchemaUtils_m0_rdh
|
/**
* Project a hudi type by the projected cols' field_ids.
* This is an auxiliary function used by pruneInternalSchema.
*/
private static Type m0(Type type, List<Integer> fieldIds) {
switch (type.typeId()) {
case RECORD :
Types.RecordType record = ((Types.RecordType) (type));
List<Types.Field> fields = record.fields();
List<Type> newTypes = new ArrayList<>();
for (Types.Field f : fields) {
Type newType = m0(f.type(), fieldIds);
if (fieldIds.contains(f.fieldId())) {
newTypes.add(f.type());
} else if (newType != null) {
newTypes.add(newType);
} else {
newTypes.add(null);
}
}
boolean changed = false;
List<Field> newFields = new ArrayList<>();
for (int i = 0; i < fields.size(); i++) {
Types.Field oldField = fields.get(i);
Type newType = newTypes.get(i);
if (oldField.type() == newType) {
newFields.add(oldField);
} else if (newType != null) {
changed = true;
newFields.add(Types.Field.get(oldField.fieldId(), oldField.isOptional(), oldField.name(), newType, oldField.doc()));
}
}
if (newFields.isEmpty()) {
return null;
}
if ((newFields.size() == fields.size()) && (!changed)) {
return record;
} else {
return Types.RecordType.get(newFields);
}
case ARRAY :
Types.ArrayType array = ((Types.ArrayType) (type));
Type newElementType = m0(array.elementType(), fieldIds);
if (fieldIds.contains(array.elementId())) {
return array;
} else if (newElementType != null) {
if (array.elementType() == newElementType) {
return array;
}
return Types.ArrayType.get(array.elementId(), array.isElementOptional(), newElementType);
}
return null;
case MAP :
Types.MapType map = ((Types.MapType) (type));
Type newValueType = m0(map.valueType(), fieldIds);
if (fieldIds.contains(map.valueId())) {
return map;
} else if (newValueType != null) {
if (map.valueType() == newValueType) {
return map;
}
return Types.MapType.get(map.keyId(), map.valueId(), map.keyType(), newValueType, map.isValueOptional());
}
return null;
default :
return null;
}
}
| 3.26 |
hudi_InternalSchemaUtils_pruneInternalSchema_rdh
|
/**
* Create a projected internalSchema, based on the projected names produced by the query engine.
* Supports nested projection.
*
* @param schema
* an internal schema.
* @param names
* project names produced by query engine.
* @return a project internalSchema.
*/
public static InternalSchema pruneInternalSchema(InternalSchema schema, List<String> names) {
// do check
List<Integer> prunedIds = names.stream().map(name -> {
int id = schema.findIdByName(name);
if (id == (-1)) {
throw new IllegalArgumentException(String.format("cannot prune col: %s which does not exist in hudi table", name));
}
return id;
}).collect(Collectors.toList());
// find top parent field ID. eg: a.b.c, f.g.h, only collect id of a and f ignore all child field.
List<Integer> topParentFieldIds = new ArrayList<>();
names.stream().forEach(f -> {
int id = schema.findIdByName(f.split("\\.")[0]);
if (!topParentFieldIds.contains(id)) {
topParentFieldIds.add(id);
}
});
return pruneInternalSchemaByID(schema, prunedIds, topParentFieldIds);
}
| 3.26 |
hudi_InternalSchemaUtils_searchSchema_rdh
|
/**
* Search target internalSchema by version number.
*
* @param versionId
* the internalSchema version to search for.
* @param treeMap
* the collection of internalSchemas to search.
* @return a internalSchema.
*/
public static InternalSchema searchSchema(long versionId, TreeMap<Long, InternalSchema> treeMap) {
if (treeMap.containsKey(versionId)) {
return treeMap.get(versionId);
} else {
SortedMap<Long, InternalSchema> headMap = treeMap.headMap(versionId);
if (!headMap.isEmpty()) {
return headMap.get(headMap.lastKey());
}
}
return InternalSchema.getEmptyInternalSchema();
}
| 3.26 |
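The lookup above is a floor-style search: an exact version wins, otherwise the closest smaller version is returned, with the empty schema as the fallback. A self-contained illustration of the same semantics using plain Java collections (string values stand in for InternalSchema objects):

import java.util.SortedMap;
import java.util.TreeMap;

public class FloorLookupExample {
    static String search(long versionId, TreeMap<Long, String> schemas) {
        if (schemas.containsKey(versionId)) {
            return schemas.get(versionId);           // exact version match
        }
        SortedMap<Long, String> headMap = schemas.headMap(versionId);
        if (!headMap.isEmpty()) {
            return headMap.get(headMap.lastKey());   // closest version strictly below the requested one
        }
        return "EMPTY";                              // stand-in for InternalSchema.getEmptyInternalSchema()
    }

    public static void main(String[] args) {
        TreeMap<Long, String> schemas = new TreeMap<>();
        schemas.put(100L, "v100");
        schemas.put(200L, "v200");
        System.out.println(search(200L, schemas)); // v200 (exact match)
        System.out.println(search(150L, schemas)); // v100 (floor)
        System.out.println(search(50L, schemas));  // EMPTY (nothing at or below 50)
    }
}

TreeMap.floorEntry(versionId) would express the same lookup in a single call.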
hudi_InternalSchemaUtils_collectRenameCols_rdh
|
/**
 * Tries to find all renamed columns between oldSchema and newSchema.
 *
 * @param oldSchema
 * the old schema
 * @param newSchema
 * the new schema, which was modified from oldSchema
 * @return a rename map with entries (k, v) -> (colNameFromNewSchema, colNameLastPartFromOldSchema)
*/
public static Map<String, String> collectRenameCols(InternalSchema oldSchema, InternalSchema newSchema) {
List<String> colNamesFromWriteSchema = oldSchema.getAllColsFullName();
return colNamesFromWriteSchema.stream().filter(f -> {
int fieldIdFromWriteSchema = oldSchema.findIdByName(f);
            // find the cols which have the same id but a different colName
            return newSchema.getAllIds().contains(fieldIdFromWriteSchema) && (!newSchema.findFullName(fieldIdFromWriteSchema).equalsIgnoreCase(f));
        }).collect(Collectors.toMap(e -> newSchema.findFullName(oldSchema.findIdByName(e)), e -> {
            int lastDotIdx = e.lastIndexOf(".");
            return e.substring(lastDotIdx == -1 ? 0 : lastDotIdx + 1);
}));
}
| 3.26 |
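To make the returned map shape concrete: if the old schema has address.streetName and the new schema renames that field (same id) to address.street, collectRenameCols yields the entry "address.street" -> "streetName". A standalone sketch of the last-segment extraction used for the map values (column names are invented):

public class RenameMapExample {
    // Mirrors the suffix extraction above: keep only the part after the last dot.
    static String lastSegment(String fullName) {
        int lastDot = fullName.lastIndexOf('.');
        return fullName.substring(lastDot == -1 ? 0 : lastDot + 1);
    }

    public static void main(String[] args) {
        System.out.println(lastSegment("address.streetName")); // streetName
        System.out.println(lastSegment("id"));                 // id (no dot, whole name kept)
    }
}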
hudi_SchemaRegistryProvider_fetchSchemaFromRegistry_rdh
|
/**
* The method takes the provided url {@code registryUrl} and gets the schema from the schema registry using that url.
 * If the caller provides userInfo credentials in the url (e.g. "https://foo:[email protected]") then the credentials
 * are extracted from the url using a Matcher and set on the request as an Authorization
 * header.
*
* @param registryUrl
* @return the Schema in String form.
* @throws IOException
*/
public String fetchSchemaFromRegistry(String registryUrl) throws IOException {
URL registry;
HttpURLConnection connection;
    Matcher matcher = Pattern.compile("://(.*?)@").matcher(registryUrl);
    if (matcher.find()) {
        String creds = matcher.group(1);
        String urlWithoutCreds = registryUrl.replace(creds + "@", "");
        registry = new URL(urlWithoutCreds);
        connection = ((HttpURLConnection) (registry.openConnection()));
        setAuthorizationHeader(creds, connection);
    } else {
        registry = new URL(registryUrl);
        connection = ((HttpURLConnection) (registry.openConnection()));
}
ObjectMapper mapper = new ObjectMapper();
JsonNode node = mapper.readTree(getStream(connection));
return node.get("schema").asText();
}
| 3.26 |
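The credential handling above hinges on one regex: everything between "://" and "@" is treated as userInfo, used for the Authorization header, and stripped from the URL before connecting. A standalone sketch of just that extraction (the URL and credentials are invented):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class RegistryUrlCredsExample {
    public static void main(String[] args) {
        String registryUrl = "https://foo:[email protected]/subjects/t/versions/latest";
        Matcher matcher = Pattern.compile("://(.*?)@").matcher(registryUrl);
        if (matcher.find()) {
            String creds = matcher.group(1);                         // "foo:bar"
            String urlWithoutCreds = registryUrl.replace(creds + "@", "");
            System.out.println(creds);            // would be turned into an Authorization header
            System.out.println(urlWithoutCreds);  // https://localhost:8081/subjects/t/versions/latest
        }
    }
}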
hudi_HoodieAvroFileReaderBase_getRecordIterator_rdh
|
/**
* Base class for every {@link HoodieAvroFileReader}
 */
abstract class HoodieAvroFileReaderBase implements HoodieAvroFileReader {
  @Override
  public ClosableIterator<HoodieRecord<IndexedRecord>> getRecordIterator(Schema readerSchema, Schema requestedSchema) throws IOException {
    ClosableIterator<IndexedRecord> iterator = getIndexedRecordIterator(readerSchema, requestedSchema);
    return new CloseableMappingIterator<>(iterator, data -> unsafeCast(new HoodieAvroIndexedRecord(data)));
}
| 3.26 |
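getRecordIterator above is a thin adapter: it wraps one closable iterator in another, mapping each element lazily and delegating close() to the source. A library-free sketch of that wrapping pattern (the interface here is a stand-in, not Hudi's ClosableIterator):

import java.util.Iterator;
import java.util.function.Function;

public class MappingIteratorExample {
    // Minimal stand-in for a closable iterator.
    interface CloseableIterator<T> extends Iterator<T>, AutoCloseable {
        @Override
        void close();
    }

    // Wraps a source iterator and applies a mapping function to each element on the fly.
    static <I, O> CloseableIterator<O> map(CloseableIterator<I> source, Function<I, O> fn) {
        return new CloseableIterator<O>() {
            @Override public boolean hasNext() { return source.hasNext(); }
            @Override public O next() { return fn.apply(source.next()); }
            @Override public void close() { source.close(); }
        };
    }
}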
hudi_HoodieAvroDataBlock_getBlock_rdh
|
/**
* This method is retained to provide backwards compatibility to HoodieArchivedLogs which were written using
* HoodieLogFormat V1.
*/
@Deprecated
public static HoodieAvroDataBlock getBlock(byte[] content, Schema readerSchema, InternalSchema internalSchema) throws IOException {
    SizeAwareDataInputStream dis = new SizeAwareDataInputStream(new DataInputStream(new ByteArrayInputStream(content)));
// 1. Read the schema written out
int schemaLength = dis.readInt();
byte[] compressedSchema = new byte[schemaLength];
dis.readFully(compressedSchema, 0, schemaLength);
Schema writerSchema = new Schema.Parser().parse(decompress(compressedSchema));
if (readerSchema == null) {
readerSchema = writerSchema;
}
if (!internalSchema.isEmptySchema()) {
readerSchema = writerSchema;
}
GenericDatumReader<IndexedRecord> reader = new GenericDatumReader<>(writerSchema, readerSchema);
// 2. Get the total records
int totalRecords = dis.readInt();
List<HoodieRecord> records = new ArrayList<>(totalRecords);
// 3. Read the content
for (int i = 0; i < totalRecords; i++) {
int recordLength = dis.readInt();
Decoder decoder = DecoderFactory.get().binaryDecoder(content, dis.getNumberOfBytesRead(), recordLength, null);
IndexedRecord record = reader.read(null, decoder);
records.add(new HoodieAvroIndexedRecord(record));
dis.skipBytes(recordLength);
}
dis.close();
    return new HoodieAvroDataBlock(records, readerSchema);
}
| 3.26 |
hudi_HoodieAvroDataBlock_deserializeRecords_rdh
|
// TODO (na) - Break down content into smaller chunks of byte [] to be GC as they are used
@Override
protected <T> ClosableIterator<HoodieRecord<T>> deserializeRecords(byte[] content, HoodieRecordType type) throws IOException {
checkState(this.readerSchema != null, "Reader's schema has to be non-null");
checkArgument(type != HoodieRecordType.SPARK, "Not support read avro to spark record");
    // TODO AvroSparkReader need
    RecordIterator recordIterator = RecordIterator.getInstance(this, content);
    return new CloseableMappingIterator<>(recordIterator, data -> ((HoodieRecord<T>) (new HoodieAvroIndexedRecord(data))));
}
| 3.26 |
hudi_FileSlice_getLatestInstantTime_rdh
|
/**
* Returns the latest instant time of the file slice.
*/
public String getLatestInstantTime() {
    Option<String> latestDeltaCommitTime = getLatestLogFile().map(HoodieLogFile::getDeltaCommitTime);
    return latestDeltaCommitTime.isPresent()
        ? HoodieTimeline.maxInstant(latestDeltaCommitTime.get(), getBaseInstantTime())
        : getBaseInstantTime();
}
| 3.26 |
hudi_FileSlice_isEmpty_rdh
|
/**
* Returns true if there is no data file and no log files. Happens as part of pending compaction.
*/
public boolean isEmpty() {
    return (baseFile == null) && logFiles.isEmpty();
}
| 3.26 |
hudi_ArrayUtils_toPrimitive_rdh
|
// Long array converters
// ----------------------------------------------------------------------
/**
* <p>Converts an array of object Longs to primitives.</p>
*
* <p>This method returns {@code null} for a {@code null} input array.</p>
*
* @param array
* a {@code Long} array, may be {@code null}
* @return a {@code long} array, {@code null} if null array input
* @throws NullPointerException
* if array content is {@code null}
*/
public static long[] toPrimitive(Long[] array) {
if (array == null) {
return null;
} else if (array.length == 0) {
return EMPTY_LONG_ARRAY;
}
final long[] result = new long[array.length];
for (int i = 0; i < array.length; i++) {
result[i] = array[i].longValue();
}
return result;
}
| 3.26 |
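A short demonstration of the unboxing contract documented above: null input passes through, an empty array maps to the shared empty constant, and a null element fails with NullPointerException. The import path for ArrayUtils is an assumption:

import java.util.Arrays;
import org.apache.hudi.common.util.collection.ArrayUtils; // package path assumed; adjust to the actual location

public class ToPrimitiveExample {
    public static void main(String[] args) {
        System.out.println(Arrays.toString(ArrayUtils.toPrimitive(new Long[] {1L, 2L, 3L}))); // [1, 2, 3]
        System.out.println(ArrayUtils.toPrimitive((Long[]) null));                            // null
        System.out.println(ArrayUtils.toPrimitive(new Long[0]).length);                       // 0
        // ArrayUtils.toPrimitive(new Long[] {1L, null}) would throw NullPointerException.
    }
}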
hudi_HoodieOperation_isInsert_rdh
|
/**
* Returns whether the operation is INSERT.
*/
public static boolean isInsert(HoodieOperation operation) {
return operation == INSERT;
}
| 3.26 |
hudi_HoodieOperation_isUpdateAfter_rdh
|
/**
* Returns whether the operation is UPDATE_AFTER.
*/
public static boolean isUpdateAfter(HoodieOperation operation) {
return operation == UPDATE_AFTER;
}
| 3.26 |
hudi_HoodieOperation_isUpdateBefore_rdh
|
/**
* Returns whether the operation is UPDATE_BEFORE.
*/
public static boolean isUpdateBefore(HoodieOperation operation) {
return operation == UPDATE_BEFORE;
}
| 3.26 |
hudi_HoodieOperation_isDelete_rdh
|
/**
* Returns whether the operation is DELETE.
*/
public static boolean isDelete(HoodieOperation operation) {
    return operation == DELETE;
}
| 3.26 |
hudi_HoodieReaderContext_updateSchemaAndResetOrderingValInMetadata_rdh
|
/**
 * Updates the schema and resets the ordering value in the existing metadata mapping of a record.
*
* @param meta
* Metadata in a mapping.
* @param schema
* New schema to set.
* @return The input metadata mapping.
*/
public Map<String, Object> updateSchemaAndResetOrderingValInMetadata(Map<String, Object> meta, Schema schema) {
meta.remove(INTERNAL_META_ORDERING_FIELD);
meta.put(INTERNAL_META_SCHEMA, schema);
return meta;
}
| 3.26 |
hudi_HoodieReaderContext_generateMetadataForRecord_rdh
|
/**
* Generates metadata of the record. Only fetches record key that is necessary for merging.
*
* @param record
* The record.
* @param schema
* The Avro schema of the record.
* @return A mapping containing the metadata.
*/
public Map<String, Object> generateMetadataForRecord(T record, Schema schema) {
Map<String, Object> meta = new HashMap<>();
meta.put(INTERNAL_META_RECORD_KEY, getRecordKey(record, schema));
meta.put(INTERNAL_META_SCHEMA, schema);
return meta;
}
| 3.26 |
hudi_InternalSchemaChangeApplier_applyRenameChange_rdh
|
/**
 * Renames a column of the hudi table.
 *
 * @param colName
 * col name to be renamed. if we want to rename a col inside a nested field, the full name should be specified
 * @param newName
 * new name for the current col. no need to specify the full name.
*/
public InternalSchema applyRenameChange(String colName, String newName) {
TableChanges.ColumnUpdateChange updateChange = TableChanges.ColumnUpdateChange.get(latestSchema);
updateChange.renameColumn(colName, newName);
return SchemaChangeUtils.applyTableChanges2Schema(latestSchema, updateChange);
}
| 3.26 |
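A sketch of driving a rename through the applier above, assuming the applier is constructed from the table's latest InternalSchema; the constructor, package paths, and column names are assumptions, not verified against a specific Hudi release:

import org.apache.hudi.internal.schema.InternalSchema;
import org.apache.hudi.internal.schema.action.InternalSchemaChangeApplier; // package path assumed

public class RenameColumnExample {
    public static InternalSchema renameCity(InternalSchema latestSchema) {
        InternalSchemaChangeApplier applier = new InternalSchemaChangeApplier(latestSchema); // assumed constructor
        // Nested column: the full dotted name identifies the column, the new name is just the leaf.
        return applier.applyRenameChange("address.city", "town");
    }
}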
hudi_InternalSchemaChangeApplier_applyReOrderColPositionChange_rdh
|
/**
 * Reorders the position of a column.
 *
 * @param colName
 * column to be reordered. if the col lives inside a nested field, the full name should be specified.
 * @param referColName
 * the reference column for the new position.
 * @param positionType
 * col position change type. currently three change types are supported: first/after/before
*/
public InternalSchema applyReOrderColPositionChange(String colName, String referColName, TableChange.ColumnPositionChange.ColumnPositionType positionType) {
TableChanges.ColumnUpdateChange updateChange = TableChanges.ColumnUpdateChange.get(latestSchema);
String parentName = TableChangesHelper.getParentName(colName);
String referParentName = TableChangesHelper.getParentName(referColName);
if (positionType.equals(ColumnPositionType.FIRST)) {
updateChange.addPositionChange(colName, "", positionType);
} else if (parentName.equals(referParentName)) {
        updateChange.addPositionChange(colName, referColName, positionType);
    } else {
        throw new IllegalArgumentException("cannot reorder two columns which has different parent");
}
return SchemaChangeUtils.applyTableChanges2Schema(latestSchema, updateChange);
}
| 3.26 |
hudi_InternalSchemaChangeApplier_applyColumnCommentChange_rdh
|
/**
 * Updates a column comment of the hudi table.
 *
 * @param colName
 * col name to be changed. if the col lives inside a nested field, the full name should be specified
 * @param doc
 * the new comment for the column.
*/
public InternalSchema applyColumnCommentChange(String colName, String doc) {
TableChanges.ColumnUpdateChange updateChange = TableChanges.ColumnUpdateChange.get(latestSchema);
updateChange.updateColumnComment(colName, doc);
return SchemaChangeUtils.applyTableChanges2Schema(latestSchema, updateChange);
}
| 3.26 |
hudi_InternalSchemaChangeApplier_applyColumnTypeChange_rdh
|
/**
 * Updates a column type of the hudi table.
 *
 * @param colName
 * col name to be changed. if the col lives inside a nested field, the full name should be specified
 * @param newType
 * the new type for the column.
*/
public InternalSchema applyColumnTypeChange(String colName, Type newType) {
TableChanges.ColumnUpdateChange updateChange = TableChanges.ColumnUpdateChange.get(latestSchema);
updateChange.updateColumnType(colName, newType);
return SchemaChangeUtils.applyTableChanges2Schema(latestSchema, updateChange);
}
| 3.26 |
hudi_InternalSchemaChangeApplier_applyDeleteChange_rdh
|
/**
 * Deletes columns from the table.
 *
 * @param colNames
 * col names to be deleted. if a col lives inside a nested field, the full name should be specified
*/
public InternalSchema applyDeleteChange(String... colNames) {
TableChanges.ColumnDeleteChange delete = TableChanges.ColumnDeleteChange.get(latestSchema);
Arrays.stream(colNames).forEach(colName -> delete.deleteColumn(colName));
return SchemaChangeUtils.applyTableChanges2Schema(latestSchema, delete);
}
| 3.26 |
hudi_InternalSchemaChangeApplier_applyAddChange_rdh
|
/**
 * Adds columns to the table.
 *
 * @param colName
 * col name to be added. if we want to add a col inside a nested field, the full name should be specified
 * @param colType
 * col type to be added.
 * @param doc
 * col doc to be added.
 * @param position
 * the reference column for the new col's position
 * @param positionType
 * col position change type. currently three change types are supported: first/after/before
*/
public InternalSchema applyAddChange(String colName, Type colType, String doc, String position, TableChange.ColumnPositionChange.ColumnPositionType positionType) {
    TableChanges.ColumnAddChange add = TableChanges.ColumnAddChange.get(latestSchema);
String parentName = TableChangesHelper.getParentName(colName);
String leafName = TableChangesHelper.getLeafName(colName);
add.addColumns(parentName, leafName, colType, doc);
if (positionType != null) {
switch (positionType) {
case NO_OPERATION :
break;
case FIRST :
add.addPositionChange(colName, "", positionType);
break;
            case AFTER :
            case BEFORE :
if ((position == null) || position.isEmpty()) {
throw new IllegalArgumentException("position should not be null/empty_string when specify positionChangeType as after/before");
}
                String referParentName = TableChangesHelper.getParentName(position);
                if (!parentName.equals(referParentName)) {
throw new IllegalArgumentException("cannot reorder two columns which has different parent");
}
add.addPositionChange(colName, position, positionType);
break;
default :
throw new IllegalArgumentException(String.format("only support first/before/after but found: %s", positionType));
        }
    } else {
throw new IllegalArgumentException(String.format("positionType should be specified"));
}
return SchemaChangeUtils.applyTableChanges2Schema(latestSchema, add);
}
| 3.26 |
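A sketch of adding a nested column with an explicit position via applyAddChange above. The Types factory and the position enum mirror names already used in this listing; the import paths, the applier constructor, and the column names are assumptions:

import org.apache.hudi.internal.schema.InternalSchema;
import org.apache.hudi.internal.schema.Types;
import org.apache.hudi.internal.schema.action.InternalSchemaChangeApplier; // package path assumed
import org.apache.hudi.internal.schema.action.TableChange;                 // package path assumed

public class AddColumnExample {
    public static InternalSchema addZipCode(InternalSchema latestSchema) {
        InternalSchemaChangeApplier applier = new InternalSchemaChangeApplier(latestSchema); // assumed constructor
        // Add address.zip as a string column placed right after address.city.
        // Both columns must share the same parent ("address"), otherwise the position is rejected.
        return applier.applyAddChange(
            "address.zip",
            Types.StringType.get(),   // assumed factory method for the string type
            "postal code",
            "address.city",
            TableChange.ColumnPositionChange.ColumnPositionType.AFTER);
    }
}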
hudi_InternalSchemaChangeApplier_applyColumnNullabilityChange_rdh
|
/**
 * Updates the nullability of a column of the hudi table.
 *
 * @param colName
 * col name to be changed. if the col lives inside a nested field, the full name should be specified
 * @param nullable
 * whether the column should be nullable.
*/
public InternalSchema applyColumnNullabilityChange(String colName, boolean nullable) {
TableChanges.ColumnUpdateChange updateChange = TableChanges.ColumnUpdateChange.get(latestSchema);
updateChange.updateColumnNullability(colName, nullable);
return SchemaChangeUtils.applyTableChanges2Schema(latestSchema, updateChange);
}
| 3.26 |
hudi_CopyOnWriteInputFormat_acceptFile_rdh
|
/**
* A simple hook to filter files and directories from the input.
* The method may be overridden. Hadoop's FileInputFormat has a similar mechanism and applies the
* same filters by default.
*
* @param fileStatus
* The file status to check.
* @return true, if the given file or directory is accepted
*/
public boolean acceptFile(FileStatus fileStatus) {
final String name = fileStatus.getPath().getName();
return ((!name.startsWith("_")) && (!name.startsWith("."))) && (!f0.filterPath(new Path(fileStatus.getPath().toUri())));
}
| 3.26 |
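The filter above rejects hidden and metadata files by name prefix before applying the configured path filter. A library-free illustration of just the prefix rule (file names are invented):

import java.util.Arrays;
import java.util.List;

public class FilePrefixFilterExample {
    // Same prefix rule as acceptFile, without the PathFilter part.
    static boolean acceptedByName(String name) {
        return !name.startsWith("_") && !name.startsWith(".");
    }

    public static void main(String[] args) {
        List<String> names = Arrays.asList(
            "part-0001.parquet",            // accepted
            "_SUCCESS",                     // rejected: underscore-prefixed marker file
            ".hoodie_partition_metadata",   // rejected: dot-prefixed metadata file
            "data.log");                    // accepted
        names.forEach(n -> System.out.println(n + " -> " + acceptedByName(n)));
    }
}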
hudi_CopyOnWriteInputFormat_addFilesInDir_rdh
|
/**
* Enumerate all files in the directory and recursive if enumerateNestedFiles is true.
*
* @return the total length of accepted files.
*/
private long addFilesInDir(Path path, List<FileStatus> files, boolean logExcludedFiles) throws IOException {
    final Path hadoopPath = new Path(path.toUri());
final FileSystem fs = FSUtils.getFs(hadoopPath.toString(), this.conf.conf());
long length = 0;
for (FileStatus dir : fs.listStatus(hadoopPath)) {
if (dir.isDirectory()) {
if (acceptFile(dir) && enumerateNestedFiles) {
length += addFilesInDir(dir.getPath(), files, logExcludedFiles);
} else if (logExcludedFiles && LOG.isDebugEnabled()) {
LOG.debug(("Directory " + dir.getPath().toString()) + " did not pass the file-filter and is excluded.");}
} else if (acceptFile(dir)) {
files.add(dir);
length += dir.getLen();
testForUnsplittable(dir);
} else if (logExcludedFiles && LOG.isDebugEnabled()) {
LOG.debug(("Directory " +
dir.getPath().toString()) + " did not pass the file-filter and is excluded.");
}
}
return length;
}
| 3.26 |
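The enumeration above is a standard recursive walk: filter on the way down, recurse into accepted directories, and accumulate the lengths of accepted files. A compact java.nio sketch of the same shape, independent of the Hadoop FileSystem API used in the snippet:

import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;

public class DirSizeWalkExample {
    // Recursively sums the sizes of accepted files, descending into accepted directories.
    static long addFilesInDir(Path dir, boolean recurse) throws IOException {
        long length = 0;
        try (DirectoryStream<Path> entries = Files.newDirectoryStream(dir)) {
            for (Path entry : entries) {
                String name = entry.getFileName().toString();
                boolean accepted = !name.startsWith("_") && !name.startsWith(".");
                if (Files.isDirectory(entry)) {
                    if (accepted && recurse) {
                        length += addFilesInDir(entry, true);
                    }
                } else if (accepted) {
                    length += Files.size(entry);
                }
            }
        }
        return length;
    }
}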
hudi_CopyOnWriteInputFormat_getBlockIndexForPosition_rdh
|
/**
* Retrieves the index of the <tt>BlockLocation</tt> that contains the part of the file described by the given
* offset.
*
* @param blocks
* The different blocks of the file. Must be ordered by their offset.
* @param offset
* The offset of the position in the file.
* @param startIndex
* The earliest index to look at.
* @return The index of the block containing the given position.
*/
private int getBlockIndexForPosition(BlockLocation[] blocks, long offset, long halfSplitSize, int startIndex) {
// go over all indexes after the startIndex
    for (int i = startIndex; i < blocks.length; i++) {
long blockStart = blocks[i].getOffset();
long blockEnd = blockStart + blocks[i].getLength();
if ((offset >= blockStart) && (offset < blockEnd)) {
// got the block where the split starts
// check if the next block contains more than this one does
if ((i < (blocks.length - 1)) && ((blockEnd - offset) < halfSplitSize)) {
return i + 1;
} else {
return i;
}
}
    }
    throw new IllegalArgumentException("The given offset is not contained in the any block.");
}
| 3.26 |
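The lookup above has one subtlety: if a split starts inside a block but less than half a split worth of data remains in that block, the split is assigned to the next block. A standalone version of the same rule over plain offsets (block boundaries are made up):

public class BlockIndexExample {
    // blockStarts[i]/blockLens[i] describe block i; blocks must be ordered by offset.
    static int blockIndexForPosition(long[] blockStarts, long[] blockLens, long offset, long halfSplitSize) {
        for (int i = 0; i < blockStarts.length; i++) {
            long blockStart = blockStarts[i];
            long blockEnd = blockStart + blockLens[i];
            if (offset >= blockStart && offset < blockEnd) {
                // Prefer the next block when this one holds less than half a split of remaining data.
                boolean littleLeftHere = (blockEnd - offset) < halfSplitSize;
                return (i < blockStarts.length - 1 && littleLeftHere) ? i + 1 : i;
            }
        }
        throw new IllegalArgumentException("The given offset is not contained in any block.");
    }

    public static void main(String[] args) {
        long[] starts = {0L, 128L, 256L};
        long[] lens = {128L, 128L, 128L};
        System.out.println(blockIndexForPosition(starts, lens, 100L, 64L)); // 1: only 28 bytes left in block 0
        System.out.println(blockIndexForPosition(starts, lens, 130L, 64L)); // 1: plenty of block 1 remains
    }
}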
hudi_ConsistentBucketIndexBulkInsertPartitionerWithRows_prepareRepartition_rdh
|
/**
* Prepare consistent hashing metadata for repartition
*
* @param rows
* input records
*/
private void prepareRepartition(JavaRDD<Row> rows) {
this.partitionToIdentifier = initializeBucketIdentifier(rows);
this.partitionToFileIdPfxIdxMap = ConsistentBucketIndexUtils.generatePartitionToFileIdPfxIdxMap(partitionToIdentifier);
partitionToIdentifier.values().forEach(identifier -> {
fileIdPfxList.addAll(identifier.getNodes().stream().map(ConsistentHashingNode::getFileIdPrefix).collect(Collectors.toList()));
});
}
| 3.26 |
hudi_JsonEncoder_configure_rdh
|
/**
* Reconfigures this JsonEncoder to output to the JsonGenerator provided.
* <p/>
* If the JsonGenerator provided is null, a NullPointerException is thrown.
* <p/>
* Otherwise, this JsonEncoder will flush its current output and then
* reconfigure its output to use the provided JsonGenerator.
*
* @param generator
* The JsonGenerator to direct output to. Cannot be null.
* @return this JsonEncoder
* @throws IOException
* @throws NullPointerException
* if {@code generator} is {@code null}
*/
private JsonEncoder configure(JsonGenerator generator) throws IOException {
Objects.requireNonNull(generator, "JsonGenerator cannot be null");
if (null != parser) {
flush();
}
this.out = generator;
return this;
}
| 3.26 |
hudi_JsonEncoder_getJsonGenerator_rdh
|
// by default, one object per line.
// with pretty option use default pretty printer with root line separator.
private static JsonGenerator getJsonGenerator(OutputStream out, Set<JsonOptions> options) throws IOException {
    Objects.requireNonNull(out, "OutputStream cannot be null");
    JsonGenerator g = new JsonFactory().createJsonGenerator(out, JsonEncoding.UTF8);
if (options.contains(JsonOptions.NoFlushStream)) {
g = g.configure(Feature.FLUSH_PASSED_TO_STREAM, false);
}
MinimalPrettyPrinter pp = new MinimalPrettyPrinter();
pp.setRootValueSeparator(LINE_SEPARATOR);
g.setPrettyPrinter(pp);
return g;
}
| 3.26 |
hudi_HoodieMetaserverBasedTimeline_getInstantFileName_rdh
|
/**
* Completion time is essential for {@link HoodieActiveTimeline},
* TODO [HUDI-6883] We should change HoodieMetaserverBasedTimeline to store completion time as well.
*/
@Override
protected String getInstantFileName(HoodieInstant instant) {
if (instant.isCompleted()) {
// Set a fake completion time.
return instant.getFileName("0").replace("_0", "");
}
return instant.getFileName();
}
| 3.26 |
hudi_HDFSParquetImporterUtils_load_rdh
|
/**
* Imports records to Hoodie table.
*
* @param client
* Hoodie Client
* @param instantTime
* Instant Time
* @param hoodieRecords
* Hoodie Records
* @param <T>
* Type
*/
public <T extends HoodieRecordPayload> JavaRDD<WriteStatus> load(SparkRDDWriteClient<T> client, String instantTime, JavaRDD<HoodieRecord<T>> hoodieRecords) {
switch (this.command.toLowerCase()) {
case "upsert" :
{
return client.upsert(hoodieRecords, instantTime);
}
case "bulkinsert" :
{
                return client.bulkInsert(hoodieRecords, instantTime);
            }
default :
{
return client.insert(hoodieRecords, instantTime);
}
}
}
| 3.26 |
hudi_HDFSParquetImporterUtils_parseSchema_rdh
|
/**
* Parse Schema from file.
*
* @param fs
* File System
* @param schemaFile
* Schema File
*/
public static String parseSchema(FileSystem fs, String schemaFile) throws Exception {
// Read schema file.
Path p = new Path(schemaFile);
if (!fs.exists(p)) {
throw new Exception(String.format("Could not find - %s - schema file.", schemaFile));
}
long len = fs.getFileStatus(p).getLen();
ByteBuffer buf = ByteBuffer.allocate(((int) (len)));
try (FSDataInputStream inputStream = fs.open(p)) {
inputStream.readFully(0, buf.array(), 0, buf.array().length);
}
return new String(buf.array(), StandardCharsets.UTF_8);
}
| 3.26 |
hudi_HDFSParquetImporterUtils_createHoodieClient_rdh
|
/**
* Build Hoodie write client.
*
* @param jsc
* Java Spark Context
* @param basePath
* Base Path
* @param schemaStr
* Schema
* @param parallelism
* Parallelism
*/
public static SparkRDDWriteClient<HoodieRecordPayload> createHoodieClient(JavaSparkContext jsc, String basePath, String schemaStr,
int parallelism, Option<String> compactionStrategyClass, TypedProperties properties) {
HoodieCompactionConfig compactionConfig = compactionStrategyClass.map(strategy -> HoodieCompactionConfig.newBuilder().withInlineCompaction(false).withCompactionStrategy(ReflectionUtils.loadClass(strategy)).build()).orElse(HoodieCompactionConfig.newBuilder().withInlineCompaction(false).build());
HoodieWriteConfig config = HoodieWriteConfig.newBuilder().withPath(basePath).withParallelism(parallelism, parallelism).withBulkInsertParallelism(parallelism).withDeleteParallelism(parallelism).withSchema(schemaStr).combineInput(true, true).withCompactionConfig(compactionConfig).withIndexConfig(HoodieIndexConfig.newBuilder().withIndexType(IndexType.BLOOM).build()).withProps(properties).build();
return new SparkRDDWriteClient<>(new HoodieSparkEngineContext(jsc), config);
}
| 3.26 |
hudi_BufferedRandomAccessFile_spaceAvailableInBuffer_rdh
|
/**
*
* @return - whether space is available at the end of the buffer.
*/
private boolean spaceAvailableInBuffer() {
return this.isEOF && (this.validLastPosition < this.endPosition());
}
| 3.26 |
hudi_BufferedRandomAccessFile_m1_rdh
|
/**
* Given a byte array, offset in the array and length of bytes to be written,
* update the buffer/file.
*
* @param b
* - byte array of data to be written
* @param off
* - starting offset.
* @param len
* - length of bytes to be written
* @return - number of bytes written
* @throws IOException
*/
private int m1(byte[] b, int off, int len) throws IOException {
if (endOfBufferReached()) {
expandBufferToCapacityIfNeeded();
}
// copy data to buffer, until all data is copied or to buffer capacity.
    len = Math.min(len, ((int) (this.validLastPosition - this.currentPosition)));
int buffOff = ((int) (this.currentPosition - this.startPosition));
System.arraycopy(b, off, this.dataBuffer.array(), buffOff, len);
this.currentPosition += len;
return len;
}
| 3.26 |
hudi_BufferedRandomAccessFile_alignDiskPositionToBufferStartIfNeeded_rdh
|
/**
* If the diskPosition differs from the startPosition, flush the data in the buffer
* and realign/fill the buffer at startPosition.
*
* @throws IOException
*/
private void alignDiskPositionToBufferStartIfNeeded() throws IOException {
    if (this.diskPosition != this.startPosition) {
super.seek(this.startPosition);
this.diskPosition = this.startPosition;
}
}
| 3.26 |
hudi_BufferedRandomAccessFile_loadNewBlockToBuffer_rdh
|
/**
* Load a new data block. Returns false, when EOF is reached.
*
* @return - whether new data block was loaded or not
* @throws IOException
*/
private boolean loadNewBlockToBuffer() throws IOException {
if (this.isEOF) {
return false;
}
// read next block into buffer
this.seek(this.currentPosition);
// if currentPosition is at start, EOF has been reached
    return this.currentPosition != this.validLastPosition;
}
| 3.26 |
hudi_BufferedRandomAccessFile_flush_rdh
|
/**
* If the file is writable, flush any bytes in the buffer that have not yet been written to disk.
*
* @throws IOException
*/
public void flush() throws IOException {
this.flushBuffer();
}
| 3.26 |
hudi_BufferedRandomAccessFile_endOfBufferReached_rdh
|
/**
*
* @return whether currentPosition has reached the end of valid buffer.
*/
private boolean endOfBufferReached() {
return this.currentPosition >= this.validLastPosition;
}
| 3.26 |
hudi_BufferedRandomAccessFile_write_rdh
|
/**
* Write specified number of bytes into buffer/file, with given starting offset and length.
*
* @param b
* - byte array with data to be written
* @param off
* - starting offset.
* @param len
* - length of bytes to be written
* @throws IOException
*/
@Override
public void write(byte[] b, int off, int len) throws IOException {
// As all data may not fit into the buffer, more than one write would be required.
while (len > 0) {
int n = this.m1(b, off, len);
off += n;
len -= n;
this.f0 = true;
}
}
| 3.26 |
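The loop above is the classic partial-write pattern: a helper consumes as many bytes as currently fit, and the caller loops until the whole array is consumed. A self-contained toy sketch of that pattern against a tiny in-memory buffer (this is not the BufferedRandomAccessFile itself):

import java.util.Arrays;

public class ChunkedWriteExample {
    private final byte[] buffer = new byte[8]; // deliberately tiny to force multiple rounds
    private int used = 0;

    // Writes at most the remaining capacity; returns how many bytes were actually taken.
    private int writeAtMost(byte[] b, int off, int len) {
        if (used == buffer.length) {
            flush(); // make room, as the real code does by flushing/reloading its buffer
        }
        int n = Math.min(len, buffer.length - used);
        System.arraycopy(b, off, buffer, used, n);
        used += n;
        return n;
    }

    public void write(byte[] b, int off, int len) {
        while (len > 0) {            // same loop shape as the snippet above
            int n = writeAtMost(b, off, len);
            off += n;
            len -= n;
        }
    }

    private void flush() {
        System.out.println("flushing: " + Arrays.toString(Arrays.copyOf(buffer, used)));
        used = 0;
    }

    public static void main(String[] args) {
        // Bytes 9 and 10 remain buffered until the next flush, mirroring the buffered file behavior.
        new ChunkedWriteExample().write(new byte[] {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 0, 10);
    }
}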
hudi_BufferedRandomAccessFile_m0_rdh
|
/**
* write an array of bytes to the buffer/file.
*
* @param b
* - byte array with data to be written
* @throws IOException
*/
@Override
public void m0(byte[] b) throws IOException {
this.write(b, 0, b.length);
}
| 3.26 |
hudi_BufferedRandomAccessFile_expandBufferToCapacityIfNeeded_rdh
|
/**
* If space is available at the end of the buffer, start using it. Otherwise,
* flush the unwritten data into the file and load the buffer corresponding to startPosition.
*
* @throws IOException
*/
private void expandBufferToCapacityIfNeeded() throws IOException {
if (spaceAvailableInBuffer()) {
// space available at end of buffer -- adjust validLastPosition
this.validLastPosition = this.endPosition();
} else {
loadNewBlockToBuffer();
// appending to EOF, adjust validLastPosition.
if (this.currentPosition == this.validLastPosition) {
this.validLastPosition = this.endPosition();
}
}
}
| 3.26 |
hudi_BufferedRandomAccessFile_close_rdh
|
/**
* Close the file, after flushing data in the buffer.
*
* @throws IOException
*/
@Override
public void close() throws IOException {
if (!isClosed) {
this.flush();
super.close();
this.isClosed = true;
}
}
| 3.26 |