name (stringlengths 12–178) | code_snippet (stringlengths 8–36.5k) | score (float64 3.26–3.68) |
---|---|---|
hudi_HoodieDefaultTimeline_getCommitsAndCompactionTimeline_rdh
|
/**
* Get all instants (commits, delta commits, replace, compaction) that produce new data or merge files, in the active timeline.
*/
public HoodieTimeline getCommitsAndCompactionTimeline() {
return getTimelineOfActions(CollectionUtils.createSet(COMMIT_ACTION, DELTA_COMMIT_ACTION, REPLACE_COMMIT_ACTION, COMPACTION_ACTION));
}
| 3.26 |
hudi_HoodieDefaultTimeline_getCleanerTimeline_rdh
|
/**
* Get only the cleaner action (inflight and completed) in the active timeline.
*/
public HoodieTimeline getCleanerTimeline() {
return new HoodieDefaultTimeline(filterInstantsByAction(CLEAN_ACTION), ((Function) (this::getInstantDetails)));
}
| 3.26 |
hudi_HoodieDefaultTimeline_getCommitTimeline_rdh
|
/**
* Get only pure commits (inflight and completed) in the active timeline.
*/
public HoodieTimeline getCommitTimeline() {
// TODO: Make sure this change does not break existing functionality.
return getTimelineOfActions(CollectionUtils.createSet(COMMIT_ACTION, REPLACE_COMMIT_ACTION));
}
| 3.26 |
hudi_HoodieDefaultTimeline_getRestoreTimeline_rdh
|
/**
* Get only the restore action (inflight and completed) in the active timeline.
*/
public HoodieTimeline getRestoreTimeline() {
return new HoodieDefaultTimeline(filterInstantsByAction(RESTORE_ACTION), ((Function) (this::getInstantDetails)));
}
| 3.26 |
hudi_HoodieDefaultTimeline_mergeTimeline_rdh
|
/**
* Merge this timeline with the given timeline.
*/
public HoodieDefaultTimeline mergeTimeline(HoodieDefaultTimeline timeline) {
Stream<HoodieInstant> instantStream = Stream.concat(getInstantsAsStream(), timeline.getInstantsAsStream()).sorted();
Function<HoodieInstant, Option<byte[]>> details = instant -> {
if (getInstantsAsStream().anyMatch(i -> i.equals(instant))) {
return this.getInstantDetails(instant);
} else {
return timeline.getInstantDetails(instant);
}
};
return new HoodieDefaultTimeline(instantStream, details);
}
| 3.26 |
hudi_HoodieDefaultTimeline_getTimelineOfActions_rdh
|
/**
* Get a timeline of a specific set of actions. Useful for creating a merged timeline of multiple actions.
*
* @param actions
* actions allowed in the timeline
*/
public HoodieTimeline getTimelineOfActions(Set<String> actions) {
return new HoodieDefaultTimeline(getInstantsAsStream().filter(s -> actions.contains(s.getAction())), ((Function) (this::getInstantDetails)));
}
| 3.26 |
hudi_HoodieDefaultTimeline_getSavePointTimeline_rdh
|
/**
* Get only the save point action (inflight and completed) in the active timeline.
*/
public HoodieTimeline getSavePointTimeline() {
return new HoodieDefaultTimeline(filterInstantsByAction(SAVEPOINT_ACTION), ((Function) (this::getInstantDetails)));
}
| 3.26 |
hudi_HoodieDefaultTimeline_getDeltaCommitTimeline_rdh
|
/**
* Get only the delta commits (inflight and completed) in the active timeline.
*/
public HoodieTimeline getDeltaCommitTimeline() {
return new HoodieDefaultTimeline(filterInstantsByAction(DELTA_COMMIT_ACTION), ((Function) (this::getInstantDetails)));
}
| 3.26 |
hudi_HoodieDefaultTimeline_filterPendingExcludingMajorAndMinorCompaction_rdh
|
// TODO: Use a better naming convention for this.
@Override
public HoodieTimeline filterPendingExcludingMajorAndMinorCompaction() {
return new HoodieDefaultTimeline(getInstantsAsStream().filter(instant -> (!instant.isCompleted())
&& (!instant.getAction().equals(HoodieTimeline.COMPACTION_ACTION))
&& (!instant.getAction().equals(HoodieTimeline.LOG_COMPACTION_ACTION))), details);
}
| 3.26 |
hudi_HoodieDefaultTimeline_findFirstNonSavepointCommit_rdh
|
/**
* Returns the first non-savepoint commit on the timeline.
*/
private static Option<HoodieInstant> findFirstNonSavepointCommit(List<HoodieInstant> instants) {
Set<String> savepointTimestamps = instants.stream().filter(entry -> entry.getAction().equals(HoodieTimeline.SAVEPOINT_ACTION)).map(HoodieInstant::getTimestamp).collect(Collectors.toSet());
if (!savepointTimestamps.isEmpty()) {
// There are chances that there could be holes in the timeline due to archival and savepoint interplay.
// So, the first non-savepoint commit is considered as beginning of the active timeline.
return Option.fromJavaOptional(instants.stream().filter(entry -> !savepointTimestamps.contains(entry.getTimestamp())).findFirst());
}
return Option.fromJavaOptional(instants.stream().findFirst());
}
| 3.26 |
hudi_HoodieDefaultTimeline_filterPendingMajorOrMinorCompactionTimeline_rdh
|
/**
* Compaction and log compaction operations on a MOR table are called major and minor compaction, respectively.
*/
@Override
public HoodieTimeline filterPendingMajorOrMinorCompactionTimeline() {
return new HoodieDefaultTimeline(getInstantsAsStream().filter(s -> s.getAction().equals(HoodieTimeline.COMPACTION_ACTION) || (s.getAction().equals(HoodieTimeline.LOG_COMPACTION_ACTION) && (!s.isCompleted()))), details);
}
| 3.26 |
hudi_HoodieDefaultTimeline_m1_rdh
|
/**
* Get only the rollback and restore action (inflight and completed) in the active timeline.
*/
public HoodieTimeline m1() {
return getTimelineOfActions(CollectionUtils.createSet(ROLLBACK_ACTION, RESTORE_ACTION));
}
| 3.26 |
hudi_HoodieDefaultTimeline_getRollbackTimeline_rdh
|
/**
* Get only the rollback action (inflight and completed) in the active timeline.
*/
public HoodieTimeline getRollbackTimeline() {
return new HoodieDefaultTimeline(filterInstantsByAction(ROLLBACK_ACTION), ((Function) (this::getInstantDetails)));
}
| 3.26 |
hudi_OrcUtils_fetchRecordKeysWithPositions_rdh
|
/**
* Fetch {@link HoodieKey}s from the given ORC file.
*
* @param filePath
* The ORC file path.
* @param configuration
* configuration to build fs object
* @return {@link List} of {@link HoodieKey}s fetched from the ORC file
*/
@Override
public List<Pair<HoodieKey, Long>> fetchRecordKeysWithPositions(Configuration configuration, Path filePath) {
try {
if (!filePath.getFileSystem(configuration).exists(filePath)) {
return Collections.emptyList();
}
} catch (IOException e) {
throw new HoodieIOException("Failed to read from ORC file:"
+ filePath, e);
}
List<Pair<HoodieKey, Long>> hoodieKeysAndPositions = new ArrayList<>();
long position = 0;
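// Walk the keys in file order, pairing each key with its 0-based row position in the file.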
try (ClosableIterator<HoodieKey> iterator = getHoodieKeyIterator(configuration, filePath, Option.empty())) {
while (iterator.hasNext()) {
hoodieKeysAndPositions.add(Pair.of(iterator.next(), position));
position++;
}
}
return hoodieKeysAndPositions;
}
| 3.26 |
hudi_OrcUtils_getHoodieKeyIterator_rdh
|
/**
* Provides a closable iterator for reading the given ORC file.
*
* @param configuration
* configuration to build fs object
* @param filePath
* The ORC file path
* @return {@link ClosableIterator} of {@link HoodieKey}s for reading the ORC file
*/
@Override
public ClosableIterator<HoodieKey> getHoodieKeyIterator(Configuration configuration, Path filePath) {
try {
Configuration conf = new Configuration(configuration);
conf.addResource(FSUtils.getFs(filePath.toString(), conf).getConf());
Reader reader = OrcFile.createReader(filePath, OrcFile.readerOptions(conf));
Schema readSchema = HoodieAvroUtils.getRecordKeyPartitionPathSchema();
TypeDescription orcSchema = AvroOrcUtils.createOrcSchema(readSchema);
RecordReader recordReader = reader.rows(new Options(conf).schema(orcSchema));
List<String> fieldNames = orcSchema.getFieldNames();
// column indices for the RECORD_KEY_METADATA_FIELD, PARTITION_PATH_METADATA_FIELD fields
int keyCol = -1;
int partitionCol = -1;
for (int i = 0; i < fieldNames.size(); i++) {
if (fieldNames.get(i).equals(HoodieRecord.RECORD_KEY_METADATA_FIELD)) {
keyCol = i;
}
if (fieldNames.get(i).equals(HoodieRecord.PARTITION_PATH_METADATA_FIELD)) {
partitionCol = i;
}
}
if ((keyCol == (-1)) || (partitionCol == (-1))) {
throw new HoodieException(String.format("Couldn't find row keys or partition path in %s.", filePath));
}
return new OrcReaderIterator<>(recordReader, readSchema, orcSchema);
} catch (IOException e) {
throw new HoodieIOException("Failed to open reader from ORC file:" + filePath, e);
}
}
| 3.26 |
hudi_OrcUtils_readAvroRecords_rdh
|
/**
* NOTE: This literally reads the entire file contents, thus should be used with caution.
*/
@Override
public List<GenericRecord> readAvroRecords(Configuration configuration, Path filePath, Schema avroSchema) {
List<GenericRecord> records = new ArrayList<>();
try (Reader reader = OrcFile.createReader(filePath, OrcFile.readerOptions(configuration))) {
TypeDescription orcSchema = reader.getSchema();
try (RecordReader recordReader = reader.rows(new Options(configuration).schema(orcSchema))) {
OrcReaderIterator<GenericRecord> iterator = new OrcReaderIterator<>(recordReader, avroSchema, orcSchema);
while (iterator.hasNext()) {
GenericRecord record = iterator.next();
records.add(record);
}
}
} catch (IOException io) {
throw new HoodieIOException("Unable to create an ORC reader for ORC file:" + filePath, io);
}
return records;
}
| 3.26 |
hudi_OrcUtils_filterRowKeys_rdh
|
/**
* Read the rowKey list matching the given filter, from the given ORC file. If the filter is empty, then this will
* return all the rowkeys.
*
* @param conf
* configuration to build fs object.
* @param filePath
* The ORC file path.
* @param filter
* record keys filter
* @return Set of pairs of row key and position matching candidateRecordKeys
*/
@Override
public Set<Pair<String, Long>> filterRowKeys(Configuration conf, Path filePath, Set<String> filter) throws HoodieIOException {
long rowPosition = 0;
try (Reader reader = OrcFile.createReader(filePath, OrcFile.readerOptions(conf))) {
TypeDescription v22 = reader.getSchema();
try (RecordReader recordReader = reader.rows(new Options(conf).schema(v22))) {
Set<Pair<String, Long>> filteredRowKeys = new HashSet<>();
List<String> fieldNames = v22.getFieldNames();
VectorizedRowBatch batch = v22.createRowBatch();
// column index for the RECORD_KEY_METADATA_FIELD field
int colIndex = -1;
for (int i = 0; i < fieldNames.size(); i++) {
if (fieldNames.get(i).equals(HoodieRecord.RECORD_KEY_METADATA_FIELD)) {
colIndex = i;
break;
}
}
if (colIndex == (-1)) {
throw new HoodieException(String.format("Couldn't find row keys in %s.", filePath));
}
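// Scan the file batch by batch, collecting each row key that matches the filter together with its row position.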
while (recordReader.nextBatch(batch)) {
BytesColumnVector rowKeys = ((BytesColumnVector) (batch.cols[colIndex]));
for (int i = 0; i < batch.size; i++) {
String rowKey = rowKeys.toString(i);
if (filter.isEmpty() || filter.contains(rowKey)) {
filteredRowKeys.add(Pair.of(rowKey, rowPosition));
}
rowPosition++;
}
}
return filteredRowKeys;
}
} catch (IOException io) {
throw new HoodieIOException("Unable to read row keys for ORC file:" + filePath, io);
}
}
| 3.26 |
hudi_SerializableSchema_readObjectFrom_rdh
|
// create a public read method for unit test
public void readObjectFrom(ObjectInputStream in) throws IOException {
try {
schema = new Schema.Parser().parse(in.readObject().toString());
} catch (ClassNotFoundException e) {
throw new IOException("unable to parse schema", e);
}
}
| 3.26 |
hudi_SerializableSchema_writeObjectTo_rdh
|
// create a public write method for unit test
public void writeObjectTo(ObjectOutputStream out) throws IOException {
// Note: writeUTF cannot support string length > 64K. So use writeObject which has small overhead (relatively).
out.writeObject(schema.toString());
}
| 3.26 |
hudi_BootstrapOperator_preLoadIndexRecords_rdh
|
/**
* Load the index records before {@link #processElement}.
*/
protected void preLoadIndexRecords() throws Exception {
String basePath = hoodieTable.getMetaClient().getBasePath();
int taskID = getRuntimeContext().getIndexOfThisSubtask();
LOG.info("Start loading records in table {} into the index state, taskId = {}", basePath, taskID);
for (String partitionPath : FSUtils.getAllPartitionPaths(new HoodieFlinkEngineContext(hadoopConf), metadataConfig(conf), basePath)) {
if (pattern.matcher(partitionPath).matches()) {
loadRecords(partitionPath);
}
}
LOG.info("Finish sending index records, taskId = {}.", getRuntimeContext().getIndexOfThisSubtask());
// wait for the other bootstrap tasks to finish bootstrapping.
waitForBootstrapReady(getRuntimeContext().getIndexOfThisSubtask());
hoodieTable = null;
}
| 3.26 |
hudi_BootstrapOperator_waitForBootstrapReady_rdh
|
/**
* Wait for other bootstrap tasks to finish the index bootstrap.
*/
private void waitForBootstrapReady(int taskID) {
int taskNum = getRuntimeContext().getNumberOfParallelSubtasks();
int readyTaskNum = 1;
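// Poll the shared global aggregate every few seconds until every subtask has reported that its bootstrap is done.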
while (taskNum != readyTaskNum) {
try {
readyTaskNum = aggregateManager.updateGlobalAggregate(BootstrapAggFunction.NAME + conf.getString(FlinkOptions.TABLE_NAME), taskID, new BootstrapAggFunction());
LOG.info("Waiting for other bootstrap tasks to complete, taskId = {}.", taskID);
TimeUnit.SECONDS.sleep(5);
} catch (Exception e) {
LOG.warn("Update global task bootstrap summary error", e);
}
}
}
| 3.26 |
hudi_BootstrapOperator_loadRecords_rdh
|
/**
* Loads all the indices of the given partition path into the backup state.
*
* @param partitionPath
* The partition path
*/
@SuppressWarnings("unchecked")
protected void loadRecords(String partitionPath) throws Exception {
long start = System.currentTimeMillis();
final int parallelism = getRuntimeContext().getNumberOfParallelSubtasks();
final int v9 = getRuntimeContext().getMaxNumberOfParallelSubtasks();
final int taskID = getRuntimeContext().getIndexOfThisSubtask();
HoodieTimeline commitsTimeline = this.hoodieTable.getMetaClient().getCommitsTimeline();
if (!StringUtils.isNullOrEmpty(lastInstantTime)) {
commitsTimeline = commitsTimeline.findInstantsAfter(lastInstantTime);
}
Option<HoodieInstant> latestCommitTime = commitsTimeline.filterCompletedAndCompactionInstants().lastInstant();
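// The latest completed commit (or compaction) is used as the upper bound for the file slices loaded below.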
if (latestCommitTime.isPresent()) {
BaseFileUtils fileUtils = BaseFileUtils.getInstance(this.hoodieTable.getBaseFileFormat());
Schema schema = new TableSchemaResolver(this.hoodieTable.getMetaClient()).getTableAvroSchema();
List<FileSlice> fileSlices = this.hoodieTable.getSliceView().getLatestMergedFileSlicesBeforeOrOn(partitionPath, latestCommitTime.get().getTimestamp()).collect(toList());
for (FileSlice fileSlice : fileSlices) {
if (!shouldLoadFile(fileSlice.getFileId(), v9, parallelism, taskID)) {
continue;
}
LOG.info("Load records from {}.", fileSlice);
// load parquet records
fileSlice.getBaseFile().ifPresent(baseFile -> {
// filter out corrupted files
if (!isValidFile(baseFile.getFileStatus())) {
return;
}
try (ClosableIterator<HoodieKey> iterator = fileUtils.getHoodieKeyIterator(this.hadoopConf, new Path(baseFile.getPath()))) {
iterator.forEachRemaining(hoodieKey -> {
output.collect(new StreamRecord(new IndexRecord(generateHoodieRecord(hoodieKey, fileSlice))));
});
}
});
// load avro log records
// filter out corrupted files
List<String> logPaths = fileSlice.getLogFiles().sorted(HoodieLogFile.getLogFileComparator()).filter(logFile -> isValidFile(logFile.getFileStatus())).map(logFile -> logFile.getPath().toString()).collect(toList());
try (HoodieMergedLogRecordScanner scanner = FormatUtils.logScanner(logPaths, schema, latestCommitTime.get().getTimestamp(), writeConfig, hadoopConf)) {
for (String recordKey : scanner.getRecords().keySet())
{
output.collect(new StreamRecord(new IndexRecord(generateHoodieRecord(new HoodieKey(recordKey, partitionPath), fileSlice))));
}
} catch (Exception e) {
throw new HoodieException(String.format("Error when loading record keys from files: %s", logPaths), e);
}
}
}
long cost = System.currentTimeMillis() - start;
LOG.info("Task [{}}:{}}] finish loading the index under partition {}
| 3.26 |
hudi_MarkerHandler_getCreateAndMergeMarkers_rdh
|
/**
*
* @param markerDir
* marker directory path
* @return all marker paths of write IO type "CREATE" and "MERGE"
*/
public Set<String> getCreateAndMergeMarkers(String markerDir) {
return getAllMarkers(markerDir).stream().filter(markerName -> !markerName.endsWith(IOType.APPEND.name())).collect(Collectors.toSet());
}
| 3.26 |
hudi_MarkerHandler_stop_rdh
|
/**
* Stops the dispatching of marker creation requests.
*/
public void stop() {
if (dispatchingThreadFuture != null) {
dispatchingThreadFuture.cancel(true);
}
dispatchingExecutorService.shutdown();
batchingExecutorService.shutdown();
}
| 3.26 |
hudi_MarkerHandler_deleteMarkers_rdh
|
/**
* Deletes markers in the directory.
*
* @param markerDir
* marker directory path
* @return {@code true} if successful; {@code false} otherwise.
*/
public Boolean deleteMarkers(String markerDir) {
boolean result = getMarkerDirState(markerDir).deleteAllMarkers();
markerDirStateMap.remove(markerDir);
return result;
}
| 3.26 |
hudi_MarkerHandler_m0_rdh
|
/**
*
* @param markerDir
* marker directory path.
* @return Pending markers from the requests to process.
*/
public Set<String> m0(String markerDir) {
if (markerDirStateMap.containsKey(markerDir)) {
MarkerDirState markerDirState = getMarkerDirState(markerDir);
return markerDirState.getPendingMarkerCreationRequests(false).stream().map(MarkerCreationFuture::getMarkerName).collect(Collectors.toSet());
}
return Collections.emptySet();
}
| 3.26 |
hudi_MarkerHandler_doesMarkerDirExist_rdh
|
/**
*
* @param markerDir
* marker directory path
* @return {@code true} if the marker directory exists; {@code false} otherwise.
*/
public boolean doesMarkerDirExist(String markerDir) {
MarkerDirState markerDirState = getMarkerDirState(markerDir);
return markerDirState.exists();
}
| 3.26 |
hudi_MarkerHandler_createMarker_rdh
|
/**
* Generates a future for an async marker creation request
*
* The future is added to the marker creation future list and waits for the next batch processing
* of marker creation requests.
*
* @param context
* Javalin app context
* @param markerDir
* marker directory path
* @param markerName
* marker name
* @param basePath
* base path of the table
* @return the {@code CompletableFuture} instance for the request
*/
public CompletableFuture<String> createMarker(Context context, String markerDir, String markerName, String basePath) {
// Step 1: do early conflict detection if enabled
if (timelineServiceConfig.earlyConflictDetectionEnable) {
try {
synchronized(earlyConflictDetectionLock) {
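// Lazily instantiate the detection strategy; if the configured class is not a TimelineServerBasedDetectionStrategy, fall back to the async timeline-server-based strategy.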
if (earlyConflictDetectionStrategy == null) {
String strategyClassName = timelineServiceConfig.earlyConflictDetectionStrategy;
if (!ReflectionUtils.isSubClass(strategyClassName, TimelineServerBasedDetectionStrategy.class)) {
LOG.warn(("Cannot use " + strategyClassName) + " for timeline-server-based markers.");
strategyClassName = "org.apache.hudi.timeline.service.handlers.marker.AsyncTimelineServerBasedDetectionStrategy";
LOG.warn("Falling back to " + strategyClassName);
}
earlyConflictDetectionStrategy = ((TimelineServerBasedDetectionStrategy) (ReflectionUtils.loadClass(strategyClassName, basePath, markerDir, markerName, timelineServiceConfig.checkCommitConflict)));
}
// markerDir => $base_path/.hoodie/.temp/$instant_time
// If markerDir changes, e.g., when moving to the next instant action, we need to refresh this earlyConflictDetectionStrategy.
// For the create-marker actions of a specific instant, we only do this check/refresh once
// instead of starting the conflict detector for every request.
if (!markerDir.equalsIgnoreCase(currentMarkerDir)) {
this.currentMarkerDir = markerDir;
Set<String> actions = CollectionUtils.createSet(COMMIT_ACTION, DELTA_COMMIT_ACTION, REPLACE_COMMIT_ACTION);
Set<HoodieInstant> completedCommits = new HashSet<>(viewManager.getFileSystemView(basePath).getTimeline().filterCompletedInstants().filter(instant -> actions.contains(instant.getAction())).getInstants());
earlyConflictDetectionStrategy.startAsyncDetection(timelineServiceConfig.asyncConflictDetectorInitialDelayMs, timelineServiceConfig.asyncConflictDetectorPeriodMs, markerDir, basePath, timelineServiceConfig.maxAllowableHeartbeatIntervalInMs, fileSystem, this, completedCommits);
}
}
earlyConflictDetectionStrategy.detectAndResolveConflictIfNecessary();
} catch (HoodieEarlyConflictDetectionException he) {
LOG.warn("Detected the write conflict due to a concurrent writer, " + "failing the marker creation as the early conflict detection is enabled", he);
return finishCreateMarkerFuture(context, markerDir, markerName);
} catch (Exception e) {
LOG.warn("Failed to execute early conflict detection." + e.getMessage());
// When early conflict detection fails to execute, we still allow the marker creation
// to continue
return addMarkerCreationRequestForAsyncProcessing(context, markerDir, markerName);
}
}
// Step 2: create marker
return addMarkerCreationRequestForAsyncProcessing(context, markerDir, markerName);
}
| 3.26 |
hudi_ClusteringUtils_getAllPendingClusteringPlans_rdh
|
/**
* Get all pending clustering plans along with their instants.
*/
public static Stream<Pair<HoodieInstant, HoodieClusteringPlan>> getAllPendingClusteringPlans(HoodieTableMetaClient metaClient) {
List<HoodieInstant> v0 = metaClient.getActiveTimeline().filterPendingReplaceTimeline().getInstants();
return v0.stream().map(instant -> getClusteringPlan(metaClient, instant)).filter(Option::isPresent).map(Option::get);
}
| 3.26 |
hudi_ClusteringUtils_getClusteringPlan_rdh
|
/**
* Get Clustering plan from timeline.
*
* @param metaClient the meta client for the Hoodie table
* @param pendingReplaceInstant the pending replacecommit instant to inspect
* @return the clustering plan paired with its instant if the instant is a clustering operation, otherwise empty
*/
public static Option<Pair<HoodieInstant, HoodieClusteringPlan>> getClusteringPlan(HoodieTableMetaClient metaClient, HoodieInstant pendingReplaceInstant) {
try {
Option<HoodieRequestedReplaceMetadata> requestedReplaceMetadata = getRequestedReplaceMetadata(metaClient, pendingReplaceInstant);
if (requestedReplaceMetadata.isPresent() && WriteOperationType.CLUSTER.name().equals(requestedReplaceMetadata.get().getOperationType())) {
return Option.of(Pair.of(pendingReplaceInstant, requestedReplaceMetadata.get().getClusteringPlan()));
}
return Option.empty();
} catch (IOException e) {
throw new HoodieIOException("Error reading clustering plan " + pendingReplaceInstant.getTimestamp(), e);
}
}
| 3.26 |
hudi_ClusteringUtils_isClusteringCommit_rdh
|
/**
* Checks if the replacecommit is a clustering commit.
*/
public static boolean isClusteringCommit(HoodieTableMetaClient metaClient, HoodieInstant pendingReplaceInstant) {
return getClusteringPlan(metaClient, pendingReplaceInstant).isPresent();
}
| 3.26 |
hudi_ClusteringUtils_m0_rdh
|
/**
* Get filegroups to pending clustering instant mapping for all pending clustering plans.
* This includes all clustering operations in 'requested' and 'inflight' states.
*/
public static Map<HoodieFileGroupId, HoodieInstant> m0(HoodieTableMetaClient metaClient) {
Stream<Pair<HoodieInstant, HoodieClusteringPlan>> pendingClusteringPlans = getAllPendingClusteringPlans(metaClient);
// get all filegroups in the plan
Stream<Map.Entry<HoodieFileGroupId, HoodieInstant>> resultStream = pendingClusteringPlans.flatMap(clusteringPlan -> getFileGroupEntriesInClusteringPlan(clusteringPlan.getLeft(), clusteringPlan.getRight()));
Map<HoodieFileGroupId, HoodieInstant> resultMap;
try {
resultMap = resultStream.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
} catch (Exception e) {
if ((e instanceof IllegalStateException) && e.getMessage().contains("Duplicate key")) {
throw new HoodieException("Found duplicate file groups pending clustering. If you're running deltastreamer in continuous mode, consider adding delay using --min-sync-interval-seconds. " + "Or consider setting write concurrency mode to OPTIMISTIC_CONCURRENCY_CONTROL.", e);
}
throw new HoodieException("Error getting all file groups in pending clustering", e);
}
LOG.info(("Found " + resultMap.size()) + " files in pending clustering operations");
return resultMap;
}
| 3.26 |
hudi_ClusteringUtils_getRequestedReplaceMetadata_rdh
|
/**
* Get requested replace metadata from timeline.
*
* @param metaClient the meta client for the Hoodie table
* @param pendingReplaceInstant the pending replacecommit instant
* @return the requested replace metadata if present, otherwise empty
* @throws IOException if the instant details cannot be read
*/
private static Option<HoodieRequestedReplaceMetadata> getRequestedReplaceMetadata(HoodieTableMetaClient metaClient, HoodieInstant pendingReplaceInstant) throws IOException {
final HoodieInstant requestedInstant;
if (!pendingReplaceInstant.isRequested()) {
// inflight replacecommit files don't have clustering plan.
// This is because replacecommit inflight can have workload profile for 'insert_overwrite'.
// Get the plan from corresponding requested instant.
requestedInstant = HoodieTimeline.getReplaceCommitRequestedInstant(pendingReplaceInstant.getTimestamp());
} else {
requestedInstant = pendingReplaceInstant;
}
Option<byte[]> content = metaClient.getActiveTimeline().getInstantDetails(requestedInstant);
if ((!content.isPresent()) || (content.get().length == 0)) {
// A few operations create the requested file without any content. Assume these are not clustering.
return Option.empty();
}
return Option.of(TimelineMetadataUtils.deserializeRequestedReplaceMetadata(content.get()));
}
| 3.26 |
hudi_ClusteringUtils_createClusteringPlan_rdh
|
/**
* Create clustering plan from input fileSliceGroups.
*/
public static HoodieClusteringPlan createClusteringPlan(String strategyClassName, Map<String, String> strategyParams, List<FileSlice>[] fileSliceGroups, Map<String, String> extraMetadata) {
List<HoodieClusteringGroup> clusteringGroups = Arrays.stream(fileSliceGroups).map(fileSliceGroup -> {
Map<String, Double> groupMetrics = buildMetrics(fileSliceGroup);
List<HoodieSliceInfo> sliceInfos = getFileSliceInfo(fileSliceGroup);
return HoodieClusteringGroup.newBuilder().setSlices(sliceInfos).setMetrics(groupMetrics).build();
}).collect(Collectors.toList());
HoodieClusteringStrategy strategy = HoodieClusteringStrategy.newBuilder().setStrategyClassName(strategyClassName).setStrategyParams(strategyParams).build();
return HoodieClusteringPlan.newBuilder().setInputGroups(clusteringGroups).setExtraMetadata(extraMetadata).setStrategy(strategy).setPreserveHoodieMetadata(true).build();
}
| 3.26 |
hudi_RealtimeCompactedRecordReader_getMergedLogRecordScanner_rdh
|
/**
* Goes through the log files and populates a map with latest version of each key logged, since the base split was
* written.
*/
private HoodieMergedLogRecordScanner getMergedLogRecordScanner() throws IOException {
// NOTE: HoodieCompactedLogRecordScanner will not return records for an in-flight commit
// but can return records for completed commits > the commit we are trying to read (if using
// readCommit() API)
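// The scanner merges records across the delta log files; the max-memory, spillable-map and disk-map options below control how it spills to disk.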
return HoodieMergedLogRecordScanner.newBuilder()
    .withFileSystem(FSUtils.getFs(split.getPath().toString(), jobConf))
    .withBasePath(split.getBasePath())
    .withLogFilePaths(split.getDeltaLogPaths())
    .withReaderSchema(getLogScannerReaderSchema())
    .withLatestInstantTime(split.getMaxCommitTime())
    .withMaxMemorySizeInBytes(HoodieRealtimeRecordReaderUtils.getMaxCompactionMemoryInBytes(jobConf))
    .withReadBlocksLazily(ConfigUtils.getBooleanWithAltKeys(jobConf, HoodieReaderConfig.COMPACTION_LAZY_BLOCK_READ_ENABLE))
    .withReverseReader(false)
    .withBufferSize(jobConf.getInt(HoodieMemoryConfig.MAX_DFS_STREAM_BUFFER_SIZE.key(), HoodieMemoryConfig.DEFAULT_MR_MAX_DFS_STREAM_BUFFER_SIZE))
    .withSpillableMapBasePath(jobConf.get(HoodieMemoryConfig.SPILLABLE_MAP_BASE_PATH.key(), FileIOUtils.getDefaultSpillableMapBasePath()))
    .withDiskMapType(jobConf.getEnum(HoodieCommonConfig.SPILLABLE_DISK_MAP_TYPE.key(), HoodieCommonConfig.SPILLABLE_DISK_MAP_TYPE.defaultValue()))
    .withBitCaskDiskMapCompressionEnabled(jobConf.getBoolean(HoodieCommonConfig.DISK_MAP_BITCASK_COMPRESSION_ENABLED.key(), HoodieCommonConfig.DISK_MAP_BITCASK_COMPRESSION_ENABLED.defaultValue()))
    .withOptimizedLogBlocksScan(jobConf.getBoolean(HoodieReaderConfig.ENABLE_OPTIMIZED_LOG_BLOCKS_SCAN.key(), Boolean.parseBoolean(HoodieReaderConfig.ENABLE_OPTIMIZED_LOG_BLOCKS_SCAN.defaultValue())))
    .withInternalSchema(schemaEvolutionContext.internalSchemaOption.orElse(InternalSchema.getEmptyInternalSchema()))
    .build();
}
| 3.26 |
hudi_HoodieWriteCommitCallbackUtil_convertToJsonString_rdh
|
/**
* Convert data to json string format.
*/
public static String convertToJsonString(Object obj) {
try {
return mapper.writeValueAsString(obj);
} catch (IOException e) {
throw new HoodieCommitCallbackException("Callback service convert data to json failed", e);
}
}
| 3.26 |
hudi_LogFileCreationCallback_preFileCreation_rdh
|
/**
* Executes action right before log file is created.
*
* @param logFile
* The log file.
* @return true if the action executes successfully.
*/
default boolean preFileCreation(HoodieLogFile logFile) {
return true;
}
| 3.26 |
hudi_AvroOrcUtils_addToVector_rdh
|
/**
* Add an object (of a given ORC type) to the column vector at a given position.
*
* @param type
* ORC schema of the value Object.
* @param colVector
* The column vector to store the value Object.
* @param avroSchema
* Avro schema of the value Object.
* Only used to check logical types for timestamp unit conversion.
* @param value
* Object to be added to the column vector
* @param vectorPos
* The position in the vector where value will be stored at.
*/
public static void addToVector(TypeDescription type, ColumnVector colVector, Schema avroSchema, Object value, int vectorPos) {
final int currentVecLength = colVector.isNull.length;
if (vectorPos >= currentVecLength) {
colVector.ensureSize(2 * currentVecLength, true);
}
if (value == null) {
colVector.isNull[vectorPos] = true;
colVector.noNulls = false;
return;
}
if (avroSchema.getType().equals(Type.UNION)) {
avroSchema = getActualSchemaType(avroSchema);
}
LogicalType logicalType = (avroSchema != null) ? avroSchema.getLogicalType() : null;
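// Dispatch on the ORC type category; each branch casts colVector to the matching concrete vector type and writes the value at vectorPos.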
switch (type.getCategory()) {
case BOOLEAN :
LongColumnVector boolVec = ((LongColumnVector) (colVector));
boolVec.vector[vectorPos] = (((boolean) (value))) ? 1 : 0;
break;
case BYTE :
LongColumnVector byteColVec = ((LongColumnVector) (colVector));
byteColVec.vector[vectorPos] = ((byte) (value));
break;
case SHORT :
LongColumnVector shortColVec = ((LongColumnVector) (colVector));
shortColVec.vector[vectorPos] = ((short) (value));
break;
case INT :
// the Avro logical type could be AvroTypeUtil.LOGICAL_TYPE_TIME_MILLIS, but we will ignore that fact here
// since Orc has no way to represent a time in the way Avro defines it; we will simply preserve the int value
LongColumnVector intColVec = ((LongColumnVector) (colVector));
intColVec.vector[vectorPos] = ((int) (value));
break;
case LONG :
// the Avro logical type could be AvroTypeUtil.LOGICAL_TYPE_TIME_MICROS, but we will ignore that fact here
// since Orc has no way to represent a time in the way Avro defines it; we will simply preserve the long value
LongColumnVector longColVec = ((LongColumnVector) (colVector)); longColVec.vector[vectorPos] = ((long) (value));
break;
case FLOAT :
DoubleColumnVector floatColVec = ((DoubleColumnVector) (colVector));
floatColVec.vector[vectorPos] = ((float) (value));
break;
case DOUBLE :
DoubleColumnVector doubleColVec = ((DoubleColumnVector) (colVector));
doubleColVec.vector[vectorPos] = ((double) (value));
break;
case VARCHAR :
case CHAR :
case STRING :
BytesColumnVector bytesColVec = ((BytesColumnVector) (colVector));
byte[] bytes = null;
if (value instanceof String) {
bytes = getUTF8Bytes(((String) (value)));
} else if (value instanceof Utf8) {
final Utf8 utf8 = ((Utf8) (value));
bytes = utf8.getBytes();
} else if (value instanceof GenericData.EnumSymbol) {
bytes = getUTF8Bytes(((GenericData.EnumSymbol) (value)).toString());
} else {
throw new IllegalStateException(String.format("Unrecognized type for Avro %s field value, which has type %s, value %s", type.getCategory().getName(), value.getClass().getName(), value.toString()));
}
if (bytes == null) {
bytesColVec.isNull[vectorPos] = true;
bytesColVec.noNulls = false;
} else {
bytesColVec.setRef(vectorPos, bytes, 0, bytes.length);
}
break;
case DATE :
LongColumnVector dateColVec = ((LongColumnVector) (colVector));
int daysSinceEpoch;
if (logicalType instanceof LogicalTypes.Date) {
daysSinceEpoch = ((int) (value));
} else if (value instanceof java.sql.Date) {
daysSinceEpoch = DateWritable.dateToDays(((java.sql.Date) (value)));
} else if (value instanceof java.util.Date) {
daysSinceEpoch = DateWritable.millisToDays(((java.util.Date) (value)).getTime());
} else {
throw new IllegalStateException(String.format("Unrecognized type for Avro DATE field value, which has type %s, value %s", value.getClass().getName(), value.toString()));
}
dateColVec.vector[vectorPos] = daysSinceEpoch;
break;
case TIMESTAMP :
TimestampColumnVector tsColVec = ((TimestampColumnVector) (colVector));
long time;
int v16 = 0;
// The unit for Timestamp in ORC is millis, convert timestamp to millis if needed
if (logicalType instanceof LogicalTypes.TimestampMillis) {
time = ((long) (value));
} else if (logicalType instanceof LogicalTypes.TimestampMicros) {
final long logicalTsValue = ((long) (value));
time = logicalTsValue / MICROS_PER_MILLI;
v16 = NANOS_PER_MICRO * ((int) (logicalTsValue % MICROS_PER_MILLI));
} else if (value instanceof Timestamp) {
Timestamp tsValue = ((Timestamp) (value));
time = tsValue.getTime();
v16 = tsValue.getNanos();
} else if (value instanceof java.sql.Date) {
java.sql.Date sqlDateValue = ((java.sql.Date) (value));
time = sqlDateValue.getTime();
} else if (value instanceof java.util.Date) {
java.util.Date v20 = ((java.util.Date) (value));
time = v20.getTime();
} else {
throw new IllegalStateException(String.format("Unrecognized type for Avro TIMESTAMP field value, which has type %s, value %s", value.getClass().getName(), value.toString()));
}
tsColVec.time[vectorPos] = time;
tsColVec.nanos[vectorPos] = v16;
break;
case BINARY :
BytesColumnVector binaryColVec = ((BytesColumnVector) (colVector));
byte[] binaryBytes;
if (value instanceof GenericData.Fixed) {
binaryBytes = ((GenericData.Fixed) (value)).bytes();
} else if (value instanceof ByteBuffer) {
final ByteBuffer byteBuffer = ((ByteBuffer) (value));
binaryBytes = toBytes(byteBuffer);
} else if (value instanceof byte[]) {
binaryBytes = ((byte[]) (value));
} else {
throw new IllegalStateException(String.format("Unrecognized type for Avro BINARY field value, which has type %s, value %s", value.getClass().getName(), value.toString()));
}
binaryColVec.setRef(vectorPos, binaryBytes, 0, binaryBytes.length);
break;
case DECIMAL :
DecimalColumnVector decimalColVec = ((DecimalColumnVector) (colVector));
HiveDecimal decimalValue;
if (value instanceof BigDecimal) {
final BigDecimal decimal = ((BigDecimal) (value));
decimalValue = HiveDecimal.create(decimal);
} else if (value instanceof ByteBuffer) {
final ByteBuffer byteBuffer = ((ByteBuffer) (value));
final byte[] decimalBytes = new byte[byteBuffer.remaining()];
byteBuffer.get(decimalBytes);
final BigInteger bigInt = new BigInteger(decimalBytes);
final int scale = type.getScale();
BigDecimal bigDecVal = new BigDecimal(bigInt, scale);
decimalValue = HiveDecimal.create(bigDecVal);
if ((decimalValue == null) && (decimalBytes.length > 0)) {
throw new IllegalStateException("Unexpected read null HiveDecimal from bytes (base-64 encoded): " + Base64.getEncoder().encodeToString(decimalBytes));
}
} else if (value instanceof GenericData.Fixed) {
final BigDecimal decimal = new Conversions.DecimalConversion().fromFixed(((GenericData.Fixed) (value)), avroSchema, logicalType);
decimalValue = HiveDecimal.create(decimal);
} else {
throw new IllegalStateException(String.format("Unexpected type for decimal (%s), cannot convert from Avro value", value.getClass().getCanonicalName()));
}
if (decimalValue == null) {
decimalColVec.isNull[vectorPos] = true;
decimalColVec.noNulls = false;
} else {
decimalColVec.set(vectorPos, decimalValue);
}
break;
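// The complex types below recurse into their child vectors via addToVector/addUnionValue, advancing childCount as elements are appended.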
case LIST :
List<?> list = ((List<?>) (value));
ListColumnVector listColVec = ((ListColumnVector) (colVector));
listColVec.offsets[vectorPos] = listColVec.childCount;
listColVec.lengths[vectorPos] = list.size();
TypeDescription listType = type.getChildren().get(0);
for (Object listItem : list) {
addToVector(listType, listColVec.child, avroSchema.getElementType(), listItem, listColVec.childCount++);
}
break;
case MAP :
Map<String, ?> mapValue = ((Map<String, ?>) (value));
MapColumnVector mapColumnVector = ((MapColumnVector) (colVector));
mapColumnVector.offsets[vectorPos] = mapColumnVector.childCount;
mapColumnVector.lengths[vectorPos] = mapValue.size();
// keys are always strings
Schema keySchema = Schema.create(Type.STRING);
for (Map.Entry<String, ?> entry : mapValue.entrySet()) {
addToVector(type.getChildren().get(0), mapColumnVector.keys, keySchema, entry.getKey(), mapColumnVector.childCount);
addToVector(type.getChildren().get(1), mapColumnVector.values, avroSchema.getValueType(), entry.getValue(), mapColumnVector.childCount);
mapColumnVector.childCount++;
}
break;
case STRUCT :
StructColumnVector structColVec = ((StructColumnVector) (colVector));
GenericData.Record record = ((GenericData.Record) (value));
for (int i = 0; i < type.getFieldNames().size(); i++) {
String fieldName = type.getFieldNames().get(i);
Object fieldValue = record.get(fieldName);
TypeDescription fieldType = type.getChildren().get(i);
addToVector(fieldType, structColVec.fields[i], avroSchema.getFields().get(i).schema(), fieldValue, vectorPos);
}
break;
case UNION :
UnionColumnVector unionColVec = ((UnionColumnVector) (colVector));
List<TypeDescription> childTypes = type.getChildren();
boolean added = addUnionValue(unionColVec, childTypes, avroSchema, value, vectorPos);
if (!added) {
throw new IllegalStateException(String.format("Failed to add value %s to union with type %s", value == null ? "null" : value.toString(), type.toString()));
}
break;
default :
throw new IllegalArgumentException(("Invalid TypeDescription " + type.toString()) + ".");
}
}
| 3.26 |
hudi_AvroOrcUtils_readFromVector_rdh
|
/**
* Read the Column vector at a given position conforming to a given ORC schema.
*
* @param type
* ORC schema of the object to read.
* @param colVector
* The column vector to read.
* @param avroSchema
* Avro schema of the object to read.
* Only used to check logical types for timestamp unit conversion.
* @param vectorPos
* The position in the vector where the value to read is stored at.
* @return The object being read.
*/
public static Object readFromVector(TypeDescription type, ColumnVector colVector, Schema avroSchema, int vectorPos) {
if (colVector.isRepeating) {
vectorPos = 0;
}
if (colVector.isNull[vectorPos]) {
return null;
}
if (avroSchema.getType().equals(Type.UNION)) {
avroSchema = getActualSchemaType(avroSchema);
}
LogicalType logicalType = (avroSchema != null) ? avroSchema.getLogicalType() : null;
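// Each branch casts colVector to its concrete vector type and converts the value at vectorPos back to its Avro representation.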
switch (type.getCategory()) {
case BOOLEAN :
return ((LongColumnVector) (colVector)).vector[vectorPos] != 0;
case BYTE :
return ((byte) (((LongColumnVector) (colVector)).vector[vectorPos]));
case SHORT :
return ((short) (((LongColumnVector) (colVector)).vector[vectorPos]));
case INT :
return ((int) (((LongColumnVector) (colVector)).vector[vectorPos]));
case LONG :
return ((LongColumnVector) (colVector)).vector[vectorPos];
case FLOAT :
return ((float) (((DoubleColumnVector) (colVector)).vector[vectorPos]));
case DOUBLE :
return ((DoubleColumnVector) (colVector)).vector[vectorPos];
case VARCHAR :
case CHAR :
int maxLength = type.getMaxLength();
String result = ((BytesColumnVector) (colVector)).toString(vectorPos);
if (result.length() <= maxLength) {
return result;
} else {
throw new HoodieIOException(("CHAR/VARCHAR has length " + result.length()) + " greater than Max Length allowed");
}
case STRING :
String stringType = avroSchema.getProp(GenericData.STRING_PROP);
if ((stringType == null) || (!stringType.equals(StringType.String))) {
int stringLength = ((BytesColumnVector) (colVector)).length[vectorPos];
int stringOffset = ((BytesColumnVector) (colVector)).start[vectorPos];
byte[] stringBytes = new byte[stringLength];
System.arraycopy(((BytesColumnVector) (colVector)).vector[vectorPos], stringOffset, stringBytes, 0, stringLength);
return new Utf8(stringBytes);
} else {
return ((BytesColumnVector) (colVector)).toString(vectorPos);
}
case DATE :
// convert to daysSinceEpoch for LogicalType.Date
return ((int) (((LongColumnVector) (colVector)).vector[vectorPos]));
case TIMESTAMP :
// The unit of time in ORC is millis. Convert (time,nanos) to the desired unit per logicalType
long time = ((TimestampColumnVector) (colVector)).time[vectorPos];
int nanos = ((TimestampColumnVector) (colVector)).nanos[vectorPos];
if (logicalType instanceof LogicalTypes.TimestampMillis) {
return time;
} else if (logicalType instanceof LogicalTypes.TimestampMicros) {
return (time * MICROS_PER_MILLI) + (nanos / NANOS_PER_MICRO);
} else {
return ((TimestampColumnVector) (colVector)).getTimestampAsLong(vectorPos);
}
case BINARY :
int binaryLength = ((BytesColumnVector) (colVector)).length[vectorPos];
int binaryOffset = ((BytesColumnVector) (colVector)).start[vectorPos];
byte[] binaryBytes = new byte[binaryLength];
System.arraycopy(((BytesColumnVector) (colVector)).vector[vectorPos], binaryOffset, binaryBytes, 0, binaryLength);
// return a ByteBuffer to be consistent with AvroRecordConverter
return ByteBuffer.wrap(binaryBytes);
case DECIMAL :
// HiveDecimal always ignores trailing zeros, thus modifies the scale implicitly,
// therefore, the scale must be enforced here.
BigDecimal bigDecimal = ((DecimalColumnVector) (colVector)).vector[vectorPos].getHiveDecimal().bigDecimalValue().setScale(((LogicalTypes.Decimal) (logicalType)).getScale());
Schema.Type baseType = avroSchema.getType();
if (baseType.equals(Type.FIXED)) {
return new Conversions.DecimalConversion().toFixed(bigDecimal, avroSchema, logicalType);
} else if (baseType.equals(Type.BYTES)) {
return bigDecimal.unscaledValue().toByteArray();
} else {
throw new HoodieIOException(baseType.getName() + " is not a valid type for LogicalTypes.DECIMAL.");
}
case LIST :
ArrayList<Object> v71 = new ArrayList<>();
ListColumnVector listVector = ((ListColumnVector) (colVector));
int listLength = ((int) (listVector.lengths[vectorPos]));
int listOffset = ((int) (listVector.offsets[vectorPos]));
v71.ensureCapacity(listLength);
TypeDescription childType = type.getChildren().get(0);
for (int i = 0; i < listLength; i++) {
v71.add(readFromVector(childType, listVector.child, avroSchema.getElementType(), listOffset + i));
}
return v71;
case MAP :
Map<String, Object> map = new HashMap<String, Object>();
MapColumnVector mapVector = ((MapColumnVector) (colVector));
int v79 = ((int) (mapVector.lengths[vectorPos]));
int mapOffset = ((int) (mapVector.offsets[vectorPos]));
// keys are always strings for maps in Avro
Schema keySchema = Schema.create(Type.STRING);
for (int i = 0; i < v79; i++) {
map.put(readFromVector(type.getChildren().get(0), mapVector.keys, keySchema, i + mapOffset).toString(), readFromVector(type.getChildren().get(1), mapVector.values, avroSchema.getValueType(), i + mapOffset));
}
return map;
case STRUCT :
StructColumnVector structVector = ((StructColumnVector) (colVector));
List<TypeDescription> children = type.getChildren();
GenericData.Record record = new GenericData.Record(avroSchema);
for (int i = 0; i < children.size(); i++) {
record.put(i, readFromVector(children.get(i), structVector.fields[i], avroSchema.getFields().get(i).schema(), vectorPos));
}
return record;
case UNION :
UnionColumnVector unionVector = ((UnionColumnVector) (colVector));
int tag = unionVector.tags[vectorPos];
ColumnVector fieldVector = unionVector.fields[tag];
return readFromVector(type.getChildren().get(tag), fieldVector, avroSchema.getTypes().get(tag), vectorPos);
default :
throw new HoodieIOException("Unrecognized TypeDescription " + type.toString());
}
}
| 3.26 |
hudi_AvroOrcUtils_addUnionValue_rdh
|
/**
* Match value with its ORC type and add to the union vector at a given position.
*
* @param unionVector
* The vector to store value.
* @param unionChildTypes
* All possible types for the value Object.
* @param avroSchema
* Avro union schema for the value Object.
* @param value
* Object to be added to the unionVector
* @param vectorPos
* The position in the vector where value will be stored at.
* @return succeeded or failed
*/
public static boolean addUnionValue(UnionColumnVector unionVector, List<TypeDescription> unionChildTypes, Schema avroSchema, Object value, int vectorPos) {
int matchIndex = -1;
TypeDescription matchType = null;
Object matchValue = null;
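// Probe each member type of the union in order; the first one whose category matches the runtime type of value wins.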
for (int t = 0; t < unionChildTypes.size(); t++) {
TypeDescription childType = unionChildTypes.get(t);
boolean matches = false;
switch (childType.getCategory()) {
case BOOLEAN :
matches = value instanceof Boolean;
break;
case BYTE :
matches = value instanceof Byte;
break;
case SHORT :
matches = value instanceof Short;
break;
case INT :
matches = value instanceof Integer;
break;
case LONG :
matches = value instanceof Long;
break;
case FLOAT :
matches = value instanceof Float;
break;
case DOUBLE :
matches = value instanceof Double;
break;
case STRING :
case VARCHAR :
case CHAR :
if (value instanceof String) {
matches = true;
matchValue = getUTF8Bytes(((String) (value)));
} else if (value instanceof Utf8) {
matches = true;
matchValue = ((Utf8) (value)).getBytes();
}
break;
case DATE :
matches = value instanceof Date;
break;
case TIMESTAMP :
matches = value instanceof Timestamp;
break;
case BINARY :
matches = (value instanceof byte[]) || (value instanceof GenericData.Fixed);
break;
case DECIMAL :
matches = value instanceof BigDecimal;
break;
case LIST :
matches = value instanceof List;
break;
case MAP :
matches = value instanceof Map;
break;
case STRUCT :
throw new UnsupportedOperationException("Cannot handle STRUCT within UNION.");
case UNION :
List<TypeDescription> children = childType.getChildren();
if (value == null) {
matches = (children == null) || (children.size() == 0);
} else {
matches = addUnionValue(unionVector, children, avroSchema, value, vectorPos);
}
break;
default :
throw new IllegalArgumentException(("Invalid TypeDescription " + childType.getCategory().toString()) + ".");
}
if (matches) {
matchIndex = t;
matchType = childType;
break;
}
}
if ((value == null) && (matchValue != null)) {
value = matchValue;
}
if (matchIndex >= 0) {
unionVector.tags[vectorPos] = matchIndex;
if (value == null) {
unionVector.isNull[vectorPos] = true;
unionVector.noNulls = false;
} else {
addToVector(matchType, unionVector.fields[matchIndex], avroSchema.getTypes().get(matchIndex), value, vectorPos);
}
return true;
} else {
return false;
}
}
| 3.26 |
hudi_AvroOrcUtils_getActualSchemaType_rdh
|
/**
* Returns the actual schema of a field.
*
* All types in ORC are nullable, whereas Avro uses a union that contains the NULL type to imply
* the nullability of an Avro type. To achieve consistency between the Avro and ORC schema,
* non-NULL types are extracted from the union type.
*
* @param unionSchema
* A schema of union type.
* @return An Avro schema that is either NULL or a UNION without NULL fields.
*/
private static Schema getActualSchemaType(Schema unionSchema) {
final List<Schema> nonNullMembers = unionSchema.getTypes().stream().filter(schema -> !Schema.Type.NULL.equals(schema.getType())).collect(Collectors.toList());
if (nonNullMembers.isEmpty()) {
return Schema.create(Type.NULL);
} else if (nonNullMembers.size() == 1) {
return nonNullMembers.get(0);
} else {
return Schema.createUnion(nonNullMembers);
}
}
| 3.26 |
hudi_Triple_compareTo_rdh
|
// -----------------------------------------------------------------------
/**
* <p>
* Compares the triple based on the left element, followed by the middle element, finally the right element. The types
* must be {@code Comparable}.
* </p>
*
* @param other
* the other triple, not null
* @return negative if this is less, zero if equal, positive if greater
*/
@Override
public int compareTo(final Triple<L, M, R> other) {
checkComparable(this);
checkComparable(other);
Comparable thisLeft = ((Comparable) (getLeft()));
Comparable otherLeft = ((Comparable) (other.getLeft()));
if (thisLeft.compareTo(otherLeft) == 0) {
return Pair.of(getMiddle(), getRight()).compareTo(Pair.of(other.getMiddle(), other.getRight()));
} else {
return thisLeft.compareTo(otherLeft);
}
}
| 3.26 |
hudi_Triple_toString_rdh
|
/**
* <p>
* Formats the receiver using the given format.
* </p>
*
* <p>
* This uses {@link java.util.Formattable} to perform the formatting. Three variables may be used to embed the left
* and right elements. Use {@code %1$s} for the left element, {@code %2$s} for the middle and {@code %3$s} for the
* right element. The default format used by {@code toString()} is {@code (%1$s,%2$s,%3$s)}.
* </p>
*
* @param format
* the format string, optionally containing {@code %1$s}, {@code %2$s} and {@code %3$s}, not null
* @return the formatted string, not null
*/
public String toString(final String format) {
return String.format(format, getLeft(), getMiddle(), getRight());
}
| 3.26 |
hudi_Triple_equals_rdh
|
/**
* <p>
* Compares this triple to another based on the three elements.
* </p>
*
* @param obj
* the object to compare to, null returns false
* @return true if the elements of the triple are equal
*/
// ObjectUtils.equals(Object, Object) has been deprecated in 3.2
@SuppressWarnings("deprecation")
@Override
public boolean equals(final Object obj) {
if (obj == this) {
return true;
}
if (obj instanceof Triple<?, ?, ?>) {
final Triple<?, ?, ?> other = ((Triple<?, ?, ?>) (obj));
return (getLeft().equals(other.getLeft()) && getMiddle().equals(other.getMiddle())) && getRight().equals(other.getRight());
}
return false;
}
| 3.26 |
hudi_Triple_of_rdh
|
/**
* <p>
* Obtains an immutable triple from three objects, inferring the generic types.
* </p>
*
* <p>
* This factory allows the triple to be created using inference to obtain the generic types.
* </p>
*
* @param <L>
* the left element type
* @param <M>
* the middle element type
* @param <R>
* the right element type
* @param left
* the left element, may be null
* @param middle
* the middle element, may be null
* @param right
* the right element, may be null
* @return a triple formed from the three parameters, not null
*/
public static <L, M, R> Triple<L, M, R> of(final L left, final M middle, final R right) {
return new ImmutableTriple<L, M, R>(left, middle, right);
}
| 3.26 |
hudi_Triple_hashCode_rdh
|
/**
* <p>
* Returns a suitable hash code.
* </p>
*
* @return the hash code
*/
@Override
public int hashCode() {
return ((getLeft() == null ? 0 : getLeft().hashCode()) ^ (getMiddle() == null ? 0 : getMiddle().hashCode())) ^ (getRight() == null ? 0 : getRight().hashCode());
}
| 3.26 |
hudi_HivePartitionUtil_getPartitionClauseForDrop_rdh
|
/**
* Build the partition clause string, e.g. year=2021/month=06/day=25
*/
public static String getPartitionClauseForDrop(String partition, PartitionValueExtractor partitionValueExtractor, HiveSyncConfig config) {
List<String> partitionValues = partitionValueExtractor.extractPartitionValuesInPath(partition);
ValidationUtils.checkArgument(config.getSplitStrings(META_SYNC_PARTITION_FIELDS).size() == partitionValues.size(), ((("Partition key parts " + config.getSplitStrings(META_SYNC_PARTITION_FIELDS)) + " does not match with partition values ") + partitionValues) + ". Check partition strategy. ");
List<String> partBuilder = new ArrayList<>();
List<String> partitionKeys = config.getSplitStrings(META_SYNC_PARTITION_FIELDS);
for (int i = 0; i < partitionKeys.size(); i++) {
String partitionValue = partitionValues.get(i);
// decode the partition before sync to hive to prevent multiple escapes of HIVE
if (config.getBoolean(META_SYNC_DECODE_PARTITION)) {
// This is a decode operator for encode in KeyGenUtils#getRecordPartitionPath
partitionValue = PartitionPathEncodeUtils.unescapePathName(partitionValue);
}
partBuilder.add((partitionKeys.get(i).toLowerCase() + "=") + partitionValue);
}
return String.join("/", partBuilder);
}
| 3.26 |
hudi_RecordIterators_getParquetRecordIterator_rdh
|
/**
* Factory class for record iterators.
*/
public abstract class RecordIterators {
public static ClosableIterator<RowData> getParquetRecordIterator(InternalSchemaManager internalSchemaManager, boolean utcTimestamp, boolean caseSensitive, Configuration conf, String[] fieldNames, DataType[] fieldTypes, Map<String, Object> partitionSpec, int[] selectedFields, int batchSize, Path path, long splitStart, long splitLength, List<Predicate> predicates) throws IOException {
FilterPredicate filterPredicate = getFilterPredicate(conf);
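// AND together all non-null pushed-down predicate filters into a single Parquet FilterPredicate.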
for (Predicate predicate : predicates) {
FilterPredicate filter = predicate.filter();
if (filter != null) {
filterPredicate = (filterPredicate == null) ? filter : and(filterPredicate, filter);
}
}
UnboundRecordFilter recordFilter = getUnboundRecordFilterInstance(conf);
InternalSchema mergeSchema = internalSchemaManager.getMergeSchema(path.getName());
if (mergeSchema.isEmptySchema()) {
return new ParquetSplitRecordIterator(ParquetSplitReaderUtil.genPartColumnarRowReader(utcTimestamp, caseSensitive, conf, fieldNames, fieldTypes, partitionSpec, selectedFields, batchSize, path, splitStart, splitLength, filterPredicate, recordFilter));
} else {
CastMap castMap = internalSchemaManager.getCastMap(mergeSchema, fieldNames, fieldTypes, selectedFields);
Option<RowDataProjection> castProjection = castMap.toRowDataProjection(selectedFields);
ClosableIterator<RowData> itr = new ParquetSplitRecordIterator(
ParquetSplitReaderUtil.genPartColumnarRowReader(utcTimestamp, caseSensitive, conf,
internalSchemaManager.getMergeFieldNames(mergeSchema, fieldNames), // the reconciled field names
castMap.getFileFieldTypes(), // the reconciled field types
partitionSpec, selectedFields, batchSize, path, splitStart, splitLength, filterPredicate, recordFilter));
if (castProjection.isPresent()) {
return new SchemaEvolvedRecordIterator(itr, castProjection.get());
} else {
return itr;
}
}
| 3.26 |
hudi_Option_fromJavaOptional_rdh
|
/**
* Convert from java.util.Optional.
*
* @param v
* java.util.Optional object
* @param <T>
* type of the value stored in java.util.Optional object
* @return Option
*/
public static <T> Option<T> fromJavaOptional(Optional<T> v) {
return Option.ofNullable(v.orElse(null));
}
| 3.26 |
hudi_Option_orElseGet_rdh
|
/**
* Identical to {@code Optional.orElseGet}
*/
public T orElseGet(Supplier<? extends T> other) {
return val != null ? val : other.get();
}
| 3.26 |
hudi_Option_toJavaOptional_rdh
|
/**
* Convert to java Optional.
*/
public Optional<T> toJavaOptional() {
return Optional.ofNullable(val);
}
| 3.26 |
hudi_Option_orElseThrow_rdh
|
/**
* Identical to {@code Optional.orElseThrow}
*/
public <X extends Throwable> T orElseThrow(Supplier<? extends X> exceptionSupplier) throws X {
if (val != null) {
return val;
} else {
throw exceptionSupplier.get();
}
}
| 3.26 |
hudi_Option_or_rdh
|
/**
* Returns this {@link Option} if not empty, otherwise evaluates provided supplier
* and returns its result
*/
public Option<T> or(Supplier<? extends Option<T>> other) {
return val != null ? this : other.get();
}
/**
* Identical to {@code Optional.orElse}
| 3.26 |
hudi_MarkerCreationDispatchingRunnable_run_rdh
|
/**
* Dispatches the marker creation requests that can be processed to a worker thread for batch
* processing of the requests.
*
* For each marker directory, goes through the following steps:
* (1) find the next available file index for writing. If no file index is available,
* skip the processing of this marker directory;
* (2) fetch the pending marker creation requests for this marker directory. If there is
* no request, skip this marker directory;
* (3) put the marker directory, marker dir state, list of requests futures, and the file index
* to a {@code MarkerDirRequestContext} instance and add the instance to the request context list.
*
* If the request context list is not empty, spins up a worker thread, {@code MarkerCreationBatchingRunnable},
* and passes all the request contexts to the thread for batch processing. The thread is responsible
* for responding to the request futures directly.
*/
@Override
public void run() {
List<BatchedMarkerCreationContext> requestContextList = new ArrayList<>();
// Only fetch pending marker creation requests that can be processed,
// i.e., whose markers can be written to an underlying file
// markerDirStateMap is used in other threads, so we need to ensure thread safety
for (Map.Entry<String, MarkerDirState> entry : markerDirStateMap.entrySet()) {
String markerDir = entry.getKey();
MarkerDirState markerDirState = entry.getValue();
Option<Integer> fileIndex = markerDirState.getNextFileIndexToUse();
if (!fileIndex.isPresent()) {
LOG.debug("All marker files are busy, skip batch processing of create marker requests in " + markerDir);
continue;
}
List<MarkerCreationFuture> futures = markerDirState.fetchPendingMarkerCreationRequests();
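// No pending requests for this marker directory: release the reserved file index and move on.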
if (futures.isEmpty()) {
markerDirState.markFileAsAvailable(fileIndex.get());
continue;
}
requestContextList.add(new BatchedMarkerCreationContext(markerDir, markerDirState, futures, fileIndex.get()));
}
if (requestContextList.size() > 0) {
executorService.execute(new BatchedMarkerCreationRunnable(requestContextList));
}
}
| 3.26 |
hudi_SparkMain_upgradeOrDowngradeTable_rdh
|
/**
* Upgrade or downgrade table.
*
* @param jsc
* instance of {@link JavaSparkContext} to use.
* @param basePath
* base path of the dataset.
* @param toVersion
* version to which upgrade/downgrade to be done.
* @return 0 if success, else -1.
* @throws Exception
*/
protected static int upgradeOrDowngradeTable(JavaSparkContext jsc, String basePath, String toVersion) {
HoodieWriteConfig config = getWriteConfig(basePath, Boolean.parseBoolean(HoodieWriteConfig.ROLLBACK_USING_MARKERS_ENABLE.defaultValue()), false);
HoodieTableMetaClient metaClient = HoodieTableMetaClient.builder().setConf(jsc.hadoopConfiguration()).setBasePath(config.getBasePath()).setLoadActiveTimelineOnLoad(false).setConsistencyGuardConfig(config.getConsistencyGuardConfig()).setLayoutVersion(Option.of(new TimelineLayoutVersion(config.getTimelineLayoutVersion()))).setFileSystemRetryConfig(config.getFileSystemRetryConfig()).build();
HoodieWriteConfig updatedConfig = HoodieWriteConfig.newBuilder().withProps(config.getProps()).forTable(metaClient.getTableConfig().getTableName()).build();
try {
new UpgradeDowngrade(metaClient, updatedConfig, new HoodieSparkEngineContext(jsc), SparkUpgradeDowngradeHelper.getInstance()).run(HoodieTableVersion.valueOf(toVersion), null);
LOG.info(String.format("Table at \"%s\" upgraded / downgraded to version \"%s\".", basePath, toVersion));
return 0;
} catch (Exception e) {
LOG.warn(String.format("Failed: Could not upgrade/downgrade table at \"%s\" to version \"%s\".", basePath, toVersion), e);
return -1;
}
}
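
// Hedged usage sketch (not part of the original class): "SIX" is assumed to be a valid
// HoodieTableVersion name, and the base path is a hypothetical local table.
protected static int upgradeTableExample(JavaSparkContext jsc) {
// Returns 0 on success and -1 on failure, mirroring the contract documented above.
return upgradeOrDowngradeTable(jsc, "/tmp/hudi_trips_cow", "SIX");
}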
| 3.26 |
hudi_QuickstartUtils_generateUniqueUpdates_rdh
|
/**
* Generates new updates, one for each of the existing keys listed above.
*
* @param n
* Number of updates (must be no more than number of existing keys)
* @return list of hoodie record updates
*/
public List<HoodieRecord> generateUniqueUpdates(Integer n) {
if (numExistingKeys < n) {
throw new HoodieException("Data must have been written before performing the update operation");
}
List<Integer> keys = IntStream.range(0, numExistingKeys).boxed().collect(Collectors.toCollection(ArrayList::new));
Collections.shuffle(keys);
String randomString = generateRandomString();
return IntStream.range(0, n).boxed().map(x -> {
try {
return generateUpdateRecord(existingKeys.get(keys.get(x)), randomString);
} catch (IOException e) {
throw new HoodieIOException(e.getMessage(), e);
}
}).collect(Collectors.toList());
}
| 3.26 |
hudi_QuickstartUtils_generateRangeRandomTimestamp_rdh
|
/**
* Generates a random timestamp within the range from {@code daysTillNow} days ago to now.
*/
private static long generateRangeRandomTimestamp(int daysTillNow) {
long maxIntervalMillis = (((daysTillNow * 24) * 60) * 60) * 1000L;
return System.currentTimeMillis() - ((long) (Math.random() * maxIntervalMillis));
}
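
// Worked example (sketch, not part of the original class): for daysTillNow = 7 the maximum
// interval is 7 * 24 * 60 * 60 * 1000L = 604,800,000 ms, so the returned timestamp lies
// uniformly at random within the last 7 days.
private static long sampleTimestampWithinLastWeek() {
return generateRangeRandomTimestamp(7);
}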
| 3.26 |
hudi_QuickstartUtils_generateInsertsStream_rdh
|
/**
* Generates new inserts, uniformly across the partition paths above. It also updates the list of existing keys.
*/
public Stream<HoodieRecord> generateInsertsStream(String randomString, Integer n) {
int currSize = getNumExistingKeys();
return IntStream.range(0, n).boxed().map(i -> {
String partitionPath = partitionPaths[rand.nextInt(partitionPaths.length)];
HoodieKey key = new HoodieKey(UUID.randomUUID().toString(), partitionPath);
existingKeys.put(currSize + i, key);
numExistingKeys++;
try {
return new HoodieAvroRecord(key, generateRandomValue(key, randomString));
} catch (IOException e) {
throw new HoodieIOException(e.getMessage(), e);
}
});
}
| 3.26 |
hudi_QuickstartUtils_generateUpdates_rdh
|
/**
* Generates new updates, randomly distributed across the keys above. There can be duplicates within the returned
* list
*
* @param n
* Number of updates (including dups)
* @return list of hoodie record updates
*/
public List<HoodieRecord> generateUpdates(Integer n) {
if (numExistingKeys == 0) {
throw new HoodieException("Data must have been written before performing the update operation");
}
String randomString = generateRandomString();
return IntStream.range(0, n).boxed().map(x -> {
try {
return generateUpdateRecord(existingKeys.get(rand.nextInt(numExistingKeys)), randomString);
} catch (IOException e) {
throw new HoodieIOException(e.getMessage(), e);
}
}).collect(Collectors.toList());
}
| 3.26 |
hudi_QuickstartUtils_generateRandomValue_rdh
|
/**
* Generates a new avro record of the above schema format, retaining the key if optionally provided. The
* riderDriverSuffix string is a random String to simulate updates by changing the rider driver fields for records
* belonging to the same commit. It is purely used for demo purposes. In the real world, the actual updates are assumed
* to be provided based on the application requirements.
*/
public static OverwriteWithLatestAvroPayload generateRandomValue(HoodieKey key, String riderDriverSuffix) throws IOException {
// The generated timestamp is limited to the range from 7 days ago to now, to avoid generating too many
// partitionPaths when users use the timestamp as the partitionPath field.
GenericRecord rec = generateGenericRecord(key.getRecordKey(), "rider-" + riderDriverSuffix, "driver-" + riderDriverSuffix, generateRangeRandomTimestamp(7));
return new OverwriteWithLatestAvroPayload(Option.of(rec));
}
| 3.26 |
hudi_QuickstartUtils_generateDeletes_rdh
|
/**
* Generates delete records for the passed in rows.
*
* @param rows
* List of {@link Row}s for which delete record need to be generated
* @return list of hoodie records to delete
*/
public List<String> generateDeletes(List<Row> rows) {
// if row.length() == 2, then the record contains "uuid" and "partitionpath" fields, otherwise,
// another field "ts" is available
return rows.stream()
.map(row -> row.length() == 2 ? convertToString(row.getAs("uuid"), row.getAs("partitionpath"), null) : convertToString(row.getAs("uuid"), row.getAs("partitionpath"), row.getAs("ts")))
.filter(os -> os.isPresent()).map(os -> os.get()).collect(Collectors.toList());
}
| 3.26 |
hudi_QuickstartUtils_generateInserts_rdh
|
/**
* Generates new inserts, uniformly across the partition paths above. It also updates the list of existing keys.
*/
public List<HoodieRecord> generateInserts(Integer n) throws IOException {
String randomString = generateRandomString();
return generateInsertsStream(randomString, n).collect(Collectors.toList());
}
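
// Hedged usage sketch (not part of the original class): assumes the enclosing
// QuickstartUtils.DataGenerator exposes a no-arg constructor, as used in the Hudi quickstart docs.
public static void dataGeneratorExample() throws IOException {
DataGenerator dataGen = new DataGenerator();
List<HoodieRecord> inserts = dataGen.generateInserts(10); // 10 fresh inserts; keys are remembered
List<HoodieRecord> updates = dataGen.generateUpdates(5); // 5 updates against the remembered keys
}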
| 3.26 |
hudi_CachingPath_resolveRelativePath_rdh
|
// are already normalized
private static String resolveRelativePath(String basePath, String relativePath) {
StringBuffer sb = new StringBuffer(basePath);
if (basePath.endsWith("/")) {
if (relativePath.startsWith("/")) {
sb.append(relativePath.substring(1));
} else {
sb.append(relativePath);
}
} else if (relativePath.startsWith("/")) {
sb.append(relativePath);
} else {
sb.append('/');
sb.append(relativePath);
}
return sb.toString();
}
| 3.26 |
hudi_CachingPath_concatPathUnsafe_rdh
|
// TODO java-doc
public static CachingPath concatPathUnsafe(Path basePath, String relativePath) {
try {
URI baseURI = basePath.toUri();
// NOTE: {@code normalize} is going to be invoked by {@code Path} ctor, so there's no
// point in invoking it here
String v2 = resolveRelativePath(baseURI.getPath(), relativePath);
URI resolvedURI = new URI(baseURI.getScheme(), baseURI.getAuthority(), v2, baseURI.getQuery(), baseURI.getFragment());
return new CachingPath(resolvedURI);
} catch (URISyntaxException e) {
throw new HoodieException("Failed to instantiate relative path", e);
}
}
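
// Hedged usage sketch (not part of the original class), illustrating how the relative path is
// appended to the base URI; the s3a locations are hypothetical.
public static void concatPathExample() {
Path base = new Path("s3a://bucket/warehouse/hudi_table");
CachingPath full = concatPathUnsafe(base, "2021/01/01");
// full.toString() -> "s3a://bucket/warehouse/hudi_table/2021/01/01"
}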
| 3.26 |
hudi_CachingPath_createRelativePathUnsafe_rdh
|
/**
* Creates path based on the provided *relative* path
*
* NOTE: This is an unsafe version that relies on the caller knowing what they are doing:
* it is not going to work with paths having a scheme (which requires parsing) and is only
* meant to work with relative paths in a few specific cases.
*/
public static CachingPath createRelativePathUnsafe(String relativePath) {
try {
// NOTE: {@code normalize} is going to be invoked by {@code Path} ctor, so there's no
// point in invoking it here
URI uri = new URI(null, null, relativePath, null, null);
return new CachingPath(uri);
} catch (URISyntaxException e) {
throw new HoodieException("Failed to instantiate relative path", e);
}
}
| 3.26 |
hudi_CachingPath_getPathWithoutSchemeAndAuthority_rdh
|
/**
* This is the {@link Path#getPathWithoutSchemeAndAuthority(Path)} counterpart that instantiates
* {@link CachingPath}.
*/
public static Path getPathWithoutSchemeAndAuthority(Path path) {
// This code depends on Path.toString() to remove the leading slash before
// the drive specification on Windows.
return path.isUriPathAbsolute() ? createRelativePathUnsafe(path.toUri().getPath()) : path;
}
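
// Hedged usage sketch (not part of the original class); the hdfs location is hypothetical.
public static void stripSchemeExample() {
Path qualified = new Path("hdfs://namenode:8020/warehouse/hudi_table/2021/01/01");
Path relative = getPathWithoutSchemeAndAuthority(qualified);
// relative.toString() -> "/warehouse/hudi_table/2021/01/01"
}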
| 3.26 |
hudi_BaseSparkUpdateStrategy_getGroupIdsWithUpdate_rdh
|
/**
* Get records matched file group ids.
*
* @param inputRecords
* the records to write, tagged with target file id
* @return the records matched file group ids
*/
protected List<HoodieFileGroupId> getGroupIdsWithUpdate(HoodieData<HoodieRecord<T>> inputRecords) {
return inputRecords.filter(record -> record.getCurrentLocation() != null).map(record -> new HoodieFileGroupId(record.getPartitionPath(), record.getCurrentLocation().getFileId())).distinct().collectAsList();
}
| 3.26 |
hudi_RDDConsistentBucketBulkInsertPartitioner_initializeBucketIdentifier_rdh
|
/**
* Initialize hashing metadata of input records. The metadata of all related partitions will be loaded, and
* the mapping from partition to its bucket identifier is constructed.
*/
private Map<String, ConsistentBucketIdentifier> initializeBucketIdentifier(JavaRDD<HoodieRecord<T>> records) {
return records.map(HoodieRecord::getPartitionPath).distinct().collect().stream().collect(Collectors.toMap(p -> p, this::getBucketIdentifier));
}
| 3.26 |
hudi_RDDConsistentBucketBulkInsertPartitioner_repartitionRecords_rdh
|
/**
* Repartition the records to conform to the bucket index storage layout constraints.
* Specifically, partition the records based on consistent bucket index, which is computed
* using hashing metadata and records' key.
*
* @param records
* Input Hoodie records
* @param outputSparkPartitions
* Not used, the actual parallelism is determined by the bucket number
* @return partitioned records, each partition of data corresponds to a bucket (i.e., file group)
*/
@Override
public JavaRDD<HoodieRecord<T>> repartitionRecords(JavaRDD<HoodieRecord<T>> records, int outputSparkPartitions) {
Map<String, ConsistentBucketIdentifier> partitionToIdentifier = initializeBucketIdentifier(records);
Map<String, Map<String, Integer>> partitionToFileIdPfxIdxMap = m0(partitionToIdentifier);
return doPartition(records, new Partitioner() {
@Override
public int numPartitions() {
return fileIdPfxList.size();
}
@Override
public int getPartition(Object key) {
HoodieKey hoodieKey = ((HoodieKey) (key));
String partition = hoodieKey.getPartitionPath();
ConsistentHashingNode node = partitionToIdentifier.get(partition).getBucket(hoodieKey, indexKeyFields);
return partitionToFileIdPfxIdxMap.get(partition).get(node.getFileIdPrefix());
}
});
}
| 3.26 |
hudi_RDDConsistentBucketBulkInsertPartitioner_m0_rdh
|
/**
* Initialize fileIdPfx for each data partition. Specifically, the following fields are constructed:
* - fileIdPfxList: the Nth element corresponds to the Nth data partition, indicating its fileIdPfx
* - partitionToFileIdPfxIdxMap (return value): (table partition) -> (fileIdPfx -> idx) mapping
* - doAppend: represents if the Nth data partition should use AppendHandler
*
* @param partitionToIdentifier
* Mapping from table partition to bucket identifier
*/
private Map<String, Map<String, Integer>> m0(Map<String, ConsistentBucketIdentifier> partitionToIdentifier) {
Map<String, Map<String, Integer>> partitionToFileIdPfxIdxMap = ConsistentBucketIndexUtils.generatePartitionToFileIdPfxIdxMap(partitionToIdentifier);
int count = 0;
for (ConsistentBucketIdentifier identifier : partitionToIdentifier.values()) {
fileIdPfxList.addAll(identifier.getNodes().stream().map(ConsistentHashingNode::getFileIdPrefix).collect(Collectors.toList()));
Map<String, Integer> fileIdPfxToIdx = new HashMap<>();
for (ConsistentHashingNode node : identifier.getNodes()) {
fileIdPfxToIdx.put(node.getFileIdPrefix(), count++);
}
if (identifier.getMetadata().isFirstCreated()) {
// Create new file group when the hashing metadata is new (i.e., first write to the partition)
doAppend.addAll(Collections.nCopies(identifier.getNodes().size(), false));
} else {
// Child node requires generating a fresh new base file, rather than log file
doAppend.addAll(identifier.getNodes().stream().map(n -> n.getTag() == ConsistentHashingNode.NodeTag.NORMAL).collect(Collectors.toList()));
}
partitionToFileIdPfxIdxMap.put(identifier.getMetadata().getPartitionPath(), fileIdPfxToIdx);
}
ValidationUtils.checkState(fileIdPfxList.size() == partitionToIdentifier.values().stream().mapToInt(ConsistentBucketIdentifier::getNumBuckets).sum(), "Error state after constructing fileId & idx mapping");
return partitionToFileIdPfxIdxMap;
}
| 3.26 |
hudi_RDDConsistentBucketBulkInsertPartitioner_getBucketIdentifier_rdh
|
/**
* Get (construct) the bucket identifier of the given partition
*/
private ConsistentBucketIdentifier getBucketIdentifier(String partition) {
HoodieSparkConsistentBucketIndex index = ((HoodieSparkConsistentBucketIndex) (table.getIndex()));
HoodieConsistentHashingMetadata metadata = ConsistentBucketIndexUtils.loadOrCreateMetadata(this.table, partition, index.getNumBuckets());
if (hashingChildrenNodes.containsKey(partition)) {
metadata.setChildrenNodes(hashingChildrenNodes.get(partition));
}
return new ConsistentBucketIdentifier(metadata);
}
| 3.26 |
hudi_ValidationUtils_checkArgument_rdh
|
/**
* Ensures the truth of an expression, throwing the custom errorMessage otherwise.
*/
public static void checkArgument(final boolean expression, final Supplier<String> errorMessageSupplier) {
checkArgument(expression, errorMessageSupplier.get());
}
| 3.26 |
hudi_ValidationUtils_checkState_rdh
|
/**
* Ensures the truth of an expression involving the state of the calling instance, but not
* involving any parameters to the calling method.
*
* @param expression
* a boolean expression
* @param errorMessage
* - error message
* @throws IllegalStateException
* if {@code expression} is false
*/
public static void checkState(final boolean expression, String errorMessage) {
if (!expression) {
throw new IllegalStateException(errorMessage);
}
}
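
// Hedged usage sketch (not part of the original class), showing both overloads above;
// the messages are illustrative only.
public static void validationExample(int numBuckets, boolean initialized) {
checkArgument(numBuckets > 0, () -> "numBuckets must be positive but was " + numBuckets);
checkState(initialized, "index must be initialized before use");
}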
| 3.26 |
hudi_BaseJavaCommitActionExecutor_getInsertPartitioner_rdh
|
/**
* Provides a partitioner to perform the insert operation, based on the workload profile.
*/
public Partitioner getInsertPartitioner(WorkloadProfile profile) {
return getUpsertPartitioner(profile);
}
| 3.26 |
hudi_BaseJavaCommitActionExecutor_getUpsertPartitioner_rdh
|
/**
* Provides a partitioner to perform the upsert operation, based on the workload profile.
*/
public Partitioner getUpsertPartitioner(WorkloadProfile profile) {
if (profile == null) {
throw new HoodieUpsertException("Need workload profile to construct the upsert partitioner.");
}
return new JavaUpsertPartitioner(profile, context, table, config);
}
| 3.26 |
hudi_HoodieHiveCatalog_isHoodieTable_rdh
|
// ------ tables ------
private Table isHoodieTable(Table hiveTable) {
if ((!hiveTable.getParameters().getOrDefault(SPARK_SOURCE_PROVIDER, "").equalsIgnoreCase("hudi")) && (!isFlinkHoodieTable(hiveTable))) {
throw new HoodieCatalogException(String.format("the %s is not hoodie table", hiveTable.getTableName()));
}
return hiveTable;
}
| 3.26 |
hudi_HoodieHiveCatalog_listDatabases_rdh
|
// ------ databases ------
@Override
public List<String> listDatabases() throws CatalogException {
try {
return client.getAllDatabases();
} catch (TException e) {
throw new HoodieCatalogException(String.format("Failed to list all databases in %s", getName()), e);
}
}
| 3.26 |
hudi_CompactionTask_newBuilder_rdh
|
/**
* Utility to create builder for {@link CompactionTask}.
*
* @return Builder for {@link CompactionTask}.
*/
public static Builder newBuilder() {
return new Builder();
}
| 3.26 |
hudi_HoodieIndex_tagLocation_rdh
|
/**
* Looks up the index and tags each incoming record with a location of a file that contains the row (if it is actually
* present).
*/
@Deprecated
@PublicAPIMethod(maturity = ApiMaturityLevel.DEPRECATED)
public I tagLocation(I records, HoodieEngineContext context, HoodieTable hoodieTable) throws HoodieIndexException {
throw new HoodieNotSupportedException("Deprecated API should not be called");
}
| 3.26 |
hudi_HoodieIndex_requiresTagging_rdh
|
/**
* To indicate if an operation type requires location tagging before writing
*/
@PublicAPIMethod(maturity = ApiMaturityLevel.EVOLVING)
public boolean requiresTagging(WriteOperationType operationType) {
switch (operationType) {
case DELETE:
case DELETE_PREPPED:
case UPSERT:
return true;
default:
return false;
}
}
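
// Hedged usage sketch (not part of the original class): `index` is a hypothetical concrete
// HoodieIndex instance, and WriteOperationType.INSERT is assumed to exist alongside UPSERT.
public static boolean taggingExample(HoodieIndex<?, ?> index) {
boolean upsertNeedsTagging = index.requiresTagging(WriteOperationType.UPSERT); // true per the switch above
boolean insertNeedsTagging = index.requiresTagging(WriteOperationType.INSERT); // false via the default branch
return upsertNeedsTagging && !insertNeedsTagging;
}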
| 3.26 |
hudi_HoodieIndex_updateLocation_rdh
|
/**
* Extracts the location of written records, and updates the index.
*/
@PublicAPIMethod(maturity = ApiMaturityLevel.EVOLVING)
public HoodieData<WriteStatus> updateLocation(HoodieData<WriteStatus> writeStatuses, HoodieEngineContext context, HoodieTable hoodieTable, String instant) throws HoodieIndexException {
return updateLocation(writeStatuses, context, hoodieTable);
}
| 3.26 |
hudi_HoodieBaseParquetWriter_handleParquetBloomFilters_rdh
|
/**
* Once all engines are on Parquet version >= 1.12, we can clean up the reflection hack.
*
* @param parquetWriterbuilder
* @param hadoopConf
*/
protected void handleParquetBloomFilters(ParquetWriter.Builder parquetWriterbuilder, Configuration hadoopConf) {
// inspired from https://github.com/apache/parquet-mr/blob/master/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetOutputFormat.java#L458-L464
hadoopConf.forEach(conf -> {
String key = conf.getKey();
if (key.startsWith(BLOOM_FILTER_ENABLED)) {
String column = key.substring(BLOOM_FILTER_ENABLED.length() + 1, key.length());
try {
Method method = parquetWriterbuilder.getClass().getMethod("withBloomFilterEnabled", String.class, boolean.class);
method.invoke(parquetWriterbuilder, column, Boolean.valueOf(conf.getValue()).booleanValue());
} catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) {
// skip
}
}
if (key.startsWith(BLOOM_FILTER_EXPECTED_NDV)) {
String column = key.substring(BLOOM_FILTER_EXPECTED_NDV.length() + 1, key.length());
try {
Method method = parquetWriterbuilder.getClass().getMethod("withBloomFilterNDV", String.class, long.class);
method.invoke(parquetWriterbuilder, column, Long.valueOf(conf.getValue()).longValue());
} catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) {
// skip
}
}
});
}
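
// Hedged usage sketch (not part of the original class): the "<prefix>#<column>" key layout mirrors
// what the parsing above expects, and "rider" is a hypothetical column name.
private void bloomFilterConfigExample(ParquetWriter.Builder parquetWriterBuilder) {
Configuration hadoopConf = new Configuration();
hadoopConf.set(BLOOM_FILTER_ENABLED + "#rider", "true");
hadoopConf.set(BLOOM_FILTER_EXPECTED_NDV + "#rider", "100000");
handleParquetBloomFilters(parquetWriterBuilder, hadoopConf);
}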
| 3.26 |
hudi_PartialBindVisitor_visitNameReference_rdh
|
/**
* If the attribute cannot be found in the schema, return null directly; visitPredicate
* will handle it.
*/
@Override
public Expression visitNameReference(NameReference attribute) {
Types.Field field = (caseSensitive) ? recordType.fieldByName(attribute.getName()) : recordType.fieldByNameCaseInsensitive(attribute.getName());
if (field == null) {
return null;
}
return new BoundReference(field.fieldId(), field.type());
}
| 3.26 |
hudi_PartialBindVisitor_visitPredicate_rdh
|
/**
* If an expression is null after the accept method, which means it cannot be bound from the
* schema, we directly return {@link Predicates.TrueExpression}.
*/
@Override
public Expression visitPredicate(Predicate predicate) {
if (predicate instanceof Predicates.BinaryComparison) {
Predicates.BinaryComparison binaryExp = ((Predicates.BinaryComparison) (predicate));
Expression left = binaryExp.getLeft().accept(this);
if (left == null) {
return alwaysTrue();
} else {
Expression right = binaryExp.getRight().accept(this);
if (right == null) {
return alwaysTrue();
}
return new Predicates.BinaryComparison(left, binaryExp.getOperator(), right);
}
}
if (predicate instanceof Predicates.Not) {
Expression expr = ((Predicates.Not) (predicate)).child.accept(this);
if (expr instanceof Predicates.TrueExpression) {
return alwaysFalse();
}
if (expr instanceof Predicates.FalseExpression) {
return alwaysTrue();
}
return Predicates.not(expr);
}
if (predicate instanceof Predicates.In) {
Predicates.In in = ((Predicates.In) (predicate));
Expression valueExpression = in.value.accept(this);
if (valueExpression == null) {
return alwaysTrue();
}
List<Expression> validValues = in.validValues.stream().map(validValue -> validValue.accept(this)).collect(Collectors.toList());
if (validValues.stream().anyMatch(Objects::isNull)) {
return alwaysTrue();
}
return Predicates.in(valueExpression, validValues);
}
if (predicate instanceof Predicates.IsNull) {
Predicates.IsNull isNull = ((Predicates.IsNull) (predicate));
return Option.ofNullable(isNull.child.accept(this)).map(expr -> ((Expression) (Predicates.isNull(expr)))).orElse(alwaysTrue());
}
if (predicate instanceof Predicates.IsNotNull) {
Predicates.IsNotNull isNotNull = ((Predicates.IsNotNull) (predicate));
return Option.ofNullable(isNotNull.child.accept(this)).map(expr -> ((Expression) (Predicates.isNotNull(expr)))).orElse(alwaysTrue());
}
if (predicate instanceof Predicates.StringStartsWith) {
Predicates.StringStartsWith startsWith = ((Predicates.StringStartsWith) (predicate));
Expression left = startsWith.getLeft().accept(this);
if (left == null) {
return alwaysTrue();
} else {
Expression right = startsWith.getRight().accept(this);
if (right == null) {
return alwaysTrue();
}
return Predicates.startsWith(left, right);
}
}
if (predicate instanceof Predicates.StringContains) {
Predicates.StringContains contains = ((Predicates.StringContains) (predicate));
Expression left = contains.getLeft().accept(this);
if (left == null) {
return alwaysTrue();
} else {
Expression right = contains.getRight().accept(this);
if (right == null) {
return alwaysTrue();
}
return Predicates.contains(left, right);
}
}
throw new IllegalArgumentException(("The expression " + predicate) + " cannot be visited as predicate");
}
| 3.26 |
hudi_HoodieWriteMetadata_clone_rdh
|
/**
* Clones the write metadata with transformed write statuses.
*
* @param transformedWriteStatuses
* transformed write statuses
* @param <T>
* type of transformed write statuses
* @return Cloned {@link HoodieWriteMetadata<T>} instance
*/
public <T> HoodieWriteMetadata<T> clone(T transformedWriteStatuses) {
HoodieWriteMetadata<T> newMetadataInstance = new HoodieWriteMetadata<>();
newMetadataInstance.setWriteStatuses(transformedWriteStatuses);
if (indexLookupDuration.isPresent()) {
newMetadataInstance.setIndexLookupDuration(indexLookupDuration.get());
}
newMetadataInstance.m1(isCommitted);
newMetadataInstance.setCommitMetadata(commitMetadata);
if (writeStats.isPresent()) {
newMetadataInstance.setWriteStats(writeStats.get());
}
if (indexUpdateDuration.isPresent()) {
newMetadataInstance.setIndexUpdateDuration(indexUpdateDuration.get());
}
if (finalizeDuration.isPresent()) {
newMetadataInstance.setFinalizeDuration(finalizeDuration.get());
}
if (partitionToReplaceFileIds.isPresent()) {
newMetadataInstance.setPartitionToReplaceFileIds(partitionToReplaceFileIds.get());
}
return newMetadataInstance;
}
| 3.26 |
hudi_HoodieRowCreateHandle_close_rdh
|
/**
* Closes the {@link HoodieRowCreateHandle} and returns an instance of {@link WriteStatus} containing the stats and
* status of the writes to this handle.
*
* @return the {@link WriteStatus} containing the stats and status of the writes to this handle.
*/
public WriteStatus close() throws IOException {
fileWriter.close();
HoodieWriteStat stat = writeStatus.getStat();
stat.setPartitionPath(partitionPath);
stat.setNumWrites(writeStatus.getTotalRecords());
stat.setNumDeletes(0);
stat.setNumInserts(writeStatus.getTotalRecords());
stat.setPrevCommit(HoodieWriteStat.NULL_COMMIT);
stat.setFileId(fileId);
stat.setPath(new Path(writeConfig.getBasePath()), path);
long fileSizeInBytes = FSUtils.getFileSize(table.getMetaClient().getFs(), path);
stat.setTotalWriteBytes(fileSizeInBytes);
stat.setFileSizeInBytes(fileSizeInBytes);
stat.setTotalWriteErrors(writeStatus.getTotalErrorRecords());
for (Pair<HoodieRecordDelegate, Throwable> pair : writeStatus.getFailedRecords()) {
LOG.error("Failed to write {}", pair.getLeft(), pair.getRight());
}
HoodieWriteStat.RuntimeStats runtimeStats = new HoodieWriteStat.RuntimeStats();
runtimeStats.setTotalCreateTime(currTimer.endTimer());
stat.setRuntimeStats(runtimeStats);
return writeStatus;
}
| 3.26 |
hudi_HoodieRowCreateHandle_canWrite_rdh
|
/**
* Returns {@code true} if this handle can take in more writes. else {@code false}.
*/
public boolean canWrite() {
return fileWriter.canWrite();
}
| 3.26 |
hudi_HoodieRowCreateHandle_createMarkerFile_rdh
|
/**
* Creates an empty marker file corresponding to storage writer path.
*
* @param partitionPath
* Partition path
*/
private static void createMarkerFile(String partitionPath, String dataFileName, String instantTime, HoodieTable<?, ?, ?, ?> table, HoodieWriteConfig writeConfig) {
WriteMarkersFactory.get(writeConfig.getMarkersType(), table, instantTime).create(partitionPath, dataFileName, IOType.CREATE);
}
| 3.26 |
hudi_HoodieRowCreateHandle_getWriteToken_rdh
|
// TODO extract to utils
private static String getWriteToken(int taskPartitionId, long taskId, long taskEpochId) {
return (((taskPartitionId + "-") + taskId) + "-") + taskEpochId;
}
| 3.26 |
hudi_HoodieRowCreateHandle_write_rdh
|
/**
* Writes an {@link InternalRow} to the underlying HoodieInternalRowFileWriter. Before writing, value for meta columns are computed as required
* and wrapped in {@link HoodieInternalRow}. {@link HoodieInternalRow} is what gets written to HoodieInternalRowFileWriter.
*
* @param row
* instance of {@link InternalRow} that needs to be written to the fileWriter.
* @throws IOException
*/
public void write(InternalRow row) throws IOException {
if (populateMetaFields) {
writeRow(row);
} else {
writeRowNoMetaFields(row);
}
}
| 3.26 |
hudi_HoodieAppendHandle_appendDataAndDeleteBlocks_rdh
|
/**
* Appends data and delete blocks. When appendDeleteBlocks value is false, only data blocks are appended.
* This is done so that all the data blocks are created first and then a single delete block is added.
* Otherwise, multiple small delete blocks can end up being added after each data block.
*/
protected void appendDataAndDeleteBlocks(Map<HeaderMetadataType, String> header, boolean appendDeleteBlocks) {
try {
header.put(HeaderMetadataType.INSTANT_TIME, instantTime);
header.put(HeaderMetadataType.SCHEMA, writeSchemaWithMetaFields.toString());
List<HoodieLogBlock> blocks = new ArrayList<>(2);
if (recordList.size() > 0) {
String keyField = (config.populateMetaFields()) ? HoodieRecord.RECORD_KEY_METADATA_FIELD : hoodieTable.getMetaClient().getTableConfig().getRecordKeyFieldProp();
blocks.add(getBlock(config, pickLogDataBlockFormat(), recordList, shouldWriteRecordPositions, getUpdatedHeader(header, blockSequenceNumber++, attemptNumber, config, addBlockIdentifier()), keyField));
}
if (appendDeleteBlocks && (recordsToDeleteWithPositions.size() > 0)) {
blocks.add(new HoodieDeleteBlock(recordsToDeleteWithPositions, shouldWriteRecordPositions, getUpdatedHeader(header, blockSequenceNumber++, attemptNumber, config, addBlockIdentifier())));
}
if (blocks.size() > 0) {
AppendResult appendResult = writer.appendBlocks(blocks);
processAppendResult(appendResult, recordList);
recordList.clear();
if (appendDeleteBlocks) {
recordsToDeleteWithPositions.clear();
}
}
} catch (Exception e) {
throw new HoodieAppendException("Failed while appending records to " + writer.getLogFile().getPath(), e);
}
}
| 3.26 |
hudi_HoodieAppendHandle_isUpdateRecord_rdh
|
/**
* Returns whether the hoodie record is an UPDATE.
*/
protected boolean isUpdateRecord(HoodieRecord<T> hoodieRecord) {
// If currentLocation is present, then this is an update
return hoodieRecord.getCurrentLocation() != null;
}
| 3.26 |
hudi_HoodieAppendHandle_flushToDiskIfRequired_rdh
|
/**
* Checks if the number of records has reached the set threshold and, if so, flushes the records to disk.
*/
private void flushToDiskIfRequired(HoodieRecord record, boolean appendDeleteBlocks) {
if ((numberOfRecords >= ((int) (maxBlockSize / averageRecordSize))) || ((numberOfRecords % NUMBER_OF_RECORDS_TO_ESTIMATE_RECORD_SIZE) == 0)) {
averageRecordSize = ((long) ((averageRecordSize * 0.8) + (sizeEstimator.sizeEstimate(record) * 0.2)));
}
// Append if max number of records reached to achieve block size
if (numberOfRecords >= (maxBlockSize / averageRecordSize)) {
// Recompute averageRecordSize before writing a new block and update existing value with
// avg of new and old
LOG.info("Flush log block to disk, the current avgRecordSize => " + averageRecordSize);
// Delete blocks will be appended after appending all the data blocks.
appendDataAndDeleteBlocks(header, appendDeleteBlocks);
estimatedNumberOfBytesWritten += averageRecordSize * numberOfRecords;
numberOfRecords = 0;
}
}
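
// Worked example (sketch, not part of the original class) of the size blending above: with a
// previous averageRecordSize of 100 bytes and a fresh estimate of 200 bytes, the updated
// average is 0.8 * 100 + 0.2 * 200 = 120 bytes.
private static long blendedRecordSizeExample() {
long previousAverage = 100L;
long freshEstimate = 200L;
return (long) ((previousAverage * 0.8) + (freshEstimate * 0.2)); // 120
}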
| 3.26 |
hudi_HoodieAppendHandle_needsUpdateLocation_rdh
|
/**
* Whether there is a need to update the record location.
*/
protected boolean needsUpdateLocation() {
return true;
}
| 3.26 |
hudi_HoodieAppendHandle_getFileInstant_rdh
|
/**
* Returns the instant time to use in the log file name.
*/
private String getFileInstant(HoodieRecord<?> record) {
if (config.isConsistentHashingEnabled()) {
// Handle log file only case. This is necessary for the concurrent clustering and writer case (e.g., consistent hashing bucket index).
// NOTE: flink engine use instantTime to mark operation type, check BaseFlinkCommitActionExecutor::execute
String taggedInstant = HoodieRecordUtils.getCurrentLocationInstant(record);
if (HoodieInstantTimeGenerator.isValidInstantTime(taggedInstant)
&& (!instantTime.equals(taggedInstant))) {
// the tagged instant is the pending clustering instant, use this instant in the file name so that
// the dual-write file is shadowed to the reader view.
return taggedInstant;
}
}
return instantTime;
}
| 3.26 |
hudi_HoodieSyncClient_getDroppedPartitionsSince_rdh
|
/**
* Get the set of dropped partitions since the last synced commit.
* If the last sync time is not known, then consider only the active timeline.
* Going through the archived timeline is a costly operation, and it should be avoided unless some start time is given.
*/
public Set<String> getDroppedPartitionsSince(Option<String> lastCommitTimeSynced, Option<String> lastCommitCompletionTimeSynced) {
HoodieTimeline timeline = (lastCommitTimeSynced.isPresent()) ? TimelineUtils.getCommitsTimelineAfter(metaClient, lastCommitTimeSynced.get(), lastCommitCompletionTimeSynced) : metaClient.getActiveTimeline();
return new HashSet<>(TimelineUtils.getDroppedPartitions(timeline));
}
| 3.26 |
hudi_HoodieSyncClient_getPartitionEvents_rdh
|
/**
* Iterate over the storage partitions and find if there are any new partitions that need to be added or updated.
* Generate a list of PartitionEvent based on the changes required.
*/
public List<PartitionEvent> getPartitionEvents(List<Partition> partitionsInMetastore, List<String> writtenPartitionsOnStorage, Set<String> droppedPartitionsOnStorage) {
Map<String, String> paths = getPartitionValuesToPathMapping(partitionsInMetastore);
List<PartitionEvent> events = new ArrayList<>();
for (String storagePartition : writtenPartitionsOnStorage) {
Path storagePartitionPath = FSUtils.getPartitionPath(config.getString(META_SYNC_BASE_PATH), storagePartition);
String fullStoragePartitionPath = Path.getPathWithoutSchemeAndAuthority(storagePartitionPath).toUri().getPath();
// Check if the partition values or if hdfs path is the same
List<String> storagePartitionValues = partitionValueExtractor.extractPartitionValuesInPath(storagePartition);
if (droppedPartitionsOnStorage.contains(storagePartition)) {
events.add(PartitionEvent.newPartitionDropEvent(storagePartition));
} else if (!storagePartitionValues.isEmpty()) {
String storageValue = String.join(", ", storagePartitionValues);
if (!paths.containsKey(storageValue)) {
events.add(PartitionEvent.newPartitionAddEvent(storagePartition));
} else if (!paths.get(storageValue).equals(fullStoragePartitionPath)) {
events.add(PartitionEvent.newPartitionUpdateEvent(storagePartition));
}
}
}
return events;
}
| 3.26 |
hudi_HoodieSyncClient_getAllPartitionPathsOnStorage_rdh
|
/**
* Gets all relative partitions paths in the Hudi table on storage.
*
* @return All relative partitions paths.
*/
public List<String> getAllPartitionPathsOnStorage() {
HoodieLocalEngineContext engineContext = new HoodieLocalEngineContext(metaClient.getHadoopConf());
return FSUtils.getAllPartitionPaths(engineContext, config.getString(META_SYNC_BASE_PATH), config.getBoolean(META_SYNC_USE_FILE_LISTING_FROM_METADATA));
}
| 3.26 |
hudi_HoodieFunctionalIndexMetadata_fromJson_rdh
|
/**
* Deserialize from JSON string to create an instance of this class.
*
* @param json
* Input JSON string.
* @return Deserialized instance of HoodieFunctionalIndexMetadata.
* @throws IOException
* If any deserialization errors occur.
*/
public static HoodieFunctionalIndexMetadata fromJson(String json) throws IOException {
if ((json == null) || json.isEmpty()) {
return new HoodieFunctionalIndexMetadata();
}
return JsonUtils.getObjectMapper().readValue(json, HoodieFunctionalIndexMetadata.class);
}
| 3.26 |
hudi_HoodieFunctionalIndexMetadata_toJson_rdh
|
/**
* Serialize this object to JSON string.
*
* @return Serialized JSON string.
* @throws JsonProcessingException
* If any serialization errors occur.
*/
public String toJson() throws JsonProcessingException {
if (f0.containsKey(null)) {
LOG.info("null index name for the index definition " + f0.get(null));f0.remove(null);
}
return JsonUtils.getObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(this);
}
| 3.26 |