name | code_snippet | score
---|---|---|
hudi_StreamWriteFunction_preWrite_rdh
|
/**
* Sets up before flush: patch up the first record with correct partition path and fileID.
*
* <p>Note: the method may modify the given records {@code records}.
*/
public void preWrite(List<HoodieRecord> records) {
// rewrite the first record with expected fileID
HoodieRecord<?> first = records.get(0);
HoodieRecord<?> record = new HoodieAvroRecord<>(first.getKey(), ((HoodieRecordPayload) (first.getData())), first.getOperation());
HoodieRecordLocation newLoc = new HoodieRecordLocation(first.getCurrentLocation().getInstantTime(), fileID);
record.setCurrentLocation(newLoc);
records.set(0, record);
}
| 3.26 |
hudi_ClusteringUtil_m0_rdh
|
/**
* Schedules clustering plan by condition.
*
* @param conf
* The configuration
* @param writeClient
* The write client
* @param committed
* Whether the instant was committed
*/
public static void m0(Configuration conf, HoodieFlinkWriteClient writeClient, boolean committed) {
validateClusteringScheduling(conf);
if (committed) {
writeClient.scheduleClustering(Option.empty());
}
}
| 3.26 |
hudi_ClusteringUtil_rollbackClustering_rdh
|
/**
* Force rolls back the inflight clustering instant, for handling failure case.
*
* @param table
* The hoodie table
* @param writeClient
* The write client
* @param instantTime
* The instant time
*/
public static void rollbackClustering(HoodieFlinkTable<?> table, HoodieFlinkWriteClient<?> writeClient, String instantTime) {
HoodieInstant inflightInstant = HoodieTimeline.getReplaceCommitInflightInstant(instantTime);
if (table.getMetaClient().reloadActiveTimeline().filterPendingReplaceTimeline().containsInstant(inflightInstant)) {
LOG.warn(("Rollback failed clustering instant: [" + instantTime) + "]");
table.rollbackInflightClustering(inflightInstant, commitToRollback -> writeClient.getTableServiceClient().getPendingRollbackInfo(table.getMetaClient(), commitToRollback, false));
}
}
| 3.26 |
hudi_ClusteringUtil_m1_rdh
|
/**
* Returns whether the given instant {@code instant} is with clustering operation.
*/
public static boolean m1(HoodieInstant instant, HoodieTimeline timeline) {
if (!instant.getAction().equals(HoodieTimeline.REPLACE_COMMIT_ACTION)) {
return false;
}
try {
return TimelineUtils.getCommitMetadata(instant, timeline).getOperationType().equals(WriteOperationType.CLUSTER);
} catch (IOException e) {
throw new HoodieException("Resolve replace commit metadata error for instant: " + instant, e);
}
}
| 3.26 |
hudi_HoodieSimpleIndex_fetchRecordLocationsForAffectedPartitions_rdh
|
/**
* Fetch record locations for passed in {@link HoodieKey}s.
*
* @param hoodieKeys
* {@link HoodieData} of {@link HoodieKey}s for which locations are fetched
* @param context
* instance of {@link HoodieEngineContext} to use
* @param hoodieTable
* instance of {@link HoodieTable} of interest
* @param parallelism
* parallelism to use
* @return {@link HoodiePairData} of {@link HoodieKey} and {@link HoodieRecordLocation}
*/
protected HoodiePairData<HoodieKey, HoodieRecordLocation> fetchRecordLocationsForAffectedPartitions(HoodieData<HoodieKey> hoodieKeys, HoodieEngineContext context, HoodieTable hoodieTable, int parallelism) {
List<String> affectedPartitionPathList = hoodieKeys.map(HoodieKey::getPartitionPath).distinct().collectAsList();
List<Pair<String, HoodieBaseFile>> latestBaseFiles = getLatestBaseFilesForAllPartitions(affectedPartitionPathList, context, hoodieTable);
return fetchRecordLocations(context, hoodieTable, parallelism, latestBaseFiles);
}
| 3.26 |
hudi_HoodieSimpleIndex_tagLocationInternal_rdh
|
/**
* Tags records location for incoming records.
*
* @param inputRecords
* {@link HoodieData} of incoming records
* @param context
* instance of {@link HoodieEngineContext} to use
* @param hoodieTable
* instance of {@link HoodieTable} to use
* @return {@link HoodieData} of records with record locations set
*/
protected <R> HoodieData<HoodieRecord<R>> tagLocationInternal(HoodieData<HoodieRecord<R>> inputRecords, HoodieEngineContext context, HoodieTable hoodieTable) {
if (config.getSimpleIndexUseCaching()) {
inputRecords.persist(new HoodieConfig(config.getProps()).getString(HoodieIndexConfig.SIMPLE_INDEX_INPUT_STORAGE_LEVEL_VALUE));
}
int inputParallelism = inputRecords.getNumPartitions();
int configuredSimpleIndexParallelism = config.getSimpleIndexParallelism();
// NOTE: Target parallelism could be overridden by the config
int targetParallelism = (configuredSimpleIndexParallelism > 0) ? configuredSimpleIndexParallelism : inputParallelism;
HoodiePairData<HoodieKey, HoodieRecord<R>> keyedInputRecords = inputRecords.mapToPair(record -> new ImmutablePair<>(record.getKey(), record));
HoodiePairData<HoodieKey, HoodieRecordLocation> existingLocationsOnTable = fetchRecordLocationsForAffectedPartitions(keyedInputRecords.keys(), context, hoodieTable, targetParallelism);
HoodieData<HoodieRecord<R>> taggedRecords = keyedInputRecords.leftOuterJoin(existingLocationsOnTable).map(entry -> {
final HoodieRecord<R> untaggedRecord = entry.getRight().getLeft();
final Option<HoodieRecordLocation> location = Option.ofNullable(entry.getRight().getRight().orElse(null));
return tagAsNewRecordIfNeeded(untaggedRecord, location);
});
if (config.getSimpleIndexUseCaching()) {
inputRecords.unpersist();
}
return taggedRecords;
}
| 3.26 |
hudi_RequestHandler_registerTimelineAPI_rdh
|
/**
* Register Timeline API calls.
*/
private void registerTimelineAPI() {
app.get(RemoteHoodieTableFileSystemView.LAST_INSTANT, new ViewHandler(ctx -> {
metricsRegistry.add("LAST_INSTANT", 1);
List<InstantDTO> dtos = instantHandler.getLastInstant(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).get());
writeValueAsString(ctx, dtos);
}, false));
app.get(RemoteHoodieTableFileSystemView.TIMELINE, new ViewHandler(ctx -> {
metricsRegistry.add("TIMELINE", 1);
TimelineDTO v15 = instantHandler.getTimeline(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).get());
writeValueAsString(ctx, v15);
}, false));
}
| 3.26 |
hudi_RequestHandler_shouldThrowExceptionIfLocalViewBehind_rdh
|
/**
* Determine whether to throw an exception when local view of table's timeline is behind that of client's view.
*/
private boolean shouldThrowExceptionIfLocalViewBehind(HoodieTimeline localTimeline, String timelineHashFromClient) {
Option<HoodieInstant> lastInstant = localTimeline.lastInstant();
// When performing async clean, we may have one more .clean.completed after lastInstantTs.
// In this case, we do not need to throw an exception.
return ((!lastInstant.isPresent()) || (!lastInstant.get().getAction().equals(HoodieTimeline.CLEAN_ACTION))) || (!localTimeline.findInstantsBefore(lastInstant.get().getTimestamp()).getTimelineHash().equals(timelineHashFromClient));
}
| 3.26 |
hudi_RequestHandler_registerDataFilesAPI_rdh
|
/**
* Register Data-Files API calls.
*/
private void registerDataFilesAPI() {
app.get(RemoteHoodieTableFileSystemView.LATEST_PARTITION_DATA_FILES_URL, new ViewHandler(ctx -> {
metricsRegistry.add("LATEST_PARTITION_DATA_FILES", 1);
List<BaseFileDTO> dtos = dataFileHandler.getLatestDataFiles(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")), ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.LATEST_PARTITION_DATA_FILE_URL, new ViewHandler(ctx -> {
metricsRegistry.add("LATEST_PARTITION_DATA_FILE", 1);
List<BaseFileDTO> dtos = dataFileHandler.getLatestDataFile(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")), ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""), ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.FILEID_PARAM, String.class).getOrThrow(e -> new HoodieException("FILEID is invalid")));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.LATEST_ALL_DATA_FILES, new ViewHandler(ctx -> {
metricsRegistry.add("LATEST_ALL_DATA_FILES", 1);
List<BaseFileDTO> dtos = dataFileHandler.getLatestDataFiles(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.LATEST_DATA_FILES_BEFORE_ON_INSTANT_URL, new ViewHandler(ctx -> {
metricsRegistry.add("LATEST_DATA_FILES_BEFORE_ON_INSTANT", 1);
List<BaseFileDTO> dtos = dataFileHandler.getLatestDataFilesBeforeOrOn(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")), ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""), ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.MAX_INSTANT_PARAM, String.class).getOrThrow(e -> new HoodieException("MAX_INSTANT_PARAM is invalid")));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.ALL_LATEST_BASE_FILES_BEFORE_ON_INSTANT_URL, new ViewHandler(ctx -> {
metricsRegistry.add("ALL_LATEST_BASE_FILES_BEFORE_ON_INSTANT", 1);
Map<String, List<BaseFileDTO>> dtos = dataFileHandler.getAllLatestDataFilesBeforeOrOn(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")), ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.MAX_INSTANT_PARAM, String.class).getOrThrow(e -> new HoodieException("MAX_INSTANT_PARAM is invalid")));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.LATEST_DATA_FILE_ON_INSTANT_URL, new ViewHandler(ctx -> {
metricsRegistry.add("LATEST_DATA_FILE_ON_INSTANT", 1);
List<BaseFileDTO> dtos = dataFileHandler.getLatestDataFileOn(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")), ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""), ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.INSTANT_PARAM, String.class).get(), ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.FILEID_PARAM, String.class).getOrThrow(e -> new HoodieException("FILEID is invalid")));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.ALL_DATA_FILES, new ViewHandler(ctx -> {
metricsRegistry.add("ALL_DATA_FILES", 1);List<BaseFileDTO> dtos = dataFileHandler.getAllDataFiles(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")), ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.LATEST_DATA_FILES_RANGE_INSTANT_URL, new ViewHandler(ctx -> {
metricsRegistry.add("LATEST_DATA_FILES_RANGE_INSTANT", 1);
List<BaseFileDTO> dtos = dataFileHandler.getLatestDataFilesInRange(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")), Arrays.asList(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.INSTANTS_PARAM, String.class).getOrThrow(e -> new HoodieException("INSTANTS_PARAM is invalid")).split(",")));
writeValueAsString(ctx, dtos);
}, true));
}
| 3.26 |
hudi_RequestHandler_isLocalViewBehind_rdh
|
/**
* Determines if local view of table's timeline is behind that of client's view.
*/
private boolean isLocalViewBehind(Context ctx) {
String basePath = ctx.queryParam(RemoteHoodieTableFileSystemView.BASEPATH_PARAM);
String v5 = ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.LAST_INSTANT_TS, String.class).getOrDefault(HoodieTimeline.INVALID_INSTANT_TS);
String timelineHashFromClient = ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.TIMELINE_HASH, String.class).getOrDefault("");
HoodieTimeline localTimeline = viewManager.getFileSystemView(basePath).getTimeline().filterCompletedOrMajorOrMinorCompactionInstants();
if (LOG.isDebugEnabled()) {
LOG.debug((((("Client [ LastTs=" + v5) + ", TimelineHash=") + timelineHashFromClient) + "], localTimeline=") + localTimeline.getInstants());
}
if ((!localTimeline.getInstantsAsStream().findAny().isPresent()) && HoodieTimeline.INVALID_INSTANT_TS.equals(v5)) {
return false;
}
String localTimelineHash = localTimeline.getTimelineHash();
// refresh if timeline hash mismatches
if (!localTimelineHash.equals(timelineHashFromClient)) {
return true;
}
// As a safety check, even if hash is same, ensure instant is present
return !localTimeline.containsOrBeforeTimelineStarts(v5);
}
| 3.26 |
hudi_RequestHandler_registerFileSlicesAPI_rdh
|
/**
* Register File Slices API calls.
*/
private void registerFileSlicesAPI() {
app.get(RemoteHoodieTableFileSystemView.LATEST_PARTITION_SLICES_URL, new ViewHandler(ctx -> {
metricsRegistry.add("LATEST_PARTITION_SLICES", 1);
List<FileSliceDTO> dtos = sliceHandler.getLatestFileSlices(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")), ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.LATEST_PARTITION_SLICES_STATELESS_URL, new ViewHandler(ctx -> {
metricsRegistry.add("LATEST_PARTITION_SLICES_STATELESS", 1);
List<FileSliceDTO> dtos = sliceHandler.getLatestFileSlicesStateless(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")), ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.LATEST_PARTITION_SLICE_URL, new ViewHandler(ctx -> {
metricsRegistry.add("LATEST_PARTITION_SLICE", 1);
List<FileSliceDTO> dtos = sliceHandler.getLatestFileSlice(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")), ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""), ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.FILEID_PARAM, String.class).getOrThrow(e -> new HoodieException("FILEID is invalid")));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.LATEST_PARTITION_UNCOMPACTED_SLICES_URL, new ViewHandler(ctx -> {
metricsRegistry.add("LATEST_PARTITION_UNCOMPACTED_SLICES", 1);
List<FileSliceDTO> dtos = sliceHandler.getLatestUnCompactedFileSlices(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")), ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.ALL_SLICES_URL, new ViewHandler(ctx -> {
metricsRegistry.add("ALL_SLICES", 1);
List<FileSliceDTO> dtos = sliceHandler.getAllFileSlices(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")), ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.LATEST_SLICES_RANGE_INSTANT_URL, new ViewHandler(ctx -> {
metricsRegistry.add("LATEST_SLICE_RANGE_INSTANT", 1);
List<FileSliceDTO> dtos = sliceHandler.getLatestFileSliceInRange(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")), Arrays.asList(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.INSTANTS_PARAM, String.class).getOrThrow(e -> new HoodieException("INSTANTS_PARAM is invalid")).split(",")));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.LATEST_SLICES_MERGED_BEFORE_ON_INSTANT_URL, new ViewHandler(ctx -> {
metricsRegistry.add("LATEST_SLICES_MERGED_BEFORE_ON_INSTANT", 1);
List<FileSliceDTO> dtos = sliceHandler.getLatestMergedFileSlicesBeforeOrOn(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")), ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""), ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.MAX_INSTANT_PARAM, String.class).getOrThrow(e -> new HoodieException("MAX_INSTANT_PARAM is invalid")));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.LATEST_SLICES_BEFORE_ON_INSTANT_URL, new ViewHandler(ctx -> {
metricsRegistry.add("LATEST_SLICES_BEFORE_ON_INSTANT", 1);
List<FileSliceDTO> dtos = sliceHandler.getLatestFileSlicesBeforeOrOn(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")), ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""), ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.MAX_INSTANT_PARAM, String.class).getOrThrow(e -> new HoodieException("MAX_INSTANT_PARAM is invalid")), Boolean.parseBoolean(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.INCLUDE_FILES_IN_PENDING_COMPACTION_PARAM, String.class).getOrThrow(e -> new HoodieException("INCLUDE_FILES_IN_PENDING_COMPACTION_PARAM is invalid"))));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.ALL_LATEST_SLICES_BEFORE_ON_INSTANT_URL, new ViewHandler(ctx -> {
metricsRegistry.add("ALL_LATEST_SLICES_BEFORE_ON_INSTANT", 1);
Map<String, List<FileSliceDTO>> dtos = sliceHandler.getAllLatestFileSlicesBeforeOrOn(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")), ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.MAX_INSTANT_PARAM, String.class).getOrThrow(e -> new HoodieException("MAX_INSTANT_PARAM is invalid")));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.PENDING_COMPACTION_OPS, new ViewHandler(ctx -> {
metricsRegistry.add("PEDING_COMPACTION_OPS", 1);
List<CompactionOpDTO> dtos = sliceHandler.getPendingCompactionOperations(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.PENDING_LOG_COMPACTION_OPS, new ViewHandler(ctx -> {
metricsRegistry.add("PEDING_LOG_COMPACTION_OPS", 1);
List<CompactionOpDTO> dtos = sliceHandler.getPendingLogCompactionOperations(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.ALL_FILEGROUPS_FOR_PARTITION_URL, new ViewHandler(ctx -> {
metricsRegistry.add("ALL_FILEGROUPS_FOR_PARTITION", 1);List<FileGroupDTO> dtos = sliceHandler.getAllFileGroups(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")), ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.ALL_FILEGROUPS_FOR_PARTITION_STATELESS_URL, new ViewHandler(ctx -> {
metricsRegistry.add("ALL_FILEGROUPS_FOR_PARTITION_STATELESS", 1);
List<FileGroupDTO> dtos = sliceHandler.getAllFileGroupsStateless(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")), ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""));
writeValueAsString(ctx, dtos);
}, true));
app.post(RemoteHoodieTableFileSystemView.REFRESH_TABLE, new ViewHandler(ctx -> {
metricsRegistry.add("REFRESH_TABLE", 1);
boolean success = sliceHandler.refreshTable(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")));
writeValueAsString(ctx, success);
}, false));
app.post(RemoteHoodieTableFileSystemView.LOAD_ALL_PARTITIONS_URL, new ViewHandler(ctx -> {
metricsRegistry.add("LOAD_ALL_PARTITIONS", 1);
boolean v38 = sliceHandler.loadAllPartitions(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")));
writeValueAsString(ctx, v38);
}, false));
app.get(RemoteHoodieTableFileSystemView.ALL_REPLACED_FILEGROUPS_BEFORE_OR_ON, new ViewHandler(ctx -> {
metricsRegistry.add("ALL_REPLACED_FILEGROUPS_BEFORE_OR_ON", 1);
List<FileGroupDTO> dtos = sliceHandler.getReplacedFileGroupsBeforeOrOn(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")), ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.MAX_INSTANT_PARAM, String.class).getOrDefault(""), ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.ALL_REPLACED_FILEGROUPS_BEFORE, new ViewHandler(ctx -> {
metricsRegistry.add("ALL_REPLACED_FILEGROUPS_BEFORE", 1);
List<FileGroupDTO> dtos = sliceHandler.getReplacedFileGroupsBefore(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")), ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.MAX_INSTANT_PARAM, String.class).getOrDefault(""), ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.ALL_REPLACED_FILEGROUPS_AFTER_OR_ON, new ViewHandler(ctx -> {
metricsRegistry.add("ALL_REPLACED_FILEGROUPS_AFTER_OR_ON", 1);
List<FileGroupDTO> dtos = sliceHandler.getReplacedFileGroupsAfterOrOn(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")), ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.MIN_INSTANT_PARAM, String.class).getOrDefault(""), ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.ALL_REPLACED_FILEGROUPS_PARTITION, new ViewHandler(ctx -> {
metricsRegistry.add("ALL_REPLACED_FILEGROUPS_PARTITION", 1);
List<FileGroupDTO> dtos = sliceHandler.getAllReplacedFileGroups(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")), ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.PENDING_CLUSTERING_FILEGROUPS, new ViewHandler(ctx -> {
metricsRegistry.add("PENDING_CLUSTERING_FILEGROUPS", 1);
List<ClusteringOpDTO> dtos = sliceHandler.getFileGroupsInPendingClustering(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")));
writeValueAsString(ctx, dtos);
}, true));
}
| 3.26 |
hudi_RequestHandler_jsonifyResult_rdh
|
/**
* Serializes the result into JSON String.
*
* @param ctx
* Javalin context
* @param obj
* object to serialize
* @param metricsRegistry
* {@code Registry} instance for storing metrics
* @param objectMapper
* JSON object mapper
* @param logger
* {@code Logger} instance
* @return JSON String from the input object
* @throws JsonProcessingException
*/
public static String jsonifyResult(Context ctx, Object obj, Registry metricsRegistry, ObjectMapper objectMapper, Logger logger) throws JsonProcessingException {
HoodieTimer timer = HoodieTimer.start();
boolean prettyPrint = ctx.queryParam("pretty") != null;
String v2 = (prettyPrint) ? objectMapper.writerWithDefaultPrettyPrinter().writeValueAsString(obj) : objectMapper.writeValueAsString(obj);
final long jsonifyTime = timer.endTimer();
metricsRegistry.add("WRITE_VALUE_CNT", 1);
metricsRegistry.add("WRITE_VALUE_TIME", jsonifyTime);
if (logger.isDebugEnabled()) {
logger.debug("Jsonify TimeTaken=" + jsonifyTime);
}
return v2;
}
| 3.26 |
hudi_RequestHandler_syncIfLocalViewBehind_rdh
|
/**
* Syncs data-set view if local view is behind.
*/
private boolean syncIfLocalViewBehind(Context ctx) {
String basePath = ctx.queryParam(RemoteHoodieTableFileSystemView.BASEPATH_PARAM);
SyncableFileSystemView view = viewManager.getFileSystemView(basePath);
synchronized(view) {
if (isLocalViewBehind(ctx)) {
String lastKnownInstantFromClient = ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.LAST_INSTANT_TS, String.class).getOrDefault(HoodieTimeline.INVALID_INSTANT_TS);
HoodieTimeline localTimeline = viewManager.getFileSystemView(basePath).getTimeline();
LOG.info((("Syncing view as client passed last known instant " + lastKnownInstantFromClient) + " as last known instant but server has the following last instant on timeline :") + localTimeline.lastInstant());
view.sync();
return true;
}
}
return false;
}
| 3.26 |
hudi_GcsEventsSource_processMessages_rdh
|
/**
* Convert Pubsub messages into a batch of GCS file MetadataMsg objects, skipping those that
* don't need to be processed.
*
* @param receivedMessages
* Pubsub messages
* @return A batch of GCS file metadata messages
*/
private MessageBatch processMessages(List<ReceivedMessage> receivedMessages) {
List<String> messages = new ArrayList<>();
for (ReceivedMessage received : receivedMessages) {
MetadataMessage message = new MetadataMessage(received.getMessage());
String msgStr = message.toStringUtf8();
logDetails(message, msgStr);
f0.add(received.getAckId());
MessageValidity messageValidity = message.shouldBeProcessed();
if (messageValidity.getDecision() == DO_SKIP) {
LOG.info("Skipping message: " + messageValidity.getDescription());
continue;
}
messages.add(msgStr);
}
return new MessageBatch(messages);
}
| 3.26 |
hudi_HoodieTableMetaserverClient_getTableType_rdh
|
/**
*
* @return Hoodie Table Type
*/
public HoodieTableType getTableType() {
return HoodieTableType.valueOf(table.getTableType());
}
| 3.26 |
hudi_HoodieTableMetaserverClient_getActiveTimeline_rdh
|
/**
* Get the active instants as a timeline.
*
* @return Active instants timeline
*/
public synchronized HoodieActiveTimeline getActiveTimeline() {
if (activeTimeline == null) {
activeTimeline = new HoodieMetaserverBasedTimeline(this, metaserverConfig);
}
return activeTimeline;
}
| 3.26 |
hudi_HoodieTableMetaserverClient_reloadActiveTimeline_rdh
|
/**
* Reload ActiveTimeline.
*
* @return Active instants timeline
*/
public synchronized HoodieActiveTimeline reloadActiveTimeline() {
activeTimeline = new HoodieMetaserverBasedTimeline(this, metaserverConfig);
return activeTimeline;
}
| 3.26 |
hudi_HoodieKeyLookupHandle_getLookupResult_rdh
|
/**
* Of all the keys, that were added, return a list of keys that were actually found in the file group.
*/
public HoodieKeyLookupResult getLookupResult() {
if (LOG.isDebugEnabled()) {
LOG.debug((("#The candidate row keys for " + partitionPathFileIDPair) + " => ") + candidateRecordKeys);
}
HoodieBaseFile baseFile = getLatestBaseFile();
List<Pair<String, Long>> matchingKeysAndPositions = HoodieIndexUtils.filterKeysFromFile(new Path(baseFile.getPath()), candidateRecordKeys, hoodieTable.getHadoopConf());
LOG.info(String.format("Total records (%d), bloom filter candidates (%d)/fp(%d), actual matches (%d)", totalKeysChecked, candidateRecordKeys.size(), candidateRecordKeys.size() - matchingKeysAndPositions.size(), matchingKeysAndPositions.size()));
return new HoodieKeyLookupResult(partitionPathFileIDPair.getRight(), partitionPathFileIDPair.getLeft(), baseFile.getCommitTime(), matchingKeysAndPositions);
}
| 3.26 |
hudi_HoodieKeyLookupHandle_addKey_rdh
|
/**
* Adds the key for look up.
*/
public void addKey(String recordKey) {
// check record key against bloom filter of current file & add to possible keys if needed
if (bloomFilter.mightContain(recordKey)) {
if (LOG.isDebugEnabled()) {
LOG.debug((("Record key " + recordKey) + " matches bloom filter in ") + partitionPathFileIDPair);
}
candidateRecordKeys.add(recordKey);
}
totalKeysChecked++;
}
| 3.26 |
hudi_WriteMarkersFactory_get_rdh
|
/**
*
* @param markerType
* the type of markers to use
* @param table
* {@code HoodieTable} instance
* @param instantTime
* current instant time
* @return {@code WriteMarkers} instance based on the {@code MarkerType}
*/
public static WriteMarkers get(MarkerType markerType, HoodieTable table, String instantTime) {
LOG.debug("Instantiated MarkerFiles with marker type: " + markerType.toString());
switch (markerType) {
case DIRECT :
return new DirectWriteMarkers(table, instantTime);
case TIMELINE_SERVER_BASED :
if (!table.getConfig().isEmbeddedTimelineServerEnabled()) {
LOG.warn("Timeline-server-based markers are configured as the marker type " + "but embedded timeline server is not enabled. Falling back to direct markers.");
return new DirectWriteMarkers(table, instantTime);
}
String basePath = table.getMetaClient().getBasePath();
if (StorageSchemes.HDFS.getScheme().equals(FSUtils.getFs(basePath, table.getContext().getHadoopConf().newCopy()).getScheme())) {
LOG.warn((("Timeline-server-based markers are not supported for HDFS: " + "base path ") + basePath) + ". Falling back to direct markers.");
return new DirectWriteMarkers(table, instantTime);
}
return new TimelineServerBasedWriteMarkers(table, instantTime);
default :
throw new HoodieException(("The marker type \"" + markerType.name()) + "\" is not supported.");
}
}
| 3.26 |
hudi_HoodieRealtimeRecordReaderUtils_arrayWritableToString_rdh
|
/**
* Prints a JSON representation of the ArrayWritable for easier debuggability.
*/
public static String arrayWritableToString(ArrayWritable writable) {
if (writable == null) {
return "null";
}
Random random = new Random(2);
StringBuilder builder = new StringBuilder();
Writable[] values = writable.get();
builder.append(((("\"values_" + random.nextDouble()) + "_") + values.length) + "\": {");
int i = 0;
for (Writable w : values) {
if (w instanceof ArrayWritable) {
builder.append(arrayWritableToString(((ArrayWritable) (w)))).append(",");
} else {
builder.append(((("\"value" + i) + "\":\"") + w) + "\"").append(",");
if (w == null) {
builder.append(("\"type" + i) + "\":\"unknown\"").append(",");
} else {
builder.append(((("\"type" + i) + "\":\"") + w.getClass().getSimpleName()) + "\"").append(",");
}
}
i++;
}
builder.deleteCharAt(builder.length() - 1);
builder.append("}");
return builder.toString();
}
| 3.26 |
hudi_HoodieRealtimeRecordReaderUtils_avroToArrayWritable_rdh
|
/**
* Convert the projected read from delta record into an array writable.
*/
public static Writable avroToArrayWritable(Object value, Schema schema) {
return avroToArrayWritable(value, schema, false);
}
| 3.26 |
hudi_HoodieRealtimeRecordReaderUtils_generateProjectionSchema_rdh
|
/**
* Generate a reader schema off the provided writeSchema, to just project out the provided columns.
*/
public static Schema generateProjectionSchema(Schema writeSchema, Map<String, Schema.Field> schemaFieldsMap, List<String> fieldNames) {
/**
* Avro & Presto field names seems to be case sensitive (support fields differing only in case) whereas
* Hive/Impala/SparkSQL(default) are case-insensitive. Spark allows this to be configurable using
* spark.sql.caseSensitive=true
*
* For a RT table setup with no delta-files (for a latest file-slice) -> we translate parquet schema to Avro Here
* the field-name case is dependent on parquet schema. Hive (1.x/2.x/CDH) translate column projections to
* lower-cases
*/
List<Schema.Field> projectedFields = new ArrayList<>();
for (String fn : fieldNames) {
Schema.Field v7 = schemaFieldsMap.get(fn.toLowerCase());
if (v7 == null) {
throw new HoodieException(((("Field " + fn) + " not found in log schema. Query cannot proceed! ") + "Derived Schema Fields: ") + new ArrayList<>(schemaFieldsMap.keySet()));
} else {
projectedFields.add(new Schema.Field(v7.name(), v7.schema(), v7.doc(), v7.defaultVal()));
}
}
Schema projectedSchema = Schema.createRecord(writeSchema.getName(), writeSchema.getDoc(), writeSchema.getNamespace(), writeSchema.isError());
projectedSchema.setFields(projectedFields);
return projectedSchema;
}
| 3.26 |
hudi_HoodieRealtimeRecordReaderUtils_m0_rdh
|
/**
* get the max compaction memory in bytes from JobConf.
*/
public static long m0(JobConf jobConf) {
// jobConf.getMemoryForMapTask() returns in MB
return ((long) (Math.ceil(((Double.parseDouble(ConfigUtils.getRawValueWithAltKeys(jobConf, HoodieMemoryConfig.MAX_MEMORY_FRACTION_FOR_COMPACTION).orElse(HoodieMemoryConfig.DEFAULT_MR_COMPACTION_MEMORY_FRACTION)) * jobConf.getMemoryForMapTask()) * 1024) * 1024L)));
}
| 3.26 |
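A hedged worked example of the arithmetic in the snippet above (the map-task memory value is made up): assuming a compaction memory fraction of 0.6 and jobConf.getMemoryForMapTask() reporting 1024 MB, the method returns ceil(0.6 * 1024 * 1024 * 1024) = 644,245,095 bytes, i.e. roughly 614 MB of compaction memory budget.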
hudi_HoodieRealtimeRecordReaderUtils_addPartitionFields_rdh
|
/**
* Hive implementation of ParquetRecordReader results in partition columns not present in the original parquet file to
* also be part of the projected schema. Hive expects the record reader implementation to return the row in its
* entirety (with un-projected column having null values). As we use writerSchema for this, make sure writer schema
* also includes partition columns
*
* @param schema
* Schema to be changed
*/
public static Schema addPartitionFields(Schema schema, List<String> partitioningFields) {
final Set<String> firstLevelFieldNames = schema.getFields().stream().map(Schema.Field::name).map(String::toLowerCase).collect(Collectors.toSet());
List<String> fieldsToAdd = partitioningFields.stream().map(String::toLowerCase).filter(x -> !firstLevelFieldNames.contains(x)).collect(Collectors.toList());
return appendNullSchemaFields(schema, fieldsToAdd);
}
| 3.26 |
hudi_FileSystemViewCommand_buildFileSystemView_rdh
|
/**
* Build File System View.
*
* @param globRegex
* Path Regex
* @param maxInstant
* Max Instants to be used for displaying file-instants
* @param basefileOnly
* Include only base file view
* @param includeMaxInstant
* Include Max instant
* @param includeInflight
* Include inflight instants
* @param excludeCompaction
* Exclude Compaction instants
* @return
* @throws IOException
*/
private HoodieTableFileSystemView buildFileSystemView(String globRegex, String maxInstant, boolean basefileOnly, boolean includeMaxInstant, boolean includeInflight, boolean excludeCompaction) throws IOException {
HoodieTableMetaClient client = HoodieCLI.getTableMetaClient();
HoodieTableMetaClient metaClient = HoodieTableMetaClient.builder().setConf(client.getHadoopConf()).setBasePath(client.getBasePath()).setLoadActiveTimelineOnLoad(true).build();
FileSystem fs = HoodieCLI.fs;
String globPath = String.format("%s/%s/*", client.getBasePath(), globRegex);
List<FileStatus> statuses = FSUtils.getGlobStatusExcludingMetaFolder(fs, new Path(globPath));
Stream<HoodieInstant> instantsStream;
HoodieTimeline timeline;
if (basefileOnly) {
timeline = metaClient.getActiveTimeline().getCommitTimeline();
} else if (excludeCompaction) {
timeline = metaClient.getActiveTimeline().getCommitsTimeline();
} else {
timeline = metaClient.getActiveTimeline().getWriteTimeline();
}
if (!includeInflight) {
timeline = timeline.filterCompletedInstants();
}
instantsStream = timeline.getInstantsAsStream();
if (!maxInstant.isEmpty()) {
final BiPredicate<String, String> predicate;
if (includeMaxInstant) {
predicate = HoodieTimeline.GREATER_THAN_OR_EQUALS;
} else {
predicate = HoodieTimeline.GREATER_THAN;
}
instantsStream = instantsStream.filter(is -> predicate.test(maxInstant, is.getTimestamp()));
}
HoodieTimeline filteredTimeline = new HoodieDefaultTimeline(instantsStream, ((Function) (metaClient.getActiveTimeline()::getInstantDetails)));
return new HoodieTableFileSystemView(metaClient, filteredTimeline, statuses.toArray(new FileStatus[0]));
}
| 3.26 |
hudi_CollectionUtils_zipToMap_rdh
|
/**
* Zip two lists into a Map. Will throw Exception if the size is different between these two lists.
*/
public static <K, V> Map<K, V> zipToMap(List<K> keys, List<V> values) {
checkArgument(keys.size() == values.size(), "keys' size must be equal with the values' size");
return IntStream.range(0, keys.size()).boxed().collect(Collectors.toMap(keys::get, values::get));
}
| 3.26 |
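A minimal usage sketch for the zipToMap helper above; the lists are hypothetical and imports are omitted to match the surrounding snippets:

// Pairs elements by index; a size mismatch fails the checkArgument precondition.
List<String> keys = Arrays.asList("a", "b", "c");
List<Integer> values = Arrays.asList(1, 2, 3);
Map<String, Integer> zipped = CollectionUtils.zipToMap(keys, values); // {a=1, b=2, c=3}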
hudi_CollectionUtils_elementsEqual_rdh
|
/**
* Determines whether two iterators contain equal elements in the same order. More specifically,
* this method returns {@code true} if {@code iterator1} and {@code iterator2} contain the same
* number of elements and every element of {@code iterator1} is equal to the corresponding element
* of {@code iterator2}.
*
* <p>Note that this will modify the supplied iterators, since they will have been advanced some
* number of elements forward.
*/
public static boolean elementsEqual(Iterator<?> iterator1, Iterator<?> iterator2) {
while (iterator1.hasNext()) {
if (!iterator2.hasNext()) {
return false;
}
Object o1 = iterator1.next();
Object o2 = iterator2.next();
if (!Objects.equals(o1, o2)) {
return false;
}
}
return !iterator2.hasNext();
}
| 3.26 |
hudi_CollectionUtils_toStream_rdh
|
/**
* Collects provided {@link Iterator} to a {@link Stream}
*/
public static <T> Stream<T> toStream(Iterator<T> iterator) {
return StreamSupport.stream(Spliterators.spliteratorUnknownSize(iterator, Spliterator.ORDERED), false);
}
| 3.26 |
hudi_CollectionUtils_combine_rdh
|
/**
* Combines provided {@link Map}s into one, returning new instance of {@link HashMap}.
*
* NOTE: That values associated with overlapping keys from the second map, will override
* values from the first one
*/
public static <K, V> HashMap<K, V> combine(Map<K, V> one, Map<K, V> another, BiFunction<V, V, V> merge) {
HashMap<K, V> combined = new HashMap<>(one.size() + another.size());
combined.putAll(one);
another.forEach((k, v) -> combined.merge(k, v, merge));
return combined;
}
| 3.26 |
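A brief, hedged sketch of the merge-aware combine overload above (map contents are hypothetical); overlapping keys are resolved by the supplied BiFunction rather than by a plain overwrite:

HashMap<String, Integer> first = new HashMap<>(Collections.singletonMap("count", 1));
HashMap<String, Integer> second = new HashMap<>(Collections.singletonMap("count", 2));
// The overlapping key "count" is merged with Integer::sum, yielding {count=3}.
HashMap<String, Integer> merged = CollectionUtils.combine(first, second, Integer::sum);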
hudi_CollectionUtils_m0_rdh
|
/**
* Combines provided array and an element into a new array
*/
@SuppressWarnings("unchecked")
public static <T> T[] m0(T[] array, T elem) {
T[] combined = ((T[]) (Array.newInstance(array.getClass().getComponentType(), array.length + 1)));
System.arraycopy(array, 0, combined, 0, array.length);
combined[array.length] = elem;
return combined;
}
| 3.26 |
hudi_CollectionUtils_copy_rdh
|
/**
* Makes a copy of provided {@link Properties} object
*/
public static Properties copy(Properties props) {
Properties copy = new Properties();
copy.putAll(props);
return copy;
}
| 3.26 |
hudi_CollectionUtils_reduce_rdh
|
/**
* Reduces provided {@link Collection} using provided {@code reducer} applied to
* every element of the collection like following
*
* {@code reduce(reduce(reduce(identity, e1), e2), ...)}
*
* @param c
* target collection to be reduced
* @param identity
* element for reducing to start from
* @param reducer
* actual reducing operator
* @return result of the reduction of the collection using reducing operator
*/
public static <T, U> U reduce(Collection<T> c, U identity, BiFunction<U, T, U> reducer) {
return c.stream().sequential().reduce(identity, reducer, (a, b) -> {
throw new UnsupportedOperationException();
});
}
| 3.26 |
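To make the left-fold semantics above concrete, a small hedged example with illustrative values; because the stream is sequential, the throwing combiner is never invoked:

List<Integer> numbers = Arrays.asList(1, 2, 3, 4);
// Reduces left-to-right starting from the identity: (((0 + 1) + 2) + 3) + 4 = 10.
Integer sum = CollectionUtils.reduce(numbers, 0, Integer::sum);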
hudi_CollectionUtils_emptyProps_rdh
|
/**
* Returns an empty {@code Properties} instance. The props instance is a singleton,
* it should not be modified in any case.
*/
public static Properties emptyProps() {
return EMPTY_PROPERTIES;
}
| 3.26 |
hudi_CollectionUtils_tail_rdh
|
/**
* Returns last element of the array of {@code T}
*/
public static <T> T tail(T[] ts) {
checkArgument(ts.length > 0);
return ts[ts.length - 1];
}
| 3.26 |
hudi_CollectionUtils_diff_rdh
|
/**
* Returns difference b/w {@code one} {@link List} of elements and {@code another}
*
* NOTE: This is less optimal counterpart to {@link #diff(Collection, Collection)}, accepting {@link List}
* as a holding collection to support duplicate elements use-cases
*/
public static <E> List<E> diff(Collection<E> one, Collection<E> another) {
List<E> diff = new ArrayList<>(one);
diff.removeAll(another);
return diff;
}
| 3.26 |
hudi_CollectionUtils_diffSet_rdh
|
/**
* Returns difference b/w {@code one} {@link Collection} of elements and {@code another}
* The elements in collection {@code one} are also duplicated and returned as a {@link Set}.
*/
public static <E> Set<E> diffSet(Collection<E> one, Set<E> another) {
Set<E> diff = new HashSet<>(one);
diff.removeAll(another);
return diff;
}
| 3.26 |
hudi_WriteOperationType_m0_rdh
|
/**
* Whether the operation changes the dataset.
*/
public static boolean m0(WriteOperationType operation) {
return (((((((((operation == WriteOperationType.INSERT) || (operation == WriteOperationType.UPSERT)) || (operation == WriteOperationType.UPSERT_PREPPED)) || (operation == WriteOperationType.DELETE)) || (operation == WriteOperationType.DELETE_PREPPED)) || (operation == WriteOperationType.BULK_INSERT)) || (operation == WriteOperationType.DELETE_PARTITION)) || (operation == WriteOperationType.INSERT_OVERWRITE)) || (operation == WriteOperationType.INSERT_OVERWRITE_TABLE)) || (operation == WriteOperationType.BOOTSTRAP);
}
| 3.26 |
hudi_WriteOperationType_fromValue_rdh
|
/**
* Convert string value to WriteOperationType.
*/
public static WriteOperationType fromValue(String value) {
switch (value.toLowerCase(Locale.ROOT)) {
case "insert" :
return INSERT;
case "insert_prepped" :
return INSERT_PREPPED;
case "upsert" :
return UPSERT;
case "upsert_prepped" :
return UPSERT_PREPPED;
case "bulk_insert" :
return BULK_INSERT;
case "bulk_insert_prepped" :
return BULK_INSERT_PREPPED;
case "delete" :
return DELETE;
case "delete_prepped" :
return DELETE_PREPPED;
case "insert_overwrite" :
return INSERT_OVERWRITE;
case "delete_partition" :
return DELETE_PARTITION;
case "insert_overwrite_table" :
return INSERT_OVERWRITE_TABLE;
case "cluster" :
return CLUSTER;
case "compact" :
return COMPACT;
case "index" :
return INDEX;
case "alter_schema" :
return ALTER_SCHEMA;
case "unknown" :
return UNKNOWN;
default:
throw new HoodieException("Invalid value of Type.");
}
}
| 3.26 |
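A short, hedged usage sketch of fromValue with hypothetical inputs, showing the case-insensitive lookup and the failure path:

WriteOperationType op = WriteOperationType.fromValue("UPSERT"); // lower-cased before matching, returns UPSERT
// fromValue("merge") is not a recognized value and would throw HoodieException.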
hudi_WriteOperationType_value_rdh
|
/**
* Getter for value.
*
* @return string form of WriteOperationType
*/
public String value() {
return value;
}
| 3.26 |
hudi_KafkaConnectHdfsProvider_buildCheckpointStr_rdh
|
/**
* Convert map contains max offset of each partition to string.
*
* @param topic
* Topic name
* @param checkpoint
* Map with partition as key and max offset as value
* @return Checkpoint string
*/
private static String buildCheckpointStr(final String topic, final HashMap<Integer, Integer> checkpoint) {
final StringBuilder checkpointStr = new StringBuilder();
checkpointStr.append(topic);
for (int i = 0; i < checkpoint.size(); ++i) {
checkpointStr.append(",").append(i).append(":").append(checkpoint.get(i));
}
return checkpointStr.toString();
}
| 3.26 |
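A hedged worked example of the checkpoint string built above (topic and offsets are made up): for topic "impressions" with max offsets {0: 100, 1: 250}, the builder produces "impressions,0:100,1:250". Since the loop walks partition ids 0..size-1, it assumes a contiguous partition range; a missing partition id would be appended with a null offset.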
hudi_KafkaConnectHdfsProvider_listAllFileStatus_rdh
|
/**
* List file status recursively.
*
* @param curPath
* Current Path
* @param filter
* PathFilter
* @return All file status match kafka connect naming convention
* @throws IOException
*/
private ArrayList<FileStatus> listAllFileStatus(Path curPath, KafkaConnectPathFilter filter) throws IOException {
ArrayList<FileStatus> allFileStatus = new ArrayList<>();
FileStatus[] fileStatus = this.fs.listStatus(curPath);
for (FileStatus status : fileStatus) {
if (status.isDirectory() && filter.acceptDir(status.getPath())) {
allFileStatus.addAll(listAllFileStatus(status.getPath(), filter));
} else if (filter.accept(status.getPath())) {
allFileStatus.add(status);
}
}
return allFileStatus;
}
| 3.26 |
hudi_HoodieRecordUtils_createRecordMerger_rdh
|
/**
* Instantiate a given class with a record merge.
*/
public static HoodieRecordMerger createRecordMerger(String basePath, EngineType engineType, List<String> mergerClassList, String recordMergerStrategy) {
if (mergerClassList.isEmpty() || HoodieTableMetadata.isMetadataTable(basePath)) {
return HoodieAvroRecordMerger.INSTANCE;
} else {
return mergerClassList.stream().map(clazz -> loadRecordMerger(clazz)).filter(Objects::nonNull).filter(merger -> merger.getMergingStrategy().equals(recordMergerStrategy)).filter(merger -> recordTypeCompatibleEngine(merger.getRecordType(), engineType)).findFirst().orElse(HoodieAvroRecordMerger.INSTANCE);
}
}
| 3.26 |
hudi_HoodieRecordUtils_loadRecordMerger_rdh
|
/**
* Instantiate a given class with a record merge.
*/
public static HoodieRecordMerger loadRecordMerger(String mergerClass) {
try {
HoodieRecordMerger recordMerger = ((HoodieRecordMerger) (INSTANCE_CACHE.get(mergerClass)));
if (null == recordMerger) {
synchronized(HoodieRecordMerger.class) {
recordMerger = ((HoodieRecordMerger) (INSTANCE_CACHE.get(mergerClass)));
if (null == recordMerger) {
recordMerger = ((HoodieRecordMerger) (ReflectionUtils.loadClass(mergerClass, new Object[]{ })));
INSTANCE_CACHE.put(mergerClass, recordMerger);
}
}
}
return recordMerger;
} catch (HoodieException e) {
throw new HoodieException("Unable to instantiate hoodie merge class ", e);
}
}
| 3.26 |
hudi_S3EventsSource_fetchNextBatch_rdh
|
/**
* Fetches next events from the queue.
*
* @param lastCkptStr
* The last checkpoint instant string, empty if first run.
* @param sourceLimit
* Limit on the size of data to fetch. For {@link S3EventsSource},
* {@link S3SourceConfig#S3_SOURCE_QUEUE_MAX_MESSAGES_PER_BATCH} is used.
* @return A pair of dataset of event records and the next checkpoint instant string
*/
@Override
public Pair<Option<Dataset<Row>>, String> fetchNextBatch(Option<String> lastCkptStr, long sourceLimit) {
Pair<List<String>, String> selectPathsWithLatestSqsMessage = pathSelector.getNextEventsFromQueue(sqs, lastCkptStr, processedMessages);
if (selectPathsWithLatestSqsMessage.getLeft().isEmpty()) {
return Pair.of(Option.empty(), selectPathsWithLatestSqsMessage.getRight());
} else {
Dataset<String> eventRecords = sparkSession.createDataset(selectPathsWithLatestSqsMessage.getLeft(), Encoders.STRING());
StructType sourceSchema = UtilHelpers.getSourceSchema(schemaProvider);
if (sourceSchema != null) {
return Pair.of(Option.of(sparkSession.read().schema(sourceSchema).json(eventRecords)), selectPathsWithLatestSqsMessage.getRight());
} else {
return Pair.of(Option.of(sparkSession.read().json(eventRecords)), selectPathsWithLatestSqsMessage.getRight());
}
}
}
| 3.26 |
hudi_HoodieTableFileSystemView_fetchLatestFileSlicesIncludingInflight_rdh
|
/**
* Get the latest file slices for a given partition including the inflight ones.
*
* @param partitionPath
* @return Stream of latest {@link FileSlice} in the partition path.
*/public Stream<FileSlice> fetchLatestFileSlicesIncludingInflight(String partitionPath) {
return fetchAllStoredFileGroups(partitionPath).map(HoodieFileGroup::getLatestFileSlicesIncludingInflight).filter(Option::isPresent).map(Option::get);
}
| 3.26 |
hudi_HoodieTableFileSystemView_readObject_rdh
|
/**
* This method is only used when this object is deserialized in a spark executor.
*
* @deprecated
*/
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
in.defaultReadObject();
}
| 3.26 |
hudi_JdbcSource_m0_rdh
|
/**
* Does a full scan on the RDBMS data source.
*
* @return The {@link Dataset} after running full scan.
*/
private Dataset<Row> m0(long sourceLimit) {
final String ppdQuery = "(%s) rdbms_table";
final SqlQueryBuilder queryBuilder = SqlQueryBuilder.select("*").from(getStringWithAltKeys(props, JdbcSourceConfig.RDBMS_TABLE_NAME));
if (sourceLimit > 0) {
URI jdbcURI = URI.create(getStringWithAltKeys(props, JdbcSourceConfig.URL).substring(URI_JDBC_PREFIX.length()));
if (DB_LIMIT_CLAUSE.contains(jdbcURI.getScheme())) {
if (containsConfigProperty(props, JdbcSourceConfig.INCREMENTAL_COLUMN)) {
queryBuilder.orderBy(getStringWithAltKeys(props, JdbcSourceConfig.INCREMENTAL_COLUMN)).limit(sourceLimit);
} else {
queryBuilder.limit(sourceLimit);
}
}
}
String query = String.format(ppdQuery, queryBuilder.toString());
return validatePropsAndGetDataFrameReader(sparkSession, props).option(Config.RDBMS_TABLE_PROP, query).load();
}
| 3.26 |
hudi_JdbcSource_fetch_rdh
|
/**
* Decide to do a full RDBMS table scan or an incremental scan based on the lastCkptStr. If previous checkpoint
* value exists then we do an incremental scan with a PPD query or else we do a full scan. In certain cases where the
* incremental query fails, we fallback to a full scan.
*
* @param lastCkptStr
* Last checkpoint.
* @return The pair of {@link Dataset} and current checkpoint.
*/
private Pair<Option<Dataset<Row>>, String> fetch(Option<String> lastCkptStr, long sourceLimit) {
Dataset<Row> dataset;
if (lastCkptStr.isPresent() && (!StringUtils.isNullOrEmpty(lastCkptStr.get()))) {
dataset = incrementalFetch(lastCkptStr, sourceLimit);
} else {
LOG.info("No checkpoint references found. Doing a full rdbms table fetch");dataset = m0(sourceLimit);
}
dataset.persist(StorageLevel.fromString(getStringWithAltKeys(props, JdbcSourceConfig.STORAGE_LEVEL, "MEMORY_AND_DISK_SER")));
boolean isIncremental = getBooleanWithAltKeys(props, JdbcSourceConfig.IS_INCREMENTAL);
Pair<Option<Dataset<Row>>, String> pair = Pair.of(Option.of(dataset), checkpoint(dataset, isIncremental, lastCkptStr));
dataset.unpersist();
return pair;
}
| 3.26 |
hudi_JdbcSource_addExtraJdbcOptions_rdh
|
/**
* Accepts spark JDBC options from the user in terms of EXTRA_OPTIONS adds them to {@link DataFrameReader} Example: In
* a normal spark code you would do something like: session.read.format('jdbc') .option(fetchSize,1000)
* .option(timestampFormat,"yyyy-mm-dd hh:mm:ss")
* <p>
* The way to pass these properties to HUDI is through the config file. Any property starting with
* hoodie.streamer.jdbc.extra.options. will be added.
* <p>
* Example: hoodie.streamer.jdbc.extra.options.fetchSize=100
* hoodie.streamer.jdbc.extra.options.upperBound=1
* hoodie.streamer.jdbc.extra.options.lowerBound=100
*
* @param properties
* The JDBC connection properties and data source options.
* @param dataFrameReader
* The {@link DataFrameReader} to which data source options will be added.
*/
private static void addExtraJdbcOptions(TypedProperties properties, DataFrameReader dataFrameReader) {
Set<Object> objects = properties.keySet();
for (Object property : objects) {
String prop = property.toString();
Option<String> keyOption = stripPrefix(prop, JdbcSourceConfig.EXTRA_OPTIONS);
if (keyOption.isPresent()) {
String key = keyOption.get();
String value = properties.getString(prop);
if (!StringUtils.isNullOrEmpty(value)) {
LOG.info(String.format("Adding %s -> %s to jdbc options", key, value));
dataFrameReader.option(key, value);
}
}
}
}
| 3.26 |
hudi_JdbcSource_incrementalFetch_rdh
|
/**
* Does an incremental scan with PPQ query prepared on the bases of previous checkpoint.
*
* @param lastCheckpoint
* Last checkpoint.
* Note that the records fetched will be exclusive of the last checkpoint (i.e. incremental column value > lastCheckpoint).
* @return The {@link Dataset} after incremental fetch from RDBMS.
*/
private Dataset<Row> incrementalFetch(Option<String> lastCheckpoint, long sourceLimit) {
try {
final String ppdQuery = "(%s) rdbms_table";
final SqlQueryBuilder queryBuilder = SqlQueryBuilder.select("*").from(getStringWithAltKeys(props, JdbcSourceConfig.RDBMS_TABLE_NAME)).where(String.format(" %s > '%s'", getStringWithAltKeys(props, JdbcSourceConfig.INCREMENTAL_COLUMN), lastCheckpoint.get()));
if (sourceLimit > 0) {
URI jdbcURI = URI.create(getStringWithAltKeys(props, JdbcSourceConfig.URL).substring(URI_JDBC_PREFIX.length()));
if (DB_LIMIT_CLAUSE.contains(jdbcURI.getScheme())) {
queryBuilder.orderBy(getStringWithAltKeys(props, JdbcSourceConfig.INCREMENTAL_COLUMN)).limit(sourceLimit);
}
}
String query = String.format(ppdQuery, queryBuilder.toString());
LOG.info("PPD QUERY: " + query);LOG.info(String.format("Referenced last checkpoint and prepared new predicate pushdown query for jdbc pull %s", query));
return validatePropsAndGetDataFrameReader(sparkSession, props).option(Config.RDBMS_TABLE_PROP, query).load();
} catch (Exception e) {
LOG.error("Error while performing an incremental fetch. Not all database support the PPD query we generate to do an incremental scan", e);
if (containsConfigProperty(props, JdbcSourceConfig.FALLBACK_TO_FULL_FETCH) && getBooleanWithAltKeys(props, JdbcSourceConfig.FALLBACK_TO_FULL_FETCH)) {
LOG.warn("Falling back to full scan.");
return m0(sourceLimit);
}
throw e;
}
}
| 3.26 |
hudi_SimpleExecutor_execute_rdh
|
/**
* Consuming records from input iterator directly without any producers and inner message queue.
*/
@Override
public E execute() {
try {
LOG.info("Starting consumer, consuming records from the records iterator directly");
while (itr.hasNext()) {
O payload = transformFunction.apply(itr.next());
consumer.consume(payload);
}
return consumer.finish();
} catch (Exception e) {
LOG.error("Failed consuming records", e);
throw new HoodieException(e);
}
}
| 3.26 |
hudi_BloomIndexFileInfo_isKeyInRange_rdh
|
/**
* Does the given key fall within the range (inclusive).
*/
public boolean isKeyInRange(String recordKey) {
return (Objects.requireNonNull(f0).compareTo(recordKey) <= 0) && (Objects.requireNonNull(maxRecordKey).compareTo(recordKey) >= 0);
}
| 3.26 |
hudi_HoodieCDCLogRecordIterator_closeReader_rdh
|
// -------------------------------------------------------------------------
// Utilities
// -------------------------------------------------------------------------
private void closeReader() throws IOException {
if (reader != null) {
reader.close();
reader = null;
}
}
| 3.26 |
hudi_SanitizationUtils_parseSanitizedAvroSchemaNoThrow_rdh
|
/**
* Sanitizes illegal field names in the schema using recursive calls to transformMap and transformList
*/
private static Option<Schema> parseSanitizedAvroSchemaNoThrow(String schemaStr, String invalidCharMask) {
try {
OM.enable(JsonParser.Feature.ALLOW_COMMENTS);
Map<String, Object> objMap = OM.readValue(schemaStr, Map.class);
Map<String, Object> modifiedMap = transformMap(objMap, invalidCharMask);
return Option.of(new Schema.Parser().parse(OM.writeValueAsString(modifiedMap)));
} catch (Exception ex) {
return Option.empty();
}
}
| 3.26 |
hudi_SanitizationUtils_sanitizeStructTypeForAvro_rdh
|
// TODO(HUDI-5256): Refactor this to use InternalSchema when it is ready.
private static StructType sanitizeStructTypeForAvro(StructType structType, String invalidCharMask) {
StructType sanitizedStructType = new StructType();
StructField[] structFields = structType.fields();
for (StructField s : structFields) {
DataType currFieldDataTypeSanitized = sanitizeDataTypeForAvro(s.dataType(), invalidCharMask);
StructField structFieldCopy = new StructField(HoodieAvroUtils.sanitizeName(s.name(), invalidCharMask), currFieldDataTypeSanitized, s.nullable(), s.metadata());
sanitizedStructType = sanitizedStructType.add(structFieldCopy);
}
return sanitizedStructType;
}
| 3.26 |
hudi_SanitizationUtils_transformList_rdh
|
/**
* Parse list for sanitizing
*
* @param src
* - deserialized schema
* @param invalidCharMask
* - mask to replace invalid characters with
*/
private static List<Object> transformList(List<Object> src, String invalidCharMask) {
return src.stream().map(obj -> {
if (obj instanceof List) {
return transformList(((List<Object>) (obj)), invalidCharMask);
} else if (obj instanceof Map) {
return transformMap(((Map<String, Object>) (obj)), invalidCharMask);
} else {
return obj;
}
}).collect(Collectors.toList());
}
| 3.26 |
hudi_SanitizationUtils_transformMap_rdh
|
/**
* Parse map for sanitizing. If we have a string in the map, and it is an avro field name key, then we sanitize the name.
* Otherwise, we keep recursively going through the schema.
*
* @param src
* - deserialized schema
* @param invalidCharMask
* - mask to replace invalid characters with
*/
private static Map<String, Object> transformMap(Map<String, Object> src, String invalidCharMask) {
return src.entrySet().stream().map(kv -> {
if (kv.getValue() instanceof List) {
return Pair.of(kv.getKey(), transformList(((List<Object>) (kv.getValue())), invalidCharMask));
} else if (kv.getValue() instanceof Map) {
return Pair.of(kv.getKey(), transformMap(((Map<String, Object>) (kv.getValue())), invalidCharMask));
} else if (kv.getValue() instanceof String) {
String currentStrValue = ((String) (kv.getValue()));
if (kv.getKey().equals(AVRO_FIELD_NAME_KEY)) {
return Pair.of(kv.getKey(), HoodieAvroUtils.sanitizeName(currentStrValue, invalidCharMask));
}
return Pair.of(kv.getKey(), currentStrValue);
} else {
return Pair.of(kv.getKey(), kv.getValue());
}
}).collect(Collectors.toMap(Pair::getLeft, Pair::getRight));
}
| 3.26 |
hudi_HiveMetastoreBasedLockProvider_acquireLock_rdh
|
// This API is exposed for tests and not intended to be used elsewhere
public boolean acquireLock(long time, TimeUnit unit, final LockComponent component) throws InterruptedException, ExecutionException, TimeoutException, TException {
ValidationUtils.checkArgument(this.lock == null, ALREADY_ACQUIRED.name());
acquireLockInternal(time, unit, component);
return (this.lock != null) && (this.lock.getState() == LockState.ACQUIRED);
}
| 3.26 |
hudi_HiveMetastoreBasedLockProvider_close_rdh
|
// NOTE: HiveMetastoreClient does not implement AutoCloseable. Additionally, we cannot call close() after unlock()
// because if there are multiple operations started from the same WriteClient (via multiple threads), closing the
// hive client causes all other threads who may have already initiated the tryLock() to fail since the
// HiveMetastoreClient is shared.
@Override
public void close() {
try {
if (lock != null) {
f0.unlock(lock.getLockid());
lock = null;
}
Hive.closeCurrent();
} catch (Exception e) {
LOG.error(generateLogStatement(LockState.FAILED_TO_RELEASE, generateLogSuffixString()), e);
}
}
| 3.26 |
hudi_HoodieCatalogUtil_inferPartitionPath_rdh
|
/**
* Returns the partition path with given {@link CatalogPartitionSpec}.
*/
public static String inferPartitionPath(boolean hiveStylePartitioning, CatalogPartitionSpec catalogPartitionSpec) {
return catalogPartitionSpec.getPartitionSpec().entrySet().stream().map(entry -> hiveStylePartitioning ? String.format("%s=%s", entry.getKey(), entry.getValue()) : entry.getValue()).collect(Collectors.joining("/"));
}
| 3.26 |
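A standalone sketch of the same path construction, using a plain ordered map in place of CatalogPartitionSpec:

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.stream.Collectors;

// With hive-style partitioning the key is kept in the path; otherwise only the values are joined.
public class PartitionPathExample {
  public static void main(String[] args) {
    Map<String, String> spec = new LinkedHashMap<>();
    spec.put("dt", "2024-01-01");
    spec.put("region", "us");

    String hiveStyle = spec.entrySet().stream()
        .map(e -> String.format("%s=%s", e.getKey(), e.getValue()))
        .collect(Collectors.joining("/"));
    String plain = String.join("/", spec.values());

    System.out.println(hiveStyle); // dt=2024-01-01/region=us
    System.out.println(plain);     // 2024-01-01/us
  }
}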
hudi_HoodieCatalogUtil_createHiveConf_rdh
|
/**
* Returns a new {@code HiveConf}.
*
* @param hiveConfDir
* Hive conf directory path.
* @return A HiveConf instance.
*/
public static HiveConf createHiveConf(@Nullable String hiveConfDir, Configuration flinkConf) {
// create HiveConf from hadoop configuration with hadoop conf directory configured.
Configuration hadoopConf = HadoopConfigurations.getHadoopConf(flinkConf);
// ignore all the static conf file URLs that HiveConf may have set
HiveConf.setHiveSiteLocation(null);
HiveConf.setLoadMetastoreConfig(false);
HiveConf.setLoadHiveServer2Config(false);
HiveConf hiveConf = new HiveConf(hadoopConf, HiveConf.class);
LOG.info("Setting hive conf dir as {}", hiveConfDir);
if (hiveConfDir != null) {
Path hiveSite = new Path(hiveConfDir, HIVE_SITE_FILE);
if (!hiveSite.toUri().isAbsolute()) {
// treat relative URI as local file to be compatible with previous behavior
hiveSite = new Path(new File(hiveSite.toString()).toURI());
}
try (InputStream v3 = hiveSite.getFileSystem(hadoopConf).open(hiveSite)) {
hiveConf.addResource(v3, hiveSite.toString());
// trigger a read from the conf so that the input stream is read
isEmbeddedMetastore(hiveConf);
} catch (IOException e) {
throw new CatalogException("Failed to load hive-site.xml from specified path:" + hiveSite, e);
}
} else {
// user doesn't provide hive conf dir, we try to find it in classpath
URL hiveSite = Thread.currentThread().getContextClassLoader().getResource(HIVE_SITE_FILE);
if (hiveSite != null) {
LOG.info("Found {} in classpath: {}", HIVE_SITE_FILE, hiveSite);
hiveConf.addResource(hiveSite);
}
}
return hiveConf;
}
| 3.26 |
hudi_HoodieCatalogUtil_isEmbeddedMetastore_rdh
|
/**
* Checks whether hive.metastore.uris is empty, i.e. whether an embedded metastore is used.
*/
public static boolean isEmbeddedMetastore(HiveConf hiveConf) {
return isNullOrWhitespaceOnly(hiveConf.getVar(ConfVars.METASTOREURIS));
}
| 3.26 |
hudi_HoodieCatalogUtil_getPartitionKeys_rdh
|
/**
* Returns the partition key list with given table.
*/
public static List<String> getPartitionKeys(CatalogTable table) {
// the PARTITIONED BY syntax always has higher priority than option FlinkOptions#PARTITION_PATH_FIELD
if (table.isPartitioned()) {
return table.getPartitionKeys();
} else if (table.getOptions().containsKey(FlinkOptions.PARTITION_PATH_FIELD.key())) {
return Arrays.stream(table.getOptions().get(FlinkOptions.PARTITION_PATH_FIELD.key()).split(",")).collect(Collectors.toList());
}
return Collections.emptyList();
}
| 3.26 |
hudi_HoodieCatalogUtil_getOrderedPartitionValues_rdh
|
/**
* Returns a list of ordered partition values by re-arranging them based on the given list of
* partition keys. If the partition value is null, it'll be converted into default partition
* name.
*
* @param partitionSpec
* The partition spec
* @param partitionKeys
* The partition keys
* @param tablePath
* The table path
* @return A list of partition values ordered by partition keys
* @throws PartitionSpecInvalidException
* thrown if partitionSpec and partitionKeys have
* different sizes, or any key in partitionKeys doesn't exist in partitionSpec.
*/
@VisibleForTesting
public static List<String> getOrderedPartitionValues(String catalogName, HiveConf hiveConf, CatalogPartitionSpec partitionSpec, List<String> partitionKeys, ObjectPath tablePath) throws PartitionSpecInvalidException {
Map<String, String> spec = partitionSpec.getPartitionSpec();
if (spec.size() != partitionKeys.size()) {
throw new PartitionSpecInvalidException(catalogName, partitionKeys, tablePath, partitionSpec);
}
List<String> values = new ArrayList<>(spec.size());
for (String key : partitionKeys) {
if (!spec.containsKey(key)) {
throw new PartitionSpecInvalidException(catalogName, partitionKeys, tablePath, partitionSpec);
} else {
String value = spec.get(key);
if (value == null) {
value = hiveConf.getVar(ConfVars.DEFAULTPARTITIONNAME);
}
values.add(value);
}
}
return values;
}
| 3.26 |
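A standalone sketch of the reordering with plain collections instead of CatalogPartitionSpec; "__HIVE_DEFAULT_PARTITION__" is Hive's usual default partition name and is used here only for illustration, and the validation paths are omitted.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Values come back in partition-key order; a null value is replaced by the default partition name.
public class OrderedPartitionValuesExample {
  public static void main(String[] args) {
    List<String> partitionKeys = Arrays.asList("dt", "region");
    Map<String, String> spec = new HashMap<>();
    spec.put("region", "us");
    spec.put("dt", null);

    List<String> values = new ArrayList<>();
    for (String key : partitionKeys) {
      String value = spec.get(key);
      values.add(value == null ? "__HIVE_DEFAULT_PARTITION__" : value);
    }
    System.out.println(values); // [__HIVE_DEFAULT_PARTITION__, us]
  }
}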
hudi_OpenJ9MemoryLayoutSpecification64bitCompressed_getArrayHeaderSize_rdh
|
/**
* Implementation of {@link MemoryLayoutSpecification} based on
* OpenJ9 Memory Layout Specification on 64-bit compressed.
*/
public class OpenJ9MemoryLayoutSpecification64bitCompressed implements MemoryLayoutSpecification {
@Override
public int getArrayHeaderSize() {
return 16;
}
| 3.26 |
hudi_HoodieRealtimeInputFormatUtils_addProjectionField_rdh
|
/**
* Add a field to the existing fields projected.
*/
private static Configuration addProjectionField(Configuration conf, String fieldName, int fieldIndex) {
String readColNames = conf.get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR, "");
String readColIds = conf.get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "");
String readColNamesPrefix = readColNames + ",";
if ((readColNames == null) || readColNames.isEmpty()) {
readColNamesPrefix = "";
}
String readColIdsPrefix = readColIds + ",";
if ((readColIds == null) || readColIds.isEmpty()) {
readColIdsPrefix = "";
}
if (!Arrays.asList(readColNames.split(",")).contains(fieldName)) {
// If not already in the list - then add it
conf.set(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR, readColNamesPrefix + fieldName);
conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, readColIdsPrefix + fieldIndex);
if (LOG.isDebugEnabled()) {
LOG.debug(String.format(("Adding extra column " + fieldName) + ", to enable log merging cols (%s) ids (%s) ", conf.get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR), conf.get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR)));
}
}
return conf;
}
| 3.26 |
hudi_HoodieRealtimeInputFormatUtils_cleanProjectionColumnIds_rdh
|
/**
* Hive will append read columns' ids to old columns' ids during getRecordReader. In some cases, e.g. SELECT COUNT(*),
* the read columns' id is an empty string and Hive will combine it with Hoodie required projection ids and becomes
* e.g. ",2,0,3" and will cause an error. Actually this method is a temporary solution because the real bug is from
* Hive. Hive has fixed this bug after 3.0.0, but the version before that would still face this problem. (HIVE-22438)
*/
public static void cleanProjectionColumnIds(Configuration conf) {
String columnIds = conf.get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR);
if ((!columnIds.isEmpty()) && (columnIds.charAt(0) == ',')) {
conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, columnIds.substring(1));
if (LOG.isDebugEnabled()) {
LOG.debug(("The projection Ids: {" + columnIds) + "} start with ','. First comma is removed");
}
}
}
| 3.26 |
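A minimal illustration of the cleanup, operating on the raw property value instead of a Hadoop Configuration:

// Strips the spurious leading comma Hive may prepend to the projection ids.
public class ProjectionIdCleanupExample {
  public static void main(String[] args) {
    String columnIds = ",2,0,3"; // what Hive may produce for e.g. SELECT COUNT(*)
    if (!columnIds.isEmpty() && columnIds.charAt(0) == ',') {
      columnIds = columnIds.substring(1);
    }
    System.out.println(columnIds); // 2,0,3
  }
}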
hudi_RocksDbBasedFileSystemView_applyDeltaFileSlicesToPartitionView_rdh
|
/* This is overridden to incrementally apply file-slices to rocks DB */
@Override
protected void applyDeltaFileSlicesToPartitionView(String partition, List<HoodieFileGroup> deltaFileGroups, DeltaApplyMode mode) {
rocksDB.writeBatch(batch -> deltaFileGroups.forEach(fg -> fg.getAllRawFileSlices().map(fs -> {
Option<FileSlice> oldSliceOption = fetchLatestFileSliceBeforeOrOn(partition, fs.getFileId(), fs.getBaseInstantTime());
if ((!oldSliceOption.isPresent()) || (!shouldMergeFileSlice(oldSliceOption.get(), fs))) {
return fs;
} else {
FileSlice oldSlice = oldSliceOption.get();
// First remove the file-slice
LOG.info("Removing old Slice in DB. FS=" + oldSlice);
rocksDB.deleteInBatch(batch, schemaHelper.getColFamilyForView(), schemaHelper.getKeyForSliceView(fg, oldSlice));
rocksDB.deleteInBatch(batch, schemaHelper.getColFamilyForView(), schemaHelper.getKeyForDataFileView(fg, oldSlice));
Map<String, HoodieLogFile> logFiles = oldSlice.getLogFiles().map(lf -> Pair.of(Path.getPathWithoutSchemeAndAuthority(lf.getPath()).toString(), lf)).collect(Collectors.toMap(Pair::getKey, Pair::getValue));
Map<String, HoodieLogFile> deltaLogFiles = fs.getLogFiles().map(lf -> Pair.of(Path.getPathWithoutSchemeAndAuthority(lf.getPath()).toString(), lf)).collect(Collectors.toMap(Pair::getKey, Pair::getValue));
switch (mode) {
case ADD :
{
FileSlice newFileSlice = new FileSlice(oldSlice.getFileGroupId(), oldSlice.getBaseInstantTime());
oldSlice.getBaseFile().ifPresent(newFileSlice::setBaseFile);
fs.getBaseFile().ifPresent(newFileSlice::setBaseFile);
Map<String, HoodieLogFile> newLogFiles = new HashMap<>(logFiles);
deltaLogFiles.entrySet().stream().filter(e -> !logFiles.containsKey(e.getKey())).forEach(p -> newLogFiles.put(p.getKey(), p.getValue()));
newLogFiles.values().forEach(newFileSlice::addLogFile);
LOG.info("Adding back new File Slice after add FS=" + newFileSlice);
return newFileSlice;
}
case REMOVE :
{
LOG.info("Removing old File Slice =" + fs);
FileSlice v15 = new FileSlice(oldSlice.getFileGroupId(), oldSlice.getBaseInstantTime());
fs.getBaseFile().orElseGet(() -> {
oldSlice.getBaseFile().ifPresent(v15::setBaseFile);
return null;
});
deltaLogFiles.keySet().forEach(logFiles::remove);
// Add remaining log files back
logFiles.values().forEach(v15::addLogFile);
if (v15.getBaseFile().isPresent() || (v15.getLogFiles().count() > 0)) {
LOG.info("Adding back new file-slice after remove FS=" + v15);
return v15;
}
return null;
}
default :
throw new IllegalStateException("Unknown diff apply mode=" + mode);
}
}}).filter(Objects::nonNull).forEach(fs -> {
rocksDB.putInBatch(batch, schemaHelper.getColFamilyForView(), schemaHelper.getKeyForSliceView(fg, fs), fs);
fs.getBaseFile().ifPresent(df -> rocksDB.putInBatch(batch, schemaHelper.getColFamilyForView(), schemaHelper.getKeyForDataFileView(fg, fs), df));
})));
}
| 3.26 |
hudi_HoodieCompactor_validateRunningMode_rdh
|
// make sure that cfg.runningMode couldn't be null
private static void validateRunningMode(Config cfg) {
// --mode has a higher priority than --schedule
// If we remove --schedule option in the future we need to change runningMode default value to EXECUTE
if (StringUtils.isNullOrEmpty(cfg.runningMode)) {
cfg.runningMode = (cfg.runSchedule) ? SCHEDULE : EXECUTE;
}
}
| 3.26 |
hudi_AvroInternalSchemaConverter_nullableSchema_rdh
|
/**
* Returns schema with nullable true.
*/
public static Schema nullableSchema(Schema schema) {
if (schema.getType() == UNION) {
if (!isOptional(schema)) {
throw new HoodieSchemaException(String.format("Union schemas are not supported: %s", schema));
}
return schema;
} else {
return Schema.createUnion(Schema.create(Type.NULL), schema);
}
}
| 3.26 |
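A behavioral sketch using plain Avro calls (illustrative; it mirrors what the helper returns rather than invoking Hudi): a non-union schema is wrapped into the union [null, type], while an already-nullable union is returned unchanged.

import java.util.Arrays;

import org.apache.avro.Schema;

public class NullableSchemaExample {
  public static void main(String[] args) {
    Schema stringSchema = Schema.create(Schema.Type.STRING);
    // What nullableSchema(stringSchema) produces: a union with NULL first.
    Schema nullable = Schema.createUnion(Arrays.asList(Schema.create(Schema.Type.NULL), stringSchema));
    System.out.println(nullable); // ["null","string"]
    // Passing `nullable` back in would return it as-is, since it is already an optional union.
  }
}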
hudi_AvroInternalSchemaConverter_visitInternalPrimitiveToBuildAvroPrimitiveType_rdh
|
/**
* Converts a Hudi PrimitiveType to an Avro primitive type.
* This is an auxiliary function used by visitInternalSchemaToBuildAvroSchema.
*/
private static Schema visitInternalPrimitiveToBuildAvroPrimitiveType(Type.PrimitiveType primitive, String recordName) {
switch (primitive.typeId()) {
case BOOLEAN :
return Schema.create(Type.BOOLEAN);
case INT :
return Schema.create(Type.INT);
case LONG :
return Schema.create(Type.LONG);
case FLOAT :
return Schema.create(Type.FLOAT);
case DOUBLE :
return Schema.create(Type.DOUBLE);
case DATE :
return LogicalTypes.date().addToSchema(Schema.create(Type.INT));
case TIME :
return LogicalTypes.timeMicros().addToSchema(Schema.create(Type.LONG));
case TIMESTAMP :
return LogicalTypes.timestampMicros().addToSchema(Schema.create(Type.LONG));
case STRING :
return Schema.create(Type.STRING);
case BINARY :
return Schema.create(Type.BYTES);
case UUID :
{
// NOTE: All schemas corresponding to Avro's type [[FIXED]] are generated
// with the "fixed" name to stay compatible w/ [[SchemaConverters]]
String name = (recordName + f0) + "fixed";
Schema fixedSchema = Schema.createFixed(name, null, null, 16);
return LogicalTypes.uuid().addToSchema(fixedSchema);
}
case FIXED :
{
Types.FixedType fixed = ((Types.FixedType) (primitive));
// NOTE: All schemas corresponding to Avro's type [[FIXED]] are generated
// with the "fixed" name to stay compatible w/ [[SchemaConverters]]
String name = (recordName + f0) + "fixed";
return Schema.createFixed(name, null, null, fixed.getFixedSize());
}
case DECIMAL : {
Types.DecimalType decimal = ((Types.DecimalType) (primitive));
// NOTE: All schemas corresponding to Avro's type [[FIXED]] are generated
// with the "fixed" name to stay compatible w/ [[SchemaConverters]]
String name = (recordName + f0) + "fixed";
Schema fixedSchema = Schema.createFixed(name, null, null, computeMinBytesForPrecision(decimal.precision()));
return LogicalTypes.decimal(decimal.precision(), decimal.scale()).addToSchema(fixedSchema);
}
default :
throw new UnsupportedOperationException("Unsupported type ID: " + primitive.typeId());
}
}
| 3.26 |
hudi_AvroInternalSchemaConverter_computeMinBytesForPrecision_rdh
|
/**
* Returns the minimum number of bytes needed to store a decimal with the given 'precision'.
* Referenced from Spark release 3.1.
*/
private static int computeMinBytesForPrecision(int precision) {
int numBytes = 1;
while (Math.pow(2.0, (8 * numBytes) - 1) < Math.pow(10.0, precision)) {
numBytes += 1;
}
return numBytes;
}
| 3.26 |
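A worked example (the helper body is copied here only for illustration): the loop finds the smallest n such that 2^(8n-1) >= 10^precision, i.e. the narrowest signed big-endian byte width that can hold any unscaled value of that precision.

// Standalone copy of the search above, with a few representative precisions.
public class DecimalBytesExample {
  static int computeMinBytesForPrecision(int precision) {
    int numBytes = 1;
    while (Math.pow(2.0, (8 * numBytes) - 1) < Math.pow(10.0, precision)) {
      numBytes += 1;
    }
    return numBytes;
  }

  public static void main(String[] args) {
    System.out.println(computeMinBytesForPrecision(9));  // 4  (fits in an int-sized fixed)
    System.out.println(computeMinBytesForPrecision(18)); // 8  (fits in a long-sized fixed)
    System.out.println(computeMinBytesForPrecision(38)); // 16 (max precision of many engines)
  }
}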
hudi_AvroInternalSchemaConverter_buildAvroSchemaFromType_rdh
|
/**
* Converts hudi type into an Avro Schema.
*
* @param type
* a hudi type.
* @param recordName
* the record name
* @return an Avro schema matching this type
*/
public static Schema buildAvroSchemaFromType(Type type, String recordName) {
Map<Type, Schema> cache = new HashMap<>();
return visitInternalSchemaToBuildAvroSchema(type, cache, recordName);
}
| 3.26 |
hudi_AvroInternalSchemaConverter_visitInternalMapToBuildAvroMap_rdh
|
/**
* Converts a Hudi MapType to an Avro map type.
* This is an auxiliary function used by visitInternalSchemaToBuildAvroSchema.
*/
private static Schema visitInternalMapToBuildAvroMap(Types.MapType map, Schema keySchema, Schema valueSchema) {
Schema mapSchema;
if (keySchema.getType() == Type.STRING) {
mapSchema = Schema.createMap(map.isValueOptional() ? AvroInternalSchemaConverter.nullableSchema(valueSchema) : valueSchema);
} else {
throw new HoodieSchemaException("only support StringType key for avro MapType");
}
return mapSchema;
}
| 3.26 |
hudi_AvroInternalSchemaConverter_visitInternalSchemaToBuildAvroSchema_rdh
|
/**
* Converts hudi type into an Avro Schema.
*
* @param type
* a hudi type.
* @param cache
* use to cache intermediate convert result to save cost.
* @param recordName
* auto-generated record name used as a fallback, in case
* {@link org.apache.hudi.internal.schema.Types.RecordType} doesn't bear original record-name
* @return an Avro schema matching this type
*/
private static Schema visitInternalSchemaToBuildAvroSchema(Type type, Map<Type, Schema> cache, String recordName) {
switch (type.typeId()) {
case RECORD :
Types.RecordType record = ((Types.RecordType) (type));
List<Schema> v22 = new ArrayList<>();
record.fields().forEach(f -> {
String nestedRecordName = (recordName + f0) + f.name();
Schema tempSchema = visitInternalSchemaToBuildAvroSchema(f.type(), cache, nestedRecordName);
// convert tempSchema
Schema result = (f.isOptional()) ? AvroInternalSchemaConverter.nullableSchema(tempSchema) : tempSchema;
v22.add(result);
});
// check visited
Schema recordSchema = cache.get(record);
if (recordSchema != null) {
return recordSchema;
}
recordSchema = visitInternalRecordToBuildAvroRecord(record, v22, recordName);
cache.put(record, recordSchema);
return recordSchema;
case ARRAY :
Types.ArrayType array = ((Types.ArrayType) (type));
Schema elementSchema;
elementSchema = visitInternalSchemaToBuildAvroSchema(array.elementType(), cache, recordName);
Schema v29;
v29 = cache.get(array);
if (v29 != null) {
return v29;
}
v29 = visitInternalArrayToBuildAvroArray(array, elementSchema);
cache.put(array, v29);
return v29;
case MAP :
Types.MapType map = ((Types.MapType) (type));
Schema keySchema;
Schema valueSchema;
keySchema = visitInternalSchemaToBuildAvroSchema(map.keyType(), cache, recordName);
valueSchema = visitInternalSchemaToBuildAvroSchema(map.valueType(), cache, recordName);
Schema mapSchema;
mapSchema = cache.get(map);
if (mapSchema != null) {
return mapSchema;
}
mapSchema = visitInternalMapToBuildAvroMap(map, keySchema, valueSchema);
cache.put(map, mapSchema);
return mapSchema;
default :
Schema primitiveSchema = visitInternalPrimitiveToBuildAvroPrimitiveType(((Type.PrimitiveType) (type)), recordName);
cache.put(type, primitiveSchema);
return primitiveSchema;
}
}
| 3.26 |
hudi_AvroInternalSchemaConverter_buildAvroSchemaFromInternalSchema_rdh
|
/**
* Converts hudi internal Schema into an Avro Schema.
*
* @param schema
* a hudi internal Schema.
* @param recordName
* the record name
* @return an Avro schema matching the Hudi internal schema.
*/
public static Schema buildAvroSchemaFromInternalSchema(InternalSchema schema, String recordName) {
Map<Type, Schema> cache = new HashMap<>();
return visitInternalSchemaToBuildAvroSchema(schema.getRecord(), cache, recordName);
}
| 3.26 |
hudi_AvroInternalSchemaConverter_visitInternalRecordToBuildAvroRecord_rdh
|
/**
* Converts a Hudi RecordType to an Avro record type.
* This is an auxiliary function used by visitInternalSchemaToBuildAvroSchema.
*/
private static Schema visitInternalRecordToBuildAvroRecord(Types.RecordType recordType, List<Schema> fieldSchemas, String recordNameFallback) {
List<Types.Field> fields = recordType.fields();
List<Schema.Field> avroFields = new ArrayList<>();
for (int v37 = 0; v37 < fields.size(); v37++) {
Types.Field f = fields.get(v37);
Schema.Field field = new Schema.Field(f.name(), fieldSchemas.get(v37), f.doc(), f.isOptional() ? JsonProperties.NULL_VALUE : null);
avroFields.add(field);
}
String recordName = Option.ofNullable(recordType.name()).orElse(recordNameFallback);
return Schema.createRecord(recordName, null, null, false, avroFields);
}
| 3.26 |
hudi_AvroInternalSchemaConverter_isOptional_rdh
|
/**
* Checks whether the given Avro schema is optional, i.e. a two-branch union containing NULL.
*/
public static boolean isOptional(Schema schema) {
if ((schema.getType() == UNION) && (schema.getTypes().size() == 2)) {
return (schema.getTypes().get(0).getType() == Type.NULL) || (schema.getTypes().get(1).getType() == Type.NULL);
}
return false;
}
| 3.26 |
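A behavioral sketch of the check using plain Avro construction; the expected results are noted in comments. Only a two-branch union containing NULL counts as optional.

import java.util.Arrays;

import org.apache.avro.Schema;

public class IsOptionalExample {
  public static void main(String[] args) {
    Schema optional = Schema.createUnion(Arrays.asList(
        Schema.create(Schema.Type.NULL), Schema.create(Schema.Type.STRING)));
    Schema nonNullUnion = Schema.createUnion(Arrays.asList(
        Schema.create(Schema.Type.INT), Schema.create(Schema.Type.STRING)));
    Schema plain = Schema.create(Schema.Type.STRING);
    // isOptional(optional)     -> true  (two branches, one of them NULL)
    // isOptional(nonNullUnion) -> false (neither branch is NULL)
    // isOptional(plain)        -> false (not a union)
  }
}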
hudi_AvroInternalSchemaConverter_convert_rdh
|
/**
* Convert an avro schema into internalSchema.
*/
public static InternalSchema convert(Schema schema) {
return new InternalSchema(((Types.RecordType) (convertToField(schema))));
}
| 3.26 |
hudi_AvroInternalSchemaConverter_visitInternalArrayToBuildAvroArray_rdh
|
/**
* Converts a Hudi ArrayType to an Avro array type.
* This is an auxiliary function used by visitInternalSchemaToBuildAvroSchema.
*/
private static Schema visitInternalArrayToBuildAvroArray(Types.ArrayType array, Schema elementSchema) {
Schema result;
if (array.isElementOptional()) {
result = Schema.createArray(AvroInternalSchemaConverter.nullableSchema(elementSchema));
} else {
result = Schema.createArray(elementSchema);
}
return result;
}
| 3.26 |
hudi_AvroInternalSchemaConverter_fixNullOrdering_rdh
|
/**
* Converting from avro -> internal schema -> avro causes null to always be first in unions.
* If we compare against a schema that has not been converted to the internal schema at any stage,
* the difference in union ordering can cause issues. To resolve this, we order null first for any
* Avro schema that enters Hudi.
* AvroSchemaUtils.isProjectionOfInternal uses index-based comparison for unions.
* Spark and Flink don't support complex unions, so this would normally not be an issue,
* but for the metadata table HoodieMetadata.avsc uses a trick where a bunch of
* different types are wrapped in a record for column stats.
*
* @param schema
* avro schema.
* @return an Avro schema where null is the first union branch.
*/
public static Schema fixNullOrdering(Schema schema) {
if (schema.getType() == Type.NULL) {
return schema;
}
return convert(convert(schema), schema.getFullName());
}
| 3.26 |
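A usage sketch, assuming AvroInternalSchemaConverter is on the classpath; the import path shown is its usual location and may differ across Hudi versions. Per the Javadoc above, a field declared as ["string","null"] should come back with the NULL branch first after the round trip.

import org.apache.avro.Schema;

import org.apache.hudi.internal.schema.convert.AvroInternalSchemaConverter;

public class FixNullOrderingExample {
  public static void main(String[] args) {
    String schemaStr = "{\"type\":\"record\",\"name\":\"rec\",\"fields\":"
        + "[{\"name\":\"f\",\"type\":[\"string\",\"null\"]}]}";
    Schema in = new Schema.Parser().parse(schemaStr);
    Schema out = AvroInternalSchemaConverter.fixNullOrdering(in);
    System.out.println(out.getField("f").schema()); // expected: ["null","string"]
  }
}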
hudi_AvroInternalSchemaConverter_convertToField_rdh
|
/**
* Convert an avro schema into internal type.
*/
public static Type convertToField(Schema schema) {
return buildTypeFromAvroSchema(schema);
}
| 3.26 |
hudi_JenkinsHash_main_rdh
|
/**
* Compute the hash of the specified file
*
* @param args
* name of file to compute hash of.
* @throws IOException
*/
public static void main(String[] args) throws IOException {
if (args.length != 1) {
System.err.println("Usage: JenkinsHash filename");
System.exit(-1);
}
try (FileInputStream in = new FileInputStream(args[0])) {
byte[] bytes = new byte[512];
int value = 0;
JenkinsHash hash = new JenkinsHash();
for (int length =
in.read(bytes); length > 0; length = in.read(bytes)) {
value = hash.hash(bytes, length, value);
}
System.out.println(Math.abs(value));
}
}
| 3.26 |
hudi_HoodieConfig_setDefaultValue_rdh
|
/**
* Sets the default value of a config if user does not set it already.
* The default value can only be set if the config property has a built-in
* default value or an infer function. When the infer function is present,
* the infer function is used first to derive the config value based on other
* configs. If the config value cannot be inferred, the built-in default value
* is used if present.
*
* @param configProperty
* Config to set a default value.
* @param <T>
* Data type of the config.
*/
public <T> void setDefaultValue(ConfigProperty<T> configProperty) {
if (!contains(configProperty)) {
Option<T>
inferValue = Option.empty();
if (configProperty.hasInferFunction()) {
inferValue = configProperty.getInferFunction().get().apply(this);
}
if (inferValue.isPresent() || configProperty.hasDefaultValue()) {
props.setProperty(configProperty.key(), inferValue.isPresent() ? inferValue.get().toString() : configProperty.defaultValue().toString());
}
}
}
| 3.26 |
hudi_HoodieTablePreCommitFileSystemView_getLatestBaseFiles_rdh
|
/**
* Combine committed base files + new files created/replaced for given partition.
*/
public final Stream<HoodieBaseFile> getLatestBaseFiles(String partitionStr) {
// get fileIds replaced by current inflight commit
List<String> replacedFileIdsForPartition = partitionToReplaceFileIds.getOrDefault(partitionStr, Collections.emptyList());
// get new files written by current inflight commit
Map<String, HoodieBaseFile> v1 = filesWritten.stream().filter(file -> partitionStr.equals(file.getPartitionPath())).collect(Collectors.toMap(HoodieWriteStat::getFileId, writeStat -> new HoodieBaseFile(new CachingPath(tableMetaClient.getBasePath(), writeStat.getPath()).toString(), writeStat.getFileId(), preCommitInstantTime, null)));
Stream<HoodieBaseFile> committedBaseFiles = this.completedCommitsFileSystemView.getLatestBaseFiles(partitionStr);
// Remove files replaced by current inflight commit
Map<String, HoodieBaseFile> allFileIds = committedBaseFiles.filter(baseFile -> !replacedFileIdsForPartition.contains(baseFile.getFileId())).collect(Collectors.toMap(HoodieBaseFile::getFileId, baseFile -> baseFile));
allFileIds.putAll(v1);
return allFileIds.values().stream();
}
| 3.26 |
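A conceptual sketch of the merge with file IDs mapped to plain path strings instead of HoodieBaseFile objects: drop the file groups replaced by the inflight commit from the committed view, then layer the newly written files on top.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Illustrative data only; the real view works on HoodieBaseFile and HoodieWriteStat objects.
public class PreCommitViewExample {
  public static void main(String[] args) {
    Map<String, String> committed = new HashMap<>();
    committed.put("fg-1", "fg-1_0001.parquet");
    committed.put("fg-2", "fg-2_0001.parquet");

    List<String> replacedFileIds = Arrays.asList("fg-2");
    Map<String, String> writtenByInflight = new HashMap<>();
    writtenByInflight.put("fg-3", "fg-3_0002.parquet");

    Map<String, String> latest = new HashMap<>();
    committed.forEach((id, path) -> {
      if (!replacedFileIds.contains(id)) {
        latest.put(id, path);
      }
    });
    latest.putAll(writtenByInflight);
    System.out.println(latest); // {fg-1=fg-1_0001.parquet, fg-3=fg-3_0002.parquet}
  }
}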
hudi_FileStatusUtils_safeReadAndSetMetadata_rdh
|
/**
* Used to safely handle FileStatus calls which might fail on some FileSystem implementations
* (e.g. DeprecatedLocalFileSystem).
*/
private static void safeReadAndSetMetadata(HoodieFileStatus fStatus, FileStatus fileStatus) {
try {
fStatus.setOwner(fileStatus.getOwner());
fStatus.setGroup(fileStatus.getGroup());
fStatus.setPermission(fromFSPermission(fileStatus.getPermission()));
} catch (IllegalArgumentException ie) {
// Deprecated File System (testing) does not work well with this call
// skipping
}
}
| 3.26 |
hudi_BaseRollbackPlanActionExecutor_requestRollback_rdh
|
/**
* Creates a Rollback plan if there are files to be rolled back and stores them in instant file.
* Rollback Plan contains absolute file paths.
*
* @param startRollbackTime
* Rollback Instant Time
* @return Rollback Plan if generated
*/
protected Option<HoodieRollbackPlan> requestRollback(String startRollbackTime) {
final HoodieInstant rollbackInstant = new HoodieInstant(State.REQUESTED, HoodieTimeline.ROLLBACK_ACTION, startRollbackTime);
try {
List<HoodieRollbackRequest> rollbackRequests = new ArrayList<>();
if (!instantToRollback.isRequested()) {
rollbackRequests.addAll(getRollbackStrategy().getRollbackRequests(instantToRollback));
}
HoodieRollbackPlan rollbackPlan = new HoodieRollbackPlan(new HoodieInstantInfo(instantToRollback.getTimestamp(), instantToRollback.getAction()), rollbackRequests, LATEST_ROLLBACK_PLAN_VERSION);
if (!skipTimelinePublish) {
if (table.getRollbackTimeline().filterInflightsAndRequested().containsInstant(rollbackInstant.getTimestamp())) {
LOG.warn(("Request Rollback found with instant time " + rollbackInstant) + ", hence skipping scheduling rollback");
} else {
table.getActiveTimeline().saveToRollbackRequested(rollbackInstant, TimelineMetadataUtils.serializeRollbackPlan(rollbackPlan));
table.getMetaClient().reloadActiveTimeline();
LOG.info("Requesting Rollback with instant time " + rollbackInstant);
}
}
return Option.of(rollbackPlan);
} catch (IOException e) {
LOG.error("Got exception when saving rollback requested file", e);
throw new HoodieIOException(e.getMessage(), e);
}
}
| 3.26 |
hudi_BaseRollbackPlanActionExecutor_getRollbackStrategy_rdh
|
/**
* Fetches the rollback strategy to use.
*
* @return a marker-based strategy when marker-based rollback is enabled, otherwise a listing-based strategy
*/
private BaseRollbackPlanActionExecutor.RollbackStrategy getRollbackStrategy() {
if (shouldRollbackUsingMarkers) {
return new MarkerBasedRollbackStrategy(table, context, config, instantTime);
} else {
return new ListingBasedRollbackStrategy(table, context, config, instantTime, isRestore);
}
}
| 3.26 |
hudi_ValidateNode_execute_rdh
|
/**
* Method to start the validate operation. Exceptions will be thrown if its parent nodes exist and WAIT_FOR_PARENTS
* was set to true or default, but the parent nodes have not completed yet.
*
* @param executionContext
* Context to execute this node
* @param curItrCount
* current iteration count.
*/
@Override
public void execute(ExecutionContext executionContext, int curItrCount) {
if ((this.getParentNodes().size() > 0) && ((Boolean) (this.config.getOtherConfigs().getOrDefault("WAIT_FOR_PARENTS", true)))) {
for (DagNode node : ((List<DagNode>) (this.getParentNodes()))) {
if (!node.isCompleted()) {
throw new RuntimeException("cannot validate before parent nodes are complete");
}
}
}
this.result = this.function.apply(((List<DagNode>) (this.getParentNodes())));
}
| 3.26 |
hudi_KeyRangeNode_addFiles_rdh
|
/**
* Adds a new file name list to existing list of file names.
*
* @param newFiles
* {@link List} of file names to be added
*/
void addFiles(List<String> newFiles) {
this.fileNameList.addAll(newFiles);
}
| 3.26 |
hudi_HoodieConsistentBucketIndex_rollbackCommit_rdh
|
/**
* Do nothing.
* A failed write may create a hashing metadata for a partition. In this case, we still do nothing when rolling back
* the failed write. Because the hashing metadata created by a writer must have 00000000000000 timestamp and can be viewed
* as the initialization of a partition rather than as a part of the failed write.
*/
@Override
public boolean rollbackCommit(String instantTime) {
return true;
}
| 3.26 |
hudi_HoodieSortedMergeHandle_write_rdh
|
/**
* Go through an old record. Here if we detect a newer version shows up, we write the new one to the file.
*/
@Override
public void write(HoodieRecord oldRecord) {
Schema oldSchema = (config.populateMetaFields()) ? writeSchemaWithMetaFields : writeSchema;
Schema newSchema = (useWriterSchemaForCompaction) ? writeSchemaWithMetaFields : writeSchema;
String key = oldRecord.getRecordKey(oldSchema, keyGeneratorOpt);
// To maintain overall sorted order across updates and inserts, write any new inserts whose keys are less than
// the oldRecord's key.
while ((!newRecordKeysSorted.isEmpty()) && (newRecordKeysSorted.peek().compareTo(key) <= 0)) {
String keyToPreWrite = newRecordKeysSorted.remove();
if (keyToPreWrite.equals(key)) {
// will be handled as an update later
break;
}
// This is a new insert
HoodieRecord<T> hoodieRecord = keyToNewRecords.get(keyToPreWrite).newInstance();
if (writtenRecordKeys.contains(keyToPreWrite)) {
throw new HoodieUpsertException("Insert/Update not in sorted order");
}
try {
writeRecord(hoodieRecord, Option.of(hoodieRecord), newSchema, config.getProps());
insertRecordsWritten++;
writtenRecordKeys.add(keyToPreWrite);
} catch (IOException e) {
throw new HoodieUpsertException("Failed to write records", e);
}
}
super.write(oldRecord);
}
| 3.26 |
hudi_HoodieLogFileReader_getFSDataInputStreamForGCS_rdh
|
/**
* GCS FileSystem needs some special handling for seek and hence this method assists to fetch the right {@link FSDataInputStream} to be
* used by wrapping with required input streams.
*
* @param fsDataInputStream
* original instance of {@link FSDataInputStream}.
* @param bufferSize
* buffer size to be used.
* @return the right {@link FSDataInputStream} as required.
*/
private static FSDataInputStream getFSDataInputStreamForGCS(FSDataInputStream fsDataInputStream, HoodieLogFile logFile, int bufferSize) {
// in case of GCS FS, there are two flows.
// a. fsDataInputStream.getWrappedStream() instanceof FSInputStream
// b. fsDataInputStream.getWrappedStream() not an instanceof FSInputStream, but an instance of FSDataInputStream.
// (a) is handled in the first if block and (b) is handled in the second if block. If not, we fallback to original fsDataInputStream
if (fsDataInputStream.getWrappedStream() instanceof FSInputStream) {
return new TimedFSDataInputStream(logFile.getPath(), new FSDataInputStream(new BufferedFSInputStream(((FSInputStream) (fsDataInputStream.getWrappedStream())), bufferSize)));
}
if ((fsDataInputStream.getWrappedStream() instanceof FSDataInputStream) && (((FSDataInputStream) (fsDataInputStream.getWrappedStream())).getWrappedStream() instanceof FSInputStream)) {
FSInputStream inputStream = ((FSInputStream) (((FSDataInputStream) (fsDataInputStream.getWrappedStream())).getWrappedStream()));
return new TimedFSDataInputStream(logFile.getPath(), new FSDataInputStream(new BufferedFSInputStream(inputStream, bufferSize)));
}
return fsDataInputStream;
}
| 3.26 |
hudi_HoodieLogFileReader_hasPrev_rdh
|
/**
* hasPrev is not idempotent.
*/
@Override
public boolean hasPrev() {
try {
if (!this.reverseReader) {
throw new HoodieNotSupportedException("Reverse log reader has not been enabled");
}
reverseLogFilePosition = lastReverseLogFilePosition;
reverseLogFilePosition -= Long.BYTES;
lastReverseLogFilePosition = reverseLogFilePosition;
inputStream.seek(reverseLogFilePosition);
} catch (Exception e) {
// Either reached EOF while reading backwards or an exception
return false;
}
return true;
}
| 3.26 |
hudi_HoodieLogFileReader_readBlock_rdh
|
// TODO : convert content and block length to long by using ByteBuffer, raw byte [] allows
// for max of Integer size
private HoodieLogBlock readBlock() throws IOException {
int blockSize;
long blockStartPos = inputStream.getPos();
try {
// 1 Read the total size of the block
blockSize = ((int) (inputStream.readLong()));
} catch (EOFException | CorruptedLogFileException e) {
// An exception reading any of the above indicates a corrupt block
// Create a corrupt block by finding the next MAGIC marker or EOF
return createCorruptBlock(blockStartPos);
}
// We may have had a crash which could have written this block partially
// Skip blockSize in the stream and we should either find a sync marker (start of the next
// block) or EOF. If we did not find either of it, then this block is a corrupted block.
boolean isCorrupted = isBlockCorrupted(blockSize);
if (isCorrupted) {
return createCorruptBlock(blockStartPos);
}
// 2. Read the version for this log format
HoodieLogFormat.LogFormatVersion nextBlockVersion = readVersion();
// 3. Read the block type for a log block
HoodieLogBlockType blockType = tryReadBlockType(nextBlockVersion);
// 4. Read the header for a log block, if present
Map<HeaderMetadataType, String> v6 = (nextBlockVersion.hasHeader()) ? HoodieLogBlock.getLogMetadata(inputStream) : null;
// 5. Read the content length for the content
// Fallback to full-block size if no content-length
// TODO replace w/ hasContentLength
int contentLength = (nextBlockVersion.getVersion() != HoodieLogFormatVersion.DEFAULT_VERSION) ? ((int) (inputStream.readLong())) : blockSize;
// 6. Read the content or skip content based on IO vs Memory trade-off by client
long contentPosition = inputStream.getPos();
boolean shouldReadLazily = readBlockLazily && (nextBlockVersion.getVersion() != HoodieLogFormatVersion.DEFAULT_VERSION);
Option<byte[]> content = HoodieLogBlock.tryReadContent(inputStream, contentLength, shouldReadLazily);
// 7. Read footer if any
Map<HeaderMetadataType, String> footer = (nextBlockVersion.hasFooter()) ? HoodieLogBlock.getLogMetadata(inputStream) : null;
// 8. Read log block length, if present. This acts as a reverse pointer when traversing a
// log file in reverse
if (nextBlockVersion.hasLogBlockLength()) {
inputStream.readLong();
}
// 9. Read the log block end position in the log file
long blockEndPos = inputStream.getPos();
HoodieLogBlock.HoodieLogBlockContentLocation logBlockContentLoc = new HoodieLogBlock.HoodieLogBlockContentLocation(hadoopConf, logFile, contentPosition, contentLength, blockEndPos);
switch (Objects.requireNonNull(blockType)) {
case AVRO_DATA_BLOCK :
if (nextBlockVersion.getVersion() == HoodieLogFormatVersion.DEFAULT_VERSION) {
return HoodieAvroDataBlock.getBlock(content.get(), readerSchema, internalSchema);
} else {
return new HoodieAvroDataBlock(inputStream, content, readBlockLazily, logBlockContentLoc, getTargetReaderSchemaForBlock(), v6, footer, keyField);
}
case HFILE_DATA_BLOCK :
checkState(nextBlockVersion.getVersion() != HoodieLogFormatVersion.DEFAULT_VERSION, String.format("HFile block could not be of version (%d)", HoodieLogFormatVersion.DEFAULT_VERSION));
return new HoodieHFileDataBlock(inputStream, content, readBlockLazily, logBlockContentLoc, Option.ofNullable(readerSchema), v6, footer, enableRecordLookups, logFile.getPath());
case PARQUET_DATA_BLOCK :
checkState(nextBlockVersion.getVersion() != HoodieLogFormatVersion.DEFAULT_VERSION, String.format("Parquet block could not be of version (%d)", HoodieLogFormatVersion.DEFAULT_VERSION));
return new HoodieParquetDataBlock(inputStream, content, readBlockLazily, logBlockContentLoc, getTargetReaderSchemaForBlock(), v6, footer, keyField);
case DELETE_BLOCK :
return new HoodieDeleteBlock(content, inputStream, readBlockLazily, Option.of(logBlockContentLoc), v6, footer);
case COMMAND_BLOCK :
return new HoodieCommandBlock(content, inputStream, readBlockLazily, Option.of(logBlockContentLoc), v6, footer);
case CDC_DATA_BLOCK :
return new HoodieCDCDataBlock(inputStream, content, readBlockLazily, logBlockContentLoc, readerSchema, v6, keyField);
default :
throw new HoodieNotSupportedException("Unsupported Block " + blockType);
}
}
| 3.26 |
hudi_HoodieLogFileReader_readVersion_rdh
|
/**
* Read log format version from log file.
*/
private LogFormatVersion readVersion() throws IOException {
return new HoodieLogFormatVersion(inputStream.readInt());
}
| 3.26 |
hudi_HoodieLogFileReader_addShutDownHook_rdh
|
/**
* Closes the input stream, if it has not been closed already, when the JVM exits.
*/
private void addShutDownHook() {
shutdownThread = new Thread(() -> {
try {
close();
} catch (Exception e) {
LOG.warn("unable to close input stream for log file " + logFile, e);
// fail silently for any sort of exception
}
});
Runtime.getRuntime().addShutdownHook(shutdownThread);
}
| 3.26 |
hudi_HoodieLogFileReader_moveToPrev_rdh
|
/**
* Reverse pointer; does not read the block. Returns the current position of the log file (in reverse).
* If the pointer (input stream) is moved in any way, it is the job of the client of this class to seek/reset
* it back to the file position returned from this method in order to get correct results.
*/
public long moveToPrev() throws IOException {
if (!this.reverseReader) {
throw new HoodieNotSupportedException("Reverse log reader has not been enabled");
}
inputStream.seek(lastReverseLogFilePosition);
long blockSize = inputStream.readLong();
// blocksize should be everything about a block including the length as well
inputStream.seek(reverseLogFilePosition - blockSize);
reverseLogFilePosition -= blockSize;
lastReverseLogFilePosition = reverseLogFilePosition;
return reverseLogFilePosition;
}
| 3.26 |
hudi_HoodieLogFileReader_hasNext_rdh
|
/* hasNext is not idempotent. TODO - Fix this. It is okay for now - PR */
@Override
public boolean hasNext() {
try {
return readMagic();
} catch (IOException e) {
throw new HoodieIOException("IOException when reading logfile " + logFile, e);
}
}
| 3.26 |