name (string, 12–178 chars) | code_snippet (string, 8–36.5k chars) | score (float64, 3.26–3.68)
---|---|---|
hudi_BulkInsertWriterHelper_getInstantTime_rdh
|
/**
* Returns the write instant time.
*/
public String getInstantTime() {
return this.instantTime;
}
| 3.26 |
hudi_ClusteringPlanStrategy_getFileSlicesEligibleForClustering_rdh
|
/**
* Return file slices eligible for clustering. FileIds in pending clustering/compaction are not eligible for clustering.
*/
protected Stream<FileSlice> getFileSlicesEligibleForClustering(String partition) {
SyncableFileSystemView fileSystemView = ((SyncableFileSystemView) (getHoodieTable().getSliceView()));
Set<HoodieFileGroupId> fgIdsInPendingCompactionLogCompactionAndClustering = Stream.concat(fileSystemView.getPendingCompactionOperations(), fileSystemView.getPendingLogCompactionOperations()).map(instantTimeOpPair -> instantTimeOpPair.getValue().getFileGroupId()).collect(Collectors.toSet());
fgIdsInPendingCompactionLogCompactionAndClustering.addAll(fileSystemView.getFileGroupsInPendingClustering().map(Pair::getKey).collect(Collectors.toSet()));
    // file ids already in clustering are not eligible
    return hoodieTable.getSliceView().getLatestFileSlicesStateless(partition)
        .filter(slice -> !fgIdsInPendingCompactionLogCompactionAndClustering.contains(slice.getFileGroupId()));
  }
| 3.26 |
hudi_ClusteringPlanStrategy_buildMetrics_rdh
|
/**
* Generate metrics for the data to be clustered.
*/
protected Map<String, Double> buildMetrics(List<FileSlice> fileSlices) {
    Map<String, Double> metrics = new HashMap<>();
    FileSliceMetricUtils.addFileSliceCommonMetrics(fileSlices, metrics, getWriteConfig().getParquetMaxFileSize());
    return metrics;
}
| 3.26 |
hudi_ClusteringPlanStrategy_getPlanVersion_rdh
|
/**
* Version to support future changes for plan.
*/
protected int getPlanVersion() {
return CLUSTERING_PLAN_VERSION_1;
}
| 3.26 |
hudi_ClusteringPlanStrategy_checkAndGetClusteringPlanStrategy_rdh
|
/**
* Check if the given class is deprecated.
* If it is, then try to convert it to suitable one and update the write config accordingly.
*
* @param config
* write config
* @return class name of clustering plan strategy
*/
public static String checkAndGetClusteringPlanStrategy(HoodieWriteConfig config) {
    String className = config.getClusteringPlanStrategyClass();
    String sparkSizeBasedClassName = HoodieClusteringConfig.SPARK_SIZED_BASED_CLUSTERING_PLAN_STRATEGY;
    String sparkSelectedPartitionsClassName = "org.apache.hudi.client.clustering.plan.strategy.SparkSelectedPartitionsClusteringPlanStrategy";
String sparkRecentDaysClassName = "org.apache.hudi.client.clustering.plan.strategy.SparkRecentDaysClusteringPlanStrategy";
String javaSelectedPartitionClassName = "org.apache.hudi.client.clustering.plan.strategy.JavaRecentDaysClusteringPlanStrategy";
String javaSizeBasedClassName = HoodieClusteringConfig.JAVA_SIZED_BASED_CLUSTERING_PLAN_STRATEGY;
String logStr = "The clustering plan '%s' is deprecated. Please set the plan as '%s' and set '%s' as '%s' to achieve the same behaviour";
if (sparkRecentDaysClassName.equals(className)) {
config.setValue(HoodieClusteringConfig.PLAN_PARTITION_FILTER_MODE_NAME, ClusteringPlanPartitionFilterMode.RECENT_DAYS.name());
LOG.warn(String.format(logStr, className, sparkSizeBasedClassName, HoodieClusteringConfig.PLAN_PARTITION_FILTER_MODE_NAME.key(), ClusteringPlanPartitionFilterMode.RECENT_DAYS.name()));
return sparkSizeBasedClassName;
} else if (sparkSelectedPartitionsClassName.equals(className)) {
config.setValue(HoodieClusteringConfig.PLAN_PARTITION_FILTER_MODE_NAME, ClusteringPlanPartitionFilterMode.SELECTED_PARTITIONS.name());
LOG.warn(String.format(logStr, className, sparkSizeBasedClassName, HoodieClusteringConfig.PLAN_PARTITION_FILTER_MODE_NAME.key(), ClusteringPlanPartitionFilterMode.SELECTED_PARTITIONS.name()));
return sparkSizeBasedClassName;
} else if (javaSelectedPartitionClassName.equals(className)) {
config.setValue(HoodieClusteringConfig.PLAN_PARTITION_FILTER_MODE_NAME, ClusteringPlanPartitionFilterMode.RECENT_DAYS.name());
LOG.warn(String.format(logStr, className, javaSizeBasedClassName, HoodieClusteringConfig.PLAN_PARTITION_FILTER_MODE_NAME.key(), ClusteringPlanPartitionFilterMode.SELECTED_PARTITIONS.name()));
return javaSizeBasedClassName;
}
return className;
}
| 3.26 |
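A minimal usage sketch of the deprecation check above. The builder methods shown here (`HoodieWriteConfig.newBuilder().withClusteringConfig(...)`, `HoodieClusteringConfig.newBuilder().withClusteringPlanStrategyClass(...)`) are assumptions and may differ slightly across Hudi versions; only `checkAndGetClusteringPlanStrategy` comes from the snippet itself.

```java
// Hypothetical sketch: configure the deprecated Spark "recent days" strategy and let the
// helper rewrite it. Builder method names are assumed, not confirmed by the snippet.
HoodieWriteConfig writeConfig = HoodieWriteConfig.newBuilder()
    .withPath("/tmp/hudi_table")
    .withClusteringConfig(HoodieClusteringConfig.newBuilder()
        .withClusteringPlanStrategyClass(
            "org.apache.hudi.client.clustering.plan.strategy.SparkRecentDaysClusteringPlanStrategy")
        .build())
    .build();

// Per the snippet, this sets the RECENT_DAYS partition filter mode on the config
// and returns the size-based plan strategy class name instead of the deprecated one.
String resolved = ClusteringPlanStrategy.checkAndGetClusteringPlanStrategy(writeConfig);
```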
hudi_ClusteringPlanStrategy_getExtraMetadata_rdh
|
/**
* Returns any specific parameters to be stored as part of clustering metadata.
 */
protected Map<String, String> getExtraMetadata() {
return Collections.emptyMap();
}
| 3.26 |
hudi_ClusteringPlanStrategy_checkPrecondition_rdh
|
/**
* Check if the clustering can proceed. If not (i.e., return false), the PlanStrategy will generate an empty plan to stop the scheduling.
*/
public boolean checkPrecondition() {
return true;
}
| 3.26 |
hudi_ClusteringPlanStrategy_getFileSliceInfo_rdh
|
/**
* Transform {@link FileSlice} to {@link HoodieSliceInfo}.
*/
protected static List<HoodieSliceInfo> getFileSliceInfo(List<FileSlice> slices) {
    return slices.stream()
        .map(slice -> new HoodieSliceInfo().newBuilder()
            .setPartitionPath(slice.getPartitionPath())
            .setFileId(slice.getFileId())
            .setDataFilePath(slice.getBaseFile().map(BaseFile::getPath).orElse(StringUtils.EMPTY_STRING))
            .setDeltaFilePaths(slice.getLogFiles().map(f -> f.getPath().toString()).collect(Collectors.toList()))
            .setBootstrapFilePath(slice.getBaseFile().map(bf -> bf.getBootstrapBaseFile().map(bbf -> bbf.getPath()).orElse(StringUtils.EMPTY_STRING)).orElse(StringUtils.EMPTY_STRING))
            .build())
        .collect(Collectors.toList());
}
| 3.26 |
hudi_HoodieAvroReadSupport_checkLegacyMode_rdh
|
/**
 * Check whether map/list columns were written with parquet legacy mode.
* legacy:
* list:
* optional group obj_ids (LIST) {
* repeated binary array (UTF8);
* }
* map:
* optional group obj_ids (MAP) {
* repeated group map (MAP_KEY_VALUE) {
* required binary key (UTF8);
* required binary value (UTF8);
* }
* }
* non-legacy:
* optional group obj_ids (LIST) {
* repeated group list {
* optional binary element (UTF8);
* }
* }
* optional group obj_maps (MAP) {
* repeated group key_value {
* required binary key (UTF8);
* optional binary value (UTF8);
* }
* }
*/
private boolean checkLegacyMode(List<Type> parquetFields) {
for (Type type : parquetFields) {
if (!type.isPrimitive()) {
GroupType groupType = type.asGroupType();
OriginalType originalType = groupType.getOriginalType();
if ((originalType == OriginalType.MAP) && (groupType.getFields().get(0).getOriginalType() != OriginalType.MAP_KEY_VALUE)) {
return false;
}
if ((originalType == OriginalType.LIST) && (!groupType.getType(0).getName().equals("array"))) {
return false;
}
if (!checkLegacyMode(groupType.getFields())) {
return false;
}
}
}
return true;
}
| 3.26 |
hudi_FlinkConsistentBucketUpdateStrategy_patchFileIdToRecords_rdh
|
/**
 * Rewrite the first record with the given fileId.
*/
private void patchFileIdToRecords(List<HoodieRecord> records, String fileId) {
HoodieRecord first = records.get(0);
HoodieRecord record = new HoodieAvroRecord<>(first.getKey(), ((HoodieRecordPayload) (first.getData())), first.getOperation());
HoodieRecordLocation newLoc = new HoodieRecordLocation("U", fileId);
record.setCurrentLocation(newLoc);
records.set(0, record);
}
| 3.26 |
hudi_MysqlDebeziumSource_m0_rdh
|
/**
* Debezium Kafka Payload has a nested structure (see https://debezium.io/documentation/reference/1.4/connectors/mysql.html).
* This function flattens this nested structure for the Mysql data, and also extracts a subset of Debezium metadata fields.
*
* @param rowDataset
* Dataset containing Debezium Payloads
* @return New dataset with flattened columns
*/
@Override
protected Dataset<Row> m0(Dataset<Row> rowDataset) {
Dataset<Row> flattenedDataset = rowDataset;
if (rowDataset.columns().length > 0) {
// Only flatten for non-empty schemas
Dataset<Row> insertedOrUpdatedData = rowDataset.selectExpr(String.format("%s as %s", DebeziumConstants.INCOMING_OP_FIELD, DebeziumConstants.FLATTENED_OP_COL_NAME), String.format("%s as %s", DebeziumConstants.INCOMING_TS_MS_FIELD, DebeziumConstants.UPSTREAM_PROCESSING_TS_COL_NAME),
String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_NAME_FIELD, DebeziumConstants.FLATTENED_SHARD_NAME), String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_TS_MS_FIELD, DebeziumConstants.FLATTENED_TS_COL_NAME), String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_FILE_FIELD, DebeziumConstants.FLATTENED_FILE_COL_NAME), String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_POS_FIELD, DebeziumConstants.FLATTENED_POS_COL_NAME), String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_ROW_FIELD, DebeziumConstants.FLATTENED_ROW_COL_NAME), String.format("%s.*", DebeziumConstants.INCOMING_AFTER_FIELD)).filter(rowDataset.col(DebeziumConstants.INCOMING_OP_FIELD).notEqual(DebeziumConstants.DELETE_OP));
Dataset<Row> deletedData = rowDataset.selectExpr(String.format("%s as %s", DebeziumConstants.INCOMING_OP_FIELD, DebeziumConstants.FLATTENED_OP_COL_NAME), String.format("%s as %s", DebeziumConstants.INCOMING_TS_MS_FIELD, DebeziumConstants.UPSTREAM_PROCESSING_TS_COL_NAME), String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_NAME_FIELD, DebeziumConstants.FLATTENED_SHARD_NAME), String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_TS_MS_FIELD, DebeziumConstants.FLATTENED_TS_COL_NAME), String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_FILE_FIELD, DebeziumConstants.FLATTENED_FILE_COL_NAME), String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_POS_FIELD, DebeziumConstants.FLATTENED_POS_COL_NAME), String.format("%s as %s", DebeziumConstants.INCOMING_SOURCE_ROW_FIELD, DebeziumConstants.FLATTENED_ROW_COL_NAME), String.format("%s.*", DebeziumConstants.INCOMING_BEFORE_FIELD)).filter(rowDataset.col(DebeziumConstants.INCOMING_OP_FIELD).equalTo(DebeziumConstants.DELETE_OP));
flattenedDataset = insertedOrUpdatedData.union(deletedData);
}
    return flattenedDataset.withColumn(DebeziumConstants.ADDED_SEQ_COL_NAME, callUDF(generateUniqueSeqUdfFn, flattenedDataset.col(DebeziumConstants.FLATTENED_FILE_COL_NAME), flattenedDataset.col(DebeziumConstants.FLATTENED_POS_COL_NAME)));
}
| 3.26 |
hudi_AbstractRealtimeRecordReader_init_rdh
|
/**
 * Gets the schema from HoodieTableMetaClient; if unavailable, falls
 * back to the schema from the latest parquet file. Finally, sets the partition columns and projection fields into the
 * job conf.
*/
private void init() throws Exception {
LOG.info("Getting writer schema from table avro schema ");
    writerSchema = new TableSchemaResolver(metaClient).getTableAvroSchema();
    // Add partitioning fields to writer schema for resulting row to contain null values for these fields
String partitionFields = jobConf.get(hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS, "");
List<String> partitioningFields = (partitionFields.length() > 0) ? Arrays.stream(partitionFields.split("/")).collect(Collectors.toList()) : new ArrayList<>();
writerSchema = HoodieRealtimeRecordReaderUtils.addPartitionFields(writerSchema, partitioningFields);
List<String> projectionFields = HoodieRealtimeRecordReaderUtils.orderFields(jobConf.get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR, EMPTY_STRING), jobConf.get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, EMPTY_STRING), partitioningFields);
Map<String, Field> schemaFieldsMap = HoodieRealtimeRecordReaderUtils.getNameToFieldMap(writerSchema);
hiveSchema = constructHiveOrderedSchema(writerSchema, schemaFieldsMap, jobConf.get(hive_metastoreConstants.META_TABLE_COLUMNS, EMPTY_STRING));
// TODO(vc): In the future, the reader schema should be updated based on log files & be able
// to null out fields not present before
readerSchema = HoodieRealtimeRecordReaderUtils.generateProjectionSchema(writerSchema, schemaFieldsMap, projectionFields);
LOG.info(String.format("About to read compacted logs %s for base split %s, projecting cols %s", split.getDeltaLogPaths(),
split.getPath(), projectionFields));
// get timestamp columns
supportTimestamp = HoodieColumnProjectionUtils.supportTimestamp(jobConf);
}
| 3.26 |
hudi_HoodieWriteCommitPulsarCallbackConfig_setCallbackPulsarConfigIfNeeded_rdh
|
/**
* Set default value for {@link HoodieWriteCommitPulsarCallbackConfig} if needed.
*/
public static void setCallbackPulsarConfigIfNeeded(HoodieConfig config) {
config.setDefaultValue(PRODUCER_ROUTE_MODE);
config.setDefaultValue(OPERATION_TIMEOUT);
config.setDefaultValue(CONNECTION_TIMEOUT);
config.setDefaultValue(REQUEST_TIMEOUT);
config.setDefaultValue(KEEPALIVE_INTERVAL);
config.setDefaultValue(PRODUCER_SEND_TIMEOUT);
config.setDefaultValue(PRODUCER_PENDING_QUEUE_SIZE);
config.setDefaultValue(PRODUCER_PENDING_SIZE);
config.setDefaultValue(PRODUCER_BLOCK_QUEUE_FULL);
}
| 3.26 |
hudi_BaseHoodieFunctionalIndexClient_register_rdh
|
/**
* Register a functional index.
 * Index definitions are stored in a user-specified path or, by default, in .hoodie/.index_defs/index.json.
 * The index definition file is created on the first registration and updated on subsequent registrations.
 * Table config is updated if necessary.
*/
public void register(HoodieTableMetaClient metaClient, String indexName, String indexType, Map<String, Map<String, String>> columns, Map<String, String> options) {
    LOG.info("Registering index {} using {}", indexName, indexType);
String indexMetaPath = metaClient.getTableConfig().getIndexDefinitionPath().orElse((((metaClient.getMetaPath() + Path.SEPARATOR) + HoodieTableMetaClient.INDEX_DEFINITION_FOLDER_NAME) + Path.SEPARATOR) + HoodieTableMetaClient.INDEX_DEFINITION_FILE_NAME);
// build HoodieFunctionalIndexMetadata and then add to index definition file
metaClient.buildFunctionalIndexDefinition(indexMetaPath, indexName, indexType, columns, options);
// update table config if necessary
if ((!metaClient.getTableConfig().getProps().containsKey(HoodieTableConfig.INDEX_DEFINITION_PATH)) || (!metaClient.getTableConfig().getIndexDefinitionPath().isPresent())) {
metaClient.getTableConfig().setValue(HoodieTableConfig.INDEX_DEFINITION_PATH, indexMetaPath);
HoodieTableConfig.update(metaClient.getFs(), new Path(metaClient.getMetaPath()), metaClient.getTableConfig().getProps());
}
}
| 3.26 |
hudi_SparkRDDWriteClient_bootstrap_rdh
|
/**
* Main API to run bootstrap to hudi.
*/
@Override
public void bootstrap(Option<Map<String, String>> extraMetadata) {
    initTable(WriteOperationType.UPSERT, Option.ofNullable(HoodieTimeline.METADATA_BOOTSTRAP_INSTANT_TS)).bootstrap(context, extraMetadata);
}
| 3.26 |
hudi_SparkRDDWriteClient_insertOverwrite_rdh
|
/**
 * Removes all existing records from the affected partitions and inserts the given HoodieRecords into the table.
*
* @param records
* HoodieRecords to insert
* @param instantTime
* Instant time of the commit
* @return JavaRDD[WriteStatus] - RDD of WriteStatus to inspect errors and counts
*/
public HoodieWriteResult insertOverwrite(JavaRDD<HoodieRecord<T>> records, final String instantTime) {
HoodieTable<T, HoodieData<HoodieRecord<T>>, HoodieData<HoodieKey>, HoodieData<WriteStatus>> table = initTable(WriteOperationType.INSERT_OVERWRITE, Option.ofNullable(instantTime));
table.validateInsertSchema();
preWrite(instantTime, WriteOperationType.INSERT_OVERWRITE, table.getMetaClient());
HoodieWriteMetadata<HoodieData<WriteStatus>> result = table.insertOverwrite(context, instantTime, HoodieJavaRDD.of(records));
HoodieWriteMetadata<JavaRDD<WriteStatus>> resultRDD = result.clone(HoodieJavaRDD.getJavaRDD(result.getWriteStatuses()));
return new HoodieWriteResult(postWrite(resultRDD, instantTime, table), result.getPartitionToReplaceFileIds());
}
| 3.26 |
hudi_SparkRDDWriteClient_insertOverwriteTable_rdh
|
/**
 * Removes all existing records of the Hoodie table and inserts the given HoodieRecords into the table.
*
* @param records
* HoodieRecords to insert
* @param instantTime
* Instant time of the commit
* @return JavaRDD[WriteStatus] - RDD of WriteStatus to inspect errors and counts
*/
public HoodieWriteResult insertOverwriteTable(JavaRDD<HoodieRecord<T>> records, final String instantTime) {
HoodieTable<T, HoodieData<HoodieRecord<T>>, HoodieData<HoodieKey>, HoodieData<WriteStatus>> table = initTable(WriteOperationType.INSERT_OVERWRITE_TABLE, Option.ofNullable(instantTime));
table.validateInsertSchema();
preWrite(instantTime, WriteOperationType.INSERT_OVERWRITE_TABLE, table.getMetaClient());
HoodieWriteMetadata<HoodieData<WriteStatus>> result = table.insertOverwriteTable(context, instantTime, HoodieJavaRDD.of(records));
HoodieWriteMetadata<JavaRDD<WriteStatus>> resultRDD = result.clone(HoodieJavaRDD.getJavaRDD(result.getWriteStatuses()));
return new HoodieWriteResult(postWrite(resultRDD, instantTime, table), result.getPartitionToReplaceFileIds());
}
| 3.26 |
hudi_SparkRDDWriteClient_commit_rdh
|
/**
* Complete changes performed at the given instantTime marker with specified action.
*/
@Override
public boolean commit(String instantTime, JavaRDD<WriteStatus> writeStatuses, Option<Map<String, String>> extraMetadata, String commitActionType,
    Map<String, List<String>> partitionToReplacedFileIds, Option<BiConsumer<HoodieTableMetaClient, HoodieCommitMetadata>> extraPreCommitFunc) {
    context.setJobStatus(this.getClass().getSimpleName(), "Committing stats: " + config.getTableName());
List<HoodieWriteStat> writeStats = writeStatuses.map(WriteStatus::getStat).collect();
return commitStats(instantTime, HoodieJavaRDD.of(writeStatuses), writeStats, extraMetadata, commitActionType, partitionToReplacedFileIds, extraPreCommitFunc);
}
| 3.26 |
hudi_SparkRDDWriteClient_initializeMetadataTable_rdh
|
/**
* Initialize the metadata table if needed. Creating the metadata table writer
* will trigger the initial bootstrapping from the data table.
*
* @param inFlightInstantTimestamp
* - The in-flight action responsible for the metadata table initialization
*/
private void initializeMetadataTable(Option<String> inFlightInstantTimestamp) {
if (!config.isMetadataTableEnabled()) {
return;
}
try (HoodieTableMetadataWriter writer = SparkHoodieBackedTableMetadataWriter.create(context.getHadoopConf().get(), config, context, inFlightInstantTimestamp)) {
if (writer.isInitialized()) {
writer.performTableServices(inFlightInstantTimestamp);
}
} catch (Exception e) {
throw new HoodieException("Failed to instantiate Metadata table ", e);
}
}
| 3.26 |
hudi_StreamReadOperator_consumeAsMiniBatch_rdh
|
/**
* Consumes at most {@link #MINI_BATCH_SIZE} number of records
* for the given input split {@code split}.
*
* <p>Note: close the input format and remove the input split for the queue {@link #splits}
* if the split reads to the end.
*
* @param split
* The input split
*/
private void consumeAsMiniBatch(MergeOnReadInputSplit split) throws IOException {
    for (int i = 0; i < MINI_BATCH_SIZE; i++) {
if (!format.reachedEnd()) {
sourceContext.collect(format.nextRecord(null));
split.consume();
} else {
// close the input format
format.close();
// remove the split
splits.poll();
break;
}
}
}
| 3.26 |
hudi_CompactNode_execute_rdh
|
/**
 * Starts the compaction operation. It compacts the last pending compaction instant in the timeline,
 * if there is one.
*
* @param executionContext
* Execution context to run this compaction
* @param curItrCount
* cur iteration count.
* @throws Exception
* will be thrown if any error occurred.
*/
@Override
public void execute(ExecutionContext executionContext, int curItrCount) throws Exception {
HoodieTableMetaClient metaClient = HoodieTableMetaClient.builder().setConf(executionContext.getHoodieTestSuiteWriter().getConfiguration()).setBasePath(executionContext.getHoodieTestSuiteWriter().getCfg().targetBasePath).build();
Option<HoodieInstant> lastInstant = metaClient.getActiveTimeline().getWriteTimeline().filterPendingCompactionTimeline().lastInstant();
if (lastInstant.isPresent()) {
log.info("Compacting instant {}", lastInstant.get());
this.result = executionContext.getHoodieTestSuiteWriter().compact(Option.of(lastInstant.get().getTimestamp()));
executionContext.getHoodieTestSuiteWriter().commitCompaction(result, executionContext.getJsc().emptyRDD(), Option.of(lastInstant.get().getTimestamp()));
}
}
| 3.26 |
hudi_KafkaOffsetGen_isValidTimestampCheckpointType_rdh
|
/**
* Check if the checkpoint is a timestamp.
*
 * @param lastCheckpointStr
 *          last checkpoint string
 * @return true if the checkpoint is a numeric epoch timestamp (10 or 13 digits)
 */
private Boolean isValidTimestampCheckpointType(Option<String> lastCheckpointStr) {
if (!lastCheckpointStr.isPresent()) {
return false;
}
Pattern pattern = Pattern.compile("[-+]?[0-9]+(\\.[0-9]+)?");
Matcher isNum = pattern.matcher(lastCheckpointStr.get());
    return isNum.matches() && ((lastCheckpointStr.get().length() == 13) || (lastCheckpointStr.get().length() == 10));
}
| 3.26 |
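For illustration, a small standalone sketch of the same rule (the class and helper below are hypothetical, not part of the snippet): a 10-digit (epoch seconds) or 13-digit (epoch millis) numeric string is treated as a timestamp checkpoint, while a Kafka offset checkpoint such as `topic1,0:100` is not.

```java
import java.util.regex.Pattern;

public class TimestampCheckpointCheck {
  // Same numeric pattern as the snippet above.
  private static final Pattern NUMERIC = Pattern.compile("[-+]?[0-9]+(\\.[0-9]+)?");

  // Numeric and either 10 (seconds) or 13 (millis) characters long.
  static boolean looksLikeTimestampCheckpoint(String checkpoint) {
    return NUMERIC.matcher(checkpoint).matches()
        && (checkpoint.length() == 10 || checkpoint.length() == 13);
  }

  public static void main(String[] args) {
    System.out.println(looksLikeTimestampCheckpoint("1697046400"));     // true  (epoch seconds)
    System.out.println(looksLikeTimestampCheckpoint("1697046400000"));  // true  (epoch millis)
    System.out.println(looksLikeTimestampCheckpoint("topic1,0:100"));   // false (offset checkpoint)
  }
}
```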
hudi_KafkaOffsetGen_fetchValidOffsets_rdh
|
/**
* Fetch checkpoint offsets for each partition.
*
* @param consumer
* instance of {@link KafkaConsumer} to fetch offsets from.
* @param lastCheckpointStr
* last checkpoint string.
* @param topicPartitions
* set of topic partitions.
* @return a map of Topic partitions to offsets.
*/
private Map<TopicPartition, Long> fetchValidOffsets(KafkaConsumer consumer, Option<String> lastCheckpointStr, Set<TopicPartition> topicPartitions) {
Map<TopicPartition, Long> earliestOffsets = consumer.beginningOffsets(topicPartitions);
Map<TopicPartition, Long> checkpointOffsets = CheckpointUtils.strToOffsets(lastCheckpointStr.get());
boolean isCheckpointOutOfBounds = checkpointOffsets.entrySet().stream().anyMatch(offset -> offset.getValue() < earliestOffsets.get(offset.getKey()));
if (isCheckpointOutOfBounds) {
if (getBooleanWithAltKeys(this.props, KafkaSourceConfig.ENABLE_FAIL_ON_DATA_LOSS)) {
throw new HoodieStreamerException("Some data may have been lost because they are not available in Kafka any more;" + " either the data was aged out by Kafka or the topic may have been deleted before all the data in the topic was processed.");
} else {
LOG.warn(((("Some data may have been lost because they are not available in Kafka any more;" + " either the data was aged out by Kafka or the topic may have been deleted before all the data in the topic was processed.") + " If you want Hudi Streamer to fail on such cases, set \"") + KafkaSourceConfig.ENABLE_FAIL_ON_DATA_LOSS.key()) + "\" to \"true\".");
}
}
return isCheckpointOutOfBounds ? earliestOffsets : checkpointOffsets;
}
| 3.26 |
hudi_KafkaOffsetGen_offsetsToStr_rdh
|
/**
* String representation of checkpoint
* <p>
* Format: topic1,0:offset0,1:offset1,2:offset2, .....
*/
public static String offsetsToStr(OffsetRange[] ranges) {
// merge the ranges by partition to maintain one offset range map to one topic partition.
    ranges = mergeRangesByTopicPartition(ranges);
    StringBuilder sb = new StringBuilder();
    // at least 1 partition will be present.
    sb.append(ranges[0].topic() + ",");
    sb.append(Arrays.stream(ranges).map(r -> String.format("%s:%d", r.partition(), r.untilOffset())).collect(Collectors.joining(",")));
return sb.toString();
}
| 3.26 |
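A hedged usage sketch of the checkpoint format described above (a fragment, assuming the imports of the surrounding class; `OffsetRange.create` is from spark-streaming-kafka):

```java
// Two partitions of "topic1": the checkpoint string records only the untilOffset per partition.
OffsetRange[] ranges = new OffsetRange[] {
    OffsetRange.create("topic1", 0, 0L, 100L),
    OffsetRange.create("topic1", 1, 50L, 200L)
};
String checkpoint = offsetsToStr(ranges);
// Expected shape: "topic1,0:100,1:200" -- strToOffsets(checkpoint) reverses this mapping.
```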
hudi_KafkaOffsetGen_checkTopicExists_rdh
|
/**
* Check if topic exists.
*
* @param consumer
* kafka consumer
 * @return true if the topic exists, false otherwise
 */
public boolean checkTopicExists(KafkaConsumer consumer) {
Map<String, List<PartitionInfo>> result = consumer.listTopics();
return result.containsKey(topicName);
}
| 3.26 |
hudi_KafkaOffsetGen_getOffsetsByTimestamp_rdh
|
/**
* Get the checkpoint by timestamp.
* This method returns the checkpoint format based on the timestamp.
* example:
* 1. input: timestamp, etc.
* 2. output: topicName,partition_num_0:100,partition_num_1:101,partition_num_2:102.
*
* @param consumer
* @param topicName
* @param timestamp
 * @return the checkpoint string built from the offsets at the given timestamp
 */
private Option<String> getOffsetsByTimestamp(KafkaConsumer consumer, List<PartitionInfo> partitionInfoList, Set<TopicPartition> topicPartitions, String topicName, Long timestamp) {
Map<TopicPartition, Long> topicPartitionsTimestamp = partitionInfoList.stream().map(x -> new TopicPartition(x.topic(), x.partition())).collect(Collectors.toMap(Function.identity(), x -> timestamp));
Map<TopicPartition, Long> earliestOffsets = consumer.beginningOffsets(topicPartitions);
Map<TopicPartition, OffsetAndTimestamp> offsetAndTimestamp = consumer.offsetsForTimes(topicPartitionsTimestamp);
StringBuilder sb = new StringBuilder();
sb.append(topicName + ",");
for (Map.Entry<TopicPartition, OffsetAndTimestamp> map : offsetAndTimestamp.entrySet()) {
if (map.getValue() != null) {
sb.append(map.getKey().partition()).append(":").append(map.getValue().offset()).append(",");
} else {
sb.append(map.getKey().partition()).append(":").append(earliestOffsets.get(map.getKey())).append(",");
}
}
return Option.of(sb.deleteCharAt(sb.length() - 1).toString());
}
| 3.26 |
hudi_KafkaOffsetGen_strToOffsets_rdh
|
/**
* Reconstruct checkpoint from timeline.
*/
public static Map<TopicPartition, Long> strToOffsets(String checkpointStr) {
    Map<TopicPartition, Long> offsetMap = new HashMap<>();
    String[] splits = checkpointStr.split(",");
    String topic = splits[0];
    for (int i = 1; i < splits.length; i++) {
      String[] subSplits = splits[i].split(":");
      offsetMap.put(new TopicPartition(topic, Integer.parseInt(subSplits[0])), Long.parseLong(subSplits[1]));
}
return offsetMap;
}
| 3.26 |
hudi_KafkaOffsetGen_computeOffsetRanges_rdh
|
/**
* Compute the offset ranges to read from Kafka, while handling newly added partitions, skews, event limits.
*
* @param fromOffsetMap
* offsets where we left off last time
* @param toOffsetMap
* offsets of where each partitions is currently at
 * @param numEvents
 *          maximum number of events to read.
 * @param minPartitions
 *          minimum number of offset ranges to produce (ranges are split further when this exceeds the partition count).
 */
public static OffsetRange[] computeOffsetRanges(Map<TopicPartition, Long> fromOffsetMap, Map<TopicPartition, Long> toOffsetMap, long numEvents, long minPartitions) {
// Create initial offset ranges for each 'to' partition, with default from = 0 offsets.
OffsetRange[] ranges = toOffsetMap.keySet().stream().map(tp -> {
long fromOffset = fromOffsetMap.getOrDefault(tp, 0L);
return OffsetRange.create(tp, fromOffset, toOffsetMap.get(tp));
}).sorted(SORT_BY_PARTITION).collect(Collectors.toList()).toArray(new OffsetRange[toOffsetMap.size()]);
LOG.debug("numEvents {}, minPartitions {}, ranges {}", numEvents, minPartitions, ranges);
boolean needSplitToMinPartitions = minPartitions > toOffsetMap.size();
long totalEvents = m0(ranges);
long allocedEvents = 0;
    Set<Integer> exhaustedPartitions = new HashSet<>();
    List<OffsetRange> finalRanges = new ArrayList<>();
// choose the actualNumEvents with min(totalEvents, numEvents)
long actualNumEvents = Math.min(totalEvents, numEvents);
// keep going until we have events to allocate and partitions still not exhausted.
    while ((allocedEvents < numEvents) && (exhaustedPartitions.size() < toOffsetMap.size())) {
// Allocate the remaining events to non-exhausted partitions, in round robin fashion
Set<Integer> allocatedPartitionsThisLoop = new HashSet<>(exhaustedPartitions);
for (int i = 0; i < ranges.length; i++) {
long remainingEvents = actualNumEvents - allocedEvents;
long remainingPartitions = toOffsetMap.size() - allocatedPartitionsThisLoop.size();
        // if we need to split into minPartitions, recalculate the remainingPartitions
if (needSplitToMinPartitions) {
remainingPartitions = minPartitions - finalRanges.size();
}
        long eventsPerPartition = ((long) (Math.ceil((1.0 * remainingEvents) / remainingPartitions)));
OffsetRange range = ranges[i];
if (exhaustedPartitions.contains(range.partition())) {
continue;
}
        long toOffset = Math.min(range.untilOffset(), range.fromOffset() + eventsPerPartition);
        if (toOffset == range.untilOffset()) {
          exhaustedPartitions.add(range.partition());
        }
        allocedEvents += toOffset - range.fromOffset();
        // We need to recompute toOffset if allocedEvents is larger than actualNumEvents.
        if (allocedEvents > actualNumEvents) {
          long offsetsToAdd = Math.min(eventsPerPartition, actualNumEvents - allocedEvents);
          toOffset = Math.min(range.untilOffset(), toOffset + offsetsToAdd);
        }
        OffsetRange thisRange = OffsetRange.create(range.topicPartition(), range.fromOffset(), toOffset);
finalRanges.add(thisRange);
ranges[i] = OffsetRange.create(range.topicPartition(), range.fromOffset() + thisRange.count(), range.untilOffset());
allocatedPartitionsThisLoop.add(range.partition());
}
}
if (!needSplitToMinPartitions) {
LOG.debug("final ranges merged by topic partition {}", Arrays.toString(mergeRangesByTopicPartition(finalRanges.toArray(new OffsetRange[0]))));
return mergeRangesByTopicPartition(finalRanges.toArray(new OffsetRange[0]));
}
finalRanges.sort(SORT_BY_PARTITION);
LOG.debug("final ranges {}", Arrays.toString(finalRanges.toArray(new
OffsetRange[0])));
return finalRanges.toArray(new OffsetRange[0]);
}
| 3.26 |
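A rough worked example of the allocation above (a fragment, illustrative only; exact splits depend on the round-robin loop, and `TopicPartition`/`OffsetRange` come from kafka-clients and spark-streaming-kafka respectively):

```java
// Illustrative sketch, not runnable on its own.
Map<TopicPartition, Long> fromOffsets = new HashMap<>();
fromOffsets.put(new TopicPartition("topic1", 0), 0L);
fromOffsets.put(new TopicPartition("topic1", 1), 0L);

Map<TopicPartition, Long> toOffsets = new HashMap<>();
toOffsets.put(new TopicPartition("topic1", 0), 100L);   // 100 new events on partition 0
toOffsets.put(new TopicPartition("topic1", 1), 1000L);  // 1000 new events on partition 1

// Cap the batch at 200 events; minPartitions = 0 means no extra range splitting.
OffsetRange[] ranges = computeOffsetRanges(fromOffsets, toOffsets, 200, 0);
// Partition 0 is exhausted at offset 100 and partition 1 contributes roughly the remaining
// 100 events, so the returned ranges together cover about 200 events.
```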
hudi_KafkaOffsetGen_mergeRangesByTopicPartition_rdh
|
/**
* Merge ranges by topic partition, because we need to maintain the checkpoint with one offset range per topic partition.
*
* @param oldRanges
* to merge
* @return ranges merged by partition
*/
public static OffsetRange[] mergeRangesByTopicPartition(OffsetRange[] oldRanges) {
List<OffsetRange> newRanges = new ArrayList<>();
Map<TopicPartition, List<OffsetRange>> tpOffsets = Arrays.stream(oldRanges).collect(Collectors.groupingBy(OffsetRange::topicPartition));
for (Map.Entry<TopicPartition, List<OffsetRange>> entry : tpOffsets.entrySet()) {
long from = entry.getValue().stream().map(OffsetRange::fromOffset).min(Long::compare).get();
long until = entry.getValue().stream().map(OffsetRange::untilOffset).max(Long::compare).get();
newRanges.add(OffsetRange.create(entry.getKey(), from, until));
}
    // make sure the resulting ranges are ordered by partition
newRanges.sort(SORT_BY_PARTITION);
return newRanges.toArray(new OffsetRange[0]);
}
| 3.26 |
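For example, applying the min/max merge above to several ranges of the same partition (a small fragment, assuming the surrounding class's imports):

```java
OffsetRange[] merged = mergeRangesByTopicPartition(new OffsetRange[] {
    OffsetRange.create("topic1", 0, 0L, 10L),
    OffsetRange.create("topic1", 0, 10L, 25L),
    OffsetRange.create("topic1", 1, 5L, 8L)
});
// Result: one range per topic partition, ordered by partition:
//   topic1-0 [0, 25), topic1-1 [5, 8)
```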
hudi_KafkaOffsetGen_commitOffsetToKafka_rdh
|
/**
* Commit offsets to Kafka only after hoodie commit is successful.
*
* @param checkpointStr
* checkpoint string containing offsets.
*/
public void commitOffsetToKafka(String checkpointStr) {
checkRequiredProperties(props, Collections.singletonList(ConsumerConfig.GROUP_ID_CONFIG));
Map<TopicPartition, Long> offsetMap = CheckpointUtils.strToOffsets(checkpointStr);
Map<TopicPartition, OffsetAndMetadata> offsetAndMetadataMap = new HashMap<>(offsetMap.size());
try (KafkaConsumer consumer = new KafkaConsumer(kafkaParams)) {
offsetMap.forEach((topicPartition, offset) -> offsetAndMetadataMap.put(topicPartition, new OffsetAndMetadata(offset)));
consumer.commitSync(offsetAndMetadataMap);
} catch (CommitFailedException | TimeoutException e) {
LOG.warn("Committing offsets to Kafka failed, this does not impact processing of records", e);
}
}
| 3.26 |
hudi_TableChange_addPositionChange_rdh
|
/**
* Add position change.
*
* @param srcName
* column which need to be reordered
* @param dsrName
* reference position
* @param orderType
* change types
* @return this
 */
public BaseColumnChange addPositionChange(String srcName, String dsrName, ColumnPositionChange.ColumnPositionType orderType) {
Integer srcId = findIdByFullName(srcName);
    Option<Integer> dsrIdOpt = (dsrName.isEmpty()) ? Option.empty() : Option.of(findIdByFullName(dsrName));
    Integer srcParentId = id2parent.get(srcId);
Option<Integer> dsrParentIdOpt = dsrIdOpt.map(id2parent::get);
// forbid adjust hoodie metadata columns.
switch (orderType) {
case BEFORE :
checkColModifyIsLegal(dsrName);
break;
case FIRST :
        if ((((srcId == null) || (srcId == (-1))) || (srcParentId == null)) || (srcParentId == (-1))) {
          throw new HoodieSchemaException("forbid adjust top-level columns position by using through first syntax");
        }
        break;
      case AFTER :
List<String> checkColumns = HoodieRecord.HOODIE_META_COLUMNS.subList(0, HoodieRecord.HOODIE_META_COLUMNS.size() - 2);
if (checkColumns.stream().anyMatch(f -> f.equalsIgnoreCase(dsrName))) {
throw new HoodieSchemaException("forbid adjust the position of ordinary columns between meta columns");
}
break;
case NO_OPERATION :
default :
break;
}
int parentId;
if (((srcParentId != null) && dsrParentIdOpt.isPresent()) && srcParentId.equals(dsrParentIdOpt.get())) {
Types.Field parentField = internalSchema.findField(srcParentId);
if (!(parentField.type() instanceof Types.RecordType)) {
throw new HoodieSchemaException(String.format("only support reorder fields in struct type, but find: %s", parentField.type()));
}
parentId = parentField.fieldId();
    } else if ((srcParentId == null) && (!dsrParentIdOpt.isPresent())) {
      parentId = -1;
    } else if (((srcParentId != null) && (!dsrParentIdOpt.isPresent())) && orderType.equals(ColumnPositionChange.ColumnPositionType.FIRST)) {
parentId = srcParentId;
} else {
throw new HoodieSchemaException("cannot order position from different parent");
}
ArrayList<ColumnPositionChange> changes = positionChangeMap.getOrDefault(parentId, new ArrayList<>());
changes.add(ColumnPositionChange.get(srcId, dsrIdOpt.orElse(-1), orderType));
positionChangeMap.put(parentId, changes);
    return this;
  }
| 3.26 |
hudi_TableChange_checkColModifyIsLegal_rdh
|
// Modify hudi meta columns is prohibited
protected void checkColModifyIsLegal(String colNeedToModify) {
if (HoodieRecord.HOODIE_META_COLUMNS.stream().anyMatch(f -> f.equalsIgnoreCase(colNeedToModify))) {
throw new IllegalArgumentException(String.format("cannot modify hudi meta col: %s", colNeedToModify));
}
}
| 3.26 |
hudi_HoodieCompactionAdminTool_printOperationResult_rdh
|
/**
* Print Operation Result.
*
* @param initialLine
* Initial Line
* @param result
* Result
*/
private <T> void printOperationResult(String initialLine, List<T> result) {
System.out.println(initialLine);
for (T r : result) {
System.out.print(r);
}
}
| 3.26 |
hudi_HoodieCompactionAdminTool_m0_rdh
|
/**
* Executes one of compaction admin operations.
*/
public void m0(JavaSparkContext jsc) throws Exception {
    HoodieTableMetaClient metaClient = HoodieTableMetaClient.builder().setConf(jsc.hadoopConfiguration()).setBasePath(cfg.basePath).build();
try (CompactionAdminClient admin = new CompactionAdminClient(new HoodieSparkEngineContext(jsc), cfg.basePath)) {
final FileSystem fs = FSUtils.getFs(cfg.basePath, jsc.hadoopConfiguration());
if ((cfg.outputPath != null) && fs.exists(new Path(cfg.outputPath))) {
throw new IllegalStateException("Output File Path already exists");
}
switch (cfg.operation) {
case VALIDATE :
          List<ValidationOpResult> res = admin.validateCompactionPlan(metaClient, cfg.compactionInstantTime, cfg.parallelism);
if (cfg.printOutput) {
printOperationResult("Result of Validation Operation :", res);
}
serializeOperationResult(fs, res);
break;
case UNSCHEDULE_FILE :
List<RenameOpResult> r = admin.unscheduleCompactionFileId(new HoodieFileGroupId(cfg.partitionPath, cfg.fileId), cfg.skipValidation, cfg.dryRun);
if (cfg.printOutput) {
System.out.println(r);
}
serializeOperationResult(fs, r);
break;
case UNSCHEDULE_PLAN :
          List<RenameOpResult> r2 = admin.unscheduleCompactionPlan(cfg.compactionInstantTime, cfg.skipValidation, cfg.parallelism, cfg.dryRun);
          if (cfg.printOutput) {
            printOperationResult("Result of Unscheduling Compaction Plan :", r2);
          }
          serializeOperationResult(fs, r2);
          break;
        case REPAIR :
          List<RenameOpResult> repairResults = admin.repairCompaction(cfg.compactionInstantTime, cfg.parallelism, cfg.dryRun);
          if (cfg.printOutput) {
            printOperationResult("Result of Repair Operation :", repairResults);
          }
          serializeOperationResult(fs, repairResults);
break;
default :
          throw new IllegalStateException("Not yet implemented !!");
}
}
}
| 3.26 |
hudi_MetadataTableUtils_shouldUseBatchLookup_rdh
|
/**
* Whether to use batch lookup for listing the latest base files in metadata table.
* <p>
* Note that metadata table has to be enabled, and the storage type of the file system view
* cannot be EMBEDDED_KV_STORE or SPILLABLE_DISK (these two types are not integrated with
* metadata table, see HUDI-5612).
*
 * @param tableConfig
 *          Table configs.
 * @param config
 *          Write configs.
 * @return {@code true} if using batch lookup; {@code false} otherwise.
*/
public static boolean shouldUseBatchLookup(HoodieTableConfig tableConfig, HoodieWriteConfig config) {
FileSystemViewStorageType storageType = config.getClientSpecifiedViewStorageConfig().getStorageType();
return (tableConfig.isMetadataTableAvailable() && (!FileSystemViewStorageType.EMBEDDED_KV_STORE.equals(storageType))) && (!FileSystemViewStorageType.SPILLABLE_DISK.equals(storageType));
}
| 3.26 |
hudi_TableCommand_refreshMetadata_rdh
|
/**
* Refresh table metadata.
*/@ShellMethod(key = { "refresh", "metadata refresh", "commits refresh", "cleans refresh", "savepoints refresh" }, value = "Refresh table metadata")
public String refreshMetadata() {
HoodieCLI.refreshTableMetadata();
return ("Metadata for table " + HoodieCLI.getTableMetaClient().getTableConfig().getTableName()) + " refreshed.";
}
| 3.26 |
hudi_TableCommand_descTable_rdh
|
/**
* Describes table properties.
*/
@ShellMethod(key = "desc", value = "Describe Hoodie Table properties")
public String descTable() {
    HoodieTableMetaClient client = HoodieCLI.getTableMetaClient();
TableHeader header = new TableHeader().addTableHeaderField("Property").addTableHeaderField("Value");
List<Comparable[]> rows = new ArrayList<>();
rows.add(new Comparable[]{ "basePath", client.getBasePath() });
rows.add(new Comparable[]{ "metaPath", client.getMetaPath() });
rows.add(new Comparable[]{ "fileSystem", client.getFs().getScheme() });
client.getTableConfig().propsMap().entrySet().forEach(e -> {rows.add(new Comparable[]{ e.getKey(), e.getValue() });});
return HoodiePrintHelper.print(header, new HashMap<>(), "", false, -1, false, rows);
}
| 3.26 |
hudi_TableCommand_createTable_rdh
|
/**
* Create a Hoodie Table if it does not exist.
*
* @param path
* Base Path
* @param name
* Hoodie Table Name
* @param tableTypeStr
* Hoodie Table Type
* @param payloadClass
* Payload Class
*/
@ShellMethod(key = "create", value = "Create a hoodie table if not present")
public String createTable(@ShellOption(value = { "--path" }, help = "Base Path of the table")
final String path, @ShellOption(value = { "--tableName" }, help = "Hoodie Table Name")
final String name, @ShellOption(value = { "--tableType" }, defaultValue = "COPY_ON_WRITE", help = "Hoodie Table Type. Must be one of : COPY_ON_WRITE or MERGE_ON_READ")
final String tableTypeStr, @ShellOption(value = { "--archiveLogFolder" }, help = "Folder Name for storing archived timeline", defaultValue = ShellOption.NULL)
String archiveFolder, @ShellOption(value = { "--layoutVersion" }, help = "Specific Layout Version to use", defaultValue = ShellOption.NULL)
  Integer layoutVersion, @ShellOption(value = { "--payloadClass" }, defaultValue = "org.apache.hudi.common.model.HoodieAvroPayload", help = "Payload Class")
final String payloadClass) throws IOException {
boolean initialized = HoodieCLI.initConf();
HoodieCLI.initFS(initialized);
boolean existing = false;
try {
HoodieTableMetaClient.builder().setConf(HoodieCLI.conf).setBasePath(path).build();
existing = true;
} catch (TableNotFoundException dfe) {
// expected
}
// Do not touch table that already exist
if (existing) {
throw new IllegalStateException("Table already existing in path : " + path);
}
HoodieTableMetaClient.withPropertyBuilder().setTableType(tableTypeStr).setTableName(name).setArchiveLogFolder(archiveFolder).setPayloadClassName(payloadClass).setTimelineLayoutVersion(layoutVersion).initTable(HoodieCLI.conf, path);
// Now connect to ensure loading works
return m0(path, layoutVersion, false, 0, 0, 0);
}
| 3.26 |
hudi_TableCommand_writeToFile_rdh
|
/**
* Use Streams when you are dealing with raw data.
*
* @param filePath
* output file path.
* @param data
* to be written to file.
*/
private static void writeToFile(String filePath, String data) throws IOException {
    File outFile = new File(filePath);
    if (outFile.exists()) {
      outFile.delete();
    }
    // Write the UTF-8 bytes, using the byte-array length rather than the character count.
    byte[] bytes = getUTF8Bytes(data);
    try (OutputStream os = new FileOutputStream(outFile)) {
      os.write(bytes, 0, bytes.length);
    }
  }
| 3.26 |
hudi_TableCommand_fetchTableSchema_rdh
|
/**
* Fetches table schema in avro format.
*/
@ShellMethod(key = "fetch table schema", value = "Fetches latest table schema")
public String fetchTableSchema(@ShellOption(value = { "--outputFilePath" }, defaultValue = ShellOption.NULL, help = "File path to write schema")
  final String outputFilePath) throws Exception {
    HoodieTableMetaClient client = HoodieCLI.getTableMetaClient();
TableSchemaResolver tableSchemaResolver = new TableSchemaResolver(client);
Schema schema = tableSchemaResolver.getTableAvroSchema();
if (outputFilePath != null) {
LOG.info("Latest table schema : " + schema.toString(true));
writeToFile(outputFilePath, schema.toString(true));
return String.format("Latest table schema written to %s", outputFilePath);
} else {
return String.format("Latest table schema %s", schema.toString(true));
}
}
| 3.26 |
hudi_HoodieAvroIndexedRecord_readRecordPayload_rdh
|
/**
* NOTE: This method is declared final to make sure there's no polymorphism and therefore
* JIT compiler could perform more aggressive optimizations
*/
@SuppressWarnings("unchecked")
@Override
protected final IndexedRecord readRecordPayload(Kryo kryo, Input input) {
    // NOTE: We're leveraging Spark's default [[GenericAvroSerializer]] to serialize Avro
Serializer<GenericRecord> avroSerializer = kryo.getSerializer(GenericRecord.class);
return kryo.readObjectOrNull(input, GenericRecord.class, avroSerializer);
}
| 3.26 |
hudi_HoodieAvroIndexedRecord_writeRecordPayload_rdh
|
/**
* NOTE: This method is declared final to make sure there's no polymorphism and therefore
* JIT compiler could perform more aggressive optimizations
*/
@SuppressWarnings("unchecked")
@Override
protected final void writeRecordPayload(IndexedRecord payload, Kryo kryo, Output output) {
// NOTE: We're leveraging Spark's default [[GenericAvroSerializer]] to serialize Avro
Serializer<GenericRecord> avroSerializer = kryo.getSerializer(GenericRecord.class);
kryo.writeObjectOrNull(output, payload, avroSerializer);
}
| 3.26 |
hudi_HoodieClusteringJob_validateRunningMode_rdh
|
// make sure that cfg.runningMode couldn't be null
private static void validateRunningMode(Config cfg) {
// --mode has a higher priority than --schedule
// If we remove --schedule option in the future we need to change runningMode default value to EXECUTE
if (StringUtils.isNullOrEmpty(cfg.runningMode)) {
cfg.runningMode = (cfg.runSchedule) ? SCHEDULE : EXECUTE;
}
}
| 3.26 |
hudi_ImmutablePair_getRight_rdh
|
/**
* {@inheritDoc }
 */
@Override
public R getRight() {
return right;
}
| 3.26 |
hudi_ImmutablePair_of_rdh
|
/**
* <p>
 * Obtains an immutable pair from two objects inferring the generic types.
* </p>
*
* <p>
* This factory allows the pair to be created using inference to obtain the generic types.
* </p>
*
* @param <L>
* the left element type
* @param <R>
* the right element type
* @param left
* the left element, may be null
* @param right
* the right element, may be null
* @return a pair formed from the two parameters, not null
*/
public static <L, R> ImmutablePair<L, R> of(final L left, final R right) {
return new ImmutablePair<L, R>(left, right);
}
| 3.26 |
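For instance (a small fragment; generic types are inferred from the arguments):

```java
ImmutablePair<String, Integer> pair = ImmutablePair.of("retries", 3);
pair.getLeft();   // "retries"
pair.getRight();  // 3
// pair.setValue(5) would throw UnsupportedOperationException, as documented in the setValue snippet below.
```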
hudi_ImmutablePair_getLeft_rdh
|
// -----------------------------------------------------------------------
/**
* {@inheritDoc }
*/
@Override
public L getLeft() {
return left;
}
| 3.26 |
hudi_ImmutablePair_setValue_rdh
|
/**
* <p>
* Throws {@code UnsupportedOperationException}.
* </p>
*
* <p>
* This pair is immutable, so this operation is not supported.
* </p>
*
* @param value
* the value to set
* @return never
* @throws UnsupportedOperationException
* as this operation is not supported
*/
@Override
public R setValue(final R value) {
    throw new UnsupportedOperationException();
}
| 3.26 |
hudi_ArrayColumnReader_collectDataFromParquetPage_rdh
|
/**
* Collects data from a parquet page and returns the final row index where it stopped. The
* returned index can be equal to or less than total.
*
* @param total
* maximum number of rows to collect
* @param lcv
* column vector to do initial setup in data collection time
* @param valueList
* collection of values that will be fed into the vector later
* @param category
* @return int
* @throws IOException
*/
private int collectDataFromParquetPage(int total, HeapArrayVector lcv,
List<Object> valueList, LogicalType category) throws IOException {
int index = 0;
/* Here is a nested loop for collecting all values from a parquet page.
A column of array type can be considered as a list of lists, so the two loops are as below:
1. The outer loop iterates on rows (index is a row index, so points to a row in the batch), e.g.:
[0, 2, 3] <- index: 0
[NULL, 3, 4] <- index: 1
2. The inner loop iterates on values within a row (sets all data from parquet data page
for an element in ListColumnVector), so fetchNextValue returns values one-by-one:
0, 2, 3, NULL, 3, 4
As described below, the repetition level (repetitionLevel != 0)
can be used to decide when we'll start to read values for the next list.
*/
while ((!eof) && (index < total)) {
// add element to ListColumnVector one by one
lcv.offsets[index] = valueList.size();
/* Let's collect all values for a single list.
Repetition level = 0 means that a new list started there in the parquet page,
in that case, let's exit from the loop, and start to collect value for a new list.
*/
do {
/* Definition level = 0 when a NULL value was returned instead of a list
    (this is not the same as a NULL value inside a list).
*/
if (definitionLevel == 0) {
lcv.setNullAt(index);
}
valueList.add(isCurrentPageDictionaryEncoded ?
dictionaryDecodeValue(category, ((Integer) (lastValue))) : lastValue);
} while (fetchNextValue(category) && (repetitionLevel != 0) );
lcv.lengths[index] = valueList.size() - lcv.offsets[index];
index++;
}
return index;
}
| 3.26 |
hudi_ArrayColumnReader_fetchNextValue_rdh
|
/**
* Reads a single value from parquet page, puts it into lastValue. Returns a boolean indicating
* if there is more values to read (true).
*
* @param category
* @return boolean
* @throws IOException
*/
private boolean fetchNextValue(LogicalType category) throws IOException {
int left = readPageIfNeed();
    if (left > 0) {
// get the values of repetition and definitionLevel
readRepetitionAndDefinitionLevels();
// read the data if it isn't null
if (definitionLevel == maxDefLevel) {
if (isCurrentPageDictionaryEncoded) {
lastValue = dataColumn.readValueDictionaryId();
} else {
lastValue = readPrimitiveTypedRow(category);
}
} else {
lastValue = null;
}
return true;
} else {
eof = true;
return false;
}
}
| 3.26 |
hudi_ArrayColumnReader_setChildrenInfo_rdh
|
/**
 * The lengths & offsets are initialized with a default size (1024); they should be set to the
 * actual size according to the element number.
*/
private void setChildrenInfo(HeapArrayVector lcv, int itemNum, int elementNum) {
    lcv.setSize(itemNum);
    long[] lcvLength = new long[elementNum];
long[] lcvOffset = new long[elementNum];
System.arraycopy(lcv.lengths, 0, lcvLength, 0, elementNum);
System.arraycopy(lcv.offsets, 0, lcvOffset, 0, elementNum);
lcv.lengths = lcvLength;
lcv.offsets = lcvOffset;
}
| 3.26 |
hudi_ArrayColumnReader_readPrimitiveTypedRow_rdh
|
// Need to be in consistent with that VectorizedPrimitiveColumnReader#readBatchHelper
// TODO Reduce the duplicated code
private Object readPrimitiveTypedRow(LogicalType category) {
switch (category.getTypeRoot()) {
case CHAR :
case VARCHAR :
case BINARY :
case VARBINARY :
        return dataColumn.readString();
      case BOOLEAN :
return dataColumn.readBoolean();
case TIME_WITHOUT_TIME_ZONE :
case DATE :
case INTEGER :
return dataColumn.readInteger();
case TINYINT :
return dataColumn.readTinyInt();
case SMALLINT :
return dataColumn.readSmallInt();
case BIGINT :
return dataColumn.readLong();
case FLOAT :
return dataColumn.readFloat();
case DOUBLE :
return dataColumn.readDouble();
case DECIMAL :
switch (descriptor.getPrimitiveType().getPrimitiveTypeName()) {
case INT32 :
return dataColumn.readInteger();
case INT64 :
return dataColumn.readLong();
case BINARY :
case FIXED_LEN_BYTE_ARRAY :
return dataColumn.readString();
default :
throw new AssertionError();
}
case TIMESTAMP_WITHOUT_TIME_ZONE :
case TIMESTAMP_WITH_LOCAL_TIME_ZONE :
return dataColumn.readTimestamp();
default :
throw new RuntimeException("Unsupported type in the list: " + type);
}
}
| 3.26 |
hudi_TimestampBasedAvroKeyGenerator_initIfNeeded_rdh
|
/**
* The function takes care of lazily initialising dateTimeFormatter variables only once.
*/
private void initIfNeeded() {
    if (this.inputFormatter == null) {
this.inputFormatter = parser.getInputFormatter();
}
if (this.partitionFormatter == null) {
this.partitionFormatter = DateTimeFormat.forPattern(outputDateFormat);
if (this.outputDateTimeZone != null) {
partitionFormatter = partitionFormatter.withZone(outputDateTimeZone);
}
}
}
| 3.26 |
hudi_TimestampBasedAvroKeyGenerator_getDefaultPartitionVal_rdh
|
/**
* Set default value to partitionVal if the input value of partitionPathField is null.
*/
public Object getDefaultPartitionVal() {
Object result = 1L;
if ((timestampType == TimestampType.DATE_STRING) || (timestampType == TimestampType.MIXED)) {
// since partitionVal is null, we can set a default value of any format as TIMESTAMP_INPUT_DATE_FORMAT_PROP
// configured, here we take the first.
// {Config.TIMESTAMP_INPUT_DATE_FORMAT_PROP} won't be null, it has been checked in the initialization process of
// inputFormatter
String delimiter = parser.getConfigInputDateFormatDelimiter();
String format = getStringWithAltKeys(config, TIMESTAMP_INPUT_DATE_FORMAT, true).split(delimiter)[0];
// if both input and output timeZone are not configured, use GMT.
if (null != inputDateTimeZone) {
return new DateTime(result, inputDateTimeZone).toString(format);
} else if (null != outputDateTimeZone) {
return new DateTime(result, outputDateTimeZone).toString(format);
} else {
return new DateTime(result, DateTimeZone.forTimeZone(TimeZone.getTimeZone("GMT"))).toString(format);
}
}
return result;
}
| 3.26 |
hudi_TimestampBasedAvroKeyGenerator_getPartitionPath_rdh
|
/**
* Parse and fetch partition path based on data type.
*
* @param partitionVal
* partition path object value fetched from record/row
* @return the parsed partition path based on data type
*/
public String getPartitionPath(Object partitionVal) {
initIfNeeded();
    long timeMs;
if (partitionVal instanceof Double) {
timeMs = m0(((Double) (partitionVal)).longValue());
} else if (partitionVal instanceof Float) {
timeMs = m0(((Float) (partitionVal)).longValue());
} else if (partitionVal instanceof Long) {
timeMs = m0(((Long) (partitionVal)));
} else if ((partitionVal instanceof Timestamp) && isConsistentLogicalTimestampEnabled()) {
timeMs = ((Timestamp) (partitionVal)).getTime();
} else if (partitionVal instanceof Integer) {
timeMs = m0(((Integer) (partitionVal)).longValue());
} else if (partitionVal instanceof BigDecimal) {
      timeMs = m0(((BigDecimal) (partitionVal)).longValue());
    } else if (partitionVal instanceof LocalDate) {
// Avro uses LocalDate to represent the Date value internal.
timeMs = m0(((LocalDate) (partitionVal)).toEpochDay());
} else if (partitionVal instanceof CharSequence) {
if (!inputFormatter.isPresent()) {
throw new HoodieException(("Missing input formatter. Ensure " + TIMESTAMP_INPUT_DATE_FORMAT.key()) + " config is set when timestampType is DATE_STRING or MIXED!");
}
DateTime parsedDateTime = inputFormatter.get().parseDateTime(partitionVal.toString());
      if (this.outputDateTimeZone == null) {
// Use the timezone that came off the date that was passed in, if it had one
partitionFormatter = partitionFormatter.withZone(parsedDateTime.getZone());
}
      timeMs = inputFormatter.get().parseDateTime(partitionVal.toString()).getMillis();
} else {
throw new HoodieNotSupportedException("Unexpected type for partition field: " + partitionVal.getClass().getName());
}
    DateTime timestamp = new DateTime(timeMs, outputDateTimeZone);
    String partitionPath = timestamp.toString(partitionFormatter);
if (encodePartitionPath) {
partitionPath = PartitionPathEncodeUtils.escapePathName(partitionPath);
}
return hiveStylePartitioning ? (getPartitionPathFields().get(0) + "=") + partitionPath : partitionPath;
}
| 3.26 |
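An illustrative sketch of the conversion this method performs for numeric inputs, using plain Joda-Time as the snippet does. The output pattern and timezone below are assumptions for the example, not values taken from any real configuration.

```java
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;

public class PartitionPathSketch {
  public static void main(String[] args) {
    long timeMs = 1700000000000L; // epoch millis, i.e. the value after the raw input has been scaled
    // Assumed output format and timezone for illustration.
    DateTimeFormatter partitionFormatter =
        DateTimeFormat.forPattern("yyyy/MM/dd").withZone(DateTimeZone.UTC);
    String partitionPath = new DateTime(timeMs, DateTimeZone.UTC).toString(partitionFormatter);
    System.out.println(partitionPath); // 2023/11/14
  }
}
```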
hudi_MetadataPartitionType_getMetadataPartitionsNeedingWriteStatusTracking_rdh
|
/**
* Returns the list of metadata table partitions which require WriteStatus to track written records.
* <p>
* These partitions need the list of written records so that they can update their metadata.
*/
public static List<MetadataPartitionType> getMetadataPartitionsNeedingWriteStatusTracking() {
return Collections.singletonList(MetadataPartitionType.RECORD_INDEX);
}
| 3.26 |
hudi_PreferWriterConflictResolutionStrategy_getCandidateInstants_rdh
|
/**
* For tableservices like replacecommit and compaction commits this method also returns ingestion inflight commits.
*/
@Override
public Stream<HoodieInstant> getCandidateInstants(HoodieTableMetaClient metaClient, HoodieInstant currentInstant, Option<HoodieInstant> lastSuccessfulInstant) {
HoodieActiveTimeline activeTimeline = metaClient.reloadActiveTimeline();
if ((REPLACE_COMMIT_ACTION.equals(currentInstant.getAction()) && ClusteringUtils.isClusteringCommit(metaClient, currentInstant)) ||
COMPACTION_ACTION.equals(currentInstant.getAction())) {
return getCandidateInstantsForTableServicesCommits(activeTimeline, currentInstant);
} else {
return getCandidateInstantsForNonTableServicesCommits(activeTimeline, currentInstant);
}
}
| 3.26 |
hudi_PreferWriterConflictResolutionStrategy_getCandidateInstantsForTableServicesCommits_rdh
|
/**
* To find which instants are conflicting, we apply the following logic
* Get both completed instants and ingestion inflight commits that have happened since the last successful write.
* We need to check for write conflicts since they may have mutated the same files
* that are being newly created by the current write.
*/
private Stream<HoodieInstant> getCandidateInstantsForTableServicesCommits(HoodieActiveTimeline activeTimeline, HoodieInstant currentInstant) {
// Fetch list of completed commits.
Stream<HoodieInstant> completedCommitsStream = activeTimeline.getTimelineOfActions(CollectionUtils.createSet(COMMIT_ACTION, REPLACE_COMMIT_ACTION, COMPACTION_ACTION, DELTA_COMMIT_ACTION)).filterCompletedInstants().findInstantsModifiedAfterByCompletionTime(currentInstant.getTimestamp()).getInstantsAsStream();
// Fetch list of ingestion inflight commits.
Stream<HoodieInstant> inflightIngestionCommitsStream = activeTimeline.getTimelineOfActions(CollectionUtils.createSet(COMMIT_ACTION, DELTA_COMMIT_ACTION)).filterInflights().getInstantsAsStream();
// Merge and sort the instants and return.
List<HoodieInstant> instantsToConsider = Stream.concat(completedCommitsStream, inflightIngestionCommitsStream).sorted(Comparator.comparing(o -> o.getCompletionTime())).collect(Collectors.toList());
LOG.info(String.format("Instants that may have conflict with %s are %s", currentInstant, instantsToConsider));
return instantsToConsider.stream();
}
| 3.26 |
hudi_InLineFSUtils_length_rdh
|
/**
* Returns length of the block (embedded w/in the base file) identified by the given InlineFS path
*
* input: "inlinefs:/file1/s3a/?start_offset=20&length=40".
* output: 40
*/
public static long length(Path inlinePath) {
assertInlineFSPath(inlinePath);
String[] slices = inlinePath.toString().split("[?&=]");
return Long.parseLong(slices[slices.length - 1]);
}
| 3.26 |
hudi_InLineFSUtils_startOffset_rdh
|
/**
* Returns start offset w/in the base for the block identified by the given InlineFS path
*
* input: "inlinefs://file1/s3a/?start_offset=20&length=40".
* output: 20
*/
public static long startOffset(Path inlineFSPath) {
assertInlineFSPath(inlineFSPath);
String[] slices = inlineFSPath.toString().split("[?&=]");
return Long.parseLong(slices[slices.length - 3]);
}
| 3.26 |
hudi_InLineFSUtils_getOuterFilePathFromInlinePath_rdh
|
/**
* InlineFS Path format:
* "inlinefs://path/to/outer/file/outer_file_scheme/?start_offset=start_offset>&length=<length>"
* <p>
* Outer File Path format:
* "outer_file_scheme://path/to/outer/file"
* <p>
* Example
* Input: "inlinefs://file1/s3a/?start_offset=20&length=40".
* Output: "s3a://file1"
*
* @param inlineFSPath
* InLineFS Path to get the outer file Path
* @return Outer file Path from the InLineFS Path
 */
public static Path getOuterFilePathFromInlinePath(Path inlineFSPath) {
assertInlineFSPath(inlineFSPath);
final String outerFileScheme = inlineFSPath.getParent().getName();
final Path basePath = inlineFSPath.getParent().getParent();
checkArgument(basePath.toString().contains(SCHEME_SEPARATOR), "Invalid InLineFS path: " + inlineFSPath);
final String pathExceptScheme = basePath.toString().substring(basePath.toString().indexOf(SCHEME_SEPARATOR) + 1);
    final String fullPath = ((outerFileScheme + SCHEME_SEPARATOR) + (outerFileScheme.equals(LOCAL_FILESYSTEM_SCHEME) ? PATH_SEPARATOR : "")) + pathExceptScheme;
    return new Path(fullPath);
}
| 3.26 |
hudi_InLineFSUtils_getInlineFilePath_rdh
|
/**
* Get the InlineFS Path for a given schema and its Path.
* <p>
* Examples:
* Input Path: s3a://file1, origScheme: file, startOffset = 20, length = 40
* Output: "inlinefs://file1/s3a/?start_offset=20&length=40"
*
* @param outerPath
* The outer file Path
* @param origScheme
* The file schema
* @param inLineStartOffset
* Start offset for the inline file
* @param inLineLength
* Length for the inline file
* @return InlineFS Path for the requested outer path and schema
*/
public static Path getInlineFilePath(Path outerPath, String origScheme, long inLineStartOffset, long inLineLength) {
final String subPath = new File(outerPath.toString().substring(outerPath.toString().indexOf(":") + 1)).getPath();
return new Path((((((((((((((InLineFileSystem.SCHEME + SCHEME_SEPARATOR) + PATH_SEPARATOR) + subPath) + PATH_SEPARATOR) + origScheme) + PATH_SEPARATOR) + "?") + START_OFFSET_STR) + f0) + inLineStartOffset) + "&") + LENGTH_STR) + f0) + inLineLength);
}
| 3.26 |
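A hedged round-trip sketch combining the helpers above, re-using the inputs from the javadoc examples (so the expected strings come from the documentation, not re-derived):

Path outerPath = new Path("s3a://file1");
Path inlinePath = InLineFSUtils.getInlineFilePath(outerPath, "s3a", 20, 40);
// per the javadoc: "inlinefs://file1/s3a/?start_offset=20&length=40"
Path recovered = InLineFSUtils.getOuterFilePathFromInlinePath(inlinePath);
// per the javadoc: "s3a://file1"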
hudi_IncrSourceCloudStorageHelper_fetchFileData_rdh
|
/**
*
* @param filepaths
* Files from which to fetch data
* @return Data in the given list of files, as a Spark DataSet
*/
public static Option<Dataset<Row>> fetchFileData(SparkSession spark, List<String> filepaths, TypedProperties props, String fileFormat) {
if (filepaths.isEmpty()) {
return Option.empty();
}
DataFrameReader dfReader = m0(spark, props, fileFormat);
Dataset<Row> fileDataDs = dfReader.load(filepaths.toArray(new String[0]));
return Option.of(fileDataDs);
}
| 3.26 |
hudi_BoundedFsDataInputStream_getFileLength_rdh
|
/* Return the file length */
private long getFileLength() throws IOException {
if (fileLen == (-1L)) {
fileLen = f0.getContentSummary(file).getLength();
}
return fileLen;
}
| 3.26 |
hudi_HiveSchemaUtils_toHiveTypeInfo_rdh
|
/**
* Convert Flink DataType to Hive TypeInfo. For types with a precision parameter, e.g.
* timestamp, the supported precisions in Hive and Flink can be different, so the
* conversion will fail for those types if the precision is not supported by Hive.
*
* @param dataType
* a Flink DataType
* @return the corresponding Hive data type
*/
public static TypeInfo toHiveTypeInfo(DataType dataType) {
checkNotNull(dataType, "type cannot be null");
LogicalType logicalType = dataType.getLogicalType();
return logicalType.accept(new TypeInfoLogicalTypeVisitor(dataType));
}
| 3.26 |
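A brief sketch of the conversion, assuming Flink's DataTypes factory and Hive's TypeInfo are on the classpath:

TypeInfo decimalInfo = HiveSchemaUtils.toHiveTypeInfo(DataTypes.DECIMAL(10, 2));
TypeInfo stringInfo = HiveSchemaUtils.toHiveTypeInfo(DataTypes.STRING());
// decimalInfo.getTypeName() -> "decimal(10,2)"; stringInfo.getTypeName() is expected to be "string"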
hudi_HiveSchemaUtils_createHiveColumns_rdh
|
/**
* Create Hive columns from Flink table schema.
*/
private static List<FieldSchema> createHiveColumns(TableSchema schema) {
final DataType dataType = schema.toPersistedRowDataType();
final RowType rowType = ((RowType) (dataType.getLogicalType()));
final String[] fieldNames = rowType.getFieldNames().toArray(new String[0]);
final DataType[] fieldTypes = dataType.getChildren().toArray(new DataType[0]);
List<FieldSchema> columns =
new ArrayList<>(fieldNames.length);
for (int i = 0; i < fieldNames.length; i++) {
columns.add(new FieldSchema(fieldNames[i], toHiveTypeInfo(fieldTypes[i]).getTypeName(), null));
}
return columns;
}
| 3.26 |
hudi_HiveSchemaUtils_getFieldNames_rdh
|
/**
* Get field names from field schemas.
*/
public static List<String> getFieldNames(List<FieldSchema> fieldSchemas) {
return fieldSchemas.stream().map(FieldSchema::getName).collect(Collectors.toList());
}
| 3.26 |
hudi_HiveSchemaUtils_splitSchemaByPartitionKeys_rdh
|
/**
* Split the field schemas by given partition keys.
*
* @param fieldSchemas
* The Hive field schemas.
* @param partitionKeys
* The partition keys.
* @return The pair of (regular columns, partition columns) schema fields
*/
public static Pair<List<FieldSchema>, List<FieldSchema>> splitSchemaByPartitionKeys(List<FieldSchema> fieldSchemas, List<String> partitionKeys) {
List<FieldSchema> regularColumns = new ArrayList<>();
List<FieldSchema> partitionColumns = new ArrayList<>();
for (FieldSchema fieldSchema : fieldSchemas) {
if (partitionKeys.contains(fieldSchema.getName())) {
partitionColumns.add(fieldSchema);
} else
{
regularColumns.add(fieldSchema);
}
}
return Pair.of(regularColumns, partitionColumns);
}
| 3.26 |
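A small sketch of the split, assuming Hive's FieldSchema(name, type, comment) constructor and Hudi's Pair with getLeft/getRight:

List<FieldSchema> fields = Arrays.asList(
    new FieldSchema("id", "bigint", ""),
    new FieldSchema("name", "string", ""),
    new FieldSchema("dt", "string", ""));
Pair<List<FieldSchema>, List<FieldSchema>> split =
    HiveSchemaUtils.splitSchemaByPartitionKeys(fields, Collections.singletonList("dt"));
// split.getLeft() -> [id, name], split.getRight() -> [dt]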
hudi_HiveSchemaUtils_toFlinkType_rdh
|
/**
* Convert Hive data type to a Flink data type.
*
* @param hiveType
* a Hive data type
* @return the corresponding Flink data type
*/
public static DataType toFlinkType(TypeInfo
hiveType) {
checkNotNull(hiveType, "hiveType cannot be null");
switch (hiveType.getCategory()) {
case PRIMITIVE :
return toFlinkPrimitiveType(((PrimitiveTypeInfo) (hiveType)));
case LIST :
ListTypeInfo listTypeInfo = ((ListTypeInfo) (hiveType));
return DataTypes.ARRAY(toFlinkType(listTypeInfo.getListElementTypeInfo()));
case MAP :
MapTypeInfo mapTypeInfo = ((MapTypeInfo) (hiveType));
return DataTypes.MAP(toFlinkType(mapTypeInfo.getMapKeyTypeInfo()), toFlinkType(mapTypeInfo.getMapValueTypeInfo()));
case STRUCT :
StructTypeInfo structTypeInfo = ((StructTypeInfo) (hiveType));
List<String> names = structTypeInfo.getAllStructFieldNames();
List<TypeInfo> typeInfos = structTypeInfo.getAllStructFieldTypeInfos();
DataTypes.Field[] fields = new DataTypes.Field[names.size()];
for (int i = 0; i < fields.length; i++) {
fields[i] = DataTypes.FIELD(names.get(i), toFlinkType(typeInfos.get(i)));}
return DataTypes.ROW(fields);
default :throw new UnsupportedOperationException(String.format("Flink doesn't support Hive data type %s yet.", hiveType));
}
}
| 3.26 |
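A short sketch going the other way, assuming Hive's TypeInfoUtils for parsing a type string:

TypeInfo hiveType = TypeInfoUtils.getTypeInfoFromTypeString("array<bigint>");
DataType flinkType = HiveSchemaUtils.toFlinkType(hiveType);
// roughly DataTypes.ARRAY(DataTypes.BIGINT())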
hudi_BloomFilterFactory_createBloomFilter_rdh
|
/**
* A Factory class to generate different versions of {@link BloomFilter}.
 */
public class BloomFilterFactory {
/**
* Creates a new {@link BloomFilter} with the given args.
*
* @param numEntries
* total number of entries
* @param errorRate
* max allowed error rate
* @param maxNumberOfEntries
* maximum number of entries (used only by the DYNAMIC_V0 filter)
* @param bloomFilterTypeCode
* bloom filter type code
* @return the {@link BloomFilter} thus created
*/
public static BloomFilter
createBloomFilter(int numEntries, double errorRate, int maxNumberOfEntries, String bloomFilterTypeCode) {
if (bloomFilterTypeCode.equalsIgnoreCase(BloomFilterTypeCode.SIMPLE.name())) {
return new SimpleBloomFilter(numEntries, errorRate, Hash.MURMUR_HASH);
} else if (bloomFilterTypeCode.equalsIgnoreCase(BloomFilterTypeCode.DYNAMIC_V0.name())) {
return new HoodieDynamicBoundedBloomFilter(numEntries, errorRate, Hash.MURMUR_HASH, maxNumberOfEntries);
} else {
throw new IllegalArgumentException("Bloom Filter type code not recognizable " + bloomFilterTypeCode);}
}
/**
* Generate {@link BloomFilter} from serialized String.
*
* @param serString
* the serialized string of the {@link BloomFilter}
* @param bloomFilterTypeCode
* bloom filter type code as string
* @return the {@link BloomFilter}
| 3.26 |
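A hedged usage sketch for the factory, assuming Hudi's BloomFilter interface exposes add/mightContain/serializeToString:

BloomFilter filter = BloomFilterFactory.createBloomFilter(
    1000, 0.000001, -1, BloomFilterTypeCode.SIMPLE.name()); // maxNumberOfEntries is ignored for SIMPLE
filter.add("record-key-1");
boolean maybePresent = filter.mightContain("record-key-1"); // true
String serialized = filter.serializeToString(); // e.g. to store in a data file footer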
hudi_AvroSchemaUtils_m0_rdh
|
/**
* Passed in {@code Union} schema and will try to resolve the field with the {@code fieldSchemaFullName}
* w/in the union returning its corresponding schema
*
* @param schema
* target schema to be inspected
* @param fieldSchemaFullName
* target field-name to be looked up w/in the union
* @return schema of the field w/in the union identified by the {@code fieldSchemaFullName}
*/
public static Schema m0(Schema schema, String fieldSchemaFullName) {
if (schema.getType() != Type.UNION) {
return schema;
}
List<Schema> innerTypes =
schema.getTypes();
if ((innerTypes.size() == 2) && isNullable(schema)) {
// this is a basic nullable field so handle it more efficiently
return resolveNullableSchema(schema);
}
Schema nonNullType = innerTypes.stream().filter(it -> (it.getType() != Schema.Type.NULL) && Objects.equals(it.getFullName(), fieldSchemaFullName)).findFirst().orElse(null);
if (nonNullType == null) {
throw new AvroRuntimeException(String.format("Unsupported Avro UNION type %s: Only UNION of a null type and a non-null type is supported", schema));
}
return nonNullType;
}
| 3.26 |
hudi_AvroSchemaUtils_getAvroRecordQualifiedName_rdh
|
/**
* Generates fully-qualified name for the Avro's schema based on the Table's name
*
* NOTE: PLEASE READ CAREFULLY BEFORE CHANGING
* This method should not change for compatibility reasons as older versions
* of Avro might be comparing fully-qualified names rather than just the record
* names
*/
public static String getAvroRecordQualifiedName(String tableName) {
String sanitizedTableName = HoodieAvroUtils.sanitizeName(tableName);
return
((("hoodie." + sanitizedTableName) + ".") + sanitizedTableName) + "_record";
}
| 3.26 |
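For example (assuming the table name needs no sanitization):

String qualifiedName = AvroSchemaUtils.getAvroRecordQualifiedName("trips");
// -> "hoodie.trips.trips_record"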
hudi_AvroSchemaUtils_isAtomicSchemasCompatibleEvolution_rdh
|
/**
* Establishes whether {@code newReaderSchema} is compatible w/ {@code prevWriterSchema}, as
* defined by Avro's {@link AvroSchemaCompatibility}.
* {@code newReaderSchema} is considered compatible to {@code prevWriterSchema}, iff data written using {@code prevWriterSchema}
* could be read by {@code newReaderSchema}
*
* @param newReaderSchema
* new reader schema instance.
* @param prevWriterSchema
* prev writer schema instance.
* @return true if its compatible. else false.
*/
private static boolean isAtomicSchemasCompatibleEvolution(Schema newReaderSchema,
Schema prevWriterSchema) {// NOTE: Checking for compatibility of atomic types, we should ignore their
// corresponding fully-qualified names (as irrelevant)
return isSchemaCompatible(prevWriterSchema, newReaderSchema, false, true);
}
| 3.26 |
hudi_AvroSchemaUtils_isSchemaCompatible_rdh
|
/**
* Establishes whether {@code newSchema} is compatible w/ {@code prevSchema}, as
* defined by Avro's {@link AvroSchemaCompatibility}.
* From Avro's compatibility standpoint, prevSchema is the writer schema and newSchema is the reader schema.
* {@code newSchema} is considered compatible to {@code prevSchema}, iff data written using {@code prevSchema}
* could be read by {@code newSchema}
*
* @param prevSchema
* previous instance of the schema
* @param newSchema
* new instance of the schema
* @param checkNaming
* controls whether schemas fully-qualified names should be checked
* @param allowProjection
* controls whether the new schema may drop (project out) fields present in the previous schema
*/
public static boolean isSchemaCompatible(Schema prevSchema, Schema newSchema, boolean checkNaming, boolean allowProjection) {
// NOTE: We're establishing compatibility of the {@code prevSchema} and {@code newSchema}
// as following: {@code newSchema} is considered compatible to {@code prevSchema},
// iff data written using {@code prevSchema} could be read by {@code newSchema}
// In case schema projection is not allowed, new schema has to have all the same fields as the
// old schema
if (!allowProjection) {
if (!canProject(prevSchema, newSchema)) {
return false;
}
}
AvroSchemaCompatibility.SchemaPairCompatibility result = AvroSchemaCompatibility.checkReaderWriterCompatibility(newSchema, prevSchema, checkNaming);
return result.getType() == SchemaCompatibilityType.COMPATIBLE;
}
| 3.26 |
hudi_AvroSchemaUtils_canProject_rdh
|
/**
* Check that each field in the prevSchema can be populated in the newSchema except specified columns
*
* @param prevSchema
* prev schema.
* @param newSchema
* new schema
* @param exceptCols
* column names excluded from the check
* @return true if prev schema is a projection of new schema.
*/
public static boolean canProject(Schema prevSchema, Schema newSchema, Set<String> exceptCols) {
return prevSchema.getFields().stream().filter(f -> !exceptCols.contains(f.name())).map(oldSchemaField -> SchemaCompatibility.lookupWriterField(newSchema, oldSchemaField)).noneMatch(Objects::isNull);
}
| 3.26 |
hudi_AvroSchemaUtils_isValidEvolutionOf_rdh
|
/**
* Validate whether the {@code targetSchema} is a valid evolution of {@code sourceSchema}.
* Basically {@link #isCompatibleProjectionOf(Schema, Schema)} but type promotion in the
* opposite direction
*/
public static boolean isValidEvolutionOf(Schema sourceSchema, Schema targetSchema) {return (sourceSchema.getType() == Type.NULL) || isProjectionOfInternal(sourceSchema, targetSchema, AvroSchemaUtils::isAtomicSchemasCompatibleEvolution);
}
| 3.26 |
hudi_AvroSchemaUtils_isStrictProjectionOf_rdh
|
/**
* Validate whether the {@code targetSchema} is a strict projection of {@code sourceSchema}.
*
* Schema B is considered a strict projection of schema A iff
* <ol>
* <li>Schemas A and B are equal, or</li>
* <li>Schemas A and B are array schemas and element-type of B is a strict projection
* of the element-type of A, or</li>
* <li>Schemas A and B are map schemas and value-type of B is a strict projection
* of the value-type of A, or</li>
* <li>Schemas A and B are union schemas (of the same size) and every element-type of B
* is a strict projection of the corresponding element-type of A, or</li>
* <li>Schemas A and B are record schemas and every field of the record B has corresponding
* counterpart (w/ the same name) in the schema A, such that the schema of the field of the schema
* B is also a strict projection of the A field's schema</li>
* </ol>
*/
public static boolean isStrictProjectionOf(Schema sourceSchema, Schema targetSchema) {
return isProjectionOfInternal(sourceSchema, targetSchema, Objects::equals);
}
| 3.26 |
hudi_AvroSchemaUtils_isCompatibleProjectionOf_rdh
|
/**
* Validate whether the {@code targetSchema} is a "compatible" projection of {@code sourceSchema}.
* Only difference of this method from {@link #isStrictProjectionOf(Schema, Schema)} is
* the fact that it allows some legitimate type promotions (like {@code int -> long},
* {@code decimal(3, 2) -> decimal(5, 2)}, etc) that allows projection to have a "wider"
* atomic type (whereas strict projection requires atomic type to be identical)
*/
public static boolean isCompatibleProjectionOf(Schema sourceSchema, Schema targetSchema) {
return isProjectionOfInternal(sourceSchema, targetSchema, AvroSchemaUtils::isAtomicSchemasCompatible);}
| 3.26 |
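To illustrate the difference from strict projection (a sketch, assuming Avro's SchemaBuilder):

Schema source = SchemaBuilder.record("rec").fields().requiredInt("id").endRecord();
Schema target = SchemaBuilder.record("rec").fields().requiredLong("id").endRecord();
AvroSchemaUtils.isStrictProjectionOf(source, target);     // false: long is not identical to int
AvroSchemaUtils.isCompatibleProjectionOf(source, target); // true: int -> long is a legal promotion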
hudi_AvroSchemaUtils_checkSchemaCompatible_rdh
|
/**
* Checks whether writer schema is compatible with table schema considering {@code AVRO_SCHEMA_VALIDATE_ENABLE}
* and {@code SCHEMA_ALLOW_AUTO_EVOLUTION_COLUMN_DROP} options.
* To avoid collision of {@code SCHEMA_ALLOW_AUTO_EVOLUTION_COLUMN_DROP} and {@code DROP_PARTITION_COLUMNS}
* partition column names should be passed as {@code dropPartitionColNames}.
* Passed empty set means {@code DROP_PARTITION_COLUMNS} is disabled.
*
* @param tableSchema
* the latest dataset schema
* @param writerSchema
* writer schema
* @param shouldValidate
* whether {@link AvroSchemaCompatibility} check being performed
* @param allowProjection
* whether column dropping check being performed
* @param dropPartitionColNames
* partition column names to being excluded from column dropping check
* @throws SchemaCompatibilityException
* if writer schema is not compatible
*/
public static void checkSchemaCompatible(Schema tableSchema, Schema writerSchema, boolean shouldValidate, boolean allowProjection, Set<String> dropPartitionColNames) throws
SchemaCompatibilityException {
String errorMessage = null;
if ((!allowProjection) && (!canProject(tableSchema, writerSchema, dropPartitionColNames))) {
errorMessage = "Column dropping is not allowed";
}
// TODO(HUDI-4772) re-enable validations in case partition columns
// being dropped from the data-file after fixing the write schema
if ((dropPartitionColNames.isEmpty() && shouldValidate) && (!isSchemaCompatible(tableSchema, writerSchema))) {
errorMessage = "Failed schema compatibility check";
}
if (errorMessage != null) {
String errorDetails = String.format("%s\nwriterSchema: %s\ntableSchema: %s", errorMessage, writerSchema, tableSchema);
throw new SchemaCompatibilityException(errorDetails);
}
}
| 3.26 |
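A minimal call-site sketch; tableSchema and writerSchema are assumed to be Avro schemas resolved elsewhere:

try {
  AvroSchemaUtils.checkSchemaCompatible(tableSchema, writerSchema,
      true /* shouldValidate */, false /* allowProjection */, Collections.emptySet());
} catch (SchemaCompatibilityException e) {
  // the writer schema dropped a column or failed the Avro compatibility check
}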
hudi_AvroSchemaUtils_createNullableSchema_rdh
|
/**
* Creates schema following Avro's typical nullable schema definition: {@code Union(Schema.Type.NULL, <NonNullType>)},
* wrapping around provided target non-null type
*/
public static Schema createNullableSchema(Schema.Type avroType) {
return createNullableSchema(Schema.create(avroType));
}
| 3.26 |
hudi_AvroSchemaUtils_resolveNullableSchema_rdh
|
/**
* Resolves typical Avro's nullable schema definition: {@code Union(Schema.Type.NULL, <NonNullType>)},
* decomposing union and returning the target non-null type
*/
public static Schema resolveNullableSchema(Schema schema) {
if (schema.getType() != Type.UNION) {
return schema;
}
List<Schema> innerTypes = schema.getTypes();
if (innerTypes.size() != 2) {
throw new AvroRuntimeException(String.format("Unsupported Avro UNION type %s: Only UNION of a null type and a non-null type is supported", schema));
}
Schema firstInnerType = innerTypes.get(0);
Schema secondInnerType = innerTypes.get(1);
if (((firstInnerType.getType() != Type.NULL) && (secondInnerType.getType() != Type.NULL)) || ((firstInnerType.getType()
== Type.NULL) && (secondInnerType.getType() == Type.NULL))) {
throw new AvroRuntimeException(String.format("Unsupported Avro UNION type %s: Only UNION of a null type and a non-null type is supported", schema));
}
return firstInnerType.getType() == Type.NULL ? secondInnerType : firstInnerType;
}
| 3.26 |
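The two helpers above are inverses of each other, e.g.:

Schema nullableString = AvroSchemaUtils.createNullableSchema(Schema.Type.STRING);
// union {null, string}
Schema resolved = AvroSchemaUtils.resolveNullableSchema(nullableString);
// resolved.getType() == Schema.Type.STRING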
hudi_AvroSchemaUtils_containsFieldInSchema_rdh
|
/**
* Returns true in case when schema contains the field w/ provided name
*/
public static boolean containsFieldInSchema(Schema schema, String fieldName) {
try {
Schema.Field field = schema.getField(fieldName);
return field != null;
} catch (Exception e) {
return false;
}
}
| 3.26 |
hudi_AvroSchemaUtils_isNullable_rdh
|
/**
* Returns true in case provided {@link Schema} is nullable (ie accepting null values),
* returns false otherwise
*/
public static boolean isNullable(Schema schema) {
if (schema.getType() != Type.UNION) {
return false;
}
List<Schema> innerTypes = schema.getTypes();
return (innerTypes.size() > 1) && innerTypes.stream().anyMatch(it -> it.getType() == Schema.Type.NULL);
}
| 3.26 |
hudi_SourceCommitCallback_onCommit_rdh
|
/**
* Performs some action on successful Hudi commit like committing offsets to Kafka.
*
* @param lastCkptStr
* last checkpoint string.
 */
default void onCommit(String lastCkptStr) {
}
| 3.26 |
hudi_InternalFilter_write_rdh
|
/**
* Serialize the fields of this object to <code>out</code>.
*
* @param out
* <code>DataOuput</code> to serialize this object into.
* @throws IOException
*/
public void write(DataOutput out) throws IOException {
out.writeInt(VERSION);
out.writeInt(this.nbHash);
out.writeByte(this.hashType);
out.writeInt(this.vectorSize);
}
| 3.26 |
hudi_InternalFilter_readFields_rdh
|
/**
* Deserialize the fields of this object from <code>in</code>.
*
* <p>For efficiency, implementations should attempt to re-use storage in the
* existing object where possible.</p>
*
* @param in
* <code>DataInput</code> to deseriablize this object from.
* @throws IOException
*/
public void readFields(DataInput in) throws IOException {
int ver = in.readInt();
if (ver > 0)
{
// old non-versioned format
this.nbHash = ver;
this.hashType = Hash.JENKINS_HASH;
} else if (ver == VERSION) {
this.nbHash = in.readInt();
this.hashType = in.readByte();
} else {throw new IOException("Unsupported version: " + ver);
}
this.vectorSize = in.readInt();
this.hash = new HashFunction(this.vectorSize, this.nbHash, this.hashType);
}
| 3.26 |
hudi_InternalFilter_add_rdh
|
/**
* Adds an array of keys to <i>this</i> filter.
*
* @param keys
* The array of keys.
*/
public void add(Key[] keys) {
if (keys == null) {
throw new IllegalArgumentException("Key[] may not be null");}
for (Key key : keys) {
add(key);
}
}
| 3.26 |
hudi_HiveSchemaUtil_convertMapSchemaToHiveFieldSchema_rdh
|
/**
*
* @param schema
* Intermediate schema in the form of Map<String,String>
* @param syncConfig
* hive sync config providing the partition fields
* @return List of FieldSchema objects derived from the schema, excluding partition fields, since the HMS API expects partition fields as separate arguments in alter table commands.
* @throws IOException
*/
public static List<FieldSchema> convertMapSchemaToHiveFieldSchema(LinkedHashMap<String, String> schema, HiveSyncConfig syncConfig) throws IOException {
return schema.keySet().stream().map(key -> new FieldSchema(key, schema.get(key).toLowerCase(), "")).filter(field -> !syncConfig.getSplitStrings(META_SYNC_PARTITION_FIELDS).contains(field.getName())).collect(Collectors.toList());}
| 3.26 |
hudi_HiveSchemaUtil_convertField_rdh
|
/**
* Convert one field data type of parquet schema into an equivalent Hive schema.
*
* @param parquetType
* : Single parquet field
* @return : Equivalent Hive schema
*/
private static String convertField(final Type parquetType, boolean supportTimestamp, boolean doFormat) {
StringBuilder field = new StringBuilder();
if (parquetType.isPrimitive()) {
final PrimitiveType.PrimitiveTypeName parquetPrimitiveTypeName = parquetType.asPrimitiveType().getPrimitiveTypeName();
final OriginalType originalType = parquetType.getOriginalType();
if (originalType == OriginalType.DECIMAL) {
final DecimalMetadata decimalMetadata = parquetType.asPrimitiveType().getDecimalMetadata();
return field.append("DECIMAL(").append(decimalMetadata.getPrecision()).append(doFormat ? " , " : ",").append(decimalMetadata.getScale()).append(")").toString();
} else if (originalType == OriginalType.DATE) {
return field.append("DATE").toString();} else if (supportTimestamp && ((originalType == OriginalType.TIMESTAMP_MICROS) || (originalType == OriginalType.TIMESTAMP_MILLIS))) {
return field.append("TIMESTAMP").toString();
}
// TODO - fix the method naming here
return parquetPrimitiveTypeName.convert(new PrimitiveType.PrimitiveTypeNameConverter<String, RuntimeException>() {
@Override
public String convertBOOLEAN(PrimitiveType.PrimitiveTypeName primitiveTypeName) {
return HiveSchemaUtil.BOOLEAN_TYPE_NAME;
}
@Override
public String m1(PrimitiveType.PrimitiveTypeName primitiveTypeName) {
return HiveSchemaUtil.INT_TYPE_NAME;
}
@Override
public String convertINT64(PrimitiveType.PrimitiveTypeName primitiveTypeName) {
return HiveSchemaUtil.BIGINT_TYPE_NAME;
}
@Override
public String convertINT96(PrimitiveType.PrimitiveTypeName primitiveTypeName) {return "timestamp-millis";
}
@Override
public String convertFLOAT(PrimitiveType.PrimitiveTypeName primitiveTypeName) {
return HiveSchemaUtil.FLOAT_TYPE_NAME;
}
@Override
public String convertDOUBLE(PrimitiveType.PrimitiveTypeName primitiveTypeName) {
return HiveSchemaUtil.DOUBLE_TYPE_NAME;
}
@Override
public String convertFIXED_LEN_BYTE_ARRAY(PrimitiveType.PrimitiveTypeName primitiveTypeName) {
return HiveSchemaUtil.BINARY_TYPE_NAME;
}
@Override
public String convertBINARY(PrimitiveType.PrimitiveTypeName primitiveTypeName) {
if ((originalType == OriginalType.UTF8) || (originalType
== OriginalType.ENUM)) {return HiveSchemaUtil.STRING_TYPE_NAME;
} else {
return HiveSchemaUtil.BINARY_TYPE_NAME;
}
}
});
} else {
GroupType parquetGroupType = parquetType.asGroupType();
OriginalType originalType = parquetGroupType.getOriginalType();
if (originalType != null) {
switch (originalType) {
case LIST :
if (parquetGroupType.getFieldCount() != 1)
{
throw new UnsupportedOperationException("Invalid list type " + parquetGroupType);
}
Type v24 = parquetGroupType.getType(0);
if (!v24.isRepetition(Repetition.REPEATED)) {
throw new UnsupportedOperationException("Invalid list type " + parquetGroupType);}
return createHiveArray(v24, parquetGroupType.getName(), supportTimestamp, doFormat);
case MAP :
if ((parquetGroupType.getFieldCount() != 1) || parquetGroupType.getType(0).isPrimitive()) {
throw new UnsupportedOperationException("Invalid map type "
+ parquetGroupType);
}
GroupType mapKeyValType = parquetGroupType.getType(0).asGroupType();
if (((!mapKeyValType.isRepetition(Repetition.REPEATED)) || (!mapKeyValType.getOriginalType().equals(OriginalType.MAP_KEY_VALUE))) ||
(mapKeyValType.getFieldCount() != 2)) {
throw new UnsupportedOperationException("Invalid map type " + parquetGroupType);
}
Type keyType = mapKeyValType.getType(0);
if (((!keyType.isPrimitive()) || (!keyType.asPrimitiveType().getPrimitiveTypeName().equals(PrimitiveTypeName.BINARY))) || (!keyType.getOriginalType().equals(OriginalType.UTF8))) {
throw new UnsupportedOperationException("Map key type must be binary (UTF8): " + keyType);
}
Type valueType = mapKeyValType.getType(1);return createHiveMap(convertField(keyType, supportTimestamp, doFormat), convertField(valueType, supportTimestamp, doFormat), doFormat);
case ENUM :
case UTF8 :
return STRING_TYPE_NAME;
case MAP_KEY_VALUE :
// MAP_KEY_VALUE was supposed to be used to annotate key and
// value group levels in a
// MAP. However, that is always implied by the structure of
// MAP. Hence, PARQUET-113
// dropped the requirement for having MAP_KEY_VALUE.
default :
throw new UnsupportedOperationException("Cannot convert Parquet type " + parquetType);}} else {
// if no original type then it's a record
return createHiveStruct(parquetGroupType.getFields(),
supportTimestamp, doFormat);
}
}
}
| 3.26 |
hudi_HiveSchemaUtil_parquetSchemaToMapSchema_rdh
|
/**
* Returns schema in Map<String,String> form read from a parquet file.
*
* @param messageType
* : parquet Schema
* @param supportTimestamp
* @param doFormat
* : This option controls whether the schema will have spaces in the value part of the schema map. This is required because spaces in complex schemas trip up the HMS create table calls.
* This value will be false for HMS but true for QueryBasedDDLExecutors
* @return : Intermediate schema in the form of Map<String, String>
*/
public static LinkedHashMap<String, String> parquetSchemaToMapSchema(MessageType messageType, boolean supportTimestamp, boolean doFormat) throws IOException {
LinkedHashMap<String, String> schema = new LinkedHashMap<>();
List<Type> parquetFields = messageType.getFields();
for (Type parquetType : parquetFields) {
StringBuilder result = new StringBuilder();
String key = parquetType.getName();if (parquetType.isRepetition(Repetition.REPEATED)) {
result.append(createHiveArray(parquetType, "", supportTimestamp, doFormat));
} else {
result.append(convertField(parquetType, supportTimestamp, doFormat));
}
schema.put(key, result.toString());
}
return schema;
}
| 3.26 |
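A hedged sketch of the conversion, using Parquet's MessageTypeParser to build a schema inline (the method declares IOException):

MessageType parquetSchema = MessageTypeParser.parseMessageType(
    "message hudi_record { required int64 id; optional binary city (UTF8); }");
LinkedHashMap<String, String> hiveSchema =
    HiveSchemaUtil.parquetSchemaToMapSchema(parquetSchema, true, false);
// roughly {id=bigint, city=string}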
hudi_HiveSchemaUtil_createHiveStruct_rdh
|
/**
* Return a 'struct' Hive schema from a list of Parquet fields.
*
* @param parquetFields
* : list of parquet fields
* @return : Equivalent 'struct' Hive schema
*/
private static String createHiveStruct(List<Type> parquetFields, boolean supportTimestamp, boolean doFormat) {
StringBuilder struct = new StringBuilder();
struct.append(doFormat ? "STRUCT< " : "STRUCT<");
for (Type field : parquetFields) {
// TODO: struct field name is only translated to support special char($)
// We will need to extend it to other collection type
struct.append(hiveCompatibleFieldName(field.getName(), true, doFormat)).append(doFormat ? " : " : ":");
struct.append(convertField(field, supportTimestamp, doFormat)).append(doFormat ? ", " : ",");
}
struct.delete(struct.length() - (doFormat ? 2 : 1), struct.length()); // Remove the last ", "
struct.append(">");
String finalStr = struct.toString();
// Struct cannot have - in them. userstore_udr_entities has uuid in struct. This breaks the
// schema.
// HDrone sync should not fail because of this.
finalStr = finalStr.replaceAll("-", "_");
return finalStr;
}
| 3.26 |
hudi_HiveSchemaUtil_createHiveMap_rdh
|
/**
* Create a 'Map' schema from Parquet map field.
*/
private static String createHiveMap(String keyType, String valueType, boolean doFormat) {
return ((((doFormat ? "MAP< " : "MAP<") + keyType) + (doFormat ? ", " : ",")) + valueType) + ">";
}
| 3.26 |
hudi_HiveSchemaUtil_m0_rdh
|
/**
* Get the schema difference between the storage schema and hive table schema.
*/
public static SchemaDifference m0(MessageType storageSchema, Map<String, String> tableSchema, List<String> partitionKeys) {
return getSchemaDifference(storageSchema, tableSchema, partitionKeys, false);
}
| 3.26 |
hudi_HiveSchemaUtil_convertParquetSchemaToHiveFieldSchema_rdh
|
/**
* Returns equivalent Hive table Field schema read from a parquet file.
*
* @param messageType
* : Parquet Schema
* @return : Hive Table schema read from parquet file List[FieldSchema] without partitionField
*/
public static List<FieldSchema> convertParquetSchemaToHiveFieldSchema(MessageType messageType, HiveSyncConfig syncConfig) throws IOException {
return convertMapSchemaToHiveFieldSchema(parquetSchemaToMapSchema(messageType, syncConfig.getBoolean(HIVE_SUPPORT_TIMESTAMP_TYPE), false), syncConfig);
}
| 3.26 |
hudi_HiveSchemaUtil_createHiveArray_rdh
|
/**
* Create an Array Hive schema from equivalent parquet list type.
*/
private static String createHiveArray(Type elementType, String elementName, boolean supportTimestamp, boolean
doFormat) {
StringBuilder array = new StringBuilder();
array.append(doFormat ? "ARRAY< " : "ARRAY<");
if (elementType.isPrimitive()) {
array.append(convertField(elementType, supportTimestamp, doFormat));
} else {
final GroupType groupType = elementType.asGroupType();
final List<Type> groupFields = groupType.getFields();
if ((groupFields.size() > 1) || ((groupFields.size() == 1) && (elementType.getName().equals("array") ||
elementType.getName().equals(elementName + "_tuple")))) {
array.append(convertField(elementType, supportTimestamp, doFormat));
} else {
array.append(convertField(groupType.getFields().get(0),
supportTimestamp, doFormat));
}
}
array.append(">");
return array.toString();}
| 3.26 |
hudi_ClientIds_getHeartbeatFolderPath_rdh
|
// Utilities
// -------------------------------------------------------------------------
private String getHeartbeatFolderPath(String basePath) {
return (((basePath + Path.SEPARATOR) + AUXILIARYFOLDER_NAME) + Path.SEPARATOR) + HEARTBEAT_FOLDER_NAME;
}
| 3.26 |
hudi_ClientIds_builder_rdh
|
/**
* Returns the builder.
*/
public static Builder builder() {
return new Builder();
}
| 3.26 |
hudi_ClientIds_getClientId_rdh
|
/**
* Returns the client id from the heartbeat file path; the file name follows
* the naming convention: _, _1, _2, ... _N.
*/
private static String getClientId(Path path) {
String[] splits = path.getName().split(HEARTBEAT_FILE_NAME_PREFIX);
return splits.length > 1 ? splits[1] : INIT_CLIENT_ID;
}
| 3.26 |
hudi_FlatteningTransformer_apply_rdh
|
/**
* Flattens nested fields of the input dataset by registering it as a temp view and selecting a generated flat projection.
*/
@Override
public Dataset<Row> apply(JavaSparkContext jsc, SparkSession sparkSession, Dataset<Row> rowDataset, TypedProperties properties) {
try
{ // tmp table name doesn't like dashes
String tmpTable = TMP_TABLE.concat(UUID.randomUUID().toString().replace("-", "_"));
LOG.info("Registering tmp table : "
+ tmpTable);
rowDataset.createOrReplaceTempView(tmpTable);
Dataset<Row> transformed = sparkSession.sql((("select " + flattenSchema(rowDataset.schema(), null)) + " from ") + tmpTable);
sparkSession.catalog().dropTempView(tmpTable);
return transformed;
} catch (Exception e) {
throw new HoodieTransformExecutionException("Failed to apply flattening transformer", e);
}
}
| 3.26 |
hudi_CleanPlanner_getEarliestCommitToRetain_rdh
|
/**
* Returns the earliest commit to retain based on cleaning policy.
*/
public Option<HoodieInstant> getEarliestCommitToRetain() {
return CleanerUtils.getEarliestCommitToRetain(hoodieTable.getMetaClient().getActiveTimeline().getCommitsAndCompactionTimeline(),
config.getCleanerPolicy(), config.getCleanerCommitsRetained(), Instant.now(), config.getCleanerHoursRetained(), hoodieTable.getMetaClient().getTableConfig().getTimelineTimezone());}
| 3.26 |
hudi_CleanPlanner_getSavepointedDataFiles_rdh
|
/**
* Get the list of data file names savepointed.
 */
public Stream<String> getSavepointedDataFiles(String savepointTime) {
if (!hoodieTable.getSavepointTimestamps().contains(savepointTime)) {
throw new HoodieSavepointException(("Could not get data files for savepoint " + savepointTime) + ". No such savepoint.");
}
HoodieInstant instant = new HoodieInstant(false, HoodieTimeline.SAVEPOINT_ACTION, savepointTime);
HoodieSavepointMetadata metadata;
try { metadata = TimelineMetadataUtils.deserializeHoodieSavepointMetadata(hoodieTable.getActiveTimeline().getInstantDetails(instant).get());
} catch (IOException e) {throw new HoodieSavepointException("Could not get savepointed data files for savepoint " + savepointTime, e);
}
return metadata.getPartitionMetadata().values().stream().flatMap(s -> s.getSavepointDataFile().stream());
}
| 3.26 |