name (string, 12–178 chars) | code_snippet (string, 8–36.5k chars) | score (float64, 3.26–3.68) |
---|---|---|
hudi_HoodieTable_validateSchema_rdh
|
/**
* Ensure that the current writerSchema is compatible with the latest schema of this dataset.
*
* When inserting/updating data, we read records using the last used schema and convert them to the
* GenericRecords with writerSchema. Hence, we need to ensure that this conversion can take place without errors.
 */
private void validateSchema() throws HoodieUpsertException, HoodieInsertException {
boolean shouldValidate = config.shouldValidateAvroSchema();
boolean allowProjection = config.shouldAllowAutoEvolutionColumnDrop();
if ((((!shouldValidate) && allowProjection) || getActiveTimeline().getCommitsTimeline().filterCompletedInstants().empty()) || StringUtils.isNullOrEmpty(config.getSchema())) {
// Check not required
return;
}
try {
TableSchemaResolver schemaResolver = new TableSchemaResolver(getMetaClient());
Option<Schema> existingTableSchema = schemaResolver.getTableAvroSchemaIfPresent(false);
      if (!existingTableSchema.isPresent()) {
        return;
}
Schema writerSchema = HoodieAvroUtils.createHoodieWriteSchema(config.getSchema());
Schema tableSchema = HoodieAvroUtils.createHoodieWriteSchema(existingTableSchema.get());
AvroSchemaUtils.checkSchemaCompatible(tableSchema, writerSchema, shouldValidate, allowProjection, getDropPartitionColNames());
} catch (Exception e) {
throw new HoodieException("Failed to read schema/check compatibility for base path " +
metaClient.getBasePath(), e);
}
}
| 3.26 |
hudi_HoodieTable_getIndex_rdh
|
/**
* Return the index.
*/
public HoodieIndex<?, ?> getIndex() {
return index;
}
| 3.26 |
hudi_HoodieTable_getIndexingMetadataWriter_rdh
|
/**
* Gets the metadata writer for async indexer.
*
* @param triggeringInstantTimestamp
* The instant that is triggering this metadata write.
* @return An instance of {@link HoodieTableMetadataWriter}.
*/
public Option<HoodieTableMetadataWriter> getIndexingMetadataWriter(String triggeringInstantTimestamp) {
return getMetadataWriter(triggeringInstantTimestamp, LAZY);
}
| 3.26 |
hudi_HoodieTable_clearMetadataTablePartitionsConfig_rdh
|
/**
* Clears hoodie.table.metadata.partitions in hoodie.properties
*/
private void clearMetadataTablePartitionsConfig(Option<MetadataPartitionType> partitionType, boolean clearAll) {
Set<String> partitions = metaClient.getTableConfig().getMetadataPartitions();
if (clearAll && (partitions.size() > 0)) {
LOG.info("Clear hoodie.table.metadata.partitions in hoodie.properties");
metaClient.getTableConfig().setValue(TABLE_METADATA_PARTITIONS.key(), EMPTY_STRING);
HoodieTableConfig.update(metaClient.getFs(), new Path(metaClient.getMetaPath()), metaClient.getTableConfig().getProps());
} else if (partitionType.isPresent()
&& partitions.remove(partitionType.get().getPartitionPath())) {
metaClient.getTableConfig().setValue(HoodieTableConfig.TABLE_METADATA_PARTITIONS.key(), String.join(",", partitions));
HoodieTableConfig.update(metaClient.getFs(), new Path(metaClient.getMetaPath()), metaClient.getTableConfig().getProps());
}
}
| 3.26 |
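At its core this method either blanks the whole hoodie.table.metadata.partitions value or removes a single partition path from a comma-separated list and rejoins it. A minimal, self-contained sketch of that string handling follows; the class and method names are hypothetical, not Hudi APIs.

```java
import java.util.LinkedHashSet;
import java.util.Set;

public class PartitionConfigExample {
  // Removes one partition path from a comma-separated config value, mirroring the
  // split-remove-rejoin logic above. Returns the original value if nothing was removed.
  static String removePartition(String configValue, String partitionToDrop) {
    Set<String> partitions = new LinkedHashSet<>();
    for (String p : configValue.split(",")) {
      if (!p.isEmpty()) {
        partitions.add(p);
      }
    }
    return partitions.remove(partitionToDrop) ? String.join(",", partitions) : configValue;
  }

  public static void main(String[] args) {
    System.out.println(removePartition("files,column_stats,bloom_filters", "column_stats"));
    // prints: files,bloom_filters
  }
}
```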
hudi_HoodieTable_getBaseFileOnlyView_rdh
|
/**
* Get the base file only view of the file system for this table.
*/
public BaseFileOnlyView getBaseFileOnlyView() {
return getViewManager().getFileSystemView(metaClient);
}
| 3.26 |
hudi_HoodieTable_rollbackInflightCompaction_rdh
|
/**
* Rollback failed compactions. Inflight rollbacks for compactions revert the .inflight file
* to the .requested file.
*
* @param inflightInstant
* Inflight Compaction Instant
*/
public void rollbackInflightCompaction(HoodieInstant inflightInstant, Function<String, Option<HoodiePendingRollbackInfo>> getPendingRollbackInstantFunc) {
    ValidationUtils.checkArgument(inflightInstant.getAction().equals(HoodieTimeline.COMPACTION_ACTION));
    rollbackInflightInstant(inflightInstant, getPendingRollbackInstantFunc);
}
| 3.26 |
hudi_HoodieTable_m4_rdh
|
/**
* Finalize the written data onto storage. Perform any final cleanups.
*
* @param context
* HoodieEngineContext
* @param stats
* List of HoodieWriteStats
* @throws HoodieIOException
* if some paths can't be finalized on storage
   */
  public void m4(HoodieEngineContext context, String instantTs, List<HoodieWriteStat> stats) throws HoodieIOException {
reconcileAgainstMarkers(context, instantTs, stats, config.getConsistencyGuardConfig().isConsistencyCheckEnabled());
}
| 3.26 |
hudi_HoodieTable_logCompact_rdh
|
/**
* Run Log Compaction on the table. Log Compaction arranges the data so that it is optimized for data access.
*
* @param context
* HoodieEngineContext
* @param logCompactionInstantTime
* Instant Time
*/
public HoodieWriteMetadata<O> logCompact(HoodieEngineContext context,
String logCompactionInstantTime) {
throw new UnsupportedOperationException("Log compaction is not supported for this table type");
}
| 3.26 |
hudi_HoodieTable_reconcileAgainstMarkers_rdh
|
/**
* Reconciles WriteStats and marker files to detect and safely delete duplicate data files created because of Spark
* retries.
*
* @param context
* HoodieEngineContext
* @param instantTs
* Instant Timestamp
* @param stats
* Hoodie Write Stat
* @param consistencyCheckEnabled
* Consistency Check Enabled
* @throws HoodieIOException
*/
protected void reconcileAgainstMarkers(HoodieEngineContext context, String instantTs, List<HoodieWriteStat> stats, boolean consistencyCheckEnabled) throws HoodieIOException {
try {
// Reconcile marker and data files with WriteStats so that partially written data-files due to failed
// (but succeeded on retry) tasks are removed.
String basePath = getMetaClient().getBasePath();
WriteMarkers markers = WriteMarkersFactory.get(config.getMarkersType(), this, instantTs);
if (!markers.doesMarkerDirExist()) {
// can happen if it was an empty write say.
return;
}
      // Ignore log files appended for updates, since they are already fail-safe,
      // but newly created log files should be included.
Set<String> invalidDataPaths = getInvalidDataPaths(markers);
Set<String> v7 = stats.stream().map(HoodieWriteStat::getPath).collect(Collectors.toSet());
Set<String> validCdcDataPaths = stats.stream().map(HoodieWriteStat::getCdcStats).filter(Objects::nonNull).flatMap(cdcStat -> cdcStat.keySet().stream()).collect(Collectors.toSet());
      // Contains the list of partially created files. These need to be cleaned up.
invalidDataPaths.removeAll(v7);
invalidDataPaths.removeAll(validCdcDataPaths);
if (!invalidDataPaths.isEmpty()) {
LOG.info("Removing duplicate files created due to task retries before committing. Paths=" + invalidDataPaths);
Map<String, List<Pair<String, String>>> invalidPathsByPartition = invalidDataPaths.stream().map(dp -> Pair.of(new Path(basePath, dp).getParent().toString(), new Path(basePath, dp).toString())).collect(Collectors.groupingBy(Pair::getKey));
        // Ensure all files in the delete list are actually present. This is mandatory for an eventually consistent FS.
        // Otherwise, we may miss deleting such files. If files are not found even after retries, fail the commit.
if (consistencyCheckEnabled) {
// This will either ensure all files to be deleted are present.
waitForAllFiles(context, invalidPathsByPartition, FileVisibility.APPEAR);
}
// Now delete partially written files
context.setJobStatus(this.getClass().getSimpleName(), "Delete all partially written files: " + config.getTableName());
deleteInvalidFilesByPartitions(context, invalidPathsByPartition);
// Now ensure the deleted files disappear
if (consistencyCheckEnabled) {
// This will either ensure all files to be deleted are absent.
waitForAllFiles(context, invalidPathsByPartition, FileVisibility.DISAPPEAR);
}
}
    } catch (IOException ioe) {
      throw new HoodieIOException(ioe.getMessage(), ioe);
}
}
| 3.26 |
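The heart of the reconciliation is a set difference (paths referenced by markers minus paths recorded in write stats) followed by grouping the leftovers by partition directory. A self-contained sketch of just that step, using plain strings instead of Hudi types:

```java
import java.util.*;
import java.util.stream.Collectors;

public class MarkerReconcileExample {
  // Given paths referenced by markers and paths recorded in write stats, compute the
  // partially written files to delete, grouped by their parent directory.
  public static void main(String[] args) {
    Set<String> markerPaths = new HashSet<>(Arrays.asList(
        "2024/01/01/file1.parquet", "2024/01/01/file2.parquet", "2024/01/02/file3.parquet"));
    Set<String> committedPaths = new HashSet<>(Collections.singletonList("2024/01/01/file1.parquet"));

    Set<String> invalid = new HashSet<>(markerPaths);
    invalid.removeAll(committedPaths);

    Map<String, List<String>> invalidByPartition = invalid.stream()
        .collect(Collectors.groupingBy(p -> p.substring(0, p.lastIndexOf('/'))));
    System.out.println(invalidByPartition);
    // e.g. {2024/01/01=[2024/01/01/file2.parquet], 2024/01/02=[2024/01/02/file3.parquet]} (map order not guaranteed)
  }
}
```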
hudi_HoodieTable_getSliceView_rdh
|
/**
* Get the full view of the file system for this table.
*/
public SliceView getSliceView() {
    return getViewManager().getFileSystemView(metaClient);
}
| 3.26 |
hudi_HoodieTable_getCleanTimeline_rdh
|
/**
* Get clean timeline.
*/
public HoodieTimeline getCleanTimeline() {
return getActiveTimeline().getCleanerTimeline();
}
| 3.26 |
hudi_HoodieTable_rollbackInflightInstant_rdh
|
/**
* Rollback inflight instant to requested instant
*
* @param inflightInstant
* Inflight instant
* @param getPendingRollbackInstantFunc
* Function to get rollback instant
*/
private void rollbackInflightInstant(HoodieInstant inflightInstant, Function<String, Option<HoodiePendingRollbackInfo>> getPendingRollbackInstantFunc) {
    final String commitTime = getPendingRollbackInstantFunc.apply(inflightInstant.getTimestamp()).map(entry -> entry.getRollbackInstant().getTimestamp()).orElse(getMetaClient().createNewInstantTime());
scheduleRollback(f0, commitTime, inflightInstant, false, config.shouldRollbackUsingMarkers(), false);
rollback(f0, commitTime, inflightInstant, false, false);
getActiveTimeline().revertInstantFromInflightToRequested(inflightInstant);
}
| 3.26 |
hudi_HoodieTable_getCompletedCommitsTimeline_rdh
|
/**
* Get only the completed (no-inflights) commit + deltacommit timeline.
*/
public HoodieTimeline getCompletedCommitsTimeline() {
return metaClient.getCommitsTimeline().filterCompletedInstants();
}
| 3.26 |
hudi_HoodieTable_getSavepointTimestamps_rdh
|
/**
* Get the list of savepoint timestamps in this table.
*/
public Set<String> getSavepointTimestamps() {
return m3().getInstantsAsStream().map(HoodieInstant::getTimestamp).collect(Collectors.toSet());
}
| 3.26 |
hudi_HoodieTable_deleteMetadataIndexIfNecessary_rdh
|
/**
* Deletes the metadata partition if the writer disables any metadata index.
*/
public void deleteMetadataIndexIfNecessary() {
Stream.of(MetadataPartitionType.values()).forEach(partitionType -> {
if (shouldDeleteMetadataPartition(partitionType)) {
        try {
          LOG.info("Deleting metadata partition because it is disabled in writer: " + partitionType.name());
if (metadataPartitionExists(metaClient.getBasePath(), f0, partitionType)) {
deleteMetadataPartition(metaClient.getBasePath(), f0, partitionType);
}
clearMetadataTablePartitionsConfig(Option.of(partitionType), false);
} catch (HoodieMetadataException e) {
          throw new HoodieException("Failed to delete metadata partition: " + partitionType.name(), e);
}
}
});
}
| 3.26 |
hudi_HoodieTable_getHoodieView_rdh
|
/**
* Get complete view of the file system for this table with ability to force sync.
*/
public SyncableFileSystemView getHoodieView() {
return getViewManager().getFileSystemView(metaClient);
}
| 3.26 |
hudi_HoodieTable_getConsistencyGuard_rdh
|
/**
* Instantiate {@link ConsistencyGuard} based on configs.
* <p>
* Default consistencyGuard class is {@link OptimisticConsistencyGuard}.
*/
public static ConsistencyGuard getConsistencyGuard(FileSystem fs, ConsistencyGuardConfig consistencyGuardConfig)
throws IOException {
try {
return consistencyGuardConfig.shouldEnableOptimisticConsistencyGuard() ? new OptimisticConsistencyGuard(fs, consistencyGuardConfig) : new FailSafeConsistencyGuard(fs, consistencyGuardConfig);
} catch (Throwable e) {
throw new IOException("Could not load ConsistencyGuard ", e);
}
}
| 3.26 |
hudi_HoodieTable_getPendingCommitTimeline_rdh
|
/**
* Get only the inflights (no-completed) commit timeline.
*/
public HoodieTimeline getPendingCommitTimeline() {
return metaClient.getCommitsTimeline().filterPendingExcludingMajorAndMinorCompaction();
}
| 3.26 |
hudi_HoodieTable_isPartitioned_rdh
|
/**
*
 * @return whether the table is physically partitioned, based on the partition fields stored in the table config.
*/
public boolean isPartitioned() {
return getMetaClient().getTableConfig().isTablePartitioned();
}
| 3.26 |
hudi_GenericRecordPartialPayloadGenerator_validate_rdh
|
// At least one entry should be null
private boolean validate(Object object) {
if (object == null) {
return true;
    } else if (object instanceof GenericRecord) {
      for (Schema.Field field : ((GenericRecord) (object)).getSchema().getFields()) {
boolean ret = validate(((GenericRecord) (object)).get(field.name()));
if (ret) {
return ret;
}
}
}
return false;
}
| 3.26 |
hudi_ThreadUtils_collectActiveThreads_rdh
|
/**
* Fetches all active threads currently running in the JVM
*/
public static List<Thread> collectActiveThreads() {
    ThreadGroup threadGroup = Thread.currentThread().getThreadGroup();
while (threadGroup.getParent() != null) {
threadGroup = threadGroup.getParent();
}
Thread[] activeThreads = new Thread[threadGroup.activeCount()];
threadGroup.enumerate(activeThreads);
    return Arrays.asList(activeThreads);
  }
| 3.26 |
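A standalone version of the same walk-to-root-ThreadGroup technique, usable outside Hudi; the only difference is that this sketch trims the result to the count actually returned by enumerate(), which can be smaller than activeCount() suggested:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class ActiveThreadsExample {
  // Walk up to the root ThreadGroup and enumerate every active thread in the JVM.
  public static List<Thread> collectActiveThreads() {
    ThreadGroup threadGroup = Thread.currentThread().getThreadGroup();
    while (threadGroup.getParent() != null) {
      threadGroup = threadGroup.getParent();
    }
    Thread[] activeThreads = new Thread[threadGroup.activeCount()];
    int count = threadGroup.enumerate(activeThreads);
    // enumerate() may fill fewer slots than activeCount() estimated; trim accordingly.
    return new ArrayList<>(Arrays.asList(activeThreads).subList(0, count));
  }

  public static void main(String[] args) {
    collectActiveThreads().forEach(t -> System.out.println(t.getName() + " (daemon=" + t.isDaemon() + ")"));
  }
}
```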
hudi_ClusteringTask_newBuilder_rdh
|
/**
* Utility to create builder for {@link ClusteringTask}.
*
* @return Builder for {@link ClusteringTask}.
*/
public static Builder newBuilder() {
return new Builder();
}
| 3.26 |
hudi_HoodieRowDataFileWriterFactory_getRowDataFileWriter_rdh
|
/**
* Factory method to assist in instantiating an instance of {@link HoodieRowDataFileWriter}.
*
* @param path
* path of the RowFileWriter.
* @param hoodieTable
* instance of {@link HoodieTable} in use.
* @param config
* instance of {@link HoodieWriteConfig} to use.
* @param schema
* schema of the dataset in use.
* @return the instantiated {@link HoodieRowDataFileWriter}.
* @throws IOException
* if format is not supported or if any exception during instantiating the RowFileWriter.
*/
public static HoodieRowDataFileWriter getRowDataFileWriter(Path path, HoodieTable hoodieTable, HoodieWriteConfig config, RowType schema) throws IOException {
final String extension = FSUtils.getFileExtension(path.getName());
if (PARQUET.getFileExtension().equals(extension)) {
return newParquetInternalRowFileWriter(path, config, schema, hoodieTable);
}
throw new UnsupportedOperationException(extension + " format not supported yet.");
}
| 3.26 |
hudi_IOUtils_getMaxMemoryAllowedForMerge_rdh
|
/**
* Dynamic calculation of max memory to use for spillable map. There is always more than one task
* running on an executor and each task maintains a spillable map.
* user.available.memory = executor.memory * (1 - memory.fraction)
* spillable.available.memory = user.available.memory * hoodie.memory.fraction / executor.cores.
* Anytime the engine memory fractions/total memory is changed, the memory used for spillable map
* changes accordingly.
*/
public static long getMaxMemoryAllowedForMerge(TaskContextSupplier context, String maxMemoryFraction) {
Option<String> totalMemoryOpt = context.getProperty(EngineProperty.TOTAL_MEMORY_AVAILABLE);
Option<String> memoryFractionOpt = context.getProperty(EngineProperty.MEMORY_FRACTION_IN_USE);
Option<String> totalCoresOpt = context.getProperty(EngineProperty.TOTAL_CORES_PER_EXECUTOR);
if ((totalMemoryOpt.isPresent() && memoryFractionOpt.isPresent()) && totalCoresOpt.isPresent()) {
long executorMemoryInBytes = Long.parseLong(totalMemoryOpt.get());
      double memoryFraction = Double.parseDouble(memoryFractionOpt.get());
      double maxMemoryFractionForMerge = Double.parseDouble(maxMemoryFraction);
      long executorCores = Long.parseLong(totalCoresOpt.get());
      double userAvailableMemory = (executorMemoryInBytes * (1 - memoryFraction)) / executorCores;
      long v8 = ((long) (Math.floor(userAvailableMemory * maxMemoryFractionForMerge)));
return Math.max(DEFAULT_MIN_MEMORY_FOR_SPILLABLE_MAP_IN_BYTES, v8);
} else {
return DEFAULT_MAX_MEMORY_FOR_SPILLABLE_MAP_IN_BYTES;
}
}
| 3.26 |
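The formula in the Javadoc can be exercised in isolation. Below is a sketch of the arithmetic only, with a made-up minimum floor constant (Hudi's actual default constants may differ):

```java
public class SpillableMemoryBudgetExample {
  private static final long DEFAULT_MIN_BYTES = 100L * 1024 * 1024; // illustrative floor, not Hudi's constant

  // user.available.memory = executor.memory * (1 - memory.fraction) / executor.cores
  // spillable.available.memory = user.available.memory * maxMemoryFractionForMerge
  static long maxMemoryForMerge(long executorMemoryBytes, double memoryFraction, long executorCores, double maxMemoryFractionForMerge) {
    double userAvailableMemory = (executorMemoryBytes * (1 - memoryFraction)) / executorCores;
    long budget = (long) Math.floor(userAvailableMemory * maxMemoryFractionForMerge);
    return Math.max(DEFAULT_MIN_BYTES, budget);
  }

  public static void main(String[] args) {
    // 8 GiB executor, 60% reserved by the engine, 4 cores, 60% of the remainder for the spillable map
    long bytes = maxMemoryForMerge(8L * 1024 * 1024 * 1024, 0.6, 4, 0.6);
    System.out.println(bytes / (1024 * 1024) + " MiB"); // ~491 MiB
  }
}
```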
hudi_HoodieJavaRDD_of_rdh
|
/**
*
* @param data
* a {@link List} of objects in type T.
* @param context
* {@link HoodieSparkEngineContext} to use.
* @param parallelism
* parallelism for the {@link JavaRDD<T>}.
* @param <T>
* type of object.
* @return a new instance containing the {@link JavaRDD<T>} instance.
   */
  public static <T> HoodieJavaRDD<T> of(List<T> data, HoodieSparkEngineContext context, int parallelism) {
return new HoodieJavaRDD<>(context.getJavaSparkContext().parallelize(data, parallelism));
}
| 3.26 |
hudi_HoodieJavaRDD_getJavaRDD_rdh
|
/**
*
* @param hoodieData
* {@link HoodieJavaRDD <T>} instance containing the {@link JavaRDD} of objects.
* @param <T>
* type of object.
 * @return the {@link JavaRDD} of objects in type T.
*/
public static <T> JavaRDD<T> getJavaRDD(HoodieData<T> hoodieData) {
    return ((HoodieJavaRDD<T>) (hoodieData)).rddData;
}
| 3.26 |
hudi_HoodieMetaSyncOperations_addPartitionsToTable_rdh
|
/**
* Add partitions to the table in metastore.
*/
default void addPartitionsToTable(String tableName, List<String> partitionsToAdd) {
}
| 3.26 |
hudi_HoodieMetaSyncOperations_dropTable_rdh
|
/**
* Drop table from metastore.
*/
default void dropTable(String tableName) {
}
| 3.26 |
hudi_HoodieMetaSyncOperations_updateTableSchema_rdh
|
/**
* Update schema for the table in the metastore.
*/
default void updateTableSchema(String tableName, MessageType newSchema) {
}
| 3.26 |
hudi_HoodieMetaSyncOperations_getPartitionsByFilter_rdh
|
/**
 * Get the metadata of partitions that belong to the specified table.
 *
 * @param tableName table name.
 * @param filter partition filter.
 * @return the list of matching partitions, empty by default.
 */
default List<Partition> getPartitionsByFilter(String tableName, String filter) {
return Collections.emptyList();
}
| 3.26 |
hudi_HoodieMetaSyncOperations_updateTableComments_rdh
|
/**
 * Update the field comments for the table in the metastore, using the ones from storage.
 *
 * @return true if the comments were updated.
 */
default boolean updateTableComments(String tableName, List<FieldSchema> fromMetastore, List<FieldSchema> fromStorage) {
return false;
}
| 3.26 |
hudi_HoodieMetaSyncOperations_updateTableProperties_rdh
|
/**
* Update the table properties in metastore.
*
* @return true if properties updated.
*/
default boolean updateTableProperties(String tableName, Map<String, String> tableProperties) {
return false;
}
| 3.26 |
hudi_HoodieMetaSyncOperations_getMetastoreFieldSchemas_rdh
|
/**
* Get the list of field schemas from metastore.
*/
default List<FieldSchema> getMetastoreFieldSchemas(String tableName) {
return Collections.emptyList();
}
| 3.26 |
hudi_HoodieMetaSyncOperations_getLastReplicatedTime_rdh
|
/**
* Get the timestamp of last replication.
*/
default Option<String> getLastReplicatedTime(String tableName) {
return Option.empty();
}
| 3.26 |
hudi_HoodieMetaSyncOperations_getLastCommitCompletionTimeSynced_rdh
|
/**
* Get the commit completion time of last sync
*/
default Option<String> getLastCommitCompletionTimeSynced(String tableName) {
return Option.empty();
}
| 3.26 |
hudi_HoodieMetaSyncOperations_createDatabase_rdh
|
/**
* Create a database in the metastore.
*/
default void createDatabase(String databaseName) {
}
| 3.26 |
hudi_HoodieMetaSyncOperations_updatePartitionsToTable_rdh
|
/**
* Update partitions to the table in metastore.
*/
default void updatePartitionsToTable(String tableName, List<String> changedPartitions) {
}
| 3.26 |
hudi_HoodieMetaSyncOperations_dropPartitions_rdh
|
/**
* Drop partitions from the table in metastore.
*/
default void dropPartitions(String tableName, List<String> partitionsToDrop) {
}
| 3.26 |
hudi_HoodieMetaSyncOperations_getMetastoreSchema_rdh
|
/**
* Get the schema from metastore.
*/
default Map<String, String> getMetastoreSchema(String tableName) {
return Collections.emptyMap();
}
| 3.26 |
hudi_HoodieMetaSyncOperations_updateSerdeProperties_rdh
|
/**
* Update the SerDe properties in metastore.
*
* @return true if properties updated.
*/
default boolean updateSerdeProperties(String tableName, Map<String, String> serdeProperties, boolean useRealtimeFormat) {
return false;
}
| 3.26 |
hudi_HoodieMetaSyncOperations_updateLastCommitTimeSynced_rdh
|
/**
* Update the timestamp of last sync.
*/
default void updateLastCommitTimeSynced(String tableName) {
}
| 3.26 |
hudi_HoodieMetaSyncOperations_getAllPartitions_rdh
|
/**
* Get all partitions for the table in the metastore.
*/
default List<Partition> getAllPartitions(String tableName) {
return Collections.emptyList();
}
| 3.26 |
hudi_HoodieMetaSyncOperations_tableExists_rdh
|
/**
* Check if table exists in metastore.
*/
default boolean tableExists(String tableName) {
return false;
}
| 3.26 |
hudi_HoodieMetaSyncOperations_deleteLastReplicatedTimeStamp_rdh
|
/**
* Delete the timestamp of last replication.
*/
default void deleteLastReplicatedTimeStamp(String tableName) {
}
| 3.26 |
hudi_HoodieMetaSyncOperations_databaseExists_rdh
|
/**
* Check if a database already exists in the metastore.
*/
default boolean databaseExists(String databaseName) {
    return false;
  }
| 3.26 |
hudi_HoodieMetaSyncOperations_getLastCommitTimeSynced_rdh
|
/**
* Get the timestamp of last sync.
*/
default Option<String> getLastCommitTimeSynced(String tableName) {
    return Option.empty();
  }
| 3.26 |
hudi_HoodieMetaSyncOperations_createTable_rdh
|
/**
* Create the table.
*
* @param tableName
* The table name.
* @param storageSchema
* The table schema.
* @param inputFormatClass
* The input format class of this table.
* @param outputFormatClass
* The output format class of this table.
* @param serdeClass
* The serde class of this table.
* @param serdeProperties
* The serde properties of this table.
* @param tableProperties
* The table properties for this table.
*/
default void createTable(String tableName, MessageType storageSchema, String inputFormatClass, String outputFormatClass, String serdeClass, Map<String, String> serdeProperties, Map<String, String> tableProperties) {
}
| 3.26 |
hudi_HoodieMetaSyncOperations_updateLastReplicatedTimeStamp_rdh
|
/**
* Update the timestamp of last replication.
*/
default void updateLastReplicatedTimeStamp(String tableName, String timeStamp) {
}
| 3.26 |
hudi_HoodieMetaSyncOperations_getStorageSchema_rdh
|
/**
* Get the schema from the Hudi table on storage.
*
* @param includeMetadataField
* true if to include metadata fields in the schema
*/
default MessageType getStorageSchema(boolean includeMetadataField) {
return null;
}
| 3.26 |
hudi_HoodieAvroPayload_getRecordBytes_rdh
|
// for examples
public byte[] getRecordBytes() {
return recordBytes;
}
| 3.26 |
hudi_TableHeader_addTableHeaderFields_rdh
|
/**
* Add fields from another {@link TableHeader} instance.
*
* @param tableHeader
* {@link TableHeader} instance.
*/
public TableHeader addTableHeaderFields(TableHeader tableHeader) {
fieldNames.addAll(tableHeader.getFieldNames());
return this;
}
| 3.26 |
hudi_TableHeader_get_rdh
|
/**
* Lookup field by offset.
*/
public String get(int index) {
return fieldNames.get(index);
}
| 3.26 |
hudi_TableHeader_indexOf_rdh
|
/**
* Index of the field in the table.
*
* @param fieldName
* Field Name
*/
public int indexOf(String fieldName) {
return fieldNames.indexOf(fieldName);
}
| 3.26 |
hudi_TableHeader_addTableHeaderField_rdh
|
/**
* Add a field (column) to table.
*
* @param fieldName
* field Name
*/public TableHeader addTableHeaderField(String fieldName) {
fieldNames.add(fieldName);
return this;
}
| 3.26 |
hudi_TableHeader_getNumFields_rdh
|
/**
* Get number of fields in the table.
*/
public int getNumFields() {
return fieldNames.size();
}
| 3.26 |
hudi_TableHeader_getFieldNames_rdh
|
/**
* Get all field names.
*/
public List<String> getFieldNames() {
return fieldNames;
}
| 3.26 |
hudi_TimelineServerBasedWriteMarkers_executeCreateMarkerRequest_rdh
|
/**
* Executes marker creation request with specific parameters.
*
* @param paramsMap
* Parameters to be included in the marker request.
* @param partitionPath
* Relative partition path.
* @param markerFileName
* Marker file name.
* @return {@code true} if successful; {@code false} otherwise.
*/
private boolean executeCreateMarkerRequest(Map<String, String> paramsMap, String partitionPath, String markerFileName) {
boolean success;
try {
      success = httpRequestClient.executeRequest(CREATE_MARKER_URL, paramsMap, BOOLEAN_TYPE_REFERENCE, RequestMethod.POST);
} catch (IOException e) {
throw new HoodieRemoteException((("Failed to create marker file " + partitionPath) + "/") + markerFileName, e);
}
return success;
}
| 3.26 |
hudi_TimelineServerBasedWriteMarkers_m0_rdh
|
/**
* Gets parameter map for marker creation request.
*
* @param partitionPath
* Relative partition path.
* @param markerFileName
* Marker file name.
* @return parameter map.
   */
  private Map<String, String> m0(String partitionPath, String markerFileName, boolean initEarlyConflictDetectionConfigs) {
Map<String, String> paramsMap = new HashMap<>();
paramsMap.put(MARKER_DIR_PATH_PARAM, markerDirPath.toString());
    if (StringUtils.isNullOrEmpty(partitionPath)) {
paramsMap.put(MARKER_NAME_PARAM, markerFileName);
} else {
paramsMap.put(MARKER_NAME_PARAM, (partitionPath + "/") + markerFileName);
}
if (initEarlyConflictDetectionConfigs) {
paramsMap.put(MARKER_BASEPATH_PARAM, basePath);
}
return paramsMap;
}
| 3.26 |
hudi_InternalSchemaCache_getInternalSchemaAndAvroSchemaForClusteringAndCompaction_rdh
|
/**
* Get internalSchema and avroSchema for compaction/cluster operation.
*
* @param metaClient
* current hoodie metaClient
* @param compactionAndClusteringInstant
* first instant before current compaction/cluster instant
* @return (internalSchemaStrOpt, avroSchemaStrOpt) a pair of InternalSchema/avroSchema
*/
  public static Pair<Option<String>, Option<String>> getInternalSchemaAndAvroSchemaForClusteringAndCompaction(HoodieTableMetaClient metaClient, String compactionAndClusteringInstant) {
    // try to load internalSchema to support Schema Evolution
HoodieTimeline timelineBeforeCurrentCompaction = metaClient.getCommitsAndCompactionTimeline().findInstantsBefore(compactionAndClusteringInstant).filterCompletedInstants();
Option<HoodieInstant> lastInstantBeforeCurrentCompaction = timelineBeforeCurrentCompaction.lastInstant();
if (lastInstantBeforeCurrentCompaction.isPresent()) {
// try to find internalSchema
byte[] data = timelineBeforeCurrentCompaction.getInstantDetails(lastInstantBeforeCurrentCompaction.get()).get();
HoodieCommitMetadata metadata;
try {
metadata = HoodieCommitMetadata.fromBytes(data, HoodieCommitMetadata.class);
} catch (Exception e) {
throw new HoodieException(String.format("cannot read metadata from commit: %s", lastInstantBeforeCurrentCompaction.get()), e);
}
String internalSchemaStr = metadata.getMetadata(SerDeHelper.LATEST_SCHEMA);
      if (internalSchemaStr != null) {
        String existingSchemaStr = metadata.getMetadata(HoodieCommitMetadata.SCHEMA_KEY);
return Pair.of(Option.of(internalSchemaStr), Option.of(existingSchemaStr));
}
}
return Pair.of(Option.empty(), Option.empty());
}
| 3.26 |
hudi_InternalSchemaCache_searchSchemaAndCache_rdh
|
/**
 * Search internalSchema based on versionID.
 * First step: try to get the internalSchema from hoodie commit files; no lock is needed for this.
 * If we cannot get the internalSchema in the first step, then try to get it from the cache.
*
* @param versionID
* schema version_id need to search
* @param metaClient
* current hoodie metaClient
* @return internalSchema
*/
public static InternalSchema searchSchemaAndCache(long versionID, HoodieTableMetaClient metaClient, boolean cacheEnable) {
Option<InternalSchema> candidateSchema = getSchemaByReadingCommitFile(versionID, metaClient);
    if (candidateSchema.isPresent()) {
      return candidateSchema.get();
    }
    if (!cacheEnable) {
// parse history schema and return directly
return InternalSchemaUtils.searchSchema(versionID, getHistoricalSchemas(metaClient));
}
String v2 = metaClient.getBasePath();
// use segment lock to reduce competition.
synchronized(lockList[v2.hashCode() & (lockList.length - 1)]) {
TreeMap<Long, InternalSchema> v3 = HISTORICAL_SCHEMA_CACHE.getIfPresent(v2);
      if ((v3 == null) || (InternalSchemaUtils.searchSchema(versionID, v3) == null)) {
v3 = getHistoricalSchemas(metaClient);
HISTORICAL_SCHEMA_CACHE.put(v2, v3);
} else {
long maxVersionId = v3.keySet().stream().max(Long::compareTo).get();
if (versionID > maxVersionId) {
v3 = getHistoricalSchemas(metaClient);
HISTORICAL_SCHEMA_CACHE.put(v2, v3);
}
}
return InternalSchemaUtils.searchSchema(versionID, v3);
}
}
| 3.26 |
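The synchronized block above is a lock-striping pattern: hash the table base path into a fixed, power-of-two-sized array of monitor objects so that different tables rarely contend on the same lock. A self-contained sketch of the pattern (class, cache, and method names are illustrative, not Hudi's):

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class LockStripingExample {
  // N must be a power of two for the `hash & (N - 1)` trick to map any hashCode to a valid index.
  private static final Object[] LOCKS = new Object[16];
  static {
    for (int i = 0; i < LOCKS.length; i++) {
      LOCKS[i] = new Object();
    }
  }

  // The cache itself is thread-safe; the striped lock only serializes the expensive recompute per key.
  private static final Map<String, String> CACHE = new ConcurrentHashMap<>();

  static String loadOrCompute(String basePath) {
    synchronized (LOCKS[basePath.hashCode() & (LOCKS.length - 1)]) {
      return CACHE.computeIfAbsent(basePath, p -> "schema-for-" + p);
    }
  }

  public static void main(String[] args) {
    System.out.println(loadOrCompute("/tmp/table_a"));
    System.out.println(loadOrCompute("/tmp/table_b"));
  }
}
```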
hudi_InternalSchemaCache_getInternalSchemaByVersionId_rdh
|
/**
 * Given a schema versionId, return its internalSchema.
 * This method will be called by Spark tasks, so we should minimize its cost.
 * We avoid using a metaClient where possible, since initializing a metaClient is expensive.
 * step1:
 * try to parse the internalSchema from the HoodieInstant directly
 * step2:
 * if we cannot parse the internalSchema in step1 (eg: the HoodieInstant for the current versionId has been archived),
 * try to find the internalSchema in the history schema.
 * step3:
 * if we cannot parse the internalSchema in step2 (eg: schema evolution was not enabled when the hoodie table was created, but was enabled after some inserts),
 * try to convert the table schema to an internalSchema.
 *
 * @param versionId
 * the internalSchema version to be searched.
 * @param tablePath
 * table path
 * @param hadoopConf
 * conf
 * @param validCommits
 * current valid commits, used to make up the commit file paths and verify the validity of the history schema files
 * @return an internalSchema.
*/
public static InternalSchema getInternalSchemaByVersionId(long versionId, String tablePath, Configuration hadoopConf, String validCommits) {
String avroSchema = "";
Set<String> commitSet = Arrays.stream(validCommits.split(",")).collect(Collectors.toSet());
List<String> validateCommitList = commitSet.stream().map(HoodieInstant::extractTimestamp).collect(Collectors.toList());
FileSystem fs = FSUtils.getFs(tablePath, hadoopConf);
Path hoodieMetaPath = new Path(tablePath, HoodieTableMetaClient.METAFOLDER_NAME);
// step1:
Path candidateCommitFile = commitSet.stream().filter(fileName -> HoodieInstant.extractTimestamp(fileName).equals(versionId + "")).findFirst().map(f -> new Path(hoodieMetaPath, f)).orElse(null);
if (candidateCommitFile != null) {
try {
byte[] data;
try (FSDataInputStream is = fs.open(candidateCommitFile)) {
data = FileIOUtils.readAsByteArray(is);
} catch (IOException e) {
throw e;
}
HoodieCommitMetadata metadata = HoodieCommitMetadata.fromBytes(data, HoodieCommitMetadata.class);
String latestInternalSchemaStr = metadata.getMetadata(SerDeHelper.LATEST_SCHEMA);
avroSchema = metadata.getMetadata(HoodieCommitMetadata.SCHEMA_KEY);
if (latestInternalSchemaStr != null) {
return SerDeHelper.fromJson(latestInternalSchemaStr).orElse(null);
}
} catch (Exception e1) {
// swallow this exception.
LOG.warn(String.format("Cannot find internal schema from commit file %s. Falling back to parsing historical internal schema", candidateCommitFile.toString()));
}
    }
    // step2:
    FileBasedInternalSchemaStorageManager fileBasedInternalSchemaStorageManager = new FileBasedInternalSchemaStorageManager(hadoopConf, new Path(tablePath));
    String latestHistorySchema = fileBasedInternalSchemaStorageManager.getHistorySchemaStrByGivenValidCommits(validateCommitList);
if (latestHistorySchema.isEmpty()) {
return InternalSchema.getEmptyInternalSchema();
}
InternalSchema fileSchema = InternalSchemaUtils.searchSchema(versionId, SerDeHelper.parseSchemas(latestHistorySchema));
// step3:
return fileSchema.isEmptySchema() ? StringUtils.isNullOrEmpty(avroSchema) ? InternalSchema.getEmptyInternalSchema() : AvroInternalSchemaConverter.convert(HoodieAvroUtils.addMetadataFields(new Schema.Parser().parse(avroSchema))) : fileSchema;
}
| 3.26 |
hudi_BaseHoodieLogRecordReader_reconcileSpuriousBlocksAndGetValidOnes_rdh
|
/**
* There could be spurious log blocks due to spark task retries. So, we will use BLOCK_SEQUENCE_NUMBER in the log block header to deduce such spurious log blocks and return
* a deduped set of log blocks.
*
* @param allValidLogBlocks
* all valid log blocks parsed so far.
* @param blockSequenceMapPerCommit
* map containing block sequence numbers for every commit.
 * @return a Pair of a boolean and the list of deduped valid log blocks, where a boolean value of true means duplicates were detected.
*/
private Pair<Boolean, List<HoodieLogBlock>> reconcileSpuriousBlocksAndGetValidOnes(List<HoodieLogBlock> allValidLogBlocks, Map<String, Map<Long, List<Pair<Integer, HoodieLogBlock>>>> blockSequenceMapPerCommit) {
boolean dupsFound = blockSequenceMapPerCommit.values().stream().anyMatch(perCommitBlockList -> perCommitBlockList.size() > 1);
    if (dupsFound) {
      // duplicates are found. we need to remove duplicate log blocks.
      for (Map.Entry<String, Map<Long, List<Pair<Integer, HoodieLogBlock>>>> entry : blockSequenceMapPerCommit.entrySet()) {
        Map<Long, List<Pair<Integer, HoodieLogBlock>>> perCommitBlockSequences = entry.getValue();
        if (perCommitBlockSequences.size() > 1) {
          // only those that have more than one sequence need deduping.
          int maxSequenceCount = -1;
          int maxAttemptNo = -1;
          int totalSequences = perCommitBlockSequences.size();
          int counter = 0;
          for (Map.Entry<Long, List<Pair<Integer, HoodieLogBlock>>> v28 : perCommitBlockSequences.entrySet()) {
            Long attemptNo = v28.getKey();
            int size = v28.getValue().size();
            if (maxSequenceCount < size) {
              maxSequenceCount = size;
              maxAttemptNo = Math.toIntExact(attemptNo);
            }
            counter++;
          }
          // for the other attempts (!= maxAttemptNo), remove the corresponding logBlocks from allValidLogBlocks
          for (Map.Entry<Long, List<Pair<Integer, HoodieLogBlock>>> perAttemptEntries : perCommitBlockSequences.entrySet()) {
            Long attemptNo = perAttemptEntries.getKey();
            if (maxAttemptNo != attemptNo) {
              List<HoodieLogBlock> logBlocksToRemove = perCommitBlockSequences.get(attemptNo).stream().map(pair -> pair.getValue()).collect(Collectors.toList());
              logBlocksToRemove.forEach(logBlockToRemove -> allValidLogBlocks.remove(logBlockToRemove));
            }
          }
        }
      }
return Pair.of(true, allValidLogBlocks);
} else {
return Pair.of(false, allValidLogBlocks);
}
}
| 3.26 |
hudi_BaseHoodieLogRecordReader_getProgress_rdh
|
/**
* Return progress of scanning as a float between 0.0 to 1.0.
*/
public float getProgress() {
    return progress;
  }
| 3.26 |
hudi_BaseHoodieLogRecordReader_isNewInstantBlock_rdh
|
/**
* Checks if the current logblock belongs to a later instant.
*/
private boolean isNewInstantBlock(HoodieLogBlock logBlock) {
    return ((currentInstantLogBlocks.size() > 0) && (currentInstantLogBlocks.peek().getBlockType() != CORRUPT_BLOCK))
        && (!logBlock.getLogBlockHeader().get(INSTANT_TIME).contentEquals(currentInstantLogBlocks.peek().getLogBlockHeader().get(INSTANT_TIME)));
  }
| 3.26 |
hudi_BaseHoodieLogRecordReader_scanInternal_rdh
|
/**
*
* @param keySpecOpt
* specifies target set of keys to be scanned
* @param skipProcessingBlocks
* controls, whether (delta) blocks have to actually be processed
*/
protected final void scanInternal(Option<KeySpec> keySpecOpt, boolean skipProcessingBlocks) {
synchronized(this) {
if (enableOptimizedLogBlocksScan) {
scanInternalV2(keySpecOpt, skipProcessingBlocks);
} else {
scanInternalV1(keySpecOpt);
}
}
}
| 3.26 |
hudi_BaseHoodieLogRecordReader_updateBlockSequenceTracker_rdh
|
/**
* Updates map tracking block seq no.
* Here is the map structure.
* Map<String, Map<Long, List<Pair<Integer, HoodieLogBlock>>>> blockSequenceMapPerCommit
* Key: Commit time.
* Value: Map<Long, List<Pair<Integer, HoodieLogBlock>>>>
* Value refers to a Map of different attempts for the commit of interest. List contains the block seq number and the resp HoodieLogBlock.
* <p>
 * For example, if there were two attempts for a file slice while writing (due to Spark task retries), here is how the map might look:
* key: commit1
* value : {
* 0L = List = { {0, lb1}, {1, lb2} },
* 1L = List = { {0, lb3}, {1, lb4}, {2, lb5}}
* }
 * Meaning: for commit1, there were two attempts with the Append Handle while writing. In the first attempt, lb1 and lb2 were added, and in the 2nd attempt lb3, lb4 and lb5 were added.
* We keep populating this entire map and finally detect spurious log blocks and ignore them.
* In most cases, we might just see one set of sequence for a given commit.
*
* @param logBlock
* log block of interest to be added.
* @param instantTime
* commit time of interest.
* @param blockSeqNo
* block sequence number.
* @param blockSequenceMapPerCommit
* map tracking per commit block sequences.
*/
  private void updateBlockSequenceTracker(HoodieLogBlock logBlock, String instantTime, int blockSeqNo, long attemptNo,
      Map<String, Map<Long, List<Pair<Integer, HoodieLogBlock>>>> blockSequenceMapPerCommit) {
if ((blockSeqNo != (-1)) && (attemptNo != (-1))) {
// update the block sequence tracker for log blocks containing the same.
blockSequenceMapPerCommit.computeIfAbsent(instantTime, entry -> new HashMap<>());
Map<Long, List<Pair<Integer, HoodieLogBlock>>> curCommitBlockMap = blockSequenceMapPerCommit.get(instantTime);
if (curCommitBlockMap.containsKey(attemptNo)) {
// append to existing map entry
curCommitBlockMap.get(attemptNo).add(Pair.of(blockSeqNo, logBlock));
} else {
// create a new map entry
curCommitBlockMap.put(attemptNo, new ArrayList<>());
curCommitBlockMap.get(attemptNo).add(Pair.of(blockSeqNo, logBlock));
}
// update the latest to block sequence tracker
blockSequenceMapPerCommit.put(instantTime, curCommitBlockMap);
} else {
// all of older blocks are considered valid. there should be only one list for older commits where block sequence number is not present.
blockSequenceMapPerCommit.computeIfAbsent(instantTime, entry -> new HashMap<>());
Map<Long, List<Pair<Integer, HoodieLogBlock>>> curCommitBlockMap = blockSequenceMapPerCommit.get(instantTime);
curCommitBlockMap.put(0L, new ArrayList<>());
curCommitBlockMap.get(0L).add(Pair.of(blockSeqNo, logBlock));
// update the latest to block sequence tracker
blockSequenceMapPerCommit.put(instantTime, curCommitBlockMap);
    }
  }
| 3.26 |
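The nested-map bookkeeping above can be expressed more compactly by chaining computeIfAbsent. Note this sketch appends legacy blocks (no sequence metadata) under attempt 0 instead of resetting that list, so it illustrates the pattern rather than reproducing the exact semantics; the types are illustrative stand-ins.

```java
import java.util.*;

public class BlockSequenceTrackerExample {
  // Compact form of the nested-map update: commitTime -> attemptNo -> list of (blockSeqNo, block).
  static void track(Map<String, Map<Long, List<Map.Entry<Integer, String>>>> tracker,
                    String instantTime, int blockSeqNo, long attemptNo, String block) {
    // Blocks without sequence metadata (older writers) are parked under attempt 0.
    long attemptKey = (blockSeqNo == -1 || attemptNo == -1) ? 0L : attemptNo;
    tracker.computeIfAbsent(instantTime, t -> new HashMap<>())
        .computeIfAbsent(attemptKey, a -> new ArrayList<>())
        .add(new AbstractMap.SimpleEntry<>(blockSeqNo, block));
  }

  public static void main(String[] args) {
    Map<String, Map<Long, List<Map.Entry<Integer, String>>>> tracker = new HashMap<>();
    track(tracker, "commit1", 0, 1L, "lb3");
    track(tracker, "commit1", 1, 1L, "lb4");
    track(tracker, "commit1", -1, -1L, "lb-old");
    System.out.println(tracker);
  }
}
```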
hudi_BaseHoodieLogRecordReader_processQueuedBlocksForInstant_rdh
|
/**
* Process the set of log blocks belonging to the last instant which is read fully.
*/
  private void processQueuedBlocksForInstant(Deque<HoodieLogBlock> logBlocks, int numLogFilesSeen, Option<KeySpec> keySpecOpt) throws Exception {
    while (!logBlocks.isEmpty()) {
LOG.info("Number of remaining logblocks to merge " + logBlocks.size());
// poll the element at the bottom of the stack since that's the order it was inserted
HoodieLogBlock lastBlock = logBlocks.pollLast();
switch (lastBlock.getBlockType()) {
        case AVRO_DATA_BLOCK :
        case HFILE_DATA_BLOCK :
case PARQUET_DATA_BLOCK :
recordBuffer.processDataBlock(((HoodieDataBlock) (lastBlock)), keySpecOpt);
break;
case DELETE_BLOCK :
recordBuffer.processDeleteBlock(((HoodieDeleteBlock) (lastBlock)));
break;
case CORRUPT_BLOCK :
LOG.warn("Found a corrupt block which was not rolled back");
break;
default :
break;
}
}
// At this step the lastBlocks are consumed. We track approximate progress by number of log-files seen
progress = (numLogFilesSeen - 1) / logFilePaths.size();
}
| 3.26 |
hudi_AvroSchemaCompatibility_objectsEqual_rdh
|
/**
* Borrowed from Guava's Objects.equal(a, b)
*/
  private static boolean objectsEqual(Object obj1, Object obj2) {
return Objects.equals(obj1, obj2);
}
| 3.26 |
hudi_AvroSchemaCompatibility_getReader_rdh
|
/**
* Gets the reader schema that was validated.
*
* @return reader schema that was validated.
*/
public Schema getReader() {
return mReader;
}
| 3.26 |
hudi_AvroSchemaCompatibility_getReaderFragment_rdh
|
/**
* Returns the fragment of the reader schema that failed compatibility check.
*
* @return a Schema instance (fragment of the reader schema).
*/
public Schema getReaderFragment() {
return mReaderFragment;
}
| 3.26 |
hudi_AvroSchemaCompatibility_checkReaderWriterCompatibility_rdh
|
/**
* Validates that the provided reader schema can be used to decode avro data
* written with the provided writer schema.
*
* @param reader
* schema to check.
* @param writer
* schema to check.
* @return a result object identifying any compatibility errors.
*/
public static SchemaPairCompatibility checkReaderWriterCompatibility(final Schema reader, final Schema writer, boolean checkNamingOverride) {
final SchemaCompatibilityResult compatibility = new ReaderWriterCompatibilityChecker(checkNamingOverride).getCompatibility(reader, writer);
final String message;
switch (compatibility.m0()) {
case INCOMPATIBLE :
{
message = String.format("Data encoded using writer schema:%n%s%n" + "will or may fail to decode using reader schema:%n%s%n", writer.toString(true), reader.toString(true));
break;
}
case COMPATIBLE :
{
message = f0;
break;
}
default : throw new AvroRuntimeException("Unknown compatibility: " + compatibility);
}
    return new SchemaPairCompatibility(compatibility, reader, writer, message);
  }
| 3.26 |
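The class in this row is Hudi's vendored variant of Avro's compatibility checker with an extra naming-check flag. Stock Avro ships an equivalent checker; a minimal usage sketch against the standard Avro API (assuming org.apache.avro is on the classpath; this is not the vendored Hudi class above):

```java
import org.apache.avro.Schema;
import org.apache.avro.SchemaCompatibility;

public class CompatCheckExample {
  public static void main(String[] args) {
    // Writer wrote an int `age`; reader widened it to long, which Avro allows (int -> long promotion).
    Schema writer = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"User\",\"fields\":[{\"name\":\"age\",\"type\":\"int\"}]}");
    Schema reader = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"User\",\"fields\":[{\"name\":\"age\",\"type\":\"long\"}]}");

    SchemaCompatibility.SchemaPairCompatibility result =
        SchemaCompatibility.checkReaderWriterCompatibility(reader, writer);
    System.out.println(result.getType()); // COMPATIBLE
  }
}
```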
hudi_AvroSchemaCompatibility_calculateCompatibility_rdh
|
/**
* Calculates the compatibility of a reader/writer schema pair.
*
* <p>
* Relies on external memoization performed by
* {@link #getCompatibility(Schema, Schema)}.
* </p>
*
* @param reader
* Reader schema to test.
* @param writer
* Writer schema to test.
* @param locations
* Stack with which to track the location within the schema.
* @return the compatibility of the reader/writer schema pair.
*/
  private SchemaCompatibilityResult calculateCompatibility(final Schema reader, final Schema writer, final Deque<LocationInfo> locations) {
SchemaCompatibilityResult result = SchemaCompatibilityResult.compatible();
if (reader.getType() == writer.getType()) {
switch (reader.getType()) {
case NULL :
case BOOLEAN :
case INT :
case LONG :
case FLOAT :
case DOUBLE :
case BYTES :
case STRING :
{
          return result;
        }
case ARRAY :
{
          return result.mergedWith(getCompatibility(reader.getElementType(), writer.getElementType(), locations));
}
case MAP :
{
return result.mergedWith(getCompatibility(reader.getValueType(), writer.getValueType(), locations));
        }
        case FIXED :
{
result = result.mergedWith(checkSchemaNames(reader, writer, locations));
return result.mergedWith(checkFixedSize(reader, writer, locations));
}
case ENUM :
{
result = result.mergedWith(checkSchemaNames(reader, writer, locations));
return result.mergedWith(checkReaderEnumContainsAllWriterEnumSymbols(reader, writer, locations));
}
case RECORD :
{
result = result.mergedWith(checkSchemaNames(reader, writer, locations));
          return result.mergedWith(checkReaderWriterRecordFields(reader, writer, locations));
}
case UNION :
{
// Check that each individual branch of the writer union can be decoded:
for (final Schema writerBranch : writer.getTypes()) {
SchemaCompatibilityResult compatibility = getCompatibility(reader, writerBranch, locations);
if (compatibility.m0() == SchemaCompatibilityType.INCOMPATIBLE) {
String message = String.format("reader union lacking writer type: %s", writerBranch.getType());
result = result.mergedWith(SchemaCompatibilityResult.incompatible(SchemaIncompatibilityType.MISSING_UNION_BRANCH, reader, writer, message, asList(locations)));
}
}
// Each schema in the writer union can be decoded with the reader:
return result;
}
default :
{
throw new AvroRuntimeException("Unknown schema type: " + reader.getType());
}
}
} else {
// Reader and writer have different schema types:
// Reader compatible with all branches of a writer union is compatible
      if (writer.getType() == Type.UNION) {
for (Schema s : writer.getTypes()) {
result = result.mergedWith(getCompatibility(reader, s, locations));
}
return result;
}
      switch (reader.getType()) {
case NULL :
return result.mergedWith(typeMismatch(reader, writer, locations));
case BOOLEAN :
return result.mergedWith(typeMismatch(reader, writer, locations));
case INT :
return result.mergedWith(typeMismatch(reader, writer, locations));
case LONG :
        {
          return writer.getType() == Type.INT ? result : result.mergedWith(typeMismatch(reader, writer, locations));
}
case FLOAT :
{
return (writer.getType() == Type.INT) || (writer.getType() == Type.LONG) ? result : result.mergedWith(typeMismatch(reader, writer, locations));
}
case DOUBLE :
{
return ((writer.getType() == Type.INT) || (writer.getType() == Type.LONG)) || (writer.getType() == Type.FLOAT) ? result : result.mergedWith(typeMismatch(reader, writer, locations));
}
case BYTES :
{
return writer.getType() == Type.STRING ? result : result.mergedWith(typeMismatch(reader, writer, locations));
}
case STRING :
{
return isTypeNumeric(writer.getType()) || (writer.getType() == Type.BYTES) ? result : result.mergedWith(typeMismatch(reader, writer, locations));
}
case ARRAY :
return result.mergedWith(typeMismatch(reader, writer, locations));
case MAP :
return result.mergedWith(typeMismatch(reader, writer, locations));
        case FIXED :
return result.mergedWith(typeMismatch(reader, writer, locations));
case ENUM :
return result.mergedWith(typeMismatch(reader, writer, locations));
case RECORD :
return result.mergedWith(typeMismatch(reader, writer, locations));
case UNION :
{
          for (final Schema readerBranch : reader.getTypes()) {
SchemaCompatibilityResult v16 = getCompatibility(readerBranch, writer, locations);
if (v16.m0() == SchemaCompatibilityType.COMPATIBLE) {
return result;
}
}
// No branch in the reader union has been found compatible with the writer
// schema:
String message = String.format("reader union lacking writer type: %s", writer.getType());
return result.mergedWith(SchemaCompatibilityResult.incompatible(SchemaIncompatibilityType.MISSING_UNION_BRANCH, reader, writer, message, asList(locations)));
}
default :
{
throw new AvroRuntimeException("Unknown schema type: " + reader.getType());
}
}
}
}
| 3.26 |
hudi_AvroSchemaCompatibility_lookupWriterField_rdh
|
/**
* Identifies the writer field that corresponds to the specified reader field.
*
* <p>
* Matching includes reader name aliases.
* </p>
*
* @param writerSchema
* Schema of the record where to look for the writer field.
* @param readerField
* Reader field to identify the corresponding writer field
* of.
* @return the writer field, if any does correspond, or None.
*/
public static Field lookupWriterField(final Schema writerSchema, final Field readerField) {
assert writerSchema.getType() == Type.RECORD;
    final List<Field> writerFields = new ArrayList<>();
    final Field direct = writerSchema.getField(readerField.name());
if (direct != null) {
writerFields.add(direct);
}
    for (final String readerFieldAliasName : readerField.aliases()) {
final Field writerField = writerSchema.getField(readerFieldAliasName);
if (writerField != null) {
writerFields.add(writerField);
}
}
switch (writerFields.size()) {
case 0 :
return null;
case 1 :
return writerFields.get(0);
default :
{
throw new AvroRuntimeException(String.format("Reader record field %s matches multiple fields in writer record schema %s", readerField, writerSchema));
}
}
}
| 3.26 |
hudi_AvroSchemaCompatibility_getMessage_rdh
|
/**
* Returns a human-readable message with more details about what failed. Syntax
* depends on the SchemaIncompatibilityType.
*
* @return a String with details about the incompatibility.
* @see #getType()
*/
  public String getMessage() {
    return mMessage;
}
| 3.26 |
hudi_AvroSchemaCompatibility_m2_rdh
|
/**
* {@inheritDoc }
*/
@Override
public String m2() {
return String.format("SchemaCompatibilityResult{compatibility:%s, incompatibilities:%s}", mCompatibilityType, mIncompatibilities);
}
| 3.26 |
hudi_AvroSchemaCompatibility_getLocation_rdh
|
/**
* Returns a
* <a href="https://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-08">JSON
* Pointer</a> describing the node location within the schema's JSON document
* tree where the incompatibility was encountered.
*
* @return JSON Pointer encoded as a string.
*/
public String getLocation() {
    StringBuilder s = new StringBuilder("/");
    boolean first = true;
    // ignore root element
    for (String coordinate : f2.subList(1, f2.size())) {
if (first) {
first = false;
} else {
s.append('/');
}
// Apply JSON pointer escaping.
s.append(coordinate.replace("~", "~0").replace("/", "~1"));
}
return s.toString();
}
| 3.26 |
hudi_AvroSchemaCompatibility_equals_rdh
|
/**
* {@inheritDoc }
*/
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Incompatibility other = ((Incompatibility) (obj));
if (mType != other.mType) {
      return false;
}
if (mReaderFragment == null) {
if (other.mReaderFragment != null) {
return false;
}
} else if (!mReaderFragment.equals(other.mReaderFragment)) {
return false;
}
if (mWriterFragment == null) {
if (other.mWriterFragment != null) {
return false;
}
} else if (!mWriterFragment.equals(other.mWriterFragment)) {
      return false;
}
if (mMessage == null) {
if (other.mMessage != null) {
return false;
}
} else if (!mMessage.equals(other.mMessage)) {
return false;
}
if (f2 == null) {
return other.f2 == null;
} else {
return f2.equals(other.f2);
}
}
| 3.26 |
hudi_AvroSchemaCompatibility_schemaNameEquals_rdh
|
// -----------------------------------------------------------------------------------------------
/**
* Tests the equality of two Avro named schemas.
*
* <p>
* Matching includes reader name aliases.
* </p>
*
* @param reader
* Named reader schema.
* @param writer
* Named writer schema.
* @return whether the names of the named schemas match or not.
*/
  public static boolean schemaNameEquals(final Schema reader, final Schema writer) {
    if (objectsEqual(reader.getName(), writer.getName())) {
return true;
}
// Apply reader aliases:
return reader.getAliases().contains(writer.getFullName());
}
| 3.26 |
hudi_AvroSchemaCompatibility_toString_rdh
|
/**
* {@inheritDoc }
   */
  @Override
public String toString() {
return String.format("SchemaPairCompatibility{result:%s, readerSchema:%s, writerSchema:%s, description:%s}", mResult, mReader, mWriter, f3);}
| 3.26 |
hudi_AvroSchemaCompatibility_getDescription_rdh
|
/**
* Gets a human readable description of this validation result.
*
* @return a human readable description of this validation result.
*/
public String getDescription() {
return f3;
}
| 3.26 |
hudi_AvroSchemaCompatibility_m0_rdh
|
/**
* Returns the SchemaCompatibilityType, always non-null.
*
* @return a SchemaCompatibilityType instance, always non-null
*/
public SchemaCompatibilityType m0() {
    return mCompatibilityType;
  }
/**
* If the compatibility is INCOMPATIBLE, returns {@link Incompatibility
* Incompatibilities} found, otherwise an empty list.
*
* @return a list of {@link Incompatibility Incompatibilities}
| 3.26 |
hudi_AvroSchemaCompatibility_getWriterFragment_rdh
|
/**
* Returns the fragment of the writer schema that failed compatibility check.
*
* @return a Schema instance (fragment of the writer schema).
*/
public Schema getWriterFragment() {
return mWriterFragment;
}
| 3.26 |
hudi_AvroSchemaCompatibility_hashCode_rdh
|
/**
* {@inheritDoc }
*/
@Override
  public int hashCode() {
    return Arrays.hashCode(new Object[]{ mResult, mReader, mWriter, f3 });
  }
| 3.26 |
hudi_AvroSchemaCompatibility_getWriter_rdh
|
/**
* Gets the writer schema that was validated.
*
* @return writer schema that was validated.
*/
public Schema getWriter() {
return mWriter;
}
| 3.26 |
hudi_AvroSchemaCompatibility_getType_rdh
|
/**
* Gets the type of this result.
*
* @return the type of this result.
*/
public SchemaCompatibilityType getType() {
return mResult.m0();
}
| 3.26 |
hudi_AvroSchemaCompatibility_getResult_rdh
|
/**
* Gets more details about the compatibility, in particular if getType() is
* INCOMPATIBLE.
*
* @return the details of this compatibility check.
*/
public SchemaCompatibilityResult getResult() {
return mResult;
}
| 3.26 |
hudi_AvroSchemaCompatibility_getCompatibility_rdh
|
/**
* Reports the compatibility of a reader/writer schema pair.
* <p>
 * Memoizes the compatibility results.
* </p>
*
* @param reader
* Reader schema to test.
* @param writer
* Writer schema to test.
* @param locations
* Stack tracking the path (chain of locations) within the
* schema.
* @return the compatibility of the reader/writer schema pair.
*/
  private SchemaCompatibilityResult getCompatibility(final Schema reader, final Schema writer, final Deque<LocationInfo> locations) {
LOG.debug("Checking compatibility of reader {} with writer {}", reader, writer);
final ReaderWriter pair = new ReaderWriter(reader, writer);
SchemaCompatibilityResult result = mMemoizeMap.get(pair);
    if (result != null) {
      if (result.m0() == SchemaCompatibilityType.RECURSION_IN_PROGRESS) {
// Break the recursion here.
// schemas are compatible unless proven incompatible:
result = SchemaCompatibilityResult.compatible();
}
} else {
// Mark this reader/writer pair as "in progress":
      mMemoizeMap.put(pair, SchemaCompatibilityResult.recursionInProgress());
      result = calculateCompatibility(reader, writer, locations);
mMemoizeMap.put(pair, result);
}
return result;
}
| 3.26 |
hudi_HoodieInstantTimeGenerator_getInstantForDateString_rdh
|
/**
* Creates an instant string given a valid date-time string.
*
* @param dateString
* A date-time string in the format yyyy-MM-dd HH:mm:ss[.SSS]
* @return A timeline instant
* @throws ParseException
* If we cannot parse the date string
*/
public static String getInstantForDateString(String dateString) {
try {
return getInstantFromTemporalAccessor(LocalDateTime.parse(dateString, MILLIS_GRANULARITY_DATE_FORMATTER));
    } catch (Exception e) {
// Attempt to add the milliseconds in order to complete parsing
return getInstantFromTemporalAccessor(LocalDateTime.parse(String.format("%s.%s", dateString, DEFAULT_MILLIS_EXT), MILLIS_GRANULARITY_DATE_FORMATTER));
}
}
| 3.26 |
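The try/catch above retries the parse after appending a default millisecond suffix. An alternative way to express "milliseconds are optional" with java.time, shown purely as a sketch (this is not the formatter the class actually defines):

```java
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeFormatterBuilder;
import java.time.temporal.ChronoField;

public class OptionalMillisParseExample {
  // A formatter that accepts both "yyyy-MM-dd HH:mm:ss" and "yyyy-MM-dd HH:mm:ss.SSS".
  private static final DateTimeFormatter FORMATTER = new DateTimeFormatterBuilder()
      .appendPattern("yyyy-MM-dd HH:mm:ss")
      .optionalStart()
      .appendFraction(ChronoField.MILLI_OF_SECOND, 3, 3, true)
      .optionalEnd()
      .toFormatter();

  public static void main(String[] args) {
    System.out.println(LocalDateTime.parse("2024-01-15 10:30:00", FORMATTER));
    System.out.println(LocalDateTime.parse("2024-01-15 10:30:00.123", FORMATTER));
  }
}
```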
hudi_HoodieInstantTimeGenerator_createNewInstantTime_rdh
|
/**
* Returns next instant time in the correct format.
* Ensures each instant time is at least 1 millisecond apart since we create instant times at millisecond granularity.
*
* @param shouldLock
* Whether the lock should be enabled to get the instant time.
* @param timeGenerator
* TimeGenerator used to generate the instant time.
* @param milliseconds
* Milliseconds to add to current time while generating the new instant time
*/
public static String createNewInstantTime(boolean shouldLock, TimeGenerator timeGenerator, long milliseconds) {
    return lastInstantTime.updateAndGet(oldVal -> {
      String newCommitTime;
      do {
        Date d = new Date(timeGenerator.currentTimeMillis(!shouldLock) + milliseconds);
        if (commitTimeZone.equals(HoodieTimelineTimeZone.UTC)) {
          newCommitTime = d.toInstant().atZone(HoodieTimelineTimeZone.UTC.getZoneId()).toLocalDateTime().format(f0);
        } else {
          newCommitTime = f0.format(convertDateToTemporalAccessor(d));
        }
      } while (HoodieTimeline.compareTimestamps(newCommitTime, HoodieActiveTimeline.LESSER_THAN_OR_EQUALS, oldVal));
return newCommitTime;
});
}
| 3.26 |
hudi_HoodieDataTableUtils_getBaseAndLogFilePathsFromFileSystem_rdh
|
/**
*
* @return All hoodie files of the table from the file system.
* @throws IOException
* upon errors.
*/
static List<Path> getBaseAndLogFilePathsFromFileSystem(HoodieTableMetadata tableMetadata, String basePath) throws IOException {
List<String> allPartitionPaths = tableMetadata.getAllPartitionPaths().stream().map(partitionPath -> FSUtils.getPartitionPath(basePath, partitionPath).toString()).collect(Collectors.toList());
    return tableMetadata.getAllFilesInPartitions(allPartitionPaths).values().stream()
        .map(fileStatuses -> Arrays.stream(fileStatuses).map(fileStatus -> fileStatus.getPath()).collect(Collectors.toList()))
        .flatMap(list -> list.stream()).collect(Collectors.toList());
}
| 3.26 |
hudi_S3EventsMetaSelector_getNextEventsFromQueue_rdh
|
/**
* Get the list of events from queue.
*
* @param lastCheckpointStr
* The last checkpoint instant string, empty if first run.
* @return A pair of dataset of event records and the next checkpoint instant string.
*/
public Pair<List<String>, String> getNextEventsFromQueue(SqsClient sqs, Option<String> lastCheckpointStr, List<Message> processedMessages) {
processedMessages.clear();
log.info("Reading messages....");
    try {
      log.info("Start Checkpoint : " + lastCheckpointStr);
      List<Map<String, Object>> eventRecords = getValidEvents(sqs, processedMessages);
log.info("Number of valid events: " + eventRecords.size());
List<String> filteredEventRecords = new ArrayList<>();
long newCheckpointTime = eventRecords.stream().mapToLong(eventRecord -> Date.from(Instant.from(DateTimeFormatter.ISO_INSTANT.parse(((String) (eventRecord.get(S3_MODEL_EVENT_TIME)))))).getTime()).max().orElse(lastCheckpointStr.map(Long::parseLong).orElse(0L));
for (Map<String, Object> eventRecord : eventRecords) {
        filteredEventRecords.add(new ObjectMapper().writeValueAsString(eventRecord).replace("%3D", "=").replace("%24", "$").replace("%A3", "£").replace("%23", "#")
            .replace("%26", "&").replace("%3F", "?").replace("%7E", "~").replace("%25", "%").replace("%2B", "+"));
}
// Return the old checkpoint if no messages to consume from queue.
String newCheckpoint = (newCheckpointTime == 0) ? lastCheckpointStr.orElse(null) : String.valueOf(newCheckpointTime);
return new ImmutablePair<>(filteredEventRecords, newCheckpoint);
} catch (JSONException | IOException e) {
throw new HoodieException("Unable to read from SQS: ", e);
}
}
| 3.26 |
hudi_S3EventsMetaSelector_getValidEvents_rdh
|
/**
 * List messages from the queue, filtering out ineligible events while doing so. It will also delete the
 * ineligible messages from the queue.
*
* @param processedMessages
* array of processed messages to add more messages
* @return the filtered list of valid S3 events in SQS.
*/
protected List<Map<String, Object>> getValidEvents(SqsClient sqs, List<Message> processedMessages) throws IOException {
    List<Message> v2 = getMessagesToProcess(sqs, this.queueUrl, this.longPollWait, this.visibilityTimeout, this.maxMessagePerBatch, this.maxMessagesPerRequest);
return processAndDeleteInvalidMessages(processedMessages, v2);
}
| 3.26 |
hudi_S3EventsMetaSelector_createSourceSelector_rdh
|
/**
* Factory method for creating custom CloudObjectsMetaSelector. Default selector to use is {@link S3EventsMetaSelector}
*/
public static S3EventsMetaSelector createSourceSelector(TypedProperties props) {
  String sourceSelectorClass = getStringWithAltKeys(props, DFSPathSelectorConfig.SOURCE_INPUT_SELECTOR, S3EventsMetaSelector.class.getName());
  try {
    S3EventsMetaSelector selector = (S3EventsMetaSelector) ReflectionUtils.loadClass(sourceSelectorClass, new Class<?>[]{TypedProperties.class}, props);
    log.info("Using path selector " + selector.getClass().getName());
    return selector;
  } catch (Exception e) {
    throw new HoodieException("Could not load source selector class " + sourceSelectorClass, e);
  }
}
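A small sketch showing how the factory might be used; it only assumes a TypedProperties instance already populated by the caller and relies on the documented fallback to S3EventsMetaSelector when DFSPathSelectorConfig.SOURCE_INPUT_SELECTOR is not overridden. The wrapper class and the TypedProperties import package are assumptions.

import org.apache.hudi.common.config.TypedProperties;

class SelectorBootstrapExample {
  // Resolves the selector implementation from source properties, defaulting to S3EventsMetaSelector.
  static S3EventsMetaSelector buildSelector(TypedProperties props) {
    return S3EventsMetaSelector.createSourceSelector(props);
  }
}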
| 3.26 |
hudi_SparkBulkInsertHelper_bulkInsert_rdh
|
/**
* Do bulk insert using WriteHandleFactory from the partitioner (i.e., partitioner.getWriteHandleFactory)
*/
public HoodieData<WriteStatus> bulkInsert(HoodieData<HoodieRecord<T>> inputRecords, String instantTime, HoodieTable<T, HoodieData<HoodieRecord<T>>, HoodieData<HoodieKey>, HoodieData<WriteStatus>> table, HoodieWriteConfig config, boolean performDedupe, BulkInsertPartitioner partitioner, boolean useWriterSchema, int parallelism) {
return bulkInsert(inputRecords, instantTime, table, config, performDedupe, partitioner, useWriterSchema, parallelism, null);
}
| 3.26 |
hudi_TimelineUtils_concatTimeline_rdh
|
/**
* Concat two timelines timeline1 and timeline2 to build a new timeline.
*/
public static HoodieTimeline concatTimeline(HoodieTimeline timeline1, HoodieTimeline timeline2, HoodieTableMetaClient metaClient) {
  return new HoodieDefaultTimeline(Stream.concat(timeline1.getInstantsAsStream(), timeline2.getInstantsAsStream()).sorted(),
      instant -> metaClient.getActiveTimeline().getInstantDetails(instant));
}
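A usage sketch, under the assumption that metaClient.getArchivedTimeline() and getCommitsTimeline() are available on the meta client API, that stitches the archived and active commit timelines into one view; the class name is illustrative.

import org.apache.hudi.common.table.HoodieTableMetaClient;
import org.apache.hudi.common.table.timeline.HoodieTimeline;
import org.apache.hudi.common.table.timeline.TimelineUtils;

class TimelineConcatExample {
  // Builds a single timeline spanning archived and active commit instants.
  static HoodieTimeline fullCommitTimeline(HoodieTableMetaClient metaClient) {
    HoodieTimeline archived = metaClient.getArchivedTimeline().getCommitsTimeline();
    HoodieTimeline active = metaClient.getActiveTimeline().getCommitsTimeline();
    return TimelineUtils.concatTimeline(archived, active, metaClient);
  }
}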
| 3.26 |
hudi_TimelineUtils_getDroppedPartitions_rdh
|
/**
* Returns partitions that have been deleted or marked for deletion in the given timeline.
* Does not include internal operations such as clean in the timeline.
*/
public static List<String> getDroppedPartitions(HoodieTimeline timeline) {
HoodieTimeline replaceCommitTimeline = timeline.getWriteTimeline().filterCompletedInstants().getCompletedReplaceTimeline();
return replaceCommitTimeline.getInstantsAsStream().flatMap(instant -> {
try {
HoodieReplaceCommitMetadata commitMetadata = HoodieReplaceCommitMetadata.fromBytes(replaceCommitTimeline.getInstantDetails(instant).get(), HoodieReplaceCommitMetadata.class);
if (WriteOperationType.DELETE_PARTITION.equals(commitMetadata.getOperationType())) {
Map<String, List<String>> partitionToReplaceFileIds = commitMetadata.getPartitionToReplaceFileIds();
return partitionToReplaceFileIds.keySet().stream();
} else {
return Stream.empty();
}
} catch (IOException e) {
        throw new HoodieIOException("Failed to get partitions modified at " + instant, e);
}
}).distinct().filter(partition -> !partition.isEmpty()).collect(Collectors.toList());
}
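A minimal sketch of one way a sync or listing tool could use the result: filter previously discovered partitions against the dropped set. The surrounding class and method are hypothetical, and import package names are assumed.

import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.hudi.common.table.HoodieTableMetaClient;
import org.apache.hudi.common.table.timeline.TimelineUtils;

class DroppedPartitionFilterExample {
  // Drops partitions deleted via DELETE_PARTITION from a candidate partition list.
  static List<String> excludeDropped(HoodieTableMetaClient metaClient, List<String> candidatePartitions) {
    Set<String> dropped = new HashSet<>(TimelineUtils.getDroppedPartitions(metaClient.getActiveTimeline()));
    return candidatePartitions.stream().filter(p -> !dropped.contains(p)).collect(Collectors.toList());
  }
}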
| 3.26 |
hudi_TimelineUtils_getCommitMetadata_rdh
|
/**
* Returns the commit metadata of the given instant.
*
* @param instant
* The hoodie instant
* @param timeline
* The timeline
* @return the commit metadata
*/
public static HoodieCommitMetadata getCommitMetadata(HoodieInstant instant, HoodieTimeline timeline) throws IOException {
  byte[] data = timeline.getInstantDetails(instant).get();
  if (instant.getAction().equals(REPLACE_COMMIT_ACTION)) {
    return HoodieReplaceCommitMetadata.fromBytes(data, HoodieReplaceCommitMetadata.class);
  } else {
    return HoodieCommitMetadata.fromBytes(data, HoodieCommitMetadata.class);
  }
}
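A sketch that scans completed write instants and logs each commit's operation type; it only uses accessors that appear in the surrounding snippets (getWriteTimeline, filterCompletedInstants, getInstantsAsStream, getOperationType), while the class name and import packages are assumptions.

import java.io.IOException;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.hudi.common.model.HoodieCommitMetadata;
import org.apache.hudi.common.table.timeline.HoodieInstant;
import org.apache.hudi.common.table.timeline.HoodieTimeline;
import org.apache.hudi.common.table.timeline.TimelineUtils;

class CommitMetadataScanExample {
  // Logs the write operation type of every completed commit in the given timeline.
  static void logOperationTypes(HoodieTimeline timeline) throws IOException {
    HoodieTimeline completed = timeline.getWriteTimeline().filterCompletedInstants();
    List<HoodieInstant> instants = completed.getInstantsAsStream().collect(Collectors.toList());
    for (HoodieInstant instant : instants) {
      HoodieCommitMetadata metadata = TimelineUtils.getCommitMetadata(instant, completed);
      System.out.println(instant.getTimestamp() + " -> " + metadata.getOperationType());
    }
  }
}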
| 3.26 |
hudi_TimelineUtils_validateTimestampAsOf_rdh
|
/**
* Validate user-specified timestamp of time travel query against incomplete commit's timestamp.
*
* @throws HoodieException
* when time travel query's timestamp >= incomplete commit's timestamp
*/
public static void validateTimestampAsOf(HoodieTableMetaClient metaClient, String timestampAsOf) {
Option<HoodieInstant> firstIncompleteCommit = metaClient.getCommitsTimeline().filterInflightsAndRequested().filter(instant -> (!HoodieTimeline.REPLACE_COMMIT_ACTION.equals(instant.getAction())) || (!ClusteringUtils.getClusteringPlan(metaClient, instant).isPresent())).firstInstant();
if (firstIncompleteCommit.isPresent()) {
String incompleteCommitTime = firstIncompleteCommit.get().getTimestamp();
if (compareTimestamps(timestampAsOf, GREATER_THAN_OR_EQUALS, incompleteCommitTime)) {
throw new HoodieTimeTravelException(String.format("Time travel's timestamp '%s' must be earlier than the first incomplete commit timestamp '%s'.", timestampAsOf, incompleteCommitTime));
}
}
// also timestamp as of cannot query cleaned up data.
Option<HoodieInstant> latestCleanOpt = metaClient.getActiveTimeline().getCleanerTimeline().filterCompletedInstants().lastInstant();
if (latestCleanOpt.isPresent()) {
      // Ensure the timestamp of interest is not earlier than the earliest commit retained by the cleaner.
try {
HoodieCleanMetadata cleanMetadata = CleanerUtils.getCleanerMetadata(metaClient, latestCleanOpt.get());
String earliestCommitToRetain = cleanMetadata.getEarliestCommitToRetain();
if (!StringUtils.isNullOrEmpty(earliestCommitToRetain)) {
ValidationUtils.checkArgument(HoodieTimeline.compareTimestamps(earliestCommitToRetain, LESSER_THAN_OR_EQUALS, timestampAsOf),
"Cleaner cleaned up the timestamp of interest. Please ensure sufficient commits are retained with cleaner " + "for Timestamp as of query to work");
} else {
// when cleaner is based on file versions, we may not find value for earliestCommitToRetain.
// so, lets check if timestamp of interest is archived based on first entry in active timeline
        Option<HoodieInstant> firstCompletedInstant = metaClient.getActiveTimeline().getWriteTimeline().filterCompletedInstants().firstInstant();
        if (firstCompletedInstant.isPresent()) {
ValidationUtils.checkArgument(HoodieTimeline.compareTimestamps(firstCompletedInstant.get().getTimestamp(), LESSER_THAN_OR_EQUALS, timestampAsOf), "Please ensure sufficient commits are retained (uncleaned and un-archived) for timestamp as of query to work.");
}
}
} catch (IOException e) {
throw new HoodieTimeTravelException("Cleaner cleaned up the timestamp of interest. " + "Please ensure sufficient commits are retained with cleaner for Timestamp as of query to work ");
}
  }
}
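A guard sketch around a time travel read: the caller validates the requested timestamp first and treats both the explicit time travel exception and the IllegalArgumentException raised by checkArgument as "unsafe to query". The wrapper class is hypothetical and import package names are assumed.

import org.apache.hudi.common.table.HoodieTableMetaClient;
import org.apache.hudi.common.table.timeline.TimelineUtils;
import org.apache.hudi.exception.HoodieTimeTravelException;

class TimeTravelGuardExample {
  // Returns true only when the requested as-of timestamp is safe to query.
  static boolean canTimeTravel(HoodieTableMetaClient metaClient, String timestampAsOf) {
    try {
      TimelineUtils.validateTimestampAsOf(metaClient, timestampAsOf);
      return true;
    } catch (HoodieTimeTravelException | IllegalArgumentException e) {
      // Either an incomplete commit precedes the timestamp, or the data was already cleaned/archived.
      return false;
    }
  }
}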
| 3.26 |
hudi_TimelineUtils_getEarliestInstantForMetadataArchival_rdh
|
/**
* Gets the qualified earliest instant from the active timeline of the data table
* for the archival in metadata table.
* <p>
* the qualified earliest instant is chosen as the earlier one between the earliest
* commit (COMMIT, DELTA_COMMIT, and REPLACE_COMMIT only, considering non-savepoint
* commit only if enabling archive beyond savepoint) and the earliest inflight
* instant (all actions).
*
* @param dataTableActiveTimeline
* the active timeline of the data table.
* @param shouldArchiveBeyondSavepoint
* whether to archive beyond savepoint.
* @return the instant meeting the requirement.
*/
public static Option<HoodieInstant> getEarliestInstantForMetadataArchival(HoodieActiveTimeline dataTableActiveTimeline, boolean shouldArchiveBeyondSavepoint) {
// This is for commits only, not including CLEAN, ROLLBACK, etc.
// When archive beyond savepoint is enabled, there are chances that there could be holes
// in the timeline due to archival and savepoint interplay. So, the first non-savepoint
// commit in the data timeline is considered as beginning of the active timeline.
Option<HoodieInstant> earliestCommit = (shouldArchiveBeyondSavepoint) ? dataTableActiveTimeline.getTimelineOfActions(CollectionUtils.createSet(COMMIT_ACTION, DELTA_COMMIT_ACTION, REPLACE_COMMIT_ACTION, SAVEPOINT_ACTION)).getFirstNonSavepointCommit() : dataTableActiveTimeline.getCommitsTimeline().firstInstant();
// This is for all instants which are in-flight
Option<HoodieInstant> earliestInflight = dataTableActiveTimeline.filterInflightsAndRequested().firstInstant();
  if (earliestCommit.isPresent() && earliestInflight.isPresent()) {
    if (earliestCommit.get().compareTo(earliestInflight.get()) < 0) {
      return earliestCommit;
    }
    return earliestInflight;
} else if (earliestCommit.isPresent()) {
return earliestCommit;
} else if (earliestInflight.isPresent()) {
return earliestInflight;
} else {
return Option.empty();
}
}
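A small sketch resolving the boundary instant that metadata table archival must not cross; how shouldArchiveBeyondSavepoint is derived from the write config is left to the caller, and the class name is illustrative.

import org.apache.hudi.common.table.HoodieTableMetaClient;
import org.apache.hudi.common.table.timeline.HoodieInstant;
import org.apache.hudi.common.table.timeline.TimelineUtils;
import org.apache.hudi.common.util.Option;

class MetadataArchivalBoundaryExample {
  // Resolves the earliest data table instant that metadata table archival has to preserve.
  static Option<String> earliestInstantToRetain(HoodieTableMetaClient dataTableMetaClient, boolean archiveBeyondSavepoint) {
    Option<HoodieInstant> boundary = TimelineUtils.getEarliestInstantForMetadataArchival(
        dataTableMetaClient.getActiveTimeline(), archiveBeyondSavepoint);
    return boundary.map(HoodieInstant::getTimestamp);
  }
}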
| 3.26 |
hudi_TimelineUtils_getWrittenPartitions_rdh
|
/**
* Returns partitions that have new data strictly after commitTime.
* Does not include internal operations such as clean in the timeline.
*/
public static List<String> getWrittenPartitions(HoodieTimeline timeline) {
HoodieTimeline timelineToSync = timeline.getWriteTimeline();
return getAffectedPartitions(timelineToSync);
}
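A sketch of an incremental-style use: list the partitions touched by commits after a stored checkpoint. The findInstantsAfter call is assumed from the timeline API and is not part of the snippet above; the class name is illustrative.

import java.util.List;
import org.apache.hudi.common.table.HoodieTableMetaClient;
import org.apache.hudi.common.table.timeline.TimelineUtils;

class ChangedPartitionsExample {
  // Lists partitions written to by commits strictly after the given checkpoint instant.
  static List<String> partitionsWrittenSince(HoodieTableMetaClient metaClient, String checkpointInstant) {
    // findInstantsAfter(...) is assumed to exist on the timeline; adjust to the available API if it differs.
    return TimelineUtils.getWrittenPartitions(metaClient.getActiveTimeline().findInstantsAfter(checkpointInstant));
  }
}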
| 3.26 |