name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68)
---|---|---|
hudi_BaseHoodieWriteClient_preWrite_rdh
|
/**
* Common method containing steps to be performed before write (upsert/insert/...
*
* @param instantTime
* @param writeOperationType
* @param metaClient
*/
public void preWrite(String instantTime, WriteOperationType writeOperationType, HoodieTableMetaClient metaClient) {
setOperationType(writeOperationType);
this.lastCompletedTxnAndMetadata = (txnManager.isLockRequired()) ? TransactionUtils.getLastCompletedTxnInstantAndMetadata(metaClient) : Option.empty();
this.pendingInflightAndRequestedInstants = TransactionUtils.getInflightAndRequestedInstants(metaClient);
this.pendingInflightAndRequestedInstants.remove(instantTime);
tableServiceClient.setPendingInflightAndRequestedInstants(this.pendingInflightAndRequestedInstants);
tableServiceClient.startAsyncCleanerService(this);
tableServiceClient.startAsyncArchiveService(this);
}
| 3.26 |
hudi_BaseHoodieWriteClient_renameColumn_rdh
|
/**
* Rename a column of the Hudi table.
*
* @param colName
* col name to be renamed. If we want to rename a col from a nested field, the full name should be specified.
* @param newName
* new name for the current col. No need to specify the full name.
*/
public void renameColumn(String colName, String newName) {
Pair<InternalSchema, HoodieTableMetaClient> pair = getInternalSchemaAndMetaClient();
InternalSchema newSchema = new InternalSchemaChangeApplier(pair.getLeft()).applyRenameChange(colName, newName);
commitTableChange(newSchema, pair.getRight());
}
| 3.26 |
hudi_BaseHoodieWriteClient_clean_rdh
|
/**
* Triggers clean for the table. This refers to cleaning up any stale/old files/data lying around (either on file storage or index storage) based on the
* configurations and CleaningPolicy used.
*
* @param skipLocking
* if this is triggered by another parent transaction, locking can be skipped.
* @return instance of {@link HoodieCleanMetadata}.
*/
@Deprecated
public HoodieCleanMetadata clean(boolean skipLocking) {
return clean(createNewInstantTime());
}
| 3.26 |
hudi_BaseHoodieWriteClient_scheduleClustering_rdh
|
/**
* Schedules a new clustering instant.
*
* @param extraMetadata
* Extra Metadata to be stored
*/
public Option<String> scheduleClustering(Option<Map<String, String>> extraMetadata) throws HoodieIOException {
String instantTime = createNewInstantTime();
return scheduleClusteringAtInstant(instantTime, extraMetadata) ? Option.of(instantTime) : Option.empty();
}
| 3.26 |
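A caller-side sketch of the scheduling API above, assuming `writeClient` is an already-configured write client for the table; passing `Option.empty()` for the extra metadata is an illustrative choice, not the only one.

```java
import java.util.Map;
import org.apache.hudi.common.util.Option;

// writeClient is assumed to be a live BaseHoodieWriteClient subclass instance.
Option<Map<String, String>> extraMetadata = Option.empty();
Option<String> clusteringInstant = writeClient.scheduleClustering(extraMetadata);
if (clusteringInstant.isPresent()) {
  // a clustering plan was persisted at this instant; an inline or async executor can pick it up
  System.out.println("Scheduled clustering at instant " + clusteringInstant.get());
} else {
  System.out.println("Nothing eligible for clustering under the current config");
}
```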
hudi_BaseHoodieWriteClient_setWriteSchemaForDeletes_rdh
|
/**
* Sets write schema from last instant since deletes may not have schema set in the config.
*/
protected void setWriteSchemaForDeletes(HoodieTableMetaClient metaClient) {
try {
HoodieActiveTimeline activeTimeline = metaClient.getActiveTimeline();
Option<HoodieInstant> lastInstant = activeTimeline.filterCompletedInstants().filter(s -> s.getAction().equals(metaClient.getCommitActionType()) || s.getAction().equals(HoodieActiveTimeline.REPLACE_COMMIT_ACTION)).lastInstant();
if (lastInstant.isPresent()) {
HoodieCommitMetadata commitMetadata = HoodieCommitMetadata.fromBytes(activeTimeline.getInstantDetails(lastInstant.get()).get(), HoodieCommitMetadata.class);
String extraSchema = commitMetadata.getExtraMetadata().get(SCHEMA_KEY);
if (!StringUtils.isNullOrEmpty(extraSchema)) {
config.setSchema(commitMetadata.getExtraMetadata().get(SCHEMA_KEY));
} else {
throw new HoodieIOException("Latest commit does not have any schema in commit metadata");
}
} else {
LOG.warn("None rows are deleted because the table is empty");
}
} catch (IOException e) {
throw new HoodieIOException("IOException thrown while reading last commit metadata", e);
}
}
| 3.26 |
hudi_BaseHoodieWriteClient_runAnyPendingCompactions_rdh
|
/**
* Run any pending compactions.
*/
public void runAnyPendingCompactions() {
tableServiceClient.runAnyPendingCompactions(createTable(config, hadoopConf));
}
| 3.26 |
hudi_BaseHoodieWriteClient_initMetadataTable_rdh
|
/**
* Bootstrap the metadata table.
*
* @param instantTime
* current inflight instant time
*/
protected void initMetadataTable(Option<String> instantTime) {
// by default do nothing.
}
| 3.26 |
hudi_BaseHoodieWriteClient_scheduleAndGetRestorePlan_rdh
|
/**
* Check if there is a failed restore with the same savepointToRestoreTimestamp. Reusing the commit instead of
* creating a new one will prevent causing some issues with the metadata table.
*/
private Pair<String, Option<HoodieRestorePlan>> scheduleAndGetRestorePlan(final String savepointToRestoreTimestamp, HoodieTable<T, I, K, O> table) throws IOException {
Option<HoodieInstant> failedRestore = table.getRestoreTimeline().filterInflightsAndRequested().lastInstant();
if (failedRestore.isPresent() && savepointToRestoreTimestamp.equals(RestoreUtils.getSavepointToRestoreTimestamp(table, failedRestore.get()))) {
return Pair.of(failedRestore.get().getTimestamp(), Option.of(RestoreUtils.getRestorePlan(table.getMetaClient(), failedRestore.get())));
}
final String restoreInstantTimestamp = createNewInstantTime();
return Pair.of(restoreInstantTimestamp, table.scheduleRestore(context, restoreInstantTimestamp, savepointToRestoreTimestamp));
}
| 3.26 |
hudi_BaseHoodieWriteClient_lazyRollbackFailedIndexing_rdh
|
/**
* Rolls back the failed delta commits corresponding to the indexing action.
* <p>
* TODO(HUDI-5733): This should be cleaned up once the proper fix of rollbacks
* in the metadata table is landed.
*
* @return {@code true} if rollback happens; {@code false} otherwise.
*/
public boolean lazyRollbackFailedIndexing() {
return tableServiceClient.rollbackFailedIndexingCommits();
}
| 3.26 |
hudi_BaseHoodieWriteClient_m4_rdh
|
/**
* NOTE : This action requires all writers (ingest and compact) to a table to be stopped before proceeding. Revert
* the (inflight/committed) record changes for all commits after the provided instant time.
*
* @param savepointToRestoreTimestamp
* savepoint instant time to which restoration is requested
*/
public HoodieRestoreMetadata m4(final String savepointToRestoreTimestamp, boolean initialMetadataTableIfNecessary) throws HoodieRestoreException {
LOG.info("Begin restore to instant " + savepointToRestoreTimestamp);
Timer.Context v38 = metrics.getRollbackCtx();
try {
HoodieTable<T, I, K, O> table = initTable(WriteOperationType.UNKNOWN, Option.empty(), initialMetadataTableIfNecessary);
Pair<String, Option<HoodieRestorePlan>> timestampAndRestorePlan = scheduleAndGetRestorePlan(savepointToRestoreTimestamp, table);
final String restoreInstantTimestamp = timestampAndRestorePlan.getLeft();
Option<HoodieRestorePlan> restorePlanOption = timestampAndRestorePlan.getRight();
if (restorePlanOption.isPresent()) {
HoodieRestoreMetadata restoreMetadata = table.restore(context, restoreInstantTimestamp, savepointToRestoreTimestamp);
if (v38 != null) {
final long durationInMs = metrics.getDurationInMs(v38.stop());
final long totalFilesDeleted = restoreMetadata.getHoodieRestoreMetadata().values().stream().flatMap(Collection::stream).mapToLong(HoodieRollbackMetadata::getTotalFilesDeleted).sum();
metrics.updateRollbackMetrics(durationInMs, totalFilesDeleted);
}
return restoreMetadata;
} else {
throw new HoodieRestoreException((("Failed to restore " + config.getBasePath()) + " to commit ") + savepointToRestoreTimestamp);
}
} catch (Exception e) {
throw new HoodieRestoreException("Failed to restore to " + savepointToRestoreTimestamp, e);
}
}
| 3.26 |
hudi_BaseHoodieWriteClient_startCommitWithTime_rdh
|
/**
* Starts a new commit time for a write operation (insert/update/delete) with the specified action.
*/
private void startCommitWithTime(String instantTime, String actionType, HoodieTableMetaClient metaClient) {
CleanerUtils.rollbackFailedWrites(config.getFailedWritesCleanPolicy(), HoodieTimeline.COMMIT_ACTION, () -> tableServiceClient.rollbackFailedWrites());
startCommit(instantTime, actionType, metaClient);
}
| 3.26 |
hudi_BaseHoodieWriteClient_rollbackFailedWrites_rdh
|
/**
* Rollback failed writes if any.
*
* @return true if rollback happened. false otherwise.
*/
public boolean rollbackFailedWrites() {
return tableServiceClient.rollbackFailedWrites();
}
| 3.26 |
hudi_BaseHoodieWriteClient_deleteColumns_rdh
|
/**
* Delete columns from the Hudi table.
*
* @param colNames
* col names to be deleted. If we want to delete a col from a nested field, the full name should be specified.
*/
public void deleteColumns(String... colNames) {
Pair<InternalSchema, HoodieTableMetaClient> v81 = getInternalSchemaAndMetaClient();
InternalSchema newSchema = new InternalSchemaChangeApplier(v81.getLeft()).applyDeleteChange(colNames);
commitTableChange(newSchema, v81.getRight());
}
| 3.26 |
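The two schema-evolution entry points above (`renameColumn`, `deleteColumns`) are typically driven like this; a minimal sketch assuming `writeClient` exists and the column names are purely illustrative.

```java
// writeClient is assumed to be a live write client for the target table.
// Top-level column: the short name is enough.
writeClient.renameColumn("rider", "rider_id");
// Nested column: use the full dotted path, as the javadoc requires.
writeClient.renameColumn("fare.currency", "currency_code");
// Dropping columns (top-level or nested) goes through the same InternalSchema change flow.
writeClient.deleteColumns("tip_amount", "driver.phone");
```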
hudi_BaseHoodieWriteClient_compact_rdh
|
/**
* Ensures compaction instant is in expected state and performs Compaction for the workload stored in instant-time.
*
* @param compactionInstantTime
* Compaction Instant Time
* @return Collection of Write Status
*/
protected HoodieWriteMetadata<O> compact(String compactionInstantTime, boolean shouldComplete) {
HoodieTable table = createTable(config, context.getHadoopConf().get());
preWrite(compactionInstantTime, WriteOperationType.COMPACT, table.getMetaClient());
return tableServiceClient.compact(compactionInstantTime, shouldComplete);
}
| 3.26 |
hudi_BaseHoodieWriteClient_commit_rdh
|
/**
* Commit changes performed at the given instantTime marker.
*/
public boolean commit(String instantTime, O writeStatuses, Option<Map<String, String>> extraMetadata) {
HoodieTableMetaClient metaClient = createMetaClient(false);
String actionType = metaClient.getCommitActionType();
return commit(instantTime, writeStatuses, extraMetadata, actionType, Collections.emptyMap());
}
| 3.26 |
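For context, the `commit(...)` overload above usually sits at the end of a write round-trip. A sketch assuming a Spark write client where the status container `O` is a `JavaRDD<WriteStatus>`; `startCommit()` and `upsert(...)` are the usual public write-client entry points but are not shown in this table, and `records` is an assumed, already-prepared `JavaRDD<HoodieRecord<T>>`.

```java
String instantTime = writeClient.startCommit();
JavaRDD<WriteStatus> writeStatuses = writeClient.upsert(records, instantTime);
boolean committed = writeClient.commit(instantTime, writeStatuses, Option.empty());
if (!committed) {
  // surface the failure to the caller / retry logic
  throw new IllegalStateException("Commit " + instantTime + " did not succeed");
}
```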
hudi_BaseHoodieWriteClient_releaseResources_rdh
|
/**
* Called after each write, to release any resources used.
*/
protected void releaseResources(String instantTime) {
// do nothing here
}
| 3.26 |
hudi_CompactionCommand_readCompactionPlanForActiveTimeline_rdh
|
/**
* TBD Can we make this part of HoodieActiveTimeline or a utility class.
*/
private HoodieCompactionPlan readCompactionPlanForActiveTimeline(HoodieActiveTimeline activeTimeline, HoodieInstant instant) {
try {
if (!HoodieTimeline.COMPACTION_ACTION.equals(instant.getAction())) {
try {
// This could be a completed compaction. Assume a compaction request file is present but skip if fails
return TimelineMetadataUtils.deserializeCompactionPlan(activeTimeline.readCompactionPlanAsBytes(HoodieTimeline.getCompactionRequestedInstant(instant.getTimestamp())).get());
} catch (HoodieIOException ioe) {
// SKIP
return null;
}
} else {
return TimelineMetadataUtils.deserializeCompactionPlan(activeTimeline.readCompactionPlanAsBytes(HoodieTimeline.getCompactionRequestedInstant(instant.getTimestamp())).get());
}
} catch (IOException e) {
throw new HoodieIOException(e.getMessage(), e);
}
}
| 3.26 |
hudi_CompactionCommand_compactionPlanReader_rdh
|
/**
* Compaction reading is different for different timelines. Create partial function to override special logic.
* We can make these read methods part of HoodieDefaultTimeline and override where necessary. But the
* BiFunction below has 'hacky' exception blocks, so restricting it to CLI.
*/
private <T extends HoodieDefaultTimeline, U extends HoodieInstant, V extends HoodieCompactionPlan> Function<HoodieInstant, HoodieCompactionPlan> compactionPlanReader(BiFunction<T, HoodieInstant, HoodieCompactionPlan> f, T timeline) {
return y -> f.apply(timeline, y);
}
| 3.26 |
hudi_CompactionCommand_printAllCompactions_rdh
|
/**
* Prints all compaction details.
*/
private static String printAllCompactions(HoodieDefaultTimeline timeline, Function<HoodieInstant, HoodieCompactionPlan> compactionPlanReader, boolean includeExtraMetadata, String sortByField, boolean descending, int limit, boolean headerOnly) {
Stream<HoodieInstant> instantsStream = timeline.getWriteTimeline().getReverseOrderedInstants();
List<Pair<HoodieInstant, HoodieCompactionPlan>> compactionPlans = instantsStream.map(instant -> Pair.of(instant, compactionPlanReader.apply(instant))).filter(pair -> pair.getRight() != null).collect(Collectors.toList());
Set<String> committedInstants = timeline.getCommitTimeline().filterCompletedInstants().getInstantsAsStream().map(HoodieInstant::getTimestamp).collect(Collectors.toSet());
List<Comparable[]> v36 = new ArrayList<>();
for (Pair<HoodieInstant, HoodieCompactionPlan> compactionPlan : compactionPlans) {
HoodieCompactionPlan plan = compactionPlan.getRight();
HoodieInstant instant = compactionPlan.getLeft();
final HoodieInstant.State state;
if (committedInstants.contains(instant.getTimestamp())) {
state = State.COMPLETED;
} else {
state = instant.getState();
}
if (includeExtraMetadata) {
v36.add(new Comparable[]{ instant.getTimestamp(), state.toString(), plan.getOperations() == null ? 0 : plan.getOperations().size(), plan.getExtraMetadata().toString() });
} else {
v36.add(new Comparable[]{ instant.getTimestamp(), state.toString(), plan.getOperations() == null ? 0 : plan.getOperations().size() });
}
}
Map<String, Function<Object, String>> fieldNameToConverterMap = new HashMap<>();
TableHeader header = new TableHeader().addTableHeaderField(HoodieTableHeaderFields.HEADER_COMPACTION_INSTANT_TIME).addTableHeaderField(HoodieTableHeaderFields.HEADER_STATE).addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_FILES_TO_BE_COMPACTED);
if (includeExtraMetadata) {
header = header.addTableHeaderField(HoodieTableHeaderFields.HEADER_EXTRA_METADATA);
}
return HoodiePrintHelper.print(header, fieldNameToConverterMap, sortByField, descending, limit, headerOnly, v36);
}
| 3.26 |
hudi_HoodieRowDataCreateHandle_canWrite_rdh
|
/**
* Returns {@code true} if this handle can take in more writes, else {@code false}.
*/
public boolean canWrite() {
return fileWriter.canWrite();
}
| 3.26 |
hudi_HoodieRowDataCreateHandle_write_rdh
|
/**
* Writes an {@link RowData} to the underlying {@link HoodieRowDataFileWriter}.
* Before writing, value for meta columns are computed as required
* and wrapped in {@link HoodieRowData}. {@link HoodieRowData} is what gets written to HoodieRowDataFileWriter.
*
* @param recordKey
* The record key
* @param partitionPath
* The partition path
* @param record
* instance of {@link RowData} that needs to be written to the fileWriter.
* @throws IOException
*/
public void write(String recordKey, String partitionPath, RowData record) throws IOException {
try {
String seqId = (preserveHoodieMetadata) ? record.getString(HoodieRecord.COMMIT_SEQNO_METADATA_FIELD_ORD).toString() : HoodieRecord.generateSequenceId(instantTime, f0, SEQGEN.getAndIncrement());
String commitInstant = (preserveHoodieMetadata) ? record.getString(HoodieRecord.COMMIT_TIME_METADATA_FIELD_ORD).toString() : instantTime;
RowData rowData = HoodieRowDataCreation.create(commitInstant, seqId, recordKey, partitionPath, path.getName(), record, writeConfig.allowOperationMetadataField(), preserveHoodieMetadata);
try {
fileWriter.writeRow(recordKey, rowData);
HoodieRecordDelegate recordDelegate = (writeStatus.isTrackingSuccessfulWrites()) ? HoodieRecordDelegate.create(recordKey, partitionPath, null, newRecordLocation) : null;
writeStatus.markSuccess(recordDelegate, Option.empty());
} catch (Throwable t) {
writeStatus.markFailure(recordKey, partitionPath, t);
}
} catch (Throwable ge) {
writeStatus.setGlobalError(ge);
throw ge;
}
}
| 3.26 |
hudi_HoodieRowDataCreateHandle_close_rdh
|
/**
* Closes the {@link HoodieRowDataCreateHandle} and returns an instance of {@link WriteStatus} containing the stats and
* status of the writes to this handle.
*
* @return the {@link WriteStatus} containing the stats and status of the writes to this handle.
* @throws IOException
*/
public WriteStatus close() throws IOException {
fileWriter.close();
HoodieWriteStat v5 = writeStatus.getStat();
v5.setPartitionPath(partitionPath);
v5.setNumWrites(writeStatus.getTotalRecords());
v5.setNumDeletes(0);
v5.setNumInserts(writeStatus.getTotalRecords());
v5.setPrevCommit(HoodieWriteStat.NULL_COMMIT);
v5.setFileId(fileId);
v5.setPath(new Path(writeConfig.getBasePath()), path);
long fileSizeInBytes = FSUtils.getFileSize(table.getMetaClient().getFs(), path);
v5.setTotalWriteBytes(fileSizeInBytes);
v5.setFileSizeInBytes(fileSizeInBytes);
v5.setTotalWriteErrors(writeStatus.getTotalErrorRecords());
HoodieWriteStat.RuntimeStats runtimeStats = new HoodieWriteStat.RuntimeStats();
runtimeStats.setTotalCreateTime(currTimer.endTimer());
v5.setRuntimeStats(runtimeStats);
return writeStatus;
}
| 3.26 |
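A sketch of how a caller could drive the `canWrite()`/`write(...)`/`close()` lifecycle shown in the snippets above. The iterator, the key extraction, and the `createNewHandle(...)` factory are hypothetical stand-ins for the Flink write-task plumbing that normally owns these handles.

```java
// IOExceptions from write()/close() are assumed to propagate to the caller.
List<WriteStatus> writeStatuses = new ArrayList<>();
HoodieRowDataCreateHandle handle = createNewHandle(partitionPath); // hypothetical factory
while (rowIterator.hasNext()) {
  RowData row = rowIterator.next();
  String recordKey = extractRecordKey(row); // hypothetical helper
  if (!handle.canWrite()) {
    writeStatuses.add(handle.close());       // seal the current file
    handle = createNewHandle(partitionPath); // roll over to a fresh file
  }
  handle.write(recordKey, partitionPath, row);
}
writeStatuses.add(handle.close());
```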
hudi_HoodieRowDataCreateHandle_createMarkerFile_rdh
|
/**
* Creates an empty marker file corresponding to storage writer path.
*
* @param partitionPath
* Partition path
*/
private void createMarkerFile(String partitionPath, String dataFileName) {
WriteMarkers writeMarkers = WriteMarkersFactory.get(writeConfig.getMarkersType(), table, instantTime);
writeMarkers.create(partitionPath, dataFileName, IOType.CREATE);
}
| 3.26 |
hudi_RocksDbDiskMap_iterator_rdh
|
/**
* Custom iterator to iterate over values written to disk.
*/
@Override
public Iterator<R> iterator() {
return getRocksDb().iterator(ROCKSDB_COL_FAMILY);
}
| 3.26 |
hudi_HoodieTableMetadata_getDatasetBasePath_rdh
|
/**
* Return the base path of the dataset.
*
* @param metadataTableBasePath
* The base path of the metadata table
*/
static String getDatasetBasePath(String metadataTableBasePath) {
int endPos = metadataTableBasePath.lastIndexOf(Path.SEPARATOR + HoodieTableMetaClient.METADATA_TABLE_FOLDER_PATH);
checkState(endPos != (-1), metadataTableBasePath + " should be base path of the metadata table");
return metadataTableBasePath.substring(0, endPos);
}
| 3.26 |
hudi_HoodieTableMetadata_getMetadataTableBasePath_rdh
|
/**
* Return the base-path of the Metadata Table for the given Dataset identified by base-path
*/
static Path getMetadataTableBasePath(Path dataTableBasePath) {
return new Path(dataTableBasePath, HoodieTableMetaClient.METADATA_TABLE_FOLDER_PATH);
}
| 3.26 |
hudi_HoodieTableMetadata_getDataTableBasePathFromMetadataTable_rdh
|
/**
* Returns the base path of the Dataset provided the base-path of the Metadata Table of this
* Dataset
*/
static String getDataTableBasePathFromMetadataTable(String metadataTableBasePath) {
checkArgument(isMetadataTable(metadataTableBasePath));
return metadataTableBasePath.substring(0, metadataTableBasePath.lastIndexOf(HoodieTableMetaClient.METADATA_TABLE_FOLDER_PATH) - 1);
}
| 3.26 |
hudi_HoodieTableMetadata_isMetadataTable_rdh
|
/**
* Returns {@code True} if the given path contains a metadata table.
*
* @param basePath
* The base path to check
*/
static boolean isMetadataTable(String basePath) {
if ((basePath == null) || basePath.isEmpty()) {
return false;
}
if (basePath.endsWith(Path.SEPARATOR)) {
basePath = basePath.substring(0, basePath.length() - 1);
}
return basePath.endsWith(HoodieTableMetaClient.METADATA_TABLE_FOLDER_PATH);
}
| 3.26 |
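The path helpers above compose into a simple round trip; a sketch with an illustrative base path, assuming `METADATA_TABLE_FOLDER_PATH` resolves to the metadata folder under `.hoodie`.

```java
import org.apache.hadoop.fs.Path;

Path dataTablePath = new Path("/warehouse/hudi/trips"); // illustrative
Path metadataTablePath = HoodieTableMetadata.getMetadataTableBasePath(dataTablePath);

boolean isMdt = HoodieTableMetadata.isMetadataTable(metadataTablePath.toString()); // true
String backToDataTable =
    HoodieTableMetadata.getDataTableBasePathFromMetadataTable(metadataTablePath.toString());
// backToDataTable equals "/warehouse/hudi/trips"
```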
hudi_AvroSchemaConverter_convertToSchema_rdh
|
/**
* Converts Flink SQL {@link LogicalType} (can be nested) into an Avro schema.
*
* <p>The "{rowName}." is used as the nested row type name prefix in order to generate the right
* schema. Nested record type that only differs with type name is still compatible.
*
* @param logicalType
* logical type
* @param rowName
* the record name
* @return Avro's {@link Schema} matching this logical type.
*/
public static Schema convertToSchema(LogicalType logicalType, String rowName) {
int precision;
boolean nullable = logicalType.isNullable();
switch (logicalType.getTypeRoot()) {
case NULL :
return SchemaBuilder.builder().nullType();
case BOOLEAN :
Schema bool = SchemaBuilder.builder().booleanType();
return nullable ? nullableSchema(bool) : bool;
case TINYINT :
case SMALLINT :
case INTEGER :
Schema integer = SchemaBuilder.builder().intType();
return nullable ? nullableSchema(integer) : integer;
case BIGINT :
Schema bigint = SchemaBuilder.builder().longType();
return nullable ? nullableSchema(bigint) : bigint;
case FLOAT :
Schema f = SchemaBuilder.builder().floatType();
return nullable ? nullableSchema(f) : f;
case DOUBLE :
Schema d = SchemaBuilder.builder().doubleType();
return nullable ? nullableSchema(d) : d;
case CHAR :
case VARCHAR :
Schema str = SchemaBuilder.builder().stringType();
return nullable ? nullableSchema(str) : str;
case BINARY :
case VARBINARY :
Schema binary = SchemaBuilder.builder().bytesType();
return nullable ? nullableSchema(binary) : binary;
case TIMESTAMP_WITHOUT_TIME_ZONE :
// use long to represents Timestamp
final TimestampType timestampType = ((TimestampType) (logicalType));
precision = timestampType.getPrecision();
LogicalType timestampLogicalType;
if (precision <= 3) {
timestampLogicalType = LogicalTypes.timestampMillis();
} else if (precision <= 6) {
timestampLogicalType = LogicalTypes.timestampMicros();
} else {
throw new IllegalArgumentException(("Avro does not support TIMESTAMP type with precision: " + precision) + ", it only support precisions <= 6.");
}
Schema timestamp = timestampLogicalType.addToSchema(SchemaBuilder.builder().longType());
return nullable ? nullableSchema(timestamp) : timestamp;
case TIMESTAMP_WITH_LOCAL_TIME_ZONE :
// use long to represents LocalZonedTimestampType
final LocalZonedTimestampType localZonedTimestampType = ((LocalZonedTimestampType) (logicalType));
precision = localZonedTimestampType.getPrecision();
LogicalType localZonedTimestampLogicalType;
if (precision <= 3) {
localZonedTimestampLogicalType = LogicalTypes.localTimestampMillis();
} else if (precision <= 6) {
localZonedTimestampLogicalType = LogicalTypes.localTimestampMicros();
} else {
throw new IllegalArgumentException(("Avro does not support LOCAL TIMESTAMP type with precision: " + precision) + ", it only support precisions <= 6.");
}
Schema localZonedTimestamp = localZonedTimestampLogicalType.addToSchema(SchemaBuilder.builder().longType());
return nullable ? nullableSchema(localZonedTimestamp) : localZonedTimestamp;
case DATE :
// use int to represents Date
Schema date = LogicalTypes.date().addToSchema(SchemaBuilder.builder().intType());
return nullable ? nullableSchema(date) : date;
case TIME_WITHOUT_TIME_ZONE :
precision = ((TimeType) (logicalType)).getPrecision();
if (precision > 3) {
throw new IllegalArgumentException(("Avro does not support TIME type with precision: " + precision) + ", it only supports precision less than 3.");
}
// use int to represents Time, we only support millisecond when deserialization
Schema time = LogicalTypes.timeMillis().addToSchema(SchemaBuilder.builder().intType());
return nullable ? nullableSchema(time) : time;
case DECIMAL :
DecimalType decimalType = ((DecimalType) (logicalType));
// store BigDecimal as Fixed
// for spark compatibility.
Schema decimal = LogicalTypes.decimal(decimalType.getPrecision(),
decimalType.getScale()).addToSchema(SchemaBuilder.fixed(String.format("%s.fixed", rowName)).size(computeMinBytesForDecimalPrecision(decimalType.getPrecision())));
return nullable ? nullableSchema(decimal) : decimal;
case ROW :
RowType rowType = ((RowType) (logicalType));
List<String> fieldNames = rowType.getFieldNames();
// we have to make sure the record name is different in a Schema
SchemaBuilder.FieldAssembler<Schema> builder = SchemaBuilder.builder().record(rowName).fields();
for (int i = 0; i < rowType.getFieldCount(); i++) {
String v37 = fieldNames.get(i);
LogicalType fieldType = rowType.getTypeAt(i);
SchemaBuilder.GenericDefault<Schema> fieldBuilder = builder.name(v37).type(convertToSchema(fieldType, (rowName + ".") + v37));
if (fieldType.isNullable()) {
builder = fieldBuilder.withDefault(null);
} else {
builder = fieldBuilder.noDefault();
}
}
Schema record = builder.endRecord();
return nullable ? nullableSchema(record) : record;
case MULTISET :
case MAP :
Schema map = SchemaBuilder.builder().map().values(convertToSchema(extractValueTypeToAvroMap(logicalType), rowName));
return nullable ? nullableSchema(map) : map;
case ARRAY :
ArrayType arrayType = ((ArrayType) (logicalType));
Schema array = SchemaBuilder.builder().array().items(convertToSchema(arrayType.getElementType(), rowName));
return nullable ? nullableSchema(array) : array;
case RAW :
default :
throw new UnsupportedOperationException("Unsupported to derive Schema for type: " + logicalType);
}
}
| 3.26 |
hudi_AvroSchemaConverter_convertToDataType_rdh
|
/**
* Converts an Avro schema {@code schema} into a nested row structure with deterministic field order and
* data types that are compatible with Flink's Table & SQL API.
*
* @param schema
* Avro schema definition
* @return data type matching the schema
*/
public static DataType convertToDataType(Schema schema) {
switch (schema.getType()) {
case RECORD :
final List<Schema.Field> schemaFields = schema.getFields();
final DataTypes.Field[] fields = new DataTypes.Field[schemaFields.size()];
for (int i = 0; i < schemaFields.size(); i++) {
final Schema.Field field = schemaFields.get(i);
fields[i] = DataTypes.FIELD(field.name(), convertToDataType(field.schema()));
}
return DataTypes.ROW(fields).notNull();
case ENUM :
case STRING :
// convert Avro's Utf8/CharSequence to String
return DataTypes.STRING().notNull();
case ARRAY :
return DataTypes.ARRAY(convertToDataType(schema.getElementType())).notNull();
case MAP :
return DataTypes.MAP(DataTypes.STRING().notNull(), convertToDataType(schema.getValueType())).notNull();
case UNION :
final Schema v4;
final boolean nullable;
if ((schema.getTypes().size() == 2) && (schema.getTypes().get(0).getType() == Type.NULL)) {
v4 = schema.getTypes().get(1);
nullable = true;
} else if ((schema.getTypes().size() == 2) && (schema.getTypes().get(1).getType() == Type.NULL)) {
v4 = schema.getTypes().get(0);
nullable = true;
} else if (schema.getTypes().size() == 1) {
v4 = schema.getTypes().get(0);
nullable = false;
} else {
List<Schema> v6 = schema.getTypes().stream().filter(s -> s.getType() != Schema.Type.NULL).collect(Collectors.toList());
nullable = schema.getTypes().size() > v6.size();
// use Kryo for serialization
DataType rawDataType = new AtomicDataType(new TypeInformationRawType<>(false, Types.GENERIC(Object.class))).notNull();
if (recordTypesOfSameNumFields(v6)) {
DataType converted = DataTypes.ROW(DataTypes.FIELD("wrapper", rawDataType)).notNull();
return nullable ? converted.nullable() : converted;
}
// use Kryo for serialization
return nullable ? rawDataType.nullable() : rawDataType;
}
DataType converted = convertToDataType(v4);
return nullable ? converted.nullable() : converted;
case FIXED :
// logical decimal type
if (schema.getLogicalType() instanceof LogicalTypes.Decimal) {
final LogicalTypes.Decimal decimalType = ((LogicalTypes.Decimal) (schema.getLogicalType()));
return DataTypes.DECIMAL(decimalType.getPrecision(), decimalType.getScale()).notNull();
}
// convert fixed size binary data to primitive byte arrays
return DataTypes.VARBINARY(schema.getFixedSize()).notNull();
case BYTES :
// logical decimal type
if (schema.getLogicalType() instanceof LogicalTypes.Decimal) {
final LogicalTypes.Decimal decimalType = ((LogicalTypes.Decimal) (schema.getLogicalType()));
return DataTypes.DECIMAL(decimalType.getPrecision(), decimalType.getScale()).notNull();
}
return DataTypes.BYTES().notNull();
case INT :
// logical date and time type
final LogicalType logicalType = schema.getLogicalType();
if (logicalType == LogicalTypes.date()) {
return DataTypes.DATE().notNull();
} else if (logicalType == LogicalTypes.timeMillis()) {
return DataTypes.TIME(3).notNull();
}
return DataTypes.INT().notNull();
case LONG :
// logical timestamp type
if (schema.getLogicalType() == LogicalTypes.timestampMillis()) {
return DataTypes.TIMESTAMP(3).notNull();
} else if (schema.getLogicalType() == LogicalTypes.localTimestampMillis()) {
return DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3).notNull();
} else if (schema.getLogicalType() == LogicalTypes.timestampMicros()) {
return DataTypes.TIMESTAMP(6).notNull();
} else if (schema.getLogicalType() == LogicalTypes.localTimestampMicros()) {
return DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(6).notNull();
} else if (schema.getLogicalType() == LogicalTypes.timeMillis()) {
return DataTypes.TIME(3).notNull();
} else if (schema.getLogicalType() == LogicalTypes.timeMicros()) {
return DataTypes.TIME(6).notNull();
}
return DataTypes.BIGINT().notNull();
case FLOAT :
return DataTypes.FLOAT().notNull();
case DOUBLE :
return DataTypes.DOUBLE().notNull();
case BOOLEAN :
return DataTypes.BOOLEAN().notNull();
case NULL :
return DataTypes.NULL();
default :
throw new IllegalArgumentException(("Unsupported Avro type '" + schema.getType()) + "'.");
}
}
| 3.26 |
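A round-trip sketch for the two converters above, assuming the Flink table API classes are on the classpath; the field layout is illustrative.

```java
import org.apache.avro.Schema;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.types.DataType;

DataType rowType = DataTypes.ROW(
    DataTypes.FIELD("id", DataTypes.BIGINT().notNull()),
    DataTypes.FIELD("name", DataTypes.STRING()),
    DataTypes.FIELD("ts", DataTypes.TIMESTAMP(3)));

Schema avroSchema = AvroSchemaConverter.convertToSchema(rowType.getLogicalType(), "record");
// nullable fields come back as ["null", <type>] unions with a null default

DataType restored = AvroSchemaConverter.convertToDataType(avroSchema);
// restored should match the original, modulo the not-null top-level row
```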
hudi_AvroSchemaConverter_nullableSchema_rdh
|
/**
* Returns schema with nullable true.
*/
private static Schema nullableSchema(Schema schema) {
return schema.isNullable() ? schema : Schema.createUnion(SchemaBuilder.builder().nullType(), schema);
}
| 3.26 |
hudi_LSMTimeline_latestSnapshotVersion_rdh
|
/**
* Returns the latest snapshot version.
*/
public static int latestSnapshotVersion(HoodieTableMetaClient metaClient) throws IOException {
Path versionFilePath = getVersionFilePath(metaClient);
if (metaClient.getFs().exists(versionFilePath)) {
try {
Option<byte[]> content = FileIOUtils.readDataFromPath(metaClient.getFs(), versionFilePath);
if (content.isPresent()) {
return Integer.parseInt(new String(content.get(), StandardCharsets.UTF_8));
}
} catch (Exception e) {
// fallback to manifest file listing.
LOG.warn("Error reading version file {}", versionFilePath, e);
}
}
return m0(metaClient).stream().max(Integer::compareTo).orElse(-1);
}
| 3.26 |
hudi_LSMTimeline_isFileFromLayer_rdh
|
/**
* Returns whether a file belongs to the specified layer {@code layer} within the LSM layout.
*/
public static boolean isFileFromLayer(String fileName, int layer) {
return getFileLayer(fileName) == layer;
}
| 3.26 |
hudi_LSMTimeline_getVersionFilePath_rdh
|
/**
* Returns the full version file path with given version number.
*/
public static Path getVersionFilePath(HoodieTableMetaClient metaClient) {
return new Path(metaClient.getArchivePath(), VERSION_FILE_NAME);
}
| 3.26 |
hudi_LSMTimeline_getMaxInstantTime_rdh
|
/**
* Parse the maximum instant time from the file name.
*/
public static String getMaxInstantTime(String fileName) {
Matcher fileMatcher = ARCHIVE_FILE_PATTERN.matcher(fileName);
if (fileMatcher.matches()) {
return fileMatcher.group(2);
} else {
throw new HoodieException("Unexpected archival file name: " + fileName);
}
}
| 3.26 |
hudi_LSMTimeline_getMinInstantTime_rdh
|
/**
* Parse the minimum instant time from the file name.
*/
public static String getMinInstantTime(String fileName) {
Matcher fileMatcher = ARCHIVE_FILE_PATTERN.matcher(fileName);
if (fileMatcher.matches()) {
return fileMatcher.group(1);
} else {
throw new HoodieException("Unexpected archival file name: " + fileName);
}
}
| 3.26 |
hudi_LSMTimeline_latestSnapshotManifest_rdh
|
/**
* Reads the file list from the manifest file for the latest snapshot.
*/
public static HoodieLSMTimelineManifest latestSnapshotManifest(HoodieTableMetaClient metaClient, int latestVersion) {
if (latestVersion < 0) {
// there is no valid snapshot of the timeline.
return HoodieLSMTimelineManifest.EMPTY;
}
// read and deserialize the valid files.
byte[] content = FileIOUtils.readDataFromPath(metaClient.getFs(), getManifestFilePath(metaClient, latestVersion)).get();
try {
return HoodieLSMTimelineManifest.fromJsonString(new String(content, StandardCharsets.UTF_8), HoodieLSMTimelineManifest.class);
} catch (Exception e) {
throw new HoodieException("Error deserializing manifest entries", e);
}
}
| 3.26 |
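Combining `latestSnapshotVersion` and `latestSnapshotManifest` above gives the usual read path for the archived LSM timeline; a sketch assuming `metaClient` is an existing `HoodieTableMetaClient`.

```java
// latestSnapshotVersion may throw IOException; handling is left to the caller here.
int latestVersion = LSMTimeline.latestSnapshotVersion(metaClient);
HoodieLSMTimelineManifest manifest = LSMTimeline.latestSnapshotManifest(metaClient, latestVersion);
// latestVersion == -1 means no snapshot yet, and the manifest comes back as HoodieLSMTimelineManifest.EMPTY
```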
hudi_LSMTimeline_getReadSchema_rdh
|
// -------------------------------------------------------------------------
// Utilities
// -------------------------------------------------------------------------
public static Schema getReadSchema(HoodieArchivedTimeline.LoadMode loadMode) {
switch (loadMode) {
case SLIM :
return ArchivedInstantReadSchemas.TIMELINE_LSM_SLIM_READ_SCHEMA;
case METADATA :
return ArchivedInstantReadSchemas.TIMELINE_LSM_READ_SCHEMA_WITH_METADATA;
case PLAN :
return ArchivedInstantReadSchemas.TIMELINE_LSM_READ_SCHEMA_WITH_PLAN;
default :
throw new AssertionError("Unexpected");
}
}
| 3.26 |
hudi_LSMTimeline_m0_rdh
|
/**
* Returns all the valid snapshot versions.
*/
public static List<Integer> m0(HoodieTableMetaClient metaClient) throws IOException {
return Arrays.stream(metaClient.getFs().listStatus(new Path(metaClient.getArchivePath()), getManifestFilePathFilter())).map(fileStatus -> fileStatus.getPath().getName()).map(LSMTimeline::getManifestVersion).collect(Collectors.toList());
}
| 3.26 |
hudi_LSMTimeline_getManifestFilePath_rdh
|
/**
* Returns the full manifest file path with given version number.
*/
public static Path getManifestFilePath(HoodieTableMetaClient metaClient, int snapshotVersion) {
return new Path(metaClient.getArchivePath(), MANIFEST_FILE_PREFIX + snapshotVersion);
}
| 3.26 |
hudi_LSMTimeline_getManifestFilePathFilter_rdh
|
/**
* Returns a path filter for the manifest files.
*/
public static PathFilter getManifestFilePathFilter() {
return path -> path.getName().startsWith(MANIFEST_FILE_PREFIX) && (!path.getName().endsWith(TEMP_FILE_SUFFIX));
}
| 3.26 |
hudi_LSMTimeline_listAllMetaFiles_rdh
|
/**
* List all the parquet metadata files.
*/
public static FileStatus[] listAllMetaFiles(HoodieTableMetaClient metaClient) throws IOException {
return metaClient.getFs().globStatus(new Path(metaClient.getArchivePath() + "/*.parquet"));
}
| 3.26 |
hudi_LSMTimeline_getFileLayer_rdh
|
/**
* Parse the layer number from the file name.
*/
public static int getFileLayer(String fileName) {
try {
Matcher fileMatcher = ARCHIVE_FILE_PATTERN.matcher(fileName);
if (fileMatcher.matches()) {
return Integer.parseInt(fileMatcher.group(3));
}
} catch (NumberFormatException e) {
// log and ignore any format warnings
LOG.warn("error getting file layout for archived file: " + fileName);
}
// return default value in case of any errors
return 0;
}
| 3.26 |
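The file-name parsers above (`getMinInstantTime`, `getMaxInstantTime`, `getFileLayer`, `isFileFromLayer`) can be exercised like this. The sample name assumes the `<minInstant>_<maxInstant>_<layer>.parquet` layout implied by `ARCHIVE_FILE_PATTERN`; it is not taken from a real table.

```java
String fileName = "20230801120000000_20230801130000000_0.parquet"; // assumed layout
String minInstant = LSMTimeline.getMinInstantTime(fileName); // "20230801120000000"
String maxInstant = LSMTimeline.getMaxInstantTime(fileName); // "20230801130000000"
int layer = LSMTimeline.getFileLayer(fileName);              // 0
boolean isBaseLayer = LSMTimeline.isFileFromLayer(fileName, 0); // true
```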
hudi_LSMTimeline_getManifestVersion_rdh
|
/**
* Parse the snapshot version from the manifest file name.
*/
public static int getManifestVersion(String fileName) {
return Integer.parseInt(fileName.split("_")[1]);
}
| 3.26 |
hudi_LSMTimeline_isFileInRange_rdh
|
/**
* Returns whether the given file's instant range falls within the given time range filter.
*/
public static boolean isFileInRange(HoodieArchivedTimeline.TimeRangeFilter filter, String fileName) {
String minInstant = getMinInstantTime(fileName);
String maxInstant = getMaxInstantTime(fileName);
return filter.isInRange(minInstant) || filter.isInRange(maxInstant);
}
| 3.26 |
hudi_LSMTimeline_listAllManifestFiles_rdh
|
/**
* List all the parquet manifest files.
*/
public static FileStatus[] listAllManifestFiles(HoodieTableMetaClient metaClient) throws IOException {
return metaClient.getFs().listStatus(new Path(metaClient.getArchivePath()), getManifestFilePathFilter());
}
| 3.26 |
hudi_DFSPathSelector_getNextFilePathsAndMaxModificationTime_rdh
|
/**
* Get the list of files changed since last checkpoint.
*
* @param lastCheckpointStr
* the last checkpoint time string, empty if first run
* @param sourceLimit
* max bytes to read each time
* @return the list of files concatenated and their latest modified time
*/
@Deprecated
public Pair<Option<String>, String> getNextFilePathsAndMaxModificationTime(Option<String> lastCheckpointStr, long sourceLimit) {
try {
// obtain all eligible files under root folder.
log.info((("Root path => " + getStringWithAltKeys(props, DFSPathSelectorConfig.ROOT_INPUT_PATH)) + " source limit => ") + sourceLimit);
long lastCheckpointTime = lastCheckpointStr.map(Long::parseLong).orElse(Long.MIN_VALUE);
List<FileStatus> eligibleFiles = listEligibleFiles(fs, new Path(getStringWithAltKeys(props, DFSPathSelectorConfig.ROOT_INPUT_PATH)), lastCheckpointTime);
// sort them by modification time.
eligibleFiles.sort(Comparator.comparingLong(FileStatus::getModificationTime));
// Filter based on checkpoint & input size, if needed
long currentBytes = 0;
long newCheckpointTime = lastCheckpointTime;
List<FileStatus> filteredFiles = new ArrayList<>();
for (FileStatus f : eligibleFiles) {
if (((currentBytes + f.getLen()) >= sourceLimit) && (f.getModificationTime() > newCheckpointTime)) {
// we have enough data, we are done
// Also, we've read up to a file with a newer modification time
// so that some files with the same modification time won't be skipped in next read
break;
}
newCheckpointTime = f.getModificationTime();
currentBytes += f.getLen();
filteredFiles.add(f);
}
// no data to read
if (filteredFiles.isEmpty()) {
return new ImmutablePair<>(Option.empty(), String.valueOf(newCheckpointTime));
}
// read the files out.
String pathStr = filteredFiles.stream().map(f -> f.getPath().toString()).collect(Collectors.joining(","));
return new ImmutablePair<>(Option.ofNullable(pathStr), String.valueOf(newCheckpointTime));
} catch (IOException ioe) {
throw new HoodieIOException("Unable to read from source from checkpoint: " + lastCheckpointStr, ioe);}
}
| 3.26 |
hudi_DFSPathSelector_listEligibleFiles_rdh
|
/**
* List files recursively, filtering out ineligible files/directories while doing so.
*/
protected List<FileStatus> listEligibleFiles(FileSystem fs, Path path, long lastCheckpointTime) throws IOException {
// skip files/dirs whose names start with (_, ., etc)
FileStatus[] statuses = fs.listStatus(path, file -> IGNORE_FILEPREFIX_LIST.stream().noneMatch(pfx -> file.getName().startsWith(pfx)));
List<FileStatus> res = new ArrayList<>();
for (FileStatus status : statuses) {
if (status.isDirectory()) {
// avoid infinite loop
if (!status.isSymlink()) {
res.addAll(listEligibleFiles(fs, status.getPath(), lastCheckpointTime));
}
} else if ((status.getModificationTime() > lastCheckpointTime) && (status.getLen() > 0)) {
res.add(status);
}
}
return res;
}
| 3.26 |
hudi_DFSPathSelector_m0_rdh
|
/**
* Factory method for creating custom DFSPathSelector. Default selector
* to use is {@link DFSPathSelector}
*/
public static DFSPathSelector m0(TypedProperties props, Configuration conf) {
String sourceSelectorClass = getStringWithAltKeys(props, DFSPathSelectorConfig.SOURCE_INPUT_SELECTOR, DFSPathSelector.class.getName());
try {
DFSPathSelector selector = ((DFSPathSelector) (ReflectionUtils.loadClass(sourceSelectorClass, new Class<?>[]{ TypedProperties.class, Configuration.class }, props, conf)));
log.info("Using path selector " + selector.getClass().getName());
return selector;
} catch (Exception e) {
throw new HoodieException("Could not load source selector class " + sourceSelectorClass, e);
}
}
| 3.26 |
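A single polling round with the selector API above (constructed through the factory shown last, named `m0` in this dump); `props`, `hadoopConf`, and the 128 MB source limit are assumptions.

```java
import org.apache.hudi.common.util.Option;
import org.apache.hudi.common.util.collection.Pair;

DFSPathSelector selector = DFSPathSelector.m0(props, hadoopConf); // factory shown above
Pair<Option<String>, String> batch =
    selector.getNextFilePathsAndMaxModificationTime(Option.empty(), 128 * 1024 * 1024L);
if (batch.getLeft().isPresent()) {
  // comma-separated list of files modified since the checkpoint
  for (String path : batch.getLeft().get().split(",")) {
    // hand each file to the ingestion reader ...
  }
}
String nextCheckpoint = batch.getRight(); // persist for the next round
```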
hudi_HoodieMetadataLogRecordReader_getRecordsByKeys_rdh
|
/**
* Fetches records identified by the provided list of keys in case these are present in
* the delta-log blocks
*/
@SuppressWarnings("unchecked")
public Map<String, HoodieRecord<HoodieMetadataPayload>> getRecordsByKeys(List<String> sortedKeys) {
if (sortedKeys.isEmpty()) {
return Collections.emptyMap();
}
// NOTE: Locking is necessary since we're accessing [[HoodieMetadataLogRecordReader]]
// materialized state, to make sure there's no concurrent access
synchronized(this) {
logRecordScanner.scanByFullKeys(sortedKeys);
Map<String, HoodieRecord> allRecords = logRecordScanner.getRecords();
return sortedKeys.stream().map(key -> ((HoodieRecord<HoodieMetadataPayload>) (allRecords.get(key)))).filter(Objects::nonNull).collect(Collectors.toMap(HoodieRecord::getRecordKey, r -> r));
}
}
| 3.26 |
hudi_HoodieMetadataLogRecordReader_newBuilder_rdh
|
/**
* Returns the builder for {@code HoodieMetadataMergedLogRecordScanner}.
*/
public static HoodieMetadataLogRecordReader.Builder newBuilder() {
return new HoodieMetadataLogRecordReader.Builder();
}
| 3.26 |
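A lookup sketch for the reader API above, assuming `logRecordReader` was built elsewhere via `newBuilder()`; the keys are illustrative and must be pre-sorted, as the argument name suggests.

```java
import java.util.Arrays;
import java.util.List;
import java.util.Map;

List<String> sortedKeys = Arrays.asList("2023/08/01", "2023/08/02"); // pre-sorted keys
Map<String, HoodieRecord<HoodieMetadataPayload>> records = logRecordReader.getRecordsByKeys(sortedKeys);
records.forEach((key, record) -> System.out.println(key + " -> " + record.getData()));
```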
hudi_BitCaskDiskMap_sizeOfFileOnDiskInBytes_rdh
|
/**
* Number of bytes spilled to disk.
*/
@Override
public long sizeOfFileOnDiskInBytes() {
return filePosition.get();
}
| 3.26 |
hudi_BitCaskDiskMap_m0_rdh
|
/**
* RandomAccessFile is not thread-safe. This API opens a new file handle per thread and returns.
*
* @return a thread-local read handle for the spill file.
*/
private BufferedRandomAccessFile m0() {
try {
BufferedRandomAccessFile readHandle = randomAccessFile.get();
if (readHandle == null) {
readHandle = new BufferedRandomAccessFile(filePath, "r", BUFFER_SIZE);
readHandle.seek(0);
randomAccessFile.set(readHandle);
openedAccessFiles.offer(readHandle);
}
return readHandle;
} catch (IOException ioe) {
throw new HoodieException(ioe);
}
}
| 3.26 |
hudi_HoodieFunctionalIndexConfig_storeProperties_rdh
|
/**
* Write the properties to the given output stream and return the table checksum.
*
* @param props
* - properties to be written
* @param outputStream
* - output stream to which properties will be written
* @return return the table checksum
*/
private static String storeProperties(Properties props, FSDataOutputStream outputStream) throws IOException {
final String checksum;
if (m0(props)) {
checksum = props.getProperty(INDEX_DEFINITION_CHECKSUM.key());
props.store(outputStream, "Updated at " + Instant.now());
} else {
Properties propsWithChecksum = getOrderedPropertiesWithTableChecksum(props);
propsWithChecksum.store(outputStream, "Properties saved on " + Instant.now());checksum = propsWithChecksum.getProperty(INDEX_DEFINITION_CHECKSUM.key());
props.setProperty(INDEX_DEFINITION_CHECKSUM.key(), checksum);
}
return checksum;
}
| 3.26 |
hudi_TwoToOneDowngradeHandler_convertToDirectMarkers_rdh
|
/**
* Converts the markers in new format(timeline server based) to old format of direct markers,
* i.e., one marker file per data file, without MARKERS.type file.
* This needs to be idempotent.
* 1. read all markers from timeline server based marker files
* 2. create direct style markers
* 3. delete marker type file
* 4. delete timeline server based marker files
*
* @param commitInstantTime
* instant of interest for marker conversion.
* @param table
* instance of {@link HoodieTable} to use
* @param context
* instance of {@link HoodieEngineContext} to use
* @param parallelism
* parallelism to use
*/
private void convertToDirectMarkers(final String commitInstantTime, HoodieTable table, HoodieEngineContext context, int parallelism) throws IOException {
String markerDir = table.getMetaClient().getMarkerFolderPath(commitInstantTime);
FileSystem fileSystem = FSUtils.getFs(markerDir, context.getHadoopConf().newCopy());
Option<MarkerType> markerTypeOption = MarkerUtils.readMarkerType(fileSystem, markerDir);
if (markerTypeOption.isPresent()) {
switch (markerTypeOption.get()) {
case TIMELINE_SERVER_BASED :
// Reads all markers written by the timeline server
Map<String, Set<String>> markersMap = MarkerUtils.readTimelineServerBasedMarkersFromFileSystem(markerDir, fileSystem, context, parallelism);
DirectWriteMarkers directWriteMarkers = new DirectWriteMarkers(table, commitInstantTime);
// Recreates the markers in the direct format
markersMap.values().stream().flatMap(Collection::stream).forEach(directWriteMarkers::create);
// Deletes marker type file
MarkerUtils.deleteMarkerTypeFile(fileSystem, markerDir);
// Deletes timeline server based markers
deleteTimelineBasedMarkerFiles(context, markerDir, fileSystem, parallelism);
break;
default :
throw new HoodieException(("The marker type \"" + markerTypeOption.get().name()) + "\" is not supported for rollback.");
}
} else if (fileSystem.exists(new Path(markerDir))) {
// In case of partial failures during downgrade, there is a chance that marker type file was deleted,
// but timeline server based marker files are left. So deletes them if any
deleteTimelineBasedMarkerFiles(context, markerDir, fileSystem, parallelism);
}
}
| 3.26 |
hudi_HoodieInstant_getFileName_rdh
|
/**
* Get the filename for this instant.
*/
public String getFileName() {
if (isCompleted()) {
return getCompleteFileName(completionTime);
}
return getPendingFileName();
}
| 3.26 |
hudi_HoodieInstant_m0_rdh
|
/**
* Get the filename for this instant.
*/
public String m0(String completionTime) {
ValidationUtils.checkState(isCompleted());
return getCompleteFileName(completionTime);
}
| 3.26 |
hudi_SchemaChangeUtils_applyTableChange2Type_rdh
|
/**
* Apply all the DDL update operations to type to produce a new internalSchema.
* do not call this method directly. expose this method only for UT.
*
* @param type
* origin internalSchema.
* @param updates
* a wrapper class for all the DDL update operations.
* @return a new internalSchema.
*/
private static Type applyTableChange2Type(Type type, TableChanges.ColumnUpdateChange updates) {
switch (type.typeId()) {
case RECORD :
Types.RecordType record = ((Types.RecordType) (type));
List<Type> newTypes = new ArrayList<>();
for (Types.Field v32 : record.fields()) {
Type newType = applyTableChange2Type(v32.type(), updates);
newTypes.add(updates.applyUpdates(v32, newType));
}
List<Types.Field> newFields = new ArrayList<>();
for (int i = 0; i < newTypes.size(); i++) {
Type newType = newTypes.get(i);
Types.Field oldField = record.fields().get(i);
Types.Field updateField = updates.getUpdates().get(oldField.fieldId());
if (updateField != null) {
newFields.add(Types.Field.get(oldField.fieldId(), updateField.isOptional(), updateField.name(), newType, updateField.doc()));
} else if (!oldField.type().equals(newType)) {
newFields.add(Types.Field.get(oldField.fieldId(), oldField.isOptional(), oldField.name(), newType, oldField.doc()));
} else {
newFields.add(oldField);
}
}
return Types.RecordType.get(newFields, record.name());
case ARRAY :
Types.ArrayType array = ((Types.ArrayType) (type));
Type newElementType;
Types.Field elementField = array.fields().get(0);
newElementType = applyTableChange2Type(array.elementType(), updates);
newElementType = updates.applyUpdates(elementField, newElementType);
Types.Field v42 = updates.getUpdates().get(elementField.fieldId());
boolean optional = (v42 == null) ? array.isElementOptional() : v42.isOptional();
if ((optional == elementField.isOptional()) && (array.elementType() == newElementType)) {
return array;
}
return Types.ArrayType.get(array.elementId(), optional, newElementType);
case MAP :
Types.MapType map = ((Types.MapType) (type));
Types.Field valueFiled = map.fields().get(1);
Type newValueType;
newValueType = applyTableChange2Type(map.valueType(), updates);
newValueType = updates.applyUpdates(valueFiled, newValueType);
Types.Field valueUpdate = updates.getUpdates().get(valueFiled.fieldId());
boolean valueOptional = (valueUpdate == null) ? map.isValueOptional() : valueUpdate.isOptional();
if ((valueOptional == map.isValueOptional()) && (map.valueType() == newValueType)) {
return map;
}
return Types.MapType.get(map.keyId(), map.valueId(), map.keyType(), newValueType, valueOptional);
default :
return type;
}
}
| 3.26 |
hudi_SchemaChangeUtils_isTypeUpdateAllow_rdh
|
/**
* Whether to allow the column type to be updated.
* now only support:
* int => long/float/double/String/Decimal
* long => float/double/String/Decimal
* float => double/String/Decimal
* double => String/Decimal
* Decimal => Decimal/String
* String => date/decimal
* date => String
* TODO: support more type update.
*
* @param src
* origin column type.
* @param dsr
* new column type.
* @return whether to allow the column type to be updated.
*/
public static boolean isTypeUpdateAllow(Type src, Type dsr) {
if (src.isNestedType() || dsr.isNestedType()) {
throw new IllegalArgumentException("only support update primitive type");
}
if (src.equals(dsr)) {
return true;
}
return isTypeUpdateAllowInternal(src, dsr);
}
| 3.26 |
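Spot checks against the promotion rules listed in the javadoc above; a sketch assuming the `Types` factories of Hudi's InternalSchema type system (`IntType.get()`, `LongType.get()`, ...).

```java
import org.apache.hudi.internal.schema.Types;

boolean intToLong = SchemaChangeUtils.isTypeUpdateAllow(Types.IntType.get(), Types.LongType.get());     // true: widening
boolean doubleToInt = SchemaChangeUtils.isTypeUpdateAllow(Types.DoubleType.get(), Types.IntType.get()); // false: narrowing
boolean sameType = SchemaChangeUtils.isTypeUpdateAllow(Types.StringType.get(), Types.StringType.get()); // true: no-op
// Passing a nested type on either side throws IllegalArgumentException.
```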
hudi_SchemaChangeUtils_applyTableChanges2Schema_rdh
|
/**
* Apply all the DDL update operations to internalSchema to produce a new internalSchema.
*
* @param internalSchema
* origin internalSchema.
* @param updates
* a wrapper class for all the DDL update operations.
* @return a new internalSchema.
*/
public static InternalSchema applyTableChanges2Schema(InternalSchema internalSchema, TableChanges.ColumnUpdateChange updates) {
Types.RecordType newType = ((Types.RecordType) (applyTableChange2Type(internalSchema.getRecord(), updates)));
// deal with root level changes
List<Types.Field> newFields = TableChangesHelper.applyAddChange2Fields(newType.fields(), new ArrayList<>(), updates.getPositionChangeMap().get(-1));
return new InternalSchema(Types.RecordType.get(newFields, newType.name()));
}
| 3.26 |
hudi_LazyIterableIterator_start_rdh
|
/**
* Called once, before any elements are processed.
*/
protected void start() {
}
| 3.26 |
hudi_LazyIterableIterator_end_rdh
|
/**
* Called once, after all elements are processed.
*/
protected void end() {
}
| 3.26 |
hudi_LazyIterableIterator_invokeStartIfNeeded_rdh
|
// ////////////////
// iterable implementation
private void invokeStartIfNeeded() {
if (!startCalled) {
startCalled = true;
try {
start();
} catch (Exception e) {
throw new RuntimeException("Error in start()");
}
}
}
| 3.26 |
hudi_LazyIterableIterator_hasNext_rdh
|
// ////////////////
// inputItr implementation
@Override
public boolean hasNext() {
boolean ret = inputItr.hasNext();
// make sure, there is exactly one call to start()
invokeStartIfNeeded();
if (!ret) {
// if we are out of elements, and end has not been called yet
invokeEndIfNeeded();
}
return ret;}
| 3.26 |
hudi_LegacyArchivedMetaEntryReader_loadInstants_rdh
|
/**
* This is a method to read selected instants. Do NOT use this directly; use one of the helper methods above.
* If loadInstantDetails is set to true, this would also update 'readCommits' map with commit details
* If filter is specified, only the filtered instants are loaded
* If commitsFilter is specified, only the filtered records are loaded.
*/
private ClosableIterator<ActiveAction> loadInstants(HoodieArchivedTimeline.TimeRangeFilter filter) {
try {
// List all files
FileStatus[] fsStatuses = metaClient.getFs().globStatus(new Path(metaClient.getArchivePath() + "/.commits_.archive*"));
// Sort files by version suffix in reverse (implies reverse chronological order)
Arrays.sort(fsStatuses, new ArchiveLogVersionComparator());
ClosableIterator<HoodieRecord<IndexedRecord>> itr = getRecordIterator(fsStatuses);
return new ClosableIterator<ActiveAction>() {
private ActiveAction activeAction;
private Pair<HoodieInstant, Option<byte[]>> nextInstantAndDetail;
@Override
public void close() {
itr.close();
}
@Override
public boolean hasNext() {
List<Pair<HoodieInstant, Option<byte[]>>> instantAndDetails = new ArrayList<>();
String lastInstantTime = null;
if (nextInstantAndDetail != null) {
instantAndDetails.add(nextInstantAndDetail);
lastInstantTime = nextInstantAndDetail.getKey().getTimestamp();
nextInstantAndDetail = null;
}
while (itr.hasNext()) {
HoodieRecord<IndexedRecord> record = itr.next();
Pair<HoodieInstant, Option<byte[]>> instantAndDetail = readInstant(((GenericRecord) (record.getData())));
String instantTime = instantAndDetail.getKey().getTimestamp();
if ((filter == null) || filter.isInRange(instantTime)) {
if (lastInstantTime == null) {
instantAndDetails.add(instantAndDetail);
lastInstantTime = instantTime;
} else if (lastInstantTime.equals(instantTime)) {
instantAndDetails.add(instantAndDetail);
} else {
nextInstantAndDetail = instantAndDetail;
break;
}
}
}
if (!instantAndDetails.isEmpty()) {
this.activeAction = ActiveActionWithDetails.fromInstantAndDetails(instantAndDetails);
return true;
}
return false;
}
@Override
public ActiveAction next() {
return this.activeAction;
}
};
} catch (IOException e) {
throw new HoodieIOException("Could not load archived commit timeline from path " + metaClient.getArchivePath(), e);
}
}
| 3.26 |
hudi_LegacyArchivedMetaEntryReader_getRecordIterator_rdh
|
/**
* Returns the avro record iterator with given file statuses.
*/
private ClosableIterator<HoodieRecord<IndexedRecord>> getRecordIterator(FileStatus[] fsStatuses) throws IOException {
return new ClosableIterator<HoodieRecord<IndexedRecord>>() {
final Iterator<FileStatus> fsItr = Arrays.asList(fsStatuses).iterator();
Reader reader;
ClosableIterator<HoodieRecord<IndexedRecord>> recordItr;
@Override
public void close() {
if (this.reader != null) {
closeLogFormatReader(reader);
}
}
@Override
public boolean hasNext() {
if ((recordItr != null) && recordItr.hasNext()) {
return true;
}
// new reader if possible
if (reader != null) {
while (reader.hasNext()) {
HoodieLogBlock block = reader.next();
if (block instanceof HoodieAvroDataBlock) {
HoodieAvroDataBlock avroBlock = ((HoodieAvroDataBlock) (block));
recordItr = avroBlock.getRecordIterator(HoodieRecordType.AVRO);
if (recordItr.hasNext()) {
return true;
}
}
}
// no records in the reader, close the reader
closeLogFormatReader(reader);
reader = null;
}
// new reader
while (fsItr.hasNext()) {
FileStatus fs = fsItr.next();
try {
reader = HoodieLogFormat.newReader(metaClient.getFs(), new HoodieLogFile(fs.getPath()), HoodieArchivedMetaEntry.getClassSchema());
} catch (IOException ioe) {
throw new HoodieIOException("Error initializing the reader for archived log: " + fs.getPath(), ioe);
}
while (reader.hasNext()) {
HoodieLogBlock block = reader.next();
if (block instanceof HoodieAvroDataBlock) {
HoodieAvroDataBlock avroBlock = ((HoodieAvroDataBlock) (block));
recordItr = avroBlock.getRecordIterator(HoodieRecordType.AVRO);
if (recordItr.hasNext()) {
return true;
}
}
}
}
return false;
}
@Override
public HoodieRecord<IndexedRecord> next() {
return this.recordItr.next();
}
};
}
| 3.26 |
hudi_LegacyArchivedMetaEntryReader_readInstant_rdh
|
/**
* Reads the avro record for instant and details.
*/
private Pair<HoodieInstant, Option<byte[]>> readInstant(GenericRecord record) {
final String instantTime = record.get(HoodiePartitionMetadata.COMMIT_TIME_KEY).toString();
final String action = record.get(ACTION_TYPE_KEY).toString();
final String stateTransitionTime = ((String) (record.get(STATE_TRANSITION_TIME)));
final Option<byte[]> details = getMetadataKey(action).map(key -> {
Object actionData = record.get(key);
if (actionData != null) {
if (action.equals(HoodieTimeline.COMPACTION_ACTION)) {
return HoodieAvroUtils.indexedRecordToBytes(((IndexedRecord) (actionData)));
} else {
return getUTF8Bytes(actionData.toString());
}
}
return null;
});
HoodieInstant instant = new HoodieInstant(HoodieInstant.State.valueOf(record.get(ACTION_STATE).toString()), action, instantTime, stateTransitionTime);
return Pair.of(instant, details);
}
| 3.26 |
hudi_MiniBatchHandle_finalizeWrite_rdh
|
/**
* Finalize the write of one mini-batch. Usually these mini-batches
* come from one checkpoint interval. The file handle may roll over to a new name
* if the name conflicts, giving a chance to clean up the intermediate file.
*/
default void finalizeWrite() {
}
| 3.26 |
hudi_HoodieJavaPairRDD_of_rdh
|
/**
*
* @param pairRDDData
* a {@link JavaPairRDD} of pairs.
* @param <K>
* type of key.
* @param <V>
* type of value.
* @return a new instance containing the {@link JavaPairRDD<K, V>} reference.
*/
public static <K, V> HoodieJavaPairRDD<K, V> of(JavaPairRDD<K, V> pairRDDData) {
return new HoodieJavaPairRDD<>(pairRDDData);
}
| 3.26 |
hudi_HoodieJavaPairRDD_getJavaPairRDD_rdh
|
/**
*
* @param hoodiePairData
* {@link HoodieJavaPairRDD <K, V>} instance containing the {@link JavaPairRDD} of pairs.
* @param <K>
* type of key.
* @param <V>
* type of value.
* @return the {@link JavaPairRDD} of pairs.
*/
public static <K, V> JavaPairRDD<K, V> getJavaPairRDD(HoodiePairData<K, V> hoodiePairData) {
return ((HoodieJavaPairRDD<K, V>) (hoodiePairData)).get();
}
| 3.26 |
hudi_Base64CodecUtil_encode_rdh
|
/**
* Encodes all bytes from the specified byte array into String using StandardCharsets.UTF_8.
*
* @param data
* byte[] source data
* @return base64 encoded data
*/
public static String encode(byte[] data) {
return new String(Base64.getEncoder().encode(data), StandardCharsets.UTF_8);
}
| 3.26 |
hudi_Base64CodecUtil_m0_rdh
|
/**
* Decodes data from the input string using the Base64 encoding scheme.
*
* @param encodedString
* - Base64 encoded string to decode
* @return A newly-allocated byte array containing the decoded bytes.
*/
public static byte[] m0(String encodedString) {
return Base64.getDecoder().decode(getUTF8Bytes(encodedString));
}
| 3.26 |
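A round trip through the codec above (`m0` is the decode helper as named in this snippet).

```java
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

byte[] original = "hoodie".getBytes(StandardCharsets.UTF_8);
String encoded = Base64CodecUtil.encode(original);
byte[] decoded = Base64CodecUtil.m0(encoded);
boolean roundTripped = Arrays.equals(original, decoded); // true
```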
hudi_HoodieMetaserver_getMetaserverStorage_rdh
|
// only for test
public static MetaserverStorage getMetaserverStorage() {
return metaserverStorage;
}
| 3.26 |
hudi_HoodieDropPartitionsTool_printDeleteFilesInfo_rdh
|
/**
* Prints the delete data files info.
*
* @param partitionToReplaceFileIds
* mapping of partition path to the data file IDs to be deleted.
*/
private void printDeleteFilesInfo(Map<String, List<String>> partitionToReplaceFileIds) {
LOG.info("Data files and partitions to delete : ");
for (Map.Entry<String, List<String>> entry : partitionToReplaceFileIds.entrySet()) {
LOG.info(String.format("Partitions : %s, corresponding data file IDs : %s", entry.getKey(), entry.getValue()));
}
}
| 3.26 |
hudi_HoodieDropPartitionsTool_readConfigFromFileSystem_rdh
|
/**
* Reads config from the file system.
*
* @param jsc
* {@link JavaSparkContext} instance.
* @param cfg
* {@link Config} instance.
* @return the {@link TypedProperties} instance.
*/
private TypedProperties readConfigFromFileSystem(JavaSparkContext jsc, Config cfg) {
return UtilHelpers.readConfig(jsc.hadoopConfiguration(), new Path(cfg.propsFilePath), cfg.configs).getProps(true);
}
| 3.26 |
hudi_TableServicePipeline_add_rdh
|
/**
 * Adds a table service task to the end of the table service pipeline. Tasks are executed in FIFO order.
*
* @param task
* table service task to run in pipeline.
*/
public void add(TableServiceTask task) {
tableServiceTasks.add(task);
}
| 3.26 |
hudi_TableServicePipeline_execute_rdh
|
/**
 * Runs all table service tasks sequentially.
*/
public void execute() {
tableServiceTasks.forEach(TableServiceTask::run);
}
| 3.26 |
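A hedged usage sketch for the two pipeline methods above, assuming TableServicePipeline has a no-argument constructor and that TableServiceTask can be implemented anonymously with only run() to override; the printed messages stand in for real clustering/cleaning tasks.

public class PipelineExample {
  public static void main(String[] args) {
    TableServicePipeline pipeline = new TableServicePipeline();
    pipeline.add(new TableServiceTask() {
      @Override
      public void run() {
        System.out.println("running clustering task");
      }
    });
    pipeline.add(new TableServiceTask() {
      @Override
      public void run() {
        System.out.println("running cleaning task");
      }
    });
    // Tasks run sequentially in the order they were added (FIFO).
    pipeline.execute();
  }
}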
hudi_ExternalSpillableMap_m0_rdh
|
/**
* Number of bytes spilled to disk.
*/
public long m0() {
return getDiskBasedMap().sizeOfFileOnDiskInBytes();
}
| 3.26 |
hudi_ExternalSpillableMap_getInMemoryMapNumEntries_rdh
|
/**
* Number of entries in InMemoryMap.
*/
public int getInMemoryMapNumEntries() {
return inMemoryMap.size();
}
| 3.26 |
hudi_ExternalSpillableMap_getDiskBasedMapNumEntries_rdh
|
/**
* Number of entries in BitCaskDiskMap.
*/
public int getDiskBasedMapNumEntries() {
return getDiskBasedMap().size();
}
| 3.26 |
hudi_ExternalSpillableMap_getCurrentInMemoryMapSize_rdh
|
/**
* Approximate memory footprint of the in-memory map.
*/
public long getCurrentInMemoryMapSize() {
return f1;
}
| 3.26 |
hudi_ExternalSpillableMap_iterator_rdh
|
/**
* A custom iterator to wrap over iterating in-memory + disk spilled data.
*/
public Iterator<R> iterator() {
return new IteratorWrapper<>(inMemoryMap.values().iterator(), getDiskBasedMap().iterator());
}
| 3.26 |
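A sketch of inspecting spill statistics through the accessors above on an already-populated map; construction is elided because the constructor arguments (memory budget, spill path, size estimators) are not shown in this dump, and m0() is the obfuscated name of the bytes-spilled-to-disk accessor.

public class SpillStatsReporter {
  // Prints how much of the map lives in memory versus on disk.
  static void reportSpillStats(ExternalSpillableMap<String, String> records) {
    System.out.println("in-memory entries  : " + records.getInMemoryMapNumEntries());
    System.out.println("spilled entries    : " + records.getDiskBasedMapNumEntries());
    System.out.println("in-memory size (B) : " + records.getCurrentInMemoryMapSize());
    System.out.println("spilled size (B)   : " + records.m0());
    // The iterator transparently chains in-memory values with the spilled ones.
    records.iterator().forEachRemaining(value -> { /* process each value */ });
  }
}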
hudi_FileIOUtils_getConfiguredLocalDirs_rdh
|
/**
 * Return the configured local directories where Hudi can write files. This
 * method does not create any directories on its own; it only encapsulates the
* logic of locating the local directories according to deployment mode.
*/
public static String[] getConfiguredLocalDirs() {
if (m0()) {
// If we are in yarn mode, systems can have different disk layouts so we must set it
// to what Yarn on this system said was available. Note this assumes that Yarn has
// created the directories already, and that they are secured so that only the
// user has access to them.
return getYarnLocalDirs().split(",");
} else if (System.getProperty("java.io.tmpdir") != null) {
return System.getProperty("java.io.tmpdir").split(",");
    } else {
return null;
}
}
| 3.26 |
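A defensive usage sketch: as the code above shows, the method can return null when neither Yarn's LOCAL_DIRS nor java.io.tmpdir is available, so callers should fall back explicitly; the /tmp fallback is just an illustrative choice.

public class LocalDirsExample {
  public static void main(String[] args) {
    String[] localDirs = FileIOUtils.getConfiguredLocalDirs();
    // Fall back to a fixed directory when no configured local dir is available.
    String spillDir = (localDirs != null && localDirs.length > 0) ? localDirs[0] : "/tmp";
    System.out.println("Using local spill dir: " + spillDir);
  }
}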
hudi_FileIOUtils_copy_rdh
|
/**
* Copies the file content from source path to destination path.
*
* @param fileSystem
* {@link FileSystem} instance.
* @param sourceFilePath
* Source file path.
* @param destFilePath
* Destination file path.
*/
public static void copy(FileSystem fileSystem, Path sourceFilePath, Path destFilePath) {
FSDataInputStream fsDataInputStream = null;
FSDataOutputStream fsDataOutputStream = null;
try {
fsDataInputStream = fileSystem.open(sourceFilePath);
fsDataOutputStream = fileSystem.create(destFilePath, false);
copy(fsDataInputStream, fsDataOutputStream);
} catch (IOException e) {
throw new HoodieIOException(String.format("Cannot copy from %s to %s", sourceFilePath.toString(), destFilePath.toString()), e);
} finally {
closeQuietly(fsDataInputStream);
closeQuietly(fsDataOutputStream);
}
}
| 3.26 |
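A usage sketch for the copy helper above; the Hadoop configuration and the source/destination paths are illustrative assumptions. Note the destination must not already exist, since the helper calls create(destFilePath, false).

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CopyExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path source = new Path("/tmp/hoodie/source.parquet");
    Path target = new Path("/tmp/hoodie/backup/source.parquet");
    FileSystem fs = source.getFileSystem(conf);
    // Throws HoodieIOException if the copy fails, e.g. when the target already exists.
    FileIOUtils.copy(fs, source, target);
  }
}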
hudi_FileIOUtils_readAsUTFStringLines_rdh
|
/**
* Reads the input stream into String lines.
*
* @param input
* {@code InputStream} instance.
* @return String lines in a list.
*/
public static List<String> readAsUTFStringLines(InputStream input) {
BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(input, StandardCharsets.UTF_8));
List<String> lines = bufferedReader.lines().collect(Collectors.toList());
closeQuietly(bufferedReader);
return lines;
}
| 3.26 |
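A small sketch reading a local file into lines through the helper above; the file path is illustrative. The helper itself closes the reader (and thus the wrapped stream) via closeQuietly, so the outer try-with-resources is only a belt-and-braces safeguard.

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.List;

public class ReadLinesExample {
  public static void main(String[] args) throws IOException {
    try (InputStream in = Files.newInputStream(Paths.get("/tmp/hoodie/example.properties"))) {
      List<String> lines = FileIOUtils.readAsUTFStringLines(in);
      lines.forEach(System.out::println);
    }
  }
}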
hudi_FileIOUtils_getYarnLocalDirs_rdh
|
/**
* Get the Yarn approved local directories.
*/
private static String getYarnLocalDirs() {
String localDirs = System.getenv("LOCAL_DIRS");
if (localDirs == null) {
throw new HoodieIOException("Yarn Local dirs can't be empty");
}
return localDirs;
}
| 3.26 |
hudi_FileIOUtils_closeQuietly_rdh
|
/**
* Closes {@code Closeable} quietly.
*
* @param closeable
* {@code Closeable} to close
*/
public static void closeQuietly(Closeable closeable) {
if (closeable == null) {
return;
}
try {
closeable.close();
} catch (IOException e) {
LOG.warn("IOException during close", e);
}
}
| 3.26 |
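A sketch of the open/use/close-quietly pattern the helper above supports; the file system and path come from the surrounding context and are assumptions here.

import java.io.IOException;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CloseQuietlyExample {
  static int readFirstByte(FileSystem fs, Path path) throws IOException {
    FSDataInputStream in = null;
    try {
      in = fs.open(path);
      return in.read(); // read a single byte as a trivial example
    } finally {
      // Any IOException thrown by close() is swallowed and logged, not rethrown.
      FileIOUtils.closeQuietly(in);
    }
  }
}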
hudi_HoodieTable_maybeDeleteMetadataTable_rdh
|
/**
 * Deletes the metadata table if the writer disables the metadata table with hoodie.metadata.enable=false.
*/
public void maybeDeleteMetadataTable() {
if (shouldExecuteMetadataTableDeletion()) {
try {
LOG.info("Deleting metadata table because it is disabled in writer.");
deleteMetadataTable(config.getBasePath(), f0);
} catch (HoodieMetadataException e) {
throw new HoodieException("Failed to delete metadata table.", e);
}
}
}
| 3.26 |
hudi_HoodieTable_rollbackInflightLogCompaction_rdh
|
/**
 * Rollback failed log compactions. Inflight rollbacks for log compactions revert the .inflight file
 * to the .requested file.
 *
 * @param inflightInstant
 * 		Inflight log compaction instant
 */
public void rollbackInflightLogCompaction(HoodieInstant inflightInstant, Function<String, Option<HoodiePendingRollbackInfo>> getPendingRollbackInstantFunc) {
    final String v2 = getPendingRollbackInstantFunc.apply(inflightInstant.getTimestamp()).map(entry -> entry.getRollbackInstant().getTimestamp()).orElse(getMetaClient().createNewInstantTime());
    scheduleRollback(f0, v2, inflightInstant, false, config.shouldRollbackUsingMarkers(), false);
    rollback(f0, v2, inflightInstant, true, false);
}
| 3.26 |
hudi_HoodieTable_rollbackInflightClustering_rdh
|
/**
* Rollback inflight clustering instant to requested clustering instant
*
* @param inflightInstant
* Inflight clustering instant
* @param getPendingRollbackInstantFunc
* Function to get rollback instant
*/
public void rollbackInflightClustering(HoodieInstant inflightInstant, Function<String, Option<HoodiePendingRollbackInfo>> getPendingRollbackInstantFunc) {
ValidationUtils.checkArgument(inflightInstant.getAction().equals(HoodieTimeline.REPLACE_COMMIT_ACTION));
rollbackInflightInstant(inflightInstant, getPendingRollbackInstantFunc);
}
| 3.26 |
hudi_HoodieTable_m3_rdh
|
/**
* Get only the completed (no-inflights) savepoint timeline.
*/
public HoodieTimeline m3() {
return getActiveTimeline().getSavePointTimeline().filterCompletedInstants();
}
| 3.26 |
hudi_HoodieTable_getMetadataWriter_rdh
|
/**
 * Gets the table metadata writer for regular writes.
 * <p>
 * Note:
 * Gets the metadata writer for the given configuration. If the metadata table doesn't exist,
 * this will trigger the creation of the table and the initial bootstrapping.
 * Since this call is under the transaction lock, other concurrent writers
 * are blocked from doing similar initial metadata table creation and
 * bootstrapping.
 *
 * @param triggeringInstantTimestamp
 * 		The instant that is triggering this metadata write
 * @param failedWritesCleaningPolicy
 * 		Cleaning policy on failed writes
 * @return instance of {@link HoodieTableMetadataWriter}
 */
protected Option<HoodieTableMetadataWriter> getMetadataWriter(String triggeringInstantTimestamp, HoodieFailedWritesCleaningPolicy failedWritesCleaningPolicy) {
// Each engine is expected to override this and
// provide the actual metadata writer, if enabled.
return Option.empty();
}
| 3.26 |
hudi_HoodieTable_getInvalidDataPaths_rdh
|
/**
 * Returns the possibly invalid data file paths derived from the given marker files.
*/
protected Set<String> getInvalidDataPaths(WriteMarkers markers) throws IOException {
return markers.createdAndMergedDataPaths(f0, config.getFinalizeWriteParallelism());
}
| 3.26 |
hudi_HoodieTable_waitForAllFiles_rdh
|
/**
* Ensures all files passed either appear or disappear.
*
* @param context
* HoodieEngineContext
* @param groupByPartition
* Files grouped by partition
* @param visibility
* Appear/Disappear
*/
private void waitForAllFiles(HoodieEngineContext context, Map<String, List<Pair<String, String>>> groupByPartition, FileVisibility visibility) {
    // This ensures that the listed files either all appear or all disappear, depending on the requested visibility.
    context.setJobStatus(this.getClass().getSimpleName(), "Wait for all files to appear/disappear: " + config.getTableName());
    boolean checkPassed = context.map(new ArrayList<>(groupByPartition.entrySet()), partitionWithFileList ->
        waitForCondition(partitionWithFileList.getKey(), partitionWithFileList.getValue().stream(), visibility), config.getFinalizeWriteParallelism()).stream().allMatch(x -> x);
    if (!checkPassed) {
      throw new HoodieIOException("Consistency check failed to ensure all files " + visibility);
}
}
| 3.26 |
hudi_HoodieTable_scheduleLogCompaction_rdh
|
/**
* Schedule log compaction for the instant time.
*
* @param context
* HoodieEngineContext
* @param instantTime
* Instant Time for scheduling log compaction
* @param extraMetadata
* additional metadata to write into plan
 * @return the scheduled {@link HoodieCompactionPlan}, if any
 */
public Option<HoodieCompactionPlan> scheduleLogCompaction(HoodieEngineContext context, String instantTime, Option<Map<String, String>> extraMetadata) {
throw new UnsupportedOperationException("Log compaction is not supported for this table type");
}
| 3.26 |
hudi_HoodieTable_getCompletedCleanTimeline_rdh
|
/**
* Get only the completed (no-inflights) clean timeline.
*/
public HoodieTimeline getCompletedCleanTimeline() {
return getActiveTimeline().getCleanerTimeline().filterCompletedInstants();
}
| 3.26 |
hudi_HoodieTable_clean_rdh
|
/**
* Executes a new clean action.
*
* @return information on cleaned file slices
*/
@Deprecated
public HoodieCleanMetadata clean(HoodieEngineContext context, String cleanInstantTime, boolean skipLocking) {
return clean(context, cleanInstantTime);
}
| 3.26 |
hudi_HoodieTable_getRestoreTimeline_rdh
|
/**
* Get restore timeline.
*/
public HoodieTimeline getRestoreTimeline() {
return getActiveTimeline().getRestoreTimeline();
}
| 3.26 |
hudi_HoodieTable_getRollbackTimeline_rdh
|
/**
* Get rollback timeline.
*/
public HoodieTimeline getRollbackTimeline() {
return getActiveTimeline().getRollbackTimeline();
}
| 3.26 |
hudi_HoodieTable_getFileSystemView_rdh
|
/**
* Get the view of the file system for this table.
*/
public TableFileSystemView getFileSystemView() {
return new HoodieTableFileSystemView(metaClient, getCompletedCommitsTimeline());
}
| 3.26 |
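A hedged sketch of consuming the view returned above: it assumes a HoodieTable instance and partition path exist, and that TableFileSystemView exposes getLatestBaseFiles(partitionPath) streaming the newest base file per file group, as in recent Hudi releases.

public class FileSystemViewExample {
  static void printLatestBaseFiles(HoodieTable<?, ?, ?, ?> hoodieTable, String partitionPath) {
    TableFileSystemView fsView = hoodieTable.getFileSystemView();
    // Print the path of the newest base file in each file group of the partition.
    fsView.getLatestBaseFiles(partitionPath)
        .forEach(baseFile -> System.out.println(baseFile.getPath()));
  }
}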