name | code_snippet | score
---|---|---|
hudi_HoodieInputFormatUtils_getHoodieTimelineForIncrementalQuery_rdh
|
/**
* Get HoodieTimeline for incremental query from Hive map reduce configuration.
*
 * @param job Hive map reduce job context
 * @param tableName Hudi table name
 * @param timeline Hudi timeline to filter
 * @return the timeline filtered to instants after the last incremental timestamp
 */
public static HoodieTimeline getHoodieTimelineForIncrementalQuery(JobContext job, String tableName, HoodieTimeline timeline) {
String lastIncrementalTs = HoodieHiveUtils.readStartCommitTime(job, tableName);
// Total number of commits to return in this batch. Set this to -1 to get all the commits.
Integer maxCommits = HoodieHiveUtils.readMaxCommits(job, tableName);
LOG.info("Last Incremental timestamp was set as " + lastIncrementalTs);
return timeline.findInstantsAfter(lastIncrementalTs, maxCommits);
}
| 3.26 |
hudi_HoodieInputFormatUtils_getFilteredCommitsTimeline_rdh
|
/**
* Extract HoodieTimeline based on HoodieTableMetaClient.
*
 * @param job Hive map reduce job context
 * @param tableMetaClient meta client for the Hudi table
 * @return the filtered completed commits timeline, wrapped in an Option
 */
public static Option<HoodieTimeline> getFilteredCommitsTimeline(JobContext job, HoodieTableMetaClient tableMetaClient) {
String tableName = tableMetaClient.getTableConfig().getTableName();
HoodieDefaultTimeline baseTimeline;
if (HoodieHiveUtils.stopAtCompaction(job, tableName)) {
baseTimeline = filterInstantsTimeline(tableMetaClient.getActiveTimeline());
} else {
baseTimeline = tableMetaClient.getActiveTimeline();
}
HollowCommitHandling handlingMode = HollowCommitHandling.valueOf(job.getConfiguration().get(INCREMENTAL_READ_HANDLE_HOLLOW_COMMIT.key(), INCREMENTAL_READ_HANDLE_HOLLOW_COMMIT.defaultValue()));
HoodieTimeline filteredTimeline = handleHollowCommitIfNeeded(baseTimeline.getCommitsTimeline().filterCompletedInstants(), tableMetaClient, handlingMode);
return Option.of(filteredTimeline);
}
| 3.26 |
hudi_HoodieInputFormatUtils_getWritePartitionPaths_rdh
|
/**
* Returns all the incremental write partition paths as a set with the given commits metadata.
*
* @param metadataList
* The commits metadata
* @return the partition path set
*/
public static Set<String> getWritePartitionPaths(List<HoodieCommitMetadata> metadataList) {
return metadataList.stream().map(HoodieCommitMetadata::getWritePartitionPaths).flatMap(Collection::stream).collect(Collectors.toSet());
}
| 3.26 |
hudi_TimelineDiffHelper_getPendingCompactionTransitions_rdh
|
/**
 * Gets the pending compaction transitions between the old and new timelines.
*/
private static List<Pair<HoodieInstant, HoodieInstant>> getPendingCompactionTransitions(HoodieTimeline oldTimeline, HoodieTimeline newTimeline) {
Set<HoodieInstant> newTimelineInstants = newTimeline.getInstantsAsStream().collect(Collectors.toSet());
return oldTimeline.filterPendingCompactionTimeline().getInstantsAsStream().map(instant -> {
if (newTimelineInstants.contains(instant)) {
return Pair.of(instant, instant);
} else {
HoodieInstant compacted = new HoodieInstant(State.COMPLETED, HoodieTimeline.COMMIT_ACTION, instant.getTimestamp());
if (newTimelineInstants.contains(compacted)) {
return Pair.of(instant, compacted);
}
HoodieInstant inflightCompacted = new HoodieInstant(State.INFLIGHT, HoodieTimeline.COMPACTION_ACTION, instant.getTimestamp());
if (newTimelineInstants.contains(inflightCompacted)) {
return Pair.of(instant, inflightCompacted);
}
return Pair.<HoodieInstant, HoodieInstant>of(instant, null);
}
}).collect(Collectors.toList());
}
| 3.26 |
hudi_TimelineDiffHelper_getPendingLogCompactionTransitions_rdh
|
/**
 * Gets the pending log compaction transitions between the old and new timelines.
*/
private static List<Pair<HoodieInstant, HoodieInstant>> getPendingLogCompactionTransitions(HoodieTimeline oldTimeline, HoodieTimeline newTimeline) {
Set<HoodieInstant> newTimelineInstants = newTimeline.getInstantsAsStream().collect(Collectors.toSet());
return oldTimeline.filterPendingLogCompactionTimeline().getInstantsAsStream().map(instant -> {
if (newTimelineInstants.contains(instant)) {
return Pair.of(instant, instant);
} else {
HoodieInstant logCompacted = new HoodieInstant(State.COMPLETED, HoodieTimeline.DELTA_COMMIT_ACTION, instant.getTimestamp());
if (newTimelineInstants.contains(logCompacted)) {
return Pair.of(instant, logCompacted);
}
HoodieInstant inflightLogCompacted = new HoodieInstant(State.INFLIGHT, HoodieTimeline.LOG_COMPACTION_ACTION, instant.getTimestamp());
if (newTimelineInstants.contains(inflightLogCompacted)) {
return Pair.of(instant, inflightLogCompacted);
}
return Pair.<HoodieInstant, HoodieInstant>of(instant, null);
}
}).collect(Collectors.toList());
}
| 3.26 |
hudi_SqlQueryBuilder_on_rdh
|
/**
* Appends an ON clause to a query.
*
* @param predicate
* The predicate to join on.
* @return The {@link SqlQueryBuilder} instance.
*/
public SqlQueryBuilder on(String predicate) {
if (StringUtils.isNullOrEmpty(predicate)) {
throw new IllegalArgumentException("No predicate provided with ON clause. Please provide a predicate to join on.");
}
sqlBuilder.append(" on ");
sqlBuilder.append(predicate);
return this;
}
| 3.26 |
hudi_SqlQueryBuilder_orderBy_rdh
|
/**
* Appends an ORDER BY clause to a query. By default, records are ordered in ascending order by the given column.
* To order in descending order use DESC after the column name, e.g. queryBuilder.orderBy("update_time desc").
*
* @param columns
* Column names to order by.
* @return The {@link SqlQueryBuilder} instance.
*/
public SqlQueryBuilder orderBy(String... columns) {
if ((columns == null) || (columns.length == 0)) {
throw new IllegalArgumentException("No columns provided with ORDER BY clause. Please provide a column name to order records.");
}
sqlBuilder.append(" order by ");
sqlBuilder.append(String.join(", ", columns));
return this;
}
| 3.26 |
hudi_SqlQueryBuilder_from_rdh
|
/**
* Appends a FROM clause to a query.
*
* @param tables
* The table names to select from.
* @return The {@link SqlQueryBuilder} instance.
*/
public SqlQueryBuilder from(String... tables) {
if ((tables == null) || (tables.length == 0)) {
throw new IllegalArgumentException("No table name provided with FROM clause. Please provide a table name to select from.");
}
sqlBuilder.append(" from ");
sqlBuilder.append(String.join(", ", tables));
return this;
}
| 3.26 |
hudi_SqlQueryBuilder_where_rdh
|
/**
* Appends a WHERE clause to a query.
*
* @param predicate
* The predicate for WHERE clause.
* @return The {@link SqlQueryBuilder} instance.
*/
public SqlQueryBuilder where(String predicate) {
if (StringUtils.isNullOrEmpty(predicate)) {
throw new IllegalArgumentException("No predicate provided with WHERE clause. Please provide a predicate to filter records.");
}
sqlBuilder.append(" where ");
sqlBuilder.append(predicate);
return this;
}
| 3.26 |
hudi_SqlQueryBuilder_m0_rdh
|
/**
* Appends a "limit" clause to a query.
*
* @param count
* The limit count.
* @return The {@link SqlQueryBuilder} instance.
*/
public SqlQueryBuilder m0(long count) {
if (count < 0) {
throw new IllegalArgumentException("Please provide a positive integer for the LIMIT clause.");
}
sqlBuilder.append(" limit ");
sqlBuilder.append(count);
return this;
}
| 3.26 |
hudi_SqlQueryBuilder_select_rdh
|
/**
* Creates a SELECT query.
*
* @param columns
* The column names to select.
* @return The new {@link SqlQueryBuilder} instance.
*/
public static SqlQueryBuilder select(String... columns) {
if ((columns == null) || (columns.length == 0)) {
throw new IllegalArgumentException("No columns provided with SELECT statement. Please mention column names or '*' to select all columns.");
}
StringBuilder sqlBuilder = new StringBuilder();
sqlBuilder.append("select ");
sqlBuilder.append(String.join(", ", columns));
return new SqlQueryBuilder(sqlBuilder);
}
| 3.26 |
hudi_SqlQueryBuilder_join_rdh
|
/**
* Appends a JOIN clause to a query.
*
* @param table
* The table to join with.
* @return The {@link SqlQueryBuilder} instance.
*/
public SqlQueryBuilder join(String table) {
if (StringUtils.isNullOrEmpty(table)) {
throw new IllegalArgumentException("No table name provided with JOIN clause. Please provide a table name to join with.");
}
sqlBuilder.append(" join ");
sqlBuilder.append(table);
return this;
}
| 3.26 |
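The SqlQueryBuilder methods above are designed to be chained into a single statement. A minimal usage sketch, assuming the builder renders the assembled SQL via toString() (not shown in these snippets):

// Hypothetical table and column names, purely for illustration.
String sql = SqlQueryBuilder.select("id", "name", "update_time")
    .from("orders")
    .join("customers")
    .on("orders.customer_id = customers.id")
    .where("update_time > '2023-01-01'")
    .orderBy("update_time desc")
    .m0(100) // appends the LIMIT clause; the method is named m0 in the snippet above
    .toString();
// -> select id, name, update_time from orders join customers on orders.customer_id = customers.id
//    where update_time > '2023-01-01' order by update_time desc limit 100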
hudi_HoodieParquetRealtimeInputFormat_getRecordReader_rdh
|
// To make Hive on Spark queries work with RT tables. Our theory is that due to
// {@link org.apache.hadoop.hive.ql.io.parquet.ProjectionPusher}
// not handling empty list correctly, the ParquetRecordReaderWrapper ends up adding the same column ids multiple
// times which ultimately breaks the query.
@Override
public RecordReader<NullWritable, ArrayWritable> getRecordReader(final InputSplit split, final JobConf jobConf, final Reporter reporter) throws IOException {
// sanity check
ValidationUtils.checkArgument(split instanceof RealtimeSplit, "HoodieRealtimeRecordReader can only work on RealtimeSplit and not with " + split);
RealtimeSplit realtimeSplit = ((RealtimeSplit) (split));
// add preCombineKey
HoodieTableMetaClient metaClient = HoodieTableMetaClient.builder().setConf(jobConf).setBasePath(realtimeSplit.getBasePath()).build();
HoodieTableConfig tableConfig = metaClient.getTableConfig();
addProjectionToJobConf(realtimeSplit, jobConf, tableConfig);
LOG.info((("Creating record reader with readCols :" + jobConf.get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR)) + ", Ids :") + jobConf.get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR));
// for log only split, set the parquet reader as empty.
if (FSUtils.isLogFile(realtimeSplit.getPath())) {
return new HoodieRealtimeRecordReader(realtimeSplit, jobConf, new HoodieEmptyRecordReader(realtimeSplit, jobConf));
}
return new HoodieRealtimeRecordReader(realtimeSplit, jobConf, super.getRecordReader(split, jobConf, reporter));
}
| 3.26 |
hudi_HoodieDataSourceHelpers_latestCommit_rdh
|
/**
* Returns the last successful write operation's instant time.
*/
@PublicAPIMethod(maturity = ApiMaturityLevel.STABLE)
public static String latestCommit(FileSystem fs, String basePath) {
HoodieTimeline timeline = allCompletedCommitsCompactions(fs, basePath);
return timeline.lastInstant().get().getTimestamp();
}
| 3.26 |
hudi_HoodieDataSourceHelpers_hasNewCommits_rdh
|
/**
* Checks if the Hoodie table has new data since given timestamp. This can be subsequently fed to an incremental
* view read, to perform incremental processing.
 */
@PublicAPIMethod(maturity = ApiMaturityLevel.STABLE)
public static boolean hasNewCommits(FileSystem fs, String basePath, String commitTimestamp) {
return listCommitsSince(fs, basePath, commitTimestamp).size() > 0;
}
| 3.26 |
hudi_HoodieDataSourceHelpers_listCommitsSince_rdh
|
/**
* Get a list of instant times that have occurred, from the given instant timestamp.
*/
@PublicAPIMethod(maturity = ApiMaturityLevel.STABLE)
public static List<String> listCommitsSince(FileSystem fs, String basePath, String instantTimestamp) {
HoodieTimeline timeline = allCompletedCommitsCompactions(fs, basePath);
return timeline.findInstantsAfter(instantTimestamp, Integer.MAX_VALUE).getInstantsAsStream().map(HoodieInstant::getTimestamp).collect(Collectors.toList());
}
| 3.26 |
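Together, these helpers support a simple incremental-pull check. A hedged sketch; the file system setup, base path, and stored checkpoint are assumptions for illustration:

FileSystem fs = FileSystem.get(new Configuration());
String basePath = "/tmp/hoodie/trips_table";          // assumed table base path
String lastCheckpoint = "20240101000000000";          // assumed checkpoint persisted by the caller
if (HoodieDataSourceHelpers.hasNewCommits(fs, basePath, lastCheckpoint)) {
  List<String> newCommits = HoodieDataSourceHelpers.listCommitsSince(fs, basePath, lastCheckpoint);
  String nextCheckpoint = HoodieDataSourceHelpers.latestCommit(fs, basePath);
  // feed newCommits to an incremental read, then persist nextCheckpoint for the next run
}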
hudi_CompletionTimeQueryView_load_rdh
|
/**
 * Reads the instant completion times.
 * This also updates the 'startToCompletionInstantTimeMap' map with start time/completion time pairs.
 * Only instants starting from 'startInstant' (inclusive) are considered.
*/
private void load() {
// load active instants first.
this.f0.getActiveTimeline().filterCompletedInstants().getInstantsAsStream().forEach(instant -> setCompletionTime(instant.getTimestamp(), instant.getCompletionTime()));
// then load the archived instants.
HoodieArchivedTimeline.loadInstants(f0, new HoodieArchivedTimeline.StartTsFilter(this.cursorInstant), LoadMode.SLIM, r -> true, this::readCompletionTime);
}
| 3.26 |
hudi_CompletionTimeQueryView_isCompleted_rdh
|
/**
* Returns whether the instant is completed.
 */
public boolean isCompleted(String instantTime) {
return this.startToCompletionInstantTimeMap.containsKey(instantTime) || HoodieTimeline.compareTimestamps(instantTime, LESSER_THAN, this.firstNonSavepointCommit);
}
| 3.26 |
hudi_CompletionTimeQueryView_isSlicedAfterOrOn_rdh
|
/**
 * Returns whether the given instant time {@code instantTime} is sliced after or on the base instant {@code baseInstant}.
*/
public boolean isSlicedAfterOrOn(String baseInstant, String instantTime) {
Option<String> completionTimeOpt = getCompletionTime(baseInstant, instantTime);
if (completionTimeOpt.isPresent()) {
return HoodieTimeline.compareTimestamps(completionTimeOpt.get(), GREATER_THAN_OR_EQUALS, baseInstant);
}
return true;
}
| 3.26 |
hudi_CompletionTimeQueryView_getCompletionTime_rdh
|
/**
* Queries the instant completion time with given start time.
*
* @param startTime
* The start time.
* @return The completion time if the instant finished or empty if it is still pending.
*/
public Option<String> getCompletionTime(String startTime) {
String completionTime = this.startToCompletionInstantTimeMap.get(startTime);
if (completionTime != null) {
return Option.of(completionTime);
}
if (HoodieTimeline.compareTimestamps(startTime, GREATER_THAN_OR_EQUALS, this.cursorInstant)) {
// the instant is still pending
return Option.empty();
}
// the 'startTime' should be out of the eager loading range, switch to a lazy loading.
// This operation is resource costly.
synchronized(this) {
if (HoodieTimeline.compareTimestamps(startTime, LESSER_THAN, this.cursorInstant)) {
HoodieArchivedTimeline.loadInstants(f0, new HoodieArchivedTimeline.ClosedOpenTimeRangeFilter(startTime, this.cursorInstant), LoadMode.SLIM, r -> true, this::readCompletionTime);
}
// refresh the start instant
this.cursorInstant = startTime;
}
return Option.ofNullable(this.startToCompletionInstantTimeMap.get(startTime));
}
| 3.26 |
hudi_CompletionTimeQueryView_isCompletedBefore_rdh
|
/**
 * Returns whether the given instant time {@code instantTime} completed before the base instant {@code baseInstant}.
*/
public boolean isCompletedBefore(String baseInstant, String instantTime) {
Option<String> completionTimeOpt = getCompletionTime(baseInstant, instantTime);
if (completionTimeOpt.isPresent()) {
return HoodieTimeline.compareTimestamps(completionTimeOpt.get(), LESSER_THAN, baseInstant);
}
return false;
}
| 3.26 |
hudi_ConflictDetectionUtils_getDefaultEarlyConflictDetectionStrategy_rdh
|
/**
*
* @param markerType
* Marker type.
* @return The class name of the default strategy for early conflict detection.
*/
public static String getDefaultEarlyConflictDetectionStrategy(MarkerType markerType) {
switch (markerType) {
case DIRECT :
return SimpleDirectMarkerBasedDetectionStrategy.class.getName();
case TIMELINE_SERVER_BASED :
default :
return AsyncTimelineServerBasedDetectionStrategy.class.getName();
}
}
| 3.26 |
hudi_HoodieParquetDataBlock_readRecordsFromBlockPayload_rdh
|
/**
* NOTE: We're overriding the whole reading sequence to make sure we properly respect
* the requested Reader's schema and only fetch the columns that have been explicitly
* requested by the caller (providing projected Reader's schema)
*/
@Override
protected <T> ClosableIterator<HoodieRecord<T>> readRecordsFromBlockPayload(HoodieRecordType type) throws IOException {
HoodieLogBlockContentLocation blockContentLoc = getBlockContentLocation().get();
// NOTE: It's important to extend Hadoop configuration here to make sure configuration
// is appropriately carried over
Configuration inlineConf = FSUtils.buildInlineConf(blockContentLoc.getHadoopConf());
Path inlineLogFilePath = InLineFSUtils.getInlineFilePath(blockContentLoc.getLogFile().getPath(), blockContentLoc.getLogFile().getPath().toUri().getScheme(), blockContentLoc.getContentPositionInLogFile(), blockContentLoc.getBlockSize());
Schema writerSchema = new Schema.Parser().parse(this.getLogBlockHeader().get(HeaderMetadataType.SCHEMA));
ClosableIterator<HoodieRecord<T>> iterator = HoodieFileReaderFactory.getReaderFactory(type).getFileReader(inlineConf, inlineLogFilePath, PARQUET).getRecordIterator(writerSchema, readerSchema);
return iterator;
}
| 3.26 |
hudi_AbstractIndexingCatchupTask_awaitInstantCaughtUp_rdh
|
/**
* For the given instant, this method checks if it is already caught up or not.
* If not, it waits until the instant is completed.
*
* @param instant
* HoodieInstant to check
* @return null if instant is already caught up, else the instant after it is completed.
*/
HoodieInstant awaitInstantCaughtUp(HoodieInstant instant) {
if ((!metadataCompletedInstants.isEmpty()) && metadataCompletedInstants.contains(instant.getTimestamp())) {
currentCaughtupInstant = instant.getTimestamp();
return null;
}
if (!instant.isCompleted()) {
try {
LOG.warn("instant not completed, reloading timeline " + instant);
reloadTimelineWithWait(instant);
} catch (InterruptedException e) {
throw new HoodieIndexException(String.format("Thread interrupted while running indexing check for instant: %s", instant), e);
}
}
return instant;
}
| 3.26 |
hudi_AdbSyncTool_syncPartitions_rdh
|
/**
* Syncs the list of storage partitions passed in (checks if the partition is in adb, if not adds it or if the
* partition path does not match, it updates the partition path).
*/
private void syncPartitions(String tableName, List<String> writtenPartitionsSince) {
try {
if (config.getSplitStrings(META_SYNC_PARTITION_FIELDS).isEmpty()) {
LOG.info("Not a partitioned table.");
return;
}
Map<List<String>, String> partitions = f0.scanTablePartitions(tableName);
List<PartitionEvent> partitionEvents = f0.getPartitionEvents(partitions, writtenPartitionsSince);
List<String> newPartitions = filterPartitions(partitionEvents, PartitionEventType.ADD);
LOG.info("New Partitions:{}", newPartitions);
f0.addPartitionsToTable(tableName, newPartitions);
List<String> updatePartitions = filterPartitions(partitionEvents, PartitionEventType.UPDATE);
LOG.info("Changed Partitions:{}", updatePartitions);
f0.updatePartitionsToTable(tableName, updatePartitions);
} catch (Exception e) {
throw new HoodieAdbSyncException("Failed to sync partitions for table:" + tableName, e);
}
}
| 3.26 |
hudi_AdbSyncTool_syncSchema_rdh
|
/**
 * Gets the latest schema from the last commit and checks if it is in sync with the ADB
* table schema. If not, evolves the table schema.
*
* @param tableName
* The table to be synced
* @param tableExists
* Whether target table exists
* @param useRealTimeInputFormat
* Whether using realtime input format
* @param readAsOptimized
* Whether read as optimized table
* @param schema
* The extracted schema
*/
private void syncSchema(String tableName, boolean tableExists, boolean useRealTimeInputFormat, boolean readAsOptimized, MessageType schema) {
// Append spark table properties & serde properties
Map<String, String> v4 = ConfigUtils.toMap(config.getString(ADB_SYNC_TABLE_PROPERTIES));
Map<String, String> serdeProperties = ConfigUtils.toMap(config.getString(ADB_SYNC_SERDE_PROPERTIES));
if (config.getBoolean(ADB_SYNC_SYNC_AS_SPARK_DATA_SOURCE_TABLE)) {
Map<String, String> v6 = SparkDataSourceTableUtils.getSparkTableProperties(config.getSplitStrings(META_SYNC_PARTITION_FIELDS), config.getString(META_SYNC_SPARK_VERSION), config.getInt(ADB_SYNC_SCHEMA_STRING_LENGTH_THRESHOLD), schema);
Map<String, String> sparkSerdeProperties = SparkDataSourceTableUtils.getSparkSerdeProperties(readAsOptimized, config.getString(META_SYNC_BASE_PATH));
v4.putAll(v6);
serdeProperties.putAll(sparkSerdeProperties);
LOG.info("Sync as spark datasource table, tableName:{}, tableExists:{}, tableProperties:{}, sederProperties:{}", tableName, tableExists, v4, serdeProperties);
}
// Check and sync schema
if (!tableExists) {
LOG.info("ADB table [{}] is not found, creating it", tableName);
String inputFormatClassName = HoodieInputFormatUtils.getInputFormatClassName(HoodieFileFormat.PARQUET, useRealTimeInputFormat);
// Custom serde will not work with ALTER TABLE REPLACE COLUMNS
// https://github.com/apache/hive/blob/release-1.1.0/ql/src/java/org/apache/hadoop/hive
// /ql/exec/DDLTask.java#L3488
f0.createTable(tableName, schema, inputFormatClassName, MapredParquetOutputFormat.class.getName(), ParquetHiveSerDe.class.getName(), serdeProperties, v4);
} else {
// Check if the table schema has evolved
Map<String, String> tableSchema = f0.getMetastoreSchema(tableName);
SchemaDifference schemaDiff = HiveSchemaUtil.getSchemaDifference(schema, tableSchema, config.getSplitStrings(META_SYNC_PARTITION_FIELDS), config.getBoolean(ADB_SYNC_SUPPORT_TIMESTAMP));
if (!schemaDiff.isEmpty()) {
LOG.info("Schema difference found for table:{}", tableName);
f0.updateTableDefinition(tableName, schemaDiff);
} else {
LOG.info("No Schema difference for table:{}", tableName);
}
}
}
| 3.26 |
hudi_MergeOnReadInputFormat_builder_rdh
|
/**
* Returns the builder for {@link MergeOnReadInputFormat}.
*/
public static Builder builder() {
return new Builder();
}
| 3.26 |
hudi_MergeOnReadInputFormat_mayShiftInputSplit_rdh
|
// -------------------------------------------------------------------------
// Utilities
// -------------------------------------------------------------------------
/**
* Shifts the input split by its consumed records number.
*
* <p>Note: This action is time-consuming.
*/
private void mayShiftInputSplit(MergeOnReadInputSplit split) throws IOException {
if (split.isConsumed()) {
// if the input split has been consumed before,
// shift the input split with consumed num of records first
for (long i = 0; (i < split.getConsumed()) && (!reachedEnd()); i++) {
nextRecord(null);
}
}
}
| 3.26 |
hudi_MergeOnReadInputFormat_getRequiredPosWithCommitTime_rdh
|
// -------------------------------------------------------------------------
// Utilities
// -------------------------------------------------------------------------
private static int[] getRequiredPosWithCommitTime(int[] requiredPos) {
if (getCommitTimePos(requiredPos) >= 0) {
return requiredPos;
}
int[] requiredPos2 = new int[requiredPos.length + 1];
requiredPos2[0] = HOODIE_COMMIT_TIME_COL_POS;
System.arraycopy(requiredPos, 0, requiredPos2, 1, requiredPos.length);
return requiredPos2;
}
| 3.26 |
hudi_ExecutorFactory_isBufferingRecords_rdh
|
/**
 * Checks whether the configured {@link HoodieExecutor} buffers records (for example, by holding them
 * in a queue).
*/
public static boolean isBufferingRecords(HoodieWriteConfig config) {
ExecutorType executorType = config.getExecutorType();
switch (executorType) {
case BOUNDED_IN_MEMORY :
case DISRUPTOR :
return true;
case SIMPLE :
return false;
default :
throw new HoodieException("Unsupported Executor Type " + executorType);
}
}
| 3.26 |
hudi_NonThrownExecutor_execute_rdh
|
/**
* Run the action in a loop.
*/
public void execute(final ThrowingRunnable<Throwable> action, final String actionName, final Object... actionParams) {
m0(action, this.exceptionHook, actionName, actionParams);
}
| 3.26 |
hudi_NonThrownExecutor_m0_rdh
|
/**
* Run the action in a loop.
*/
public void m0(final ThrowingRunnable<Throwable> action, final ExceptionHook hook, final String actionName, final Object... actionParams) {
executor.execute(wrapAction(action, hook, actionName, actionParams));
}
| 3.26 |
hudi_NonThrownExecutor_m1_rdh
|
/**
* Run the action in a loop and wait for completion.
*/
public void m1(ThrowingRunnable<Throwable> action, String actionName, Object... actionParams) {
try {
executor.submit(wrapAction(action, this.exceptionHook, actionName, actionParams)).get();
} catch (InterruptedException e) {
handleException(e, this.exceptionHook, getActionString(actionName, actionParams));
} catch (ExecutionException e) {
// nonfatal exceptions are handled by wrapAction
ExceptionUtils.rethrowIfFatalErrorOrOOM(e.getCause());
}
}
| 3.26 |
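An illustrative caller of the two variants above; the executor construction and the flushBuffer work item are assumptions, and ThrowingRunnable is assumed to accept a lambda:

void flushThroughExecutor(NonThrownExecutor executor, String instantTime) {
  // fire-and-forget: errors are routed to the executor's configured exception hook
  executor.execute(() -> flushBuffer(instantTime), "flush buffer", instantTime);
  // blocking variant: waits for the submitted action to complete
  executor.m1(() -> flushBuffer(instantTime), "flush buffer", instantTime);
}

void flushBuffer(String instantTime) throws Exception {
  // hypothetical work item used only for illustration
}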
hudi_MaxwellJsonKafkaSourcePostProcessor_isTargetTable_rdh
|
/**
* Check if it is the right table we want to consume from.
*
 * @param database
 * database the data belongs to
 * @param table
 * table the data belongs to
*/
private boolean isTargetTable(String database, String table) {
if (!databaseRegex.isPresent()) {
return Pattern.matches(tableRegex, table);
} else {
return Pattern.matches(databaseRegex.get(), database) && Pattern.matches(tableRegex, table);
}
}
| 3.26 |
hudi_WriteMarkers_create_rdh
|
/**
* Creates a marker without checking if the marker already exists.
* This can invoke marker-based early conflict detection when enabled for multi-writers.
*
* @param partitionPath
* partition path in the table
* @param fileName
* file name
* @param type
* write IO type
* @param writeConfig
* Hudi write configs.
* @param fileId
* File ID.
* @param activeTimeline
* Active timeline for the write operation.
* @return the marker path.
*/
public Option<Path> create(String partitionPath, String fileName, IOType type, HoodieWriteConfig writeConfig, String fileId, HoodieActiveTimeline activeTimeline) {
if (writeConfig.getWriteConcurrencyMode().isOptimisticConcurrencyControl() && writeConfig.isEarlyConflictDetectionEnable()) {
HoodieTimeline pendingCompactionTimeline = activeTimeline.filterPendingCompactionTimeline();
HoodieTimeline pendingReplaceTimeline = activeTimeline.filterPendingReplaceTimeline();
// TODO If current is compact or clustering then create marker directly without early conflict detection.
// Need to support early conflict detection between table service and common writers.
if (pendingCompactionTimeline.containsInstant(instantTime) || pendingReplaceTimeline.containsInstant(instantTime)) {
return create(partitionPath, fileName, type, false);
}
return createWithEarlyConflictDetection(partitionPath, fileName, type, false, writeConfig, fileId, activeTimeline);
}
return create(partitionPath, fileName, type, false);
}
| 3.26 |
hudi_WriteMarkers_getMarkerPath_rdh
|
/**
 * Returns the marker path. Creates the partition path first if it does not exist.
*
* @param partitionPath
* The partition path
* @param fileName
* The file name
* @param type
* The IO type
* @return path of the marker file
*/
protected Path getMarkerPath(String partitionPath, String fileName, IOType type) {
Path markerPartitionPath = FSUtils.getPartitionPath(markerDirPath, partitionPath);
String markerFileName = getMarkerFileName(fileName, type);
return new Path(markerPartitionPath, markerFileName);
}
/**
* Deletes the marker directory.
*
* @param context
* {@code HoodieEngineContext} instance.
* @param parallelism
* parallelism for deleting the marker files in the directory.
 * @return {@code true} if successful; {@code false}
| 3.26 |
hudi_WriteMarkers_getMarkerFileName_rdh
|
/**
* Gets the marker file name, in the format of "[file_name].marker.[IO_type]".
*
* @param fileName
* file name
* @param type
* IO type
* @return the marker file name
*/
protected static String getMarkerFileName(String fileName, IOType type) {
return String.format("%s%s.%s", fileName, HoodieTableMetaClient.MARKER_EXTN, type.name());
}
| 3.26 |
hudi_WriteMarkers_stripMarkerSuffix_rdh
|
/**
* Strips the marker file suffix from the input path, i.e., ".marker.[IO_type]".
*
* @param path
* file path
* @return Stripped path
*/
public static String stripMarkerSuffix(String path) {
return path.substring(0, path.indexOf(HoodieTableMetaClient.MARKER_EXTN));
}
| 3.26 |
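A worked example of the naming scheme described above; the file name and IO type are hypothetical:

// getMarkerFileName("abc-0_1-2-3_20240101000000.parquet", IOType.CREATE)
//   -> "abc-0_1-2-3_20240101000000.parquet.marker.CREATE"
// stripMarkerSuffix("2020/01/02/abc-0_1-2-3_20240101000000.parquet.marker.CREATE")
//   -> "2020/01/02/abc-0_1-2-3_20240101000000.parquet"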
hudi_WriteMarkers_createIfNotExists_rdh
|
/**
* Creates a marker if the marker does not exist.
* This can invoke marker-based early conflict detection when enabled for multi-writers.
*
* @param partitionPath
* partition path in the table
* @param fileName
* file name
* @param type
* write IO type
* @param writeConfig
* Hudi write configs.
* @param fileId
* File ID.
* @param activeTimeline
* Active timeline for the write operation.
* @return the marker path.
*/
public Option<Path> createIfNotExists(String partitionPath, String fileName, IOType type, HoodieWriteConfig writeConfig, String fileId, HoodieActiveTimeline activeTimeline) {
if (writeConfig.isEarlyConflictDetectionEnable() && writeConfig.getWriteConcurrencyMode().isOptimisticConcurrencyControl()) {
HoodieTimeline pendingCompactionTimeline = activeTimeline.filterPendingCompactionTimeline();
HoodieTimeline pendingReplaceTimeline = activeTimeline.filterPendingReplaceTimeline();
// TODO If current is compact or clustering then create marker directly without early conflict detection.
// Need to support early conflict detection between table service and common writers.
if (pendingCompactionTimeline.containsInstant(instantTime) || pendingReplaceTimeline.containsInstant(instantTime)) {
return create(partitionPath, fileName, type, true);
}
return createWithEarlyConflictDetection(partitionPath, fileName, type, false, writeConfig, fileId, activeTimeline);
}
return create(partitionPath, fileName, type, true);
}
| 3.26 |
hudi_WriteMarkers_quietDeleteMarkerDir_rdh
|
/**
* Quietly deletes the marker directory.
*
* @param context
* {@code HoodieEngineContext} instance.
* @param parallelism
* parallelism for deleting the marker files in the directory.
*/
public void quietDeleteMarkerDir(HoodieEngineContext context, int parallelism) {
try {
context.setJobStatus(this.getClass().getSimpleName(), "Deleting marker directory: " + basePath);
deleteMarkerDir(context, parallelism);
} catch (Exception e) {
LOG.warn("Error deleting marker directory for instant " + instantTime, e);
}
}
| 3.26 |
hudi_SparkHoodieIndexFactory_isGlobalIndex_rdh
|
/**
* Whether index is global or not.
*
* @param config
* HoodieWriteConfig to use.
* @return {@code true} if index is a global one. else {@code false}.
*/
public static boolean isGlobalIndex(HoodieWriteConfig config) {
switch (config.getIndexType()) {
case HBASE :
return true;
case INMEMORY :
return true;
case BLOOM :
return false;
case GLOBAL_BLOOM :
return true;
case SIMPLE :
return false;
case GLOBAL_SIMPLE :
return true;
case BUCKET :
return false;
case RECORD_INDEX :
return true;
default :
return createIndex(config).isGlobal();
}
}
| 3.26 |
hudi_MetadataConversionUtils_convertCommitMetadataToJsonBytes_rdh
|
/**
* Convert commit metadata from avro to json.
*/
public static <T extends SpecificRecordBase> byte[] convertCommitMetadataToJsonBytes(T avroMetaData, Class<T> clazz) {
Schema avroSchema = (clazz == HoodieReplaceCommitMetadata.class) ? HoodieReplaceCommitMetadata.getClassSchema() : HoodieCommitMetadata.getClassSchema();
try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) {
JsonEncoder jsonEncoder = new JsonEncoder(avroSchema, outputStream);
DatumWriter<GenericRecord> writer = (avroMetaData instanceof SpecificRecord) ? new SpecificDatumWriter<>(avroSchema) : new GenericDatumWriter<>(avroSchema);
writer.write(avroMetaData, jsonEncoder);
jsonEncoder.flush();
return outputStream.toByteArray();
} catch (IOException e) {
throw new HoodieIOException("Failed to convert to JSON.", e);
}
}
| 3.26 |
hudi_MetadataConversionUtils_convertReplaceCommitMetadata_rdh
|
/**
* Convert replacecommit metadata from json to avro.
*/
private static HoodieReplaceCommitMetadata convertReplaceCommitMetadata(HoodieReplaceCommitMetadata replaceCommitMetadata) {
replaceCommitMetadata.getPartitionToWriteStats().remove(null);
replaceCommitMetadata.getPartitionToReplaceFileIds().remove(null);
return JsonUtils.getObjectMapper().convertValue(replaceCommitMetadata, HoodieReplaceCommitMetadata.class);
}
| 3.26 |
hudi_MetadataConversionUtils_convertCommitMetadata_rdh
|
/**
* Convert commit metadata from json to avro.
*/
public static <T extends SpecificRecordBase> T convertCommitMetadata(HoodieCommitMetadata hoodieCommitMetadata) {
if (hoodieCommitMetadata instanceof HoodieReplaceCommitMetadata) {
return ((T) (convertReplaceCommitMetadata(((HoodieReplaceCommitMetadata) (hoodieCommitMetadata)))));
}
hoodieCommitMetadata.getPartitionToWriteStats().remove(null);
HoodieCommitMetadata avroMetaData = JsonUtils.getObjectMapper().convertValue(hoodieCommitMetadata, HoodieCommitMetadata.class);
if (hoodieCommitMetadata.getCompacted()) {
avroMetaData.setOperationType(WriteOperationType.COMPACT.name());
}
return ((T) (avroMetaData));
}
| 3.26 |
hudi_HoodieSparkQuickstart_insertData_rdh
|
/**
* Generate some new trips, load them into a DataFrame and write the DataFrame into the Hudi dataset as below.
*/
public static Dataset<Row> insertData(SparkSession spark, JavaSparkContext jsc, String tablePath, String tableName, HoodieExampleDataGenerator<HoodieAvroPayload> dataGen) {
String commitTime = Long.toString(System.currentTimeMillis());
List<String> inserts = dataGen.convertToStringList(dataGen.generateInserts(commitTime, 20));
Dataset<Row> df = spark.read().json(jsc.parallelize(inserts, 1));
df.write().format("hudi").options(QuickstartUtils.getQuickstartWriteConfigs()).option(HoodieWriteConfig.PRECOMBINE_FIELD_NAME.key(), "ts").option(KeyGeneratorOptions.RECORDKEY_FIELD_NAME.key(), "uuid").option(KeyGeneratorOptions.PARTITIONPATH_FIELD_NAME.key(), "partitionpath").option(TBL_NAME.key(), tableName).mode(Overwrite).save(tablePath);
return df;
}
| 3.26 |
hudi_HoodieSparkQuickstart_insertOverwriteData_rdh
|
/**
* Generate new records, load them into a {@link Dataset} and insert-overwrite it into the Hudi dataset
*/
public static Dataset<Row> insertOverwriteData(SparkSession spark, JavaSparkContext jsc, String tablePath, String tableName, HoodieExampleDataGenerator<HoodieAvroPayload> dataGen) {
String commitTime = Long.toString(System.currentTimeMillis());
List<String> inserts = dataGen.convertToStringList(dataGen.generateInsertsOnPartition(commitTime, 20, HoodieExampleDataGenerator.DEFAULT_THIRD_PARTITION_PATH));
Dataset<Row> df = spark.read().json(jsc.parallelize(inserts, 1));
df.write().format("hudi").options(QuickstartUtils.getQuickstartWriteConfigs()).option("hoodie.datasource.write.operation", WriteOperationType.INSERT_OVERWRITE.name()).option(HoodieWriteConfig.PRECOMBINE_FIELD_NAME.key(), "ts").option(KeyGeneratorOptions.RECORDKEY_FIELD_NAME.key(), "uuid").option(KeyGeneratorOptions.PARTITIONPATH_FIELD_NAME.key(), "partitionpath").option(TBL_NAME.key(), tableName).mode(Append).save(tablePath);
return df;
}
| 3.26 |
hudi_HoodieSparkQuickstart_delete_rdh
|
/**
 * Deletes records selected from the existing table data.
*/
public static Dataset<Row> delete(SparkSession spark, String tablePath, String tableName) {
Dataset<Row> roViewDF = spark.read().format("hudi").load(tablePath + "/*/*/*/*");
roViewDF.createOrReplaceTempView("hudi_ro_table");
Dataset<Row> toBeDeletedDf = spark.sql("SELECT begin_lat, begin_lon, driver, end_lat, end_lon, fare, partitionpath, rider, ts, uuid FROM hudi_ro_table limit 2");
Dataset<Row> df = toBeDeletedDf.select("uuid", "partitionpath", "ts");
df.write().format("hudi").options(QuickstartUtils.getQuickstartWriteConfigs()).option(HoodieWriteConfig.PRECOMBINE_FIELD_NAME.key(), "ts").option(KeyGeneratorOptions.RECORDKEY_FIELD_NAME.key(), "uuid").option(KeyGeneratorOptions.PARTITIONPATH_FIELD_NAME.key(), "partitionpath").option(TBL_NAME.key(), tableName).option("hoodie.datasource.write.operation", WriteOperationType.DELETE.value()).mode(Append).save(tablePath);
return toBeDeletedDf;
}
| 3.26 |
hudi_HoodieSparkQuickstart_updateData_rdh
|
/**
* This is similar to inserting new data. Generate updates to existing trips using the data generator,
* load into a DataFrame and write DataFrame into the hudi dataset.
*/
public static Dataset<Row> updateData(SparkSession spark, JavaSparkContext jsc, String tablePath, String tableName, HoodieExampleDataGenerator<HoodieAvroPayload> dataGen) {
String commitTime = Long.toString(System.currentTimeMillis());
List<String> updates = dataGen.convertToStringList(dataGen.generateUniqueUpdates(commitTime));
Dataset<Row> df = spark.read().json(jsc.parallelize(updates, 1));
df.write().format("hudi").options(QuickstartUtils.getQuickstartWriteConfigs()).option(HoodieWriteConfig.PRECOMBINE_FIELD_NAME.key(), "ts").option(KeyGeneratorOptions.RECORDKEY_FIELD_NAME.key(), "uuid").option(KeyGeneratorOptions.PARTITIONPATH_FIELD_NAME.key(), "partitionpath").option(TBL_NAME.key(), tableName).mode(Append).save(tablePath);
return df;
}
| 3.26 |
hudi_HoodieSparkQuickstart_incrementalQuery_rdh
|
/**
* Hudi also provides capability to obtain a stream of records that changed since given commit timestamp.
* This can be achieved using Hudi’s incremental view and providing a begin time from which changes need to be streamed.
* We do not need to specify endTime, if we want all changes after the given commit (as is the common case).
*/
public static void incrementalQuery(SparkSession spark, String tablePath, String tableName) {
List<String> commits = spark.sql("select distinct(_hoodie_commit_time) as commitTime from hudi_ro_table order by commitTime").toJavaRDD().map(((Function<Row, String>) (row -> row.getString(0)))).take(50);
String beginTime = commits.get(commits.size() - 1);// commit time we are interested in
// incrementally query data
Dataset<Row> incViewDF = spark.read().format("hudi").option("hoodie.datasource.query.type", "incremental").option("hoodie.datasource.read.begin.instanttime", beginTime).load(tablePath);
incViewDF.createOrReplaceTempView("hudi_incr_table");
spark.sql("select `_hoodie_commit_time`, fare, begin_lon, begin_lat, ts from hudi_incr_table where fare > 20.0").show();
}
| 3.26 |
hudi_HoodieSparkQuickstart_pointInTimeQuery_rdh
|
/**
* Lets look at how to query data as of a specific time.
* The specific time can be represented by pointing endTime to a specific commit time
* and beginTime to “000” (denoting earliest possible commit time).
*/
public static void pointInTimeQuery(SparkSession spark, String tablePath, String tableName) {
List<String> commits = spark.sql("select distinct(_hoodie_commit_time) as commitTime from hudi_ro_table order by commitTime").toJavaRDD().map(((Function<Row, String>) (row -> row.getString(0)))).take(50);
String beginTime = "000";// Represents all commits > this time.
String endTime = commits.get(commits.size() - 1);// commit time we are interested in
// incrementally query data
Dataset<Row> incViewDF = spark.read().format("hudi").option("hoodie.datasource.query.type", "incremental").option("hoodie.datasource.read.begin.instanttime", beginTime).option("hoodie.datasource.read.end.instanttime", endTime).load(tablePath);
incViewDF.createOrReplaceTempView("hudi_incr_table");
spark.sql("select `_hoodie_commit_time`, fare, begin_lon, begin_lat, ts from hudi_incr_table where fare > 20.0").show();
}
| 3.26 |
hudi_HoodieSparkQuickstart_runQuickstart_rdh
|
/**
* Visible for testing
*/
public static void runQuickstart(JavaSparkContext jsc, SparkSession spark, String tableName, String tablePath) {
final HoodieExampleDataGenerator<HoodieAvroPayload> dataGen = new HoodieExampleDataGenerator<>();
String snapshotQuery = "SELECT begin_lat, begin_lon, driver, end_lat, end_lon, fare, partitionpath, rider, ts, uuid FROM hudi_ro_table";
Dataset<Row> insertDf = insertData(spark, jsc, tablePath, tableName, dataGen);
queryData(spark, jsc, tablePath, tableName, dataGen);
assert insertDf.except(spark.sql(snapshotQuery)).count() == 0;
Dataset<Row> snapshotBeforeUpdate = spark.sql(snapshotQuery);
Dataset<Row> updateDf = updateData(spark, jsc, tablePath, tableName, dataGen);
queryData(spark, jsc, tablePath, tableName, dataGen);
Dataset<Row> snapshotAfterUpdate = spark.sql(snapshotQuery);
assert snapshotAfterUpdate.intersect(updateDf).count() == updateDf.count();
assert snapshotAfterUpdate.except(updateDf).except(snapshotBeforeUpdate).count() == 0;
incrementalQuery(spark, tablePath, tableName);
pointInTimeQuery(spark, tablePath, tableName);
Dataset<Row> snapshotBeforeDelete = snapshotAfterUpdate;
Dataset<Row> deleteDf = delete(spark, tablePath, tableName);
queryData(spark, jsc, tablePath, tableName, dataGen);
Dataset<Row> snapshotAfterDelete = spark.sql(snapshotQuery);
assert snapshotAfterDelete.intersect(deleteDf).count() == 0;
assert snapshotBeforeDelete.except(deleteDf).except(snapshotAfterDelete).count() == 0;
Dataset<Row> snapshotBeforeOverwrite = snapshotAfterDelete;
Dataset<Row> overwriteDf = insertOverwriteData(spark, jsc, tablePath, tableName, dataGen);
queryData(spark, jsc, tablePath, tableName, dataGen);
Dataset<Row> withoutThirdPartitionDf = snapshotBeforeOverwrite.filter(("partitionpath != '" + HoodieExampleDataGenerator.DEFAULT_THIRD_PARTITION_PATH) + "'");
Dataset<Row> expectedDf = withoutThirdPartitionDf.union(overwriteDf);
Dataset<Row> snapshotAfterOverwrite = spark.sql(snapshotQuery);
assert snapshotAfterOverwrite.except(expectedDf).count() == 0;
Dataset<Row> snapshotBeforeDeleteByPartition = snapshotAfterOverwrite;
deleteByPartition(spark, tablePath, tableName);
queryData(spark, jsc, tablePath, tableName, dataGen);
Dataset<Row> snapshotAfterDeleteByPartition = spark.sql(snapshotQuery);
assert snapshotAfterDeleteByPartition.intersect(snapshotBeforeDeleteByPartition.filter(("partitionpath == '" + HoodieExampleDataGenerator.DEFAULT_FIRST_PARTITION_PATH) + "'")).count() == 0;
assert snapshotAfterDeleteByPartition.count() == snapshotBeforeDeleteByPartition.filter(("partitionpath != '" + HoodieExampleDataGenerator.DEFAULT_FIRST_PARTITION_PATH) + "'").count();
}
| 3.26 |
hudi_HoodieSparkQuickstart_queryData_rdh
|
/**
* Load the data files into a DataFrame.
*/
public static void queryData(SparkSession spark, JavaSparkContext jsc, String tablePath, String tableName, HoodieExampleDataGenerator<HoodieAvroPayload> dataGen) {
Dataset<Row> roViewDF = spark.read().format("hudi").load(tablePath + "/*/*/*/*");
roViewDF.createOrReplaceTempView("hudi_ro_table");
spark.sql("select fare, begin_lon, begin_lat, ts from hudi_ro_table where fare > 20.0").show();
// +-----------------+-------------------+-------------------+---+
// | fare| begin_lon| begin_lat| ts|
// +-----------------+-------------------+-------------------+---+
// |98.88075495133515|0.39556048623031603|0.17851135255091155|0.0|
// ...
spark.sql("select _hoodie_commit_time, _hoodie_record_key, _hoodie_partition_path, rider, driver, fare from hudi_ro_table").show();
// +-------------------+--------------------+----------------------+-------------------+--------------------+------------------+
// |_hoodie_commit_time| _hoodie_record_key|_hoodie_partition_path| rider| driver| fare|
// +-------------------+--------------------+----------------------+-------------------+--------------------+------------------+
// | 20191231181501|31cafb9f-0196-4b1...| 2020/01/02|rider-1577787297889|driver-1577787297889| 98.88075495133515|
// ...
}
| 3.26 |
hudi_HoodieSparkQuickstart_deleteByPartition_rdh
|
/**
* Delete the data of the first partition.
*/
public static void deleteByPartition(SparkSession spark, String tablePath, String tableName) {
Dataset<Row> df = spark.emptyDataFrame();
df.write().format("hudi").options(QuickstartUtils.getQuickstartWriteConfigs()).option(HoodieWriteConfig.PRECOMBINE_FIELD_NAME.key(), "ts").option(KeyGeneratorOptions.RECORDKEY_FIELD_NAME.key(), "uuid").option(KeyGeneratorOptions.PARTITIONPATH_FIELD_NAME.key(), "partitionpath").option(TBL_NAME.key(), tableName).option("hoodie.datasource.write.operation", WriteOperationType.DELETE_PARTITION.value()).option("hoodie.datasource.write.partitions.to.delete", HoodieExampleDataGenerator.DEFAULT_FIRST_PARTITION_PATH).mode(Append).save(tablePath);
}
| 3.26 |
hudi_DagUtils_convertYamlToDag_rdh
|
/**
* Converts a YAML representation to {@link WorkflowDag}.
*/
public static WorkflowDag convertYamlToDag(String yaml) throws IOException {
int dagRounds = DEFAULT_DAG_ROUNDS;
int intermittentDelayMins = DEFAULT_INTERMITTENT_DELAY_MINS;
String dagName = DEFAULT_DAG_NAME;
Map<String, DagNode> allNodes = new HashMap<>();
final ObjectMapper yamlReader = new ObjectMapper(new YAMLFactory());
final JsonNode jsonNode = yamlReader.readTree(yaml);
Iterator<Entry<String, JsonNode>> itr = jsonNode.fields();
while (itr.hasNext()) {
Entry<String, JsonNode> dagNode = itr.next();
String key = dagNode.getKey();
switch (key) {
case DAG_NAME :
dagName = dagNode.getValue().asText();
break;
case DAG_ROUNDS :
dagRounds = dagNode.getValue().asInt();
break;
case DAG_INTERMITTENT_DELAY_MINS :
intermittentDelayMins = dagNode.getValue().asInt();
break;
case DAG_CONTENT :
JsonNode dagContent = dagNode.getValue();
Iterator<Entry<String, JsonNode>> contentItr = dagContent.fields();
while (contentItr.hasNext()) {
Entry<String, JsonNode> dagContentNode = contentItr.next();
allNodes.put(dagContentNode.getKey(), convertJsonToDagNode(allNodes, dagContentNode.getKey(), dagContentNode.getValue()));
}
break;
default :
break;
}
}
return new WorkflowDag(dagName, dagRounds, intermittentDelayMins, findRootNodes(allNodes));
}
| 3.26 |
hudi_DagUtils_convertDagToYaml_rdh
|
/**
* Converts {@link WorkflowDag} to a YAML representation.
*/
public static String convertDagToYaml(WorkflowDag dag) throws IOException {
final ObjectMapper yamlWriter = new ObjectMapper(new YAMLFactory().disable(Feature.WRITE_DOC_START_MARKER).enable(Feature.MINIMIZE_QUOTES).enable(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES));
JsonNode yamlNode = MAPPER.createObjectNode();
((ObjectNode) (yamlNode)).put(DAG_NAME, dag.getDagName());
((ObjectNode) (yamlNode)).put(DAG_ROUNDS, dag.getRounds());
((ObjectNode) (yamlNode)).put(DAG_INTERMITTENT_DELAY_MINS, dag.getIntermittentDelayMins());
JsonNode dagContentNode = MAPPER.createObjectNode();
convertDagToYaml(dagContentNode, dag.getNodeList());
((ObjectNode) (yamlNode)).put(DAG_CONTENT, dagContentNode);
return yamlWriter.writerWithDefaultPrettyPrinter().writeValueAsString(yamlNode);
}
| 3.26 |
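A round-trip sketch using the two converters above; the WorkflowDag instance dag is assumed to have been built elsewhere, and both calls declare IOException:

String yaml = DagUtils.convertDagToYaml(dag);
WorkflowDag restored = DagUtils.convertYamlToDag(yaml);
// restored carries the same dag name, rounds, intermittent delay and node hierarchy encoded in the YAML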
hudi_DagUtils_convertYamlPathToDag_rdh
|
/**
* Converts a YAML path to {@link WorkflowDag}.
*/
public static WorkflowDag convertYamlPathToDag(FileSystem fs, String path) throws IOException {
InputStream is = fs.open(new Path(path));
return convertYamlToDag(toString(is));
}
| 3.26 |
hudi_HoodieLogFormatWriter_getLogBlockLength_rdh
|
/**
 * Returns the total log block length, which is the sum of:
 * 1. Number of bytes to write version
 * 2. Number of bytes to write ordinal
 * 3. Length of the headers
 * 4. Number of bytes used to write content length
 * 5. Length of the content
 * 6. Length of the footers
 * 7. Number of bytes to write totalLogBlockLength
 */
private int getLogBlockLength(int contentLength, int headerLength, int footerLength) {
return (((((Integer.BYTES// Number of bytes to write version
+ Integer.BYTES)// Number of bytes to write ordinal
+ headerLength)// Length of the headers
+ Long.BYTES)// Number of bytes used to write content length
+ contentLength)// Length of the content
+ footerLength)// Length of the footers
+ Long.BYTES;// bytes to write totalLogBlockLength at end of block (for reverse ptr)
}
| 3.26 |
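As a worked example of the formula above (the sizes are chosen purely for illustration, and the private method is invoked here only to show the arithmetic):

// version (4) + ordinal (4) + headers (50) + content-length field (8)
// + content (1000) + footers (20) + trailing reverse-pointer length (8) = 1094 bytes
int totalLength = getLogBlockLength(1000, 50, 20); // evaluates to 1094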
hudi_HoodieLogFormatWriter_withOutputStream_rdh
|
/**
* Overrides the output stream, only for test purpose.
*/
@VisibleForTesting
public void withOutputStream(FSDataOutputStream output) {
this.f1 = output;
}
| 3.26 |
hudi_HoodieLogFormatWriter_addShutDownHook_rdh
|
/**
* Close the output stream when the JVM exits.
*/
private void addShutDownHook() {
shutdownThread = new Thread() {
public void run() {
try {
if (f1 != null) {
close();
}
} catch (Exception e) {
f0.warn("unable to close output stream for log file " + logFile, e);
// fail silently for any sort of exception
}
}
};
Runtime.getRuntime().addShutdownHook(shutdownThread);
}
| 3.26 |
hudi_HoodieLogFormatWriter_getOutputStream_rdh
|
/**
* Lazily opens the output stream if needed for writing.
*
* @return OutputStream for writing to current log file.
* @throws IOException
*/
private FSDataOutputStream getOutputStream() throws IOException {
if (this.f1 == null) {
boolean created = false;
while (!created) {
try {
// Block size does not matter as we will always manually auto-flush
createNewFile();
f0.info("Created a new log file: {}", logFile);created = true;
} catch (FileAlreadyExistsException ignored) {
f0.info("File {} already exists, rolling over", logFile.getPath());
rollOver();
} catch (RemoteException re) {
if (re.getClassName().contentEquals(AlreadyBeingCreatedException.class.getName())) {
f0.warn(("Another task executor writing to the same log file(" + logFile) + ", rolling over");// Rollover the current log file (since cannot get a stream handle) and create new one
rollOver();
} else {
throw re;
}
}
}
}
return f1;
}
| 3.26 |
hudi_HoodieFlinkCompactor_start_rdh
|
/**
* Main method to start compaction service.
*/
public void start(boolean serviceMode) throws Exception {
if (serviceMode) {
compactionScheduleService.start(null);
try {
compactionScheduleService.waitForShutdown();
} catch (Exception e) {
throw new HoodieException(e.getMessage(), e);
} finally {
LOG.info("Shut down hoodie flink compactor");
}
} else {
LOG.info("Hoodie Flink Compactor running only single round");
try {
compactionScheduleService.compact();
} catch (ApplicationExecutionException aee) {
if (aee.getMessage().contains(NO_EXECUTE_KEYWORD)) {
LOG.info("Compaction is not performed");
} else {
throw aee;
}
} catch (Exception e) {
LOG.error("Got error running delta sync once. Shutting down", e);
throw e;
} finally {
LOG.info("Shut down hoodie flink compactor");
}
}
}
| 3.26 |
hudi_HoodieFlinkCompactor_shutdownAsyncService_rdh
|
/**
* Shutdown async services like compaction/clustering as DeltaSync is shutdown.
*/
public void shutdownAsyncService(boolean error) {
LOG.info("Gracefully shutting down compactor. Error ?" + error);
executor.shutdown();
writeClient.close();
}
| 3.26 |
hudi_HoodieHFileDataBlock_lookupRecords_rdh
|
// TODO abstract this w/in HoodieDataBlock
@Override
protected <T> ClosableIterator<HoodieRecord<T>> lookupRecords(List<String> sortedKeys, boolean fullKey) throws IOException {
HoodieLogBlockContentLocation blockContentLoc = getBlockContentLocation().get();
// NOTE: It's important to extend Hadoop configuration here to make sure configuration
// is appropriately carried over
Configuration inlineConf = FSUtils.buildInlineConf(blockContentLoc.getHadoopConf());
Path inlinePath = InLineFSUtils.getInlineFilePath(blockContentLoc.getLogFile().getPath(), blockContentLoc.getLogFile().getPath().toUri().getScheme(), blockContentLoc.getContentPositionInLogFile(), blockContentLoc.getBlockSize());
try (final HoodieAvroHFileReader reader = new HoodieAvroHFileReader(inlineConf, inlinePath, new CacheConfig(inlineConf), inlinePath.getFileSystem(inlineConf), Option.of(getSchemaFromHeader()))) {
// Get writer's schema from the header
final ClosableIterator<HoodieRecord<IndexedRecord>> recordIterator = (fullKey) ? reader.getRecordsByKeysIterator(sortedKeys, readerSchema) : reader.getRecordsByKeyPrefixIterator(sortedKeys, readerSchema);
return new CloseableMappingIterator<>(recordIterator, data -> ((HoodieRecord<T>) (data)));
}
}
| 3.26 |
hudi_HoodieHFileDataBlock_printRecord_rdh
|
/**
* Print the record in json format
*/
private void printRecord(String msg, byte[] bs, Schema schema) throws IOException {
GenericRecord record = HoodieAvroUtils.bytesToAvro(bs, schema);
byte[] json = HoodieAvroUtils.avroToJson(record, true);
LOG.error(String.format("%s: %s", msg, new String(json)));
}
| 3.26 |
hudi_FlinkOptions_flatOptions_rdh
|
/**
* Collects all the config options, the 'properties.' prefix would be removed if the option key starts with it.
*/
public static Configuration flatOptions(Configuration conf) {
final Map<String, String> propsMap = new HashMap<>();
conf.toMap().forEach((key, value) -> {
final String subKey = (key.startsWith(PROPERTIES_PREFIX)) ? key.substring(PROPERTIES_PREFIX.length()) : key;
propsMap.put(subKey, value);
});
return fromMap(propsMap);
}
| 3.26 |
hudi_FlinkOptions_isDefaultValueDefined_rdh
|
/**
* Returns whether the given conf defines default value for the option {@code option}.
*/
public static <T> boolean isDefaultValueDefined(Configuration conf, ConfigOption<T> option) {
return (!conf.getOptional(option).isPresent()) || conf.get(option).equals(option.defaultValue());
}
| 3.26 |
hudi_FlinkOptions_fromMap_rdh
|
/**
* Creates a new configuration that is initialized with the options of the given map.
*/
public static Configuration fromMap(Map<String, String> map) {
final Configuration configuration = new Configuration();
for (Map.Entry<String, String> entry : map.entrySet()) {
configuration.setString(entry.getKey().trim(), entry.getValue());
}
return configuration;
}
| 3.26 |
hudi_FlinkOptions_optionalOptions_rdh
|
/**
* Returns all the optional config options.
*/
public static Set<ConfigOption<?>> optionalOptions() {
Set<ConfigOption<?>> options = new HashSet<>(allOptions());
options.remove(PATH);
return options;
}
| 3.26 |
hudi_FlinkOptions_getPropertiesWithPrefix_rdh
|
/**
* Collects the config options that start with specified prefix {@code prefix} into a 'key'='value' list.
*/
public static Map<String, String> getPropertiesWithPrefix(Map<String, String> options, String prefix) {
final Map<String, String> hoodieProperties = new HashMap<>();
if (hasPropertyOptions(options, prefix)) {
options.keySet().stream().filter(key -> key.startsWith(prefix)).forEach(key -> {
final String value = options.get(key);
final String subKey = key.substring(prefix.length());
hoodieProperties.put(subKey, value);
});
}
return hoodieProperties;
}
| 3.26 |
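A small hedged illustration of the prefix handling above; the option keys and values are made up for this example, only the 'properties.' prefix behavior is taken from the code:

Map<String, String> options = new HashMap<>();
options.put("properties.bootstrap.servers", "localhost:9092");
options.put("table.type", "MERGE_ON_READ");
// Collect and strip a prefix: {"bootstrap.servers" -> "localhost:9092"}
Map<String, String> kafkaProps = FlinkOptions.getPropertiesWithPrefix(options, "properties.");
// Flatten a Configuration: the "properties." prefix is removed, other keys are kept as-is,
// yielding keys {"bootstrap.servers", "table.type"}
Configuration flat = FlinkOptions.flatOptions(FlinkOptions.fromMap(options));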
hudi_FlinkOptions_allOptions_rdh
|
/**
* Returns all the config options.
*/
public static List<ConfigOption<?>> allOptions() {
return OptionsResolver.allOptions(FlinkOptions.class);
}
| 3.26 |
hudi_SerializationUtils_serialize_rdh
|
/**
* <p>
* Serializes an {@code Object} to a byte array for storage/serialization.
* </p>
*
* @param obj
* the object to serialize to bytes
* @return a byte[] with the converted Serializable
* @throws IOException
* if the serialization fails
*/
public static byte[] serialize(final Object obj) throws IOException {
return SERIALIZER_REF.get().serialize(obj);
}
| 3.26 |
hudi_SerializationUtils_deserialize_rdh
|
/**
* <p>
* Deserializes a single {@code Object} from an array of bytes.
* </p>
*
* <p>
* If the call site incorrectly types the return value, a {@link ClassCastException} is thrown from the call site.
* Without Generics in this declaration, the call site must type cast and can cause the same ClassCastException. Note
* that in both cases, the ClassCastException is in the call site, not in this method.
* </p>
*
* @param <T>
* the object type to be deserialized
* @param objectData
* the serialized object, must not be null
* @return the deserialized object
* @throws IllegalArgumentException
* if {@code objectData} is {@code null}
*/
public static <T> T deserialize(final byte[] objectData) {
if (objectData == null) {
throw new IllegalArgumentException("The byte[] must not be null");
}
return ((T) (SERIALIZER_REF.get().deserialize(objectData)));
}
| 3.26 |
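A quick round trip through the pair of helpers above; the payload map is arbitrary, and serialize declares IOException, so a real caller would handle it:

Map<String, Long> counts = new HashMap<>();
counts.put("commits", 42L);
byte[] bytes = SerializationUtils.serialize(counts);
Map<String, Long> restored = SerializationUtils.deserialize(bytes);
// restored.get("commits") == 42L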
hudi_DataSourceUtils_dropDuplicates_rdh
|
/**
* Drop records already present in the dataset.
*
* @param jssc
* JavaSparkContext
* @param incomingHoodieRecords
* HoodieRecords to deduplicate
* @param writeConfig
* HoodieWriteConfig
*/
@SuppressWarnings("unchecked")
public static JavaRDD<HoodieRecord> dropDuplicates(JavaSparkContext jssc, JavaRDD<HoodieRecord> incomingHoodieRecords, HoodieWriteConfig writeConfig) {
try {
SparkRDDReadClient client = new SparkRDDReadClient<>(new HoodieSparkEngineContext(jssc), writeConfig);
return client.tagLocation(incomingHoodieRecords).filter(r -> !((HoodieRecord<HoodieRecordPayload>) (r)).isCurrentLocationKnown());
} catch (TableNotFoundException e) {
// this will be executed when there is no hoodie table yet
// so no dups to drop
return incomingHoodieRecords;
}
}
| 3.26 |
hudi_DataSourceUtils_createPayload_rdh
|
/**
 * Creates a payload class via reflection, without ordering/precombining the value.
*/
public static HoodieRecordPayload createPayload(String payloadClass, GenericRecord record) throws IOException {
try {
return ((HoodieRecordPayload) (ReflectionUtils.loadClass(payloadClass, new Class<?>[]{ Option.class }, Option.of(record))));
} catch (Throwable e) {
throw new IOException("Could not create payload for class: " + payloadClass, e);
}
}
| 3.26 |
hudi_DataSourceUtils_tryOverrideParquetWriteLegacyFormatProperty_rdh
|
/**
* Checks whether default value (false) of "hoodie.parquet.writelegacyformat.enabled" should be
* overridden in case:
*
* <ul>
* <li>Property has not been explicitly set by the writer</li>
* <li>Data schema contains {@code DecimalType} that would be affected by it</li>
* </ul>
*
* If both of the aforementioned conditions are true, will override the default value of the config
* (by essentially setting the value) to make sure that the produced Parquet data files could be
* read by {@code AvroParquetReader}
*
* @param properties
* properties specified by the writer
* @param schema
* schema of the dataset being written
*/
public static void tryOverrideParquetWriteLegacyFormatProperty(Map<String, String> properties, StructType schema) {
if (HoodieDataTypeUtils.hasSmallPrecisionDecimalType(schema) && (properties.get(HoodieStorageConfig.PARQUET_WRITE_LEGACY_FORMAT_ENABLED.key()) == null)) {
// ParquetWriteSupport writes DecimalType to parquet as INT32/INT64 when the scale of decimalType
// is less than {@code Decimal.MAX_LONG_DIGITS}, but {@code AvroParquetReader} which is used by
// {@code HoodieParquetReader} does not support DecimalType encoded as INT32/INT64.
//
// To work this problem around we're checking whether
// - Schema contains any decimals that could be encoded as INT32/INT64
// - {@code HoodieStorageConfig.PARQUET_WRITE_LEGACY_FORMAT_ENABLED} has not been explicitly
// set by the writer
//
// If both of these conditions are true, then we override the default value of {@code
// HoodieStorageConfig.PARQUET_WRITE_LEGACY_FORMAT_ENABLED} and set it to "true"
LOG.warn("Small Decimal Type found in the persisted schema, reverting default value of 'hoodie.parquet.writelegacyformat.enabled' to true");
properties.put(HoodieStorageConfig.PARQUET_WRITE_LEGACY_FORMAT_ENABLED.key(), "true");
}
}
| 3.26 |
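An illustrative sketch of when the override kicks in: a schema containing a small-precision decimal and a property map where the writer has not set the flag.

  static void legacyFormatExample() {
    StructType schema = new StructType()
        .add("id", DataTypes.StringType)
        .add("amount", DataTypes.createDecimalType(8, 2));  // small enough to be written as INT32/INT64
    Map<String, String> props = new HashMap<>();
    DataSourceUtils.tryOverrideParquetWriteLegacyFormatProperty(props, schema);
    // props should now map hoodie.parquet.writelegacyformat.enabled -> "true",
    // unless the writer had already set the property explicitly.
  }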
hudi_DataSourceUtils_createUserDefinedBulkInsertPartitioner_rdh
|
/**
* Create a UserDefinedBulkInsertPartitioner class via reflection,
* <br>
* if the class name of UserDefinedBulkInsertPartitioner is configured through the HoodieWriteConfig.
*
* @see HoodieWriteConfig#getUserDefinedBulkInsertPartitionerClass()
*/
private static Option<BulkInsertPartitioner> createUserDefinedBulkInsertPartitioner(HoodieWriteConfig config) throws HoodieException {
    String bulkInsertPartitionerClass = config.getUserDefinedBulkInsertPartitionerClass();
    try {
      return StringUtils.isNullOrEmpty(bulkInsertPartitionerClass) ? Option.empty() : Option.of(((BulkInsertPartitioner) (ReflectionUtils.loadClass(bulkInsertPartitionerClass, config))));
} catch (Throwable e) {
throw new HoodieException("Could not create UserDefinedBulkInsertPartitioner class " + bulkInsertPartitionerClass, e);
}
}
| 3.26 |
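A hedged sketch of how a custom partitioner could be wired into the write config that the factory above inspects; the config key and the partitioner class name are assumptions and should be checked against the Hudi version in use.

  static HoodieWriteConfig partitionerConfigExample() {
    Properties props = new Properties();
    props.setProperty("hoodie.bulkinsert.user.defined.partitioner.class",  // assumed key for the user-defined partitioner
        "org.example.MySortPartitioner");                                   // hypothetical BulkInsertPartitioner implementation
    return HoodieWriteConfig.newBuilder()
        .withPath("/tmp/hoodie/trips")                                      // hypothetical base path
        .forTable("trips")
        .withProps(props)
        .build();
    // createUserDefinedBulkInsertPartitioner(config) would then instantiate MySortPartitioner via reflection.
  }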
hudi_CompactionCommitSink_commitIfNecessary_rdh
|
/**
* Condition to commit: the commit buffer has equal size with the compaction plan operations
* and all the compact commit event {@link CompactionCommitEvent} has the same compaction instant time.
*
* @param instant
* Compaction commit instant time
* @param events
* Commit events ever received for the instant
*/
private void commitIfNecessary(String instant, Collection<CompactionCommitEvent> events) throws IOException {
HoodieCompactionPlan compactionPlan = compactionPlanCache.computeIfAbsent(instant, k -> {
try {
return CompactionUtils.getCompactionPlan(this.writeClient.getHoodieTable().getMetaClient(), instant);
} catch (Exception e) {
        throw new HoodieException(e);
}
});
boolean isReady = compactionPlan.getOperations().size() == events.size();
if (!isReady) {
return;
}
if (events.stream().anyMatch(CompactionCommitEvent::isFailed)) {
      try {
        // handle failure case
CompactionUtil.rollbackCompaction(table, instant);
} finally {
// remove commitBuffer to avoid obsolete metadata commit
reset(instant);
this.compactionMetrics.markCompactionRolledBack();
}
return;
}
try {
doCommit(instant, events);
} catch (Throwable throwable) {
// make it fail-safe
LOG.error("Error while committing compaction instant: " + instant, throwable);
this.compactionMetrics.markCompactionRolledBack();
} finally {
// reset the status
reset(instant);
}
}
| 3.26 |
hudi_HoodieMergeHandleFactory_create_rdh
|
/**
* Creates a merge handle for compaction path.
*/
public static <T, I, K, O> HoodieMergeHandle<T, I, K, O> create(HoodieWriteConfig writeConfig, String instantTime, HoodieTable<T, I, K, O> table, Map<String, HoodieRecord<T>> keyToNewRecords,
String partitionPath, String fileId, HoodieBaseFile dataFileToBeMerged, TaskContextSupplier taskContextSupplier, Option<BaseKeyGenerator> keyGeneratorOpt) {
LOG.info("Get updateHandle for fileId {} and partitionPath {} at commit {}", fileId, partitionPath, instantTime);
if (table.requireSortedRecords()) {
return new HoodieSortedMergeHandle<>(writeConfig, instantTime, table, keyToNewRecords, partitionPath, fileId, dataFileToBeMerged, taskContextSupplier, keyGeneratorOpt);
} else {
return new HoodieMergeHandle<>(writeConfig, instantTime, table, keyToNewRecords, partitionPath, fileId, dataFileToBeMerged, taskContextSupplier, keyGeneratorOpt);
}
}
| 3.26 |
hudi_OptionsInference_m0_rdh
|
/**
* Utilities that help to auto generate the client id for multi-writer scenarios.
* It basically handles two cases:
*
* <ul>
* <li>find the next client id for the new job;</li>
* <li>clean the existing inactive client heartbeat files.</li>
* </ul>
*
* @see ClientIds
*/
public static void m0(Configuration conf) {
if (OptionsResolver.isMultiWriter(conf)) {
// explicit client id always has higher priority
if (!conf.contains(FlinkOptions.WRITE_CLIENT_ID)) {
try (ClientIds clientIds = ClientIds.builder().conf(conf).build()) {
String clientId = clientIds.nextId(conf);
          conf.setString(FlinkOptions.WRITE_CLIENT_ID, clientId);
}
}
}
}
| 3.26 |
hudi_OptionsInference_setupSourceTasks_rdh
|
/**
* Sets up the default source task parallelism if it is not specified.
*
* @param conf
* The configuration
* @param envTasks
* The parallelism of the execution env
*/
public static void setupSourceTasks(Configuration conf, int envTasks) {
if (!conf.contains(FlinkOptions.READ_TASKS)) {
conf.setInteger(FlinkOptions.READ_TASKS, envTasks);
}
}
| 3.26 |
hudi_OptionsInference_setupSinkTasks_rdh
|
/**
* Sets up the default sink tasks parallelism if it is not specified.
*
* @param conf
* The configuration
* @param envTasks
* The parallelism of the execution env
*/
public static void setupSinkTasks(Configuration conf, int envTasks) {
// write task number, default same as execution env tasks
if (!conf.contains(FlinkOptions.WRITE_TASKS)) {
conf.setInteger(FlinkOptions.WRITE_TASKS, envTasks);
}
int writeTasks = conf.getInteger(FlinkOptions.WRITE_TASKS);
// bucket assign tasks, default same as write tasks
    if (!conf.contains(FlinkOptions.BUCKET_ASSIGN_TASKS)) {
      conf.setInteger(FlinkOptions.BUCKET_ASSIGN_TASKS, writeTasks);
}
// compaction tasks, default same as write tasks
if (!conf.contains(FlinkOptions.COMPACTION_TASKS)) {
conf.setInteger(FlinkOptions.COMPACTION_TASKS, writeTasks);
}
// clustering tasks, default same as write tasks
if (!conf.contains(FlinkOptions.CLUSTERING_TASKS)) {
conf.setInteger(FlinkOptions.CLUSTERING_TASKS, writeTasks);
}
}
| 3.26 |
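A short sketch wiring both parallelism helpers into a Flink job setup; the table path is a placeholder.

  static void parallelismExample() {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    Configuration conf = new Configuration();                  // org.apache.flink.configuration.Configuration
    conf.setString(FlinkOptions.PATH, "/tmp/hoodie/trips");    // hypothetical table path
    OptionsInference.setupSourceTasks(conf, env.getParallelism());
    OptionsInference.setupSinkTasks(conf, env.getParallelism());
    // READ_TASKS, WRITE_TASKS, BUCKET_ASSIGN_TASKS, COMPACTION_TASKS and CLUSTERING_TASKS
    // now all carry explicit values derived from the environment parallelism.
  }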
hudi_DefaultHoodieRecordPayload_m0_rdh
|
/**
   * Checks whether the given record should be treated as a delete, based on the configured delete key and marker.
   *
* @param genericRecord
* instance of {@link GenericRecord} of interest.
* @param properties
* payload related properties
   * @return {@code true} if the record represents a delete record, {@code false} otherwise.
*/
protected boolean m0(GenericRecord genericRecord, Properties properties) {
final String deleteKey = properties.getProperty(DELETE_KEY);
    if (StringUtils.isNullOrEmpty(deleteKey)) {
      return m0(genericRecord);
    }
ValidationUtils.checkArgument(!StringUtils.isNullOrEmpty(properties.getProperty(DELETE_MARKER)), () -> (DELETE_MARKER + " should be configured with ") + DELETE_KEY);
// Modify to be compatible with new version Avro.
// The new version Avro throws for GenericRecord.get if the field name
// does not exist in the schema.
if (genericRecord.getSchema().getField(deleteKey) == null) {
      return false;
    }
Object v4 = genericRecord.get(deleteKey);
return (v4 != null) && properties.getProperty(DELETE_MARKER).equals(v4.toString());
}
| 3.26 |
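A hedged sketch of the payload properties that drive the delete check above; it assumes DELETE_KEY and DELETE_MARKER are the public constants of DefaultHoodieRecordPayload, and the column name and marker value are hypothetical.

  static Properties deleteMarkerProps() {
    Properties props = new Properties();
    props.setProperty(DefaultHoodieRecordPayload.DELETE_KEY, "op");    // hypothetical column carrying the operation flag
    props.setProperty(DefaultHoodieRecordPayload.DELETE_MARKER, "d");  // records whose "op" field equals "d" are treated as deletes
    return props;
  }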
hudi_HoodieAsyncService_waitForShutdown_rdh
|
/**
* Wait till the service shutdown. If the service shutdown with exception, it will be thrown
*
* @throws ExecutionException
* @throws InterruptedException
*/
public void waitForShutdown() throws ExecutionException, InterruptedException {
if (future == null) {
return;
}
try {
future.get();
} catch (ExecutionException ex) {
f0.error("Service shutdown with error", ex);
throw ex;
}
}
| 3.26 |
hudi_HoodieAsyncService_shutdownCallback_rdh
|
/**
* Add shutdown callback for the completable future.
*
* @param callback
* The callback
*/
@SuppressWarnings("unchecked")
  private void shutdownCallback(Function<Boolean, Boolean> callback) {
if (future == null) {
return;
}
future.whenComplete((resp, error) -> {
if (null != callback) {
callback.apply(null != error);
}
this.started = false;
});
}
| 3.26 |
hudi_HoodieAsyncService_start_rdh
|
/**
* Start the service. Runs the service in a different thread and returns. Also starts a monitor thread to
* run-callbacks in case of shutdown
*
* @param onShutdownCallback
*/
public void start(Function<Boolean, Boolean> onShutdownCallback) {
if (started) {
f0.warn("The async service already started.");
return;
}
Pair<CompletableFuture, ExecutorService> res = startService();
    future = res.getKey();
    executor = res.getValue();
started = true;
shutdownCallback(onShutdownCallback);
}
| 3.26 |
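A hedged sketch of the typical start/shutdown sequence for a concrete async service; the subclass providing startService() is assumed to exist and is not shown.

  static void lifecycleExample(HoodieAsyncService service) throws Exception {
    service.start(hasError -> {
      // invoked once the service future completes; hasError is true when it ended exceptionally
      return true;
    });
    // ... trigger some work, then from a controlling thread:
    service.shutdown(false);    // graceful: let the current round of work finish
    service.waitForShutdown();  // rethrows any failure raised on the service thread
  }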
hudi_HoodieAsyncService_enqueuePendingAsyncServiceInstant_rdh
|
/**
* Enqueues new pending table service instant.
*
* @param instant
* {@link HoodieInstant} to enqueue.
*/
public void enqueuePendingAsyncServiceInstant(HoodieInstant instant) {
f0.info("Enqueuing new pending table service instant: " + instant.getTimestamp());
pendingInstants.add(instant);
}
| 3.26 |
hudi_HoodieAsyncService_shutdown_rdh
|
/**
* Request shutdown either forcefully or gracefully. Graceful shutdown allows the service to finish up the current
* round of work and shutdown. For graceful shutdown, it waits till the service is shutdown
*
* @param force
* Forcefully shutdown
*/
public void shutdown(boolean force) {
if ((!f1) || force) {
f1 = true;
shutdown = true;
if (executor != null) {
if (force) {
executor.shutdownNow();
} else {
executor.shutdown();
try {
// Wait for some max time after requesting shutdown
executor.awaitTermination(24, TimeUnit.HOURS);
} catch (InterruptedException ie) {
f0.error("Interrupted while waiting for shutdown", ie);
}
}
}
}
}
| 3.26 |
hudi_HoodieAsyncService_waitTillPendingAsyncServiceInstantsReducesTo_rdh
|
/**
* Wait till outstanding pending compaction/clustering reduces to the passed in value.
*
* @param numPending
* Maximum pending compactions/clustering allowed
* @throws InterruptedException
*/
public void waitTillPendingAsyncServiceInstantsReducesTo(int numPending) throws InterruptedException {
try {
queueLock.lock();
      while ((!isShutdown()) && (!hasError()) && (pendingInstants.size() > numPending)) {
consumed.await(POLLING_SECONDS, TimeUnit.SECONDS);
}
} finally {
queueLock.unlock();
    }
  }
| 3.26 |
hudi_HoodieAsyncService_fetchNextAsyncServiceInstant_rdh
|
/**
* Fetch next pending compaction/clustering instant if available.
*
* @return {@link HoodieInstant} corresponding to the next pending compaction/clustering.
* @throws InterruptedException
*/
HoodieInstant fetchNextAsyncServiceInstant() throws InterruptedException {
f0.info(String.format("Waiting for next instant up to %d seconds", POLLING_SECONDS));
HoodieInstant instant = pendingInstants.poll(POLLING_SECONDS, TimeUnit.SECONDS);
if (instant != null) {
try {
queueLock.lock();
// Signal waiting thread
consumed.signal();
} finally {
queueLock.unlock();
}
}
    return instant;
  }
| 3.26 |
hudi_Registry_getAllMetrics_rdh
|
/**
* Get all registered metrics.
*
* @param flush
* clear all metrics after this operation.
* @param prefixWithRegistryName
* prefix each metric name with the registry name.
   * @return all registered metrics, keyed by metric name
   */
static Map<String, Long> getAllMetrics(boolean flush, boolean prefixWithRegistryName) {
synchronized(Registry.class) {
      HashMap<String, Long> allMetrics = new HashMap<>();
REGISTRY_MAP.forEach((registryName, registry) -> {
allMetrics.putAll(registry.getAllCounts(prefixWithRegistryName));
if (flush) {
registry.clear();
}
});
return allMetrics;
}
}
| 3.26 |
hudi_Registry_getAllCounts_rdh
|
/**
* Get all Counter type metrics.
*/
default Map<String, Long> getAllCounts() {
return getAllCounts(false);
}
| 3.26 |
hudi_Registry_getRegistry_rdh
|
/**
* Get (or create) the registry for a provided name and given class.
*
* @param registryName
* Name of the registry.
* @param clazz
* The fully qualified name of the registry class to create.
*/
static Registry getRegistry(String registryName, String clazz) {
synchronized(Registry.class) {
if (!REGISTRY_MAP.containsKey(registryName)) {
Registry registry = ((Registry) (ReflectionUtils.loadClass(clazz, registryName)));
REGISTRY_MAP.put(registryName, registry);
}
return REGISTRY_MAP.get(registryName);
}
}
| 3.26 |
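A small sketch tying the registry helpers together; it assumes the single-argument getRegistry overload (backed by the default registry class) and the counter-style add(String, long) method on the Registry interface.

  static Map<String, Long> metricsExample() {
    Registry registry = Registry.getRegistry("HoodieTestMetrics");  // assumed single-argument factory overload
    registry.add("files.scanned", 42);                              // assumed counter-style add(String, long)
    return Registry.getAllMetrics(true, true);                      // flush after reading, prefix names with the registry name
  }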
hudi_CompactionAdminClient_repairCompaction_rdh
|
/**
* Renames delta files to make file-slices consistent with the timeline as dictated by Hoodie metadata. Use when
* compaction unschedule fails partially.
*
* This operation MUST be executed with compactions and writer turned OFF.
*
* @param compactionInstant
* Compaction Instant to be repaired
* @param dryRun
* Dry Run Mode
*/
public List<RenameOpResult> repairCompaction(String compactionInstant, int parallelism, boolean dryRun) throws Exception {
HoodieTableMetaClient metaClient = createMetaClient(false);
validateCompactionPlan(metaClient, compactionInstant, parallelism);
return new ArrayList<>();
}
| 3.26 |
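A hedged usage sketch for repairCompaction; the instant time is hypothetical and a dry run is used first to inspect what would be renamed before re-running for real.

  static void repairExample(CompactionAdminClient adminClient) throws Exception {
    // dryRun = true: only report the planned rename operations
    List<RenameOpResult> planned = adminClient.repairCompaction("20230901101530000", 2, true);  // hypothetical instant time
    planned.forEach(System.out::println);
    // once satisfied, re-run with dryRun = false (with all writers and compactions stopped)
  }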
hudi_CompactionAdminClient_unscheduleCompactionFileId_rdh
|
/**
* Remove a fileId from pending compaction. Removes the associated compaction operation and rename delta-files that
* were generated for that file-id after the compaction operation was scheduled.
*
* This operation MUST be executed with compactions and writer turned OFF.
*
* @param fgId
* FileGroupId to be unscheduled
* @param skipValidation
* Skip validation
* @param dryRun
* Dry Run Mode
*/
public List<RenameOpResult> unscheduleCompactionFileId(HoodieFileGroupId fgId, boolean skipValidation, boolean dryRun) throws Exception {
    HoodieTableMetaClient metaClient = createMetaClient(false);
    if (!dryRun) {
// Ready to remove this file-Id from compaction request
Pair<String, HoodieCompactionOperation> compactionOperationWithInstant = CompactionUtils.getAllPendingCompactionOperations(metaClient).get(fgId);
      HoodieCompactionPlan plan = CompactionUtils.getCompactionPlan(metaClient, compactionOperationWithInstant.getKey());
      List<HoodieCompactionOperation> newOps = plan.getOperations().stream()
          .filter(op -> (!op.getFileId().equals(fgId.getFileId())) && (!op.getPartitionPath().equals(fgId.getPartitionPath())))
          .collect(Collectors.toList());
if (newOps.size() == plan.getOperations().size()) {
return new ArrayList<>();
}
HoodieCompactionPlan newPlan = HoodieCompactionPlan.newBuilder().setOperations(newOps).setExtraMetadata(plan.getExtraMetadata()).build();
HoodieInstant inflight = new HoodieInstant(State.INFLIGHT, COMPACTION_ACTION, compactionOperationWithInstant.getLeft());
Path inflightPath = new Path(metaClient.getMetaPath(), inflight.getFileName());
if (metaClient.getFs().exists(inflightPath)) {
// revert if in inflight state
metaClient.getActiveTimeline().revertInstantFromInflightToRequested(inflight);
}
// Overwrite compaction plan with updated info
metaClient.getActiveTimeline().saveToCompactionRequested(new HoodieInstant(State.REQUESTED, COMPACTION_ACTION, compactionOperationWithInstant.getLeft()), TimelineMetadataUtils.serializeCompactionPlan(newPlan), true);
    }
    return new ArrayList<>();
}
| 3.26 |
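A hedged sketch of unscheduling a single file group; the partition path and file id are placeholders, and the dry-run flag is set so the plan is left untouched on the first pass.

  static void unscheduleExample(CompactionAdminClient adminClient) throws Exception {
    HoodieFileGroupId fgId = new HoodieFileGroupId("2021/09/01", "a1b2c3-file-id");  // hypothetical partition path and file id
    // dryRun = true leaves the plan untouched; run again with false to persist the updated plan
    adminClient.unscheduleCompactionFileId(fgId, false, true);
  }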
hudi_CompactionAdminClient_runRenamingOps_rdh
|
/**
* Execute Renaming operation.
*
* @param metaClient
* HoodieTable MetaClient
* @param renameActions
* List of rename operations
*/
private List<RenameOpResult> runRenamingOps(HoodieTableMetaClient metaClient, List<Pair<HoodieLogFile, HoodieLogFile>> renameActions, int parallelism, boolean dryRun) {
if (renameActions.isEmpty()) {
LOG.info("No renaming of log-files needed. Proceeding to removing file-id from compaction-plan");
return new ArrayList<>();
} else {
LOG.info("The following compaction renaming operations needs to be performed to un-schedule");
if (!dryRun) {
        context.setJobStatus(this.getClass().getSimpleName(), "Execute unschedule operations: " + config.getTableName());
        return context.map(renameActions, lfPair -> {
try {
            LOG.info((("RENAME " + lfPair.getLeft().getPath()) + " => ") + lfPair.getRight().getPath());
renameLogFile(metaClient, lfPair.getLeft(), lfPair.getRight());
return new RenameOpResult(lfPair, true, Option.empty());
          } catch (IOException e) {
LOG.error("Error renaming log file", e);
LOG.error(("\n\n\n***NOTE Compaction is in inconsistent state. Try running \"compaction repair " + lfPair.getLeft().getDeltaCommitTime()) + "\" to recover from failure ***\n\n\n");
return new RenameOpResult(lfPair, false, Option.of(e));
}
}, parallelism);
} else {
LOG.info("Dry-Run Mode activated for rename operations");
return renameActions.parallelStream().map(lfPair -> new RenameOpResult(lfPair, false, false, Option.empty())).collect(Collectors.toList());
}
}
}
| 3.26 |
hudi_CompactionAdminClient_getCompactionPlan_rdh
|
/**
* Construction Compaction Plan from compaction instant.
*/
private static HoodieCompactionPlan getCompactionPlan(HoodieTableMetaClient metaClient, String compactionInstant) throws IOException {
return TimelineMetadataUtils.deserializeCompactionPlan(metaClient.getActiveTimeline().readCompactionPlanAsBytes(HoodieTimeline.getCompactionRequestedInstant(compactionInstant)).get());
}
| 3.26 |
hudi_CompactionAdminClient_unscheduleCompactionPlan_rdh
|
/**
* Un-schedules compaction plan. Remove All compaction operation scheduled.
*
* @param compactionInstant
* Compaction Instant
* @param skipValidation
* Skip validation step
* @param parallelism
* Parallelism
* @param dryRun
* Dry Run
   */
  public List<RenameOpResult> unscheduleCompactionPlan(String compactionInstant, boolean skipValidation, int parallelism, boolean dryRun) throws Exception {
HoodieTableMetaClient metaClient = createMetaClient(false);
// Only if all operations are successfully executed
if (!dryRun) {
// Overwrite compaction request with empty compaction operations
      HoodieInstant inflight = new HoodieInstant(State.INFLIGHT, COMPACTION_ACTION, compactionInstant);
Path inflightPath = new Path(metaClient.getMetaPath(), inflight.getFileName());
if (metaClient.getFs().exists(inflightPath)) {
// We need to rollback data-files because of this inflight compaction before unscheduling
throw new IllegalStateException("Please rollback the inflight compaction before unscheduling");
}
// Leave the trace in aux folder but delete from metapath.
// TODO: Add a rollback instant but for compaction
HoodieInstant instant = new HoodieInstant(State.REQUESTED, COMPACTION_ACTION, compactionInstant);
boolean deleted = metaClient.getFs().delete(new Path(metaClient.getMetaPath(), instant.getFileName()), false);
ValidationUtils.checkArgument(deleted, "Unable to delete compaction instant.");
}
return new ArrayList<>();
}
| 3.26 |
hudi_CompactionAdminClient_validateCompactionOperation_rdh
|
/**
* Check if a compaction operation is valid.
*
* @param metaClient
* Hoodie Table Meta client
* @param compactionInstant
* Compaction Instant
* @param operation
* Compaction Operation
* @param fsViewOpt
* File System View
*/
  private ValidationOpResult validateCompactionOperation(HoodieTableMetaClient metaClient, String compactionInstant,
      CompactionOperation operation, Option<HoodieTableFileSystemView> fsViewOpt) throws IOException {
HoodieTableFileSystemView fileSystemView = (fsViewOpt.isPresent()) ? fsViewOpt.get() : new HoodieTableFileSystemView(metaClient, metaClient.getCommitsAndCompactionTimeline());
Option<HoodieInstant> lastInstant = metaClient.getCommitsAndCompactionTimeline().lastInstant();
try {
if (lastInstant.isPresent()) {
Option<FileSlice> fileSliceOptional = Option.fromJavaOptional(fileSystemView.getLatestUnCompactedFileSlices(operation.getPartitionPath()).filter(fs -> fs.getFileId().equals(operation.getFileId())).findFirst());
if (fileSliceOptional.isPresent()) {
FileSlice fs = fileSliceOptional.get();
Option<HoodieBaseFile> df = fs.getBaseFile();
if (operation.getDataFileName().isPresent()) {
String expPath = metaClient.getFs().getFileStatus(new Path(FSUtils.getPartitionPath(metaClient.getBasePath(), operation.getPartitionPath()), new Path(operation.getDataFileName().get()))).getPath().toString();
ValidationUtils.checkArgument(df.isPresent(), (("Data File must be present. File Slice was : " + fs) + ", operation :") + operation);
ValidationUtils.checkArgument(df.get().getPath().equals(expPath), (("Base Path in operation is specified as " + expPath) + " but got path ") + df.get().getPath());
}
          Set<HoodieLogFile> logFilesInFileSlice = fs.getLogFiles().collect(Collectors.toSet());
          Set<HoodieLogFile> logFilesInCompactionOp = operation.getDeltaFileNames().stream().map(dp -> {
try {
FileStatus[] fileStatuses = metaClient.getFs().listStatus(new Path(FSUtils.getPartitionPath(metaClient.getBasePath(), operation.getPartitionPath()), new Path(dp)));
ValidationUtils.checkArgument(fileStatuses.length == 1, "Expect only 1 file-status");
return new HoodieLogFile(fileStatuses[0]);
} catch (FileNotFoundException fe) {
throw new CompactionValidationException(fe.getMessage());
} catch (IOException ioe) {
              throw new HoodieIOException(ioe.getMessage(), ioe);
}
}).collect(Collectors.toSet());
Set<HoodieLogFile> missing = logFilesInCompactionOp.stream().filter(lf -> !logFilesInFileSlice.contains(lf)).collect(Collectors.toSet());
          ValidationUtils.checkArgument(missing.isEmpty(), (((("All log files specified in compaction operation is not present. Missing :" + missing) + ", Exp :") + logFilesInCompactionOp) + ", Got :") + logFilesInFileSlice);
Set<HoodieLogFile> diff = logFilesInFileSlice.stream().filter(lf -> !logFilesInCompactionOp.contains(lf)).collect(Collectors.toSet());
ValidationUtils.checkArgument(diff.stream().allMatch(lf -> HoodieTimeline.compareTimestamps(lf.getDeltaCommitTime(), GREATER_THAN_OR_EQUALS, compactionInstant)), ("There are some log-files which are neither specified in compaction plan " + "nor present after compaction request instant. Some of these :") + diff);
        } else {
          throw new CompactionValidationException(("Unable to find file-slice for file-id (" + operation.getFileId()) + " Compaction operation is invalid.");
        }
      } else {
throw new CompactionValidationException("Unable to find any committed instant. Compaction Operation may be pointing to stale file-slices");
}
} catch (CompactionValidationException | IllegalArgumentException e) {
return new ValidationOpResult(operation, false, Option.of(e));
}
return new ValidationOpResult(operation, true, Option.empty());
}
| 3.26 |
hudi_CompactionAdminClient_validateCompactionPlan_rdh
|
/**
* Validate all compaction operations in a compaction plan. Verifies the file-slices are consistent with corresponding
* compaction operations.
*
* @param metaClient
* Hoodie Table Meta Client
* @param compactionInstant
* Compaction Instant
*/
public List<ValidationOpResult> validateCompactionPlan(HoodieTableMetaClient metaClient, String compactionInstant, int parallelism) throws IOException {
    HoodieCompactionPlan plan = getCompactionPlan(metaClient, compactionInstant);
    HoodieTableFileSystemView fsView = new HoodieTableFileSystemView(metaClient, metaClient.getCommitsAndCompactionTimeline());
if (plan.getOperations() != null) {
List<CompactionOperation> ops = plan.getOperations().stream().map(CompactionOperation::convertFromAvroRecordInstance).collect(Collectors.toList());
context.setJobStatus(this.getClass().getSimpleName(), "Validate compaction operations: " + config.getTableName());
return context.map(ops, op -> {
try {
return validateCompactionOperation(metaClient, compactionInstant, op, Option.of(fsView));
} catch (IOException e) {
          throw new HoodieIOException(e.getMessage(), e);
}
}, parallelism);
}
return new ArrayList<>();
}
| 3.26 |
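A hedged sketch of validating a pending compaction plan; the instant time is hypothetical and the isSuccess() accessor on the result type is an assumption.

  static void validateExample(CompactionAdminClient adminClient, HoodieTableMetaClient metaClient) throws IOException {
    List<ValidationOpResult> results = adminClient.validateCompactionPlan(metaClient, "20230901101530000", 2);  // hypothetical instant
    long invalid = results.stream().filter(r -> !r.isSuccess()).count();  // assumes the isSuccess() accessor on the result type
    System.out.println(invalid + " invalid compaction operations found");
  }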
hudi_CompactionAdminClient_renameLogFile_rdh
|
/**
* Rename log files. This is done for un-scheduling a pending compaction operation NOTE: Can only be used safely when
* no writer (ingestion/compaction) is running.
*
* @param metaClient
* Hoodie Table Meta-Client
* @param oldLogFile
* Old Log File
* @param newLogFile
* New Log File
*/
protected static void renameLogFile(HoodieTableMetaClient metaClient, HoodieLogFile oldLogFile, HoodieLogFile newLogFile) throws IOException {
FileStatus[] statuses = metaClient.getFs().listStatus(oldLogFile.getPath());
ValidationUtils.checkArgument(statuses.length == 1, "Only one status must be present");
ValidationUtils.checkArgument(statuses[0].isFile(), "Source File must exist");
    ValidationUtils.checkArgument(oldLogFile.getPath().getParent().equals(newLogFile.getPath().getParent()), "Log file must only be moved within the parent directory");
    metaClient.getFs().rename(oldLogFile.getPath(), newLogFile.getPath());
}
| 3.26 |