name (stringlengths 12-178) | code_snippet (stringlengths 8-36.5k) | score (float64 3.26-3.68) |
---|---|---|
hudi_ZeroToOneUpgradeHandler_getFileNameForMarkerFromLogFile_rdh
|
/**
 * Generates the marker file name from an existing log file path.
 * Log file format: partitionpath/.fileid_baseInstant.log.writetoken
 * Marker file format: partitionpath/fileId_writetoken_baseinstant.basefileExtn.marker.APPEND
 *
 * @param logFilePath
 * log file path for which the marker file name needs to be generated.
 * @param table
 * {@link HoodieTable} instance to use
 * @return the generated marker file name.
 */
private static String getFileNameForMarkerFromLogFile(String logFilePath, HoodieTable<?, ?, ?, ?> table) {
Path logPath = new Path(table.getMetaClient().getBasePath(), logFilePath);
String v13 = FSUtils.getFileIdFromLogPath(logPath);
String deltaInstant = FSUtils.getDeltaCommitTimeFromLogPath(logPath);
String writeToken = FSUtils.getWriteTokenFromLogPath(logPath);
return FSUtils.makeBaseFileName(deltaInstant, writeToken, v13, table.getBaseFileExtension());
}
| 3.26 |
hudi_ZeroToOneUpgradeHandler_recreateMarkers_rdh
|
/**
 * Recreate markers in the new format.
 * Step 1: Delete existing markers.
 * Step 2: Collect all rollback file info.
 * Step 3: Recreate markers for all files of interest.
*
* @param commitInstantTime
* instant of interest for which markers need to be recreated.
* @param table
* instance of {@link HoodieTable} to use
* @param context
* instance of {@link HoodieEngineContext} to use
* @throws HoodieRollbackException
* on any exception during upgrade.
*/
protected void recreateMarkers(final String commitInstantTime, HoodieTable table, HoodieEngineContext context, int parallelism) throws HoodieRollbackException {
try {
// fetch hoodie instant
Option<HoodieInstant> commitInstantOpt = Option.fromJavaOptional(table.getActiveTimeline().getCommitsTimeline().getInstantsAsStream().filter(instant -> HoodieActiveTimeline.EQUALS.test(instant.getTimestamp(), commitInstantTime)).findFirst());
if (commitInstantOpt.isPresent()) {
// delete existing markers
WriteMarkers writeMarkers = WriteMarkersFactory.get(MarkerType.DIRECT, table, commitInstantTime);
writeMarkers.quietDeleteMarkerDir(context, parallelism);
// generate rollback stats
List<HoodieRollbackStat> rollbackStats = getListBasedRollBackStats(table, context, commitInstantOpt);
// recreate markers adhering to marker based rollback
for (HoodieRollbackStat rollbackStat : rollbackStats) {
for (String path : rollbackStat.getSuccessDeleteFiles()) {
String dataFileName = path.substring(path.lastIndexOf("/") + 1);
// not feasible to differentiate MERGE from CREATE. hence creating with MERGE IOType for all base files.
writeMarkers.create(rollbackStat.getPartitionPath(), dataFileName, IOType.MERGE);
}
for (FileStatus fileStatus : rollbackStat.getCommandBlocksCount().keySet()) {
writeMarkers.create(rollbackStat.getPartitionPath(), getFileNameForMarkerFromLogFile(fileStatus.getPath().toString(), table), IOType.APPEND);
}
}
}
} catch (Exception e) {
throw new HoodieRollbackException("Exception thrown while upgrading Hoodie Table from version 0 to 1", e);
}
}
| 3.26 |
hudi_HoodieWriteHandle_createMarkerFile_rdh
|
/**
* Creates an empty marker file corresponding to storage writer path.
*
* @param partitionPath
 * Partition path
 * @param dataFileName
 * Name of the data file for which the marker is created
 */
protected void createMarkerFile(String partitionPath, String dataFileName) {
WriteMarkersFactory.get(config.getMarkersType(), hoodieTable, instantTime).create(partitionPath, dataFileName, getIOType(), config, fileId, hoodieTable.getMetaClient().getActiveTimeline());
}
| 3.26 |
hudi_HoodieWriteHandle_doWrite_rdh
|
/**
* Perform the actual writing of the given record into the backing file.
*/
protected void doWrite(HoodieRecord record, Schema schema, TypedProperties props) {
// NO_OP
}
| 3.26 |
hudi_HoodieWriteHandle_canWrite_rdh
|
/**
 * Determines whether we can accept the incoming record into the current file, depending on:
 * <p>
 * - Whether it belongs to the same partitionPath as existing records
 * - Whether the current file's written bytes are less than the max file size
*/
public boolean canWrite(HoodieRecord record) {
return false;
}
| 3.26 |
hudi_HoodieWriteHandle_makeNewFilePath_rdh
|
/**
* Make new file path with given file name.
*/
protected Path makeNewFilePath(String partitionPath, String fileName) {
String relativePath = new Path((partitionPath.isEmpty() ? "" : partitionPath + "/") + fileName).toString();
return new Path(config.getBasePath(), relativePath);
}
| 3.26 |
hudi_HoodieWriteHandle_write_rdh
|
/**
* Perform the actual writing of the given record into the backing file.
*/
public void write(HoodieRecord record, Schema schema, TypedProperties props) {
doWrite(record, schema, props);
}
| 3.26 |
hudi_HoodieWriteHandle_makeWriteToken_rdh
|
/**
* Generate a write token based on the currently running spark task and its place in the spark dag.
*/
private String makeWriteToken() {
return FSUtils.makeWriteToken(getPartitionId(), getStageId(), getAttemptId());
}
| 3.26 |
hudi_HoodieWriteHandle_getLogCreationCallback_rdh
|
/**
* Returns a log creation hook impl.
*/
protected LogFileCreationCallback getLogCreationCallback() {
return new LogFileCreationCallback() {
@Override
public boolean preFileCreation(HoodieLogFile logFile) {
WriteMarkers writeMarkers = WriteMarkersFactory.get(config.getMarkersType(), hoodieTable, instantTime);
return writeMarkers.createIfNotExists(partitionPath, logFile.getFileName(), IOType.CREATE, config, fileId, hoodieTable.getMetaClient().getActiveTimeline()).isPresent();
}
};
}
| 3.26 |
hudi_BootstrapExecutor_syncHive_rdh
|
/**
* Sync to Hive.
*/
private void syncHive() {
if (cfg.enableHiveSync || cfg.enableMetaSync) {
TypedProperties metaProps = new TypedProperties();
metaProps.putAll(props);
metaProps.put(META_SYNC_BASE_PATH.key(), cfg.targetBasePath);
metaProps.put(META_SYNC_BASE_FILE_FORMAT.key(), cfg.baseFileFormat);
if (props.getBoolean(HIVE_SYNC_BUCKET_SYNC.key(), HIVE_SYNC_BUCKET_SYNC.defaultValue())) {
metaProps.put(HIVE_SYNC_BUCKET_SYNC_SPEC.key(), HiveSyncConfig.getBucketSpec(props.getString(HoodieIndexConfig.BUCKET_INDEX_HASH_FIELD.key()), props.getInteger(HoodieIndexConfig.BUCKET_INDEX_NUM_BUCKETS.key())));
}
try (HiveSyncTool hiveSyncTool = new HiveSyncTool(metaProps, configuration)) {
hiveSyncTool.syncHoodieTable();
}
}
}
| 3.26 |
hudi_RollbackNode_execute_rdh
|
/**
* Method helps to rollback the last commit instant in the timeline, if it has one.
*
* @param executionContext
* Execution context to perform this rollback
* @param curItrCount
* current iteration count.
* @throws Exception
* will be thrown if any error occurred
*/
@Override
public void execute(ExecutionContext executionContext, int curItrCount) throws Exception {
int numRollbacks = config.getNumRollbacks();
log.info(String.format("Executing rollback node %s with %d rollbacks", this.getName(), numRollbacks));
// Can only be done with an instantiation of a new WriteClient hence cannot be done during DeltaStreamer
// testing for now
HoodieTableMetaClient metaClient = HoodieTableMetaClient.builder().setConf(executionContext.getHoodieTestSuiteWriter().getConfiguration()).setBasePath(executionContext.getHoodieTestSuiteWriter().getCfg().targetBasePath).build();
for (int i = 0; i < numRollbacks; i++) {
metaClient.reloadActiveTimeline();
Option<HoodieInstant> lastInstant = metaClient.getActiveTimeline().getCommitsTimeline().lastInstant();
if (lastInstant.isPresent()) {
log.info("Rolling back last instant {}", lastInstant.get());log.info("Cleaning up generated data for the instant being rolled back {}", lastInstant.get());
ValidationUtils.checkArgument(getStringWithAltKeys(executionContext.getWriterContext().getProps(), DFSPathSelectorConfig.SOURCE_INPUT_SELECTOR, DFSPathSelector.class.getName()).equalsIgnoreCase(DFSTestSuitePathSelector.class.getName()), "Test Suite only supports DFSTestSuitePathSelector");
executionContext.getHoodieTestSuiteWriter().getWriteClient(this).rollback(lastInstant.get().getTimestamp());
metaClient.getFs().delete(new Path(executionContext.getWriterContext().getCfg().inputBasePath, executionContext.getWriterContext().getHoodieTestSuiteWriter().getLastCheckpoint().orElse("")), true);
this.result = lastInstant;
}
}
}
| 3.26 |
hudi_OptionsResolver_isInsertOperation_rdh
|
/**
* Returns whether the table operation is 'insert'.
*/
public static boolean isInsertOperation(Configuration conf) {
WriteOperationType operationType = WriteOperationType.fromValue(conf.getString(FlinkOptions.OPERATION));
return operationType == WriteOperationType.INSERT;
}
| 3.26 |
hudi_OptionsResolver_isSchemaEvolutionEnabled_rdh
|
/**
 * Returns whether comprehensive schema evolution is enabled.
*/
public static boolean isSchemaEvolutionEnabled(Configuration conf) {
return conf.getBoolean(HoodieCommonConfig.SCHEMA_EVOLUTION_ENABLE.key(), HoodieCommonConfig.SCHEMA_EVOLUTION_ENABLE.defaultValue());
}
| 3.26 |
hudi_OptionsResolver_insertClustering_rdh
|
/**
* Returns whether insert clustering is allowed with given configuration {@code conf}.
*/
public static boolean insertClustering(Configuration conf) {
return (isCowTable(conf) && isInsertOperation(conf)) && conf.getBoolean(FlinkOptions.INSERT_CLUSTER);
}
| 3.26 |
hudi_OptionsResolver_m2_rdh
|
/**
* Returns whether the query is incremental.
 */
public static boolean m2(Configuration conf) {
return conf.getOptional(FlinkOptions.READ_START_COMMIT).isPresent() || conf.getOptional(FlinkOptions.READ_END_COMMIT).isPresent();
}
| 3.26 |
hudi_OptionsResolver_needsAsyncCompaction_rdh
|
/**
* Returns whether there is need to schedule the async compaction.
*
* @param conf
* The flink configuration.
*/
public static boolean needsAsyncCompaction(Configuration conf) {
return OptionsResolver.isMorTable(conf) && conf.getBoolean(FlinkOptions.COMPACTION_ASYNC_ENABLED);
}
| 3.26 |
hudi_OptionsResolver_isMorTable_rdh
|
/**
* Returns whether it is a MERGE_ON_READ table.
*/
public static boolean isMorTable(Map<String, String> options) {
return options.getOrDefault(FlinkOptions.TABLE_TYPE.key(), FlinkOptions.TABLE_TYPE.defaultValue()).equalsIgnoreCase(FlinkOptions.TABLE_TYPE_MERGE_ON_READ);
}
| 3.26 |
hudi_OptionsResolver_needsScheduleCompaction_rdh
|
/**
* Returns whether there is need to schedule the compaction plan.
*
* @param conf
* The flink configuration.
*/
public static boolean needsScheduleCompaction(Configuration conf) {
return OptionsResolver.isMorTable(conf) && conf.getBoolean(FlinkOptions.COMPACTION_SCHEDULE_ENABLED);
}
| 3.26 |
hudi_OptionsResolver_hasReadCommitsLimit_rdh
|
/**
* Returns whether the read commits limit is specified.
*/
public static boolean hasReadCommitsLimit(Configuration conf) {
return conf.contains(FlinkOptions.READ_COMMITS_LIMIT);
}
| 3.26 |
hudi_OptionsResolver_allOptions_rdh
|
// -------------------------------------------------------------------------
// Utilities
// -------------------------------------------------------------------------
/**
* Returns all the config options with the given class {@code clazz}.
*/
public static List<ConfigOption<?>> allOptions(Class<?> clazz) {
Field[] declaredFields = clazz.getDeclaredFields();
List<ConfigOption<?>> options = new ArrayList<>();
for (Field field : declaredFields) {
if (Modifier.isStatic(field.getModifiers()) && field.getType().equals(ConfigOption.class)) {
try {
options.add(((ConfigOption<?>) (field.get(ConfigOption.class))));
} catch (IllegalAccessException e) {
throw new HoodieException("Error while fetching static config option", e);
}
}
}
return options;
}
| 3.26 |
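A minimal usage sketch for allOptions (an illustration, not part of the dataset; it assumes Flink's ConfigOption#key() accessor and that FlinkOptions declares its options as public static ConfigOption fields):

// Sketch: enumerate every ConfigOption declared on FlinkOptions and print its key.
List<ConfigOption<?>> options = OptionsResolver.allOptions(FlinkOptions.class);
for (ConfigOption<?> option : options) {
  System.out.println(option.key());
}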
hudi_OptionsResolver_isReadByTxnCompletionTime_rdh
|
/**
* Returns whether to read the instants using completion time.
*
* <p>A Hudi instant contains both the txn start time and completion time, for incremental subscription
* of the source reader, using completion time to filter the candidate instants can avoid data loss
* in scenarios like multiple writers.
*/
public static boolean isReadByTxnCompletionTime(Configuration conf) {
HollowCommitHandling handlingMode =
HollowCommitHandling.valueOf(conf.getString(INCREMENTAL_READ_HANDLE_HOLLOW_COMMIT.key(), INCREMENTAL_READ_HANDLE_HOLLOW_COMMIT.defaultValue()));
return handlingMode == HollowCommitHandling.USE_TRANSITION_TIME;
}
| 3.26 |
hudi_OptionsResolver_isSimpleBucketIndexType_rdh
|
/**
* Returns whether the table index is simple bucket index.
*/
public static boolean isSimpleBucketIndexType(Configuration conf) {
return isBucketIndexType(conf) && getBucketEngineType(conf).equals(BucketIndexEngineType.SIMPLE);
}
| 3.26 |
hudi_OptionsResolver_isNonBlockingConcurrencyControl_rdh
|
/**
* Returns whether this is non-blocking concurrency control.
*/
public static boolean isNonBlockingConcurrencyControl(Configuration config) {
return WriteConcurrencyMode.isNonBlockingConcurrencyControl(config.getString(HoodieWriteConfig.WRITE_CONCURRENCY_MODE.key(), HoodieWriteConfig.WRITE_CONCURRENCY_MODE.defaultValue()));
}
| 3.26 |
hudi_OptionsResolver_isPartitionedTable_rdh
|
/**
* Returns whether the table is partitioned.
*/
public static boolean isPartitionedTable(Configuration conf) {
return FilePathUtils.extractPartitionKeys(conf).length > 0;
}
| 3.26 |
hudi_OptionsResolver_isInsertOverwrite_rdh
|
/**
* Returns whether the operation is INSERT OVERWRITE (table or partition).
*/
public static boolean isInsertOverwrite(Configuration conf) {
return conf.getString(FlinkOptions.OPERATION).equalsIgnoreCase(WriteOperationType.INSERT_OVERWRITE_TABLE.value()) || conf.getString(FlinkOptions.OPERATION).equalsIgnoreCase(WriteOperationType.INSERT_OVERWRITE.value());
}
| 3.26 |
hudi_OptionsResolver_isDeltaTimeCompaction_rdh
|
/**
* Returns whether the compaction strategy is based on elapsed delta time.
*/
public static boolean isDeltaTimeCompaction(Configuration conf) {
final String strategy = conf.getString(FlinkOptions.COMPACTION_TRIGGER_STRATEGY).toLowerCase(Locale.ROOT);
return FlinkOptions.TIME_ELAPSED.equals(strategy) || FlinkOptions.NUM_OR_TIME.equals(strategy);
}
| 3.26 |
hudi_OptionsResolver_getIndexType_rdh
|
/**
* Returns the index type.
*/
public static IndexType getIndexType(Configuration conf) {
return HoodieIndex.IndexType.valueOf(conf.getString(FlinkOptions.INDEX_TYPE));
}
| 3.26 |
hudi_OptionsResolver_needsAsyncClustering_rdh
|
/**
* Returns whether there is need to schedule the async clustering.
*
* @param conf
* The flink configuration.
*/
public static boolean needsAsyncClustering(Configuration conf) {
return isInsertOperation(conf) && conf.getBoolean(FlinkOptions.CLUSTERING_ASYNC_ENABLED);
}
| 3.26 |
hudi_OptionsResolver_getIndexKeys_rdh
|
/**
* Returns the index key field values.
*/
public static String[] getIndexKeys(Configuration conf) {
return getIndexKeyField(conf).split(",");
}
| 3.26 |
hudi_OptionsResolver_getDefaultPlanStrategyClassName_rdh
|
/**
* Returns the default plan strategy class.
*/
public static String getDefaultPlanStrategyClassName(Configuration conf) {
return OptionsResolver.m1(conf) ? FlinkConsistentBucketClusteringPlanStrategy.class.getName() : FlinkOptions.CLUSTERING_PLAN_STRATEGY_CLASS.defaultValue();
}
| 3.26 |
hudi_OptionsResolver_getConflictResolutionStrategy_rdh
|
/**
* Returns the conflict resolution strategy.
*/
public static ConflictResolutionStrategy getConflictResolutionStrategy(Configuration conf) {
return isBucketIndexType(conf) ? new BucketIndexConcurrentFileWritesConflictResolutionStrategy() : new SimpleConcurrentFileWritesConflictResolutionStrategy();
}
| 3.26 |
hudi_OptionsResolver_getIndexKeyField_rdh
|
/**
* Returns the index key field.
*/
public static String getIndexKeyField(Configuration conf) {
return conf.getString(FlinkOptions.INDEX_KEY_FIELD, conf.getString(FlinkOptions.RECORD_KEY_FIELD));
}
| 3.26 |
hudi_OptionsResolver_isBulkInsertOperation_rdh
|
/**
* Returns whether the table operation is 'bulk_insert'.
*/
public static boolean isBulkInsertOperation(Configuration conf) {
WriteOperationType operationType = WriteOperationType.fromValue(conf.getString(FlinkOptions.OPERATION));
return operationType == WriteOperationType.BULK_INSERT;
}
| 3.26 |
hudi_OptionsResolver_sortClusteringEnabled_rdh
|
/**
* Returns whether the clustering sort is enabled.
*/
public static boolean sortClusteringEnabled(Configuration conf) {
return !StringUtils.isNullOrEmpty(conf.getString(FlinkOptions.CLUSTERING_SORT_COLUMNS));
}
| 3.26 |
hudi_OptionsResolver_getCDCSupplementalLoggingMode_rdh
|
/**
* Returns the supplemental logging mode.
*/
public static HoodieCDCSupplementalLoggingMode getCDCSupplementalLoggingMode(Configuration conf) {
String mode = conf.getString(FlinkOptions.SUPPLEMENTAL_LOGGING_MODE).toUpperCase();
return HoodieCDCSupplementalLoggingMode.valueOf(mode);
}
| 3.26 |
hudi_OptionsResolver_overwriteDynamicPartition_rdh
|
/**
* Returns whether the operation is INSERT OVERWRITE dynamic partition.
*/
public static boolean overwriteDynamicPartition(Configuration conf) {
return conf.getString(FlinkOptions.OPERATION).equalsIgnoreCase(WriteOperationType.INSERT_OVERWRITE.value()) || conf.getString(FlinkOptions.WRITE_PARTITION_OVERWRITE_MODE).equalsIgnoreCase(PartitionOverwriteMode.DYNAMIC.name());
}
| 3.26 |
hudi_OptionsResolver_emitChangelog_rdh
|
/**
* Returns whether the source should emit changelog.
*
* @return true if the source is read as streaming with changelog mode enabled
 */
public static boolean emitChangelog(Configuration conf) {
return ((conf.getBoolean(FlinkOptions.READ_AS_STREAMING) && conf.getBoolean(FlinkOptions.CHANGELOG_ENABLED)) || (conf.getBoolean(FlinkOptions.READ_AS_STREAMING) && conf.getBoolean(FlinkOptions.CDC_ENABLED))) || (m2(conf) && conf.getBoolean(FlinkOptions.CDC_ENABLED));
}
| 3.26 |
hudi_OptionsResolver_isCowTable_rdh
|
/**
* Returns whether it is a COPY_ON_WRITE table.
*/
public static boolean isCowTable(Configuration conf) {
return conf.getString(FlinkOptions.TABLE_TYPE).toUpperCase(Locale.ROOT).equals(FlinkOptions.TABLE_TYPE_COPY_ON_WRITE);
}
| 3.26 |
hudi_OptionsResolver_isMultiWriter_rdh
|
/**
* Returns whether multi-writer is enabled.
*/
public static boolean isMultiWriter(Configuration conf) {
return WriteConcurrencyMode.supportsMultiWriter(conf.getString(HoodieWriteConfig.WRITE_CONCURRENCY_MODE.key(), HoodieWriteConfig.WRITE_CONCURRENCY_MODE.defaultValue()));
}
| 3.26 |
hudi_OptionsResolver_m1_rdh
|
/**
* Returns whether the table index is consistent bucket index.
*/
public static boolean m1(Configuration conf) {
return isBucketIndexType(conf) && getBucketEngineType(conf).equals(BucketIndexEngineType.CONSISTENT_HASHING);
}
| 3.26 |
hudi_OptionsResolver_isLockRequired_rdh
|
/**
* Returns whether the writer txn should be guarded by lock.
*/
public static boolean isLockRequired(Configuration conf) {
return conf.getBoolean(FlinkOptions.METADATA_ENABLED) || isMultiWriter(conf);
}
| 3.26 |
hudi_OptionsResolver_getPreCombineField_rdh
|
/**
* Returns the preCombine field
* or null if the value is set as {@link FlinkOptions#NO_PRE_COMBINE}.
*/
public static String getPreCombineField(Configuration conf) {
final String preCombineField = conf.getString(FlinkOptions.PRECOMBINE_FIELD);
return preCombineField.equals(FlinkOptions.NO_PRE_COMBINE) ? null : preCombineField;
}
| 3.26 |
hudi_OptionsResolver_isAppendMode_rdh
|
/**
 * Returns whether the insert operation runs in append mode (i.e. insert without inline clustering) with the given configuration {@code conf}.
 */
public static boolean isAppendMode(Configuration conf) {
// 1. inline clustering is supported for COW table;
// 2. async clustering is supported for both COW and MOR table
return isInsertOperation(conf) && ((isCowTable(conf) && (!conf.getBoolean(FlinkOptions.INSERT_CLUSTER))) || isMorTable(conf));
}
| 3.26 |
hudi_OptionsResolver_m0_rdh
|
/**
* Returns whether the payload clazz is {@link DefaultHoodieRecordPayload}.
*/
public static boolean m0(Configuration conf) {
return conf.getString(FlinkOptions.PAYLOAD_CLASS_NAME).contains(DefaultHoodieRecordPayload.class.getSimpleName());
}
| 3.26 |
hudi_OptionsResolver_isConsistentLogicalTimestampEnabled_rdh
|
/**
* Returns whether consistent value will be generated for a logical timestamp type column.
*/
public static boolean isConsistentLogicalTimestampEnabled(Configuration conf) {
return conf.getBoolean(KeyGeneratorOptions.KEYGENERATOR_CONSISTENT_LOGICAL_TIMESTAMP_ENABLED.key(), Boolean.parseBoolean(KeyGeneratorOptions.KEYGENERATOR_CONSISTENT_LOGICAL_TIMESTAMP_ENABLED.defaultValue()));
}
| 3.26 |
hudi_HoodieMergedLogRecordScanner_newBuilder_rdh
|
/**
* Returns the builder for {@code HoodieMergedLogRecordScanner}.
*/
public static HoodieMergedLogRecordScanner.Builder newBuilder() {
return new Builder();
}
| 3.26 |
hudi_HoodieMergedLogRecordScanner_scanByKeyPrefixes_rdh
|
/**
* Provides incremental scanning capability where only keys matching provided key-prefixes
* will be looked up in the delta-log files, scanned and subsequently materialized into
* the internal cache
*
* @param keyPrefixes
* to be looked up
*/
public void scanByKeyPrefixes(List<String> keyPrefixes) {
// We can skip scanning in case reader is in full-scan mode, in which case all blocks
// are processed upfront (no additional scanning is necessary)
if (forceFullScan) {
return;
}
// NOTE: We can skip scanning the prefixes that have already been covered by the previous scans
List<String> missingKeyPrefixes = keyPrefixes.stream()
.filter(keyPrefix -> scannedPrefixes.stream().noneMatch(keyPrefix::startsWith))
.collect(Collectors.toList());
if (missingKeyPrefixes.isEmpty()) {
// All the required records are already fetched, no-op
return;
}
// NOTE: When looking up by key-prefixes unfortunately we can't short-circuit
// and will have to scan every time as we can't know (based on just
// the records cached) whether particular prefix was scanned or just records
// matching the prefix looked up (by [[scanByFullKeys]] API)
scanInternal(Option.of(KeySpec.prefixKeySpec(missingKeyPrefixes)), false);
scannedPrefixes.addAll(missingKeyPrefixes);
}
| 3.26 |
hudi_HoodieMergedLogRecordScanner_scanByFullKeys_rdh
|
/**
* Provides incremental scanning capability where only provided keys will be looked
* up in the delta-log files, scanned and subsequently materialized into the internal
* cache
*
* @param keys
* to be looked up
*/
public void scanByFullKeys(List<String> keys) {
// We can skip scanning in case reader is in full-scan mode, in which case all blocks
// are processed upfront (no additional scanning is necessary)
if (forceFullScan) {
return; // no-op
}
List<String> missingKeys = keys.stream().filter(key -> !records.containsKey(key)).collect(Collectors.toList());
if (missingKeys.isEmpty()) {
// All the required records are already fetched, no-op
return;
}
scanInternal(Option.of(KeySpec.fullKeySpec(missingKeys)), false);
}
| 3.26 |
hudi_HoodieMergedLogRecordScanner_scan_rdh
|
/**
* Scans delta-log files processing blocks
*/
public final void scan() {
scan(false);
}
| 3.26 |
hudi_HoodieMetadataFileSystemView_listPartition_rdh
|
/**
* Return all the files in the partition by reading from the Metadata Table.
*
* @param partitionPath
* The absolute path of the partition
* @throws IOException
*/
@Override
protected FileStatus[] listPartition(Path partitionPath) throws IOException {
return tableMetadata.getAllFilesInPartition(partitionPath);
}
| 3.26 |
hudi_ManifestFileWriter_writeManifestFile_rdh
|
/**
* Write all the latest base file names to the manifest file.
*/
public synchronized void writeManifestFile(boolean useAbsolutePath) {
try {
List<String> baseFiles = fetchLatestBaseFilesForAllPartitions(metaClient, useFileListingFromMetadata, useAbsolutePath).collect(Collectors.toList());
if (baseFiles.isEmpty()) {
LOG.warn("No base file to generate manifest file.");
return;
} else {
LOG.info("Writing base file names to manifest file: " + baseFiles.size());
}
final Path manifestFilePath = getManifestFilePath(useAbsolutePath);
try (FSDataOutputStream outputStream = metaClient.getFs().create(manifestFilePath, true);
BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(outputStream, StandardCharsets.UTF_8))) {
for (String f : baseFiles) {
writer.write(f);
writer.write("\n");
}
}
} catch (Exception e) {
throw new HoodieException("Error in writing manifest file.", e);
}
}
| 3.26 |
hudi_RowDataToHoodieFunction_toHoodieRecord_rdh
|
/**
 * Converts the given record to a {@link HoodieRecord}.
*
* @param record
* The input record
* @return HoodieRecord based on the configuration
* @throws IOException
* if error occurs
*/
@SuppressWarnings("rawtypes")
private HoodieRecord toHoodieRecord(I record) throws Exception {
GenericRecord gr = ((GenericRecord) (this.converter.convert(this.avroSchema, record)));
final HoodieKey v1 = keyGenerator.getKey(gr);
HoodieRecordPayload payload = payloadCreation.createPayload(gr);
HoodieOperation v3 = HoodieOperation.fromValue(record.getRowKind().toByteValue());
return new HoodieAvroRecord<>(v1, payload, v3);
}
| 3.26 |
hudi_BaseRollbackHelper_deleteFiles_rdh
|
/**
* Common method used for cleaning out files during rollback.
*/
protected List<HoodieRollbackStat> deleteFiles(HoodieTableMetaClient metaClient, List<String> filesToBeDeleted, boolean doDelete) throws IOException {
return filesToBeDeleted.stream().map(fileToDelete -> {
String basePath = metaClient.getBasePath();
try {
Path fullDeletePath = new Path(fileToDelete);
String partitionPath = FSUtils.getRelativePartitionPath(new Path(basePath), fullDeletePath.getParent());
boolean isDeleted = true;
if (doDelete) {
try {
isDeleted = metaClient.getFs().delete(fullDeletePath);
} catch (FileNotFoundException e) {
// if first rollback attempt failed and retried again, chances that some files are already deleted.
isDeleted = true;
}
}
return HoodieRollbackStat.newBuilder().withPartitionPath(partitionPath).withDeletedFileResult(fullDeletePath.toString(), isDeleted).build();
} catch (IOException e) {
LOG.error("Fetching file status for ");
throw new HoodieIOException(("Fetching file status for " + fileToDelete)
+ " failed ", e);
}
}).collect(Collectors.toList());
}
| 3.26 |
hudi_BaseRollbackHelper_maybeDeleteAndCollectStats_rdh
|
/**
 * Maybe delete the files of interest and collect stats, or collect stats only.
*
* @param context
* instance of {@link HoodieEngineContext} to use.
* @param instantToRollback
* {@link HoodieInstant} of interest for which deletion or collect stats is requested.
* @param rollbackRequests
* List of {@link ListingBasedRollbackRequest} to be operated on.
* @param doDelete
* {@code true} if deletion has to be done. {@code false} if only stats are to be collected w/o performing any deletes.
* @return stats collected with or w/o actual deletions.
*/
List<Pair<String, HoodieRollbackStat>> maybeDeleteAndCollectStats(HoodieEngineContext context, HoodieInstant instantToRollback, List<SerializableHoodieRollbackRequest> rollbackRequests, boolean doDelete, int numPartitions) {
return context.flatMap(rollbackRequests, ((SerializableFunction<SerializableHoodieRollbackRequest, Stream<Pair<String, HoodieRollbackStat>>>) (rollbackRequest -> {
List<String> filesToBeDeleted = rollbackRequest.getFilesToBeDeleted();
if (!filesToBeDeleted.isEmpty()) {
List<HoodieRollbackStat> rollbackStats = deleteFiles(metaClient, filesToBeDeleted, doDelete);
List<Pair<String, HoodieRollbackStat>> partitionToRollbackStats = new ArrayList<>();
rollbackStats.forEach(entry -> partitionToRollbackStats.add(Pair.of(entry.getPartitionPath(), entry)));
return partitionToRollbackStats.stream();
} else if (!rollbackRequest.getLogBlocksToBeDeleted().isEmpty()) {
HoodieLogFormat.Writer writer = null;
final Path filePath;
try {
String v9 = rollbackRequest.getFileId();
writer = HoodieLogFormat.newWriterBuilder().onParentPath(FSUtils.getPartitionPath(metaClient.getBasePath(), rollbackRequest.getPartitionPath())).withFileId(v9).withDeltaCommit(instantToRollback.getTimestamp()).withFs(metaClient.getFs()).withFileExtension(HoodieLogFile.DELTA_EXTENSION).build();
// generate metadata
if (doDelete) {
Map<HoodieLogBlock.HeaderMetadataType, String> header = generateHeader(instantToRollback.getTimestamp());
// if update belongs to an existing log file
// use the log file path from AppendResult in case the file handle may roll over
filePath = writer.appendBlock(new HoodieCommandBlock(header)).logFile().getPath();
} else {
filePath = writer.getLogFile().getPath();
}
} catch (IOException | InterruptedException io) {
throw new HoodieRollbackException("Failed to rollback for instant " + instantToRollback, io);
} finally {
try {
if (writer != null) {
writer.close();
}
} catch (IOException io) {
throw new HoodieIOException("Error appending rollback block", io);
}
}
// This step is intentionally done after writer is closed. Guarantees that
// getFileStatus would reflect correct stats and FileNotFoundException is not thrown in
// cloud-storage : HUDI-168
Map<FileStatus, Long> filesToNumBlocksRollback = Collections.singletonMap(metaClient.getFs().getFileStatus(Objects.requireNonNull(filePath)), 1L);
return Collections.singletonList(Pair.of(rollbackRequest.getPartitionPath(), HoodieRollbackStat.newBuilder().withPartitionPath(rollbackRequest.getPartitionPath()).withRollbackBlockAppendResults(filesToNumBlocksRollback).build())).stream();
} else {
return Collections.singletonList(Pair.of(rollbackRequest.getPartitionPath(), HoodieRollbackStat.newBuilder().withPartitionPath(rollbackRequest.getPartitionPath()).build())).stream();
}
})), numPartitions);
}
| 3.26 |
hudi_BaseRollbackHelper_collectRollbackStats_rdh
|
/**
* Collect all file info that needs to be rolled back.
*/
public List<HoodieRollbackStat> collectRollbackStats(HoodieEngineContext context, HoodieInstant instantToRollback,
List<HoodieRollbackRequest> rollbackRequests) {
int parallelism = Math.max(Math.min(rollbackRequests.size(), config.getRollbackParallelism()), 1);
context.setJobStatus(this.getClass().getSimpleName(), "Collect rollback stats for upgrade/downgrade: " + config.getTableName());
// If not for conversion to HoodieRollbackInternalRequests, code fails. Using avro model (HoodieRollbackRequest) within spark.parallelize
// is failing with com.esotericsoftware.kryo.KryoException
// stack trace: https://gist.github.com/nsivabalan/b6359e7d5038484f8043506c8bc9e1c8
// related stack overflow post: https://issues.apache.org/jira/browse/SPARK-3601. Avro deserializes list as GenericData.Array.
List<SerializableHoodieRollbackRequest> serializableRequests = rollbackRequests.stream().map(SerializableHoodieRollbackRequest::new).collect(Collectors.toList());
return context.reduceByKey(maybeDeleteAndCollectStats(context, instantToRollback, serializableRequests,
false, parallelism), RollbackUtils::mergeRollbackStat, parallelism);
}
| 3.26 |
hudi_HoodieBackedTableMetadata_getOrCreateReaders_rdh
|
/**
* Create a file reader and the record scanner for a given partition and file slice
* if readers are not already available.
*
* @param partitionName
* - Partition name
* @param slice
* - The file slice to open readers for
* @return File reader and the record scanner pair for the requested file slice
*/
private Pair<HoodieSeekingFileReader<?>, HoodieMetadataLogRecordReader> getOrCreateReaders(String partitionName, FileSlice slice) {
if (reuse) {
Pair<String, String> key = Pair.of(partitionName, slice.getFileId());
return partitionReaders.get().computeIfAbsent(key, ignored -> openReaders(partitionName, slice));
} else {
return openReaders(partitionName, slice);
}
}
| 3.26 |
hudi_HoodieBackedTableMetadata_closePartitionReaders_rdh
|
/**
* Close and clear all the partitions readers.
*/
private void closePartitionReaders() {
for (Pair<String, String> partitionFileSlicePair : partitionReaders.get().keySet()) {
close(partitionFileSlicePair);
}
partitionReaders.get().clear();
}
| 3.26 |
hudi_HoodieBackedTableMetadata_lookupKeysFromFileSlice_rdh
|
/**
* Lookup list of keys from a single file slice.
*
* @param partitionName
* Name of the partition
* @param keys
* The list of keys to lookup
* @param fileSlice
* The file slice to read
* @return A {@code Map} of key name to {@code HoodieRecord} for the keys which were found in the file slice
*/
private Map<String, HoodieRecord<HoodieMetadataPayload>> lookupKeysFromFileSlice(String partitionName, List<String> keys, FileSlice fileSlice) {
Pair<HoodieSeekingFileReader<?>, HoodieMetadataLogRecordReader> readers = getOrCreateReaders(partitionName, fileSlice);
try {
List<Long> timings = new ArrayList<>(1);
HoodieSeekingFileReader<?> baseFileReader = readers.getKey();
HoodieMetadataLogRecordReader logRecordScanner = readers.getRight();
if ((baseFileReader == null) && (logRecordScanner == null)) {
return Collections.emptyMap();
}
// Sort it here once so that we don't need to sort individually for base file and for each individual log files.
List<String> sortedKeys = new ArrayList<>(keys);
Collections.sort(sortedKeys);
boolean fullKeys = true;
Map<String, HoodieRecord<HoodieMetadataPayload>> logRecords = readLogRecords(logRecordScanner, sortedKeys, fullKeys, timings);
return readFromBaseAndMergeWithLogRecords(baseFileReader, sortedKeys, fullKeys, logRecords, timings, partitionName);
} catch (IOException ioe) {
throw new HoodieIOException(("Error merging records from metadata table for " + keys.size()) + " key : ", ioe);
} finally {
if (!reuse) {
closeReader(readers);
}
}
}
| 3.26 |
hudi_HoodieBackedTableMetadata_close_rdh
|
/**
* Close the file reader and the record scanner for the given file slice.
*
* @param partitionFileSlicePair
* - Partition and FileSlice
 */
private synchronized void close(Pair<String, String> partitionFileSlicePair) {
Pair<HoodieSeekingFileReader<?>, HoodieMetadataLogRecordReader> readers = partitionReaders.get().remove(partitionFileSlicePair);
closeReader(readers);
}
| 3.26 |
hudi_HiveQueryDDLExecutor_getTableSchema_rdh
|
// TODO Duplicating it here from HMSDLExecutor as HiveQueryQL has no way of doing it on its own currently. Need to refactor it
@Override
public Map<String, String> getTableSchema(String tableName) {
try {
// HiveMetastoreClient returns partition keys separate from Columns, hence get both and merge to
// get the Schema of the table.
final long start = System.currentTimeMillis();
Table table = metaStoreClient.getTable(databaseName, tableName);
Map<String, String> partitionKeysMap = table.getPartitionKeys().stream().collect(Collectors.toMap(FieldSchema::getName, f -> f.getType().toUpperCase()));
Map<String, String> columnsMap = table.getSd().getCols().stream().collect(Collectors.toMap(FieldSchema::getName, f -> f.getType().toUpperCase()));
Map<String, String> schema = new HashMap<>();
schema.putAll(columnsMap);
schema.putAll(partitionKeysMap);
final long v8 = System.currentTimeMillis();
LOG.info(String.format("Time taken to getTableSchema: %s ms", v8 - start));
return schema;
} catch (Exception e) {
throw new HoodieHiveSyncException("Failed to get table schema for : " + tableName, e);
}
}
| 3.26 |
hudi_SparkHoodieMetadataBulkInsertPartitioner_repartitionRecords_rdh
|
/**
 * Partition the records by their location. The number of partitions is determined by the number of MDT fileGroups being updated rather than the
* specific value of outputSparkPartitions.
*/
@Override
public JavaRDD<HoodieRecord> repartitionRecords(JavaRDD<HoodieRecord> records, int outputSparkPartitions) {
Comparator<Tuple2<Integer, String>> keyComparator = ((Comparator) ((t1, t2) -> t1._2.compareTo(t2._2)));
// Partition the records by their file group.
// Key by <file group index, record key>. The file group index is used to partition and the record key is used to sort within the partition.
JavaRDD<HoodieRecord> partitionedRDD = records.keyBy(r -> {
int fileGroupIndex = HoodieTableMetadataUtil.getFileGroupIndexFromFileId(r.getCurrentLocation().getFileId());
return new Tuple2<>(fileGroupIndex, r.getRecordKey());
}).repartitionAndSortWithinPartitions(new FileGroupPartitioner(), keyComparator).map(t -> t._2);
fileIDPfxs = partitionedRDD.mapPartitions(recordItr -> {
// Due to partitioning, all records in the partition should have the same fileID, so we can get the fileID prefix from just the first record.
List<String> fileIds = new ArrayList<>(1);
if (recordItr.hasNext()) {
HoodieRecord record = recordItr.next();
final String fileID = HoodieTableMetadataUtil.getFileGroupPrefix(record.getCurrentLocation().getFileId());
fileIds.add(fileID);
} else {
// FileGroupPartitioner returns a fixed number of partition as part of numPartitions(). In the special case that recordsRDD has fewer
// records than fileGroupCount, some of these partitions (corresponding to fileGroups) will not have any data.
// But we still need to return a fileID for use within {@code BulkInsertMapFunction}
fileIds.add(StringUtils.EMPTY_STRING);
}
return fileIds.iterator();
}, true).collect();
ValidationUtils.checkArgument(partitionedRDD.getNumPartitions() == fileIDPfxs.size(), String.format("Generated fileIDPfxs (%d) are lesser in size than the partitions %d", fileIDPfxs.size(), partitionedRDD.getNumPartitions()));
return partitionedRDD;
}
| 3.26 |
hudi_BloomFilterUtils_getBitSize_rdh
|
/**
*
* @return the bitsize given the total number of entries and error rate.
*/
static int getBitSize(int numEntries, double errorRate) {
return ((int) (Math.ceil(numEntries * ((-Math.log(errorRate)) / LOG2_SQUARED))));
}
| 3.26 |
hudi_BloomFilterUtils_m0_rdh
|
/**
*
* @return the number of hashes given the bitsize and total number of entries.
*/
static int m0(int bitSize, int numEntries) {
// Number of the hash functions
return ((int) (Math.ceil((Math.log(2) * bitSize) / numEntries)));
}
| 3.26 |
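A worked example (a sketch, not part of the dataset) applying the two formulas above, assuming the constant LOG2_SQUARED = ln(2)^2:

// For 1,000,000 entries and a 1% false-positive rate:
double LOG2_SQUARED = Math.log(2) * Math.log(2);          // assumed constant definition
int numEntries = 1_000_000;
double errorRate = 0.01;
int bitSize = (int) Math.ceil(numEntries * ((-Math.log(errorRate)) / LOG2_SQUARED)); // ~9,585,059 bits
int numHashes = (int) Math.ceil((Math.log(2) * bitSize) / numEntries);               // 7 hash functions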
hudi_MercifulJsonConverter_getFieldTypeProcessors_rdh
|
/**
* Build type processor map for each avro type.
*/
private static Map<Schema.Type, JsonToAvroFieldProcessor> getFieldTypeProcessors() {
return Collections.unmodifiableMap(new HashMap<Schema.Type, JsonToAvroFieldProcessor>() {
{
put(Type.STRING, generateStringTypeHandler());
put(Type.BOOLEAN, generateBooleanTypeHandler());
put(Type.DOUBLE, generateDoubleTypeHandler());
put(Type.FLOAT, generateFloatTypeHandler());
put(Type.INT, generateIntTypeHandler());
put(Type.LONG, generateLongTypeHandler());
put(Type.ARRAY, generateArrayTypeHandler());
put(Type.RECORD, generateRecordTypeHandler());
put(Type.ENUM, generateEnumTypeHandler());
put(Type.MAP, generateMapTypeHandler());
put(Type.BYTES, generateBytesTypeHandler());
put(Type.FIXED, generateFixedTypeHandler());
}
});
}
| 3.26 |
hudi_MercifulJsonConverter_convert_rdh
|
/**
* Converts json to Avro generic record.
* NOTE: if sanitization is needed for avro conversion, the schema input to this method is already sanitized.
* During the conversion here, we sanitize the fields in the data
*
* @param json
* Json record
* @param schema
* Schema
*/
public GenericRecord convert(String json, Schema schema) {
try {
Map<String, Object> jsonObjectMap = mapper.readValue(json, Map.class);
return convertJsonToAvro(jsonObjectMap, schema, shouldSanitize, f0);
} catch (IOException e) {
throw new HoodieIOException(e.getMessage(), e);
}
}
| 3.26 |
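A hypothetical usage sketch for convert; the no-arg MercifulJsonConverter constructor and the sample schema are assumptions for illustration:

// Parse a small Avro schema and convert a matching JSON string into a GenericRecord.
Schema schema = new Schema.Parser().parse(
    "{\"type\":\"record\",\"name\":\"Person\",\"fields\":["
    + "{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"age\",\"type\":\"int\"}]}");
MercifulJsonConverter converter = new MercifulJsonConverter(); // constructor assumed
GenericRecord record = converter.convert("{\"name\":\"alice\",\"age\":30}", schema);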
hudi_HoodieBootstrapSchemaProvider_getBootstrapSchema_rdh
|
/**
* Main API to select avro schema for bootstrapping.
*
* @param context
* HoodieEngineContext
* @param partitions
* List of partitions with files within them
* @return Avro Schema
*/
public final Schema getBootstrapSchema(HoodieEngineContext context, List<Pair<String, List<HoodieFileStatus>>> partitions) {
if (writeConfig.getSchema() != null) {
// Use schema specified by user if set
Schema userSchema = new Schema.Parser().parse(writeConfig.getSchema());
if (!HoodieAvroUtils.getNullSchema().equals(userSchema)) {
return userSchema;
}
}
return getBootstrapSourceSchema(context, partitions);
}
| 3.26 |
hudi_TypedProperties_fromMap_rdh
|
/**
* This method is introduced to get rid of the scala compile error:
* <pre>
* <code>
* ambiguous reference to overloaded definition,
* both method putAll in class Properties of type (x$1: java.util.Map[_, _])Unit
* and method putAll in class Hashtable of type (x$1: java.util.Map[_ <: Object, _ <: Object])Unit
* match argument types (java.util.HashMap[Nothing,Nothing])
* properties.putAll(new java.util.HashMap())
* </code>
* </pre>
*
* @param items
* The new items to put
*/
public static TypedProperties fromMap(Map<?, ?> items) {
TypedProperties props = new TypedProperties();
props.putAll(items);
return props;
}
| 3.26 |
hudi_TypedProperties_putAll_rdh
|
/**
* This method is introduced to get rid of the scala compile error:
* <pre>
* <code>
* ambiguous reference to overloaded definition,
* both method putAll in class Properties of type (x$1: java.util.Map[_, _])Unit
* and method putAll in class Hashtable of type (x$1: java.util.Map[_ <: Object, _ <: Object])Unit
* match argument types (java.util.HashMap[Nothing,Nothing])
* properties.putAll(new java.util.HashMap())
* </code>
* </pre>
*
* @param props
* The properties
* @param items
* The new items to put
*/
public static void putAll(TypedProperties props, Map<?, ?> items) {
props.putAll(items);
}
| 3.26 |
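A usage sketch combining the two helpers above; the property keys and values are illustrative only:

// Build TypedProperties from a plain Map, then merge more entries via the static putAll helper.
Map<String, String> defaults = new HashMap<>();
defaults.put("hoodie.datasource.write.operation", "upsert"); // illustrative key/value
TypedProperties props = TypedProperties.fromMap(defaults);
TypedProperties.putAll(props, Collections.singletonMap("hoodie.table.name", "my_table")); // illustrative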
hudi_UtilHelpers_parseSchema_rdh
|
/**
* Parse Schema from file.
*
* @param fs
* File System
* @param schemaFile
* Schema File
*/
public static String parseSchema(FileSystem fs, String schemaFile) throws Exception {
// Read schema file.
Path p = new Path(schemaFile);
if (!fs.exists(p)) {
throw new Exception(String.format("Could not find - %s - schema file.", schemaFile));
}
long len = fs.getFileStatus(p).getLen();
ByteBuffer buf = ByteBuffer.allocate(((int) (len)));
try (FSDataInputStream inputStream = fs.open(p)) {
inputStream.readFully(0, buf.array(), 0, buf.array().length);
}
return new String(buf.array());
}
| 3.26 |
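A usage sketch for parseSchema; the Hadoop configuration and schema file path are assumptions for illustration:

// Read an Avro schema file from the configured FileSystem and parse it.
FileSystem fs = FileSystem.get(new org.apache.hadoop.conf.Configuration());
String schemaStr = UtilHelpers.parseSchema(fs, "/tmp/source.avsc"); // path is illustrative
Schema schema = new Schema.Parser().parse(schemaStr);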
hudi_UtilHelpers_createHoodieClient_rdh
|
/**
* Build Hoodie write client.
*
* @param jsc
* Java Spark Context
* @param basePath
* Base Path
* @param schemaStr
* Schema
* @param parallelism
* Parallelism
*/
public static SparkRDDWriteClient<HoodieRecordPayload> createHoodieClient(JavaSparkContext jsc, String basePath, String schemaStr,
int parallelism, Option<String> compactionStrategyClass, TypedProperties properties) {
HoodieCompactionConfig compactionConfig = compactionStrategyClass.map(strategy -> HoodieCompactionConfig.newBuilder().withInlineCompaction(false).withCompactionStrategy(ReflectionUtils.loadClass(strategy)).build()).orElse(HoodieCompactionConfig.newBuilder().withInlineCompaction(false).build());
HoodieWriteConfig config = HoodieWriteConfig.newBuilder().withPath(basePath).withParallelism(parallelism, parallelism).withBulkInsertParallelism(parallelism).withDeleteParallelism(parallelism).withSchema(schemaStr).combineInput(true, true).withCompactionConfig(compactionConfig).withIndexConfig(HoodieIndexConfig.newBuilder().withIndexType(IndexType.BLOOM).build()).withProps(properties).build();
return new SparkRDDWriteClient<>(new HoodieSparkEngineContext(jsc), config);
}
| 3.26 |
hudi_UtilHelpers_getJDBCSchema_rdh
|
/**
 * Calls the Spark function to get the schema through JDBC.
 * The code logic implementation refers to Spark 2.4.x and Spark 3.x.
 *
 * @param options JDBC options
 * @return the Avro schema of the JDBC table
 */
public static Schema getJDBCSchema(Map<String, String> options) {
Connection conn;
String url;
String table;
boolean tableExists;
try {
conn = createConnectionFactory(options);
url = options.get(JDBCOptions.JDBC_URL());
table = options.get(JDBCOptions.JDBC_TABLE_NAME());
tableExists = tableExists(conn, options);
} catch (Exception e) {
throw new HoodieSchemaFetchException("Failed to connect to jdbc", e);
}
if (!tableExists) {
throw new HoodieSchemaFetchException(String.format("%s table does not exists!", table));
}
try {
JdbcDialect v37 = JdbcDialects.get(url);
try (PreparedStatement statement = conn.prepareStatement(v37.getSchemaQuery(table))) {
statement.setQueryTimeout(Integer.parseInt(options.get("queryTimeout")));
try (ResultSet rs = statement.executeQuery()) {
StructType structType;
if (Boolean.parseBoolean(options.get("nullable"))) {
structType = SparkJdbcUtils.getSchema(rs, v37, true);
} else {
structType = SparkJdbcUtils.getSchema(rs, v37, false);
}
return AvroConversionUtils.convertStructTypeToAvroSchema(structType, table, "hoodie." + table);
}
}
} catch (HoodieException e) {
throw e;
} catch (Exception e) {
throw new HoodieSchemaFetchException(String.format("Unable to fetch schema from %s table", table), e);
}
}
| 3.26 |
hudi_UtilHelpers_tableExists_rdh
|
/**
* Returns true if the table already exists in the JDBC database.
*/
private static Boolean tableExists(Connection conn, Map<String, String> options) throws SQLException {
JdbcDialect dialect = JdbcDialects.get(options.get(JDBCOptions.JDBC_URL()));
try (PreparedStatement statement = conn.prepareStatement(dialect.getTableExistsQuery(options.get(JDBCOptions.JDBC_TABLE_NAME())))) {
statement.setQueryTimeout(Integer.parseInt(options.get(JDBCOptions.JDBC_QUERY_TIMEOUT())));
statement.executeQuery();
}
return true;
}
| 3.26 |
hudi_UtilHelpers_createConnectionFactory_rdh
|
/**
* Returns a factory for creating connections to the given JDBC URL.
*
* @param options
* - JDBC options that contains url, table and other information.
 * @return a JDBC connection
 * @throws SQLException
 * if the driver could not open a JDBC connection.
 */
private static Connection createConnectionFactory(Map<String, String> options) throws SQLException {
String driverClass = options.get(JDBCOptions.JDBC_DRIVER_CLASS());
DriverRegistry.register(driverClass);
Enumeration<Driver> drivers = DriverManager.getDrivers();
Driver driver = null;
while (drivers.hasMoreElements()) {
Driver d = drivers.nextElement();
if (d instanceof DriverWrapper) {
if (((DriverWrapper) (d)).wrapped().getClass().getCanonicalName().equals(driverClass)) {
driver = d;
}
} else if (d.getClass().getCanonicalName().equals(driverClass)) {
driver = d;
}
if (driver != null) {
break;
}
}
Objects.requireNonNull(driver, String.format("Did not find registered driver with class %s", driverClass));
Properties properties = new Properties();
properties.putAll(options);
Connection connect;
String url = options.get(JDBCOptions.JDBC_URL());
connect = driver.connect(url, properties);
Objects.requireNonNull(connect, String.format("The driver could not open a JDBC connection. Check the URL: %s", url));
return connect;
}
| 3.26 |
hudi_HoodieFlinkTable_getMetadataWriter_rdh
|
/**
* Fetch instance of {@link HoodieTableMetadataWriter}.
*
* @return instance of {@link HoodieTableMetadataWriter}
*/
@Override
protected Option<HoodieTableMetadataWriter> getMetadataWriter(String triggeringInstantTimestamp, HoodieFailedWritesCleaningPolicy failedWritesCleaningPolicy) {
if (config.isMetadataTableEnabled() || getMetaClient().getTableConfig().isMetadataTableAvailable()) {
return Option.of(FlinkHoodieBackedTableMetadataWriter.create(context.getHadoopConf().get(), config, failedWritesCleaningPolicy, context, Option.of(triggeringInstantTimestamp)));
} else {
return Option.empty();
}
}
| 3.26 |
hudi_HoodieUnMergedLogRecordScanner_newBuilder_rdh
|
/**
* Returns the builder for {@code HoodieUnMergedLogRecordScanner}.
*/
public static HoodieUnMergedLogRecordScanner.Builder newBuilder() {
return new Builder();
}
| 3.26 |
hudi_HoodieUnMergedLogRecordScanner_scan_rdh
|
/**
* Scans delta-log files processing blocks
*/
public final void scan() {
scan(false);
}
| 3.26 |
hudi_LockManager_unlock_rdh
|
/**
 * We need to take care of the scenario where the current thread may not be the holder of this lock
 * but still tries to call unlock().
*/
public void unlock() {
getLockProvider().unlock();
metrics.updateLockHeldTimerMetrics();
close();
}
| 3.26 |
hudi_HoodiePipeline_sink_rdh
|
/**
* Returns the data stream sink with given catalog table.
*
* @param input
* The input datastream
* @param tablePath
* The table path to the hoodie table in the catalog
* @param catalogTable
* The hoodie catalog table
* @param isBounded
* A flag indicating whether the input data stream is bounded
*/
private static DataStreamSink<?> sink(DataStream<RowData> input, ObjectIdentifier tablePath, ResolvedCatalogTable catalogTable, boolean isBounded) {
FactoryUtil.DefaultDynamicTableContext context = Utils.getTableContext(tablePath, catalogTable, Configuration.fromMap(catalogTable.getOptions()));
HoodieTableFactory hoodieTableFactory = new HoodieTableFactory();
return ((DataStreamSinkProvider) (hoodieTableFactory.createDynamicTableSink(context).getSinkRuntimeProvider(new SinkRuntimeProviderContext(isBounded)))).consumeDataStream(input);
}
| 3.26 |
hudi_HoodiePipeline_builder_rdh
|
/**
* Returns the builder for hoodie pipeline construction.
*/
public static Builder builder(String tableName) {
return new Builder(tableName);
}
| 3.26 |
hudi_HoodiePipeline_pk_rdh
|
/**
* Add primary keys.
*/
public Builder pk(String... pks) {
this.pk = String.join(",", pks);
return this;
}
| 3.26 |
hudi_HoodiePipeline_source_rdh
|
/**
* Returns the data stream source with given catalog table.
*
* @param execEnv
* The execution environment
* @param tablePath
* The table path to the hoodie table in the catalog
* @param catalogTable
* The hoodie catalog table
*/
private static DataStream<RowData> source(StreamExecutionEnvironment execEnv, ObjectIdentifier tablePath, ResolvedCatalogTable catalogTable) {
FactoryUtil.DefaultDynamicTableContext context = Utils.getTableContext(tablePath, catalogTable, Configuration.fromMap(catalogTable.getOptions()));
HoodieTableFactory hoodieTableFactory = new HoodieTableFactory();
DataStreamScanProvider dataStreamScanProvider = ((DataStreamScanProvider) (((ScanTableSource) (hoodieTableFactory.createDynamicTableSource(context))).getScanRuntimeProvider(new ScanRuntimeProviderContext())));
return dataStreamScanProvider.produceDataStream(execEnv);
}
| 3.26 |
hudi_HoodiePipeline_schema_rdh
|
/**
* Add table schema.
*/
public Builder schema(Schema schema) {
for (Schema.UnresolvedColumn column : schema.getColumns()) {
column(column.toString());
}
if (schema.getPrimaryKey().isPresent()) {
pk(schema.getPrimaryKey().get().getColumnNames().stream().map(EncodingUtils::escapeIdentifier).collect(Collectors.joining(", ")));
}
return this;
}
| 3.26 |
hudi_HoodiePipeline_partition_rdh
|
/**
* Add partition fields.
*/
public Builder partition(String... partitions) {
this.partitions = new ArrayList<>(Arrays.asList(partitions));
return this;
}
| 3.26 |
hudi_HoodiePipeline_column_rdh
|
/**
* Add a table column definition.
*
* @param column
* the column format should be in the form like 'f0 int'
*/
public Builder column(String column) {
this.columns.add(column);
return this;
}
| 3.26 |
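A sketch chaining the builder methods shown above (builder, column, pk, partition); the column definitions and field names are illustrative, not taken from the source:

// Hypothetical pipeline table definition built from the methods in the snippets above.
HoodiePipeline.Builder builder = HoodiePipeline.builder("t1")
    .column("uuid VARCHAR(20)")
    .column("name VARCHAR(10)")
    .column("ts TIMESTAMP(3)")
    .column("`partition` VARCHAR(20)")
    .pk("uuid")
    .partition("partition");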
hudi_ProtoConversionUtil_getMessageSchema_rdh
|
/**
* Translates a Proto Message descriptor into an Avro Schema
*
* @param descriptor
* the descriptor for the proto message
* @param recursionDepths
* a map of the descriptor to the number of times it has been encountered in this depth first traversal of the schema.
* This is used to cap the number of times we recurse on a schema.
* @param path
* a string prefixed with the namespace of the original message being translated to avro and containing the current dot separated path tracking progress through the schema.
* This value is used for a namespace when creating Avro records to avoid an error when reusing the same class name when unraveling a recursive schema.
* @return an avro schema
*/
private Schema getMessageSchema(Descriptors.Descriptor descriptor, CopyOnWriteMap<Descriptors.Descriptor, Integer> recursionDepths, String path) {
// Parquet does not handle recursive schemas so we "unravel" the proto N levels
Integer currentRecursionCount = recursionDepths.getOrDefault(descriptor, 0);
if (currentRecursionCount >= maxRecursionDepth) {
return RECURSION_OVERFLOW_SCHEMA;
}
// The current path is used as a namespace to avoid record name collisions within recursive schemas
Schema result = Schema.createRecord(descriptor.getName(), null, path, false);
recursionDepths.put(descriptor, ++currentRecursionCount);
List<Schema.Field> fields = new ArrayList<>(descriptor.getFields().size());
for (Descriptors.FieldDescriptor f : descriptor.getFields()) {
// each branch of the schema traversal requires its own recursion depth tracking so copy the recursionDepths map
fields.add(new Schema.Field(f.getName(), getFieldSchema(f, new CopyOnWriteMap<>(recursionDepths), path), null, getDefault(f)));
}
result.setFields(fields);
return result;
}
| 3.26 |
hudi_ProtoConversionUtil_convertToAvro_rdh
|
/**
* Converts the provided {@link Message} into an avro {@link GenericRecord} with the provided schema.
*
* @param schema
* target schema to convert into
* @param message
* the source message to convert
* @return an Avro GenericRecord
*/
public static GenericRecord convertToAvro(Schema schema, Message message) {
return AvroSupport.convert(schema, message);
}
| 3.26 |
hudi_ProtoConversionUtil_getWrappedValue_rdh
|
/**
* Returns the wrapped field, assumes all wrapped fields have a single value
*
* @param value
* wrapper message like {@link Int32Value} or {@link StringValue}
* @return the wrapped object
*/
private static Object getWrappedValue(Object value) {
Message valueAsMessage = ((Message) (value));
return valueAsMessage.getField(valueAsMessage.getDescriptorForType().getFields().get(0));
}
| 3.26 |
hudi_ProtoConversionUtil_getAvroSchemaForMessageClass_rdh
|
/**
* Creates an Avro {@link Schema} for the provided class. Assumes that the class is a protobuf {@link Message}.
*
* @param clazz
* The protobuf class
* @param schemaConfig
* configuration used to determine how to handle particular cases when converting from the proto schema
* @return An Avro schema
*/
public static Schema getAvroSchemaForMessageClass(Class clazz, SchemaConfig schemaConfig) {
return new AvroSupport(schemaConfig).getSchema(clazz);
}
| 3.26 |
hudi_HoodieCopyOnWriteTableInputFormat_listStatusForNonHoodiePaths_rdh
|
/**
 * Returns the file statuses for non-Hoodie paths.
 *
 * @param job the job configuration
 * @return the listed file statuses
 * @throws IOException
 */
public FileStatus[] listStatusForNonHoodiePaths(JobConf job) throws IOException {
return doListStatus(job);
}
| 3.26 |
hudi_HoodieCopyOnWriteTableInputFormat_listStatusForIncrementalMode_rdh
|
/**
* Achieves listStatus functionality for an incrementally queried table. Instead of listing all
* partitions and then filtering based on the commits of interest, this logic first extracts the
* partitions touched by the desired commits and then lists only those partitions.
*/
protected List<FileStatus> listStatusForIncrementalMode(JobConf job, HoodieTableMetaClient tableMetaClient, List<Path> inputPaths, String incrementalTable) throws IOException {
Job jobContext = Job.getInstance(job);
Option<HoodieTimeline> timeline = HoodieInputFormatUtils.getFilteredCommitsTimeline(jobContext, tableMetaClient);
if (!timeline.isPresent()) {
return null;
}
Option<List<HoodieInstant>> commitsToCheck = HoodieInputFormatUtils.getCommitsForIncrementalQuery(jobContext, incrementalTable, timeline.get());
if (!commitsToCheck.isPresent()) {
return null;
}
Option<String> v16 = HoodieInputFormatUtils.getAffectedPartitions(commitsToCheck.get(), tableMetaClient, timeline.get(), inputPaths);
// Mutate the JobConf to set the input paths to only partitions touched by incremental pull.
if (!v16.isPresent()) {
return null;
}
setInputPaths(job, v16.get());
FileStatus[] fileStatuses = doListStatus(job);
return HoodieInputFormatUtils.filterIncrementalFileStatus(jobContext, tableMetaClient, timeline.get(), fileStatuses, commitsToCheck.get());
}
| 3.26 |
hudi_Lazy_eagerly_rdh
|
/**
* Instantiates {@link Lazy} in an "eagerly" fashion setting it w/ the provided value of
* type {@link T} directly, bypassing lazy initialization sequence
*/
public static <T> Lazy<T> eagerly(T ref) {
return new Lazy<>(ref);
}
| 3.26 |
hudi_Lazy_lazily_rdh
|
/**
* Executes provided {@code initializer} lazily, while providing for "exactly once" semantic,
* to instantiate value of type {@link T} being subsequently held by the returned instance of
* {@link Lazy}
*/
public static <T> Lazy<T> lazily(Supplier<T> initializer) {
return new Lazy<>(initializer);
}
| 3.26 |
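A usage sketch contrasting the two factory methods above; the accessor name get() and the supplier body are assumptions for illustration:

// Eager: the value is computed up front and simply wrapped.
Lazy<String> eager = Lazy.eagerly("already-computed");
// Lazy: the supplier runs at most once, on first access.
Lazy<String> lazy = Lazy.lazily(() -> expensiveLookup()); // expensiveLookup() is illustrative
String value = lazy.get(); // accessor name assumed; triggers the supplier exactly once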
hudi_DistributedRegistry_getAllCounts_rdh
|
/**
* Get all Counter type metrics.
*/
@Override
public Map<String, Long> getAllCounts(boolean prefixWithRegistryName) {
HashMap<String, Long> countersMap = new HashMap<>();
counters.forEach((k, v) -> {
String key = prefixWithRegistryName ? (name + ".") + k : k;
countersMap.put(key, v);
});
return countersMap;
}
| 3.26 |
hudi_StreamWriteFunction_trace_rdh
|
/**
* Trace the given record size {@code recordSize}.
*
* @param recordSize
* The record size
* @return true if the buffer size exceeds the maximum buffer size
*/
boolean trace(long recordSize) {
this.bufferSize += recordSize;
return this.bufferSize > this.maxBufferSize;
}
| 3.26 |
hudi_StreamWriteFunction_m0_rdh
|
// -------------------------------------------------------------------------
// Getter/Setter
// -------------------------------------------------------------------------
@VisibleForTesting
@SuppressWarnings("rawtypes")
public Map<String, List<HoodieRecord>> m0() {
Map<String, List<HoodieRecord>> ret = new HashMap<>();
for (Map.Entry<String, DataBucket> entry : buckets.entrySet()) {
ret.put(entry.getKey(), entry.getValue().writeBuffer());
}
return ret;
}
| 3.26 |
hudi_StreamWriteFunction_endInput_rdh
|
/**
* End input action for batch source.
*/
public void endInput() {
super.endInput();
m2(true);
this.writeClient.cleanHandles();
this.writeStatuses.clear();
}
| 3.26 |
hudi_StreamWriteFunction_initBuffer_rdh
|
// -------------------------------------------------------------------------
// Utilities
// -------------------------------------------------------------------------
private void initBuffer() {
this.buckets = new LinkedHashMap<>();
}
| 3.26 |
hudi_StreamWriteFunction_getBucketID_rdh
|
/**
 * Returns the bucket ID for the given record {@code record}.
 */
private String getBucketID(HoodieRecord<?> record) {
final String fileId = record.getCurrentLocation().getFileId();
return StreamerUtil.generateBucketKey(record.getPartitionPath(), fileId);
}
| 3.26 |
hudi_StreamWriteFunction_bufferRecord_rdh
|
/**
* Buffers the given record.
*
* <p>Flush the data bucket first if the bucket records size is greater than
* the configured value {@link FlinkOptions#WRITE_BATCH_SIZE}.
*
* <p>Flush the max size data bucket if the total buffer size exceeds the configured
* threshold {@link FlinkOptions#WRITE_TASK_MAX_SIZE}.
*
* @param value
* HoodieRecord
*/
protected void bufferRecord(HoodieRecord<?> value) {
writeMetrics.markRecordIn();
final String bucketID = getBucketID(value);
DataBucket bucket = this.buckets.computeIfAbsent(bucketID, k -> new DataBucket(this.config.getDouble(FlinkOptions.WRITE_BATCH_SIZE), value));
final DataItem item = DataItem.m1(value);
bucket.records.add(item);
boolean flushBucket = bucket.detector.detect(item);
boolean flushBuffer = this.tracer.trace(bucket.detector.lastRecordSize);
// update buffer metrics after tracing buffer size
writeMetrics.setWriteBufferedSize(this.tracer.bufferSize);
if (flushBucket) {
if (flushBucket(bucket)) {
this.tracer.countDown(bucket.detector.totalSize);
bucket.reset();
}
} else if (flushBuffer) {
// find the max size bucket and flush it out
DataBucket bucketToFlush = this.buckets.values().stream().max(Comparator.comparingLong(b -> b.detector.totalSize)).orElseThrow(NoSuchElementException::new);
if (flushBucket(bucketToFlush)) {
this.tracer.countDown(bucketToFlush.detector.totalSize);
bucketToFlush.reset();
} else {
LOG.warn("The buffer size hits the threshold {}, but still flush the max size data bucket failed!", this.tracer.maxBufferSize);
}
}
}
| 3.26 |
hudi_StreamWriteFunction_writeBuffer_rdh
|
/**
* Prepare the write data buffer: patch up all the records with correct partition path.
*/
public List<HoodieRecord> writeBuffer() {
// rewrite all the records with new record key
return records.stream().map(record -> record.toHoodieRecord(partitionPath)).collect(Collectors.toList());
}
| 3.26 |