Columns in this dataset preview:
name: string, lengths 12 to 178
code_snippet: string, lengths 8 to 36.5k
score: float64, range 3.26 to 3.68
hudi_HoodieStreamerMetrics_updateStreamerHeartbeatTimestamp_rdh
/** * Update heartbeat from deltastreamer ingestion job when active for a table. * * @param heartbeatTimestampMs * the timestamp in milliseconds at which heartbeat is emitted. */ public void updateStreamerHeartbeatTimestamp(long heartbeatTimestampMs) { if (writeConfig.isMetricsOn()) { metrics.registerGauge(getMetricsName("deltastreamer", "heartbeatTimestampMs"), heartbeatTimestampMs); } }
3.26
hudi_HoodieHBaseIndexConfig_hbaseIndexMaxQPSPerRegionServer_rdh
/** * <p> * Method to set maximum QPS allowed per Region Server. This should be same across various jobs. This is intended to * limit the aggregate QPS generated across various jobs to an HBase Region Server. * </p> * <p> * It is recommended to set this value based on your global indexing throughput needs and most importantly, how much * your HBase installation is able to tolerate without Region Servers going down. * </p> */ public HoodieHBaseIndexConfig.Builder hbaseIndexMaxQPSPerRegionServer(int maxQPSPerRegionServer) { // This should be same across various jobs hBaseIndexConfig.setValue(HoodieHBaseIndexConfig.MAX_QPS_PER_REGION_SERVER, String.valueOf(maxQPSPerRegionServer));return this; }
3.26
hudi_PartitionFilterGenerator_buildMinMaxPartitionExpression_rdh
/** * This method will extract the min value and the max value of each field, * and construct GreatThanOrEqual and LessThanOrEqual to build the expression. * * This method can reduce the Expression tree level a lot if each field has too many values. */ private static Expression buildMinMaxPartitionExpression(List<Partition> partitions, List<FieldSchema> partitionFields) { return extractFieldValues(partitions, partitionFields).stream().map(fieldWithValues -> { FieldSchema fieldSchema = fieldWithValues.getKey();if (!SUPPORT_TYPES.contains(fieldSchema.getType())) { return null; } String[] values = fieldWithValues.getValue(); if (values.length == 1) { return Predicates.eq(new NameReference(fieldSchema.getName()), buildLiteralExpression(values[0], fieldSchema.getType())); } Arrays.sort(values, new ValueComparator(fieldSchema.getType())); return Predicates.and(Predicates.gteq(new NameReference(fieldSchema.getName()), buildLiteralExpression(values[0], fieldSchema.getType())), Predicates.lteq(new NameReference(fieldSchema.getName()), buildLiteralExpression(values[values.length - 1], fieldSchema.getType()))); }).filter(Objects::nonNull).reduce(null, (result, expr) -> { if (result == null) {return expr; } else { return Predicates.and(result, expr); } }); }
3.26
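A minimal, self-contained sketch of the min/max idea in buildMinMaxPartitionExpression above, using plain strings in place of Hudi's Expression/Predicates tree; the class and method names (MinMaxFilterSketch, minMaxFilter) are illustrative only, not part of the source.

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;

public class MinMaxFilterSketch {
    // Collapse all observed values of one partition column into a single
    // "col >= min AND col <= max" filter instead of a value-by-value OR.
    static String minMaxFilter(String column, List<String> values, Comparator<String> cmp) {
        if (values.size() == 1) {
            return column + " = '" + values.get(0) + "'";
        }
        List<String> sorted = values.stream().sorted(cmp).collect(Collectors.toList());
        return column + " >= '" + sorted.get(0) + "' AND " + column + " <= '" + sorted.get(sorted.size() - 1) + "'";
    }

    public static void main(String[] args) {
        List<String> dates = Arrays.asList("2022-09-03", "2022-09-01", "2022-09-02");
        // Prints: date >= '2022-09-01' AND date <= '2022-09-03'
        System.out.println(minMaxFilter("date", dates, Comparator.naturalOrder()));
    }
}

With thousands of distinct values this yields one bounded predicate per column rather than a deep OR chain, which is the tree-size reduction the Javadoc refers to.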
hudi_PartitionFilterGenerator_buildPartitionExpression_rdh
/** * Build expression from the Partition list. Here we're trying to match all partitions. * * ex. partitionSchema(date, hour) [Partition(2022-09-01, 12), Partition(2022-09-02, 13)] => * Or(And(Equal(Attribute(date), Literal(2022-09-01)), Equal(Attribute(hour), Literal(12))), * And(Equal(Attribute(date), Literal(2022-09-02)), Equal(Attribute(hour), Literal(13)))) */ private static Expression buildPartitionExpression(List<Partition> partitions, List<FieldSchema> partitionFields) { return partitions.stream().map(partition -> { List<String> partitionValues = partition.getValues(); Expression root = null; for (int i = 0; i < partitionFields.size(); i++) { FieldSchema field = partitionFields.get(i); BinaryExpression v4 = Predicates.eq(new NameReference(field.getName()), buildLiteralExpression(partitionValues.get(i), field.getType())); if (root != null) { root = Predicates.and(root, v4); } else { root = v4; } } return root; }).reduce(null, (result, expr) -> { if (result == null) { return expr; } else { return Predicates.or(result, expr); } }); }
3.26
hudi_PartitionFilterGenerator_extractFieldValues_rdh
/** * Extract partition values from the {@param partitions}, and binding to * corresponding partition fieldSchemas. */ private static List<Pair<FieldSchema, String[]>> extractFieldValues(List<Partition> partitions, List<FieldSchema> partitionFields) { return IntStream.range(0, partitionFields.size()).mapToObj(i -> { Set<String> values = new HashSet<String>(); for (int j = 0; j < partitions.size(); j++) { values.add(partitions.get(j).getValues().get(i)); } return Pair.of(partitionFields.get(i), values.toArray(new String[0])); }).collect(Collectors.toList()); }
3.26
hudi_PartitionFilterGenerator_compare_rdh
/** * As HMS only accepts DATE, INT, STRING, BIGINT to push down partition filters, here we only * do the comparison for these types. */ @Override public int compare(String s1, String s2) { switch (valueType.toLowerCase(Locale.ROOT)) { case HiveSchemaUtil.INT_TYPE_NAME : int i1 = Integer.parseInt(s1); int i2 = Integer.parseInt(s2); return Integer.compare(i1, i2); case HiveSchemaUtil.BIGINT_TYPE_NAME : long l1 = Long.parseLong(s1); long l2 = Long.parseLong(s2); return Long.compare(l1, l2); case HiveSchemaUtil.DATE_TYPE_NAME : case HiveSchemaUtil.STRING_TYPE_NAME : return s1.compareTo(s2); default : throw new IllegalArgumentException(String.format(UNSUPPORTED_TYPE_ERROR, valueType)); } }
3.26
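As a quick illustration of why the type-aware comparator above parses values before comparing, here is a small standalone example (JDK only; the class name is illustrative): lexicographic ordering of string-encoded integers puts "9" after "100", while numeric parsing restores the intended order.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class TypedValueSortSketch {
    public static void main(String[] args) {
        List<String> raw = new ArrayList<>(Arrays.asList("10", "9", "100"));
        raw.sort(Comparator.naturalOrder());                  // lexicographic: [10, 100, 9]
        System.out.println(raw);
        raw.sort(Comparator.comparingInt(Integer::parseInt)); // numeric: [9, 10, 100]
        System.out.println(raw);
    }
}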
hudi_HiveAvroSerializer_isNullableType_rdh
/** * Determine if an Avro schema is of type Union[T, NULL]. Avro supports nullable * types via a union of type T and null. This is a very common use case. * As such, we want to silently convert it to just T and allow the value to be null. * * When a Hive union type is used with AVRO, the schema type becomes * Union[NULL, T1, T2, ...]. The NULL in the union should be silently removed * * @return true if type represents Union[T, Null], false otherwise */ public static boolean isNullableType(Schema schema) { if (!schema.getType().equals(Type.UNION)) { return false; } List<Schema> itemSchemas = schema.getTypes(); if (itemSchemas.size() < 2) { return false; } for (Schema itemSchema : itemSchemas) { if (Type.NULL.equals(itemSchema.getType())) { return true;} } // [null, null] not allowed, so this check is ok. return false; }
3.26
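A small standalone check of the same nullable-union rule with the plain Avro API (assuming org.apache.avro is on the classpath; the class name is illustrative): a union counts as nullable if any branch is NULL, regardless of branch order.

import java.util.Arrays;
import org.apache.avro.Schema;

public class NullableUnionSketch {
    public static void main(String[] args) {
        // The common "nullable string" encoding: union(null, string)
        Schema nullableString = Schema.createUnion(Arrays.asList(
                Schema.create(Schema.Type.NULL), Schema.create(Schema.Type.STRING)));
        boolean nullable = nullableString.getType() == Schema.Type.UNION
                && nullableString.getTypes().stream().anyMatch(s -> s.getType() == Schema.Type.NULL);
        System.out.println(nullable); // true
    }
}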
hudi_HiveAvroSerializer_getOtherTypeFromNullableType_rdh
/** * If the union schema is a nullable union, get the schema for the non-nullable type. * This method does no checking that the provided Schema is nullable. If the provided * union schema is non-nullable, it simply returns the union schema */ public static Schema getOtherTypeFromNullableType(Schema unionSchema) { final List<Schema> types = unionSchema.getTypes(); if (types.size() == 2) { // most common scenario if (types.get(0).getType() == Type.NULL) { return types.get(1); } if (types.get(1).getType() == Type.NULL) { return types.get(0); } // not a nullable union return unionSchema; } final List<Schema> itemSchemas = new ArrayList<>();for (Schema itemSchema : types) { if (!Type.NULL.equals(itemSchema.getType())) { itemSchemas.add(itemSchema); } } if (itemSchemas.size() > 1) { return Schema.createUnion(itemSchemas); } else { return itemSchemas.get(0); } }
3.26
hudi_BaseHoodieClient_m0_rdh
/** * Releases any resources used by the client. */ @Override public void m0() { stopEmbeddedServerView(true); this.context.setJobStatus("", ""); this.heartbeatClient.close(); this.txnManager.close(); }
3.26
hudi_BaseHoodieClient_resolveWriteConflict_rdh
/** * Resolve write conflicts before commit. * * @param table * A hoodie table instance created after transaction starts so that the latest commits and files are captured. * @param metadata * Current committing instant's metadata * @param pendingInflightAndRequestedInstants * Pending instants on the timeline * @see {@link BaseHoodieWriteClient#preCommit} * @see {@link BaseHoodieTableServiceClient#preCommit} */ protected void resolveWriteConflict(HoodieTable table, HoodieCommitMetadata metadata, Set<String> pendingInflightAndRequestedInstants) {Timer.Context conflictResolutionTimer = metrics.getConflictResolutionCtx(); try { TransactionUtils.resolveWriteConflictIfAny(table, this.txnManager.getCurrentTransactionOwner(), Option.of(metadata), config, txnManager.getLastCompletedTransactionOwner(), false, pendingInflightAndRequestedInstants);metrics.emitConflictResolutionSuccessful();} catch (HoodieWriteConflictException e) { metrics.emitConflictResolutionFailed(); throw e; } finally { if (conflictResolutionTimer != null) { conflictResolutionTimer.stop(); } } }
3.26
hudi_BaseHoodieClient_createNewInstantTime_rdh
/** * Returns next instant time in the correct format. * * @param shouldLock * Whether to lock the context to get the instant time. */ public String createNewInstantTime(boolean shouldLock) { return HoodieActiveTimeline.createNewInstantTime(shouldLock, timeGenerator); }
3.26
hudi_BaseHoodieClient_finalizeWrite_rdh
/** * Finalize Write operation. * * @param table * HoodieTable * @param instantTime * Instant Time * @param stats * Hoodie Write Stat */ protected void finalizeWrite(HoodieTable table, String instantTime, List<HoodieWriteStat> stats) { try { final Timer.Context finalizeCtx = metrics.getFinalizeCtx(); table.finalizeWrite(context, instantTime, stats); if (finalizeCtx != null) { Option<Long> durationInMs = Option.of(metrics.getDurationInMs(finalizeCtx.stop())); durationInMs.ifPresent(duration -> { LOG.info("Finalize write elapsed time (milliseconds): " + duration); metrics.updateFinalizeWriteMetrics(duration, stats.size()); }); } } catch (HoodieIOException ioe) { throw new HoodieCommitException(("Failed to complete commit " + instantTime) + " due to finalize errors.", ioe); } }
3.26
hudi_RelationalDBBasedStorage_saveInstantMetadata_rdh
// todo: check correctness @Override public void saveInstantMetadata(long tableId, THoodieInstant instant, byte[] metadata) throws MetaserverStorageException { InstantBean instantBean = new InstantBean(tableId, instant); Map<String, Object> params = new HashMap<>(); params.put("instant", instantBean); params.put("metadata", metadata); // todo: array bytes to longblob timelineDao.insertBySql("insertInstantMetadata", params); }
3.26
hudi_BootstrapExecutorUtils_syncHive_rdh
/** * Sync to Hive. */private void syncHive() { if (cfg.enableHiveSync) { TypedProperties metaProps = new TypedProperties(); metaProps.putAll(props); metaProps.put(META_SYNC_DATABASE_NAME.key(), cfg.database); metaProps.put(META_SYNC_TABLE_NAME.key(), cfg.tableName); metaProps.put(META_SYNC_BASE_PATH.key(), cfg.basePath); metaProps.put(META_SYNC_BASE_FILE_FORMAT.key(), cfg.baseFileFormat); if (props.getBoolean(HIVE_SYNC_BUCKET_SYNC.key(), HIVE_SYNC_BUCKET_SYNC.defaultValue())) { metaProps.put(HIVE_SYNC_BUCKET_SYNC_SPEC.key(), HiveSyncConfig.getBucketSpec(props.getString(BUCKET_INDEX_HASH_FIELD.key()), props.getInteger(BUCKET_INDEX_NUM_BUCKETS.key()))); } try (HiveSyncTool hiveSyncTool = new HiveSyncTool(metaProps, configuration)) { hiveSyncTool.syncHoodieTable(); } } }
3.26
hudi_BootstrapExecutorUtils_m0_rdh
/** * Executes Bootstrap. */ public void m0() throws IOException { initializeTable();try (SparkRDDWriteClient bootstrapClient = new SparkRDDWriteClient(new HoodieSparkEngineContext(jssc), bootstrapConfig)) { HashMap<String, String> checkpointCommitMetadata = new HashMap<>(); checkpointCommitMetadata.put(CHECKPOINT_KEY, Config.checkpoint);bootstrapClient.bootstrap(Option.of(checkpointCommitMetadata)); syncHive(); } }
3.26
hudi_FlinkHoodieIndexFactory_createIndex_rdh
/** * A factory to generate Flink {@link HoodieIndex}. */ public final class FlinkHoodieIndexFactory {public static HoodieIndex createIndex(HoodieFlinkEngineContext context, HoodieWriteConfig config) { // first use index class config to create index. if (!StringUtils.isNullOrEmpty(config.getIndexClass())) { return HoodieIndexUtils.createUserDefinedIndex(config); } switch (config.getIndexType()) { case FLINK_STATE : // Flink state index stores the index mappings with a state-backend, // instantiates an in-memory HoodieIndex component as a placeholder. case INMEMORY : return new FlinkInMemoryStateIndex(context, config); case BLOOM : return new HoodieBloomIndex(config, ListBasedHoodieBloomIndexHelper.getInstance()); case GLOBAL_BLOOM : return new HoodieGlobalBloomIndex(config, ListBasedHoodieBloomIndexHelper.getInstance()); case SIMPLE : return new HoodieSimpleIndex(config, Option.empty()); case GLOBAL_SIMPLE : return new HoodieGlobalSimpleIndex(config, Option.empty()); case BUCKET : switch (config.getBucketIndexEngineType()) { case SIMPLE : return new HoodieSimpleBucketIndex(config); case CONSISTENT_HASHING : return new HoodieConsistentBucketIndex(config); default : throw new HoodieIndexException("Unknown bucket index engine type: " + config.getBucketIndexEngineType()); } default : throw new HoodieIndexException("Unsupported index type " + config.getIndexType()); } } }
3.26
hudi_HiveIncrPullSource_findCommitToPull_rdh
/** * Finds the first commit from source, greater than the target's last commit, and reads it out. */ private Option<String> findCommitToPull(Option<String> latestTargetCommit) throws IOException {LOG.info("Looking for commits "); FileStatus[] commitTimePaths = fs.listStatus(new Path(incrPullRootPath)); List<String> v1 = new ArrayList<>(commitTimePaths.length); for (FileStatus commitTimePath : commitTimePaths) { String[] splits = commitTimePath.getPath().toString().split("/"); v1.add(splits[splits.length - 1]); } Collections.sort(v1); LOG.info("Retrieved commit times " + v1); if (!latestTargetCommit.isPresent()) { // start from the beginning return Option.of(v1.get(0)); } for (String instantTime : v1) { // TODO(vc): Add an option to delete consumed commits if (instantTime.compareTo(latestTargetCommit.get()) > 0) { return Option.of(instantTime); } } return Option.empty(); }
3.26
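The commit selection in findCommitToPull boils down to: sort the commit times, then take the first one strictly greater than the last commit pulled, or the earliest one when nothing has been pulled yet. A self-contained sketch of just that selection (JDK only; class and method names are illustrative):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Optional;

public class NextCommitSketch {
    static Optional<String> nextCommit(List<String> commitTimes, Optional<String> lastPulled) {
        List<String> sorted = new ArrayList<>(commitTimes);
        Collections.sort(sorted);
        if (!lastPulled.isPresent()) {
            // Nothing pulled so far: start from the earliest commit, if any.
            return sorted.isEmpty() ? Optional.empty() : Optional.of(sorted.get(0));
        }
        // First commit strictly newer than the last one pulled.
        return sorted.stream().filter(t -> t.compareTo(lastPulled.get()) > 0).findFirst();
    }

    public static void main(String[] args) {
        List<String> commits = Arrays.asList("20240101", "20240103", "20240102");
        System.out.println(nextCommit(commits, Optional.of("20240101"))); // Optional[20240102]
        System.out.println(nextCommit(commits, Optional.empty()));        // Optional[20240101]
    }
}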
hudi_UpgradeDowngrade_run_rdh
/** * Perform Upgrade or Downgrade steps if required and updated table version if need be. * <p> * Starting from version 0.6.0, this upgrade/downgrade step will be added in all write paths. * <p> * Essentially, if a dataset was created using an previous table version in an older release, * and Hoodie version was upgraded to a new release with new table version supported, * Hoodie table version gets bumped to the new version and there are some upgrade steps need * to be executed before doing any writes. * <p> * Similarly, if a dataset was created using an newer table version in an newer release, * and then hoodie was downgraded to an older release or to older Hoodie table version, * then some downgrade steps need to be executed before proceeding w/ any writes. * <p> * Below shows the table version corresponding to the Hudi release: * Hudi release -> table version * pre 0.6.0 -> v0 * 0.6.0 to 0.8.0 -> v1 * 0.9.0 -> v2 * 0.10.0 -> v3 * 0.11.0 -> v4 * 0.12.0 to 0.13.0 -> v5 * 0.14.0 to current -> v6 * <p> * On a high level, these are the steps performed * <p> * Step1 : Understand current hoodie table version and table version from hoodie.properties file * Step2 : Delete any left over .updated from previous upgrade/downgrade * Step3 : If version are different, perform upgrade/downgrade. * Step4 : Copy hoodie.properties -> hoodie.properties.updated with the version updated * Step6 : Rename hoodie.properties.updated to hoodie.properties * </p> * * @param toVersion * version to which upgrade or downgrade has to be done. * @param instantTime * current instant time that should not be touched. */ public void run(HoodieTableVersion toVersion, String instantTime) { // Change metadata table version automatically if (toVersion.versionCode() >= HoodieTableVersion.FOUR.versionCode()) { String metadataTablePath = HoodieTableMetadata.getMetadataTableBasePath(metaClient.getBasePathV2().toString()); try { if (metaClient.getFs().exists(new Path(metadataTablePath))) { HoodieTableMetaClient mdtMetaClient = HoodieTableMetaClient.builder().setConf(metaClient.getHadoopConf()).setBasePath(metadataTablePath).build(); HoodieWriteConfig mdtWriteConfig = HoodieMetadataWriteUtils.createMetadataWriteConfig(config, HoodieFailedWritesCleaningPolicy.EAGER); new UpgradeDowngrade(mdtMetaClient, mdtWriteConfig, context, upgradeDowngradeHelper).run(toVersion, instantTime); } } catch (Exception e) { LOG.warn(("Unable to upgrade or downgrade the metadata table to version " + toVersion) + ", ignoring the error and continue.", e); } } // Fetch version from property file and current version HoodieTableVersion fromVersion = metaClient.getTableConfig().getTableVersion(); if (!needsUpgradeOrDowngrade(toVersion)) { return; } // Perform the actual upgrade/downgrade; this has to be idempotent, for now. 
LOG.info((("Attempting to move table from version " + fromVersion) + " to ") + toVersion); Map<ConfigProperty, String> tableProps = new Hashtable<>(); if (fromVersion.versionCode() < toVersion.versionCode()) { // upgrade while (fromVersion.versionCode() < toVersion.versionCode()) { HoodieTableVersion nextVersion = HoodieTableVersion.versionFromCode(fromVersion.versionCode() + 1); tableProps.putAll(upgrade(fromVersion, nextVersion, instantTime)); fromVersion = nextVersion; } } else { // downgrade while (fromVersion.versionCode() > toVersion.versionCode()) { HoodieTableVersion v7 = HoodieTableVersion.versionFromCode(fromVersion.versionCode() - 1); tableProps.putAll(downgrade(fromVersion, v7, instantTime)); fromVersion = v7; } } // Reload the meta client to get the latest table config (which could have been updated due to metadata table) metaClient = HoodieTableMetaClient.reload(metaClient); // Write out the current version in hoodie.properties.updated file for (Map.Entry<ConfigProperty, String> entry : tableProps.entrySet()) { metaClient.getTableConfig().setValue(entry.getKey(), entry.getValue()); } metaClient.getTableConfig().setTableVersion(toVersion); HoodieTableConfig.update(metaClient.getFs(), new Path(metaClient.getMetaPath()), metaClient.getTableConfig().getProps()); }
3.26
hudi_RowDataKeyGens_hasRecordKey_rdh
/** * Checks whether user provides any record key. */ private static boolean hasRecordKey(String recordKeys, List<String> fieldNames) { return (recordKeys.split(",").length != 1) || fieldNames.contains(recordKeys); }
3.26
hudi_RowDataKeyGens_instance_rdh
/** * Factory class for all kinds of {@link RowDataKeyGen}. */public class RowDataKeyGens { /** * Creates a {@link RowDataKeyGen} with given configuration. */ public static RowDataKeyGen instance(Configuration conf, RowType rowType, int taskId, String instantTime) { String recordKeys = conf.getString(FlinkOptions.RECORD_KEY_FIELD); if (hasRecordKey(recordKeys, rowType.getFieldNames())) { return RowDataKeyGen.instance(conf, rowType); } else { return AutoRowDataKeyGen.instance(conf, rowType, taskId, instantTime); } }
3.26
hudi_SparkRecordMergingUtils_getCachedFieldNameToIdMapping_rdh
/** * * @param avroSchema * Avro schema. * @return The field name to ID mapping. */ public static Map<String, Integer> getCachedFieldNameToIdMapping(Schema avroSchema) { return f0.computeIfAbsent(avroSchema, schema -> { StructType structType = HoodieInternalRowUtils.getCachedSchema(schema); Map<String, Integer> schemaFieldIdMapping = new HashMap<>(); int fieldId = 0; for (StructField field : structType.fields()) { schemaFieldIdMapping.put(field.name(), fieldId); fieldId++;} return schemaFieldIdMapping; }); } /** * Merges the two schemas so the merged schema contains all the fields from the two schemas, * with the same ordering of fields based on the provided reader schema. * * @param oldSchema * Old schema. * @param newSchema * New schema. * @param readerSchema * Reader schema containing all the fields to read. * @return The ID to {@link StructField} instance mapping of the merged schema, and the {@link StructType}
3.26
hudi_SparkRecordMergingUtils_mergePartialRecords_rdh
/** * Merges records which can contain partial updates. * <p> * For example, the reader schema is * {[ * {"name":"id", "type":"string"}, * {"name":"ts", "type":"long"}, * {"name":"name", "type":"string"}, * {"name":"price", "type":"double"}, * {"name":"tags", "type":"string"} * ]} * The older and newer records can be (omitting Hudi meta fields): * <p> * (1) older (complete record update): * id | ts | name | price | tags * 1 | 10 | apple | 2.3 | fruit * <p> * newer (partial record update): * ts | price * 16 | 2.8 * <p> * The merging result is (updated values from newer replaces the ones in the older): * <p> * id | ts | name | price | tags * 1 | 16 | apple | 2.8 | fruit * <p> * (2) older (partial record update): * ts | price * 10 | 2.8 * <p> * newer (partial record update): * ts | tag * 16 | fruit,juicy * <p> * The merging result is (two partial updates are merged together, and values of overlapped * fields come from the newer): * <p> * ts | price | tags * 16 | 2.8 | fruit,juicy * * @param older * Older {@link HoodieSparkRecord}. * @param oldSchema * Schema of the older record. * @param newer * Newer {@link HoodieSparkRecord}. * @param newSchema * Schema of the newer record. * @param readerSchema * Reader schema containing all the fields to read. This is used to maintain * the ordering of the fields of the merged record. * @param props * Configuration in {@link TypedProperties}. * @return The merged record and schema. */ public static Pair<HoodieRecord, Schema> mergePartialRecords(HoodieSparkRecord older, Schema oldSchema, HoodieSparkRecord newer, Schema newSchema, Schema readerSchema, TypedProperties props) { // The merged schema contains fields that only appear in either older and/or newer record Pair<Map<Integer, StructField>, Pair<StructType, Schema>> mergedSchemaPair = getCachedMergedSchema(oldSchema, newSchema, readerSchema); boolean isNewerPartial = isPartial(newSchema, mergedSchemaPair.getRight().getRight()); if (isNewerPartial) { InternalRow oldRow = older.getData();InternalRow newPartialRow = newer.getData(); Map<Integer, StructField> mergedIdToFieldMapping = mergedSchemaPair.getLeft(); Map<String, Integer> oldNameToIdMapping = getCachedFieldNameToIdMapping(oldSchema);Map<String, Integer> newPartialNameToIdMapping = getCachedFieldNameToIdMapping(newSchema); List<Object> values = new ArrayList<>(mergedIdToFieldMapping.size()); for (int fieldId = 0; fieldId < mergedIdToFieldMapping.size(); fieldId++) { StructField structField = mergedIdToFieldMapping.get(fieldId);Integer ordInPartialUpdate = newPartialNameToIdMapping.get(structField.name()); if (ordInPartialUpdate != null) { // The field exists in the newer record; picks the value from newer record values.add(newPartialRow.get(ordInPartialUpdate, structField.dataType())); } else { // The field does not exist in the newer record; picks the value from older record values.add(oldRow.get(oldNameToIdMapping.get(structField.name()), structField.dataType())); } } InternalRow mergedRow = new GenericInternalRow(values.toArray()); HoodieSparkRecord mergedSparkRecord = new HoodieSparkRecord(mergedRow, mergedSchemaPair.getRight().getLeft()); return Pair.of(mergedSparkRecord, mergedSchemaPair.getRight().getRight()); } else {return Pair.of(newer, newSchema); } }
3.26
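The field-picking rule in mergePartialRecords can be reproduced with plain maps: for every field of the merged schema, take the newer (partial) value when it is present, otherwise fall back to the older record. A minimal sketch reproducing example (1) from the Javadoc above (JDK only; names are illustrative):

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class PartialMergeSketch {
    static Map<String, Object> merge(Map<String, Object> older, Map<String, Object> newerPartial, List<String> mergedFields) {
        Map<String, Object> merged = new LinkedHashMap<>();
        for (String field : mergedFields) {
            // Newer partial update wins for the fields it carries; everything else comes from the older record.
            merged.put(field, newerPartial.containsKey(field) ? newerPartial.get(field) : older.get(field));
        }
        return merged;
    }

    public static void main(String[] args) {
        Map<String, Object> older = new LinkedHashMap<>();
        older.put("id", "1"); older.put("ts", 10L); older.put("name", "apple"); older.put("price", 2.3); older.put("tags", "fruit");
        Map<String, Object> newer = new LinkedHashMap<>();
        newer.put("ts", 16L); newer.put("price", 2.8);
        // Prints: {id=1, ts=16, name=apple, price=2.8, tags=fruit}
        System.out.println(merge(older, newer, Arrays.asList("id", "ts", "name", "price", "tags")));
    }
}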
hudi_SparkRecordMergingUtils_getCachedFieldIdToFieldMapping_rdh
/** * * @param avroSchema * Avro schema. * @return The field ID to {@link StructField} instance mapping. */ public static Map<Integer, StructField> getCachedFieldIdToFieldMapping(Schema avroSchema) { return FIELD_ID_TO_FIELD_MAPPING_CACHE.computeIfAbsent(avroSchema, schema -> { StructType structType = HoodieInternalRowUtils.getCachedSchema(schema); Map<Integer, StructField> schemaFieldIdMapping = new HashMap<>(); int fieldId = 0; for (StructField field : structType.fields()) { schemaFieldIdMapping.put(fieldId, field); fieldId++; } return schemaFieldIdMapping; }); }
3.26
hudi_SparkRecordMergingUtils_isPartial_rdh
/** * * @param schema * Avro schema to check. * @param mergedSchema * The merged schema for the merged record. * @return whether the Avro schema is partial compared to the merged schema. */ public static boolean isPartial(Schema schema, Schema mergedSchema) { return !schema.equals(mergedSchema); }
3.26
hudi_HoodieAdbJdbcClient_updateTableDefinition_rdh
/** * TODO align with {@link org.apache.hudi.sync.common.HoodieMetaSyncOperations#updateTableSchema} */ public void updateTableDefinition(String tableName, SchemaDifference schemaDiff) { LOG.info("Adding columns for table:{}", tableName); schemaDiff.getAddColumnTypes().forEach((columnName, columnType) -> executeAdbSql(constructAddColumnSql(tableName, columnName, columnType))); LOG.info("Updating columns' definition for table:{}", tableName); schemaDiff.getUpdateColumnTypes().forEach((columnName, columnType) -> executeAdbSql(constructChangeColumnSql(tableName, columnName, columnType))); }
3.26
hudi_HoodieAdbJdbcClient_scanTablePartitions_rdh
/** * TODO migrate to implementation of {@link #getAllPartitions(String)} */public Map<List<String>, String> scanTablePartitions(String tableName) { String sql = constructShowPartitionSql(tableName); Function<ResultSet, Map<List<String>, String>> transform = resultSet -> { Map<List<String>, String> partitions = new HashMap<>(); try { while (resultSet.next()) { if (resultSet.getMetaData().getColumnCount() > 0) { String str = resultSet.getString(1); if (!StringUtils.isNullOrEmpty(str)) { List<String> values = partitionValueExtractor.extractPartitionValuesInPath(str); Path storagePartitionPath = FSUtils.getPartitionPath(config.getString(META_SYNC_BASE_PATH), String.join("/", values));String fullStoragePartitionPath = Path.getPathWithoutSchemeAndAuthority(storagePartitionPath).toUri().getPath(); partitions.put(values, fullStoragePartitionPath); } } } } catch (Exception e) { throw new HoodieException("Fail to execute sql:" + sql, e); } return partitions; }; return executeQuerySQL(sql, transform); }
3.26
hudi_HoodieAdbJdbcClient_getPartitionEvents_rdh
/** * TODO align with {@link HoodieSyncClient#getPartitionEvents} */ public List<PartitionEvent> getPartitionEvents(Map<List<String>, String> tablePartitions, List<String> partitionStoragePartitions) { Map<String, String> paths = new HashMap<>(); for (Map.Entry<List<String>, String> entry : tablePartitions.entrySet()) { List<String> partitionValues = entry.getKey(); String fullTablePartitionPath = entry.getValue(); paths.put(String.join(", ", partitionValues), fullTablePartitionPath); } List<PartitionEvent> events = new ArrayList<>(); for (String storagePartition : partitionStoragePartitions) { Path storagePartitionPath = FSUtils.getPartitionPath(config.getString(META_SYNC_BASE_PATH), storagePartition); String fullStoragePartitionPath = Path.getPathWithoutSchemeAndAuthority(storagePartitionPath).toUri().getPath(); // Check if the partition values or if hdfs path is the same List<String> storagePartitionValues = partitionValueExtractor.extractPartitionValuesInPath(storagePartition); if (config.getBoolean(ADB_SYNC_USE_HIVE_STYLE_PARTITIONING)) { String partition = String.join("/", storagePartitionValues); storagePartitionPath = FSUtils.getPartitionPath(config.getString(META_SYNC_BASE_PATH), partition); fullStoragePartitionPath = Path.getPathWithoutSchemeAndAuthority(storagePartitionPath).toUri().getPath(); } if (!storagePartitionValues.isEmpty()) { String storageValue = String.join(", ", storagePartitionValues); if (!paths.containsKey(storageValue)) { events.add(PartitionEvent.newPartitionAddEvent(storagePartition)); } else if (!paths.get(storageValue).equals(fullStoragePartitionPath)) { events.add(PartitionEvent.newPartitionUpdateEvent(storagePartition)); } } } return events; }
3.26
hudi_HoodieAdbJdbcClient_getPartitionClause_rdh
/** * Generate Hive Partition from partition values. * * @param partition * Partition path * @return partition clause */ private String getPartitionClause(String partition) { List<String> partitionValues = partitionValueExtractor.extractPartitionValuesInPath(partition); ValidationUtils.checkArgument(config.getSplitStrings(META_SYNC_PARTITION_FIELDS).size() == partitionValues.size(), ((("Partition key parts " + config.getSplitStrings(META_SYNC_PARTITION_FIELDS)) + " does not match with partition values ") + partitionValues) + ". Check partition strategy. "); List<String> partBuilder = new ArrayList<>(); for (int i = 0; i < config.getSplitStrings(META_SYNC_PARTITION_FIELDS).size(); i++) { partBuilder.add(((config.getSplitStrings(META_SYNC_PARTITION_FIELDS).get(i) + "='") + partitionValues.get(i)) + "'"); } return String.join(",", partBuilder); }
3.26
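The clause construction in getPartitionClause is a straightforward zip of partition field names with the extracted values. A standalone sketch of that rendering (JDK only; names are illustrative):

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class PartitionClauseSketch {
    static String partitionClause(List<String> fields, List<String> values) {
        if (fields.size() != values.size()) {
            throw new IllegalArgumentException("Partition fields " + fields + " do not match partition values " + values);
        }
        return IntStream.range(0, fields.size())
                .mapToObj(i -> fields.get(i) + "='" + values.get(i) + "'")
                .collect(Collectors.joining(","));
    }

    public static void main(String[] args) {
        // Prints: date='2022-09-01',hour='12'
        System.out.println(partitionClause(Arrays.asList("date", "hour"), Arrays.asList("2022-09-01", "12")));
    }
}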
hudi_HoodieListData_m0_rdh
/** * Creates instance of {@link HoodieListData} bearing *eager* execution semantic * * @param listData * a {@link List} of objects in type T * @param <T> * type of object * @return a new instance containing the {@link List<T>} reference */ public static <T> HoodieListData<T> m0(List<T> listData) { return new HoodieListData<>(listData, false); }
3.26
hudi_HoodieListData_lazy_rdh
/** * Creates instance of {@link HoodieListData} bearing *lazy* execution semantic * * @param listData * a {@link List} of objects in type T * @param <T> * type of object * @return a new instance containing the {@link List<T>} reference */ public static <T> HoodieListData<T> lazy(List<T> listData) { return new HoodieListData<>(listData, true); }
3.26
hudi_CLIUtils_getTimelineInRange_rdh
/** * Gets a {@link HoodieDefaultTimeline} instance containing the instants in the specified range. * * @param startTs * Start instant time. * @param endTs * End instant time. * @param includeArchivedTimeline * Whether to include instants from the archived timeline. * @return a {@link HoodieDefaultTimeline} instance containing the instants in the specified range. */ public static HoodieDefaultTimeline getTimelineInRange(String startTs, String endTs, boolean includeArchivedTimeline) { if (isNullOrEmpty(startTs)) { startTs = getTimeDaysAgo(10); } if (isNullOrEmpty(endTs)) { endTs = getTimeDaysAgo(1); } checkArgument(nonEmpty(startTs), "startTs is null or empty"); checkArgument(nonEmpty(endTs), "endTs is null or empty"); HoodieTableMetaClient metaClient = HoodieCLI.getTableMetaClient(); HoodieActiveTimeline activeTimeline = metaClient.getActiveTimeline(); if (includeArchivedTimeline) { HoodieArchivedTimeline archivedTimeline = metaClient.getArchivedTimeline(); archivedTimeline.loadInstantDetailsInMemory(startTs, endTs); return archivedTimeline.findInstantsInRange(startTs, endTs).mergeTimeline(activeTimeline); } return activeTimeline; }
3.26
hudi_HoodieLazyInsertIterable_getTransformer_rdh
/** * Transformer function to help transform a HoodieRecord. This transformer is used by BufferedIterator to offload some * expensive operations of transformation to the reader thread. */ public <T> Function<HoodieRecord<T>, HoodieInsertValueGenResult<HoodieRecord>> getTransformer(Schema schema, HoodieWriteConfig writeConfig) { return getTransformerInternal(schema, writeConfig); }
3.26
hudi_TableSizeStats_readConfigFromFileSystem_rdh
/** * Reads config from the file system. * * @param jsc * {@link JavaSparkContext} instance. * @param cfg * {@link Config} instance. * @return the {@link TypedProperties} instance. */ private TypedProperties readConfigFromFileSystem(JavaSparkContext jsc, Config cfg) { return UtilHelpers.readConfig(jsc.hadoopConfiguration(), new Path(cfg.propsFilePath), cfg.configs).getProps(true); }
3.26
hudi_BulkInsertWriteFunction_endInput_rdh
/** * End input action for batch source. */ public void endInput() { initWriterHelperIfNeeded(); final List<WriteStatus> writeStatus = this.writerHelper.getWriteStatuses(this.taskID); final WriteMetadataEvent event = WriteMetadataEvent.builder().taskID(taskID).instantTime(this.writerHelper.getInstantTime()).writeStatus(writeStatus).lastBatch(true).endInput(true).build(); this.eventGateway.sendEventToCoordinator(event); }
3.26
hudi_BulkInsertWriteFunction_setOperatorEventGateway_rdh
// ------------------------------------------------------------------------- // Getter/Setter // ------------------------------------------------------------------------- public void setOperatorEventGateway(OperatorEventGateway operatorEventGateway) { this.eventGateway = operatorEventGateway; }
3.26
hudi_BulkInsertWriteFunction_initWriterHelperIfNeeded_rdh
// ------------------------------------------------------------------------- // Utilities // ------------------------------------------------------------------------- private void initWriterHelperIfNeeded() { if (writerHelper == null) { String instant = instantToWrite(); this.writerHelper = WriterHelpers.getWriterHelper(this.config, this.writeClient.getHoodieTable(), this.writeClient.getConfig(), instant, this.taskID, getRuntimeContext().getNumberOfParallelSubtasks(), getRuntimeContext().getAttemptNumber(), this.rowType); } }
3.26
hudi_HoodieGlobalSimpleIndex_tagLocationInternal_rdh
/** * Tags records location for incoming records. * * @param inputRecords * {@link HoodieData} of incoming records * @param context * instance of {@link HoodieEngineContext} to use * @param hoodieTable * instance of {@link HoodieTable} to use * @return {@link HoodieData} of records with record locations set */ @Override protected <R> HoodieData<HoodieRecord<R>> tagLocationInternal(HoodieData<HoodieRecord<R>> inputRecords, HoodieEngineContext context, HoodieTable hoodieTable) { List<Pair<String, HoodieBaseFile>> latestBaseFiles = m0(context, hoodieTable); HoodiePairData<String, HoodieRecordGlobalLocation> allKeysAndLocations = fetchRecordGlobalLocations(context, hoodieTable, config.getGlobalSimpleIndexParallelism(), latestBaseFiles); boolean mayContainDuplicateLookup = hoodieTable.getMetaClient().getTableType() == MERGE_ON_READ; boolean shouldUpdatePartitionPath = config.getGlobalSimpleIndexUpdatePartitionPath() && hoodieTable.isPartitioned(); return tagGlobalLocationBackToRecords(inputRecords, allKeysAndLocations, mayContainDuplicateLookup, shouldUpdatePartitionPath, config, hoodieTable); }
3.26
hudi_HoodieGlobalSimpleIndex_m0_rdh
/** * Load all files for all partitions as <Partition, filename> pair data. */ private List<Pair<String, HoodieBaseFile>> m0(final HoodieEngineContext context, final HoodieTable hoodieTable) { HoodieTableMetaClient metaClient = hoodieTable.getMetaClient(); List<String> allPartitionPaths = FSUtils.getAllPartitionPaths(context, config.getMetadataConfig(), metaClient.getBasePath()); // Obtain the latest data files from all the partitions. return getLatestBaseFilesForAllPartitions(allPartitionPaths, context, hoodieTable); }
3.26
hudi_KafkaConnectUtils_getDefaultHadoopConf_rdh
/** * Returns the default Hadoop Configuration. * * @return */ public static Configuration getDefaultHadoopConf(KafkaConnectConfigs connectConfigs) { Configuration v9 = new Configuration(); // add hadoop config files if ((!StringUtils.isNullOrEmpty(connectConfigs.getHadoopConfDir())) || (!StringUtils.isNullOrEmpty(connectConfigs.getHadoopConfHome()))) { try { List<Path> configFiles = getHadoopConfigFiles(connectConfigs.getHadoopConfDir(), connectConfigs.getHadoopConfHome()); configFiles.forEach(f -> v9.addResource(new Path(f.toAbsolutePath().toUri()))); } catch (Exception e) { throw new HoodieException("Failed to read hadoop configuration!", e); } } else { DEFAULT_HADOOP_CONF_FILES.forEach(f -> v9.addResource(new Path(f.toAbsolutePath().toUri()))); } connectConfigs.getProps().keySet().stream().filter(prop -> { // In order to prevent printing unnecessary warn logs, here filter out the hoodie // configuration items before passing to hadoop/hive configs return !prop.toString().startsWith(HOODIE_CONF_PREFIX); }).forEach(prop -> { v9.set(prop.toString(), connectConfigs.getProps().get(prop.toString()).toString()); }); return v9; }
3.26
hudi_KafkaConnectUtils_getRecordKeyColumns_rdh
/** * Extract the record fields. * * @param keyGenerator * key generator Instance of the keygenerator. * @return Returns the record key columns separated by comma. */ public static String getRecordKeyColumns(KeyGenerator keyGenerator) { return String.join(",", keyGenerator.getRecordKeyFieldNames()); } /** * Extract partition columns directly if an instance of class {@link BaseKeyGenerator}
3.26
hudi_KafkaConnectUtils_getWriteStatuses_rdh
/** * Unwrap the Hudi {@link WriteStatus} from the received Protobuf message. * * @param participantInfo * The {@link ControlMessage.ParticipantInfo} that contains the * underlying {@link WriteStatus} sent by the participants. * @return the list of {@link WriteStatus} returned by Hudi on a write transaction. */ public static List<WriteStatus> getWriteStatuses(ControlMessage.ParticipantInfo participantInfo) { ControlMessage.ConnectWriteStatus connectWriteStatus = participantInfo.getWriteStatus(); return SerializationUtils.deserialize(connectWriteStatus.getSerializedWriteStatus().toByteArray()); }
3.26
hudi_KafkaConnectUtils_getHadoopConfigFiles_rdh
/** * Get hadoop config files by HADOOP_CONF_DIR or HADOOP_HOME */ public static List<Path> getHadoopConfigFiles(String hadoopConfigPath, String hadoopHomePath) throws IOException { List<Path> hadoopConfigFiles = new ArrayList<>(); if (!StringUtils.isNullOrEmpty(hadoopConfigPath)) { hadoopConfigFiles.addAll(walkTreeForXml(Paths.get(hadoopConfigPath))); } if (hadoopConfigFiles.isEmpty() && (!StringUtils.isNullOrEmpty(hadoopHomePath))) { hadoopConfigFiles.addAll(walkTreeForXml(Paths.get(hadoopHomePath, "etc", "hadoop"))); } return hadoopConfigFiles; }
3.26
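The lookup order in getHadoopConfigFiles (the explicit HADOOP_CONF_DIR first, then HADOOP_HOME/etc/hadoop as a fallback) can be approximated with java.nio alone; walkTreeForXml is assumed here to be a recursive *.xml scan, and the class and method names below are illustrative:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class XmlConfigLookupSketch {
    static List<Path> findXmlConfigs(String confDir, String homeDir) throws IOException {
        List<Path> found = new ArrayList<>();
        if (confDir != null && !confDir.isEmpty()) {
            found.addAll(listXml(Paths.get(confDir)));
        }
        if (found.isEmpty() && homeDir != null && !homeDir.isEmpty()) {
            // Fall back to the conventional <HADOOP_HOME>/etc/hadoop layout.
            found.addAll(listXml(Paths.get(homeDir, "etc", "hadoop")));
        }
        return found;
    }

    static List<Path> listXml(Path dir) throws IOException {
        try (Stream<Path> files = Files.walk(dir)) {
            return files.filter(p -> p.toString().endsWith(".xml")).collect(Collectors.toList());
        }
    }
}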
hudi_KafkaConnectUtils_getCommitMetadataForLatestInstant_rdh
/** * Get the Metadata from the latest commit file. * * @param metaClient * The {@link HoodieTableMetaClient} to get access to the meta data. * @return An Optional {@link HoodieCommitMetadata} containing the meta data from the latest commit file. */ public static Option<HoodieCommitMetadata> getCommitMetadataForLatestInstant(HoodieTableMetaClient metaClient) { HoodieTimeline timeline = metaClient.getActiveTimeline().getCommitsTimeline().filterCompletedInstants().filter(instant -> ((metaClient.getTableType() == HoodieTableType.COPY_ON_WRITE) && instant.getAction().equals(HoodieActiveTimeline.COMMIT_ACTION)) || ((metaClient.getTableType() == HoodieTableType.MERGE_ON_READ) && instant.getAction().equals(HoodieActiveTimeline.DELTA_COMMIT_ACTION))); Option<HoodieInstant> v12 = timeline.lastInstant(); if (v12.isPresent()) { try { byte[] v13 = timeline.getInstantDetails(v12.get()).get();return Option.of(HoodieCommitMetadata.fromBytes(v13, HoodieCommitMetadata.class)); } catch (Exception e) {throw new HoodieException("Failed to read schema from commit metadata", e); } } else { return Option.empty(); } }
3.26
hudi_Tuple3_of_rdh
/** * Creates a new tuple and assigns the given values to the tuple's fields. This is more * convenient than using the constructor, because the compiler can infer the generic type * arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new * Tuple3<Integer, Double, String>(n, x, s)} */ public static <T0, T1, T2> Tuple3<T0, T1, T2> of(T0 f0, T1 f1, T2 f2) { return new Tuple3<>(f0, f1, f2); }
3.26
hudi_HoodieFlinkClusteringJob_start_rdh
/** * Main method to start clustering service. */ public void start(boolean serviceMode) throws Exception { if (serviceMode) { clusteringScheduleService.start(null); try { clusteringScheduleService.waitForShutdown(); } catch (Exception e) { throw new HoodieException(e.getMessage(), e); } finally { LOG.info("Shut down hoodie flink clustering"); } } else { LOG.info("Hoodie Flink Clustering running only single round"); try { clusteringScheduleService.cluster(); } catch (ApplicationExecutionException aee) { if (aee.getMessage().contains(NO_EXECUTE_KEYWORD)) { LOG.info("Clustering is not performed"); } else { LOG.error("Got error trying to perform clustering. Shutting down", aee); throw aee; } } catch (Exception e) { LOG.error("Got error running delta sync once. Shutting down", e); throw e; } finally { LOG.info("Shut down hoodie flink clustering"); } }}
3.26
hudi_HoodieFlinkClusteringJob_shutdownAsyncService_rdh
/** * Shutdown async services like compaction/clustering as DeltaSync is shutdown. */ public void shutdownAsyncService(boolean error) { LOG.info("Gracefully shutting down clustering job. Error ?" + error); executor.shutdown();writeClient.close(); }
3.26
hudi_HoodieFlinkClusteringJob_cluster_rdh
/** * Follows the same execution methodology of HoodieFlinkCompactor, where only one clustering job is allowed to be * executed at any point in time. * <p> * If there is an inflight clustering job, it will be rolled back and re-attempted. * <p> * A clustering plan will be generated if `schedule` is true. * * @throws Exception * @see HoodieFlinkCompactor */ private void cluster() throws Exception { table.getMetaClient().reloadActiveTimeline(); if (cfg.schedule) { // create a clustering plan on the timeline ClusteringUtil.validateClusteringScheduling(conf); String clusteringInstantTime = (cfg.clusteringInstantTime != null) ? cfg.clusteringInstantTime : writeClient.createNewInstantTime(); LOG.info(("Creating a clustering plan for instant [" + clusteringInstantTime) + "]"); boolean scheduled = writeClient.scheduleClusteringAtInstant(clusteringInstantTime, Option.empty()); if (!scheduled) { // do nothing. LOG.info("No clustering plan for this job"); return; } table.getMetaClient().reloadActiveTimeline(); } // fetch the instant based on the configured execution sequence List<HoodieInstant> instants = ClusteringUtils.getPendingClusteringInstantTimes(table.getMetaClient()); if (instants.isEmpty()) { // do nothing. LOG.info("No clustering plan scheduled, turns on the clustering plan schedule with --schedule option"); return; } final HoodieInstant clusteringInstant; if (cfg.clusteringInstantTime != null) { clusteringInstant = instants.stream().filter(i -> i.getTimestamp().equals(cfg.clusteringInstantTime)).findFirst().orElseThrow(() -> new HoodieException(("Clustering instant [" + cfg.clusteringInstantTime) + "] not found")); } else { // check for inflight clustering plans and roll them back if required clusteringInstant = (CompactionUtil.isLIFO(cfg.clusteringSeq)) ? instants.get(instants.size() - 1) : instants.get(0); } HoodieInstant inflightInstant = HoodieTimeline.getReplaceCommitInflightInstant(clusteringInstant.getTimestamp()); if (table.getMetaClient().getActiveTimeline().containsInstant(inflightInstant)) { LOG.info(("Rollback inflight clustering instant: [" + clusteringInstant) + "]"); table.rollbackInflightClustering(inflightInstant, commitToRollback -> writeClient.getTableServiceClient().getPendingRollbackInfo(table.getMetaClient(), commitToRollback, false)); table.getMetaClient().reloadActiveTimeline(); } // generate clustering plan // should support configurable commit metadata Option<Pair<HoodieInstant, HoodieClusteringPlan>> clusteringPlanOption = ClusteringUtils.getClusteringPlan(table.getMetaClient(), clusteringInstant); if (!clusteringPlanOption.isPresent()) { // do nothing. LOG.info("No clustering plan scheduled, turns on the clustering plan schedule with --schedule option"); return; } HoodieClusteringPlan clusteringPlan = clusteringPlanOption.get().getRight();if (((clusteringPlan == null) || (clusteringPlan.getInputGroups() == null)) || clusteringPlan.getInputGroups().isEmpty()) { // no clustering plan, do nothing and return. LOG.info("No clustering plan for instant " + clusteringInstant.getTimestamp()); return; }HoodieInstant instant = HoodieTimeline.getReplaceCommitRequestedInstant(clusteringInstant.getTimestamp()); int inputGroupSize = clusteringPlan.getInputGroups().size(); // get clusteringParallelism. int clusteringParallelism = (conf.getInteger(FlinkOptions.CLUSTERING_TASKS) == (-1)) ? 
inputGroupSize : Math.min(conf.getInteger(FlinkOptions.CLUSTERING_TASKS), inputGroupSize); // Mark instant as clustering inflight table.getActiveTimeline().transitionReplaceRequestedToInflight(instant, Option.empty()); final Schema tableAvroSchema = StreamerUtil.getTableAvroSchema(table.getMetaClient(), false); final DataType rowDataType = AvroSchemaConverter.convertToDataType(tableAvroSchema); final RowType rowType = ((RowType) (rowDataType.getLogicalType())); StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); // setup configuration long ckpTimeout = env.getCheckpointConfig().getCheckpointTimeout(); conf.setLong(FlinkOptions.WRITE_COMMIT_ACK_TIMEOUT, ckpTimeout); DataStream<ClusteringCommitEvent> dataStream = env.addSource(new ClusteringPlanSourceFunction(clusteringInstant.getTimestamp(), clusteringPlan, conf)).name("clustering_source").uid("uid_clustering_source").rebalance().transform("clustering_task", TypeInformation.of(ClusteringCommitEvent.class), new ClusteringOperator(conf, rowType)).setParallelism(clusteringParallelism); if (OptionsResolver.sortClusteringEnabled(conf)) { ExecNodeUtil.setManagedMemoryWeight(dataStream.getTransformation(), (conf.getInteger(FlinkOptions.WRITE_SORT_MEMORY) * 1024L) * 1024L); } dataStream.addSink(new ClusteringCommitSink(conf)).name("clustering_commit").uid("uid_clustering_commit").setParallelism(1).getTransformation().setMaxParallelism(1); env.execute("flink_hudi_clustering_" + clusteringInstant.getTimestamp()); }
3.26
hudi_PartialUpdateAvroPayload_mergeDisorderRecordsWithMetadata_rdh
/** * Merges the given disorder records with metadata. * * @param schema * The record schema * @param oldRecord * The current record from file * @param updatingRecord * The incoming record * @return the merged record option */ protected Option<IndexedRecord> mergeDisorderRecordsWithMetadata(Schema schema, GenericRecord oldRecord, GenericRecord updatingRecord, boolean isPreCombining) { if (isDeleteRecord(oldRecord) && (!isPreCombining)) { return Option.empty(); } else { final GenericRecordBuilder builder = new GenericRecordBuilder(schema); List<Schema.Field> fields = schema.getFields(); fields.forEach(field -> { final GenericRecord baseRecord; final GenericRecord mergedRecord; if (HoodieRecord.HOODIE_META_COLUMNS_NAME_TO_POS.containsKey(field.name())) { // this is a metadata field baseRecord = updatingRecord; mergedRecord = oldRecord; } else { baseRecord = oldRecord; mergedRecord = updatingRecord; } setField(baseRecord, mergedRecord, builder, field); }); return Option.of(builder.build()); }}
3.26
hudi_PartialUpdateAvroPayload_mergeOldRecord_rdh
// ------------------------------------------------------------------------- // Utilities // ------------------------------------------------------------------------- /** * Merge old record with new record. * * @param oldRecord * @param schema * @param isOldRecordNewer * @param isPreCombining * flag for deleted record combine logic * 1 preCombine: if delete record is newer, return merged record with _hoodie_is_deleted = true * 2 combineAndGetUpdateValue: if delete record is newer, return empty since we don't need to store deleted data to storage * @return * @throws IOException */ private Option<IndexedRecord> mergeOldRecord(IndexedRecord oldRecord, Schema schema, boolean isOldRecordNewer, boolean isPreCombining) throws IOException { Option<IndexedRecord> recordOption = getInsertValue(schema, isPreCombining); if ((!recordOption.isPresent()) && (!isPreCombining)) { // use natural order for delete record return Option.empty(); } if (isOldRecordNewer && (schema.getField(HoodieRecord.COMMIT_TIME_METADATA_FIELD) != null)) { // handling disorder, should use the metadata fields of the updating record return mergeDisorderRecordsWithMetadata(schema, ((GenericRecord) (oldRecord)), ((GenericRecord) (recordOption.get())), isPreCombining); } else if (isOldRecordNewer) { return mergeRecords(schema, ((GenericRecord) (oldRecord)), ((GenericRecord) (recordOption.get()))); } else { return mergeRecords(schema, ((GenericRecord) (recordOption.get())), ((GenericRecord) (oldRecord))); } }
3.26
hudi_PartialUpdateAvroPayload_isRecordNewer_rdh
/** * Returns whether the given record is newer than the record of this payload. * * @param orderingVal * @param record * The record * @param prop * The payload properties * @return true if the given record is newer */ private static boolean isRecordNewer(Comparable orderingVal, IndexedRecord record, Properties prop) { String orderingField = ConfigUtils.getOrderingField(prop); if (!StringUtils.isNullOrEmpty(orderingField)) { boolean consistentLogicalTimestampEnabled = Boolean.parseBoolean(prop.getProperty(KeyGeneratorOptions.KEYGENERATOR_CONSISTENT_LOGICAL_TIMESTAMP_ENABLED.key(), KeyGeneratorOptions.KEYGENERATOR_CONSISTENT_LOGICAL_TIMESTAMP_ENABLED.defaultValue())); Comparable oldOrderingVal = ((Comparable) (HoodieAvroUtils.getNestedFieldVal(((GenericRecord) (record)), orderingField, true, consistentLogicalTimestampEnabled))); // pick the payload with greater ordering value as insert record return ((oldOrderingVal != null) && ReflectionUtils.isSameClass(oldOrderingVal, orderingVal)) && (oldOrderingVal.compareTo(orderingVal) > 0); } return false; }
3.26
hudi_PartialUpdateAvroPayload_overwriteField_rdh
/** * Returns true when the incoming value is null, so that null fields in the partial update do not overwrite existing values. */ public Boolean overwriteField(Object value, Object defaultValue) { return value == null; }
3.26
hudi_PartialUpdateAvroPayload_getInsertValue_rdh
/** * return itself as long as it called by preCombine * * @param schema * @param isPreCombining * @return * @throws IOException */ public Option<IndexedRecord> getInsertValue(Schema schema, boolean isPreCombining) throws IOException { if ((recordBytes.length == 0) || ((!isPreCombining) && isDeletedRecord)) { return Option.empty(); } return Option.of(((IndexedRecord) (HoodieAvroUtils.bytesToAvro(recordBytes, schema)))); }
3.26
hudi_RestoreUtils_getSavepointToRestoreTimestamp_rdh
/** * Get the savepoint timestamp that this restore instant is restoring * * @param table * the HoodieTable * @param restoreInstant * Instant referring to restore action * @return timestamp of the savepoint we are restoring * @throws IOException */ public static String getSavepointToRestoreTimestamp(HoodieTable table, HoodieInstant restoreInstant) throws IOException { HoodieRestorePlan plan = m0(table.getMetaClient(), restoreInstant); if (plan.getVersion().compareTo(RestorePlanActionExecutor.RESTORE_PLAN_VERSION_1) > 0) { return plan.getSavepointToRestoreTimestamp(); } return getSavepointToRestoreTimestampV1Schema(table, plan); }
3.26
hudi_RestoreUtils_m0_rdh
/** * Get Latest version of Restore plan corresponding to a restore instant. * * @param metaClient * Hoodie Table Meta Client * @param restoreInstant * Instant referring to restore action * @return Rollback plan corresponding to rollback instant * @throws IOException */ public static HoodieRestorePlan m0(HoodieTableMetaClient metaClient, HoodieInstant restoreInstant) throws IOException { final HoodieInstant requested = HoodieTimeline.getRollbackRequestedInstant(restoreInstant); return TimelineMetadataUtils.deserializeAvroMetadata(metaClient.getActiveTimeline().readRestoreInfoAsBytes(requested).get(), HoodieRestorePlan.class);}
3.26
hudi_ConsistentBucketIdentifier_getLatterBucket_rdh
/** * Get the latter node of the given node (inferred from hash value). */ public ConsistentHashingNode getLatterBucket(int hashValue) { SortedMap<Integer, ConsistentHashingNode> tailMap = f0.tailMap(hashValue, false); return tailMap.isEmpty() ? f0.firstEntry().getValue() : tailMap.get(tailMap.firstKey()); }
3.26
hudi_ConsistentBucketIdentifier_initialize_rdh
/** * Initialize necessary data structure to facilitate bucket identifying. * Specifically, we construct: * - An in-memory tree (ring) to speed up range mapping searching. * - A hash table (fileIdToBucket) to allow lookup of bucket using fileId. * <p> * Children nodes are also considered, and will override the original nodes, * which is used during bucket resizing (i.e., children nodes take the place * of the original nodes) */ private void initialize() { for (ConsistentHashingNode p : metadata.getNodes()) { f0.put(p.getValue(), p); // One bucket has only one file group, so append 0 directly fileIdToBucket.put(FSUtils.createNewFileId(p.getFileIdPrefix(), 0), p); } // Handle children nodes, i.e., replace or delete the original nodes ConsistentHashingNode tmp; for (ConsistentHashingNode p : metadata.getChildrenNodes()) { switch (p.getTag()) { case REPLACE : tmp = f0.put(p.getValue(), p); if (tmp != null) { fileIdToBucket.remove(FSUtils.createNewFileId(tmp.getFileIdPrefix(), 0)); } fileIdToBucket.put(FSUtils.createNewFileId(p.getFileIdPrefix(), 0), p); break;case DELETE : tmp = f0.remove(p.getValue()); fileIdToBucket.remove(FSUtils.createNewFileId(tmp.getFileIdPrefix(), 0)); break; default : throw new HoodieClusteringException("Children node is tagged as NORMAL or unknown tag: " + p); } } }
3.26
hudi_ConsistentBucketIdentifier_m0_rdh
/** * Split bucket in the range middle, also generate the corresponding file ids * * TODO support different split criteria, e.g., distribute records evenly using statistics * * @param bucket * parent bucket * @return lists of children buckets */ public Option<List<ConsistentHashingNode>> m0(@NotNull ConsistentHashingNode bucket) { ConsistentHashingNode formerBucket = getFormerBucket(bucket.getValue()); long mid = (((long) (formerBucket.getValue())) + bucket.getValue()) + (formerBucket.getValue() < bucket.getValue() ? 0 : HoodieConsistentHashingMetadata.HASH_VALUE_MASK + 1L); mid = (mid >> 1) & HoodieConsistentHashingMetadata.HASH_VALUE_MASK; // Cannot split as it already is the smallest bucket range if ((mid == formerBucket.getValue()) || (mid == bucket.getValue())) { return Option.empty(); } return Option.of(Arrays.asList(new ConsistentHashingNode(((int) (mid)), FSUtils.createNewFileIdPfx(), NodeTag.REPLACE), new ConsistentHashingNode(bucket.getValue(), FSUtils.createNewFileIdPfx(), NodeTag.REPLACE))); }
3.26
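The midpoint computation in the bucket-splitting method above works on a hash ring, so it has to handle ranges that wrap past zero. A standalone sketch of just that arithmetic; HASH_VALUE_MASK here is an assumed stand-in value for HoodieConsistentHashingMetadata.HASH_VALUE_MASK and is not taken from the source, and the class/method names are illustrative:

public class RingMidpointSketch {
    // Assumed ring mask for illustration only; the real constant lives in HoodieConsistentHashingMetadata.
    static final long HASH_VALUE_MASK = 0x7FFFFFFFL;

    // Midpoint between the former bucket's hash value and this bucket's hash value,
    // adding one full ring length when the range crosses zero (former >= bucket).
    static long midpoint(int formerValue, int bucketValue) {
        long mid = (long) formerValue + bucketValue
                + (formerValue < bucketValue ? 0L : HASH_VALUE_MASK + 1L);
        return (mid >> 1) & HASH_VALUE_MASK;
    }

    public static void main(String[] args) {
        System.out.println(midpoint(100, 200));                       // 150: no wrap-around
        System.out.println(midpoint((int) HASH_VALUE_MASK - 50, 50)); // range wraps past zero
    }
}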
hudi_ConsistentBucketIdentifier_getFormerBucket_rdh
/** * Get the former node of the given node (inferred from hash value). */ public ConsistentHashingNode getFormerBucket(int hashValue) { SortedMap<Integer, ConsistentHashingNode> headMap = f0.headMap(hashValue); return headMap.isEmpty() ? f0.lastEntry().getValue() : headMap.get(headMap.lastKey()); }
3.26
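The former/latter bucket lookups above are plain TreeMap headMap/tailMap queries with wrap-around on an ordered ring. A self-contained TreeMap example of the same lookups (JDK only; the bucket names are illustrative):

import java.util.SortedMap;
import java.util.TreeMap;

public class RingLookupSketch {
    public static void main(String[] args) {
        // Three buckets on the ring, keyed by hash value, as in the f0 TreeMap above.
        TreeMap<Integer, String> ring = new TreeMap<>();
        ring.put(100, "bucket-a");
        ring.put(200, "bucket-b");
        ring.put(300, "bucket-c");

        int hash = 150;
        // Former bucket: greatest key strictly below the hash, wrapping to the last entry.
        SortedMap<Integer, String> head = ring.headMap(hash);
        String former = head.isEmpty() ? ring.lastEntry().getValue() : head.get(head.lastKey());
        // Latter bucket: smallest key strictly above the hash, wrapping to the first entry.
        SortedMap<Integer, String> tail = ring.tailMap(hash, false);
        String latter = tail.isEmpty() ? ring.firstEntry().getValue() : tail.get(tail.firstKey());
        System.out.println(former + " / " + latter); // bucket-a / bucket-b

        // A hash past the last bucket wraps around to the first one.
        SortedMap<Integer, String> wrapTail = ring.tailMap(350, false);
        System.out.println(wrapTail.isEmpty() ? ring.firstEntry().getValue() : wrapTail.get(wrapTail.firstKey())); // bucket-a
    }
}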
hudi_ConsistentBucketIdentifier_getBucketByFileId_rdh
/** * Get bucket of the given file group * * @param fileId * the file group id. NOTE: not filePrefix (i.e., uuid) */ public ConsistentHashingNode getBucketByFileId(String fileId) { return fileIdToBucket.get(fileId); }
3.26
hudi_HoodieCommitMetadata_fetchTotalPartitionsWritten_rdh
// Here the functions are named "fetch" instead of "get", to avoid them being picked up by the JSON conversion. public long fetchTotalPartitionsWritten() { return partitionToWriteStats.size(); }
3.26
hudi_HoodieCommitMetadata_getFileSliceForFileGroupFromDeltaCommit_rdh
/** * parse the bytes of deltacommit, and get the base file and the log files belonging to this * provided file group. */// TODO: refactor this method to avoid doing the json tree walking (HUDI-4822). public static Option<Pair<String, List<String>>> getFileSliceForFileGroupFromDeltaCommit(byte[] bytes, HoodieFileGroupId fileGroupId) { try { String jsonStr = new String(convertCommitMetadataToJsonBytes(deserializeCommitMetadata(bytes), HoodieCommitMetadata.class), StandardCharsets.UTF_8); if (jsonStr.isEmpty()) { return Option.empty(); } JsonNode ptToWriteStatsMap = JsonUtils.getObjectMapper().readTree(jsonStr).get("partitionToWriteStats"); Iterator<Map.Entry<String, JsonNode>> pts = ptToWriteStatsMap.fields(); while (pts.hasNext()) { Map.Entry<String, JsonNode> ptToWriteStats = pts.next(); if (ptToWriteStats.getValue().isArray()) { for (JsonNode writeStat : ptToWriteStats.getValue()) { HoodieFileGroupId fgId = new HoodieFileGroupId(ptToWriteStats.getKey(), writeStat.get("fileId").asText()); if (fgId.equals(fileGroupId)) { String baseFile = writeStat.get("baseFile").asText(); ArrayNode logFilesNode = ((ArrayNode) (writeStat.get("logFiles"))); List<String> logFiles = new ArrayList<>(); for (JsonNode logFile : logFilesNode) { logFiles.add(logFile.asText()); } return Option.of(Pair.of(baseFile, logFiles)); } } } } return Option.empty(); } catch (Exception e) { throw new HoodieException("Fail to parse the base file and log files from DeltaCommit", e); } }
3.26
hudi_HoodieCommitMetadata_getFileIdToFileStatus_rdh
/** * Extract the file status of all affected files from the commit metadata. If a file has * been touched multiple times in the given commits, the return value will keep the one * from the latest commit by file group ID. * * <p>Note: different with {@link #getFullPathToFileStatus(Configuration, String)}, * only the latest commit file for a file group is returned, * this is an optimization for COPY_ON_WRITE table to eliminate legacy files for filesystem view. * * @param hadoopConf * @param basePath * The base path * @return the file ID to file status mapping */ public Map<String, FileStatus> getFileIdToFileStatus(Configuration hadoopConf, String basePath) { Map<String, FileStatus> fileIdToFileStatus = new HashMap<>(); for (List<HoodieWriteStat> stats : getPartitionToWriteStats().values()) { // Iterate through all the written files. for (HoodieWriteStat stat : stats) { String relativeFilePath = stat.getPath(); Path fullPath = (relativeFilePath != null) ? FSUtils.getPartitionPath(basePath, relativeFilePath) : null; if (fullPath != null) { FileStatus fileStatus = new FileStatus(stat.getFileSizeInBytes(), false, 0, 0, 0, fullPath); fileIdToFileStatus.put(stat.getFileId(), fileStatus); } } } return fileIdToFileStatus; }
3.26
hudi_HoodieCommitMetadata_getFullPathToFileStatus_rdh
/**
 * Extract the file status of all affected files from the commit metadata. If a file has
 * been touched multiple times in the given commits, the return value will keep the one
 * from the latest commit.
 *
 * @param hadoopConf Hadoop configuration
 * @param basePath   The base path
 * @return the file full path to file status mapping
 */
public Map<String, FileStatus> getFullPathToFileStatus(Configuration hadoopConf, String basePath) {
  Map<String, FileStatus> v14 = new HashMap<>();
  for (List<HoodieWriteStat> stats : getPartitionToWriteStats().values()) {
    // Iterate through all the written files.
    for (HoodieWriteStat stat : stats) {
      String relativeFilePath = stat.getPath();
      Path fullPath = (relativeFilePath != null) ? FSUtils.getPartitionPath(basePath, relativeFilePath) : null;
      if (fullPath != null) {
        long blockSize = FSUtils.getFs(fullPath.toString(), hadoopConf).getDefaultBlockSize(fullPath);
        FileStatus fileStatus = new FileStatus(stat.getFileSizeInBytes(), false, 0, blockSize, 0, fullPath);
        v14.put(fullPath.getName(), fileStatus);
      }
    }
  }
  return v14;
}
3.26
hudi_HoodieBaseFileGroupRecordBuffer_doProcessNextDataRecord_rdh
/**
 * Merge two log data records if needed.
 *
 * @param record                     the incoming record (from an older commit)
 * @param metadata                   metadata of the incoming record
 * @param existingRecordMetadataPair the already-buffered record and its metadata, or null if none
 * @return the merged record and metadata to buffer, or empty if the already-buffered record should be kept
 * @throws IOException
 */
protected Option<Pair<T, Map<String, Object>>> doProcessNextDataRecord(T record, Map<String, Object> metadata,
    Pair<Option<T>, Map<String, Object>> existingRecordMetadataPair) throws IOException {
  if (existingRecordMetadataPair != null) {
    // Merge and store the combined record.
    // Note that the incoming `record` is from an older commit, so it should be put as
    // the `older` in the merge API.
    Option<Pair<HoodieRecord, Schema>> combinedRecordAndSchemaOpt = f0
        ? recordMerger.partialMerge(
            readerContext.constructHoodieRecord(Option.of(record), metadata),
            (Schema) metadata.get(INTERNAL_META_SCHEMA),
            readerContext.constructHoodieRecord(existingRecordMetadataPair.getLeft(), existingRecordMetadataPair.getRight()),
            (Schema) existingRecordMetadataPair.getRight().get(INTERNAL_META_SCHEMA),
            readerSchema, payloadProps)
        : recordMerger.merge(
            readerContext.constructHoodieRecord(Option.of(record), metadata),
            (Schema) metadata.get(INTERNAL_META_SCHEMA),
            readerContext.constructHoodieRecord(existingRecordMetadataPair.getLeft(), existingRecordMetadataPair.getRight()),
            (Schema) existingRecordMetadataPair.getRight().get(INTERNAL_META_SCHEMA),
            payloadProps);
    if (!combinedRecordAndSchemaOpt.isPresent()) {
      return Option.empty();
    }
    Pair<HoodieRecord, Schema> combinedRecordAndSchema = combinedRecordAndSchemaOpt.get();
    HoodieRecord<T> combinedRecord = combinedRecordAndSchema.getLeft();
    // If pre-combine returns the existing record, no need to update it.
    if (combinedRecord.getData() != existingRecordMetadataPair.getLeft().get()) {
      return Option.of(Pair.of(combinedRecord.getData(),
          f0 ? readerContext.updateSchemaAndResetOrderingValInMetadata(metadata, combinedRecordAndSchema.getRight())
             : metadata));
    }
    return Option.empty();
  } else {
    // Put the record as is.
    // NOTE: The record has to be cloned here to make sure that, if it holds a low-level engine-specific
    //       payload pointing into a shared, mutable (underlying) buffer, we get a clean copy of it,
    //       since these records will be put into the records map.
    return Option.of(Pair.of(record, metadata));
  }
}
3.26
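The reference check against the existing record above is what lets the buffer skip rewriting an entry when the merge keeps the old value. A minimal JDK-only sketch of that pattern (the record type and the ordering-value merge rule here are illustrative, not Hudi's merger API):

import java.util.HashMap;
import java.util.Map;

public class MergeSkipSketch {
  static final class Rec {
    final String key;
    final long orderingVal;
    Rec(String key, long orderingVal) { this.key = key; this.orderingVal = orderingVal; }
  }

  // Keep whichever record has the larger ordering value; ties keep the existing one.
  static Rec merge(Rec incoming, Rec existing) {
    return incoming.orderingVal > existing.orderingVal ? incoming : existing;
  }

  public static void main(String[] args) {
    Map<String, Rec> buffer = new HashMap<>();
    Rec existing = new Rec("k1", 5);
    buffer.put("k1", existing);

    Rec incoming = new Rec("k1", 3);
    Rec merged = merge(incoming, existing);
    // Same object reference as the buffered record => nothing to update.
    if (merged != existing) {
      buffer.put("k1", merged);
    }
    System.out.println(buffer.get("k1").orderingVal); // still 5
  }
}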
hudi_HoodieBaseFileGroupRecordBuffer_extractRecordPositions_rdh
/**
 * Extract the record positions from a log block header.
 *
 * @param logBlock the log block to read positions from
 * @return the list of record positions
 * @throws IOException
 */
protected static List<Long> extractRecordPositions(HoodieLogBlock logBlock) throws IOException {
  List<Long> blockPositions = new ArrayList<>();
  Roaring64NavigableMap v12 = logBlock.getRecordPositions();
  if ((v12 == null) || v12.isEmpty()) {
    throw new HoodieValidationException("No record position info is found when attempting to do position-based merge.");
  }
  Iterator<Long> iterator = v12.iterator();
  while (iterator.hasNext()) {
    blockPositions.add(iterator.next());
  }
  if (blockPositions.isEmpty()) {
    throw new HoodieCorruptedDataException("No positions are extracted.");
  }
  return blockPositions;
}
3.26
hudi_HoodieBaseFileGroupRecordBuffer_merge_rdh
/**
 * Merge two records using the configured record merger.
 *
 * @param older        the older record, if present
 * @param olderInfoMap metadata of the older record
 * @param newer        the newer record, if present
 * @param newerInfoMap metadata of the newer record
 * @return the merged record, or empty if the merge produces no record
 * @throws IOException
 */
protected Option<T> merge(Option<T> older, Map<String, Object> olderInfoMap,
    Option<T> newer, Map<String, Object> newerInfoMap) throws IOException {
  if (!older.isPresent()) {
    return newer;
  }
  Option<Pair<HoodieRecord, Schema>> mergedRecord;
  if (f0) {
    mergedRecord = recordMerger.partialMerge(
        readerContext.constructHoodieRecord(older, olderInfoMap), (Schema) olderInfoMap.get(INTERNAL_META_SCHEMA),
        readerContext.constructHoodieRecord(newer, newerInfoMap), (Schema) newerInfoMap.get(INTERNAL_META_SCHEMA),
        readerSchema, payloadProps);
  } else {
    mergedRecord = recordMerger.merge(
        readerContext.constructHoodieRecord(older, olderInfoMap), (Schema) olderInfoMap.get(INTERNAL_META_SCHEMA),
        readerContext.constructHoodieRecord(newer, newerInfoMap), (Schema) newerInfoMap.get(INTERNAL_META_SCHEMA),
        payloadProps);
  }
  if (mergedRecord.isPresent()) {
    return Option.ofNullable((T) mergedRecord.get().getLeft().getData());
  }
  return Option.empty();
}
3.26
hudi_HoodieBaseFileGroupRecordBuffer_shouldSkip_rdh
/**
 * Decides whether a record should be skipped for downstream processing. A record is skipped
 * only when a set of pre-specified keys (or key prefixes) exists and the key of the record
 * does not match any of them.
 */
protected boolean shouldSkip(T record, String keyFieldName, boolean isFullKey, Set<String> keys) {
  String recordKey = readerContext.getValue(record, readerSchema, keyFieldName).toString();
  // Cannot extract the record key, throw.
  if ((recordKey == null) || recordKey.isEmpty()) {
    throw new HoodieKeyException("Can not extract the key for a record");
  }
  // No keys are specified. Cannot skip at all.
  if (keys.isEmpty()) {
    return false;
  }
  // When the record key matches one of the keys or key prefixes, we cannot skip it.
  if ((isFullKey && keys.contains(recordKey))
      || (!isFullKey && keys.stream().anyMatch(recordKey::startsWith))) {
    return false;
  }
  // Otherwise, this record is not needed.
  return true;
}
3.26
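A self-contained sketch of the same key/prefix filter in isolation, using hypothetical keys (JDK only):

import java.util.HashSet;
import java.util.Set;

public class KeyFilterSketch {
  /** Skip a record only when keys are specified and the record key matches none of them. */
  static boolean shouldSkip(String recordKey, boolean isFullKey, Set<String> keys) {
    if (keys.isEmpty()) {
      return false; // no filter specified, keep everything
    }
    boolean matches = isFullKey
        ? keys.contains(recordKey)
        : keys.stream().anyMatch(recordKey::startsWith);
    return !matches;
  }

  public static void main(String[] args) {
    Set<String> prefixes = new HashSet<>();
    prefixes.add("user_1");
    System.out.println(shouldSkip("user_123", false, prefixes)); // false, prefix matches
    System.out.println(shouldSkip("order_9", false, prefixes));  // true, no match
  }
}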
hudi_HoodieBaseFileGroupRecordBuffer_doProcessNextDeletedRecord_rdh
/**
 * Merge a delete record with another record (data, or delete).
 *
 * @param deleteRecord               the incoming delete record
 * @param existingRecordMetadataPair the already-buffered record and its metadata, or null if none
 * @return the delete record to apply, or empty if the existing record should be kept
 */
protected Option<DeleteRecord> doProcessNextDeletedRecord(DeleteRecord deleteRecord,
    Pair<Option<T>, Map<String, Object>> existingRecordMetadataPair) {
  if (existingRecordMetadataPair != null) {
    // Merge and store the merged record. The ordering value is used to decide whether the record with the
    // same key should be deleted or kept. The old record is kept only if the DELETE record has a smaller
    // ordering value; for equal ordering values, natural order (arrival time semantics) is used.
    Comparable existingOrderingVal = readerContext.getOrderingValue(
        existingRecordMetadataPair.getLeft(), existingRecordMetadataPair.getRight(), readerSchema, payloadProps);
    Comparable deleteOrderingVal = deleteRecord.getOrderingValue();
    // Check that the ordering value is not 0, because 0 is the default value meaning natural order.
    boolean chooseExisting = (!deleteOrderingVal.equals(0))
        && ReflectionUtils.isSameClass(existingOrderingVal, deleteOrderingVal)
        && (existingOrderingVal.compareTo(deleteOrderingVal) > 0);
    if (chooseExisting) {
      // The DELETE message is obsolete if the old message has a greater orderingVal.
      return Option.empty();
    }
  }
  // Do delete.
  return Option.of(deleteRecord);
}
3.26
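A small JDK-only sketch of that ordering rule with made-up ordering values: a delete only wins when its ordering value is at least as large as the existing record's, or when it carries the default value 0 (arrival-order semantics).

public class DeleteOrderingSketch {
  /** Returns true when the buffered record should survive the delete. */
  static boolean keepExisting(long existingOrderingVal, long deleteOrderingVal) {
    // 0 is the default "no ordering value" marker => fall back to arrival order, delete wins.
    return deleteOrderingVal != 0 && existingOrderingVal > deleteOrderingVal;
  }

  public static void main(String[] args) {
    System.out.println(keepExisting(10, 5));  // true  -> delete is obsolete
    System.out.println(keepExisting(10, 10)); // false -> delete applies
    System.out.println(keepExisting(10, 0));  // false -> arrival order, delete applies
  }
}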
hudi_HoodieBaseFileGroupRecordBuffer_getRecordsIterator_rdh
/**
 * Create a record iterator for a data block. The records are filtered by a key set specified by {@code keySpecOpt}.
 *
 * @param dataBlock  the data block to read from
 * @param keySpecOpt optional key spec used to filter the records
 * @return a pair of the record iterator and the block schema
 * @throws IOException
 */
protected Pair<ClosableIterator<T>, Schema> getRecordsIterator(HoodieDataBlock dataBlock, Option<KeySpec> keySpecOpt)
    throws IOException {
  ClosableIterator<T> blockRecordsIterator;
  if (keySpecOpt.isPresent()) {
    KeySpec keySpec = keySpecOpt.get();
    blockRecordsIterator = dataBlock.getEngineRecordIterator(readerContext, keySpec.getKeys(), keySpec.isFullKey());
  } else {
    blockRecordsIterator = dataBlock.getEngineRecordIterator(readerContext);
  }
  return Pair.of(blockRecordsIterator, dataBlock.getSchema());
}
3.26
hudi_HoodieMergedLogRecordReader_scanByFullKeys_rdh
/**
 * Provides incremental scanning capability where only provided keys will be looked
 * up in the delta-log files, scanned and subsequently materialized into the internal
 * cache.
 *
 * @param keys to be looked up
 */
public void scanByFullKeys(List<String> keys) {
  // We can skip scanning in case reader is in full-scan mode, in which case all blocks
  // are processed upfront (no additional scanning is necessary)
  if (forceFullScan) {
    return; // no-op
  }
  List<String> missingKeys = keys.stream()
      .filter(key -> !recordBuffer.containsLogRecord(key))
      .collect(Collectors.toList());
  if (missingKeys.isEmpty()) {
    // All the required records are already fetched, no-op
    return;
  }
  scanInternal(Option.of(KeySpec.fullKeySpec(missingKeys)), false);
}
3.26
hudi_HoodieMergedLogRecordReader_scan_rdh
/**
 * Scans delta-log files processing blocks.
 */
public final void scan() {
  scan(false);
}
3.26
hudi_HoodieMergedLogRecordReader_scanByKeyPrefixes_rdh
/**
 * Provides incremental scanning capability where only keys matching provided key-prefixes
 * will be looked up in the delta-log files, scanned and subsequently materialized into
 * the internal cache.
 *
 * @param keyPrefixes to be looked up
 */
public void scanByKeyPrefixes(List<String> keyPrefixes) {
  // We can skip scanning in case reader is in full-scan mode, in which case all blocks
  // are processed upfront (no additional scanning is necessary)
  if (forceFullScan) {
    return;
  }
  List<String> missingKeyPrefixes = keyPrefixes.stream()
      .filter(keyPrefix ->
          // NOTE: We can skip scanning the prefixes that have already
          //       been covered by the previous scans
          scannedPrefixes.stream().noneMatch(keyPrefix::startsWith))
      .collect(Collectors.toList());
  if (missingKeyPrefixes.isEmpty()) {
    // All the required records are already fetched, no-op
    return;
  }
  // NOTE: When looking up by key-prefixes unfortunately we can't short-circuit
  //       and will have to scan every time as we can't know (based on just
  //       the records cached) whether particular prefix was scanned or just records
  //       matching the prefix looked up (by [[scanByFullKeys]] API)
  scanInternal(Option.of(KeySpec.prefixKeySpec(missingKeyPrefixes)), false);
  scannedPrefixes.addAll(missingKeyPrefixes);
}
3.26
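The prefix-coverage check above, in isolation, is just a `noneMatch` over previously scanned prefixes. A minimal JDK-only sketch with made-up prefixes:

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

public class PrefixCoverageSketch {
  public static void main(String[] args) {
    Set<String> scannedPrefixes = new HashSet<>();
    scannedPrefixes.add("us_");

    List<String> requested = List.of("us_ca", "eu_de");
    // A requested prefix is "missing" only if no previously scanned prefix covers it.
    List<String> missing = requested.stream()
        .filter(p -> scannedPrefixes.stream().noneMatch(p::startsWith))
        .collect(Collectors.toList());

    System.out.println(missing); // [eu_de] -- "us_ca" is already covered by "us_"
    scannedPrefixes.addAll(missing);
  }
}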
hudi_HoodieMergedLogRecordReader_m0_rdh
/**
 * Returns the builder for {@code HoodieMergedLogRecordReader}.
 */
public static Builder m0() {
  return new Builder();
}
3.26
hudi_BaseKeyGenerator_getKey_rdh
/**
 * Generate a Hoodie Key out of the provided generic record.
 */
@Override
public final HoodieKey getKey(GenericRecord record) {
  if ((getRecordKeyFieldNames() == null) || (getPartitionPathFields() == null)) {
    throw new HoodieKeyException("Unable to find field names for record key or partition path in cfg");
  }
  return new HoodieKey(getRecordKey(record), getPartitionPath(record));
}
3.26
hudi_FlinkHoodieBackedTableMetadataWriter_validateTimelineBeforeSchedulingCompaction_rdh
/**
 * Validates the timeline for both main and metadata tables to ensure compaction on MDT can be scheduled.
 */
@Override
protected boolean validateTimelineBeforeSchedulingCompaction(Option<String> inFlightInstantTimestamp,
    String latestDeltaCommitTimeInMetadataTable) {
  // Allows compaction of the metadata table to run regardless of inflight instants
  return true;
}
3.26
hudi_InitialCheckPointProvider_init_rdh
/**
 * Initialize the class with the current filesystem.
 *
 * @param config Hadoop configuration
 */
public void init(Configuration config) throws HoodieException {
  try {
    this.fs = FileSystem.get(config);
  } catch (IOException e) {
    // Propagate the underlying cause instead of swallowing it.
    throw new HoodieException("CheckpointProvider initialization failed", e);
  }
}
3.26
hudi_MarkerUtils_readMarkerType_rdh
/**
 * Reads the marker type from the `MARKERS.type` file.
 *
 * @param fileSystem file system to use.
 * @param markerDir  marker directory.
 * @return the marker type, or empty if the marker type file does not exist.
 */
public static Option<MarkerType> readMarkerType(FileSystem fileSystem, String markerDir) {
  Path markerTypeFilePath = new Path(markerDir, MARKER_TYPE_FILENAME);
  FSDataInputStream fsDataInputStream = null;
  Option<MarkerType> content = Option.empty();
  try {
    if (!doesMarkerTypeFileExist(fileSystem, markerDir)) {
      return Option.empty();
    }
    fsDataInputStream = fileSystem.open(markerTypeFilePath);
    String markerType = FileIOUtils.readAsUTFString(fsDataInputStream);
    if (StringUtils.isNullOrEmpty(markerType)) {
      return Option.empty();
    }
    content = Option.of(MarkerType.valueOf(markerType));
  } catch (IOException e) {
    throw new HoodieIOException("Cannot read marker type file " + markerTypeFilePath.toString()
        + "; " + e.getMessage(), e);
  } finally {
    closeQuietly(fsDataInputStream);
  }
  return content;
}
3.26
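A JDK-only sketch of the same "small control file to enum" pattern, using a local temp file and a hypothetical enum instead of Hudi's marker type file:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Optional;

public class ControlFileSketch {
  enum MarkerKind { DIRECT, TIMELINE_SERVER_BASED }

  static Optional<MarkerKind> readKind(Path file) throws IOException {
    if (!Files.exists(file)) {
      return Optional.empty(); // missing control file means "no marker type written"
    }
    String content = new String(Files.readAllBytes(file), StandardCharsets.UTF_8).trim();
    return content.isEmpty() ? Optional.empty() : Optional.of(MarkerKind.valueOf(content));
  }

  public static void main(String[] args) throws IOException {
    Path file = Files.createTempFile("MARKERS", ".type");
    Files.write(file, "TIMELINE_SERVER_BASED".getBytes(StandardCharsets.UTF_8));
    System.out.println(readKind(file)); // Optional[TIMELINE_SERVER_BASED]
  }
}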
hudi_MarkerUtils_markerDirToInstantTime_rdh
/**
 * Get the instant time from a full marker path, for example:
 * /var/folders/t3/th1dw75d0yz2x2k2qt6ys9zh0000gp/T/junit6502909693741900820/dataset/.hoodie/.temp/003
 * ==> 003
 *
 * @param marker the full marker directory path
 * @return the instant time (the last path segment)
 */
public static String markerDirToInstantTime(String marker) {
  String[] ele = marker.split("/");
  return ele[ele.length - 1];
}
3.26
hudi_MarkerUtils_deleteMarkerTypeFile_rdh
/**
 * Deletes the `MARKERS.type` file.
 *
 * @param fileSystem file system to use.
 * @param markerDir  marker directory.
 */
public static void deleteMarkerTypeFile(FileSystem fileSystem, String markerDir) {
  Path markerTypeFilePath = new Path(markerDir, MARKER_TYPE_FILENAME);
  try {
    fileSystem.delete(markerTypeFilePath, false);
  } catch (IOException e) {
    throw new HoodieIOException("Cannot delete marker type file " + markerTypeFilePath.toString()
        + "; " + e.getMessage(), e);
  }
}
3.26
hudi_MarkerUtils_hasCommitConflict_rdh
/**
 * Whether there is a write conflict with a completed commit among multiple writers.
 *
 * @param activeTimeline          Active timeline.
 * @param currentFileIDs          Current set of file IDs.
 * @param completedCommitInstants Completed commits.
 * @return {@code true} if a conflict is detected; {@code false} otherwise.
 */
public static boolean hasCommitConflict(HoodieActiveTimeline activeTimeline, Set<String> currentFileIDs,
    Set<HoodieInstant> completedCommitInstants) {
  Set<HoodieInstant> currentInstants = new HashSet<>(
      activeTimeline.reload().getCommitsTimeline().filterCompletedInstants().getInstants());
  currentInstants.removeAll(completedCommitInstants);
  Set<String> missingFileIDs = currentInstants.stream().flatMap(instant -> {
    try {
      return HoodieCommitMetadata.fromBytes(activeTimeline.getInstantDetails(instant).get(), HoodieCommitMetadata.class)
          .getFileIdAndRelativePaths().keySet().stream();
    } catch (Exception e) {
      return Stream.empty();
    }
  }).collect(Collectors.toSet());
  currentFileIDs.retainAll(missingFileIDs);
  return !currentFileIDs.isEmpty();
}
3.26
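Stripped of the timeline plumbing, the conflict check above is a set intersection: file IDs this writer touched versus file IDs touched by commits that completed since the writer started. A JDK-only sketch with made-up file IDs:

import java.util.HashSet;
import java.util.Set;

public class CommitConflictSketch {
  /** True if any file ID written by this writer was also written by a newly completed commit. */
  static boolean hasConflict(Set<String> currentWriterFileIds, Set<String> newlyCompletedFileIds) {
    Set<String> overlap = new HashSet<>(currentWriterFileIds);
    overlap.retainAll(newlyCompletedFileIds);
    return !overlap.isEmpty();
  }

  public static void main(String[] args) {
    Set<String> mine = new HashSet<>(Set.of("fg-1", "fg-2"));
    Set<String> theirs = new HashSet<>(Set.of("fg-2", "fg-3"));
    System.out.println(hasConflict(mine, theirs)); // true, both touched fg-2
  }
}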
hudi_MarkerUtils_writeMarkerTypeToFile_rdh
/**
 * Writes the marker type to the file `MARKERS.type`.
 *
 * @param markerType marker type.
 * @param fileSystem file system to use.
 * @param markerDir  marker directory.
 */
public static void writeMarkerTypeToFile(MarkerType markerType, FileSystem fileSystem, String markerDir) {
  Path markerTypeFilePath = new Path(markerDir, MARKER_TYPE_FILENAME);
  FSDataOutputStream fsDataOutputStream = null;
  BufferedWriter bufferedWriter = null;
  try {
    fsDataOutputStream = fileSystem.create(markerTypeFilePath, false);
    bufferedWriter = new BufferedWriter(new OutputStreamWriter(fsDataOutputStream, StandardCharsets.UTF_8));
    bufferedWriter.write(markerType.toString());
  } catch (IOException e) {
    throw new HoodieException("Failed to create marker type file " + markerTypeFilePath.toString()
        + "; " + e.getMessage(), e);
  } finally {
    closeQuietly(bufferedWriter);
    closeQuietly(fsDataOutputStream);
  }
}
3.26
hudi_MarkerUtils_makerToPartitionAndFileID_rdh
/**
 * Get the partition path and fileID from a full marker path, for example:
 * 20210623/0/20210825/932a86d9-5c1d-44c7-ac99-cb88b8ef8478-0_85-15-1390_20220620181735781.parquet.marker.MERGE
 * ==> 20210623/0/20210825/932a86d9-5c1d-44c7-ac99-cb88b8ef8478-0
 *
 * @param marker the full marker path
 * @return the partition path and fileID portion of the marker name
 */
public static String makerToPartitionAndFileID(String marker) {
  String[] ele = marker.split("_");
  return ele[0];
}
3.26
hudi_MarkerUtils_getAllMarkerDir_rdh
/**
 * Gets all marker directories.
 *
 * @param tempPath Temporary folder under .hoodie.
 * @param fs       File system to use.
 * @return All marker directories.
 * @throws IOException upon error.
 */
public static List<Path> getAllMarkerDir(Path tempPath, FileSystem fs) throws IOException {
  return Arrays.stream(fs.listStatus(tempPath)).map(FileStatus::getPath).collect(Collectors.toList());
}
3.26
hudi_MarkerUtils_getCandidateInstants_rdh
/**
 * Get candidate instants for conflict checking:
 * 1. Skip the current writer's own instant (currentInstantTime).
 * 2. Skip all instants after currentInstantTime.
 * 3. Skip instants of dead writers, based on heartbeats.
 * 4. Skip pending compaction instants (for now we don't do early conflict checking with the compaction action),
 *    because we don't want pending compaction to block a common writer.
 *
 * @param instants marker directories of candidate instants
 * @return the filtered list of candidate instant paths
 */
public static List<String> getCandidateInstants(HoodieActiveTimeline activeTimeline, List<Path> instants,
    String currentInstantTime, long maxAllowableHeartbeatIntervalInMs, FileSystem fs, String basePath) {
  return instants.stream().map(Path::toString).filter(instantPath -> {
    String instantTime = markerDirToInstantTime(instantPath);
    return (instantTime.compareToIgnoreCase(currentInstantTime) < 0)
        && (!activeTimeline.filterPendingCompactionTimeline().containsInstant(instantTime))
        && (!activeTimeline.filterPendingReplaceTimeline().containsInstant(instantTime));
  }).filter(instantPath -> {
    try {
      return !isHeartbeatExpired(markerDirToInstantTime(instantPath), maxAllowableHeartbeatIntervalInMs, fs, basePath);
    } catch (IOException e) {
      return false;
    }
  }).collect(Collectors.toList());
}
3.26
hudi_MarkerUtils_readTimelineServerBasedMarkersFromFileSystem_rdh
/**
 * Reads files containing the markers written by the timeline-server-based marker mechanism.
 *
 * @param markerDir   marker directory.
 * @param fileSystem  file system to use.
 * @param context     instance of {@link HoodieEngineContext} to use
 * @param parallelism parallelism to use
 * @return A {@code Map} of file name to the set of markers stored in the file.
 */
public static Map<String, Set<String>> readTimelineServerBasedMarkersFromFileSystem(String markerDir,
    FileSystem fileSystem, HoodieEngineContext context, int parallelism) {
  Path dirPath = new Path(markerDir);
  try {
    if (fileSystem.exists(dirPath)) {
      Predicate<FileStatus> prefixFilter = fileStatus ->
          fileStatus.getPath().getName().startsWith(MARKERS_FILENAME_PREFIX);
      Predicate<FileStatus> markerTypeFilter = fileStatus ->
          !fileStatus.getPath().getName().equals(MARKER_TYPE_FILENAME);
      return FSUtils.parallelizeSubPathProcess(context, fileSystem, dirPath, parallelism,
          prefixFilter.and(markerTypeFilter),
          pairOfSubPathAndConf -> {
            String markersFilePathStr = pairOfSubPathAndConf.getKey();
            SerializableConfiguration conf = pairOfSubPathAndConf.getValue();
            return readMarkersFromFile(new Path(markersFilePathStr), conf);
          });
    }
    return new HashMap<>();
  } catch (IOException ioe) {
    throw new HoodieIOException(ioe.getMessage(), ioe);
  }
}
3.26
hudi_MarkerUtils_readMarkersFromFile_rdh
/**
 * Reads the markers stored in the underlying file.
 *
 * @param markersFilePath File path for the markers.
 * @param conf            Serializable config.
 * @param ignoreException Whether to ignore IOException.
 * @return Markers in a {@code Set} of String.
 */
public static Set<String> readMarkersFromFile(Path markersFilePath, SerializableConfiguration conf, boolean ignoreException) {
  FSDataInputStream fsDataInputStream = null;
  Set<String> markers = new HashSet<>();
  try {
    LOG.debug("Read marker file: " + markersFilePath);
    FileSystem fs = markersFilePath.getFileSystem(conf.get());
    fsDataInputStream = fs.open(markersFilePath);
    markers = new HashSet<>(FileIOUtils.readAsUTFStringLines(fsDataInputStream));
  } catch (IOException e) {
    String errorMessage = "Failed to read MARKERS file " + markersFilePath;
    if (ignoreException) {
      LOG.warn(errorMessage + ". Ignoring the exception and continuing.", e);
    } else {
      throw new HoodieIOException(errorMessage, e);
    }
  } finally {
    closeQuietly(fsDataInputStream);
  }
  return markers;
}
3.26
hudi_MarkerUtils_stripMarkerFolderPrefix_rdh
/**
 * Strips the marker folder prefix of any file path under the marker directory.
 *
 * @param fullMarkerPath the full path of the file
 * @param markerDir      marker directory
 * @return file name
 */
public static String stripMarkerFolderPrefix(String fullMarkerPath, String markerDir) {
  int begin = fullMarkerPath.indexOf(markerDir);
  ValidationUtils.checkArgument(begin >= 0,
      "Not in marker dir. Marker Path=" + fullMarkerPath + ", Expected Marker Root=" + markerDir);
  return fullMarkerPath.substring(begin + markerDir.length() + 1);
}
3.26
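A quick JDK-only illustration of the indexOf/substring stripping above, with a made-up marker directory and path:

public class StripPrefixSketch {
  static String stripPrefix(String fullPath, String markerDir) {
    int begin = fullPath.indexOf(markerDir);
    if (begin < 0) {
      throw new IllegalArgumentException("Not in marker dir: " + fullPath);
    }
    // +1 also drops the path separator that follows the marker directory.
    return fullPath.substring(begin + markerDir.length() + 1);
  }

  public static void main(String[] args) {
    String markerDir = "/tmp/table/.hoodie/.temp/003";
    String fullPath = markerDir + "/2021/06/23/file-1.parquet.marker.CREATE";
    System.out.println(stripPrefix(fullPath, markerDir)); // 2021/06/23/file-1.parquet.marker.CREATE
  }
}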
hudi_MarkerUtils_doesMarkerTypeFileExist_rdh
/**
 * @param fileSystem file system to use.
 * @param markerDir  marker directory.
 * @return {@code true} if the MARKERS.type file exists; {@code false} otherwise.
 */
public static boolean doesMarkerTypeFileExist(FileSystem fileSystem, String markerDir) throws IOException {
  return fileSystem.exists(new Path(markerDir, MARKER_TYPE_FILENAME));
}
3.26
hudi_BigQuerySchemaResolver_getTableSchema_rdh
/**
 * Get the BigQuery schema for the table. If the BigQuery table is configured with partitioning, the caller must
 * pass in the partition fields so that they are not returned in the schema. If the partition fields are in the
 * schema, querying the table will fail since BigQuery will treat them as duplicate columns.
 *
 * @param metaClient      Meta client for the Hudi table
 * @param partitionFields The fields that are used for partitioning in BigQuery
 * @return The BigQuery schema for the table
 */
Schema getTableSchema(HoodieTableMetaClient metaClient, List<String> partitionFields) {
  try {
    Schema schema = convertSchema(tableSchemaResolverSupplier.apply(metaClient).getTableAvroSchema());
    if (partitionFields.isEmpty()) {
      return schema;
    } else {
      return Schema.of(schema.getFields().stream()
          .filter(field -> !partitionFields.contains(field.getName()))
          .collect(Collectors.toList()));
    }
  } catch (Exception e) {
    throw new HoodieBigQuerySyncException("Failed to get table schema", e);
  }
}
3.26
hudi_FileSystemViewManager_m1_rdh
/**
 * Main API to get the file-system view for the base-path.
 *
 * @param metaClient HoodieTableMetaClient
 * @return the cached or newly created view
 */
public SyncableFileSystemView m1(HoodieTableMetaClient metaClient) {
  return globalViewMap.computeIfAbsent(metaClient.getBasePath(),
      path -> viewCreator.apply(metaClient, viewStorageConfig));
}
3.26
hudi_FileSystemViewManager_createViewManager_rdh
/**
 * Main factory method for building file-system views.
 */
public static FileSystemViewManager createViewManager(final HoodieEngineContext context,
    final HoodieMetadataConfig metadataConfig, final FileSystemViewStorageConfig config,
    final HoodieCommonConfig commonConfig,
    final SerializableFunctionUnchecked<HoodieTableMetaClient, HoodieTableMetadata> metadataCreator) {
  LOG.info("Creating View Manager with storage type :" + config.getStorageType());
  final SerializableConfiguration conf = context.getHadoopConf();
  switch (config.getStorageType()) {
    case EMBEDDED_KV_STORE:
      LOG.info("Creating embedded rocks-db based Table View");
      return new FileSystemViewManager(context, config,
          (metaClient, viewConf) -> createRocksDBBasedFileSystemView(conf, viewConf, metaClient));
    case SPILLABLE_DISK:
      LOG.info("Creating Spillable Disk based Table View");
      return new FileSystemViewManager(context, config,
          (metaClient, viewConf) -> createSpillableMapBasedFileSystemView(conf, viewConf, metaClient, commonConfig));
    case MEMORY:
      LOG.info("Creating in-memory based Table View");
      return new FileSystemViewManager(context, config,
          (metaClient, viewConfig) -> createInMemoryFileSystemView(metadataConfig, viewConfig, metaClient, metadataCreator));
    case REMOTE_ONLY:
      LOG.info("Creating remote only table view");
      return new FileSystemViewManager(context, config,
          (metaClient, viewConfig) -> createRemoteFileSystemView(conf, viewConfig, metaClient));
    case REMOTE_FIRST:
      LOG.info("Creating remote first table view");
      return new FileSystemViewManager(context, config, (metaClient, viewConfig) -> {
        RemoteHoodieTableFileSystemView v6 = createRemoteFileSystemView(conf, viewConfig, metaClient);
        SyncableFileSystemView secondaryView;
        switch (viewConfig.getSecondaryStorageType()) {
          case MEMORY:
            secondaryView = createInMemoryFileSystemView(metadataConfig, viewConfig, metaClient, metadataCreator);
            break;
          case EMBEDDED_KV_STORE:
            secondaryView = createRocksDBBasedFileSystemView(conf, viewConfig, metaClient);
            break;
          case SPILLABLE_DISK:
            secondaryView = createSpillableMapBasedFileSystemView(conf, viewConfig, metaClient, commonConfig);
            break;
          default:
            throw new IllegalArgumentException("Secondary Storage type can only be in-memory or spillable. Was :"
                + viewConfig.getSecondaryStorageType());
        }
        return new PriorityBasedFileSystemView(v6, secondaryView);
      });
    default:
      throw new IllegalArgumentException("Unknown file system view type :" + config.getStorageType());
  }
}
3.26
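The REMOTE_FIRST branch above composes a primary (remote) view with a locally built secondary view behind a priority wrapper. A stripped-down JDK-only sketch of that factory-with-fallback shape (the interfaces and names here are invented for illustration, not Hudi's):

import java.util.function.Supplier;

public class ViewFactorySketch {
  interface View { String query(); }

  enum StorageType { MEMORY, REMOTE_ONLY, REMOTE_FIRST }

  /** Tries the primary view first and falls back to the secondary on failure. */
  static View priorityView(View primary, View secondary) {
    return () -> {
      try {
        return primary.query();
      } catch (RuntimeException e) {
        return secondary.query();
      }
    };
  }

  static View create(StorageType type, Supplier<View> remote, Supplier<View> inMemory) {
    switch (type) {
      case MEMORY:
        return inMemory.get();
      case REMOTE_ONLY:
        return remote.get();
      case REMOTE_FIRST:
        return priorityView(remote.get(), inMemory.get());
      default:
        throw new IllegalArgumentException("Unknown storage type: " + type);
    }
  }

  public static void main(String[] args) {
    View failingRemote = () -> { throw new RuntimeException("remote view server unreachable"); };
    View local = () -> "answered locally";
    System.out.println(create(StorageType.REMOTE_FIRST, () -> failingRemote, () -> local).query());
  }
}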
hudi_FileSystemViewManager_createInMemoryFileSystemView_rdh
/**
 * Create an in-memory file system view for a table.
 */
private static HoodieTableFileSystemView createInMemoryFileSystemView(HoodieMetadataConfig metadataConfig,
    FileSystemViewStorageConfig viewConf, HoodieTableMetaClient metaClient,
    SerializableFunctionUnchecked<HoodieTableMetaClient, HoodieTableMetadata> metadataCreator) {
  LOG.info("Creating InMemory based view for basePath " + metaClient.getBasePathV2());
  HoodieTimeline v4 = metaClient.getActiveTimeline().filterCompletedAndCompactionInstants();
  if (metaClient.getTableConfig().isMetadataTableAvailable()) {
    ValidationUtils.checkArgument(metadataCreator != null,
        "Metadata supplier is null. Cannot instantiate metadata file system view");
    return new HoodieMetadataFileSystemView(metaClient, v4, metadataCreator.apply(metaClient));
  }
  if (metaClient.getMetaserverConfig().isMetaserverEnabled()) {
    return (HoodieTableFileSystemView) ReflectionUtils.loadClass(HOODIE_METASERVER_FILE_SYSTEM_VIEW_CLASS,
        new Class<?>[] {HoodieTableMetaClient.class, HoodieTimeline.class, HoodieMetaserverConfig.class},
        metaClient, v4, metaClient.getMetaserverConfig());
  }
  return new HoodieTableFileSystemView(metaClient, v4, viewConf.isIncrementalTimelineSyncEnabled());
}
3.26
hudi_FileSystemViewManager_createRemoteFileSystemView_rdh
/**
 * Create a remote file system view for a table.
 *
 * @param conf       Hadoop Configuration
 * @param viewConf   View Storage Configuration
 * @param metaClient Hoodie Table MetaClient for the table.
 * @return the remote file system view
 */
private static RemoteHoodieTableFileSystemView createRemoteFileSystemView(SerializableConfiguration conf,
    FileSystemViewStorageConfig viewConf, HoodieTableMetaClient metaClient) {
  LOG.info("Creating remote view for basePath " + metaClient.getBasePath() + ". Server="
      + viewConf.getRemoteViewServerHost() + ":" + viewConf.getRemoteViewServerPort()
      + ", Timeout=" + viewConf.getRemoteTimelineClientTimeoutSecs());
  return new RemoteHoodieTableFileSystemView(metaClient, viewConf);
}
3.26
hudi_FileSystemViewManager_close_rdh
/**
 * Closes all views opened.
 */
public void close() {
  if (!this.globalViewMap.isEmpty()) {
    this.globalViewMap.values().forEach(SyncableFileSystemView::close);
    this.globalViewMap.clear();
  }
}
3.26
hudi_FileSystemViewManager_m0_rdh
/**
 * Main API to get the file-system view for the base-path.
 *
 * @param basePath the table base path
 * @return the cached or newly created view
 */
public SyncableFileSystemView m0(String basePath) {
  return globalViewMap.computeIfAbsent(basePath, path -> {
    HoodieTableMetaClient metaClient =
        HoodieTableMetaClient.builder().setConf(conf.newCopy()).setBasePath(path).build();
    return viewCreator.apply(metaClient, viewStorageConfig);
  });
}
3.26
hudi_FileSystemViewManager_clearFileSystemView_rdh
/**
 * Drops the reference to the file-system view for the base path. A future call for this
 * base path results in creating a new view.
 *
 * @param basePath the table base path
 */
public void clearFileSystemView(String basePath) {
  SyncableFileSystemView view = globalViewMap.remove(basePath);
  if (view != null) {
    view.close();
  }
}
3.26
hudi_FileSystemViewManager_createRocksDBBasedFileSystemView_rdh
// FACTORY METHODS FOR CREATING FILE-SYSTEM VIEWS

/**
 * Create a RocksDB based file system view for a table.
 *
 * @param conf       Hadoop Configuration
 * @param viewConf   View Storage Configuration
 * @param metaClient HoodieTableMetaClient
 * @return the RocksDB based file system view
 */
private static RocksDbBasedFileSystemView createRocksDBBasedFileSystemView(SerializableConfiguration conf,
    FileSystemViewStorageConfig viewConf, HoodieTableMetaClient metaClient) {
  HoodieTimeline timeline = metaClient.getActiveTimeline().filterCompletedAndCompactionInstants();
  return new RocksDbBasedFileSystemView(metaClient, timeline, viewConf);
}
3.26
hudi_FileSystemViewManager_createSpillableMapBasedFileSystemView_rdh
/**
 * Create a spillable-map based file system view for a table.
 *
 * @param conf       Hadoop Configuration
 * @param viewConf   View Storage Configuration
 * @param metaClient HoodieTableMetaClient
 * @return the spillable-map based file system view
 */
private static SpillableMapBasedFileSystemView createSpillableMapBasedFileSystemView(SerializableConfiguration conf,
    FileSystemViewStorageConfig viewConf, HoodieTableMetaClient metaClient, HoodieCommonConfig commonConfig) {
  LOG.info("Creating SpillableMap based view for basePath " + metaClient.getBasePath());
  HoodieTimeline timeline = metaClient.getActiveTimeline().filterCompletedAndCompactionInstants();
  return new SpillableMapBasedFileSystemView(metaClient, timeline, viewConf, commonConfig);
}
3.26
hudi_HoodieMetaserverClientImp_isLocal_rdh
// used for test
@Override
public boolean isLocal() {
  return isLocal;
}
3.26
hudi_HoodieParquetInputFormat_initAvroInputFormat_rdh
/**
 * Spark2 uses `parquet.hadoop.ParquetInputFormat` from `com.twitter:parquet-hadoop-bundle`,
 * so we need to distinguish whether classes are constructed with
 * `parquet.hadoop.ParquetInputFormat` or `org.apache.parquet.hadoop.ParquetInputFormat`.
 * If we use `org.apache.parquet:parquet-hadoop`, we can use `HudiAvroParquetInputFormat`
 * in Hive or Spark3 to get timestamps with the correct type.
 */
private void initAvroInputFormat() {
  try {
    Constructor[] constructors = ParquetRecordReaderWrapper.class.getConstructors();
    if (Arrays.stream(constructors).anyMatch(c -> (c.getParameterCount() > 0)
        && c.getParameterTypes()[0].getName().equals(ParquetInputFormat.class.getName()))) {
      supportAvroRead = true;
    }
  } catch (SecurityException e) {
    throw new HoodieException("Failed to check if support avro reader: " + e.getMessage(), e);
  }
}
3.26
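The reflection check above boils down to "does any public constructor take a given type as its first parameter". A self-contained JDK-only sketch of that probe against a made-up wrapper class:

import java.lang.reflect.Constructor;
import java.util.Arrays;

public class ConstructorProbeSketch {
  // Stand-in for the wrapper whose constructors we want to inspect.
  static class ReaderWrapper {
    ReaderWrapper(String format) {}
    public ReaderWrapper(Integer format, boolean flag) {}
  }

  static boolean hasConstructorWithFirstParam(Class<?> target, Class<?> firstParamType) {
    Constructor<?>[] constructors = target.getConstructors(); // public constructors only
    return Arrays.stream(constructors)
        .anyMatch(c -> c.getParameterCount() > 0
            && c.getParameterTypes()[0].getName().equals(firstParamType.getName()));
  }

  public static void main(String[] args) {
    System.out.println(hasConstructorWithFirstParam(ReaderWrapper.class, Integer.class)); // true
    System.out.println(hasConstructorWithFirstParam(ReaderWrapper.class, String.class));  // false (ctor is not public)
  }
}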