Columns: name (string, lengths 12 to 178), code_snippet (string, lengths 8 to 36.5k), score (float64, 3.26 to 3.68)
hudi_BufferedRandomAccessFile_init_rdh
/** * * @param size * - capacity of the buffer */ private void init(int size) { this.capacity = Math.max(DEFAULT_BUFFER_SIZE, size); this.dataBuffer = ByteBuffer.wrap(new byte[this.capacity]); }
3.26
hudi_BufferedRandomAccessFile_endPosition_rdh
/** * * @return endPosition of the buffer. For the last file block, this may not be a valid position. */ private long endPosition() { return this.startPosition + this.capacity; }
3.26
hudi_BufferedRandomAccessFile_read_rdh
/** * Read specified number of bytes into given array starting at given offset. * * @param b * - byte array * @param off * - start offset * @param len * - length of bytes to be read * @return - number of bytes read. * @throws IOException */ @Override public int read(byte[] b, int off, int len) throws IOException { if (endOfBufferReached()) { if (!loadNewBlockToBuffer()) { return -1; } } // copy data from buffer len = Math.min(len, ((int) (this.validLastPosition - this.currentPosition))); int buffOff = ((int) (this.currentPosition - this.startPosition)); System.arraycopy(this.dataBuffer.array(), buffOff, b, off, len); this.currentPosition += len; return len; }
3.26
hudi_BufferedRandomAccessFile_flushBuffer_rdh
/** * Flush any dirty bytes in the buffer to disk. * * @throws IOException */ private void flushBuffer() throws IOException { if (this.f0) { alignDiskPositionToBufferStartIfNeeded(); int len = ((int) (this.currentPosition - this.startPosition)); super.write(this.dataBuffer.array(), 0, len); this.diskPosition = this.currentPosition; this.f0 = false; } }
3.26
hudi_BufferedRandomAccessFile_getFilePointer_rdh
/** * * @return current file position */ @Override public long getFilePointer() { return this.currentPosition; }
3.26
hudi_BufferedRandomAccessFile_length_rdh
/** * Returns the length of the file, depending on whether buffer has more data (to be flushed). * * @return - length of the file (including data yet to be flushed to the file). * @throws IOException */ @Override public long length() throws IOException { return Math.max(this.currentPosition, super.length()); }
3.26
hudi_BufferedRandomAccessFile_seek_rdh
/** * If the new seek position is in the buffer, adjust the currentPosition. * If the new seek position is outside of the buffer, flush the contents to * the file and reload the buffer corresponding to the position. * * We logically view the file as a group of blocks, where each block will perfectly * fit into the buffer (except for the last block). Given a position to seek, * we identify the block to be loaded using BUFFER_BOUNDARY_MASK. * * When dealing with the last block, we will have extra space between validLastPosition * and endPosition of the buffer. * * @param pos * - position in the file to be loaded to the buffer. * @throws IOException */ @Override public void seek(long pos) throws IOException { if ((pos >= this.validLastPosition) || (pos < this.startPosition)) { // seeking outside of current buffer -- flush and read this.flushBuffer(); this.startPosition = pos & BUFFER_BOUNDARY_MASK;// start at BuffSz boundary alignDiskPositionToBufferStartIfNeeded(); int n = this.fillBuffer(); this.validLastPosition = this.startPosition + ((long) (n)); } else // seeking inside current buffer -- no read required if (pos < this.currentPosition) { // if seeking backwards, flush buffer. this.flushBuffer(); } this.currentPosition = pos; }
3.26
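The seek implementation above aligns the buffer start to a block boundary with BUFFER_BOUNDARY_MASK, so any position maps to the block that contains it. A minimal standalone sketch of that alignment, assuming a power-of-two buffer size (the constants below are illustrative, not the class's actual values):

public class BlockAlignmentSketch {
    // Hypothetical buffer size; the mask trick requires a power of two.
    private static final int BUFFER_SIZE = 1 << 16; // 64 KiB
    private static final long BUFFER_BOUNDARY_MASK = -((long) BUFFER_SIZE); // clears the low 16 bits

    /** Returns the start offset of the block containing {@code pos}. */
    static long blockStart(long pos) {
        return pos & BUFFER_BOUNDARY_MASK;
    }

    public static void main(String[] args) {
        long pos = 200_000L;
        // Seeking to 200,000 loads the block starting at 196,608 (3 * 64 KiB).
        System.out.println("pos=" + pos + " -> blockStart=" + blockStart(pos));
    }
}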
hudi_BufferedRandomAccessFile_fillBuffer_rdh
/** * read ahead file contents to buffer. * * @return number of bytes filled * @throws IOException */ private int fillBuffer() throws IOException { int cnt = 0; int bytesToRead = this.capacity; // blocking read, until buffer is filled or EOF reached while (bytesToRead > 0) { int n = super.read(this.dataBuffer.array(), cnt, bytesToRead); if (n < 0) { break; } cnt += n; bytesToRead -= n; } this.isEOF = cnt < this.dataBuffer.array().length; this.diskPosition += cnt; return cnt; }
3.26
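fillBuffer above loops on read() until the buffer is full or EOF is hit, because a single read() may return fewer bytes than requested. The same pattern over a plain InputStream, as a sketch detached from the Hudi class:

import java.io.IOException;
import java.io.InputStream;

public final class ReadFully {
    /**
     * Reads up to {@code buf.length} bytes, looping until the buffer is full
     * or the stream reaches EOF. Returns the number of bytes actually read.
     */
    static int readFully(InputStream in, byte[] buf) throws IOException {
        int count = 0;
        while (count < buf.length) {
            int n = in.read(buf, count, buf.length - count);
            if (n < 0) {
                break; // EOF before the buffer was filled
            }
            count += n;
        }
        return count;
    }
}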
hudi_DirectMarkerBasedDetectionStrategy_checkMarkerConflict_rdh
/** * We need to do a list operation here. * In order to reduce the list pressure as much as possible, first we build path prefix in advance: * '$base_path/.temp/instant_time/partition_path', and only list these specific partition_paths * we need instead of listing all of '$base_path/.temp/' * * @param basePath * Base path of the table. * @param maxAllowableHeartbeatIntervalInMs * Heartbeat timeout. * @return true if the current fileID already exists under .temp/instant_time/partition_path/.. * @throws IOException * upon errors. */ public boolean checkMarkerConflict(String basePath, long maxAllowableHeartbeatIntervalInMs) throws IOException { String tempFolderPath = (basePath + Path.SEPARATOR) + HoodieTableMetaClient.TEMPFOLDER_NAME; List<String> candidateInstants = MarkerUtils.getCandidateInstants(activeTimeline, Arrays.stream(fs.listStatus(new Path(tempFolderPath))).map(FileStatus::getPath).collect(Collectors.toList()), instantTime, maxAllowableHeartbeatIntervalInMs, fs, basePath); long res = candidateInstants.stream().flatMap(currentMarkerDirPath -> { try { Path markerPartitionPath; if (StringUtils.isNullOrEmpty(f0)) { markerPartitionPath = new Path(currentMarkerDirPath); } else { markerPartitionPath = new Path(currentMarkerDirPath, f0); } if ((!StringUtils.isNullOrEmpty(f0)) && (!fs.exists(markerPartitionPath))) { return Stream.empty(); } else { return Arrays.stream(fs.listStatus(markerPartitionPath)).parallel().filter(path -> path.toString().contains(fileId)); }} catch (IOException e) { throw new HoodieIOException("IOException occurred while checking marker file conflict", e); } }).count(); if (res != 0L) { LOG.warn((((("Detected conflict marker files: " + f0) + "/") + fileId) + " for ") + instantTime); return true; } return false; }
3.26
hudi_HadoopConfigurations_getParquetConf_rdh
/** * Creates a merged hadoop configuration with given flink configuration and hadoop configuration. */ public static Configuration getParquetConf(Configuration options, Configuration hadoopConf) { Configuration copy = new Configuration(hadoopConf); Map<String, String> parquetOptions = FlinkOptions.getPropertiesWithPrefix(options.toMap(), PARQUET_PREFIX); parquetOptions.forEach((k, v) -> copy.set(PARQUET_PREFIX + k, v)); return copy; }
3.26
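getParquetConf above copies every option carrying the Parquet prefix into a fresh Hadoop Configuration. The prefix filtering it delegates to FlinkOptions.getPropertiesWithPrefix can be pictured with plain maps; the helper and the "parquet." prefix below are illustrative assumptions, not the module's actual constants:

import java.util.HashMap;
import java.util.Map;

public final class PrefixOptions {
    /** Returns the entries whose keys start with {@code prefix}, with the prefix stripped. */
    static Map<String, String> withPrefix(Map<String, String> options, String prefix) {
        Map<String, String> result = new HashMap<>();
        options.forEach((k, v) -> {
            if (k.startsWith(prefix)) {
                result.put(k.substring(prefix.length()), v);
            }
        });
        return result;
    }

    public static void main(String[] args) {
        Map<String, String> options = new HashMap<>();
        options.put("parquet.compression.codec", "snappy"); // hypothetical key
        options.put("write.tasks", "4");
        // Only the parquet-prefixed entry survives, keyed as "compression.codec".
        System.out.println(withPrefix(options, "parquet."));
    }
}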
hudi_HadoopConfigurations_getHiveConf_rdh
/** * Creates a Hive configuration with configured dir path or empty if no Hive conf dir is set. */ public static Configuration getHiveConf(Configuration conf) { String explicitDir = conf.getString(FlinkOptions.HIVE_SYNC_CONF_DIR, System.getenv("HIVE_CONF_DIR")); Configuration hadoopConf = new Configuration(); if (explicitDir != null) { hadoopConf.addResource(new Path(explicitDir, "hive-site.xml")); } return hadoopConf; }
3.26
hudi_ConsistentHashingUpdateStrategyUtils_constructPartitionToIdentifier_rdh
/** * Construct identifier for the given partitions that are under concurrent resizing (i.e., clustering). * * @return map from partition to pair<instant, identifier>, where instant is the clustering instant. */ public static Map<String, Pair<String, ConsistentBucketIdentifier>> constructPartitionToIdentifier(Set<String> partitions, HoodieTable table) { // Read all pending/ongoing clustering plans List<Pair<HoodieInstant, HoodieClusteringPlan>> instantPlanPairs = table.getMetaClient().getActiveTimeline().filterInflightsAndRequested().filter(instant -> instant.getAction().equals(HoodieTimeline.REPLACE_COMMIT_ACTION)).getInstantsAsStream().map(instant -> ClusteringUtils.getClusteringPlan(table.getMetaClient(), instant)).flatMap(o -> o.isPresent() ? Stream.of(o.get()) : Stream.empty()).collect(Collectors.toList()); // Construct child node for each partition & build the bucket identifier Map<String, HoodieConsistentHashingMetadata> partitionToHashingMeta = new HashMap<>(); Map<String, String> partitionToInstant = new HashMap<>(); for (Pair<HoodieInstant, HoodieClusteringPlan> pair : instantPlanPairs) { String instant = pair.getLeft().getTimestamp(); HoodieClusteringPlan plan = pair.getRight(); extractHashingMetadataFromClusteringPlan(instant, plan, table, partitions, partitionToHashingMeta, partitionToInstant); } return partitionToHashingMeta.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> Pair.of(partitionToInstant.get(e.getKey()), new ConsistentBucketIdentifier(e.getValue())))); }
3.26
hudi_DFSPropertiesConfiguration_addToGlobalProps_rdh
// test only public static TypedProperties addToGlobalProps(String key, String value) { GLOBAL_PROPS.put(key, value); return GLOBAL_PROPS; }
3.26
hudi_DFSPropertiesConfiguration_loadGlobalProps_rdh
/** * Load global props from hudi-defaults.conf which is under class loader or CONF_FILE_DIR_ENV_NAME. * * @return Typed Properties */ public static TypedProperties loadGlobalProps() { DFSPropertiesConfiguration conf = new DFSPropertiesConfiguration(); // First try loading the external config file from class loader URL configFile = Thread.currentThread().getContextClassLoader().getResource(DEFAULT_PROPERTIES_FILE); if (configFile != null) { try (BufferedReader br = new BufferedReader(new InputStreamReader(configFile.openStream()))) { conf.addPropsFromStream(br, new Path(configFile.toURI())); return conf.getProps(); } catch (URISyntaxException e) { throw new HoodieException(String.format("Provided props file url is invalid %s", configFile), e); } catch (IOException ioe) { throw new HoodieIOException(String.format("Failed to read %s from class loader", DEFAULT_PROPERTIES_FILE), ioe); } } // Try loading the external config file from local file system Option<Path> defaultConfPath = getConfPathFromEnv(); if (defaultConfPath.isPresent()) { conf.addPropsFromFile(defaultConfPath.get()); } else { try { conf.addPropsFromFile(DEFAULT_PATH); } catch (Exception e) { LOG.warn("Cannot load default config file: " + DEFAULT_PATH, e); } } return conf.getProps(); }
3.26
hudi_DFSPropertiesConfiguration_addPropsFromFile_rdh
/** * Add properties from external configuration files. * * @param filePath * File path for configuration file */ public void addPropsFromFile(Path filePath) { if (visitedFilePaths.contains(filePath.toString())) { throw new IllegalStateException(("Loop detected; file " + filePath) + " already referenced"); } FileSystem v4 = FSUtils.getFs(filePath.toString(), Option.ofNullable(hadoopConfig).orElseGet(Configuration::new)); try { if (filePath.equals(DEFAULT_PATH) && (!v4.exists(filePath))) { LOG.warn(("Properties file " + filePath) + " not found. Skipping loading the props file"); return; } } catch (IOException ioe) { throw new HoodieIOException("Cannot check if the properties file exists: " + filePath, ioe); } try (BufferedReader reader = new BufferedReader(new InputStreamReader(v4.open(filePath)))) { visitedFilePaths.add(filePath.toString()); addPropsFromStream(reader, filePath); } catch (IOException ioe) { LOG.error("Error reading in properties from dfs from file " + filePath); throw new HoodieIOException("Cannot read properties from dfs from file " + filePath, ioe); }}
3.26
hudi_DFSPropertiesConfiguration_addPropsFromStream_rdh
/** * Add properties from buffered reader. * * @param reader * Buffered Reader * @throws IOException */ public void addPropsFromStream(BufferedReader reader, Path cfgFilePath) throws IOException { try { reader.lines().forEach(line -> { if (!isValidLine(line)) { return; } String[] split = splitProperty(line); if (line.startsWith("include=") || line.startsWith("include =")) { Path providedPath = new Path(split[1]); FileSystem providedFs = FSUtils.getFs(split[1], hadoopConfig); // In the case that only filename is provided, assume it's in the same directory. if (((!providedPath.isAbsolute()) || StringUtils.isNullOrEmpty(providedFs.getScheme())) && (cfgFilePath != null)) { providedPath = new Path(cfgFilePath.getParent(), split[1]); } addPropsFromFile(providedPath); } else { hoodieConfig.setValue(split[0], split[1]); } }); } finally {reader.close(); } }
3.26
hudi_IncrSourceHelper_getStrictlyLowerTimestamp_rdh
/** * Get a timestamp which is the next value in a descending sequence. * * @param timestamp * Timestamp */ private static String getStrictlyLowerTimestamp(String timestamp) { long ts = Long.parseLong(timestamp); ValidationUtils.checkArgument(ts > 0, "Timestamp must be positive"); long lower = ts - 1; return "" + lower; }
3.26
hudi_IncrSourceHelper_getMissingCheckpointStrategy_rdh
/** * Determine the policy to choose if a checkpoint is missing (detected by the absence of a beginInstant), * during a run of a {@link HoodieIncrSource}. * * @param props * the usual Hudi props object * @return the configured {@link MissingCheckpointStrategy}, or null if none is set. */ public static MissingCheckpointStrategy getMissingCheckpointStrategy(TypedProperties props) { boolean readLatestOnMissingCkpt = getBooleanWithAltKeys(props, READ_LATEST_INSTANT_ON_MISSING_CKPT); if (readLatestOnMissingCkpt) { return MissingCheckpointStrategy.READ_LATEST; } if (containsConfigProperty(props, MISSING_CHECKPOINT_STRATEGY)) { return MissingCheckpointStrategy.valueOf(getStringWithAltKeys(props, MISSING_CHECKPOINT_STRATEGY)); } return null; }
3.26
hudi_IncrSourceHelper_filterAndGenerateCheckpointBasedOnSourceLimit_rdh
/** * Adjust the source dataset to size based batch based on last checkpoint key. * * @param sourceData * Source dataset * @param sourceLimit * Max number of bytes to be read from source * @param queryInfo * Query Info * @return end instants along with filtered rows. */ public static Pair<CloudObjectIncrCheckpoint, Option<Dataset<Row>>> filterAndGenerateCheckpointBasedOnSourceLimit(Dataset<Row> sourceData, long sourceLimit, QueryInfo queryInfo, CloudObjectIncrCheckpoint cloudObjectIncrCheckpoint) { if (sourceData.isEmpty()) { return Pair.of(cloudObjectIncrCheckpoint, Option.empty()); } // Let's persist the dataset to avoid triggering the dag repeatedly sourceData.persist(StorageLevel.MEMORY_AND_DISK()); // Set ordering in query to enable batching Dataset<Row> orderedDf = QueryRunner.applyOrdering(sourceData, queryInfo.getOrderByColumns()); Option<String> lastCheckpoint = Option.of(cloudObjectIncrCheckpoint.getCommit()); Option<String> lastCheckpointKey = Option.ofNullable(cloudObjectIncrCheckpoint.getKey()); Option<String> concatenatedKey = lastCheckpoint.flatMap(checkpoint -> lastCheckpointKey.map(key -> checkpoint + key)); // Filter until last checkpoint key if (concatenatedKey.isPresent()) { orderedDf = orderedDf.withColumn("commit_key", functions.concat(functions.col(queryInfo.getOrderColumn()), functions.col(queryInfo.getKeyColumn()))); // Apply incremental filter orderedDf = orderedDf.filter(functions.col("commit_key").gt(concatenatedKey.get())).drop("commit_key"); // We could be just at the end of the commit, so return empty if (orderedDf.isEmpty()) { LOG.info("Empty ordered source, returning endpoint:" + queryInfo.getEndInstant()); sourceData.unpersist(); return Pair.of(new CloudObjectIncrCheckpoint(queryInfo.getEndInstant(), lastCheckpointKey.get()), Option.empty()); } } // Limit based on sourceLimit WindowSpec windowSpec = Window.orderBy(col(queryInfo.getOrderColumn()), col(queryInfo.getKeyColumn())); // Add the 'cumulativeSize' column with running sum of 'limitColumn' Dataset<Row> aggregatedData = orderedDf.withColumn(CUMULATIVE_COLUMN_NAME, sum(col(queryInfo.getLimitColumn())).over(windowSpec)); Dataset<Row> collectedRows = aggregatedData.filter(col(CUMULATIVE_COLUMN_NAME).leq(sourceLimit)); Row row = null; if (collectedRows.isEmpty()) { // If the first element itself exceeds limits then return first element LOG.info(("First object exceeding source limit: " + sourceLimit) + " bytes"); row = aggregatedData.select(queryInfo.getOrderColumn(), queryInfo.getKeyColumn(), CUMULATIVE_COLUMN_NAME).first(); collectedRows = aggregatedData.limit(1); } else { // Get the last row and form composite key row = collectedRows.select(queryInfo.getOrderColumn(), queryInfo.getKeyColumn(), CUMULATIVE_COLUMN_NAME).orderBy(col(queryInfo.getOrderColumn()).desc(), col(queryInfo.getKeyColumn()).desc()).first(); } LOG.info(("Processed batch size: " + row.get(row.fieldIndex(CUMULATIVE_COLUMN_NAME))) + " bytes"); sourceData.unpersist(); return Pair.of(new CloudObjectIncrCheckpoint(row.getString(0), row.getString(1)), Option.of(collectedRows)); }
3.26
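The size-based batching above hinges on a running sum over an ordered window, filtered against sourceLimit. A condensed sketch of that pattern with the Spark Java API, using illustrative column names rather than the actual QueryInfo fields:

import static org.apache.spark.sql.functions.col;
import static org.apache.spark.sql.functions.sum;

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.expressions.Window;
import org.apache.spark.sql.expressions.WindowSpec;

public final class SizeCappedBatch {
    /**
     * Keeps the leading rows (ordered by commit then key) whose cumulative
     * "size" stays within {@code limitBytes}.
     */
    static Dataset<Row> capBySize(Dataset<Row> rows, long limitBytes) {
        WindowSpec window = Window.orderBy(col("commit"), col("key"));
        Dataset<Row> withRunningSum = rows.withColumn("cumulativeSize", sum(col("size")).over(window));
        return withRunningSum.filter(col("cumulativeSize").leq(limitBytes));
    }
}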
hudi_IncrSourceHelper_generateQueryInfo_rdh
/** * Find begin and end instants to be set for the next fetch. * * @param jssc * Java Spark Context * @param srcBasePath * Base path of Hudi source table * @param numInstantsPerFetch * Max Instants per fetch * @param beginInstant * Last Checkpoint String * @param missingCheckpointStrategy * when begin instant is missing, allow reading based on missing checkpoint strategy * @param handlingMode * Hollow Commit Handling Mode * @param orderColumn * Column to order by (used for size based incr source) * @param keyColumn * Key column (used for size based incr source) * @param limitColumn * Limit column (used for size based incr source) * @param sourceLimitBasedBatching * When sourceLimit based batching is used, we need to fetch the current commit as well, * this flag is used to indicate that. * @param lastCheckpointKey * Last checkpoint key (used in the upgrade code path) * @return begin and end instants along with query type and other information. */public static QueryInfo generateQueryInfo(JavaSparkContext jssc, String srcBasePath, int numInstantsPerFetch, Option<String> beginInstant, MissingCheckpointStrategy missingCheckpointStrategy, HollowCommitHandling handlingMode, String orderColumn, String keyColumn, String limitColumn, boolean sourceLimitBasedBatching, Option<String> lastCheckpointKey) { ValidationUtils.checkArgument(numInstantsPerFetch > 0, "Make sure the config hoodie.streamer.source.hoodieincr.num_instants is set to a positive value"); HoodieTableMetaClient srcMetaClient = HoodieTableMetaClient.builder().setConf(jssc.hadoopConfiguration()).setBasePath(srcBasePath).setLoadActiveTimelineOnLoad(true).build(); HoodieTimeline completedCommitTimeline = srcMetaClient.getCommitsAndCompactionTimeline().filterCompletedInstants(); final HoodieTimeline activeCommitTimeline = handleHollowCommitIfNeeded(completedCommitTimeline, srcMetaClient, handlingMode); Function<HoodieInstant, String> timestampForLastInstant = instant -> handlingMode == HollowCommitHandling.USE_TRANSITION_TIME ? instant.getCompletionTime() : instant.getTimestamp(); String beginInstantTime = beginInstant.orElseGet(() -> { if (missingCheckpointStrategy != null) { if (missingCheckpointStrategy == MissingCheckpointStrategy.READ_LATEST) { Option<HoodieInstant> lastInstant = activeCommitTimeline.lastInstant(); return lastInstant.map(hoodieInstant -> getStrictlyLowerTimestamp(timestampForLastInstant.apply(hoodieInstant))).orElse(DEFAULT_BEGIN_TIMESTAMP); } else { return DEFAULT_BEGIN_TIMESTAMP; } } else { throw new IllegalArgumentException("Missing begin instant for incremental pull. For reading from latest " + "committed instant set hoodie.streamer.source.hoodieincr.missing.checkpoint.strategy to a valid value"); } }); // When `beginInstantTime` is present, `previousInstantTime` is set to the completed commit before `beginInstantTime` if that exists. // If there is no completed commit before `beginInstantTime`, e.g., `beginInstantTime` is the first commit in the active timeline, // `previousInstantTime` is set to `DEFAULT_BEGIN_TIMESTAMP`. 
String previousInstantTime = DEFAULT_BEGIN_TIMESTAMP; if (!beginInstantTime.equals(DEFAULT_BEGIN_TIMESTAMP)) { Option<HoodieInstant> previousInstant = activeCommitTimeline.findInstantBefore(beginInstantTime); if (previousInstant.isPresent()) { previousInstantTime = previousInstant.get().getTimestamp(); } else // if begin instant time matches first entry in active timeline, we can set previous = beginInstantTime - 1 if (activeCommitTimeline.filterCompletedInstants().firstInstant().isPresent() && activeCommitTimeline.filterCompletedInstants().firstInstant().get().getTimestamp().equals(beginInstantTime)) { previousInstantTime = String.valueOf(Long.parseLong(beginInstantTime) - 1); } } if ((missingCheckpointStrategy == MissingCheckpointStrategy.READ_LATEST) || (!activeCommitTimeline.isBeforeTimelineStarts(beginInstantTime))) { Option<HoodieInstant> nthInstant; // When we are in the upgrade code path from non-sourcelimit-based batching to sourcelimit-based batching, we need to avoid fetching the commit // that is read already. Else we will have duplicates in append-only use case if we use "findInstantsAfterOrEquals". // As soon as we have a new format of checkpoint and a key we will move to the new code of fetching the current commit as well. if (sourceLimitBasedBatching && lastCheckpointKey.isPresent()) {nthInstant = Option.fromJavaOptional(activeCommitTimeline.findInstantsAfterOrEquals(beginInstantTime, numInstantsPerFetch).getInstantsAsStream().reduce((x, y) -> y)); } else { nthInstant = Option.fromJavaOptional(activeCommitTimeline.findInstantsAfter(beginInstantTime, numInstantsPerFetch).getInstantsAsStream().reduce((x, y) -> y)); } return new QueryInfo(DataSourceReadOptions.QUERY_TYPE_INCREMENTAL_OPT_VAL(), previousInstantTime, beginInstantTime, nthInstant.map(HoodieInstant::getTimestamp).orElse(beginInstantTime), orderColumn, keyColumn, limitColumn); } else { // when MissingCheckpointStrategy is set to read everything until latest, trigger snapshot query. Option<HoodieInstant> lastInstant = activeCommitTimeline.lastInstant(); return new QueryInfo(DataSourceReadOptions.QUERY_TYPE_SNAPSHOT_OPT_VAL(), previousInstantTime, beginInstantTime, lastInstant.get().getTimestamp(), orderColumn, keyColumn, limitColumn); } }
3.26
hudi_IncrSourceHelper_getHollowCommitHandleMode_rdh
/** * When hollow commits are found while using incremental source with {@link HoodieDeltaStreamer}, * unlike batch incremental query, we do not use {@link HollowCommitHandling#FAIL} by default, * instead we use {@link HollowCommitHandling#BLOCK} to block processing data from going beyond the * hollow commits to avoid unintentional skip. * <p> * Users can set {@link DataSourceReadOptions#INCREMENTAL_READ_HANDLE_HOLLOW_COMMIT} to * {@link HollowCommitHandling#USE_TRANSITION_TIME} to avoid the blocking behavior. */ public static HollowCommitHandling getHollowCommitHandleMode(TypedProperties props) { return HollowCommitHandling.valueOf(props.getString(INCREMENTAL_READ_HANDLE_HOLLOW_COMMIT().key(), HollowCommitHandling.BLOCK.name())); }
3.26
hudi_BaseAvroPayload_isDeleteRecord_rdh
/** * * @param genericRecord * instance of {@link GenericRecord} of interest. * @return {@code true} if record represents a delete record. {@code false} otherwise. */ protected boolean isDeleteRecord(GenericRecord genericRecord) { final String isDeleteKey = HoodieRecord.HOODIE_IS_DELETED_FIELD; // Modified to be compatible with newer Avro versions. // Newer Avro versions throw in GenericRecord.get if the field name // does not exist in the schema. if (genericRecord.getSchema().getField(isDeleteKey) == null) { return false; } Object deleteMarker = genericRecord.get(isDeleteKey); return (deleteMarker instanceof Boolean) && ((boolean) (deleteMarker)); }
3.26
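isDeleteRecord above treats an optional boolean field on the record as a delete marker, guarding against schemas that lack the field. A small self-contained sketch of the same check against a hand-built Avro record; the schema and the "_hoodie_is_deleted" field name mirror the Hudi convention, but the record values are made up:

import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;

public final class DeleteMarkerCheck {
    private static final String SCHEMA_JSON =
        "{\"type\":\"record\",\"name\":\"r\",\"fields\":["
            + "{\"name\":\"id\",\"type\":\"string\"},"
            + "{\"name\":\"_hoodie_is_deleted\",\"type\":\"boolean\",\"default\":false}]}";

    static boolean isDelete(GenericRecord record) {
        // Guard first: newer Avro throws in get() when the field is absent from the schema.
        if (record.getSchema().getField("_hoodie_is_deleted") == null) {
            return false;
        }
        Object marker = record.get("_hoodie_is_deleted");
        return marker instanceof Boolean && (boolean) marker;
    }

    public static void main(String[] args) {
        Schema schema = new Schema.Parser().parse(SCHEMA_JSON);
        GenericRecord record = new GenericData.Record(schema);
        record.put("id", "k1");
        record.put("_hoodie_is_deleted", true);
        System.out.println(isDelete(record)); // true
    }
}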
hudi_BaseAvroPayload_isDeleted_rdh
/** * Defines whether this implementation of {@link HoodieRecordPayload} is deleted. * We will not do deserialization in this method. */ public boolean isDeleted(Schema schema, Properties props) { return isDeletedRecord; }
3.26
hudi_BaseAvroPayload_canProduceSentinel_rdh
/** * Defines whether this implementation of {@link HoodieRecordPayload} could produce * {@link HoodieRecord#SENTINEL} */ public boolean canProduceSentinel() { return false; }
3.26
hudi_InternalSchemaMerger_mergeType_rdh
/** * Create final read schema to read avro/parquet file. * this is auxiliary function used by mergeSchema. */ private Type mergeType(Type type, int currentTypeId) { switch (type.typeId()) { case RECORD : Types.RecordType record = ((Types.RecordType) (type)); List<Type> newTypes = new ArrayList<>(); for (Types.Field f : record.fields()) { Type newType = mergeType(f.type(), f.fieldId()); newTypes.add(newType);} return Types.RecordType.get(buildRecordType(record.fields(), newTypes)); case ARRAY : Types.ArrayType array = ((Types.ArrayType) (type)); Type newElementType; Types.Field elementField = array.fields().get(0); newElementType = mergeType(elementField.type(), elementField.fieldId()); return buildArrayType(array, newElementType); case MAP :Types.MapType map = ((Types.MapType) (type)); Type newValueType = mergeType(map.valueType(), map.valueId()); return buildMapType(map, newValueType); default : return buildPrimitiveType(((Type.PrimitiveType) (type)), currentTypeId);} }
3.26
hudi_InternalSchemaMerger_mergeSchema_rdh
/** * Create final read schema to read avro/parquet file. * * @return read schema to read avro/parquet file. */ public InternalSchema mergeSchema() { Types.RecordType record = ((Types.RecordType) (mergeType(querySchema.getRecord(), 0))); return new InternalSchema(record); }
3.26
hudi_SimpleBloomFilter_serializeToString_rdh
/** * Serialize the bloom filter as a string. */ @Override public String serializeToString() { ByteArrayOutputStream baos = new ByteArrayOutputStream(); DataOutputStream dos = new DataOutputStream(baos); try { filter.write(dos); byte[] bytes = baos.toByteArray(); dos.close(); return Base64CodecUtil.encode(bytes); } catch (IOException e) { throw new HoodieIndexException("Could not serialize BloomFilter instance", e); } }
3.26
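serializeToString above writes the filter into a byte stream and Base64-encodes the result via Hudi's Base64CodecUtil. The same write-then-encode shape using only the JDK, with a placeholder payload standing in for the real bloom filter state:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public final class SerializeToStringSketch {
    /** Serializes some binary state to a Base64 string, mirroring the write-then-encode pattern. */
    static String serialize(byte[] state) throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        try (DataOutputStream dos = new DataOutputStream(baos)) {
            dos.writeInt(state.length); // length header as a stand-in for filter.write(dos)
            dos.write(state);
        }
        return Base64.getEncoder().encodeToString(baos.toByteArray());
    }

    public static void main(String[] args) throws IOException {
        System.out.println(serialize("hello".getBytes(StandardCharsets.UTF_8)));
    }
}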
hudi_SimpleBloomFilter_m0_rdh
// @Override public void m0(DataOutput out) throws IOException { out.write(getUTF8Bytes(filter.toString())); }
3.26
hudi_SimpleBloomFilter_readFields_rdh
// @Override public void readFields(DataInput in) throws IOException { filter = new InternalBloomFilter(); filter.readFields(in); }
3.26
hudi_FlatLists_of_rdh
/** * Creates a memory-, CPU- and cache-efficient immutable list from an * existing list. The list is always copied. * * @param t * List of members to copy * @param <T> * Element type * @return List containing the given members */ public static <T> List<T> of(List<T> t) { return of_(t); }
3.26
hudi_HiveHoodieTableFileIndex_listFileSlices_rdh
/** * Lists latest file-slices (base-file along w/ delta-log files) per partition. * * @return mapping from string partition paths to its base/log files */ public Map<String, List<FileSlice>> listFileSlices() { return getAllInputFileSlices().entrySet().stream().collect(Collectors.toMap(e -> e.getKey().getPath(), Map.Entry::getValue)); }
3.26
hudi_FlinkClientUtil_m0_rdh
/** * Returns the hadoop configuration with possible hadoop conf paths. * E.G. the configurations under path $HADOOP_CONF_DIR and $HADOOP_HOME. */ public static Configuration m0() { // create hadoop configuration with hadoop conf directory configured. Configuration hadoopConf = null; for (String possibleHadoopConfPath : HadoopUtils.possibleHadoopConfPaths(new Configuration())) { hadoopConf = getHadoopConfiguration(possibleHadoopConfPath); if (hadoopConf != null) { break; } } if (hadoopConf == null) { hadoopConf = new Configuration(); } return hadoopConf; }
3.26
hudi_FlinkClientUtil_createMetaClient_rdh
/** * Creates the meta client. */ public static HoodieTableMetaClient createMetaClient(String basePath) { return HoodieTableMetaClient.builder().setBasePath(basePath).setConf(FlinkClientUtil.m0()).build(); }
3.26
hudi_FlinkClientUtil_getHadoopConfiguration_rdh
/** * Returns a new Hadoop Configuration object using the path to the hadoop conf configured. * * @param hadoopConfDir * Hadoop conf directory path. * @return A Hadoop configuration instance. */ private static Configuration getHadoopConfiguration(String hadoopConfDir) { if (new File(hadoopConfDir).exists()) { Configuration hadoopConfiguration = new Configuration(); File coreSite = new File(hadoopConfDir, "core-site.xml"); if (coreSite.exists()) { hadoopConfiguration.addResource(new Path(coreSite.getAbsolutePath())); } File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml"); if (hdfsSite.exists()) { hadoopConfiguration.addResource(new Path(hdfsSite.getAbsolutePath())); } File yarnSite = new File(hadoopConfDir, "yarn-site.xml"); if (yarnSite.exists()) { hadoopConfiguration.addResource(new Path(yarnSite.getAbsolutePath())); } // Add mapred-site.xml. We need to read configurations like compression codec. File mapredSite = new File(hadoopConfDir, "mapred-site.xml"); if (mapredSite.exists()) { hadoopConfiguration.addResource(new Path(mapredSite.getAbsolutePath())); } return hadoopConfiguration; } return null; }
3.26
hudi_HoodieHFileUtils_createHFileReader_rdh
/** * Creates HFile reader for byte array with default `primaryReplicaReader` as true. * * @param fs * File system. * @param dummyPath * Dummy path to file to read. * @param content * Content in byte array. * @return HFile reader * @throws IOException * Upon error. */ public static Reader createHFileReader(FileSystem fs, Path dummyPath, byte[] content) { // Avoid loading default configs, from the FS, since this configuration is mostly // used as a stub to initialize HFile reader Configuration conf = new Configuration(false); HoodieAvroHFileReader.SeekableByteArrayInputStream bis = new HoodieAvroHFileReader.SeekableByteArrayInputStream(content);FSDataInputStream fsdis = new FSDataInputStream(bis); FSDataInputStreamWrapper stream = new FSDataInputStreamWrapper(fsdis); ReaderContext context = new ReaderContextBuilder().withFilePath(dummyPath).withInputStreamWrapper(stream).withFileSize(content.length).withFileSystem(fs).withPrimaryReplicaReader(USE_PRIMARY_REPLICA_READER).withReaderType(ReaderType.STREAM).build();try { HFileInfo fileInfo = new HFileInfo(context, conf); HFile.Reader reader = HFile.createReader(context, fileInfo, new CacheConfig(conf), conf); fileInfo.initMetaAndIndex(reader); return reader; } catch (IOException e) { throw new HoodieIOException("Failed to initialize HFile reader for " + dummyPath, e); } }
3.26
hudi_SourceFormatAdapter_fetchNewDataInRowFormat_rdh
/** * Fetch new data in row format. If the source provides data in different format, they are translated to Row format */ public InputBatch<Dataset<Row>> fetchNewDataInRowFormat(Option<String> lastCkptStr, long sourceLimit) { switch (source.getSourceType()) { case ROW : // we do the sanitizing here if enabled InputBatch<Dataset<Row>> datasetInputBatch = ((Source<Dataset<Row>>) (source)).fetchNext(lastCkptStr, sourceLimit); return new InputBatch<>(processErrorEvents(datasetInputBatch.getBatch(), ErrorReason.JSON_ROW_DESERIALIZATION_FAILURE), datasetInputBatch.getCheckpointForNextBatch(), datasetInputBatch.getSchemaProvider()); case AVRO : { // don't need to sanitize because it's already avro InputBatch<JavaRDD<GenericRecord>> r = ((Source<JavaRDD<GenericRecord>>) (source)).fetchNext(lastCkptStr, sourceLimit); return avroDataInRowFormat(r); } case JSON : { if (isFieldNameSanitizingEnabled()) { // leverage the json -> avro sanitizing. TODO([HUDI-5829]) Optimize by sanitizing during direct conversion InputBatch<JavaRDD<GenericRecord>> r = fetchNewDataInAvroFormat(lastCkptStr, sourceLimit); return avroDataInRowFormat(r); } InputBatch<JavaRDD<String>> r = ((Source<JavaRDD<String>>) (source)).fetchNext(lastCkptStr, sourceLimit); Schema sourceSchema = r.getSchemaProvider().getSourceSchema(); if (errorTableWriter.isPresent()) { // if error table writer is enabled, during spark read `columnNameOfCorruptRecord` option is configured. // Any records which spark is unable to read successfully are transferred to the column // configured via this option. The column is then used to trigger error events. StructType dataType = AvroConversionUtils.convertAvroSchemaToStructType(sourceSchema).add(new StructField(ERROR_TABLE_CURRUPT_RECORD_COL_NAME, DataTypes.StringType, true, Metadata.empty())); StructType v15 = dataType.asNullable(); Option<Dataset<Row>> v16 = r.getBatch().map(rdd -> source.getSparkSession().read().option("columnNameOfCorruptRecord", ERROR_TABLE_CURRUPT_RECORD_COL_NAME).schema(v15).option("mode", "PERMISSIVE").json(rdd)); Option<Dataset<Row>> eventsDataset = processErrorEvents(v16, ErrorReason.JSON_ROW_DESERIALIZATION_FAILURE); return new InputBatch<>(eventsDataset, r.getCheckpointForNextBatch(), r.getSchemaProvider()); } else { StructType dataType = AvroConversionUtils.convertAvroSchemaToStructType(sourceSchema); return new InputBatch<>(Option.ofNullable(r.getBatch().map(rdd -> source.getSparkSession().read().schema(dataType).json(rdd)).orElse(null)), r.getCheckpointForNextBatch(), r.getSchemaProvider()); } } case PROTO : { // TODO([HUDI-5830]) implement field name sanitization InputBatch<JavaRDD<Message>> r = ((Source<JavaRDD<Message>>) (source)).fetchNext(lastCkptStr, sourceLimit); Schema sourceSchema = r.getSchemaProvider().getSourceSchema(); AvroConvertor convertor = new AvroConvertor(r.getSchemaProvider().getSourceSchema()); return new InputBatch<>(Option.ofNullable(r.getBatch().map(rdd -> rdd.map(convertor::fromProtoMessage)).map(rdd -> AvroConversionUtils.createDataFrame(JavaRDD.toRDD(rdd), sourceSchema.toString(), source.getSparkSession())).orElse(null)), r.getCheckpointForNextBatch(), r.getSchemaProvider()); } default : throw new IllegalArgumentException(("Unknown source type (" + source.getSourceType()) + ")"); }}
3.26
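When the error table is enabled, the JSON branch above reads records in PERMISSIVE mode and routes unparsable lines into a dedicated corrupt-record column. A condensed sketch of that Spark reader configuration, with an illustrative schema and column name rather than the adapter's actual constants:

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructType;

public final class PermissiveJsonRead {
    static Dataset<Row> readWithCorruptColumn(SparkSession spark, Dataset<String> jsonLines) {
        // Target schema plus an extra nullable string column that collects unparsable records.
        StructType schema = new StructType()
            .add("id", DataTypes.StringType, true)
            .add("value", DataTypes.LongType, true)
            .add("_corrupt_record", DataTypes.StringType, true);
        return spark.read()
            .schema(schema)
            .option("mode", "PERMISSIVE")
            .option("columnNameOfCorruptRecord", "_corrupt_record")
            .json(jsonLines);
    }
}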
hudi_SourceFormatAdapter_fetchNewDataInAvroFormat_rdh
/** * Fetch new data in avro format. If the source provides data in different format, they are translated to Avro format */ public InputBatch<JavaRDD<GenericRecord>> fetchNewDataInAvroFormat(Option<String> lastCkptStr, long sourceLimit) { switch (source.getSourceType()) { case AVRO : // don't need to sanitize because it's already avro return ((Source<JavaRDD<GenericRecord>>) (source)).fetchNext(lastCkptStr, sourceLimit); case JSON :{ // sanitizing is done inside the convertor in transformJsonToGenericRdd if enabled InputBatch<JavaRDD<String>> r = ((Source<JavaRDD<String>>) (source)).fetchNext(lastCkptStr, sourceLimit); JavaRDD<GenericRecord> eventsRdd = transformJsonToGenericRdd(r); return new InputBatch<>(Option.ofNullable(eventsRdd), r.getCheckpointForNextBatch(), r.getSchemaProvider()); } case ROW : { // we do the sanitizing here if enabled InputBatch<Dataset<Row>> r = ((Source<Dataset<Row>>) (source)).fetchNext(lastCkptStr, sourceLimit); return new InputBatch<>(Option.ofNullable(r.getBatch().map(rdd -> { SchemaProvider originalProvider = UtilHelpers.getOriginalSchemaProvider(r.getSchemaProvider()); return (originalProvider instanceof FilebasedSchemaProvider) || (originalProvider instanceof SchemaRegistryProvider) ? // If the source schema is specified through Avro schema, // pass in the schema for the Row-to-Avro conversion // to avoid nullability mismatch between Avro schema and Row schema HoodieSparkUtils.createRdd(rdd, HOODIE_RECORD_STRUCT_NAME, HOODIE_RECORD_NAMESPACE, true, org.apache.hudi.common.util.Option.ofNullable(r.getSchemaProvider().getSourceSchema())).toJavaRDD() : HoodieSparkUtils.createRdd(rdd, HOODIE_RECORD_STRUCT_NAME, HOODIE_RECORD_NAMESPACE, false, Option.empty()).toJavaRDD(); }).orElse(null)), r.getCheckpointForNextBatch(), r.getSchemaProvider()); } case PROTO :{ // TODO([HUDI-5830]) implement field name sanitization InputBatch<JavaRDD<Message>> r = ((Source<JavaRDD<Message>>) (source)).fetchNext(lastCkptStr, sourceLimit); AvroConvertor convertor = new AvroConvertor(r.getSchemaProvider().getSourceSchema()); return new InputBatch<>(Option.ofNullable(r.getBatch().map(rdd -> rdd.map(convertor::fromProtoMessage)).orElse(null)), r.getCheckpointForNextBatch(), r.getSchemaProvider()); } default : throw new IllegalArgumentException(("Unknown source type (" + source.getSourceType()) + ")"); } }
3.26
hudi_SourceFormatAdapter_getInvalidCharMask_rdh
/** * Replacement mask for invalid characters encountered in avro names. * * @return the replacement mask. */ private String getInvalidCharMask() { return invalidCharMask; }
3.26
hudi_SourceFormatAdapter_processErrorEvents_rdh
/** * transform datasets with error events when error table is enabled * * @param eventsRow * @return */ public Option<Dataset<Row>> processErrorEvents(Option<Dataset<Row>> eventsRow, ErrorEvent.ErrorReason errorReason) { return eventsRow.map(dataset -> { if (errorTableWriter.isPresent() && Arrays.stream(dataset.columns()).collect(Collectors.toList()).contains(ERROR_TABLE_CURRUPT_RECORD_COL_NAME)) { errorTableWriter.get().addErrorEvents(dataset.filter(new Column(ERROR_TABLE_CURRUPT_RECORD_COL_NAME).isNotNull()).select(new Column(ERROR_TABLE_CURRUPT_RECORD_COL_NAME)).toJavaRDD().map(ev -> new ErrorEvent<>(ev.getString(0), errorReason))); return dataset.filter(new Column(ERROR_TABLE_CURRUPT_RECORD_COL_NAME).isNull()).drop(ERROR_TABLE_CURRUPT_RECORD_COL_NAME); } return dataset; }); }
3.26
hudi_SourceFormatAdapter_transformJsonToGenericRdd_rdh
/** * transform input rdd of json string to generic records with support for adding error events to error table * * @param inputBatch * @return */ private JavaRDD<GenericRecord> transformJsonToGenericRdd(InputBatch<JavaRDD<String>> inputBatch) { MercifulJsonConverter.clearCache(inputBatch.getSchemaProvider().getSourceSchema().getFullName()); AvroConvertor convertor = new AvroConvertor(inputBatch.getSchemaProvider().getSourceSchema(), isFieldNameSanitizingEnabled(), getInvalidCharMask()); return inputBatch.getBatch().map(rdd -> { if (errorTableWriter.isPresent()) { JavaRDD<Either<GenericRecord, String>> v1 = rdd.map(convertor::fromJsonWithError); errorTableWriter.get().addErrorEvents(v1.filter(x -> x.isRight()).map(x -> new ErrorEvent<>(x.right().get(), ErrorEvent.ErrorReason.JSON_AVRO_DESERIALIZATION_FAILURE))); return v1.filter(x -> x.isLeft()).map(x -> x.left().get()); } else {return rdd.map(convertor::fromJson); } }).orElse(null); }
3.26
hudi_SourceFormatAdapter_isFieldNameSanitizingEnabled_rdh
/** * Config that automatically sanitizes the field names as per avro naming rules. * * @return enabled status. */ private boolean isFieldNameSanitizingEnabled() { return shouldSanitize; }
3.26
hudi_ArchiveTask_withProps_rdh
/** * JavaSparkContext to run spark job. */ private JavaSparkContext jsc;public Builder withProps(TypedProperties props) { this.f0 = props; return this; }
3.26
hudi_ArchiveTask_newBuilder_rdh
/** * Utility to create builder for {@link ArchiveTask}. * * @return Builder for {@link ArchiveTask}. */ public static Builder newBuilder() { return new Builder(); }
3.26
hudi_Pair_hashCode_rdh
/** * <p> * Returns a suitable hash code. The hash code follows the definition in {@code Map.Entry}. * </p> * * @return the hash code */ @Override public int hashCode() { // see Map.Entry API specification return (getKey() == null ? 0 : getKey().hashCode()) ^ (getValue() == null ? 0 : getValue().hashCode()); }
3.26
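The hash code above follows the Map.Entry contract (key hash XOR value hash), so a pair and a standard map entry with the same contents hash identically. A tiny check of that property against java.util.AbstractMap.SimpleEntry; the Pair class itself is not reproduced here:

import java.util.AbstractMap;
import java.util.Map;

public final class EntryHashCheck {
    /** The Map.Entry hash definition: key hash XOR value hash, with null mapping to 0. */
    static int entryHash(Object key, Object value) {
        return (key == null ? 0 : key.hashCode()) ^ (value == null ? 0 : value.hashCode());
    }

    public static void main(String[] args) {
        Map.Entry<String, Integer> reference = new AbstractMap.SimpleEntry<>("a", 1);
        // Agreement with the JDK entry means such pairs interoperate with hash-based collections.
        System.out.println(entryHash("a", 1) == reference.hashCode()); // true
    }
}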
hudi_Pair_toString_rdh
/** * <p> * Formats the receiver using the given format. * </p> * * <p> * This uses {@link java.util.Formattable} to perform the formatting. Two variables may be used to embed the left and * right elements. Use {@code %1$s} for the left element (key) and {@code %2$s} for the right element (value). The * default format used by {@code toString()} is {@code (%1$s,%2$s)}. * </p> * * @param format * the format string, optionally containing {@code %1$s} and {@code %2$s}, not null * @return the formatted string, not null */ public String toString(final String format) { return String.format(format, getLeft(), getRight()); }
3.26
hudi_Pair_of_rdh
/** * <p> * Obtains an immutable pair of two objects inferring the generic types. * </p> * * <p> * This factory allows the pair to be created using inference to obtain the generic types. * </p> * * @param <L> * the left element type * @param <R> * the right element type * @param left * the left element, may be null * @param right * the right element, may be null * @return a pair formed from the two parameters, not null */ public static <L, R> Pair<L, R> of(final L left, final R right) { return new ImmutablePair<>(left, right); }
3.26
hudi_Pair_compareTo_rdh
// ----------------------------------------------------------------------- /** * <p> * Compares the pair based on the left element followed by the right element. The types must be {@code Comparable}. * </p> * * @param other * the other pair, not null * @return negative if this is less, zero if equal, positive if greater */ @Override public int compareTo(final Pair<L, R> other) { checkComparable(this); checkComparable(other); Comparable thisLeft = ((Comparable) (getLeft())); Comparable thisRight = ((Comparable) (getRight())); Comparable otherLeft = ((Comparable) (other.getLeft())); Comparable otherRight = ((Comparable) (other.getRight())); if (thisLeft.compareTo(otherLeft) == 0) { return thisRight.compareTo(otherRight); } else { return thisLeft.compareTo(otherLeft); } }
3.26
hudi_Pair_equals_rdh
/** * <p> * Compares this pair to another based on the two elements. * </p> * * @param obj * the object to compare to, null returns false * @return true if the elements of the pair are equal */ @Override public boolean equals(final Object obj) { if (obj == this) {return true; } if (obj instanceof Map.Entry<?, ?>) { final Map.Entry<?, ?> other = ((Map.Entry<?, ?>) (obj)); return getKey().equals(other.getKey()) && getValue().equals(other.getValue()); } return false; }
3.26
hudi_Key_incrementWeight_rdh
/** * Increments the weight of <i>this</i> key by one. */ public void incrementWeight() { this.weight++; }
3.26
hudi_Key_readFields_rdh
/** * Deserialize the fields of this object from <code>in</code>. * * <p>For efficiency, implementations should attempt to re-use storage in the * existing object where possible.</p> * * @param in * <code>DataInput</code> to deserialize this object from. * @throws IOException */public void readFields(DataInput in) throws IOException { this.bytes = new byte[in.readInt()]; in.readFully(this.bytes); weight = in.readDouble(); }
3.26
hudi_Key_getBytes_rdh
/** * * @return byte[] The value of <i>this</i> key. */ public byte[] getBytes() { return this.bytes; }
3.26
hudi_Key_m0_rdh
/** * * @return The weight associated with <i>this</i> key. */ public double m0() { return weight;}
3.26
hudi_Key_write_rdh
/** * Serialize the fields of this object to <code>out</code>. * * @param out * <code>DataOutput</code> to serialize this object into. * @throws IOException */ public void write(DataOutput out) throws IOException {out.writeInt(bytes.length); out.write(bytes); out.writeDouble(weight); }
3.26
hudi_Key_set_rdh
/** * * @param value * The byte value of this key; must not be null * @param weight * The weight associated with this key */ public void set(byte[] value, double weight) { if (value == null) { throw new IllegalArgumentException("value cannot be null"); } this.bytes = value; this.weight = weight; }
3.26
hudi_Key_compareTo_rdh
// Comparable @Override public int compareTo(Key other) { int result = this.bytes.length - other.getBytes().length; for (int i = 0; (result == 0) && (i < bytes.length); i++) { result = this.bytes[i] - other.bytes[i]; } if (result == 0) { result = ((int) (this.weight - other.weight)); } return result; }
3.26
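Key.compareTo above orders keys by byte-array length, then byte by byte, then by weight. A standalone sketch of the same ordering; note it keeps the signed byte comparison of the original but compares weights with Double.compare instead of truncating the difference to int:

public final class KeyOrderSketch {
    /** Compares two keys by length, then by (signed) byte values, then by weight. */
    static int compare(byte[] a, double weightA, byte[] b, double weightB) {
        int result = a.length - b.length;
        for (int i = 0; result == 0 && i < a.length; i++) {
            result = a[i] - b[i]; // signed byte comparison, as in the original
        }
        if (result == 0) {
            result = Double.compare(weightA, weightB); // avoids truncating a double difference to int
        }
        return result;
    }

    public static void main(String[] args) {
        System.out.println(compare(new byte[]{1, 2}, 1.0, new byte[]{1, 3}, 1.0)); // negative: {1,2} < {1,3}
    }
}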
hudi_InstantRange_builder_rdh
/** * Returns the builder. */ public static Builder builder() { return new Builder(); }
3.26
hudi_SecondaryIndexManager_refresh_rdh
/** * Refresh the specific secondary index * * @param metaClient * Hoodie table meta client * @param indexName * The target secondary index name */ public void refresh(HoodieTableMetaClient metaClient, String indexName) { // TODO }
3.26
hudi_SecondaryIndexManager_show_rdh
/** * Show secondary indexes from hoodie table * * @param metaClient * Hoodie table meta client * @return Indexes in this table */ public Option<List<HoodieSecondaryIndex>> show(HoodieTableMetaClient metaClient) { return SecondaryIndexUtils.getSecondaryIndexes(metaClient); }
3.26
hudi_SecondaryIndexManager_indexExists_rdh
/** * Check if the specific secondary index exists. When dropping a secondary index, * only the index name is checked, but when adding a secondary index, we should also * check the index type and columns when the index name is different. * * @param secondaryIndexes * Current secondary indexes in this table * @param indexName * The index name of target secondary index * @param indexType * The index type of target secondary index * @param colNames * The column names of target secondary index * @return true if secondary index exists */ private boolean indexExists(Option<List<HoodieSecondaryIndex>> secondaryIndexes, String indexName, Option<String> indexType, Option<Set<String>> colNames) { return secondaryIndexes.map(indexes -> indexes.stream().anyMatch(index -> { if (index.getIndexName().equals(indexName)) { return true; } else if (indexType.isPresent() && colNames.isPresent()) { // When secondary index names are different, we should check index type // and index columns to avoid repeatedly creating the same index. // For example: // create index idx_name on test using lucene (name); // create index idx_name_1 on test using lucene (name); return index.getIndexType().name().equalsIgnoreCase(indexType.get()) && CollectionUtils.diff(index.getColumns().keySet(), colNames.get()).isEmpty(); } return false; })).orElse(false); }
3.26
hudi_SecondaryIndexManager_create_rdh
/** * Create a secondary index for hoodie table, two steps will be performed: * 1. Add secondary index metadata to hoodie.properties * 2. Trigger build secondary index * * @param metaClient * Hoodie table meta client * @param indexName * The unique secondary index name * @param indexType * Index type * @param ignoreIfExists * Whether ignore the creation if the specific secondary index exists * @param columns * The columns referenced by this secondary index, each column * has its own options * @param options * Options for this secondary index */ public void create(HoodieTableMetaClient metaClient, String indexName, String indexType, boolean ignoreIfExists, LinkedHashMap<String, Map<String, String>> columns, Map<String, String> options) { Option<List<HoodieSecondaryIndex>> secondaryIndexes = SecondaryIndexUtils.getSecondaryIndexes(metaClient); Set<String> colNames = columns.keySet(); Schema avroSchema; try { avroSchema = new TableSchemaResolver(metaClient).getTableAvroSchema(false); } catch (Exception e) { throw new HoodieSecondaryIndexException("Failed to get table avro schema: " + metaClient.getTableConfig().getTableName()); } for (String col : colNames) { if (avroSchema.getField(col) == null) { throw new HoodieSecondaryIndexException("Field not exists: " + col); } } if (indexExists(secondaryIndexes, indexName, Option.of(indexType), Option.of(colNames))) { if (ignoreIfExists) { return; } else { throw new HoodieSecondaryIndexException("Secondary index already exists: " + indexName); } } HoodieSecondaryIndex secondaryIndexToAdd = HoodieSecondaryIndex.builder().setIndexName(indexName).setIndexType(indexType).setColumns(columns).setOptions(options).build();List<HoodieSecondaryIndex> newSecondaryIndexes = secondaryIndexes.map(h -> { h.add(secondaryIndexToAdd); return h; }).orElse(Collections.singletonList(secondaryIndexToAdd)); newSecondaryIndexes.sort(new HoodieSecondaryIndex.HoodieIndexCompactor()); // Persistence secondary indexes' metadata to hoodie.properties file Properties updatedProps = new Properties(); updatedProps.put(HoodieTableConfig.SECONDARY_INDEXES_METADATA.key(), SecondaryIndexUtils.toJsonString(newSecondaryIndexes)); HoodieTableConfig.update(metaClient.getFs(), new Path(metaClient.getMetaPath()), updatedProps); LOG.info("Success to add secondary index metadata: {}", secondaryIndexToAdd); // TODO: build index }
3.26
hudi_LocalRegistry_m0_rdh
/** * Get all Counter type metrics. */ @Override public Map<String, Long> m0(boolean prefixWithRegistryName) { HashMap<String, Long> countersMap = new HashMap<>(); counters.forEach((k, v) -> { String key = (prefixWithRegistryName) ? (name + ".") + k : k; countersMap.put(key, v.getValue()); }); return countersMap; }
3.26
hudi_ClusteringCommitSink_validateWriteResult_rdh
/** * Validate actions taken by clustering. In the first implementation, we validate at least one new file is written. * But we can extend this to add more validation. E.g. number of records read = number of records written etc. * We can also make these validations in BaseCommitActionExecutor to reuse pre-commit hooks for multiple actions. */ private static void validateWriteResult(HoodieClusteringPlan clusteringPlan, String instantTime, HoodieWriteMetadata<List<WriteStatus>> writeMetadata) { if (writeMetadata.getWriteStatuses().isEmpty()) { throw new HoodieClusteringException(((((("Clustering plan produced 0 WriteStatus for " + instantTime) + " #groups: ") + clusteringPlan.getInputGroups().size()) + " expected at least ") + clusteringPlan.getInputGroups().stream().mapToInt(HoodieClusteringGroup::getNumOutputFileGroups).sum()) + " write statuses"); } }
3.26
hudi_ClusteringCommitSink_m0_rdh
/** * Condition to commit: the commit buffer has the same size as the clustering plan operations * and all the clustering commit events {@link ClusteringCommitEvent} have the same clustering instant time. * * @param instant * Clustering commit instant time * @param events * Commit events ever received for the instant */ private void m0(String instant, Collection<ClusteringCommitEvent> events) { HoodieClusteringPlan clusteringPlan = clusteringPlanCache.computeIfAbsent(instant, k -> { try { Option<Pair<HoodieInstant, HoodieClusteringPlan>> clusteringPlanOption = ClusteringUtils.getClusteringPlan(this.writeClient.getHoodieTable().getMetaClient(), HoodieTimeline.getReplaceCommitInflightInstant(instant)); return clusteringPlanOption.get().getRight(); } catch (Exception e) {throw new HoodieException(e);} }); boolean isReady = clusteringPlan.getInputGroups().size() == events.size(); if (!isReady) { return;} if (events.stream().anyMatch(ClusteringCommitEvent::isFailed)) { try {// handle failure case ClusteringUtil.rollbackClustering(table, writeClient, instant); } finally { // remove commitBuffer to avoid obsolete metadata commit reset(instant); } return; }try { doCommit(instant, clusteringPlan, events); } catch (Throwable throwable) { // make it fail-safe f0.error("Error while committing clustering instant: " + instant, throwable); } finally { // reset the status reset(instant); } }
3.26
hudi_HoodieTimeGeneratorConfig_defaultConfig_rdh
/** * Returns the default configuration. */ public static HoodieTimeGeneratorConfig defaultConfig(String tablePath) { return newBuilder().withPath(tablePath).build(); }
3.26
hudi_BaseHoodieDateTimeParser_getOutputDateFormat_rdh
/** * Returns the output date format in which the partition paths will be created for the hudi dataset. */ public String getOutputDateFormat() { return getStringWithAltKeys(config, TIMESTAMP_OUTPUT_DATE_FORMAT); }
3.26
hudi_BaseHoodieDateTimeParser_getConfigInputDateFormatDelimiter_rdh
/** * Returns the input date format delimiter, comma by default. */public String getConfigInputDateFormatDelimiter() { return this.configInputDateFormatDelimiter; }
3.26
hudi_MarkerDirState_parseMarkerFileIndex_rdh
/** * Parses the marker file index from the marker file path. * <p> * E.g., if the marker file path is /tmp/table/.hoodie/.temp/000/MARKERS3, the index returned is 3. * * @param markerFilePathStr * full path of marker file * @return the marker file index */ private int parseMarkerFileIndex(String markerFilePathStr) { String markerFileName = new Path(markerFilePathStr).getName(); int prefixIndex = markerFileName.indexOf(MARKERS_FILENAME_PREFIX); if (prefixIndex < 0) { return -1;} try { return Integer.parseInt(markerFileName.substring(prefixIndex + MARKERS_FILENAME_PREFIX.length())); } catch (NumberFormatException nfe) { LOG.error("Failed to parse marker file index from " + markerFilePathStr); throw new HoodieException(nfe.getMessage(), nfe); } }
3.26
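parseMarkerFileIndex above pulls the numeric suffix out of a MARKERS<n> file name, e.g. MARKERS3 -> 3. The same suffix parsing reduced to plain string handling; the prefix literal is assumed to match the class's MARKERS_FILENAME_PREFIX:

public final class MarkerIndexParser {
    private static final String MARKERS_FILENAME_PREFIX = "MARKERS"; // assumed value of the prefix constant

    /** Returns the numeric suffix of a marker file name, or -1 if it cannot be parsed. */
    static int parseIndex(String fileName) {
        int prefixIndex = fileName.indexOf(MARKERS_FILENAME_PREFIX);
        if (prefixIndex < 0) {
            return -1;
        }
        try {
            return Integer.parseInt(fileName.substring(prefixIndex + MARKERS_FILENAME_PREFIX.length()));
        } catch (NumberFormatException nfe) {
            return -1; // the original logs and rethrows; the sketch just signals failure
        }
    }

    public static void main(String[] args) {
        System.out.println(parseIndex("MARKERS3"));     // 3
        System.out.println(parseIndex("MARKERS.type")); // -1 (no numeric suffix)
    }
}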
hudi_MarkerDirState_m0_rdh
/** * Adds a {@code MarkerCreationFuture} instance from a marker * creation request to the queue. * * @param future * {@code MarkerCreationFuture} instance. */ public void m0(MarkerCreationFuture future) { synchronized(markerCreationFutures) { markerCreationFutures.add(future); } }
3.26
hudi_MarkerDirState_flushMarkersToFile_rdh
/** * Flushes markers to the underlying file. * * @param markerFileIndex * file index to use. */ private void flushMarkersToFile(int markerFileIndex) { LOG.debug(((("Write to " + markerDirPath) + "/") + MARKERS_FILENAME_PREFIX) + markerFileIndex); HoodieTimer timer = HoodieTimer.start(); Path markersFilePath = new Path(markerDirPath, MARKERS_FILENAME_PREFIX + markerFileIndex); FSDataOutputStream v20 = null; BufferedWriter bufferedWriter = null; try { v20 = fileSystem.create(markersFilePath); bufferedWriter = new BufferedWriter(new OutputStreamWriter(v20, StandardCharsets.UTF_8)); bufferedWriter.write(fileMarkersMap.get(markerFileIndex).toString()); } catch (IOException e) { throw new HoodieIOException("Failed to overwrite marker file " + markersFilePath, e); } finally {closeQuietly(bufferedWriter); closeQuietly(v20);} LOG.debug(((markersFilePath.toString() + " written in ") + timer.endTimer()) + " ms"); }
3.26
hudi_MarkerDirState_getAllMarkers_rdh
/** * * @return all markers in the marker directory. */ public Set<String> getAllMarkers() { return allMarkers; }
3.26
hudi_MarkerDirState_getPendingMarkerCreationRequests_rdh
/** * * @param shouldClear * Should clear the internal request list or not. * @return futures of pending marker creation requests. */ public List<MarkerCreationFuture> getPendingMarkerCreationRequests(boolean shouldClear) { List<MarkerCreationFuture> pendingFutures; synchronized(markerCreationFutures) { if (markerCreationFutures.isEmpty()) { return new ArrayList<>(); } pendingFutures = new ArrayList<>(markerCreationFutures); if (shouldClear) { markerCreationFutures.clear(); } } return pendingFutures; }
3.26
hudi_MarkerDirState_writeMarkerTypeToFile_rdh
/** * Writes marker type, "TIMELINE_SERVER_BASED", to file. */ private void writeMarkerTypeToFile() { Path dirPath = new Path(markerDirPath); try { if ((!fileSystem.exists(dirPath)) || (!MarkerUtils.doesMarkerTypeFileExist(fileSystem, markerDirPath))) { // There is no existing marker directory, create a new directory and write marker type fileSystem.mkdirs(dirPath); MarkerUtils.writeMarkerTypeToFile(MarkerType.TIMELINE_SERVER_BASED, fileSystem, markerDirPath); } } catch (IOException e) { throw new HoodieIOException((("Failed to write marker type file in " + markerDirPath) + ": ") + e.getMessage(), e); } }
3.26
hudi_MarkerDirState_markFileAsAvailable_rdh
/** * Marks the file as available to use again. * * @param fileIndex * file index */ public void markFileAsAvailable(int fileIndex) { synchronized(markerCreationProcessingLock) { threadUseStatus.set(fileIndex, false); } }
3.26
hudi_MarkerDirState_deleteAllMarkers_rdh
/** * Deletes markers in the directory. * * @return {@code true} if successful; {@code false} otherwise. */public boolean deleteAllMarkers() { boolean result = FSUtils.deleteDir(hoodieEngineContext, fileSystem, new Path(markerDirPath), parallelism); allMarkers.clear(); fileMarkersMap.clear(); return result; }
3.26
hudi_MarkerDirState_addMarkerToMap_rdh
/** * Adds a new marker to the in-memory map. * * @param fileIndex * Marker file index number. * @param markerName * Marker name. */ private void addMarkerToMap(int fileIndex, String markerName) { allMarkers.add(markerName); StringBuilder stringBuilder = fileMarkersMap.computeIfAbsent(fileIndex, k -> new StringBuilder(16384)); stringBuilder.append(markerName); stringBuilder.append('\n'); }
3.26
hudi_MarkerDirState_exists_rdh
/** * * @return {@code true} if the marker directory exists in the system. */ public boolean exists() { try { return fileSystem.exists(new Path(markerDirPath)); } catch (IOException ioe) { throw new HoodieIOException(ioe.getMessage(), ioe); } }
3.26
hudi_MarkerDirState_syncMarkersFromFileSystem_rdh
/** * Syncs all markers maintained in the underlying files under the marker directory in the file system. */ private void syncMarkersFromFileSystem() { Map<String, Set<String>> fileMarkersSetMap = MarkerUtils.readTimelineServerBasedMarkersFromFileSystem(markerDirPath, fileSystem, hoodieEngineContext, parallelism); for (String markersFilePathStr : fileMarkersSetMap.keySet()) { Set<String> fileMarkers = fileMarkersSetMap.get(markersFilePathStr); if (!fileMarkers.isEmpty()) { int index = parseMarkerFileIndex(markersFilePathStr); if (index >= 0) { fileMarkersMap.put(index, new StringBuilder(StringUtils.join(",", fileMarkers))); allMarkers.addAll(fileMarkers); } } } try { if (MarkerUtils.doesMarkerTypeFileExist(fileSystem, markerDirPath)) { isMarkerTypeWritten = true; } } catch (IOException e) { throw new HoodieIOException(e.getMessage(), e); } }
3.26
hudi_MarkerDirState_fetchPendingMarkerCreationRequests_rdh
/** * * @return futures of pending marker creation requests and removes them from the list. */ public List<MarkerCreationFuture> fetchPendingMarkerCreationRequests() { return getPendingMarkerCreationRequests(true); }
3.26
hudi_MarkerDirState_processMarkerCreationRequests_rdh
/** * Processes pending marker creation requests. * * @param pendingMarkerCreationFutures * futures of pending marker creation requests * @param fileIndex * file index to use to write markers */ public void processMarkerCreationRequests(final List<MarkerCreationFuture> pendingMarkerCreationFutures, int fileIndex) { if (pendingMarkerCreationFutures.isEmpty()) { markFileAsAvailable(fileIndex); return; } LOG.debug((((((("timeMs=" + System.currentTimeMillis()) + " markerDirPath=") + markerDirPath) + " numRequests=") + pendingMarkerCreationFutures.size()) + " fileIndex=") + fileIndex); boolean shouldFlushMarkers = false; synchronized(markerCreationProcessingLock) { for (MarkerCreationFuture future : pendingMarkerCreationFutures) { String markerName = future.getMarkerName(); boolean exists = allMarkers.contains(markerName); if (!exists) { if (conflictDetectionStrategy.isPresent()) { try { conflictDetectionStrategy.get().detectAndResolveConflictIfNecessary(); } catch (HoodieEarlyConflictDetectionException he) { LOG.warn("Detected the write conflict due to a concurrent writer, " + "failing the marker creation as the early conflict detection is enabled", he); future.setResult(false); continue; } catch (Exception e) { LOG.warn("Failed to execute early conflict detection." + e.getMessage()); // When early conflict detection fails to execute, we still allow the marker creation // to continue addMarkerToMap(fileIndex, markerName); future.setResult(true); shouldFlushMarkers = true; continue; } } addMarkerToMap(fileIndex, markerName); shouldFlushMarkers = true; }future.setResult(!exists); } if (!isMarkerTypeWritten) { // Create marker directory and write marker type to MARKERS.type writeMarkerTypeToFile(); isMarkerTypeWritten = true; } } if (shouldFlushMarkers) { flushMarkersToFile(fileIndex); } markFileAsAvailable(fileIndex); for (MarkerCreationFuture future : pendingMarkerCreationFutures) { try { future.complete(jsonifyResult(future.getContext(), future.isSuccessful(), metricsRegistry, OBJECT_MAPPER, LOG)); } catch (JsonProcessingException e) { throw new HoodieException("Failed to JSON encode the value", e); } } }
3.26
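A simplified sketch of the batching idea in processMarkerCreationRequests: decide each request's outcome under a lock, then complete the corresponding futures outside the lock. The conflict-detection, marker-type, and file-flush steps are omitted, and all names are illustrative.

import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;

class MarkerBatchProcessor {
  private final Object lock = new Object();
  private final Set<String> existingMarkers = new HashSet<>();

  // Each entry pairs a requested marker name with the future awaiting the outcome.
  void process(List<Map.Entry<String, CompletableFuture<Boolean>>> batch) {
    List<Map.Entry<CompletableFuture<Boolean>, Boolean>> results = new ArrayList<>();
    synchronized (lock) {
      for (Map.Entry<String, CompletableFuture<Boolean>> request : batch) {
        // Creation succeeds only if the marker did not exist before.
        boolean created = existingMarkers.add(request.getKey());
        results.add(new AbstractMap.SimpleEntry<>(request.getValue(), created));
      }
    }
    // Complete the futures outside the lock so waiters do not block new requests.
    for (Map.Entry<CompletableFuture<Boolean>, Boolean> r : results) {
      r.getKey().complete(r.getValue());
    }
  }
}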
hudi_BaseWriteHelper_deduplicateRecords_rdh
/** * Deduplicate Hoodie records, using the given deduplication function. * * @param records * hoodieRecords to deduplicate * @param parallelism * parallelism or partitions to be used while reducing/deduplicating * @return Collection of HoodieRecord already deduplicated */ public I deduplicateRecords(I records, HoodieTable<T, I, K, O> table, int parallelism) { HoodieRecordMerger recordMerger = HoodieRecordUtils.mergerToPreCombineMode(table.getConfig().getRecordMerger()); return deduplicateRecords(records, table.getIndex(), parallelism, table.getConfig().getSchema(), table.getConfig().getProps(), recordMerger); }
3.26
hudi_GenericRecordFullPayloadSizeEstimator_getNonNull_rdh
/** * Get the nonNull Schema of a given UNION Schema. * * @param schema * @return */ protected Schema getNonNull(Schema schema) { List<Schema> types = schema.getTypes(); return types.get(0).getType().equals(Type.NULL) ? types.get(1) : types.get(0); }
3.26
hudi_GenericRecordFullPayloadSizeEstimator_typeEstimate_rdh
/** * Estimate the size of a given schema according to their type. * * @param schema * schema to estimate. * @return Size of the given schema. */ private long typeEstimate(Schema schema) { Schema localSchema = schema; if (isOption(schema)) { localSchema = getNonNull(schema); } switch (localSchema.getType()) { case BOOLEAN : return 1; case DOUBLE :return 8; case FLOAT : return 4; case INT : return 4; case LONG : return 8; case STRING : return UUID.randomUUID().toString().length(); case ENUM : return 1; case RECORD : return estimate(localSchema); case ARRAY : if (GenericRecordFullPayloadGenerator.isPrimitive(localSchema.getElementType())) { counter.addAndGet(1); } Schema elementSchema = localSchema.getElementType(); return typeEstimate(elementSchema); case MAP : if (GenericRecordFullPayloadGenerator.isPrimitive(localSchema.getValueType())) { counter.addAndGet(1); } Schema valueSchema = localSchema.getValueType(); return UUID.randomUUID().toString().length() + typeEstimate(valueSchema); case BYTES : return UUID.randomUUID().toString().length(); case FIXED : return localSchema.getFixedSize(); default : throw new IllegalArgumentException("Cannot handle type: " + localSchema.getType()); } }
3.26
hudi_GenericRecordFullPayloadSizeEstimator_estimate_rdh
/** * This method estimates the size of the payload if all entries of this payload were populated with one value. * For example, a primitive data type such as String will be populated with a {@link UUID}, so the length is 36 bytes, * whereas a complex data type such as an Array of type Int will be populated with exactly 1 Integer value. */ protected int estimate(Schema schema) { long size = 0; for (Schema.Field f : schema.getFields()) { size += typeEstimate(f.schema()); } return ((int) (size)); }
3.26
hudi_FiveToSixUpgradeHandler_deleteCompactionRequestedFileFromAuxiliaryFolder_rdh
/** * See HUDI-6040. */ private void deleteCompactionRequestedFileFromAuxiliaryFolder(HoodieTable table) { HoodieTableMetaClient metaClient = table.getMetaClient(); HoodieTimeline compactionTimeline = metaClient.getActiveTimeline().filterPendingCompactionTimeline().filter(instant -> instant.getState() == HoodieInstant.State.REQUESTED); compactionTimeline.getInstantsAsStream().forEach(deleteInstant -> { LOG.info((("Deleting instant " + deleteInstant) + " in auxiliary meta path ") + metaClient.getMetaAuxiliaryPath()); Path metaFile = new Path(metaClient.getMetaAuxiliaryPath(), deleteInstant.getFileName()); try { if (metaClient.getFs().exists(metaFile)) { metaClient.getFs().delete(metaFile, false); LOG.info("Deleted instant file in auxiliary meta path : " + metaFile); } } catch (IOException e) { throw new HoodieUpgradeDowngradeException(HoodieTableVersion.FIVE.versionCode(), HoodieTableVersion.SIX.versionCode(), true, e); } }); }
3.26
hudi_HoodieLogBlock_getContentBytes_rdh
// Return the bytes representation of the data belonging to a LogBlock public byte[] getContentBytes() throws IOException { throw new HoodieException("No implementation was provided"); }
3.26
hudi_HoodieLogBlock_getLogMetadata_rdh
/** * Convert bytes to LogMetadata, follow the same order as {@link HoodieLogBlock#getLogMetadataBytes}. */public static Map<HeaderMetadataType, String> getLogMetadata(DataInputStream dis) throws IOException { Map<HeaderMetadataType, String> metadata = new HashMap<>(); // 1. Read the metadata written out int metadataCount = dis.readInt(); try { while (metadataCount > 0) { int metadataEntryIndex = dis.readInt(); int metadataEntrySize = dis.readInt(); byte[] metadataEntry = new byte[metadataEntrySize]; dis.readFully(metadataEntry, 0, metadataEntrySize); metadata.put(HeaderMetadataType.values()[metadataEntryIndex], new String(metadataEntry)); metadataCount--;} return metadata; } catch (EOFException eof) { throw new IOException("Could not read metadata fields ", eof); } }
3.26
hudi_HoodieLogBlock_tryReadContent_rdh
/** * Read or skip the block content of a log block in the log file, depending on whether lazy reading is enabled in * {@link HoodieMergedLogRecordScanner}. */ public static Option<byte[]> tryReadContent(FSDataInputStream inputStream, Integer contentLength, boolean readLazily) throws IOException { if (readLazily) { // Seek to the end of the content block inputStream.seek(inputStream.getPos() + contentLength); return Option.empty(); } // TODO re-use buffer if stream is backed by buffer // Read the contents in memory byte[] content = new byte[contentLength]; inputStream.readFully(content, 0, contentLength); return Option.of(content); }
3.26
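A standalone sketch of the lazy-versus-eager choice in tryReadContent, using RandomAccessFile and Optional from the JDK in place of FSDataInputStream and Hudi's Option; the class name is a placeholder.

import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.Optional;

class BlockContentReader {
  // Either skips over the content (lazy) or reads it fully into memory (eager).
  static Optional<byte[]> tryReadContent(RandomAccessFile file, int contentLength,
                                         boolean readLazily) throws IOException {
    if (readLazily) {
      // Remember nothing; just position the stream after the content block.
      file.seek(file.getFilePointer() + contentLength);
      return Optional.empty();
    }
    byte[] content = new byte[contentLength];
    file.readFully(content, 0, contentLength);
    return Optional.of(content);
  }
}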
hudi_HoodieLogBlock_deflate_rdh
/** * After the content bytes are converted into the required data structure by a log block, deflate the content to release * the byte[] and relieve memory pressure when GC kicks in. NOTE: This still leaves the heap fragmented. */ protected void deflate() { content = Option.empty(); }
3.26
hudi_HoodieLogBlock_getLogMetadataBytes_rdh
/** * Convert log metadata to bytes 1. Write size of metadata 2. Write enum ordinal 3. Write actual bytes */ public static byte[] getLogMetadataBytes(Map<HeaderMetadataType, String> metadata) throws IOException { ByteArrayOutputStream baos = new ByteArrayOutputStream();DataOutputStream output = new DataOutputStream(baos); output.writeInt(metadata.size()); for (Map.Entry<HeaderMetadataType, String> entry : metadata.entrySet()) { output.writeInt(entry.getKey().ordinal()); byte[] bytes = getUTF8Bytes(entry.getValue()); output.writeInt(bytes.length); output.write(bytes); } return baos.toByteArray(); }
3.26
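A self-contained round trip of the header layout written by getLogMetadataBytes and read back by getLogMetadata: an entry count, then for each entry the enum ordinal, the value length, and the value bytes. The enum here is a small stand-in for HeaderMetadataType.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

class HeaderCodec {
  enum HeaderType { INSTANT_TIME, SCHEMA }  // stand-in for HeaderMetadataType

  static byte[] encode(Map<HeaderType, String> metadata) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(baos);
    out.writeInt(metadata.size());                         // 1. number of entries
    for (Map.Entry<HeaderType, String> e : metadata.entrySet()) {
      out.writeInt(e.getKey().ordinal());                  // 2. enum ordinal
      byte[] bytes = e.getValue().getBytes(StandardCharsets.UTF_8);
      out.writeInt(bytes.length);                          // 3. value length
      out.write(bytes);                                    // 4. value bytes
    }
    return baos.toByteArray();
  }

  static Map<HeaderType, String> decode(byte[] data) throws IOException {
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(data));
    Map<HeaderType, String> metadata = new HashMap<>();
    int count = in.readInt();
    while (count-- > 0) {
      int ordinal = in.readInt();
      byte[] bytes = new byte[in.readInt()];
      in.readFully(bytes);
      metadata.put(HeaderType.values()[ordinal], new String(bytes, StandardCharsets.UTF_8));
    }
    return metadata;
  }
}

Encoding a map and decoding the resulting bytes should return an equal map, which is the property the two Hudi methods rely on.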
hudi_HoodieLogBlock_isCompactedLogBlock_rdh
/** * Compacted blocks are created using log compaction, which merges the consecutive blocks together and creates * a single large block with all the changes. */ public boolean isCompactedLogBlock() { return logBlockHeader.containsKey(HeaderMetadataType.COMPACTED_BLOCK_TIMES); } /** * * @return A {@link Roaring64NavigableMap} bitmap containing the record positions in long type if the {@link HeaderMetadataType#RECORD_POSITIONS} block header exists; otherwise, an empty {@link Roaring64NavigableMap}
3.26
hudi_HoodieLogBlock_inflate_rdh
/** * When lazyReading of blocks is turned on, inflate the content of a log block from disk. */ protected void inflate() throws HoodieIOException { checkState(!content.isPresent(), "Block has already been inflated"); checkState(inputStream != null, "Block should have input-stream provided"); try { content = Option.of(new byte[((int) (this.getBlockContentLocation().get().getBlockSize()))]); inputStream.seek(this.getBlockContentLocation().get().getContentPositionInLogFile()); inputStream.readFully(content.get(), 0, content.get().length); inputStream.seek(this.getBlockContentLocation().get().getBlockEndPos()); } catch (IOException e) { // TODO : fs.open() and return inputstream again, need to pass FS configuration // because the inputstream might close/timeout for large number of log blocks to be merged inflate(); } }
3.26
hudi_HoodieInputFormatUtils_getTableMetaClientForBasePathUnchecked_rdh
/** * Extract HoodieTableMetaClient from a partition path (not base path). */ public static HoodieTableMetaClient getTableMetaClientForBasePathUnchecked(Configuration conf, Path partitionPath) throws IOException { Path baseDir = partitionPath; FileSystem fs = partitionPath.getFileSystem(conf); if (HoodiePartitionMetadata.hasPartitionMetadata(fs, partitionPath)) { HoodiePartitionMetadata metadata = new HoodiePartitionMetadata(fs, partitionPath); metadata.readFromFS(); int levels = metadata.getPartitionDepth(); baseDir = HoodieHiveUtils.getNthParent(partitionPath, levels); } else { for (int i = 0; i < partitionPath.depth(); i++) { if (fs.exists(new Path(baseDir, METAFOLDER_NAME))) { break; } else if (i == (partitionPath.depth() - 1)) { throw new TableNotFoundException(partitionPath.toString()); } else { baseDir = baseDir.getParent(); } } } LOG.info("Reading hoodie metadata from path " + baseDir.toString()); return HoodieTableMetaClient.builder().setConf(fs.getConf()).setBasePath(baseDir.toString()).build(); }
3.26
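A rough java.nio.file analogue of the parent-walking loop in getTableMetaClientForBasePathUnchecked: climb from a partition path toward the root until a directory containing the metafolder is found. The ".hoodie" folder name mirrors METAFOLDER_NAME; everything else is illustrative, and this sketch does not replicate the partition-metadata shortcut.

import java.nio.file.Files;
import java.nio.file.Path;
import java.util.NoSuchElementException;

class BasePathResolver {
  // Walks up from the partition path until a parent containing ".hoodie" is found.
  static Path findBasePath(Path partitionPath) {
    Path candidate = partitionPath;
    while (candidate != null) {
      if (Files.isDirectory(candidate.resolve(".hoodie"))) {
        return candidate;
      }
      candidate = candidate.getParent();
    }
    throw new NoSuchElementException("No Hudi table found above " + partitionPath);
  }
}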
hudi_HoodieInputFormatUtils_getAffectedPartitions_rdh
/** * Extract partitions touched by the commitsToCheck. * * @param commitsToCheck * @param tableMetaClient * @param timeline * @param inputPaths * @return * @throws IOException */ public static Option<String> getAffectedPartitions(List<HoodieInstant> commitsToCheck, HoodieTableMetaClient tableMetaClient, HoodieTimeline timeline, List<Path> inputPaths) throws IOException { Set<String> partitionsToList = new HashSet<>(); for (HoodieInstant commit : commitsToCheck) { HoodieCommitMetadata commitMetadata = HoodieCommitMetadata.fromBytes(timeline.getInstantDetails(commit).get(), HoodieCommitMetadata.class); partitionsToList.addAll(commitMetadata.getPartitionToWriteStats().keySet()); } if (partitionsToList.isEmpty()) { return Option.empty(); } String incrementalInputPaths = partitionsToList.stream().map(s -> StringUtils.isNullOrEmpty(s) ? tableMetaClient.getBasePath() : (tableMetaClient.getBasePath() + Path.SEPARATOR) + s).filter(s -> { /* Ensure to return only results from the original input path that has incremental changes This check is needed for the following corner case - When the caller invokes HoodieInputFormat.listStatus multiple times (with small batches of Hive partitions each time. Ex. Hive fetch task calls listStatus for every partition once) we do not want to accidentally return all incremental changes for the entire table in every listStatus() call. This will create redundant splits. Instead we only want to return the incremental changes (if so any) in that batch of input paths. NOTE on Hive queries that are executed using Fetch task: Since Fetch tasks invoke InputFormat.listStatus() per partition, Hoodie metadata can be listed in every such listStatus() call. In order to avoid this, it might be useful to disable fetch tasks using the hive session property for incremental queries: `set hive.fetch.task.conversion=none;` This would ensure Map Reduce execution is chosen for a Hive query, which combines partitions (comma separated) and calls InputFormat.listStatus() only once with all those partitions. */ for (Path path : inputPaths) { if (path.toString().endsWith(s)) { return true; } } return false; }).collect(Collectors.joining(",")); return StringUtils.isNullOrEmpty(incrementalInputPaths) ? Option.empty() : Option.of(incrementalInputPaths); }
3.26
hudi_HoodieInputFormatUtils_refreshFileStatus_rdh
/** * Checks the file status for a race condition which can set the file size to 0. 1. HiveInputFormat does * super.listStatus() and gets back a FileStatus[] 2. Then it creates the HoodieTableMetaClient for the paths listed. * 3. Generation of splits looks at FileStatus size to create splits, which skips this file * * @param conf * @param dataFile * @return */ private static HoodieBaseFile refreshFileStatus(Configuration conf, HoodieBaseFile dataFile) { Path dataPath = dataFile.getFileStatus().getPath(); try { if (dataFile.getFileSize() == 0) { FileSystem fs = dataPath.getFileSystem(conf); LOG.info("Refreshing file status " + dataFile.getPath()); return new HoodieBaseFile(fs.getFileStatus(dataPath), dataFile.getBootstrapBaseFile().orElse(null)); } return dataFile; } catch (IOException e) { throw new HoodieIOException("Could not get FileStatus on path " + dataPath); } }
3.26
hudi_HoodieInputFormatUtils_getCommitsForIncrementalQuery_rdh
/** * Get commits for incremental query from Hive map reduce configuration. * * @param job * @param tableName * @param timeline * @return */ public static Option<List<HoodieInstant>> getCommitsForIncrementalQuery(Job job, String tableName, HoodieTimeline timeline) { return Option.of(getHoodieTimelineForIncrementalQuery(job, tableName, timeline).getInstants()); }
3.26
hudi_HoodieInputFormatUtils_filterInstantsTimeline_rdh
/** * Filter any specific instants that we do not want to process. * example timeline: * <p> * t0 -> create bucket1.parquet * t1 -> create and append updates bucket1.log * t2 -> request compaction * t3 -> create bucket2.parquet * <p> * if compaction at t2 takes a long time, incremental readers on RO tables can move to t3 and would skip updates in t1 * <p> * To workaround this problem, we want to stop returning data belonging to commits > t2. * After compaction is complete, incremental reader would see updates in t2, t3, so on. * * @param timeline * @return */ public static HoodieDefaultTimeline filterInstantsTimeline(HoodieDefaultTimeline timeline) { HoodieDefaultTimeline commitsAndCompactionTimeline = timeline.getWriteTimeline(); Option<HoodieInstant> pendingCompactionInstant = commitsAndCompactionTimeline.filterPendingCompactionTimeline().firstInstant(); if (pendingCompactionInstant.isPresent()) { HoodieDefaultTimeline instantsTimeline = commitsAndCompactionTimeline.findInstantsBefore(pendingCompactionInstant.get().getTimestamp()); int numCommitsFilteredByCompaction = commitsAndCompactionTimeline.getCommitsTimeline().countInstants() - instantsTimeline.getCommitsTimeline().countInstants(); LOG.info(((("Earliest pending compaction instant is: " + pendingCompactionInstant.get().getTimestamp()) + " skipping ") + numCommitsFilteredByCompaction) + " commits"); return instantsTimeline; } else { return timeline; } }
3.26
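A stripped-down sketch of the filtering rule in filterInstantsTimeline, using plain timestamp strings instead of HoodieInstant objects: if a pending compaction exists, keep only instants strictly before it, otherwise keep everything. Names and the lexicographic comparison are illustrative assumptions.

import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;

class TimelineFilter {
  // Instant timestamps are strings that sort lexicographically.
  static List<String> filterBeforePendingCompaction(List<String> completedInstants,
                                                    Optional<String> earliestPendingCompaction) {
    if (!earliestPendingCompaction.isPresent()) {
      return completedInstants;
    }
    String cutoff = earliestPendingCompaction.get();
    return completedInstants.stream()
        .filter(ts -> ts.compareTo(cutoff) < 0)  // keep instants strictly before the compaction
        .collect(Collectors.toList());
  }

  public static void main(String[] args) {
    System.out.println(filterBeforePendingCompaction(
        Arrays.asList("t0", "t1", "t3"), Optional.of("t2")));  // prints [t0, t1]
  }
}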
hudi_HoodieInputFormatUtils_filterIncrementalFileStatus_rdh
/** * Filter a list of FileStatus based on commitsToCheck for incremental view. * * @param job * @param tableMetaClient * @param timeline * @param fileStatuses * @param commitsToCheck * @return */ public static List<FileStatus> filterIncrementalFileStatus(Job job, HoodieTableMetaClient tableMetaClient, HoodieTimeline timeline, FileStatus[] fileStatuses, List<HoodieInstant> commitsToCheck) throws IOException { TableFileSystemView.BaseFileOnlyView roView = new HoodieTableFileSystemView(tableMetaClient, timeline, fileStatuses); List<String> commitsList = commitsToCheck.stream().map(HoodieInstant::getTimestamp).collect(Collectors.toList()); List<HoodieBaseFile> filteredFiles = roView.getLatestBaseFilesInRange(commitsList).collect(Collectors.toList()); List<FileStatus> returns = new ArrayList<>(); for (HoodieBaseFile filteredFile : filteredFiles) { LOG.debug("Processing incremental hoodie file - " + filteredFile.getPath()); filteredFile = refreshFileStatus(job.getConfiguration(), filteredFile); returns.add(getFileStatus(filteredFile)); } LOG.info("Total paths to process after hoodie incremental filter " + filteredFiles.size()); return returns; }
3.26
hudi_HoodieInputFormatUtils_getTableMetaClientByPartitionPath_rdh
/** * Extract HoodieTableMetaClient by partition path. * * @param conf * The hadoop conf * @param partitions * The partitions * @return partition path to table meta client mapping */ public static Map<Path, HoodieTableMetaClient> getTableMetaClientByPartitionPath(Configuration conf, Set<Path> partitions) { Map<Path, HoodieTableMetaClient> metaClientMap = new HashMap<>(); return partitions.stream().collect(Collectors.toMap(Function.identity(), p -> { try { HoodieTableMetaClient metaClient = getTableMetaClientForBasePathUnchecked(conf, p); metaClientMap.put(p, metaClient); return metaClient; } catch (IOException e) { throw new HoodieIOException("Error creating hoodie meta client against : " + p, e); } })); }
3.26
hudi_HoodieInputFormatUtils_groupFileStatusForSnapshotPaths_rdh
/** * Takes in a list of filesStatus and a list of table metadata. Groups the files status list * based on given table metadata. * * @param fileStatuses * @param fileExtension * @param metaClientList * @return * @throws IOException */public static Map<HoodieTableMetaClient, List<FileStatus>> groupFileStatusForSnapshotPaths(FileStatus[] fileStatuses, String fileExtension, Collection<HoodieTableMetaClient> metaClientList) { // This assumes the paths for different tables are grouped together Map<HoodieTableMetaClient, List<FileStatus>> grouped = new HashMap<>(); HoodieTableMetaClient metadata = null; for (FileStatus status : fileStatuses) { Path inputPath = status.getPath(); if (!inputPath.getName().endsWith(fileExtension)) { // FIXME(vc): skip non data files for now. This wont be needed once log file name start // with "." continue; } if ((metadata == null) || (!inputPath.toString().contains(metadata.getBasePath()))) { for (HoodieTableMetaClient metaClient : metaClientList) { if (inputPath.toString().contains(metaClient.getBasePath())) { metadata = metaClient; if (!grouped.containsKey(metadata)) { grouped.put(metadata, new ArrayList<>()); } break; } } } grouped.get(metadata).add(status); }return grouped; }
3.26
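A simplified sketch of the grouping idea in groupFileStatusForSnapshotPaths: assign each data file path to the table whose base path it contains, skipping files without the expected extension. Plain strings stand in for FileStatus and HoodieTableMetaClient, and the caching of the last matched table is omitted.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class SnapshotPathGrouper {
  // Groups file paths under the first base path that is a prefix of the file path.
  static Map<String, List<String>> groupByBasePath(List<String> filePaths,
                                                   String fileExtension,
                                                   List<String> basePaths) {
    Map<String, List<String>> grouped = new HashMap<>();
    for (String file : filePaths) {
      if (!file.endsWith(fileExtension)) {
        continue;  // skip non-data files
      }
      for (String basePath : basePaths) {
        if (file.startsWith(basePath)) {
          grouped.computeIfAbsent(basePath, k -> new ArrayList<>()).add(file);
          break;
        }
      }
    }
    return grouped;
  }
}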
hudi_HoodieInputFormatUtils_listAffectedFilesForCommits_rdh
/** * Iterate through a list of commit metadata in natural order, and extract the file status of * all affected files from the commits metadata grouping by file full path. If a file has * been touched multiple times in the given commits, the return value keeps the status * from the latest commit. * * @param basePath * The table base path * @param metadataList * The metadata list to read the data from * @return the affected file status array */ public static FileStatus[] listAffectedFilesForCommits(Configuration hadoopConf, Path basePath, List<HoodieCommitMetadata> metadataList) { // TODO: Use HoodieMetaTable to extract affected file directly. HashMap<String, FileStatus> fullPathToFileStatus = new HashMap<>(); // Iterate through the given commits. for (HoodieCommitMetadata metadata : metadataList) { fullPathToFileStatus.putAll(metadata.getFullPathToFileStatus(hadoopConf, basePath.toString())); } return fullPathToFileStatus.values().toArray(new FileStatus[0]); }
3.26
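A small sketch of the latest-commit-wins behaviour in listAffectedFilesForCommits: because the commit metadata list is iterated oldest to newest and the HashMap is keyed by full path, a later putAll overwrites earlier entries for the same file. Strings stand in for FileStatus here.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class AffectedFilesCollector {
  // Each commit contributes a map of full file path -> file status (here just a label).
  static Map<String, String> collectLatest(List<Map<String, String>> commitsOldestFirst) {
    Map<String, String> fullPathToStatus = new HashMap<>();
    for (Map<String, String> commit : commitsOldestFirst) {
      // Later commits overwrite earlier entries for the same path.
      fullPathToStatus.putAll(commit);
    }
    return fullPathToStatus;
  }

  public static void main(String[] args) {
    Map<String, String> c1 = new HashMap<>();
    c1.put("/tbl/p1/f1.parquet", "commit-1");
    Map<String, String> c2 = new HashMap<>();
    c2.put("/tbl/p1/f1.parquet", "commit-2");
    System.out.println(collectLatest(Arrays.asList(c1, c2)));  // {/tbl/p1/f1.parquet=commit-2}
  }
}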