name | code_snippet | score
---|---|---|
hudi_TableSchemaResolver_m0_rdh
|
/**
* Fetches the schema for a table from any of the table's data files
*/
private Option<MessageType> m0() {
Option<Pair<HoodieInstant, HoodieCommitMetadata>> instantAndCommitMetadata = m1();
try {
switch (metaClient.getTableType()) {
      case COPY_ON_WRITE :
      case MERGE_ON_READ :
// For COW table, data could be written in either Parquet or Orc format currently;
// For MOR table, data could be written in either Parquet, Orc, Hfile or Delta-log format currently;
//
// Determine the file format based on the file name, and then extract schema from it.
if (instantAndCommitMetadata.isPresent()) {
          HoodieCommitMetadata commitMetadata = instantAndCommitMetadata.get().getRight();
          Iterator<String> filePaths = commitMetadata.getFileIdAndFullPaths(metaClient.getBasePathV2()).values().iterator();
return Option.of(fetchSchemaFromFiles(filePaths));
} else {
LOG.warn(("Could not find any data file written for commit, " + "so could not get schema for table ") + metaClient.getBasePath());
return Option.empty();
        }
      default :
LOG.error("Unknown table type " + metaClient.getTableType());
throw new InvalidTableException(metaClient.getBasePath());
}
} catch (IOException e) {
throw new HoodieException("Failed to read data schema", e);
}
}
| 3.26 |
hudi_TableSchemaResolver_readSchemaFromLastCompaction_rdh
|
/**
* Read schema from a data file from the last compaction commit done.
*
* @deprecated please use {@link #getTableAvroSchema(HoodieInstant, boolean)} instead
*/
public MessageType readSchemaFromLastCompaction(Option<HoodieInstant> lastCompactionCommitOpt) throws Exception {
HoodieActiveTimeline activeTimeline = metaClient.getActiveTimeline();
HoodieInstant lastCompactionCommit = lastCompactionCommitOpt.orElseThrow(() -> new Exception("Could not read schema from last compaction, no compaction commits found on path " + metaClient));
// Read from the compacted file that was written
HoodieCommitMetadata compactionMetadata = HoodieCommitMetadata.fromBytes(activeTimeline.getInstantDetails(lastCompactionCommit).get(), HoodieCommitMetadata.class);
String filePath = compactionMetadata.getFileIdAndFullPaths(metaClient.getBasePathV2()).values().stream().findAny().orElseThrow(() -> new IllegalArgumentException((("Could not find any data file written for compaction " + lastCompactionCommit) + ", could not get schema for table ") + metaClient.getBasePath()));
return readSchemaFromBaseFile(filePath);
}
| 3.26 |
hudi_TableSchemaResolver_getTableInternalSchemaFromCommitMetadata_rdh
|
/**
* Gets the InternalSchema for a hoodie table from the HoodieCommitMetadata of the instant.
*
* @return InternalSchema for this table
*/
private Option<InternalSchema> getTableInternalSchemaFromCommitMetadata(HoodieInstant instant) {
try {
HoodieCommitMetadata metadata = getCachedCommitMetadata(instant);
String latestInternalSchemaStr = metadata.getMetadata(SerDeHelper.LATEST_SCHEMA);
if (latestInternalSchemaStr != null) {
return SerDeHelper.fromJson(latestInternalSchemaStr);
} else {
      return Option.empty();
    }
  } catch (Exception e) {
throw new HoodieException("Failed to read schema from commit metadata", e);
}
}
| 3.26 |
hudi_TableSchemaResolver_getTableParquetSchema_rdh
|
/**
* Gets the user's data schema for a hoodie table in Parquet format.
*
* @return Parquet schema for the table
*/
public MessageType getTableParquetSchema(boolean includeMetadataField) throws Exception {
return convertAvroSchemaToParquet(getTableAvroSchema(includeMetadataField));
}
| 3.26 |
hudi_TableSchemaResolver_getTableAvroSchemaFromLatestCommit_rdh
|
/**
* Returns the table's latest Avro {@link Schema} iff the table is non-empty (i.e. there's at least
* a single commit).
*
* This method differs from {@link #getTableAvroSchema(boolean)} in that it won't fall back
* to the table's schema used at creation.
*/
public Option<Schema> getTableAvroSchemaFromLatestCommit(boolean includeMetadataFields) throws Exception {
if (metaClient.isTimelineNonEmpty()) {
return getTableAvroSchemaInternal(includeMetadataFields, Option.empty());
}
return Option.empty();
}
| 3.26 |
hudi_TableSchemaResolver_hasOperationField_rdh
|
/**
* NOTE: This method should only be used in tests.
*
* @VisibleForTesting
*/
public boolean hasOperationField() {
try {
Schema tableAvroSchema = getTableAvroSchemaFromDataFile();
    return tableAvroSchema.getField(HoodieRecord.OPERATION_METADATA_FIELD) != null;
} catch (Exception e) {
LOG.info(String.format("Failed to read operation field from avro schema (%s)", e.getMessage()));
return false;
}
}
| 3.26 |
hudi_TableSchemaResolver_readSchemaFromLogFile_rdh
|
/**
* Read the schema from the log file on path.
*
* @return the Parquet schema of the last data block in the log file, or null if there is no data block
*/
public static MessageType readSchemaFromLogFile(FileSystem fs, Path path) throws IOException {
try (Reader reader = HoodieLogFormat.newReader(fs, new HoodieLogFile(path), null)) {
HoodieDataBlock lastBlock = null;
while (reader.hasNext()) {
HoodieLogBlock block = reader.next();
if (block instanceof HoodieDataBlock) {
lastBlock = ((HoodieDataBlock) (block));
}
    }
    return lastBlock != null ? new AvroSchemaConverter().convert(lastBlock.getSchema()) : null;
}
}
| 3.26 |
hudi_TableSchemaResolver_getTableAvroSchemaWithoutMetadataFields_rdh
|
/**
* Gets the user's data schema for a hoodie table in Avro format.
*
* @return Avro user data schema
* @throws Exception
* @deprecated use {@link #getTableAvroSchema(boolean)} instead
*/
@Deprecated
public Schema getTableAvroSchemaWithoutMetadataFields() throws Exception {
return getTableAvroSchemaInternal(false, Option.empty()).orElseThrow(schemaNotFoundError());
}
| 3.26 |
hudi_TableSchemaResolver_getTableHistorySchemaStrFromCommitMetadata_rdh
|
/**
* Gets the history schemas as String for a hoodie table from the HoodieCommitMetadata of the instant.
*
* @return history schemas string for this table
*/
public Option<String> getTableHistorySchemaStrFromCommitMetadata() {
// currently we only support the FileBasedInternalSchemaStorageManager
FileBasedInternalSchemaStorageManager manager = new FileBasedInternalSchemaStorageManager(metaClient);
String result = manager.getHistorySchemaStr();
return result.isEmpty() ? Option.empty() : Option.of(result);
}
| 3.26 |
hudi_WriteStatus_markFailure_rdh
|
/**
* Used by native write handles like HoodieRowCreateHandle and HoodieRowDataCreateHandle.
*
* @see WriteStatus#markFailure(HoodieRecord, Throwable, Option)
*/
@PublicAPIMethod(maturity = ApiMaturityLevel.EVOLVING)
public void markFailure(String recordKey, String partitionPath, Throwable t) {
if (failedRecords.isEmpty() || (random.nextDouble() <= failureFraction)) {
// Guaranteed to have at-least one error
HoodieRecordDelegate recordDelegate = HoodieRecordDelegate.create(recordKey, partitionPath);
failedRecords.add(Pair.of(recordDelegate, t));
errors.put(recordDelegate.getHoodieKey(), t);
}
updateStatsForFailure();
}
| 3.26 |
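The guard in markFailure above, `failedRecords.isEmpty() || random.nextDouble() <= failureFraction`, always retains the first failure and then samples the rest. A minimal standalone sketch of that sampling pattern (the class and names here are hypothetical, not part of Hudi):

import java.util.ArrayList;
import java.util.List;
import java.util.Random;

// Sketch: keep the first failure unconditionally, keep later failures with probability failureFraction.
class FailureSampler {
  private final List<String> failedKeys = new ArrayList<>();
  private final Random random = new Random();
  private final double failureFraction;

  FailureSampler(double failureFraction) {
    this.failureFraction = failureFraction;
  }

  void markFailure(String recordKey) {
    if (failedKeys.isEmpty() || random.nextDouble() <= failureFraction) {
      failedKeys.add(recordKey); // guaranteed to have at least one error recorded
    }
  }

  List<String> getFailedKeys() {
    return failedKeys;
  }
}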
hudi_WriteStatus_markSuccess_rdh
|
/**
* Used by native write handles like HoodieRowCreateHandle and HoodieRowDataCreateHandle.
*
* @see WriteStatus#markSuccess(HoodieRecord, Option)
*/
@PublicAPIMethod(maturity = ApiMaturityLevel.EVOLVING)
public void markSuccess(HoodieRecordDelegate recordDelegate, Option<Map<String, String>> optionalRecordMetadata) {
if (trackSuccessRecords) {
writtenRecordDelegates.add(Objects.requireNonNull(recordDelegate));
}
updateStatsForSuccess(optionalRecordMetadata);
}
| 3.26 |
hudi_FlinkWriteClients_createWriteClientV2_rdh
|
/**
* Creates the Flink write client.
*
* <p>This expects to be used by the driver, the client can then send requests for files view.
*
* <p>The task context supplier is a constant: the write token is always '0-1-0'.
*
* <p>Note: different from {@link #createWriteClient}, the fs view storage options are set into the given
* configuration {@code conf}.
*/
@SuppressWarnings("rawtypes")
public static HoodieFlinkWriteClient createWriteClientV2(Configuration conf) {
HoodieWriteConfig writeConfig = getHoodieClientConfig(conf, true, false);
// build the write client to start the embedded timeline server
final HoodieFlinkWriteClient writeClient = new HoodieFlinkWriteClient<>(new HoodieFlinkEngineContext(HadoopConfigurations.getHadoopConf(conf)), writeConfig);
  writeClient.setOperationType(WriteOperationType.fromValue(conf.getString(FlinkOptions.OPERATION)));
  // create the filesystem view storage properties for client
final FileSystemViewStorageConfig viewStorageConfig = writeConfig.getViewStorageConfig();
conf.setString(FileSystemViewStorageConfig.VIEW_TYPE.key(), viewStorageConfig.getStorageType().name());
conf.setString(FileSystemViewStorageConfig.REMOTE_HOST_NAME.key(), viewStorageConfig.getRemoteViewServerHost());
  conf.setInteger(FileSystemViewStorageConfig.REMOTE_PORT_NUM.key(), viewStorageConfig.getRemoteViewServerPort());
  return writeClient;
}
| 3.26 |
hudi_FlinkWriteClients_getHoodieClientConfig_rdh
|
/**
* Mainly used for tests.
*/
public static HoodieWriteConfig getHoodieClientConfig(Configuration conf) {
return getHoodieClientConfig(conf, false, false);
}
| 3.26 |
hudi_FlinkWriteClients_createWriteClient_rdh
|
/**
* Creates the Flink write client.
*
* <p>This is expected to be used by the client; the driver should start an embedded timeline server.
*/
@SuppressWarnings("rawtypes")
public static HoodieFlinkWriteClient createWriteClient(Configuration conf, RuntimeContext runtimeContext) {
return createWriteClient(conf, runtimeContext, true);
}
/**
* Creates the Flink write client.
*
* <p>This expects to be used by client, set flag {@code loadFsViewStorageConfig}
| 3.26 |
hudi_CkpMetadata_bootstrap_rdh
|
// -------------------------------------------------------------------------
// WRITE METHODS
// -------------------------------------------------------------------------
/**
* Initialize the message bus; this cleans all existing messages.
*
* <p>This is expected to be called by the driver.
*/
public void bootstrap() throws IOException {
fs.delete(path, true);
fs.mkdirs(path);
}
| 3.26 |
hudi_CkpMetadata_load_rdh
|
// -------------------------------------------------------------------------
// READ METHODS
// -------------------------------------------------------------------------
private void load() {
try {
this.messages = m1(this.path);
} catch (IOException e) {
throw new HoodieException("Exception while scanning the checkpoint meta files under path: " + this.path, e);
}
}
| 3.26 |
hudi_CkpMetadata_commitInstant_rdh
|
/**
* Add a checkpoint commit message.
*
* @param instant
* The committed instant
*/
public void commitInstant(String instant) {
Path path = fullPath(CkpMessage.getFileName(instant, State.COMPLETED));
try {
fs.createNewFile(path);
} catch (IOException e) {
throw new HoodieException("Exception while adding checkpoint commit metadata for instant: " + instant, e);
}
}
| 3.26 |
hudi_CkpMetadata_ckpMetaPath_rdh
|
// -------------------------------------------------------------------------
// Utilities
// -------------------------------------------------------------------------
protected static String ckpMetaPath(String basePath, String uniqueId) {
// .hoodie/.aux/ckp_meta
  String metaPath = basePath + Path.SEPARATOR + HoodieTableMetaClient.AUXILIARYFOLDER_NAME + Path.SEPARATOR + CKP_META;
  return StringUtils.isNullOrEmpty(uniqueId) ? metaPath : metaPath + "_" + uniqueId;
}
| 3.26 |
hudi_StreamerUtil_generateBucketKey_rdh
|
/**
* Generates the bucket ID using format {partition path}_{fileID}.
*/
public static String generateBucketKey(String partitionPath, String fileId) {
return String.format("%s_%s", partitionPath, fileId);
}
| 3.26 |
hudi_StreamerUtil_getTimeGeneratorConfig_rdh
|
/**
* Returns the timeGenerator config with given configuration.
*/
public static HoodieTimeGeneratorConfig getTimeGeneratorConfig(Configuration conf) {
TypedProperties properties = flinkConf2TypedProperties(conf);
// Set the lock configuration, which is needed by the TimeGenerator.
Option<HoodieLockConfig> lockConfig = getLockConfig(conf);
if (lockConfig.isPresent()) {
properties.putAll(lockConfig.get().getProps());
}
return HoodieTimeGeneratorConfig.newBuilder().withPath(conf.getString(FlinkOptions.PATH)).fromProperties(properties).build();
}
| 3.26 |
hudi_StreamerUtil_readConfig_rdh
|
/**
* Read config from properties file (`--props` option) and cmd line (`--hoodie-conf` option).
*/
public static DFSPropertiesConfiguration readConfig(Configuration hadoopConfig, Path cfgPath, List<String> overriddenProps) {
DFSPropertiesConfiguration conf = new DFSPropertiesConfiguration(hadoopConfig, cfgPath);
try {
if (!overriddenProps.isEmpty()) {
LOG.info("Adding overridden properties to file properties.");
conf.addPropsFromStream(new BufferedReader(new StringReader(String.join("\n", overriddenProps))), cfgPath);
}
} catch (IOException ioe) {
throw new HoodieIOException("Unexpected error adding config overrides", ioe);
}
return conf;
}
| 3.26 |
hudi_StreamerUtil_getPayloadConfig_rdh
|
/**
* Returns the payload config with given configuration.
*/
public static HoodiePayloadConfig getPayloadConfig(Configuration conf) {
  return HoodiePayloadConfig.newBuilder().withPayloadClass(conf.getString(FlinkOptions.PAYLOAD_CLASS_NAME)).withPayloadOrderingField(conf.getString(FlinkOptions.PRECOMBINE_FIELD)).withPayloadEventTimeField(conf.getString(FlinkOptions.PRECOMBINE_FIELD)).build();
}
| 3.26 |
hudi_StreamerUtil_instantTimeDiffSeconds_rdh
|
/**
* Returns the time interval in seconds between the given instant times.
*/
public static long instantTimeDiffSeconds(String newInstantTime, String oldInstantTime) {
  try {
    long newTimestamp = HoodieActiveTimeline.parseDateFromInstantTime(newInstantTime).getTime();
    long oldTimestamp = HoodieActiveTimeline.parseDateFromInstantTime(oldInstantTime).getTime();
    return (newTimestamp - oldTimestamp) / 1000;
  } catch (ParseException e) {
    throw new HoodieException("Get instant time diff with interval [" + oldInstantTime + ", " + newInstantTime + "] error", e);
}
}
| 3.26 |
hudi_StreamerUtil_createMetaClient_rdh
|
/**
* Creates the meta client.
 */
public static HoodieTableMetaClient createMetaClient(Configuration conf, Configuration hadoopConf) {
return HoodieTableMetaClient.builder().setBasePath(conf.getString(FlinkOptions.PATH)).setConf(hadoopConf).setTimeGeneratorConfig(getTimeGeneratorConfig(conf)).build();
}
| 3.26 |
hudi_StreamerUtil_getMaxCompactionMemoryInBytes_rdh
|
/**
* Returns the max compaction memory in bytes with given conf.
 */
public static long getMaxCompactionMemoryInBytes(Configuration conf) {
return (((long) (conf.getInteger(FlinkOptions.COMPACTION_MAX_MEMORY))) * 1024) * 1024;
}
| 3.26 |
hudi_StreamerUtil_partitionExists_rdh
|
/**
* Returns whether the hoodie partition exists under given table path {@code tablePath} and partition path {@code partitionPath}.
*
* @param tablePath
* Base path of the table.
* @param partitionPath
* The path of the partition.
* @param hadoopConf
* The hadoop configuration.
*/
public static boolean partitionExists(String tablePath, String partitionPath, Configuration hadoopConf) {
// Hadoop FileSystem
FileSystem fs = FSUtils.getFs(tablePath, hadoopConf);
try {
return fs.exists(new Path(tablePath, partitionPath));
} catch (IOException e) {
throw new HoodieException(String.format("Error while checking whether partition exists under table path [%s] and partition path [%s]", tablePath, partitionPath),
e);
}
}
| 3.26 |
hudi_StreamerUtil_medianInstantTime_rdh
|
/**
* Returns the median instant time between the two given instant times.
*/
public static Option<String> medianInstantTime(String highVal, String lowVal) {
try {
long high = HoodieActiveTimeline.parseDateFromInstantTime(highVal).getTime();
    long low = HoodieActiveTimeline.parseDateFromInstantTime(lowVal).getTime();
    ValidationUtils.checkArgument(high > low, "Instant [" + highVal + "] should have newer timestamp than instant [" + lowVal + "]");
    long median = low + ((high - low) / 2);
    final String instantTime = HoodieActiveTimeline.formatDate(new Date(median));
    if (HoodieTimeline.compareTimestamps(lowVal, HoodieTimeline.GREATER_THAN_OR_EQUALS, instantTime) || HoodieTimeline.compareTimestamps(highVal, HoodieTimeline.LESSER_THAN_OR_EQUALS, instantTime)) {
      return Option.empty();
    }
return Option.of(instantTime);
} catch (ParseException e) {
    throw new HoodieException("Get median instant time with interval [" + lowVal + ", " + highVal + "] error", e);
}
}
| 3.26 |
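The `low + (high - low) / 2` form above avoids the overflow that `(low + high) / 2` could hit for large epoch values. A minimal sketch of that arithmetic over plain epoch milliseconds (the Hudi instant parsing and formatting are omitted here):

import java.time.Instant;

// Sketch: overflow-safe midpoint between two epoch-millisecond timestamps,
// mirroring the low + (high - low) / 2 arithmetic in medianInstantTime.
public class MedianTimestamp {
  public static void main(String[] args) {
    long low = Instant.parse("2024-01-01T00:00:00Z").toEpochMilli();
    long high = Instant.parse("2024-01-02T00:00:00Z").toEpochMilli();
    long median = low + (high - low) / 2;
    System.out.println(Instant.ofEpochMilli(median)); // 2024-01-01T12:00:00Z
  }
}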
hudi_StreamerUtil_getLockConfig_rdh
|
/**
* Get the lockConfig if required, empty {@link Option} otherwise.
*/
public static Option<HoodieLockConfig> getLockConfig(Configuration conf) {
if (OptionsResolver.isLockRequired(conf) && (!conf.containsKey(HoodieLockConfig.LOCK_PROVIDER_CLASS_NAME.key()))) {
// configure the fs lock provider by default
return Option.of(HoodieLockConfig.newBuilder().fromProperties(FileSystemBasedLockProvider.getLockConfig(conf.getString(FlinkOptions.PATH))).withConflictResolutionStrategy(OptionsResolver.getConflictResolutionStrategy(conf)).build());
}
return Option.empty();
}
| 3.26 |
hudi_StreamerUtil_isWriteCommit_rdh
|
/**
* Returns whether the given instant is a data writing commit.
*
* @param tableType
* The table type
* @param instant
* The instant
* @param timeline
* The timeline
*/
public static boolean isWriteCommit(HoodieTableType tableType, HoodieInstant instant, HoodieTimeline timeline) {
return tableType == HoodieTableType.MERGE_ON_READ ? !instant.getAction().equals(HoodieTimeline.COMMIT_ACTION)// not a compaction
: !ClusteringUtil.isClusteringInstant(instant, timeline);// not a clustering
}
| 3.26 |
hudi_StreamerUtil_metaClientForReader_rdh
|
/**
* Creates the meta client for reader.
*
* <p>The streaming pipeline process is long-running, so empty table path is allowed,
* the reader would then check and refresh the meta client.
*
* @see org.apache.hudi.source.StreamReadMonitoringFunction
*/
public static HoodieTableMetaClient metaClientForReader(Configuration conf, Configuration hadoopConf) {
final String basePath = conf.getString(FlinkOptions.PATH);
if (conf.getBoolean(FlinkOptions.READ_AS_STREAMING) && (!tableExists(basePath, hadoopConf))) {
return null;
} else {
return createMetaClient(basePath, hadoopConf);
}
}
| 3.26 |
hudi_StreamerUtil_isValidFile_rdh
|
/**
* Returns whether the given file is in a valid hoodie format.
* For example, filtering out empty or corrupt files.
*/
public static boolean isValidFile(FileStatus fileStatus) {
  final String extension = FSUtils.getFileExtension(fileStatus.getPath().toString());
if (PARQUET.getFileExtension().equals(extension)) {
return fileStatus.getLen() > MAGIC.length;
}
if (ORC.getFileExtension().equals(extension)) {
return fileStatus.getLen() > OrcFile.MAGIC.length();
}
if (HOODIE_LOG.getFileExtension().equals(extension)) {
return fileStatus.getLen() > MAGIC.length;
}
return fileStatus.getLen() > 0;
}
| 3.26 |
hudi_StreamerUtil_flinkConf2TypedProperties_rdh
|
/**
* Converts the given {@link Configuration} to {@link TypedProperties}.
* The default values are also set up.
*
* @param conf
* The flink configuration
* @return a TypedProperties instance
*/
public static TypedProperties flinkConf2TypedProperties(Configuration conf) {
Configuration flatConf = FlinkOptions.flatOptions(conf);
Properties properties = new Properties();
// put all the set options
flatConf.addAllToProperties(properties);
// put all the default options
for (ConfigOption<?> v10 : FlinkOptions.optionalOptions()) {
if ((!flatConf.contains(v10)) && v10.hasDefaultValue()) {
      properties.put(v10.key(), v10.defaultValue());
    }
  }
  properties.put(HoodieTableConfig.TYPE.key(), conf.getString(FlinkOptions.TABLE_TYPE));
return new TypedProperties(properties);
}
| 3.26 |
hudi_StreamerUtil_initTableIfNotExists_rdh
|
/**
* Initialize the table if it does not exist.
*
* @param conf
* the configuration
* @throws IOException
* if errors happens when writing metadata
*/
public static HoodieTableMetaClient initTableIfNotExists(Configuration conf, Configuration hadoopConf) throws IOException {
final String basePath = conf.getString(FlinkOptions.PATH);
if (!tableExists(basePath, hadoopConf)) {
HoodieTableMetaClient.withPropertyBuilder().setTableCreateSchema(conf.getString(FlinkOptions.SOURCE_AVRO_SCHEMA)).setTableType(conf.getString(FlinkOptions.TABLE_TYPE)).setTableName(conf.getString(FlinkOptions.TABLE_NAME)).setDatabaseName(conf.getString(FlinkOptions.DATABASE_NAME)).setRecordKeyFields(conf.getString(FlinkOptions.RECORD_KEY_FIELD, null)).setPayloadClassName(conf.getString(FlinkOptions.PAYLOAD_CLASS_NAME)).setPreCombineField(OptionsResolver.getPreCombineField(conf)).setArchiveLogFolder(ARCHIVELOG_FOLDER.defaultValue()).setPartitionFields(conf.getString(FlinkOptions.PARTITION_PATH_FIELD,
null)).setKeyGeneratorClassProp(conf.getOptional(FlinkOptions.KEYGEN_CLASS_NAME).orElse(SimpleAvroKeyGenerator.class.getName())).setHiveStylePartitioningEnable(conf.getBoolean(FlinkOptions.HIVE_STYLE_PARTITIONING)).setUrlEncodePartitioning(conf.getBoolean(FlinkOptions.URL_ENCODE_PARTITIONING)).setCDCEnabled(conf.getBoolean(FlinkOptions.CDC_ENABLED)).setCDCSupplementalLoggingMode(conf.getString(FlinkOptions.SUPPLEMENTAL_LOGGING_MODE)).setTimelineLayoutVersion(1).initTable(hadoopConf, basePath);
LOG.info("Table initialized under base path {}", basePath);
} else {
LOG.info("Table [{}/{}] already exists, no need to initialize the table", basePath, conf.getString(FlinkOptions.TABLE_NAME));
}
return StreamerUtil.createMetaClient(conf, hadoopConf);
// Do not close the filesystem in order to use the CACHE,
// some filesystems release the handles in #close method.
}
/**
* Returns whether the hoodie table exists under given path {@code basePath}
| 3.26 |
hudi_StreamerUtil_getTableConfig_rdh
|
/**
* Returns the table config or empty if the table does not exist.
*/
public static Option<HoodieTableConfig> getTableConfig(String basePath, Configuration hadoopConf) {
FileSystem fs = FSUtils.getFs(basePath, hadoopConf);
Path metaPath = new Path(basePath, HoodieTableMetaClient.METAFOLDER_NAME);
try {
if (fs.exists(new Path(metaPath, HoodieTableConfig.HOODIE_PROPERTIES_FILE))) {
return Option.of(new HoodieTableConfig(fs, metaPath.toString(), null, null));
}
} catch (IOException e) {
    throw new HoodieIOException("Get table config error", e);
}
return Option.empty();
}
| 3.26 |
hudi_StreamerUtil_haveSuccessfulCommits_rdh
|
/**
* Returns whether there are successful commits on the timeline.
*
* @param metaClient
* The meta client
* @return true if there is any successful commit
*/
public static boolean haveSuccessfulCommits(HoodieTableMetaClient metaClient) {
return !metaClient.getCommitsTimeline().filterCompletedInstants().empty();
}
| 3.26 |
hudi_SparkDataSourceTableUtils_getSparkTableProperties_rdh
|
/**
* Get Spark Sql related table properties. This is used for spark datasource table.
*
* @param schema
* The schema to write to the table.
* @return A new parameter map with Spark's table properties added.
*/
public static Map<String, String> getSparkTableProperties(List<String> partitionNames, String sparkVersion, int schemaLengthThreshold, MessageType schema) {
// Convert the schema and partition info used by spark sql to hive table properties.
// The following code refers to the spark code in
// https://github.com/apache/spark/blob/master/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala
GroupType originGroupType = schema.asGroupType();
List<Type> partitionCols = new ArrayList<>();
List<Type> dataCols = new ArrayList<>();
Map<String, Type> column2Field = new HashMap<>();
for (Type field : originGroupType.getFields()) {
column2Field.put(field.getName(), field);
}
// Get partition columns and data columns.
for (String partitionName : partitionNames) {
// Default the unknown partition fields to be String.
// Keep the same logic as HiveSchemaUtil#getPartitionKeyType.
partitionCols.add(column2Field.getOrDefault(partitionName, new PrimitiveType(Repetition.REQUIRED, BINARY, partitionName, UTF8)));
}
for (Type field : originGroupType.getFields()) {
if (!partitionNames.contains(field.getName())) {
dataCols.add(field);
}
}
List<Type> reOrderedFields = new ArrayList<>();
  reOrderedFields.addAll(dataCols);
  reOrderedFields.addAll(partitionCols);
GroupType reOrderedType = new GroupType(originGroupType.getRepetition(), originGroupType.getName(), reOrderedFields);
Map<String, String> sparkProperties = new HashMap<>();
sparkProperties.put("spark.sql.sources.provider", "hudi");
if (!StringUtils.isNullOrEmpty(sparkVersion)) {
sparkProperties.put("spark.sql.create.version", sparkVersion);
}
// Split the schema string into multiple parts according to the schemaLengthThreshold size.
String schemaString = Parquet2SparkSchemaUtils.convertToSparkSchemaJson(reOrderedType);
int numSchemaPart = ((schemaString.length() + schemaLengthThreshold) - 1) / schemaLengthThreshold;
sparkProperties.put("spark.sql.sources.schema.numParts", String.valueOf(numSchemaPart));
// Add each part of schema string to sparkProperties
for (int i = 0; i < numSchemaPart; i++) {
int start = i * schemaLengthThreshold;
int end = Math.min(start + schemaLengthThreshold, schemaString.length());
sparkProperties.put("spark.sql.sources.schema.part." + i, schemaString.substring(start, end));
  }
  // Add partition columns
if (!partitionNames.isEmpty()) {
sparkProperties.put("spark.sql.sources.schema.numPartCols", String.valueOf(partitionNames.size()));
for (int i = 0; i < partitionNames.size(); i++) {
sparkProperties.put("spark.sql.sources.schema.partCol." + i, partitionNames.get(i));
}
}
return sparkProperties;
}
| 3.26 |
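The schema JSON above is chunked into `numParts = (len + threshold - 1) / threshold` pieces stored under numbered property keys. A minimal standalone sketch of just that chunking step (the placeholder JSON and threshold are illustrative; the property key names are copied from the snippet above):

import java.util.HashMap;
import java.util.Map;

// Sketch: ceil-divide the schema string length by the threshold and store each
// substring under a numbered key, as getSparkTableProperties does.
public class SchemaChunking {
  public static void main(String[] args) {
    String schemaString = "{\"type\":\"struct\",\"fields\":[]}"; // placeholder JSON
    int threshold = 10;
    Map<String, String> props = new HashMap<>();
    int numParts = (schemaString.length() + threshold - 1) / threshold;
    props.put("spark.sql.sources.schema.numParts", String.valueOf(numParts));
    for (int i = 0; i < numParts; i++) {
      int start = i * threshold;
      int end = Math.min(start + threshold, schemaString.length());
      props.put("spark.sql.sources.schema.part." + i, schemaString.substring(start, end));
    }
    props.forEach((k, v) -> System.out.println(k + " = " + v));
  }
}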
hudi_DataPruner_test_rdh
|
/**
* Filters the index row with specific data filters and query fields.
*
* @param indexRow
* The index row
* @param queryFields
* The query fields referenced by the filters
* @return true if the index row should be considered as a candidate
*/
public boolean test(RowData indexRow, RowType[] queryFields) {
  Map<String, ColumnStats> columnStatsMap = convertColumnStats(indexRow, queryFields);
for (ExpressionEvaluators.Evaluator evaluator : evaluators) {
if (!evaluator.eval(columnStatsMap)) {
return false;
}
}
return true;
}
| 3.26 |
hudi_DataPruner_getValAsJavaObj_rdh
|
/**
* Returns the value as Java object at position {@code pos} of row {@code indexRow}.
*/
private static Object getValAsJavaObj(RowData indexRow, int pos, LogicalType colType) {
switch (colType.getTypeRoot()) {
// NOTE: Since we can't rely on Avro's "date", and "timestamp-micros" logical-types, we're
// manually encoding corresponding values as int and long w/in the Column Stats Index and
// here we have to decode those back into corresponding logical representation.
case TIMESTAMP_WITHOUT_TIME_ZONE :
TimestampType v12 = ((TimestampType) (colType));
      return indexRow.getTimestamp(pos, v12.getPrecision()).getMillisecond();
    case TIME_WITHOUT_TIME_ZONE :
    case DATE :
case BIGINT :
return indexRow.getLong(pos);
// NOTE: All integral types of size less than Int are encoded as Ints in MT
case BOOLEAN :
return indexRow.getBoolean(pos);
case TINYINT :
case SMALLINT :
    case INTEGER :
return indexRow.getInt(pos);
case FLOAT :
return indexRow.getFloat(pos);
case DOUBLE :
return indexRow.getDouble(pos);
    case BINARY :
    case VARBINARY :
return indexRow.getBinary(pos);
case CHAR :
case VARCHAR :
return indexRow.getString(pos).toString();
case DECIMAL :
DecimalType decimalType = ((DecimalType) (colType));
return indexRow.getDecimal(pos, decimalType.getPrecision(), decimalType.getScale()).toBigDecimal();
default :
throw new UnsupportedOperationException("Unsupported type: " + colType);
}
}
| 3.26 |
hudi_PartitionAwareClusteringPlanStrategy_buildClusteringGroupsForPartition_rdh
|
/**
* Create Clustering group based on files eligible for clustering in the partition.
*/
protected Stream<HoodieClusteringGroup> buildClusteringGroupsForPartition(String partitionPath, List<FileSlice> fileSlices) {
HoodieWriteConfig writeConfig = getWriteConfig();
List<Pair<List<FileSlice>, Integer>> fileSliceGroups = new ArrayList<>();
List<FileSlice> currentGroup = new ArrayList<>();
// Sort fileSlices before dividing, which makes dividing more compact
List<FileSlice> sortedFileSlices = new ArrayList<>(fileSlices);
sortedFileSlices.sort((o1, o2) -> ((int) ((o2.getBaseFile().isPresent() ? o2.getBaseFile().get().getFileSize() : writeConfig.getParquetMaxFileSize()) - (o1.getBaseFile().isPresent() ? o1.getBaseFile().get().getFileSize() : writeConfig.getParquetMaxFileSize()))));
long totalSizeSoFar = 0;
for (FileSlice currentSlice : sortedFileSlices) {
long currentSize = (currentSlice.getBaseFile().isPresent()) ? currentSlice.getBaseFile().get().getFileSize() : writeConfig.getParquetMaxFileSize();
// check if max size is reached and create new group, if needed.
if (((totalSizeSoFar + currentSize) > writeConfig.getClusteringMaxBytesInGroup()) && (!currentGroup.isEmpty())) {
int numOutputGroups = getNumberOfOutputFileGroups(totalSizeSoFar, writeConfig.getClusteringTargetFileMaxBytes());
        LOG.info("Adding one clustering group " + totalSizeSoFar + " max bytes: " + writeConfig.getClusteringMaxBytesInGroup() + " num input slices: " + currentGroup.size() + " output groups: " + numOutputGroups);
fileSliceGroups.add(Pair.of(currentGroup, numOutputGroups));
currentGroup = new ArrayList<>();
totalSizeSoFar = 0;
// if fileSliceGroups's size reach the max group, stop loop
if (fileSliceGroups.size() >= writeConfig.getClusteringMaxNumGroups()) {
LOG.info("Having generated the maximum number of groups : " + writeConfig.getClusteringMaxNumGroups());
break;
}
}
// Add to the current file-group
currentGroup.add(currentSlice);
// assume each file group size is ~= parquet.max.file.size
totalSizeSoFar += currentSize;
}
if (!currentGroup.isEmpty()) {
    if ((currentGroup.size() > 1) || writeConfig.shouldClusteringSingleGroup()) {
int numOutputGroups = getNumberOfOutputFileGroups(totalSizeSoFar, writeConfig.getClusteringTargetFileMaxBytes());
LOG.info((((((("Adding final clustering group " + totalSizeSoFar) + " max bytes: ") + writeConfig.getClusteringMaxBytesInGroup()) + " num input slices: ") + currentGroup.size()) + " output groups: ") + numOutputGroups);
fileSliceGroups.add(Pair.of(currentGroup, numOutputGroups));
}
}
return fileSliceGroups.stream().map(fileSliceGroup -> HoodieClusteringGroup.newBuilder().setSlices(getFileSliceInfo(fileSliceGroup.getLeft())).setNumOutputFileGroups(fileSliceGroup.getRight()).setMetrics(buildMetrics(fileSliceGroup.getLeft())).build());
}
| 3.26 |
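The grouping loop above is a greedy, size-based bin packing: slices are sorted by size descending and accumulated until the byte cap would be exceeded, at which point a new group is started. A simplified self-contained sketch over plain longs (the Hudi types and config are replaced with hypothetical parameters):

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

// Simplified sketch of the greedy grouping in buildClusteringGroupsForPartition:
// sort file sizes descending, then pack them into groups bounded by maxBytesPerGroup.
public class GreedyGrouping {
  static List<List<Long>> group(List<Long> fileSizes, long maxBytesPerGroup) {
    List<Long> sorted = new ArrayList<>(fileSizes);
    sorted.sort(Comparator.reverseOrder());
    List<List<Long>> groups = new ArrayList<>();
    List<Long> current = new ArrayList<>();
    long totalSoFar = 0;
    for (long size : sorted) {
      if (totalSoFar + size > maxBytesPerGroup && !current.isEmpty()) {
        groups.add(current);        // close the current group before it overflows
        current = new ArrayList<>();
        totalSoFar = 0;
      }
      current.add(size);
      totalSoFar += size;
    }
    if (!current.isEmpty()) {
      groups.add(current);
    }
    return groups;
  }

  public static void main(String[] args) {
    System.out.println(group(List.of(120L, 80L, 60L, 40L, 20L), 150L));
  }
}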
hudi_PartitionAwareClusteringPlanStrategy_filterPartitionPaths_rdh
|
/**
* Return list of partition paths to be considered for clustering.
 */
protected List<String> filterPartitionPaths(List<String> partitionPaths) {
  List<String> filteredPartitions = ClusteringPlanPartitionFilter.filter(partitionPaths, getWriteConfig());
  LOG.debug("Filtered to the following partitions: " + filteredPartitions);
  return filteredPartitions;
}
| 3.26 |
hudi_SingleSparkJobExecutionStrategy_readRecordsForGroupBaseFiles_rdh
|
/**
* Read records from baseFiles and get iterator.
*/
private Iterator<HoodieRecord<T>> readRecordsForGroupBaseFiles(List<ClusteringOperation> clusteringOps) {
List<Iterator<HoodieRecord<T>>> iteratorsForPartition = clusteringOps.stream().map(clusteringOp -> {
Schema readerSchema = HoodieAvroUtils.addMetadataFields(new Schema.Parser().parse(getWriteConfig().getSchema()));
Iterable<HoodieRecord<T>> indexedRecords = () -> {
try {
HoodieFileReader baseFileReader = HoodieFileReaderFactory.getReaderFactory(recordType).getFileReader(getHoodieTable().getHadoopConf(), new Path(clusteringOp.getDataFilePath()));Option<BaseKeyGenerator> keyGeneratorOp = (writeConfig.populateMetaFields()) ? Option.empty() : Option.of(((BaseKeyGenerator) (HoodieSparkKeyGeneratorFactory.createKeyGenerator(writeConfig.getProps()))));
// NOTE: Record have to be cloned here to make sure if it holds low-level engine-specific
// payload pointing into a shared, mutable (underlying) buffer we get a clean copy of
// it since these records will be shuffled later.
CloseableMappingIterator mappingIterator = new CloseableMappingIterator(((ClosableIterator<HoodieRecord>) (baseFileReader.getRecordIterator(readerSchema))), rec -> ((HoodieRecord) (rec)).copy().wrapIntoHoodieRecordPayloadWithKeyGen(readerSchema, getWriteConfig().getProps(), keyGeneratorOp));
return mappingIterator;
} catch (IOException e) {
        throw new HoodieClusteringException("Error reading input data for " + clusteringOp.getDataFilePath() + " and " + clusteringOp.getDeltaFilePaths(), e);
}
};
return StreamSupport.stream(indexedRecords.spliterator(), false).iterator();
}).collect(Collectors.toList());
return new ConcatenatingIterator<>(iteratorsForPartition);
}
| 3.26 |
hudi_DatePartitionPathSelector_pruneDatePartitionPaths_rdh
|
/**
* Prunes date level partitions to last few days configured by 'NUM_PREV_DAYS_TO_LIST' from
* 'CURRENT_DATE'. Parallelizes listing by leveraging HoodieSparkEngineContext's methods.
*/
public List<String> pruneDatePartitionPaths(HoodieSparkEngineContext context, FileSystem fs, String rootPath, LocalDate currentDate) {
  // get all partition paths before the date partition level
  List<String> partitionPaths = new ArrayList<>();
  partitionPaths.add(rootPath);
if (datePartitionDepth <= 0) {
return partitionPaths;
}
SerializableConfiguration serializedConf = new SerializableConfiguration(fs.getConf());
for (int i = 0; i < datePartitionDepth; i++) {
partitionPaths = context.flatMap(partitionPaths, path -> {
      Path subDir = new Path(path);
      FileSystem fileSystem = subDir.getFileSystem(serializedConf.get());
// skip files/dirs whose names start with (_, ., etc)
FileStatus[] v18 = fileSystem.listStatus(subDir, file -> IGNORE_FILEPREFIX_LIST.stream().noneMatch(pfx -> file.getName().startsWith(pfx)));
List<String> res = new ArrayList<>();
for (FileStatus v20 : v18) {
res.add(v20.getPath().toString());
}
return res.stream();
}, f0);
}
// Prune date partitions to last few days
return context.getJavaSparkContext().parallelize(partitionPaths, f0).filter(s -> {
LocalDate fromDate = currentDate.minusDays(numPrevDaysToList);
String[] splits = s.split("/");
String v23 = splits[splits.length - 1];
LocalDate partitionDate;
DateTimeFormatter dateFormatter = DateTimeFormatter.ofPattern(dateFormat);
if (v23.contains("=")) {
String[] moreSplit = v23.split("=");
ValidationUtils.checkArgument(moreSplit.length == 2, ("Partition Field (" + v23) + ") not in expected format");
partitionDate = LocalDate.parse(moreSplit[1], dateFormatter);
    } else {
      partitionDate = LocalDate.parse(v23, dateFormatter);
    }
    return (partitionDate.isEqual(fromDate) || partitionDate.isAfter(fromDate)) && (partitionDate.isEqual(currentDate) || partitionDate.isBefore(currentDate));
}).collect();
}
| 3.26 |
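The final filter above keeps partitions whose date falls within [currentDate - numPrevDaysToList, currentDate], handling both bare dates and "key=value" style path segments. A standalone sketch of just that window check, using only java.time (paths, dates, and format are hypothetical examples):

import java.time.LocalDate;
import java.time.format.DateTimeFormatter;

// Sketch of the date-window filter at the end of pruneDatePartitionPaths: parse the
// last path segment (optionally "key=value" style) and keep it if it lies within
// [currentDate - numPrevDays, currentDate].
public class DatePartitionFilter {
  static boolean keep(String partitionPath, LocalDate currentDate, int numPrevDays, String dateFormat) {
    DateTimeFormatter fmt = DateTimeFormatter.ofPattern(dateFormat);
    String[] splits = partitionPath.split("/");
    String last = splits[splits.length - 1];
    String dateStr = last.contains("=") ? last.split("=")[1] : last;
    LocalDate partitionDate = LocalDate.parse(dateStr, fmt);
    LocalDate fromDate = currentDate.minusDays(numPrevDays);
    return !partitionDate.isBefore(fromDate) && !partitionDate.isAfter(currentDate);
  }

  public static void main(String[] args) {
    LocalDate today = LocalDate.of(2024, 6, 10);
    System.out.println(keep("s3://bucket/tbl/dt=2024-06-09", today, 3, "yyyy-MM-dd")); // true
    System.out.println(keep("s3://bucket/tbl/2024-05-01", today, 3, "yyyy-MM-dd"));    // false
  }
}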
hudi_FlinkCompactionConfig_toFlinkConfig_rdh
|
/**
* Transforms a {@code FlinkCompactionConfig} into a {@code Configuration}.
* The latter is more suitable for the table APIs. It reads all the properties
* in the properties file (set by `--props` option) and cmd line options
* (set by `--hoodie-conf` option).
*/
public static Configuration toFlinkConfig(FlinkCompactionConfig config) {
Map<String, String> propsMap = new HashMap<String, String>(((Map) (getProps(config))));
  Configuration conf = fromMap(propsMap);
  conf.setString(FlinkOptions.PATH, config.path);
conf.setString(FlinkOptions.COMPACTION_TRIGGER_STRATEGY, config.compactionTriggerStrategy);
conf.setInteger(FlinkOptions.ARCHIVE_MAX_COMMITS, config.archiveMaxCommits);
conf.setInteger(FlinkOptions.ARCHIVE_MIN_COMMITS, config.archiveMinCommits);
conf.setString(FlinkOptions.CLEAN_POLICY, config.cleanPolicy);
conf.setInteger(FlinkOptions.CLEAN_RETAIN_COMMITS, config.cleanRetainCommits);
conf.setInteger(FlinkOptions.CLEAN_RETAIN_HOURS, config.cleanRetainHours);
conf.setInteger(FlinkOptions.CLEAN_RETAIN_FILE_VERSIONS, config.cleanRetainFileVersions);
conf.setInteger(FlinkOptions.COMPACTION_DELTA_COMMITS, config.compactionDeltaCommits);
conf.setInteger(FlinkOptions.COMPACTION_DELTA_SECONDS, config.compactionDeltaSeconds);
conf.setInteger(FlinkOptions.COMPACTION_MAX_MEMORY, config.compactionMaxMemory);
conf.setLong(FlinkOptions.COMPACTION_TARGET_IO, config.compactionTargetIo);
conf.setInteger(FlinkOptions.COMPACTION_TASKS, config.compactionTasks);
conf.setBoolean(FlinkOptions.CLEAN_ASYNC_ENABLED, config.cleanAsyncEnable);
// use synchronous compaction always
conf.setBoolean(FlinkOptions.COMPACTION_ASYNC_ENABLED, false);
conf.setBoolean(FlinkOptions.COMPACTION_SCHEDULE_ENABLED, config.schedule);
// Map memory
conf.setString(HoodieMemoryConfig.SPILLABLE_MAP_BASE_PATH.key(), config.spillableMapPath);
return conf;
}
| 3.26 |
hudi_ListBasedIndexFileFilter_shouldCompareWithFile_rdh
|
/**
* If the index info has no key ranges, we have no choice but to compare against the file.
* If it does have key ranges, only compare against the file when the record key falls within the range.
*/
protected boolean shouldCompareWithFile(BloomIndexFileInfo indexInfo, String recordKey) {
return (!indexInfo.hasKeyRanges()) || indexInfo.isKeyInRange(recordKey);
}
| 3.26 |
hudi_RunLengthDecoder_readDictionaryIds_rdh
|
/**
* Decoding for dictionary ids. The IDs are populated into `values` and the nullability is
* populated into `nulls`.
*/
void readDictionaryIds(int total, WritableIntVector values, WritableColumnVector nulls, int rowId, int level, RunLengthDecoder data) {
int left = total;
while (left > 0) {
if (this.currentCount == 0) {
this.readNextGroup();
}
int n = Math.min(left, this.currentCount);
switch (mode) {
case RLE :
if (currentValue == level) {
data.readDictionaryIdData(n, values, rowId);
} else {
nulls.setNulls(rowId, n);
}
break;
case PACKED :
for (int i = 0; i < n; ++i) {
if (f0[currentBufferIdx++] == level) {
values.setInt(rowId + i, data.readInteger());
          } else {
            nulls.setNullAt(rowId + i);
}
}
break;
default :
throw new AssertionError();
}
    rowId += n;
    left -= n;
currentCount -= n;
}
}
| 3.26 |
hudi_RunLengthDecoder_initFromStream_rdh
|
/**
* Init from input stream.
*/
void initFromStream(int valueCount, ByteBufferInputStream in) throws IOException {
  this.in = in;
  if (fixedWidth) {
    // initialize for repetition and definition levels
if (readLength) {
int length = readIntLittleEndian();
this.in = in.sliceStream(length);
}
  } else {
    // initialize for values
    if (in.available() > 0) {
      initWidthAndPacker(in.read());
    }
  }
if (bitWidth == 0) {
// 0 bit width, treat this as an RLE run of valueCount number of 0's.
this.mode = MODE.RLE;
this.currentCount = valueCount;
    this.currentValue = 0;
} else {
this.currentCount = 0;
}
}
| 3.26 |
hudi_RunLengthDecoder_initWidthAndPacker_rdh
|
/**
* Initializes the internal state for decoding ints of `bitWidth`.
*/
private void initWidthAndPacker(int bitWidth) {
Preconditions.checkArgument((bitWidth >= 0) && (bitWidth <= 32), "bitWidth must be >= 0 and <= 32");
this.bitWidth = bitWidth;
this.bytesWidth = BytesUtils.paddedByteCountFromBits(bitWidth);
this.packer = Packer.LITTLE_ENDIAN.newBytePacker(bitWidth);
}
| 3.26 |
hudi_RunLengthDecoder_readNextGroup_rdh
|
/**
* Reads the next group.
*/
void readNextGroup() {
try {
int header = m0();
this.mode = ((header & 1) == 0) ? MODE.RLE : MODE.PACKED;
switch (mode) {
case RLE :
this.currentCount = header >>> 1;
this.currentValue = readIntLittleEndianPaddedOnBitWidth();
return;
      case PACKED :
int numGroups = header >>> 1;
this.currentCount = numGroups * 8;
if (this.f0.length < this.currentCount) {
this.f0 = new int[this.currentCount];
}
currentBufferIdx = 0;
int valueIndex = 0;
        while (valueIndex < this.currentCount) {
          // values are bit packed 8 at a time, so reading bitWidth will always work
ByteBuffer buffer = in.slice(bitWidth);
this.packer.unpack8Values(buffer, buffer.position(), this.f0, valueIndex);
valueIndex += 8;
}
return;
default :
throw new ParquetDecodingException("not a valid mode " + this.mode);
}
} catch (IOException e) {
throw new ParquetDecodingException("Failed to read from input stream", e);
}
}
| 3.26 |
hudi_RunLengthDecoder_readIntLittleEndianPaddedOnBitWidth_rdh
|
/**
* Reads the next byteWidth little endian int.
*/
private int readIntLittleEndianPaddedOnBitWidth() throws IOException {
switch (bytesWidth) {
case 0 :
return 0;
case 1 :
return in.read();
case 2 :
{
int ch2 = in.read();
int ch1 = in.read();
        return (ch1 << 8) + ch2;
      }
case 3 :
{
int ch3 = in.read();
        int ch2 = in.read();
int ch1 = in.read();
return ((ch1 << 16) + (ch2 << 8)) + ch3;
}
case 4 :
{
return readIntLittleEndian();
}
default :
throw new RuntimeException("Unreachable");
}
}
| 3.26 |
hudi_RunLengthDecoder_readDictionaryIdData_rdh
|
/**
* It is used to decode dictionary IDs.
*/
private void readDictionaryIdData(int total, WritableIntVector c, int rowId) {
int left = total;
while (left > 0) {
    if (this.currentCount == 0) {
      this.readNextGroup();
}
int n = Math.min(left, this.currentCount);
switch (mode) {
case RLE :
c.setInts(rowId, n, currentValue);
        break;
      case PACKED :
c.setInts(rowId, n, f0, currentBufferIdx);
currentBufferIdx += n;
break;
default :
throw new AssertionError();
}
rowId += n;
left -= n;
currentCount -= n;
}
}
| 3.26 |
hudi_RunLengthDecoder_m0_rdh
|
/**
* Reads the next varint encoded int.
*/
private int m0() throws IOException {
int value = 0;
int shift = 0;
int b;
do {
b = in.read();
value |= (b & 0x7f) << shift;
shift += 7;
} while ((b & 0x80) != 0 );
return value;
}
| 3.26 |
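The loop in m0() above is a standard unsigned-varint (ULEB128) decode: each byte contributes 7 value bits and the high bit marks continuation. A self-contained sketch of the same decoding over a byte array (the class name is hypothetical):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

// Sketch: decode an unsigned varint, accumulating 7 bits per byte while the high bit is set.
public class VarIntDecode {
  static int readUnsignedVarInt(InputStream in) throws IOException {
    int value = 0;
    int shift = 0;
    int b;
    do {
      b = in.read();
      value |= (b & 0x7f) << shift;
      shift += 7;
    } while ((b & 0x80) != 0);
    return value;
  }

  public static void main(String[] args) throws IOException {
    // 300 encodes as [0xAC, 0x02]: 0x2C with continuation bit, then 0x02
    InputStream in = new ByteArrayInputStream(new byte[]{(byte) 0xAC, 0x02});
    System.out.println(readUnsignedVarInt(in)); // 300
  }
}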
hudi_RunLengthDecoder_readIntLittleEndian_rdh
|
/**
* Reads the next 4 byte little endian int.
*/
private int readIntLittleEndian() throws IOException {
  int ch4 = in.read();
  int ch3 = in.read();
  int ch2 = in.read();
  int ch1 = in.read();
return (((ch1 << 24) + (ch2 << 16)) + (ch3 << 8)) + ch4;
}
| 3.26 |
hudi_BucketAssigners_create_rdh
|
/**
* Creates a {@code BucketAssigner}.
*
* @param taskID
* The task ID
* @param maxParallelism
* The max parallelism
* @param numTasks
* The number of tasks
* @param ignoreSmallFiles
* Whether to ignore the small files
* @param tableType
* The table type
* @param context
* The engine context
* @param config
* The configuration
* @return the bucket assigner instance
*/
public static BucketAssigner create(int taskID, int maxParallelism, int numTasks, boolean ignoreSmallFiles, HoodieTableType tableType, HoodieFlinkEngineContext context, HoodieWriteConfig config) {
boolean delta = tableType.equals(HoodieTableType.MERGE_ON_READ);
WriteProfile writeProfile = WriteProfiles.singleton(ignoreSmallFiles, delta, config, context);
return new BucketAssigner(taskID, maxParallelism, numTasks, writeProfile, config);
}
| 3.26 |
hudi_SchemaEvolutionContext_doEvolutionForRealtimeInputFormat_rdh
|
/**
* Do schema evolution for RealtimeInputFormat.
*
* @param realtimeRecordReader
* recordReader for RealtimeInputFormat.
*/
public void doEvolutionForRealtimeInputFormat(AbstractRealtimeRecordReader realtimeRecordReader) throws Exception {
if (!(split instanceof RealtimeSplit)) {
LOG.warn(String.format("expect realtime split for mor table, but find other type split %s", split));
return;
}
if (internalSchemaOption.isPresent()) {
Schema tableAvroSchema = new TableSchemaResolver(metaClient).getTableAvroSchema();
List<String> requiredColumns = getRequireColumn(job);
InternalSchema prunedInternalSchema = InternalSchemaUtils.pruneInternalSchema(internalSchemaOption.get(), requiredColumns);
// Add partitioning fields to writer schema for resulting row to contain null values for these fields
      String partitionFields = job.get(hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS, "");
      List<String> partitioningFields = (partitionFields.length() > 0) ? Arrays.stream(partitionFields.split("/")).collect(Collectors.toList()) : new ArrayList<>();
Schema writerSchema = AvroInternalSchemaConverter.convert(internalSchemaOption.get(), tableAvroSchema.getName());
writerSchema = HoodieRealtimeRecordReaderUtils.addPartitionFields(writerSchema, partitioningFields);
Map<String, Schema.Field> schemaFieldsMap = HoodieRealtimeRecordReaderUtils.getNameToFieldMap(writerSchema);
// we should get HoodieParquetInputFormat#HIVE_TMP_COLUMNS,since serdeConstants#LIST_COLUMNS maybe change by HoodieParquetInputFormat#setColumnNameList
Schema hiveSchema = realtimeRecordReader.constructHiveOrderedSchema(writerSchema, schemaFieldsMap, job.get(HIVE_TMP_COLUMNS));
      Schema readerSchema = AvroInternalSchemaConverter.convert(prunedInternalSchema, tableAvroSchema.getName());
// setUp evolution schema
realtimeRecordReader.setWriterSchema(writerSchema);
realtimeRecordReader.setReaderSchema(readerSchema);
realtimeRecordReader.setHiveSchema(hiveSchema);
internalSchemaOption = Option.of(prunedInternalSchema);
RealtimeSplit realtimeSplit = ((RealtimeSplit) (split));
LOG.info(String.format("About to read compacted logs %s for base split %s, projecting cols %s", realtimeSplit.getDeltaLogPaths(), realtimeSplit.getPath(), requiredColumns));
}
}
| 3.26 |
hudi_SchemaEvolutionContext_doEvolutionForParquetFormat_rdh
|
/**
* Do schema evolution for ParquetFormat.
*/
public void doEvolutionForParquetFormat() {
if (internalSchemaOption.isPresent()) {
// reading hoodie schema evolution table
      job.setBoolean(HIVE_EVOLUTION_ENABLE, true);
      Path finalPath = ((FileSplit) (split)).getPath();
InternalSchema prunedSchema;
List<String> requiredColumns = getRequireColumn(job);
      // No need to trigger schema evolution for count(*)/count(1) operations
      boolean disableSchemaEvolution = requiredColumns.isEmpty() || ((requiredColumns.size() == 1) && requiredColumns.get(0).isEmpty());
if (!disableSchemaEvolution) {
prunedSchema = InternalSchemaUtils.pruneInternalSchema(internalSchemaOption.get(), requiredColumns);
InternalSchema querySchema = prunedSchema;
long commitTime = Long.parseLong(FSUtils.getCommitTime(finalPath.getName()));
InternalSchema fileSchema = InternalSchemaCache.searchSchemaAndCache(commitTime, metaClient, false);
InternalSchema mergedInternalSchema = new InternalSchemaMerger(fileSchema, querySchema, true, true).mergeSchema();
List<Types.Field> fields = mergedInternalSchema.columns();
        setColumnNameList(job, fields);
        setColumnTypeList(job, fields);
        pushDownFilter(job, querySchema, fileSchema);
}
}
}
| 3.26 |
hudi_DateTimeUtils_parseDuration_rdh
|
/**
* Parse the given string to a java {@link Duration}. The string is in format "{length
* value}{time unit label}", e.g. "123ms", "321 s". If no time unit label is specified, it will
* be considered as milliseconds.
*
* <p>Supported time unit labels are:
*
* <ul>
* <li>DAYS: "d", "day"
* <li>HOURS: "h", "hour"
* <li>MINUTES: "min", "minute"
* <li>SECONDS: "s", "sec", "second"
* <li>MILLISECONDS: "ms", "milli", "millisecond"
* <li>MICROSECONDS: "µs", "micro", "microsecond"
* <li>NANOSECONDS: "ns", "nano", "nanosecond"
* </ul>
*
* @param text
* string to parse.
*/
public static Duration parseDuration(String text) {
ValidationUtils.checkArgument(!StringUtils.isNullOrEmpty(text));
final String trimmed = text.trim();
ValidationUtils.checkArgument(!trimmed.isEmpty(), "argument is an empty- or whitespace-only string");
final int len = trimmed.length();
  int pos = 0;
  char current;
  while ((pos < len) && ((current = trimmed.charAt(pos)) >= '0') && (current <= '9')) {
    pos++;
  }
final String number = trimmed.substring(0, pos);
final String unitLabel = trimmed.substring(pos).trim().toLowerCase(Locale.US);
  if (number.isEmpty()) {
    throw new NumberFormatException("text does not start with a number");
  }
final long value;
try {
value = Long.parseLong(number);// this throws a NumberFormatException on overflow
} catch (NumberFormatException e) {
throw new IllegalArgumentException(("The value '" + number) + "' cannot be re represented as 64bit number (numeric overflow).");
}
if (unitLabel.isEmpty()) {
return Duration.of(value, ChronoUnit.MILLIS);
}
ChronoUnit unit = LABEL_TO_UNIT_MAP.get(unitLabel);
if (unit != null) {
    return Duration.of(value, unit);
} else {
throw new IllegalArgumentException((("Time interval unit label '" + unitLabel) + "' does not match any of the recognized units: ") + TimeUnit.getAllUnits());
}
}
| 3.26 |
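A hedged usage sketch for parseDuration, assuming the class above is org.apache.hudi.common.util.DateTimeUtils from hudi-common (the package name is an assumption) and is on the classpath; a value without a unit label is read as milliseconds:

import java.time.Duration;
import org.apache.hudi.common.util.DateTimeUtils; // assumed package from hudi-common

// Usage sketch: labeled and unlabeled duration strings.
public class ParseDurationExample {
  public static void main(String[] args) {
    Duration a = DateTimeUtils.parseDuration("123ms");
    Duration b = DateTimeUtils.parseDuration("321 s");
    Duration c = DateTimeUtils.parseDuration("2h");
    Duration d = DateTimeUtils.parseDuration("500"); // no label -> milliseconds
    System.out.println(a + " " + b + " " + c + " " + d);
  }
}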
hudi_DateTimeUtils_microsToInstant_rdh
|
/**
* Converts provided microseconds (from epoch) to {@link Instant}
*/
public static Instant microsToInstant(long microsFromEpoch) {
long epochSeconds = microsFromEpoch / 1000000L;
long nanoAdjustment = (microsFromEpoch % 1000000L) * 1000L;
return Instant.ofEpochSecond(epochSeconds, nanoAdjustment);
}
| 3.26 |
hudi_DateTimeUtils_instantToMicros_rdh
|
/**
* Converts provided {@link Instant} to microseconds (from epoch)
*/
public static long instantToMicros(Instant instant) {
long seconds = instant.getEpochSecond();
int v3 = instant.getNano();
if ((seconds < 0) && (v3 > 0)) {
long micros = Math.multiplyExact(seconds + 1, 1000000L);
    long adjustment = (v3 / 1000L) - 1000000;
return Math.addExact(micros, adjustment);
} else {
long micros = Math.multiplyExact(seconds, 1000000L);
return Math.addExact(micros, v3 / 1000L);
}
}
/**
* Parse input String to a {@link java.time.Instant}
| 3.26 |
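A minimal round-trip sketch of the two conversions above, with the arithmetic reproduced inline so it runs with only java.time (the pre-epoch instant exercises the negative-seconds branch):

import java.time.Instant;

// Self-contained round trip of the micros <-> Instant conversions shown above.
public class MicrosRoundTrip {
  static long instantToMicros(Instant instant) {
    long seconds = instant.getEpochSecond();
    int nanos = instant.getNano();
    if (seconds < 0 && nanos > 0) {
      long micros = Math.multiplyExact(seconds + 1, 1_000_000L);
      return Math.addExact(micros, (nanos / 1_000L) - 1_000_000L);
    }
    return Math.addExact(Math.multiplyExact(seconds, 1_000_000L), nanos / 1_000L);
  }

  static Instant microsToInstant(long micros) {
    return Instant.ofEpochSecond(micros / 1_000_000L, (micros % 1_000_000L) * 1_000L);
  }

  public static void main(String[] args) {
    Instant t = Instant.parse("1969-12-31T23:59:59.123456Z"); // pre-epoch case
    long micros = instantToMicros(t);
    System.out.println(micros + " -> " + microsToInstant(micros));
  }
}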
hudi_DateTimeUtils_formatUnixTimestamp_rdh
|
/**
* Convert UNIX_TIMESTAMP to string in given format.
*
* @param unixTimestamp
* UNIX_TIMESTAMP
* @param timeFormat
* string time format
*/
public static String formatUnixTimestamp(long unixTimestamp, String timeFormat) {
ValidationUtils.checkArgument(!StringUtils.isNullOrEmpty(timeFormat));
DateTimeFormatter dtf = DateTimeFormatter.ofPattern(timeFormat);
return LocalDateTime.ofInstant(Instant.ofEpochSecond(unixTimestamp), ZoneId.systemDefault()).format(dtf);
}
| 3.26 |
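A self-contained sketch equivalent to formatUnixTimestamp; note the output depends on the JVM's default time zone because of ZoneId.systemDefault() (the timestamp and pattern are illustrative):

import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;

// Sketch: render an epoch-seconds timestamp in the JVM's default time zone.
public class FormatUnixTimestampExample {
  public static void main(String[] args) {
    long unixTimestamp = 1_700_000_000L;
    DateTimeFormatter dtf = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");
    System.out.println(LocalDateTime.ofInstant(Instant.ofEpochSecond(unixTimestamp), ZoneId.systemDefault()).format(dtf));
  }
}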
hudi_DateTimeUtils_plural_rdh
|
/**
*
* @param label
* the original label
* @return both the singular format and plural format of the original label
*/
private static String[] plural(String label) {
return new String[]{ label, label + PLURAL_SUFFIX };
}
| 3.26 |
hudi_DateTimeUtils_singular_rdh
|
/**
*
* @param label
* the original label
* @return the singular format of the original label
*/
private static String[] singular(String label) {
  return new String[]{ label };
}
| 3.26 |
hudi_HoodieFileGroupReader_hasNext_rdh
|
/**
*
* @return {@code true} if the next record exists; {@code false} otherwise.
* @throws IOException
* on reader error.
 */
public boolean hasNext() throws IOException {
return recordBuffer.hasNext();
}
/**
*
* @return The next record after calling {@link #hasNext}
| 3.26 |
hudi_SyncUtilHelpers_runHoodieMetaSync_rdh
|
/**
* Create an instance of an implementation of {@link HoodieSyncTool} that will sync all the relevant meta information
* with an external metastore such as Hive etc. to ensure Hoodie tables can be queried or read via external systems.
*
* @param syncToolClassName
* Class name of the {@link HoodieSyncTool} implementation.
* @param props
* property map.
* @param hadoopConfig
* Hadoop confs.
* @param fs
* Filesystem used.
* @param targetBasePath
* The target base path that contains the hoodie table.
* @param baseFileFormat
* The file format used by the hoodie table (defaults to PARQUET).
*/
public static void runHoodieMetaSync(String syncToolClassName, TypedProperties props, Configuration hadoopConfig, FileSystem fs, String targetBasePath, String baseFileFormat) {
if (targetBasePath == null) {
throw new IllegalArgumentException("Target base path must not be null");
}
// Get or create a lock for the specific table
Lock tableLock = TABLE_LOCKS.computeIfAbsent(targetBasePath, k -> new ReentrantLock());
tableLock.lock();
try {
try (HoodieSyncTool v1 = instantiateMetaSyncTool(syncToolClassName, props, hadoopConfig, fs, targetBasePath, baseFileFormat)) {
v1.syncHoodieTable();
} catch (Throwable e) {
throw new HoodieMetaSyncException("Could not sync using the meta sync class " + syncToolClassName, e);
}
} finally {
tableLock.unlock();
}
}
| 3.26 |
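The computeIfAbsent + ReentrantLock pair above serializes meta syncs per table base path while letting different tables sync concurrently. A standalone sketch of that per-key locking pattern (class and task are hypothetical):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

// Sketch: one lock per key, created lazily, so work on the same key is serialized.
public class PerKeyLocks {
  private static final Map<String, Lock> TABLE_LOCKS = new ConcurrentHashMap<>();

  static void runWithTableLock(String targetBasePath, Runnable syncTask) {
    Lock tableLock = TABLE_LOCKS.computeIfAbsent(targetBasePath, k -> new ReentrantLock());
    tableLock.lock();
    try {
      syncTask.run();
    } finally {
      tableLock.unlock();
    }
  }

  public static void main(String[] args) {
    runWithTableLock("/tmp/hudi/table_a", () -> System.out.println("sync table_a"));
  }
}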
hudi_SpillableMapUtils_convertToHoodieRecordPayload_rdh
|
/**
* Utility method to convert bytes to HoodieRecord using schema and payload class.
*/
public static <R> HoodieRecord<R> convertToHoodieRecordPayload(GenericRecord rec, String payloadClazz, String preCombineField, boolean withOperationField) {
return convertToHoodieRecordPayload(rec, payloadClazz, preCombineField, Pair.of(HoodieRecord.RECORD_KEY_METADATA_FIELD, HoodieRecord.PARTITION_PATH_METADATA_FIELD), withOperationField, Option.empty(), Option.empty());
}
| 3.26 |
hudi_SpillableMapUtils_readInternal_rdh
|
/**
* Reads the given file with specific pattern(|crc|timestamp|sizeOfKey|SizeOfValue|key|value|) then
* returns an instance of {@link FileEntry}.
*/
private static FileEntry readInternal(RandomAccessFile file, long valuePosition, int valueLength) throws IOException {
file.seek(valuePosition);
long crc = file.readLong();
long timestamp = file.readLong();
int keySize = file.readInt();
int valueSize = file.readInt();
byte[] key = new byte[keySize];
file.readFully(key, 0, keySize);
byte[] value = new byte[valueSize];
if (valueSize != valueLength) {
throw new HoodieCorruptedDataException("unequal size of payload written to external file, data may be corrupted");
}
file.readFully(value, 0, valueSize);
long crcOfReadValue = generateChecksum(value);
if (crc != crcOfReadValue) {
throw new HoodieCorruptedDataException("checksum of payload written to external disk does not match, data may be corrupted");
}
return new FileEntry(crc, keySize, valueSize, key, value, timestamp);
}
| 3.26 |
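Each spilled entry above follows the layout |crc|timestamp|sizeOfKey|sizeOfValue|key|value|. A minimal self-contained sketch that writes one entry in that layout and reads it back with RandomAccessFile; using CRC32 over the value bytes is an assumption for illustration, not necessarily Hudi's exact checksum:

import java.io.File;
import java.io.RandomAccessFile;
import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32;

// Sketch: write |crc|timestamp|sizeOfKey|sizeOfValue|key|value|, then read it back and
// verify the value checksum, mirroring the checks in readInternal.
public class SpillEntrySketch {
  public static void main(String[] args) throws Exception {
    byte[] key = "record-key-1".getBytes(StandardCharsets.UTF_8);
    byte[] value = "payload-bytes".getBytes(StandardCharsets.UTF_8);
    CRC32 crc32 = new CRC32();
    crc32.update(value);

    File file = File.createTempFile("spill", ".data");
    try (RandomAccessFile raf = new RandomAccessFile(file, "rw")) {
      raf.writeLong(crc32.getValue());           // crc
      raf.writeLong(System.currentTimeMillis()); // timestamp
      raf.writeInt(key.length);                  // sizeOfKey
      raf.writeInt(value.length);                // sizeOfValue
      raf.write(key);
      raf.write(value);

      raf.seek(0);
      long crc = raf.readLong();
      long timestamp = raf.readLong();
      byte[] readKey = new byte[raf.readInt()];
      byte[] readValue = new byte[raf.readInt()];
      raf.readFully(readKey);
      raf.readFully(readValue);
      CRC32 check = new CRC32();
      check.update(readValue);
      System.out.println("crc matches: " + (crc == check.getValue()) + ", ts=" + timestamp);
    } finally {
      file.delete();
    }
  }
}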
hudi_SpillableMapUtils_getPreCombineVal_rdh
|
/**
* Returns the preCombine value with given field name.
*
* @param rec
* The avro record
* @param preCombineField
* The preCombine field name
* @return the preCombine field value or 0 if the field does not exist in the avro schema
 */
private static Object getPreCombineVal(GenericRecord rec, String preCombineField) {
if (preCombineField == null) {
return 0;
}
Schema.Field field = rec.getSchema().getField(preCombineField);
return field == null ? 0 : rec.get(field.pos());
}
| 3.26 |
hudi_SpillableMapUtils_computePayloadSize_rdh
|
/**
* Compute a bytes representation of the payload by serializing the contents This is used to estimate the size of the
* payload (either in memory or when written to disk).
 */
public static <R> long computePayloadSize(R value, SizeEstimator<R> valueSizeEstimator) throws IOException {
return valueSizeEstimator.sizeEstimate(value);
}
| 3.26 |
hudi_SpillableMapUtils_readBytesFromDisk_rdh
|
/**
* Using the schema and payload class, read and convert the bytes on disk to a HoodieRecord.
*/
public static byte[] readBytesFromDisk(RandomAccessFile file, long valuePosition, int valueLength) throws IOException {
FileEntry fileEntry = readInternal(file, valuePosition, valueLength);
return fileEntry.getValue();
}
| 3.26 |
hudi_SpillableMapUtils_spillToDisk_rdh
|
/**
* Write Value and other metadata necessary to disk. Each entry has the following sequence of data
* <p>
* |crc|timestamp|sizeOfKey|SizeOfValue|key|value|
*/
public static long spillToDisk(SizeAwareDataOutputStream outputStream, FileEntry fileEntry) throws IOException {
return spill(outputStream, fileEntry);
}
| 3.26 |
hudi_SpillableMapUtils_generateEmptyPayload_rdh
|
/**
* Utility method to convert bytes to HoodieRecord using schema and payload class.
*/
public static <R> R generateEmptyPayload(String recKey, String partitionPath, Comparable orderingVal, String payloadClazz) {
  HoodieRecord<? extends HoodieRecordPayload> hoodieRecord = new HoodieAvroRecord<>(new HoodieKey(recKey, partitionPath), HoodieRecordUtils.loadPayload(payloadClazz, new Object[]{ null, orderingVal }, GenericRecord.class, Comparable.class));
  return ((R) (hoodieRecord));
}
| 3.26 |
AreaShop_BukkitHandler1_12_getSignFacing_rdh
|
// Uses Sign, which is deprecated in 1.13+, broken in 1.14+
@Override
public BlockFace getSignFacing(Block block) {
if (block == null) {
return null;
}
BlockState blockState = block.getState();
if (blockState == null) {
return null;
}
MaterialData materialData = blockState.getData();
if (materialData instanceof Sign) {
return ((Sign) (materialData)).getFacing();
}
return null;
}
| 3.26 |
AreaShop_ResoldRegionEvent_getFromPlayer_rdh
|
/**
* Get the player that the region has been bought from.
*
* @return The UUID of the player that the region has been bought from
*/
public UUID getFromPlayer() {
return from;
}
| 3.26 |
AreaShop_FileManager_getRegions_rdh
|
/**
* Get all regions.
*
* @return List of all regions (it is safe to modify the list)
*/
public List<GeneralRegion> getRegions() {
return new ArrayList<>(regions.values());
}
| 3.26 |
AreaShop_FileManager_addRegion_rdh
|
/**
* Add a region to the list and mark it as to-be-saved.
*
* @param region
* The region to add
* @return true when successful, otherwise false (denied by an event listener)
*/
public AddingRegionEvent addRegion(GeneralRegion region) {
AddingRegionEvent v17 = addRegionNoSave(region);
if (v17.isCancelled()) {
return v17;
}
region.saveRequired();
markGroupsAutoDirty();
return v17;
}
| 3.26 |
AreaShop_FileManager_saveVersions_rdh
|
/**
* Save the versions file to disk.
*/
public void saveVersions() {
  if (!new File(versionPath).exists()) {
    AreaShop.debug("versions file created, this should happen only after installing or upgrading the plugin");
  }
  try (ObjectOutputStream output = new ObjectOutputStream(new FileOutputStream(versionPath))) {
    output.writeObject(versions);
  } catch (IOException e) {
    AreaShop.warn("File could not be saved: " + versionPath);
  }
}
| 3.26 |
AreaShop_FileManager_checkForInactiveRegions_rdh
|
/**
* Check all regions and unrent/sell them if the player is inactive for too long.
*/
public void checkForInactiveRegions() {
Do.forAll(plugin.getConfig().getInt("inactive.regionsPerTick"), getRegions(), GeneralRegion::checkInactive);
}
| 3.26 |
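Several FileManager methods in this section hand work to Do.forAll together with a regionsPerTick setting. That helper is not included in these snippets; the following is only a minimal sketch of how such a per-tick batching utility could look on top of the Bukkit scheduler, not AreaShop's real Do class.

// Sketch of a per-tick batching helper in the spirit of Do.forAll: process `perTick`
// items each server tick so long region lists do not stall the main thread.
// The plugin parameter and class name are assumptions for illustration.
import java.util.Iterator;
import java.util.function.Consumer;
import org.bukkit.plugin.java.JavaPlugin;
import org.bukkit.scheduler.BukkitRunnable;

public class Batcher {
    public static <T> void forAll(JavaPlugin plugin, int perTick, Iterable<T> items, Consumer<T> action) {
        Iterator<T> iterator = items.iterator();
        new BukkitRunnable() {
            @Override
            public void run() {
                // Handle at most perTick items this tick
                for (int i = 0; i < perTick && iterator.hasNext(); i++) {
                    action.accept(iterator.next());
                }
                if (!iterator.hasNext()) {
                    cancel(); // all items processed, stop the repeating task
                }
            }
        }.runTaskTimer(plugin, 1L, 1L); // run every tick until the list is exhausted
    }
}

Processing only a bounded number of regions per tick is what the various regionsPerTick settings referenced in these methods control; AreaShop's actual scheduling may differ in detail.
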
AreaShop_FileManager_m1_rdh
|
/**
* Load the groups.yml file from disk
*
* @return true if succeeded, otherwise false
*/
public boolean m1() {
boolean result = true;
File groupFile = new File(groupsPath);
if (groupFile.exists() && groupFile.isFile()) {
try (InputStreamReader reader = new InputStreamReader(new FileInputStream(groupFile), Charsets.UTF_8)) {
groupsConfig = YamlConfiguration.loadConfiguration(reader);
			} catch (IOException e) {
				AreaShop.warn("Could not load groups.yml file: " + groupFile.getAbsolutePath());
				result = false;
			}
		}
if (groupsConfig == null) {
groupsConfig = new YamlConfiguration();
}
for (String groupName : groupsConfig.getKeys(false)) {
RegionGroup group = new RegionGroup(plugin, groupName);
groups.put(groupName, group);
}
return result;
}
| 3.26 |
AreaShop_FileManager_performPeriodicSignUpdate_rdh
|
/**
* Update all signs that need periodic updating.
*/
public void performPeriodicSignUpdate() {
Do.forAll(plugin.getConfig().getInt("signs.regionsPerTick"), getRents(), region -> {
if (region.needsPeriodicUpdate()) {
region.update();
}
});
}
| 3.26 |
AreaShop_FileManager_saveRequiredFiles_rdh
|
/**
* Save all region related files spread over time (low load).
*/
	public void saveRequiredFiles() {
if (isSaveGroupsRequired()) {
saveGroupsNow();
}
this.saveWorldGuardRegions();
Do.forAll(plugin.getConfig().getInt("saving.regionsPerTick"), getRegions(), region -> {
if (region.isSaveRequired()) {
region.saveNow();
}
});
}
| 3.26 |
AreaShop_FileManager_getRegion_rdh
|
/**
* Get a region.
*
* @param name
* The name of the region to get (will be normalized)
* @return The region if found, otherwise null
*/
public GeneralRegion getRegion(String name) {
return regions.get(name.toLowerCase());
}
| 3.26 |
AreaShop_FileManager_saveGroupsNow_rdh
|
/**
* Save the groups file to disk synchronously.
*/
public void saveGroupsNow() {
AreaShop.debug("saveGroupsNow() done");
saveGroupsRequired = false;
try {
groupsConfig.save(groupsPath);
} catch (IOException e) {
AreaShop.warn("Groups file could not be saved: " + groupsPath);
}
}
| 3.26 |
AreaShop_FileManager_getRent_rdh
|
/**
* Get a rental region.
*
* @param name
* The name of the rental region (will be normalized)
* @return RentRegion if it could be found, otherwise null
*/
public RentRegion getRent(String name) {
		GeneralRegion region = regions.get(name.toLowerCase());
		if (region instanceof RentRegion) {
			return ((RentRegion) (region));
		}
		return null;
}
| 3.26 |
AreaShop_FileManager_preUpdateFiles_rdh
|
/**
* Checks for old file formats and converts them to the latest format.
* After conversion the region files need to be loaded.
*/
@SuppressWarnings("unchecked")
private void preUpdateFiles() {
Integer fileStatus = versions.get(AreaShop.versionFiles);
		// If the files are already at the current version
if ((fileStatus != null) && (fileStatus == AreaShop.versionFilesCurrent)) {
return;
}
AreaShop.info("Updating AreaShop data to the latest format:");
// Update to YAML based format
if ((fileStatus == null) || (fileStatus < 2)) {
String rentPath = (plugin.getDataFolder() + File.separator) + "rents";
String buyPath = (plugin.getDataFolder() + File.separator) + "buys";
File rentFile = new File(rentPath);
File buyFile = new File(buyPath);
String oldFolderPath = ((plugin.getDataFolder() + File.separator) + "#old") + File.separator;
File oldFolderFile = new File(oldFolderPath);
// Convert old rent files
boolean buyFileFound = false;
boolean rentFileFound = false;
if (rentFile.exists()) {
rentFileFound = true;
if ((!oldFolderFile.exists()) & (!oldFolderFile.mkdirs())) {
AreaShop.warn("Could not create directory: " + oldFolderFile.getAbsolutePath());
}
versions.putIfAbsent("rents", -1);
HashMap<String, HashMap<String, String>> rents = null;
try {
ObjectInputStream input = new ObjectInputStream(new FileInputStream(rentPath));
rents = ((HashMap<String, HashMap<String, String>>) (input.readObject()));
input.close();
} catch (IOException | ClassNotFoundException | ClassCastException e) {
AreaShop.warn(" Error: Something went wrong reading file: " + rentPath);
}
// Delete the file if it is totally wrong
if (rents == null) {
try {
if (!rentFile.delete()) {
AreaShop.warn("Could not delete file: " + rentFile.getAbsolutePath());
}
} catch (Exception e) {
AreaShop.warn("Could not delete file: " + rentFile.getAbsolutePath());
}
} else {
// Move old file
try {
					Files.move(new File(rentPath), new File(oldFolderPath + "rents"));
				} catch (Exception e) {
					AreaShop.warn((" Could not create a backup of '" + rentPath) + "', check the file permissions (conversion to next version continues)");
}
// Check if conversion is needed
if (versions.get("rents") < 1) {
// Upgrade the rent to the latest version
if (versions.get("rents") < 0) {
						for (String rentName : new ArrayList<>(rents.keySet())) {
HashMap<String, String> rent = rents.get(rentName);
// Save the rentName in the hashmap and use a small caps rentName as key
if (rent.get("name") == null) {
rent.put("name", rentName);
rents.remove(rentName);
rents.put(rentName.toLowerCase(), rent);
}
// Save the default setting for region restoring
rent.putIfAbsent("restore", "general");// Save the default setting for the region restore profile
rent.putIfAbsent("profile", "default");
// Change to version 0
versions.put("rents", 0);
}
AreaShop.info((" Updated version of '" + buyPath) + "' from -1 to 0 (switch to using lowercase region names, adding default schematic enabling and profile)"); }
if (versions.get("rents") < 1) {
for (String rentName : rents.keySet()) {
HashMap<String, String> rent = rents.get(rentName);
if (rent.get("player") != null) {
// Fake deprecation by Bukkit to inform developers, method will stay
@SuppressWarnings("deprecation")
OfflinePlayer offlinePlayer = Bukkit.getOfflinePlayer(rent.get("player"));
rent.put("playeruuid", offlinePlayer.getUniqueId().toString());
rent.remove("player");
}
// Change version to 1
versions.put("rents", 1);
}AreaShop.info((" Updated version of '" + rentPath)
+ "' from 0 to 1 (switch to UUID's for player identification)");
}
}
// Save rents to new format
File regionsFile = new File(regionsPath);
if ((!regionsFile.exists()) & (!regionsFile.mkdirs())) {
AreaShop.warn("Could not create directory: " + regionsFile.getAbsolutePath());
return;
}
for (HashMap<String, String> rent : rents.values()) {
YamlConfiguration regionConfig = new YamlConfiguration();
regionConfig.set("general.name", rent.get("name").toLowerCase());
regionConfig.set("general.type", "rent");
regionConfig.set("general.world", rent.get("world"));
regionConfig.set("general.signs.0.location.world", rent.get("world"));
regionConfig.set("general.signs.0.location.x", Double.parseDouble(rent.get("x")));
regionConfig.set("general.signs.0.location.y", Double.parseDouble(rent.get("y")));
regionConfig.set("general.signs.0.location.z", Double.parseDouble(rent.get("z")));
regionConfig.set("rent.price", Double.parseDouble(rent.get("price")));
regionConfig.set("rent.duration", rent.get("duration"));
if ((rent.get("restore") != null) && (!rent.get("restore").equals("general"))) {
regionConfig.set("general.enableRestore", rent.get("restore"));
}
if ((rent.get("profile") != null) && (!rent.get("profile").equals("default"))) {
regionConfig.set("general.schematicProfile", rent.get("profile"));
}
if (rent.get("tpx") != null) {
regionConfig.set("general.teleportLocation.world", rent.get("world"));
regionConfig.set("general.teleportLocation.x", Double.parseDouble(rent.get("tpx")));regionConfig.set("general.teleportLocation.y", Double.parseDouble(rent.get("tpy")));
regionConfig.set("general.teleportLocation.z", Double.parseDouble(rent.get("tpz")));
regionConfig.set("general.teleportLocation.yaw", rent.get("tpyaw"));
regionConfig.set("general.teleportLocation.pitch", rent.get("tppitch"));
}
if (rent.get("playeruuid") != null) {
regionConfig.set("rent.renter", rent.get("playeruuid"));
regionConfig.set("rent.renterName", Utils.toName(rent.get("playeruuid")));
regionConfig.set("rent.rentedUntil", Long.parseLong(rent.get("rented")));
}
try {
regionConfig.save(new File(((regionsPath + File.separator) + rent.get("name").toLowerCase()) + ".yml"));
} catch (IOException e) {
AreaShop.warn((((" Error: Could not save region file while converting: " + regionsPath) + File.separator) + rent.get("name").toLowerCase()) + ".yml");
						}
					}
AreaShop.info(" Updated rent regions to new .yml format (check the /regions folder)");
}
// Change version number
versions.remove("rents");
versions.put(AreaShop.versionFiles, AreaShop.versionFilesCurrent);
saveVersions();
}
if (buyFile.exists()) {
buyFileFound = true;
if ((!oldFolderFile.exists()) & (!oldFolderFile.mkdirs())) {
AreaShop.warn("Could not create directory: " + oldFolderFile.getAbsolutePath());
return;
}
versions.putIfAbsent("buys", -1);
HashMap<String, HashMap<String, String>> buys = null;
try {
ObjectInputStream input = new ObjectInputStream(new FileInputStream(buyPath));
buys = ((HashMap<String, HashMap<String, String>>) (input.readObject()));
input.close();
} catch (IOException | ClassNotFoundException | ClassCastException e) {
AreaShop.warn(" Something went wrong reading file: " + buyPath);
}
// Delete the file if it is totally wrong
if (buys == null) {
try {
if (!buyFile.delete()) {
AreaShop.warn("Could not delete file: " + buyFile.getAbsolutePath());
}
} catch (Exception e) {
AreaShop.warn("Could not delete file: " + buyFile.getAbsolutePath());
}
} else {
// Backup current file
try {
					Files.move(new File(buyPath), new File(oldFolderPath + "buys"));
} catch (Exception e) {
AreaShop.warn((" Could not create a backup of '" + buyPath) + "', check the file permissions (conversion to next version continues)");
}
// Check if conversion is needed
if (versions.get("buys") < 1) {
// Upgrade the buy to the latest version
if (versions.get("buys") < 0) {
						for (String buyName : new ArrayList<>(buys.keySet())) {
							HashMap<String, String> buy = buys.get(buyName);
// Save the buyName in the hashmap and use a small caps buyName as key
if (buy.get("name") == null) {
buy.put("name", buyName);
buys.remove(buyName);
buys.put(buyName.toLowerCase(), buy);
}
// Save the default setting for region restoring
buy.putIfAbsent("restore", "general");
// Save the default setting for the region restore profile
buy.putIfAbsent("profile", "default");
// Change to version 0
versions.put("buys", 0);
}
AreaShop.info((" Updated version of '"
+ buyPath) + "' from -1 to 0 (switch to using lowercase region names, adding default schematic enabling and profile)");
}
if (versions.get("buys") < 1) {
for (String buyName : buys.keySet()) {
HashMap<String, String> buy = buys.get(buyName);
if (buy.get("player") != null) {
// Fake deprecation by Bukkit to inform developers, method will stay
@SuppressWarnings("deprecation")OfflinePlayer offlinePlayer = Bukkit.getOfflinePlayer(buy.get("player"));
buy.put("playeruuid", offlinePlayer.getUniqueId().toString());
buy.remove("player");
}
// Change version to 1
versions.put("buys", 1);
}
AreaShop.info((" Updated version of '" + buyPath) + "' from 0 to 1 (switch to UUID's for player identification)");
}
}
// Save buys to new format
					File regionsFile = new File(regionsPath);
					if ((!regionsFile.exists()) & (!regionsFile.mkdirs())) {
						AreaShop.warn("Could not create directory: " + regionsFile.getAbsolutePath());
					}
for (HashMap<String, String> buy : buys.values()) {
YamlConfiguration regionConfig = new YamlConfiguration();
regionConfig.set("general.name", buy.get("name").toLowerCase());
regionConfig.set("general.type", "buy");
regionConfig.set("general.world", buy.get("world"));
regionConfig.set("general.signs.0.location.world", buy.get("world"));
regionConfig.set("general.signs.0.location.x", Double.parseDouble(buy.get("x")));
regionConfig.set("general.signs.0.location.y", Double.parseDouble(buy.get("y")));
regionConfig.set("general.signs.0.location.z", Double.parseDouble(buy.get("z")));
regionConfig.set("buy.price", Double.parseDouble(buy.get("price")));
if ((buy.get("restore") != null) && (!buy.get("restore").equals("general"))) {
regionConfig.set("general.enableRestore", buy.get("restore"));
}if ((buy.get("profile") != null) && (!buy.get("profile").equals("default"))) {regionConfig.set("general.schematicProfile", buy.get("profile"));
}
if (buy.get("tpx") != null) {
regionConfig.set("general.teleportLocation.world", buy.get("world"));
regionConfig.set("general.teleportLocation.x", Double.parseDouble(buy.get("tpx")));
regionConfig.set("general.teleportLocation.y", Double.parseDouble(buy.get("tpy")));
regionConfig.set("general.teleportLocation.z", Double.parseDouble(buy.get("tpz")));
regionConfig.set("general.teleportLocation.yaw", buy.get("tpyaw"));
regionConfig.set("general.teleportLocation.pitch", buy.get("tppitch"));
}
if (buy.get("playeruuid") != null) {
regionConfig.set("buy.buyer", buy.get("playeruuid"));
regionConfig.set("buy.buyerName", Utils.toName(buy.get("playeruuid")));
						}
						try {
regionConfig.save(new File(((regionsPath + File.separator) + buy.get("name").toLowerCase()) + ".yml"));
} catch (IOException e) {
AreaShop.warn((((" Error: Could not save region file while converting: " + regionsPath) + File.separator) + buy.get("name").toLowerCase()) + ".yml");
}
}
AreaShop.info(" Updated buy regions to new .yml format (check the /regions folder)");
}
// Change version number
versions.remove("buys");
}
// Separate try-catch blocks to try them all individually (don't stop after 1 has failed)
try {
Files.move(new File(rentPath + ".old"), new File(oldFolderPath + "rents.old"));
} catch (Exception e) {
// Ignore
}
try {
Files.move(new File(buyPath + ".old"), new File(oldFolderPath + "buys.old"));
} catch (Exception e) {
// Ignore
}
if (buyFileFound || rentFileFound) {
try {
Files.move(new File((plugin.getDataFolder() + File.separator) + "config.yml"), new File(oldFolderPath + "config.yml"));
} catch (Exception e) {
// Ignore
}
}
// Update versions file to 2
versions.put(AreaShop.versionFiles, 2);
			saveVersions();
			if (buyFileFound || rentFileFound) {
AreaShop.info(" Updated to YAML based storage (v1 to v2)");
}
}
}
| 3.26 |
AreaShop_FileManager_getGroupSettings_rdh
|
/**
* Get the settings of a group.
*
* @param groupName
* Name of the group to get the settings from
* @return The settings of the group
*/
public ConfigurationSection getGroupSettings(String groupName) {
return groupsConfig.getConfigurationSection(groupName.toLowerCase());
}
| 3.26 |
AreaShop_FileManager_saveGroupsIsRequired_rdh
|
/**
	 * Mark the groups file as requiring a save to disk (the actual save happens later).
*/
public void saveGroupsIsRequired() {
saveGroupsRequired = true;
}
| 3.26 |
AreaShop_FileManager_getGroups_rdh
|
/**
* Get all groups.
*
	 * @return Collection with all groups (backed by the internal map, so it should not be modified directly)
*/
public Collection<RegionGroup> getGroups() {
return groups.values();
}
| 3.26 |
AreaShop_FileManager_getRents_rdh
|
/**
* Get all rental regions.
*
* @return List of all rental regions
*/
public List<RentRegion> getRents() {
List<RentRegion> result = new ArrayList<>();
for (GeneralRegion region : regions.values()) {
if (region instanceof RentRegion) {
result.add(((RentRegion) (region)));
}
}
		return result;
	}
| 3.26 |
AreaShop_FileManager_saveWorldGuardRegions_rdh
|
/**
* Save all worldGuard regions that need saving.
*/
public void saveWorldGuardRegions() {
for (String world : worldRegionsRequireSaving) {
World bukkitWorld = Bukkit.getWorld(world);
if (bukkitWorld != null) {
RegionManager manager = plugin.getRegionManager(bukkitWorld);
if (manager != null) {
try {
if (plugin.getWorldGuard().getDescription().getVersion().startsWith("5.")) {
manager.save();
} else {
manager.saveChanges();
}
} catch (Exception e) {
AreaShop.warn(("WorldGuard regions in world " + world) + " could not be saved");
}
}
}
}
}
| 3.26 |
AreaShop_FileManager_loadRegionFiles_rdh
|
/**
* Load all region files.
*/
public void loadRegionFiles() {
regions.clear();
		final File file = new File(regionsPath);
if (!file.exists()) {
if (!file.mkdirs()) {
AreaShop.warn("Could not create region files directory: " + file.getAbsolutePath());
return;
}
plugin.setReady(true);
} else if (file.isDirectory()) {
loadRegionFilesNow();
}
}
| 3.26 |
AreaShop_FileManager_updateRegions_rdh
|
/**
* Update regions in a task to minimize lag.
*
* @param regions
* Regions to update
* @param confirmationReceiver
* The CommandSender that should be notified at completion
*/
public void updateRegions(final List<GeneralRegion> regions, final CommandSender confirmationReceiver) {
final int regionsPerTick = plugin.getConfig().getInt("update.regionsPerTick");
if (confirmationReceiver != null) {
plugin.message(confirmationReceiver, "reload-updateStart", regions.size(), regionsPerTick * 20);
}
Do.forAll(regionsPerTick, regions, GeneralRegion::update, () -> {
			if (confirmationReceiver != null) {
plugin.message(confirmationReceiver, "reload-updateComplete");
}
});
}
| 3.26 |
AreaShop_FileManager_getRegionSettings_rdh
|
/**
* Get the default region settings as provided by the user (default.yml).
*
* @return YamlConfiguration with the settings (might miss settings, which should be filled in with {@link #getFallbackRegionSettings()})
*/
public YamlConfiguration getRegionSettings() {
return defaultConfig;
}
| 3.26 |
AreaShop_FileManager_getRentNames_rdh
|
/**
* Get a list of names of all rent regions.
*
* @return A String list with all the names
*/
public List<String> getRentNames() {
ArrayList<String> result = new ArrayList<>();
for (RentRegion region : getRents()) {
			result.add(region.getName());
		}
		return result;
	}
| 3.26 |
AreaShop_FileManager_checkRents_rdh
|
/**
	 * Unrent regions that have no time left; the number of regions to check per tick is set in the config.
	 */
	public void checkRents() {
Do.forAll(plugin.getConfig().getInt("expiration.regionsPerTick"), getRents(), RentRegion::checkExpiration);
}
| 3.26 |
AreaShop_FileManager_getGroupNames_rdh
|
/**
* Get a list of names of all groups.
*
* @return A String list with all the names
*/
	public List<String> getGroupNames() {
		ArrayList<String> result = new ArrayList<>();
for (RegionGroup group : getGroups()) {
result.add(group.getName());
}
return result;
}
| 3.26 |
AreaShop_FileManager_m0_rdh
|
/**
* Remove a region from the list.
*
* @param region
* The region to remove
* @param giveMoneyBack
* use true to give money back to the player if someone is currently holding this region, otherwise false
	 * @return The DeletingRegionEvent that was fired; it is cancelled if deleting the region was denied
*/
public DeletingRegionEvent m0(GeneralRegion region, boolean giveMoneyBack) {
DeletingRegionEvent event = new DeletingRegionEvent(region);
if (region == null) {
event.cancel("null region");
return event;
}
Bukkit.getPluginManager().callEvent(event);
if (event.isCancelled()) {
return event;
}
region.setDeleted();
if ((region instanceof RentRegion) && ((RentRegion) (region)).isRented()) {
((RentRegion) (region)).unRent(giveMoneyBack, null);
} else if ((region instanceof BuyRegion) && ((BuyRegion) (region)).isSold()) {
((BuyRegion) (region)).sell(giveMoneyBack, null);
}
// Handle schematics
region.handleSchematicEvent(RegionEvent.DELETED);
// Delete the signs
if (region.getWorld() != null) {
for (Location sign : region.getSignsFeature().getSignLocations()) {
sign.getBlock().setType(Material.AIR);
}
}
// Remove from RegionGroups
RegionGroup[] regionGroups = getGroups().toArray(new RegionGroup[0]);
for (RegionGroup group : regionGroups) {
group.removeMember(region);
}
region.resetRegionFlags();
regions.remove(region.getLowerCaseName());
// Remove file
File file = new File(((((plugin.getDataFolder() + File.separator) + AreaShop.regionsFolder) + File.separator) + region.getLowerCaseName()) + ".yml");
if (file.exists()) {
boolean deleted;
try {
deleted = file.delete();
} catch (Exception e) {
deleted = false;
}
if (!deleted) {
AreaShop.warn("File could not be deleted: " +
file.toString());
}
}
// Broadcast event
Bukkit.getPluginManager().callEvent(new DeletedRegionEvent(region));
		return event;
	}
| 3.26 |
AreaShop_FileManager_saveRequiredFilesAtOnce_rdh
|
/**
* Save all region related files directly (only for cases like onDisable()).
*/
public void saveRequiredFilesAtOnce() {
if (isSaveGroupsRequired()) {
saveGroupsNow();
}
for (GeneralRegion region : getRegions()) {
if (region.isSaveRequired()) {
region.saveNow();
}
}
this.saveWorldGuardRegions();
}
| 3.26 |
AreaShop_FileManager_getGroup_rdh
|
/**
* Get a group.
*
* @param name
* The name of the group to get (will be normalized)
* @return The group if found, otherwise null
*/
public RegionGroup getGroup(String name) {
return groups.get(name.toLowerCase());
}
| 3.26 |
AreaShop_FileManager_addGroup_rdh
|
/**
* Add a RegionGroup.
*
* @param group
* The RegionGroup to add
*/
public void addGroup(RegionGroup group) {
groups.put(group.getName().toLowerCase(), group);
String lowGroup = group.getName().toLowerCase();
		groupsConfig.set(lowGroup + ".name", group.getName());
groupsConfig.set(lowGroup + ".priority", 0);
saveGroupsIsRequired();
}
| 3.26 |
AreaShop_FileManager_getBuys_rdh
|
/**
* Get all buy regions.
*
* @return List of all buy regions
*/
public List<BuyRegion> getBuys() {
List<BuyRegion> result = new ArrayList<>();
for (GeneralRegion region : regions.values()) {
if (region instanceof BuyRegion) {
result.add(((BuyRegion) (region)));
}
		}
		return result;
}
| 3.26 |
AreaShop_FileManager_loadDefaultFile_rdh
|
/**
* Load the default.yml file
*
* @return true if it has been loaded successfully, otherwise false
*/
public boolean loadDefaultFile() {
boolean result = true;
File defaultFile = new File(defaultPath);
		// Save the file from the jar to disk if it does not exist
if (!defaultFile.exists()) {
			try (InputStream input = plugin.getResource(AreaShop.defaultFile);
				 OutputStream output = new FileOutputStream(defaultFile)) {
int read;
byte[] bytes = new byte[1024];
				while ((read = input.read(bytes)) != (-1)) {
					output.write(bytes, 0, read);
				}
AreaShop.info("File with default region settings has been saved, should only happen on first startup");
} catch (IOException e) {
AreaShop.warn("Something went wrong saving the default region settings: " + defaultFile.getAbsolutePath());
}
}
// Load default.yml from the plugin folder, and as backup the default one
		try (InputStreamReader custom = new InputStreamReader(new FileInputStream(defaultFile), Charsets.UTF_8);
			 InputStreamReader normal = new InputStreamReader(plugin.getResource(AreaShop.defaultFile), Charsets.UTF_8)) {
defaultConfig = YamlConfiguration.loadConfiguration(custom);
if (defaultConfig.getKeys(false).isEmpty()) {
AreaShop.warn("File 'default.yml' is empty, check for errors in the log.");
result = false;
}
defaultConfigFallback = YamlConfiguration.loadConfiguration(normal);
		} catch (IOException e) {
result = false;
}
return result;
}
| 3.26 |
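As a side note on the jar-to-disk copy above: the manual read/write loop can be expressed more compactly with java.nio. The sketch below assumes a resource literally named "default.yml" and a JavaPlugin instance; it is an illustration, not the plugin's actual code.

// Sketch: copying a bundled resource to the data folder with Files.copy instead of a manual loop.
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import org.bukkit.plugin.java.JavaPlugin;

public class DefaultFileCopy {
    public static void copyDefault(JavaPlugin plugin) throws IOException {
        Path target = plugin.getDataFolder().toPath().resolve("default.yml"); // hypothetical target
        if (Files.notExists(target)) {
            Files.createDirectories(target.getParent());
            try (InputStream in = plugin.getResource("default.yml")) {
                if (in != null) {          // resource missing from the jar: nothing to copy
                    Files.copy(in, target);
                }
            }
        }
    }
}
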