name | code_snippet | score
---|---|---|
open-banking-gateway_Xs2aAdapterConfiguration_xs2aPkcs12KeyStore_rdh
|
/**
* The keystore for QWAC and QSEAL certificates.
*
* @param keystorePath
* Location of the keystore.
* @param keystorePassword
* Keystore password.
*/
@Bean
@SneakyThrows
Pkcs12KeyStore xs2aPkcs12KeyStore(@Value("${" + XS2A_PROTOCOL_CONFIG_PREFIX + "pkcs12.keystore}") String keystorePath,
@Value("${" + XS2A_PROTOCOL_CONFIG_PREFIX + "pkcs12.password}") char[] keystorePassword) {
if (Paths.get(keystorePath).toFile().exists()) {
return new Pkcs12KeyStore(Paths.get(keystorePath).toAbsolutePath().toString(), keystorePassword);
}
try (var is = Resources.getResource(keystorePath).openStream()) {
return new Pkcs12KeyStore(is, keystorePassword, "default_qwac", "default_qseal");
}
}
| 3.26 |
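The snippet above falls back from a keystore on disk to a classpath resource. A minimal, self-contained sketch of that same fallback using only the JDK KeyStore API (the gateway-specific Pkcs12KeyStore aliases and Spring property wiring are not reproduced; names here are illustrative):

import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.KeyStore;

class KeystoreLoadingSketch {
    // Hypothetical helper: load a PKCS#12 keystore from disk if present, otherwise from the classpath.
    static KeyStore loadPkcs12(String location, char[] password) throws Exception {
        KeyStore keyStore = KeyStore.getInstance("PKCS12");
        Path onDisk = Paths.get(location);
        try (InputStream is = Files.exists(onDisk)
                ? Files.newInputStream(onDisk)
                : KeystoreLoadingSketch.class.getClassLoader().getResourceAsStream(location)) {
            // assumes the classpath lookup succeeds when the file is absent
            keyStore.load(is, password);
        }
        return keyStore;
    }
}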
open-banking-gateway_HeadersBodyMapperTemplate_forExecution_rdh
|
/**
* Converts context object into object that can be used for ASPSP API call.
*
* @param context
* Context to convert
* @return Object that can be used with {@code Xs2aAdapter} to perform ASPSP API calls
*/
public ValidatedHeadersBody<H, B> forExecution(C context) {
return new ValidatedHeadersBody<>(toHeaders.map(context), toBody.map(toValidatableBody.map(context)));
}
| 3.26 |
open-banking-gateway_BaseDatasafeDbStorageService_write_rdh
|
/**
* Open Datasafe object for writing.
*
* @param withCallback
* Absolute path of the object to write to, including callback hook. I.e. {@code db://storage/deadbeef}
* @return Stream to write data to.
*/
@Override
@SneakyThrows
@Transactional
public OutputStream write(WithCallback<AbsoluteLocation, ? extends ResourceWriteCallback> withCallback) {
return new SetAndSaveOnClose(m0(withCallback.getWrapped()), handlers.get(deduceTable(withCallback.getWrapped())));
}
| 3.26 |
open-banking-gateway_BaseDatasafeDbStorageService_remove_rdh
|
/**
* Delete object within Datasafe storage.
*
* @param absoluteLocation
* Absolute path of the object to remove. I.e. {@code db://storage/deadbeef}
*/
@Override
@Transactional
public void remove(AbsoluteLocation absoluteLocation) {
handlers.get(deduceTable(absoluteLocation)).delete(m0(absoluteLocation));
}
| 3.26 |
open-banking-gateway_BaseDatasafeDbStorageService_m0_rdh
|
/**
* Resolves objects' ID from path.
*
* @param path
* Object path to resolve ID from.
* @return ID of the object in the table.
*/
protected String m0(AbsoluteLocation<?> path) {
return path.location().getWrapped().getPath().replaceAll("^/", "");
}
| 3.26 |
open-banking-gateway_BaseDatasafeDbStorageService_objectExists_rdh
|
/**
* Checks if object exists within Datasafe storage.
*
* @param absoluteLocation
* Absolute path including protocol to the object. I.e. {@code db://storage/deadbeef}
* @return If the object at the {@code absoluteLocation} exists
*/
@Override
@Transactional
public boolean objectExists(AbsoluteLocation absoluteLocation) {
return handlers.get(deduceTable(absoluteLocation)).read(m0(absoluteLocation)).isPresent();
}
/**
* Lists object within Datasafe storage.
*
* @param absoluteLocation
* Absolute path of the objects' directory including protocol. I.e. {@code db://storage/deadbeef}
* @return All objects that have path within {@code absoluteLocation}
| 3.26 |
open-banking-gateway_BaseDatasafeDbStorageService_read_rdh
|
/**
* Open Datasafe object for reading.
*
* @param absoluteLocation
* Absolute path of the object to read. I.e. {@code db://storage/deadbeef}
* @return Stream to read data from.
*/
@Override
@SneakyThrows
@Transactional(noRollbackFor = BaseDatasafeDbStorageService.DbStorageEntityNotFoundException.class)
public InputStream read(AbsoluteLocation absoluteLocation) {
return new ByteArrayInputStream(requireBytes(absoluteLocation));
}
| 3.26 |
open-banking-gateway_BaseDatasafeDbStorageService_deduceTable_rdh
|
/**
* Resolves objects' table name from path.
*
* @param path
* Object path to resolve table from.
* @return Table name that contains the object.
*/
protected String deduceTable(AbsoluteLocation<?> path) {
return path.location().getWrapped().getHost();
}
| 3.26 |
open-banking-gateway_PaymentAccessFactory_paymentForAnonymousPsu_rdh
|
/**
* Create {@code PaymentAccess} object that is similar to consent facing to anonymous (to OBG) user and ASPSP pair.
*
* @param fintech
* Fintech that initiates the payment
* @param aspsp
* ASPSP/Bank that is going to perform the payment
* @param session
* Session that identifies the payment.
* @return Payment context to authorize
*/
public PaymentAccess paymentForAnonymousPsu(Fintech fintech, Bank aspsp, ServiceSession session) {
return new AnonymousPsuPaymentAccess(aspsp, fintech, fintechPubKeys, psuEncryption, session, paymentRepository);
}
| 3.26 |
open-banking-gateway_PaymentAccessFactory_paymentForPsuAndAspsp_rdh
|
/**
* Create {@code PaymentAccess} object that is similar to consent facing to PSU/Fintech user and ASPSP pair.
*
* @param psu
* Payee/authorizer of this payment
* @param aspsp
* ASPSP/Bank that is going to perform the payment
* @param session
* Session that identifies the payment.
* @return Payment context to authorize
*/
public PaymentAccess paymentForPsuAndAspsp(Psu psu, Bank aspsp, ServiceSession session) {
PsuAspspPrvKey prvKey = f0.findByPsuIdAndAspspId(psu.getId(), aspsp.getId()).orElseThrow(() -> new IllegalStateException("No public key for: " + psu.getId()));
return new PsuPaymentAccess(psu, aspsp, psuEncryption.forPublicKey(prvKey.getId(), prvKey.getPubKey().getKey()), session,
paymentRepository);
}
| 3.26 |
open-banking-gateway_PaymentAccessFactory_paymentForFintech_rdh
|
/**
* Create {@code PaymentAccess} object that is similar to consent facing to FinTech.
*
* @param fintech
* Fintech that initiates the payment
* @param session
* Session that identifies the payment.
* @param fintechPassword
* FinTech Datasafe/KeyStore password
* @return Payment context
*/
public PaymentAccess paymentForFintech(Fintech fintech, ServiceSession session, Supplier<char[]> fintechPassword) {
return new FintechPaymentAccess(fintech, psuEncryption, fintechPsuAspspPrvKeyRepository, fintechVault, paymentRepository, entityManager, session.getId(), fintechPassword);
}
| 3.26 |
open-banking-gateway_EncryptionProviderConfig_psuConsentEncryptionProvider_rdh
|
/**
* PSU/Fintech user consent encryption.
*
* @param psuKeyPairConfig
* Asymmetric encryption key configuration.
* @return PSU/Fintech user data encryption
*/
@Bean
PsuEncryptionServiceProvider psuConsentEncryptionProvider(PsuKeyPairConfig psuKeyPairConfig) {
return new PsuEncryptionServiceProvider(new CmsEncryptionOper(psuKeyPairConfig));
}
| 3.26 |
open-banking-gateway_EncryptionProviderConfig_fintechOnlyEncryptionProvider_rdh
|
/**
* Fintech data and consent access encryption.
*
* @param fintechOnlyKeyPairConfig
* Asymmetric encryption key configuration.
* @return Fintech data encryption
*/
@Bean
FintechOnlyEncryptionServiceProvider fintechOnlyEncryptionProvider(FintechOnlyKeyPairConfig fintechOnlyKeyPairConfig) {
return new FintechOnlyEncryptionServiceProvider(new CmsEncryptionOper(fintechOnlyKeyPairConfig));
}
| 3.26 |
open-banking-gateway_EncryptionProviderConfig_consentAuthEncryptionProvider_rdh
|
/**
* Consent authorization flow encryption.
*
* @param specSecretKeyConfig
* Secret key based encryption configuration.
* @return Consent Authorization encryption
*/
@Bean
ConsentAuthorizationEncryptionServiceProvider consentAuthEncryptionProvider(ConsentSpecSecretKeyConfig specSecretKeyConfig) {
return new ConsentAuthorizationEncryptionServiceProvider(new EncryptionWithInitVectorOper(specSecretKeyConfig));
}
| 3.26 |
open-banking-gateway_DatasafeDataStorage_read_rdh
|
/**
* Reads encrypted database entry
*
* @param path
* Path to the entry
*/
@Override
public Optional<byte[]> read(String path) {
return txOper.execute(callback -> find.apply(path).map(getData));
}
| 3.26 |
open-banking-gateway_DatasafeDataStorage_update_rdh
|
/**
* Updates encrypted database entry
*
* @param path
* Path to the entry
* @param data
* New entry value
*/
@Override
public void update(String path, byte[] data) {
txOper.execute(callback -> {
Optional<T> entry = find.apply(path);
if (entry.isPresent()) {
T toSave = entry.get();
setData.accept(toSave, data);
return null;
}
T newEntry = factory.apply(path);
setData.accept(newEntry, data);
repository.save(newEntry);
return null;
});
}
| 3.26 |
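Note that the update above never calls repository.save(...) in the existing-entry branch; presumably the entity returned by find inside txOper is a managed JPA entity, so the mutation is flushed by dirty checking when the transaction commits. A library-free sketch of the same find-or-create branching (all names are hypothetical stand-ins, not the gateway's API):

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

// Hypothetical stand-in types; the real class works against JPA entities and a Spring repository.
class EntrySketch {
    byte[] data;
}

class UpsertSketch {
    private final Map<String, EntrySketch> store = new HashMap<>();

    void update(String path, byte[] data) {
        Optional<EntrySketch> entry = Optional.ofNullable(store.get(path));
        if (entry.isPresent()) {
            // mutate in place; with JPA this mutation is what dirty checking would persist
            entry.get().data = data;
            return;
        }
        EntrySketch created = new EntrySketch(); // factory.apply(path) in the original
        created.data = data;
        store.put(path, created);                // repository.save(newEntry) in the original
    }
}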
open-banking-gateway_DatasafeDataStorage_delete_rdh
|
/**
* Removes encrypted database entry
*
* @param path
* Path to the entry
*/
@Override
public void delete(String path) {
throw new IllegalStateException("Not allowed");
}
| 3.26 |
open-banking-gateway_FireFlyTransactionExporter_exportAccountsTransactionsToFireFly_rdh
|
// Method length is mostly from long argument list to API call
@SuppressWarnings("checkstyle:MethodLength")
private void exportAccountsTransactionsToFireFly(long exportJobId, UUID bankProfileId, String accountIdToExport, LocalDate from, LocalDate to, AtomicInteger numTransactionsExported, AtomicInteger numTransactionsErrored, Set<String> availableAccountsInFireFlyByIban) {
ResponseEntity<TransactionsResponse> transactions = aisApi.getTransactions(accountIdToExport, bankingConfig.getUserId(), f0.getRedirectOkUri(UUID.randomUUID().toString()), f0.getRedirectNokUri(), UUID.randomUUID(), null, null, null, null, bankingConfig.getDataProtectionPassword(), bankProfileId, null, consentRepository.findFirstByBankProfileUuidOrderByModifiedAtDesc(bankProfileId).map(BankConsent::getConsentId).orElse(null), "", null, null, true, null, from, to, null, "both", false, null, null, null, null);
if (transactions.getStatusCode() == HttpStatus.ACCEPTED) {
throw new IllegalStateException("Consent is required, but was missing, try to import accounts or click on import transactions button again");
}
if (transactions.getStatusCode() != HttpStatus.OK) {
throw new IllegalStateException("Unexpected status code: " + transactions.getStatusCode().toString());
}
AccountReport report = transactions.getBody().getTransactions();
String lastError = null;
for (TransactionDetails transaction : report.getBooked()) {
try {
exportFireFlyTransaction(transaction, availableAccountsInFireFlyByIban);
numTransactionsExported.incrementAndGet();
} catch (Exception ex) {
log.error("Failed to export transaction: {}", transaction.getTransactionId(), ex);
numTransactionsErrored.incrementAndGet();
lastError = ex.getMessage();
}
m0(exportJobId, numTransactionsExported.get(), numTransactionsErrored.get(), lastError);
}
}
| 3.26 |
open-banking-gateway_FireFlyTransactionExporter_exportToFirefly_rdh
|
// Method length is mostly from long argument list to API call
@Async
@SuppressWarnings("checkstyle:MethodLength")
public void exportToFirefly(String fireFlyToken, long exportJobId, UUID bankProfileId, List<String> accountsTransactionsToExport, LocalDate from, LocalDate to) {
tokenProvider.setToken(fireFlyToken);
Set<String> availableAccountsInFireFlyByIban = exportableAccounts.exportableAccounts(fireFlyToken, bankProfileId).getBody().stream().map(ExportableAccount::getIban).collect(Collectors.toSet());
int numExported = 0;
int numErrored = 0;
AtomicInteger numTransactionsExported = new AtomicInteger();
AtomicInteger numTransactionsErrored = new AtomicInteger();
String lastError = null;
for (String v6 : accountsTransactionsToExport) {
try {
exportAccountsTransactionsToFireFly(exportJobId, bankProfileId, v6, from, to, numTransactionsExported, numTransactionsErrored, availableAccountsInFireFlyByIban);
} catch (Exception ex) {
log.error("Failed to export account: {}", v6, ex);
numErrored++;
lastError = ex.getMessage();
}
numExported++;
updateAccountsExported(exportJobId, numExported, numErrored, accountsTransactionsToExport.size(), numTransactionsExported.get(), numTransactionsErrored.get(), lastError);
}
txOper.execute(callback -> {
TransactionExportJob toUpdate = f1.getOne(exportJobId);
toUpdate.setCompleted(true);
return f1.save(toUpdate);
});
}
| 3.26 |
open-banking-gateway_DatasafeConfigurer_provideBouncyCastle_rdh
|
/**
* Installs BouncyCastle as required by Datasafe.
*/
@PostConstruct
void provideBouncyCastle() {
if (null != Security.getProvider(BouncyCastleProvider.PROVIDER_NAME)) {
return;
}
Security.addProvider(new BouncyCastleProvider());
}
| 3.26 |
open-banking-gateway_DatasafeConfigurer_fintechDatasafeServices_rdh
|
/**
* Fintech Datasafe storage.
*
* @param fintechReadStorePass
* Datasafe password to open keystore.
* @param serde
* Serialization/Deserialization handler
* @return FinTech Datasafe storage
*/
@Bean
public FintechSecureStorage fintechDatasafeServices(@Value(ENCRYPTION_DATASAFE_READ_KEYSTORE_PREFIX + ".fintech}
| 3.26 |
open-banking-gateway_DatasafeConfigurer_psuDatasafeServices_rdh
|
/**
* PSU/FinTech user Datasafe storage.
*
* @param psuReadStorePass
* Datasafe password to open keystore.
* @param serde
* Serialization/Deserialization handler
* @return PSU/FinTech user Datasafe storage
*/
@Bean
public PsuSecureStorage psuDatasafeServices(@Value(ENCRYPTION_DATASAFE_READ_KEYSTORE_PREFIX + ".psu}
| 3.26 |
open-banking-gateway_DatasafeConfigurer_fintechUserDatasafeServices_rdh
|
/**
* FinTech consent specification Datasafe storage.
*
* @param psuReadStorePass
* Datasafe password to open keystore.
* @return PSU/FinTech user Datasafe storage
*/
@Bean
public FintechConsentSpecSecureStorage fintechUserDatasafeServices(@Value(ENCRYPTION_DATASAFE_READ_KEYSTORE_PREFIX + ".fintech-user}
| 3.26 |
open-banking-gateway_ValidationIssue_toString_rdh
|
/**
*
* @return JSON representation of current object.
*/
@Override
public String toString() {
return "{" + "\"type\":\"" + type + "\"" + ", \"scope\":\"" + scope + "\"" + ", \"code\":\"" + code + "\"" + ", \"captionMessage\":\"" + captionMessage + "\"" + "}";
}
| 3.26 |
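The toString above builds a small JSON object by hand-concatenation. A possible, easier-to-read equivalent using String.format (not part of the snippet; like the original, it does not escape quotes inside field values):

@Override
public String toString() {
    return String.format(
        "{\"type\":\"%s\", \"scope\":\"%s\", \"code\":\"%s\", \"captionMessage\":\"%s\"}",
        type, scope, code, captionMessage);
}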
open-banking-gateway_AccountInformationRequestCommon_fintech_calls_list_accounts_for_anton_brueckner_rdh
|
// Note that anton.brueckner is typically used for REDIRECT (real REDIRECT that is returned by bank, and not REDIRECT approach in table)
public SELF fintech_calls_list_accounts_for_anton_brueckner(String bankProfileId) {
return m1(bankProfileId, false);
}
| 3.26 |
open-banking-gateway_AccountInformationRequestCommon_fintech_calls_list_accounts_for_max_musterman_with_expected_balances_rdh
|
// Note that max.musterman is typically used for EMBEDDED (real EMBEDDED that is returned by bank, and not EMBEDDED approach in table)
public SELF fintech_calls_list_accounts_for_max_musterman_with_expected_balances(Boolean withBalance) {
ExtractableResponse<Response> response = withAccountsHeaders(MAX_MUSTERMAN).header(SERVICE_SESSION_ID, UUID.randomUUID().toString()).queryParam("withBalance", withBalance).when().get(AIS_ACCOUNTS_ENDPOINT).then().statusCode(ACCEPTED.value()).extract();
updateServiceSessionId(response);
updateRedirectCode(response);
updateNextConsentAuthorizationUrl(response);
return self();
}
| 3.26 |
open-banking-gateway_AccountInformationRequestCommon_fintech_calls_list_accounts_for_max_musterman_rdh
|
// Note that max.musterman is typically used for EMBEDDED (real EMBEDDED that is returned by bank, and not EMBEDDED approach in table)
public SELF fintech_calls_list_accounts_for_max_musterman(String bankProfileId) {
ExtractableResponse<Response> response = withAccountsHeaders(MAX_MUSTERMAN, bankProfileId).header(SERVICE_SESSION_ID, UUID.randomUUID().toString()).when().get(AIS_ACCOUNTS_ENDPOINT).then().statusCode(ACCEPTED.value()).extract();
updateServiceSessionId(response);
updateRedirectCode(response);
updateNextConsentAuthorizationUrl(response);
return self();
}
| 3.26 |
open-banking-gateway_AccountExportService_exportAccounts_rdh
|
// This is mostly example code how to use an application
@Transactional
@SuppressWarnings("CPD-START")
public ResponseEntity<Long> exportAccounts(String fireFlyToken, UUID bankProfileId) {
ResponseEntity<AccountList> accounts = aisApi.getAccounts(bankingConfig.getUserId(), apiConfig.getRedirectOkUri(UUID.randomUUID().toString()), apiConfig.getRedirectNokUri(), UUID.randomUUID(), null, null, null, null, bankingConfig.getDataProtectionPassword(), bankProfileId, null, consentRepository.findFirstByBankProfileUuidOrderByModifiedAtDesc(bankProfileId).map(BankConsent::getConsentId).orElse(null), "", null, null, true, null, null, null, null, null, true, null);
if (accounts.getStatusCode() == HttpStatus.ACCEPTED) {
String redirectTo = consentService.createConsentForAccountsAndTransactions(bankProfileId);
return ResponseEntity.accepted().header(LOCATION, redirectTo).build();
}
AccountExportJob exportJob = exportJobRepository.save(new AccountExportJob());
exporter.exportToFirefly(fireFlyToken, exportJob.getId(), accounts.getBody());
return ResponseEntity.ok(exportJob.getId());
}
| 3.26 |
open-banking-gateway_ResultBody_getBody_rdh
|
/**
* Body of the results - i.e. account list.
*/
@JsonIgnore
default Object getBody() {
return null;
}
| 3.26 |
open-banking-gateway_ParsingUtil_parseMessageWithoutSensitiveNonSensitiveValidation_rdh
|
// This is the way original parser works - try - catch if message not matches - continue
@SuppressWarnings("PMD.EmptyCatchBlock")
public List<Message> parseMessageWithoutSensitiveNonSensitiveValidation(String from) {
NodeList list = SYNTAX.getElementsByTagName("MSGdef");
List<Message> result = new ArrayList<>();
for (int i = 0; i < list.getLength(); i++) {
Element node = (Element) list.item(i);
String msgName = node.getAttribute("id");
try {
// End loop on 1st element
result.add(new Message(msgName, from, SYNTAX, false, true));
} catch (RuntimeException ex) {
// NOP, that's how kapott works
}
}
return result;
}
| 3.26 |
open-banking-gateway_Xs2aWithBalanceParameters_toParameters_rdh
|
// TODO - MapStruct?
public RequestParams toParameters() {
return RequestParams.builder().withBalance(withBalance).build();
}
| 3.26 |
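The TODO above asks whether MapStruct could replace the hand-written mapping. A hypothetical sketch of such a mapper, assuming MapStruct's builder support can drive RequestParams.builder() and that the property names line up (this is not code from the repository):

import org.mapstruct.Mapper;
import org.mapstruct.factory.Mappers;

// Hypothetical mapper answering the TODO in the snippet above.
@Mapper
public interface Xs2aWithBalanceParametersMapper {
    Xs2aWithBalanceParametersMapper INSTANCE = Mappers.getMapper(Xs2aWithBalanceParametersMapper.class);

    RequestParams toParameters(Xs2aWithBalanceParameters source);
}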
hudi_HoodieFlinkWriteClient_startAsyncCleaning_rdh
|
/**
* Starts async cleaning service for finished commits.
*
* <p>The Flink write client is designed to write data set as buckets
* but cleaning action should trigger after all the write actions within a
* checkpoint finish.
*/
public void startAsyncCleaning() {
tableServiceClient.startAsyncCleanerService(this);
}
| 3.26 |
hudi_HoodieFlinkWriteClient_insertOverwriteTable_rdh
|
/**
* Removes all existing records of the Hoodie table and inserts the given HoodieRecords, into the table.
*
* @param records
* HoodieRecords to insert
* @param instantTime
* Instant time of the commit
* @return list of WriteStatus to inspect errors and counts
*/
public List<WriteStatus> insertOverwriteTable(List<HoodieRecord<T>> records, final String instantTime) {
HoodieTable table = initTable(WriteOperationType.INSERT_OVERWRITE_TABLE, Option.ofNullable(instantTime));
table.validateInsertSchema();
preWrite(instantTime, WriteOperationType.INSERT_OVERWRITE_TABLE, table.getMetaClient());
// create the write handle if not exists
HoodieWriteMetadata<List<WriteStatus>> result;
try (AutoCloseableWriteHandle closeableHandle = new AutoCloseableWriteHandle(records, instantTime, table, true)) {
result = ((HoodieFlinkTable<T>) (table)).insertOverwriteTable(context, closeableHandle.getWriteHandle(), instantTime, records);
}
return postWrite(result, instantTime, table);
}
| 3.26 |
hudi_HoodieFlinkWriteClient_upgradeDowngrade_rdh
|
/**
* Upgrade downgrade the Hoodie table.
*
* <p>This action should only be executed once for each commit.
* The modification of the table properties is not thread safe.
*/
public void upgradeDowngrade(String instantTime, HoodieTableMetaClient metaClient) {
new UpgradeDowngrade(metaClient, config, context, FlinkUpgradeDowngradeHelper.getInstance()).run(HoodieTableVersion.current(), instantTime);
}
| 3.26 |
hudi_HoodieFlinkWriteClient_preTxn_rdh
|
/**
* Refresh the last transaction metadata,
* should be called before the Driver starts a new transaction.
*/
public void preTxn(WriteOperationType operationType, HoodieTableMetaClient metaClient) {
if (txnManager.isLockRequired() && config.needResolveWriteConflict(operationType)) {
// refresh the meta client which is reused
metaClient.reloadActiveTimeline();
this.lastCompletedTxnAndMetadata = TransactionUtils.getLastCompletedTxnInstantAndMetadata(metaClient);
this.pendingInflightAndRequestedInstants = TransactionUtils.getInflightAndRequestedInstants(metaClient);
}
tableServiceClient.startAsyncArchiveService(this);
}
| 3.26 |
hudi_HoodieFlinkWriteClient_waitForCleaningFinish_rdh
|
/**
* Blocks and wait for the async cleaning service to finish.
*
* <p>The Flink write client is designed to write data set as buckets
* but cleaning action should trigger after all the write actions within a
* checkpoint finish.
*/
public void waitForCleaningFinish() {
if (tableServiceClient.asyncCleanerService != null) {
LOG.info("Cleaner has been spawned already. Waiting for it to finish");
tableServiceClient.asyncClean();
LOG.info("Cleaner has finished");
}
}
| 3.26 |
hudi_HoodieFlinkWriteClient_insertOverwrite_rdh
|
/**
* Removes all existing records from the partitions affected and inserts the given HoodieRecords, into the table.
*
* @param records
* HoodieRecords to insert
* @param instantTime
* Instant time of the commit
* @return list of WriteStatus to inspect errors and counts
*/
public List<WriteStatus> insertOverwrite(List<HoodieRecord<T>> records, final String instantTime) {
HoodieTable<T, List<HoodieRecord<T>>, List<HoodieKey>, List<WriteStatus>> table = initTable(WriteOperationType.INSERT_OVERWRITE, Option.ofNullable(instantTime));
table.validateInsertSchema();
preWrite(instantTime, WriteOperationType.INSERT_OVERWRITE, table.getMetaClient());
// create the write handle if not exists
HoodieWriteMetadata<List<WriteStatus>> result;
try (AutoCloseableWriteHandle closeableHandle = new AutoCloseableWriteHandle(records, instantTime, table, true)) {
result = ((HoodieFlinkTable<T>) (table)).insertOverwrite(context, closeableHandle.getWriteHandle(), instantTime, records);
}
return postWrite(result, instantTime, table);
}
| 3.26 |
hudi_HoodieFlinkWriteClient_initMetadataTable_rdh
|
/**
* Initialized the metadata table on start up, should only be called once on driver.
*/
public void initMetadataTable() {
((HoodieFlinkTableServiceClient<T>) (tableServiceClient)).initMetadataTable();
}
| 3.26 |
hudi_HoodieFlinkWriteClient_cleanHandles_rdh
|
/**
* Clean the write handles within a checkpoint interval.
* All the handles should have been closed already.
*/
public void cleanHandles() {
this.bucketToHandles.clear();
}
| 3.26 |
hudi_HoodieFlinkWriteClient_getOrCreateWriteHandle_rdh
|
/**
* Get or create a new write handle in order to reuse the file handles.
*
* @param record
* The first record in the bucket
* @param config
* Write config
* @param instantTime
* The instant time
* @param table
* The table
* @param recordItr
* Record iterator
* @param overwrite
* Whether this is an overwrite operation
* @return Existing write handle or create a new one
*/
private HoodieWriteHandle<?, ?, ?, ?> getOrCreateWriteHandle(HoodieRecord<T> record, HoodieWriteConfig config, String instantTime, HoodieTable<T, List<HoodieRecord<T>>, List<HoodieKey>, List<WriteStatus>> table, Iterator<HoodieRecord<T>> recordItr, boolean overwrite) {
// caution: it's not a good practice to modify the handles internal.
FlinkWriteHandleFactory.Factory<T, List<HoodieRecord<T>>, List<HoodieKey>, List<WriteStatus>> writeHandleFactory = FlinkWriteHandleFactory.getFactory(table.getMetaClient().getTableConfig(), config, overwrite);
return writeHandleFactory.create(this.bucketToHandles, record, config, instantTime, table, recordItr);
}
| 3.26 |
hudi_HoodieFlinkWriteClient_createIndex_rdh
|
/**
 * Creates the index implementation used by this Flink write client.
 */
@Override
protected HoodieIndex createIndex(HoodieWriteConfig writeConfig) {
return FlinkHoodieIndexFactory.createIndex((HoodieFlinkEngineContext) context, config);
}
| 3.26 |
hudi_RepairUtils_m0_rdh
|
/**
* Finds the dangling files to remove for a given instant to repair.
*
* @param instantToRepair
* Instant timestamp to repair.
* @param baseAndLogFilesFromFs
* A {@link List} of base and log files based on the file system.
* @param activeTimeline
* {@link HoodieActiveTimeline} instance.
* @param archivedTimeline
* {@link HoodieArchivedTimeline} instance.
* @return A {@link List} of relative file paths to base path for removing.
*/
public static List<String> m0(String instantToRepair, List<String> baseAndLogFilesFromFs, HoodieActiveTimeline activeTimeline, HoodieArchivedTimeline archivedTimeline) {
// Skips the instant if it is requested or inflight in active timeline
if (!activeTimeline.filter(instant -> instant.getTimestamp().equals(instantToRepair) && (!instant.isCompleted())).empty()) {
return Collections.emptyList();
}
try {
boolean doesInstantExist = false;
Option<Set<String>> filesFromTimeline = Option.empty();
Option<HoodieInstant> instantOption = activeTimeline.filterCompletedInstants().filter(instant -> instant.getTimestamp().equals(instantToRepair)).firstInstant();
if (instantOption.isPresent()) {
// Completed instant in active timeline
doesInstantExist = true;
filesFromTimeline = RepairUtils.getBaseAndLogFilePathsFromTimeline(activeTimeline, instantOption.get());
} else {
instantOption = archivedTimeline.filterCompletedInstants().filter(instant -> instant.getTimestamp().equals(instantToRepair)).firstInstant();
if (instantOption.isPresent()) {
// Completed instant in archived timeline
doesInstantExist = true;
filesFromTimeline = RepairUtils.getBaseAndLogFilePathsFromTimeline(archivedTimeline, instantOption.get());
}
}
if (doesInstantExist) {
if ((!filesFromTimeline.isPresent()) || filesFromTimeline.get().isEmpty()) {
// Skips if no instant details
return Collections.emptyList();
}
// Excludes committed base and log files from timeline
Set<String> filesToRemove = new HashSet<>(baseAndLogFilesFromFs);
filesToRemove.removeAll(filesFromTimeline.get());
return new ArrayList<>(filesToRemove);
} else {
// The instant does not exist in the whole timeline (neither completed nor requested/inflight),
// this means the files from this instant are dangling, which should be removed
return baseAndLogFilesFromFs;
}
} catch (IOException e) {
// In case of failure, does not remove any files for the instant
return Collections.emptyList();
}
}
| 3.26 |
hudi_RepairUtils_tagInstantsOfBaseAndLogFiles_rdh
|
/**
* Tags the instant time of each base or log file from the input file paths.
*
* @param basePath
* Base path of the table.
* @param allPaths
* A {@link List} of file paths to tag.
* @return A {@link Map} of instant time in {@link String} to a {@link List} of relative file paths.
*/
public static Map<String, List<String>> tagInstantsOfBaseAndLogFiles(String basePath, List<Path> allPaths) {
// Instant time -> Set of base and log file paths
Map<String, List<String>> instantToFilesMap = new HashMap<>();
allPaths.forEach(path -> {
String instantTime = FSUtils.getCommitTime(path.getName());
instantToFilesMap.computeIfAbsent(instantTime, k -> new ArrayList<>());
instantToFilesMap.get(instantTime).add(FSUtils.getRelativePartitionPath(new Path(basePath), path));
});
return instantToFilesMap;
}
/**
* Gets the base and log file paths written for a given instant from the timeline.
* This reads the details of the instant metadata.
*
* @param timeline
* {@link HoodieTimeline} instance, can be active or archived timeline.
* @param instant
* Instant for lookup.
* @return A {@link Option} of {@link Set} of relative file paths to base path
if the instant action is supported; empty {@link Option}
| 3.26 |
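tagInstantsOfBaseAndLogFiles above groups file paths by the commit time parsed from the file name. The grouping idiom itself is plain java.util, and the two-step computeIfAbsent plus get can be collapsed into one chained call, as in this self-contained sketch (the instant-time parsing and relative-path logic are Hudi-specific and stubbed out here with a hypothetical parser):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class GroupByInstantSketch {
    // Hypothetical parser standing in for FSUtils.getCommitTime(fileName).
    static String commitTimeOf(String fileName) {
        return fileName.split("_")[0];
    }

    static Map<String, List<String>> groupByInstant(List<String> fileNames) {
        Map<String, List<String>> instantToFiles = new HashMap<>();
        for (String fileName : fileNames) {
            // computeIfAbsent returns the existing or freshly created list, so add() can be chained
            instantToFiles.computeIfAbsent(commitTimeOf(fileName), k -> new ArrayList<>()).add(fileName);
        }
        return instantToFiles;
    }
}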
hudi_FileStatusDTO_safeReadAndSetMetadata_rdh
|
/**
* Used to safely handle FileStatus calls which might fail on some FileSystem implementation.
* (DeprecatedLocalFileSystem)
*/
private static void safeReadAndSetMetadata(FileStatusDTO dto, FileStatus fileStatus) {
try {
dto.owner = fileStatus.getOwner();
dto.group = fileStatus.getGroup();
dto.permission = FSPermissionDTO.fromFsPermission(fileStatus.getPermission());
} catch (IllegalArgumentException ie) {
// Deprecated File System (testing) does not work well with this call
// skipping
}
}
| 3.26 |
hudi_RowDataToHoodieFunctions_create_rdh
|
/**
* Creates a {@link RowDataToHoodieFunction} instance based on the given configuration.
*/
@SuppressWarnings("rawtypes")
public static RowDataToHoodieFunction<RowData, HoodieRecord> create(RowType rowType, Configuration conf) {
if (conf.getLong(FlinkOptions.WRITE_RATE_LIMIT) > 0) {
return new RowDataToHoodieFunctionWithRateLimit<>(rowType, conf);
} else {
return new RowDataToHoodieFunction<>(rowType, conf);
}
}
| 3.26 |
hudi_TableChanges_updateColumnType_rdh
|
/**
* Update a column in the schema to a new type.
* only support update primitive type.
* Only updates that widen types are allowed.
*
* @param name
* name of the column to update
* @param newType
* new type for the column
* @return this
* @throws IllegalArgumentException
*/
public ColumnUpdateChange updateColumnType(String name, Type newType) {
checkColModifyIsLegal(name);
if (newType.isNestedType()) {
throw new IllegalArgumentException(String.format("only support update primitive type but find nest column: %s", name));
}
Types.Field field = internalSchema.findField(name);
if (field == null) {
throw new IllegalArgumentException(String.format("cannot update a missing column: %s", name));
}
if (!SchemaChangeUtils.isTypeUpdateAllow(field.type(), newType)) {
throw new IllegalArgumentException(String.format("cannot update origin type: %s to a incompatibility type: %s", field.type(), newType));
}
if (field.type().equals(newType)) {
// do nothings
return this;
}
// save update info
Types.Field update = updates.get(field.fieldId());
if (update == null) {
updates.put(field.fieldId(), Types.Field.get(field.fieldId(), field.isOptional(), field.name(), newType, field.doc()));
} else {
updates.put(field.fieldId(), Types.Field.get(field.fieldId(), update.isOptional(), update.name(), newType, update.doc()));
}
return this;
}
| 3.26 |
hudi_TableChanges_updateColumnComment_rdh
|
/**
* Update a column doc in the schema to a new primitive type.
*
* @param name
* name of the column to update
* @param newDoc
* new documentation for the column
* @return this
* @throws IllegalArgumentException
*/
public ColumnUpdateChange updateColumnComment(String name, String newDoc) {
checkColModifyIsLegal(name);
Types.Field field = internalSchema.findField(name);
if (field == null) {
throw new IllegalArgumentException(String.format("cannot update a missing column: %s", name));
}
// consider null
if (Objects.equals(field.doc(), newDoc)) {
// do nothings
return this;
}
// save update info
Types.Field update = updates.get(field.fieldId());
if (update == null) {
updates.put(field.fieldId(), Types.Field.get(field.fieldId(), field.isOptional(), field.name(), field.type(), newDoc));
} else {
updates.put(field.fieldId(), Types.Field.get(field.fieldId(), update.isOptional(), update.name(), update.type(), newDoc));
}
return this;
}
| 3.26 |
hudi_TableChanges_renameColumn_rdh
|
/**
* Rename a column in the schema.
*
* @param name
* name of the column to rename
* @param newName
* new name for the column
* @return this
* @throws IllegalArgumentException
*/
public ColumnUpdateChange renameColumn(String name, String newName) {
checkColModifyIsLegal(name);
Types.Field field = internalSchema.findField(name);
if (field == null) {
throw new IllegalArgumentException(String.format("cannot update a missing column: %s", name));
}
if ((newName == null) || newName.isEmpty()) {
throw new IllegalArgumentException(String.format("cannot rename column: %s to empty", name));
}
if (internalSchema.hasColumn(newName, caseSensitive)) {
throw new IllegalArgumentException(String.format("cannot rename column: %s to a existing name", name));
}
// save update info
Types.Field v8 = updates.get(field.fieldId());
if (v8 == null) {
updates.put(field.fieldId(), Types.Field.get(field.fieldId(), field.isOptional(), newName, field.type(), field.doc()));
} else {
updates.put(field.fieldId(), Types.Field.get(field.fieldId(), v8.isOptional(), newName, v8.type(), v8.doc()));
}
return this;
}
| 3.26 |
hudi_TableChanges_getFullColName2Id_rdh
|
// expose to test
public Map<String, Integer> getFullColName2Id() {
return fullColName2Id;
}
| 3.26 |
hudi_TableChanges_updateColumnNullability_rdh
|
/**
* Update nullable for column.
* only support required type -> optional type
*
* @param name
* name of the column to update
* @param nullable
* nullable for updated name
* @return this
* @throws IllegalArgumentException
*/
public ColumnUpdateChange updateColumnNullability(String name, boolean nullable) {
return updateColumnNullability(name, nullable, false);
}
| 3.26 |
hudi_HoodieTimelineArchiver_archiveIfRequired_rdh
|
/**
* Check if commits need to be archived. If yes, archive commits.
*/
public int archiveIfRequired(HoodieEngineContext context, boolean acquireLock) throws IOException {
try {
if (acquireLock) {
// there is no owner or instant time per se for archival.
txnManager.beginTransaction(Option.empty(), Option.empty());
}
// Sort again because the cleaning and rollback instants could break the sequence.
List<ActiveAction> instantsToArchive = getInstantsToArchive().sorted().collect(Collectors.toList());
if (!instantsToArchive.isEmpty()) {
LOG.info("Archiving instants " + instantsToArchive);
Consumer<Exception> exceptionHandler = e -> {
if (this.config.isFailOnTimelineArchivingEnabled()) {
throw new HoodieException(e);
}
};
this.timelineWriter.write(instantsToArchive, Option.of(action -> deleteAnyLeftOverMarkers(context, action)), Option.of(exceptionHandler));
LOG.info("Deleting archived instants " + instantsToArchive);
deleteArchivedInstants(instantsToArchive, context);
// triggers compaction and cleaning only after archiving action
this.timelineWriter.compactAndClean(context);
} else {
LOG.info("No Instants to archive");
}
return instantsToArchive.size();
} finally {
if (acquireLock) {
txnManager.endTransaction(Option.empty());
}
}
}
| 3.26 |
hudi_HoodieLogCompactionPlanGenerator_isFileSliceEligibleForLogCompaction_rdh
|
/**
* Can schedule logcompaction if log files count is greater than 4 or total log blocks is greater than 4.
*
* @param fileSlice
* File Slice under consideration.
* @return Boolean value that determines whether log compaction will be scheduled or not.
*/
private boolean isFileSliceEligibleForLogCompaction(FileSlice fileSlice, String maxInstantTime, Option<InstantRange> instantRange) {
LOG.info(((("Checking if fileId " + fileSlice.getFileId()) + " and partition ") + fileSlice.getPartitionPath()) + " eligible for log compaction.");
HoodieTableMetaClient metaClient = hoodieTable.getMetaClient();
HoodieUnMergedLogRecordScanner scanner = HoodieUnMergedLogRecordScanner.newBuilder().withFileSystem(metaClient.getFs()).withBasePath(hoodieTable.getMetaClient().getBasePath()).withLogFilePaths(fileSlice.getLogFiles().sorted(HoodieLogFile.getLogFileComparator()).map(file -> file.getPath().toString()).collect(Collectors.toList())).withLatestInstantTime(maxInstantTime).withInstantRange(instantRange).withBufferSize(writeConfig.getMaxDFSStreamBufferSize()).withOptimizedLogBlocksScan(true).withRecordMerger(writeConfig.getRecordMerger()).build();
scanner.scan(true);
int totalBlocks = scanner.getCurrentInstantLogBlocks().size();
LOG.info((("Total blocks seen are " + totalBlocks) + ", log blocks threshold is ") + writeConfig.getLogCompactionBlocksThreshold());
// If total blocks in the file slice is > blocks threshold value(default value is 5).
// Log compaction can be scheduled.
return totalBlocks >= writeConfig.getLogCompactionBlocksThreshold();
}
| 3.26 |
hudi_HoodieTableFactory_checkPreCombineKey_rdh
|
/**
* Validate pre_combine key.
*/
private void checkPreCombineKey(Configuration conf, ResolvedSchema schema) {
List<String> fields = schema.getColumnNames();
String preCombineField = conf.get(FlinkOptions.PRECOMBINE_FIELD);
if (!fields.contains(preCombineField)) {
if (OptionsResolver.isDefaultHoodieRecordPayloadClazz(conf)) {
throw new HoodieValidationException("Option '" + FlinkOptions.PRECOMBINE_FIELD.key() + "' is required for payload class: " + DefaultHoodieRecordPayload.class.getName());
}
if (preCombineField.equals(FlinkOptions.PRECOMBINE_FIELD.defaultValue())) {
conf.setString(FlinkOptions.PRECOMBINE_FIELD, FlinkOptions.NO_PRE_COMBINE);
} else if (!preCombineField.equals(FlinkOptions.NO_PRE_COMBINE)) {
throw new HoodieValidationException((((("Field " + preCombineField) + " does not exist in the table schema.") + "Please check '") + FlinkOptions.PRECOMBINE_FIELD.key()) + "' option.");
}
}
}
| 3.26 |
hudi_HoodieTableFactory_setupWriteOptions_rdh
|
/**
* Sets up the write options from the table definition.
*/
private static void setupWriteOptions(Configuration conf) {
if (FlinkOptions.isDefaultValueDefined(conf, FlinkOptions.OPERATION) && OptionsResolver.isCowTable(conf)) {
conf.setBoolean(FlinkOptions.PRE_COMBINE, true);
}
}
| 3.26 |
hudi_HoodieTableFactory_setupHiveOptions_rdh
|
/**
* Sets up the hive options from the table definition.
*/
private static void setupHiveOptions(Configuration conf, ObjectIdentifier tablePath) {
if (!conf.contains(FlinkOptions.HIVE_SYNC_DB)) {
conf.setString(FlinkOptions.HIVE_SYNC_DB, tablePath.getDatabaseName());
}
if (!conf.contains(FlinkOptions.HIVE_SYNC_TABLE)) {
conf.setString(FlinkOptions.HIVE_SYNC_TABLE, tablePath.getObjectName());
}
}
| 3.26 |
hudi_HoodieTableFactory_setupHoodieKeyOptions_rdh
|
/**
* Sets up the hoodie key options (e.g. record key and partition key) from the table definition.
*/
private static void setupHoodieKeyOptions(Configuration conf, CatalogTable table) {
List<String> pkColumns = table.getSchema().getPrimaryKey().map(UniqueConstraint::getColumns).orElse(Collections.emptyList());
if (pkColumns.size() > 0) {
// the PRIMARY KEY syntax always has higher priority than option FlinkOptions#RECORD_KEY_FIELD
String recordKey = String.join(",", pkColumns);
conf.setString(FlinkOptions.RECORD_KEY_FIELD, recordKey);
}
List<String> partitionKeys = table.getPartitionKeys();
if (partitionKeys.size() > 0) {
// the PARTITIONED BY syntax always has higher priority than option FlinkOptions#PARTITION_PATH_FIELD
conf.setString(FlinkOptions.PARTITION_PATH_FIELD, String.join(",", partitionKeys));
}
// set index key for bucket index if not defined
if (conf.getString(FlinkOptions.INDEX_TYPE).equals(IndexType.BUCKET.name())) {
if (conf.getString(FlinkOptions.INDEX_KEY_FIELD).isEmpty()) {
conf.setString(FlinkOptions.INDEX_KEY_FIELD, conf.getString(FlinkOptions.RECORD_KEY_FIELD));
} else {
Set<String> recordKeySet = Arrays.stream(conf.getString(FlinkOptions.RECORD_KEY_FIELD).split(",")).collect(Collectors.toSet());
Set<String> indexKeySet = Arrays.stream(conf.getString(FlinkOptions.INDEX_KEY_FIELD).split(",")).collect(Collectors.toSet());
if (!recordKeySet.containsAll(indexKeySet)) {
throw new HoodieValidationException(FlinkOptions.INDEX_KEY_FIELD + " should be a subset of or equal to the recordKey fields");
}
}
}
// tweak the key gen class if possible
final String[] partitions = conf.getString(FlinkOptions.PARTITION_PATH_FIELD).split(",");
final String[] pks = conf.getString(FlinkOptions.RECORD_KEY_FIELD).split(",");
if (partitions.length == 1) {
final String partitionField = partitions[0];
if (partitionField.isEmpty()) {
conf.setString(FlinkOptions.KEYGEN_CLASS_NAME, NonpartitionedAvroKeyGenerator.class.getName());
LOG.info("Table option [{}] is reset to {} because this is a non-partitioned table", FlinkOptions.KEYGEN_CLASS_NAME.key(), NonpartitionedAvroKeyGenerator.class.getName());
return;
}
DataType partitionFieldType = table.getSchema().getFieldDataType(partitionField).orElseThrow(() -> new HoodieValidationException("Field " + partitionField + " does not exist"));
if ((pks.length <= 1) && DataTypeUtils.isDatetimeType(partitionFieldType)) {
// timestamp based key gen only supports simple primary key
setupTimestampKeygenOptions(conf, partitionFieldType);
return;
}
}
boolean complexHoodieKey = (pks.length > 1) || (partitions.length > 1);
if (complexHoodieKey && FlinkOptions.isDefaultValueDefined(conf, FlinkOptions.KEYGEN_CLASS_NAME)) {
conf.setString(FlinkOptions.KEYGEN_CLASS_NAME, ComplexAvroKeyGenerator.class.getName());
LOG.info("Table option [{}] is reset to {} because record key or partition path has two or more fields", FlinkOptions.KEYGEN_CLASS_NAME.key(), ComplexAvroKeyGenerator.class.getName());
}
}
| 3.26 |
hudi_HoodieTableFactory_checkTableType_rdh
|
/**
* Validate the table type.
*/
private void checkTableType(Configuration conf) {
String tableType = conf.get(FlinkOptions.TABLE_TYPE);
if (StringUtils.nonEmpty(tableType)) {
try {
HoodieTableType.valueOf(tableType);
} catch (IllegalArgumentException e) {
throw new HoodieValidationException(((((("Invalid table type: " + tableType) + ". Table type should be either ") + HoodieTableType.MERGE_ON_READ) + " or ") + HoodieTableType.COPY_ON_WRITE) + ".");
}
}
}
| 3.26 |
hudi_HoodieTableFactory_sanityCheck_rdh
|
// -------------------------------------------------------------------------
// Utilities
// -------------------------------------------------------------------------
/**
* The sanity check.
*
* @param conf
* The table options
* @param schema
* The table schema
*/
private void sanityCheck(Configuration conf, ResolvedSchema schema) {
checkTableType(conf);
if (!OptionsResolver.isAppendMode(conf)) {
checkRecordKey(conf, schema);
}
checkPreCombineKey(conf, schema);
}
| 3.26 |
hudi_HoodieTableFactory_checkRecordKey_rdh
|
/**
* Validate the record key.
*/
private void checkRecordKey(Configuration conf, ResolvedSchema schema) {
List<String> v6 = schema.getColumnNames();
if (!schema.getPrimaryKey().isPresent()) {
String[] recordKeys = conf.get(FlinkOptions.RECORD_KEY_FIELD).split(",");
if (((recordKeys.length == 1) && FlinkOptions.RECORD_KEY_FIELD.defaultValue().equals(recordKeys[0])) && (!v6.contains(recordKeys[0]))) {
throw new HoodieValidationException("Primary key definition is required, the default primary key field " + "'" + FlinkOptions.RECORD_KEY_FIELD.defaultValue() + "' does not exist in the table schema, " + "use either PRIMARY KEY syntax or option '" + FlinkOptions.RECORD_KEY_FIELD.key() + "' to specify.");
}
Arrays.stream(recordKeys).filter(field -> !v6.contains(field)).findAny().ifPresent(f -> {
throw new HoodieValidationException("Field '" + f + "' specified in option " + "'" + FlinkOptions.RECORD_KEY_FIELD.key() + "' does not exist in the table schema.");
});
}
}
| 3.26 |
hudi_HoodieTableFactory_setupConfOptions_rdh
|
/**
* Sets up the config options based on the table definition, for e.g, the table name, primary key.
*
* @param conf
* The configuration to set up
* @param tablePath
* The table path
* @param table
* The catalog table
* @param schema
* The physical schema
*/
private static void setupConfOptions(Configuration conf, ObjectIdentifier tablePath, CatalogTable table, ResolvedSchema schema) {
// table name
conf.setString(FlinkOptions.TABLE_NAME.key(), tablePath.getObjectName());
// database name
conf.setString(FlinkOptions.DATABASE_NAME.key(), tablePath.getDatabaseName());
// hoodie key about options
setupHoodieKeyOptions(conf, table);
// compaction options
setupCompactionOptions(conf);
// hive options
setupHiveOptions(conf, tablePath);
// read options
setupReadOptions(conf);
// write options
setupWriteOptions(conf);
// infer avro schema from physical DDL schema
inferAvroSchema(conf, schema.toPhysicalRowDataType().notNull().getLogicalType());
}
| 3.26 |
hudi_HoodieTableFactory_inferAvroSchema_rdh
|
/**
* Inferences the deserialization Avro schema from the table schema (e.g. the DDL)
* if both options {@link FlinkOptions#SOURCE_AVRO_SCHEMA_PATH} and
* {@link FlinkOptions#SOURCE_AVRO_SCHEMA} are not specified.
*
* @param conf
* The configuration
* @param rowType
* The specified table row type
*/
private static void inferAvroSchema(Configuration conf, LogicalType rowType) {
if ((!conf.getOptional(FlinkOptions.SOURCE_AVRO_SCHEMA_PATH).isPresent()) && (!conf.getOptional(FlinkOptions.SOURCE_AVRO_SCHEMA).isPresent())) {
String inferredSchema = AvroSchemaConverter.convertToSchema(rowType, AvroSchemaUtils.getAvroRecordQualifiedName(conf.getString(FlinkOptions.TABLE_NAME))).toString();
conf.setString(FlinkOptions.SOURCE_AVRO_SCHEMA, inferredSchema);
}
}
| 3.26 |
hudi_HoodieTableFactory_setupSortOptions_rdh
|
/**
* Sets up the table exec sort options.
*/
private void setupSortOptions(Configuration conf, ReadableConfig contextConfig) {
if (contextConfig.getOptional(TABLE_EXEC_SORT_MAX_NUM_FILE_HANDLES).isPresent()) {
conf.set(TABLE_EXEC_SORT_MAX_NUM_FILE_HANDLES, contextConfig.get(TABLE_EXEC_SORT_MAX_NUM_FILE_HANDLES));
}
if (contextConfig.getOptional(TABLE_EXEC_SPILL_COMPRESSION_ENABLED).isPresent()) {
conf.set(TABLE_EXEC_SPILL_COMPRESSION_ENABLED, contextConfig.get(TABLE_EXEC_SPILL_COMPRESSION_ENABLED));
}
if (contextConfig.getOptional(TABLE_EXEC_SPILL_COMPRESSION_BLOCK_SIZE).isPresent()) {
conf.set(TABLE_EXEC_SPILL_COMPRESSION_BLOCK_SIZE, contextConfig.get(TABLE_EXEC_SPILL_COMPRESSION_BLOCK_SIZE));
}
if (contextConfig.getOptional(TABLE_EXEC_SORT_ASYNC_MERGE_ENABLED).isPresent()) {
conf.set(TABLE_EXEC_SORT_ASYNC_MERGE_ENABLED, contextConfig.get(TABLE_EXEC_SORT_ASYNC_MERGE_ENABLED));
}
}
| 3.26 |
hudi_HoodieHiveUtils_getNthParent_rdh
|
/**
* Gets the n'th parent for the Path. Assumes the path has at-least n components
*
* @param path
* @param n
* @return
*/
public static Path getNthParent(Path path, int n) {
Path parent = path;
for (int i = 0; i < n; i++) {
parent = parent.getParent();
}
return parent;
}
| 3.26 |
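A usage sketch for getNthParent with Hadoop's Path (path values are illustrative only; the import of HoodieHiveUtils from the snippet above is assumed):

import org.apache.hadoop.fs.Path;

class GetNthParentExample {
    public static void main(String[] args) {
        Path file = new Path("/warehouse/db/table/partition=1/file.parquet");
        // walking up two components yields the table directory
        Path tableDir = HoodieHiveUtils.getNthParent(file, 2);
        System.out.println(tableDir); // prints /warehouse/db/table
    }
}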
hudi_HoodieHiveUtils_getDateWriteable_rdh
|
/**
* Get date writeable object from int value.
* Hive3 use DateWritableV2 to build date objects and Hive2 use DateWritable.
* So that we need to initialize date according to the version of Hive.
*/
public static Writable getDateWriteable(int value) {
return HIVE_SHIM.getDateWriteable(value);
}
| 3.26 |
hudi_HoodieHiveUtils_getIncrementalTableNames_rdh
|
/**
* Returns a list of tableNames for which hoodie.<tableName>.consume.mode is set to incremental else returns empty List
*
* @param job
* @return
*/
public static List<String> getIncrementalTableNames(JobContext job) {
Map<String, String> tablesModeMap = job.getConfiguration().getValByRegex(HOODIE_CONSUME_MODE_PATTERN_STRING.pattern());
List<String> result = tablesModeMap.entrySet().stream().map(s -> {
if (s.getValue().trim().toUpperCase().equals(INCREMENTAL_SCAN_MODE)) {
Matcher matcher = HOODIE_CONSUME_MODE_PATTERN_STRING.matcher(s.getKey());
return !matcher.find() ? null : matcher.group(1);
}
return null;
}).filter(Objects::nonNull).collect(Collectors.toList());
if (result == null) {
// Returns an empty list instead of null.
result = new ArrayList<>();
}
return result;
}
| 3.26 |
hudi_HoodieHiveUtils_getTimestampWriteable_rdh
|
/**
* Get timestamp writeable object from long value.
* Hive3 use TimestampWritableV2 to build timestamp objects and Hive2 use TimestampWritable.
* So that we need to initialize timestamp according to the version of Hive.
*/
public static Writable getTimestampWriteable(long value, boolean timestampMillis) {
return HIVE_SHIM.getTimestampWriteable(value, timestampMillis);
}
| 3.26 |
hudi_HoodieTableMetaClient_getMarkerFolderPath_rdh
|
/**
* Returns Marker folder path.
*
* @param instantTs
* Instant Timestamp
* @return
*/
public String getMarkerFolderPath(String instantTs) {
return String.format("%s%s%s", getTempFolderPath(), Path.SEPARATOR, instantTs);
}
| 3.26 |
hudi_HoodieTableMetaClient_getCommitsAndCompactionTimeline_rdh
|
/**
* Get the commit + pending-compaction timeline visible for this table. A RT filesystem view is constructed with this
* timeline so that file-slice after pending compaction-requested instant-time is also considered valid. A RT
* file-system view for reading must then merge the file-slices before and after pending compaction instant so that
* all delta-commits are read.
*/
public HoodieTimeline getCommitsAndCompactionTimeline() {
switch (this.getTableType()) {
case COPY_ON_WRITE :
return getActiveTimeline().getCommitTimeline();
case MERGE_ON_READ :
return getActiveTimeline().getWriteTimeline();
default :
throw new HoodieException("Unsupported table type :" + this.getTableType());
}
}
| 3.26 |
hudi_HoodieTableMetaClient_m4_rdh
|
/**
* Helper method to scan all hoodie-instant metafiles.
*
* @param fs
* The file system implementation for this table
* @param metaPath
* The meta path where meta files are stored
* @param nameFilter
* The name filter to filter meta files
* @return An array of meta FileStatus
* @throws IOException
* In case of failure
*/
public static FileStatus[] m4(FileSystem fs, Path metaPath, PathFilter nameFilter) throws IOException {
return fs.listStatus(metaPath, nameFilter);
}
| 3.26 |
hudi_HoodieTableMetaClient_getHeartbeatFolderPath_rdh
|
/**
*
* @return Heartbeat folder path.
*/
public static String getHeartbeatFolderPath(String basePath) {
return String.format("%s%s%s", basePath, Path.SEPARATOR, HEARTBEAT_FOLDER_NAME);
}
| 3.26 |
hudi_HoodieTableMetaClient_initTable_rdh
|
/**
* Init Table with the properties build by this builder.
*
* @param configuration
* The hadoop config.
* @param basePath
* The base path for hoodie table.
*/
public HoodieTableMetaClient initTable(Configuration configuration, String basePath) throws IOException {
return HoodieTableMetaClient.m3(configuration, basePath, build());
}
| 3.26 |
hudi_HoodieTableMetaClient_getRawFs_rdh
|
/**
* Return raw file-system.
*
* @return fs
*/
public FileSystem getRawFs() {
return getFs().getFileSystem();
}
| 3.26 |
hudi_HoodieTableMetaClient_m1_rdh
|
/**
*
* @return Hashing metadata base path
*/
public String m1() {
return new Path(metaPath.get(), HASHING_METADATA_FOLDER_NAME).toString();
}
| 3.26 |
hudi_HoodieTableMetaClient_getArchivedTimeline_rdh
|
/**
* Returns the cached archived timeline from startTs (inclusive).
*
* @param startTs
* The start instant time (inclusive) of the archived timeline.
* @return the archived timeline.
*/
public HoodieArchivedTimeline getArchivedTimeline(String startTs) {
return m2(startTs, true);
}
| 3.26 |
hudi_HoodieTableMetaClient_getBootstrapIndexByPartitionFolderPath_rdh
|
/**
*
* @return Bootstrap Index By Partition Folder
*/
public String getBootstrapIndexByPartitionFolderPath() {
return (basePath + Path.SEPARATOR) + BOOTSTRAP_INDEX_BY_PARTITION_FOLDER_PATH;
}
| 3.26 |
hudi_HoodieTableMetaClient_createNewInstantTime_rdh
|
/**
* Returns next instant time in the correct format.
*
* @param shouldLock
* whether the lock should be enabled to get the instant time.
*/
public String createNewInstantTime(boolean shouldLock) {
TimeGenerator timeGenerator = TimeGenerators.getTimeGenerator(timeGeneratorConfig, f0.get());
return HoodieActiveTimeline.createNewInstantTime(shouldLock, timeGenerator);
}
| 3.26 |
hudi_HoodieTableMetaClient_getMetaAuxiliaryPath_rdh
|
/**
*
* @return Auxiliary Meta path
*/
public String getMetaAuxiliaryPath() {
return (basePath + Path.SEPARATOR) + AUXILIARYFOLDER_NAME;
}
| 3.26 |
hudi_HoodieTableMetaClient_reloadActiveTimeline_rdh
|
/**
* Reload ActiveTimeline and cache.
*
* @return Active instants timeline
*/
public synchronized HoodieActiveTimeline reloadActiveTimeline() {
activeTimeline = new HoodieActiveTimeline(this);
return activeTimeline;
}
| 3.26 |
hudi_HoodieTableMetaClient_scanHoodieInstantsFromFileSystem_rdh
|
/**
* Helper method to scan all hoodie-instant metafiles and construct HoodieInstant objects.
*
* @param timelinePath
* MetaPath where instant files are stored
* @param includedExtensions
* Included hoodie extensions
* @param applyLayoutVersionFilters
* Depending on Timeline layout version, if there are multiple states for the same
* action instant, only include the highest state
* @return List of Hoodie Instants generated
* @throws IOException
* in case of failure
*/
public List<HoodieInstant> scanHoodieInstantsFromFileSystem(Path timelinePath, Set<String> includedExtensions, boolean applyLayoutVersionFilters) throws IOException {
Stream<HoodieInstant> instantStream = Arrays.stream(HoodieTableMetaClient.m4(getFs(), timelinePath, path -> {
// Include only the meta files with extensions that needs to be included
String extension = HoodieInstant.getTimelineFileExtension(path.getName());
return includedExtensions.contains(extension);
})).map(HoodieInstant::new);
if (applyLayoutVersionFilters) {
instantStream = TimelineLayout.getLayout(getTimelineLayoutVersion()).filterHoodieInstants(instantStream);
}
return instantStream.sorted().collect(Collectors.toList());
}
| 3.26 |
hudi_HoodieTableMetaClient_getArchivePath_rdh
|
/**
*
* @return path where archived timeline is stored
*/
public String getArchivePath() {
String archiveFolder = tableConfig.getArchivelogFolder();
return (getMetaPath() + Path.SEPARATOR) + archiveFolder;
}
| 3.26 |
hudi_HoodieTableMetaClient_getFs_rdh
|
/**
* Get the FS implementation for this table.
*/
public HoodieWrapperFileSystem getFs() {
if (fs == null) {
FileSystem fileSystem = FSUtils.getFs(metaPath.get(), f0.newCopy());
if (fileSystemRetryConfig.isFileSystemActionRetryEnable()) {
fileSystem = new HoodieRetryWrapperFileSystem(fileSystem, fileSystemRetryConfig.getMaxRetryIntervalMs(), fileSystemRetryConfig.getMaxRetryNumbers(), fileSystemRetryConfig.getInitialRetryIntervalMs(), fileSystemRetryConfig.getRetryExceptions());
}
ValidationUtils.checkArgument(!(fileSystem instanceof HoodieWrapperFileSystem), "File System not expected to be that of HoodieWrapperFileSystem");
fs = new HoodieWrapperFileSystem(fileSystem, consistencyGuardConfig.isConsistencyCheckEnabled() ? new FailSafeConsistencyGuard(fileSystem, consistencyGuardConfig) : new NoOpConsistencyGuard());
}
return fs;
}
| 3.26 |
hudi_HoodieTableMetaClient_getTableConfig_rdh
|
/**
*
* @return Table Config
*/
public HoodieTableConfig getTableConfig() {
return tableConfig;
}
| 3.26 |
hudi_HoodieTableMetaClient_buildFunctionalIndexDefinition_rdh
|
/**
* Builds functional index definition and writes to index definition file.
*
* @param indexMetaPath
* Path to index definition file
* @param indexName
* Name of the index
* @param indexType
* Type of the index
* @param columns
* Columns on which index is built
* @param options
* Options for the index
*/
public void buildFunctionalIndexDefinition(String indexMetaPath, String indexName, String indexType, Map<String, Map<String, String>> columns, Map<String, String> options) {
ValidationUtils.checkState((!functionalIndexMetadata.isPresent()) || (!functionalIndexMetadata.get().getIndexDefinitions().containsKey(indexName)), "Functional index metadata is already present");
List<String> columnNames = new ArrayList<>(columns.keySet());
HoodieFunctionalIndexDefinition functionalIndexDefinition = new HoodieFunctionalIndexDefinition(indexName, indexType, options.get("func"), columnNames, options);
if (functionalIndexMetadata.isPresent()) {
functionalIndexMetadata.get().getIndexDefinitions().put(indexName, functionalIndexDefinition);
} else {
functionalIndexMetadata = Option.of(new HoodieFunctionalIndexMetadata(Collections.singletonMap(indexName, functionalIndexDefinition)));
}
try {
// fs.mkdirs(new Path(indexMetaPath).getParent());
FileIOUtils.createFileInPath(fs, new Path(indexMetaPath), Option.of(functionalIndexMetadata.get().toJson().getBytes(StandardCharsets.UTF_8)));
} catch (IOException e) {
throw new HoodieIOException("Could not write functional index metadata at path: " + indexMetaPath, e);
}
}
| 3.26 |
hudi_HoodieTableMetaClient_getMetaPath_rdh
|
/**
*
* @return Meta path
*/
public String getMetaPath() {
return metaPath.get().toString();// this invocation is cached
}
| 3.26 |
hudi_HoodieTableMetaClient_getTempFolderPath_rdh
|
/**
*
* @return Temp Folder path
*/
public String getTempFolderPath() {
return (basePath + Path.SEPARATOR) + TEMPFOLDER_NAME;
}
| 3.26 |
hudi_HoodieTableMetaClient_getActiveTimeline_rdh
|
/**
* Get the active instants as a timeline.
*
* @return Active instants timeline
*/
public synchronized HoodieActiveTimeline getActiveTimeline() {
if (activeTimeline == null) {
activeTimeline = new HoodieActiveTimeline(this);
}
return activeTimeline;
}
| 3.26 |
hudi_HoodieTableMetaClient_getCommitActionType_rdh
|
/**
* Gets the commit action type.
*/
public String getCommitActionType() {
return CommitUtils.getCommitActionType(this.getTableType());
}
| 3.26 |
hudi_HoodieTableMetaClient_getCommitsTimeline_rdh
|
/**
* Get the commit timeline visible for this table.
*/
public HoodieTimeline getCommitsTimeline() {
switch (this.getTableType()) {
case COPY_ON_WRITE :
return getActiveTimeline().getCommitTimeline();
case MERGE_ON_READ :
// We need to include the parquet files written out in delta commits
// Include commit action to be able to start doing a MOR over a COW table - no
// migration required
return getActiveTimeline().getCommitsTimeline();
default :
throw new HoodieException("Unsupported table type :" + this.getTableType());
}
}
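
A minimal sketch of consuming the timeline returned above, assuming the standard HoodieTimeline helpers filterCompletedInstants() and lastInstant() are available:

import org.apache.hudi.common.table.HoodieTableMetaClient;
import org.apache.hudi.common.table.timeline.HoodieTimeline;

public class CommitsTimelineExample {
    // Prints the timestamp of the most recent completed commit/deltacommit, if any.
    static void printLastCompletedInstant(HoodieTableMetaClient metaClient) {
        HoodieTimeline completed = metaClient.getCommitsTimeline().filterCompletedInstants();
        if (completed.lastInstant().isPresent()) {
            System.out.println("Last completed instant: " + completed.lastInstant().get().getTimestamp());
        } else {
            System.out.println("No completed commits yet");
        }
    }
}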
| 3.26 |
hudi_HoodieTableMetaClient_getFunctionalIndexMetadata_rdh
|
/**
* Returns Option of {@link HoodieFunctionalIndexMetadata} from index definition file if present, else returns empty Option.
*/
public Option<HoodieFunctionalIndexMetadata> getFunctionalIndexMetadata() {
if (functionalIndexMetadata.isPresent()) {
return functionalIndexMetadata;
}
if (tableConfig.getIndexDefinitionPath().isPresent() && StringUtils.nonEmpty(tableConfig.getIndexDefinitionPath().get())) {
Path indexDefinitionPath = new Path(tableConfig.getIndexDefinitionPath().get());
try {
return Option.of(HoodieFunctionalIndexMetadata.fromJson(new String(FileIOUtils.readDataFromPath(fs, indexDefinitionPath).get())));
} catch (IOException e) {
throw new HoodieIOException("Could not load functional index metadata at path: " + tableConfig.getIndexDefinitionPath().get(), e);
}
}
return Option.empty();
}
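
A small usage sketch for the accessor above; it relies only on the getIndexDefinitions() map already used in these snippets and assumes a meta client instance is at hand:

import org.apache.hudi.common.table.HoodieTableMetaClient;

public class FunctionalIndexLookupExample {
    // Prints the names of the functional index definitions registered for the table, if any.
    static void listIndexes(HoodieTableMetaClient metaClient) {
        var indexMeta = metaClient.getFunctionalIndexMetadata();
        if (indexMeta.isPresent()) {
            indexMeta.get().getIndexDefinitions()
                .forEach((name, definition) -> System.out.println("functional index: " + name));
        }
    }
}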
| 3.26 |
hudi_HoodieTableMetaClient_getCommitTimeline_rdh
|
/**
* Get the compacted commit timeline visible for this table.
*/
public HoodieTimeline getCommitTimeline() {
switch (this.getTableType()) {
case COPY_ON_WRITE:
case MERGE_ON_READ:
    // We need to include the parquet files written out in delta commits in tagging
    return getActiveTimeline().getCommitTimeline();
default:
    throw new HoodieException("Unsupported table type :" + this.getTableType());
}
}
| 3.26 |
hudi_HoodieTableMetaClient_m2_rdh
|
/**
* Returns the cached archived timeline if the in-memory cache is used, or a freshly
* instantiated archived timeline otherwise, starting from startTs (inclusive).
* <p>
* Instantiating an archived timeline is a costly operation if a very early startTs is
* specified.
* <p>
* This method is not thread safe.
*
* @param startTs
* The start instant time (inclusive) of the archived timeline.
* @param useCache
* Whether to use in-memory cache.
* @return the archived timeline based on the arguments.
*/
public HoodieArchivedTimeline m2(String startTs, boolean useCache) {
if (useCache) {
if (!archivedTimelineMap.containsKey(startTs)) {
// Only keep one entry in the map
archivedTimelineMap.clear();
archivedTimelineMap.put(startTs, instantiateArchivedTimeline(startTs));
}
return archivedTimelineMap.get(startTs);
}
return instantiateArchivedTimeline(startTs);
}
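
A usage sketch for the archived-timeline accessor above (named m2 in this snippet; it appears to correspond to getArchivedTimeline(startTs, useCache) in stock Hudi). The start instant is a placeholder and countInstants() is assumed to be available on the returned timeline:

import org.apache.hudi.common.table.HoodieTableMetaClient;
import org.apache.hudi.common.table.timeline.HoodieArchivedTimeline;

public class ArchivedTimelineExample {
    // Loads archived instants from a placeholder start instant onwards, reusing the single-entry cache.
    static void countArchivedInstants(HoodieTableMetaClient metaClient) {
        String startTs = "20240101000000000"; // placeholder start instant (inclusive)
        HoodieArchivedTimeline archived = metaClient.m2(startTs, true); // accessor name as shown above
        System.out.println("Archived instants since " + startTs + ": " + archived.countInstants());
    }
}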
| 3.26 |
hudi_HoodieTableMetaClient_validateTableProperties_rdh
|
/**
* Validate table properties.
*
* @param properties
* Properties from writeConfig.
*/
public void validateTableProperties(Properties properties) {
// Once meta fields are disabled, they can't be re-enabled for a given table.
if (!getTableConfig().populateMetaFields()
    && Boolean.parseBoolean((String) properties.getOrDefault(HoodieTableConfig.POPULATE_META_FIELDS.key(), HoodieTableConfig.POPULATE_META_FIELDS.defaultValue().toString()))) {
throw new HoodieException(HoodieTableConfig.POPULATE_META_FIELDS.key() + " already disabled for the table. Can't be re-enabled back");
}
// Meta fields can be disabled only when {@code SimpleKeyGenerator}, {@code ComplexKeyGenerator}, or {@code NonpartitionedKeyGenerator} is used
if (!getTableConfig().populateMetaFields()) {
String keyGenClass = KeyGeneratorType.getKeyGeneratorClassName(new HoodieConfig(properties));
if (StringUtils.isNullOrEmpty(keyGenClass)) {
keyGenClass = "org.apache.hudi.keygen.SimpleKeyGenerator";
}
if (!keyGenClass.equals("org.apache.hudi.keygen.SimpleKeyGenerator")
    && !keyGenClass.equals("org.apache.hudi.keygen.NonpartitionedKeyGenerator")
    && !keyGenClass.equals("org.apache.hudi.keygen.ComplexKeyGenerator")) {
throw new HoodieException("Only simple, non-partitioned or complex key generator are supported when meta-fields are disabled. Used: " + keyGenClass);
}
}
// Check to make sure it's not a COW table with consistent hashing bucket index
if (tableType == HoodieTableType.COPY_ON_WRITE) {
String indexType = properties.getProperty("hoodie.index.type");
if ((indexType != null) && indexType.equals("BUCKET")) {
    String bucketEngine = properties.getProperty("hoodie.index.bucket.engine");
    if ((bucketEngine != null) && bucketEngine.equals("CONSISTENT_HASHING")) {
        throw new HoodieException("Consistent hashing bucket index does not work with COW table. Use simple bucket index or an MOR table.");
    }
}
}
}
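
A hedged sketch of the last check above: for a COPY_ON_WRITE table, properties selecting the BUCKET index type with the CONSISTENT_HASHING engine are rejected with a HoodieException. The meta client is assumed to point at a COW table:

import java.util.Properties;
import org.apache.hudi.common.table.HoodieTableMetaClient;
import org.apache.hudi.exception.HoodieException;

public class TablePropertyValidationExample {
    // On a COPY_ON_WRITE table, a consistent-hashing bucket index is rejected by the check above.
    static void validate(HoodieTableMetaClient cowMetaClient) {
        Properties props = new Properties();
        props.setProperty("hoodie.index.type", "BUCKET");
        props.setProperty("hoodie.index.bucket.engine", "CONSISTENT_HASHING");
        try {
            cowMetaClient.validateTableProperties(props);
        } catch (HoodieException e) {
            System.out.println("Rejected as expected: " + e.getMessage());
        }
    }
}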
| 3.26 |
hudi_HoodieTableMetaClient_getTableType_rdh
|
/**
*
* @return Hoodie Table Type
*/
public HoodieTableType getTableType() {
return tableType;
}
| 3.26 |
hudi_HoodieTableMetaClient_isTimelineNonEmpty_rdh
|
/**
*
* @return {@code true} if any commits are found, else {@code false}.
*/
public boolean isTimelineNonEmpty() {
return !getCommitsTimeline().filterCompletedInstants().empty();
}
| 3.26 |
hudi_HoodieTableMetaClient_readObject_rdh
|
/**
* This method is only used when this object is de-serialized in a Spark executor.
*
* @deprecated
*/
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
    in.defaultReadObject();
    fs = null; // will be lazily initialized
}
| 3.26 |
hudi_HoodieTableMetaClient_getBasePathV2_rdh
|
/**
* Returns the base path of the table.
*/
public Path getBasePathV2() {
return basePath.get();
}
| 3.26 |
hudi_HoodieTableMetaClient_getBootstrapIndexByFileIdFolderNameFolderPath_rdh
|
/**
*
* @return Bootstrap Index By Hudi File Id Folder
*/
public String getBootstrapIndexByFileIdFolderNameFolderPath() {
return (basePath + Path.SEPARATOR) + BOOTSTRAP_INDEX_BY_FILE_ID_FOLDER_PATH;
}
| 3.26 |
hudi_HoodieTableMetaClient_getSchemaFolderName_rdh
|
/**
*
* @return schema folder path
*/
public String getSchemaFolderName() {
return new Path(metaPath.get(), SCHEMA_FOLDER_NAME).toString();
}
| 3.26 |
hudi_HoodieTableMetaClient_getBasePath_rdh
|
/**
*
* @return Base path
* @deprecated please use {@link #getBasePathV2()}
*/
@Deprecated
public String getBasePath() {
return basePath.get().toString(); // this invocation is cached
}
| 3.26 |
hudi_BulkInsertWriterHelper_addMetadataFields_rdh
|
/**
* Adds the Hoodie metadata fields to the given row type.
*/
public static RowType addMetadataFields(RowType rowType, boolean withOperationField) {
List<RowType.RowField> mergedFields = new ArrayList<>();
LogicalType metadataFieldType = DataTypes.STRING().getLogicalType();
RowType.RowField commitTimeField = new RowType.RowField(HoodieRecord.COMMIT_TIME_METADATA_FIELD, metadataFieldType, "commit time");
RowType.RowField commitSeqnoField = new RowType.RowField(HoodieRecord.COMMIT_SEQNO_METADATA_FIELD, metadataFieldType, "commit seqno");
RowType.RowField recordKeyField = new RowType.RowField(HoodieRecord.RECORD_KEY_METADATA_FIELD, metadataFieldType, "record key");
RowType.RowField partitionPathField = new RowType.RowField(HoodieRecord.PARTITION_PATH_METADATA_FIELD, metadataFieldType, "partition path");
RowType.RowField fileNameField = new RowType.RowField(HoodieRecord.FILENAME_METADATA_FIELD, metadataFieldType, "file name");
mergedFields.add(commitTimeField);
mergedFields.add(commitSeqnoField);
mergedFields.add(recordKeyField);
mergedFields.add(partitionPathField);
mergedFields.add(fileNameField);
if (withOperationField) {
RowType.RowField operationField = new RowType.RowField(HoodieRecord.OPERATION_METADATA_FIELD, metadataFieldType, "operation");
mergedFields.add(operationField);
}
mergedFields.addAll(rowType.getFields());
return new RowType(false, mergedFields);
}
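
A minimal Flink-side sketch of addMetadataFields, assuming the helper lives at org.apache.hudi.sink.bulk.BulkInsertWriterHelper and a Flink table-planner dependency is on the classpath; the payload schema is hypothetical:

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.RowType;
import org.apache.hudi.sink.bulk.BulkInsertWriterHelper;

public class MetadataFieldsExample {
    public static void main(String[] args) {
        // A hypothetical two-column payload schema.
        RowType dataType = RowType.of(
            new LogicalType[] {DataTypes.BIGINT().getLogicalType(), DataTypes.STRING().getLogicalType()},
            new String[] {"id", "name"});
        // Prepends the five _hoodie_* string fields (plus _hoodie_operation when requested).
        RowType withMeta = BulkInsertWriterHelper.addMetadataFields(dataType, false);
        System.out.println(withMeta.getFieldNames());
    }
}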
| 3.26 |