Columns: name (string, length 12-178) · code_snippet (string, length 8-36.5k) · score (float64, range 3.26-3.68)
hbase_RSGroupUtil_getRSGroupInfo_rdh
/** * Will try to get the rsgroup from {@link TableDescriptor} first, and then try to get the rsgroup * from the {@link NamespaceDescriptor}. If still not present, return empty. */ public static Optional<RSGroupInfo> getRSGroupInfo(MasterServices master, RSGroupInfoManager manager, TableName tableName) throws IOException { TableDescriptor td = master.getTableDescriptors().get(tableName); if (td == null) { return Optional.empty(); } // RSGroup information determined by client. Optional<String> optGroupNameOfTable = td.getRegionServerGroup(); if (optGroupNameOfTable.isPresent()) { RSGroupInfo group = manager.getRSGroup(optGroupNameOfTable.get()); if (group != null) { return Optional.of(group); } } // for backward compatibility: we may still have table configs in the RSGroupInfo after // upgrading, while migration is still ongoing. RSGroupInfo groupFromOldRSGroupInfo = manager.getRSGroupForTable(tableName); if (groupFromOldRSGroupInfo != null) { return Optional.of(groupFromOldRSGroupInfo); } // RSGroup information determined by administrator. String groupDeterminedByAdmin = manager.determineRSGroupInfoForTable(tableName); RSGroupInfo groupInfo = null; if (groupDeterminedByAdmin != null) { groupInfo = manager.getRSGroup(groupDeterminedByAdmin); } if (groupInfo != null) { return Optional.of(groupInfo); } // Finally, we will try to fall back to the namespace's rsgroup, if one exists ClusterSchema clusterSchema = master.getClusterSchema(); if (clusterSchema == null) { if (TableName.isMetaTableName(tableName)) { LOG.info("Cannot get the namespace rs group config for meta table, since the" + " meta table is not online yet, will use default group to assign meta first"); } else { LOG.warn("ClusterSchema is null, can only use default rsgroup, should not happen?"); } return Optional.empty(); } NamespaceDescriptor nd = clusterSchema.getNamespace(tableName.getNamespaceAsString()); String groupNameOfNs = nd.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP); if (groupNameOfNs == null) { return Optional.empty(); } return Optional.ofNullable(manager.getRSGroup(groupNameOfNs)); }
3.26
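A minimal caller sketch for getRSGroupInfo above, assuming live MasterServices and RSGroupInfoManager handles; master, manager, and tableName are hypothetical:

// Resolve the group for a table, falling back to the default group when unresolved.
Optional<RSGroupInfo> group = RSGroupUtil.getRSGroupInfo(master, manager, tableName);
String groupName = group.map(RSGroupInfo::getName).orElse(RSGroupInfo.DEFAULT_GROUP);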
hbase_RSGroupUtil_fillTables_rdh
/** * Fill the tables field for {@link RSGroupInfo}, for backward compatibility. */ @SuppressWarnings("deprecation") public static RSGroupInfo fillTables(RSGroupInfo rsGroupInfo, Collection<TableDescriptor> tds) { RSGroupInfo newRsGroupInfo = new RSGroupInfo(rsGroupInfo); Predicate<TableDescriptor> filter; if (rsGroupInfo.getName().equals(RSGroupInfo.DEFAULT_GROUP)) {filter = td -> { Optional<String> optGroupName = td.getRegionServerGroup(); return (!optGroupName.isPresent()) || optGroupName.get().equals(RSGroupInfo.DEFAULT_GROUP); }; } else { filter = td -> { Optional<String> optGroupName = td.getRegionServerGroup(); return optGroupName.isPresent() && optGroupName.get().equals(newRsGroupInfo.getName()); }; } tds.stream().filter(filter).map(TableDescriptor::getTableName).forEach(newRsGroupInfo::addTable); return newRsGroupInfo; }
3.26
hbase_TaskMonitor_createTaskGroup_rdh
/** * Create a task group which contains a series of monitored tasks for users to inquire about the * status * * @param ignoreSubTasksInTaskMonitor * whether to skip tracking (e.g. show/clear/expire) the subtasks * in the {@link TaskMonitor} * @param description * description of the status * @return a group of monitored tasks */ public static TaskGroup createTaskGroup(boolean ignoreSubTasksInTaskMonitor, String description) { return new TaskGroup(ignoreSubTasksInTaskMonitor, description); }
3.26
hbase_TaskMonitor_getTasks_rdh
/** * Produces a list containing copies of the current state of all non-expired MonitoredTasks * handled by this TaskMonitor. * * @param filter * type of wanted tasks * @return A filtered list of MonitoredTasks. */ public synchronized List<MonitoredTask> getTasks(String filter) { purgeExpiredTasks(); TaskFilter taskFilter = createTaskFilter(filter); ArrayList<MonitoredTask> results = Lists.newArrayListWithCapacity(tasks.size() + f1.size()); processTasks(tasks, taskFilter, results); processTasks(f1, taskFilter, results); return results; }
3.26
hbase_TaskMonitor_createStatus_rdh
/** * Create a monitored task for users to inquire about the status * * @param description * description of the status * @param ignore * whether to skip tracking (e.g. show/clear/expire) the task in the * {@link TaskMonitor} * @param enableJournal * enable when the task contains some stage journals * @return a monitored task */ public synchronized MonitoredTask createStatus(String description, boolean ignore, boolean enableJournal) { MonitoredTask v0 = new MonitoredTaskImpl(enableJournal, description); MonitoredTask proxy = ((MonitoredTask) (Proxy.newProxyInstance(v0.getClass().getClassLoader(), new Class<?>[]{ MonitoredTask.class }, new PassthroughInvocationHandler<>(v0)))); TaskAndWeakRefPair pair = new TaskAndWeakRefPair(v0, proxy); if (tasks.isFull()) { purgeExpiredTasks(); } if (!ignore) { tasks.add(pair); } return proxy; }
3.26
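A usage sketch for createStatus above, assuming the standard MonitoredTask mutators (setStatus, markComplete, cleanup); the description string is illustrative:

MonitoredTask status = TaskMonitor.get().createStatus("Flushing region", false, true);
try {
  status.setStatus("writing store files"); // surfaced on the tasks status page
  // ... do the monitored work ...
  status.markComplete("flush done");
} finally {
  status.cleanup(); // release the task's journal/state
}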
hbase_TaskMonitor_get_rdh
/** * Get singleton instance. TODO this would be better off scoped to a single daemon */ public static synchronized TaskMonitor get() { if (instance == null) { instance = new TaskMonitor(HBaseConfiguration.create()); } return instance; }
3.26
hbase_TaskMonitor_createTaskFilter_rdh
/** * Create a task filter according to a given filter type. * * @param filter * type of monitored task * @return a task filter */ private static TaskFilter createTaskFilter(String filter) { switch (TaskFilter.TaskType.getTaskType(filter)) { case GENERAL : return task -> task instanceof MonitoredRPCHandler; case HANDLER : return task -> !(task instanceof MonitoredRPCHandler); case RPC : return task -> (!(task instanceof MonitoredRPCHandler)) || (!((MonitoredRPCHandler) (task)).isRPCRunning()); case OPERATION : return task -> (!(task instanceof MonitoredRPCHandler)) || (!((MonitoredRPCHandler) (task)).isOperationRunning()); default :return task -> false; } }
3.26
hbase_BoundedRecoveredEditsOutputSink_writeRemainingEntryBuffers_rdh
/** * Write out the remaining RegionEntryBuffers and close the writers. * * @return true when there is no error. */ private boolean writeRemainingEntryBuffers() throws IOException { for (EntryBuffers.RegionEntryBuffer v5 : entryBuffers.buffers.values()) { closeCompletionService.submit(() -> { append(v5); return null; }); } boolean progressFailed = false; try { for (int i = 0, n = entryBuffers.buffers.size(); i < n; i++) { Future<Void> future = closeCompletionService.take(); future.get(); if (((!progressFailed) && (reporter != null)) && (!reporter.progress())) {progressFailed = true; }} } catch (InterruptedException e) { IOException iie = new InterruptedIOException(); iie.initCause(e); throw iie; } catch (ExecutionException e) { throw new IOException(e.getCause()); } finally { closeThreadPool.shutdownNow(); } return !progressFailed; }
3.26
hbase_Encryption_getSecretKeyForSubject_rdh
/** * Resolves a key for the given subject * * @return a key for the given subject * @throws IOException * if the key is not found */ public static Key getSecretKeyForSubject(String subject, Configuration conf) throws IOException { KeyProvider provider = getKeyProvider(conf); if (provider != null) { try { Key[] keys = provider.getKeys(new String[]{ subject });if ((keys != null) && (keys.length > 0)) { return keys[0]; } } catch (Exception e) { throw new IOException(e); } } throw new IOException(("No key found for subject '" + subject) + "'"); }
3.26
hbase_Encryption_decrypt_rdh
/** * Decrypt a stream of ciphertext given a context and IV */ public static void decrypt(OutputStream out, InputStream in, int outLen, Context context, byte[] iv) throws IOException { Decryptor d = context.getCipher().getDecryptor(); d.setKey(context.getKey()); d.setIv(iv);// can be null decrypt(out, in, outLen, d); }
3.26
hbase_Encryption_isEncryptionEnabled_rdh
/** * Returns true if the column family encryption feature is enabled globally. */ public static boolean isEncryptionEnabled(Configuration conf) { return conf.getBoolean(CRYPTO_ENABLED_CONF_KEY, CRYPTO_ENABLED_CONF_DEFAULT); }
3.26
hbase_Encryption_getConfiguredHashAlgorithm_rdh
/** * Returns the Hash Algorithm defined in the crypto configuration. */ public static String getConfiguredHashAlgorithm(Configuration conf) { return conf.getTrimmed(CRYPTO_KEY_HASH_ALGORITHM_CONF_KEY, CRYPTO_KEY_HASH_ALGORITHM_CONF_DEFAULT); }
3.26
hbase_Encryption_hash128_rdh
/** * Return the MD5 digest of the concatenation of the supplied arguments. */ public static byte[] hash128(byte[]... args) { return m2("MD5", args); }
3.26
hbase_Encryption_pbkdf128_rdh
/** * Return a 128 bit key derived from the concatenation of the supplied arguments using * PBKDF2WithHmacSHA1 at 10,000 iterations. */ public static byte[] pbkdf128(byte[]... args) { StringBuilder sb = new StringBuilder(); for (byte[] b : args) { sb.append(Arrays.toString(b)); } return generateSecretKey("PBKDF2WithHmacSHA1", AES.KEY_LENGTH, sb.toString().toCharArray()); }
3.26
hbase_Encryption_hash256_rdh
/** * Return the SHA-256 digest of the concatenation of the supplied arguments. */ public static byte[] hash256(byte[]... args) { return m2("SHA-256", args); }
3.26
hbase_Encryption_failOnHashAlgorithmMismatch_rdh
/** * Returns the Hash Algorithm mismatch behaviour defined in the crypto configuration. */ public static boolean failOnHashAlgorithmMismatch(Configuration conf) { return conf.getBoolean(CRYPTO_KEY_FAIL_ON_ALGORITHM_MISMATCH_CONF_KEY, CRYPTO_KEY_FAIL_ON_ALGORITHM_MISMATCH_CONF_DEFAULT); }
3.26
hbase_Encryption_generateSecretKey_rdh
/** * Return a key (byte array) derived from the supplied password argument using the given algorithm * with a random salt at 10,000 iterations. * * @param algorithm * the secret key generation algorithm to use * @param keyLengthBytes * the length of the key to be derived (in bytes, not in bits) * @param password * char array to use as password for the key generation algorithm * @return secret key encoded as a byte array */ private static byte[] generateSecretKey(String algorithm, int keyLengthBytes, char[] password) { byte[] salt = new byte[keyLengthBytes]; Bytes.secureRandom(salt); PBEKeySpec spec = new PBEKeySpec(password, salt, 10000, keyLengthBytes * 8); try { return SecretKeyFactory.getInstance(algorithm).generateSecret(spec).getEncoded(); } catch (NoSuchAlgorithmException | InvalidKeySpecException e) { throw new RuntimeException(e); } }
3.26
hbase_Encryption_computeCryptoKeyHash_rdh
/** * Returns the hash of the supplied argument, using the hash algorithm specified in the given * config. */ public static byte[] computeCryptoKeyHash(Configuration conf, byte[] arg) { String algorithm = getConfiguredHashAlgorithm(conf); try { return m2(algorithm, arg); } catch (RuntimeException e) { String message = format("Error in computeCryptoKeyHash (please check your configuration " + "parameter %s and the security provider configuration of the JVM)", CRYPTO_KEY_HASH_ALGORITHM_CONF_KEY); throw new RuntimeException(message, e); } }
3.26
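The hash algorithm above comes from configuration; a small wiring sketch, assuming CRYPTO_KEY_HASH_ALGORITHM_CONF_KEY is the public constant named in the snippet's error message and keyBytes is hypothetical key material:

Configuration conf = HBaseConfiguration.create();
conf.set(Encryption.CRYPTO_KEY_HASH_ALGORITHM_CONF_KEY, "SHA-256"); // override the configured default
byte[] keyHash = Encryption.computeCryptoKeyHash(conf, keyBytes);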
hbase_Encryption_getCipher_rdh
/** * Get a cipher given a name * * @param name * the cipher name * @return the cipher, or null if a suitable one could not be found */ public static Cipher getCipher(Configuration conf, String name) { return getCipherProvider(conf).getCipher(name); }
3.26
hbase_Encryption_encrypt_rdh
/** * Encrypt a stream of plaintext given a context and IV * * @param out * ciphertext * @param in * plaintext */ public static void encrypt(OutputStream out, InputStream in, Context context, byte[] iv) throws IOException { Encryptor e = context.getCipher().getEncryptor(); e.setKey(context.getKey()); e.setIv(iv);// can be null e.reset(); encrypt(out, in, e); }
3.26
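A round-trip sketch pairing encrypt above with the decrypt snippet earlier; context is assumed to already carry a cipher and key, and iv may be null per the comments:

byte[] plaintext = Bytes.toBytes("secret");
ByteArrayOutputStream cipherOut = new ByteArrayOutputStream();
Encryption.encrypt(cipherOut, new ByteArrayInputStream(plaintext), context, iv);
ByteArrayOutputStream plainOut = new ByteArrayOutputStream();
Encryption.decrypt(plainOut, new ByteArrayInputStream(cipherOut.toByteArray()),
  plaintext.length, context, iv);
// plainOut.toByteArray() should now equal plaintext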
hbase_Encryption_decryptWithSubjectKey_rdh
/** * Decrypts a block of ciphertext with the symmetric key resolved for the given subject * * @param out * plaintext * @param in * ciphertext * @param outLen * the expected plaintext length * @param subject * the subject's key alias * @param conf * configuration * @param cipher * the encryption algorithm * @param iv * the initialization vector, can be null */ public static void decryptWithSubjectKey(OutputStream out, InputStream in, int outLen, String subject, Configuration conf, Cipher cipher, byte[] iv) throws IOException { Key key = getSecretKeyForSubject(subject, conf); if (key == null) { throw new IOException(("No key found for subject '" + subject) + "'"); } Decryptor d = cipher.getDecryptor(); d.setKey(key); d.setIv(iv);// can be null try {decrypt(out, in, outLen, d); } catch (IOException e) { // If the current cipher algorithm fails to unwrap, try the alternate cipher algorithm, if one // is configured String alternateAlgorithm = conf.get(HConstants.CRYPTO_ALTERNATE_KEY_ALGORITHM_CONF_KEY); if (alternateAlgorithm != null) { if (LOG.isDebugEnabled()) { LOG.debug(((("Unable to decrypt data with current cipher algorithm '" + conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES)) + "'. Trying with the alternate cipher algorithm '") + alternateAlgorithm) + "' configured."); } Cipher alterCipher = Encryption.getCipher(conf, alternateAlgorithm); if (alterCipher == null) { throw new RuntimeException(("Cipher '" + alternateAlgorithm) + "' not available"); } d = alterCipher.getDecryptor(); d.setKey(key); d.setIv(iv);// can be null decrypt(out, in, outLen, d); } else { throw new IOException(e); } } }
3.26
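The fallback path in decryptWithSubjectKey is driven by two HConstants keys; a configuration sketch (the alternate algorithm name here is purely hypothetical):

// Primary algorithm for new data, plus the algorithm older data may have been written with.
conf.set(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
conf.set(HConstants.CRYPTO_ALTERNATE_KEY_ALGORITHM_CONF_KEY, "legacy-cipher-name"); // hypothetical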
hbase_Encryption_getSupportedCiphers_rdh
/** * Get names of supported encryption algorithms * * @return Array of strings, each represents a supported encryption algorithm */ public static String[] getSupportedCiphers() { return getSupportedCiphers(HBaseConfiguration.create()); }
3.26
hbase_Encryption_encryptWithSubjectKey_rdh
/** * Encrypts a block of plaintext with the symmetric key resolved for the given subject * * @param out * ciphertext * @param in * plaintext * @param conf * configuration * @param cipher * the encryption algorithm * @param iv * the initialization vector, can be null */ public static void encryptWithSubjectKey(OutputStream out, InputStream in, String subject, Configuration conf, Cipher cipher, byte[] iv) throws IOException {Key v28 = getSecretKeyForSubject(subject, conf); if (v28 == null) { throw new IOException(("No key found for subject '" + subject) + "'"); } Encryptor e = cipher.getEncryptor(); e.setKey(v28); e.setIv(iv);// can be null encrypt(out, in, e); }
3.26
hbase_RegionScannerImpl_moreCellsInRow_rdh
/** * Based on the nextKv in the heap, and the current row, decide whether or not there are more * cells to be read in the heap. If the row of the nextKv in the heap matches the current row then * there are more cells to be read in the row. * * @return true When there are more cells in the row to be read */ private boolean moreCellsInRow(final Cell nextKv, Cell currentRowCell) { return (nextKv != null) && CellUtil.matchingRows(nextKv, currentRowCell); }
3.26
hbase_RegionScannerImpl_populateFromJoinedHeap_rdh
/** * Returns true if more cells exist after this batch, false if scanner is done */ private boolean populateFromJoinedHeap(List<Cell> results, ScannerContext scannerContext) throws IOException { assert joinedContinuationRow != null; boolean moreValues = populateResult(results, this.joinedHeap, scannerContext, joinedContinuationRow); if (!scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) { // We are done with this row, reset the continuation. joinedContinuationRow = null; }// As the data is obtained from two independent heaps, we need to // ensure that result list is sorted, because Result relies on that. results.sort(comparator); return moreValues; }
3.26
hbase_RegionScannerImpl_filterRow_rdh
/** * This function maintains backward compatibility for 0.94 filters. HBASE-6429 combines both the * filterRow() and filterRow({@code List<KeyValue> kvs}) functions. Code written for 0.94 or older * may not implement hasFilterRow() as HBASE-6429 expects, because in 0.94 hasFilterRow() only * returns true when filterRow({@code List<KeyValue> kvs}) is overridden, not when filterRow() is. * In that case filterRow() would otherwise be skipped. */ private boolean filterRow() throws IOException { // when hasFilterRow returns true, filter.filterRow() will be called automatically inside // filterRowCells(List<Cell> kvs) so we skip that scenario here. return ((filter != null) && (!filter.hasFilterRow())) && filter.filterRow(); }
3.26
hbase_RegionScannerImpl_resetFilters_rdh
/** * Reset the filter, if one is set. * * @throws IOException * in case the filter raises an I/O exception. */ protected final void resetFilters() throws IOException { if (filter != null) { filter.reset(); } }
3.26
hbase_RegionScannerImpl_populateResult_rdh
/** * Fetches records with currentRow into results list, until next row, batchLimit (if not -1) is * reached, or remainingResultSize (if not -1) is reached * * @param heap * KeyValueHeap to fetch data from. It must be positioned on the correct row before the call. * @return state of last call to {@link KeyValueHeap#next()} */ private boolean populateResult(List<Cell> results, KeyValueHeap heap, ScannerContext scannerContext, Cell currentRowCell) throws IOException { Cell nextKv; boolean moreCellsInRow = false; boolean tmpKeepProgress = scannerContext.getKeepProgress(); // Scanning between column families and thus the scope is between cells LimitScope limitScope = LimitScope.BETWEEN_CELLS; do { // Check for thread interrupt status in case we have been signaled from // #interruptRegionOperation. region.checkInterrupt(); // We want to maintain any progress that is made towards the limits while scanning across // different column families. To do this, we toggle the keep progress flag on during calls // to the StoreScanner to ensure that any progress made thus far is not wiped away. scannerContext.setKeepProgress(true); heap.next(results, scannerContext); scannerContext.setKeepProgress(tmpKeepProgress); nextKv = heap.peek(); moreCellsInRow = moreCellsInRow(nextKv, currentRowCell); if (!moreCellsInRow) { incrementCountOfRowsScannedMetric(scannerContext); } if (moreCellsInRow && scannerContext.checkBatchLimit(limitScope)) { return scannerContext.setScannerState(NextState.BATCH_LIMIT_REACHED).hasMoreValues(); } else if (scannerContext.checkSizeLimit(limitScope)) { ScannerContext.NextState state = (moreCellsInRow) ? NextState.SIZE_LIMIT_REACHED_MID_ROW : NextState.SIZE_LIMIT_REACHED; return scannerContext.setScannerState(state).hasMoreValues(); } else if (scannerContext.checkTimeLimit(limitScope)) { ScannerContext.NextState v18 = (moreCellsInRow) ? NextState.TIME_LIMIT_REACHED_MID_ROW : NextState.TIME_LIMIT_REACHED; return scannerContext.setScannerState(v18).hasMoreValues(); } } while (moreCellsInRow); return nextKv != null; }
3.26
hbase_RegionScannerImpl_isFilterDone_rdh
/** * Returns True if a filter rules the scanner is over, done. */ @Override public synchronized boolean isFilterDone() throws IOException {return isFilterDoneInternal(); }
3.26
hbase_RegionScannerImpl_joinedHeapMayHaveData_rdh
/** * Returns true when the joined heap may have data for the current row */ private boolean joinedHeapMayHaveData(Cell currentRowCell) throws IOException { Cell nextJoinedKv = joinedHeap.peek(); boolean matchCurrentRow = (nextJoinedKv != null) && CellUtil.matchingRows(nextJoinedKv, currentRowCell); boolean matchAfterSeek = false; // If the next value in the joined heap does not match the current row, try to seek to the // correct row if (!matchCurrentRow) {Cell firstOnCurrentRow = PrivateCellUtil.createFirstOnRow(currentRowCell); boolean seekSuccessful = this.joinedHeap.requestSeek(firstOnCurrentRow, true, true);matchAfterSeek = (seekSuccessful && (joinedHeap.peek() != null)) && CellUtil.matchingRows(joinedHeap.peek(), currentRowCell); } return matchCurrentRow || matchAfterSeek; }
3.26
hbase_WALEventTrackerQueueService_addToQueue_rdh
/* Package-private so it can be used in testing. */ synchronized void addToQueue(WALEventTrackerPayload payload) { queue.add(payload); }
3.26
hbase_IncrementalTableBackupClient_isActiveWalPath_rdh
/** * Check if a given path belongs to the active WAL directory * * @param p * path * @return true, if yes */ protected boolean isActiveWalPath(Path p) { return !AbstractFSWALProvider.isArchivedLogFile(p); }
3.26
hbase_RawFloat_decodeFloat_rdh
/** * Read a {@code float} value from the buffer {@code buff}. */ public float decodeFloat(byte[] buff, int offset) { return Bytes.toFloat(buff, offset); }
3.26
hbase_RawFloat_encodeFloat_rdh
/** * Write instance {@code val} into buffer {@code buff}. */ public int encodeFloat(byte[] buff, int offset, float val) { return Bytes.putFloat(buff, offset, val); }
3.26
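A round-trip sketch combining encodeFloat and decodeFloat above; it assumes RawFloat has a public no-arg constructor (the methods shown are stateless):

RawFloat rawFloat = new RawFloat();
byte[] buff = new byte[Float.BYTES];       // 4-byte buffer for one float
rawFloat.encodeFloat(buff, 0, 3.14f);
float roundTripped = rawFloat.decodeFloat(buff, 0); // 3.14f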
hbase_QuotaTableUtil_m5_rdh
/** * Returns a set of the names of all namespaces containing snapshot entries. * * @param conn * connection to re-use */ public static Set<String> m5(Connection conn) throws IOException { try (Table quotaTable = conn.getTable(QUOTA_TABLE_NAME);ResultScanner rs = quotaTable.getScanner(createScanForNamespaceSnapshotSizes())) { Set<String> snapshots = new HashSet<>(); for (Result r : rs) { CellScanner cs = r.cellScanner(); while (cs.advance()) { cs.current(); snapshots.add(getNamespaceFromRowKey(r.getRow()));} } return snapshots; } }
3.26
hbase_QuotaTableUtil_doGet_rdh
/* ========================================================================= HTable helpers */ protected static Result doGet(final Connection connection, final Get get) throws IOException { try (Table table = connection.getTable(QUOTA_TABLE_NAME)) { return table.get(get); } }
3.26
hbase_QuotaTableUtil_getObservedSnapshotSizes_rdh
/** * Fetches any persisted HBase snapshot sizes stored in the quota table. The sizes here are * computed relative to the table which the snapshot was created from. A snapshot's size will not * include the size of files which the table still refers to. These sizes, in bytes, are what is used * internally to compute quota violation for tables and namespaces. * * @return A map of snapshot name to size in bytes per space quota computations */ public static Map<String, Long> getObservedSnapshotSizes(Connection conn) throws IOException { try (Table quotaTable = conn.getTable(QUOTA_TABLE_NAME); ResultScanner rs = quotaTable.getScanner(createScanForSpaceSnapshotSizes())) { final Map<String, Long> snapshotSizes = new HashMap<>(); for (Result v80 : rs) { CellScanner cs = v80.cellScanner(); while (cs.advance()) { Cell c = cs.current(); final String snapshot = extractSnapshotNameFromSizeCell(c); final long size = parseSnapshotSize(c); snapshotSizes.put(snapshot, size); } } return snapshotSizes; } }
3.26
hbase_QuotaTableUtil_makeQuotaSnapshotGetForTable_rdh
/** * Creates a {@link Get} which returns only {@link SpaceQuotaSnapshot} from the quota table for a * specific table. * * @param tn * table name to get from. Can't be null. */ public static Get makeQuotaSnapshotGetForTable(TableName tn) { Get v20 = new Get(getTableRowKey(tn)); // Limit to "u:v" column v20.addColumn(QUOTA_FAMILY_USAGE, QUOTA_QUALIFIER_POLICY); return v20; }
3.26
hbase_QuotaTableUtil_parseSnapshotSize_rdh
/** * Parses the snapshot size from the given Cell's value. */static long parseSnapshotSize(Cell c) throws InvalidProtocolBufferException { ByteString bs = UnsafeByteOperations.unsafeWrap(c.getValueArray(), c.getValueOffset(), c.getValueLength()); return QuotaProtos.SpaceQuotaSnapshot.parseFrom(bs).getQuotaUsage(); }
3.26
hbase_QuotaTableUtil_makeQuotaSnapshotScan_rdh
/** * Creates a {@link Scan} which returns only quota snapshots from the quota table. */ public static Scan makeQuotaSnapshotScan() { return makeQuotaSnapshotScanForTable(null); }
3.26
hbase_QuotaTableUtil_getSnapshots_rdh
/** * Fetches all {@link SpaceQuotaSnapshot} objects from the {@code hbase:quota} table. * * @param conn * The HBase connection * @return A map of table names and their computed snapshot. */ public static Map<TableName, SpaceQuotaSnapshot> getSnapshots(Connection conn) throws IOException { Map<TableName, SpaceQuotaSnapshot> snapshots = new HashMap<>(); try (Table quotaTable = conn.getTable(QUOTA_TABLE_NAME);ResultScanner rs = quotaTable.getScanner(makeQuotaSnapshotScan())) { for (Result v17 : rs) { extractQuotaSnapshot(v17, snapshots); }} return snapshots; }
3.26
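A reader-side sketch for getSnapshots above, assuming an open Connection conn and that SpaceQuotaSnapshot exposes getUsage()/getLimit() accessors:

Map<TableName, SpaceQuotaSnapshot> snapshots = QuotaTableUtil.getSnapshots(conn);
for (Map.Entry<TableName, SpaceQuotaSnapshot> e : snapshots.entrySet()) {
  System.out.println(e.getKey() + ": " + e.getValue().getUsage() + "/" + e.getValue().getLimit() + " bytes");
}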
hbase_QuotaTableUtil_m4_rdh
/** * Returns a list of {@code Delete} to remove all table snapshot entries from quota table. * * @param connection * connection to re-use */ static List<Delete> m4(Connection connection) throws IOException { return createDeletesForExistingSnapshotsFromScan(connection, createScanForSpaceSnapshotSizes()); }
3.26
hbase_QuotaTableUtil_createDeletesForExistingTableSnapshotSizes_rdh
/** * Returns a list of {@code Delete} to remove the given table snapshot entries from the quota * table * * @param snapshotEntriesToRemove * the entries to remove */ static List<Delete> createDeletesForExistingTableSnapshotSizes(Multimap<TableName, String> snapshotEntriesToRemove) { List<Delete> deletes = new ArrayList<>(); for (Map.Entry<TableName, Collection<String>> entry : snapshotEntriesToRemove.asMap().entrySet()) { for (String snapshot : entry.getValue()) { Delete d = new Delete(getTableRowKey(entry.getKey())); d.addColumns(QUOTA_FAMILY_USAGE, Bytes.add(QUOTA_SNAPSHOT_SIZE_QUALIFIER, Bytes.toBytes(snapshot))); deletes.add(d); } } return deletes; }
3.26
hbase_QuotaTableUtil_createDeletesForExistingNamespaceSnapshotSizes_rdh
/** * Returns a list of {@code Delete} to remove all namespace snapshot entries from quota table. * * @param connection * connection to re-use */ static List<Delete> createDeletesForExistingNamespaceSnapshotSizes(Connection connection) throws IOException { return createDeletesForExistingSnapshotsFromScan(connection, createScanForNamespaceSnapshotSizes()); }
3.26
hbase_QuotaTableUtil_makeGetForSnapshotSize_rdh
/** * Creates a {@link Get} for the HBase snapshot's size against the given table. */ static Get makeGetForSnapshotSize(TableName tn, String snapshot) { Get g = new Get(Bytes.add(QUOTA_TABLE_ROW_KEY_PREFIX, Bytes.toBytes(tn.toString())));g.addColumn(QUOTA_FAMILY_USAGE, Bytes.add(QUOTA_SNAPSHOT_SIZE_QUALIFIER, Bytes.toBytes(snapshot))); return g; }
3.26
hbase_QuotaTableUtil_makeFilter_rdh
/** * Converts a QuotaFilter to a serializable FilterList. */ public static Filter makeFilter(final QuotaFilter filter) { FilterList filterList = new FilterList(Operator.MUST_PASS_ALL); if (StringUtils.isNotEmpty(filter.getUserFilter())) { FilterList userFilters = new FilterList(Operator.MUST_PASS_ONE); boolean hasFilter = false; if (StringUtils.isNotEmpty(filter.getNamespaceFilter())) { FilterList nsFilters = new FilterList(Operator.MUST_PASS_ALL); nsFilters.addFilter(new RowFilter(CompareOperator.EQUAL, new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0))); nsFilters.addFilter(new QualifierFilter(CompareOperator.EQUAL, new RegexStringComparator(getSettingsQualifierRegexForUserNamespace(filter.getNamespaceFilter()), 0))); userFilters.addFilter(nsFilters); hasFilter = true; } if (StringUtils.isNotEmpty(filter.getTableFilter())) { FilterList tableFilters = new FilterList(Operator.MUST_PASS_ALL); tableFilters.addFilter(new RowFilter(CompareOperator.EQUAL, new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0))); tableFilters.addFilter(new QualifierFilter(CompareOperator.EQUAL, new RegexStringComparator(getSettingsQualifierRegexForUserTable(filter.getTableFilter()), 0))); userFilters.addFilter(tableFilters); hasFilter = true; } if (!hasFilter) { userFilters.addFilter(new RowFilter(CompareOperator.EQUAL, new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0))); } filterList.addFilter(userFilters); } else if (StringUtils.isNotEmpty(filter.getTableFilter())) { filterList.addFilter(new RowFilter(CompareOperator.EQUAL, new RegexStringComparator(getTableRowKeyRegex(filter.getTableFilter()), 0))); } else if (StringUtils.isNotEmpty(filter.getNamespaceFilter())) { filterList.addFilter(new RowFilter(CompareOperator.EQUAL, new RegexStringComparator(getNamespaceRowKeyRegex(filter.getNamespaceFilter()), 0))); } else if (StringUtils.isNotEmpty(filter.getRegionServerFilter())) { filterList.addFilter(new RowFilter(CompareOperator.EQUAL, new RegexStringComparator(getRegionServerRowKeyRegex(filter.getRegionServerFilter()), 0))); } return filterList; }
3.26
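A caller sketch for makeFilter above; it assumes QuotaFilter's setters are fluent (returning the filter), and the user name is illustrative:

Filter quotaRowFilter = QuotaTableUtil.makeFilter(new QuotaFilter().setUserFilter("alice"));
Scan scan = new Scan().setFilter(quotaRowFilter); // then run against the hbase:quota table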
hbase_QuotaTableUtil_makeQuotaSnapshotScanForTable_rdh
/** * Creates a {@link Scan} which returns only {@link SpaceQuotaSnapshot} from the quota table for a * specific table. * * @param tn * Optionally, a table name to limit the scan's rowkey space. Can be null. */ public static Scan makeQuotaSnapshotScanForTable(TableName tn) { Scan s = new Scan(); // Limit to "u:v" column s.addColumn(QUOTA_FAMILY_USAGE, QUOTA_QUALIFIER_POLICY); if (null == tn) { s.setStartStopRowForPrefixScan(QUOTA_TABLE_ROW_KEY_PREFIX); } else { byte[] row = getTableRowKey(tn); // Limit rowspace to the "t:" prefix s.withStartRow(row, true).withStopRow(row, true); } return s; }
3.26
hbase_QuotaTableUtil_createPutForSnapshotSize_rdh
/** * Creates a {@link Put} to persist the current size of the {@code snapshot} with respect to the * given {@code table}. */ static Put createPutForSnapshotSize(TableName tableName, String snapshot, long size) { // We just need a pb message with some `long usage`, so we can just reuse the // SpaceQuotaSnapshot message instead of creating a new one. Put v45 = new Put(getTableRowKey(tableName)); v45.addColumn(QUOTA_FAMILY_USAGE, getSnapshotSizeQualifier(snapshot), QuotaProtos.SpaceQuotaSnapshot.newBuilder().setQuotaUsage(size).build().toByteArray()); return v45; }
3.26
hbase_QuotaTableUtil_deleteTableUsageSnapshotsForNamespace_rdh
/** * Remove table usage snapshots (u:p columns) for the namespace passed * * @param connection * connection to re-use * @param namespace * the namespace to fetch the list of table usage snapshots */ static void deleteTableUsageSnapshotsForNamespace(Connection connection, String namespace) throws IOException { Scan s = new Scan(); // Get rows for all tables in namespace s.setStartStopRowForPrefixScan(Bytes.add(QUOTA_TABLE_ROW_KEY_PREFIX, Bytes.toBytes(namespace + TableName.NAMESPACE_DELIM))); // Scan for table usage column (u:p) in quota table s.addColumn(QUOTA_FAMILY_USAGE, QUOTA_QUALIFIER_POLICY); // Scan for table quota column (q:s) if table has a space quota defined s.addColumn(QUOTA_FAMILY_INFO, f0); try (Table quotaTable = connection.getTable(QUOTA_TABLE_NAME);ResultScanner rs = quotaTable.getScanner(s)) { for (Result r : rs) {byte[] data = r.getValue(QUOTA_FAMILY_INFO, f0); // if table does not have a table space quota defined, delete table usage column (u:p) if (data == null) { Delete delete = new Delete(r.getRow()); delete.addColumns(QUOTA_FAMILY_USAGE, QUOTA_QUALIFIER_POLICY); quotaTable.delete(delete); } } } }
3.26
hbase_QuotaTableUtil_createPutForNamespaceSnapshotSize_rdh
/** * Creates a {@code Put} for the namespace's total snapshot size. */ static Put createPutForNamespaceSnapshotSize(String namespace, long size) { Put p = new Put(getNamespaceRowKey(namespace)); p.addColumn(QUOTA_FAMILY_USAGE, QUOTA_SNAPSHOT_SIZE_QUALIFIER, QuotaProtos.SpaceQuotaSnapshot.newBuilder().setQuotaUsage(size).build().toByteArray()); return p; }
3.26
hbase_QuotaTableUtil_createScanForNamespaceSnapshotSizes_rdh
/** * Returns a scan for all namespace snapshot entries of the given namespace * * @param namespace * name of the namespace whose snapshot entries are to be scanned */ static Scan createScanForNamespaceSnapshotSizes(String namespace) { Scan s = new Scan(); if ((namespace == null) || namespace.isEmpty()) {// Read all namespaces, just look at the row prefix s.setStartStopRowForPrefixScan(QUOTA_NAMESPACE_ROW_KEY_PREFIX); } else { // Fetch the exact row for the namespace byte[] rowkey = getNamespaceRowKey(namespace); // Fetch just this one row s.withStartRow(rowkey).withStopRow(rowkey, true); } // Just the usage family and only the snapshot size qualifiers return s.addFamily(QUOTA_FAMILY_USAGE).setFilter(new ColumnPrefixFilter(QUOTA_SNAPSHOT_SIZE_QUALIFIER)); }
3.26
hbase_QuotaTableUtil_createPutForSpaceSnapshot_rdh
/** * Creates a {@link Put} to store the given {@code snapshot} for the given {@code tableName} in * the quota table. */ static Put createPutForSpaceSnapshot(TableName tableName, SpaceQuotaSnapshot snapshot) { Put p = new Put(getTableRowKey(tableName)); p.addColumn(QUOTA_FAMILY_USAGE, QUOTA_QUALIFIER_POLICY, SpaceQuotaSnapshot.toProtoSnapshot(snapshot).toByteArray()); return p; }
3.26
hbase_QuotaTableUtil_createGetNamespaceSnapshotSize_rdh
/** * Creates a {@code Get} to fetch the namespace's total snapshot size. */ static Get createGetNamespaceSnapshotSize(String namespace) { Get g = new Get(getNamespaceRowKey(namespace)); g.addColumn(QUOTA_FAMILY_USAGE, QUOTA_SNAPSHOT_SIZE_QUALIFIER); return g; }
3.26
hbase_QuotaTableUtil_getCurrentSnapshotFromQuotaTable_rdh
/** * Returns the current space quota snapshot of the given {@code tableName} from * {@code QuotaTableUtil.QUOTA_TABLE_NAME} or null if no quota information is available for * that tableName. * * @param conn * connection to re-use * @param tableName * name of the table whose current snapshot is to be retrieved */ public static SpaceQuotaSnapshot getCurrentSnapshotFromQuotaTable(Connection conn, TableName tableName) throws IOException { try (Table quotaTable = conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) { Map<TableName, SpaceQuotaSnapshot> snapshots = new HashMap<>(1); Result result = quotaTable.get(makeQuotaSnapshotGetForTable(tableName)); // if we don't have any row corresponding to this get, return null if (result.isEmpty()) { return null; } // otherwise, extract quota snapshot in snapshots object extractQuotaSnapshot(result, snapshots); return snapshots.get(tableName); } }
3.26
hbase_QuotaTableUtil_getTableSnapshots_rdh
/** * Returns a multimap for all existing table snapshot entries. * * @param conn * connection to re-use */ public static Multimap<TableName, String> getTableSnapshots(Connection conn) throws IOException { try (Table quotaTable = conn.getTable(QUOTA_TABLE_NAME);ResultScanner rs = quotaTable.getScanner(createScanForSpaceSnapshotSizes())) { Multimap<TableName, String> snapshots = HashMultimap.create(); for (Result r : rs) { CellScanner cs = r.cellScanner(); while (cs.advance()) { Cell c = cs.current(); final String snapshot = extractSnapshotNameFromSizeCell(c); snapshots.put(getTableFromRowKey(r.getRow()), snapshot); } } return snapshots; } }
3.26
hbase_QuotaTableUtil_extractQuotaSnapshot_rdh
/** * Extracts the {@link SpaceViolationPolicy} and {@link TableName} from the provided * {@link Result} and adds them to the given {@link Map}. If the result does not contain the * expected information or the serialized policy in the value is invalid, this method will throw * an {@link IllegalArgumentException}. * * @param result * A row from the quota table. * @param snapshots * A map of snapshots to add the result of this method into. */ public static void extractQuotaSnapshot(Result result, Map<TableName, SpaceQuotaSnapshot> snapshots) { byte[] row = Objects.requireNonNull(result).getRow(); if ((row == null) || (row.length == 0)) { throw new IllegalArgumentException("Provided result had a null row"); } final TableName targetTableName = getTableFromRowKey(row); Cell v23 = result.getColumnLatestCell(QUOTA_FAMILY_USAGE, QUOTA_QUALIFIER_POLICY); if (v23 == null) { throw new IllegalArgumentException((("Result did not contain the expected column " + f1) + ", ") + result.toString()); } ByteString buffer = UnsafeByteOperations.unsafeWrap(v23.getValueArray(), v23.getValueOffset(), v23.getValueLength()); try { QuotaProtos.SpaceQuotaSnapshot snapshot = QuotaProtos.SpaceQuotaSnapshot.parseFrom(buffer); snapshots.put(targetTableName, SpaceQuotaSnapshot.toSpaceQuotaSnapshot(snapshot)); } catch (InvalidProtocolBufferException e) {throw new IllegalArgumentException("Result did not contain a valid SpaceQuota protocol buffer message", e); } }
3.26
hbase_QuotaTableUtil_getNamespaceSnapshotSize_rdh
/** * Fetches the computed size of all snapshots against tables in a namespace for space quotas. */static long getNamespaceSnapshotSize(Connection conn, String namespace) throws IOException {try (Table quotaTable = conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) { Result r = quotaTable.get(createGetNamespaceSnapshotSize(namespace)); if (r.isEmpty()) { return 0L; } r.advance(); return parseSnapshotSize(r.current()); } catch (InvalidProtocolBufferException e) { throw new IOException("Could not parse snapshot size value for namespace " + namespace, e); } }
3.26
hbase_QuotaTableUtil_createDeletesForExistingSnapshotsFromScan_rdh
/** * Returns a list of {@code Delete} to remove all entries returned by the passed scanner. * * @param connection * connection to re-use * @param scan * the scanner to use to generate the list of deletes */ static List<Delete> createDeletesForExistingSnapshotsFromScan(Connection connection, Scan scan) throws IOException { List<Delete> deletes = new ArrayList<>(); try (Table quotaTable = connection.getTable(QUOTA_TABLE_NAME);ResultScanner rs = quotaTable.getScanner(scan)) { for (Result r : rs) { CellScanner cs = r.cellScanner(); while (cs.advance()) { Cell c = cs.current(); byte[] family = Bytes.copy(c.getFamilyArray(), c.getFamilyOffset(), c.getFamilyLength()); byte[] qual = Bytes.copy(c.getQualifierArray(), c.getQualifierOffset(), c.getQualifierLength()); Delete d = new Delete(r.getRow()); d.addColumns(family, qual); deletes.add(d); } } return deletes; } }
3.26
hbase_ConnectionFactory_createAsyncConnection_rdh
/** * Create a new AsyncConnection instance using the passed {@code conf} and {@code user}. * AsyncConnection encapsulates all housekeeping for a connection to the cluster. All tables and * interfaces created from returned connection share zookeeper connection, meta cache, and * connections to region servers and masters. * <p> * The caller is responsible for calling {@link AsyncConnection#close()} on the returned * connection instance. * <p> * Usually you should only create one AsyncConnection instance in your code and use it everywhere * as it is thread safe. * * @param conf * configuration * @param user * the user the asynchronous connection is for * @return AsyncConnection object wrapped by CompletableFuture */ public static CompletableFuture<AsyncConnection> createAsyncConnection(Configuration conf, final User user) { return createAsyncConnection(conf, user, null); } /** * Create a new AsyncConnection instance using the passed {@code conf} and {@code user}. * AsyncConnection encapsulates all housekeeping for a connection to the cluster. All tables and * interfaces created from returned connection share zookeeper connection, meta cache, and * connections to region servers and masters. * <p> * The caller is responsible for calling {@link AsyncConnection#close()}
3.26
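A non-blocking usage sketch for createAsyncConnection above; table and row names are hypothetical, and the caller still owns close() per the javadoc:

ConnectionFactory.createAsyncConnection(conf).thenAccept(conn -> {
  conn.getTable(TableName.valueOf("table1"))
    .get(new Get(Bytes.toBytes("row1")))
    .thenAccept(result -> System.out.println(result));
  // remember to conn.close() once the application is done with it
});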
hbase_ConnectionFactory_createConnection_rdh
/** * Create a new Connection instance using the passed <code>conf</code> instance. Connection * encapsulates all housekeeping for a connection to the cluster. All tables and interfaces * created from returned connection share zookeeper connection, meta cache, and connections to * region servers and masters. <br> * The caller is responsible for calling {@link Connection#close()} on the returned connection * instance. Typical usage: * * <pre> * Connection connection = ConnectionFactory.createConnection(conf); * Table table = connection.getTable(TableName.valueOf("table1")); * try { * table.get(...); * ... * } finally { * table.close(); * connection.close(); * } * </pre> * * @param conf * configuration * @param user * the user the connection is for * @param pool * the thread pool to use for batch operations * @return Connection object for <code>conf</code> */ public static Connection createConnection(Configuration conf, ExecutorService pool, final User user) throws IOException { return createConnection(conf, pool, user, Collections.emptyMap()); } /** * Create a new Connection instance using the passed <code>conf</code> instance. Connection * encapsulates all housekeeping for a connection to the cluster. All tables and interfaces * created from returned connection share zookeeper connection, meta cache, and connections to * region servers and masters. <br> * The caller is responsible for calling {@link Connection#close()} on the returned connection * instance. Typical usage: * * <pre> * Connection connection = ConnectionFactory.createConnection(conf); * Table table = connection.getTable(TableName.valueOf("table1")); * try { * table.get(...); * ... * } finally { * table.close(); * connection.close(); * }
3.26
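The javadoc's try/finally example can be written more tightly with try-with-resources, since Connection and Table are both Closeable; a sketch with hypothetical table/row names:

try (Connection connection = ConnectionFactory.createConnection(conf);
     Table table = connection.getTable(TableName.valueOf("table1"))) {
  table.get(new Get(Bytes.toBytes("row1")));
} // both close automatically, in reverse order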
hbase_LockManager_lockHeartbeat_rdh
/** * * @param keepAlive * if false, release the lock. * @return true, if procedure is found and it has the lock; else false. */ public boolean lockHeartbeat(final long procId, final boolean keepAlive) throws IOException { final LockProcedure proc = master.getMasterProcedureExecutor().getProcedure(LockProcedure.class, procId); if (proc == null) return false; master.getMasterCoprocessorHost().preLockHeartbeat(proc, keepAlive); proc.updateHeartBeat(); if (!keepAlive) { proc.unlock(master.getMasterProcedureExecutor().getEnvironment()); } master.getMasterCoprocessorHost().postLockHeartbeat(proc, keepAlive); return proc.isLocked(); }
3.26
hbase_LockManager_requestRegionsLock_rdh
/** * * @throws IllegalArgumentException * if the regions are not all from the same table. */ public long requestRegionsLock(final RegionInfo[] regionInfos, final String description, final NonceKey nonceKey) throws IllegalArgumentException, IOException { master.getMasterCoprocessorHost().preRequestLock(null, null, regionInfos, LockType.EXCLUSIVE, description); final LockProcedure proc = new LockProcedure(master.getConfiguration(), regionInfos, LockType.EXCLUSIVE, description, null); submitProcedure(proc, nonceKey); master.getMasterCoprocessorHost().postRequestLock(null, null, regionInfos, LockType.EXCLUSIVE, description); return proc.getProcId(); }
3.26
hbase_LockManager_release_rdh
/** * Release the lock. No-op if the lock was never acquired. */ public void release() { if (proc != null) { proc.unlock(master.getMasterProcedureExecutor().getEnvironment()); } proc = null; }
3.26
hbase_LockManager_acquire_rdh
/** * Acquire the lock, waiting indefinitely until the lock is released or the thread is * interrupted. * * @throws InterruptedException * If current thread is interrupted while waiting for the lock */ public boolean acquire() throws InterruptedException { return tryAcquire(0); }
3.26
hbase_ExceptionUtil_asInterrupt_rdh
/** * Returns an InterruptedIOException if t was an interruption, null otherwise */ public static InterruptedIOException asInterrupt(Throwable t) { if (t instanceof SocketTimeoutException) { return null; } if (t instanceof InterruptedIOException) { return ((InterruptedIOException) (t)); } if ((t instanceof InterruptedException) || (t instanceof ClosedByInterruptException)) { InterruptedIOException iie = new InterruptedIOException("Origin: " + t.getClass().getSimpleName()); iie.initCause(t); return iie; } return null; }
3.26
hbase_ExceptionUtil_rethrowIfInterrupt_rdh
/** * Throw InterruptedIOException if t was an interruption, nothing otherwise. */ public static void rethrowIfInterrupt(Throwable t) throws InterruptedIOException { InterruptedIOException iie = asInterrupt(t); if (iie != null) { throw iie; } }
3.26
hbase_ExceptionUtil_isInterrupt_rdh
/** * Returns true if the throwable comes from an interruption, false otherwise. */ public static boolean isInterrupt(Throwable t) { if (t instanceof InterruptedException) { return true; } if (t instanceof SocketTimeoutException) { return false; } return (t instanceof InterruptedIOException) || (t instanceof ClosedByInterruptException); }
3.26
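A call-site sketch tying the three ExceptionUtil helpers above together; doIo() is a hypothetical I/O call:

try {
  doIo();
} catch (Exception e) {
  // Surfaces interrupts as InterruptedIOException; anything that survives is a real failure.
  ExceptionUtil.rethrowIfInterrupt(e);
  throw new IOException("doIo failed", e);
}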
hbase_GsonSerializationFeature_bindFactory_rdh
/** * Helper method for smoothing over use of {@link SupplierFactoryAdapter}. Inspired by internal * implementation details of jersey itself. */ private <T> ServiceBindingBuilder<T> bindFactory(Supplier<T> supplier) { return bindFactory(new SupplierFactoryAdapter<>(supplier)); }
3.26
hbase_CompactingMemStore_stopReplayingFromWAL_rdh
/** * Informs the MemStore that replaying edits from the WAL is done */ @Override public void stopReplayingFromWAL() { inWalReplay = false; }
3.26
hbase_CompactingMemStore_m4_rdh
// debug method public void m4() { String msg = "active size=" + getActive().getDataSize(); msg += " allow compaction is " + (allowCompaction.get() ? "true" : "false"); msg += " inMemoryCompactionInProgress is " + (inMemoryCompactionInProgress.get() ? "true" : "false"); LOG.debug(msg); }
3.26
hbase_CompactingMemStore_m0_rdh
// setter is used only for testability void m0(IndexType type) { indexType = type; // Because this functionality is for testing only and tests are setting in-memory flush size // according to their need, there is no setting of in-memory flush size, here. // If it is needed, please change in-memory flush size explicitly }
3.26
hbase_CompactingMemStore_stopCompaction_rdh
/** * Request to cancel the asynchronous compaction task (triggered by an in-memory flush). The * compaction may still happen if the request was sent too late. Non-blocking. */ private void stopCompaction() { if (inMemoryCompactionInProgress.get()) { compactor.stop(); } }
3.26
hbase_CompactingMemStore_setCompositeSnapshot_rdh
// the following three methods allow to manipulate the settings of composite snapshot public void setCompositeSnapshot(boolean useCompositeSnapshot) { this.compositeSnapshot = useCompositeSnapshot; }
3.26
hbase_CompactingMemStore_preUpdate_rdh
/** * Issue any synchronization and tests needed before applying the update. For a compacting * memstore this means checking that the update can increase the size without overflow * * @param currentActive * the segment to be updated * @param cell * the cell to be added * @param memstoreSizing * object to accumulate region size changes * @return true iff we can proceed with applying the update */ @Override protected boolean preUpdate(MutableSegment currentActive, Cell cell, MemStoreSizing memstoreSizing) { if (currentActive.sharedLock()) { if (checkAndAddToActiveSize(currentActive, cell, memstoreSizing)) { return true; } currentActive.sharedUnlock(); } return false; }
3.26
hbase_CompactingMemStore_checkAndAddToActiveSize_rdh
/** * Check whether anything needs to be done based on the current active set size. The method is * invoked upon every addition to the active set. For CompactingMemStore, flush the active set to * the read-only memory if its size is above the threshold * * @param currActive * intended segment to update * @param cellToAdd * cell to be added to the segment * @param memstoreSizing * object to accumulate changed size * @return true if the cell can be added to the currActive */ protected boolean checkAndAddToActiveSize(MutableSegment currActive, Cell cellToAdd, MemStoreSizing memstoreSizing) { long cellSize = MutableSegment.getCellLength(cellToAdd); boolean successAdd = false; while (true) { long segmentDataSize = currActive.getDataSize(); if ((!inWalReplay) && (segmentDataSize > inmemoryFlushSize)) { // when replaying edits from WAL there is no need for an in-memory flush regardless of the size // otherwise size below flush threshold try to update atomically break; } if (currActive.compareAndSetDataSize(segmentDataSize, segmentDataSize + cellSize)) { if (memstoreSizing != null) { memstoreSizing.incMemStoreSize(cellSize, 0, 0, 0); } successAdd = true; break; } } if ((!inWalReplay) && (currActive.getDataSize() > inmemoryFlushSize)) { // size above flush threshold so we flush in memory this.tryFlushInMemoryAndCompactingAsync(currActive); } return successAdd; } /** * Try to flush the currActive in memory and submit the background * {@link InMemoryCompactionRunnable} to * {@link RegionServicesForStores#getInMemoryCompactionPool()}
3.26
hbase_CompactingMemStore_flattenOneSegment_rdh
/** * * @param requesterVersion * The caller must hold the VersionedList of the pipeline with version * taken earlier. This version must be passed as a parameter here. The * flattening happens only if versions match. */ public void flattenOneSegment(long requesterVersion, MemStoreCompactionStrategy.Action action) { pipeline.flattenOneSegment(requesterVersion, indexType, action); }
3.26
hbase_CompactingMemStore_getScanners_rdh
/** * This method is protected under {@link HStore#lock} read lock. */ @Override public List<KeyValueScanner> getScanners(long readPt) throws IOException { MutableSegment activeTmp = getActive(); List<? extends Segment> pipelineList = pipeline.getSegments(); List<? extends Segment> snapshotList = snapshot.getAllSegments(); long numberOfSegments = (1L + pipelineList.size()) + snapshotList.size(); // The list of elements in pipeline + the active element + the snapshot segment List<KeyValueScanner> list = createList(((int) (numberOfSegments))); addToScanners(activeTmp, readPt, list); addToScanners(pipelineList, readPt, list); addToScanners(snapshotList, readPt, list); return list; }
3.26
hbase_CompactingMemStore_getSegments_rdh
// the getSegments() method is used for tests only @Override protected List<Segment> getSegments() { List<? extends Segment> pipelineList = pipeline.getSegments(); List<Segment> list = new ArrayList<>(pipelineList.size() + 2); list.add(getActive()); list.addAll(pipelineList); list.addAll(snapshot.getAllSegments()); return list; }
3.26
hbase_CompactingMemStore_snapshot_rdh
/** * Push the current active memstore segment into the pipeline and create a snapshot of the tail of * the current compaction pipeline. The snapshot must be cleared by a call to * {@link #clearSnapshot(long)}. * * @return {@link MemStoreSnapshot} */ @Override public MemStoreSnapshot snapshot() { // If snapshot currently has entries, then flusher failed or didn't call // cleanup. Log a warning. if (!this.snapshot.isEmpty()) { LOG.warn("Snapshot called again without clearing previous. " + "Doing nothing. Another ongoing flush or did we fail last attempt?"); } else { LOG.debug("FLUSHING TO DISK {}, store={}", getRegionServices().getRegionInfo().getEncodedName(), getFamilyName()); stopCompaction(); // region level lock ensures pushing active to pipeline is done in isolation // no concurrent update operations trying to flush the active segment pushActiveToPipeline(getActive(), true); resetTimeOfOldestEdit(); snapshotId = EnvironmentEdgeManager.currentTime(); // in both cases whatever is pushed to snapshot is cleared from the pipeline if (compositeSnapshot) { pushPipelineToSnapshot(); } else { m2(); } compactor.resetStats(); } return new MemStoreSnapshot(snapshotId, this.snapshot); }
3.26
hbase_CompactingMemStore_flushInMemory_rdh
// externally visible only for tests // when invoked directly from tests it must be verified that the caller doesn't hold updatesLock, // otherwise there is a deadlock void flushInMemory() { MutableSegment currActive = getActive(); if (currActive.setInMemoryFlushed()) { flushInMemory(currActive); } m1(); }
3.26
hbase_CompactingMemStore_getNextRow_rdh
/** * * @param cell * Find the row that comes after this one. If null, we return the first. * @return Next row or null if none found. */Cell getNextRow(final Cell cell) { Cell lowest = null; List<Segment> segments = getSegments(); for (Segment segment : segments) { if (lowest == null) { lowest = getNextRow(cell, segment.getCellSet());} else { lowest = getLowest(lowest, getNextRow(cell, segment.getCellSet()));} } return lowest; }
3.26
hbase_CompactingMemStore_startReplayingFromWAL_rdh
/** * Informs the MemStore that the upcoming updates are going to be part of replaying edits from * the WAL */ @Override public void startReplayingFromWAL() { inWalReplay = true; }
3.26
hbase_MasterQuotaManager_isInViolationAndPolicyDisable_rdh
/** * Method to check if a table is in violation and the policy set on the table is DISABLE. * * @param tableName * tableName to check. * @param quotaObserverChore * QuotaObserverChore instance * @return true if the table is in violation and the policy is DISABLE, else false. */ private boolean isInViolationAndPolicyDisable(TableName tableName, QuotaObserverChore quotaObserverChore) { boolean isInViolationAtTable = false; boolean isInViolationAtNamespace = false; SpaceViolationPolicy tablePolicy = null; SpaceViolationPolicy namespacePolicy = null; // Get Current Snapshot for the given table SpaceQuotaSnapshot tableQuotaSnapshot = quotaObserverChore.getTableQuotaSnapshot(tableName); SpaceQuotaSnapshot namespaceQuotaSnapshot = quotaObserverChore.getNamespaceQuotaSnapshot(tableName.getNamespaceAsString()); if (tableQuotaSnapshot != null) { // check if table in violation isInViolationAtTable = tableQuotaSnapshot.getQuotaStatus().isInViolation(); Optional<SpaceViolationPolicy> policy = tableQuotaSnapshot.getQuotaStatus().getPolicy(); if (policy.isPresent()) { tablePolicy = policy.get(); } } if (namespaceQuotaSnapshot != null) { // check namespace in violation isInViolationAtNamespace = namespaceQuotaSnapshot.getQuotaStatus().isInViolation(); Optional<SpaceViolationPolicy> policy = namespaceQuotaSnapshot.getQuotaStatus().getPolicy(); if (policy.isPresent()) { namespacePolicy = policy.get(); } } return ((tablePolicy == SpaceViolationPolicy.DISABLE) && isInViolationAtTable) || ((namespacePolicy == SpaceViolationPolicy.DISABLE) && isInViolationAtNamespace); }
3.26
hbase_MasterQuotaManager_getRegionCountOfTable_rdh
/** * Returns cached region count, or -1 if quota manager is disabled or table status not found */ public int getRegionCountOfTable(TableName tName) throws IOException { if (initialized) { return namespaceQuotaManager.getRegionCountOfTable(tName); } return -1; }
3.26
hbase_MasterQuotaManager_removeTableFromNamespaceQuota_rdh
/** * Remove table from namespace quota. * * @param tName * - The table name to update quota usage. * @throws IOException * Signals that an I/O exception has occurred. */ public void removeTableFromNamespaceQuota(TableName tName) throws IOException { if (initialized) { namespaceQuotaManager.removeFromNamespaceUsage(tName); } }
3.26
hbase_MasterQuotaManager_checkQuotaSupport_rdh
/* ========================================================================== Helpers */ private void checkQuotaSupport() throws IOException { if (!QuotaUtil.isQuotaEnabled(masterServices.getConfiguration())) { throw new DoNotRetryIOException(new UnsupportedOperationException("quota support disabled")); } if (!initialized) { long maxWaitTime = masterServices.getConfiguration().getLong("hbase.master.wait.for.quota.manager.init", 30000);// default is 30 seconds. long startTime = EnvironmentEdgeManager.currentTime(); do { try { Thread.sleep(100); } catch (InterruptedException e) { LOG.warn("Interrupted while waiting for Quota Manager to be initialized."); break; } } while ((!initialized) && ((EnvironmentEdgeManager.currentTime() - startTime) < maxWaitTime) ); if (!initialized) { throw new IOException("Quota manager is uninitialized, please retry later."); } } }
3.26
hbase_MasterQuotaManager_removeRegionSizesForTable_rdh
/** * Removes each region size entry where the RegionInfo references the provided TableName. * * @param tableName * tableName. */ public void removeRegionSizesForTable(TableName tableName) { regionSizes.keySet().removeIf(regionInfo -> regionInfo.getTable().equals(tableName)); }
3.26
hbase_Subprocedure_waitForLocallyCompleted_rdh
/** * Waits until this subprocedure has locally completed, or has been aborted. */ public void waitForLocallyCompleted() throws ForeignException, InterruptedException { Procedure.waitForLatch(releasedLocalBarrier, monitor, wakeFrequency, barrierName + ":completed"); }
3.26
hbase_Subprocedure_call_rdh
/** * Execute the Subprocedure {@link #acquireBarrier()} and {@link #insideBarrier()} methods while * keeping some state for other threads to access. This would normally be executed by the * ProcedureMember when an acquire message comes from the coordinator. RPCs are used to send * messages back to the coordinator after different phases are executed. Any exceptions caught * during the execution (except for InterruptedException) get converted and propagated to * coordinator via {@link ProcedureMemberRpcs#sendMemberAborted( Subprocedure, ForeignException)}. */ @SuppressWarnings("finally") @Override public final Void call() { LOG.debug(((("Starting subprocedure '" + barrierName) + "' with timeout ") + f0.getMaxTime()) + "ms"); // start the execution timeout timer f0.start(); try { // start by checking for error first rethrowException(); LOG.debug(("Subprocedure '" + barrierName) + "' starting 'acquire' stage"); acquireBarrier(); LOG.debug(("Subprocedure '" + barrierName) + "' locally acquired"); rethrowException(); // vote yes to coordinator about being prepared rpcs.sendMemberAcquired(this); LOG.debug((("Subprocedure '" + barrierName) + "' coordinator notified of 'acquire', waiting on") + " 'reached' or 'abort' from coordinator"); // wait for the procedure to reach global barrier before proceeding waitForReachedGlobalBarrier(); rethrowException();// if Coordinator aborts, will bail from here with exception // In traditional 2PC, if a member reaches this state the TX has been committed and the // member is responsible for rolling forward and recovering and completing the subsequent // operations in the case of failure. It cannot rollback. // // This implementation is not 2PC since it can still rollback here, and thus has different // semantics. LOG.debug(("Subprocedure '" + barrierName) + "' received 'reached' from coordinator."); byte[] dataToCoordinator = insideBarrier(); LOG.debug(("Subprocedure '" + barrierName) + "' locally completed"); rethrowException(); // Ack that the member has executed and released local barrier rpcs.sendMemberCompleted(this, dataToCoordinator); LOG.debug(("Subprocedure '" + barrierName) + "' has notified controller of completion"); // make sure we didn't get an external exception rethrowException(); } catch (Exception e) { String msg = null; if (e instanceof InterruptedException) { msg = (("Procedure '" + barrierName) + "' aborting due to interrupt!") + " Likely due to pool shutdown."; Thread.currentThread().interrupt(); } else if (e instanceof ForeignException) { msg = ("Subprocedure '" + barrierName) + "' aborting due to a ForeignException!"; } else { msg = ("Subprocedure '" + barrierName) + "' failed!"; } cancel(msg, e); LOG.debug(("Subprocedure '" + barrierName) + "' running cleanup."); cleanup(e); } finally { releasedLocalBarrier.countDown(); // tell the timer we are done, if we get here successfully f0.complete(); complete = true; LOG.debug(("Subprocedure '" + barrierName) + "' completed."); return null; } }
3.26
hbase_Subprocedure_getErrorCheckable_rdh
/** * exposed for testing. */ ForeignExceptionSnare getErrorCheckable() { return this.monitor; }
3.26
hbase_Subprocedure_waitForReachedGlobalBarrier_rdh
// // Subprocedure Internal State interface // /** * Wait for the reached global barrier notification. Package visibility for testing */ void waitForReachedGlobalBarrier() throws ForeignException, InterruptedException { Procedure.waitForLatch(inGlobalBarrier, monitor, wakeFrequency, barrierName + ":remote acquired"); }
3.26
hbase_Subprocedure_cancel_rdh
/** * Method to cancel the Subprocedure by injecting an exception from an external source. */ public void cancel(String msg, Throwable cause) { LOG.error(msg, cause); complete = true; if (cause instanceof ForeignException) { monitor.receive(((ForeignException) (cause))); } else { monitor.receive(new ForeignException(getMemberName(), cause)); } }
3.26
hbase_Subprocedure_receiveReachedGlobalBarrier_rdh
/** * Callback for the member rpcs to call when the global barrier has been reached. This unblocks * the main subprocedure execution thread so that the Subprocedure's {@link #insideBarrier()} * method can be run. */ public void receiveReachedGlobalBarrier() { inGlobalBarrier.countDown(); }
3.26
hbase_OffPeakHours_getInstance_rdh
/** * * @param startHour * inclusive * @param endHour * exclusive */ public static OffPeakHours getInstance(int startHour, int endHour) { if ((startHour == (-1)) && (endHour == (-1))) { return f1; } if ((!m0(startHour)) || (!m0(endHour))) { if (f0.isWarnEnabled()) { f0.warn(((("Ignoring invalid start/end hour for peak hour : start = " + startHour) + " end = ") + endHour) + ". Valid numbers are [0-23]"); } return f1; } if (startHour == endHour) { return f1; } return new OffPeakHoursImpl(startHour, endHour); }
3.26
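A usage sketch for getInstance above: a nightly window from hour 0 (inclusive) to hour 6 (exclusive), probed with the isOffPeakHour() check the class exposes:

OffPeakHours offPeak = OffPeakHours.getInstance(0, 6);
if (offPeak.isOffPeakHour()) {
  // allow the more aggressive, off-peak compaction settings
}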
hbase_OrderedInt32_decodeInt_rdh
/** * Read an {@code int} value from the buffer {@code src}. * * @param src * the {@link PositionedByteRange} to read the {@code int} from * @return the {@code int} read from the buffer */ public int decodeInt(PositionedByteRange src) { return OrderedBytes.decodeInt32(src); }
3.26
hbase_OrderedInt32_encodeInt_rdh
/** * Write instance {@code val} into buffer {@code dst}. * * @param dst * the {@link PositionedByteRange} to write to * @param val * the value to write to {@code dst} * @return the number of bytes written */ public int encodeInt(PositionedByteRange dst, int val) { return OrderedBytes.encodeInt32(dst, val, order); }
3.26
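A round-trip sketch for the two methods above, assuming the ascending singleton OrderedInt32.ASC and SimplePositionedMutableByteRange from hbase-common (encodeInt32 writes 1 header byte plus 4 value bytes):

PositionedByteRange buf = new SimplePositionedMutableByteRange(5);
OrderedInt32.ASC.encodeInt(buf, 42);
buf.setPosition(0);                       // rewind before decoding
int roundTripped = OrderedInt32.ASC.decodeInt(buf); // 42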
hbase_AverageIntervalRateLimiter_setNextRefillTime_rdh
// This method is for strictly testing purpose only @Override public void setNextRefillTime(long nextRefillTime) {this.nextRefillTime = nextRefillTime; }
3.26
hbase_SnapshotOfRegionAssignmentFromMeta_getRegionServerToRegionMap_rdh
/** * Get regionserver to region map * * @return regionserver to region map */ public Map<ServerName, List<RegionInfo>> getRegionServerToRegionMap() { return currentRSToRegionMap; }
3.26
hbase_SnapshotOfRegionAssignmentFromMeta_getTableSet_rdh
/** * Get the table set * * @return the table set */ public Set<TableName> getTableSet() { return this.tableToRegionMap.keySet(); }
3.26
hbase_SnapshotOfRegionAssignmentFromMeta_getExistingAssignmentPlan_rdh
/** * Get the favored nodes plan * * @return the existing favored nodes plan */ public FavoredNodesPlan getExistingAssignmentPlan() { return this.existingAssignmentPlan; }
3.26