name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68)
---|---|---|
hbase_CompactionPipeline_swap_rdh | /**
* Swaps the versioned list at the tail of the pipeline with a new segment. The swap happens only
* if there were no changes to the suffix of the list since the versioned list was created.
*
* @param versionedList
* suffix of the pipeline to be replaced; can be the tail or the whole pipeline
* @param segment
* new segment to replace the suffix. Can be null if the suffix just needs
* to be removed.
* @param closeSuffix
* whether to close the suffix (to release memory) as part of swapping it
* out. During an index merge this will be false; for compaction it will
* be true.
* @param updateRegionSize
* whether to update the region size. Update the region size when the
* pipeline is swapped as part of in-memory flush and further
* merge/compaction. Don't update the region size when the swap is the
* result of a snapshot (flush-to-disk).
* @return true iff swapped tail with new segment
*/
@SuppressWarnings(value = "VO_VOLATILE_INCREMENT", justification = "Increment is done under a synchronize block so safe")
public boolean swap(VersionedSegmentsList versionedList, ImmutableSegment segment, boolean closeSuffix, boolean updateRegionSize) {
if (versionedList.getVersion() != version) {
return false;
}
List<ImmutableSegment> suffix;
synchronized(pipeline) {
if (versionedList.getVersion() != version) {
return false;
}
suffix = versionedList.getStoreSegments();
LOG.debug("Swapping pipeline suffix; before={}, new segment={}", versionedList.getStoreSegments().size(), segment);
swapSuffix(suffix, segment, closeSuffix);
readOnlyCopy = new LinkedList<>(pipeline);
version++;
}
if (updateRegionSize && (region != null)) {
// update the global memstore size counter
long suffixDataSize = getSegmentsKeySize(suffix);
long suffixHeapSize = getSegmentsHeapSize(suffix);
long suffixOffHeapSize = getSegmentsOffHeapSize(suffix);
int suffixCellsCount = getSegmentsCellsCount(suffix);
long newDataSize = 0;
long newHeapSize = 0;
long newOffHeapSize = 0;
int newCellsCount = 0;
if (segment != null) {
newDataSize = segment.getDataSize();
newHeapSize = segment.getHeapSize();
newOffHeapSize = segment.getOffHeapSize();
newCellsCount = segment.getCellsCount();
}
long dataSizeDelta = suffixDataSize - newDataSize;
long heapSizeDelta = suffixHeapSize - newHeapSize;
long offHeapSizeDelta = suffixOffHeapSize - newOffHeapSize;
int cellsCountDelta = suffixCellsCount - newCellsCount;
region.addMemStoreSize(-dataSizeDelta, -heapSizeDelta, -offHeapSizeDelta, -cellsCountDelta);
LOG.debug(("Suffix data size={}, new segment data size={}, suffix heap size={},new segment heap " + "size={} suffix off heap size={}, new segment off heap size={}, suffix cells ") + "count={}, new segment cells count={}", suffixDataSize, newDataSize, suffixHeapSize, newHeapSize, suffixOffHeapSize, newOffHeapSize,
suffixCellsCount, newCellsCount);
}
return true;
} | 3.26 |
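The swap above hinges on an optimistic version check that is repeated under the lock: the volatile `version` is read once outside `synchronized(pipeline)` and re-checked inside it before anything is mutated. A minimal, self-contained sketch of that pattern, with hypothetical names and none of the HBase types, might look like this:

```java
import java.util.ArrayList;
import java.util.List;

// Hypothetical illustration of the double-checked, version-guarded swap pattern:
// callers capture a version, and the swap only succeeds if nothing changed since.
class VersionedPipeline<T> {
  private final List<T> pipeline = new ArrayList<>();
  private volatile long version = 0;

  long currentVersion() {
    return version;
  }

  boolean swapIfUnchanged(long expectedVersion, List<T> replacement) {
    if (expectedVersion != version) { // fast path: bail out without taking the lock
      return false;
    }
    synchronized (pipeline) {
      if (expectedVersion != version) { // re-check: another writer may have won the race
        return false;
      }
      pipeline.clear();
      pipeline.addAll(replacement);
      version++; // as in the snippet above, the volatile increment is safe under the lock
      return true;
    }
  }
}
```

The cheap first check lets a stale caller bail out without contending for the lock; the second check under the lock is what actually guarantees the suffix was unchanged.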
hbase_RESTServlet_isReadOnly_rdh | /**
* Helper method to determine if server should only respond to GET HTTP method requests.
*
* @return boolean for server read-only state
*/
boolean isReadOnly() {
return getConfiguration().getBoolean("hbase.rest.readonly", false);
} | 3.26 |
hbase_RESTServlet_shutdown_rdh | /**
* Shut down any services that need to stop
*/
void shutdown() {
if (pauseMonitor != null)
pauseMonitor.stop();
if (connectionCache != null)
connectionCache.shutdown();
} | 3.26 |
hbase_RESTServlet_getInstance_rdh | /**
* Returns the RESTServlet singleton instance
*/
@SuppressWarnings(value = "MS_EXPOSE_REP", justification = "singleton pattern")
public static synchronized RESTServlet getInstance() {
assert INSTANCE != null;
return INSTANCE;
} | 3.26 |
hbase_RESTServlet_getTable_rdh | /**
* Caller closes the table afterwards.
*/
Table getTable(String tableName) throws IOException {
return connectionCache.getTable(tableName);
} | 3.26 |
hbase_RESTServlet_getConnectionCache_rdh | /**
* Returns the ConnectionCache instance
*/
public ConnectionCache getConnectionCache() {
return connectionCache;
} | 3.26 |
hbase_StoreFileScanner_getSeekCount_rdh | // Test methods
static final long getSeekCount() {
return seekCount.sum();
} | 3.26 |
hbase_StoreFileScanner_seekToPreviousRowWithoutHint_rdh | /**
* This variant of the {@link StoreFileScanner#seekToPreviousRow(Cell)} method requires two seeks
* and one reseek. The extra expense/seek is with the intent of speeding up subsequent calls by
* using the {@link StoreFileScanner#seekToPreviousRowWithHint} which this method seeds the state
* for by setting {@link StoreFileScanner#previousRow}
*/
private boolean seekToPreviousRowWithoutHint(Cell originalKey) throws IOException {
// Rewind to the cell before the beginning of this row
Cell keyAtBeginningOfRow = PrivateCellUtil.createFirstOnRow(originalKey);
if (!seekBefore(keyAtBeginningOfRow)) {
return false;
}
// Rewind before this row and save what we find as a seek hint
Cell firstKeyOfPreviousRow = PrivateCellUtil.createFirstOnRow(hfs.getCell());
seekBeforeAndSaveKeyToPreviousRow(firstKeyOfPreviousRow);
// Seek back to the start of the previous row
if (!reseekAtOrAfter(firstKeyOfPreviousRow)) {
return false;
}
// If after skipping newer Kvs, we're still in what we thought was the previous
// row, then we can exit
if (isStillAtSeekTargetAfterSkippingNewerKvs(firstKeyOfPreviousRow)) {
return true;
}
// Skipping newer kvs resulted in skipping the entire row that we thought was the
// previous row. If we've set a seek hint, then we can use that to go backwards
// further
if (previousRow != null) {
return seekToPreviousRowWithHint();
}
// If we've made it here, then we weren't able to set a seek hint. This can happen
// only if we're at the beginning of the storefile i.e. there is no row before this
// one
return false;
} | 3.26 |
hbase_StoreFileScanner_seekToPreviousRowWithHint_rdh | /**
* This variant of the {@link StoreFileScanner#seekToPreviousRow(Cell)} method requires one seek
* and one reseek. This method maintains state in {@link StoreFileScanner#previousRow} which only
* makes sense in the context of a sequential row-by-row reverse scan.
* {@link StoreFileScanner#previousRow} should be reset if that is not the case. The reasoning for
* why this method is faster than {@link StoreFileScanner#seekToPreviousRowStateless(Cell)} is
* that seeks are slower as they need to start from the beginning of the file, while reseeks go
* forward from the current position.
*/
private boolean seekToPreviousRowWithHint() throws IOException {
do {
// Using our existing seek hint, set our next seek hint
Cell firstKeyOfPreviousRow = PrivateCellUtil.createFirstOnRow(previousRow);
seekBeforeAndSaveKeyToPreviousRow(firstKeyOfPreviousRow);
// Reseek back to our initial seek hint (i.e. what we think is the start of the
// previous row)
if (!reseekAtOrAfter(firstKeyOfPreviousRow)) {
return false;
}
// If after skipping newer Kvs, we're still in our seek hint row, then we're finished
if (isStillAtSeekTargetAfterSkippingNewerKvs(firstKeyOfPreviousRow)) {
return true;
}
// If the previousRow seek hint is missing, that means we're at the row after the first row
// in the storefile. Use the without-hint seek path to process the final row
if (previousRow == null) {
return seekToPreviousRowWithoutHint(firstKeyOfPreviousRow);
}
// Otherwise, use the previousRow seek hint to continue traversing backwards
} while (true);
} | 3.26 |
hbase_StoreFileScanner_requestSeek_rdh | /**
* Pretend we have done a seek but don't do it yet, if possible. The hope is that we find
* requested columns in more recent files and won't have to seek in older files. Creates a fake
* key/value with the given row/column and the highest (most recent) possible timestamp we might
* get from this file. When users of such "lazy scanner" need to know the next KV precisely (e.g.
* when this scanner is at the top of the heap), they run {@link #enforceSeek()}.
* <p>
* Note that this function does guarantee that the current KV of this scanner will be advanced to
* at least the given KV. Because of this, it does have to do a real seek in cases when the seek
* timestamp is older than the highest timestamp of the file, e.g. when we are trying to seek to
* the next row/column and use OLDEST_TIMESTAMP in the seek key.
*/
@Override
public boolean requestSeek(Cell kv, boolean forward, boolean useBloom) throws IOException {
if (kv.getFamilyLength() == 0) {
useBloom = false;
}
boolean haveToSeek = true;
if (useBloom) {
// check ROWCOL Bloom filter first.
if (reader.getBloomFilterType() == BloomType.ROWCOL) {
haveToSeek = reader.passesGeneralRowColBloomFilter(kv);
} else if (canOptimizeForNonNullColumn && (PrivateCellUtil.isDeleteFamily(kv) || PrivateCellUtil.isDeleteFamilyVersion(kv))) {
// if there is no such delete family kv in the store file,
// then no need to seek.
haveToSeek = reader.passesDeleteFamilyBloomFilter(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength());
}
}
delayedReseek = forward;
delayedSeekKV = kv;
if (haveToSeek) {
// This row/column might be in this store file (or we did not use the
// Bloom filter), so we still need to seek.
realSeekDone = false;
long maxTimestampInFile = reader.getMaxTimestamp();
long seekTimestamp = kv.getTimestamp();
if (seekTimestamp > maxTimestampInFile) {
// Create a fake key that is not greater than the real next key.
// (Lower timestamps correspond to higher KVs.)
// To understand this better, consider that we are asked to seek to
// a higher timestamp than the max timestamp in this file. We know that
// the next point when we have to consider this file again is when we
// pass the max timestamp of this file (with the same row/column).
setCurrentCell(PrivateCellUtil.createFirstOnRowColTS(kv, maxTimestampInFile));
} else {
// This will be the case e.g. when we need to seek to the next
// row/column, and we don't know exactly what they are, so we set the
// seek key's timestamp to OLDEST_TIMESTAMP to skip the rest of this
// row/column.
enforceSeek();
}
return cur != null;
}
// Multi-column Bloom filter optimization.
// Create a fake key/value, so that this scanner only bubbles up to the top
// of the KeyValueHeap in StoreScanner after we scanned this row/column in
// all other store files. The query matcher will then just skip this fake
// key/value and the store scanner will progress to the next column. This
// is obviously not a "real real" seek, but unlike the fake KV earlier in
// this method, we want this to be propagated to ScanQueryMatcher.
setCurrentCell(PrivateCellUtil.createLastOnRowCol(kv));
realSeekDone = true;
return true;
} | 3.26 |
hbase_StoreFileScanner_getScannerOrder_rdh | /**
*
* @see KeyValueScanner#getScannerOrder()
*/
@Override
public long getScannerOrder() {
return scannerOrder;
} | 3.26 |
hbase_StoreFileScanner_getScannersForStoreFiles_rdh | /**
* Return an array of scanners corresponding to the given set of store files, and set the
* ScanQueryMatcher for each store file scanner for further optimization.
*/
public static List<StoreFileScanner> getScannersForStoreFiles(Collection<HStoreFile> files, boolean cacheBlocks, boolean usePread, boolean isCompaction, boolean canUseDrop, ScanQueryMatcher matcher, long readPt) throws IOException {
if (files.isEmpty()) {
return Collections.emptyList();
}
List<StoreFileScanner> scanners = new ArrayList<>(files.size());
boolean canOptimizeForNonNullColumn = (matcher != null) ? !matcher.hasNullColumnInQuery() : false;
PriorityQueue<HStoreFile> sortedFiles = new PriorityQueue<>(files.size(), StoreFileComparators.SEQ_ID);
for (HStoreFile file : files) {
// The sort function needs metadata so we need to open reader first before sorting the list.
file.initReader();
sortedFiles.add(file);
}
boolean succ = false;
try {
for (int i = 0, n = files.size(); i < n; i++) {
HStoreFile sf = sortedFiles.remove();
StoreFileScanner scanner;
if (usePread) {
scanner = sf.getPreadScanner(cacheBlocks, readPt, i, canOptimizeForNonNullColumn);
} else {
scanner = sf.getStreamScanner(canUseDrop, cacheBlocks, isCompaction, readPt, i, canOptimizeForNonNullColumn);
}
scanners.add(scanner);
}
succ = true;
} finally {
if (!succ) {
for (StoreFileScanner scanner : scanners) {
scanner.m0();
}
}
}
return scanners;
} | 3.26 |
hbase_StoreFileScanner_seekBeforeAndSaveKeyToPreviousRow_rdh | /**
* Seeks before the seek target cell and saves the location to {@link #previousRow}. If there
* doesn't exist a KV in this file before the seek target cell, reposition the scanner at the
* beginning of the storefile (in preparation to a reseek at or after the seek key) and set the
* {@link #previousRow} to null. If {@link #previousRow} is ever non-null and then transitions to
* being null again via this method, that's because there doesn't exist a row before the seek
* target in the storefile (i.e. we're at the beginning of the storefile)
*/
private void seekBeforeAndSaveKeyToPreviousRow(Cell seekKey) throws IOException {
if (seekCount != null) {
seekCount.increment();
}
if (!hfs.seekBefore(seekKey)) {
// Since the above seek failed, we need to position ourselves back at the start of the
// block or else our reseek might fail. seekTo() cannot return false here as at least
// one seekBefore will have returned true by the time we get here
hfs.seekTo();
this.previousRow = null;
} else {
this.previousRow = hfs.getCell();
}
} | 3.26 |
hbase_StoreFileScanner_seekToPreviousRowStateless_rdh | /**
* This variant of the {@link StoreFileScanner#seekToPreviousRow(Cell)} method requires two seeks.
* It should be used if the cost for seeking is lower i.e. when using a fast seeking data block
* encoding like RIV1.
*/
private boolean seekToPreviousRowStateless(Cell originalKey) throws IOException {
Cell key = originalKey;
do {
Cell keyAtBeginningOfRow = PrivateCellUtil.createFirstOnRow(key);
if (!seekBefore(keyAtBeginningOfRow)) {
return false;
}
Cell firstKeyOfPreviousRow = PrivateCellUtil.createFirstOnRow(hfs.getCell());
if (!seekAtOrAfter(firstKeyOfPreviousRow)) {
return false;
}
if (isStillAtSeekTargetAfterSkippingNewerKvs(firstKeyOfPreviousRow)) {
return true;
}
key = firstKeyOfPreviousRow;
} while (true);
} | 3.26 |
hbase_StoreFileScanner_seekAtOrAfter_rdh | /**
* Returns false if not found or if k is after the end.
*/
public static boolean seekAtOrAfter(HFileScanner s, Cell k) throws IOException {
int result = s.seekTo(k);
if (result < 0) {
if (result == HConstants.INDEX_KEY_MAGIC) {
// using faked key
return true;
}
// Passed KV is smaller than first KV in file, work from start of file
return s.seekTo();
} else if (result > 0) {
// Passed KV is larger than current KV in file, if there is a next
// it is the "after", if not then this scanner is done.
return s.next();
}
// Seeked to the exact key
return true;
} | 3.26 |
hbase_MetaTableAccessor_makePutFromRegionInfo_rdh | /**
* Generates and returns a {@link Put} containing the {@link RegionInfo} for the catalog table.
*
* @throws IllegalArgumentException
* when the provided RegionInfo is not the default replica.
*/
public static Put makePutFromRegionInfo(RegionInfo regionInfo, long ts) throws IOException {
return addRegionInfo(new Put(CatalogFamilyFormat.getMetaKeyForRegion(regionInfo), ts), regionInfo);
} | 3.26 |
hbase_MetaTableAccessor_scanMeta_rdh | /**
* Performs a scan of META table.
*
* @param connection
* connection we're using
* @param startRow
* Where to start the scan. Pass null to begin the scan at the first row.
* @param stopRow
* Where to stop the scan. Pass null to scan all rows from the start row onward.
* @param type
* scanned part of meta
* @param maxRows
* maximum rows to return
* @param visitor
* Visitor invoked against each row.
*/
public static void scanMeta(Connection connection, @Nullable final byte[] startRow, @Nullable final byte[] stopRow, QueryType type, int maxRows, final ClientMetaTableAccessor.Visitor visitor) throws IOException {
scanMeta(connection, startRow, stopRow, type, null, maxRows, visitor);
} | 3.26 |
hbase_MetaTableAccessor_updateTableState_rdh | /**
* Update state of the table in meta.
*
* @param connection
* what we use for update
* @param state
* new state
*/
private static void updateTableState(Connection connection, TableState state) throws IOException {
Put put = makePutFromTableState(state, EnvironmentEdgeManager.currentTime());
putToMetaTable(connection, put);
LOG.info("Updated {} in hbase:meta", state);
} | 3.26 |
hbase_MetaTableAccessor_fullScanRegions_rdh | // //////////////////////
// Reading operations //
// //////////////////////
/**
* Performs a full scan of <code>hbase:meta</code> for regions.
*
* @param connection
* connection we're using
* @param visitor
* Visitor invoked against each row in regions family.
*/
public static void fullScanRegions(Connection connection, final ClientMetaTableAccessor.Visitor visitor) throws IOException {
scanMeta(connection, null, null, QueryType.REGION, visitor);
} | 3.26 |
hbase_MetaTableAccessor_scanByRegionEncodedName_rdh | /**
* Scans META table for a row whose key contains the specified <B>regionEncodedName</B>, returning
* a single related <code>Result</code> instance if any row is found, null otherwise.
*
* @param connection
* the connection to query META table.
* @param regionEncodedName
* the region encoded name to look for at META.
* @return <code>Result</code> instance with the row related info in META, null otherwise.
* @throws IOException
* if any errors occur while querying META.
*/
public static Result scanByRegionEncodedName(Connection connection, String regionEncodedName) throws IOException {
RowFilter rowFilter = new RowFilter(CompareOperator.EQUAL, new SubstringComparator(regionEncodedName));
Scan scan = getMetaScan(connection.getConfiguration(), 1);
scan.setFilter(rowFilter);
try (Table table = getMetaHTable(connection); ResultScanner resultScanner = table.getScanner(scan)) {
return resultScanner.next();
}
} | 3.26 |
hbase_MetaTableAccessor_getMetaHTable_rdh | /**
* Callers should call close on the returned {@link Table} instance.
*
* @param connection
* connection we're using to access Meta
* @return An {@link Table} for <code>hbase:meta</code>
* @throws NullPointerException
* if {@code connection} is {@code null}
*/
public static Table getMetaHTable(final Connection connection) throws IOException {
// We used to pass whole CatalogTracker in here, now we just pass in Connection
Objects.requireNonNull(connection, "Connection cannot be null");
if (connection.isClosed()) {
throw new IOException("connection is closed");
}
return connection.getTable(TableName.META_TABLE_NAME);
} | 3.26 |
hbase_MetaTableAccessor_getTableStates_rdh | /**
* Fetch table states from META table
*
* @param conn
* connection to use
* @return map {tableName -> state}
*/
public static Map<TableName, TableState> getTableStates(Connection conn) throws IOException {
final Map<TableName, TableState> states = new LinkedHashMap<>();
ClientMetaTableAccessor.Visitor collector = r -> {
TableState v55 = CatalogFamilyFormat.getTableState(r);
if (v55 != null) {
states.put(v55.getTableName(), v55);
}
return true;
};
fullScanTables(conn, collector);
return states;
} | 3.26 |
hbase_MetaTableAccessor_updateRegionState_rdh | /**
* Update state column in hbase:meta.
*/
public static void updateRegionState(Connection connection, RegionInfo ri, RegionState.State state) throws IOException {
final Put put = makePutFromRegionInfo(ri);
addRegionStateToPut(put, ri.getReplicaId(), state);
m3(connection, Collections.singletonList(put));
} | 3.26 |
hbase_MetaTableAccessor_getTableRegions_rdh | /**
* Gets all of the regions of the specified table. Do not use this method to get meta table
* regions, use methods in MetaTableLocator instead.
*
* @param connection
* connection we're using
* @param tableName
* table we're looking for
* @param excludeOfflinedSplitParents
* If true, do not include offlined split parents in the
* return.
* @return Ordered list of {@link RegionInfo}.
*/
public static List<RegionInfo> getTableRegions(Connection connection, TableName tableName, final boolean excludeOfflinedSplitParents) throws IOException {
List<Pair<RegionInfo, ServerName>> v17 = getTableRegionsAndLocations(connection, tableName, excludeOfflinedSplitParents);
return getListOfRegionInfos(v17);
} | 3.26 |
hbase_MetaTableAccessor_getTableState_rdh | /**
* Fetch table state for given table from META table
*
* @param conn
* connection to use
* @param tableName
* table to fetch state for
*/
@Nullable
public static TableState getTableState(Connection conn, TableName tableName) throws IOException {
if (tableName.equals(TableName.META_TABLE_NAME)) {
return new TableState(tableName, State.ENABLED);
}
Table metaHTable = getMetaHTable(conn);
Get get = new Get(tableName.getName()).addColumn(HConstants.TABLE_FAMILY, HConstants.TABLE_STATE_QUALIFIER);
Result result = metaHTable.get(get);
return CatalogFamilyFormat.getTableState(result);
} | 3.26 |
hbase_MetaTableAccessor_deleteTableState_rdh | /**
* Remove state for table from meta
*
* @param connection
* to use for deletion
* @param table
* to delete state for
*/
public static void deleteTableState(Connection connection, TableName table) throws IOException {
long time = EnvironmentEdgeManager.currentTime();
Delete delete = new Delete(table.getName());
delete.addColumns(HConstants.TABLE_FAMILY, HConstants.TABLE_STATE_QUALIFIER, time);
deleteFromMetaTable(connection, delete);
LOG.info(("Deleted table " + table) + " state from META");
} | 3.26 |
hbase_MetaTableAccessor_makePutFromTableState_rdh | /**
* Construct PUT for given state
*
* @param state
* new state
*/
public static Put makePutFromTableState(TableState state, long ts) {
Put v69 = new Put(state.getTableName().getName(), ts);
v69.addColumn(HConstants.TABLE_FAMILY, HConstants.TABLE_STATE_QUALIFIER, state.convert().toByteArray());
return v69;
} | 3.26 |
hbase_MetaTableAccessor_m2_rdh | /**
*
* @param t
* Table to use
* @param p
* put to make
*/
private static void m2(Table t, Put p) throws IOException {
debugLogMutation(p);
t.put(p);
} | 3.26 |
hbase_MetaTableAccessor_putToMetaTable_rdh | /**
* Put the passed <code>p</code> to the <code>hbase:meta</code> table.
*
* @param connection
* connection we're using
* @param p
* Put to add to hbase:meta
*/
private static void putToMetaTable(Connection connection, Put p) throws IOException {
try (Table table = getMetaHTable(connection)) {
m2(table, p);
}
} | 3.26 |
hbase_MetaTableAccessor_makeDeleteFromRegionInfo_rdh | /**
* Generates and returns a Delete containing the region info for the catalog table
*/
public static Delete makeDeleteFromRegionInfo(RegionInfo regionInfo, long ts) {
if (regionInfo == null) {
throw new IllegalArgumentException("Can't make a delete for null region");
}
if (regionInfo.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
throw new IllegalArgumentException("Can't make delete for a replica region. Operate on the primary");
}
Delete delete = new Delete(CatalogFamilyFormat.getMetaKeyForRegion(regionInfo));
delete.addFamily(HConstants.CATALOG_FAMILY, ts);
return delete;
} | 3.26 |
hbase_MetaTableAccessor_m3_rdh | /**
* Put the passed <code>ps</code> to the <code>hbase:meta</code> table.
*
* @param connection
* connection we're using
* @param ps
* Puts to add to hbase:meta
*/
public static void m3(final Connection connection, final List<Put> ps) throws IOException {
if (ps.isEmpty()) {
return;
}
try (Table t = getMetaHTable(connection)) {
debugLogMutations(ps);
// the implementation for putting a single Put is much simpler so here we do a check first.
if (ps.size() == 1) {
t.put(ps.get(0));
} else {
t.put(ps);
}
}
} | 3.26 |
hbase_MetaTableAccessor_deleteFromMetaTable_rdh | /**
* Delete the passed <code>deletes</code> from the <code>hbase:meta</code> table.
*
* @param connection
* connection we're using
* @param deletes
* Deletes to add to hbase:meta This list should support #remove.
*/
private static void deleteFromMetaTable(final Connection connection, final List<Delete> deletes) throws IOException {
try (Table t = getMetaHTable(connection)) {
debugLogMutations(deletes);
t.delete(deletes);
}
} | 3.26 |
hbase_MetaTableAccessor_getClosestRegionInfo_rdh | /**
* Returns the closest hbase:meta region row to the passed <code>row</code>
*/
@NonNull
private static RegionInfo getClosestRegionInfo(Connection connection, @NonNull final TableName tableName, @NonNull final byte[] row) throws IOException {
byte[] searchRow = RegionInfo.createRegionName(tableName, row, HConstants.NINES, false);
Scan scan = getMetaScan(connection.getConfiguration(), 1);
scan.setReversed(true);
scan.withStartRow(searchRow);
try (ResultScanner resultScanner = getMetaHTable(connection).getScanner(scan)) {
Result result = resultScanner.next();
if (result == null) {
throw new TableNotFoundException(((("Cannot find row in META " + " for table: ") + tableName) + ", row=") + Bytes.toStringBinary(row));
}
RegionInfo regionInfo = CatalogFamilyFormat.getRegionInfo(result);
if (regionInfo == null) {
throw new IOException((("RegionInfo was null or empty in Meta for " + tableName) + ", row=") + Bytes.toStringBinary(row));
}
return regionInfo;
}
}
/**
* Returns the {@link ServerName} from catalog table {@link Result} where the region is
* transitioning on. It should be the same as
* {@link CatalogFamilyFormat#getServerName(Result,int)} if the server is at OPEN state.
*
* @param r
* Result to pull the transitioning server name from
* @return A ServerName instance or {@link CatalogFamilyFormat#getServerName(Result,int)} | 3.26 |
hbase_MetaTableAccessor_addRegionsToMeta_rdh | /**
* Adds a hbase:meta row for each of the specified new regions. Initial state for new regions is
* CLOSED.
*
* @param connection
* connection we're using
* @param regionInfos
* region information list
* @param ts
* desired timestamp
* @throws IOException
* if problem connecting or updating meta
*/
public static void addRegionsToMeta(Connection connection, List<RegionInfo> regionInfos, int regionReplication, long ts) throws IOException {
List<Put> puts = new ArrayList<>();
for (RegionInfo regionInfo : regionInfos) {
if (!RegionReplicaUtil.isDefaultReplica(regionInfo)) {
continue;
}
Put put = makePutFromRegionInfo(regionInfo, ts);
// New regions are added with initial state of CLOSED.
addRegionStateToPut(put, regionInfo.getReplicaId(), State.CLOSED);
// Add empty locations for region replicas so that number of replicas can be cached
// whenever the primary region is looked up from meta
for (int i = 1; i < regionReplication; i++) {
addEmptyLocation(put, i);
}
puts.add(put);
}
m3(connection, puts);
LOG.info("Added {} regions to meta.", puts.size());
} | 3.26 |
hbase_MetaTableAccessor_updateRegionLocation_rdh | /**
* Updates the location of the specified region in hbase:meta to be the specified server hostname
* and startcode.
* <p>
* Uses passed catalog tracker to get a connection to the server hosting hbase:meta and makes
* edits to that region.
*
* @param connection
* connection we're using
* @param regionInfo
* region to update location of
* @param openSeqNum
* the latest sequence number obtained when the region was open
* @param sn
* Server name
* @param masterSystemTime
* wall clock time from master if passed in the open region RPC
*/
public static void updateRegionLocation(Connection connection, RegionInfo regionInfo, ServerName sn, long openSeqNum, long masterSystemTime) throws IOException {
updateLocation(connection, regionInfo, sn, openSeqNum, masterSystemTime);
} | 3.26 |
hbase_MetaTableAccessor_getDaughterRegions_rdh | /**
* Returns the daughter regions by reading the corresponding columns of the catalog table Result.
*
* @param data
* a Result object from the catalog table scan
* @return pair of RegionInfo or PairOfSameType(null, null) if region is not a split parent
*/
public static PairOfSameType<RegionInfo> getDaughterRegions(Result data) {
RegionInfo splitA = CatalogFamilyFormat.getRegionInfo(data, HConstants.SPLITA_QUALIFIER);
RegionInfo splitB = CatalogFamilyFormat.getRegionInfo(data, HConstants.SPLITB_QUALIFIER);
return new PairOfSameType<>(splitA, splitB);
} | 3.26 |
hbase_MetaTableAccessor_m1_rdh | /**
* Performs a scan of META table for given table starting from given row.
*
* @param connection
* connection we're using
* @param visitor
* visitor to call
* @param tableName
* table within which we scan
* @param row
* start scan from this row
* @param rowLimit
* max number of rows to return
*/
public static void m1(Connection connection, final ClientMetaTableAccessor.Visitor visitor, final TableName tableName, final byte[] row, final int rowLimit) throws IOException {
byte[] startRow = null;
byte[] stopRow = null;
if (tableName != null) {
startRow = ClientMetaTableAccessor.getTableStartRowForMeta(tableName, QueryType.REGION);
if (row != null) {
RegionInfo closestRi = getClosestRegionInfo(connection, tableName, row);
startRow = RegionInfo.createRegionName(tableName, closestRi.getStartKey(), HConstants.ZEROES, false);
}
stopRow = ClientMetaTableAccessor.getTableStopRowForMeta(tableName, QueryType.REGION);
}
scanMeta(connection, startRow, stopRow, QueryType.REGION, rowLimit, visitor);
} | 3.26 |
hbase_MetaTableAccessor_getRegionLocation_rdh | /**
* Returns the HRegionLocation from meta for the given region
*
* @param connection
* connection we're using
* @param regionInfo
* region information
* @return HRegionLocation for the given region
*/
public static HRegionLocation getRegionLocation(Connection connection, RegionInfo regionInfo) throws IOException {
return CatalogFamilyFormat.getRegionLocation(getCatalogFamilyRow(connection, regionInfo), regionInfo, regionInfo.getReplicaId());
}
/**
* Returns Return the {@link HConstants#CATALOG_FAMILY} | 3.26 |
hbase_MetaTableAccessor_fullScan_rdh | /**
* Performs a full scan of <code>hbase:meta</code>.
*
* @param connection
* connection we're using
* @param type
* scanned part of meta
* @return List of {@link Result}
*/
private static List<Result> fullScan(Connection connection, QueryType type) throws IOException {
ClientMetaTableAccessor.CollectAllVisitor v = new ClientMetaTableAccessor.CollectAllVisitor();
scanMeta(connection, null, null, type, v);
return v.getResults();
} | 3.26 |
hbase_MetaTableAccessor_getRegion_rdh | /**
* Gets the region info and assignment for the specified region.
*
* @param connection
* connection we're using
* @param regionName
* Region to lookup.
* @return Location and RegionInfo for <code>regionName</code>
* @deprecated use {@link #getRegionLocation(Connection, byte[])} instead
*/
@Deprecated
public static Pair<RegionInfo, ServerName> getRegion(Connection connection, byte[] regionName) throws IOException {
HRegionLocation location = getRegionLocation(connection, regionName);
return location == null ? null : new Pair<>(location.getRegion(), location.getServerName());
} | 3.26 |
hbase_MetaTableAccessor_getAllRegions_rdh | /**
* Lists all of the regions currently in META.
*
* @param connection
* to connect with
* @param excludeOfflinedSplitParents
* If false, include offlined/split-parent regions; if true, leave
* offlined regions out of the returned list
* @return List of all user-space regions.
*/
public static List<RegionInfo> getAllRegions(Connection connection, boolean excludeOfflinedSplitParents) throws IOException {
List<Pair<RegionInfo, ServerName>> result;
result = getTableRegionsAndLocations(connection, null, excludeOfflinedSplitParents);
return getListOfRegionInfos(result);
} | 3.26 |
hbase_MetaTableAccessor_addRegionStateToPut_rdh | /**
* Set the column value corresponding to this {@code replicaId}'s {@link RegionState} to the
* provided {@code state}. Mutates the provided {@link Put}.
*/
public static Put addRegionStateToPut(Put put, int replicaId, RegionState.State state) throws IOException {
put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()).setFamily(HConstants.CATALOG_FAMILY).setQualifier(CatalogFamilyFormat.getRegionStateColumn(replicaId)).setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(Bytes.toBytes(state.name())).build());
return put;
} | 3.26 |
hbase_MetaTableAccessor_fullScanTables_rdh | /**
* Performs a full scan of <code>hbase:meta</code> for tables.
*
* @param connection
* connection we're using
* @param visitor
* Visitor invoked against each row in tables family.
*/
public static void fullScanTables(Connection connection, final ClientMetaTableAccessor.Visitor visitor) throws IOException {
scanMeta(connection, null, null, QueryType.TABLE, visitor);
} | 3.26 |
hbase_MetaTableAccessor_addSplitsToParent_rdh | /**
* Adds daughter region infos to hbase:meta row for the specified region.
* <p/>
* Note that this does not add its daughter's as different rows, but adds information about the
* daughters in the same row as the parent. Now only used in snapshot. Use
* {@link org.apache.hadoop.hbase.master.assignment.RegionStateStore} if you want to split a
* region.
*
* @param connection
* connection we're using
* @param regionInfo
* RegionInfo of parent region
* @param splitA
* first split daughter of the parent regionInfo
* @param splitB
* second split daughter of the parent regionInfo
* @throws IOException
* if problem connecting or updating meta
*/
public static void addSplitsToParent(Connection connection, RegionInfo regionInfo, RegionInfo splitA, RegionInfo splitB) throws IOException {
try (Table meta = getMetaHTable(connection)) {
Put put = makePutFromRegionInfo(regionInfo);
addDaughtersToPut(put, splitA, splitB);
meta.put(put);
debugLogMutation(put);
LOG.debug("Added region {}", regionInfo.getRegionNameAsString());
}
} | 3.26 |
hbase_MetaTableAccessor_addDaughtersToPut_rdh | /**
* Adds split daughters to the Put
*/
public static Put addDaughtersToPut(Put put, RegionInfo splitA, RegionInfo splitB) throws IOException {
if (splitA != null) {
put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()).setFamily(HConstants.CATALOG_FAMILY).setQualifier(HConstants.SPLITA_QUALIFIER).setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(RegionInfo.toByteArray(splitA)).build());
}
if (splitB != null) {
put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()).setFamily(HConstants.CATALOG_FAMILY).setQualifier(HConstants.SPLITB_QUALIFIER).setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(RegionInfo.toByteArray(splitB)).build());
}
return put;
} | 3.26 |
hbase_MetaTableAccessor_getRegionResult_rdh | /**
* Gets the result in hbase:meta for the specified region.
*
* @param connection
* connection we're using
* @param regionInfo
* region we're looking for
* @return result of the specified region
*/
public static Result getRegionResult(Connection connection, RegionInfo regionInfo) throws IOException {
Get get = new Get(CatalogFamilyFormat.getMetaKeyForRegion(regionInfo));
get.addFamily(HConstants.CATALOG_FAMILY);
try (Table t = getMetaHTable(connection)) {
return t.get(get);
}
} | 3.26 |
hbase_MetaTableAccessor_updateLocation_rdh | /**
* Updates the location of the specified region to be the specified server.
* <p>
* Connects to the specified server which should be hosting the specified catalog region name to
* perform the edit.
*
* @param connection
* connection we're using
* @param regionInfo
* region to update location of
* @param sn
* Server name
* @param openSeqNum
* the latest sequence number obtained when the region was open
* @param masterSystemTime
* wall clock time from master if passed in the open region RPC
* @throws IOException
* In particular could throw {@link java.net.ConnectException} if the server
* is down on other end.
*/
private static void updateLocation(Connection connection, RegionInfo regionInfo, ServerName sn, long openSeqNum, long masterSystemTime) throws IOException {
// region replicas are kept in the primary region's row
Put put = new Put(CatalogFamilyFormat.getMetaKeyForRegion(regionInfo), masterSystemTime);
addRegionInfo(put, regionInfo);
addLocation(put, sn, openSeqNum, regionInfo.getReplicaId());
putToMetaTable(connection, put);
LOG.info("Updated row {} with server=", regionInfo.getRegionNameAsString(), sn);} | 3.26 |
hbase_MetaTableAccessor_getScanForTableName_rdh | /**
* This method creates a Scan object that will only scan catalog rows that belong to the specified
* table. It doesn't specify any columns. This is a better alternative to just using a start row
* and scan until it hits a new table since that requires parsing the HRI to get the table name.
*
* @param tableName
* bytes of table's name
* @return configured Scan object
*/
public static Scan getScanForTableName(Configuration conf, TableName tableName) {
// Start key is just the table name with delimiters
byte[] startKey = ClientMetaTableAccessor.getTableStartRowForMeta(tableName, QueryType.REGION);
// Stop key appends the smallest possible char to the table name
byte[] stopKey = ClientMetaTableAccessor.getTableStopRowForMeta(tableName, QueryType.REGION);
Scan scan = getMetaScan(conf, -1);
scan.withStartRow(startKey);
scan.withStopRow(stopKey);
return scan;
} | 3.26 |
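As a hedged usage sketch of the scan built above (assuming only the public APIs shown in this section plus the standard client `Connection`/`Table` calls), one could count a table's catalog rows like this:

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public final class MetaScanDemo {
  // Count the hbase:meta rows belonging to one table, using the Scan built by getScanForTableName.
  static int countMetaRows(Connection conn, Configuration conf, String table) throws IOException {
    Scan scan = MetaTableAccessor.getScanForTableName(conf, TableName.valueOf(table));
    int rows = 0;
    try (Table meta = conn.getTable(TableName.META_TABLE_NAME);
        ResultScanner scanner = meta.getScanner(scan)) {
      for (Result r : scanner) {
        rows++;
      }
    }
    return rows;
  }
}
```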
hbase_MetaTableAccessor_getTableRegionsAndLocations_rdh | /**
* Do not use this method to get meta table regions, use methods in MetaTableLocator instead.
*
* @param connection
* connection we're using
* @param tableName
* table to work with, can be null for getting all regions
* @param excludeOfflinedSplitParents
* don't return split parents
* @return Return list of regioninfos and server addresses.
*/
// What happens here when 1M regions in hbase:meta? This won't scale?
public static List<Pair<RegionInfo, ServerName>> getTableRegionsAndLocations(Connection connection, @Nullable final TableName tableName, final boolean excludeOfflinedSplitParents) throws IOException {
if ((tableName != null) && tableName.equals(TableName.META_TABLE_NAME)) {
throw new IOException("This method can't be used to locate meta regions;" + " use MetaTableLocator instead");
}
// Make a version of CollectingVisitor that collects RegionInfo and ServerAddress
ClientMetaTableAccessor.CollectRegionLocationsVisitor visitor = new ClientMetaTableAccessor.CollectRegionLocationsVisitor(excludeOfflinedSplitParents);
scanMeta(connection, ClientMetaTableAccessor.getTableStartRowForMeta(tableName, QueryType.REGION), ClientMetaTableAccessor.getTableStopRowForMeta(tableName, QueryType.REGION), QueryType.REGION, visitor);
return visitor.getResults();
} | 3.26 |
hbase_MetaTableAccessor_m0_rdh | /**
* Performs a full scan of <code>hbase:meta</code> for regions.
*
* @param connection
* connection we're using
*/
public static List<Result> m0(Connection connection) throws IOException {
return fullScan(connection, QueryType.REGION);
} | 3.26 |
hbase_ZKServerTool_main_rdh | /**
* Run the tool.
*
* @param args
* Command line arguments.
*/
public static void main(String[] args) {
for (ServerName server : readZKNodes(HBaseConfiguration.create())) {
// bin/zookeeper.sh relies on the "ZK host" string for grepping which is case sensitive.
System.out.println("ZK host: " + server.getHostname());
}
} | 3.26 |
hbase_GetUserPermissionsRequest_newBuilder_rdh | /**
* Build a get table permission request
*
* @param tableName
* the specific table name
* @return a get table permission request builder
*/
public static Builder newBuilder(TableName tableName) {
return new Builder(tableName);
} | 3.26 |
hbase_GetUserPermissionsRequest_withUserName_rdh | /**
* user name could be null if all global/namespace/table permissions are needed
*/
public Builder withUserName(String userName) {
this.userName = userName;
return this;
} | 3.26 |
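Putting the two builder calls above together, a hedged usage sketch; the trailing `build()` call and the package locations are assumptions inferred from the builder shown here rather than verified against a particular HBase release:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.security.access.GetUserPermissionsRequest;

public final class GetUserPermissionsRequestDemo {
  public static void main(String[] args) {
    // Request the permissions on one table, scoped to a single user;
    // leaving withUserName out (or passing null) asks for all users' permissions.
    GetUserPermissionsRequest request =
      GetUserPermissionsRequest.newBuilder(TableName.valueOf("my_table"))
        .withUserName("alice")
        .build(); // build() is assumed from the builder pattern shown above
    System.out.println(request);
  }
}
```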
hbase_BufferedMutatorExample_onException_rdh | /**
* a callback invoked when an asynchronous write fails.
*/
final BufferedMutator.ExceptionListener listener = new BufferedMutator.ExceptionListener() {
@Override
public void onException(RetriesExhaustedWithDetailsException e, BufferedMutator mutator) {
for (int i = 0; i < e.getNumExceptions(); i++) {
LOG.info(("Failed to sent put " + e.getRow(i)) + ".");
}
} | 3.26 |
hbase_NamedQueueRecorder_addRecord_rdh | /**
* Add various NamedQueue records to the ringbuffer. Based on the type of the event (e.g. slowLog),
* the consumer of the disruptor ringbuffer will have specific logic. This method is the producer for
* the disruptor ringbuffer, which is initialized in the NamedQueueRecorder constructor.
*
* @param namedQueuePayload
* namedQueue payload sent by client of ring buffer service
*/
public void addRecord(NamedQueuePayload namedQueuePayload) {
RingBuffer<RingBufferEnvelope> ringBuffer = this.disruptor.getRingBuffer();
long seqId = ringBuffer.next();
try {
ringBuffer.get(seqId).load(namedQueuePayload);
} finally {
ringBuffer.publish(seqId);
}
} | 3.26 |
hbase_NamedQueueRecorder_getEventCount_rdh | // must be power of 2 for disruptor ringbuffer
private int getEventCount(int eventCount) {
Preconditions.checkArgument(eventCount >= 0, "hbase.namedqueue.ringbuffer.size must be > 0");
int floor = Integer.highestOneBit(eventCount);
if (floor == eventCount) {
return floor;
}
// max capacity is 1 << 30
if (floor >= (1 << 29)) {
return 1 << 30;
}
return floor << 1;
} | 3.26 |
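The rounding rule above picks the largest power of two at or below the input, doubles it when the input is not already a power of two, and caps the result at 2^30. A small, hypothetical standalone re-implementation, shown only to illustrate the behaviour (it is not the HBase method):

```java
// Hypothetical sketch of the same rounding rule, for illustration only.
public final class RingBufferSizing {
  static int nextPowerOfTwo(int eventCount) {
    int floor = Integer.highestOneBit(eventCount); // largest power of two <= eventCount
    if (floor == eventCount) {
      return floor;                                // already a power of two
    }
    if (floor >= (1 << 29)) {
      return 1 << 30;                              // cap at the maximum capacity of 2^30
    }
    return floor << 1;                             // otherwise round up
  }

  public static void main(String[] args) {
    System.out.println(nextPowerOfTwo(1000));      // 1024
    System.out.println(nextPowerOfTwo(1024));      // 1024
    System.out.println(nextPowerOfTwo(3));         // 4
    System.out.println(nextPowerOfTwo(1 << 30));   // 1073741824 (already 2^30)
  }
}
```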
hbase_NamedQueueRecorder_persistAll_rdh | /**
* Add all in memory queue records to system table. The implementors can use system table or
* direct HDFS file or ZK as persistence system.
*/
public void persistAll(NamedQueuePayload.NamedQueueEvent namedQueueEvent, Connection connection) {
if (this.logEventHandler != null) {
this.logEventHandler.persistAll(namedQueueEvent, connection);
}
} | 3.26 |
hbase_NamedQueueRecorder_getNamedQueueRecords_rdh | /**
* Retrieve in memory queue records from ringbuffer
*
* @param request
* namedQueue request with event type
* @return queue records from ringbuffer after filter (if applied)
*/
public NamedQueueGetResponse getNamedQueueRecords(NamedQueueGetRequest request) {
return this.logEventHandler.getNamedQueueRecords(request);
} | 3.26 |
hbase_VersionInfoUtil_getVersionNumber_rdh | /**
* Pack the full version number into an int, giving each component 8 bits except the dot
* release, which gets 12 bits. Examples: (1.3.4 is 0x0103004, 2.1.0 is 0x0201000)
*
* @param versionInfo
* the VersionInfo object to pack
* @return the version number as int. (e.g. 0x0103004 is 1.3.4)
*/
public static int getVersionNumber(final HBaseProtos.VersionInfo versionInfo) {
if (versionInfo != null) {
try {
final String[] components = getVersionComponents(versionInfo);
int clientMajor = (components.length > 0) ? Integer.parseInt(components[0]) : 0;
int clientMinor = (components.length > 1) ? Integer.parseInt(components[1]) : 0;
int clientPatch = (components.length > 2) ? Integer.parseInt(components[2]) : 0;
return buildVersionNumber(clientMajor, clientMinor, clientPatch);
} catch (NumberFormatException e) {
int clientMajor = (versionInfo.hasVersionMajor()) ? versionInfo.getVersionMajor() : 0;
int clientMinor = (versionInfo.hasVersionMinor()) ? versionInfo.getVersionMinor() : 0;
return buildVersionNumber(clientMajor, clientMinor, 0);
}
}
return 0; // no version
} | 3.26 |
hbase_VersionInfoUtil_getVersionComponents_rdh | /**
* Returns the version components. Examples: "1.4.3" returns [1, 4, 3], "4.5.6-SNAPSHOT" returns
* [4, 5, 6, "SNAPSHOT"]
*
* @return the components of the version string
*/
private static String[] getVersionComponents(final HBaseProtos.VersionInfo versionInfo) {
return versionInfo.getVersion().split("[\\.-]");
} | 3.26 |
hbase_VersionInfoUtil_getCurrentClientVersionInfo_rdh | /**
* Returns the versionInfo extracted from the current RpcCallContext
*/
public static VersionInfo getCurrentClientVersionInfo() {
return RpcServer.getCurrentCall().map(RpcCallContext::getClientVersionInfo).orElse(NonCallVersion.get());
} | 3.26 |
hbase_VersionInfoUtil_buildVersionNumber_rdh | /**
* Pack the full version number into an int, giving each component 8 bits except the dot
* release, which gets 12 bits. Examples: (1.3.4 is 0x0103004, 2.1.0 is 0x0201000)
*
* @param major
* version major number
* @param minor
* version minor number
* @param patch
* version patch number
* @return the version number as int. (e.g. 0x0103004 is 1.3.4)
*/
private static int buildVersionNumber(int major, int minor, int patch) {
return ((major << 20) | (minor << 12)) | patch;
} | 3.26 |
hbase_VersionInfoUtil_versionNumberToString_rdh | /**
* Returns the passed-in <code>version</code> int as a version String (e.g. 0x0103004 is 1.3.4)
*/
public static String versionNumberToString(final int version) {
return String.format("%d.%d.%d", (version >> 20) & 0xff, (version >> 12) & 0xff, version & 0xfff);
} | 3.26 |
hbase_Size_getLongValue_rdh | /**
* get the value
*/
public long getLongValue() {
return ((long) (value));
} | 3.26 |
hbase_Size_get_rdh | /**
* get the value which is converted to specified unit.
*
* @param unit
* size unit
* @return the converted value
*/
public double get(Unit unit) {
if (value == 0) {
return value;
}
int diff = this.unit.getOrderOfSize() - unit.getOrderOfSize();
if (diff == 0) {
return value;
}
BigDecimal rval = BigDecimal.valueOf(value);
for (int i = 0; i != Math.abs(diff); ++i) {
rval = (diff > 0) ? rval.multiply(SCALE_BASE) : rval.divide(SCALE_BASE);
}
return rval.doubleValue();
} | 3.26 |
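A hedged usage sketch of the conversion above, assuming Size's public (value, unit) constructor and the usual 1024 scale base between adjacent units:

```java
import org.apache.hadoop.hbase.Size;

public final class SizeConversionDemo {
  public static void main(String[] args) {
    // 1024 MB expressed in other units; exact outputs assume a 1024 scale base.
    Size heapSize = new Size(1024, Size.Unit.MEGABYTE);
    System.out.println(heapSize.get(Size.Unit.GIGABYTE)); // 1.0
    System.out.println(heapSize.get(Size.Unit.KILOBYTE)); // 1048576.0
    System.out.println(heapSize.getLongValue());          // 1024, in the Size's own unit
  }
}
```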
hbase_Size_getUnit_rdh | /**
* Returns size unit
*/
public Unit getUnit() {
return unit;
} | 3.26 |
hbase_MasterAddressTracker_deleteIfEquals_rdh | /**
* Delete the master znode if its content is the same as the parameter
*
* @param zkw
* must not be null
* @param content
* must not be null
*/
public static boolean deleteIfEquals(ZKWatcher zkw, final String content) {
if (content == null) {
throw new IllegalArgumentException("Content must not be null");
}
try {
Stat stat = new Stat();
byte[] data = ZKUtil.getDataNoWatch(zkw, zkw.getZNodePaths().masterAddressZNode, stat);
ServerName sn = ProtobufUtil.parseServerNameFrom(data);
if ((sn != null) && content.equals(sn.toString())) {
return ZKUtil.deleteNode(zkw, zkw.getZNodePaths().masterAddressZNode, stat.getVersion());
}
} catch (KeeperException e) {
LOG.warn("Can't get or delete the master znode", e);
} catch (DeserializationException e) {
LOG.warn("Can't get or delete the master znode", e);
}
return false;
} | 3.26 |
hbase_MasterAddressTracker_toByteArray_rdh | /**
*
* @param sn
* must not be null
* @return Content of the master znode as a serialized pb with the pb magic as prefix.
*/
static byte[] toByteArray(final ServerName sn, int infoPort) {
ZooKeeperProtos.Master.Builder mbuilder = ZooKeeperProtos.Master.newBuilder();
HBaseProtos.ServerName.Builder snbuilder = HBaseProtos.ServerName.newBuilder();
snbuilder.setHostName(sn.getHostname());
snbuilder.setPort(sn.getPort());
snbuilder.setStartCode(sn.getStartcode());
mbuilder.setMaster(snbuilder.build());
mbuilder.setRpcVersion(HConstants.RPC_CURRENT_VERSION);
mbuilder.setInfoPort(infoPort);
return ProtobufUtil.prependPBMagic(mbuilder.build().toByteArray());
} | 3.26 |
hbase_MasterAddressTracker_setMasterAddress_rdh | /**
* Set master address into the <code>master</code> znode or into the backup subdirectory of backup
* masters; switch off the passed in <code>znode</code> path.
*
* @param zkw
* The ZKWatcher to use.
* @param znode
* Where to create the znode; could be at the top level or it could be under backup
* masters
* @param master
* ServerName of the current master must not be null.
* @return true if node created, false if not; a watch is set in both cases
* @throws KeeperException
* if a ZooKeeper operation fails
*/
public static boolean setMasterAddress(final ZKWatcher zkw, final String znode, final ServerName master, int infoPort) throws KeeperException {
return ZKUtil.createEphemeralNodeAndWatch(zkw, znode, toByteArray(master, infoPort));
} | 3.26 |
hbase_MasterAddressTracker_getBackupMasterInfoPort_rdh | /**
* Get backup master info port. Use this instead of {@link #getBackupMasterInfoPort(ServerName)}
* if you do not have an instance of this tracker in your context.
*
* @param zkw
* ZKWatcher to use
* @param sn
* ServerName of the backup master
* @return backup master info port in the master address znode or 0 if no znode present.
* @throws KeeperException
* if a ZooKeeper operation fails
* @throws IOException
* if the address of the ZooKeeper master cannot be retrieved
*/
public static int getBackupMasterInfoPort(ZKWatcher zkw, final ServerName sn) throws KeeperException, IOException {
byte[] data;
try {
data = ZKUtil.getData(zkw, ZNodePaths.joinZNode(zkw.getZNodePaths().backupMasterAddressesZNode, sn.toString()));
} catch (InterruptedException e) {
throw new InterruptedIOException();
}
if (data == null) {
throw new IOException("Can't get backup master address from ZooKeeper; znode data == null");
}
try {
final ZooKeeperProtos.Master backup = parse(data);
if (backup == null) {
return 0;
}
return backup.getInfoPort();
} catch (DeserializationException e) {
KeeperException ke = new KeeperException.DataInconsistencyException();
ke.initCause(e);
throw ke;
}
} | 3.26 |
hbase_MasterAddressTracker_getMasterInfoPort_rdh | /**
* Get the info port of the current master if one is available. Return 0 if there is no current
* master or zookeeper is unavailable.
*
* @return info port or 0 if timed out
*/
public int getMasterInfoPort() {
try {
final ZooKeeperProtos.Master master = parse(this.getData(false));
if (master == null) {
return 0;
}
return master.getInfoPort();
} catch (DeserializationException e) {
LOG.warn("Failed parse master zk node data", e);
return 0;
}
} | 3.26 |
hbase_MasterAddressTracker_hasMaster_rdh | /**
* Check if there is a master available.
*
* @return true if there is a master set, false if not.
*/
public boolean hasMaster() {
return super.getData(false) != null;
} | 3.26 |
hbase_MasterAddressTracker_parse_rdh | /**
*
* @param data
* ZooKeeper data; may be null
* @return pb object of master, null if no active master
* @throws DeserializationException
* if the parsing fails
*/
public static Master parse(byte[] data) throws DeserializationException {
if (data == null) {
return null;
}
int prefixLen = ProtobufUtil.lengthOfPBMagic();
try {
return ZooKeeperProtos.Master.parser().parseFrom(data, prefixLen, data.length - prefixLen);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
} | 3.26 |
hbase_MasterAddressTracker_getMasterAddress_rdh | /**
* Get master address. Use this instead of {@link #getMasterAddress()} if you do not have an
* instance of this tracker in your context.
*
* @param zkw
* ZKWatcher to use
* @return ServerName stored in the master address znode or null if no znode present.
* @throws KeeperException
* if a ZooKeeper operation fails
* @throws IOException
* if the address of the ZooKeeper master cannot be retrieved
*/
public static ServerName getMasterAddress(final ZKWatcher zkw)
throws KeeperException, IOException {
byte[] data;
try {
data = ZKUtil.getData(zkw, zkw.getZNodePaths().masterAddressZNode);
} catch (InterruptedException e) {
throw new InterruptedIOException();
}
// TODO javadoc claims we return null in this case. :/
if (data == null) {
throw new IOException("Can't get master address from ZooKeeper; znode data == null");
}
try {
return ProtobufUtil.parseServerNameFrom(data);
} catch (DeserializationException e) {
KeeperException ke = new KeeperException.DataInconsistencyException();
ke.initCause(e);
throw ke;
}
}
/**
* Get master info port. Use this instead of {@link #getMasterInfoPort()} | 3.26 |
hbase_MasterAddressTracker_getBackupMastersAndRenewWatch_rdh | /**
* Retrieves the list of registered backup masters and renews a watch on the znode for children
* updates.
*
* @param zkw
* Zookeeper watcher to use
* @return List of backup masters.
* @throws InterruptedIOException
* if there is any issue fetching the required data from Zookeeper.
*/
public static List<ServerName> getBackupMastersAndRenewWatch(ZKWatcher zkw) throws InterruptedIOException {
// Build Set of backup masters from ZK nodes
List<String> backupMasterStrings = null;
try {
backupMasterStrings = ZKUtil.listChildrenAndWatchForNewChildren(zkw, zkw.getZNodePaths().backupMasterAddressesZNode);
} catch (KeeperException e) {
LOG.warn(zkw.prefix("Unable to list backup servers"), e);
}
List<ServerName> backupMasters = Collections.emptyList();
if ((backupMasterStrings != null) && (!backupMasterStrings.isEmpty())) {
backupMasters = new ArrayList<>(backupMasterStrings.size());
for (String s : backupMasterStrings) {
try {
byte[] bytes;
try {
bytes = ZKUtil.getData(zkw, ZNodePaths.joinZNode(zkw.getZNodePaths().backupMasterAddressesZNode, s));
} catch (InterruptedException e) {
throw new InterruptedIOException();
}
if (bytes != null) {
ServerName sn;
try {
sn = ProtobufUtil.parseServerNameFrom(bytes);
} catch (DeserializationException e) {
LOG.warn("Failed parse, skipping registering backup server", e);
continue;
}
backupMasters.add(sn);
}
} catch (KeeperException e) {
LOG.warn(zkw.prefix("Unable to get information about " +
"backup servers"), e); }
}
backupMasters.sort(Comparator.comparing(ServerName::getServerName));
}
return backupMasters;
} | 3.26 |
hbase_TimeRangeTracker_includeTimestamp_rdh | /**
* Update the current TimestampRange to include the timestamp from <code>cell</code>. If the Key
* is of type DeleteColumn or DeleteFamily, it includes the entire time range from 0 to timestamp
* of the key.
*
* @param cell
* the Cell to include
*/
public void includeTimestamp(final Cell cell) {
includeTimestamp(cell.getTimestamp());
if (PrivateCellUtil.isDeleteColumnOrFamily(cell)) {
includeTimestamp(0);
}
} | 3.26 |
hbase_TimeRangeTracker_toByteArray_rdh | /**
* This method used to serialize TimeRangeTracker (TRT) with protobuf, but that breaks forward
* compatibility on HFile (see HBASE-21008). Previous hbase versions (< 2.0.0) used DataOutput to
* serialize TRT, and those old versions cannot deserialize a TRT that was serialized with
* protobuf. So we need to revert the change and serialize TimeRangeTracker with DataOutput again.
* For more information, please check HBASE-21012.
*
* @param tracker
* TimeRangeTracker needed to be serialized.
* @return byte array filled with serialized TimeRangeTracker.
* @throws IOException
* if something goes wrong in writeLong.
*/
public static byte[] toByteArray(TimeRangeTracker tracker) throws IOException {
try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) {
try (DataOutputStream dos = new DataOutputStream(bos)) {
dos.writeLong(tracker.getMin());
dos.writeLong(tracker.getMax());
return bos.toByteArray();
}
}
} | 3.26 |
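The byte array produced above is simply two big-endian longs, min first and then max. A hypothetical reader for that format, shown only to illustrate the layout (it is not the HBase parsing code):

```java
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

public final class TimeRangeBytesReader {
  /** Reads back the (min, max) pair written by toByteArray above, in the same order. */
  static long[] readMinMax(byte[] serialized) throws IOException {
    try (DataInputStream dis = new DataInputStream(new ByteArrayInputStream(serialized))) {
      long min = dis.readLong(); // written first
      long max = dis.readLong(); // written second
      return new long[] { min, max };
    }
  }
}
```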
hbase_TimeRangeTracker_includesTimeRange_rdh | /**
* Check if the range has ANY overlap with TimeRange
*
* @param tr
* TimeRange, it expects [minStamp, maxStamp)
* @return True if there is overlap, false otherwise
*/
public boolean includesTimeRange(final TimeRange tr) {
return (getMin() < tr.getMax()) && (getMax() >= tr.getMin());
} | 3.26 |
hbase_TimeRangeTracker_toTimeRange_rdh | /**
* Returns a TimeRange made from the current state of <code>this</code>.
*/
TimeRange toTimeRange() {
long min = getMin();
long max = getMax();
// Initial TimeRangeTracker timestamps are the opposite of what you want for a TimeRange. Fix!
if (min == INITIAL_MIN_TIMESTAMP) {
min = TimeRange.INITIAL_MIN_TIMESTAMP;
}
if (max == INITIAL_MAX_TIMESTAMP) {
max = TimeRange.INITIAL_MAX_TIMESTAMP;
}
return TimeRange.between(min, max);
} | 3.26 |
hbase_LockServiceClient_tableLock_rdh | /**
* Create a new EntityLock object to acquire an exclusive or shared lock on a table. Internally,
* the table namespace will also be locked in shared mode.
*/
public EntityLock tableLock(final TableName tableName, final boolean exclusive, final String description, final Abortable abort) {
LockRequest lockRequest = buildLockRequest(exclusive ? LockType.EXCLUSIVE : LockType.SHARED, tableName.getNameAsString(), null, null, description, ng.getNonceGroup(), ng.newNonce());
return new EntityLock(conf, stub, lockRequest, abort);
} | 3.26 |
hbase_LockServiceClient_namespaceLock_rdh | /**
* Create a new EntityLock object to acquire an exclusive lock on a namespace. Clients can not
* acquire shared locks on a namespace.
*/
public EntityLock namespaceLock(String namespace, String description, Abortable abort) {
LockRequest lockRequest = buildLockRequest(LockType.EXCLUSIVE, namespace, null, null, description, ng.getNonceGroup(), ng.newNonce());
return new EntityLock(conf, stub, lockRequest, abort);
} | 3.26 |
hbase_LockServiceClient_regionLock_rdh | /**
* Create a new EntityLock object to acquire an exclusive lock on multiple regions of the same table.
* Internally, the table and its namespace will also be locked in shared mode.
*/
public EntityLock regionLock(List<RegionInfo> regionInfos, String description, Abortable abort) {
LockRequest lockRequest = buildLockRequest(LockType.EXCLUSIVE, null, null, regionInfos, description, ng.getNonceGroup(), ng.newNonce());
return new EntityLock(conf, stub, lockRequest, abort);
} | 3.26 |
hbase_VersionedSegmentsList_getEstimatedUniquesFrac_rdh | // Estimates fraction of unique keys
double getEstimatedUniquesFrac() {
int segmentCells = 0;
int maxCells = 0;
double est = 0;
for (ImmutableSegment s : storeSegments) {
double segmentUniques = s.getNumUniqueKeys();
if (segmentUniques != CellSet.UNKNOWN_NUM_UNIQUES) {
segmentCells = s.getCellsCount();
if (segmentCells > maxCells) {
maxCells = segmentCells;
est = segmentUniques / segmentCells;
}
}
// else ignore this segment specifically since if the unique number is unknown counting
// cells can be expensive
}
if (maxCells == 0) {
return 1.0;
}
return est;
} | 3.26 |
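A standalone mirror of the estimate with hypothetical segment stats, to make the "largest segment with a known unique count wins" behaviour concrete.
// {cellCount, uniqueKeys}; -1 stands in for CellSet.UNKNOWN_NUM_UNIQUES.
long[][] segments = { {100, 80}, {300, 150}, {50, -1} };
int maxCells = 0;
double est = 0;
for (long[] s : segments) {
  if (s[1] != -1 && s[0] > maxCells) {  // only segments with a known unique count compete
    maxCells = (int) s[0];
    est = (double) s[1] / s[0];         // largest known segment wins: 150 / 300 = 0.5
  }
}
double frac = maxCells == 0 ? 1.0 : est; // 0.5 here; 1.0 if every count were unknown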
hbase_RegionStates_getRegionsOfTableForEnabling_rdh | /**
* Get the regions for enabling a table.
* <p/>
 * Here we want the EnableTableProcedure to be more robust and usable for fixing some nasty
* states, so the checks in this method will be a bit strange. In general, a region can only be
* offline when it is split, for merging we will just delete the parent regions, but with HBCK we
* may force update the state of a region to fix some nasty bugs, so in this method we will try to
 * bring the offline regions back if they are not split. That's why we only check for the split state
* here.
*/
public List<RegionInfo> getRegionsOfTableForEnabling(TableName table) {
return getRegionsOfTable(table, regionNode -> (!regionNode.isInState(State.SPLIT)) && (!regionNode.getRegionInfo().isSplit()));
} | 3.26 |
hbase_RegionStates_getRegionsOfTableForReopen_rdh | /**
* Get the regions to be reopened when modifying a table.
* <p/>
* Notice that the {@code openSeqNum} in the returned HRegionLocation is also used to indicate the
* state of this region, positive means the region is in {@link State#OPEN}, -1 means
 * {@link State#OPENING}. And for regions in other states we do not need to reopen them.
*/
public List<HRegionLocation> getRegionsOfTableForReopen(TableName tableName) {
return getTableRegionStateNodes(tableName).stream().map(this::createRegionForReopen).filter(r -> r != null).collect(Collectors.toList());
}
/**
* Check whether the region has been reopened. The meaning of the {@link HRegionLocation} is the
 * same as {@link #getRegionsOfTableForReopen(TableName)}.
* <p/>
* For a region which is in {@link State#OPEN} before, if the region state is changed or the open
* seq num is changed, we can confirm that it has been reopened.
* <p/>
* For a region which is in {@link State#OPENING} before, usually it will be in {@link State#OPEN}
* now and we will schedule a MRP to reopen it. But there are several exceptions:
* <ul>
 * <li>The region is in a state other than {@link State#OPEN} or {@link State#OPENING}.</li>
* <li>The location of the region has been changed</li>
* </ul>
* Of course the region could still be in {@link State#OPENING} state and still on the same
* server, then here we will still return a {@link HRegionLocation} for it, just like
* {@link #getRegionsOfTableForReopen(TableName)}.
*
* @param oldLoc
* the previous state/location of this region
* @return null if the region has been reopened, otherwise a new {@link HRegionLocation} | 3.26 |
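The openSeqNum convention above can be read straight off the returned locations. A minimal sketch, assuming the usual HRegionLocation accessors and regionStates/tableName instances already in scope.
for (HRegionLocation loc : regionStates.getRegionsOfTableForReopen(tableName)) {
  if (loc.getSeqNum() == -1) {
    // region was OPENING when the reopen list was built
  } else {
    // positive value: the openSeqNum of a region that was OPEN on loc.getServerName()
  }
}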
hbase_RegionStates_hasTableRegionStates_rdh | // ============================================================================================
// TODO: helpers
// ============================================================================================
public boolean hasTableRegionStates(final TableName tableName) {
// TODO
return !getTableRegionStates(tableName).isEmpty();
} | 3.26 |
hbase_RegionStates_logSplitting_rdh | /**
* Call this when we start log splitting for a crashed Server.
*
* @see #logSplit(ServerName)
*/
public void logSplitting(final ServerName serverName) {
setServerState(serverName, ServerState.SPLITTING);
} | 3.26 |
hbase_RegionStates_getAssignmentsForBalancer_rdh | /**
 * This is an EXPENSIVE clone, but cloning is the safest thing to do. We can't hand out the
 * original since it can change, and at least the load balancer wants to iterate this exported
 * list. We need to synchronize on regions since all access to this.servers is under a lock on
 * this.regions.
*
* @return A clone of current open or opening assignments.
*/
public Map<TableName, Map<ServerName, List<RegionInfo>>> getAssignmentsForBalancer(TableStateManager tableStateManager, List<ServerName> onlineServers) {
final Map<TableName, Map<ServerName, List<RegionInfo>>> result = new HashMap<>();
for (RegionStateNode node : regionsMap.values()) {
// DisableTableProcedure first sets the table state to DISABLED and then force unassigns
// the regions in a loop. The balancer should ignore all regions for tables in DISABLED
// state because even if still currently open we expect them to be offlined very soon.
if (isTableDisabled(tableStateManager, node.getTable())) {
if (LOG.isTraceEnabled()) {
LOG.trace("Ignoring {} because table is disabled", node);
}
continue;
}
// When balancing, we are only interested in OPEN or OPENING regions. These can be
// expected to remain online until the next balancer iteration or unless the balancer
// decides to move it. Regions in other states are not eligible for balancing, because
// they are closing, splitting, merging, or otherwise already in transition.
if (!node.isInState(State.OPEN, State.OPENING)) {
if (LOG.isTraceEnabled()) {
LOG.trace("Ignoring {} because region is not OPEN or OPENING", node);
}
continue;
}
Map<ServerName, List<RegionInfo>> tableResult = result.computeIfAbsent(node.getTable(), t -> new HashMap<>());
final ServerName serverName = node.getRegionLocation();
// A region in ONLINE or OPENING state should have a location.
if (serverName == null) {
LOG.warn("Skipping, no server for {}", node);
continue;
}
List<RegionInfo> serverResult = tableResult.computeIfAbsent(serverName, s -> new ArrayList<>());
serverResult.add(node.getRegionInfo());
}
// Add online servers with no assignment for the table.
for (Map<ServerName, List<RegionInfo>> table : result.values()) {
for (ServerName serverName : onlineServers) {
table.computeIfAbsent(serverName, key -> new ArrayList<>());
}
}
return result;
} | 3.26 |
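The result is a table -> server -> regions snapshot, with every online server present under every table (possibly mapped to an empty list). A sketch of how a balancer-style consumer might walk it, assuming regionStates, tableStateManager and onlineServers are in scope.
Map<TableName, Map<ServerName, List<RegionInfo>>> assignments =
  regionStates.getAssignmentsForBalancer(tableStateManager, onlineServers);
for (Map.Entry<TableName, Map<ServerName, List<RegionInfo>>> byTable : assignments.entrySet()) {
  for (Map.Entry<ServerName, List<RegionInfo>> byServer : byTable.getValue().entrySet()) {
    int load = byServer.getValue().size(); // 0 for servers hosting no region of this table
    // ... feed (table, server, load) into the balancing decision; the maps are clones,
    // so they can be iterated without holding assignment-manager locks ...
  }
}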
hbase_RegionStates_setServerState_rdh | // ============================================================================================
// Split helpers
// These methods will only be called in ServerCrashProcedure, and at the end of SCP we will remove
// the ServerStateNode by calling removeServer.
// ============================================================================================
private void setServerState(ServerName serverName, ServerState state) {
ServerStateNode serverNode = getOrCreateServer(serverName);
synchronized(serverNode) {
serverNode.setState(state);
}
} | 3.26 |
hbase_RegionStates_getServerNode_rdh | /**
 * Returns the pertinent ServerStateNode, or null if none found (do not make modifications).
 */
public ServerStateNode getServerNode(final ServerName serverName) {
return serverMap.get(serverName);
} | 3.26 |
hbase_RegionStates_getRegionsInTransitionCount_rdh | /**
* Get the number of regions in transition.
*/
public int getRegionsInTransitionCount() {
return regionInTransition.size();
} | 3.26 |
hbase_RegionStates_clear_rdh | /**
* Called on stop of AssignmentManager.
*/
public void clear() {
regionsMap.clear();
encodedRegionsMap.clear();
regionInTransition.clear();
regionOffline.clear();
serverMap.clear();
} | 3.26 |
hbase_RegionStates_getAssignedRegions_rdh | // ============================================================================================
// TODO:
// ============================================================================================
public List<RegionInfo> getAssignedRegions() {
final List<RegionInfo> result = new ArrayList<RegionInfo>();
for (RegionStateNode node : regionsMap.values()) {
if (!node.isInTransition()) {
result.add(node.getRegionInfo());
}
}
return result;
}
hbase_RegionStates_metaLogSplitting_rdh | /**
* Call this when we start meta log splitting a crashed Server.
*
* @see #metaLogSplit(ServerName)
*/
public void metaLogSplitting(ServerName serverName) {
setServerState(serverName, ServerState.SPLITTING_META);
} | 3.26 |
hbase_RegionStates_isRegionOffline_rdh | /**
 * Returns true if the region is offline (in OFFLINE or CLOSED state).
*/
public boolean isRegionOffline(final RegionInfo regionInfo) {
return m2(regionInfo, State.OFFLINE, State.CLOSED);
} | 3.26 |
hbase_RegionStates_getRegionState_rdh | // ==========================================================================
// RegionState helpers
// ==========================================================================
public RegionState getRegionState(final RegionInfo regionInfo) {
RegionStateNode regionStateNode = getRegionStateNode(regionInfo);
return regionStateNode == null ? null : regionStateNode.toRegionState();
} | 3.26 |
hbase_RegionStates_addToOfflineRegions_rdh | // ==========================================================================
// Region offline helpers
// ==========================================================================
// TODO: Populated when we read meta but regions never make it out of here.
public void addToOfflineRegions(final RegionStateNode regionNode) {
LOG.info("Added to offline, CURRENTLY NEVER CLEARED!!! " + regionNode);
regionOffline.put(regionNode.getRegionInfo(), regionNode);
} | 3.26 |
hbase_RegionStates_removeServer_rdh | /**
* Called by SCP at end of successful processing.
*/
public void removeServer(final ServerName serverName) {
serverMap.remove(serverName);
} | 3.26 |
hbase_RegionStates_m1_rdh | /**
* Get the regions for deleting a table.
* <p/>
* Here we need to return all the regions irrespective of the states in order to archive them all.
* This is because if we don't archive OFFLINE/SPLIT regions and if a snapshot or a cloned table
 * references the regions, we will lose the data of the regions.
*/
public List<RegionInfo> m1(TableName table) {
return getTableRegionStateNodes(table).stream().map(RegionStateNode::getRegionInfo).collect(Collectors.toList());
} | 3.26 |
hbase_RegionStates_metaLogSplit_rdh | /**
* Called after we've split the meta logs on a crashed Server.
*
* @see #metaLogSplitting(ServerName)
*/
public void metaLogSplit(ServerName serverName) {
setServerState(serverName, ServerState.SPLITTING_META_DONE);
} | 3.26 |