name (string, length 12–178) | code_snippet (string, length 8–36.5k) | score (float64, 3.26–3.68)
---|---|---|
hbase_HBaseTestingUtility_createMockRegionServerService_rdh | /**
* Create a stubbed out RegionServerService, mainly for getting FS. This version is used by
* TestOpenRegionHandler
*/
public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
rss.setFileSystem(getTestFileSystem());
return rss;
} | 3.26 |
hbase_HBaseTestingUtility_m4_rdh | /**
* Load region with rows from 'aaa' to 'zzz'.
*
* @param r
* Region
* @param f
* Family
* @param flush
* flush the cache if true
* @return Count of rows loaded.
*/
public int m4(final HRegion r, final byte[] f, final boolean flush) throws IOException {
byte[] k = new byte[3];
int rowCount = 0;
for (byte b1 = 'a'; b1 <= 'z'; b1++) {
for (byte b2 = 'a'; b2 <= 'z'; b2++) {
for (byte b3 = 'a'; b3 <= 'z'; b3++) {
k[0] = b1;
k[1] = b2;
k[2] = b3;
Put put = new Put(k);
put.setDurability(Durability.SKIP_WAL);
put.addColumn(f, null, k);
if (r.getWAL() == null) {
put.setDurability(Durability.SKIP_WAL);
}
        int preRowCount = rowCount;
        int pause = 10;
        int maxPause = 1000;
        while (rowCount == preRowCount) {
          try {
            r.put(put);
            rowCount++;
          } catch (RegionTooBusyException e) {
            pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
            Threads.sleep(pause);
}
}
}
}
if (flush) {
r.flush(true);
}
}
return rowCount;
} | 3.26 |
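The retry loop in m4 above implements a capped exponential backoff when the region reports RegionTooBusyException: the pause doubles on each failure until it hits a 1-second cap. Below is a minimal standalone sketch of that backoff pattern in plain Java; `Attempt` and `retryWithBackoff` are illustrative names, not HBase API.

```java
// Minimal sketch of the capped exponential backoff used in m4 above (names are illustrative).
public final class BackoffRetry {
  interface Attempt { boolean tryOnce(); } // returns false when the target is "too busy"

  static void retryWithBackoff(Attempt attempt) throws InterruptedException {
    int pause = 10;            // start at 10 ms, as in the snippet
    final int maxPause = 1000; // cap at 1 s
    while (!attempt.tryOnce()) {
      pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
      Thread.sleep(pause);
    }
  }

  public static void main(String[] args) throws InterruptedException {
    // Fails twice, then succeeds: sleeps ~20 ms and ~40 ms before completing.
    int[] failuresLeft = {2};
    retryWithBackoff(() -> failuresLeft[0]-- <= 0);
    System.out.println("put succeeded after backoff");
  }
}
```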
hbase_MetricsMasterFileSystem_addSplit_rdh | /**
* Record a single instance of a split
*
* @param time
* time that the split took
* @param size
* length of original WALs that were split
*/
public synchronized void addSplit(long time, long size) {
  source.updateSplitTime(time);
  source.updateSplitSize(size);
} | 3.26 |
hbase_AnnotationReadingPriorityFunction_getPriority_rdh | /**
* Returns a 'priority' based on the request type.
* <p/>
* Currently the returned priority is used for queue selection.
* <p/>
* See the {@code SimpleRpcScheduler} as example. It maintains a queue per 'priority type':
* <ul>
* <li>HIGH_QOS (meta requests)</li>
* <li>REPLICATION_QOS (replication requests)</li>
* <li>NORMAL_QOS (user requests).</li>
* </ul>
*/
@Override
public int getPriority(RequestHeader header, Message param, User user) {
int priorityByAnnotation = getAnnotatedPriority(header);
if (priorityByAnnotation >= 0) {
return priorityByAnnotation;
    }
    if (param == null) {
      return HConstants.NORMAL_QOS;
    }
return getBasePriority(header, param);
} | 3.26 |
hbase_TableDescriptorChecker_sanityCheck_rdh | /**
* Checks whether the table conforms to some sane limits, and configured values (compression, etc)
* work. Throws an exception if something is wrong.
*/
public static void sanityCheck(final Configuration c, final TableDescriptor td) throws IOException {
CompoundConfiguration conf = new CompoundConfiguration().add(c).addBytesMap(td.getValues());
// Setting logs to warning instead of throwing exception if sanityChecks are disabled
boolean logWarn = !shouldSanityCheck(conf);
// check max file size
long maxFileSizeLowerLimit = (2 * 1024) * 1024L;// 2M is the default lower limit
// if not set MAX_FILESIZE in TableDescriptor, and not set HREGION_MAX_FILESIZE in
// hbase-site.xml, use maxFileSizeLowerLimit instead to skip this check
long v3 = (td.getValue(TableDescriptorBuilder.MAX_FILESIZE) == null) ? conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit) : Long.parseLong(td.getValue(TableDescriptorBuilder.MAX_FILESIZE));
if (v3 < conf.getLong("hbase.hregion.max.filesize.limit", maxFileSizeLowerLimit)) {
String message = ((("MAX_FILESIZE for table descriptor or " +
"\"hbase.hregion.max.filesize\" (") + v3) + ") is too small, which might cause over splitting into unmanageable ") + "number of regions.";
warnOrThrowExceptionForFailure(logWarn, message, null);
}
// check flush size
long flushSizeLowerLimit = 1024 * 1024L;// 1M is the default lower limit
// if not set MEMSTORE_FLUSHSIZE in TableDescriptor, and not set HREGION_MEMSTORE_FLUSH_SIZE in
// hbase-site.xml, use flushSizeLowerLimit instead to skip this check
long flushSize = (td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE) == null) ? conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit) : Long.parseLong(td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE));
if (flushSize < conf.getLong("hbase.hregion.memstore.flush.size.limit", flushSizeLowerLimit)) {
String message = ((("MEMSTORE_FLUSHSIZE for table descriptor or " + "\"hbase.hregion.memstore.flush.size\" (") + flushSize) + ") is too small, which might cause") + " very frequent flushing.";
warnOrThrowExceptionForFailure(logWarn, message, null);
}
// check that coprocessors and other specified plugin classes can be loaded
checkClassLoading(conf, td);
if (conf.getBoolean(MASTER_CHECK_COMPRESSION, DEFAULT_MASTER_CHECK_COMPRESSION)) {
// check compression can be loaded
checkCompression(conf, td);
}
if (conf.getBoolean(MASTER_CHECK_ENCRYPTION, DEFAULT_MASTER_CHECK_ENCRYPTION)) {
// check encryption can be loaded
      checkEncryption(conf, td);
    }
// Verify compaction policy
checkCompactionPolicy(conf, td);
// check that we have at least 1 CF
if (td.getColumnFamilyCount() == 0) {
String message = "Table should have at least one column family.";
warnOrThrowExceptionForFailure(logWarn, message, null);
}
// check that we have minimum 1 region replicas
int regionReplicas = td.getRegionReplication();
    if (regionReplicas < 1) {
String message = "Table region replication should be at least one.";
warnOrThrowExceptionForFailure(logWarn, message, null);
}
// Meta table shouldn't be set as read only, otherwise it will impact region assignments
if (td.isReadOnly() && TableName.isMetaTableName(td.getTableName())) {
      warnOrThrowExceptionForFailure(false, "Meta table can't be set as read only.", null);
}
// check replication scope
checkReplicationScope(conf, td);
// check bloom filter type
checkBloomFilterType(conf, td);
for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) {
if (hcd.getTimeToLive() <= 0) {
String message = ("TTL for column family " + hcd.getNameAsString()) + " must be positive.";
warnOrThrowExceptionForFailure(logWarn, message, null);
}
// check blockSize
      if ((hcd.getBlocksize() < 1024) || (hcd.getBlocksize() > (16 * 1024 * 1024))) {
String message = ("Block size for column family " + hcd.getNameAsString()) + " must be between 1K and 16MB.";
warnOrThrowExceptionForFailure(logWarn, message, null);
}
// check versions
if (hcd.getMinVersions() < 0) {
String message = ("Min versions for column family " + hcd.getNameAsString()) + " must be positive.";
warnOrThrowExceptionForFailure(logWarn, message, null);
}
// max versions already being checked
// HBASE-13776 Setting illegal versions for ColumnFamilyDescriptor
// does not throw IllegalArgumentException
// check minVersions <= maxVerions
if (hcd.getMinVersions() > hcd.getMaxVersions()) {
String message = ("Min versions for column family " + hcd.getNameAsString()) + " must be less than the Max versions.";
warnOrThrowExceptionForFailure(logWarn, message, null);
}
// check data replication factor, it can be 0(default value) when user has not explicitly
// set the value, in this case we use default replication factor set in the file system.
if (hcd.getDFSReplication() < 0) {
String message = ("HFile Replication for column family " +
hcd.getNameAsString()) + " must be greater than zero.";
warnOrThrowExceptionForFailure(logWarn, message, null);
}
// check in-memory compaction
try {
hcd.getInMemoryCompaction();
} catch (IllegalArgumentException e) {
warnOrThrowExceptionForFailure(logWarn, e.getMessage(), e);
}
}
} | 3.26 |
hbase_TableDescriptorChecker_warnOrThrowExceptionForFailure_rdh | // HBASE-13350 - Helper method to log warning on sanity check failures if checks disabled.
private static void warnOrThrowExceptionForFailure(boolean logWarn, String message, Exception cause) throws IOException {
    if (!logWarn) {
throw new DoNotRetryIOException(((message + " Set ") + TABLE_SANITY_CHECKS) + " to false at conf or table descriptor if you want to bypass sanity checks", cause);
}
LOG.warn(message);
} | 3.26 |
hbase_BucketCache_cacheBlock_rdh | /**
* Cache the block with the specified name and buffer.
*
* @param cacheKey
* block's cache key
* @param cachedItem
* block buffer
* @param inMemory
* if block is in-memory
*/
@Override
public void cacheBlock(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory, boolean waitWhenCache) {
cacheBlockWithWait(cacheKey, cachedItem, inMemory, waitWhenCache && (queueAdditionWaitTime > 0));
} | 3.26 |
hbase_BucketCache_bucketSizesAboveThresholdCount_rdh | /**
 * Return the count of bucketSizeInfos that still need free space
*/
private int bucketSizesAboveThresholdCount(float minFactor) {
BucketAllocator[] stats = bucketAllocator.getIndexStatistics();
int fullCount = 0;
for (int i = 0; i < stats.length; i++) {
      long freeGoal = (long) Math.floor(stats[i].totalCount() * (1 - minFactor));
      freeGoal = Math.max(freeGoal, 1);
      if (stats[i].freeCount() < freeGoal) {
        fullCount++;
      }
    }
    return fullCount;
} | 3.26 |
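To make the freeGoal arithmetic above concrete with made-up numbers: a bucket size with totalCount = 100 slots and minFactor = 0.5 yields freeGoal = floor(100 * 0.5) = 50, so that bucket size is counted whenever fewer than 50 of its slots are free. A tiny illustrative check (values are hypothetical, not HBase defaults):

```java
// Illustrative check of the freeGoal computation used above; the numbers are made up.
public class FreeGoalDemo {
  public static void main(String[] args) {
    long totalCount = 100;   // hypothetical slots of one bucket size
    float minFactor = 0.5f;  // hypothetical min factor
    long freeGoal = (long) Math.floor(totalCount * (1 - minFactor));
    freeGoal = Math.max(freeGoal, 1);
    long freeCount = 10;     // hypothetical currently free slots
    System.out.println("freeGoal=" + freeGoal + ", needsFreeing=" + (freeCount < freeGoal));
    // prints: freeGoal=50, needsFreeing=true
  }
}
```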
hbase_BucketCache_evictBlocksByHfileName_rdh | /**
* Evicts all blocks for a specific HFile.
* <p>
* This is used for evict-on-close to remove all blocks of a specific HFile.
*
* @return the number of blocks evicted
*/
@Override
public int evictBlocksByHfileName(String hfileName) {
removeFileFromPrefetch(hfileName);
Set<BlockCacheKey> keySet = blocksByHFile.subSet(new BlockCacheKey(hfileName, Long.MIN_VALUE), true, new BlockCacheKey(hfileName, Long.MAX_VALUE), true);
int numEvicted = 0;
for (BlockCacheKey key : keySet) {
      if (evictBlock(key)) {
        ++numEvicted;
      }
}
return numEvicted;
} | 3.26 |
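The per-file eviction above relies on blocksByHFile being a sorted set ordered by file name first, so a subSet bounded by the minimum and maximum offsets of one file yields exactly that file's keys. Below is a minimal sketch of the same range-scan idea using a plain ConcurrentSkipListSet; `Key` is a hypothetical stand-in for BlockCacheKey.

```java
import java.util.Comparator;
import java.util.NavigableSet;
import java.util.concurrent.ConcurrentSkipListSet;

// Sketch of the "subSet by file name" trick; Key is a stand-in for BlockCacheKey.
public class RangeEvictDemo {
  record Key(String hfileName, long offset) {}

  public static void main(String[] args) {
    NavigableSet<Key> blocksByHFile = new ConcurrentSkipListSet<>(
      Comparator.comparing(Key::hfileName).thenComparingLong(Key::offset));
    blocksByHFile.add(new Key("hfile-a", 0));
    blocksByHFile.add(new Key("hfile-a", 4096));
    blocksByHFile.add(new Key("hfile-b", 0));

    // All blocks for "hfile-a": bounded by the smallest and largest possible offsets.
    NavigableSet<Key> forFile = blocksByHFile.subSet(
      new Key("hfile-a", Long.MIN_VALUE), true, new Key("hfile-a", Long.MAX_VALUE), true);
    System.out.println(forFile.size()); // 2
  }
}
```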
hbase_BucketCache_disableWriter_rdh | // Used for test
void disableWriter() {
this.writerEnabled = false;
} | 3.26 |
hbase_BucketCache_retrieveFromFile_rdh | /**
*
* @see #persistToFile()
*/
private void retrieveFromFile(int[] bucketSizes) throws IOException {
LOG.info("Started retrieving bucket cache from file");
File persistenceFile = new File(persistencePath);
if (!persistenceFile.exists()) {
LOG.warn("Persistence file missing! " + "It's ok if it's first run after enabling persistent cache.");
bucketAllocator = new BucketAllocator(cacheCapacity, bucketSizes, backingMap, realCacheSize);
blockNumber.add(backingMap.size());
backingMapValidated.set(true);
return;
}
assert !cacheEnabled;
try (FileInputStream in = new FileInputStream(persistenceFile)) {
int pblen = ProtobufMagic.lengthOfPBMagic();
byte[] pbuf = new byte[pblen];
int read = in.read(pbuf);
if (read != pblen) {
throw new IOException(((((("Incorrect number of bytes read while checking for protobuf magic " + "number. Requested=") + pblen) + ", Received= ") + read) + ", File=") + persistencePath);
      }
      if (!ProtobufMagic.isPBMagicPrefix(pbuf)) {
// In 3.0 we have enough flexibility to dump the old cache data.
// TODO: In 2.x line, this might need to be filled in to support reading the old format
throw new IOException("Persistence file does not start with protobuf magic number. " + persistencePath);
}
parsePB(BucketCacheProtos.BucketCacheEntry.parseDelimitedFrom(in));
bucketAllocator = new BucketAllocator(cacheCapacity, bucketSizes, backingMap, realCacheSize);
blockNumber.add(backingMap.size());
LOG.info("Bucket cache retrieved from file successfully");
}
} | 3.26 |
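retrieveFromFile guards the persisted state with a protobuf magic prefix before parsing. Here is a small sketch of the same "read exactly N bytes and compare the prefix" check in plain Java; the 4-byte MAGIC value is an assumption for illustration, not the real ProtobufMagic constant.

```java
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.Arrays;

// Sketch of the magic-prefix guard used by retrieveFromFile; MAGIC is a made-up value.
public class MagicPrefixCheck {
  static final byte[] MAGIC = {'P', 'B', 'U', 'F'};

  static void checkMagic(File f) throws IOException {
    try (FileInputStream in = new FileInputStream(f)) {
      byte[] buf = new byte[MAGIC.length];
      int read = in.read(buf);
      if (read != MAGIC.length) {
        throw new IOException("Incorrect number of bytes read: requested=" + MAGIC.length
          + ", received=" + read + ", file=" + f);
      }
      if (!Arrays.equals(buf, MAGIC)) {
        throw new IOException("File does not start with the expected magic number: " + f);
      }
      // ...parse the delimited payload from the remaining bytes here...
    }
  }
}
```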
hbase_BucketCache_blockEvicted_rdh | /**
* This method is invoked after the bucketEntry is removed from {@link BucketCache#backingMap}
*/
void blockEvicted(BlockCacheKey cacheKey, BucketEntry bucketEntry, boolean decrementBlockNumber, boolean evictedByEvictionProcess) {
bucketEntry.markAsEvicted();
blocksByHFile.remove(cacheKey);
if (decrementBlockNumber) {
this.blockNumber.decrement();
}
if (evictedByEvictionProcess) {
cacheStats.evicted(bucketEntry.getCachedTime(), cacheKey.isPrimary());
}
    if (ioEngine.isPersistent()) {
      removeFileFromPrefetch(cacheKey.getHfileName());
setCacheInconsistent(true);
}
} | 3.26 |
hbase_BucketCache_cacheBlockWithWait_rdh | /**
* Cache the block to ramCache
*
* @param cacheKey
* block's cache key
* @param cachedItem
* block buffer
* @param inMemory
* if block is in-memory
* @param wait
* if true, blocking wait when queue is full
*/
  public void cacheBlockWithWait(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory, boolean wait) {
    if (cacheEnabled) {
if (backingMap.containsKey(cacheKey) || ramCache.containsKey(cacheKey)) {
        if (shouldReplaceExistingCacheBlock(cacheKey, cachedItem)) {
BucketEntry bucketEntry = backingMap.get(cacheKey);
if ((bucketEntry != null) && bucketEntry.isRpcRef()) {
// avoid replace when there are RPC refs for the bucket entry in bucket cache
return;
}
cacheBlockWithWaitInternal(cacheKey, cachedItem, inMemory, wait);
}
} else {
cacheBlockWithWaitInternal(cacheKey, cachedItem, inMemory, wait);
}
}
} | 3.26 |
hbase_BucketCache_putIfAbsent_rdh | /**
* Return the previous associated value, or null if absent. It has the same meaning as
* {@link ConcurrentMap#putIfAbsent(Object, Object)}
*/
public RAMQueueEntry putIfAbsent(BlockCacheKey key, RAMQueueEntry entry) {
AtomicBoolean absent = new AtomicBoolean(false);
    RAMQueueEntry re = delegate.computeIfAbsent(key, k -> {
      // The RAMCache holds a reference to this entry, so its reference count should be incremented.
      entry.getData().retain();
      absent.set(true);
      return entry;
    });
    return absent.get() ? null : re;
} | 3.26 |
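putIfAbsent above uses computeIfAbsent plus an AtomicBoolean so that the retain() side effect only runs when the entry is actually inserted. Below is a standalone sketch of that pattern with a plain ConcurrentHashMap (no reference counting; names are illustrative):

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicBoolean;

// Sketch of "putIfAbsent with an insert-only side effect" via computeIfAbsent.
public class PutIfAbsentDemo {
  static final ConcurrentMap<String, String> delegate = new ConcurrentHashMap<>();

  // Returns the previous value, or null if this call inserted the entry.
  static String putIfAbsent(String key, String value) {
    AtomicBoolean absent = new AtomicBoolean(false);
    String existing = delegate.computeIfAbsent(key, k -> {
      // The side effect (e.g. retaining a ref count) runs only when the mapping is created.
      absent.set(true);
      return value;
    });
    return absent.get() ? null : existing;
  }

  public static void main(String[] args) {
    System.out.println(putIfAbsent("k", "v1")); // null (inserted)
    System.out.println(putIfAbsent("k", "v2")); // v1 (already present)
  }
}
```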
hbase_BucketCache_createRecycler_rdh | /**
* <pre>
* Create the {@link Recycler} for {@link BucketEntry#refCnt},which would be used as
* {@link RefCnt#recycler} of {@link HFileBlock#buf} returned from {@link BucketCache#getBlock}.
* NOTE: for {@link BucketCache#getBlock},the {@link RefCnt#recycler} of {@link HFileBlock#buf}
* from {@link BucketCache#backingMap} and {@link BucketCache#ramCache} are different:
* 1.For {@link RefCnt#recycler} of {@link HFileBlock#buf} from {@link BucketCache#backingMap},
* it is the return value of current {@link BucketCache#createRecycler} method.
*
* 2.For {@link RefCnt#recycler} of {@link HFileBlock#buf} from {@link BucketCache#ramCache},
* it is {@link ByteBuffAllocator#putbackBuffer}.
* </pre>
*/
private Recycler createRecycler(final BucketEntry bucketEntry) {
return () -> {
freeBucketEntry(bucketEntry);
return;
};
} | 3.26 |
hbase_BucketCache_evictBlock_rdh | /**
 * Try to evict the block from {@link BlockCache} by force. We'll call this in a few cases:<br>
 * 1. Close an HFile, and clear all cached blocks. <br>
 * 2. Call {@link Admin#clearBlockCache(TableName)} to clear all blocks for a given table.<br>
 * <p>
 * First, we'll try to remove the block from RAMCache, and then try to evict it from backingMap.
 * Here we evict the block from backingMap immediately, but only free the reference from bucket
 * cache by calling {@link BucketEntry#markedAsEvicted}. If there are still RPCs referring to this
 * block, it can only be de-allocated once all of them release it.
 * <p>
 * NOTICE: we need to grab the write offset lock first before releasing the reference from
 * bucket cache. If we don't, we may read a {@link BucketEntry} with refCnt = 0 when calling
 * {@link BucketCache#getBlock(BlockCacheKey, boolean, boolean, boolean)}, which is a memory leak.
*
* @param cacheKey
* Block to evict
* @return true to indicate whether we've evicted successfully or not.
*/
@Override
public boolean evictBlock(BlockCacheKey cacheKey) {
return doEvictBlock(cacheKey, null, false);
} | 3.26 |
hbase_BucketCache_getAllocationFailWarningMessage_rdh | /**
* Prepare and return a warning message for Bucket Allocator Exception
*
* @param fle
* The exception
* @param re
* The RAMQueueEntry for which the exception was thrown.
* @return A warning message created from the input RAMQueueEntry object.
*/
private static String getAllocationFailWarningMessage(final BucketAllocatorException fle, final RAMQueueEntry re) {
final StringBuilder sb = new StringBuilder();
sb.append("Most recent failed allocation after ");
sb.append(ALLOCATION_FAIL_LOG_TIME_PERIOD);
sb.append(" ms;");
if (re != null) {
if (re.getData() instanceof HFileBlock) {
final HFileContext fileContext = ((HFileBlock) (re.getData())).getHFileContext();
final String columnFamily = Bytes.toString(fileContext.getColumnFamily());
final String v60 = Bytes.toString(fileContext.getTableName());
if (v60 != null) {
sb.append(" Table: ");
sb.append(v60);
}
if (columnFamily != null) {
sb.append(" CF: ");
sb.append(columnFamily);
}
sb.append(" HFile: ");
if (fileContext.getHFileName() != null) {
sb.append(fileContext.getHFileName());
} else {
sb.append(re.getKey());
}
} else {
sb.append(" HFile: ");
sb.append(re.getKey());
}
}
sb.append(" Message: ");
sb.append(fle.getMessage());
return sb.toString();
} | 3.26 |
hbase_BucketCache_stopWriterThreads_rdh | /**
* Only used in test
*/
void stopWriterThreads() throws InterruptedException {
for (WriterThread v110 : writerThreads) {
v110.disableWriter();
v110.interrupt();
v110.join();
}
} | 3.26 |
hbase_BucketCache_freeSpace_rdh | /**
 * Free space if the used size reaches acceptableSize() or a block of some size could not be
 * allocated. When freeing space, we use the LRU algorithm and ensure that some blocks are
 * actually evicted.
*
* @param why
* Why we are being called
*/
void freeSpace(final String why) {
// Ensure only one freeSpace progress at a time
if (!freeSpaceLock.tryLock()) {
return;
}
try {
      freeInProgress = true;
      long bytesToFreeWithoutExtra = 0;
// Calculate free byte for each bucketSizeinfo
StringBuilder msgBuffer = (LOG.isDebugEnabled()) ? new StringBuilder() : null;
BucketAllocator[] stats = bucketAllocator.getIndexStatistics();
      long[] bytesToFreeForBucket = new long[stats.length];
for (int i = 0; i < stats.length; i++) {
bytesToFreeForBucket[i] = 0;
long freeGoal = ((long) (Math.floor(stats[i].totalCount() * (1 - minFactor))));
freeGoal = Math.max(freeGoal, 1);
if (stats[i].freeCount() < freeGoal) {
bytesToFreeForBucket[i] = stats[i].itemSize() * (freeGoal - stats[i].freeCount());
bytesToFreeWithoutExtra += bytesToFreeForBucket[i];
if (msgBuffer != null) {
msgBuffer.append(((("Free for bucketSize(" + stats[i].itemSize()) + ")=") + StringUtils.byteDesc(bytesToFreeForBucket[i])) + ", ");
}
}
}
if (msgBuffer != null) {
msgBuffer.append(("Free for total=" + StringUtils.byteDesc(bytesToFreeWithoutExtra)) + ", ");
}
if (bytesToFreeWithoutExtra <= 0) {
return;
}
long v38 = bucketAllocator.getUsedSize();
long totalSize = bucketAllocator.getTotalSize();
if (LOG.isDebugEnabled() && (msgBuffer != null)) {
LOG.debug((((((((("Free started because \"" + why) + "\"; ") + msgBuffer.toString())
+ " of current used=") + StringUtils.byteDesc(v38)) + ", actual cacheSize=") + StringUtils.byteDesc(realCacheSize.sum())) + ", total=") + StringUtils.byteDesc(totalSize));
}
      long bytesToFreeWithExtra = (long) Math.floor(bytesToFreeWithoutExtra * (1 + extraFreeFactor));
// Instantiate priority buckets
BucketEntryGroup bucketSingle = new BucketEntryGroup(bytesToFreeWithExtra, blockSize, getPartitionSize(singleFactor));
BucketEntryGroup bucketMulti = new BucketEntryGroup(bytesToFreeWithExtra, blockSize, getPartitionSize(multiFactor));
BucketEntryGroup bucketMemory = new BucketEntryGroup(bytesToFreeWithExtra, blockSize, getPartitionSize(memoryFactor));
// Scan entire map putting bucket entry into appropriate bucket entry
// group
for (Map.Entry<BlockCacheKey, BucketEntry> bucketEntryWithKey : backingMap.entrySet()) {
        switch (bucketEntryWithKey.getValue().getPriority()) {
case SINGLE :
{
bucketSingle.add(bucketEntryWithKey);
break;
}
case MULTI :
{
bucketMulti.add(bucketEntryWithKey);
break;
}
case MEMORY :
{
bucketMemory.add(bucketEntryWithKey);
break;
}
}
}
PriorityQueue<BucketEntryGroup> bucketQueue = new PriorityQueue<>(3, Comparator.comparingLong(BucketEntryGroup::overflow));
bucketQueue.add(bucketSingle);
bucketQueue.add(bucketMulti);
bucketQueue.add(bucketMemory);
int remainingBuckets = bucketQueue.size();
long bytesFreed = 0;
      BucketEntryGroup bucketGroup;
      while ((bucketGroup = bucketQueue.poll()) != null) {
        long v49 = bucketGroup.overflow();
        if (v49 > 0) {
          long bucketBytesToFree = Math.min(v49, (bytesToFreeWithoutExtra - bytesFreed) / remainingBuckets);
          bytesFreed += bucketGroup.free(bucketBytesToFree);
}
remainingBuckets--;
}
// Check and free if there are buckets that still need freeing of space
if (bucketSizesAboveThresholdCount(minFactor) > 0) {
bucketQueue.clear();
remainingBuckets = 3;
bucketQueue.add(bucketSingle);
bucketQueue.add(bucketMulti);
bucketQueue.add(bucketMemory);
while ((bucketGroup = bucketQueue.poll()) != null) {
long bucketBytesToFree = (bytesToFreeWithExtra - bytesFreed) / remainingBuckets;
bytesFreed += bucketGroup.free(bucketBytesToFree);
remainingBuckets--;
}
}
      // Even after the above free we might still need more freeing because of
      // de-fragmentation of the buckets (also called the "slab calcification" problem):
      // some buckets may be very sparsely occupied and thus do not yield free space for
      // the other bucket sizes. The fix is to evict some buckets entirely, which we do by
      // evicting the buckets that are least filled.
freeEntireBuckets(DEFAULT_FREE_ENTIRE_BLOCK_FACTOR * bucketSizesAboveThresholdCount(1.0F));
      if (LOG.isDebugEnabled()) {
        long single = bucketSingle.totalSize();
        long multi = bucketMulti.totalSize();
        long memory = bucketMemory.totalSize();
        LOG.debug("Bucket cache free space completed; " + "freed=" + StringUtils.byteDesc(bytesFreed)
          + ", " + "total=" + StringUtils.byteDesc(totalSize) + ", " + "single=" + StringUtils.byteDesc(single)
          + ", " + "multi=" + StringUtils.byteDesc(multi) + ", " + "memory=" + StringUtils.byteDesc(memory));
      }
} catch (Throwable t) {
LOG.warn("Failed freeing space", t);
} finally {
cacheStats.evict();
freeInProgress = false;
freeSpaceLock.unlock();
}
} | 3.26 |
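freeSpace splits the backing map into three priority groups and then frees from a min-heap ordered by each group's "overflow" (how far it is over its partition budget), polling the smallest overflow first and giving each remaining group an even share of the bytes still to be freed, capped at its own overflow. A compact sketch of that distribution step with a plain PriorityQueue; `Group` is a hypothetical stand-in for BucketEntryGroup and the numbers are illustrative:

```java
import java.util.Comparator;
import java.util.PriorityQueue;

// Sketch of the "even share of remaining bytes, smallest overflow first" loop in freeSpace.
public class FreeSpaceDemo {
  record Group(String name, long overflow) {
    long free(long bytes) {
      long freed = Math.min(bytes, overflow); // pretend we evicted this many bytes
      System.out.println(name + " freed " + freed);
      return freed;
    }
  }

  public static void main(String[] args) {
    PriorityQueue<Group> queue = new PriorityQueue<>(3, Comparator.comparingLong(Group::overflow));
    queue.add(new Group("single", 100));
    queue.add(new Group("multi", 300));
    queue.add(new Group("memory", 50));

    long bytesToFree = 240;
    long bytesFreed = 0;
    int remaining = queue.size();
    Group g;
    while ((g = queue.poll()) != null) {
      if (g.overflow() > 0) {
        long share = (bytesToFree - bytesFreed) / remaining; // even split of what's left
        bytesFreed += g.free(Math.min(g.overflow(), share));
      }
      remaining--;
    }
    System.out.println("total freed=" + bytesFreed); // 240
  }
}
```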
hbase_BucketCache_evictBucketEntryIfNoRpcReferenced_rdh | /**
* Evict {@link BlockCacheKey} and its corresponding {@link BucketEntry} only if
* {@link BucketEntry#isRpcRef} is false. <br/>
 * NOTE: when evicting from {@link BucketCache#backingMap}, only the matched {@link BlockCacheKey}
 * and {@link BucketEntry} pair will be removed.
*
* @param blockCacheKey
* {@link BlockCacheKey} to evict.
* @param bucketEntry
* {@link BucketEntry} matched {@link BlockCacheKey} to evict.
* @return true to indicate whether we've evicted successfully or not.
*/
boolean evictBucketEntryIfNoRpcReferenced(BlockCacheKey blockCacheKey, BucketEntry bucketEntry) {
if (!bucketEntry.isRpcRef()) {
return doEvictBlock(blockCacheKey, bucketEntry, true);
}
return false;
} | 3.26 |
hbase_BucketCache_checkIOErrorIsTolerated_rdh | /**
* Check whether we tolerate IO error this time. If the duration of IOEngine throwing errors
* exceeds ioErrorsDurationTimeTolerated, we will disable the cache
*/
  private void checkIOErrorIsTolerated() {
    long now = EnvironmentEdgeManager.currentTime();
// Do a single read to a local variable to avoid timing issue - HBASE-24454
long ioErrorStartTimeTmp = this.ioErrorStartTime;
if (ioErrorStartTimeTmp > 0) {
if (cacheEnabled && ((now - ioErrorStartTimeTmp) > this.ioErrorsTolerationDuration)) {
LOG.error(("IO errors duration time has exceeded " + ioErrorsTolerationDuration) + "ms, disabling cache, please check your IOEngine");
disableCache();
}
} else {
this.ioErrorStartTime = now;
}
} | 3.26 |
hbase_BucketCache_disableCache_rdh | /**
* Used to shut down the cache -or- turn it off in the case of something broken.
*/
private void disableCache() {
    if (!cacheEnabled) {
      return;
    }
LOG.info("Disabling cache");
cacheEnabled = false;
ioEngine.shutdown();
this.scheduleThreadPool.shutdown();
    for (int i = 0; i < writerThreads.length; ++i) {
      writerThreads[i].interrupt();
    }
this.ramCache.clear();
if ((!ioEngine.isPersistent()) || (persistencePath == null)) {
// If persistent ioengine and a path, we will serialize out the backingMap.
this.backingMap.clear();
this.fullyCachedFiles.clear();
this.regionCachedSizeMap.clear();
}
} | 3.26 |
hbase_BucketCache_m1_rdh | /**
* Get the buffer of the block with the specified key.
*
* @param key
* block's cache key
* @param caching
* true if the caller caches blocks on cache misses
* @param repeat
* Whether this is a repeat lookup for the same block
* @param updateCacheMetrics
* Whether we should update cache metrics or not
* @return buffer of specified cache key, or null if not in cache
*/
  @Override
  public Cacheable m1(BlockCacheKey key, boolean caching, boolean repeat, boolean updateCacheMetrics) {
if (!cacheEnabled) {
return null;
}
RAMQueueEntry re = ramCache.get(key);
if (re != null) {
if (updateCacheMetrics) {
cacheStats.hit(caching, key.isPrimary(), key.getBlockType());
}
re.access(accessCount.incrementAndGet());
return re.getData();
}
BucketEntry v14 = backingMap.get(key);
if (v14 != null) {
long start = System.nanoTime();
ReentrantReadWriteLock lock = offsetLock.getLock(v14.offset());
try {
lock.readLock().lock();
// We can not read here even if backingMap does contain the given key because its offset
// maybe changed. If we lock BlockCacheKey instead of offset, then we can only check
// existence here.
if (v14.equals(backingMap.get(key))) {
// Read the block from IOEngine based on the bucketEntry's offset and length, NOTICE: the
// block will use the refCnt of bucketEntry, which means if two HFileBlock mapping to
// the same BucketEntry, then all of the three will share the same refCnt.
Cacheable cachedBlock = ioEngine.read(v14);
if (ioEngine.usesSharedMemory()) {
// If IOEngine use shared memory, cachedBlock and BucketEntry will share the
// same RefCnt, do retain here, in order to count the number of RPC references
cachedBlock.retain();
}
// Update the cache statistics.
if (updateCacheMetrics) {
cacheStats.hit(caching, key.isPrimary(), key.getBlockType());
cacheStats.ioHit(System.nanoTime() - start);
}
v14.access(accessCount.incrementAndGet());
if (this.ioErrorStartTime > 0) {
ioErrorStartTime = -1;
}
return cachedBlock;
}
} catch (HBaseIOException hioex) {
// When using file io engine persistent cache,
// the cache map state might differ from the actual cache. If we reach this block,
// we should remove the cache key entry from the backing map
backingMap.remove(key);
fullyCachedFiles.remove(key.getHfileName());
LOG.debug("Failed to fetch block for cache key: {}.", key, hioex);
} catch (IOException ioex) {
LOG.error(("Failed reading block " + key) + " from bucket cache", ioex);
checkIOErrorIsTolerated();
} finally {
lock.readLock().unlock();
}
}
if ((!repeat) && updateCacheMetrics) {
cacheStats.miss(caching, key.isPrimary(), key.getBlockType());
}
    return null;
} | 3.26 |
hbase_BucketCache_deleteFileOnClose_rdh | /**
* Create an input stream that deletes the file after reading it. Use in try-with-resources to
* avoid this pattern where an exception thrown from a finally block may mask earlier exceptions:
*
* <pre>
* File f = ...
* try (FileInputStream fis = new FileInputStream(f)) {
* // use the input stream
* } finally {
* if (!f.delete()) throw new IOException("failed to delete");
* }
* </pre>
*
* @param file
* the file to read and delete
* @return a FileInputStream for the given file
* @throws IOException
* if there is a problem creating the stream
*/
private FileInputStream deleteFileOnClose(final File file) throws IOException {
return new FileInputStream(file) {
private File myFile;
private FileInputStream init(File file) {
myFile = file;
return this;
}
@Override
public void close() throws IOException {
// close() will be called during try-with-resources and it will be
// called by finalizer thread during GC. To avoid double-free resource,
// set myFile to null after the first call.
        if (myFile == null) {
          return;
        }
        super.close();
        if (!myFile.delete()) {
          throw new IOException("Failed deleting persistence file " + myFile.getAbsolutePath());
        }
        myFile = null;
      }
    }.init(file);
} | 3.26 |
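The anonymous FileInputStream subclass above ties deletion to the first close(). An alternative sketch with the same contract (delete on first close, throw if deletion fails) can be written as a FilterInputStream wrapper, which avoids subclassing FileInputStream; this is an illustrative variant, not the HBase implementation:

```java
import java.io.File;
import java.io.FileInputStream;
import java.io.FilterInputStream;
import java.io.IOException;

// Sketch: an InputStream that deletes its backing file on the first close().
public class DeleteOnCloseInputStream extends FilterInputStream {
  private File file;

  public DeleteOnCloseInputStream(File file) throws IOException {
    super(new FileInputStream(file));
    this.file = file;
  }

  @Override
  public synchronized void close() throws IOException {
    if (file == null) {
      return; // already closed once; avoid a double delete
    }
    super.close();
    if (!file.delete()) {
      throw new IOException("Failed deleting file " + file.getAbsolutePath());
    }
    file = null;
  }
}
```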
hbase_BucketCache_evictBlockIfNoRpcReferenced_rdh | /**
* NOTE: This method is only for test.
*/
public boolean evictBlockIfNoRpcReferenced(BlockCacheKey blockCacheKey) {
BucketEntry bucketEntry = backingMap.get(blockCacheKey);
if (bucketEntry == null) {
      return false;
    }
return evictBucketEntryIfNoRpcReferenced(blockCacheKey, bucketEntry);
} | 3.26 |
hbase_BucketCache_getRAMQueueEntries_rdh | /**
* Blocks until elements available in {@code q} then tries to grab as many as possible before
* returning.
*
* @param receptacle
* Where to stash the elements taken from queue. We clear before we use it just
* in case.
* @param q
* The queue to take from.
* @return {@code receptacle} laden with elements taken from the queue or empty if none found.
*/
static List<RAMQueueEntry> getRAMQueueEntries(BlockingQueue<RAMQueueEntry> q, List<RAMQueueEntry> receptacle) throws InterruptedException {
// Clear sets all entries to null and sets size to 0. We retain allocations. Presume it
// ok even if list grew to accommodate thousands.
receptacle.clear();
receptacle.add(q.take());
q.drainTo(receptacle);
return receptacle;
} | 3.26 |
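The take-then-drain pattern above blocks for the first element and then opportunistically grabs everything else already queued in a single call. A minimal standalone sketch with a plain BlockingQueue of strings (illustrative types only):

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

// Sketch of the take()+drainTo() batching used by getRAMQueueEntries.
public class TakeThenDrainDemo {
  static List<String> takeBatch(BlockingQueue<String> q, List<String> receptacle)
      throws InterruptedException {
    receptacle.clear();          // reuse the caller's list, as in the original
    receptacle.add(q.take());    // block until at least one element is available
    q.drainTo(receptacle);       // then grab whatever else is already queued
    return receptacle;
  }

  public static void main(String[] args) throws InterruptedException {
    BlockingQueue<String> q = new LinkedBlockingQueue<>(List.of("a", "b", "c"));
    System.out.println(takeBatch(q, new ArrayList<>())); // [a, b, c]
  }
}
```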
hbase_BucketCache_doEvictBlock_rdh | /**
* Evict the {@link BlockCacheKey} and {@link BucketEntry} from {@link BucketCache#backingMap} and
* {@link BucketCache#ramCache}. <br/>
 * NOTE: when evicting from {@link BucketCache#backingMap}, only the matched {@link BlockCacheKey}
 * and {@link BucketEntry} pair will be removed.
*
* @param cacheKey
* {@link BlockCacheKey} to evict.
* @param bucketEntry
* {@link BucketEntry} matched {@link BlockCacheKey} to evict.
* @return true to indicate whether we've evicted successfully or not.
*/
  private boolean doEvictBlock(BlockCacheKey cacheKey, BucketEntry bucketEntry, boolean evictedByEvictionProcess) {
if (!cacheEnabled) {
return false;
}
boolean existedInRamCache = removeFromRamCache(cacheKey);
if (bucketEntry == null) {
bucketEntry = backingMap.get(cacheKey);
}
final BucketEntry bucketEntryToUse = bucketEntry;
if (bucketEntryToUse == null) {
if (existedInRamCache && evictedByEvictionProcess) {
cacheStats.evicted(0, cacheKey.isPrimary());
}
return existedInRamCache;
} else {
return bucketEntryToUse.withWriteLock(offsetLock, () -> {
if (backingMap.remove(cacheKey, bucketEntryToUse)) {
LOG.debug("removed key {} from back map in the evict process", cacheKey);
blockEvicted(cacheKey, bucketEntryToUse, !existedInRamCache, evictedByEvictionProcess);
return true;
}
return false;
});
}
} | 3.26 |
hbase_BucketCache_persistToFile_rdh | /**
*
* @see #retrieveFromFile(int[])
*/
@SuppressWarnings(value = "OBL_UNSATISFIED_OBLIGATION", justification = "false positive, try-with-resources ensures close is called.")
  void persistToFile() throws IOException {
    LOG.debug("Thread {} started persisting bucket cache to file", Thread.currentThread().getName());
    if (!isCachePersistent()) {
      throw new IOException("Attempt to persist non-persistent cache mappings!");
    }
File tempPersistencePath = new File(persistencePath + EnvironmentEdgeManager.currentTime());
try (FileOutputStream fos = new FileOutputStream(tempPersistencePath, false)) {
fos.write(ProtobufMagic.PB_MAGIC);
BucketProtoUtils.toPB(this).writeDelimitedTo(fos);
} catch (IOException e) {
LOG.error("Failed to persist bucket cache to file", e);
throw e;
}
LOG.debug("Thread {} finished persisting bucket cache to file, renaming", Thread.currentThread().getName());
if (!tempPersistencePath.renameTo(new File(persistencePath))) {
LOG.warn("Failed to commit cache persistent file. We might lose cached blocks if " + "RS crashes/restarts before we successfully checkpoint again.");
}
} | 3.26 |
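persistToFile writes the snapshot to a temporary file and then renames it over the real path, so a crash mid-write never leaves a truncated persistence file in place. Here is a sketch of that temp-write-then-rename idea in plain java.nio; names are illustrative and ATOMIC_MOVE is used on the assumption that the underlying filesystem supports it:

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Sketch of the "write to a temp file, then swap it into place" pattern behind persistToFile.
public class AtomicPersistDemo {
  static void persist(Path target, byte[] payload) throws IOException {
    Path temp = target.resolveSibling(target.getFileName() + "." + System.currentTimeMillis());
    Files.write(temp, payload); // write the full snapshot to the temp file first
    // Swap it into place; ATOMIC_MOVE makes the replacement all-or-nothing where supported.
    Files.move(temp, target, StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
  }

  public static void main(String[] args) throws IOException {
    Path target = Files.createTempDirectory("persist-demo").resolve("cache.persistence");
    persist(target, "snapshot-bytes".getBytes());
    System.out.println(Files.readString(target)); // snapshot-bytes
  }
}
```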
hbase_BucketCache_freeEntireBuckets_rdh | /**
 * This method will find the buckets that are minimally occupied and are not reference counted,
 * and will free them completely without any constraint on the access times of their elements.
 * It will completely free at most the number of buckets passed in; sometimes it may free fewer
 * because of changing refCounts.
*
* @param completelyFreeBucketsNeeded
* number of buckets to free
*/
  private void freeEntireBuckets(int completelyFreeBucketsNeeded) {
if (completelyFreeBucketsNeeded != 0) {
// First we will build a set where the offsets are reference counted, usually
// this set is small around O(Handler Count) unless something else is wrong
Set<Integer> inUseBuckets = new HashSet<>();
backingMap.forEach((k, be) -> {
if (be.isRpcRef()) {
inUseBuckets.add(bucketAllocator.getBucketIndex(be.offset()));
}
});
Set<Integer> candidateBuckets = bucketAllocator.getLeastFilledBuckets(inUseBuckets, completelyFreeBucketsNeeded);
for (Map.Entry<BlockCacheKey, BucketEntry> entry : backingMap.entrySet()) {
if (candidateBuckets.contains(bucketAllocator.getBucketIndex(entry.getValue().offset()))) {
evictBucketEntryIfNoRpcReferenced(entry.getKey(), entry.getValue());
}
}
}
} | 3.26 |
hbase_BucketCache_doDrain_rdh | /**
 * Flush the entries in ramCache to the IOEngine and add a bucket entry to backingMap. Process all
 * entries passed in, even on failure, being sure to remove them from ramCache; otherwise we will
 * never undo the references and we will OOME.
*
* @param entries
* Presumes list passed in here will be processed by this invocation only. No
* interference expected.
*/
void doDrain(final List<RAMQueueEntry> entries, ByteBuffer metaBuff) throws InterruptedException {
if (entries.isEmpty()) {
return;
}
// This method is a little hard to follow. We run through the passed in entries and for each
// successful add, we add a non-null BucketEntry to the below bucketEntries. Later we must
// do cleanup making sure we've cleared ramCache of all entries regardless of whether we
// successfully added the item to the bucketcache; if we don't do the cleanup, we'll OOME by
// filling ramCache. We do the clean up by again running through the passed in entries
// doing extra work when we find a non-null bucketEntries corresponding entry.
final int size = entries.size();
    BucketEntry[] bucketEntries = new BucketEntry[size];
    // Index updated inside loop if success or if we can't succeed. We retry if cache is full
// when we go to add an entry by going around the loop again without upping the index.
int index = 0;
while (cacheEnabled && (index < size)) {
RAMQueueEntry re = null;
try {
        re = entries.get(index);
if (re == null) {
LOG.warn("Couldn't get entry or changed on us; who else is messing with it?");
index++;
continue;
}
BlockCacheKey cacheKey = re.getKey();
        if (ramCache.containsKey(cacheKey)) {
          blocksByHFile.add(cacheKey);
        }
        // Reset the position for reuse.
// It should be guaranteed that the data in the metaBuff has been transferred to the
// ioEngine safely. Otherwise, this reuse is problematic. Fortunately, the data is already
// transferred with our current IOEngines. Should take care, when we have new kinds of
// IOEngine in the future.
metaBuff.clear();
BucketEntry bucketEntry = re.writeToCache(ioEngine, bucketAllocator, realCacheSize, this::createRecycler, metaBuff);
// Successfully added. Up index and add bucketEntry. Clear io exceptions.
bucketEntries[index] = bucketEntry;
if (ioErrorStartTime > 0) {
ioErrorStartTime = -1;
}
index++;
} catch (BucketAllocatorException fle) {
long currTs = EnvironmentEdgeManager.currentTime();
        cacheStats.allocationFailed(); // Record the warning.
        if ((allocFailLogPrevTs == 0) || ((currTs - allocFailLogPrevTs) > ALLOCATION_FAIL_LOG_TIME_PERIOD)) {
          LOG.warn(getAllocationFailWarningMessage(fle, re));
          allocFailLogPrevTs = currTs;
}
// Presume can't add. Too big? Move index on. Entry will be cleared from ramCache below.
bucketEntries[index] = null;
index++;
} catch (CacheFullException cfe) {
// Cache full when we tried to add. Try freeing space and then retrying (don't up index)
if (!freeInProgress) {
freeSpace("Full!");
} else {
Thread.sleep(50);
}
} catch (IOException ioex) {
// Hopefully transient. Retry. checkIOErrorIsTolerated disables cache if problem.
LOG.error("Failed writing to bucket cache", ioex);
checkIOErrorIsTolerated();
}
}
// Make sure data pages are written on media before we update maps.
try {
ioEngine.sync();
    } catch (IOException ioex) {
      LOG.error("Failed syncing IO engine", ioex);
checkIOErrorIsTolerated();
// Since we failed sync, free the blocks in bucket allocator
      for (int i = 0; i < entries.size(); ++i) {
        BucketEntry bucketEntry = bucketEntries[i];
if (bucketEntry != null) {
bucketAllocator.freeBlock(bucketEntry.offset(), bucketEntry.getLength());
bucketEntries[i] = null;
}
}
}
// Now add to backingMap if successfully added to bucket cache. Remove from ramCache if
// success or error.
for (int i = 0; i < size; ++i) {
BlockCacheKey key = entries.get(i).getKey();
// Only add if non-null entry.
if (bucketEntries[i] != null) {
putIntoBackingMap(key, bucketEntries[i]);
if (ioEngine.isPersistent()) {
setCacheInconsistent(true);
}
}
// Always remove from ramCache even if we failed adding it to the block cache above.
boolean existed = ramCache.remove(key,
re -> {
if (re != null) {
heapSize.add((-1) * re.getData().heapSize());
}
});
if ((!existed) && (bucketEntries[i] != null)) {
// Block should have already been evicted. Remove it and free space.
final BucketEntry bucketEntry = bucketEntries[i];
bucketEntry.withWriteLock(offsetLock, () -> {
if (backingMap.remove(key, bucketEntry)) {
blockEvicted(key, bucketEntry, false, false);
}
return null;
});
}
}
long used = bucketAllocator.getUsedSize();
if (used > acceptableSize()) {
freeSpace((("Used=" + used) + " > acceptable=") + acceptableSize());
}
return;
} | 3.26 |
hbase_BucketCache_startWriterThreads_rdh | /**
* Called by the constructor to start the writer threads. Used by tests that need to override
* starting the threads.
*/
  protected void startWriterThreads() {
    for (WriterThread thread : writerThreads) {
thread.start();
}
} | 3.26 |
hbase_BucketCache_finalize_rdh | /**
* Needed mostly for UTs that might run in the same VM and create different BucketCache instances
* on different UT methods.
*/
@Override
protected void finalize() {
if ((cachePersister != null) && (!cachePersister.isInterrupted())) {
cachePersister.interrupt();
}
} | 3.26 |
hbase_BucketCache_remove_rdh | /**
 * A {@link Consumer} is used here because once the removed entry releases its reference count,
 * its ByteBuffers may be recycled, and accessing it outside this method would throw an exception.
 * The consumer accesses the entry to remove before its reference count is released.
 * Note: do not change its reference count inside the {@link Consumer}.
*/
public boolean remove(BlockCacheKey key, Consumer<RAMQueueEntry> action) {
RAMQueueEntry previous = delegate.remove(key);
action.accept(previous);
if (previous != null) {
previous.getData().release();
}
return previous != null;
} | 3.26 |
hbase_BucketCache_freeBucketEntry_rdh | /**
 * Actually free the {@link BucketEntry}; this may only be invoked when
 * {@link BucketEntry#refCnt} becomes 0.
*/
void freeBucketEntry(BucketEntry bucketEntry) {
bucketAllocator.freeBlock(bucketEntry.offset(), bucketEntry.getLength());
realCacheSize.add((-1) * bucketEntry.getLength());
} | 3.26 |
hbase_BucketCache_getIOEngineFromName_rdh | /**
* Get the IOEngine from the IO engine name
*
* @return the IOEngine
*/
private IOEngine getIOEngineFromName(String ioEngineName, long capacity, String persistencePath) throws IOException {
if (ioEngineName.startsWith("file:") || ioEngineName.startsWith("files:")) {// In order to make the usage simple, we only need the prefix 'files:' in
// document whether one or multiple file(s), but also support 'file:' for
// the compatibility
String[] filePaths = ioEngineName.substring(ioEngineName.indexOf(":") + 1).split(FileIOEngine.FILE_DELIMITER);
return new FileIOEngine(capacity, persistencePath != null, filePaths);
} else if (ioEngineName.startsWith("offheap")) {
return new ByteBufferIOEngine(capacity);
} else if (ioEngineName.startsWith("mmap:")) {
return new ExclusiveMemoryMmapIOEngine(ioEngineName.substring(5), capacity);
} else if (ioEngineName.startsWith("pmem:")) {
// This mode of bucket cache creates an IOEngine over a file on the persistent memory
// device. Since the persistent memory device has its own address space the contents
// mapped to this address space does not get swapped out like in the case of mmapping
// on to DRAM. Hence the cells created out of the hfile blocks in the pmem bucket cache
// can be directly referred to without having to copy them onheap. Once the RPC is done,
// the blocks can be returned back as in case of ByteBufferIOEngine.
return new SharedMemoryMmapIOEngine(ioEngineName.substring(5), capacity);
} else {
throw new IllegalArgumentException("Don't understand io engine name for cache- prefix with file:, files:, mmap: or offheap");
}
} | 3.26 |
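getIOEngineFromName dispatches on a textual prefix of the configured engine name. Below is a minimal sketch of that prefix-based dispatch using an enum in place of the real IOEngine implementations (the engine classes and their constructors are HBase internals, so only the parsing is shown; the comma delimiter for file paths is an assumption of this sketch):

```java
// Sketch of the io-engine-name prefix dispatch in getIOEngineFromName; the enum is illustrative.
public class IoEngineNameParser {
  enum EngineKind { FILE, OFFHEAP, MMAP, PMEM }

  static EngineKind parse(String ioEngineName) {
    if (ioEngineName.startsWith("file:") || ioEngineName.startsWith("files:")) {
      // everything after the first ':' names the backing file path(s)
      String paths = ioEngineName.substring(ioEngineName.indexOf(':') + 1);
      System.out.println("file paths: " + paths);
      return EngineKind.FILE;
    } else if (ioEngineName.startsWith("offheap")) {
      return EngineKind.OFFHEAP;
    } else if (ioEngineName.startsWith("mmap:")) {
      return EngineKind.MMAP;
    } else if (ioEngineName.startsWith("pmem:")) {
      return EngineKind.PMEM;
    }
    throw new IllegalArgumentException("Unrecognized io engine name: " + ioEngineName);
  }

  public static void main(String[] args) {
    System.out.println(parse("files:/mnt/ssd1/bucket.cache,/mnt/ssd2/bucket.cache")); // FILE
    System.out.println(parse("offheap")); // OFFHEAP
  }
}
```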
hbase_RpcClientFactory_createClient_rdh | /**
* Creates a new RpcClient by the class defined in the configuration or falls back to
* RpcClientImpl
*
* @param conf
* configuration
* @param clusterId
* the cluster id
* @param localAddr
* client socket bind address.
* @param metrics
* the connection metrics
* @return newly created RpcClient
*/
public static RpcClient createClient(Configuration conf, String clusterId, SocketAddress localAddr, MetricsConnection metrics, Map<String, byte[]> connectionAttributes) {
String rpcClientClass = getRpcClientClass(conf);
return ReflectionUtils.instantiateWithCustomCtor(rpcClientClass, new Class[]{ Configuration.class, String.class, SocketAddress.class, MetricsConnection.class, Map.class }, new Object[]{ conf, clusterId, localAddr, metrics, connectionAttributes });
} | 3.26 |
hbase_RegionPlacementMaintainer_transform_rdh | /**
* Copy a given matrix into a new matrix, transforming each row index and each column index
* according to the randomization scheme that was created at construction time.
*
* @param matrix
* the cost matrix to transform
* @return a new matrix with row and column indices transformed
 */
public float[][] transform(float[][] matrix) {
  float[][] result = new float[rows][cols];
  for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
result[rowTransform[i]][colTransform[j]] = matrix[i][j];
}
}
return result;
} | 3.26 |
hbase_RegionPlacementMaintainer_checkDifferencesWithOldPlan_rdh | /**
 * Compares two plans and checks whether the locality dropped or increased (prints the information
 * as a string); also prints the baseline locality.
*
* @param movesPerTable
* - how many primary regions will move per table
* @param regionLocalityMap
* - locality map from FS
* @param newPlan
* - new assignment plan
*/
  public void checkDifferencesWithOldPlan(Map<TableName, Integer> movesPerTable, Map<String, Map<String, Float>> regionLocalityMap, FavoredNodesPlan newPlan) throws IOException {
// localities for primary, secondary and tertiary
    SnapshotOfRegionAssignmentFromMeta snapshot = this.getRegionAssignmentSnapshot();
    FavoredNodesPlan oldPlan = snapshot.getExistingAssignmentPlan();
Set<TableName> tables = snapshot.getTableSet();
Map<TableName, List<RegionInfo>> tableToRegionsMap = snapshot.getTableToRegionMap();
for (TableName table : tables) {
float[] deltaLocality = new float[3];
float[] locality = new float[3];
if ((!this.targetTableSet.isEmpty()) && (!this.targetTableSet.contains(table))) {
continue;
}
List<RegionInfo> regions = tableToRegionsMap.get(table);
System.out.println("==================================================");System.out.println("Assignment Plan Projection Report For Table: " + table);
System.out.println("\t Total regions: " + regions.size());
System.out.println(("\t" + movesPerTable.get(table)) + " primaries will move due to their primary has changed");
for (RegionInfo currentRegion : regions) {
Map<String, Float> regionLocality = regionLocalityMap.get(currentRegion.getEncodedName());
        if (regionLocality == null) {
continue;
}
List<ServerName> oldServers = oldPlan.getFavoredNodes(currentRegion);
List<ServerName> newServers = newPlan.getFavoredNodes(currentRegion);
        if ((newServers != null) && (oldServers != null)) {
          int i = 0;
          for (FavoredNodesPlan.Position p : FavoredNodesPlan.Position.values()) {
ServerName newServer = newServers.get(p.ordinal());
ServerName oldServer = oldServers.get(p.ordinal());
Float oldLocality = 0.0F;
            if (oldServers != null) {
              oldLocality = regionLocality.get(oldServer.getHostname());
              if (oldLocality == null) {
                oldLocality = 0.0F;
              }
              locality[i] += oldLocality;
            }
Float newLocality = regionLocality.get(newServer.getHostname());
if (newLocality == null) {
newLocality = 0.0F;
}
deltaLocality[i] += newLocality - oldLocality;
i++;
}
}
}
      DecimalFormat v147 = new DecimalFormat("#.##");
      for (int v148 = 0; v148 < deltaLocality.length; v148++) {
        System.out.print("\t\t Baseline locality for ");
        if (v148 == 0) {
          System.out.print("primary ");
        } else if (v148 == 1) {
          System.out.print("secondary ");
        } else if (v148 == 2) {
          System.out.print("tertiary ");
        }
System.out.println(v147.format((100 * locality[v148]) / regions.size()) + "%");
System.out.print("\t\t Locality will change with the new plan: ");
System.out.println(v147.format((100 * deltaLocality[v148]) / regions.size()) + "%");
}
System.out.println("\t Baseline dispersion");
      printDispersionScores(table, snapshot, regions.size(), null, true);
System.out.println("\t Projected dispersion");
printDispersionScores(table, snapshot, regions.size(), newPlan, true);
}
} | 3.26 |
hbase_RegionPlacementMaintainer_m0_rdh | /**
* Copy a given matrix into a new matrix, transforming each row index and each column index
* according to the inverse of the randomization scheme that was created at construction time.
*
* @param matrix
* the cost matrix to be inverted
* @return a new matrix with row and column indices inverted
*/
public float[][] m0(float[][] matrix) {
float[][] result = new float[rows][cols];
for (int i = 0; i < rows; i++) {
    for (int j = 0; j < cols; j++) {
      result[rowInverse[i]][colInverse[j]] = matrix[i][j];
}
}
return result;
} | 3.26 |
hbase_RegionPlacementMaintainer_printAssignmentPlan_rdh | /**
* Print the assignment plan to the system output stream
*/
public static void printAssignmentPlan(FavoredNodesPlan plan) {
    if (plan == null) {
      return;
    }
LOG.info("========== Start to print the assignment plan ================");
// sort the map based on region info
Map<String, List<ServerName>> assignmentMap = new TreeMap<>(plan.getAssignmentMap());
for (Map.Entry<String, List<ServerName>> entry : assignmentMap.entrySet()) {
String serverList = FavoredNodeAssignmentHelper.getFavoredNodesAsString(entry.getValue());
String regionName = entry.getKey();
LOG.info("Region: " + regionName);
LOG.info("Its favored nodes: " + serverList);
}
LOG.info("========== Finish to print the assignment plan ================");} | 3.26 |
hbase_RegionPlacementMaintainer_m1_rdh | /**
* Update the assignment plan into hbase:meta
*
* @param plan
* the assignments plan to be updated into hbase:meta
* @throws IOException
* if cannot update assignment plan in hbase:meta
*/
public void m1(FavoredNodesPlan plan) throws IOException {
try {
LOG.info("Start to update the hbase:meta with the new assignment plan");
Map<String, List<ServerName>> assignmentMap = plan.getAssignmentMap();
Map<RegionInfo, List<ServerName>> planToUpdate = new HashMap<>(assignmentMap.size());
Map<String, RegionInfo> regionToRegionInfoMap = getRegionAssignmentSnapshot().getRegionNameToRegionInfoMap();
for (Map.Entry<String, List<ServerName>> entry : assignmentMap.entrySet()) {
planToUpdate.put(regionToRegionInfoMap.get(entry.getKey()), entry.getValue());
}
FavoredNodeAssignmentHelper.updateMetaWithFavoredNodesInfo(planToUpdate, conf);
LOG.info("Updated the hbase:meta with the new assignment plan");
} catch (Exception e) {
LOG.error(("Failed to update hbase:meta with the new assignment" + "plan because ") + e.getMessage());
}
} | 3.26 |
hbase_RegionPlacementMaintainer_verifyRegionPlacement_rdh | /**
* Verify the region placement is consistent with the assignment plan
*/
public List<AssignmentVerificationReport> verifyRegionPlacement(boolean isDetailMode) throws IOException {
System.out.println("Start to verify the region assignment and " + "generate the verification report");
// Get the region assignment snapshot
SnapshotOfRegionAssignmentFromMeta snapshot = this.getRegionAssignmentSnapshot();
// Get all the tables
Set<TableName> tables = snapshot.getTableSet();
// Get the region locality map
    Map<String, Map<String, Float>> regionLocalityMap = null;
    if (this.enforceLocality) {
      regionLocalityMap = FSUtils.getRegionDegreeLocalityMappingFromFS(conf);
    }
List<AssignmentVerificationReport> reports = new ArrayList<>();
// Iterate all the tables to fill up the verification report
for (TableName table : tables) {
if ((!this.targetTableSet.isEmpty()) && (!this.targetTableSet.contains(table))) {
continue;
}
AssignmentVerificationReport report = new AssignmentVerificationReport();
report.fillUp(table, snapshot, regionLocalityMap);
report.print(isDetailMode);
reports.add(report);
}
return reports;
} | 3.26 |
hbase_RegionPlacementMaintainer_getFavoredNodeList_rdh | /**
*
* @param favoredNodesStr
* The String of favored nodes
* @return the list of ServerName for the byte array of favored nodes.
*/
  public static List<ServerName> getFavoredNodeList(String favoredNodesStr) {
    String[] v169 = StringUtils.split(favoredNodesStr, ",");
    if (v169 == null) {
      return null;
    }
    List<ServerName> serverList = new ArrayList<>();
    for (String hostNameAndPort : v169) {
serverList.add(ServerName.valueOf(hostNameAndPort, ServerName.NON_STARTCODE));
}
return serverList;
} | 3.26 |
hbase_RegionPlacementMaintainer_invertIndices_rdh | /**
* Given an array where each element {@code indices[i]} represents the randomized column index
* corresponding to randomized row index {@code i}, create a new array with the corresponding
* inverted indices.
*
* @param indices
* an array of transformed indices to be inverted
* @return an array of inverted indices
*/
public int[] invertIndices(int[] indices) {
int[] result = new int[indices.length];
    for (int i = 0; i < indices.length; i++) {
result[rowInverse[i]] = colInverse[indices[i]];
}
return result;
} | 3.26 |
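transform, m0 and invertIndices together apply and undo the same random row/column permutation, so inverting a transformed matrix (or assignment vector) recovers the original. A small self-contained sketch of applying a row permutation and then undoing it with the inverse permutation; the arrays and values are illustrative, not the RandomizedMatrix internals:

```java
import java.util.Arrays;

// Sketch of the transform/inverse round trip performed by RandomizedMatrix (rows only).
public class PermutationRoundTrip {
  public static void main(String[] args) {
    int[] rowTransform = {1, 2, 0};    // row i moves to rowTransform[i]
    int[] rowInverse = new int[3];
    for (int i = 0; i < 3; i++) {
      rowInverse[rowTransform[i]] = i; // invert the permutation
    }

    float[][] matrix = {{1, 2}, {3, 4}, {5, 6}};
    float[][] transformed = new float[3][];
    for (int i = 0; i < 3; i++) {
      transformed[rowTransform[i]] = matrix[i];
    }
    float[][] restored = new float[3][];
    for (int i = 0; i < 3; i++) {
      restored[rowInverse[i]] = transformed[i];
    }
    System.out.println(Arrays.deepEquals(matrix, restored)); // true
  }
}
```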
hbase_RegionPlacementMaintainer_getRegionsMovement_rdh | /**
* Return how many regions will move per table since their primary RS will change
*
* @param newPlan
* - new AssignmentPlan
* @return how many primaries will move per table
*/
public Map<TableName, Integer> getRegionsMovement(FavoredNodesPlan newPlan) throws IOException {
Map<TableName, Integer> v116 = new HashMap<>();
SnapshotOfRegionAssignmentFromMeta snapshot = this.getRegionAssignmentSnapshot();
Map<TableName, List<RegionInfo>> tableToRegions = snapshot.getTableToRegionMap();
FavoredNodesPlan oldPlan = snapshot.getExistingAssignmentPlan();
    Set<TableName> tables = snapshot.getTableSet();
    for (TableName table : tables) {
      int movedPrimaries = 0;
      if ((!this.targetTableSet.isEmpty()) && (!this.targetTableSet.contains(table))) {
continue;
}
List<RegionInfo> regions = tableToRegions.get(table);
for (RegionInfo v124 : regions) {
List<ServerName> oldServers = oldPlan.getFavoredNodes(v124);
List<ServerName> newServers = newPlan.getFavoredNodes(v124);
if ((oldServers != null) && (newServers != null)) {
          ServerName oldPrimary = oldServers.get(0);
          ServerName newPrimary = newServers.get(0);
          if (oldPrimary.compareTo(newPrimary) != 0) {
            movedPrimaries++;
          }
}
}
v116.put(table, movedPrimaries);
}
return v116;
} | 3.26 |
hbase_RegionPlacementMaintainer_getRegionAssignmentSnapshot_rdh | /**
* Returns the new RegionAssignmentSnapshot
*/
public SnapshotOfRegionAssignmentFromMeta getRegionAssignmentSnapshot() throws IOException {
SnapshotOfRegionAssignmentFromMeta currentAssignmentShapshot = new SnapshotOfRegionAssignmentFromMeta(ConnectionFactory.createConnection(conf));
currentAssignmentShapshot.initialize();
return currentAssignmentShapshot;
} | 3.26 |
hbase_RegionPlacementMaintainer_genAssignmentPlan_rdh | /**
* Generate the assignment plan for the existing table
*
* @param munkresForSecondaryAndTertiary
* if set on true the assignment plan for the tertiary and
* secondary will be generated with Munkres algorithm,
* otherwise will be generated using
* placeSecondaryAndTertiaryRS
*/
private void genAssignmentPlan(TableName tableName, SnapshotOfRegionAssignmentFromMeta assignmentSnapshot, Map<String, Map<String, Float>> regionLocalityMap, FavoredNodesPlan plan, boolean munkresForSecondaryAndTertiary) throws IOException {
// Get the all the regions for the current table
List<RegionInfo> v8 = assignmentSnapshot.getTableToRegionMap().get(tableName);
int numRegions = v8.size();
// Get the current assignment map
Map<RegionInfo, ServerName> currentAssignmentMap = assignmentSnapshot.getRegionToRegionServerMap();
// Get the all the region servers
List<ServerName> servers = new ArrayList<>();
servers.addAll(FutureUtils.get(getConnection().getAdmin().getRegionServers()));
LOG.info(((((("Start to generate assignment plan for " +
numRegions) + " regions from table ") + tableName) + " with ") + servers.size()) + " region servers");
int slotsPerServer = ((int) (Math.ceil(((float) (numRegions)) / servers.size())));
int regionSlots = slotsPerServer * servers.size();
// Compute the primary, secondary and tertiary costs for each region/server
// pair. These costs are based only on node locality and rack locality, and
// will be modified later.
float[][] primaryCost = new float[numRegions][regionSlots];
float[][] secondaryCost = new float[numRegions][regionSlots];
    float[][] v16 = new float[numRegions][regionSlots];
if (this.enforceLocality && (regionLocalityMap != null)) {
// Transform the locality mapping into a 2D array, assuming that any
// unspecified locality value is 0.
float[][] localityPerServer = new float[numRegions][regionSlots];
for (int i = 0; i < numRegions; i++) {
Map<String, Float> serverLocalityMap = regionLocalityMap.get(v8.get(i).getEncodedName());
if (serverLocalityMap == null) {
continue;
}
        for (int j = 0; j < servers.size(); j++) {
String serverName = servers.get(j).getHostname();
if (serverName == null) {
continue;
}
Float locality = serverLocalityMap.get(serverName);
if (locality == null) {
continue;
}
for (int v23 = 0; v23 < slotsPerServer; v23++) {
// If we can't find the locality of a region to a server, which occurs
// because locality is only reported for servers which have some
// blocks of a region local, then the locality for that pair is 0.
localityPerServer[i][(j * slotsPerServer) + v23] = locality.floatValue();
}
}
}
// Compute the total rack locality for each region in each rack. The total
// rack locality is the sum of the localities of a region on all servers in
// a rack.
Map<String, Map<RegionInfo, Float>> rackRegionLocality = new HashMap<>();
      for (int i = 0; i < numRegions; i++) {
        RegionInfo region = v8.get(i);
for (int j = 0; j < regionSlots; j += slotsPerServer) {
String rack = rackManager.getRack(servers.get(j / slotsPerServer));
          Map<RegionInfo, Float> rackLocality = rackRegionLocality.get(rack);
if (rackLocality == null) {
rackLocality = new HashMap<>();
rackRegionLocality.put(rack, rackLocality);
}
Float localityObj = rackLocality.get(region);
float locality = (localityObj == null) ? 0 : localityObj.floatValue();
locality += localityPerServer[i][j];
rackLocality.put(region, locality);
}
}
      for (int i = 0; i < numRegions; i++) {
        for (int j = 0; j < regionSlots; j++) {
String rack = rackManager.getRack(servers.get(j / slotsPerServer));
          Float totalRackLocalityObj = rackRegionLocality.get(rack).get(v8.get(i));
float totalRackLocality = (totalRackLocalityObj == null) ? 0 : totalRackLocalityObj.floatValue();
// Primary cost aims to favor servers with high node locality and low
// rack locality, so that secondaries and tertiaries can be chosen for
// nodes with high rack locality. This might give primaries with
// slightly less locality at first compared to a cost which only
// considers the node locality, but should be better in the long run.
primaryCost[i][j] = 1 - ((2 * localityPerServer[i][j]) - totalRackLocality);
// Secondary cost aims to favor servers with high node locality and high
// rack locality since the tertiary will be chosen from the same rack as
// the secondary. This could be negative, but that is okay.
secondaryCost[i][j] = 2 - (localityPerServer[i][j] + totalRackLocality);
// Tertiary cost is only concerned with the node locality. It will later
// be restricted to only hosts on the same rack as the secondary.
v16[i][j] = 1 - localityPerServer[i][j];
}
}
}
if (this.enforceMinAssignmentMove && (currentAssignmentMap != null)) {
// We want to minimize the number of regions which move as the result of a
// new assignment. Therefore, slightly penalize any placement which is for
// a host that is not currently serving the region.
for (int i = 0; i < numRegions; i++) {
for (int j = 0; j < servers.size(); j++) {
ServerName currentAddress = currentAssignmentMap.get(v8.get(i));
if ((currentAddress != null) && (!currentAddress.equals(servers.get(j)))) {
for (int k = 0; k < slotsPerServer; k++) {
primaryCost[i][(j * slotsPerServer) + k] += NOT_CURRENT_HOST_PENALTY;
}
}
}
}
}
// Artificially increase cost of last slot of each server to evenly
// distribute the slop, otherwise there will be a few servers with too few
// regions and many servers with the max number of regions.
for (int i = 0; i < numRegions; i++) {
for (int j = 0; j < regionSlots; j += slotsPerServer) {
primaryCost[i][j] += LAST_SLOT_COST_PENALTY;
secondaryCost[i][j] += LAST_SLOT_COST_PENALTY;
v16[i][j] += LAST_SLOT_COST_PENALTY;
}
}
RandomizedMatrix v43 = new RandomizedMatrix(numRegions, regionSlots);
primaryCost = v43.transform(primaryCost);
int[] primaryAssignment = new MunkresAssignment(primaryCost).solve();
primaryAssignment = v43.invertIndices(primaryAssignment);
// Modify the secondary and tertiary costs for each region/server pair to
// prevent a region from being assigned to the same rack for both primary
// and either one of secondary or tertiary.
for (int i = 0; i < numRegions; i++) {
int slot = primaryAssignment[i];
String rack = rackManager.getRack(servers.get(slot / slotsPerServer));
for (int k = 0; k < servers.size(); k++) {
if (!rackManager.getRack(servers.get(k)).equals(rack)) {
continue;
}
if (k == (slot / slotsPerServer)) {
// Same node, do not place secondary or tertiary here ever.
for (int m = 0; m < slotsPerServer; m++) {
secondaryCost[i][(k * slotsPerServer) + m] = MAX_COST;
v16[i][(k * slotsPerServer) + m] = MAX_COST;
}
} else {
// Same rack, do not place secondary or tertiary here if possible.
for (int m = 0; m < slotsPerServer; m++) {
secondaryCost[i][(k * slotsPerServer) + m] = AVOID_COST;
v16[i][(k * slotsPerServer) + m] = AVOID_COST;
}
}
}
}
if (munkresForSecondaryAndTertiary) {
v43 = new RandomizedMatrix(numRegions, regionSlots);
secondaryCost = v43.transform(secondaryCost);
int[] secondaryAssignment = new MunkresAssignment(secondaryCost).solve();
secondaryAssignment = v43.invertIndices(secondaryAssignment);
// Modify the tertiary costs for each region/server pair to ensure that a
// region is assigned to a tertiary server on the same rack as its secondary
// server, but not the same server in that rack.
for (int i = 0; i < numRegions; i++) {
int slot = secondaryAssignment[i];
String rack = rackManager.getRack(servers.get(slot / slotsPerServer));
for (int k = 0; k < servers.size(); k++) {
if (k == (slot / slotsPerServer)) {
// Same node, do not place tertiary here ever.
for (int m = 0; m < slotsPerServer; m++) {
v16[i][(k * slotsPerServer) + m] = MAX_COST;
}
} else {
if (rackManager.getRack(servers.get(k)).equals(rack)) {
continue;
}
// Different rack, do not place tertiary here if possible.
for (int m = 0; m < slotsPerServer; m++) {
v16[i][(k * slotsPerServer) + m] = AVOID_COST;
}
}
}
}
v43 = new RandomizedMatrix(numRegions, regionSlots);
v16 = v43.transform(v16);
int[] tertiaryAssignment = new MunkresAssignment(v16).solve();
tertiaryAssignment = v43.invertIndices(tertiaryAssignment);
for (int i = 0; i < numRegions; i++) {
List<ServerName> v60 = new ArrayList<>(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM);
ServerName s = servers.get(primaryAssignment[i] / slotsPerServer);
v60.add(ServerName.valueOf(s.getHostname(), s.getPort(), ServerName.NON_STARTCODE));
s = servers.get(secondaryAssignment[i] / slotsPerServer);
v60.add(ServerName.valueOf(s.getHostname(), s.getPort(), ServerName.NON_STARTCODE));
s = servers.get(tertiaryAssignment[i] / slotsPerServer);
v60.add(ServerName.valueOf(s.getHostname(), s.getPort(), ServerName.NON_STARTCODE));
// Update the assignment plan
plan.updateFavoredNodesMap(v8.get(i), v60);
}
LOG.info(((((("Generated the assignment plan for " + numRegions) + " regions from table ") + tableName) + " with ") + servers.size()) + " region servers");
LOG.info("Assignment plan for secondary and tertiary generated " + "using MunkresAssignment");
} else {
Map<RegionInfo, ServerName> primaryRSMap = new HashMap<>();
for (int i = 0; i < numRegions; i++) {
primaryRSMap.put(v8.get(i), servers.get(primaryAssignment[i] / slotsPerServer));
}
FavoredNodeAssignmentHelper favoredNodeHelper = new FavoredNodeAssignmentHelper(servers, conf);
favoredNodeHelper.initialize();
Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap = favoredNodeHelper.placeSecondaryAndTertiaryWithRestrictions(primaryRSMap);
for (int i = 0; i < numRegions; i++) {
List<ServerName> favoredServers = new ArrayList<>(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM);
RegionInfo currentRegion = v8.get(i);
ServerName s = primaryRSMap.get(currentRegion);
favoredServers.add(ServerName.valueOf(s.getHostname(), s.getPort(), ServerName.NON_STARTCODE));
ServerName[] secondaryAndTertiary = secondaryAndTertiaryMap.get(currentRegion);
s = secondaryAndTertiary[0];
favoredServers.add(ServerName.valueOf(s.getHostname(), s.getPort(), ServerName.NON_STARTCODE));
s = secondaryAndTertiary[1];
favoredServers.add(ServerName.valueOf(s.getHostname(), s.getPort(), ServerName.NON_STARTCODE));
// Update the assignment plan
plan.updateFavoredNodesMap(v8.get(i), favoredServers);
}
LOG.info(((((("Generated the assignment plan for " + numRegions) + " regions from table ") + tableName) + " with ") + servers.size()) + " region servers");
LOG.info("Assignment plan for secondary and tertiary generated " + "using placeSecondaryAndTertiaryWithRestrictions method");
}
} | 3.26 |
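The snippet above reduces favored-node placement to three assignment problems (primary, secondary, tertiary) over a region-by-slot cost matrix and solves each one with MunkresAssignment, i.e. the Hungarian algorithm, after shuffling the matrix to randomize tie-breaking. A minimal sketch of that pattern, reusing only the RandomizedMatrix and MunkresAssignment signatures visible above; the tiny cost matrix and its values are illustrative, not taken from the balancer, and imports are elided:

// Two regions, four slots; a lower cost marks a more desirable slot.
float[][] cost = {
  { 0.1f, 0.9f, 0.5f, 0.7f },
  { 0.8f, 0.2f, 0.6f, 0.4f }
};
RandomizedMatrix shuffle = new RandomizedMatrix(2, 4); // randomizes row/column order so ties break randomly
cost = shuffle.transform(cost);
int[] assignment = new MunkresAssignment(cost).solve(); // assignment[i] = slot chosen for region i
assignment = shuffle.invertIndices(assignment); // map the solution back to the original ordering
// Here assignment would be {0, 1}: each region gets its cheapest slot.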
hbase_CompactionConfiguration_getThrottlePoint_rdh | /**
* Returns ThrottlePoint used for classifying small and large compactions
*/
public long getThrottlePoint() {
return throttlePoint;
} | 3.26 |
hbase_CompactionConfiguration_getMinCompactSize_rdh | /**
* Returns lower bound below which compaction is selected without ratio test
*/
public long getMinCompactSize() {
return minCompactSize;
} | 3.26 |
hbase_CompactionConfiguration_getMaxFilesToCompact_rdh | /**
* Returns upper bound on number of files to be included in minor compactions
*/
public int getMaxFilesToCompact() {
return maxFilesToCompact;
} | 3.26 |
hbase_CompactionConfiguration_getMaxCompactSize_rdh | /**
* Returns upper bound on file size to be included in minor compactions
*/
public long getMaxCompactSize() {
return maxCompactSize;
} | 3.26 |
hbase_CompactionConfiguration_getCompactionRatio_rdh | /**
* Returns Ratio used for compaction
*/
public double getCompactionRatio() {
return compactionRatio;
} | 3.26 |
hbase_CompactionConfiguration_setMinFilesToCompact_rdh | /**
* Set lower bound on number of files to be included in minor compactions
*
* @param threshold
* value to set to
*/
public void setMinFilesToCompact(int threshold) {
minFilesToCompact = threshold;
} | 3.26 |
hbase_CompactionConfiguration_getCompactionRatioOffPeak_rdh | /**
* Returns Off peak Ratio used for compaction
*/
public double getCompactionRatioOffPeak() {
return offPeakCompactionRatio;
} | 3.26 |
hbase_CompactionConfiguration_getMinFilesToCompact_rdh | /**
* Returns lower bound on number of files to be included in minor compactions
*/
public int getMinFilesToCompact() {
return minFilesToCompact;
} | 3.26 |
hbase_CellComparatorImpl_getCellComparator_rdh | /**
* Utility method that makes a guess at comparator to use based off passed tableName. Use in
* extreme when no comparator specified.
*
* @return CellComparator to use going off the {@code tableName} passed.
*/
public static CellComparator getCellComparator(byte[] tableName) {
// FYI, TableName.toBytes does not create an array; just returns existing array pointer.
return Bytes.equals(tableName, TableName.META_TABLE_NAME.toBytes()) ? MetaCellComparator.META_COMPARATOR : CellComparatorImpl.COMPARATOR;
} | 3.26 |
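A small usage sketch of the comparator lookup above; "my_table" is an illustrative table name and imports (CellComparator, CellComparatorImpl, MetaCellComparator, TableName) are elided:

// hbase:meta gets the meta-aware comparator, any other table gets the default one.
CellComparator metaCmp = CellComparatorImpl.getCellComparator(TableName.META_TABLE_NAME.toBytes());
CellComparator userCmp = CellComparatorImpl.getCellComparator(TableName.valueOf("my_table").toBytes());
// metaCmp == MetaCellComparator.META_COMPARATOR, userCmp == CellComparatorImpl.COMPARATOR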
hbase_CellComparatorImpl_m0_rdh | /**
* Compares the family and qualifier part of the cell
*
* @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 otherwise
*/
public final int m0(final Cell left, final Cell right) {
int diff = compareFamilies(left, right);
if (diff != 0) {
return diff;
}
return compareQualifiers(left, right);
} | 3.26 |
hbase_CellComparatorImpl_compareRows_rdh | /**
* Compares the rows of the left and right cell. For the hbase:meta case this method is overridden
* such that it can handle hbase:meta cells. The caller should ensure using the appropriate
* comparator for hbase:meta.
*
* @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 otherwise
*/
@Override
public int compareRows(final Cell left, final Cell right) {
return compareRows(left, left.getRowLength(), right, right.getRowLength());
} | 3.26 |
hbase_CellComparatorImpl_compareFamilies_rdh | /**
 * This method will be overridden when comparing cells within a store, to bypass family comparison.
*/
protected int compareFamilies(KeyValue left, int leftFamilyPosition, int leftFamilyLength, ByteBufferKeyValue right, int rightFamilyPosition, int rightFamilyLength) {
return ByteBufferUtils.compareTo(left.getFamilyArray(), leftFamilyPosition, leftFamilyLength, right.getFamilyByteBuffer(), rightFamilyPosition, rightFamilyLength);
} | 3.26 |
hbase_TableHFileArchiveTracker_clearTables_rdh | /**
* Remove the currently archived tables.
* <p>
* Does some intelligent checking to make sure we don't prematurely create an archive tracker.
*/
private void clearTables() {
getMonitor().clearArchive();
} | 3.26 |
hbase_TableHFileArchiveTracker_keepHFiles_rdh | /**
* Determine if the given table should or should not allow its hfiles to be deleted
*
* @param tableName
* name of the table to check
* @return <tt>true</tt> if its store files should be retained, <tt>false</tt> otherwise
*/
public boolean keepHFiles(String tableName) {
return getMonitor().shouldArchiveTable(tableName);
} | 3.26 |
hbase_TableHFileArchiveTracker_safeStopTrackingTable_rdh | /**
* Stop tracking a table. Ensures that the table doesn't exist, but if it does, it attempts to add
 * the table back via {@link #addAndReWatchTable(String)} - it's a 'safe' removal.
*
* @param tableZnode
* full zookeeper path to the table to be added
* @throws KeeperException
* if an unexpected zk exception occurs
*/
private void safeStopTrackingTable(String tableZnode) throws KeeperException {
getMonitor().removeTable(ZKUtil.getNodeName(tableZnode));
// if the table exists, then add and rewatch it
if (ZKUtil.checkExists(watcher, tableZnode) >= 0) {
addAndReWatchTable(tableZnode);
}
} | 3.26 |
hbase_TableHFileArchiveTracker_getMonitor_rdh | /**
* Returns the tracker for which tables should be archived.
*/
public final HFileArchiveTableMonitor getMonitor() {
return this.monitor;
} | 3.26 |
hbase_TableHFileArchiveTracker_start_rdh | /**
* Start monitoring for archive updates
*
* @throws KeeperException
* on failure to find/create nodes
*/
public void start() throws KeeperException {
// if archiving is enabled, then read in the list of tables to archive
LOG.debug("Starting hfile archive tracker...");
this.checkEnabledAndUpdate();
LOG.debug("Finished starting hfile archive tracker!");
} | 3.26 |
hbase_TableHFileArchiveTracker_addAndReWatchTable_rdh | /**
 * Add this table to the tracker and then set a watch on that node.
* <p>
* Handles situation where table is deleted in the time between the update and resetting the watch
* by deleting the table via {@link #safeStopTrackingTable(String)}
*
* @param tableZnode
* full zookeeper path to the table to be added
* @throws KeeperException
* if an unexpected zk exception occurs
*/
private void addAndReWatchTable(String tableZnode) throws KeeperException {
getMonitor().addTable(ZKUtil.getNodeName(tableZnode));
// re-add a watch to the table created
// and check to make sure it wasn't deleted
if (!ZKUtil.watchAndCheckExists(watcher, tableZnode)) {
safeStopTrackingTable(tableZnode);
}
} | 3.26 |
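The two helpers above close the create/delete race around a ZooKeeper watch: register the table, set a watch, re-check existence, and undo (or redo) the registration if the node changed in between. A condensed sketch of that handshake using only the ZKUtil calls visible in these snippets; 'watcher' and 'monitor' stand in for the tracker's ZKWatcher and HFileArchiveTableMonitor fields:

// Assumed context: fields 'watcher' and 'monitor' as in the tracker snippets above.
void trackTable(String tableZnode) throws KeeperException {
  monitor.addTable(ZKUtil.getNodeName(tableZnode));
  if (!ZKUtil.watchAndCheckExists(watcher, tableZnode)) {
    // The znode vanished before the watch landed: stop tracking, but start again if it came back.
    monitor.removeTable(ZKUtil.getNodeName(tableZnode));
    if (ZKUtil.checkExists(watcher, tableZnode) >= 0) {
      trackTable(tableZnode);
    }
  }
}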
hbase_TableHFileArchiveTracker_updateWatchedTables_rdh | /**
 * Read the list of children under the archive znode as table names and then set those tables as
 * the list of tables that we should archive
*
* @throws KeeperException
* if there is an unexpected zk exception
*/
private void updateWatchedTables() throws KeeperException {
// get the children and watch for new children
LOG.debug("Updating watches on tables to archive.");
// get the children and add watches for each of the children
List<String> tables = ZKUtil.listChildrenAndWatchThem(watcher, archiveHFileZNode);
LOG.debug("Starting archive for tables:" + tables);
// if archiving is still enabled
if ((tables != null) && (tables.size() > 0)) {
getMonitor().setArchiveTables(tables);
} else {
LOG.debug("No tables to archive.");
// only if we currently have a tracker, then clear the archive
clearTables();
}
} | 3.26 |
hbase_TableHFileArchiveTracker_stop_rdh | /**
* Stop this tracker and the passed zookeeper
*/
public void stop() {
if (this.stopped) {
return;
}
this.stopped = true;
this.watcher.close();
} | 3.26 |
hbase_Queue_add_rdh | // ======================================================================
// Functions to handle procedure queue
// ======================================================================
public void add(Procedure<?> proc, boolean addToFront) {
if (addToFront) {
runnables.addFirst(proc);
} else {
runnables.addLast(proc);
}
} | 3.26 |
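A stand-alone illustration of the add-to-front versus add-to-back behaviour above, using a plain ArrayDeque of strings in place of the internal runnables deque (the Procedure type and the queue field are elided here):

import java.util.ArrayDeque;
import java.util.Deque;

public class FrontBackQueueDemo {
  public static void main(String[] args) {
    Deque<String> runnables = new ArrayDeque<>();
    runnables.addLast("proc-1");  // addToFront == false
    runnables.addLast("proc-2");  // addToFront == false
    runnables.addFirst("proc-3"); // addToFront == true, jumps ahead of the waiting procedures
    System.out.println(runnables); // [proc-3, proc-1, proc-2]
  }
}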
hbase_Queue_compareKey_rdh | // ======================================================================
// Generic Helpers
// ======================================================================
public int compareKey(TKey cmpKey) {
return key.compareTo(cmpKey);
} | 3.26 |
hbase_Increment_toString_rdh | /**
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("row=");
sb.append(Bytes.toStringBinary(this.row));
if (this.familyMap.isEmpty()) {
sb.append(", no columns set to be incremented");
return sb.toString();
}
sb.append(", families=");
boolean moreThanOne = false;
for (Map.Entry<byte[], List<Cell>> entry : this.familyMap.entrySet()) {
if (moreThanOne) {
sb.append("), ");
} else {
moreThanOne = true;
sb.append("{");
}
sb.append("(family=");
sb.append(Bytes.toString(entry.getKey()));
sb.append(", columns=");
if (entry.getValue() == null) {
sb.append("NONE");
} else {
sb.append("{");
boolean moreThanOneB = false;
for (Cell cell : entry.getValue()) {
if (moreThanOneB) {
sb.append(", ");
} else {
moreThanOneB = true;
}
sb.append((CellUtil.getCellKeyAsString(cell) + "+=") + Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
}
sb.append("}");
}
}
sb.append("}");
return sb.toString();
} | 3.26 |
hbase_Increment_setReturnResults_rdh | /**
*
* @param returnResults
* True (default) if the increment operation should return the results. A
* client that is not interested in the result can save network bandwidth
* setting this to false.
*/
@Override
public Increment setReturnResults(boolean returnResults) {
super.setReturnResults(returnResults);
return this;
} | 3.26 |
hbase_Increment_isReturnResults_rdh | /**
* Returns current setting for returnResults
*/
// This method makes public the superclasses's protected method.
@Override
public boolean isReturnResults() {
return super.isReturnResults();
} | 3.26 |
hbase_Increment_getTimeRange_rdh | /**
* Gets the TimeRange used for this increment.
*/
public TimeRange getTimeRange() {
return this.tr;
} | 3.26 |
hbase_Increment_add_rdh | /**
* Add the specified KeyValue to this operation.
*
* @param cell
* individual Cell
* @throws java.io.IOException
* e
*/
@Override
public Increment add(Cell cell) throws IOException {
super.add(cell);
return this;
} | 3.26 |
hbase_Increment_getFamilyMapOfLongs_rdh | /**
* Before 0.95, when you called Increment#getFamilyMap(), you got back a map of families to a list
* of Longs. Now, {@link #getFamilyCellMap()} returns families by list of Cells. This method has
* been added so you can have the old behavior.
*
* @return Map of families to a Map of qualifiers and their Long increments.
* @since 0.95.0
*/
public Map<byte[], NavigableMap<byte[], Long>> getFamilyMapOfLongs() {
NavigableMap<byte[], List<Cell>> map = super.getFamilyCellMap();
Map<byte[], NavigableMap<byte[], Long>> results = new TreeMap<>(Bytes.BYTES_COMPARATOR);
for (Map.Entry<byte[], List<Cell>> entry : map.entrySet()) {
NavigableMap<byte[], Long> longs = new TreeMap<>(Bytes.BYTES_COMPARATOR);
for (Cell cell : entry.getValue()) {
longs.put(CellUtil.cloneQualifier(cell), Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
}
results.put(entry.getKey(), longs);
}
return results;
} | 3.26 |
hbase_Increment_hasFamilies_rdh | /**
* Method for checking if any families have been inserted into this Increment
*
* @return true if familyMap is non empty false otherwise
*/
public boolean hasFamilies() {
return !this.familyMap.isEmpty();
} | 3.26 |
hbase_Increment_setTimeRange_rdh | /**
* Sets the TimeRange to be used on the Get for this increment.
* <p>
* This is useful for when you have counters that only last for specific periods of time (ie.
* counters that are partitioned by time). By setting the range of valid times for this increment,
* you can potentially gain some performance with a more optimal Get operation. Be careful adding
* the time range to this class as you will update the old cell if the time range doesn't include
* the latest cells.
* <p>
* This range is used as [minStamp, maxStamp).
*
* @param minStamp
* minimum timestamp value, inclusive
* @param maxStamp
* maximum timestamp value, exclusive
* @throws IOException
* if invalid time range
*/
public Increment setTimeRange(long minStamp, long maxStamp) throws IOException {
tr = TimeRange.between(minStamp, maxStamp);
return this;
} | 3.26 |
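A hedged usage sketch combining the time-range setter above with the standard Increment.addColumn(byte[], byte[], long) client call; the row, family and qualifier names are illustrative, and the fragment is assumed to run inside a method that may throw IOException with an open Table handle named 'table':

byte[] row = Bytes.toBytes("row-1");
Increment inc = new Increment(row);
inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("hits"), 1L);
// The Get half of the increment only considers existing cells with timestamps in [start, end).
long end = System.currentTimeMillis();
long start = end - 60 * 60 * 1000L; // roughly the last hour
inc.setTimeRange(start, end);
table.increment(inc);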
hbase_Increment_numFamilies_rdh | /**
* Method for retrieving the number of families to increment from
*
* @return number of families
*/
@Override
public int numFamilies() {
return this.familyMap.size();
} | 3.26 |
hbase_AsyncAggregationClient_sumByRegion_rdh | // the map key is the startRow of the region
private static <R, S, P extends Message, Q extends Message, T extends Message> CompletableFuture<NavigableMap<byte[], S>> sumByRegion(AsyncTable<?> table, ColumnInterpreter<R, S, P, Q, T> ci, Scan scan) {
CompletableFuture<NavigableMap<byte[], S>> future = new CompletableFuture<NavigableMap<byte[], S>>();
AggregateRequest req;
try {
req = validateArgAndGetPB(scan, ci, false);
} catch (IOException e) {
future.completeExceptionally(e);
return future;
}
int firstPartIndex = scan.getFamilyMap().get(scan.getFamilies()[0]).size() - 1;
AbstractAggregationCallback<NavigableMap<byte[], S>> callback = new AbstractAggregationCallback<NavigableMap<byte[], S>>(future) {
private final NavigableMap<byte[], S> map = new TreeMap<>(Bytes.BYTES_COMPARATOR);
@Override
protected void aggregate(RegionInfo region, AggregateResponse resp) throws IOException {
if (resp.getFirstPartCount() > 0) {
map.put(region.getStartKey(), getPromotedValueFromProto(ci, resp, firstPartIndex));
}
}
@Override
protected NavigableMap<byte[], S> getFinalResult() {
return map;
}
};
table.<AggregateService, AggregateResponse>coprocessorService(AggregateService::newStub, (stub, controller, rpcCallback) -> stub.getMedian(controller, req, rpcCallback), callback).fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()).toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute();
return future;
} | 3.26
hbase_InputStreamBlockDistribution_isStreamUnsupported_rdh | /**
* For tests only, returns whether the passed stream is supported
*/
@RestrictedApi(explanation = "Should only be called in tests", link = "", allowedOnPath = ".*/src/test/.*")
boolean isStreamUnsupported() {
return streamUnsupported;
} | 3.26 |
hbase_InputStreamBlockDistribution_isEnabled_rdh | /**
* True if we should derive StoreFile HDFSBlockDistribution from the underlying input stream
*/
public static boolean isEnabled(Configuration conf) {
return conf.getBoolean(HBASE_LOCALITY_INPUTSTREAM_DERIVE_ENABLED, DEFAULT_HBASE_LOCALITY_INPUTSTREAM_DERIVE_ENABLED);
} | 3.26 |
hbase_InputStreamBlockDistribution_getHDFSBlockDistribution_rdh | /**
* Get the HDFSBlocksDistribution derived from the StoreFile input stream, re-computing if cache
* is expired.
*/
public synchronized HDFSBlocksDistribution getHDFSBlockDistribution() {
if ((EnvironmentEdgeManager.currentTime() - lastCachedAt) > cachePeriodMs) {
try {
LOG.debug("Refreshing HDFSBlockDistribution for {}", fileInfo);
computeBlockDistribution();
} catch (IOException e) {
LOG.warn("Failed to recompute block distribution for {}. Falling back on cached value.", fileInfo, e);
}
}
return hdfsBlocksDistribution;
} | 3.26 |
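The getter above is a refresh-if-stale cache: recompute once the cached value is older than the configured period, and keep serving the cached value if recomputation fails. A minimal generic sketch of that pattern (the class and method names here are illustrative, not the HBase API):

import java.util.concurrent.Callable;

public final class StaleCache<T> {
  private final long periodMs;
  private long lastComputedAt;
  private T value;

  public StaleCache(long periodMs) {
    this.periodMs = periodMs;
  }

  public synchronized T get(Callable<T> compute) {
    if (System.currentTimeMillis() - lastComputedAt > periodMs) {
      try {
        value = compute.call();
        lastComputedAt = System.currentTimeMillis();
      } catch (Exception e) {
        // Keep serving the stale value, as the block-distribution getter does on IOException.
      }
    }
    return value;
  }
}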
hbase_InputStreamBlockDistribution_setLastCachedAt_rdh | /**
* For tests only, sets lastCachedAt so we can force a refresh
*/
@RestrictedApi(explanation = "Should only be called in tests", link = "", allowedOnPath = ".*/src/test/.*")
synchronized void setLastCachedAt(long timestamp) {
lastCachedAt = timestamp;
} | 3.26 |
hbase_InputStreamBlockDistribution_getCachePeriodMs_rdh | /**
* For tests only, returns the configured cache period
*/
@RestrictedApi(explanation = "Should only be called in tests", link = "", allowedOnPath = ".*/src/test/.*")
long getCachePeriodMs() {
return cachePeriodMs;
} | 3.26 |
hbase_TablePermission_implies_rdh | /**
 * Check if the given action can be performed on the given table:family.
*
* @param table
* table name
* @param family
* family name
* @param action
* one of [Read, Write, Create, Exec, Admin]
* @return true if can, false otherwise
*/
public boolean implies(TableName table, byte[] family, Action action) {
if (failCheckTable(table)) {
return false;
}
if (failCheckFamily(family)) {
return false;
}
return implies(action);
} | 3.26
hbase_TablePermission_tableFieldsEqual_rdh | /**
 * Check if the table fields of this permission and the given table permission are equal.
*
* @param tp
* to be checked table permission
* @return true if equals, false otherwise
*/
public boolean tableFieldsEqual(TablePermission tp) {
if (tp == null) {
return false;
}
boolean tEq = ((table == null) && (tp.table == null)) || ((table != null) && table.equals(tp.table));
boolean v1 = ((family == null) && (tp.family == null)) || Bytes.equals(family, tp.family);
boolean qEq = ((qualifier == null) && (tp.qualifier == null)) || Bytes.equals(qualifier, tp.qualifier);
return (tEq && v1) && qEq;
} | 3.26 |
hbase_LruCachedBlockQueue_heapSize_rdh | /**
* Total size of all elements in this queue.
*
* @return size of all elements currently in queue, in bytes
*/
@Override
public long heapSize() {
return heapSize;
} | 3.26
hbase_LruCachedBlockQueue_poll_rdh | /**
* Returns The next element in this queue, or {@code null} if the queue is empty.
*/
public LruCachedBlock poll() {
return queue.poll();
} | 3.26 |
hbase_LruCachedBlockQueue_add_rdh | /**
* Attempt to add the specified cached block to this queue.
* <p>
* If the queue is smaller than the max size, or if the specified element is ordered before the
* smallest element in the queue, the element will be added to the queue. Otherwise, there is no
* side effect of this call.
*
* @param cb
* block to try to add to the queue
*/
@SuppressWarnings(value = "NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE", justification = "head can not be null as heapSize is greater than maxSize," + " which means we have something in the queue")
public void add(LruCachedBlock cb) {
if (heapSize < maxSize) {
queue.add(cb);
heapSize += cb.heapSize();
} else {
LruCachedBlock head = queue.peek();
if (cb.compareTo(head) > 0) {
heapSize += cb.heapSize();
heapSize -= head.heapSize();
if (heapSize > maxSize) {
queue.poll();
} else {
heapSize += head.heapSize();
}
queue.add(cb);
}
}
} | 3.26 |
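The add() above keeps the queue bounded by total heap size rather than element count: once the budget is reached, a new block is only considered if it compares greater than the current minimum, and that minimum is only evicted if dropping it is needed to stay near the budget. A simplified stand-alone sketch of the same idea for plain size-tracked items (types and field names are illustrative, not the HBase block cache API):

import java.util.PriorityQueue;

class SizedItem implements Comparable<SizedItem> {
  final int priority;
  final long size;
  SizedItem(int priority, long size) { this.priority = priority; this.size = size; }
  @Override
  public int compareTo(SizedItem o) { return Integer.compare(priority, o.priority); }
}

class BoundedBySizeQueue {
  private final PriorityQueue<SizedItem> queue = new PriorityQueue<>(); // head = lowest priority
  private final long maxSize;
  private long heapSize;

  BoundedBySizeQueue(long maxSize) { this.maxSize = maxSize; }

  void add(SizedItem item) {
    if (heapSize < maxSize) {          // still under budget: always keep it
      queue.add(item);
      heapSize += item.size;
    } else {
      SizedItem head = queue.peek();
      if (item.compareTo(head) > 0) {  // only worth keeping if it beats the current minimum
        heapSize += item.size - head.size;
        if (heapSize > maxSize) {
          queue.poll();                // evict the minimum to stay near the budget
        } else {
          heapSize += head.size;       // there is room for both after all
        }
        queue.add(item);
      }
    }
  }
}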
hbase_LruCachedBlockQueue_pollLast_rdh | /**
* Returns The last element in this queue, or {@code null} if the queue is empty.
*/
public LruCachedBlock pollLast() {
return queue.pollLast();
} | 3.26 |
hbase_MetricsSource_setAgeOfLastShippedOpByTable_rdh | /**
 * Set the age of the last edit that was shipped, grouped by table
*
* @param timestamp
* write time of the edit
* @param tableName
* String as group and tableName
*/
public void setAgeOfLastShippedOpByTable(long timestamp, String tableName) {
long age = EnvironmentEdgeManager.currentTime() - timestamp;
this.getSingleSourceSourceByTable().computeIfAbsent(tableName, t -> CompatibilitySingletonFactory.getInstance(MetricsReplicationSourceFactory.class).getTableSource(t)).setLastShippedAge(age);
} | 3.26 |
hbase_MetricsSource_setWALReaderEditsBufferUsage_rdh | /**
* Sets the amount of memory in bytes used in this RegionServer by edits pending replication.
*/
public void setWALReaderEditsBufferUsage(long usageInBytes) {
globalSourceSource.setWALReaderEditsBufferBytes(usageInBytes);
} | 3.26 |
hbase_MetricsSource_incrementFailedBatches_rdh | /**
* Convenience method to update metrics when batch of operations has failed.
*/
public void incrementFailedBatches() {
singleSourceSource.incrFailedBatches();
globalSourceSource.incrFailedBatches();
} | 3.26 |
hbase_MetricsSource_shipBatch_rdh | /**
 * Convenience method to apply changes to metrics due to shipping a batch of logs.
*
* @param batchSize
* the size of the batch that was shipped to sinks.
* @param hfiles
* total number of hfiles shipped to sinks.
*/
public void shipBatch(long batchSize, int sizeInBytes, long hfiles) {
shipBatch(batchSize, sizeInBytes);
singleSourceSource.incrHFilesShipped(hfiles);
globalSourceSource.incrHFilesShipped(hfiles);
} | 3.26 |
hbase_MetricsSource_m1_rdh | /* Sets the age of the oldest log file, just for the source. */
public void m1(long age) {
singleSourceSource.setOldestWalAge(age);
} | 3.26 |
hbase_MetricsSource_incrSizeOfLogQueue_rdh | /**
* Increment size of the log queue.
*/
public void incrSizeOfLogQueue() {
singleSourceSource.incrSizeOfLogQueue(1);
globalSourceSource.incrSizeOfLogQueue(1);
} | 3.26 |
hbase_MetricsSource_refreshAgeOfLastShippedOp_rdh | /**
* Convenience method to use the last given timestamp to refresh the age of the last edit. Used
 * when replication fails and we need to keep that metric accurate.
*
* @param walGroupId
* id of the group to update
*/
public void refreshAgeOfLastShippedOp(String walGroupId) {
Long lastTimestamp = this.lastShippedTimeStamps.get(walGroupId);
if (lastTimestamp == null) {
this.lastShippedTimeStamps.put(walGroupId, 0L);
lastTimestamp = 0L;
}
if (lastTimestamp > 0) {
setAgeOfLastShippedOp(lastTimestamp, walGroupId);
}
} | 3.26 |
hbase_MetricsSource_getWALReaderEditsBufferUsage_rdh | /**
* Returns the amount of memory in bytes used in this RegionServer by edits pending replication.
*/
public long getWALReaderEditsBufferUsage() {
return globalSourceSource.getWALReaderEditsBufferBytes();
} | 3.26 |
hbase_MetricsSource_getAgeOfLastShippedOp_rdh | /**
* Get AgeOfLastShippedOp
*/
public Long getAgeOfLastShippedOp() {
return singleSourceSource.getLastShippedAge();
} | 3.26 |
hbase_MetricsSource_getUncleanlyClosedWALs_rdh | /**
* Get the value of uncleanlyClosedWAL counter
*/
public long getUncleanlyClosedWALs() {
return singleSourceSource.getUncleanlyClosedWALs();
} | 3.26 |