name | code_snippet | score
---|---|---|
hbase_MobFileCache_closeFile_rdh | /**
* Closes a mob file.
*
* @param file
* The mob file that needs to be closed.
*/
public void closeFile(MobFile file) {
IdLock.Entry v14 = null;
try {
if (!isCacheEnabled) {
file.close();
} else {
v14 = keyLock.getLockEntry(hashFileName(file.getFileName()));
file.close();
}
} catch (IOException e) {
LOG.error("MobFileCache, Exception happen during close " + file.getFileName(), e);
} finally {
if (v14 != null) {
keyLock.releaseLockEntry(v14);
}
}
} | 3.26 |
hbase_MobFileCache_openFile_rdh | /**
* Opens a mob file.
*
* @param fs
* The current file system.
* @param path
* The file path.
* @param cacheConf
* The current MobCacheConfig
 * @return An opened mob file.
*/
public MobFile openFile(FileSystem fs, Path path, CacheConfig cacheConf) throws IOException {
if (!isCacheEnabled) {
MobFile mobFile = MobFile.create(fs, path, conf, cacheConf);
mobFile.open();
return mobFile;
} else {
String fileName = path.getName();
CachedMobFile cached = map.get(fileName);
IdLock.Entry lockEntry = keyLock.getLockEntry(hashFileName(fileName));
try {
if (cached == null) {
cached = map.get(fileName);
if (cached == null) {
if (map.size() > mobFileMaxCacheSize) {
evict();
}
cached = CachedMobFile.create(fs, path, conf, cacheConf);
cached.open();
map.put(fileName, cached);
miss.increment();
}
}
cached.open();
cached.access(count.incrementAndGet());
} finally {
keyLock.releaseLockEntry(lockEntry);
}
return cached;
}
} | 3.26 |
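The cache population in openFile above follows a check / lock / re-check sequence: read the map without the lock, take a per-file-name lock, read again, and only then create and cache the entry. A minimal, self-contained sketch of that pattern with plain JDK types (the class and names below are illustrative, not HBase's; the real code uses a striped IdLock keyed by a hash of the file name):

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.ReentrantLock;

public class CheckLockRecheckCache {
  private final ConcurrentMap<String, String> cache = new ConcurrentHashMap<>();
  private final ReentrantLock lock = new ReentrantLock(); // stand-in for the striped IdLock

  public String open(String fileName) {
    String cached = cache.get(fileName);  // 1. optimistic read, no lock
    if (cached != null) {
      return cached;
    }
    lock.lock();                          // 2. take the (per-key) lock
    try {
      cached = cache.get(fileName);       // 3. re-check under the lock
      if (cached == null) {
        cached = "opened:" + fileName;    // 4. expensive creation happens at most once
        cache.put(fileName, cached);
      }
      return cached;
    } finally {
      lock.unlock();
    }
  }

  public static void main(String[] args) {
    CheckLockRecheckCache cache = new CheckLockRecheckCache();
    System.out.println(cache.open("mobfile-1")); // opened:mobfile-1 (created once)
    System.out.println(cache.open("mobfile-1")); // opened:mobfile-1 (cache hit, no creation)
  }
}
```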
hbase_MobFileCache_evictFile_rdh | /**
* Evicts the cached file by the name.
*
* @param fileName
* The name of a cached file.
*/
public void evictFile(String fileName) {
if (isCacheEnabled) {
IdLock.Entry lockEntry = null;
try {
// obtains the lock to close the cached file.
lockEntry = keyLock.getLockEntry(hashFileName(fileName));
CachedMobFile evictedFile = map.remove(fileName);
if (evictedFile != null) {
evictedFile.close();
evictedFileCount.increment();
}
} catch (IOException e) {
LOG.error("Failed to evict the file " + fileName, e);} finally {
if (lockEntry != null) {
keyLock.releaseLockEntry(lockEntry);
}
}
}
} | 3.26 |
hbase_MetricsTableRequests_updatePutBatch_rdh | /**
* Update the batch Put time histogram
*
* @param t
* time it took
*/
public void updatePutBatch(long t) {
if (isEnableTableLatenciesMetrics()) {
putBatchTimeHistogram.update(t);
}
} | 3.26 |
hbase_MetricsTableRequests_updateTableReadQueryMeter_rdh | /**
* Update table read QPS
*/
public void updateTableReadQueryMeter() {
if (isEnabTableQueryMeterMetrics()) {
readMeter.mark();
}
} | 3.26 |
hbase_MetricsTableRequests_updateCheckAndDelete_rdh | /**
* Update the CheckAndDelete time histogram.
*
* @param time
* time it took
*/
public void updateCheckAndDelete(long time) {
if (isEnableTableLatenciesMetrics()) {
checkAndDeleteTimeHistogram.update(time);
}
} | 3.26 |
hbase_MetricsTableRequests_updateAppend_rdh | /**
* Update the Append time histogram.
*
* @param time
* time it took
* @param blockBytesScanned
* size of block bytes scanned to retrieve the response
*/
public void updateAppend(long time, long blockBytesScanned) {
if (isEnableTableLatenciesMetrics()) {
appendTimeHistogram.update(time);
if (blockBytesScanned > 0) {
blockBytesScannedCount.increment(blockBytesScanned);
appendBlockBytesScanned.update(blockBytesScanned);
}
}
} | 3.26 |
hbase_MetricsTableRequests_updateGet_rdh | /**
 * Update the Get time histogram.
*
* @param time
* time it took
* @param blockBytesScanned
* size of block bytes scanned to retrieve the response
*/
public void updateGet(long time, long blockBytesScanned) {
if (isEnableTableLatenciesMetrics()) {
getTimeHistogram.update(time);
if (blockBytesScanned > 0) {
blockBytesScannedCount.increment(blockBytesScanned);
getBlockBytesScanned.update(blockBytesScanned);
}
}
} | 3.26 |
hbase_MetricsTableRequests_updateIncrement_rdh | /**
* Update the Increment time histogram.
*
* @param time
* time it took
* @param blockBytesScanned
* size of block bytes scanned to retrieve the response
*/
public void updateIncrement(long time, long blockBytesScanned) {
if (isEnableTableLatenciesMetrics()) {
incrementTimeHistogram.update(time);
if (blockBytesScanned > 0) {
blockBytesScannedCount.increment(blockBytesScanned);
incrementBlockBytesScanned.update(blockBytesScanned);
}
}
} | 3.26 |
hbase_MetricsTableRequests_m1_rdh | /**
* Update the CheckAndPut time histogram.
*
* @param time
* time it took
*/
public void m1(long time) {
if (isEnableTableLatenciesMetrics()) {
checkAndPutTimeHistogram.update(time);
}
} | 3.26 |
hbase_MetricsTableRequests_updateCheckAndMutate_rdh | /**
* Update the CheckAndMutate time histogram.
*
* @param time
 * 		time it took
 * @param blockBytesScanned
 * 		size of block bytes scanned to retrieve the response
 */
public void updateCheckAndMutate(long time, long blockBytesScanned) {
if (isEnableTableLatenciesMetrics()) {
checkAndMutateTimeHistogram.update(time);
if (blockBytesScanned > 0) {
blockBytesScannedCount.increment(blockBytesScanned);
checkAndMutateBlockBytesScanned.update(blockBytesScanned);
}
}
} | 3.26 |
hbase_MetricsTableRequests_getMetricRegistryInfo_rdh | // Visible for testing
public MetricRegistryInfo getMetricRegistryInfo() {
return registryInfo;
} | 3.26 |
hbase_MetricsTableRequests_updateTableWriteQueryMeter_rdh | /**
* Update table write QPS
*/
public void updateTableWriteQueryMeter() {
if (isEnabTableQueryMeterMetrics()) {
writeMeter.mark();
}
} | 3.26 |
hbase_MetricsTableRequests_updateScan_rdh | /**
* Update the scan metrics.
*
* @param time
* response time of scan
* @param responseCellSize
 * 		size of the scan response
* @param blockBytesScanned
* size of block bytes scanned to retrieve the response
*/
public void updateScan(long time, long responseCellSize, long blockBytesScanned) {
if (isEnableTableLatenciesMetrics()) {
scanTimeHistogram.update(time);
f1.update(responseCellSize);
if (blockBytesScanned > 0) {
blockBytesScannedCount.increment(blockBytesScanned);
scanBlockBytesScanned.update(blockBytesScanned);
}
}
} | 3.26 |
hbase_MetricsTableRequests_updateDeleteBatch_rdh | /**
* Update the batch Delete time histogram
*
* @param t
* time it took
*/
public void updateDeleteBatch(long t) {
if (isEnableTableLatenciesMetrics()) {
deleteBatchTimeHistogram.update(t);
}
} | 3.26 |
hbase_MetricsTableRequests_updatePut_rdh | /**
* Update the Put time histogram
*
* @param t
* time it took
*/
public void updatePut(long t) {
if (isEnableTableLatenciesMetrics()) {
putTimeHistogram.update(t);
}
} | 3.26 |
hbase_MetricsTableRequests_m2_rdh | /**
* Update table read QPS
*
* @param count
* Number of occurrences to record
*/
public void m2(long count) {
if (isEnabTableQueryMeterMetrics()) {
readMeter.mark(count);
}
} | 3.26 |
hbase_NoLimitScannerContext_m0_rdh | /**
* Returns the static, immutable instance of {@link NoLimitScannerContext} to be used whenever
* limits should not be enforced
*/
@SuppressWarnings(value = "MS_EXPOSE_REP", justification = "singleton pattern")
public static final ScannerContext m0() {
return NO_LIMIT;
} | 3.26 |
hbase_ProcedureStoreBase_setRunning_rdh | /**
* Change the state to 'isRunning', returns true if the store state was changed, false if the
* store was already in that state.
*
* @param isRunning
* the state to set.
* @return true if the store state was changed, otherwise false.
*/
protected boolean setRunning(boolean isRunning) {
return running.getAndSet(isRunning) != isRunning;
} | 3.26 |
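setRunning relies on AtomicBoolean#getAndSet returning the previous value: the call reports a state change only when the previous value differs from the one just written. A tiny runnable sketch of the idiom (standalone, not HBase code):

```java
import java.util.concurrent.atomic.AtomicBoolean;

public class GetAndSetIdiom {
  public static void main(String[] args) {
    AtomicBoolean running = new AtomicBoolean(false);
    // getAndSet returns the previous value; the state "changed" only if the
    // previous value differs from the one we just wrote.
    boolean changed = running.getAndSet(true) != true;  // false -> true: changed
    System.out.println(changed);                        // true
    changed = running.getAndSet(true) != true;          // true -> true: no change
    System.out.println(changed);                        // false
  }
}
```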
hbase_MemStoreLABImpl_incScannerCount_rdh | /**
* Called when opening a scanner on the data of this MemStoreLAB
*/
@Override
public void incScannerCount() {
this.refCnt.retain();
} | 3.26 |
hbase_MemStoreLABImpl_m0_rdh | /**
* When a cell's size is too big (bigger than maxAlloc), copyCellInto does not allocate it on
* MSLAB. Since the process of flattening to CellChunkMap assumes that all cells are allocated on
* MSLAB, during this process, the big cells are copied into MSLAB using this method.
*/
@Override
public Cell m0(Cell cell) {
int size = Segment.getCellLength(cell);
Preconditions.checkArgument(size >= 0, "negative size");
if ((size + ChunkCreator.SIZEOF_CHUNK_HEADER) <= dataChunkSize) {
// Using copyCellInto for cells which are bigger than the original maxAlloc
return copyCellInto(cell, dataChunkSize);
} else {
Chunk c = getNewExternalChunk(size);
int allocOffset = c.alloc(size);
return copyToChunkCell(cell, c.getData(), allocOffset, size);
}
} | 3.26 |
hbase_MemStoreLABImpl_getOrMakeChunk_rdh | /**
* Get the current chunk, or, if there is no current chunk, allocate a new one from the JVM.
*/
private Chunk getOrMakeChunk() {
// Try to get the chunk
Chunk c = currChunk.get();
if (c != null) {
return c;
}
// No current chunk, so we want to allocate one. We race
// against other allocators to CAS in an uninitialized chunk
// (which is cheap to allocate)
if (lock.tryLock()) {
try {
// once again check inside the lock
c = currChunk.get();
if (c != null) {
return c;
}
c = this.chunkCreator.getChunk();
if (c != null) {
// set the curChunk. No need of CAS as only one thread will be here
currChunk.set(c);
chunks.add(c.getId());
return c;
}
} finally {
lock.unlock();
}
}
return null;
} | 3.26 |
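getOrMakeChunk combines a lock-free read with tryLock so that at most one thread allocates a chunk while the others simply get null back and retry from copyCellInto. A self-contained sketch of the same shape using JDK types only (names are illustrative, not the MSLAB implementation):

```java
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReentrantLock;

public class SingleAllocatorSketch {
  private final AtomicReference<byte[]> current = new AtomicReference<>();
  private final ReentrantLock lock = new ReentrantLock();

  /** Returns the current buffer, or null if another thread is allocating it right now. */
  byte[] getOrMake(int size) {
    byte[] c = current.get();
    if (c != null) {
      return c;
    }
    if (lock.tryLock()) {          // only one thread wins; losers return null and retry
      try {
        c = current.get();         // re-check inside the lock
        if (c == null) {
          c = new byte[size];      // stand-in for chunkCreator.getChunk()
          current.set(c);
        }
        return c;
      } finally {
        lock.unlock();
      }
    }
    return null;                   // caller loops and calls again, as copyCellInto does
  }

  public static void main(String[] args) {
    SingleAllocatorSketch s = new SingleAllocatorSketch();
    byte[] chunk;
    while ((chunk = s.getOrMake(4096)) == null) {
      // another thread is allocating; spin and retry, as the MSLAB loop does
    }
    System.out.println(chunk.length); // 4096
  }
}
```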
hbase_MemStoreLABImpl_copyBBECellInto_rdh | /**
 * Mostly a duplicate of {@link #copyCellInto(Cell, int)} done for perf's sake. It presumes
* ByteBufferExtendedCell instead of Cell so we deal with a specific type rather than the super
* generic Cell. Removes instanceof checks. Shrinkage is enough to make this inline where before
* it was too big. Uses less CPU. See HBASE-20875 for evidence.
*
* @see #copyCellInto(Cell, int)
*/
private Cell copyBBECellInto(ByteBufferExtendedCell cell, int maxAlloc) {
int size = cell.getSerializedSize();
Preconditions.checkArgument(size >= 0, "negative size");
// Callers should satisfy large allocations from JVM heap so limit fragmentation.
if (size > maxAlloc) {
return null;
}
Chunk c = null;
int v5 = 0;
while (true) {
// Try to get the chunk
c = getOrMakeChunk();
// We may get null because some other thread succeeded in getting the lock
// and so the current thread has to try again to make its chunk or grab the chunk
// that the other thread created
// Try to allocate from this chunk
if (c != null) {
v5 = c.alloc(size);
if (v5 != (-1)) {
// We succeeded - this is the common case - small alloc
// from a big buffer
break;
}
// not enough space!
// try to retire this chunk
tryRetireChunk(c);
}
}
return copyBBECToChunkCell(cell, c.getData(), v5, size);
} | 3.26 |
hbase_MemStoreLABImpl_copyToChunkCell_rdh | /**
* Clone the passed cell by copying its data into the passed buf and create a cell with a chunkid
* out of it
*
* @see #copyBBECToChunkCell(ByteBufferExtendedCell, ByteBuffer, int, int)
*/
private static Cell copyToChunkCell(Cell cell, ByteBuffer buf, int offset, int len) {
int tagsLen = cell.getTagsLength();
if (cell instanceof ExtendedCell) {
((ExtendedCell) (cell)).write(buf, offset);
} else {
// Normally all Cell impls within Server will be of type ExtendedCell. Just considering the
// other case also. The data fragments within Cell is copied into buf as in KeyValue
// serialization format only.
KeyValueUtil.appendTo(cell, buf, offset, true);
}
return createChunkCell(buf, offset, len, tagsLen, cell.getSequenceId());
} | 3.26 |
hbase_MemStoreLABImpl_tryRetireChunk_rdh | /**
* Try to retire the current chunk if it is still <code>c</code>. Postcondition is that
* curChunk.get() != c
*
* @param c
* the chunk to retire
*/
private void tryRetireChunk(Chunk c) {
currChunk.compareAndSet(c, null);
// If the CAS succeeds, that means that we won the race
// to retire the chunk. We could use this opportunity to
// update metrics on external fragmentation.
//
// If the CAS fails, that means that someone else already
// retired the chunk for us.
} | 3.26 |
hbase_MemStoreLABImpl_close_rdh | /**
* Close this instance since it won't be used any more, try to put the chunks back to pool
*/
@Override
public void close() {
if (!this.closed.compareAndSet(false, true)) {
return;
}
// We could put back the chunks to pool for reusing only when there is no
// opening scanner which will read their data
this.refCnt.release();
} | 3.26 |
hbase_MemStoreLABImpl_decScannerCount_rdh | /**
* Called when closing a scanner on the data of this MemStoreLAB
*/
@Override
public void decScannerCount() {
this.refCnt.release();
} | 3.26 |
hbase_MemStoreLABImpl_copyCellInto_rdh | /**
*
* @see #copyBBECellInto(ByteBufferExtendedCell, int)
*/
private Cell copyCellInto(Cell cell, int maxAlloc) {
int size = Segment.getCellLength(cell);
Preconditions.checkArgument(size >= 0, "negative size");
// Callers should satisfy large allocations directly from JVM since they
// don't cause fragmentation as badly.
if (size > maxAlloc) {
return null;
}
Chunk c = null;
int allocOffset = 0;
while (true) {
// Try to get the chunk
c = getOrMakeChunk();
// we may get null because some other thread succeeded in getting the lock
// and so the current thread has to try again to make its chunk or grab the chunk
// that the other thread created
// Try to allocate from this chunk
if (c != null) {
allocOffset = c.alloc(size);
if (allocOffset != (-1)) {
// We succeeded - this is the common case - small alloc
// from a big buffer
break;
}
// not enough space!
// try to retire this chunk
tryRetireChunk(c);
}
}
return copyToChunkCell(cell, c.getData(), allocOffset, size);
} | 3.26 |
hbase_MemStoreLABImpl_copyBBECToChunkCell_rdh | /**
* Clone the passed cell by copying its data into the passed buf and create a cell with a chunkid
* out of it
*
* @see #copyToChunkCell(Cell, ByteBuffer, int, int)
*/
private static Cell copyBBECToChunkCell(ByteBufferExtendedCell cell, ByteBuffer buf, int offset, int len) {
int tagsLen = cell.getTagsLength();
cell.write(buf, offset);
return createChunkCell(buf, offset, len, tagsLen, cell.getSequenceId());
} | 3.26 |
hbase_JenkinsHash_m0_rdh | /**
* Compute the hash of the specified file
*
* @param args
* name of file to compute hash of.
* @throws IOException
* e
*/
public static void m0(String[] args) throws IOException {
if (args.length != 1) {
System.err.println("Usage: JenkinsHash filename");
System.exit(-1);
}
FileInputStream in = new FileInputStream(args[0]);
byte[] bytes = new byte[512];
int value = 0;
JenkinsHash hash = new JenkinsHash();
try {
for (int length = in.read(bytes); length > 0; length = in.read(bytes)) {
value = hash.hash(new ByteArrayHashKey(bytes, 0, length), value);
}
} finally {
in.close();
}
System.out.println(Math.abs(value));
} | 3.26 |
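The loop above chains the hash across 512-byte blocks by seeding every hash call with the previous result. A standalone sketch of the same chaining idea, using a trivial stand-in block hash rather than the Jenkins algorithm:

```java
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

public class ChainedHashSketch {
  // Stand-in block hash: any function of (buffer, length, previousValue) chains the same way.
  static int blockHash(byte[] buf, int len, int seed) {
    int h = seed;
    for (int i = 0; i < len; i++) {
      h = 31 * h + buf[i];
    }
    return h;
  }

  public static void main(String[] args) throws IOException {
    InputStream in = new ByteArrayInputStream("some streamed content".getBytes(StandardCharsets.UTF_8));
    byte[] bytes = new byte[512];
    int value = 0;
    // Feed the stream block by block, seeding each call with the previous result,
    // just as the snippet above seeds JenkinsHash#hash with the prior value.
    for (int length = in.read(bytes); length > 0; length = in.read(bytes)) {
      value = blockHash(bytes, length, value);
    }
    System.out.println(Math.abs(value));
  }
}
```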
hbase_LogRollBackupSubprocedurePool_close_rdh | /**
* Attempt to cleanly shutdown any running tasks - allows currently running tasks to cleanly
* finish
*/
@Override
public void close() {
executor.shutdown();
} | 3.26 |
hbase_LogRollBackupSubprocedurePool_waitForOutstandingTasks_rdh | /**
* Wait for all of the currently outstanding tasks submitted via {@link #submitTask(Callable)}
*
* @return <tt>true</tt> on success, <tt>false</tt> otherwise
* @throws ForeignException
* exception
*/
public boolean waitForOutstandingTasks() throws ForeignException {
LOG.debug("Waiting for backup procedure to finish.");
try {
for (Future<Void> f : futures) {
f.get();
}
return true;
} catch (InterruptedException e) {
if (aborted) {
throw new ForeignException("Interrupted and found to be aborted while waiting for tasks!", e);
}
Thread.currentThread().interrupt();
} catch (ExecutionException e) {
if (e.getCause() instanceof ForeignException) {
throw ((ForeignException) (e.getCause()));
}
throw new ForeignException(name, e.getCause());
} finally {
// close off remaining tasks
for (Future<Void> f : futures) {
if (!f.isDone()) {
f.cancel(true);
}
}
}
return false;
} | 3.26 |
hbase_LogRollBackupSubprocedurePool_submitTask_rdh | /**
* Submit a task to the pool.
*/
public void submitTask(final Callable<Void> task) {
Future<Void> f = this.taskPool.submit(task);
futures.add(f);
} | 3.26 |
hbase_JSONMetricUtil_dumpBeanToString_rdh | /**
* Returns a subset of mbeans defined by qry. Modeled after DumpRegionServerMetrics#dumpMetrics.
* Example: String qry= "java.lang:type=Memory"
*
* @throws MalformedObjectNameException
 * 		if the json has a bad format
 * @throws IOException
* @return String representation of json array.
*/
public static String dumpBeanToString(String qry) throws MalformedObjectNameException, IOException {
StringWriter sw = new StringWriter(1024 * 100); // Guess this size
try (PrintWriter writer = new PrintWriter(sw)) {
JSONBean dumper = new JSONBean();
try (JSONBean.Writer jsonBeanWriter = dumper.open(writer)) {
MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
jsonBeanWriter.write(mbeanServer, new ObjectName(qry), null, false);
}
}
sw.close();
return sw.toString();
} | 3.26 |
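The qry argument is a JMX ObjectName such as the "java.lang:type=Memory" example in the javadoc. A small runnable sketch that queries the same platform MBeanServer directly (plain JDK, independent of the HBase JSONBean writer):

```java
import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class MBeanQuerySketch {
  public static void main(String[] args) throws Exception {
    // The query string passed to dumpBeanToString is a JMX ObjectName,
    // e.g. "java.lang:type=Memory" as the javadoc above shows.
    MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
    ObjectName query = new ObjectName("java.lang:type=Memory");
    Object heap = mbeanServer.getAttribute(query, "HeapMemoryUsage");
    System.out.println(heap); // the same bean dumpBeanToString would serialize to JSON
  }
}
```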
hbase_JSONMetricUtil_buldKeyValueTable_rdh | /**
 * Method for building the map used for constructing an ObjectName. Mapping is done by array indices
*
* @param keys
* Map keys
* @param values
* Map values
* @return Map or null if arrays are empty or have different number of elements
*/
// javax requires hashtable param for ObjectName constructor
@SuppressWarnings("JdkObsolete")
public static Hashtable<String, String> buldKeyValueTable(String[] keys, String[] values) {
if (keys.length != values.length) {
LOG.error("keys and values arrays must be same size");return null;
}
if ((keys.length == 0) || (values.length == 0)) {
LOG.error("keys and values arrays can not be empty;");
return null;
}
Hashtable<String, String> table = new Hashtable<>();
for (int i = 0; i < keys.length; i++) {
table.put(keys[i], values[i]);
}
return table;
} | 3.26 |
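The Hashtable produced here feeds the javax.management.ObjectName(String, Hashtable) constructor. A standalone usage sketch of that construction (the key/value arrays are illustrative):

```java
import java.util.Hashtable;
import javax.management.ObjectName;

public class ObjectNameTableSketch {
  public static void main(String[] args) throws Exception {
    // Mirrors what buldKeyValueTable produces: parallel key/value arrays zipped
    // into the Hashtable that javax.management.ObjectName requires.
    String[] keys = { "type", "name" };
    String[] values = { "Memory", "Example" };
    Hashtable<String, String> table = new Hashtable<>();
    for (int i = 0; i < keys.length; i++) {
      table.put(keys[i], values[i]);
    }
    ObjectName objectName = new ObjectName("java.lang", table);
    System.out.println(objectName); // java.lang:type=Memory,name=Example (key order may differ)
  }
}
```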
hbase_ReplicationSourceWALReader_addEntryToBatch_rdh | // returns true if we reach the size limit for batch, i.e, we need to finish the batch and return.
protected final boolean addEntryToBatch(WALEntryBatch batch, Entry entry) {
WALEdit edit = entry.getEdit();
if ((edit == null) || edit.isEmpty()) {
LOG.trace("Edit null or empty for entry {} ", entry);
return false;
}
LOG.trace("updating TimeStampOfLastAttempted to {}, from entry {}, for source queue: {}", entry.getKey().getWriteTime(), entry.getKey(), this.source.getQueueId());
updateReplicationMarkerEdit(entry, batch.getLastWalPosition());
long entrySize = getEntrySizeIncludeBulkLoad(entry);
batch.addEntry(entry, entrySize);
m0(batch, entry, entrySize);
boolean totalBufferTooLarge = this.getSourceManager().acquireWALEntryBufferQuota(batch, entry);
// Stop if too many entries or too big
return (totalBufferTooLarge || (batch.getHeapSize() >= replicationBatchSizeCapacity))
|| (batch.getNbEntries() >= replicationBatchCountCapacity);
} | 3.26 |
hbase_ReplicationSourceWALReader_checkBufferQuota_rdh | // returns false if we've already exceeded the global quota
private boolean checkBufferQuota() {
// try not to go over total quota
if (!this.getSourceManager().checkBufferQuota(this.source.getPeerId())) {
Threads.sleep(sleepForRetries);
return false;
}
return true;
} | 3.26 |
hbase_ReplicationSourceWALReader_readWALEntries_rdh | // We need to get the WALEntryBatch from the caller so we can add entries in there
// This is required in case there is any exception while reading entries
// we do not want to lose the existing entries in the batch
protected void readWALEntries(WALEntryStream entryStream, WALEntryBatch batch) throws InterruptedException {
Path v11 = entryStream.getCurrentPath();
for (; ;) {
Entry entry = entryStream.next();
batch.setLastWalPosition(entryStream.getPosition());
entry = filterEntry(entry);
if (entry != null) {
if (addEntryToBatch(batch, entry)) {
break;
}
}
WALEntryStream.HasNext hasNext = entryStream.hasNext();
// always return if we have switched to a new file
if (switched(entryStream, v11)) {
batch.setEndOfFile(true);
break;
}
if (hasNext != HasNext.YES) {
// For hasNext other than YES, it is OK to just retry.
// As for RETRY and RETRY_IMMEDIATELY, the correct action is to retry, and for NO, it will
// return NO again when you call the method next time, so it is OK to just return here and
// let the loop in the upper layer to call hasNext again.
break;
}
}
} | 3.26 |
hbase_ReplicationSourceWALReader_sizeOfStoreFilesIncludeBulkLoad_rdh | /**
* Calculate the total size of all the store files
*
* @param edit
* edit to count row keys from
* @return the total size of the store files
*/
private int sizeOfStoreFilesIncludeBulkLoad(WALEdit edit) {
List<Cell> cells = edit.getCells();
int totalStoreFilesSize = 0;
int totalCells = edit.size();
for (int i = 0; i < totalCells; i++) {
if (CellUtil.matchingQualifier(cells.get(i), WALEdit.BULK_LOAD)) {
try {
BulkLoadDescriptor bld = WALEdit.getBulkLoadDescriptor(cells.get(i));
List<StoreDescriptor> stores = bld.getStoresList();
int totalStores = stores.size();
for (int j = 0; j < totalStores; j++) {
totalStoreFilesSize = ((int) (totalStoreFilesSize + stores.get(j).getStoreFileSizeBytes()));
}
} catch (IOException e) {
LOG.error(("Failed to deserialize bulk load entry from wal edit. "
+ "Size of HFiles part of cell will not be considered in replication ") + "request size calculation.", e);
}
}
}
return totalStoreFilesSize;
} | 3.26 |
hbase_ReplicationSourceWALReader_take_rdh | /**
* Retrieves the next batch of WAL entries from the queue, waiting up to the specified time for a
* batch to become available
*
* @return A batch of entries, along with the position in the log after reading the batch
* @throws InterruptedException
* if interrupted while waiting
*/
public WALEntryBatch take() throws InterruptedException {
return entryBatchQueue.take();
} | 3.26 |
hbase_ReplicationSourceWALReader_countDistinctRowKeysAndHFiles_rdh | /**
* Count the number of different row keys in the given edit because of mini-batching. We assume
* that there's at least one Cell in the WALEdit.
*
* @param edit
* edit to count row keys from
* @return number of different row keys and HFiles
*/
private Pair<Integer, Integer> countDistinctRowKeysAndHFiles(WALEdit edit) {
List<Cell> cells = edit.getCells();
int distinctRowKeys = 1;
int totalHFileEntries = 0;
Cell lastCell = cells.get(0);
int totalCells = edit.size();
for (int i = 0; i < totalCells; i++) {
// Count HFiles to be replicated
if (CellUtil.matchingQualifier(cells.get(i), WALEdit.BULK_LOAD)) {
try {
BulkLoadDescriptor bld = WALEdit.getBulkLoadDescriptor(cells.get(i));
List<StoreDescriptor> stores = bld.getStoresList();
int v27 = stores.size();
for (int j = 0; j < v27; j++) {
totalHFileEntries += stores.get(j).getStoreFileList().size();
}
} catch (IOException e) {
LOG.error("Failed to deserialize bulk load entry from wal edit. " + "Then its hfiles count will not be added into metric.", e);
}
}
if (!CellUtil.matchingRows(cells.get(i), lastCell)) {
distinctRowKeys++;
}
lastCell = cells.get(i);
}
Pair<Integer, Integer> result = new Pair<>(distinctRowKeys, totalHFileEntries);
return result;
} | 3.26 |
hbase_ReplicationSourceWALReader_setReaderRunning_rdh | /**
*
* @param readerRunning
* the readerRunning to set
*/
public void setReaderRunning(boolean readerRunning) {
this.isReaderRunning = readerRunning;
} | 3.26 |
hbase_ReplicationSourceWALReader_isReaderRunning_rdh | /**
* Returns whether the reader thread is running
*/
public boolean isReaderRunning() {
return isReaderRunning && (!isInterrupted());
} | 3.26 |
hbase_AsyncFSWALProvider_createAsyncWriter_rdh | /**
* Public because of AsyncFSWAL. Should be package-private
*/
public static AsyncWriter createAsyncWriter(Configuration conf, FileSystem fs, Path path, boolean overwritable, long blocksize, EventLoopGroup eventLoopGroup, Class<? extends Channel> channelClass, StreamSlowMonitor monitor) throws IOException {
// Configuration already does caching for the Class lookup.
Class<? extends AsyncWriter> logWriterClass = conf.getClass(WRITER_IMPL, AsyncProtobufLogWriter.class, AsyncFSWALProvider.AsyncWriter.class);
try {
AsyncWriter writer = logWriterClass.getConstructor(EventLoopGroup.class, Class.class).newInstance(eventLoopGroup, channelClass);
writer.init(fs, path, conf, overwritable, blocksize, monitor);
return writer;
} catch (Exception e) {
if (e instanceof CommonFSUtils.StreamLacksCapabilityException) {
LOG.error(((((((("The RegionServer async write ahead log provider " + "relies on the ability to call ") + e.getMessage()) + " for proper operation during ") + "component failures, but the current FileSystem does not support doing so. Please ") + "check the config value of '") + CommonFSUtils.HBASE_WAL_DIR) + "' and ensure ") + "it points to a FileSystem mount that has suitable capabilities for output streams.");
} else {
LOG.debug("Error instantiating log writer.", e);
}
Throwables.propagateIfPossible(e, IOException.class);
throw new IOException("cannot get log writer", e);
}
} | 3.26 |
hbase_AsyncFSWALProvider_load_rdh | /**
* Test whether we can load the helper classes for async dfs output.
*/
public static boolean load() {
try {
Class.forName(FanOutOneBlockAsyncDFSOutput.class.getName());
Class.forName(FanOutOneBlockAsyncDFSOutputHelper.class.getName());
Class.forName(FanOutOneBlockAsyncDFSOutputSaslHelper.class.getName());
return true;
} catch (Throwable e) {
return false;
}
} | 3.26 |
hbase_TimestampsFilter_parseFrom_rdh | /**
* Parse a serialized representation of {@link TimestampsFilter}
*
* @param pbBytes
* A pb serialized {@link TimestampsFilter} instance
* @return An instance of {@link TimestampsFilter} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static TimestampsFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.TimestampsFilter v8;
try {
v8 = FilterProtos.TimestampsFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
return new TimestampsFilter(v8.getTimestampsList(), v8.hasCanHint() && v8.getCanHint());
} | 3.26 |
hbase_TimestampsFilter_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof TimestampsFilter)) {
return false;
}
TimestampsFilter other = ((TimestampsFilter) (o));
return this.getTimestamps().equals(other.getTimestamps());
} | 3.26 |
hbase_TimestampsFilter_getTimestamps_rdh | /**
* Returns the list of timestamps
*/
public List<Long> getTimestamps() {
List<Long> v1 = new ArrayList<>(timestamps.size());
v1.addAll(timestamps);
return v1;
} | 3.26 |
hbase_TimestampsFilter_toByteArray_rdh | /**
* Returns The filter serialized using pb
*/
@Override
public byte[] toByteArray() {
FilterProtos.TimestampsFilter.Builder v7 = FilterProtos.TimestampsFilter.newBuilder();
v7.addAllTimestamps(this.timestamps);
v7.setCanHint(canHint);
return v7.build().toByteArray();
} | 3.26 |
hbase_TimestampsFilter_getNextCellHint_rdh | /**
 * Pick the next cell that the scanner should seek to. Since this can skip any number of cells,
 * any of which can be a delete, this can resurrect old data. The method will only be used if canHint
* was set to true while creating the filter.
*
* @throws IOException
* This will never happen.
*/
@Override
public Cell getNextCellHint(Cell currentCell) throws IOException {
if (!canHint) {
return null;
}
Long nextTimestampObject = timestamps.lower(currentCell.getTimestamp());
if (nextTimestampObject == null) {
// This should only happen if the current column's
// timestamp is below the last one in the list.
//
// It should never happen as the filterCell should return NEXT_COL
// but it's always better to be extra safe and protect against future
// behavioral changes.
return PrivateCellUtil.createLastOnRowCol(currentCell);
}
// Since we know the nextTimestampObject isn't null here there must still be
// timestamps that can be included. Cast the Long to a long and return the
// a cell with the current row/cf/col and the next found timestamp.
long nextTimestamp = nextTimestampObject;
return PrivateCellUtil.createFirstOnRowColTS(currentCell, nextTimestamp);
} | 3.26 |
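The hint relies on NavigableSet#lower: given the current cell's timestamp it returns the greatest requested timestamp strictly below it, or null when none remain. A minimal TreeSet demonstration of that semantics:

```java
import java.util.TreeSet;

public class NavigableLowerSketch {
  public static void main(String[] args) {
    TreeSet<Long> timestamps = new TreeSet<>();
    timestamps.add(10L);
    timestamps.add(20L);
    timestamps.add(30L);
    // lower(x) returns the greatest element strictly less than x, or null if none:
    System.out.println(timestamps.lower(25L)); // 20
    System.out.println(timestamps.lower(10L)); // null -> the filter falls back to
                                               // createLastOnRowCol in that case
  }
}
```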
hbase_TimestampsFilter_getMin_rdh | /**
* Gets the minimum timestamp requested by filter.
*
* @return minimum timestamp requested by filter.
*/
public long getMin() {
return minTimestamp;
} | 3.26 |
hbase_CellUtil_copyRowTo_rdh | /**
* Copies the row to the given bytebuffer
*
* @param cell
* cell the cell whose row has to be copied
* @param destination
* the destination bytebuffer to which the row has to be copied
* @param destinationOffset
* the offset in the destination byte[]
* @return the offset of the bytebuffer after the copy has happened
*/
public static int copyRowTo(Cell cell, ByteBuffer destination, int destinationOffset) {
short rowLen = cell.getRowLength();
if (cell instanceof ByteBufferExtendedCell) {
ByteBufferUtils.copyFromBufferToBuffer(((ByteBufferExtendedCell) (cell)).getRowByteBuffer(), destination, ((ByteBufferExtendedCell) (cell)).getRowPosition(), destinationOffset, rowLen);
} else {
ByteBufferUtils.copyFromArrayToBuffer(destination, destinationOffset, cell.getRowArray(), cell.getRowOffset(), rowLen);
}
return destinationOffset + rowLen;
} | 3.26 |
hbase_CellUtil_copyValueTo_rdh | /**
* Copies the value to the given bytebuffer
*
* @param cell
* the cell whose value has to be copied
* @param destination
* the destination bytebuffer to which the value has to be copied
* @param destinationOffset
* the offset in the destination bytebuffer
* @return the offset of the bytebuffer after the copy has happened
*/
public static int copyValueTo(Cell cell, ByteBuffer destination, int destinationOffset) {
int vlen = cell.getValueLength();
if (cell instanceof ByteBufferExtendedCell) {
ByteBufferUtils.copyFromBufferToBuffer(((ByteBufferExtendedCell) (cell)).getValueByteBuffer(), destination, ((ByteBufferExtendedCell) (cell)).getValuePosition(), destinationOffset, vlen);
} else {
ByteBufferUtils.copyFromArrayToBuffer(destination, destinationOffset, cell.getValueArray(), cell.getValueOffset(), vlen);
}
return destinationOffset + vlen;
} | 3.26 |
hbase_CellUtil_matchingRows_rdh | /**
* Compares the row of two keyvalues for equality
*/
public static boolean matchingRows(final Cell left, final short lrowlength, final Cell right, final short rrowlength) {
if (lrowlength != rrowlength) return false;
if ((left instanceof ByteBufferExtendedCell) && (right instanceof ByteBufferExtendedCell)) {
return ByteBufferUtils.equals(((ByteBufferExtendedCell) (left)).getRowByteBuffer(), ((ByteBufferExtendedCell) (left)).getRowPosition(), lrowlength, ((ByteBufferExtendedCell) (right)).getRowByteBuffer(), ((ByteBufferExtendedCell) (right)).getRowPosition(), rrowlength);
}
if (left instanceof ByteBufferExtendedCell) {
return ByteBufferUtils.equals(((ByteBufferExtendedCell) (left)).getRowByteBuffer(),
((ByteBufferExtendedCell) (left)).getRowPosition(), lrowlength, right.getRowArray(), right.getRowOffset(), rrowlength);
}
if (right instanceof ByteBufferExtendedCell) {
return ByteBufferUtils.equals(((ByteBufferExtendedCell) (right)).getRowByteBuffer(), ((ByteBufferExtendedCell) (right)).getRowPosition(), rrowlength, left.getRowArray(), left.getRowOffset(), lrowlength);
}
return Bytes.equals(left.getRowArray(), left.getRowOffset(), lrowlength, right.getRowArray(), right.getRowOffset(), rrowlength);
} | 3.26 |
hbase_CellUtil_matchingQualifier_rdh | /**
* Finds if the qualifier part of the cell and the KV serialized byte[] are equal.
*
* @return true if the qualifier matches, false otherwise
*/
public static boolean matchingQualifier(final Cell left, final byte[] buf) {
if (buf == null) {
return left.getQualifierLength() == 0;
}
return PrivateCellUtil.matchingQualifier(left, buf, 0, buf.length);
} | 3.26 |
hbase_CellUtil_makeColumn_rdh | /**
* Makes a column in family:qualifier form from separate byte arrays.
* <p>
* Not recommended for usage as this is old-style API.
*
* @return family:qualifier
*/
public static byte[] makeColumn(byte[] family, byte[] qualifier) {
return Bytes.add(family, COLUMN_FAMILY_DELIM_ARRAY, qualifier);
} | 3.26 |
hbase_CellUtil_equals_rdh | /**
* ************** equals ***************************
 */
public static boolean equals(Cell a, Cell b) {
return (((matchingRows(a, b) && m1(a, b)) && matchingQualifier(a, b)) && matchingTimestamp(a, b)) && PrivateCellUtil.matchingType(a, b);
} | 3.26 |
hbase_CellUtil_isDelete_rdh | /**
 * Return true if a delete type, a {@link KeyValue.Type#Delete} or a {@link KeyValue.Type#DeleteFamily}
* or a {@link KeyValue.Type#DeleteColumn} KeyValue type.
*/
@SuppressWarnings("deprecation")
public static boolean isDelete(final Cell cell) {
return PrivateCellUtil.isDelete(cell.getTypeByte());
} | 3.26 |
hbase_CellUtil_toString_rdh | /**
* Returns a string representation of the cell
*/
public static String toString(Cell cell, boolean verbose) {
if (cell == null) {
return "";
}
StringBuilder builder = new StringBuilder();
String keyStr = getCellKeyAsString(cell);
String tag = null;
String value = null;
if (verbose) {
// TODO: pretty print tags as well
if (cell.getTagsLength() > 0) {
tag = Bytes.toStringBinary(cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength());
}
if (!(cell instanceof KeyValue.KeyOnlyKeyValue)) {
value = Bytes.toStringBinary(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
}
}
builder.append(keyStr);
if ((tag != null) && (!tag.isEmpty())) {
builder.append("/").append(tag);
}
if (value != null) {
builder.append("/").append(value);
}
return builder.toString();
} | 3.26 |
hbase_CellUtil_createCellScanner_rdh | /**
* Flatten the map of cells out under the CellScanner
*
* @param map
* Map of Cell Lists; for example, the map of families to Cells that is used inside
* Put, etc., keeping Cells organized by family.
* @return CellScanner interface over <code>cellIterable</code>
*/
public static CellScanner createCellScanner(final NavigableMap<byte[], List<Cell>> map) {
return new CellScanner() {
private final Iterator<Entry<byte[], List<Cell>>> entries = map.entrySet().iterator();
private Iterator<Cell> currentIterator = null;
private Cell currentCell;
@Override
public Cell current() {
return this.currentCell;
}
@Override
public boolean advance() {
while (true) {
if (this.currentIterator == null) {
if (!this.entries.hasNext()) return false;
this.currentIterator = this.entries.next().getValue().iterator();
}
if (this.currentIterator.hasNext()) {
this.currentCell = this.currentIterator.next();
return true;
}
this.currentCell = null;
this.currentIterator = null;
}
}
};
} | 3.26 |
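The anonymous CellScanner above flattens a map of per-family lists by holding an iterator over the map entries plus an iterator over the current list. A self-contained sketch of the same flattening shape over plain values (generic names are illustrative):

```java
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.NoSuchElementException;
import java.util.TreeMap;

public class FlatteningIteratorSketch {
  /** Same advance() shape as the anonymous CellScanner above, but over plain values. */
  static <K, V> Iterator<V> flatten(NavigableMap<K, List<V>> map) {
    return new Iterator<V>() {
      private final Iterator<Map.Entry<K, List<V>>> entries = map.entrySet().iterator();
      private Iterator<V> current = null;

      @Override
      public boolean hasNext() {
        while (true) {
          if (current == null) {
            if (!entries.hasNext()) {
              return false;
            }
            current = entries.next().getValue().iterator();
          }
          if (current.hasNext()) {
            return true;
          }
          current = null; // this list is exhausted; move on to the next map entry
        }
      }

      @Override
      public V next() {
        if (!hasNext()) {
          throw new NoSuchElementException();
        }
        return current.next();
      }
    };
  }

  public static void main(String[] args) {
    NavigableMap<String, List<Integer>> map = new TreeMap<>();
    map.put("cf1", Arrays.asList(1, 2));
    map.put("cf2", Arrays.asList(3));
    flatten(map).forEachRemaining(System.out::println); // prints 1, 2, 3
  }
}
```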
hbase_CellUtil_copyQualifierTo_rdh | /**
* Copies the qualifier to the given bytebuffer
*
* @param cell
* the cell whose qualifier has to be copied
* @param destination
* the destination bytebuffer to which the qualifier has to be copied
* @param destinationOffset
* the offset in the destination bytebuffer
* @return the offset of the bytebuffer after the copy has happened
*/
public static int copyQualifierTo(Cell cell, ByteBuffer destination, int destinationOffset) {
int qlen = cell.getQualifierLength();
if (cell instanceof ByteBufferExtendedCell) {
ByteBufferUtils.copyFromBufferToBuffer(((ByteBufferExtendedCell) (cell)).getQualifierByteBuffer(), destination, ((ByteBufferExtendedCell) (cell)).getQualifierPosition(), destinationOffset, qlen);
} else {
ByteBufferUtils.copyFromArrayToBuffer(destination, destinationOffset, cell.getQualifierArray(), cell.getQualifierOffset(), qlen);
}
return destinationOffset + qlen;
} | 3.26 |
hbase_CellUtil_setTimestamp_rdh | /**
* Sets the given timestamp to the cell. Note that this method is a LimitedPrivate API and may
* change between minor releases.
*
* @throws IOException
* when the passed cell is not of type {@link ExtendedCell}
*/
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
public static void setTimestamp(Cell cell, long ts) throws IOException {
PrivateCellUtil.setTimestamp(cell, ts);
}
/**
* Sets the given timestamp to the cell. Note that this method is a LimitedPrivate API and may
* change between minor releases.
*
* @throws IOException
* when the passed cell is not of type {@link ExtendedCell} | 3.26 |
hbase_CellUtil_copyRow_rdh | /**
* Copies the row to a new byte[]
*
* @param cell
* the cell from which row has to copied
* @return the byte[] containing the row
*/
public static byte[] copyRow(Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
return ByteBufferUtils.copyOfRange(((ByteBufferExtendedCell) (cell)).getRowByteBuffer(), ((ByteBufferExtendedCell) (cell)).getRowPosition(), ((ByteBufferExtendedCell) (cell)).getRowPosition() + cell.getRowLength());
} else {
return Arrays.copyOfRange(cell.getRowArray(), cell.getRowOffset(), cell.getRowOffset() + cell.getRowLength());
}
} | 3.26 |
hbase_CellUtil_matchingRowColumn_rdh | /**
* Compares the row and column of two keyvalues for equality
*/
public static boolean matchingRowColumn(final Cell left, final Cell right) {
short lrowlength = left.getRowLength();
short rrowlength = right.getRowLength();
// match length
if (lrowlength != rrowlength) {
return false;
}
byte lfamlength = left.getFamilyLength();
byte rfamlength = right.getFamilyLength();
if (lfamlength != rfamlength) {
return false;
}
int v32 = left.getQualifierLength();
int rqlength = right.getQualifierLength();
if (v32 != rqlength) {
return false;
}
if (!matchingRows(left, lrowlength, right, rrowlength)) {
return false;
}
return matchingColumn(left, lfamlength, v32, right, rfamlength, rqlength);
} | 3.26 |
hbase_CellUtil_matchingRowColumnBytes_rdh | /**
* Compares the row and column of two keyvalues for equality
*/
public static boolean matchingRowColumnBytes(final Cell left, final Cell right) {
int lrowlength = left.getRowLength();
int rrowlength = right.getRowLength();
int lfamlength = left.getFamilyLength();
int rfamlength = right.getFamilyLength();
int lqlength = left.getQualifierLength();
int rqlength = right.getQualifierLength();
// match length
if (((lrowlength != rrowlength) || (lfamlength != rfamlength)) || (lqlength != rqlength)) {
return false;
}
// match row
if (!Bytes.equals(left.getRowArray(), left.getRowOffset(), lrowlength,
right.getRowArray(), right.getRowOffset(), rrowlength)) {
return false;
}
// match family
if (!Bytes.equals(left.getFamilyArray(), left.getFamilyOffset(), lfamlength, right.getFamilyArray(), right.getFamilyOffset(),
rfamlength)) {
return false;
}
// match qualifier
return Bytes.equals(left.getQualifierArray(), left.getQualifierOffset(), lqlength, right.getQualifierArray(), right.getQualifierOffset(), rqlength);
} | 3.26 |
hbase_CellUtil_getCellKeyAsString_rdh | /**
* Return the Key portion of the passed <code>cell</code> as a String.
*
* @param cell
* the cell to convert
* @param rowConverter
* used to convert the row of the cell to a string
* @return The Key portion of the passed <code>cell</code> as a String.
*/
public static String getCellKeyAsString(Cell cell, Function<Cell, String> rowConverter) {
StringBuilder v21 = new StringBuilder(rowConverter.apply(cell));
v21.append('/');
v21.append(cell.getFamilyLength() == 0 ? "" : Bytes.toStringBinary(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()));
// KeyValue only added ':' if family is non-null. Do same.
if (cell.getFamilyLength() > 0) v21.append(':');
v21.append(cell.getQualifierLength() == 0 ? "" : Bytes.toStringBinary(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()));
v21.append('/');
v21.append(KeyValue.humanReadableTimestamp(cell.getTimestamp()));
v21.append('/');
v21.append(KeyValue.Type.codeToType(cell.getTypeByte()));
if (!(cell instanceof KeyValue.KeyOnlyKeyValue)) {
v21.append("/vlen=");
v21.append(cell.getValueLength());
}
v21.append("/seqid=");
v21.append(cell.getSequenceId());
return v21.toString();
} | 3.26 |
hbase_CellUtil_parseColumn_rdh | /**
* Splits a column in {@code family:qualifier} form into separate byte arrays. An empty qualifier
* (ie, {@code fam:}) is parsed as <code>{ fam, EMPTY_BYTE_ARRAY }</code> while no delimiter (ie,
* {@code fam}) is parsed as an array of one element, <code>{ fam }</code>.
* <p>
* Don't forget, HBase DOES support empty qualifiers. (see HBASE-9549)
* </p>
* <p>
 * Not recommended to be used as this is an old-style API.
* </p>
*
* @param c
* The column.
* @return The parsed column.
*/
public static byte[][] parseColumn(byte[] c) {
final int index = getDelimiter(c, 0, c.length, COLUMN_FAMILY_DELIMITER);
if (index == (-1)) {
// If no delimiter, return array of size 1
return new byte[][]{ c };
} else if (index == (c.length - 1)) {
// family with empty qualifier, return array size 2
byte[] family = new byte[c.length - 1];
System.arraycopy(c, 0, family, 0, family.length);
return new byte[][]{ family, HConstants.EMPTY_BYTE_ARRAY };
}
// Family and column, return array size 2
final byte[][] result = new byte[2][];
result[0] = new byte[index];
System.arraycopy(c, 0, result[0], 0, index);
final int len = c.length - (index + 1);
result[1] = new byte[len];
/* Skip delimiter */
System.arraycopy(c, index + 1, result[1], 0, len);
return result;
} | 3.26 |
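parseColumn splits on the first ':' and distinguishes three cases: no delimiter, an empty qualifier, and family plus qualifier. A standalone sketch that mirrors those cases with plain byte arrays (not the HBase implementation):

```java
import java.util.Arrays;

public class ParseColumnSketch {
  /** Mirrors CellUtil.parseColumn's three cases, using byte[]s and a ':' delimiter. */
  static byte[][] parseColumn(byte[] c) {
    int index = -1;
    for (int i = 0; i < c.length; i++) {
      if (c[i] == ':') { index = i; break; }
    }
    if (index == -1) {
      return new byte[][] { c };                                            // "fam"  -> { fam }
    } else if (index == c.length - 1) {
      return new byte[][] { Arrays.copyOf(c, c.length - 1), new byte[0] };  // "fam:" -> { fam, empty }
    }
    return new byte[][] { Arrays.copyOfRange(c, 0, index),
      Arrays.copyOfRange(c, index + 1, c.length) };                         // "fam:qual" -> { fam, qual }
  }

  public static void main(String[] args) {
    System.out.println(parseColumn("fam:qual".getBytes()).length); // 2
    System.out.println(parseColumn("fam:".getBytes()).length);     // 2 (empty qualifier)
    System.out.println(parseColumn("fam".getBytes()).length);      // 1
  }
}
```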
hbase_CellUtil_cloneRow_rdh | /**
* *************** get individual arrays for tests ***********
*/
public static byte[] cloneRow(Cell cell) {
byte[] output = new byte[cell.getRowLength()];
copyRowTo(cell, output, 0);
return output;
} | 3.26 |
hbase_CellUtil_isPut_rdh | /**
* Returns True if this cell is a Put.
*/
@SuppressWarnings("deprecation")
public static boolean isPut(Cell cell) {
return cell.getTypeByte() == Type.Put.getCode();
} | 3.26 |
hbase_CellUtil_copyFamilyTo_rdh | /**
* Copies the family to the given bytebuffer
*
* @param cell
* the cell whose family has to be copied
* @param destination
* the destination bytebuffer to which the family has to be copied
* @param destinationOffset
* the offset in the destination bytebuffer
* @return the offset of the bytebuffer after the copy has happened
*/
public static int copyFamilyTo(Cell cell, ByteBuffer destination, int destinationOffset) {
byte v11 = cell.getFamilyLength();
if (cell instanceof ByteBufferExtendedCell) {
ByteBufferUtils.copyFromBufferToBuffer(((ByteBufferExtendedCell) (cell)).getFamilyByteBuffer(), destination, ((ByteBufferExtendedCell) (cell)).getFamilyPosition(), destinationOffset, v11);
} else {
ByteBufferUtils.copyFromArrayToBuffer(destination, destinationOffset, cell.getFamilyArray(), cell.getFamilyOffset(), v11);
}
return destinationOffset + v11;
} | 3.26 |
hbase_CellUtil_matchingColumnFamilyAndQualifierPrefix_rdh | /**
* Returns True if matching column family and the qualifier starts with <code>qual</code>
*/
public static boolean matchingColumnFamilyAndQualifierPrefix(final Cell left, final byte[] fam, final byte[] qual) {
return matchingFamily(left, fam) && PrivateCellUtil.qualifierStartsWith(left, qual);
} | 3.26 |
hbase_QuotaState_isBypass_rdh | /**
* Returns true if there is no quota information associated to this object
*/
public synchronized boolean isBypass() {
return globalLimiter == NoopQuotaLimiter.get();
} | 3.26 |
hbase_QuotaState_getGlobalLimiter_rdh | /**
* Return the limiter associated with this quota.
*
* @return the quota limiter
*/
public synchronized QuotaLimiter getGlobalLimiter() {
lastQuery = EnvironmentEdgeManager.currentTime();
return globalLimiter;
} | 3.26 |
hbase_QuotaState_setQuotas_rdh | /**
* Setup the global quota information. (This operation is part of the QuotaState setup)
*/
public synchronized void setQuotas(final Quotas quotas) {
if (quotas.hasThrottle()) {
globalLimiter = QuotaLimiterFactory.fromThrottle(quotas.getThrottle());
} else {
globalLimiter = NoopQuotaLimiter.get();
}
} | 3.26 |
hbase_QuotaState_getGlobalLimiterWithoutUpdatingLastQuery_rdh | /**
* Return the limiter associated with this quota without updating internal last query stats
*
* @return the quota limiter
*/
synchronized QuotaLimiter getGlobalLimiterWithoutUpdatingLastQuery() {
return globalLimiter;
} | 3.26 |
hbase_QuotaState_update_rdh | /**
* Perform an update of the quota info based on the other quota info object. (This operation is
* executed by the QuotaCache)
*/
public synchronized void update(final QuotaState other) {
if (globalLimiter == NoopQuotaLimiter.get()) {
globalLimiter = other.globalLimiter;
} else if (other.globalLimiter == NoopQuotaLimiter.get()) {
globalLimiter = NoopQuotaLimiter.get();
} else {
globalLimiter = QuotaLimiterFactory.update(globalLimiter, other.globalLimiter);
}
lastUpdate = other.lastUpdate;
} | 3.26 |
hbase_ProcedureCoordinator_defaultPool_rdh | /**
* Default thread pool for the procedure
*
* @param opThreads
* the maximum number of threads to allow in the pool
*/
public static ThreadPoolExecutor defaultPool(String coordName, int opThreads) {
return defaultPool(coordName, opThreads, KEEP_ALIVE_MILLIS_DEFAULT);
} | 3.26 |
hbase_ProcedureCoordinator_createProcedure_rdh | /**
* Exposed for hooking with unit tests.
*
* @return the newly created procedure
*/
Procedure createProcedure(ForeignExceptionDispatcher fed, String procName, byte[] procArgs,
List<String> expectedMembers) {
// build the procedure
return new Procedure(this, fed, wakeTimeMillis, timeoutMillis, procName, procArgs, expectedMembers);
} | 3.26 |
hbase_ProcedureCoordinator_close_rdh | /**
* Shutdown the thread pools and release rpc resources
*/
public void close() throws IOException {
// have to use shutdown now to break any latch waiting
pool.shutdownNow();
rpcs.close();
} | 3.26 |
hbase_ProcedureCoordinator_rpcConnectionFailure_rdh | /**
* The connection to the rest of the procedure group (members and coordinator) has been
* broken/lost/failed. This should fail any interested procedures, but not attempt to notify other
* members since we cannot reach them anymore.
*
* @param message
* description of the error
* @param cause
* the actual cause of the failure
*/
void rpcConnectionFailure(final String message, final IOException cause) {
Collection<Procedure> toNotify = procedures.values();
boolean isTraceEnabled = LOG.isTraceEnabled();
LOG.debug("received connection failure: " + message, cause);
for (Procedure proc : toNotify) {
if (proc == null) {
continue;
}
// notify the elements, if they aren't null
if (isTraceEnabled) {
LOG.trace("connection failure - notify procedure: " + proc.getName());
}
proc.receive(new ForeignException(proc.getName(), cause));
}
} | 3.26 |
hbase_ProcedureCoordinator_getProcedureNames_rdh | /**
* Returns Return set of all procedure names.
*/
public Set<String> getProcedureNames() {
return new HashSet<>(procedures.keySet());
} | 3.26 |
hbase_ProcedureCoordinator_getProcedure_rdh | /**
* Returns the procedure. This Procedure is a live instance so should not be modified but can be
* inspected.
*
* @param name
* Name of the procedure
* @return Procedure or null if not present any more
*/
public Procedure getProcedure(String name) {
return procedures.get(name);
} | 3.26 |
hbase_ProcedureCoordinator_getRpcs_rdh | /**
* Returns the rpcs implementation for all current procedures
*/
ProcedureCoordinatorRpcs getRpcs() {
return rpcs;
} | 3.26 |
hbase_ProcedureCoordinator_memberAcquiredBarrier_rdh | /**
* Notification that the procedure had the specified member acquired its part of the barrier via
* {@link Subprocedure#acquireBarrier()}.
*
* @param procName
* name of the procedure that acquired
* @param member
* name of the member that acquired
*/
void memberAcquiredBarrier(String procName, final String member) {
Procedure proc = procedures.get(procName);
if (proc == null) {
LOG.warn(((("Member '" + member) + "' is trying to acquire an unknown procedure '") + procName) + "'");
return;
}
if (LOG.isTraceEnabled()) {
LOG.trace(((("Member '" + member) + "' acquired procedure '") + procName) + "'");
}
proc.barrierAcquiredByMember(member);
} | 3.26 |
hbase_ProcedureCoordinator_abortProcedure_rdh | /**
* Abort the procedure with the given name
*
* @param procName
* name of the procedure to abort
* @param reason
* serialized information about the abort
*/
public void abortProcedure(String procName, ForeignException reason) {
LOG.debug("abort procedure " + procName, reason);
// if we know about the Procedure, notify it
Procedure proc = procedures.get(procName);
if (proc == null) {
return;
}
proc.receive(reason);
} | 3.26 |
hbase_ProcedureCoordinator_memberFinishedBarrier_rdh | /**
* Notification that the procedure had another member finished executing its in-barrier subproc
* via {@link Subprocedure#insideBarrier()}.
*
* @param procName
* name of the subprocedure that finished
* @param member
* name of the member that executed and released its barrier
* @param dataFromMember
* the data that the member returned along with the notification
 */
void memberFinishedBarrier(String procName, final String member, byte[] dataFromMember) {
Procedure proc = procedures.get(procName);
if (proc == null) {
LOG.warn(((("Member '" + member) + "' is trying to release an unknown procedure '") + procName) + "'");
return;
}
if (LOG.isTraceEnabled()) {
LOG.trace(((("Member '" + member) + "' released procedure '") + procName) + "'");
}
proc.barrierReleasedByMember(member, dataFromMember);
} | 3.26 |
hbase_RegionServerTracker_processAsActiveMaster_rdh | // execute the operations which are only needed for active masters, such as expire old servers,
// add new servers, etc.
private void processAsActiveMaster(Set<ServerName> newServers) {
Set<ServerName> oldServers = regionServers;
ServerManager serverManager = server.getServerManager();
// expire dead servers
for (ServerName crashedServer : Sets.difference(oldServers, newServers)) {
LOG.info("RegionServer ephemeral node deleted, processing expiration [{}]", crashedServer);
serverManager.expireServer(crashedServer);
}
// check whether there are new servers, log them
boolean newServerAdded = false;
for (ServerName sn : newServers) {
if (!oldServers.contains(sn)) {
newServerAdded = true;
LOG.info(("RegionServer ephemeral node created, adding [" + sn) + "]");
}
}
if (newServerAdded && server.isInitialized()) {
// Only call the check to move servers if a RegionServer was added to the cluster; in this
// case it could be a server with a new version so it makes sense to run the check.
server.checkIfShouldMoveSystemRegionAsync();
}
} | 3.26 |
hbase_RegionServerTracker_upgrade_rdh | /**
* Upgrade to active master mode, where besides tracking the changes of region server set, we will
* also started to add new region servers to ServerManager and also schedule SCP if a region
* server dies. Starts the tracking of online RegionServers. All RSes will be tracked after this
* method is called.
* <p/>
* In this method, we will also construct the region server sets in {@link ServerManager}. If a
* region server is dead between the crash of the previous master instance and the start of the
* current master instance, we will schedule a SCP for it. This is done in
* {@link ServerManager#findDeadServersAndProcess(Set, Set)}, we call it here under the lock
* protection to prevent concurrency issues with server expiration operation.
*
* @param deadServersFromPE
* the region servers which already have SCP associated.
* @param liveServersBeforeRestart
* the live region servers we recorded before master restarts.
* @param splittingServersFromWALDir
* Servers whose WALs are being actively 'split'.
*/
public void upgrade(Set<ServerName> deadServersFromPE, Set<ServerName> liveServersBeforeRestart, Set<ServerName> splittingServersFromWALDir) throws KeeperException, IOException {
LOG.info("Upgrading RegionServerTracker to active master mode; {} have existing" + "ServerCrashProcedures, {} possibly 'live' servers, and {} 'splitting'.", deadServersFromPE.size(), liveServersBeforeRestart.size(), splittingServersFromWALDir.size());
// deadServersFromPE is made from a list of outstanding ServerCrashProcedures.
// splittingServersFromWALDir are being actively split -- the directory in the FS ends in
// '-SPLITTING'. Each splitting server should have a corresponding SCP. Log if not.
splittingServersFromWALDir.stream().filter(s -> !deadServersFromPE.contains(s)).forEach(s -> LOG.error("{} has no matching ServerCrashProcedure", s));
// create ServerNode for all possible live servers from wal directory and master local region
liveServersBeforeRestart.forEach(sn -> server.getAssignmentManager().getRegionStates().getOrCreateServer(sn));
ServerManager serverManager = server.getServerManager();
synchronized(this) {
Set<ServerName> liveServers = regionServers;
for (ServerName serverName : liveServers) {
RegionServerInfo info = getServerInfo(serverName);
ServerMetrics serverMetrics = (info != null) ? ServerMetricsBuilder.of(serverName, VersionInfoUtil.getVersionNumber(info.getVersionInfo()), info.getVersionInfo().getVersion())
: ServerMetricsBuilder.of(serverName);
serverManager.checkAndRecordNewServer(serverName, serverMetrics);
}
serverManager.findDeadServersAndProcess(deadServersFromPE, liveServersBeforeRestart);
active = true;
}
} | 3.26 |
hbase_ClusterStatusListener_receive_rdh | /**
* Acts upon the reception of a new cluster status.
*
* @param ncs
* the cluster status
*/
public void receive(ClusterMetrics ncs) {
if (ncs.getDeadServerNames() != null) {
for (ServerName sn : ncs.getDeadServerNames()) {
if (!isDeadServer(sn)) {
LOG.info("There is a new dead server: " +
sn);
deadServers.add(sn);
if (deadServerHandler != null) {
deadServerHandler.newDead(sn);
}
}
}
}
} | 3.26 |
hbase_ClusterStatusListener_isDeadServer_rdh | /**
* Check if we know if a server is dead.
*
* @param sn
* the server name to check.
* @return true if we know for sure that the server is dead, false otherwise.
*/
public boolean isDeadServer(ServerName sn) {
if (sn.getStartcode() <= 0) {
return false;
}
for (ServerName dead : deadServers) {
if (((dead.getStartcode() >= sn.getStartcode()) && (dead.getPort() == sn.getPort())) && dead.getHostname().equals(sn.getHostname())) {
return true;
}
}
return false;
} | 3.26 |
hbase_FileArchiverNotifierImpl_persistSnapshotSizeChanges_rdh | /**
* Reads the current size for each snapshot to update, generates a new update based on that value,
* and then writes the new update.
*
* @param snapshotSizeChanges
* A map of snapshot name to size change
*/
void persistSnapshotSizeChanges(Map<String, Long> snapshotSizeChanges) throws IOException {
try (Table quotaTable = conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) {
// Create a list (with a more typical ordering implied)
final List<Entry<String, Long>> snapshotSizeEntries = new ArrayList<>(snapshotSizeChanges.entrySet());
// Create the Gets for each snapshot we need to update
final List<Get> snapshotSizeGets = snapshotSizeEntries.stream().map(e -> QuotaTableUtil.makeGetForSnapshotSize(tn, e.getKey())).collect(Collectors.toList());
final Iterator<Entry<String, Long>> iterator = snapshotSizeEntries.iterator();
// A List to store each Put we'll create from the Get's we retrieve
final List<Put> updates = new ArrayList<>(snapshotSizeEntries.size());
// TODO Push this down to the RegionServer with a coprocessor:
//
// We would really like to piggy-back on the row-lock already being grabbed
// to handle the update of the row in the quota table. However, because the value
// is a serialized protobuf, the standard Increment API doesn't work for us. With a CP, we
// can just send the size deltas to the RS and atomically update the serialized PB object
// while relying on the row-lock for synchronization.
//
// Synchronizing on the namespace string is a "minor smell" but passable as this is
// only invoked via a single caller (the active Master). Using the namespace name lets us
// have some parallelism without worry of on caller seeing stale data from the quota table.
synchronized(m0(tn.getNamespaceAsString())) {
final Result[] existingSnapshotSizes = quotaTable.get(snapshotSizeGets);
long totalSizeChange = 0;
// Read the current size values (if they exist) to generate the new value
for (Result result : existingSnapshotSizes) {
Entry<String, Long> entry = iterator.next();
String snapshot = entry.getKey();
Long size = entry.getValue();
// Track the total size change for the namespace this table belongs in
totalSizeChange += size;
// Get the size of the previous value (or zero)
long previousSize = getSnapshotSizeFromResult(result);
// Create an update. A file was archived from the table, so the table's size goes
// down, but the snapshot's size goes up.
updates.add(QuotaTableUtil.createPutForSnapshotSize(tn, snapshot, previousSize + size));
}
// Create an update for the summation of all snapshots in the namespace
if (totalSizeChange != 0) {
long previousSize = getPreviousNamespaceSnapshotSize(quotaTable, tn.getNamespaceAsString());
updates.add(QuotaTableUtil.createPutForNamespaceSnapshotSize(tn.getNamespaceAsString(), previousSize + totalSizeChange));
}
// Send all of the quota table updates in one batch.
List<Object> failures = new ArrayList<>();
final Object[] results = new Object[updates.size()];
quotaTable.batch(updates, results);
for (Object result : results) {
// A null result is an error condition (all RPC attempts failed)
if (!(result instanceof Result)) {
failures.add(result);
}
}
// Propagate a failure if any updates failed
if (!failures.isEmpty()) {
throw new QuotaSnapshotSizeSerializationException("Failed to write some snapshot size updates: " + failures);
}
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return;
}
} | 3.26 |
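The method above boils down to a read-modify-write cycle per snapshot against the quota table. A rough, illustrative sketch of that cycle for a single snapshot follows; the table name "testTable", snapshot name "snap1", and the 1024-byte delta are invented, and it assumes the QuotaTableUtil helpers used above are accessible from the caller (some of them may be package-private, in which case the sketch would have to live in the org.apache.hadoop.hbase.quotas package).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.quotas.QuotaTableUtil;

public class SnapshotSizeUpdateSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("testTable"); // hypothetical table
    String snapshot = "snap1";                     // hypothetical snapshot
    long sizeDelta = 1024L;                        // bytes newly attributed to the snapshot
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table quotaTable = conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) {
      // Read the previously persisted size (zero if the row or cell is absent) ...
      Result r = quotaTable.get(QuotaTableUtil.makeGetForSnapshotSize(tn, snapshot));
      long previous = (!r.isEmpty() && r.advance())
        ? QuotaTableUtil.parseSnapshotSize(r.current())
        : 0L;
      // ... and write back the incremented value.
      quotaTable.put(QuotaTableUtil.createPutForSnapshotSize(tn, snapshot, previous + sizeDelta));
    }
  }
}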
hbase_FileArchiverNotifierImpl_getSizeOfStoreFiles_rdh | /**
* Computes the size of each store file in {@code storeFileNames}
*/
long getSizeOfStoreFiles(TableName tn, Set<StoreFileReference> storeFileNames) {
return storeFileNames.stream().collect(Collectors.summingLong(sfr -> getSizeOfStoreFile(tn, sfr)));
} | 3.26 |
hbase_FileArchiverNotifierImpl_getLastFullCompute_rdh | /**
* Returns a strictly-increasing measure of time extracted by {@link System#nanoTime()}.
*/
long getLastFullCompute() {
return lastFullCompute;
} | 3.26 |
hbase_FileArchiverNotifierImpl_computeSnapshotSizes_rdh | /**
* Computes the size of each snapshot against the table referenced by {@code this}.
*
* @param snapshots
* A sorted list of snapshots against {@code tn}.
* @return A list of the size for each snapshot against {@code tn}.
*/
List<SnapshotWithSize> computeSnapshotSizes(List<String> snapshots) throws IOException {
final List<SnapshotWithSize> snapshotSizes = new ArrayList<>(snapshots.size());
final Path rootDir = CommonFSUtils.getRootDir(conf);
// Get the map of store file names to store file path for this table
final Set<String> tableReferencedStoreFiles;
try {
tableReferencedStoreFiles = FSUtils.getTableStoreFilePathMap(fs, rootDir).keySet();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return null;
}
if (LOG.isTraceEnabled()) {
LOG.trace((("Paths for " + tn) + ": ") + tableReferencedStoreFiles);
}
// For each snapshot on this table, get the files which the snapshot references which
// the table does not.
Set<String> snapshotReferencedFiles = new HashSet<>();
for (String snapshotName : snapshots) {
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
SnapshotDescription sd = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, sd);
if (LOG.isTraceEnabled()) {
LOG.trace("Files referenced by other snapshots: " + snapshotReferencedFiles);
}
// Get the set of files from the manifest that this snapshot references which are not also
// referenced by the originating table.
Set<StoreFileReference> unreferencedStoreFileNames = getStoreFilesFromSnapshot(manifest, sfn -> (!tableReferencedStoreFiles.contains(sfn)) && (!snapshotReferencedFiles.contains(sfn)));
if (LOG.isTraceEnabled()) {
LOG.trace((("Snapshot " + snapshotName) + " solely references the files: ") + unreferencedStoreFileNames);
}
// Compute the size of the store files for this snapshot
long size = getSizeOfStoreFiles(tn, unreferencedStoreFileNames);
if (LOG.isTraceEnabled()) {
LOG.trace((("Computed size of " + snapshotName) + " to be ") + size);
}
// Persist this snapshot's size into the map
snapshotSizes.add(new SnapshotWithSize(snapshotName, size));
// Make sure that we don't double-count the same file
for (StoreFileReference ref : unreferencedStoreFileNames) {
for (String fileNames : ref.getFamilyToFilesMapping().values()) {
snapshotReferencedFiles.add(fileNames);
}
}
}
return snapshotSizes;
} | 3.26 |
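The loop above relies on attributing each store file's size to at most one snapshot: files still owned by the table are excluded, and once a snapshot claims a file it is recorded in snapshotReferencedFiles so later snapshots skip it. A self-contained, plain-Java sketch of that bookkeeping, with made-up file names and sizes, looks like this:

import java.util.*;

public class UniqueAttributionSketch {
  public static void main(String[] args) {
    // Files the live table still references, and the sizes of all known files.
    Set<String> tableFiles = Set.of("f1", "f2");
    Map<String, Long> fileSizes = Map.of("f1", 10L, "f2", 20L, "f3", 30L, "f4", 40L);
    // Snapshots in processing order, each with the files it references.
    Map<String, Set<String>> snapshotFiles = new LinkedHashMap<>();
    snapshotFiles.put("snapA", Set.of("f1", "f3", "f4"));
    snapshotFiles.put("snapB", Set.of("f3", "f4"));

    Set<String> alreadyClaimed = new HashSet<>();
    for (Map.Entry<String, Set<String>> snap : snapshotFiles.entrySet()) {
      long size = 0;
      for (String file : snap.getValue()) {
        // Only count files the table no longer owns and no earlier snapshot claimed.
        if (!tableFiles.contains(file) && alreadyClaimed.add(file)) {
          size += fileSizes.get(file);
        }
      }
      System.out.println(snap.getKey() + " -> " + size); // snapA -> 70, snapB -> 0
    }
  }
}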
hbase_FileArchiverNotifierImpl_persistSnapshotSizes_rdh | /**
* Writes the snapshot sizes to the provided {@code table}.
*/
void persistSnapshotSizes(Table table, List<SnapshotWithSize> snapshotSizes) throws IOException {
// Convert each entry in the map to a Put and write them to the quota table
table.put(snapshotSizes.stream().map(sws -> QuotaTableUtil.createPutForSnapshotSize(tn, sws.getName(), sws.getSize())).collect(Collectors.toList()));
} | 3.26 |
hbase_FileArchiverNotifierImpl_getStoreFilesFromSnapshot_rdh | /**
* Extracts the names of the store files referenced by this snapshot which satisfy the given
* predicate (the predicate returns {@code true}).
*/
Set<StoreFileReference> getStoreFilesFromSnapshot(SnapshotManifest manifest, Predicate<String> filter) {
Set<StoreFileReference> references = new HashSet<>();
// For each region referenced by the snapshot
for (SnapshotRegionManifest rm : manifest.getRegionManifests()) {
StoreFileReference regionReference = new StoreFileReference(ProtobufUtil.toRegionInfo(rm.getRegionInfo()).getEncodedName());
// For each column family in this region
for (FamilyFiles ff : rm.getFamilyFilesList()) {
final String familyName = ff.getFamilyName().toStringUtf8();
// And each store file in that family
for (StoreFile sf : ff.getStoreFilesList()) {
String storeFileName = sf.getName();
// A snapshot only "inherits" a file's size if it uniquely refers to it (no table
// and no other snapshot references it).
if (filter.test(storeFileName)) {
regionReference.addFamilyStoreFile(familyName, storeFileName);
}
}
}
// Only add this Region reference if we retained any files.
if (!regionReference.getFamilyToFilesMapping().isEmpty()) {
references.add(regionReference);
}
}
return references;
} | 3.26 |
hbase_FileArchiverNotifierImpl_groupArchivedFiledBySnapshotAndRecordSize_rdh | /**
* For each file in the map, this updates the first snapshot (lexicographic snapshot name) that
* references this file. The result of this computation is serialized to the quota table.
*
* @param snapshots
* A collection of HBase snapshots to group the files into
* @param fileSizes
* A map of file names to their sizes
*/
void groupArchivedFiledBySnapshotAndRecordSize(List<String> snapshots, Set<Entry<String, Long>> fileSizes) throws IOException {
// Make a copy as we'll modify it.
final Map<String, Long> filesToUpdate = new HashMap<>(fileSizes.size());
for (Entry<String, Long> entry : fileSizes) {
filesToUpdate.put(entry.getKey(), entry.getValue());
}
// Track the change in size to each snapshot
final Map<String, Long> snapshotSizeChanges = new HashMap<>();
for (String snapshot : snapshots) {
// For each file in `filesToUpdate`, check if `snapshot` refers to it.
// If `snapshot` does, remove it from `filesToUpdate` and add it to `snapshotSizeChanges`.
bucketFilesToSnapshot(snapshot, filesToUpdate, snapshotSizeChanges);
if (filesToUpdate.isEmpty()) {
// If we have no more files recently archived, we have nothing more to check
break;
}
}
// We have computed changes to the snapshot size, we need to record them.
if (!snapshotSizeChanges.isEmpty()) {
if (LOG.isTraceEnabled()) {
LOG.trace("Writing snapshot size changes for: " + snapshotSizeChanges);
}
persistSnapshotSizeChanges(snapshotSizeChanges);
}
} | 3.26 |
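bucketFilesToSnapshot is not shown here, but the surrounding loop implies its contract: for one snapshot, move every still-unclaimed archived file out of filesToUpdate and add its size to snapshotSizeChanges. A hypothetical, plain-Java sketch of that behavior (the snapshot-to-files lookup below is a stand-in for the real manifest walk):

import java.util.*;

public class BucketingSketch {
  // Stand-in for the real manifest lookup: files referenced by a given snapshot.
  static Set<String> filesReferencedBy(String snapshot) {
    return snapshot.equals("snapA") ? Set.of("f1", "f2") : Set.of("f2", "f3");
  }

  public static void main(String[] args) {
    Map<String, Long> filesToUpdate = new HashMap<>(Map.of("f1", 5L, "f3", 7L));
    Map<String, Long> snapshotSizeChanges = new HashMap<>();
    for (String snapshot : List.of("snapA", "snapB")) {
      Set<String> referenced = filesReferencedBy(snapshot);
      // Claim every still-unclaimed archived file this snapshot references.
      Iterator<Map.Entry<String, Long>> it = filesToUpdate.entrySet().iterator();
      while (it.hasNext()) {
        Map.Entry<String, Long> file = it.next();
        if (referenced.contains(file.getKey())) {
          snapshotSizeChanges.merge(snapshot, file.getValue(), Long::sum);
          it.remove();
        }
      }
      if (filesToUpdate.isEmpty()) {
        break; // nothing left to attribute
      }
    }
    System.out.println(snapshotSizeChanges); // e.g. {snapA=5, snapB=7}
  }
}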
hbase_FileArchiverNotifierImpl_getSizeOfStoreFile_rdh | /**
* Computes the size of the store file given its name, region and family name in the archive
* directory.
*/
long getSizeOfStoreFile(TableName tn, String regionName, String family, String storeFile) {
Path familyArchivePath;
try {
familyArchivePath = HFileArchiveUtil.getStoreArchivePath(conf, tn, regionName, family);
} catch (IOException e) {
LOG.warn("Could not compute path for the archive directory for the region", e);
return 0L;
}
Path fileArchivePath = new Path(familyArchivePath, storeFile);
try {
if (fs.exists(fileArchivePath)) {
FileStatus[] status = fs.listStatus(fileArchivePath);
if (1 != status.length) {
LOG.warn(("Expected " + fileArchivePath) + " to be a file but was a directory, ignoring reference");
return 0L;
}
return status[0].getLen();
}
} catch (IOException e) {
LOG.warn("Could not obtain the status of " + fileArchivePath, e);
return 0L;
}
LOG.warn(("Expected " + fileArchivePath) + " to exist but does not, ignoring reference.");
return 0L;
} | 3.26 |
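For illustration, the same existence-then-length check can be written against the stock Hadoop FileSystem API; the archive path below is invented, since the real one is derived by HFileArchiveUtil.getStoreArchivePath as above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchivedFileSizeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical archived store file path used only for this sketch.
    Path file = new Path("/hbase/archive/data/default/testTable/region1/cf/storefile1");
    long size = 0L;
    if (fs.exists(file)) {
      FileStatus status = fs.getFileStatus(file);
      // Treat directories as "no usable reference", mirroring the defensive check above.
      size = status.isFile() ? status.getLen() : 0L;
    }
    System.out.println("size=" + size);
  }
}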
hbase_FileArchiverNotifierImpl_getSnapshotSizeFromResult_rdh | /**
* Extracts the size component from a serialized {@link SpaceQuotaSnapshot} protobuf.
*
* @param r
* A Result containing one cell with a SpaceQuotaSnapshot protobuf
* @return The size in bytes of the snapshot.
*/
long getSnapshotSizeFromResult(Result r) throws InvalidProtocolBufferException {
// Per javadoc, Result should only be null if an exception was thrown. So, if we're here,
// we should be non-null. If we can't advance to the first cell, same as "no cell".
if ((!r.isEmpty()) && r.advance()) {
return QuotaTableUtil.parseSnapshotSize(r.current());
}
return 0L;
} | 3.26 |
hbase_FileArchiverNotifierImpl_getPreviousNamespaceSnapshotSize_rdh | /**
* Fetches the current size of all snapshots in the given {@code namespace}.
*
* @param quotaTable
* The HBase quota table
* @param namespace
* Namespace to fetch the sum of snapshot sizes for
* @return The size of all snapshot sizes for the namespace in bytes.
*/
long getPreviousNamespaceSnapshotSize(Table quotaTable, String namespace) throws IOException {
// Update the size of each snapshot for all snapshots in a namespace.
Result r = quotaTable.get(QuotaTableUtil.createGetNamespaceSnapshotSize(namespace));
return getSnapshotSizeFromResult(r);
} | 3.26 |