name | code_snippet | score
---|---|---|
hbase_Query_setReplicaId_rdh | /**
* Specify region replica id where Query will fetch data from. Use this together with
* {@link #setConsistency(Consistency)} passing {@link Consistency#TIMELINE} to read data from a
* specific replicaId. <br>
* <b>Expert:</b> This is an advanced API. Only use it if you know what you are doing.
*/
public Query setReplicaId(int Id) {
this.targetReplicaId = Id;
return this; } | 3.26 |
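The row above pairs with timeline-consistent reads, as its Javadoc notes. A minimal client-side sketch (not part of the dataset row; `table` and the row key are placeholder assumptions) of how the two calls are typically combined:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch only: 'table' is an already-opened Table for a table that has region replicas.
static Result readFromReplica(Table table) throws IOException {
  Get get = new Get(Bytes.toBytes("row1"));
  get.setConsistency(Consistency.TIMELINE); // allow possibly-stale reads from secondary replicas
  get.setReplicaId(1);                      // expert API: pin the read to replica id 1
  return table.get(get);
}
```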
hbase_Query_getLoadColumnFamiliesOnDemandValue_rdh | /**
* Get the raw loadColumnFamiliesOnDemand setting; if it's not set, can be null.
*/
public Boolean getLoadColumnFamiliesOnDemandValue() {
return this.loadColumnFamiliesOnDemand;
} | 3.26 |
hbase_KeyStoreFileType_fromFilename_rdh | /**
* Detects the type of KeyStore / TrustStore file from the file extension. If the file name ends
* with ".jks", returns <code>StoreFileType.JKS</code>. If the file name ends with ".pem", returns
* <code>StoreFileType.PEM</code>. If the file name ends with ".p12", returns
* <code>StoreFileType.PKCS12</code>. If the file name ends with ".bcfks", returns
* <code>StoreFileType.BCFKS</code>. Otherwise, throws an IllegalArgumentException.
*
* @param filename
* the filename of the key store or trust store file.
* @return a KeyStoreFileType.
* @throws IllegalArgumentException
* if the filename does not end with ".jks", ".pem", ".p12" or
* "bcfks".
*/
public static KeyStoreFileType fromFilename(String filename) {
int i = filename.lastIndexOf('.');
if (i >= 0) {
String extension = filename.substring(i);
for (KeyStoreFileType storeFileType : KeyStoreFileType.values()) {
if (storeFileType.m0().equals(extension)) {
return storeFileType;
}
}
}
throw new IllegalArgumentException("Unable to auto-detect store file type from file name: " + filename);
} | 3.26 |
hbase_KeyStoreFileType_fromPropertyValueOrFileName_rdh | /**
* If <code>propertyValue</code> is not null or empty, returns the result of
* <code>KeyStoreFileType.fromPropertyValue(propertyValue)</code>. Else, returns the result of
* <code>KeyStoreFileType.fromFileName(filename)</code>.
*
* @param propertyValue
* property value describing the KeyStoreFileType, or null/empty to
* auto-detect the type from the file name.
* @param filename
* file name of the key store file. The file extension is used to auto-detect
* the KeyStoreFileType when <code>propertyValue</code> is null or empty.
* @return a KeyStoreFileType.
* @throws IllegalArgumentException
* if <code>propertyValue</code> is not one of "JKS", "PEM",
* "PKCS12", "BCFKS", or empty/null.
* @throws IllegalArgumentException
* if <code>propertyValue</code> is empty or null and the type
* could not be determined from the file name.
*/
public static KeyStoreFileType fromPropertyValueOrFileName(String propertyValue, String filename) {
KeyStoreFileType result = KeyStoreFileType.fromPropertyValue(propertyValue);
if (result == null) {
result = KeyStoreFileType.fromFilename(filename);
}
return result;
} | 3.26 |
hbase_KeyStoreFileType_m0_rdh | /**
* The file extension that is associated with this file type.
*/
public String m0() {
return defaultFileExtension;
} | 3.26 |
hbase_KeyStoreFileType_getPropertyValue_rdh | /**
* The property string that specifies that a key store or trust store should use this store file
* type.
*/
public String getPropertyValue() {
return this.name();
} | 3.26 |
hbase_VisibilityLabelServiceManager_getVisibilityLabelService_rdh | /**
*
* @return singleton instance of {@link VisibilityLabelService}.
* @throws IllegalStateException
* if this is called before initialization of the singleton instance.
*/
public VisibilityLabelService getVisibilityLabelService() {
// By the time this method is called, the singleton instance of visibilityLabelService should
// have been created. And it will be created as getVisibilityLabelService(Configuration conf)
// is called from VC#start(), and that will be the first thing the core code does with any CP.
if (this.visibilityLabelService == null) {
throw new IllegalStateException("VisibilityLabelService not yet instantiated");
}
return this.visibilityLabelService;
} | 3.26 |
hbase_RpcHandler_getCallRunner_rdh | /**
* Returns a {@link CallRunner}.
*/
protected CallRunner getCallRunner() throws InterruptedException {
return this.q.take();
} | 3.26 |
hbase_ProcedureWALPrettyPrinter_run_rdh | /**
* Pass one or more log file names and formatting options and it will dump out a text version of
* the contents on <code>stdout</code>. Takes command line arguments; throws an IOException upon
* file system errors etc.
*/
@Override
public int run(final String[] args) throws IOException {
// create options
Options options = new Options();
options.addOption("h", "help", false, "Output help message");options.addOption("f", "file", true, "File to print");
final List<Path> files =
new ArrayList<>();
try {
CommandLine cmd = new DefaultParser().parse(options, args);
if (cmd.hasOption("f")) {
files.add(new Path(cmd.getOptionValue("f")));
}
if (files.isEmpty() || cmd.hasOption("h")) {HelpFormatter formatter = new HelpFormatter();
formatter.printHelp("ProcedureWALPrettyPrinter ", options, true);
return -1;
}
} catch (ParseException e) {
LOG.error("Failed to parse commandLine arguments", e);
HelpFormatter formatter = new HelpFormatter();
formatter.printHelp("ProcedureWALPrettyPrinter ", options, true);
return -1;
}
// get configuration, file system, and process the given files
for (Path file : files) {
processFile(getConf(), file);
}
return 0;
} | 3.26 |
hbase_ProcedureWALPrettyPrinter_processFile_rdh | /**
* Reads a log file and outputs its contents.
*
* @param conf
* HBase configuration relevant to this log file
* @param p
* path of the log file to be read
* @throws IOException
* IOException
*/
public void processFile(final Configuration conf, final Path p) throws IOException {
FileSystem fs = p.getFileSystem(conf);
if (!fs.exists(p)) {
System.err.println("ERROR, file doesnt exist: " + p);
return;
}
if (!fs.isFile(p)) {
System.err.println(p + " is not a file");
return;
}
FileStatus logFile = fs.getFileStatus(p);
if (logFile.getLen() == 0) {
out.println("Zero length file: " + p);
return;
}
out.println("Opening procedure state-log: " + p);
ProcedureWALFile log = new ProcedureWALFile(fs, logFile);
processProcedureWALFile(log);
} | 3.26 |
hbase_CellCodec_readByteArray_rdh | /**
* Returns the byte array read from the stream.
*/
private byte[] readByteArray(final InputStream in) throws IOException {
byte[] intArray = new byte[Bytes.SIZEOF_INT];
IOUtils.readFully(in, intArray);
int length = Bytes.toInt(intArray);
byte[] bytes = new byte[length];
IOUtils.readFully(in, bytes);
return bytes;
} | 3.26 |
hbase_CellCodec_write_rdh | /**
* Write int length followed by array bytes.
*/
private void write(final byte[] bytes, final int offset, final int length) throws IOException {
// TODO add BB backed os check and do for write. Pass Cell
this.out.write(Bytes.toBytes(length));
this.out.write(bytes, offset, length);
} | 3.26 |
hbase_RSAnnotationReadingPriorityFunction_getDeadline_rdh | /**
* Based on the request content, returns the deadline of the request.
*
* @return Deadline of this request. 0 now, otherwise msec of 'delay'
*/
@Override
public long getDeadline(RequestHeader header, Message param) {
if (param instanceof ScanRequest) {
ScanRequest request = ((ScanRequest) (param));
if (!request.hasScannerId()) {
return 0;
}
// Get the 'virtual time' of the scanner and apply sqrt() to get a nice curve for the delay.
// The more a scanner is used, the less priority it gets.
// The weight is used to have more control on the delay.
long vtime = rpcServices.getScannerVirtualTime(request.getScannerId());
return Math.round(Math.sqrt(vtime * scanVirtualTimeWeight));
}
return 0;
} | 3.26 |
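A quick illustration of the deadline curve computed above; the weight value is an arbitrary assumption for the example, not the HBase default:

```java
// round(sqrt(vtime * weight)) grows sub-linearly, so heavily used scanners get delayed,
// but the delay flattens out instead of growing without bound.
long scanVirtualTimeWeight = 10;
for (long vtime : new long[] { 0, 100, 10_000, 1_000_000 }) {
  long deadline = Math.round(Math.sqrt(vtime * scanVirtualTimeWeight));
  System.out.println("vtime=" + vtime + " -> deadline=" + deadline + " ms");
}
```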
hbase_DNS_getHostname_rdh | /**
* Get the configured hostname for a given ServerType. Gets the default hostname if not specified
* in the configuration.
*
* @param conf
* Configuration to look up.
* @param serverType
* ServerType to look up in the configuration for overrides.
*/
public static String getHostname(@NonNull Configuration conf, @NonNull ServerType serverType)
throws UnknownHostException {
String hostname;
switch (serverType) {
case MASTER :
hostname = conf.get(MASTER_HOSTNAME_KEY);
break;
case REGIONSERVER :
hostname = conf.get(UNSAFE_RS_HOSTNAME_KEY);
break;
default :
hostname = null;
}
if ((hostname == null) || hostname.isEmpty()) {
return Strings.domainNamePointerToHostName(
getDefaultHost(conf.get(("hbase." + serverType.getName()) + ".dns.interface", "default"),
conf.get(("hbase." + serverType.getName()) + ".dns.nameserver", "default")));
} else {
return hostname;
}
} | 3.26 |
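A hedged usage sketch of the lookup above; it assumes the nested `DNS.ServerType` enum implied by the signature and a standard `HBaseConfiguration`:

```java
import java.net.UnknownHostException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Resolve the master's hostname: the explicit override wins if configured; otherwise the
// name is derived via reverse DNS of the configured interface/nameserver ("default" by default).
static String resolveMasterHostname() throws UnknownHostException {
  Configuration conf = HBaseConfiguration.create();
  return DNS.getHostname(conf, DNS.ServerType.MASTER);
}
```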
hbase_DNS_getDefaultHost_rdh | /**
* Wrapper around DNS.getDefaultHost(String, String), calling DNS.getDefaultHost(String, String,
* boolean) when available.
*
* @param strInterface
* The network interface to query.
* @param nameserver
* The DNS host name.
* @return The default host names associated with IPs bound to the network interface.
*/
public static String getDefaultHost(String strInterface, String nameserver) throws UnknownHostException {
if (HAS_NEW_DNS_GET_DEFAULT_HOST_API) {
try {
// Hadoop-2.8 includes a String, String, boolean variant of getDefaultHost
// which properly handles multi-homed systems with Kerberos.
return ((String) (GET_DEFAULT_HOST_METHOD.invoke(null, strInterface, nameserver, true)));
} catch (Exception e) {
// If we can't invoke the method as it should exist, throw an exception
throw new RuntimeException("Failed to invoke DNS.getDefaultHost via reflection", e);
}
} else {
return DNS.getDefaultHost(strInterface, nameserver);
}
} | 3.26 |
hbase_LruBlockCache_runEviction_rdh | /**
* Multi-threaded call to run the eviction process.
*/
private void runEviction() {
if ((evictionThread == null) || (!evictionThread.isGo())) {
evict();
} else {
evictionThread.evict();
}
} | 3.26 |
hbase_LruBlockCache_m0_rdh | /**
* Get the buffer of the block with the specified name.
*
* @param cacheKey
* block's cache key
* @param caching
* true if the caller caches blocks on cache misses
* @param repeat
* Whether this is a repeat lookup for the same block (used to avoid
* double counting cache misses when doing double-check locking)
* @param updateCacheMetrics
* Whether to update cache metrics or not
* @return buffer of specified cache key, or null if not in cache
*/
@Override
public Cacheable m0(BlockCacheKey cacheKey, boolean caching, boolean repeat, boolean updateCacheMetrics) {
// Note: 'map' must be a ConcurrentHashMap or the supplier may be invoked more than once.
LruCachedBlock cb = map.computeIfPresent(cacheKey, (key, val) -> {
// It will be referenced by RPC path, so increase here. NOTICE: Must do the retain inside
// this block. because if retain outside the map#computeIfPresent, the evictBlock may remove
// the block and release, then we're retaining a block with refCnt=0 which is disallowed.
// see HBASE-22422.
val.getBuffer().retain();
return val;
});
if (cb == null) {
if ((!repeat) && updateCacheMetrics) {
stats.miss(caching, cacheKey.isPrimary(), cacheKey.getBlockType());
}
// If there is another block cache then try and read there.
// However if this is a retry ( second time in double checked locking )
// And it's already a miss then the l2 will also be a miss.
if ((victimHandler != null) && (!repeat)) {
// The handler will increase result's refCnt for RPC, so need no extra retain.
Cacheable result = victimHandler.getBlock(cacheKey, caching, repeat, updateCacheMetrics);
// Promote this to L1.
if (result != null) {
if (caching) {
cacheBlock(cacheKey, result, /* inMemory = */ false);
}
}
return result;
}
return null;
}
if (updateCacheMetrics) {
stats.hit(caching, cacheKey.isPrimary(), cacheKey.getBlockType());
}
cb.access(count.incrementAndGet());
return cb.getBuffer();
} | 3.26 |
hbase_LruBlockCache_getCachedFileNamesForTest_rdh | /**
* Used in testing. May be very inefficient.
*
* @return the set of cached file names
*/
SortedSet<String> getCachedFileNamesForTest() {
SortedSet<String> fileNames = new TreeSet<>();
for (BlockCacheKey cacheKey : map.keySet()) {
fileNames.add(cacheKey.getHfileName());
}
return fileNames; } | 3.26 |
hbase_LruBlockCache_evict_rdh | /**
* Eviction method.
*/
void evict() {
// Ensure only one eviction at a time
if (!evictionLock.tryLock()) {
return;
}
try {
evictionInProgress = true;
long currentSize = this.size.get();
long bytesToFree = currentSize - minSize();
if (LOG.isTraceEnabled()) {
LOG.trace((("Block cache LRU eviction started; Attempting to free " + StringUtils.byteDesc(bytesToFree)) + " of total=") + StringUtils.byteDesc(currentSize));
}
if (bytesToFree <= 0) {
return;
}
// Instantiate priority buckets
BlockBucket bucketSingle = new BlockBucket("single", bytesToFree, blockSize, singleSize());
BlockBucket bucketMulti = new BlockBucket("multi", bytesToFree, blockSize, multiSize());
BlockBucket bucketMemory = new BlockBucket("memory", bytesToFree, blockSize, memorySize());
// Scan entire map putting into appropriate buckets
for (LruCachedBlock cachedBlock : map.values()) {
switch (cachedBlock.getPriority()) {
case SINGLE :
{
bucketSingle.add(cachedBlock);
break;
}
case MULTI :
{
bucketMulti.add(cachedBlock);
break;
}
case MEMORY :
{
bucketMemory.add(cachedBlock);
break;
}
}
}
long bytesFreed = 0;
if (forceInMemory || (memoryFactor > 0.999F)) {
long s = bucketSingle.totalSize();
long m = bucketMulti.totalSize();
if (bytesToFree > (s + m)) {
// this means we need to evict blocks in memory bucket to make room,
// so the single and multi buckets will be emptied
bytesFreed = bucketSingle.free(s);
bytesFreed += bucketMulti.free(m);
if (LOG.isTraceEnabled()) {
LOG.trace(("freed " + StringUtils.byteDesc(bytesFreed)) + " from single and multi buckets");
}
bytesFreed += bucketMemory.free(bytesToFree - bytesFreed);
if (LOG.isTraceEnabled()) {
LOG.trace(("freed " + StringUtils.byteDesc(bytesFreed)) + " total from all three buckets ");
}
} else {
// this means no need to evict block in memory bucket,
// and we try best to make the ratio between single-bucket and
// multi-bucket is 1:2
long bytesRemain = (s + m) - bytesToFree;
if ((3 * s) <= bytesRemain) {
// single-bucket is small enough that no eviction happens for it
// hence all eviction goes from multi-bucket
bytesFreed = bucketMulti.free(bytesToFree);
} else if ((3 * m) <= (2 * bytesRemain)) {
// multi-bucket is small enough that no eviction happens for it
// hence all eviction goes from single-bucket
bytesFreed = bucketSingle.free(bytesToFree);
} else {
// both buckets need to evict some blocks
bytesFreed = bucketSingle.free(s - (bytesRemain / 3));
if (bytesFreed < bytesToFree) {
bytesFreed += bucketMulti.free(bytesToFree - bytesFreed);
}
}
}
} else {
PriorityQueue<BlockBucket> bucketQueue = new PriorityQueue<>(3);
bucketQueue.add(bucketSingle);
bucketQueue.add(bucketMulti);
bucketQueue.add(bucketMemory);
int remainingBuckets = bucketQueue.size();
BlockBucket bucket;
while ((bucket = bucketQueue.poll()) != null) {
long overflow = bucket.overflow();
if (overflow > 0) {
long bucketBytesToFree = Math.min(overflow, (bytesToFree - bytesFreed) / remainingBuckets);
bytesFreed += bucket.free(bucketBytesToFree);
}
remainingBuckets--;
}
}
if (LOG.isTraceEnabled()) {
long single = bucketSingle.totalSize();
long multi = bucketMulti.totalSize();
long memory = bucketMemory.totalSize();
LOG.trace(((((((((((((("Block cache LRU eviction completed; " + "freed=") + StringUtils.byteDesc(bytesFreed)) + ", ") + "total=") + StringUtils.byteDesc(this.size.get())) + ", ") + "single=") + StringUtils.byteDesc(single)) + ", ") + "multi=") + StringUtils.byteDesc(multi)) + ", ") + "memory=") + StringUtils.byteDesc(memory));
}
} finally {
stats.evict();
evictionInProgress = false;
evictionLock.unlock();
}
} | 3.26 |
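A worked example of the single:multi split in the second branch above, using made-up sizes to show how the 1:2 retention ratio comes out of the arithmetic:

```java
// All sizes are invented for illustration.
long s = 30, m = 90, bytesToFree = 60;        // single bucket, multi bucket, eviction target
long bytesRemain = (s + m) - bytesToFree;     // 60 bytes are allowed to stay cached
// 3*s = 90 > 60 and 3*m = 270 > 2*60 = 120, so both buckets give something up:
long freedFromSingle = s - (bytesRemain / 3); // keep bytesRemain/3 = 20 in single -> free 10
long freedFromMulti = bytesToFree - freedFromSingle; // the remaining 50 comes from multi
// Retained: 20 in single vs 40 in multi, i.e. the intended 1:2 ratio.
```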
hbase_LruBlockCache_isEnteringRun_rdh | /**
* Used for the test.
*/
boolean isEnteringRun() {
return this.enteringRun;
} | 3.26 |
hbase_LruBlockCache_assertCounterSanity_rdh | /**
* Sanity-checking for parity between actual block cache content and metrics. Intended only for
* use with TRACE level logging and -ea JVM.
*/
private static void assertCounterSanity(long mapSize, long counterVal) {
if (counterVal < 0) {
LOG.trace((("counterVal overflow. Assertions unreliable. counterVal=" + counterVal) + ", mapSize=") + mapSize);
return;
}
if (mapSize < Integer.MAX_VALUE) {
double pct_diff = Math.abs((((double) (counterVal)) / ((double) (mapSize))) - 1.0);
if (pct_diff > 0.05) {
LOG.trace((("delta between reported and actual size > 5%. counterVal=" + counterVal) + ", mapSize=") + mapSize);
}
}
} | 3.26 |
hbase_LruBlockCache_updateSizeMetrics_rdh | /**
* Helper function that updates the local size counter and also updates any per-cf or
* per-blocktype metrics it can discern from given {@link LruCachedBlock}
*/
private long updateSizeMetrics(LruCachedBlock cb, boolean evict) {
long heapsize = cb.heapSize();
BlockType bt = cb.getBuffer().getBlockType();
if (evict) {
heapsize *= -1;
}
if (bt != null) {
if (bt.isBloom()) {
bloomBlockSize.add(heapsize);
} else if (bt.isIndex()) {
indexBlockSize.add(heapsize);
} else if (bt.isData()) {
dataBlockSize.add(heapsize);
}
}
return size.addAndGet(heapsize);
} | 3.26 |
hbase_LruBlockCache_getStats_rdh | /**
* Get counter statistics for this cache.
* <p>
* Includes: total accesses, hits, misses, evicted blocks, and runs of the eviction processes.
*/
@Override
public CacheStats getStats() {
return this.stats;
} | 3.26 |
hbase_LruBlockCache_evictBlock_rdh | /**
* Evict the block; it will be cached by the victim handler if one exists and the block may be
* read again later.
*
* @param evictedByEvictionProcess
* true if the given block is evicted by EvictionThread
* @return the heap size of evicted block
*/
protected long evictBlock(LruCachedBlock block, boolean evictedByEvictionProcess) {
final MutableBoolean evicted = new MutableBoolean(false);
// Note: 'map' must be a ConcurrentHashMap or the supplier may be invoked more than once.
map.computeIfPresent(block.getCacheKey(), (k, v) -> {
// Run the victim handler before we remove the mapping in the L1 map. It must complete
// quickly because other removal or insertion operations can be blocked in the meantime.
if (evictedByEvictionProcess && (victimHandler != null)) {
victimHandler.cacheBlock(k, v.getBuffer());
}
// Decrease the block's reference count, and if refCount is 0, then it'll auto-deallocate. DO
// NOT move this up because if we do that then the victimHandler may access the buffer with
// refCnt = 0 which is disallowed.
v.getBuffer().release();
evicted.setTrue();
// By returning null from the supplier we remove the mapping from the L1 map.
return null;
});
// If we didn't find anything to evict there is nothing more to do here.
if (evicted.isFalse()) {
return 0;
}
// We evicted the block so update L1 statistics.
updateSizeMetrics(block, true);
long v17 = elements.decrementAndGet();
if (LOG.isTraceEnabled()) {
long size = map.size();
assertCounterSanity(size, v17);
}
BlockType bt = block.getBuffer().getBlockType();
if (bt.isBloom()) {
bloomBlockElements.decrement();
} else if (bt.isIndex()) {
indexBlockElements.decrement();
} else if (bt.isData()) {
dataBlockElements.decrement();
}
if (evictedByEvictionProcess) {
// When the eviction of the block happened because of invalidation of HFiles, no need to
// update the stats counter.
stats.evicted(block.getCachedTime(), block.getCacheKey().isPrimary());
}
return block.heapSize();
} | 3.26 |
hbase_LruBlockCache_acceptableSize_rdh | // Simple calculators of sizes given factors and maxSize
long acceptableSize() {
return ((long) (Math.floor(this.maxSize * this.acceptableFactor)));
} | 3.26 |
hbase_LruBlockCache_clearCache_rdh | /**
* Clears the cache. Used in tests.
*/
public void clearCache() {
this.map.clear();
this.elements.set(0);
} | 3.26 |
hbase_LruBlockCache_evictBlocksByHfileName_rdh | /**
* Evicts all blocks for a specific HFile. This is an expensive operation implemented as a
* linear-time search through all blocks in the cache. Ideally this should be a search in a
* log-access-time map.
* <p>
* This is used for evict-on-close to remove all blocks of a specific HFile.
*
* @return the number of blocks evicted
*/
@Override
public int evictBlocksByHfileName(String hfileName) {
int numEvicted = 0;
for (BlockCacheKey key : map.keySet()) {
if (key.getHfileName().equals(hfileName)) {
if (evictBlock(key)) {
++numEvicted;
}
}
}
if (victimHandler != null) {
numEvicted += victimHandler.evictBlocksByHfileName(hfileName);
}
return numEvicted;
} | 3.26 |
hbase_LruBlockCache_cacheBlock_rdh | /**
* Cache the block with the specified name and buffer.
* <p>
* TODO after HBASE-22005, we may cache a block allocated from off-heap, but our LRU cache sizing
* is based on heap size, so we should handle this in HBASE-22127. It will introduce a switch for
* whether to make the LRU on-heap or not; if so we may need to copy the memory to on-heap,
* otherwise the caching size is based on off-heap.
*
* @param cacheKey
* block's cache key
* @param buf
* block buffer
*/
@Override
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
cacheBlock(cacheKey, buf, false);
} | 3.26 |
hbase_LruBlockCache_containsBlock_rdh | /**
* Whether the cache contains block with specified cacheKey
*
* @return true if contains the block
*/
@Override
public boolean containsBlock(BlockCacheKey cacheKey) {
return
map.containsKey(cacheKey);
} | 3.26 |
hbase_TableResource_getName_rdh | /**
* Returns the table name
*/
String getName() {
return table;
} | 3.26 |
hbase_TableResource_exists_rdh | /**
* Returns true if the table exists.
*/
boolean exists() throws IOException {
return servlet.getAdmin().tableExists(TableName.valueOf(table));
} | 3.26 |
hbase_BlockIOUtils_readFullyWithHeapBuffer_rdh | /**
* Copying bytes from InputStream to {@link ByteBuff} by using a temporary heap byte[] (default
* size is 1024 now).
*
* @param in
* the InputStream to read
* @param out
* the destination {@link ByteBuff}
* @param length
* to read
* @throws IOException
* if any io error encountered.
*/
public static void readFullyWithHeapBuffer(InputStream in, ByteBuff out, int length) throws IOException {
if (length < 0) {
throw new IllegalArgumentException("Length must not be negative: " + length);
}
int heapBytesRead = 0;
int remain = length;
int count;
byte[] buffer = new byte[1024];
try {
while (remain > 0) {
count = in.read(buffer, 0, Math.min(remain, buffer.length));
if (count < 0) {
throw new IOException(("Premature EOF from inputStream, but still need " + remain) + " bytes");
}
out.put(buffer, 0, count);
remain -= count;
heapBytesRead += count;
}
} finally {
final Span span = Span.current();
final AttributesBuilder attributesBuilder = builderFromContext(Context.current());
annotateHeapBytesRead(attributesBuilder, heapBytesRead);
span.addEvent("BlockIOUtils.readFullyWithHeapBuffer", attributesBuilder.build());
}
} | 3.26 |
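The loop above is a standard bounded-buffer copy. A plain-Java restatement of the same pattern (using `java.nio.ByteBuffer` in place of HBase's `ByteBuff`, and without the tracing) may make the EOF handling easier to follow:

```java
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;

// Copy exactly 'length' bytes from 'in' into 'out' through a small heap buffer.
static void copyFully(InputStream in, ByteBuffer out, int length) throws IOException {
  byte[] buffer = new byte[1024];
  int remain = length;
  while (remain > 0) {
    int count = in.read(buffer, 0, Math.min(remain, buffer.length));
    if (count < 0) {
      throw new IOException("Premature EOF from inputStream, but still need " + remain + " bytes");
    }
    out.put(buffer, 0, count);
    remain -= count;
  }
}
```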
hbase_BlockIOUtils_readFully_rdh | /**
* Read length bytes into ByteBuffers directly.
*
* @param buf
* the destination {@link ByteBuff}
* @param dis
* the HDFS input stream which implement the ByteBufferReadable interface.
* @param length
* bytes to read.
* @throws IOException
* exception to throw if any error happen
*/
public static void readFully(ByteBuff buf, FSDataInputStream dis, int length) throws IOException {
final Span span = Span.current();
final AttributesBuilder attributesBuilder = builderFromContext(Context.current());
if (!isByteBufferReadable(dis)) {
// If InputStream does not support the ByteBuffer read, just read to heap and copy bytes to
// the destination ByteBuff.
byte[] heapBuf = new byte[length];
IOUtils.readFully(dis, heapBuf, 0, length);
annotateHeapBytesRead(attributesBuilder, length);
span.addEvent("BlockIOUtils.readFully", attributesBuilder.build());
copyToByteBuff(heapBuf, 0, length, buf);
return;
}
int directBytesRead = 0;
int heapBytesRead = 0;
ByteBuffer[] buffers = buf.nioByteBuffers();
int remain = length;
int idx = 0;
ByteBuffer cur = buffers[idx];
try {
while (remain > 0) {
while (!cur.hasRemaining()) {
if ((++idx) >= buffers.length) {
throw new IOException((("Not enough ByteBuffers to read the reminding " + remain) + " ") + "bytes");
}
cur = buffers[idx];
}
cur.limit(cur.position() + Math.min(remain, cur.remaining()));
int bytesRead = dis.read(cur);
if (bytesRead < 0) {
throw new IOException((("Premature EOF from inputStream, but still need " + remain) + " ") + "bytes");
}
remain -= bytesRead;
if (cur.isDirect()) {
directBytesRead += bytesRead;
} else {
heapBytesRead += bytesRead;
}
}
} finally {
annotateBytesRead(attributesBuilder, directBytesRead, heapBytesRead);
span.addEvent("BlockIOUtils.readFully", attributesBuilder.build());
}
} | 3.26 |
hbase_BlockIOUtils_readWithExtraOnHeap_rdh | /**
* Read from an input stream at least <code>necessaryLen</code> and if possible,
* <code>extraLen</code> also if available. Analogous to
* {@link IOUtils#readFully(InputStream, byte[], int, int)}, but specifies a number of "extra"
* bytes to also optionally read.
*
* @param in
* the input stream to read from
* @param buf
* the buffer to read into
* @param bufOffset
* the destination offset in the buffer
* @param necessaryLen
* the number of bytes that are absolutely necessary to read
* @param extraLen
* the number of extra bytes that would be nice to read
* @return true if succeeded reading the extra bytes
* @throws IOException
* if failed to read the necessary bytes
*/
private static boolean readWithExtraOnHeap(InputStream in, byte[] buf, int bufOffset,
int necessaryLen, int extraLen) throws IOException {
int heapBytesRead = 0;
int bytesRemaining = necessaryLen + extraLen;
try {
while (bytesRemaining > 0) {
int ret = in.read(buf, bufOffset, bytesRemaining);
if (ret < 0) {
if (bytesRemaining <= extraLen) {
// We could not read the "extra data", but that is OK.
break;
}
throw new IOException((((((((("Premature EOF from inputStream (read " + "returned ") + ret) + ", was trying to read ") + necessaryLen) + " necessary bytes and ") + extraLen) + " extra bytes, ") + "successfully read ") + ((necessaryLen + extraLen) - bytesRemaining));
}
bufOffset += ret;
bytesRemaining -= ret;
heapBytesRead += ret;
}
} finally {
final Span span = Span.current();
final AttributesBuilder attributesBuilder = builderFromContext(Context.current());
annotateHeapBytesRead(attributesBuilder, heapBytesRead);
span.addEvent("BlockIOUtils.readWithExtra", attributesBuilder.build());
}
return bytesRemaining <= 0;
}
/**
* Read bytes into ByteBuffers directly; those buffers either contain the extraLen bytes or only
* the necessaryLen bytes, depending on how many bytes were read the last time.
*
* @param buf
* the destination {@link ByteBuff} | 3.26 |
hbase_BlockIOUtils_annotateBytesRead_rdh | /**
* Conditionally annotate {@code attributesBuilder} with appropriate attributes when values are
* non-zero.
*/
private static void annotateBytesRead(AttributesBuilder attributesBuilder, long directBytesRead,
long heapBytesRead) {
if (directBytesRead > 0) {
attributesBuilder.put(DIRECT_BYTES_READ_KEY, directBytesRead);
}
if (heapBytesRead > 0) {
attributesBuilder.put(HEAP_BYTES_READ_KEY, heapBytesRead);
}
} | 3.26 |
hbase_BlockIOUtils_builderFromContext_rdh | /**
* Construct a fresh {@link AttributesBuilder} from the provided {@link Context}, populated with
* relevant attributes populated by {@link HFileContextAttributesBuilderConsumer#CONTEXT_KEY}.
*/
private static AttributesBuilder builderFromContext(Context context) {
final AttributesBuilder attributesBuilder = Attributes.builder();
Optional.ofNullable(context).map(val -> val.get(HFileContextAttributesBuilderConsumer.CONTEXT_KEY)).ifPresent(c -> c.accept(attributesBuilder));
return attributesBuilder;
} | 3.26 |
hbase_BlockIOUtils_annotateHeapBytesRead_rdh | /**
* Conditionally annotate {@code span} with the appropriate attribute when value is non-zero.
*/
private static void annotateHeapBytesRead(AttributesBuilder attributesBuilder, int heapBytesRead) {
annotateBytesRead(attributesBuilder, 0, heapBytesRead);
} | 3.26 |
hbase_BlockIOUtils_preadWithExtra_rdh | /**
* Read from an input stream at least <code>necessaryLen</code> and if possible,
* <code>extraLen</code> also if available. Analogous to
* {@link IOUtils#readFully(InputStream, byte[], int, int)}, but uses positional read and
* specifies a number of "extra" bytes that would be desirable but not absolutely necessary to
* read. If the input stream supports ByteBufferPositionedReadable, it reads to the byte buffer
* directly, and does not allocate a temporary byte array.
*
* @param buff
* ByteBuff to read into.
* @param dis
* the input stream to read from
* @param position
* the position within the stream from which to start reading
* @param necessaryLen
* the number of bytes that are absolutely necessary to read
* @param extraLen
* the number of extra bytes that would be nice to read
* @param readAllBytes
* whether we must read the necessaryLen and extraLen
* @return true if and only if extraLen is > 0 and reading those extra bytes was successful
* @throws IOException
* if failed to read the necessary bytes
*/
public static boolean preadWithExtra(ByteBuff buff, FSDataInputStream dis, long position,
int necessaryLen, int extraLen, boolean readAllBytes) throws IOException {
boolean preadbytebuffer = dis.hasCapability("in:preadbytebuffer");
if (preadbytebuffer) {
return preadWithExtraDirectly(buff, dis, position, necessaryLen, extraLen, readAllBytes);
} else {
return preadWithExtraOnHeap(buff, dis, position, necessaryLen, extraLen, readAllBytes);
}
} | 3.26 |
hbase_NamespacesInstanceModel_toString_rdh | /* (non-Javadoc)
@see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder v1 = new StringBuilder();
v1.append("{NAME => '");
v1.append(namespaceName);v1.append("'");
if (properties != null) {
for (Map.Entry<String, String> entry : properties.entrySet()) {
v1.append(", ");
v1.append(entry.getKey());
v1.append(" => '");
v1.append(entry.getValue());
v1.append("'");
}
}
v1.append("}");
return v1.toString();
} | 3.26 |
hbase_NamespacesInstanceModel_getProperties_rdh | /**
* Returns The map of uncategorized namespace properties.
*/
public Map<String, String> getProperties() {
if (properties == null) {
properties = new HashMap<>();
}
return properties;
} | 3.26 |
hbase_NamespacesInstanceModel_addProperty_rdh | /**
* Add property to the namespace.
*
* @param key
* attribute name
* @param value
* attribute value
*/
public void addProperty(String key, String value) {
if (properties == null) {
properties = new HashMap<>();
}
properties.put(key, value);
} | 3.26 |
hbase_OutputSink_updateStatusWithMsg_rdh | /**
* Set status message in {@link MonitoredTask} instance that is set in this OutputSink
*
* @param msg
* message to update the status with
*/
protected final void updateStatusWithMsg(String msg) {
if (status != null) {
status.setStatus(msg);
}
} | 3.26 |
hbase_OutputSink_finishWriterThreads_rdh | /**
* Wait for writer threads to dump all info to the sink
*
* @return true when there is no error
*/
boolean finishWriterThreads() throws IOException {
LOG.debug("Waiting for split writer threads to finish");
boolean progressFailed = false;
for (WriterThread t : writerThreads) {
t.finish();
}
for (WriterThread t : writerThreads) {
if (((!progressFailed) && (reporter != null)) && (!reporter.progress())) {
progressFailed = true;
}
try {
t.join();
} catch (InterruptedException ie) {
IOException iie = new InterruptedIOException();
iie.initCause(ie);
throw iie;
}
}
controller.checkForErrors();
final String msg = this.writerThreads.size() + " split writer threads finished";
LOG.info(msg);
updateStatusWithMsg(msg);
return !progressFailed;
} | 3.26 |
hbase_OutputSink_startWriterThreads_rdh | /**
* Start the threads that will pump data from the entryBuffers to the output files.
*/
void startWriterThreads() throws IOException {
for (int i = 0; i < numThreads; i++) {
WriterThread t = new WriterThread(controller, entryBuffers, this, i);
t.start();
writerThreads.add(t);
}
} | 3.26 |
hbase_ColumnRangeFilter_isMinColumnInclusive_rdh | /**
* Returns if min column range is inclusive.
*/
public boolean isMinColumnInclusive() {
return minColumnInclusive;
} | 3.26 |
hbase_ColumnRangeFilter_getMaxColumnInclusive_rdh | /**
* Returns true if max column is inclusive, false otherwise
*/
public boolean getMaxColumnInclusive() {
return this.maxColumnInclusive;
} | 3.26 |
hbase_ColumnRangeFilter_getMaxColumn_rdh | /**
* Returns the max column range for the filter
*/
public byte[] getMaxColumn() {
return this.maxColumn;
} | 3.26 |
hbase_ColumnRangeFilter_toByteArray_rdh | /**
* Returns The filter serialized using pb
*/
@Override
public byte[] toByteArray() {
FilterProtos.ColumnRangeFilter.Builder builder = FilterProtos.ColumnRangeFilter.newBuilder();
if (this.minColumn != null)
builder.setMinColumn(UnsafeByteOperations.unsafeWrap(this.minColumn));
builder.setMinColumnInclusive(this.minColumnInclusive);
if (this.maxColumn != null)
builder.setMaxColumn(UnsafeByteOperations.unsafeWrap(this.maxColumn));
builder.setMaxColumnInclusive(this.maxColumnInclusive);
return builder.build().toByteArray();
} | 3.26 |
hbase_ColumnRangeFilter_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof ColumnRangeFilter)) {
return false;
}
ColumnRangeFilter other = ((ColumnRangeFilter) (o));
return ((Bytes.equals(this.getMinColumn(), other.getMinColumn()) && (this.getMinColumnInclusive() == other.getMinColumnInclusive())) && Bytes.equals(this.getMaxColumn(), other.getMaxColumn())) && (this.getMaxColumnInclusive() == other.getMaxColumnInclusive());
} | 3.26 |
hbase_ColumnRangeFilter_getMinColumnInclusive_rdh | /**
* Returns true if min column is inclusive, false otherwise
*/
public boolean getMinColumnInclusive() {
return this.minColumnInclusive;
} | 3.26 |
hbase_ColumnRangeFilter_isMaxColumnInclusive_rdh | /**
* Returns if max column range is inclusive.
*/
public boolean isMaxColumnInclusive() {
return maxColumnInclusive;
} | 3.26 |
hbase_ColumnRangeFilter_parseFrom_rdh | /**
* Parse a serialized representation of {@link ColumnRangeFilter}
*
* @param pbBytes
* A pb serialized {@link ColumnRangeFilter} instance
* @return An instance of {@link ColumnRangeFilter} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static ColumnRangeFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.ColumnRangeFilter proto;
try {
proto = FilterProtos.ColumnRangeFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
return new ColumnRangeFilter(proto.hasMinColumn() ? proto.getMinColumn().toByteArray() : null,
proto.getMinColumnInclusive(),
proto.hasMaxColumn() ? proto.getMaxColumn().toByteArray() : null,
proto.getMaxColumnInclusive());
} | 3.26 |
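A short round-trip sketch for the serialization pair above; the column names are arbitrary and the constructor signature is inferred from the `parseFrom` call:

```java
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.util.Bytes;

// Build a [colA, colM) column range, serialize it, and restore it on the other side.
static ColumnRangeFilter roundTrip() throws DeserializationException {
  ColumnRangeFilter filter =
    new ColumnRangeFilter(Bytes.toBytes("colA"), true, Bytes.toBytes("colM"), false);
  byte[] serialized = filter.toByteArray();
  // A client would normally attach the filter to a Scan, e.g. scan.setFilter(filter).
  return ColumnRangeFilter.parseFrom(serialized);
}
```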
hbase_ColumnRangeFilter_getMinColumn_rdh | /**
* Returns the min column range for the filter
*/
public byte[] getMinColumn() {
return this.minColumn;
} | 3.26 |
hbase_FixedLengthWrapper_getLength_rdh | /**
* Retrieve the maximum length (in bytes) of encoded values.
*/
public int getLength() {
return length;
} | 3.26 |
hbase_BackupAdminImpl_finalizeDelete_rdh | /**
* Updates incremental backup set for every backupRoot
*
* @param tablesMap
* map [backupRoot: {@code Set<TableName>}]
* @param table
* backup system table
* @throws IOException
* if a table operation fails
*/
private void finalizeDelete(Map<String, HashSet<TableName>> tablesMap, BackupSystemTable table) throws IOException {
for (String backupRoot : tablesMap.keySet()) {
Set<TableName> incrTableSet = table.getIncrementalBackupTableSet(backupRoot);
Map<TableName, ArrayList<BackupInfo>> tableMap = table.getBackupHistoryForTableSet(incrTableSet, backupRoot);
for (Map.Entry<TableName, ArrayList<BackupInfo>> entry : tableMap.entrySet()) {
if (entry.getValue() == null) {
// No more backups for a table
incrTableSet.remove(entry.getKey());
}
}
if (!incrTableSet.isEmpty()) {
table.addIncrementalBackupTableSet(incrTableSet, backupRoot);
} else {
// empty
table.deleteIncrementalBackupTableSet(backupRoot);
}
}
} | 3.26 |
hbase_BackupAdminImpl_deleteBackup_rdh | /**
* Delete single backup and all related backups <br>
* Algorithm:<br>
* Backup type: FULL or INCREMENTAL <br>
* Is this last backup session for table T: YES or NO <br>
* For every table T from table list 'tables':<br>
* if(FULL, YES) deletes only physical data (PD) <br>
* if(FULL, NO), deletes PD, scans all newer backups and removes T from backupInfo,<br>
* until we either reach the most recent backup for T in the system or FULL backup<br>
* which includes T<br>
* if(INCREMENTAL, YES) deletes only physical data (PD) if(INCREMENTAL, NO) deletes physical data
* and for table T scans all backup images between last<br>
* FULL backup, which is older than the backup being deleted and the next FULL backup (if exists)
* <br>
* or last one for a particular table T and removes T from list of backup tables.
*
* @param backupId
* backup id
* @param sysTable
* backup system table
* @return total number of deleted backup images
* @throws IOException
* if deleting the backup fails
*/
private int deleteBackup(String backupId, BackupSystemTable sysTable) throws IOException {
BackupInfo backupInfo = sysTable.readBackupInfo(backupId);
int totalDeleted = 0;
if (backupInfo != null) {
LOG.info(("Deleting backup " + backupInfo.getBackupId()) + " ...");
// Step 1: clean up data for backup session (idempotent)
BackupUtils.cleanupBackupData(backupInfo, conn.getConfiguration());
// List of tables in this backup;
List<TableName> tables = backupInfo.getTableNames();
long startTime = backupInfo.getStartTs();
for (TableName tn : tables) {
boolean isLastBackupSession = isLastBackupSession(sysTable, tn, startTime);
if (isLastBackupSession) {
continue;
}
// else
List<BackupInfo> affectedBackups = getAffectedBackupSessions(backupInfo, tn, sysTable);
for (BackupInfo info : affectedBackups) {
if (info.equals(backupInfo)) {
continue;
}
removeTableFromBackupImage(info, tn, sysTable);
}
}
Map<byte[], String> map = sysTable.readBulkLoadedFiles(backupId);
FileSystem fs = FileSystem.get(conn.getConfiguration());
boolean success = true;
int numDeleted = 0;
for (String f : map.values()) {
Path p = new Path(f);
try {
LOG.debug((("Delete backup info " + p) + " for ") + backupInfo.getBackupId());
if (!fs.delete(p)) {
if (fs.exists(p)) {
LOG.warn(f + " was not deleted");
success = false;
}
} else {
numDeleted++;
}
} catch (IOException ioe) {
LOG.warn(f + " was not deleted", ioe);
success = false;
}
}
if (LOG.isDebugEnabled()) {
LOG.debug(((numDeleted + " bulk loaded files out of ") + map.size()) + " were deleted");
}
if (success) {
sysTable.deleteBulkLoadedRows(new ArrayList<>(map.keySet()));
}
sysTable.deleteBackupInfo(backupInfo.getBackupId());
LOG.info(("Delete backup " + backupInfo.getBackupId()) + " completed."); totalDeleted++;
} else {
LOG.warn("Delete backup failed: no information found for backupID=" + backupId);
}
return totalDeleted;
} | 3.26 |
hbase_BackupAdminImpl_checkIfValidForMerge_rdh | /**
* Verifies that backup images are valid for merge.
* <ul>
* <li>All backups MUST be in the same destination
* <li>No FULL backups are allowed - only INCREMENTAL
* <li>All backups must be in COMPLETE state
* <li>No holes in backup list are allowed
* </ul>
* <p>
*
* @param backupIds
* list of backup ids
* @param table
* backup system table
* @throws IOException
* if the backup image is not valid for merge
*/
private void checkIfValidForMerge(String[] backupIds, BackupSystemTable table) throws IOException {
String backupRoot = null;
final Set<TableName> allTables = new HashSet<>();
final Set<String> allBackups = new HashSet<>();
long minTime = Long.MAX_VALUE;
long maxTime = Long.MIN_VALUE;
for (String backupId : backupIds) {
BackupInfo bInfo = table.readBackupInfo(backupId);
if (bInfo == null) {
String msg = ("Backup session " + backupId) + " not found";
throw new IOException(msg);
}
if (backupRoot == null) {
backupRoot = bInfo.getBackupRootDir();
} else if (!bInfo.getBackupRootDir().equals(backupRoot)) {
throw new IOException((((("Found different backup destinations in a list of a backup sessions " + "\n1. ") + backupRoot) + "\n") + "2. ") + bInfo.getBackupRootDir());
}
if (bInfo.getType() == BackupType.FULL) {
throw new IOException("FULL backup image can not be merged for: \n" + bInfo);
}
if (bInfo.getState() != BackupState.COMPLETE) {
throw new IOException((("Backup image " + backupId) + " can not be merged becuase of its state: ") + bInfo.getState());
}
allBackups.add(backupId);
allTables.addAll(bInfo.getTableNames());
long time = bInfo.getStartTs();
if (time < minTime) {
minTime = time;
}
if (time > maxTime) {
maxTime = time;
}
}
final long startRangeTime = minTime;
final long endRangeTime = maxTime;
final String backupDest = backupRoot;
// Check we have no 'holes' in backup id list
// Filter 1 : backupRoot
// Filter 2 : time range filter
// Filter 3 : table filter
BackupInfo.Filter destinationFilter = info -> info.getBackupRootDir().equals(backupDest);
BackupInfo.Filter timeRangeFilter = info -> {
long time = info.getStartTs();
return (time >= startRangeTime) && (time <= endRangeTime);
};
BackupInfo.Filter tableFilter = info -> {
List<TableName> tables = info.getTableNames();
return !Collections.disjoint(allTables, tables);
};
BackupInfo.Filter typeFilter = info -> info.getType() == BackupType.INCREMENTAL;
BackupInfo.Filter stateFilter = info -> info.getState() == BackupState.COMPLETE;
List<BackupInfo> allInfos = table.getBackupHistory(-1, destinationFilter, timeRangeFilter, tableFilter, typeFilter, stateFilter);
if (allInfos.size() != allBackups.size()) {
// Yes we have at least one hole in backup image sequence
List<String> missingIds = new ArrayList<>();
for (BackupInfo info : allInfos) {
if (allBackups.contains(info.getBackupId())) {
continue;
}
missingIds.add(info.getBackupId());
}
String errMsg = "Sequence of backup ids has 'holes'. The following backup images must be added:" + util.StringUtils.join(",", missingIds);
throw new IOException(errMsg);
}
} | 3.26 |
hbase_BackupAdminImpl_cleanupBackupDir_rdh | /**
* Clean up the data at target directory
*
* @throws IOException
* if cleaning up the backup directory fails
*/
private void cleanupBackupDir(BackupInfo backupInfo, TableName table, Configuration conf) throws IOException {
try {
// clean up the data at target directory
String targetDir = backupInfo.getBackupRootDir();
if (targetDir == null) {
LOG.warn("No target directory specified for " + backupInfo.getBackupId());
return;
}
FileSystem outputFs = FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf);
Path targetDirPath = new Path(BackupUtils.getTableBackupDir(backupInfo.getBackupRootDir(), backupInfo.getBackupId(), table));
if (outputFs.delete(targetDirPath, true)) {
LOG.info(("Cleaning up backup data at " + targetDirPath.toString()) + " done.");
} else { LOG.info(("No data has been found in " + targetDirPath.toString()) + ".");
}
} catch (IOException e1) {
LOG.error(((((((("Cleaning up backup data of " + backupInfo.getBackupId()) + " for table ") + table) + "at ") + backupInfo.getBackupRootDir()) + " failed due to ") + e1.getMessage()) + ".");
throw e1;
}
} | 3.26 |
hbase_SplitLogManagerCoordination_getServerName_rdh | /**
* Returns server name
*/
public ServerName getServerName() {
return master.getServerName();
} | 3.26 |
hbase_SplitLogManagerCoordination_getMaster_rdh | /**
* Returns the master value
*/
public MasterServices getMaster() {
return master;
} | 3.26 |
hbase_SplitLogManagerCoordination_getFailedDeletions_rdh | /**
* Returns a set of failed deletions
*/
public Set<String> getFailedDeletions() {
return failedDeletions;
} | 3.26 |
hbase_SplitLogManagerCoordination_getTasks_rdh | /**
* Returns map of tasks
*/
public ConcurrentMap<String, Task> getTasks() {
return tasks;
} | 3.26 |
hbase_WALFactory_createStreamReader_rdh | /**
* Create a one-way stream reader for a given path.
*/
public static WALStreamReader createStreamReader(FileSystem fs, Path path, Configuration conf) throws IOException {
return createStreamReader(fs, path, conf, -1);
} | 3.26 |
hbase_WALFactory_m1_rdh | /**
* If you already have a WALFactory, you should favor the instance method. Uses defaults.
*
* @return a writer that won't overwrite files. Caller must close.
*/
public static Writer m1(final FileSystem fs, final Path path, final Configuration configuration) throws IOException {
return FSHLogProvider.createWriter(configuration, fs, path, false);
} | 3.26 |
hbase_WALFactory_getAllWALProviders_rdh | /**
* Returns all the wal providers, for example, the default one, the one for hbase:meta and the one
* for hbase:replication.
*/
public List<WALProvider> getAllWALProviders() {
List<WALProvider> providers = new ArrayList<>();
if (provider != null) {
providers.add(provider);
}
WALProvider v18 = metaProvider.getProviderNoCreate();
if (v18 != null) {
providers.add(v18);
}
WALProvider replication = replicationProvider.getProviderNoCreate();
if (replication != null) {
providers.add(replication);
}
return providers;
} | 3.26 |
hbase_WALFactory_createWALWriter_rdh | /**
* Create a writer for the WAL. Uses defaults.
* <p>
* Should be package-private. public only for tests and
* {@link org.apache.hadoop.hbase.regionserver.wal.Compressor}
*
* @return A WAL writer. Close when done with it.
*/
public Writer createWALWriter(final FileSystem fs, final Path path) throws IOException {
return FSHLogProvider.createWriter(conf, fs, path, false);
} | 3.26 |
hbase_WALFactory_m0_rdh | /**
*
* @param region
* the region which we want to get a WAL for. Could be null.
*/
public WAL m0(RegionInfo region) throws IOException {
// Use different WAL for hbase:meta. Instantiates the meta WALProvider if not already up.
if ((region != null) && RegionReplicaUtil.isDefaultReplica(region)) {
if (region.isMetaRegion()) {
return metaProvider.getProvider().getWAL(region);
} else if (ReplicationStorageFactory.isReplicationQueueTable(conf, region.getTable())) {
return replicationProvider.getProvider().getWAL(region);
}
}
return provider.getWAL(region); } | 3.26 |
hbase_WALFactory_createRecoveredEditsWriter_rdh | /**
* If you already have a WALFactory, you should favor the instance method. Uses defaults.
*
* @return a Writer that will overwrite files. Caller must close.
*/
static Writer createRecoveredEditsWriter(final FileSystem fs, final Path path, final Configuration configuration) throws IOException {
return FSHLogProvider.createWriter(configuration, fs, path, true);
} | 3.26 |
hbase_WALFactory_getInstance_rdh | // Public only for FSHLog
public static WALFactory getInstance(Configuration configuration) {
WALFactory factory = singleton.get();
if (null == factory) {
WALFactory temp = new WALFactory(configuration);
if (singleton.compareAndSet(null, temp)) {
factory = temp;
} else {
// someone else beat us to initializing
try {
temp.close();
} catch (IOException exception) {
LOG.debug("failed to close temporary singleton. ignoring.",
exception);
}
factory = singleton.get();
}
}
return factory;
} | 3.26 |
hbase_WALFactory_shutdown_rdh | /**
* Tell the underlying WAL providers to shut down, but do not clean up underlying storage. If you
* are not ending cleanly and will need to replay edits from this factory's wals, use this method
* if you can as it will try to leave things as tidy as possible.
*/
public void shutdown() throws IOException {
List<IOException> ioes = new ArrayList<>();
// these fields could be null if the WALFactory is created only for being used in the
// getInstance method.
if (metaProvider != null) {
try {
metaProvider.shutdown();
} catch (IOException e) {
ioes.add(e);
}
}
if (replicationProvider != null) {
try {
replicationProvider.shutdown();
} catch (IOException e) {
ioes.add(e);
}
}
if (provider != null) {
try {
provider.shutdown();
} catch (IOException e) {
ioes.add(e);
}
}
if (!ioes.isEmpty()) {
IOException ioe = new IOException("Failed to shutdown WALFactory");
for (IOException v7 : ioes) {
ioe.addSuppressed(v7);
}
throw ioe;
}
} | 3.26 |
hbase_WALFactory_close_rdh | /**
* Shutdown all WALs and clean up any underlying storage. Use only when you will not need to
* replay and edits that have gone to any wals from this factory.
*/
public void close() throws IOException {
List<IOException> ioes = new ArrayList<>();
// these fields could be null if the WALFactory is created only for being used in the
// getInstance method.
if (metaProvider != null) {
try {
metaProvider.close();
} catch (IOException e) {
ioes.add(e);
}
}
if (replicationProvider != null) {
try {
replicationProvider.close();
} catch (IOException e) {
ioes.add(e);
}
}
if (provider != null) {
try {
provider.close();
} catch (IOException e) {
ioes.add(e);
}
}
if (!ioes.isEmpty()) {
IOException ioe = new IOException("Failed to close WALFactory");
for (IOException e : ioes) {
ioe.addSuppressed(e);
}
throw ioe; }
} | 3.26 |
hbase_BigDecimalComparator_parseFrom_rdh | /**
* Parse a serialized representation of {@link BigDecimalComparator}
*
* @param pbBytes
* A pb serialized {@link BigDecimalComparator} instance
* @return An instance of {@link BigDecimalComparator} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static BigDecimalComparator parseFrom(final byte[] pbBytes) throws DeserializationException {
ComparatorProtos.BigDecimalComparator proto;
try {
proto = ComparatorProtos.BigDecimalComparator.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
return new BigDecimalComparator(Bytes.toBigDecimal(proto.getComparable().getValue().toByteArray()));
} | 3.26 |
hbase_BigDecimalComparator_toByteArray_rdh | /**
* Returns The comparator serialized using pb
*/
@Override
public byte[] toByteArray() {
ComparatorProtos.BigDecimalComparator.Builder builder = ComparatorProtos.BigDecimalComparator.newBuilder();
builder.setComparable(ProtobufUtil.toByteArrayComparable(this.value));
return builder.build().toByteArray();
} | 3.26 |
hbase_BigDecimalComparator_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the comparator that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@SuppressWarnings("ReferenceEquality")
boolean areSerializedFieldsEqual(BigDecimalComparator other) {
if (other == this) {
return true;
}
return super.areSerializedFieldsEqual(other);
} | 3.26 |
hbase_RateLimiter_update_rdh | /**
* Sets the current instance of RateLimiter to new values. If the current limit is smaller than
* the new limit, bump up the available resources. Otherwise allow clients to use up the
* previously available resources.
*/
public synchronized void update(final RateLimiter other) {
this.tunit = other.tunit;
if (this.limit < other.limit) {
// If avail is capped to this.limit, it will never overflow,
// otherwise, avail may overflow, just be careful here.
long diff = other.limit - this.limit;
if (this.avail <= (Long.MAX_VALUE - diff)) {
this.avail += diff;
this.avail = Math.min(this.avail, other.limit);
} else {
this.avail = other.limit;
}
}
this.limit = other.limit;
} | 3.26 |
hbase_RateLimiter_waitInterval_rdh | /**
* Returns estimate of the ms required to wait before being able to provide "amount" resources.
*/
public synchronized long waitInterval(final long amount) {
// TODO Handle over quota?
return amount <= avail ? 0 : getWaitInterval(getLimit(), avail, amount);
} | 3.26 |
hbase_RateLimiter_set_rdh | /**
* Set the RateLimiter max available resources and refill period.
*
* @param limit
* The max value available resource units can be refilled to.
* @param timeUnit
* Timeunit factor for translating to ms.
*/
public synchronized void set(final long limit, final TimeUnit timeUnit) {
switch (timeUnit) {
case MILLISECONDS :
tunit = 1;
break;
case SECONDS :
tunit = 1000;
break;
case MINUTES :
tunit = 60 * 1000;
break;
case HOURS :
tunit = (60 * 60) * 1000;
break;
case DAYS :
tunit = ((24 * 60) * 60) * 1000;
break;
default :
throw new RuntimeException(("Unsupported " + timeUnit.name()) + " TimeUnit.");
}
this.limit = limit;
this.avail = limit;
} | 3.26 |
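The switch above is effectively a hand-rolled time-unit-to-milliseconds table; for the supported units it matches what `TimeUnit#toMillis` reports, as this small check illustrates (the method name is just for the example):

```java
import java.util.concurrent.TimeUnit;

// Prints 1, 1000, 60000, 3600000 and 86400000 — the same tunit values as the switch above.
static void printUnitTable() {
  for (TimeUnit unit : new TimeUnit[] { TimeUnit.MILLISECONDS, TimeUnit.SECONDS,
      TimeUnit.MINUTES, TimeUnit.HOURS, TimeUnit.DAYS }) {
    System.out.println(unit + " -> " + unit.toMillis(1) + " ms per unit");
  }
}
```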
hbase_RateLimiter_consume_rdh | /**
* consume amount available units, amount could be a negative number
*
* @param amount
* the number of units to consume
*/
public synchronized void consume(final long amount) {
if (isBypass()) {
return;
}
if (amount >= 0) {
this.avail -= amount;
} else if (this.avail <= (Long.MAX_VALUE + amount)) {
this.avail -= amount;
this.avail = Math.min(this.avail, this.limit);
} else {
this.avail = this.limit;
}
} | 3.26 |
hbase_RateLimiter_canExecute_rdh | /**
* Are there enough available resources to allow execution?
*
* @param amount
* the number of required resources, a non-negative number
* @return true if there are enough available resources, otherwise false
*/
public synchronized boolean canExecute(final long amount) {
if (isBypass()) {
return true;
}
long refillAmount = refill(limit);
if ((refillAmount == 0) && (avail < amount)) {
return false;
}
// check for positive overflow
if (avail <= (Long.MAX_VALUE - refillAmount)) {
avail = Math.min(avail + refillAmount, limit);
} else {
avail = limit;
}
if (avail >= amount) {
return true;
}
return false;
} | 3.26 |
hbase_IndexOnlyLruBlockCache_cacheBlock_rdh | /**
* Cache only index block with the specified name and buffer
*
* @param cacheKey
* block's cache key
* @param buf
* block buffer
* @param inMemory
* if block is in-memory
*/
@Override
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) {
if (isMetaBlock(buf.getBlockType())) {
super.cacheBlock(cacheKey, buf, inMemory);
}
} | 3.26 |
hbase_ZstdCodec_getBufferSize_rdh | // Package private
static int getBufferSize(Configuration conf) {
return conf.getInt(f0,
// IO_COMPRESSION_CODEC_ZSTD_BUFFER_SIZE_DEFAULT is 0! We can't allow that.
conf.getInt(CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_BUFFER_SIZE_KEY, ZSTD_BUFFER_SIZE_DEFAULT));
} | 3.26 |
hbase_Pair_getSecond_rdh | /**
* Return the second element stored in the pair.
*/
public T2 getSecond() {
return f0;
} | 3.26 |
hbase_Pair_getFirst_rdh | /**
* Return the first element stored in the pair.
*/
public T1 getFirst() {
return first;
} | 3.26 |
hbase_Pair_setSecond_rdh | /**
* Replace the second element of the pair.
*
* @param b
* operand
*/
public void setSecond(T2 b) {
this.second = b;
} | 3.26 |
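A minimal usage sketch for the Pair accessors above (the two-argument constructor is assumed from the class's usual shape):

```java
Pair<String, Integer> pair = new Pair<>("rowCount", 42);
String name = pair.getFirst();    // "rowCount"
Integer count = pair.getSecond(); // 42
pair.setSecond(43);               // replace the second element in place
```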
hbase_MultipleColumnPrefixFilter_parseFrom_rdh | /**
* Parse a serialized representation of {@link MultipleColumnPrefixFilter}
*
* @param pbBytes
* A pb serialized {@link MultipleColumnPrefixFilter} instance
* @return An instance of {@link MultipleColumnPrefixFilter} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static MultipleColumnPrefixFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.MultipleColumnPrefixFilter proto;
try {
proto = FilterProtos.MultipleColumnPrefixFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
int numPrefixes = proto.getSortedPrefixesCount();
byte[][] prefixes = new byte[numPrefixes][];
for (int i = 0; i < numPrefixes; ++i) {
prefixes[i] = proto.getSortedPrefixes(i).toByteArray();
}
return new MultipleColumnPrefixFilter(prefixes);
} | 3.26 |
hbase_MultipleColumnPrefixFilter_toByteArray_rdh | /**
* Returns The filter serialized using pb
*/
@Override
public byte[] toByteArray() {
FilterProtos.MultipleColumnPrefixFilter.Builder builder = FilterProtos.MultipleColumnPrefixFilter.newBuilder();
for (byte[] element : sortedPrefixes) {
if (element != null)
builder.addSortedPrefixes(UnsafeByteOperations.unsafeWrap(element));
}
return builder.build().toByteArray();
} | 3.26 |
hbase_MultipleColumnPrefixFilter_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof MultipleColumnPrefixFilter)) {
return false;
}
MultipleColumnPrefixFilter other = ((MultipleColumnPrefixFilter) (o));
return this.sortedPrefixes.equals(other.sortedPrefixes);
} | 3.26 |
hbase_ZooKeeperHelper_ensureConnectedZooKeeper_rdh | /**
* Ensure passed zookeeper is connected.
*
* @param timeout
* Time to wait on established Connection
*/
public static ZooKeeper ensureConnectedZooKeeper(ZooKeeper zookeeper, int timeout) throws ZooKeeperConnectionException {
if (zookeeper.getState().isConnected()) {
return zookeeper;
}
Stopwatch stopWatch = Stopwatch.createStarted();
// Make sure we are connected before we hand it back.
while (!zookeeper.getState().isConnected()) {
Threads.sleep(1);
if (stopWatch.elapsed(TimeUnit.MILLISECONDS) > timeout) {
throw new ZooKeeperConnectionException((("Failed connect after waiting " + stopWatch.elapsed(TimeUnit.MILLISECONDS)) + "ms (zk session timeout); ") + zookeeper);
}
}
return zookeeper;
} | 3.26 |
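The helper above spins in 1 ms steps until the session connects or the timeout elapses. A dependency-free sketch of that wait-or-throw pattern, with no ZooKeeper or Guava Stopwatch dependency; all names are illustrative:

```java
import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

// Poll a condition every millisecond until it holds or the timeout elapses, then fail loudly.
public class WaitUntil {
  static void waitUntil(BooleanSupplier condition, long timeoutMs) throws InterruptedException {
    long start = System.nanoTime();
    while (!condition.getAsBoolean()) {
      Thread.sleep(1);
      long elapsedMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
      if (elapsedMs > timeoutMs) {
        throw new IllegalStateException("Failed connect after waiting " + elapsedMs + "ms");
      }
    }
  }

  public static void main(String[] args) throws InterruptedException {
    long deadline = System.currentTimeMillis() + 50;
    // Stand-in "connection": becomes true after ~50 ms.
    waitUntil(() -> System.currentTimeMillis() >= deadline, 1000);
    System.out.println("connected");
  }
}
```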
hbase_ZooKeeperHelper_getConnectedZooKeeper_rdh | /**
* Get a ZooKeeper instance and wait until it connected before returning.
*
* @param sessionTimeoutMs
* Used as session timeout passed to the created ZooKeeper AND as the
* timeout to wait on connection establishment.
   */
  public static ZooKeeper getConnectedZooKeeper(String connectString, int sessionTimeoutMs) throws IOException {
    ZooKeeper zookeeper = new ZooKeeper(connectString, sessionTimeoutMs, e -> {
    });
return ensureConnectedZooKeeper(zookeeper, sessionTimeoutMs);
} | 3.26 |
hbase_StochasticLoadBalancer_createRegionPlans_rdh | /**
* Create all of the RegionPlan's needed to move from the initial cluster state to the desired
* state.
*
* @param cluster
* The state of the cluster
* @return List of RegionPlan's that represent the moves needed to get to desired final state.
*/
private List<RegionPlan> createRegionPlans(BalancerClusterState cluster) {
List<RegionPlan> plans = new ArrayList<>();
    for (int regionIndex = 0; regionIndex < cluster.regionIndexToServerIndex.length; regionIndex++) {
      int initialServerIndex = cluster.initialRegionIndexToServerIndex[regionIndex];
int newServerIndex = cluster.regionIndexToServerIndex[regionIndex];
if (initialServerIndex != newServerIndex) {
RegionInfo region = cluster.regions[regionIndex];
ServerName initialServer = cluster.servers[initialServerIndex];
        ServerName newServer = cluster.servers[newServerIndex];
        if (LOG.isTraceEnabled()) {
LOG.trace((((("Moving Region " + region.getEncodedName()) + " from server ") + initialServer.getHostname()) + " to ") + newServer.getHostname());
}
RegionPlan rp = new RegionPlan(region, initialServer, newServer);
plans.add(rp);
}
}
return plans;
} | 3.26 |
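createRegionPlans is essentially a diff of two region-to-server assignment arrays: one move per index that changed. A plain-Java sketch of that diff, with the arrays and Move record standing in for BalancerClusterState and RegionPlan:

```java
import java.util.ArrayList;
import java.util.List;

// Compare the initial and the balanced assignment and emit a move for every changed region.
public class AssignmentDiff {
  record Move(int region, int fromServer, int toServer) {}

  static List<Move> createMoves(int[] initialAssignment, int[] newAssignment) {
    List<Move> moves = new ArrayList<>();
    for (int region = 0; region < newAssignment.length; region++) {
      if (initialAssignment[region] != newAssignment[region]) {
        moves.add(new Move(region, initialAssignment[region], newAssignment[region]));
      }
    }
    return moves;
  }

  public static void main(String[] args) {
    int[] before = { 0, 0, 1, 2 };
    int[] after  = { 0, 1, 1, 0 };
    // Regions 1 and 3 moved; regions 0 and 2 stayed put.
    System.out.println(createMoves(before, after));
  }
}
```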
hbase_StochasticLoadBalancer_getRandomGenerator_rdh | /**
* Select the candidate generator to use based on the cost of cost functions. The chance of
 * selecting a candidate generator is proportional to the share of cost of all cost functions among
* all cost functions that benefit from it.
*/
  protected CandidateGenerator getRandomGenerator() {
    double sum = 0;
for (int i = 0; i < weightsOfGenerators.length; i++) {
sum += weightsOfGenerators[i];
weightsOfGenerators[i] = sum;
}
if (sum == 0) {
return candidateGenerators.get(0);
}
for (int i = 0; i < weightsOfGenerators.length; i++) {
weightsOfGenerators[i] /= sum;
}
    double rand = ThreadLocalRandom.current().nextDouble();
    for (int i = 0; i < weightsOfGenerators.length; i++) {
if (rand <= weightsOfGenerators[i]) {
return candidateGenerators.get(i);
}
}
return candidateGenerators.get(candidateGenerators.size() - 1);
} | 3.26 |
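getRandomGenerator turns the per-generator weights into a cumulative distribution and samples it with one uniform draw. A standalone sketch of that selection; the generator names are illustrative:

```java
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;

// Weighted selection via a normalized prefix sum of the weights.
public class WeightedPick {
  static <T> T pick(List<T> candidates, double[] weights) {
    double sum = 0;
    double[] cumulative = new double[weights.length];
    for (int i = 0; i < weights.length; i++) {
      sum += weights[i];
      cumulative[i] = sum;
    }
    if (sum == 0) {
      return candidates.get(0); // nothing has weight yet; fall back to the first candidate
    }
    double rand = ThreadLocalRandom.current().nextDouble();
    for (int i = 0; i < cumulative.length; i++) {
      if (rand <= cumulative[i] / sum) {
        return candidates.get(i);
      }
    }
    return candidates.get(candidates.size() - 1); // guard against floating-point edge cases
  }

  public static void main(String[] args) {
    List<String> generators = List.of("regionMove", "regionSwap", "localityAware");
    double[] weights = { 1.0, 3.0, 6.0 };
    // "localityAware" should be picked roughly 60% of the time.
    System.out.println(pick(generators, weights));
  }
}
```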
hbase_StochasticLoadBalancer_updateCostsAndWeightsWithAction_rdh | /**
 * Update both the costs of cost functions and the weights of candidate generators
*/
@RestrictedApi(explanation = "Should only be called in tests", link = "", allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java")
  void updateCostsAndWeightsWithAction(BalancerClusterState cluster, BalanceAction action) {
    // Reset all the weights to 0
for (int i = 0; i < weightsOfGenerators.length; i++) {
weightsOfGenerators[i] = 0;
}
for (CostFunction c : costFunctions) {
if (c.isNeeded()) {
c.postAction(action);
c.updateWeight(weightsOfGenerators);
}
}
} | 3.26 |
hbase_StochasticLoadBalancer_updateMetricsSize_rdh | /**
* Update the number of metrics that are reported to JMX
*/
@RestrictedApi(explanation = "Should only be called in tests", link = "", allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java")
void updateMetricsSize(int size) {
if (metricsBalancer instanceof MetricsStochasticBalancer) {
((MetricsStochasticBalancer) (metricsBalancer)).updateMetricsSize(size);
}
} | 3.26 |
hbase_StochasticLoadBalancer_getCostFunctionNames_rdh | /**
* Get the names of the cost functions
*/
@RestrictedApi(explanation = "Should only be called in tests", link = "", allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java")
String[] getCostFunctionNames() {
String[] v66 = new String[costFunctions.size()];
    for (int i = 0; i < costFunctions.size(); i++) {
      CostFunction c = costFunctions.get(i);
v66[i] = c.getClass().getSimpleName();
}
return v66;
} | 3.26 |
hbase_StochasticLoadBalancer_updateStochasticCosts_rdh | /**
* update costs to JMX
*/
private void updateStochasticCosts(TableName tableName, double overall, double[] subCosts) {
if (tableName == null) {
      return;
    }
// check if the metricsBalancer is MetricsStochasticBalancer before casting
if (metricsBalancer instanceof MetricsStochasticBalancer) {
MetricsStochasticBalancer balancer = ((MetricsStochasticBalancer) (metricsBalancer));
// overall cost
balancer.updateStochasticCost(tableName.getNameAsString(), OVERALL_COST_FUNCTION_NAME, "Overall cost", overall);
// each cost function
for (int i = 0; i < costFunctions.size(); i++) {
CostFunction costFunction = costFunctions.get(i);
String costFunctionName = costFunction.getClass().getSimpleName();
        double costPercent = (overall == 0) ? 0 : subCosts[i] / overall;
// TODO: cost function may need a specific description
balancer.updateStochasticCost(tableName.getNameAsString(), costFunctionName, "The percent of " + costFunctionName, costPercent);
}
    }
  } | 3.26 |
hbase_StochasticLoadBalancer_composeAttributeName_rdh | /**
 * A helper function to compose the attribute name from the table name and the cost function name
*/
static String composeAttributeName(String tableName, String costFunctionName) {
return (tableName + TABLE_FUNCTION_SEP) + costFunctionName;
} | 3.26 |
hbase_EncodedDataBlock_getCompressedSize_rdh | /**
* Find the size of compressed data assuming that buffer will be compressed using given algorithm.
*
* @param algo
* compression algorithm
* @param compressor
* compressor already requested from codec
* @param inputBuffer
* Array to be compressed.
* @param offset
* Offset to beginning of the data.
* @param length
* Length to be compressed.
* @return Size of compressed data in bytes.
*/
@SuppressWarnings(value = "NP_NULL_ON_SOME_PATH_EXCEPTION", justification = "No sure what findbugs wants but looks to me like no NPE")public static int getCompressedSize(Algorithm algo,
Compressor compressor, byte[] inputBuffer, int offset, int length) throws IOException {
// Create streams
// Storing them so we can close them
final IOUtils.NullOutputStream nullOutputStream = new IOUtils.NullOutputStream();
final DataOutputStream compressedStream = new DataOutputStream(nullOutputStream);
OutputStream compressingStream = null;
try {
if (compressor != null) {
compressor.reset();
}
compressingStream = algo.createCompressionStream(compressedStream, compressor, 0);
compressingStream.write(inputBuffer, offset, length);
compressingStream.flush();
return compressedStream.size();
} finally {
nullOutputStream.close();
compressedStream.close();
if (compressingStream != null) {
compressingStream.close();
}
}
} | 3.26 |
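getCompressedSize streams the buffer through the codec only to count the output bytes. A sketch of the same probe using java.util.zip.Deflater in place of the Hadoop compression codec:

```java
import java.util.zip.Deflater;

// "How big would this buffer be after compression?" -- count output bytes, discard the data.
public class CompressedSizeProbe {
  static int compressedSize(byte[] input, int offset, int length) {
    Deflater deflater = new Deflater();
    try {
      deflater.setInput(input, offset, length);
      deflater.finish();
      byte[] scratch = new byte[4096];
      int total = 0;
      while (!deflater.finished()) {
        total += deflater.deflate(scratch); // discard the bytes, keep only the count
      }
      return total;
    } finally {
      deflater.end();
    }
  }

  public static void main(String[] args) {
    byte[] data = new byte[64 * 1024]; // highly compressible: all zeros
    System.out.println("raw=" + data.length + " compressed=" + compressedSize(data, 0, data.length));
  }
}
```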
hbase_EncodedDataBlock_encodeData_rdh | /**
* Do the encoding, but do not cache the encoded data.
*
* @return encoded data block with header and checksum
*/
public byte[] encodeData() {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
byte[] baosBytes = null;
try {
baos.write(HConstants.HFILEBLOCK_DUMMY_HEADER);
      DataOutputStream out = new DataOutputStream(baos);
this.dataBlockEncoder.startBlockEncoding(encodingCtx, out);
ByteBuffer in = getUncompressedBuffer();
in.rewind();
      int klength;
      int vlength;
int tagsLength = 0;
long memstoreTS = 0L;
KeyValue kv = null;
while (in.hasRemaining()) {
int kvOffset = in.position();
klength = in.getInt();
vlength = in.getInt();
ByteBufferUtils.skip(in, klength + vlength);
if (this.meta.isIncludesTags()) {
tagsLength = ((in.get() & 0xff) << 8) ^ (in.get() & 0xff);
ByteBufferUtils.skip(in, tagsLength);
this.isTagsLenZero.add(tagsLength == 0);
}
if (this.meta.isIncludesMvcc()) {
memstoreTS = ByteBufferUtils.readVLong(in);
}
kv = new KeyValue(in.array(), in.arrayOffset() + kvOffset, ((int) (KeyValue.getKeyValueDataStructureSize(klength, vlength, tagsLength))));
kv.setSequenceId(memstoreTS);
this.dataBlockEncoder.encode(kv, encodingCtx, out);
}
// Below depends on BAOS internal behavior. toByteArray makes a copy of bytes so far.
baos.flush();
baosBytes = baos.toByteArray();
this.dataBlockEncoder.endBlockEncoding(encodingCtx, out, baosBytes);
      // In endBlockEncoding(encodingCtx, out, baosBytes), the ROW_INDEX_V1 encoder writes an
      // integer to out, while the other encoders write it into baosBytes (the byte array). We
      // need to call baos.toByteArray() again after endBlockEncoding so that the integer the
      // ROW_INDEX_V1 encoder wrote to the output stream is dumped into the byte array (baosBytes).
      // The if branch is necessary because encoders other than ROW_INDEX_V1 write the integer
      // into baosBytes directly; if the branch were dropped and toByteArray() always called again,
      // baosBytes would no longer contain the integer written in endBlockEncoding.
if (this.encoding.equals(DataBlockEncoding.ROW_INDEX_V1)) {
baosBytes = baos.toByteArray();
}
} catch (IOException e) {
throw new RuntimeException(String.format("Bug in encoding part of algorithm %s. " + "Probably it requested more bytes than are available.", toString()), e);
}
return baosBytes;
} | 3.26 |
hbase_EncodedDataBlock_getEncodedCompressedSize_rdh | /**
* Estimate size after second stage of compression (e.g. LZO).
*
* @param comprAlgo
* compression algorithm to be used for compression
* @param compressor
* compressor corresponding to the given compression algorithm
* @return Size after second stage of compression.
*/
public int getEncodedCompressedSize(Algorithm comprAlgo, Compressor compressor) throws IOException {
byte[] compressedBytes = getEncodedData();
return getCompressedSize(comprAlgo, compressor,
compressedBytes, 0, compressedBytes.length);
} | 3.26 |
hbase_EncodedDataBlock_getIterator_rdh | /**
* Provides access to compressed value.
*
* @param headerSize
* header size of the block.
* @return Forwards sequential iterator.
   */
  public Iterator<Cell> getIterator(int headerSize) {
final int rawSize = rawKVs.length;
byte[] encodedDataWithHeader = getEncodedData();
int bytesToSkip = headerSize + Bytes.SIZEOF_SHORT;
ByteArrayInputStream v3 = new ByteArrayInputStream(encodedDataWithHeader, bytesToSkip, encodedDataWithHeader.length - bytesToSkip);
final DataInputStream dis = new DataInputStream(v3);
return new Iterator<Cell>() {
private ByteBuffer decompressedData = null;
private Iterator<Boolean> it = isTagsLenZero.iterator();
@Override
public boolean hasNext() {
if (decompressedData == null) {
          return rawSize > 0;
}
return decompressedData.hasRemaining();
}
@Override
public Cell next() {
if (decompressedData == null) {
try {
            decompressedData = dataBlockEncoder.decodeKeyValues(dis,
              dataBlockEncoder.newDataBlockDecodingContext(conf, meta));
} catch (IOException e) {
throw new RuntimeException("Problem with data block encoder, " + "most likely it requested more bytes than are available.", e);} decompressedData.rewind();
}
int offset = decompressedData.position();
int klen = decompressedData.getInt();
int vlen = decompressedData.getInt();
int tagsLen = 0;
ByteBufferUtils.skip(decompressedData, klen + vlen);
// Read the tag length in case when stream contain tags
if (meta.isIncludesTags()) {
boolean noTags = true;
if (it.hasNext()) {
noTags = it.next();
}
// ROW_INDEX_V1 will not put tagsLen back in cell if it is zero, there is no need
// to read short here.
if (!(encoding.equals(DataBlockEncoding.ROW_INDEX_V1) && noTags)) {
tagsLen = ((decompressedData.get() & 0xff) << 8) ^ (decompressedData.get() & 0xff);
ByteBufferUtils.skip(decompressedData, tagsLen);
}
}
KeyValue kv = new KeyValue(decompressedData.array(), decompressedData.arrayOffset() + offset, ((int) (KeyValue.getKeyValueDataStructureSize(klen, vlen, tagsLen))));
if (meta.isIncludesMvcc()) {
long mvccVersion = ByteBufferUtils.readVLong(decompressedData);
kv.setSequenceId(mvccVersion);
}
return kv;
}
@Override
public void remove() {
throw new NotImplementedException("remove() is not supported!");
}
      @Override
      public String toString() {
        return "Iterator of: " + dataBlockEncoder.getClass().getName();
}
};
} | 3.26 |
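getIterator lazily decodes the block and then walks length-prefixed entries out of a ByteBuffer. A much-simplified sketch of that iteration over (int length, payload) records; the KeyValue, tags, and mvcc handling of the real method is omitted:

```java
import java.nio.ByteBuffer;
import java.util.Iterator;
import java.util.NoSuchElementException;

// Forward-only iterator over length-prefixed records packed back to back in a ByteBuffer.
public class LengthPrefixedIterator implements Iterator<byte[]> {
  private final ByteBuffer data;

  public LengthPrefixedIterator(ByteBuffer data) {
    this.data = data;
  }

  @Override
  public boolean hasNext() {
    return data.hasRemaining();
  }

  @Override
  public byte[] next() {
    if (!hasNext()) {
      throw new NoSuchElementException();
    }
    int len = data.getInt();        // length prefix
    byte[] payload = new byte[len];
    data.get(payload);              // advances the buffer past this record
    return payload;
  }

  public static void main(String[] args) {
    ByteBuffer buf = ByteBuffer.allocate(64);
    for (String s : new String[] { "row1", "row2" }) {
      byte[] b = s.getBytes();
      buf.putInt(b.length).put(b);
    }
    buf.flip();
    new LengthPrefixedIterator(buf).forEachRemaining(r -> System.out.println(new String(r)));
  }
}
```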