name | code_snippet | score
---|---|---|
hbase_RestCsrfPreventionFilter_isBrowser_rdh | /**
* This method interrogates the User-Agent String and returns whether it refers to a browser. If
 * it is not a browser, then the requirement for the CSRF header will not be enforced; if it is a
* browser, the requirement will be enforced.
* <p>
* A User-Agent String is considered to be a browser if it matches any of the regex patterns from
* browser-useragent-regex; the default behavior is to consider everything a browser that matches
* the following: "^Mozilla.*,^Opera.*". Subclasses can optionally override this method to use
* different behavior.
*
* @param userAgent
* The User-Agent String, or null if there isn't one
* @return true if the User-Agent String refers to a browser, false if not
*/
protected boolean isBrowser(String userAgent) {
if (userAgent == null) {
return false;
}
for (Pattern pattern : browserUserAgents) {
Matcher matcher = pattern.matcher(userAgent);
if (matcher.matches()) {
return true;
}
}
return false;
} | 3.26 |
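To make the default behavior above concrete, here is a minimal standalone sketch (not the HBase filter class itself) that applies the documented default patterns "^Mozilla.*,^Opera.*" with java.util.regex; the class name and sample User-Agent strings are illustrative only:

```java
import java.util.Arrays;
import java.util.List;
import java.util.regex.Pattern;

public class BrowserUserAgentCheck {
  // Default patterns named in the javadoc above: "^Mozilla.*,^Opera.*".
  static final List<Pattern> BROWSER_USER_AGENTS =
    Arrays.asList(Pattern.compile("^Mozilla.*"), Pattern.compile("^Opera.*"));

  // Same matching logic as isBrowser(): null is never a browser, otherwise
  // the first fully-matching pattern wins.
  static boolean isBrowser(String userAgent) {
    if (userAgent == null) {
      return false;
    }
    for (Pattern pattern : BROWSER_USER_AGENTS) {
      if (pattern.matcher(userAgent).matches()) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    System.out.println(isBrowser("Mozilla/5.0 (X11; Linux x86_64)")); // true  -> CSRF header enforced
    System.out.println(isBrowser("curl/8.4.0"));                      // false -> CSRF check skipped
    System.out.println(isBrowser(null));                              // false
  }
}
```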
hbase_CoprocessorHost_getCoprocessors_rdh | /**
* Used to create a parameter to the HServerLoad constructor so that HServerLoad can provide
* information about the coprocessors loaded by this regionserver. (HBASE-4070: Improve region
* server metrics to report loaded coprocessors to master).
*/
public Set<String> getCoprocessors() {
Set<String> returnValue = new TreeSet<>();
for (E e : coprocEnvironments) {
returnValue.add(e.getInstance().getClass().getSimpleName());
}
return returnValue;
} | 3.26 |
hbase_CoprocessorHost_findCoprocessor_rdh | /**
* Find coprocessors by full class name or simple name.
*/
public C findCoprocessor(String className) {
for (E env : coprocEnvironments) {
if (env.getInstance().getClass().getName().equals(className) || env.getInstance().getClass().getSimpleName().equals(className)) {
return env.getInstance();
}
}
return null;
} | 3.26 |
hbase_CoprocessorHost_findCoprocessors_rdh | /**
* Find list of coprocessors that extend/implement the given class/interface
*
* @param cls
* the class/interface to look for
 * @return the list of matching coprocessors; empty if none found
*/
public <T extends C> List<T> findCoprocessors(Class<T> cls) {
ArrayList<T> ret = new ArrayList<>();
for (E env : coprocEnvironments) {
C cp = env.getInstance();
if (cp != null) {
if (cls.isAssignableFrom(cp.getClass())) {
ret.add(((T) (cp)));
}
}
}
return ret;
} | 3.26 |
hbase_CoprocessorHost_findCoprocessorEnvironment_rdh | /**
* Find a coprocessor environment by class name
*
* @param className
* the class name
 * @return the coprocessor environment, or null if not found
*/
public E findCoprocessorEnvironment(String className) {
for (E env : coprocEnvironments) {
if (env.getInstance().getClass().getName().equals(className) || env.getInstance().getClass().getSimpleName().equals(className)) {
return env;
}
}
return null;
} | 3.26 |
hbase_CoprocessorHost_execOperationWithResult_rdh | // Functions to execute observer hooks and handle results (if any)
// ////////////////////////////////////////////////////////////////////////////////////////
/**
* Do not call with an observerOperation that is null! Have the caller check.
*/
protected <O, R> R execOperationWithResult(final ObserverOperationWithResult<O, R> observerOperation) throws IOException {
boolean bypass = execOperation(observerOperation);
R result = observerOperation.getResult();
return bypass == observerOperation.isBypassable() ? result : null;
} | 3.26 |
hbase_CoprocessorHost_getExternalClassLoaders_rdh | /**
* Retrieves the set of classloaders used to instantiate Coprocessor classes defined in external
* jar files.
*
* @return A set of ClassLoader instances
*/
Set<ClassLoader> getExternalClassLoaders() {
Set<ClassLoader> externalClassLoaders = new HashSet<>();
final ClassLoader systemClassLoader = this.getClass().getClassLoader();
for (E env : coprocEnvironments) {
ClassLoader cl = env.getInstance().getClass().getClassLoader();
if (cl != systemClassLoader) {
// do not include system classloader
externalClassLoaders.add(cl);
}
}
return externalClassLoaders;
} | 3.26 |
hbase_CoprocessorHost_handleCoprocessorThrowable_rdh | /**
* This is used by coprocessor hooks which are declared to throw IOException (or its subtypes).
* For such hooks, we should handle throwable objects depending on the Throwable's type. Those
* which are instances of IOException should be passed on to the client. This is in conformance
* with the HBase idiom regarding IOException: that it represents a circumstance that should be
* passed along to the client for its own handling. For example, a coprocessor that implements
* access controls would throw a subclass of IOException, such as AccessDeniedException, in its
 * preGet() method to prevent an unauthorized client from performing a Get on a particular table.
*
* @param env
* Coprocessor Environment
* @param e
* Throwable object thrown by coprocessor.
* @exception IOException
* Exception
*/
// Note to devs: Class comments of all observers ({@link MasterObserver}, {@link WALObserver},
// etc) mention this nuance of our exception handling so that coprocessor can throw appropriate
// exceptions depending on situation. If any changes are made to this logic, make sure to
// update all classes' comments.
protected void handleCoprocessorThrowable(final E env, final Throwable e) throws IOException {
if (e instanceof IOException) {
throw ((IOException) (e));
}
// If we got here, e is not an IOException. A loaded coprocessor has a
// fatal bug, and the server (master or regionserver) should remove the
// faulty coprocessor from its set of active coprocessors. Setting
// 'hbase.coprocessor.abortonerror' to true will cause abortServer(),
// which may be useful in development and testing environments where
// 'failing fast' for error analysis is desired.
if (env.getConfiguration().getBoolean(ABORT_ON_ERROR_KEY, DEFAULT_ABORT_ON_ERROR)) {
// server is configured to abort.
      abortServer(env, e);
    } else {
      // If available, pull a table name out of the environment
      if (env instanceof RegionCoprocessorEnvironment) {
        String tableName =
          ((RegionCoprocessorEnvironment) (env)).getRegionInfo().getTable().getNameAsString();
        LOG.error(((("Removing coprocessor '" + env.toString()) + "' from table '") + tableName) + "'", e);
      } else {
        LOG.error((("Removing coprocessor '" + env.toString()) + "' from ") + "environment", e);
}
coprocEnvironments.remove(env);
try {
shutdown(env);
} catch (Exception x) {
LOG.error(("Uncaught exception when shutting down coprocessor '"
+ env.toString()) + "'", x);
}
throw new DoNotRetryIOException((((("Coprocessor: '" + env.toString()) + "' threw: '") + e) + "' and has been removed from the active ") + "coprocessor set.", e);
}
} | 3.26 |
hbase_CoprocessorHost_execShutdown_rdh | /**
 * Coprocessor classes can be configured in any order; based on that, a priority is set and the
 * coprocessors are chained in sorted order. Should be used for preStop*() hooks, i.e. when the
 * master/regionserver is going down. This function first calls the coprocessor methods (using
 * ObserverOperation.call()) and then shuts down the environment in postEnvCall(). <br>
 * Need to execute all coprocessor methods first and only then postEnvCall(); otherwise some
 * coprocessors may remain shut down if any exception occurs during the next coprocessor's
 * execution, which would prevent master/regionserver stop or cluster shutdown. (Refer:
 * <a href="https://issues.apache.org/jira/browse/HBASE-16663">HBASE-16663</a>)
 *
 * @return true if the coprocessor execution was bypassed, false if not.
*/
protected <O> boolean execShutdown(final ObserverOperation<O> observerOperation) throws IOException {
if (observerOperation == null)
return false;
boolean bypass = false;
List<E> envs = coprocEnvironments.get();
// Iterate the coprocessors and execute ObserverOperation's call()
for (E env : envs) {
observerOperation.prepare(env);
Thread currentThread = Thread.currentThread();
ClassLoader cl = currentThread.getContextClassLoader();
try {
currentThread.setContextClassLoader(env.getClassLoader());
observerOperation.callObserver();
} catch (Throwable e) {
        handleCoprocessorThrowable(env, e);
      } finally {
currentThread.setContextClassLoader(cl);
}
bypass |= observerOperation.shouldBypass();
}
// Iterate the coprocessors and execute ObserverOperation's postEnvCall()
for (E env : envs) {
observerOperation.prepare(env);
observerOperation.postEnvCall();
}
return bypass;
} | 3.26 |
hbase_CoprocessorHost_callObserver_rdh | /**
 * In case of coprocessors which have many kinds of observers (e.g., {@link RegionCoprocessor}
* has BulkLoadObserver, RegionObserver, etc), some implementations may not need all observers,
* in which case they will return null for that observer's getter. We simply ignore such cases.
*/
@Override
void callObserver() throws IOException {
Optional<O> observer = observerGetter.apply(getEnvironment().getInstance());
if (observer.isPresent()) {
call(observer.get());
}
} | 3.26 |
hbase_CoprocessorHost_loadSystemCoprocessors_rdh | /**
* Load system coprocessors once only. Read the class names from configuration. Called by
* constructor.
 */
protected void loadSystemCoprocessors(Configuration conf, String confKey) {
boolean coprocessorsEnabled = conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY, DEFAULT_COPROCESSORS_ENABLED);
if (!coprocessorsEnabled) {
return;
}
Class<?> implClass;
    // load default coprocessors from the configuration
    String[] defaultCPClasses = conf.getStrings(confKey);
    if ((defaultCPClasses == null) || (defaultCPClasses.length == 0)) {
      return;
    }
int currentSystemPriority = Coprocessor.PRIORITY_SYSTEM;
for (String className : defaultCPClasses) {
// After HBASE-23710 and HBASE-26714 when configuring for system coprocessor, we accept
// an optional format of className|priority|path
String[] classNameToken = className.split("\\|");
boolean hasPriorityOverride = false;
boolean v9 = false;
      className = classNameToken[0];
int overridePriority = Coprocessor.PRIORITY_SYSTEM;
Path path = null;
if ((classNameToken.length > 1) && (!Strings.isNullOrEmpty(classNameToken[1]))) {
overridePriority = Integer.parseInt(classNameToken[1]);
hasPriorityOverride = true;
}
if ((classNameToken.length > 2) && (!Strings.isNullOrEmpty(classNameToken[2]))) {
path = new Path(classNameToken[2].trim());
v9 = true;
}
      className = className.trim();
      if (findCoprocessor(className) != null) {
        // If already loaded will just continue
LOG.warn(("Attempted duplicate loading of " + className) + "; skipped");
continue;
}
ClassLoader cl = this.getClass().getClassLoader();
try {
// override the class loader if a path for the system coprocessor is provided.
if (v9) {
cl = CoprocessorClassLoader.getClassLoader(path, this.getClass().getClassLoader(), pathPrefix, conf);
}
Thread.currentThread().setContextClassLoader(cl);
implClass = cl.loadClass(className);
int coprocPriority = (hasPriorityOverride) ? overridePriority : currentSystemPriority;
// Add coprocessors as we go to guard against case where a coprocessor is specified twice
// in the configuration
E env = checkAndLoadInstance(implClass, coprocPriority, conf);
if (env != null) {
this.coprocEnvironments.add(env);
LOG.info("System coprocessor {} loaded, priority={}.", className, coprocPriority);
if (!hasPriorityOverride) {
++currentSystemPriority;
}
}
} catch (Throwable t) {
// We always abort if system coprocessors cannot be loaded
abortServer(className, t);
}
}
} | 3.26 |
hbase_CoprocessorHost_checkAndLoadInstance_rdh | /**
*
* @param implClass
* Implementation class
* @param priority
* priority
* @param conf
* configuration
* @throws java.io.IOException
* Exception
*/
public E checkAndLoadInstance(Class<?> implClass, int priority, Configuration conf) throws IOException {
// create the instance
C impl;
try {
impl = checkAndGetInstance(implClass);
      if (impl == null) {
        LOG.error("Cannot load coprocessor " + implClass.getSimpleName());
        return null;
      }
} catch (InstantiationException | IllegalAccessException e) {
throw new IOException(e);
}
// create the environment
E env = createEnvironment(impl, priority, loadSequence.incrementAndGet(), conf);
    assert env instanceof BaseEnvironment;
    ((BaseEnvironment<C>) (env)).startup();
// HBASE-4014: maintain list of loaded coprocessors for later crash analysis
// if server (master or regionserver) aborts.
coprocessorNames.add(implClass.getName());
return env;
} | 3.26 |
hbase_CoprocessorDescriptorBuilder_of_rdh | /**
* Used to build the {@link CoprocessorDescriptor}
*/
@InterfaceAudience.Public
public final class CoprocessorDescriptorBuilder {
public static CoprocessorDescriptor of(String className) {
return new CoprocessorDescriptorBuilder(className).build();
} | 3.26 |
hbase_HFileBlockIndex_getNonRootSize_rdh | /**
* Returns the size of this chunk if stored in the non-root index block format
*/
@Override
  public int getNonRootSize() {
    return (Bytes.SIZEOF_INT // Number of entries
      + (Bytes.SIZEOF_INT * (blockKeys.size() + 1))) // Secondary index
      + curTotalNonRootEntrySize; // All entries
} | 3.26 |
hbase_HFileBlockIndex_getEntryBySubEntry_rdh | /**
* Finds the entry corresponding to the deeper-level index block containing the given
* deeper-level entry (a "sub-entry"), assuming a global 0-based ordering of sub-entries.
* <p>
* <i> Implementation note. </i> We are looking for i such that numSubEntriesAt[i - 1] <= k <
* numSubEntriesAt[i], because a deeper-level block #i (0-based) contains sub-entries #
* numSubEntriesAt[i - 1]'th through numSubEntriesAt[i] - 1, assuming a global 0-based ordering
* of sub-entries. i is by definition the insertion point of k in numSubEntriesAt.
*
* @param k
* sub-entry index, from 0 to the total number sub-entries - 1
* @return the 0-based index of the entry corresponding to the given sub-entry
 */
@Override
public int getEntryBySubEntry(long k) {
// We define mid-key as the key corresponding to k'th sub-entry
// (0-based).
    int i = Collections.binarySearch(numSubEntriesAt, k);
// Exact match: cumulativeWeight[i] = k. This means chunks #0 through
// #i contain exactly k sub-entries, and the sub-entry #k (0-based)
// is in the (i + 1)'th chunk.
if (i >= 0)
return i + 1;
// Inexact match. Return the insertion point.
return (-i) - 1;
} | 3.26 |
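The insertion-point convention of Collections.binarySearch is the whole trick in the method above. A self-contained sketch with a hypothetical cumulative-count list (the real numSubEntriesAt is built by the block index writer):

```java
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class SubEntryLookupDemo {
  // Hypothetical cumulative sub-entry counts: chunk 0 ends at 3, chunk 1 at 5, chunk 2 at 9.
  static final List<Long> NUM_SUB_ENTRIES_AT = Arrays.asList(3L, 5L, 9L);

  // Mirrors getEntryBySubEntry(): an exact match means sub-entry k opens the next chunk,
  // otherwise binarySearch returns -(insertionPoint) - 1 and the insertion point is the chunk.
  static int getEntryBySubEntry(long k) {
    int i = Collections.binarySearch(NUM_SUB_ENTRIES_AT, k);
    return i >= 0 ? i + 1 : (-i) - 1;
  }

  public static void main(String[] args) {
    System.out.println(getEntryBySubEntry(0)); // 0 -> sub-entry 0 lives in chunk 0
    System.out.println(getEntryBySubEntry(3)); // 1 -> exact match, first sub-entry of chunk 1
    System.out.println(getEntryBySubEntry(4)); // 1 -> still inside chunk 1
    System.out.println(getEntryBySubEntry(8)); // 2 -> last sub-entry, chunk 2
  }
}
```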
hbase_HFileBlockIndex_getNumLevels_rdh | /**
* Returns the number of levels in this block index.
 */
public int getNumLevels() {
return numLevels;
} | 3.26 |
hbase_HFileBlockIndex_addEntry_rdh | /**
* Add one index entry to the current leaf-level block. When the leaf-level block gets large
* enough, it will be flushed to disk as an inline block.
*
* @param firstKey
* the first key of the data block
* @param blockOffset
* the offset of the data block
* @param blockDataSize
* the on-disk size of the data block ({@link HFile} format version 2), or
* the uncompressed size of the data block ( {@link HFile} format version
* 1).
*/
public void addEntry(byte[] firstKey, long blockOffset, int blockDataSize) {
curInlineChunk.add(firstKey, blockOffset, blockDataSize);
++totalNumEntries;
} | 3.26 |
hbase_HFileBlockIndex_add_rdh | /**
* The same as {@link #add(byte[], long, int, long)} but does not take the key/value into
* account. Used for single-level indexes.
*
* @see #add(byte[], long, int, long)
*/
@Override
public void add(byte[] firstKey, long blockOffset, int onDiskDataSize) {
    add(firstKey, blockOffset, onDiskDataSize, -1);
} | 3.26 |
hbase_HFileBlockIndex_shouldWriteBlock_rdh | /**
 * Whether there is an inline block ready to be written. In general, we write a leaf-level
* index block as an inline block as soon as its size as serialized in the non-root format
* reaches a certain threshold.
*/
@Override
public boolean shouldWriteBlock(boolean closing) {
    if (singleLevelOnly) {
      throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED);
    }
    if (curInlineChunk == null) {
      throw new IllegalStateException("curInlineChunk is null; has shouldWriteBlock been "
        + "called with closing=true and then called again?");
    }
    if (curInlineChunk.getNumEntries() == 0) {
      return false;
    }
// We do have some entries in the current inline chunk.
if (closing) {
      if (rootChunk.getNumEntries() == 0) {
        // We did not add any leaf-level blocks yet. Instead of creating a
        // leaf level with one block, move these entries to the root level.
        expectNumLevels(1);
        rootChunk = curInlineChunk;
        curInlineChunk = null; // Disallow adding any more index entries.
return false;
}
return true;
} else {
return curInlineChunk.getNonRootSize() >= maxChunkSize;
}
} | 3.26 |
hbase_HFileBlockIndex_readRootIndex_rdh | /**
* Read in the root-level index from the given input stream. Must match what was written into
* the root level by {@link BlockIndexWriter#writeIndexBlocks(FSDataOutputStream)} at the offset
* that function returned.
*
* @param blk
* the HFile block
* @param numEntries
* the number of root-level index entries
* @return the buffered input stream or wrapped byte input stream
*/
public DataInputStream readRootIndex(HFileBlock blk, final int numEntries) throws IOException {
DataInputStream in = blk.getByteStream();
m2(in, numEntries);
return in;
}
/**
* Read the root-level metadata of a multi-level block index. Based on
* {@link #readRootIndex(DataInput, int)} | 3.26 |
hbase_HFileBlockIndex_m2_rdh | /**
* Read in the root-level index from the given input stream. Must match what was written into
* the root level by {@link BlockIndexWriter#writeIndexBlocks(FSDataOutputStream)} at the offset
* that function returned.
*
* @param in
* the buffered input stream or wrapped byte input stream
* @param numEntries
* the number of root-level index entries
*/
public void m2(DataInput in, final int numEntries) throws IOException {
    blockOffsets = new long[numEntries];
    initialize(numEntries);
blockDataSizes = new int[numEntries];
// If index size is zero, no index was written.
if (numEntries > 0) {
      for (int i = 0; i < numEntries; ++i) {
        long offset = in.readLong();
int dataSize = in.readInt();
byte[] key = Bytes.readByteArray(in);
add(key, offset, dataSize);
}
}
} | 3.26 |
hbase_HFileBlockIndex_getTotalUncompressedSize_rdh | /**
* The total uncompressed size of the root index block, intermediate-level index blocks, and
* leaf-level index blocks.
*
* @return the total uncompressed size of all index blocks
*/
public long getTotalUncompressedSize() {
return totalBlockUncompressedSize;
} | 3.26 |
hbase_HFileBlockIndex_ensureSingleLevel_rdh | /**
*
* @throws IOException
* if we happened to write a multi-level index.
*/
public void ensureSingleLevel() throws IOException {
if (numLevels > 1) {
throw new IOException((((("Wrote a " + numLevels) + "-level index with ") + rootChunk.getNumEntries()) + " root-level entries, but ") + "this is expected to be a single-level block index.");
}
} | 3.26 |
hbase_HFileBlockIndex_getRootBlockCount_rdh | /**
* Returns the number of root-level blocks in this block index
*/
public int getRootBlockCount() {
return rootCount;
} | 3.26 |
hbase_HFileBlockIndex_writeInlineBlock_rdh | /**
* Write out the current inline index block. Inline blocks are non-root blocks, so the non-root
* index format is used.
*/ @Override
public void writeInlineBlock(DataOutput out) throws IOException {
if (singleLevelOnly)
throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED);
// Write the inline block index to the output stream in the non-root
// index block format.
    indexBlockEncoder.encode(curInlineChunk, false, out);
    // Save the first key of the inline block so that we can add it to the
// parent-level index.
firstKey = curInlineChunk.getBlockKey(0);
// Start a new inline index block
curInlineChunk.clear();
} | 3.26 |
hbase_HFileBlockIndex_m3_rdh | /**
* Writes the root level and intermediate levels of the block index into the output stream,
* generating the tree from bottom up. Assumes that the leaf level has been inline-written to
* the disk if there is enough data for more than one leaf block. We iterate by breaking the
* current level of the block index, starting with the index of all leaf-level blocks, into
* chunks small enough to be written to disk, and generate its parent level, until we end up
* with a level small enough to become the root level. If the leaf level is not large enough,
* there is no inline block index anymore, so we only write that level of block index to disk as
* the root level.
*
* @param out
* FSDataOutputStream
* @return position at which we entered the root-level index.
*/
public long m3(FSDataOutputStream out) throws IOException {
if ((curInlineChunk != null) && (curInlineChunk.getNumEntries() != 0)) {
throw new IOException(((("Trying to write a multi-level block index, " + "but are ") + curInlineChunk.getNumEntries()) + " entries in the ") + "last inline chunk.");
}
// We need to get mid-key metadata before we create intermediate
// indexes and overwrite the root chunk.
byte[] midKeyMetadata = (numLevels > 1) ? rootChunk.getMidKeyMetadata() : null;
if (curInlineChunk != null) {
while (((rootChunk.getRootSize() > maxChunkSize) && // HBASE-16288: if firstKey is larger than maxChunkSize we will loop indefinitely
(rootChunk.getNumEntries() > minIndexNumEntries)) && // Sanity check. We will not hit this (minIndexNumEntries ^ 16) blocks can be addressed
(numLevels < 16)) {
rootChunk = writeIntermediateLevel(out, rootChunk);
numLevels += 1;
}
}
// write the root level
long rootLevelIndexPos = out.getPos();
{
DataOutput blockStream = blockWriter.startWriting(BlockType.ROOT_INDEX);
indexBlockEncoder.encode(rootChunk, true, blockStream);
if (midKeyMetadata != null)
blockStream.write(midKeyMetadata);
blockWriter.writeHeaderAndData(out);
if (cacheConf != null) {
cacheConf.getBlockCache().ifPresent(cache -> {
HFileBlock blockForCaching = blockWriter.getBlockForCaching(cacheConf);
cache.cacheBlock(new BlockCacheKey(nameForCaching, rootLevelIndexPos, true, blockForCaching.getBlockType()), blockForCaching);
});
}
    }
    // Add root index block size
    totalBlockOnDiskSize += blockWriter.getOnDiskSizeWithoutHeader();
totalBlockUncompressedSize += blockWriter.getUncompressedSizeWithoutHeader();
if (LOG.isTraceEnabled()) {
LOG.trace(((((((((((("Wrote a " + numLevels) + "-level index with root level at pos ") + rootLevelIndexPos) + ", ") + rootChunk.getNumEntries()) + " root-level entries, ") + totalNumEntries) + " total entries, ") + StringUtils.humanReadableInt(this.totalBlockOnDiskSize)) + " on-disk size, ") + StringUtils.humanReadableInt(totalBlockUncompressedSize)) + " total uncompressed size.");
}
return rootLevelIndexPos;
} | 3.26 |
hbase_HFileBlockIndex_blockWritten_rdh | /**
* Called after an inline block has been written so that we can add an entry referring to that
* block to the parent-level index.
*/
@Override
public void blockWritten(long offset, int onDiskSize, int uncompressedSize) {
// Add leaf index block size
totalBlockOnDiskSize += onDiskSize;
    totalBlockUncompressedSize += uncompressedSize;
    if (singleLevelOnly)
throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED);
if (firstKey == null) {
throw new IllegalStateException((((("Trying to add second-level index " + "entry with offset=") + offset) + " and onDiskSize=") + onDiskSize) + "but the first key was not set in writeInlineBlock");
}
if (rootChunk.getNumEntries() == 0) {
// We are writing the first leaf block, so increase index level.
expectNumLevels(1);
numLevels = 2;
}
// Add another entry to the second-level index. Include the number of
// entries in all previous leaf-level chunks for mid-key calculation.
rootChunk.add(firstKey, offset, onDiskSize, totalNumEntries);
firstKey = null;
} | 3.26 |
hbase_HFileBlockIndex_getNonRootIndexedKey_rdh | /**
* The indexed key at the ith position in the nonRootIndex. The position starts at 0.
*
* @param i
* the ith position
* @return The indexed key at the ith position in the nonRootIndex.
*/
static byte[] getNonRootIndexedKey(ByteBuff nonRootIndex, int i) {
int v26 = nonRootIndex.getInt(0);
if ((i < 0) || (i >= v26)) {
return null;
}
// Entries start after the number of entries and the secondary index.
// The secondary index takes numEntries + 1 ints.
int entriesOffset = Bytes.SIZEOF_INT * (v26 + 2);
// Targetkey's offset relative to the end of secondary index
    int targetKeyRelOffset = nonRootIndex.getInt(Bytes.SIZEOF_INT * (i + 1));
// The offset of the target key in the blockIndex buffer
int targetKeyOffset = (entriesOffset// Skip secondary index
+ targetKeyRelOffset)// Skip all entries until mid
+ SECONDARY_INDEX_ENTRY_OVERHEAD;// Skip offset and on-disk-size
// We subtract the two consecutive secondary index elements, which
// gives us the size of the whole (offset, onDiskSize, key) tuple. We
// then need to subtract the overhead of offset and onDiskSize.
int v30 = (nonRootIndex.getInt(Bytes.SIZEOF_INT * (i + 2)) - targetKeyRelOffset) - SECONDARY_INDEX_ENTRY_OVERHEAD;
// TODO check whether we can make BB backed Cell here? So can avoid bytes copy.
    return nonRootIndex.toBytes(targetKeyOffset, v30);
  } | 3.26 |
hbase_HFileBlockIndex_getRootSize_rdh | /**
* Returns the size of this chunk if stored in the root index block format
*/
@Override
public int getRootSize() {
return curTotalRootSize;
} | 3.26 |
hbase_HFileBlockIndex_getNumEntries_rdh | /**
* Returns the number of entries in this block index chunk
*/
public int getNumEntries() {
return blockKeys.size();
} | 3.26 |
hbase_HFileBlockIndex_getRootBlockKey_rdh | /**
* from 0 to {@link #getRootBlockCount() - 1}
*/
public Cell getRootBlockKey(int i) {
return seeker.getRootBlockKey(i);
} | 3.26 |
hbase_HFileBlockIndex_getNumRootEntries_rdh | /**
* Returns how many block index entries there are in the root level
*/
public final int getNumRootEntries() {
    return rootChunk.getNumEntries();
  } | 3.26 |
hbase_HFileBlockIndex_writeSingleLevelIndex_rdh | /**
* Writes the block index data as a single level only. Does not do any block framing.
*
* @param out
* the buffered output stream to write the index to. Typically a stream
* writing into an {@link HFile} block.
* @param description
* a short description of the index being written. Used in a log message.
*/
public void writeSingleLevelIndex(DataOutput out, String description) throws IOException {
expectNumLevels(1);
if (!singleLevelOnly)
throw new IOException("Single-level mode is turned off");
if (rootChunk.getNumEntries() > 0)
throw new IOException("Root-level entries already added in " + "single-level mode");
rootChunk = curInlineChunk;
curInlineChunk = new BlockIndexChunkImpl();
    if (LOG.isTraceEnabled()) {
LOG.trace(((((("Wrote a single-level " + description) + " index with ") + rootChunk.getNumEntries()) + " entries, ") + rootChunk.getRootSize()) + " bytes");
}
indexBlockEncoder.encode(rootChunk, true, out);
} | 3.26 |
hbase_HFileBlockIndex_seekToDataBlock_rdh | /**
* Return the data block which contains this key. This function will only be called when the
* HFile version is larger than 1.
*
* @param key
* the key we are looking for
* @param currentBlock
* the current block, to avoid re-reading the same block
* @param expectedDataBlockEncoding
* the data block encoding the caller is expecting the data
* block to be in, or null to not perform this check and return
* the block irrespective of the encoding
* @return reader a basic way to load blocks
*/
  public HFileBlock seekToDataBlock(final Cell key, HFileBlock currentBlock, boolean cacheBlocks,
    boolean pread, boolean isCompaction, DataBlockEncoding expectedDataBlockEncoding,
    CachingBlockReader cachingBlockReader) throws IOException {
BlockWithScanInfo blockWithScanInfo = loadDataBlockWithScanInfo(key, currentBlock, cacheBlocks, pread, isCompaction, expectedDataBlockEncoding, cachingBlockReader);
if (blockWithScanInfo == null) {
return null;
} else {
return blockWithScanInfo.getHFileBlock();
}
} | 3.26 |
hbase_HFileBlockIndex_getRootBlockOffset_rdh | /**
*
* @param i
* from 0 to {@link #getRootBlockCount() - 1}
*/
public long getRootBlockOffset(int i) {
return blockOffsets[i];
} | 3.26 |
hbase_HFileBlockIndex_ensureNonEmpty_rdh | /**
* Returns true if the block index is empty.
 */
public abstract boolean isEmpty();
/**
* Verifies that the block index is non-empty and throws an {@link IllegalStateException}
* otherwise.
*/
public void ensureNonEmpty() {
if (isEmpty()) {
throw new IllegalStateException("Block index is empty or not loaded");}
} | 3.26 |
hbase_HFileBlockIndex_getMidKeyMetadata_rdh | /**
* Used when writing the root block index of a multi-level block index. Serializes additional
* information allowing to efficiently identify the mid-key.
*
* @return a few serialized fields for finding the mid-key
* @throws IOException
* if could not create metadata for computing mid-key
*/
@Override
public byte[] getMidKeyMetadata() throws IOException {
ByteArrayOutputStream v66 = new ByteArrayOutputStream(MID_KEY_METADATA_SIZE);
DataOutputStream baosDos = new DataOutputStream(v66);
long totalNumSubEntries = numSubEntriesAt.get(blockKeys.size() - 1);
if (totalNumSubEntries == 0) {
throw new IOException("No leaf-level entries, mid-key unavailable");
}
long midKeySubEntry = (totalNumSubEntries - 1) / 2;
int midKeyEntry = getEntryBySubEntry(midKeySubEntry);
baosDos.writeLong(blockOffsets.get(midKeyEntry));
baosDos.writeInt(onDiskDataSizes.get(midKeyEntry));
long numSubEntriesBefore = (midKeyEntry > 0) ? numSubEntriesAt.get(midKeyEntry - 1) : 0;
long subEntryWithinEntry = midKeySubEntry - numSubEntriesBefore;
if ((subEntryWithinEntry < 0) || (subEntryWithinEntry > Integer.MAX_VALUE)) {
throw new IOException((((((("Could not identify mid-key index within the " + "leaf-level block containing mid-key: out of range (") + subEntryWithinEntry) + ", numSubEntriesBefore=") + numSubEntriesBefore) + ", midKeySubEntry=") + midKeySubEntry) + ")");
}
baosDos.writeInt(((int) (subEntryWithinEntry)));
if (baosDos.size() != MID_KEY_METADATA_SIZE) {
throw new IOException((("Could not write mid-key metadata: size="
+ baosDos.size()) + ", correct size: ") + MID_KEY_METADATA_SIZE);
}
// Close just to be good citizens, although this has no effect.
v66.close();
return v66.toByteArray();
} | 3.26 |
hbase_BalanceResponse_isBalancerRan_rdh | /**
* Returns true if the balancer ran, otherwise false. The balancer may not run for a variety of
* reasons, such as: another balance is running, there are regions in transition, the cluster is
* in maintenance mode, etc.
*/
public boolean isBalancerRan() {
return balancerRan;
} | 3.26 |
hbase_BalanceResponse_newBuilder_rdh | /**
* Creates a new {@link BalanceResponse.Builder}
*/
public static Builder newBuilder() {
return new Builder();
} | 3.26 |
hbase_BalanceResponse_getMovesExecuted_rdh | /**
* The number of moves actually executed by the balancer if it ran. This will be zero if
* {@link #getMovesCalculated()} is zero or if {@link BalanceRequest#isDryRun()} was true. It may
* also not be equal to {@link #getMovesCalculated()} if the balancer was interrupted midway
* through executing the moves due to max run time.
*/
public int getMovesExecuted() {
    return movesExecuted;
  } | 3.26 |
hbase_BalanceResponse_build_rdh | /**
* Build the {@link BalanceResponse}
 */
public BalanceResponse build() {
return new BalanceResponse(balancerRan, movesCalculated, movesExecuted);
} | 3.26 |
hbase_BalanceResponse_setBalancerRan_rdh | /**
* Set true if the balancer ran, otherwise false. The balancer may not run in some
* circumstances, such as if a balance is already running or there are regions already in
* transition.
*
* @param balancerRan
* true if balancer ran, false otherwise
*/
public Builder setBalancerRan(boolean balancerRan) {
this.balancerRan = balancerRan;
return this;
} | 3.26 |
hbase_BalanceResponse_setMovesCalculated_rdh | /**
* Set how many moves were calculated by the balancer. This will be zero if the cluster is
* already balanced.
*
* @param movesCalculated
* moves calculated by the balance run
*/
    public Builder setMovesCalculated(int movesCalculated) {
      this.movesCalculated = movesCalculated;
return this;
} | 3.26 |
hbase_BalanceResponse_setMovesExecuted_rdh | /**
* Set how many of the calculated moves were actually executed by the balancer. This should be
* zero if the balancer is run with {@link BalanceRequest#isDryRun()}. It may also not equal
* movesCalculated if the balancer ran out of time while executing the moves.
*
* @param movesExecuted
* moves executed by the balance run
*/
public Builder setMovesExecuted(int movesExecuted) {
this.movesExecuted = movesExecuted;
return this;
} | 3.26 |
hbase_BalanceResponse_getMovesCalculated_rdh | /**
* The number of moves calculated by the balancer if {@link #isBalancerRan()} is true. This will
* be zero if no better balance could be found.
*/
public int getMovesCalculated() {
return movesCalculated;
} | 3.26 |
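A minimal usage sketch of the builder and accessors shown in the BalanceResponse snippets above; the numbers are illustrative, not taken from a real balance run:

```java
// Assumes the BalanceResponse/Builder API shown above is on the classpath.
BalanceResponse response = BalanceResponse.newBuilder()
  .setBalancerRan(true)    // the balancer actually ran
  .setMovesCalculated(12)  // the plan contained 12 region moves
  .setMovesExecuted(8)     // only 8 finished before the max run time (or a dry run cut it short)
  .build();

if (response.isBalancerRan() && response.getMovesExecuted() < response.getMovesCalculated()) {
  // The run was interrupted or was a dry run; a caller could schedule another balance here.
}
```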
hbase_FilterList_getOperator_rdh | /**
* Get the operator.
*/
  public Operator getOperator() {
    return operator;
  } | 3.26 |
hbase_FilterList_addFilter_rdh | /**
* Add a filter.
*
* @param filter
* another filter
*/
public void addFilter(Filter filter) {
addFilter(Collections.singletonList(filter));
} | 3.26 |
hbase_FilterList_toByteArray_rdh | /**
* Returns The filter serialized using pb
*/
@Override
public byte[] toByteArray() throws IOException {
FilterProtos.FilterList.Builder builder = FilterProtos.FilterList.newBuilder();
builder.setOperator(FilterList.Operator.valueOf(operator.name()));
ArrayList<Filter> filters = filterListBase.getFilters();
for (int i = 0, n = filters.size(); i < n; i++) {
builder.addFilters(ProtobufUtil.toFilter(filters.get(i)));
}
return builder.build().toByteArray();
} | 3.26 |
hbase_FilterList_parseFrom_rdh | /**
 * Parse a serialized representation of {@link FilterList}
*
* @param pbBytes
* A pb serialized {@link FilterList} instance
* @return An instance of {@link FilterList} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static FilterList parseFrom(final byte[] pbBytes) throws DeserializationException {
    FilterProtos.FilterList proto;
    try {
      proto = FilterProtos.FilterList.parseFrom(pbBytes);
    } catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
List<Filter> rowFilters = new ArrayList<>(proto.getFiltersCount());
try {
List<FilterProtos.Filter> filtersList = proto.getFiltersList();
for (int i = 0, n = filtersList.size(); i < n; i++) {
rowFilters.add(ProtobufUtil.toFilter(filtersList.get(i)));
}
} catch (IOException ioe) {
throw new DeserializationException(ioe);
}
return new FilterList(Operator.valueOf(proto.getOperator().name()), rowFilters);
} | 3.26 |
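A round-trip sketch of the toByteArray()/parseFrom() pair above. It assumes the standard HBase client classes (FilterList, PrefixFilter, Bytes) are available; the row prefixes are made up for the example:

```java
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class FilterListRoundTrip {
  public static void main(String[] args) throws Exception {
    FilterList original = new FilterList(FilterList.Operator.MUST_PASS_ONE);
    original.addFilter(new PrefixFilter(Bytes.toBytes("row-a")));
    original.addFilter(new PrefixFilter(Bytes.toBytes("row-b")));

    // Serialize with toByteArray() and rebuild with parseFrom(), as in the snippets above.
    byte[] serialized = original.toByteArray();
    FilterList copy = FilterList.parseFrom(serialized);

    // areSerializedFieldsEqual(...) is package-private, so compare the public views instead.
    System.out.println(copy.getOperator());        // MUST_PASS_ONE
    System.out.println(copy.getFilters().size());  // 2
  }
}
```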
hbase_FilterList_getFilters_rdh | /**
* Get the filters.
*/
public List<Filter> getFilters() {
return filterListBase.getFilters();
} | 3.26 |
hbase_FilterList_filterRowCells_rdh | /**
* Filters that never filter by modifying the returned List of Cells can inherit this
* implementation that does nothing. {@inheritDoc }
*/
@Override
public void filterRowCells(List<Cell> cells) throws IOException {
filterListBase.filterRowCells(cells);
} | 3.26 |
hbase_FilterList_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
  boolean areSerializedFieldsEqual(Filter other) {
if (other == this) {
return true;
}
if (!(other instanceof FilterList)) {
return false;
}
FilterList o = ((FilterList) (other));
return this.getOperator().equals(o.getOperator()) && ((this.getFilters() == o.getFilters()) || this.getFilters().equals(o.getFilters()));
} | 3.26 |
hbase_ReportMakingVisitor_isTableDisabled_rdh | /**
* Returns True if table is disabled or disabling; defaults false!
*/
boolean isTableDisabled(RegionInfo ri) {
if (ri == null) {
return false;
}
if (this.services == null) {
return false;
}
if (this.services.getTableStateManager() == null) {
return false;
}
TableState state = null;
try {
state = this.services.getTableStateManager().getTableState(ri.getTable());
} catch (IOException e) {
LOG.warn("Failed getting table state", e);
}
    return (state != null) && state.isDisabledOrDisabling();
  } | 3.26 |
hbase_ReportMakingVisitor_getReport_rdh | /**
* Do not call until after {@link #close()}. Will throw a {@link RuntimeException} if you do.
*/
CatalogJanitorReport getReport() {
if (!this.closed) {
throw new RuntimeException("Report not ready until after close()");
}
    return this.report;
} | 3.26 |
hbase_ReportMakingVisitor_isTableTransition_rdh | /**
* Returns True iff first row in hbase:meta or if we've broached a new table in hbase:meta
*/
private boolean isTableTransition(RegionInfo ri) {
return (this.previous == null) || (!this.previous.getTable().equals(ri.getTable()));
} | 3.26 |
hbase_ReportMakingVisitor_checkServer_rdh | /**
* Run through referenced servers and save off unknown and the dead.
*/
private void checkServer(RegionLocations locations) {
if (this.services == null) {
// Can't do this test if no services.
return;
}
if (locations == null) {
return;
}
if (locations.getRegionLocations() == null) {
return;
}
// Check referenced servers are known/online. Here we are looking
// at both the default replica -- the main replica -- and then replica
// locations too.
for (HRegionLocation location : locations.getRegionLocations()) {
if (location == null) {
continue;
}
      ServerName sn = location.getServerName();
if (sn == null) {
continue;
}
if (location.getRegion() == null) {
LOG.warn("Empty RegionInfo in {}", location);
// This should never happen but if it does, will mess up below.
        continue;
      }
RegionInfo ri = location.getRegion();
// Skip split parent region
if (ri.isSplitParent()) {
continue;
}
// skip the offline regions which belong to disabled table.
if (isTableDisabled(ri)) {
continue;
}
RegionState rs = this.services.getAssignmentManager().getRegionStates().getRegionState(ri);
if ((rs == null) || rs.isClosedOrAbnormallyClosed()) {
// If closed against an 'Unknown Server', that is should be fine.
continue;
}
ServerManager.ServerLiveState state = this.services.getServerManager().isServerKnownAndOnline(sn);
switch (state) {
case UNKNOWN :
this.report.unknownServers.add(new Pair<>(ri, sn));
break;
default :
break;
}
}
} | 3.26 |
hbase_GlobalMetricRegistriesAdapter_init_rdh | /**
 * Make sure that this global MetricSource for hbase-metrics module based metrics is initialized.
* This should be called only once.
*/
public static GlobalMetricRegistriesAdapter init() {
return new GlobalMetricRegistriesAdapter();
} | 3.26 |
hbase_HBaseSnapshotException_getSnapshotDescription_rdh | /**
* Returns the description of the snapshot that is being failed
*/
public SnapshotDescription getSnapshotDescription() {
return this.description;
} | 3.26 |
hbase_TableSplit_readFields_rdh | /**
* Reads the values of each field.
*
* @param in
* The input to read from.
* @throws IOException
* When reading the input fails.
*/
@Override
  public void readFields(DataInput in) throws IOException {
    Version version = Version.UNVERSIONED;
// TableSplit was not versioned in the beginning.
// In order to introduce it now, we make use of the fact
// that tableName was written with Bytes.writeByteArray,
// which encodes the array length as a vint which is >= 0.
// Hence if the vint is >= 0 we have an old version and the vint
// encodes the length of tableName.
// If < 0 we just read the version and the next vint is the length.
// @see Bytes#readByteArray(DataInput)
int len = WritableUtils.readVInt(in);
if (len < 0)
{
// what we just read was the version
version = Version.fromCode(len);
len = WritableUtils.readVInt(in);
}
byte[] tableNameBytes = new byte[len];
in.readFully(tableNameBytes);
tableName = TableName.valueOf(tableNameBytes);
    startRow = Bytes.readByteArray(in);
    endRow = Bytes.readByteArray(in);
regionLocation = Bytes.toString(Bytes.readByteArray(in));
if (version.atLeast(Version.f0)) {
scan = Bytes.toString(Bytes.readByteArray(in));
}
length = WritableUtils.readVLong(in);
    if (version.atLeast(Version.WITH_ENCODED_REGION_NAME)) {
      encodedRegionName = Bytes.toString(Bytes.readByteArray(in));
}
} | 3.26 |
hbase_TableSplit_getEncodedRegionName_rdh | /**
* Returns the region's encoded name.
*
* @return The region's encoded name.
*/
public String getEncodedRegionName() {
return encodedRegionName;
} | 3.26 |
hbase_TableSplit_getLength_rdh | /**
* Returns the length of the split.
*
* @return The length of the split.
* @see org.apache.hadoop.mapreduce.InputSplit#getLength()
*/
@Override
public long getLength() {
return length;
} | 3.26 |
hbase_TableSplit_write_rdh | /**
* Writes the field values to the output.
*
* @param out
* The output to write to.
* @throws IOException
* When writing the values to the output fails.
*/
@Override
public void write(DataOutput out) throws IOException {
WritableUtils.writeVInt(out, VERSION.code);
    Bytes.writeByteArray(out, tableName.getName());
    Bytes.writeByteArray(out, startRow);
    Bytes.writeByteArray(out, endRow);
    Bytes.writeByteArray(out, Bytes.toBytes(regionLocation));
    Bytes.writeByteArray(out, Bytes.toBytes(scan));
WritableUtils.writeVLong(out, length);
Bytes.writeByteArray(out, Bytes.toBytes(encodedRegionName));
} | 3.26 |
hbase_TableSplit_getEndRow_rdh | /**
* Returns the end row.
*
* @return The end row.
*/
public byte[] getEndRow() {
return endRow;
} | 3.26 |
hbase_TableSplit_toString_rdh | /**
* Returns the details about this instance as a string.
*
* @return The values of this instance as a string.
* @see java.lang.Object#toString()
*/
@Override
  public String toString() {
    StringBuilder sb = new StringBuilder();
sb.append("Split(");
sb.append("tablename=").append(tableName);
// null scan input is represented by ""
String printScan = "";
if (!scan.equals("")) {
try {
// get the real scan here in toString, not the Base64 string
printScan = TableMapReduceUtil.convertStringToScan(scan).toString();
} catch (IOException e) {
printScan = "";
}
sb.append(", scan=").append(printScan);
}
sb.append(", startrow=").append(Bytes.toStringBinary(startRow));
sb.append(", endrow=").append(Bytes.toStringBinary(endRow));
sb.append(", regionLocation=").append(regionLocation);
sb.append(", regionname=").append(encodedRegionName);
sb.append(")");
return sb.toString();
} | 3.26 |
hbase_TableSplit_getStartRow_rdh | /**
* Returns the start row.
*
* @return The start row.
*/
public byte[] getStartRow() {
return startRow;
} | 3.26 |
hbase_TableSplit_compareTo_rdh | /**
* Compares this split against the given one.
*
* @param split
* The split to compare to.
* @return The result of the comparison.
* @see java.lang.Comparable#compareTo(java.lang.Object)
*/
@Override
public int compareTo(TableSplit split) {
// If The table name of the two splits is the same then compare start row
// otherwise compare based on table names
int tableNameComparison = getTable().compareTo(split.getTable());
return tableNameComparison != 0 ? tableNameComparison : Bytes.compareTo(getStartRow(), split.getStartRow());
} | 3.26 |
hbase_TableSplit_getLocations_rdh | /**
* Returns the region's location as an array.
*
* @return The array containing the region location.
* @see org.apache.hadoop.mapreduce.InputSplit#getLocations()
*/
@Override
  public String[] getLocations() {
    return new String[] { regionLocation };
} | 3.26 |
hbase_TableSplit_getScan_rdh | /**
* Returns a Scan object from the stored string representation.
*
* @return Returns a Scan object based on the stored scanner.
* @throws IOException
* throws IOException if deserialization fails
*/
public Scan getScan() throws IOException {
return TableMapReduceUtil.convertStringToScan(this.scan);
} | 3.26 |
hbase_TableSplit_getRegionLocation_rdh | /**
* Returns the region location.
*
* @return The region's location.
*/
public String getRegionLocation() {
return regionLocation;
} | 3.26 |
hbase_TableSplit_getTableName_rdh | /**
* Returns the table name converted to a byte array.
*
* @see #getTable()
* @return The table name.
*/
public byte[] getTableName() {
return tableName.getName();
} | 3.26 |
hbase_TableSplit_getTable_rdh | /**
* Returns the table name.
*
* @return The table name.
*/
public TableName getTable() {
// It is ugly that usually to get a TableName, the method is called getTableName. We can't do
// that in here though because there was an existing getTableName in place already since
// deprecated.
return tableName;
} | 3.26 |
hbase_FSTableDescriptors_createTableDescriptor_rdh | /**
* Create new TableDescriptor in HDFS. Happens when we are creating table. If forceCreation is
* true then even if previous table descriptor is present it will be overwritten
*
* @return True if we successfully created file.
*/
public boolean createTableDescriptor(TableDescriptor htd, boolean forceCreation) throws IOException {
Path tableDir = getTableDir(htd.getTableName());
return createTableDescriptorForTableDirectory(tableDir, htd, forceCreation);
} | 3.26 |
hbase_FSTableDescriptors_m2_rdh | /**
* Deletes files matching the table info file pattern within the given directory whose sequenceId
* is at most the given max sequenceId.
*/
private static void m2(FileSystem fs, Path dir, int maxSequenceId) throws IOException {
FileStatus[] status = CommonFSUtils.listStatus(fs, dir, TABLEINFO_PATHFILTER);
for (FileStatus file : status) {
Path v49 = file.getPath();
int sequenceId = m0(v49).sequenceId;
if (sequenceId <= maxSequenceId) {
boolean success = CommonFSUtils.delete(fs, v49, false);
if (success) {
LOG.debug("Deleted {}", v49);
} else {
LOG.error("Failed to delete table descriptor at {}", v49);
}
}
}
} | 3.26 |
hbase_FSTableDescriptors_getTableDir_rdh | /**
* Return the table directory in HDFS
*/
private Path getTableDir(TableName tableName) {
return CommonFSUtils.getTableDir(rootdir, tableName);
} | 3.26 |
hbase_FSTableDescriptors_remove_rdh | /**
* Removes the table descriptor from the local cache and returns it. If not in read only mode, it
* also deletes the entire table directory(!) from the FileSystem.
*/
@Override
public TableDescriptor remove(final TableName tablename) throws IOException {
if (fsreadonly) {
throw new NotImplementedException("Cannot remove a table descriptor - in read only mode");
}
Path tabledir = getTableDir(tablename);
if (this.fs.exists(tabledir)) {
if (!this.fs.delete(tabledir, true)) {
throw new IOException("Failed delete of " + tabledir.toString());
}
    }
    TableDescriptor descriptor = this.f1.remove(tablename);
return descriptor;
} | 3.26 |
hbase_FSTableDescriptors_getAll_rdh | /**
* Returns a map from table name to table descriptor for all tables.
*/
  @Override
  public Map<String, TableDescriptor> getAll() throws IOException {
Map<String, TableDescriptor> tds = new ConcurrentSkipListMap<>();
if (fsvisited) {
for (Map.Entry<TableName, TableDescriptor> entry : this.f1.entrySet()) {
tds.put(entry.getKey().getNameWithNamespaceInclAsString(), entry.getValue());
}
} else {
LOG.info("Fetching table descriptors from the filesystem.");
final long startTime = EnvironmentEdgeManager.currentTime();
AtomicBoolean allvisited = new AtomicBoolean(usecache);
List<Path> tableDirs = FSUtils.getTableDirs(fs, rootdir);
if (!tableDescriptorParallelLoadEnable) {
        for (Path dir : tableDirs) {
          internalGet(dir, tds, allvisited);
}
} else {
CountDownLatch latch = new CountDownLatch(tableDirs.size());
for (Path dir : tableDirs) {
executor.submit(new Runnable() {
@Override
public void run() {
try {
internalGet(dir, tds, allvisited);
} finally {
latch.countDown();
}
}
});
}
try {
latch.await();
} catch (InterruptedException ie) {
throw ((InterruptedIOException) (new InterruptedIOException().initCause(ie)));
}
}
fsvisited = allvisited.get();
LOG.info(((("Fetched table descriptors(size=" + tds.size()) + ") cost ") + (EnvironmentEdgeManager.currentTime() - startTime)) + "ms.");
}
return tds;
} | 3.26 |
hbase_FSTableDescriptors_getTableInfoFileName_rdh | /**
* Returns Name of tableinfo file.
*/
@RestrictedApi(explanation = "Should only be called in tests or self", link = "", allowedOnPath = ".*/src/test/.*|.*/FSTableDescriptors\\.java")
static String getTableInfoFileName(int sequenceId, byte[] content) {
return (((TABLEINFO_FILE_PREFIX + ".") + formatTableInfoSequenceId(sequenceId)) + ".") + content.length;
} | 3.26 |
hbase_FSTableDescriptors_get_rdh | /**
* Get the current table descriptor for the given table, or null if none exists.
* <p/>
* Uses a local cache of the descriptor but still checks the filesystem on each call if
* {@link #fsvisited} is not {@code true}, i.e, we haven't done a full scan yet, to see if a newer
* file has been created since the cached one was read.
*/
@Override@Nullable
public TableDescriptor get(TableName tableName) {
f0++;
    if (usecache) {
// Look in cache of descriptors.
TableDescriptor cachedtdm = this.f1.get(tableName);
if (cachedtdm != null) {
cachehits++;
return cachedtdm;
}
// we do not need to go to fs any more
if (fsvisited) {
return null;
}
    }
    TableDescriptor tdmt = null;
try {
tdmt = getTableDescriptorFromFs(fs, getTableDir(tableName), fsreadonly).map(Pair::getSecond).orElse(null);
} catch (IOException ioe) {
LOG.debug("Exception during readTableDecriptor. Current table name = " + tableName, ioe);
}
// last HTD written wins
    if (usecache && (tdmt != null)) {
this.f1.put(tableName, tdmt);
}
return tdmt;
} | 3.26 |
hbase_FSTableDescriptors_getTableDescriptorFromFs_rdh | /**
* Returns the latest table descriptor for the table located at the given directory directly from
* the file system if it exists.
*/
  public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
    throws IOException {
return getTableDescriptorFromFs(fs, tableDir, true).map(Pair::getSecond).orElse(null);
} | 3.26 |
hbase_FSTableDescriptors_writeTableDescriptor_rdh | /**
* Attempts to write a new table descriptor to the given table's directory. It begins at the
* currentSequenceId + 1 and tries 10 times to find a new sequence number not already in use.
* <p/>
* Removes the current descriptor file if passed in.
*
* @return Descriptor file or null if we failed write.
*/
private static Path writeTableDescriptor(final FileSystem fs, final TableDescriptor td, final Path tableDir, final FileStatus currentDescriptorFile) throws IOException {
// Here we will write to the final directory directly to avoid renaming as on OSS renaming is
// not atomic and has performance issue. The reason why we could do this is that, in the below
// code we will not overwrite existing files, we will write a new file instead. And when
// loading, we will skip the half written file, please see the code in getTableDescriptorFromFs
Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR);
// In proc v2 we have table lock so typically, there will be no concurrent writes. Keep the
// retry logic here since we may still want to write the table descriptor from for example,
// HBCK2?
    int currentSequenceId =
      (currentDescriptorFile == null) ? 0 : m0(currentDescriptorFile.getPath()).sequenceId;
    // Put arbitrary upperbound on how often we retry
    int maxAttempts = 10;
    int maxSequenceId = currentSequenceId + maxAttempts;
    byte[] v56 = TableDescriptorBuilder.toByteArray(td);
    for (int newSequenceId = currentSequenceId + 1; newSequenceId <= maxSequenceId; newSequenceId++) {
      String fileName = getTableInfoFileName(newSequenceId, v56);
      Path filePath = new Path(tableInfoDir, fileName);
      try (FSDataOutputStream out = fs.create(filePath, false)) {
out.write(v56);
} catch (FileAlreadyExistsException e) {
LOG.debug("{} exists; retrying up to {} times", filePath, maxAttempts, e);
continue;
      } catch (IOException e) {
        LOG.debug("Failed write {}; retrying up to {} times", filePath, maxAttempts, e);
continue;
}
m2(fs, tableInfoDir, newSequenceId - 1);
return filePath;
    }
    return null;
} | 3.26 |
hbase_FSTableDescriptors_m0_rdh | /**
* Returns the current sequence id and file length or 0 if none found.
*
* @param p
* Path to a <code>.tableinfo</code> file.
*/
@RestrictedApi(explanation = "Should only be called in tests or self", link = "", allowedOnPath = ".*/src/test/.*|.*/FSTableDescriptors\\.java")
static SequenceIdAndFileLength m0(Path p) {
String name = p.getName();
if (!name.startsWith(TABLEINFO_FILE_PREFIX)) {
throw new IllegalArgumentException("Invalid table descriptor file name: " + name);
}
int firstDot = name.indexOf('.', TABLEINFO_FILE_PREFIX.length());
if (firstDot < 0) {
// oldest style where we do not have both sequence id and file length
return new SequenceIdAndFileLength(0, 0);
}
int secondDot = name.indexOf('.', firstDot + 1);
    if (secondDot < 0) {
      // old style where we do not have file length
      int sequenceId = Integer.parseInt(name.substring(firstDot + 1));
      return new SequenceIdAndFileLength(sequenceId, 0);
}
    int sequenceId = Integer.parseInt(name.substring(firstDot + 1, secondDot));
int fileLength = Integer.parseInt(name.substring(secondDot + 1));
return new SequenceIdAndFileLength(sequenceId, fileLength);
} | 3.26 |
hbase_FSTableDescriptors_getByNamespace_rdh | /**
* Find descriptors by namespace.
*
* @see #get(org.apache.hadoop.hbase.TableName)
*/
@Override
public Map<String, TableDescriptor> getByNamespace(String name) throws IOException {
Map<String, TableDescriptor> htds = new TreeMap<>();
List<Path> tableDirs = FSUtils.getLocalTableDirs(fs, CommonFSUtils.getNamespaceDir(rootdir, name));
for (Path d : tableDirs) {
TableDescriptor htd = get(CommonFSUtils.getTableName(d));
if (htd == null) {
continue;
}
htds.put(CommonFSUtils.getTableName(d).getNameAsString(), htd);
}
return htds;
} | 3.26 |
hbase_ProcedureUtil_convertToProcedure_rdh | /**
* Helper to convert the protobuf procedure.
* <p/>
* Used by ProcedureStore implementations.
* <p/>
* TODO: OPTIMIZATION: some of the field never change during the execution (e.g. className,
* procId, parentId, ...). We can split in 'data' and 'state', and the store may take advantage of
* it by storing the data only on insert().
*/
public static Procedure<?> convertToProcedure(ProcedureProtos.Procedure proto) throws IOException {
// Procedure from class name
Procedure<?> proc = newProcedure(proto.getClassName());
// set fields
proc.setProcId(proto.getProcId());
proc.setState(proto.getState());
proc.setSubmittedTime(proto.getSubmittedTime());
proc.setLastUpdate(proto.getLastUpdate());
    if (proto.hasParentId()) {
proc.setParentProcId(proto.getParentId());
}
if (proto.hasOwner()) {
proc.setOwner(proto.getOwner());
}
if (proto.hasTimeout()) {
proc.setTimeout(proto.getTimeout());
}
if (proto.getStackIdCount() > 0) {
proc.setStackIndexes(proto.getStackIdList());
}
if (proto.hasException()) {
assert (proc.getState() == ProcedureState.FAILED) || (proc.getState() == ProcedureState.ROLLEDBACK) : "The procedure must be failed (waiting to rollback) or rolledback";
proc.setFailure(RemoteProcedureException.fromProto(proto.getException()));
}
if (proto.hasResult()) {
proc.setResult(proto.getResult().toByteArray());
}
    if (proto.getNonce() != HConstants.NO_NONCE) {
proc.setNonceKey(new NonceKey(proto.getNonceGroup(), proto.getNonce()));
}
if (proto.getLocked()) {
proc.lockedWhenLoading();
    }
    if (proto.getBypass()) {
proc.bypass(null);
}
ProcedureStateSerializer serializer = null;
if (proto.getStateMessageCount() > 0) {
serializer = new StateSerializer(proto.toBuilder());
} else if (proto.hasStateData()) {
InputStream inputStream = proto.getStateData().newInput();
serializer = new CompatStateSerializer(inputStream);
}
if (serializer != null) {
proc.deserializeStateData(serializer);
}
return proc;
} | 3.26 |
hbase_ProcedureUtil_convertToProtoResourceType_rdh | // ==========================================================================
// convert from LockedResource object
// ==========================================================================
public static LockedResourceType convertToProtoResourceType(LockedResourceType resourceType) {
return LockServiceProtos.LockedResourceType.valueOf(resourceType.name());
} | 3.26 |
hbase_ProcedureUtil_createRetryCounter_rdh | /**
* Get a retry counter for getting the backoff time. We will use the
* {@link ExponentialBackoffPolicyWithLimit} policy, and the base unit is 1 second, max sleep time
* is 10 minutes by default.
* <p/>
* For UTs, you can set the {@link #PROCEDURE_RETRY_SLEEP_INTERVAL_MS} and
* {@link #PROCEDURE_RETRY_MAX_SLEEP_TIME_MS} to make more frequent retry so your UT will not
* timeout.
*/
public static RetryCounter createRetryCounter(Configuration conf) {
    long sleepIntervalMs =
      conf.getLong(PROCEDURE_RETRY_SLEEP_INTERVAL_MS, DEFAULT_PROCEDURE_RETRY_SLEEP_INTERVAL_MS);
long maxSleepTimeMs = conf.getLong(PROCEDURE_RETRY_MAX_SLEEP_TIME_MS, DEFAULT_PROCEDURE_RETRY_MAX_SLEEP_TIME_MS);
RetryConfig retryConfig = new RetryConfig().setSleepInterval(sleepIntervalMs).setMaxSleepTime(maxSleepTimeMs).setBackoffPolicy(new ExponentialBackoffPolicyWithLimit());
return new RetryCounter(retryConfig);
} | 3.26 |
hbase_Import_instantiateFilter_rdh | /**
* Create a {@link Filter} to apply to all incoming keys ({@link KeyValue KeyValues}) to
* optionally not include in the job output
*
* @param conf
* {@link Configuration} from which to load the filter
* @return the filter to use for the task, or <tt>null</tt> if no filter to should be used
* @throws IllegalArgumentException
* if the filter is misconfigured
*/
  public static Filter instantiateFilter(Configuration conf) {
// get the filter, if it was configured
Class<? extends Filter> filterClass = conf.getClass(FILTER_CLASS_CONF_KEY, null, Filter.class);
if (filterClass == null) {
LOG.debug("No configured filter class, accepting all keyvalues.");
return null;
}
LOG.debug("Attempting to create filter:" + filterClass);
String[] filterArgs = conf.getStrings(FILTER_ARGS_CONF_KEY);
ArrayList<byte[]> quotedArgs = toQuotedByteArrays(filterArgs);
try {
Method m = filterClass.getMethod("createFilterFromArguments", ArrayList.class);
return ((Filter) (m.invoke(null, quotedArgs)));
    } catch (IllegalAccessException e) {
      LOG.error("Couldn't instantiate filter!", e);
      throw new RuntimeException(e);
    } catch (SecurityException e) {
      LOG.error("Couldn't instantiate filter!", e);
      throw new RuntimeException(e);
    } catch (NoSuchMethodException e) {
      LOG.error("Couldn't instantiate filter!", e);
      throw new RuntimeException(e);
    } catch (IllegalArgumentException e) {
      LOG.error("Couldn't instantiate filter!", e);
      throw new RuntimeException(e);
    } catch (InvocationTargetException e) {
      LOG.error("Couldn't instantiate filter!", e);
      throw new RuntimeException(e);
    }
} | 3.26 |
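A brief sketch of wiring a filter into the job configuration so the method above can rebuild it. It leans on the addFilterAndArguments(...) helper shown further down; PrefixFilter and the argument value are placeholders.

// Hypothetical setup: configure a PrefixFilter for the import, then rebuild it.
Configuration conf = HBaseConfiguration.create();
Import.addFilterAndArguments(conf, org.apache.hadoop.hbase.filter.PrefixFilter.class,
  java.util.Arrays.asList("row-prefix"));           // args reach createFilterFromArguments
Filter filter = Import.instantiateFilter(conf);     // returns null when no filter is configured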
hbase_Import_m1_rdh | // helper: create a new KeyValue based on CF rename map
private static Cell m1(Cell kv, Map<byte[], byte[]> cfRenameMap) {
if (cfRenameMap != null) {
// If there's a rename mapping for this CF, create a new KeyValue
byte[] newCfName = cfRenameMap.get(CellUtil.cloneFamily(kv));
if (newCfName != null) {
List<Tag> tags = PrivateCellUtil.getTags(kv);
        kv = new KeyValue(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), // row
          newCfName, 0, newCfName.length, // renamed CF
          kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(), // qualifier
          kv.getTimestamp(), KeyValue.Type.codeToType(kv.getTypeByte()), // timestamp, KV type
          kv.getValueArray(), kv.getValueOffset(), kv.getValueLength(), // value
          tags.size() == 0 ? null : tags); // carry over tags, if any
}
}
return kv;
} | 3.26 |
hbase_Import_map_rdh | /**
*
* @param row
* The current table row key.
* @param value
* The columns.
* @param context
* The current context.
* @throws IOException
* When something is broken with the data.
*/
@Override
  public void map(ImmutableBytesWritable row, Result value, Context context) throws IOException {
try {
writeResult(row, value, context);
    } catch (InterruptedException e) {
      LOG.error("Interrupted while writing result", e);
      Thread.currentThread().interrupt();
    }
  } | 3.26
hbase_Import_main_rdh | /**
* Main entry point.
*
* @param args
* The command line parameters.
* @throws Exception
* When running the job fails.
*/
public static void main(String[] args) throws Exception {
int errCode = ToolRunner.run(HBaseConfiguration.create(), new Import(), args);
    System.exit(errCode);
  } | 3.26
hbase_Import_createCfRenameMap_rdh | // helper: make a map from sourceCfName to destCfName by parsing a config key
private static Map<byte[], byte[]> createCfRenameMap(Configuration conf) {
Map<byte[], byte[]> cfRenameMap = null;
String allMappingsPropVal = conf.get(CF_RENAME_PROP);
if (allMappingsPropVal != null) {
// The conf value format should be sourceCf1:destCf1,sourceCf2:destCf2,...
String[] allMappings = allMappingsPropVal.split(",");
for (String mapping : allMappings) {
if (cfRenameMap == null) {
cfRenameMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
}
String[] srcAndDest = mapping.split(":");
if (srcAndDest.length != 2) {
continue;
}
        cfRenameMap.put(Bytes.toBytes(srcAndDest[0]), Bytes.toBytes(srcAndDest[1]));
      }
}
return cfRenameMap;
} | 3.26 |
hbase_Import_createSubmittableJob_rdh | /**
* Sets up the actual job.
*
* @param conf
* The current configuration.
* @param args
* The command line parameters.
* @return The newly created job.
* @throws IOException
* When setting up the job fails.
*/
public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException {
TableName tableName = TableName.valueOf(args[0]);
conf.set(TABLE_NAME, tableName.getNameAsString());
Path inputDir = new Path(args[1]);
Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, (NAME + "_") + tableName));
job.setJarByClass(Import.Importer.class);
FileInputFormat.setInputPaths(job, inputDir);
job.setInputFormatClass(SequenceFileInputFormat.class);
String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY);
// make sure we get the filter in the jars
try {
Class<? extends Filter> filter = conf.getClass(FILTER_CLASS_CONF_KEY, null, Filter.class);
if (filter != null) {
TableMapReduceUtil.addDependencyJarsForClasses(conf, filter);
      }
    } catch (Exception e) {
throw new IOException(e);
}
    if ((hfileOutPath != null) && conf.getBoolean(HAS_LARGE_RESULT, false)) {
      LOG.info("Use Large Result!!");
      try (Connection conn = ConnectionFactory.createConnection(conf);
        Table table = conn.getTable(tableName);
        RegionLocator regionLocator = conn.getRegionLocator(tableName)) {
HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator);
job.setMapperClass(Import.CellSortImporter.class);
job.setReducerClass(Import.CellReducer.class);
Path outputDir = new Path(hfileOutPath);
FileOutputFormat.setOutputPath(job, outputDir);
job.setMapOutputKeyClass(Import.CellWritableComparable.class);
job.setMapOutputValueClass(MapReduceExtendedCell.class);
job.getConfiguration().setClass("mapreduce.job.output.key.comparator.class", Import.CellWritableComparable.CellWritableComparator.class, RawComparator.class);
Path partitionsPath = new Path(TotalOrderPartitioner.getPartitionFile(job.getConfiguration()));
FileSystem fs = FileSystem.get(job.getConfiguration());
fs.deleteOnExit(partitionsPath);
job.setPartitionerClass(Import.CellWritableComparablePartitioner.class);
job.setNumReduceTasks(regionLocator.getStartKeys().length);
TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), Preconditions.class);
}
} else if (hfileOutPath != null) {
LOG.info("writing to hfiles for bulk load.");
job.setMapperClass(Import.CellImporter.class);
      try (Connection conn = ConnectionFactory.createConnection(conf);
        Table table = conn.getTable(tableName);
        RegionLocator regionLocator = conn.getRegionLocator(tableName)) {
job.setReducerClass(CellSortReducer.class);
Path outputDir = new Path(hfileOutPath);
FileOutputFormat.setOutputPath(job, outputDir);
job.setMapOutputKeyClass(ImmutableBytesWritable.class);
job.setMapOutputValueClass(MapReduceExtendedCell.class);
HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator);
TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), Preconditions.class);
}
} else {
LOG.info("writing directly to table from Mapper.");
// No reducers. Just write straight to table. Call initTableReducerJob
// because it sets up the TableOutputFormat.
job.setMapperClass(Import.Importer.class);
      TableMapReduceUtil.initTableReducerJob(tableName.getNameAsString(), null, job);
      job.setNumReduceTasks(0);
}
return job;
} | 3.26 |
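A minimal driver sketch for the job factory above; the table name and input directory are placeholders.

// Hypothetical driver: import table "t1" from a previously exported directory.
Configuration conf = HBaseConfiguration.create();
Job job = Import.createSubmittableJob(conf, new String[] { "t1", "/export/t1" });
System.exit(job.waitForCompletion(true) ? 0 : 1);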
hbase_Import_addFilterAndArguments_rdh | /**
* Add a Filter to be instantiated on import
*
* @param conf
* Configuration to update (will be passed to the job)
* @param clazz
* {@link Filter} subclass to instantiate on the server.
* @param filterArgs
* List of arguments to pass to the filter on instantiation
*/
public static void addFilterAndArguments(Configuration conf, Class<? extends Filter> clazz, List<String> filterArgs) throws IOException {
conf.set(Import.FILTER_CLASS_CONF_KEY, clazz.getName());
conf.setStrings(Import.FILTER_ARGS_CONF_KEY, filterArgs.toArray(new String[filterArgs.size()]));
} | 3.26 |
hbase_Import_usage_rdh | /* @param errorMsg Error message. Can be null. */
private static void usage(final String errorMsg) {
if ((errorMsg != null) && (errorMsg.length() > 0)) {
System.err.println("ERROR: " + errorMsg);
}
System.err.println("Usage: Import [options] <tablename> <inputdir>");
System.err.println("By default Import will load data directly into HBase. To instead generate");
System.err.println("HFiles of data to prepare for a bulk data load, pass the option:");System.err.println((" -D" + BULK_OUTPUT_CONF_KEY) + "=/path/for/output");
System.err.println("If there is a large result that includes too much Cell " + "whitch can occur OOME caused by the memery sort in reducer, pass the option:");
System.err.println((" -D" + HAS_LARGE_RESULT) + "=true");
System.err.println(" To apply a generic org.apache.hadoop.hbase.filter.Filter to the input, use");
System.err.println((" -D" + FILTER_CLASS_CONF_KEY) + "=<name of filter class>");
System.err.println((" -D" + FILTER_ARGS_CONF_KEY) + "=<comma separated list of args for filter");
System.err.println(((((((" NOTE: The filter will be applied BEFORE doing key renames via the " + CF_RENAME_PROP) + " property. Futher, filters will only use the") + " Filter#filterRowKey(byte[] buffer, int offset, int length) method to identify ") + " whether the current row needs to be ignored completely for processing and ") + " Filter#filterCell(Cell) method to determine if the Cell should be added;") + " Filter.ReturnCode#INCLUDE and #INCLUDE_AND_NEXT_COL will be considered as including") + " the Cell.");
System.err.println("To import data exported from HBase 0.94, use");
System.err.println(" -Dhbase.import.version=0.94");
System.err.println((" -D " + JOB_NAME_CONF_KEY) + "=jobName - use the specified mapreduce job name for the import");
System.err.println((((((("For performance consider the following options:\n" + " -Dmapreduce.map.speculative=false\n") + " -Dmapreduce.reduce.speculative=false\n") + " -D") + WAL_DURABILITY) + "=<Used while writing data to hbase.") + " Allowed values are the supported durability values") + " like SKIP_WAL/ASYNC_WAL/SYNC_WAL/...>");
} | 3.26 |
hbase_Import_configureCfRenaming_rdh | /**
* <p>
* Sets a configuration property with key {@link #CF_RENAME_PROP} in conf that tells the mapper
* how to rename column families.
* <p>
* Alternately, instead of calling this function, you could set the configuration key
* {@link #CF_RENAME_PROP} yourself. The value should look like
*
* <pre>
* srcCf1:destCf1,srcCf2:destCf2,....
* </pre>
*
* . This would have the same effect on the mapper behavior.
*
* @param conf
* the Configuration in which the {@link #CF_RENAME_PROP} key will be set
* @param renameMap
* a mapping from source CF names to destination CF names
*/
public static void configureCfRenaming(Configuration conf, Map<String, String> renameMap) {
StringBuilder sb = new StringBuilder();
    for (Map.Entry<String, String> entry : renameMap.entrySet()) {
      String sourceCf = entry.getKey();
      String destCf = entry.getValue();
      if (sourceCf.contains(":") || sourceCf.contains(",") || destCf.contains(":")
        || destCf.contains(",")) {
        throw new IllegalArgumentException(
          "Illegal character in CF names: " + sourceCf + ", " + destCf);
}
if (sb.length() != 0) {
sb.append(",");
}
sb.append((sourceCf + ":") + destCf);
}
conf.set(CF_RENAME_PROP, sb.toString());
} | 3.26 |
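A small usage sketch for the renaming hook above; the family names are placeholders.

// Hypothetical setup: import while renaming column family "cf_old" to "cf_new".
Configuration conf = HBaseConfiguration.create();
Map<String, String> renameMap = new HashMap<>();
renameMap.put("cf_old", "cf_new");
Import.configureCfRenaming(conf, renameMap);
// Equivalent to setting the CF_RENAME_PROP key to "cf_old:cf_new".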
hbase_Import_flushRegionsIfNecessary_rdh | /**
* If the durability is set to {@link Durability#SKIP_WAL} and the data is imported to hbase, we
* need to flush all the regions of the table as the data is held in memory and is also not
* present in the Write Ahead Log to replay in scenarios of a crash. This method flushes all the
* regions of the table in the scenarios of import data to hbase with {@link Durability#SKIP_WAL}
*/
  public static void flushRegionsIfNecessary(Configuration conf)
    throws IOException, InterruptedException {
    String tableName = conf.get(TABLE_NAME);
Admin hAdmin = null;
Connection connection = null;
String durability = conf.get(WAL_DURABILITY);
// Need to flush if the data is written to hbase and skip wal is enabled.
if (((conf.get(BULK_OUTPUT_CONF_KEY) == null) && (durability != null)) && Durability.SKIP_WAL.name().equalsIgnoreCase(durability)) {
LOG.info("Flushing all data that skipped the WAL.");
try {
connection = ConnectionFactory.createConnection(conf);
hAdmin = connection.getAdmin();
hAdmin.flush(TableName.valueOf(tableName));
} finally {
if (hAdmin != null) {
hAdmin.close();
}
        if (connection != null) {
          connection.close();
}
}
}
} | 3.26 |
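A brief sketch of the configuration under which the flush above actually triggers. It assumes the TABLE_NAME and WAL_DURABILITY constants are accessible to callers; the table name is a placeholder.

// Hypothetical follow-up after a direct-to-table import that skipped the WAL.
Configuration conf = HBaseConfiguration.create();
conf.set(Import.TABLE_NAME, "t1");                            // assumed accessible constant
conf.set(Import.WAL_DURABILITY, Durability.SKIP_WAL.name());  // assumed accessible constant
// BULK_OUTPUT_CONF_KEY stays unset, so data went straight to the table.
Import.flushRegionsIfNecessary(conf);                         // forces a flush so nothing is lost on crash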
hbase_ColumnPaginationFilter_toByteArray_rdh | /**
* Returns The filter serialized using pb
*/
@Override
public byte[] toByteArray() {
FilterProtos.ColumnPaginationFilter.Builder builder = FilterProtos.ColumnPaginationFilter.newBuilder();
builder.setLimit(this.limit);
if (this.offset >= 0) {
builder.setOffset(this.offset);
}
if (this.columnOffset != null) {
builder.setColumnOffset(UnsafeByteOperations.unsafeWrap(this.columnOffset));
}
return builder.build().toByteArray();
} | 3.26 |
hbase_ColumnPaginationFilter_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof ColumnPaginationFilter)) {
return false;
}
ColumnPaginationFilter other = ((ColumnPaginationFilter) (o));
if (this.columnOffset != null) {
      return (this.getLimit() == other.getLimit())
        && Bytes.equals(this.getColumnOffset(), other.getColumnOffset());
}
    return (this.getLimit() == other.getLimit()) && (this.getOffset() == other.getOffset());
  } | 3.26
hbase_ColumnPaginationFilter_parseFrom_rdh | /**
* Parse a serialized representation of {@link ColumnPaginationFilter}
*
* @param pbBytes
* A pb serialized {@link ColumnPaginationFilter} instance
* @return An instance of {@link ColumnPaginationFilter} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static ColumnPaginationFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.ColumnPaginationFilter proto;
try {
proto = FilterProtos.ColumnPaginationFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
if (proto.hasColumnOffset()) {
return new ColumnPaginationFilter(proto.getLimit(), proto.getColumnOffset().toByteArray());
}
return new ColumnPaginationFilter(proto.getLimit(), proto.getOffset());
} | 3.26 |
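A quick round-trip sketch tying the two methods above together.

// Serialize a filter to protobuf bytes and rebuild an equivalent instance.
ColumnPaginationFilter filter = new ColumnPaginationFilter(10, 5);   // limit 10, offset 5
byte[] bytes = filter.toByteArray();
ColumnPaginationFilter copy = ColumnPaginationFilter.parseFrom(bytes);
// copy carries the same limit and offset (or column offset) as the original.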
hbase_Lz4Codec_getBufferSize_rdh | // Package private
static int getBufferSize(Configuration conf) {
return conf.getInt(LZ4_BUFFER_SIZE_KEY, conf.getInt(CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY, CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_DEFAULT));
} | 3.26 |
hbase_AggregateImplementation_getMin_rdh | /**
* Gives the minimum for a given combination of column qualifier and column family, in the given
* row range as defined in the Scan object. In its current implementation, it takes one column
* family and one column qualifier (if provided). In case of null column qualifier, minimum value
* for the entire column family will be returned.
*/
@Override
public void getMin(RpcController controller, AggregateRequest request, RpcCallback<AggregateResponse> done) {
    AggregateResponse response = null;
    InternalScanner scanner = null;
T min = null;
try {
      ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
      T temp;
      Scan scan = ProtobufUtil.toScan(request.getScan());
      scanner = env.getRegion().getScanner(scan);
List<Cell> results = new ArrayList<>();
byte[] colFamily = scan.getFamilies()[0];
      NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
      byte[] qualifier = null;
      if ((qualifiers != null) && (!qualifiers.isEmpty())) {
        qualifier = qualifiers.pollFirst();
      }
boolean hasMoreRows = false;
do {
hasMoreRows = scanner.next(results);
int listSize = results.size();
        for (int i = 0; i < listSize; i++) {
          temp = ci.getValue(colFamily, qualifier, results.get(i));
          min = ((min == null) || ((temp != null) && (ci.compare(temp, min) < 0))) ? temp : min;
}
results.clear();
      } while (hasMoreRows);
if (min != null) {
response = AggregateResponse.newBuilder().addFirstPart(ci.getProtoForCellType(min).toByteString()).build();
}
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
} finally {
if (scanner != null) {
IOUtils.closeQuietly(scanner);
}
}
log.info((("Minimum from this region is " + env.getRegion().getRegionInfo().getRegionNameAsString()) + ": ") + min);
done.run(response);
} | 3.26 |
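A hedged client-side sketch of driving this aggregation endpoint. The AggregationClient and LongColumnInterpreter classes, and the shape of the min(...) call, are assumptions that may differ by HBase version; the scan must name exactly one column family, as the endpoint requires, and the table and column names are placeholders.

// Hypothetical client call: minimum of a long-encoded column over a row range.
// AggregationClient.min is declared to throw Throwable, hence the broad catch.
try (AggregationClient aggClient = new AggregationClient(conf)) {
  Scan scan = new Scan().addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
  Long min = aggClient.min(TableName.valueOf("t1"), new LongColumnInterpreter(), scan);
  System.out.println("min = " + min);
} catch (Throwable t) {
  throw new IOException("min aggregation failed", t);
}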