name (string, 12–178 chars) | code_snippet (string, 8–36.5k chars) | score (float64, 3.26–3.68)
---|---|---
hbase_CompareFilter_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof CompareFilter)) {
return false;
}
CompareFilter other = (CompareFilter) o;
return this.m0().equals(other.m0())
  && (this.getComparator() == other.getComparator()
    || this.getComparator().areSerializedFieldsEqual(other.getComparator()));
} | 3.26 |
hbase_CompareFilter_convert_rdh | /**
* Returns a pb instance to represent this instance.
*/
CompareFilter convert() {
FilterProtos.CompareFilter.Builder builder = FilterProtos.CompareFilter.newBuilder();
HBaseProtos.CompareType compareOp = CompareType.valueOf(this.f0.name());
builder.setCompareOp(compareOp);
if (this.comparator != null) {
  builder.setComparator(ProtobufUtil.toComparator(this.comparator));
}
return builder.build();
} | 3.26 |
hbase_CompareFilter_extractArguments_rdh | /**
* Returns an array of heterogeneous objects
*/
public static ArrayList<Object> extractArguments(ArrayList<byte[]> filterArguments) {
Preconditions.checkArgument(filterArguments.size() == 2, "Expected 2 but got: %s", filterArguments.size());
CompareOperator op = ParseFilter.createCompareOperator(filterArguments.get(0));
ByteArrayComparable comparator = ParseFilter.createComparator(ParseFilter.removeQuotesFromByteArray(filterArguments.get(1)));
if (comparator instanceof RegexStringComparator || comparator instanceof SubstringComparator) {
  if (op != CompareOperator.EQUAL && op != CompareOperator.NOT_EQUAL) {
    throw new IllegalArgumentException("A regexstring comparator and substring comparator"
      + " can only be used with EQUAL and NOT_EQUAL");
  }
}
ArrayList<Object> arguments = new ArrayList<>(2);
arguments.add(op);
arguments.add(comparator);
return arguments;
} | 3.26 |
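For context, a hedged usage sketch of extractArguments(): the two byte[] arguments mirror what ParseFilter would hand over for a filter string such as "(>=, 'binary:abc')". The argument values here are hypothetical.

```java
import java.util.ArrayList;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.filter.ByteArrayComparable;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical arguments, as ParseFilter would pass them for "(>=, 'binary:abc')".
ArrayList<byte[]> raw = new ArrayList<>();
raw.add(Bytes.toBytes(">="));
raw.add(Bytes.toBytes("'binary:abc'"));
ArrayList<Object> parsed = CompareFilter.extractArguments(raw);
CompareOperator op = (CompareOperator) parsed.get(0);          // GREATER_OR_EQUAL
ByteArrayComparable cmp = (ByteArrayComparable) parsed.get(1); // comparator built from "binary:abc"
```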
hbase_CompareFilter_getComparator_rdh | /**
* Returns the comparator
*/
public ByteArrayComparable getComparator() {
return comparator;
} | 3.26 |
hbase_ReplaySyncReplicationWALCallable_filter_rdh | // return whether we should include this entry.
private boolean filter(Entry entry) {
WALEdit edit = entry.getEdit();
WALUtil.filterCells(edit, c -> CellUtil.matchingFamily(c, WALEdit.METAFAMILY) ? null : c);
return !edit.isEmpty();
} | 3.26 |
hbase_TableInfoModel_get_rdh | /**
*
* @param index
* the index
* @return the region model
*/
public TableRegionModel get(int index) {
return regions.get(index);
} | 3.26 |
hbase_TableInfoModel_add_rdh | /**
* Add a region model to the list
*
* @param region
* the region
*/
public void add(TableRegionModel region) {
regions.add(region);
} | 3.26 |
hbase_TableInfoModel_getRegions_rdh | /**
* Returns the regions
 */
@XmlElement(name = "Region")
public List<TableRegionModel> getRegions() {
return regions;
} | 3.26 |
hbase_TableInfoModel_setRegions_rdh | /**
*
* @param regions
* the regions to set
*/
public void setRegions(List<TableRegionModel> regions) {
this.regions = regions;
} | 3.26 |
hbase_TableInfoModel_toString_rdh | /* (non-Javadoc)
@see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
for (TableRegionModel aRegion : regions) {
sb.append(aRegion.toString());
sb.append('\n');
}
return sb.toString();
} | 3.26 |
hbase_TableInfoModel_setName_rdh | /**
*
* @param name
* the table name
*/
public void setName(String name) {
this.name = name;
} | 3.26 |
hbase_BloomFilterUtil_idealMaxKeys_rdh | /**
* The maximum number of keys we can put into a Bloom filter of a certain size to maintain the
* given error rate, assuming the number of hash functions is chosen optimally and does not even
* have to be an integer (hence the "ideal" in the function name).
*
* @return maximum number of keys that can be inserted into the Bloom filter
* @see #computeMaxKeys(long, double, int) for a more precise estimate
*/
public static long idealMaxKeys(long bitSize, double errorRate) {
// The reason we need to use floor here is that otherwise we might put
// more keys in a Bloom filter than is allowed by the target error rate.
return ((long) (bitSize * (LOG2_SQUARED / (-Math.log(errorRate)))));
} | 3.26 |
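As a quick sanity check of idealMaxKeys(), a standalone sketch of the same arithmetic, assuming LOG2_SQUARED is ln(2)^2 so the expression matches the standard Bloom filter bound n = m * ln(2)^2 / (-ln p):

```java
// Standalone illustration of the idealMaxKeys() math; not HBase API.
public class IdealMaxKeysSketch {
  // Assumed to match BloomFilterUtil.LOG2_SQUARED, i.e. ln(2)^2.
  static final double LOG2_SQUARED = Math.log(2) * Math.log(2);

  static long idealMaxKeys(long bitSize, double errorRate) {
    return (long) (bitSize * (LOG2_SQUARED / (-Math.log(errorRate))));
  }

  public static void main(String[] args) {
    // A 64 KiB Bloom filter (524288 bits) at a 1% target error rate
    // holds roughly 524288 * 0.4805 / 4.605, i.e. about 54,700 keys.
    System.out.println(idealMaxKeys(524288, 0.01));
  }
}
```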
hbase_BloomFilterUtil_checkBit_rdh | /**
* Check if bit at specified index is 1.
*
* @param pos
* index of bit
* @return true if bit at specified index is 1, false if 0.
*/
static boolean checkBit(int pos, ByteBuff bloomBuf, int bloomOffset) {
  int bytePos = pos >> 3; // pos / 8
  int bitPos = pos & 0x7; // pos % 8
  byte curByte = bloomBuf.get(bloomOffset + bytePos);
  curByte &= bitvals[bitPos];
  return curByte != 0;
}
/**
* A human-readable string with statistics for the given Bloom filter.
*
* @param bloomFilter
* the Bloom filter to output statistics for;
* @return a string consisting of "<key>: <value>" parts separated by
{@link #STATS_RECORD_SEP} | 3.26 |
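To make the byte/bit arithmetic in checkBit() above explicit, here is a minimal standalone analogue over a plain byte[]. It is an illustration only and assumes LSB-first bit numbering, which may differ from HBase's bitvals table.

```java
// Illustration of checkBit()'s indexing; bit position 11 lands in byte 1, bit 3.
public class CheckBitSketch {
  static boolean checkBit(int pos, byte[] bloom) {
    int bytePos = pos >> 3;  // pos / 8
    int bitPos = pos & 0x7;  // pos % 8
    return (bloom[bytePos] & (1 << bitPos)) != 0; // assumes LSB-first masks
  }

  public static void main(String[] args) {
    byte[] bloom = new byte[2];
    bloom[1] |= 1 << 3;                       // set bit 11 by hand
    System.out.println(checkBit(11, bloom));  // true
    System.out.println(checkBit(10, bloom));  // false
  }
}
```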
hbase_BloomFilterUtil_computeFoldableByteSize_rdh | /**
* Increases the given byte size of a Bloom filter until it can be folded by the given factor.
*
* @return Foldable byte size
*/
public static int computeFoldableByteSize(long bitSize, int foldFactor) {
long byteSizeLong = (bitSize + 7) / 8;
int mask = (1 << foldFactor) - 1;
if ((mask & byteSizeLong) != 0) {
byteSizeLong >>= foldFactor;
++byteSizeLong;
byteSizeLong <<= foldFactor;
}
if (byteSizeLong > Integer.MAX_VALUE) {
throw new IllegalArgumentException("byteSize=" + byteSizeLong + " too large for bitSize="
  + bitSize + ", foldFactor=" + foldFactor);
}
return ((int) (byteSizeLong));
} | 3.26 |
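To make the rounding in computeFoldableByteSize() concrete, a traced example with hypothetical inputs (bitSize = 1000, foldFactor = 3): the raw byte size of 125 is not a multiple of 2^3, so it is rounded up to the next multiple, 128.

```java
// Standalone trace of computeFoldableByteSize(1000, 3); illustration only.
long bitSize = 1000;
int foldFactor = 3;
long byteSizeLong = (bitSize + 7) / 8;  // 125
int mask = (1 << foldFactor) - 1;       // 7
if ((mask & byteSizeLong) != 0) {       // 125 is not a multiple of 8, so round up
  byteSizeLong >>= foldFactor;          // 15
  ++byteSizeLong;                       // 16
  byteSizeLong <<= foldFactor;          // 128
}
System.out.println(byteSizeLong);       // prints 128
```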
hbase_BloomFilterUtil_setRandomGeneratorForTest_rdh | /**
* Sets a random generator to be used for look-ups instead of computing hashes. Can be used to
* simulate uniformity of accesses better in a test environment. Should not be set in a real
* environment where correctness matters!
* <p>
* This gets used in {@link #contains(ByteBuff, int, int, Hash, int, HashKey)}
*
* @param random
* The random number source to use, or null to compute actual hashes
*/
@SuppressWarnings(value = "EI_EXPOSE_STATIC_REP2", justification = "ignore for now, improve TestCompoundBloomFilter later")
public static void setRandomGeneratorForTest(Random random) {
randomGeneratorForTest = random;
} | 3.26 |
hbase_BloomFilterUtil_actualErrorRate_rdh | /**
* Computes the actual error rate for the given number of elements, number of bits, and number of
* hash functions. Taken directly from the
* <a href= "http://en.wikipedia.org/wiki/Bloom_filter#Probability_of_false_positives" > Wikipedia
* Bloom filter article</a>.
*
* @return the actual error rate
*/
public static double actualErrorRate(long maxKeys, long bitSize, int functionCount) {
  return Math.exp(
    Math.log(1 - Math.exp(-functionCount * maxKeys * 1.0 / bitSize)) * functionCount);
} | 3.26 |
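The expression above implements the standard false-positive probability p = (1 - e^(-k*n/m))^k from the cited article. A hedged check with hypothetical numbers, written with Math.pow for readability (mathematically equivalent to the exp/log form in the snippet):

```java
// Illustration of the actualErrorRate() formula; the inputs are hypothetical.
long maxKeys = 54700;      // n
long bitSize = 524288;     // m
int functionCount = 7;     // k, close to the optimal (m / n) * ln 2
double p = Math.pow(1 - Math.exp(-functionCount * (double) maxKeys / bitSize), functionCount);
System.out.println(p);     // roughly 0.01 for these inputs
```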
hbase_MetaTableMetrics_getRegionIdFromOp_rdh | /**
* Get regionId from Ops such as: get, put, delete.
*
* @param op
* such as get, put or delete.
*/
private String getRegionIdFromOp(Row op) {
final String v2 = Bytes.toString(op.getRow());
if (StringUtils.isEmpty(v2)) {
return null;
}
final String[] splits = v2.split(",");
return splits.length > 2 ? splits[2] : null;
} | 3.26 |
hbase_MetaTableMetrics_getTableNameFromOp_rdh | /**
* Get table name from Ops such as: get, put, delete.
*
* @param op
* such as get, put or delete.
*/
private String getTableNameFromOp(Row op) {
  final String tableRowKey = Bytes.toString(op.getRow());
if (StringUtils.isEmpty(tableRowKey)) {
return null;
}
final String[] splits = tableRowKey.split(",");
return splits.length > 0 ? splits[0] : null;
} | 3.26 |
hbase_MetaTableMetrics_registerAndMarkMeter_rdh | // Helper function to register and mark meter if not present
private void registerAndMarkMeter(String requestMeter) {
if (requestMeter.isEmpty()) {
return;
}
if (!registry.get(requestMeter).isPresent()) {
f0.add(requestMeter);
}
registry.meter(requestMeter).mark();
} | 3.26 |
hbase_CallRunner_cleanup_rdh | /**
* Cleanup after ourselves... let go of references.
*/
private void cleanup() {
this.call.cleanup();
this.call = null;
this.rpcServer = null;
} | 3.26 |
hbase_CallRunner_drop_rdh | /**
* When we want to drop this call because of server is overloaded.
*/
public void drop() {
try (Scope ignored = span.makeCurrent()) {
if (call.disconnectSince() >= 0) {
RpcServer.LOG.debug("{}: skipped {}", Thread.currentThread().getName(), call);
span.addEvent("Client disconnect detected");
span.setStatus(StatusCode.OK);
return;
}
// Set the response
InetSocketAddress address = rpcServer.getListenerAddress();
call.setResponse(null, null, CALL_DROPPED_EXCEPTION, ("Call dropped, server " + (address != null ? address : "(channel closed)")) + " is overloaded, please retry.");
TraceUtil.setError(span, CALL_DROPPED_EXCEPTION);
call.sendResponseIfReady();
this.rpcServer.getMetrics().exception(CALL_DROPPED_EXCEPTION);
} catch (ClosedChannelException cce) {
InetSocketAddress address = rpcServer.getListenerAddress();
RpcServer.LOG.warn((("{}: caught a ClosedChannelException, " + "this means that the server ") + (address != null ? address : "(channel closed)")) + " was processing a request but the client went away. The error message was: {}", Thread.currentThread().getName(), cce.getMessage()); TraceUtil.setError(span, cce);} catch (Exception e) {
RpcServer.LOG.warn("{}: caught: {}", Thread.currentThread().getName(), StringUtils.stringifyException(e));
TraceUtil.setError(span, e);
} finally {
if (!successful) {
this.rpcServer.addCallSize(call.getSize() * (-1));
}
cleanup();
span.end();
}
} | 3.26 |
hbase_BalanceRequest_newBuilder_rdh | /**
* Create a builder to construct a custom {@link BalanceRequest}.
*/
public static Builder newBuilder() {
return new Builder();
} | 3.26 |
hbase_BalanceRequest_setDryRun_rdh | /**
* Updates BalancerRequest to run the balancer in dryRun mode. In this mode, the balancer will
* try to find a plan but WILL NOT execute any region moves or call any coprocessors. You can
* run in dryRun mode regardless of whether the balancer switch is enabled or disabled, but
* dryRun mode will not run over an existing request or chore. Dry run is useful for testing out
* new balance configs. See the logs on the active HMaster for the results of the dry run.
*/
public Builder setDryRun(boolean dryRun) {
this.dryRun = dryRun;
return this;
} | 3.26 |
hbase_BalanceRequest_defaultInstance_rdh | /**
* Get a BalanceRequest for a default run of the balancer. The default mode executes any moves
* calculated and will not run if regions are already in transition.
*/
public static BalanceRequest defaultInstance() {
return DEFAULT;
} | 3.26 |
hbase_BalanceRequest_build_rdh | /**
* Build the {@link BalanceRequest}
*/
public BalanceRequest build() {
return new BalanceRequest(dryRun, f0);
} | 3.26 |
hbase_BalanceRequest_setIgnoreRegionsInTransition_rdh | /**
* Updates BalancerRequest to run the balancer even if there are regions in transition. WARNING:
* Advanced usage only, this could cause more issues than it fixes.
*/
public Builder setIgnoreRegionsInTransition(boolean ignoreRegionsInTransition) {
this.f0 = ignoreRegionsInTransition;
return this;
} | 3.26 |
hbase_BalanceRequest_isDryRun_rdh | /**
* Returns true if the balancer should run in dry run mode, otherwise false. In dry run mode,
* moves will be calculated but not executed.
 */
public boolean isDryRun() {
  return dryRun;
} | 3.26 |
hbase_BalanceRequest_isIgnoreRegionsInTransition_rdh | /**
* Returns true if the balancer should execute even if regions are in transition, otherwise false.
* This is an advanced usage feature, as it can cause more issues than it fixes.
*/
public boolean isIgnoreRegionsInTransition() {
return ignoreRegionsInTransition;
} | 3.26 |
hbase_ZKUtil_setData_rdh | /**
* Returns a setData ZKUtilOp
*/
public static ZKUtilOp setData(String path, byte[] data, int version) {
return new SetData(path, data, version);
} | 3.26 |
hbase_ZKUtil_getParent_rdh | //
// Helper methods
//
/**
* Returns the full path of the immediate parent of the specified node.
*
* @param node
* path to get parent of
* @return parent of path, null if passed the root node or an invalid node
*/
public static String getParent(String node) {
int idx = node.lastIndexOf(ZNodePaths.ZNODE_PATH_SEPARATOR);
return idx <= 0 ? null : node.substring(0, idx);
} | 3.26 |
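A brief usage note for getParent(), with hypothetical paths (the separator is "/"):

```java
// Usage sketch; the paths are made up for illustration.
String a = ZKUtil.getParent("/hbase/rs/server1,16020,1"); // "/hbase/rs"
String b = ZKUtil.getParent("/hbase");                    // null: the parent would be the root
String c = ZKUtil.getParent("node");                      // null: no separator in the path
```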
hbase_ZKUtil_setWatchIfNodeExists_rdh | /**
* Watch the specified znode, but only if exists. Useful when watching for deletions. Uses
* .getData() (and handles NoNodeException) instead of .exists() to accomplish this, as .getData()
* will only set a watch if the znode exists.
*
* @param zkw
* zk reference
* @param znode
* path of node to watch
* @return true if the watch is set, false if the node does not exist
* @throws KeeperException
* if unexpected zookeeper exception
*/
public static boolean setWatchIfNodeExists(ZKWatcher zkw, String znode) throws KeeperException {
try {
zkw.getRecoverableZooKeeper().getData(znode, true, null);
return true;
} catch (NoNodeException e) {
return false;
} catch (InterruptedException e) {
LOG.warn(zkw.prefix("Unable to set watcher on znode " + znode), e);
zkw.interruptedException(e);
return false;
}
} | 3.26 |
hbase_ZKUtil_convert_rdh | /**
* Convert a {@link DeserializationException} to a more palatable {@link KeeperException}. Used
* when can't let a {@link DeserializationException} out w/o changing public API.
*
* @param e
* Exception to convert
* @return Converted exception
*/
public static KeeperException convert(final DeserializationException e) {
KeeperException ke = new KeeperException.DataInconsistencyException();
ke.initCause(e);
return ke;
} | 3.26 |
hbase_ZKUtil_multiOrSequential_rdh | /**
* Use ZooKeeper's multi-update functionality. If all of the following are true:
* <ul>
* <li>runSequentialOnMultiFailure is true</li>
* <li>on calling multi, we get a ZooKeeper exception that can be handled by a sequential call(*)</li>
* </ul>
* then we retry the operations one-by-one (sequentially).
* <p>
* Note *: an example is receiving a NodeExistsException from a "create" call. Without multi, a
* user could call "createAndFailSilent" to ensure that a node exists if they don't care who
* actually created the node (i.e. the NodeExistsException from ZooKeeper is caught). This will
* cause all operations in the multi to fail, however, because the NodeExistsException that
* zk.create throws will fail the multi transaction. In this case, if the previous conditions
* hold, the commands are run sequentially, which should result in the correct final state, but
* means that the operations will not run atomically.
*
* @throws KeeperException
* if a ZooKeeper operation fails
*/
public static void multiOrSequential(ZKWatcher zkw, List<ZKUtilOp> ops, boolean runSequentialOnMultiFailure) throws KeeperException {
if (ops == null) {
return;
}
if (useMultiWarn) {
// Only check and warn at first use
if (zkw.getConfiguration().get("hbase.zookeeper.useMulti") != null) {
LOG.warn("hbase.zookeeper.useMulti is deprecated. Default to true always.");
}
useMultiWarn = false;
}
List<Op> zkOps = new LinkedList<>();
for (ZKUtilOp op : ops) {
zkOps.add(toZooKeeperOp(zkw, op));
}
try {
zkw.getRecoverableZooKeeper().multi(zkOps);
} catch (KeeperException ke) {
switch (ke.code()) {
case NODEEXISTS :
case NONODE :
case BADVERSION :
case NOAUTH :
case NOTEMPTY :
// if we get an exception that could be solved by running sequentially
// (and the client asked us to), then break out and run sequentially
if (runSequentialOnMultiFailure) {
LOG.info("multi exception: {}; running operations sequentially " + "(runSequentialOnMultiFailure=true); {}", ke.toString(), ops.stream().map(o -> o.toString()).collect(Collectors.joining(",")));
m0(zkw, ops);
break;
}
default :
throw ke;
}
} catch (InterruptedException ie) {
zkw.interruptedException(ie);
}
} | 3.26 |
hbase_ZKUtil_deleteNodeFailSilent_rdh | /**
* Returns a deleteNodeFailSilent ZKUtilOP
*/
public static ZKUtilOp deleteNodeFailSilent(String path) {
return new DeleteNodeFailSilent(path);
} | 3.26 |
hbase_ZKUtil_getPath_rdh | /**
* Returns path to znode where the ZKOp will occur
*/
public String getPath() {
return path;
} | 3.26 |
hbase_ZKUtil_createSetData_rdh | /**
* Set data into node creating node if it doesn't yet exist. Does not set watch.
*
* @param zkw
* zk reference
* @param znode
* path of node
* @param data
* data to set for node
* @throws KeeperException
* if a ZooKeeper operation fails
*/
public static void createSetData(final ZKWatcher zkw, final String znode, final byte[] data) throws KeeperException {
if (checkExists(zkw, znode) == (-1)) {
ZKUtil.createWithParents(zkw, znode, data);
} else {
ZKUtil.setData(zkw, znode, data);
}
} | 3.26 |
hbase_ZKUtil_deleteNodeRecursivelyMultiOrSequential_rdh | /**
* Delete the specified node and its children. This traverses the znode tree, listing the
* children and then deleting them, including the parent, using the multi-update API or
* sequential operations based on the specified configuration.
* <p>
* Sets no watches. Throws all exceptions besides dealing with deletion of children.
* <p>
* If the following is true:
* <ul>
* <li>runSequentialOnMultiFailure is true</li>
* </ul>
* on calling multi, we get a ZooKeeper exception that can be handled by a sequential call(*), we
* retry the operations one-by-one (sequentially).
*
* @param zkw
*          zk reference
* @param runSequentialOnMultiFailure
*          if true, when we get a ZooKeeper exception that could be handled by retrying the
*          operations one-by-one (sequentially), do so
* @param pathRoots
*          path of the parent node(s)
* @throws KeeperException.NotEmptyException
*          if a node has children while deleting
* @throws KeeperException
*          if an unexpected ZooKeeper exception occurs or an invalid path is specified
*/
public static void deleteNodeRecursivelyMultiOrSequential(ZKWatcher zkw, boolean runSequentialOnMultiFailure, String... pathRoots) throws KeeperException {
if (pathRoots == null || pathRoots.length <= 0) {
LOG.warn("Given path is not valid!");
return;
}
List<ZKUtilOp> ops = new ArrayList<>();
for (String eachRoot : pathRoots) {
// ZooKeeper Watches are one time triggers; When children of parent nodes are deleted
// recursively, must set another watch, get notified of delete node
List<String> children = listChildrenBFSAndWatchThem(zkw, eachRoot);
// Delete the leaves first and eventually get rid of the root
for (int i = children.size() - 1; i >= 0; --i) {
ops.add(ZKUtilOp.deleteNodeFailSilent(children.get(i)));
}
try {
if (zkw.getRecoverableZooKeeper().exists(eachRoot, zkw) != null) {
ops.add(ZKUtilOp.deleteNodeFailSilent(eachRoot));
}
} catch (InterruptedException e) {
zkw.interruptedException(e);
}
}
submitBatchedMultiOrSequential(zkw, runSequentialOnMultiFailure, ops);
} | 3.26 |
hbase_ZKUtil_nodeHasChildren_rdh | /**
* Checks if the specified znode has any children. Sets no watches. Returns true if the node
* exists and has children. Returns false if the node does not exist or if the node does not have
* any children. Used during master initialization to determine if the master is a failed-over-to
* master or the first master during initial cluster startup. If the directory for regionserver
* ephemeral nodes is empty then this is a cluster startup, if not then it is not cluster startup.
*
* @param zkw
* zk reference
* @param znode
* path of node to check for children of
* @return true if node has children, false if not or node does not exist
* @throws KeeperException
* if unexpected zookeeper exception
*/
public static boolean nodeHasChildren(ZKWatcher zkw, String znode) throws KeeperException {
try {
return !zkw.getRecoverableZooKeeper().getChildren(znode, null).isEmpty();
} catch (KeeperException.NoNodeException ke) {
LOG.debug(zkw.prefix(("Unable to list children of znode " + znode) + " because node does not exist (not an error)"));
return false;
} catch (KeeperException e) {
LOG.warn(zkw.prefix("Unable to list children of znode " + znode), e);
zkw.keeperException(e);
return false;
} catch (InterruptedException e) {
LOG.warn(zkw.prefix("Unable to list children of znode " + znode), e);
zkw.interruptedException(e);
return false;
}
} | 3.26 |
hbase_ZKUtil_watchAndCheckExists_rdh | //
// Existence checks and watches
//
/**
* Watch the specified znode for delete/create/change events. The watcher is set whether or not
* the node exists. If the node already exists, the method returns true. If the node does not
* exist, the method returns false.
*
* @param zkw
* zk reference
* @param znode
* path of node to watch
* @return true if znode exists, false if does not exist or error
* @throws KeeperException
* if unexpected zookeeper exception
*/
public static boolean watchAndCheckExists(ZKWatcher zkw, String znode) throws KeeperException {
try {
Stat s = zkw.getRecoverableZooKeeper().exists(znode, zkw);
boolean exists = s != null;
if (exists) {
LOG.debug(zkw.prefix("Set watcher on existing znode=" + znode));
} else {
LOG.debug(zkw.prefix("Set watcher on znode that does not yet exist, " + znode));}
return exists;
} catch (KeeperException e) {
LOG.warn(zkw.prefix("Unable to set watcher on znode " + znode), e);
zkw.keeperException(e);
return false;
} catch (InterruptedException e) {
LOG.warn(zkw.prefix("Unable to set watcher on znode " + znode), e);
zkw.interruptedException(e);
return false;
}
} | 3.26 |
hbase_ZKUtil_partitionOps_rdh | /**
* Partition the list of {@code ops} by size (using {@link #estimateSize(ZKUtilOp)}).
*/
static List<List<ZKUtilOp>> partitionOps(List<ZKUtilOp> ops, int maxPartitionSize) {
List<List<ZKUtilOp>> partitionedOps = new ArrayList<>();
List<ZKUtilOp> currentPartition = new ArrayList<>();
int currentPartitionSize = 0;
partitionedOps.add(currentPartition);
Iterator<ZKUtilOp> iter = ops.iterator();
while (iter.hasNext()) {
ZKUtilOp currentOp = iter.next();
int currentOpSize = estimateSize(currentOp);
// Roll a new partition if necessary
// If the current partition is empty, put the element in there anyways.
// We can roll a new partition if we get another element
if ((!currentPartition.isEmpty()) && ((currentOpSize + currentPartitionSize) > maxPartitionSize)) {
currentPartition = new ArrayList<>();
partitionedOps.add(currentPartition);
currentPartitionSize = 0;
}
// Add the current op to the partition
currentPartition.add(currentOp);
// And record its size
currentPartitionSize += currentOpSize;
}
return partitionedOps;
} | 3.26 |
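As a sketch of the greedy packing that partitionOps() performs (and that submitBatchedMultiOrSequential() relies on further down), here is a standalone simulation with hypothetical op sizes; in the real method, estimateSize(ZKUtilOp) supplies the sizes.

```java
// Simulation of the partitioning loop; sizes and limit are hypothetical.
import java.util.ArrayList;
import java.util.List;

public class PartitionSketch {
  public static void main(String[] args) {
    int[] sizes = { 60, 60, 30 };
    int maxPartitionSize = 100;
    List<List<Integer>> partitions = new ArrayList<>();
    List<Integer> current = new ArrayList<>();
    int currentSize = 0;
    partitions.add(current);
    for (int size : sizes) {
      // Roll a new partition only if the current one is non-empty and would overflow.
      if (!current.isEmpty() && currentSize + size > maxPartitionSize) {
        current = new ArrayList<>();
        partitions.add(current);
        currentSize = 0;
      }
      current.add(size);
      currentSize += size;
    }
    System.out.println(partitions); // [[60], [60, 30]]
  }
}
```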
hbase_ZKUtil_waitForBaseZNode_rdh | /**
* Waits for HBase installation's base (parent) znode to become available.
*
* @throws IOException
* on ZK errors
 */
public static void waitForBaseZNode(Configuration conf) throws IOException {
  LOG.info("Waiting until the base znode is available");
  String v63 = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
ZooKeeper zk = new ZooKeeper(ZKConfig.getZKQuorumServersString(conf), conf.getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT), EmptyWatcher.instance);
final int maxTimeMs = 10000;
final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
KeeperException keeperEx = null;
try {
try {
for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
try {
if (zk.exists(v63, false) != null) {
LOG.info("Parent znode exists: {}", v63);
keeperEx = null;
break;
}
} catch (KeeperException e) {
keeperEx = e;
}
Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
}
} finally {
zk.close();
}
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();
}
if (keeperEx != null) {
throw new IOException(keeperEx);
}
} | 3.26 |
hbase_ZKUtil_getNumberOfChildren_rdh | /**
* Get the number of children of the specified node. If the node does not exist or has no
* children, returns 0. Sets no watches at all.
*
* @param zkw
* zk reference
* @param znode
* path of node to count children of
* @return number of children of specified node, 0 if none or parent does not exist
* @throws KeeperException
* if unexpected zookeeper exception
*/
public static int getNumberOfChildren(ZKWatcher zkw, String znode) throws KeeperException {
try {
Stat stat = zkw.getRecoverableZooKeeper().exists(znode, null);
return stat == null ? 0 : stat.getNumChildren();
} catch (KeeperException e) {
LOG.warn(zkw.prefix("Unable to get children of node " + znode));
zkw.keeperException(e);
} catch (InterruptedException e) {
zkw.interruptedException(e);
}
return 0;
} | 3.26 |
hbase_ZKUtil_createNodeIfNotExistsNoWatch_rdh | /**
* Creates the specified znode with the specified data but does not watch it. Returns the znode
* of the newly created node. If there is another problem, a KeeperException will be thrown.
*
* @param zkw
* zk reference
* @param znode
* path of node
* @param data
* data of node
* @param createMode
* specifying whether the node to be created is ephemeral and/or sequential
* @return the name of the newly created znode (or the passed znode if it already exists), or
*         null if interrupted
* @throws KeeperException
* if unexpected zookeeper exception
*/
public static String createNodeIfNotExistsNoWatch(ZKWatcher zkw, String znode, byte[] data, CreateMode createMode) throws KeeperException {
try {
return zkw.getRecoverableZooKeeper().create(znode, data, zkw.createACL(znode), createMode);
} catch (KeeperException.NodeExistsException nee) {
return znode;
} catch (InterruptedException e) {
  zkw.interruptedException(e);
return null;
}
} | 3.26 |
hbase_ZKUtil_getDataNoWatch_rdh | /**
* Get the data at the specified znode without setting a watch. Returns the data if the node
* exists. Returns null if the node does not exist. Sets the stats of the node in the passed Stat
* object. Pass a null stat if not interested.
*
* @param zkw
* zk reference
* @param znode
* path of node
* @param stat
* node status to get if node exists
* @return data of the specified znode, or null if node does not exist
* @throws KeeperException
* if unexpected zookeeper exception
*/
public static byte[] getDataNoWatch(ZKWatcher zkw, String znode, Stat stat) throws KeeperException {
try {
byte[] data = zkw.getRecoverableZooKeeper().getData(znode, null, stat);
logRetrievedMsg(zkw, znode, data, false);
return data;
} catch (KeeperException.NoNodeException e) {
LOG.debug(zkw.prefix((("Unable to get data of znode " + znode) + " ") + "because node does not exist (not necessarily an error)"));
return null;
} catch (KeeperException e) {
LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e);
zkw.keeperException(e);
return null;
} catch (InterruptedException e) {
LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e);
zkw.interruptedException(e);
return null;
}
} | 3.26 |
hbase_ZKUtil_m1_rdh | /**
* Helper method to print the current state of the ZK tree.
*
* @see #logZKTree(ZKWatcher, String)
* @throws KeeperException
* if an unexpected exception occurs
*/
private static void m1(ZKWatcher zkw, String root, String prefix) throws KeeperException {
List<String> children = ZKUtil.listChildrenNoWatch(zkw, root);
if (children == null) {
return;
}
for (String child : children) {
LOG.debug(prefix + child);
String node = ZNodePaths.joinZNode(root.equals("/") ? "" : root, child);
m1(zkw, node, prefix + "---");
}
} | 3.26 |
hbase_ZKUtil_deleteNode_rdh | /**
* Delete the specified node with the specified version. Sets no watches. Throws all exceptions.
*/
public static boolean deleteNode(ZKWatcher zkw, String node, int version) throws KeeperException {
try {
zkw.getRecoverableZooKeeper().delete(node, version);
return true;
} catch (KeeperException.BadVersionException bve) {
return false;
} catch (InterruptedException ie) {
zkw.interruptedException(ie);
return false;
}
} | 3.26 |
hbase_ZKUtil_deleteChildrenRecursively_rdh | /**
* Delete all the children of the specified node but not the node itself. Sets no watches. Throws
* all exceptions besides dealing with deletion of children.
*
* @throws KeeperException
* if a ZooKeeper operation fails
*/
public static void deleteChildrenRecursively(ZKWatcher zkw, String node) throws KeeperException {
deleteChildrenRecursivelyMultiOrSequential(zkw, true, node);
} | 3.26 |
hbase_ZKUtil_createWithParents_rdh | /**
* Creates the specified node and all parent nodes required for it to exist. The creation of
* parent znodes is not atomic with the leaf znode creation but the data is written atomically
* when the leaf node is created. No watches are set and no errors are thrown if the node already
* exists. The nodes created are persistent and open access.
*
* @param zkw
* zk reference
* @param znode
* path of node
* @throws KeeperException
* if unexpected zookeeper exception
*/
public static void createWithParents(ZKWatcher zkw, String znode, byte[] data) throws KeeperException {
  try {
if (znode == null) {
return;
}
zkw.getRecoverableZooKeeper().create(znode, data, zkw.createACL(znode), CreateMode.PERSISTENT);
} catch (KeeperException.NodeExistsException nee) {
return;
} catch (KeeperException.NoNodeException nne) {
createWithParents(zkw, getParent(znode));
createWithParents(zkw, znode, data);
} catch (InterruptedException ie) {
zkw.interruptedException(ie);
}
} | 3.26 |
hbase_ZKUtil_deleteChildrenRecursivelyMultiOrSequential_rdh | /**
* Delete all the children of the specified node but not the node itself. This will first
* traverse the znode tree, listing the children, and then delete these znodes using the
* multi-update API or sequential operations based on the specified configuration.
* <p>
* Sets no watches. Throws all exceptions besides dealing with deletion of children.
* <p>
* If the following is true:
* <ul>
* <li>runSequentialOnMultiFailure is true</li>
* </ul>
* on calling multi, we get a ZooKeeper exception that can be handled by a sequential call(*), we
* retry the operations one-by-one (sequentially).
*
* @param zkw
*          zk reference
* @param runSequentialOnMultiFailure
*          if true, when we get a ZooKeeper exception that could be handled by retrying the
*          operations one-by-one (sequentially), do so
* @param pathRoots
*          path of the parent node(s)
* @throws KeeperException.NotEmptyException
*          if a node has children while deleting
* @throws KeeperException
*          if an unexpected ZooKeeper exception occurs or an invalid path is specified
*/
public static void deleteChildrenRecursivelyMultiOrSequential(ZKWatcher zkw,
  boolean runSequentialOnMultiFailure, String... pathRoots) throws KeeperException {
if ((pathRoots == null) || (pathRoots.length <= 0)) {
LOG.warn("Given path is not valid!");
return;
}
List<ZKUtilOp> ops = new ArrayList<>();
for (String eachRoot : pathRoots) {
List<String> children = listChildrenBFSNoWatch(zkw, eachRoot);
// Delete the leaves first and eventually get rid of the root
for (int i = children.size() - 1; i >= 0; --i) {
ops.add(ZKUtilOp.deleteNodeFailSilent(children.get(i)));
}
}
submitBatchedMultiOrSequential(zkw, runSequentialOnMultiFailure, ops);
} | 3.26 |
hbase_ZKUtil_listChildrenAndWatchThem_rdh | /**
* List all the children of the specified znode, setting a watch for children changes and also
* setting a watch on every individual child in order to get the NodeCreated and NodeDeleted
* events.
*
* @param zkw
* zookeeper reference
* @param znode
* node to get children of and watch
* @return list of znode names, null if the node doesn't exist
* @throws KeeperException
* if a ZooKeeper operation fails
*/
public static List<String> listChildrenAndWatchThem(ZKWatcher zkw, String znode) throws KeeperException {
List<String> children = listChildrenAndWatchForNewChildren(zkw, znode);
if (children == null) {
return null;
}
for (String child : children) {
watchAndCheckExists(zkw, ZNodePaths.joinZNode(znode, child));
}
return children;
} | 3.26 |
hbase_ZKUtil_logRetrievedMsg_rdh | //
// ZooKeeper cluster information
//
private static void logRetrievedMsg(final ZKWatcher zkw, final String znode, final byte[] data, final boolean watcherSet) {
if (!LOG.isTraceEnabled()) {
return;
}
LOG.trace(zkw.prefix((((("Retrieved " + (data == null ? 0 : data.length)) + " byte(s) of data from znode ") + znode) + (watcherSet ? " and set watcher; " : "; data=")) + (data == null ? "null" : data.length == 0 ? "empty" : zkw.getZNodePaths().isMetaZNodePath(znode) ? getServerNameOrEmptyString(data) : znode.startsWith(zkw.getZNodePaths().backupMasterAddressesZNode) ? getServerNameOrEmptyString(data) : StringUtils.abbreviate(Bytes.toStringBinary(data), 32))));
} | 3.26 |
hbase_ZKUtil_checkExists_rdh | /**
* Check if the specified node exists. Sets no watches.
*
* @param zkw
* zk reference
* @param znode
* path of node to watch
* @return version of the node if it exists, -1 if does not exist
* @throws KeeperException
* if unexpected zookeeper exception
*/
public static int checkExists(ZKWatcher zkw, String znode) throws KeeperException {
try {
Stat s = zkw.getRecoverableZooKeeper().exists(znode, null);
return s != null ? s.getVersion() : -1;
} catch (KeeperException e) {
LOG.warn(zkw.prefix(("Unable to set watcher on znode (" +
znode) + ")"), e);
zkw.keeperException(e);return -1;
} catch (InterruptedException e) {
LOG.warn(zkw.prefix(("Unable to set watcher on znode (" + znode) + ")"), e);zkw.interruptedException(e);
return -1;
}
} | 3.26 |
hbase_ZKUtil_listChildrenBFSNoWatch_rdh | /**
* BFS traversal of all the children under the path, with the entries in the list in the same
* order as that of the traversal. Lists all the children without setting any watches.
*
* @param zkw
*          zk reference
* @param znode
*          path of node
* @return list of children znodes under the path
* @throws KeeperException
*          if an unexpected ZooKeeper exception occurs
*/
private static List<String> listChildrenBFSNoWatch(ZKWatcher zkw, final String znode)
  throws KeeperException {
Deque<String> queue = new LinkedList<>();
List<String> tree = new ArrayList<>();
queue.add(znode);
while (true) {
String node = queue.pollFirst();
if (node == null) {
break;
}
List<String> children = listChildrenNoWatch(zkw, node);
if (children == null) {
continue;
}
for (final String child : children) {
final String childPath = (node + "/") + child;
queue.add(childPath);
tree.add(childPath);
}
}
return tree;
} | 3.26 |
hbase_ZKUtil_deleteNodeRecursively_rdh | /**
* Delete the specified node and all of its children.
* <p>
* If the node does not exist, just returns.
* <p>
* Sets no watches. Throws all exceptions besides dealing with deletion of children.
*/
public static void deleteNodeRecursively(ZKWatcher zkw, String node) throws KeeperException {
deleteNodeRecursivelyMultiOrSequential(zkw, true, node);
} | 3.26 |
hbase_ZKUtil_submitBatchedMultiOrSequential_rdh | /**
* Chunks the provided {@code ops} when their approximate size exceeds the configured limit.
* Take caution that this can ONLY be used for operations where atomicity is not important, e.g.
* deletions. It must not be used when atomicity of the operations is critical.
*
* @param zkw
* reference to the {@link ZKWatcher} which contains
* configuration and constants
* @param runSequentialOnMultiFailure
* if true when we get a ZooKeeper exception that could retry
* the operations one-by-one (sequentially)
* @param ops
* list of ZKUtilOp {@link ZKUtilOp} to partition while
* submitting batched multi or sequential
* @throws KeeperException
* unexpected ZooKeeper Exception / Zookeeper unreachable
*/
private static void submitBatchedMultiOrSequential(ZKWatcher zkw, boolean runSequentialOnMultiFailure, List<ZKUtilOp> ops) throws KeeperException {
// at least one element should exist
if (ops.isEmpty()) {
return;
}
final int maxMultiSize = zkw.getRecoverableZooKeeper().getMaxMultiSizeLimit();
// Batch up the items to avoid smashing through jute.maxbuffer with too many Ops.
final List<List<ZKUtilOp>> batchedOps = partitionOps(ops, maxMultiSize);
// Would use forEach() but have to handle KeeperException
for (List<ZKUtilOp> batch : batchedOps) {
multiOrSequential(zkw, batch, runSequentialOnMultiFailure);
}
} | 3.26 |
hbase_ZKUtil_logZKTree_rdh | /**
* Recursively print the current state of ZK (non-transactional)
*
* @param root
* name of the root directory in zk to print
*/
public static void logZKTree(ZKWatcher zkw, String root) {
if (!LOG.isDebugEnabled()) {
return;
}
LOG.debug("Current zk system:");
String prefix = "|-";
LOG.debug(prefix + root);
try {
m1(zkw, root, prefix);
} catch (KeeperException e) {
throw new RuntimeException(e);
}
} | 3.26 |
hbase_ZKUtil_toZooKeeperOp_rdh | /**
* Convert from ZKUtilOp to ZKOp
*/
private static Op toZooKeeperOp(ZKWatcher zkw, ZKUtilOp op) throws UnsupportedOperationException {
if (op == null) {
return null;
}
if (op instanceof CreateAndFailSilent) {
CreateAndFailSilent cafs = ((CreateAndFailSilent) (op));
return Op.create(cafs.getPath(), cafs.getData(), zkw.createACL(cafs.getPath()), CreateMode.PERSISTENT);
} else if (op instanceof DeleteNodeFailSilent) {
DeleteNodeFailSilent dnfs = ((DeleteNodeFailSilent) (op));
return Op.delete(dnfs.getPath(), -1);
} else if (op instanceof SetData) {
SetData sd = ((SetData) (op));
return Op.setData(sd.getPath(), sd.getData(), sd.getVersion());
} else {
throw new UnsupportedOperationException("Unexpected ZKUtilOp type: " + op.getClass().getName());
}
} | 3.26 |
hbase_ZKUtil_parseWALPositionFrom_rdh | /**
*
* @param bytes
* - Content of a WAL position znode.
* @return long - The current WAL position.
* @throws DeserializationException
* if the WAL position cannot be parsed
*/
public static long parseWALPositionFrom(final byte[] bytes) throws DeserializationException {
if (bytes == null) {
throw new DeserializationException("Unable to parse null WAL position.");
}
if (ProtobufUtil.isPBMagicPrefix(bytes)) {
int pblen = ProtobufUtil.lengthOfPBMagic();
ReplicationProtos.ReplicationHLogPosition.Builder builder = ReplicationProtos.ReplicationHLogPosition.newBuilder();
ReplicationProtos.ReplicationHLogPosition position;
try {
ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
position = builder.build();
} catch (IOException e) {
throw new DeserializationException(e);
}
return position.getPosition();
} else {
if (bytes.length > 0) {
  return Bytes.toLong(bytes);
}
return 0;
}
} | 3.26 |
hbase_ZKUtil_listChildrenBFSAndWatchThem_rdh | /**
* BFS traversal of all the children under the path, with the entries in the list in the same
* order as that of the traversal. Lists all the children and sets watches on them.
*
* @param zkw
*          zk reference
* @param znode
*          path of node
* @return list of children znodes under the path
* @throws KeeperException
*          if an unexpected ZooKeeper exception occurs
*/
private static List<String> listChildrenBFSAndWatchThem(ZKWatcher zkw, final String znode) throws KeeperException {
Deque<String> queue = new LinkedList<>();
List<String> tree = new ArrayList<>();
queue.add(znode);
while (true) {
String node = queue.pollFirst();
if (node == null) {
break;
}
List<String> children = listChildrenAndWatchThem(zkw, node);
if (children == null) {
continue;
}
for (final String child : children) {
final String childPath = (node + "/") + child;
queue.add(childPath);
tree.add(childPath);
}
}
return tree;
} | 3.26 |
hbase_ZKUtil_getNodeName_rdh | /**
* Get the name of the current node from the specified fully-qualified path.
*
* @param path
* fully-qualified path
* @return name of the current node
*/
public static String getNodeName(String path) {
  return path.substring(path.lastIndexOf("/") + 1);
} | 3.26 |
hbase_ZKUtil_getDataAndWatch_rdh | /**
* Get the data at the specified znode and set a watch. Returns the data and sets a watch if the
* node exists. Returns null and no watch is set if the node does not exist or there is an
* exception.
*
* @param zkw
* zk reference
* @param znode
* path of node
* @param stat
* object to populate the version of the znode
* @return data of the specified znode, or null
* @throws KeeperException
* if unexpected zookeeper exception
*/
public static byte[] getDataAndWatch(ZKWatcher zkw, String znode, Stat stat) throws KeeperException {
return getDataInternal(zkw, znode, stat, true, true);
} | 3.26 |
hbase_ZKUtil_createAndFailSilent_rdh | /**
* Returns a createAndFailSilent ZKUtilOp
*/
public static ZKUtilOp createAndFailSilent(String path, byte[] data) {
return new CreateAndFailSilent(path, data);
} | 3.26 |
hbase_ZKUtil_createEphemeralNodeAndWatch_rdh | //
// Node creation
//
/**
* Set the specified znode to be an ephemeral node carrying the specified data. If the node is
* created successfully, a watcher is also set on the node. If the node is not created
* successfully because it already exists, this method will also set a watcher on the node. If
* there is another problem, a KeeperException will be thrown.
*
* @param zkw
* zk reference
* @param znode
* path of node
* @param data
* data of node
* @return true if node created, false if not, watch set in both cases
* @throws KeeperException
* if unexpected zookeeper exception
*/
public static boolean createEphemeralNodeAndWatch(ZKWatcher zkw, String znode, byte[] data) throws KeeperException {
boolean ret = true;
try {
zkw.getRecoverableZooKeeper().create(znode, data, zkw.createACL(znode), CreateMode.EPHEMERAL);
} catch (KeeperException.NodeExistsException nee) {
  ret = false;
} catch (InterruptedException e) {
  LOG.info("Interrupted", e);
  Thread.currentThread().interrupt();
}
if (!watchAndCheckExists(zkw, znode)) {
  // It did exist but now it doesn't, try again
  return createEphemeralNodeAndWatch(zkw, znode, data);
}
return ret;
} | 3.26 |
hbase_NettyRpcServer_createNettyServerRpcConnection_rdh | // will be overridden in tests
@InterfaceAudience.Private
protected NettyServerRpcConnection createNettyServerRpcConnection(Channel channel) {
return new NettyServerRpcConnection(this, channel);
} | 3.26 |
hbase_SyncFutureCache_offer_rdh | /**
* Offers the sync future back to the cache for reuse.
*/
public void offer(SyncFuture syncFuture) {
// It is ok to overwrite an existing mapping.
syncFutureCache.asMap().put(syncFuture.getThread(), syncFuture);
} | 3.26 |
hbase_DefaultOperationQuota_updateEstimateConsumeQuota_rdh | /**
* Update estimate quota(read/write size/capacityUnits) which will be consumed
*
* @param numWrites
* the number of write requests
* @param numReads
* the number of read requests
* @param numScans
* the number of scan requests
*/
protected void updateEstimateConsumeQuota(int numWrites, int numReads, int numScans) {
writeConsumed = estimateConsume(OperationType.MUTATE, numWrites, 100);
readConsumed = estimateConsume(OperationType.GET, numReads, 100);
readConsumed += estimateConsume(OperationType.SCAN, numScans, 1000);
writeCapacityUnitConsumed = calculateWriteCapacityUnit(writeConsumed);
readCapacityUnitConsumed = calculateReadCapacityUnit(readConsumed);
} | 3.26 |
hbase_HBaseServerException_isServerOverloaded_rdh | /**
* Returns True if server was considered overloaded when exception was thrown
 */
public boolean isServerOverloaded() {
return serverOverloaded;
} | 3.26 |
hbase_HBaseServerException_m0_rdh | /**
* Necessary for parsing RemoteException on client side
*
* @param serverOverloaded
* True if server was overloaded when exception was thrown
*/
public void m0(boolean serverOverloaded) {
this.serverOverloaded = serverOverloaded;
} | 3.26 |
hbase_MasterFeature_bindFactory_rdh | /**
* Helper method for smoothing over use of {@link SupplierFactoryAdapter}. Inspired by internal
* implementation details of jersey itself.
*/
private <T> ServiceBindingBuilder<T> bindFactory(Supplier<T> supplier) {
return bindFactory(new SupplierFactoryAdapter<>(supplier));
} | 3.26 |
hbase_QualifierFilter_parseFrom_rdh | /**
* Parse a serialized representation of {@link QualifierFilter}
*
* @param pbBytes
* A pb serialized {@link QualifierFilter} instance
* @return An instance of {@link QualifierFilter} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static QualifierFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.QualifierFilter proto;
try {
proto = FilterProtos.QualifierFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
  throw new DeserializationException(e);
}
final CompareOperator valueCompareOp = CompareOperator.valueOf(proto.getCompareFilter().getCompareOp().name());
ByteArrayComparable valueComparator = null;
try {
if (proto.getCompareFilter().hasComparator()) {
valueComparator = ProtobufUtil.toComparator(proto.getCompareFilter().getComparator());
}
} catch (IOException ioe) {
throw new DeserializationException(ioe);
}
return new QualifierFilter(valueCompareOp, valueComparator);
} | 3.26 |
hbase_QualifierFilter_toByteArray_rdh | /**
* Returns The filter serialized using pb
*/
@Override
public byte[] toByteArray() {
FilterProtos.QualifierFilter.Builder builder = FilterProtos.QualifierFilter.newBuilder();
builder.setCompareFilter(super.convert());
return builder.build().toByteArray();
} | 3.26 |
hbase_QualifierFilter_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof QualifierFilter)) {
return false;
}
return super.areSerializedFieldsEqual(o);
} | 3.26 |
hbase_HFileReaderImpl_checkLen_rdh | /**
* Returns True if v < 0 or v > current block buffer limit.
*/
protected final boolean checkLen(final int v) {
return (v < 0) || (v > this.blockBuffer.limit());
} | 3.26 |
hbase_HFileReaderImpl_readMvccVersion_rdh | /**
* Read mvcc. Does checks to see if we even need to read the mvcc at all.
*/
protected void readMvccVersion(final int offsetFromPos) {
// See if we even need to decode mvcc.
if (!this.reader.getHFileInfo().shouldIncludeMemStoreTS()) {
return;
}
if (!this.reader.getHFileInfo().isDecodeMemstoreTS()) {
currMemstoreTS = 0;
currMemstoreTSLen = 1;
return;
}
_readMvccVersion(offsetFromPos);
} | 3.26 |
hbase_HFileReaderImpl_next_rdh | /**
* Go to the next key/value in the block section. Loads the next block if necessary. If
* successful, {@link #getKey()} and {@link #getValue()} can be called.
*
* @return true if successfully navigated to the next key/value
*/
@Override
public boolean next() throws IOException {
// This is a hot method so extreme measures taken to ensure it is small and inlineable.
// Checked by setting: -XX:+UnlockDiagnosticVMOptions -XX:+PrintInlining -XX:+PrintCompilation
assertSeeked();
positionThisBlockBuffer();
return _next();
} | 3.26 |
hbase_HFileReaderImpl_readNextDataBlock_rdh | /**
* Scans blocks in the "scanned" section of the {@link HFile} until the next data block is
* found.
*
* @return the next block, or null if there are no more data blocks
*/
@SuppressWarnings(value = "NP_NULL_ON_SOME_PATH", justification = "Yeah, unnecessary null check; could do w/ clean up")
protected HFileBlock readNextDataBlock() throws IOException {
long lastDataBlockOffset = reader.getTrailer().getLastDataBlockOffset();
if (curBlock == null) {
return null;
}
HFileBlock block = this.curBlock;
do {
if (block.getOffset() >= lastDataBlockOffset) {
releaseIfNotCurBlock(block);
return null;
}
if (block.getOffset() < 0) {
releaseIfNotCurBlock(block);
throw new IOException((("Invalid block offset: " + block) + ", path=") + reader.getPath());
}
// We are reading the next block without block type validation, because
// it might turn out to be a non-data block.
block = reader.readBlock(block.getOffset() + block.getOnDiskSizeWithHeader(), block.getNextBlockOnDiskSize(), cacheBlocks, pread, isCompaction, true,
null, getEffectiveDataBlockEncoding());
if ((block != null) && (!block.getBlockType().isData())) {
// Whatever block we read we will be returning it unless
// it is a datablock. Just in case the blocks are non data blocks
block.release();
}
} while (!block.getBlockType().isData() );
return block;
} | 3.26 |
hbase_HFileReaderImpl_getMetaBlock_rdh | /**
*
* @param cacheBlock
* Add block to cache, if found
* @return block wrapped in a ByteBuffer, with header skipped
*/
@Override
public HFileBlock getMetaBlock(String metaBlockName, boolean cacheBlock) throws IOException {
  if (trailer.getMetaIndexCount() == 0) {
    return null; // there are no meta blocks
}
if (f0 == null) {
throw new IOException(path + " meta index not loaded");
}
byte[] mbname = Bytes.toBytes(metaBlockName);
int block = f0.rootBlockContainingKey(mbname, 0, mbname.length);
if (block == -1) {
return null;
}
long blockSize = f0.getRootBlockDataSize(block);
// Per meta key from any given file, synchronize reads for said block. This
// is OK to do for meta blocks because the meta block index is always
// single-level.
synchronized (f0.getRootBlockKey(block)) {
  // Check cache for block. If found return.
long metaBlockOffset = f0.getRootBlockOffset(block);
BlockCacheKey cacheKey = new BlockCacheKey(name, metaBlockOffset, this.m2(), BlockType.META);
cacheBlock &= cacheConf.shouldCacheBlockOnRead(BlockType.META.getCategory());
HFileBlock cachedBlock = getCachedBlock(cacheKey, cacheBlock, false, true, BlockType.META, null);
if (cachedBlock != null) {
assert cachedBlock.isUnpacked() : "Packed block leak.";
// Return a distinct 'shallow copy' of the block,
// so pos does not get messed by the scanner
return cachedBlock;
}
// Cache Miss, please load.
HFileBlock compressedBlock = fsBlockReader.readBlockData(metaBlockOffset, blockSize, true, false, true);
HFileBlock uncompressedBlock = compressedBlock.unpack(hfileContext, fsBlockReader);
if (compressedBlock != uncompressedBlock) {
compressedBlock.release();
}
// Cache the block
if (cacheBlock) {
cacheConf.getBlockCache().ifPresent(cache -> cache.cacheBlock(cacheKey,
uncompressedBlock, cacheConf.isInMemory()));
}
return uncompressedBlock;
}
} | 3.26 |
hbase_HFileReaderImpl__readMvccVersion_rdh | /**
* Actually do the mvcc read. Does no checks.
*/
private void _readMvccVersion(int offsetFromPos) {
// This is Bytes#bytesToVint inlined so can save a few instructions in this hot method; i.e.
// previous if one-byte vint, we'd redo the vint call to find int size.
// Also the method is kept small so can be inlined.
byte firstByte = blockBuffer.getByteAfterPosition(offsetFromPos);
int len = WritableUtils.decodeVIntSize(firstByte);
if (len == 1) {
this.currMemstoreTS = firstByte;
} else {
int remaining = len - 1;
long i = 0;
offsetFromPos++;
if (remaining >= Bytes.SIZEOF_INT) {
// The int read has to be converted to unsigned long so the & op
i = blockBuffer.getIntAfterPosition(offsetFromPos) & 0xffffffffL;
remaining -= Bytes.SIZEOF_INT;
offsetFromPos += Bytes.SIZEOF_INT;
}
if (remaining >= Bytes.SIZEOF_SHORT) {
short s = blockBuffer.getShortAfterPosition(offsetFromPos);
i = i << 16;
i = i | (s & 0xffff);
remaining -= Bytes.SIZEOF_SHORT;
offsetFromPos += Bytes.SIZEOF_SHORT;
}
for (int idx = 0; idx < remaining; idx++) {
  byte b = blockBuffer.getByteAfterPosition(offsetFromPos + idx);
  i = i << 8;
  i = i | (b & 0xff);
}
currMemstoreTS = WritableUtils.isNegativeVInt(firstByte) ? ~i : i;
}
this.currMemstoreTSLen = len;
} | 3.26 |
hbase_HFileReaderImpl_getLastRowKey_rdh | /**
* TODO left from {@link HFile} version 1: move this to StoreFile after Ryan's patch goes in to
* eliminate {@link KeyValue} here.
*
* @return the last row key, or null if the file is empty.
*/
@Override
public Optional<byte[]> getLastRowKey() {
// We have to copy the row part to form the row key alone
return getLastKey().map(CellUtil::cloneRow);
} | 3.26 |
hbase_HFileReaderImpl_getGeneralBloomFilterMetadata_rdh | /**
* Returns a buffer with the Bloom filter metadata. The caller takes ownership of the buffer.
*/
@Override
public DataInput getGeneralBloomFilterMetadata() throws IOException {
return this.getBloomFilterMetadata(BlockType.GENERAL_BLOOM_META);
} | 3.26 |
hbase_HFileReaderImpl_positionThisBlockBuffer_rdh | /**
* Set the position on current backing blockBuffer.
 */
private void positionThisBlockBuffer() {
try {
blockBuffer.skip(getCurCellSerializedSize());
} catch (IllegalArgumentException e) {
LOG.error((((((((((("Current pos = " + blockBuffer.position()) + "; currKeyLen = ") + currKeyLen) + "; currValLen = ") + currValueLen)
+ "; block limit = ") + blockBuffer.limit()) + "; currBlock currBlockOffset = ") + this.curBlock.getOffset()) + "; path=") + reader.getPath());
throw e;
}
} | 3.26 |
hbase_HFileReaderImpl_getEntries_rdh | /**
* Returns number of KV entries in this HFile
*/
@Override
public long getEntries() {
return trailer.getEntryCount();
} | 3.26 |
hbase_HFileReaderImpl_getComparator_rdh | /**
* Returns comparator
*/
@Override
public CellComparator getComparator() {
return this.hfileContext.getCellComparator();
} | 3.26 |
hbase_HFileReaderImpl_getKVBufSize_rdh | // From non encoded HFiles, we always read back KeyValue or its descendant.(Note: When HFile
// block is in DBB, it will be OffheapKV). So all parts of the Cell is in a contiguous
// array/buffer. How many bytes we should wrap to make the KV is what this method returns.
private int getKVBufSize() {
int kvBufSize = KEY_VALUE_LEN_SIZE + currKeyLen + currValueLen;
if (currTagsLen > 0) {
kvBufSize += Bytes.SIZEOF_SHORT + currTagsLen;
}
return kvBufSize;
} | 3.26 |
hbase_HFileReaderImpl_getUncachedBlockReader_rdh | /**
* For testing
*/
@Override
public FSReader getUncachedBlockReader() {
return fsBlockReader;
} | 3.26 |
hbase_HFileReaderImpl_getCurCellSerializedSize_rdh | // Returns the #bytes in HFile for the current cell. Used to skip these many bytes in current
// HFile block's buffer so as to position to the next cell.
private int getCurCellSerializedSize() {
int v1 = ((KEY_VALUE_LEN_SIZE + currKeyLen) + currValueLen) + currMemstoreTSLen;
if (this.reader.getFileContext().isIncludesTags()) {
v1 += Bytes.SIZEOF_SHORT + currTagsLen;
}
return v1;
} | 3.26 |
hbase_HFileReaderImpl_prefetchComplete_rdh | /**
* Returns false if block prefetching was requested for this file and has not completed, true
* otherwise
*/
@Override
public boolean prefetchComplete() {
return PrefetchExecutor.isCompleted(path);
} | 3.26 |
hbase_HFileReaderImpl_positionForNextBlock_rdh | /**
* Set our selves up for the next 'next' invocation, set up next block.
*
* @return True is more to read else false if at the end.
*/
private boolean positionForNextBlock() throws IOException {
// Methods are small so they get inlined because they are 'hot'.
long lastDataBlockOffset = reader.getTrailer().getLastDataBlockOffset();
if (this.curBlock.getOffset() >= lastDataBlockOffset) {
setNonSeekedState();
return false;
}
return isNextBlock();
} | 3.26 |
hbase_HFileReaderImpl_updateCurrentBlock_rdh | /**
* Updates the current block to be the given {@link HFileBlock}. Seeks to the the first
* key/value pair.
*
* @param newBlock
* the block to make current, and read by {@link HFileReaderImpl#readBlock},
* it's a totally new block with new allocated {@link ByteBuff}, so if no
* further reference to this block, we should release it carefully.
*/
@Override
protected void updateCurrentBlock(HFileBlock newBlock) throws CorruptHFileException {
try {
// sanity checks
if (newBlock.getBlockType() != BlockType.ENCODED_DATA) {
throw new IllegalStateException("EncodedScanner works only on encoded data blocks");
}
short v70 = newBlock.getDataBlockEncodingId();
if (!DataBlockEncoding.isCorrectEncoder(dataBlockEncoder, v70)) {
String encoderCls = dataBlockEncoder.getClass().getName();
throw new CorruptHFileException("Encoder " + encoderCls + " doesn't support data block encoding "
  + DataBlockEncoding.getNameFromId(v70) + ",path=" + reader.getPath());
}
updateCurrBlockRef(newBlock);
ByteBuff encodedBuffer = getEncodedBuffer(newBlock);
seeker.setCurrentBuffer(encodedBuffer);
} finally {
releaseIfNotCurBlock(newBlock);
}
// Reset the next indexed key
this.nextIndexedKey = null;
} | 3.26 |
hbase_HFileReaderImpl_getCachedBlock_rdh | /**
* Retrieve block from cache. Validates the retrieved block's type vs {@code expectedBlockType}
* and its encoding vs. {@code expectedDataBlockEncoding}. Unpacks the block as necessary.
*/
private HFileBlock getCachedBlock(BlockCacheKey cacheKey, boolean cacheBlock, boolean useLock,
  boolean updateCacheMetrics, BlockType expectedBlockType,
  DataBlockEncoding expectedDataBlockEncoding) throws IOException {
// Check cache for block. If found return.
BlockCache cache = cacheConf.getBlockCache().orElse(null);
if (cache != null) {
HFileBlock cachedBlock = (HFileBlock) cache.getBlock(cacheKey, cacheBlock, useLock,
  updateCacheMetrics, expectedBlockType);
if (cachedBlock != null) {
if (cacheConf.shouldCacheCompressed(cachedBlock.getBlockType().getCategory())) {
HFileBlock compressedBlock = cachedBlock;
cachedBlock = compressedBlock.unpack(hfileContext, fsBlockReader);
// In case of compressed block after unpacking we can release the compressed block
if (compressedBlock != cachedBlock) {
  compressedBlock.release();
}
}
try {
validateBlockType(cachedBlock, expectedBlockType);
} catch (IOException e) {
returnAndEvictBlock(cache, cacheKey, cachedBlock);
throw e;
}
if (expectedDataBlockEncoding == null) {
return cachedBlock;
}
DataBlockEncoding actualDataBlockEncoding = cachedBlock.getDataBlockEncoding();
// Block types other than data blocks always have
// DataBlockEncoding.NONE. To avoid false negative cache misses, only
// perform this check if cached block is a data block.
if (cachedBlock.getBlockType().isData() && (!actualDataBlockEncoding.equals(expectedDataBlockEncoding))) {
// This mismatch may happen if a Scanner, which is used for say a
// compaction, tries to read an encoded block from the block cache.
// The reverse might happen when an EncodedScanner tries to read
// un-encoded blocks which were cached earlier.
//
        // Because returning a data block with an implicit BlockType mismatch would cause the
        // requesting scanner to throw, a disk read should be forced here. This will potentially
        // cause a significant number of cache misses, so we should keep track of this as it
        // might justify the work on a CompoundScanner.
if ((!expectedDataBlockEncoding.equals(DataBlockEncoding.NONE)) && (!actualDataBlockEncoding.equals(DataBlockEncoding.NONE))) {
// If the block is encoded but the encoding does not match the
// expected encoding it is likely the encoding was changed but the
// block was not yet evicted. Evictions on file close happen async
// so blocks with the old encoding still linger in cache for some
// period of time. This event should be rare as it only happens on
// schema definition change.
LOG.info("Evicting cached block with key {} because data block encoding mismatch; " + "expected {}, actual {}, path={}", cacheKey, actualDataBlockEncoding, expectedDataBlockEncoding, path);
// This is an error scenario. so here we need to release the block.
returnAndEvictBlock(cache, cacheKey, cachedBlock);
}
return null;
}
        return cachedBlock;
}
}
return null;
} | 3.26 |
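The encoding-mismatch handling above boils down to one rule: on a mismatch the lookup always misses, and the cached block is additionally evicted only when both the expected and the actual encodings are real encodings that disagree. Below is a minimal sketch of that rule, assuming only the upstream DataBlockEncoding enum; the helper class and method name are hypothetical, not HBase API.

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;

// Hypothetical helper mirroring the mismatch handling in getCachedBlock above.
final class EncodingMismatchRule {
  private EncodingMismatchRule() {
  }

  /** Returns true when a cached data block should be evicted because of an encoding mismatch. */
  static boolean shouldEvict(DataBlockEncoding expected, DataBlockEncoding actual) {
    if (expected == null || actual.equals(expected)) {
      return false; // no expectation, or the cached block already matches
    }
    // Only evict when both sides carry a real encoding that simply disagrees; if either side
    // is NONE the caller just treats the lookup as a cache miss and reads from disk.
    return !expected.equals(DataBlockEncoding.NONE) && !actual.equals(DataBlockEncoding.NONE);
  }
}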
hbase_HFileReaderImpl_m6_rdh | /**
* Create a Scanner on this file. No seeks or reads are done on creation. Call
 * {@link HFileScanner#seekTo(Cell)} to position and start the read. There is nothing to clean up
* in a Scanner. Letting go of your references to the scanner is sufficient.
*
* @param conf
* Store configuration.
* @param cacheBlocks
* True if we should cache blocks read in by this scanner.
* @param pread
* Use positional read rather than seek+read if true (pread is better for
* random reads, seek+read is better scanning).
* @param isCompaction
* is scanner being used for a compaction?
* @return Scanner on this file.
*/
@Override
  public HFileScanner m6(Configuration conf, boolean cacheBlocks, final boolean pread,
    final boolean isCompaction) {
if (dataBlockEncoder.useEncodedScanner()) {
return new EncodedScanner(this, cacheBlocks, pread, isCompaction, this.hfileContext, conf);
}
    return new HFileScannerImpl(this, cacheBlocks, pread, isCompaction);
} | 3.26 |
hbase_HFileReaderImpl_releaseIfNotCurBlock_rdh | /**
* The curBlock will be released by shipping or close method, so only need to consider releasing
* the block, which was read from HFile before and not referenced by curBlock.
*/
protected void releaseIfNotCurBlock(HFileBlock block) {
if (curBlock != block) {
block.release();
}
} | 3.26 |
hbase_HFileReaderImpl_getScanner_rdh | /**
* Create a Scanner on this file. No seeks or reads are done on creation. Call
 * {@link HFileScanner#seekTo(Cell)} to position and start the read. There is nothing to clean up
* in a Scanner. Letting go of your references to the scanner is sufficient. NOTE: Do not use this
* overload of getScanner for compactions. See
* {@link #getScanner(Configuration, boolean, boolean, boolean)}
*
* @param conf
* Store configuration.
* @param cacheBlocks
* True if we should cache blocks read in by this scanner.
* @param pread
* Use positional read rather than seek+read if true (pread is better for
* random reads, seek+read is better scanning).
* @return Scanner on this file.
*/
@Override
public HFileScanner getScanner(Configuration conf, boolean cacheBlocks, final boolean pread) {
return m6(conf, cacheBlocks, pread, false);
} | 3.26 |
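A usage sketch for the two scanner factories above. It assumes an already-opened HFile.Reader, a Configuration, and a start Cell passed in by the caller (all hypothetical), and uses the non-compaction overload as the javadoc recommends.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;

final class HFileScanSketch {
  // Count cells from (approximately) startCell to the end of the file.
  static long countFrom(HFile.Reader reader, Configuration conf, Cell startCell)
    throws IOException {
    long count = 0;
    // Non-compaction overload: cacheBlocks=true, pread=true for random-read style access.
    HFileScanner scanner = reader.getScanner(conf, true, true);
    try {
      if (scanner.seekTo(startCell) != -1) { // -1 means startCell sorts before the first key
        do {
          Cell cell = scanner.getCell();
          if (cell != null) {
            count++;
          }
        } while (scanner.next());
      }
    } finally {
      scanner.close();
    }
    return count;
  }
}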
hbase_HFileReaderImpl_checkKeyValueLen_rdh | /**
* Check key and value lengths are wholesome.
*/
  protected final void checkKeyValueLen() {
    if (checkKeyLen(this.currKeyLen) || checkLen(this.currValueLen)) {
      throw new IllegalStateException("Invalid currKeyLen " + this.currKeyLen
        + " or currValueLen " + this.currValueLen + ". Block offset: " + this.curBlock.getOffset()
        + ", block length: " + this.blockBuffer.limit() + ", position: "
        + this.blockBuffer.position() + " (without header)." + ", path=" + reader.getPath());
}
} | 3.26 |
hbase_HFileReaderImpl_validateBlockType_rdh | /**
* Compares the actual type of a block retrieved from cache or disk with its expected type and
* throws an exception in case of a mismatch. Expected block type of {@link BlockType#DATA} is
 * considered to match the actual block type {@link BlockType#ENCODED_DATA} as well.
*
* @param block
* a block retrieved from cache or disk
* @param expectedBlockType
* the expected block type, or null to skip the check
*/
private void validateBlockType(HFileBlock block, BlockType expectedBlockType) throws IOException {
if (expectedBlockType == null) {
return;
}
BlockType v68 = block.getBlockType();
if (expectedBlockType.isData() && v68.isData()) {
// We consider DATA to match ENCODED_DATA for the purpose of this
// verification.
return;
}
if (v68 != expectedBlockType) {
      throw new IOException("Expected block type " + expectedBlockType + ", " + "but got " + v68
        + ": " + block + ", path=" + path);
}
} | 3.26 |
hbase_FamilyFilter_m0_rdh | /**
* Parse the serialized representation of {@link FamilyFilter}
*
* @param pbBytes
* A pb serialized {@link FamilyFilter} instance
* @return An instance of {@link FamilyFilter} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static FamilyFilter m0(final byte[] pbBytes) throws DeserializationException {
FilterProtos.FamilyFilter proto;
try {
proto = FilterProtos.FamilyFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
      throw new DeserializationException(e);
}
final CompareOperator valueCompareOp = CompareOperator.valueOf(proto.getCompareFilter().getCompareOp().name());
ByteArrayComparable valueComparator = null;
try {
if (proto.getCompareFilter().hasComparator()) {
valueComparator = ProtobufUtil.toComparator(proto.getCompareFilter().getComparator());
}
    } catch (IOException ioe) {
      throw new DeserializationException(ioe);
    }
    return new FamilyFilter(valueCompareOp, valueComparator);
} | 3.26 |
hbase_FamilyFilter_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
    if (!(o instanceof FamilyFilter)) {
return false;
}
FamilyFilter other = ((FamilyFilter) (o));
return super.areSerializedFieldsEqual(other);
} | 3.26 |
hbase_FamilyFilter_toByteArray_rdh | /**
* Returns The filter serialized using pb
*/
@Override
public byte[] toByteArray() {
FilterProtos.FamilyFilter.Builder builder = FilterProtos.FamilyFilter.newBuilder();
builder.setCompareFilter(super.convert());
return builder.build().toByteArray();
} | 3.26 |
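The two FamilyFilter entries above are a serialize/deserialize pair. Below is a round-trip sketch under the assumption that the renamed m0 corresponds to the upstream FamilyFilter.parseFrom(byte[]); the wrapper class is hypothetical.

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.FamilyFilter;
import org.apache.hadoop.hbase.util.Bytes;

final class FamilyFilterRoundTrip {
  static FamilyFilter roundTrip() throws DeserializationException {
    FamilyFilter original =
      new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("cf")));
    byte[] serialized = original.toByteArray();
    // Upstream this deserializer is FamilyFilter.parseFrom(byte[]); it corresponds to m0 above.
    // The restored filter carries the same compare operator and comparator as the original.
    return FamilyFilter.parseFrom(serialized);
  }
}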
hbase_ByteBuffAllocator_getFreeBufferCount_rdh | /**
* The {@link ConcurrentLinkedQueue#size()} is O(N) complexity and time-consuming, so DO NOT use
* the method except in UT.
*/
public int getFreeBufferCount() {
return this.buffers.size();
} | 3.26 |
hbase_ByteBuffAllocator_m0_rdh | /**
* Initialize an {@link ByteBuffAllocator} which will try to allocate ByteBuffers from off-heap if
* reservoir is enabled and the reservoir has enough buffers, otherwise the allocator will just
* allocate the insufficient buffers from on-heap to meet the requirement.
*
* @param conf
* which get the arguments to initialize the allocator.
* @param reservoirEnabled
* indicate whether the reservoir is enabled or disabled. NOTICE: if
* reservoir is enabled, then we will use the pool allocator to allocate
* off-heap ByteBuffers and use the HEAP allocator to allocate heap
* ByteBuffers. Otherwise if reservoir is disabled then all allocations
* will happen in HEAP instance.
* @return ByteBuffAllocator to manage the byte buffers.
*/
  public static ByteBuffAllocator m0(Configuration conf, boolean reservoirEnabled) {
int poolBufSize = conf.getInt(BUFFER_SIZE_KEY, DEFAULT_BUFFER_SIZE);
if (reservoirEnabled) {
      // The max number of buffers to be pooled in the ByteBufferPool. The default value has been
      // selected based on the #handlers configured. For a read request, 2 MB is the max size at
      // which we will send back one RPC response, so at most we need 2 MB for creating the
      // response cell block. (It may well be much less than this, because the 2 MB size
      // calculation also includes the heap-size overhead of each cell.) Considering 2 MB, we will
      // need (2 * 1024 * 1024) / poolBufSize buffers to make the response cell block. Pool buffer
      // size is by default 64 KB.
      // For a read request, at the end of the handler processing we build the response cell block
      // and add the Call to the connection's response queue; a single Responder thread takes
      // connections and responses from that queue one by one and does the socket write. So there
      // is a chance that, by the time a handler-originated response has actually been written to
      // the socket and has released the BBs it used, the handler has already processed one more
      // read request. We therefore assume 2x on average and factor that into the max buffers to
      // pool.
int bufsForTwoMB = ((2 * 1024) * 1024) / poolBufSize;
int maxBuffCount = conf.getInt(MAX_BUFFER_COUNT_KEY, (conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT, HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT) * bufsForTwoMB) * 2);
int minSizeForReservoirUse = conf.getInt(MIN_ALLOCATE_SIZE_KEY, poolBufSize / 6);
Class<?> clazz = conf.getClass(BYTEBUFF_ALLOCATOR_CLASS, ByteBuffAllocator.class);
      return ((ByteBuffAllocator) (ReflectionUtils.newInstance(clazz, true, maxBuffCount,
        poolBufSize, minSizeForReservoirUse)));
} else {
return HEAP;
}
} | 3.26 |
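A usage sketch for the allocator factory above, assuming the renamed m0 corresponds to the upstream ByteBuffAllocator.create(Configuration, boolean); the wrapper class and the chosen sizes are illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.ByteBuffAllocator;
import org.apache.hadoop.hbase.nio.ByteBuff;

final class AllocatorSketch {
  static void demo() {
    Configuration conf = new Configuration();
    // Optional tuning: cap the pool while keeping the default 64 KB buffer size (see the sizing
    // comment in the snippet above).
    conf.setInt(ByteBuffAllocator.MAX_BUFFER_COUNT_KEY, 1024);

    // Reservoir enabled: off-heap pooled buffers when possible, heap fallback otherwise.
    ByteBuffAllocator alloc = ByteBuffAllocator.create(conf, true); // m0 in the renamed snippet
    ByteBuff buf = alloc.allocate(128 * 1024);
    try {
      // fill / read the buffer ...
    } finally {
      buf.release(); // pooled segments go back to the reservoir (see getFreeBufferCount above)
    }
  }
}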