name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68)
---|---|---|
hbase_LocalHBaseCluster_getConfiguration_rdh | /**
* Returns the Configuration used by this LocalHBaseCluster
*/
public Configuration getConfiguration() {
return this.conf;
} | 3.26 |
hbase_LocalHBaseCluster_waitOnMaster_rdh | /**
 * Wait for the specified master to stop. Removes this thread from the list of running threads.
*
* @return Name of master that just went down.
*/
public String waitOnMaster(JVMClusterUtil.MasterThread masterThread) {
boolean interrupted = false;
while (masterThread.isAlive()) {
try {
LOG.info("Waiting on " + masterThread.getMaster().getServerName().toString());
masterThread.join();
} catch (InterruptedException e) {
LOG.error("Interrupted while waiting for {} to finish. Retrying join", masterThread.getName(), e);
interrupted = true;
}
}
masterThreads.remove(masterThread);
if (interrupted) {
Thread.currentThread().interrupt();
}
return masterThread.getName();
} | 3.26 |
hbase_LocalHBaseCluster_main_rdh | /**
* Test things basically work.
*/
public static void main(String[] args) throws IOException {
Configuration conf = HBaseConfiguration.create();
LocalHBaseCluster cluster = new LocalHBaseCluster(conf);
cluster.startup();
try (Connection connection = ConnectionFactory.createConnection(conf);Admin admin = connection.getAdmin()) {
TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(cluster.getClass().getName())).build();
admin.createTable(htd);
} finally {
cluster.shutdown();
}
} | 3.26 |
hbase_LocalHBaseCluster_waitOnRegionServer_rdh | /**
 * Wait for the specified region server to stop. Removes this thread from the list of running threads.
*
* @return Name of region server that just went down.
*/
public String waitOnRegionServer(JVMClusterUtil.RegionServerThread rst) {
boolean v11 = false;
while (rst.isAlive()) {
try {
LOG.info("Waiting on " + rst.getRegionServer().toString());
rst.join();
} catch (InterruptedException e) {
LOG.error("Interrupted while waiting for {} to finish. Retrying join", rst.getName(), e);
v11 = true;
}
}
regionThreads.remove(rst);
if (v11) {
Thread.currentThread().interrupt();
}
return rst.getName();
} | 3.26 |
hbase_NettyServerCall_sendResponseIfReady_rdh | /**
* If we have a response, and delay is not set, then respond immediately. Otherwise, do not
* respond to client. This is called by the RPC code in the context of the Handler thread.
*/
@Override
public synchronized void sendResponseIfReady() throws IOException {
// set param null to reduce memory pressure
this.param = null;
connection.doRespond(this);
} | 3.26 |
hbase_MasterDDLOperationHelper_deleteColumnFamilyFromFileSystem_rdh | /**
* Remove the column family from the file system
*/
public static void deleteColumnFamilyFromFileSystem(final MasterProcedureEnv env, final TableName tableName, final List<RegionInfo> regionInfoList, final byte[] familyName, final boolean hasMob) throws IOException {
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
if (LOG.isDebugEnabled()) {
LOG.debug("Removing family=" + Bytes.toString(familyName) + " from table=" + tableName);
}
for (RegionInfo hri : regionInfoList) {
// Delete the family directory in FS for all the regions one by one
mfs.deleteFamilyFromFS(hri, familyName);
}
if (hasMob) {
// Delete the mob region
Path mobRootDir = new Path(mfs.getRootDir(), MobConstants.MOB_DIR_NAME);
RegionInfo mobRegionInfo = MobUtils.getMobRegionInfo(tableName);
mfs.deleteFamilyFromFS(mobRootDir, mobRegionInfo, familyName);
}
} | 3.26 |
hbase_AsyncRpcRetryingCallerFactory_start_rdh | /**
 * Shortcut for {@code build().start(HBaseRpcController, ScanResponse)}.
*/
public CompletableFuture<Boolean> start(HBaseRpcController controller, ScanResponse respWhenOpen) {
return build().start(controller, respWhenOpen);
} | 3.26 |
hbase_AsyncRpcRetryingCallerFactory_call_rdh | /**
* Shortcut for {@code build().call()}
*/
public CompletableFuture<T> call() {
return build().call();
} | 3.26 |
hbase_AsyncRpcRetryingCallerFactory_single_rdh | /**
* Create retry caller for single action, such as get, put, delete, etc.
 */
public <T> SingleRequestCallerBuilder<T> single() {
return new SingleRequestCallerBuilder<>();
} | 3.26 |
hbase_AsyncRpcRetryingCallerFactory_scanSingleRegion_rdh | /**
* Create retry caller for scanning a region.
*/
public ScanSingleRegionCallerBuilder scanSingleRegion() {
return new ScanSingleRegionCallerBuilder();
} | 3.26 |
hbase_WALEntryBatch_getNbOperations_rdh | /**
* Returns total number of operations in this batch
*/
public int getNbOperations() {
return getNbRowKeys() + getNbHFiles();
} | 3.26 |
hbase_WALEntryBatch_getNbHFiles_rdh | /**
* Returns the number of HFiles in this batch
*/
public int getNbHFiles() {
return nbHFiles;
} | 3.26 |
hbase_WALEntryBatch_getLastSeqIds_rdh | /**
* Returns the last sequenceid for each region if the table has serial-replication scope
*/
public Map<String, Long> getLastSeqIds() {
return lastSeqIds;
} | 3.26 |
hbase_WALEntryBatch_getHeapSize_rdh | /**
* Returns the heap size of this batch
*/
public long getHeapSize() {
return heapSize;
} | 3.26 |
hbase_WALEntryBatch_getLastWalPosition_rdh | /**
* Returns the position in the last WAL that was read.
*/
public long getLastWalPosition() {
return lastWalPosition;
} | 3.26 |
hbase_WALEntryBatch_getWalEntries_rdh | /**
* Returns the WAL Entries.
*/
public List<Entry> getWalEntries() {
return walEntriesWithSize.stream().map(Pair::getFirst).collect(Collectors.toList());
} | 3.26 |
hbase_WALEntryBatch_getWalEntriesWithSize_rdh | /**
 * Returns the WAL entries paired with their sizes.
*/
public List<Pair<Entry, Long>> getWalEntriesWithSize() {
return walEntriesWithSize;
} | 3.26 |
hbase_WALEntryBatch_getLastWalPath_rdh | /**
* Returns the path of the last WAL that was read.
 */
public Path getLastWalPath() {
return lastWalPath;
} | 3.26 |
hbase_MultiResponse_m0_rdh | /**
 * Returns the number of pairs in this container
*/
public int m0() {
int size = 0;
for (RegionResult v1 : results.values()) {
size += v1.size();
}
return size;
} | 3.26 |
hbase_MultiResponse_add_rdh | /**
* Add the pair to the container, grouped by the regionName.
*/
public void add(byte[] regionName, int originalIndex, Object resOrEx) {
getResult(regionName).addResult(originalIndex, resOrEx);
} | 3.26 |
hbase_MultiResponse_getException_rdh | /**
* Returns the exception for the region, if any. Null otherwise.
*/
public Throwable getException(byte[] regionName) {
return exceptions.get(regionName);
} | 3.26 |
hbase_ZKDump_getReplicationZnodesDump_rdh | /**
* Appends replication znodes to the passed StringBuilder.
*
* @param zkw
* reference to the {@link ZKWatcher} which also contains configuration and operation
* @param sb
* the {@link StringBuilder} to append to
* @throws KeeperException
* if a ZooKeeper operation fails
*/
private static void getReplicationZnodesDump(ZKWatcher zkw, StringBuilder sb) throws KeeperException {
String replicationZnode = zkw.getZNodePaths().replicationZNode;
if (ZKUtil.checkExists(zkw, replicationZnode) == (-1)) {
return;
}
// do a ls -r on this znode
sb.append("\n").append(replicationZnode).append(": ");
List<String> children = ZKUtil.listChildrenNoWatch(zkw, replicationZnode);
if (children != null) {
Collections.sort(children);
for (String child : children) {
String zNode = ZNodePaths.joinZNode(replicationZnode, child);
if (zNode.equals(zkw.getZNodePaths().peersZNode)) {
appendPeersZnodes(zkw, zNode, sb);
} else if (zNode.equals(zkw.getZNodePaths().queuesZNode)) {
appendRSZnodes(zkw, zNode, sb);
} else if (zNode.equals(zkw.getZNodePaths().hfileRefsZNode)) {
appendHFileRefsZNodes(zkw, zNode, sb);
}
}
}
} | 3.26 |
hbase_ZKDump_getServerStats_rdh | /**
* Gets the statistics from the given server.
*
* @param server
* The server to get the statistics from.
* @param timeout
* The socket timeout to use.
* @return The array of response strings.
* @throws IOException
* When the socket communication fails.
*/
private static String[] getServerStats(String server, int timeout) throws IOException {
String[] sp = server.split(":");
if (sp.length == 0) {
return null;
}
String host = sp[0];
int port = (sp.length > 1) ? Integer.parseInt(sp[1]) : HConstants.DEFAULT_ZOOKEEPER_CLIENT_PORT;
try (Socket socket = new Socket()) {
InetSocketAddress sockAddr = new InetSocketAddress(host, port);
if (sockAddr.isUnresolved()) {
throw new UnknownHostException(host + " cannot be resolved");
}
socket.connect(sockAddr, timeout);
socket.setSoTimeout(timeout);
try (PrintWriter out = new PrintWriter(new BufferedWriter(new OutputStreamWriter(socket.getOutputStream(), StandardCharsets.UTF_8)), true);BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8))) {
out.println("stat");
out.flush();
ArrayList<String> res = new ArrayList<>();
while (true) {
String line = in.readLine();
if (line != null) {
res.add(line);
} else {
break;
}
}
return res.toArray(new String[res.size()]);
}
}
} | 3.26 |
hbase_Branch1CoprocessorMethods_addMethods_rdh | /* This list of methods was generated from HBase 1.4.4. */
private void addMethods() {
/* BulkLoadObserver */
addMethod("prePrepareBulkLoad", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.PrepareBulkLoadRequest");
addMethod("preCleanupBulkLoad", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.CleanupBulkLoadRequest");
/* EndpointObserver */
addMethod("postEndpointInvocation", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "com.google.protobuf.Service", "java.lang.String", "com.google.protobuf.Message", "com.google.protobuf.Message.Builder");
addMethod("preEndpointInvocation", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "com.google.protobuf.Service", "java.lang.String", "com.google.protobuf.Message");
/* MasterObserver */
addMethod("preCreateTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HTableDescriptor", "org.apache.hadoop.hbase.HRegionInfo[]");
addMethod("postCreateTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HTableDescriptor", "org.apache.hadoop.hbase.HRegionInfo[]");
addMethod("preDeleteTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName");
addMethod("postDeleteTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName");
addMethod("preDeleteTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName");
addMethod("preMove", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.ServerName", "org.apache.hadoop.hbase.ServerName");
addMethod("preCreateTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HTableDescriptor", "org.apache.hadoop.hbase.HRegionInfo[]");
addMethod("postCreateTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HTableDescriptor", "org.apache.hadoop.hbase.HRegionInfo[]");
addMethod("postMove", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.ServerName", "org.apache.hadoop.hbase.ServerName");
addMethod("postDeleteTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName");
addMethod("preTruncateTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName");
addMethod("postTruncateTable",
"org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName");
addMethod("preTruncateTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName");
addMethod("postTruncateTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName");
addMethod("preModifyTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HTableDescriptor");
addMethod("postModifyTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HTableDescriptor");
addMethod("preModifyTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HTableDescriptor");
addMethod("postModifyTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HTableDescriptor");
addMethod("preAddColumn", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor");
addMethod("postAddColumn", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor");
addMethod("preAddColumnHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor");
addMethod("postAddColumnHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor");
addMethod("preModifyColumn", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor");
addMethod("postModifyColumn", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor");
addMethod("preModifyColumnHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor");addMethod("postModifyColumnHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor");
addMethod("preDeleteColumn", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "byte[]");
addMethod("postDeleteColumn", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "byte[]");
addMethod("preDeleteColumnHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "byte[]");
addMethod("postDeleteColumnHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "byte[]");
addMethod("preEnableTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName");
addMethod("postEnableTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName");
addMethod("preEnableTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName");
addMethod("postEnableTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName");
addMethod("preDisableTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName");
addMethod("postDisableTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName");
addMethod("preDisableTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName");
addMethod("postDisableTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName");
addMethod("preAbortProcedure", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.procedure2.ProcedureExecutor", "long");
addMethod("postAbortProcedure", "org.apache.hadoop.hbase.coprocessor.ObserverContext");
addMethod("preListProcedures", "org.apache.hadoop.hbase.coprocessor.ObserverContext");
addMethod("postListProcedures", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.List");
addMethod("preAssign", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo");addMethod("postAssign", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo");
addMethod("preUnassign", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo", "boolean");
addMethod("postUnassign", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo", "boolean");
addMethod("preRegionOffline", "org.apache.hadoop.hbase.coprocessor.ObserverContext",
"org.apache.hadoop.hbase.HRegionInfo");
addMethod("postRegionOffline", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo");
addMethod("preBalance", "org.apache.hadoop.hbase.coprocessor.ObserverContext");
addMethod("postBalance", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.List");
addMethod("preSetSplitOrMergeEnabled", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "boolean", "org.apache.hadoop.hbase.client.Admin.MasterSwitchType");
addMethod("postSetSplitOrMergeEnabled", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "boolean", "org.apache.hadoop.hbase.client.Admin.MasterSwitchType");
addMethod("preBalanceSwitch", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "boolean");
addMethod("postBalanceSwitch", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "boolean", "boolean");
addMethod("preShutdown", "org.apache.hadoop.hbase.coprocessor.ObserverContext");
addMethod("preStopMaster", "org.apache.hadoop.hbase.coprocessor.ObserverContext");
addMethod("postStartMaster", "org.apache.hadoop.hbase.coprocessor.ObserverContext");
addMethod("preMasterInitialization", "org.apache.hadoop.hbase.coprocessor.ObserverContext");
addMethod("preSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", "org.apache.hadoop.hbase.HTableDescriptor");
addMethod("postSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription",
"org.apache.hadoop.hbase.HTableDescriptor");
addMethod("preListSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription");
addMethod("postListSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription");
addMethod("preCloneSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", "org.apache.hadoop.hbase.HTableDescriptor");
addMethod("postCloneSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", "org.apache.hadoop.hbase.HTableDescriptor");
addMethod("preRestoreSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", "org.apache.hadoop.hbase.HTableDescriptor");addMethod("postRestoreSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", "org.apache.hadoop.hbase.HTableDescriptor");
addMethod("preDeleteSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription");
addMethod("postDeleteSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription");
addMethod("preGetTableDescriptors", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.List", "java.util.List");addMethod("preGetTableDescriptors", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.List", "java.util.List", "java.lang.String");
addMethod("postGetTableDescriptors", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.List",
"java.util.List", "java.lang.String");
addMethod("postGetTableDescriptors", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.List");
addMethod("preGetTableNames", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.List", "java.lang.String");
addMethod("postGetTableNames", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.List", "java.lang.String");
addMethod("preCreateNamespace", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.NamespaceDescriptor");
addMethod("postCreateNamespace", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.NamespaceDescriptor");
addMethod("preDeleteNamespace", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String");
addMethod("postDeleteNamespace", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String");
addMethod("preModifyNamespace",
"org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.NamespaceDescriptor");
addMethod("postModifyNamespace", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.NamespaceDescriptor");
addMethod("preGetNamespaceDescriptor", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String");
addMethod("postGetNamespaceDescriptor", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.NamespaceDescriptor");
addMethod("preListNamespaceDescriptors", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.List");
addMethod("postListNamespaceDescriptors", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.List");
addMethod("preTableFlush", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName");
addMethod("postTableFlush", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName");
addMethod("preSetUserQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String", "java.lang.String", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas");
addMethod("preSetUserQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas");
addMethod("preSetUserQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas");
addMethod("postSetUserQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String", "java.lang.String", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas");
addMethod("postSetUserQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas");
addMethod("postSetUserQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas");
addMethod("preSetTableQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas");
addMethod("postSetTableQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas");
addMethod("preSetNamespaceQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas");
addMethod("postSetNamespaceQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas");
addMethod("preDispatchMerge", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.HRegionInfo");
addMethod("postDispatchMerge", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.HRegionInfo");
addMethod("preGetClusterStatus", "org.apache.hadoop.hbase.coprocessor.ObserverContext");
addMethod("postGetClusterStatus", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.ClusterStatus");
addMethod("preClearDeadServers", "org.apache.hadoop.hbase.coprocessor.ObserverContext");
addMethod("postClearDeadServers", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.List", "java.util.List");
addMethod("preMoveServers", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.Set", "java.lang.String");
addMethod("postMoveServers", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.Set", "java.lang.String");
addMethod("preMoveTables", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.Set", "java.lang.String");
addMethod("postMoveTables", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.Set", "java.lang.String");
addMethod("preMoveServersAndTables", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.Set", "java.util.Set", "java.lang.String");
addMethod("postMoveServersAndTables", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.Set", "java.util.Set", "java.lang.String");
addMethod("preAddRSGroup", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String");
addMethod("postAddRSGroup", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String");
addMethod("preRemoveRSGroup", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String");
addMethod("postRemoveRSGroup", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String");
addMethod("preRemoveServers", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.Set");
addMethod("postRemoveServers", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.Set");
addMethod("preBalanceRSGroup", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String");
addMethod("postBalanceRSGroup", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String", "boolean");/* RegionObserver */
addMethod("preOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext");
addMethod("postOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext");
addMethod("postLogReplay", "org.apache.hadoop.hbase.coprocessor.ObserverContext");
addMethod("preFlushScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "org.apache.hadoop.hbase.regionserver.KeyValueScanner", "org.apache.hadoop.hbase.regionserver.InternalScanner", "long");
addMethod("preFlushScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "org.apache.hadoop.hbase.regionserver.KeyValueScanner", "org.apache.hadoop.hbase.regionserver.InternalScanner");
addMethod("preFlush", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "org.apache.hadoop.hbase.regionserver.InternalScanner");
addMethod("preFlush", "org.apache.hadoop.hbase.coprocessor.ObserverContext");
addMethod("postFlush", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "org.apache.hadoop.hbase.regionserver.StoreFile");
addMethod("postFlush", "org.apache.hadoop.hbase.coprocessor.ObserverContext");
addMethod("preCompactSelection", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "java.util.List");
addMethod("preCompactSelection", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "java.util.List", "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest");
addMethod("postCompactSelection", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "com.google.common.collect.ImmutableList");
addMethod("postCompactSelection", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "com.google.common.collect.ImmutableList", "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest");
addMethod("preCompact", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "org.apache.hadoop.hbase.regionserver.InternalScanner", "org.apache.hadoop.hbase.regionserver.ScanType");
addMethod("preCompact",
"org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "org.apache.hadoop.hbase.regionserver.InternalScanner", "org.apache.hadoop.hbase.regionserver.ScanType", "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest");
addMethod("preClose", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "boolean");
addMethod("preCompactScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "java.util.List", "org.apache.hadoop.hbase.regionserver.ScanType", "long", "org.apache.hadoop.hbase.regionserver.InternalScanner");
addMethod("preCompactScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "java.util.List", "org.apache.hadoop.hbase.regionserver.ScanType", "long", "org.apache.hadoop.hbase.regionserver.InternalScanner", "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest", "long");
addMethod("preCompactScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "java.util.List", "org.apache.hadoop.hbase.regionserver.ScanType", "long", "org.apache.hadoop.hbase.regionserver.InternalScanner", "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest");
addMethod("postCompact", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "org.apache.hadoop.hbase.regionserver.StoreFile");
addMethod("postCompact", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "org.apache.hadoop.hbase.regionserver.StoreFile", "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest");
addMethod("preSplit", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]");
addMethod("preSplit", "org.apache.hadoop.hbase.coprocessor.ObserverContext");
addMethod("postSplit", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region");
addMethod("preSplitBeforePONR", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", "java.util.List");
addMethod("preSplitAfterPONR", "org.apache.hadoop.hbase.coprocessor.ObserverContext");
addMethod("preRollBackSplit", "org.apache.hadoop.hbase.coprocessor.ObserverContext");
addMethod("postRollBackSplit", "org.apache.hadoop.hbase.coprocessor.ObserverContext");
addMethod("postCompleteSplit", "org.apache.hadoop.hbase.coprocessor.ObserverContext");
addMethod("postClose", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "boolean");
addMethod("preGetClosestRowBefore", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", "byte[]", "org.apache.hadoop.hbase.client.Result");
addMethod("postGetClosestRowBefore", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", "byte[]", "org.apache.hadoop.hbase.client.Result");
addMethod("preGetOp", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Get", "java.util.List");
addMethod("postGetOp", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Get", "java.util.List");
addMethod("preExists", "org.apache.hadoop.hbase.coprocessor.ObserverContext",
"org.apache.hadoop.hbase.client.Get", "boolean");
addMethod("postExists", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Get", "boolean");
addMethod("prePut", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Put", "org.apache.hadoop.hbase.regionserver.wal.WALEdit", "org.apache.hadoop.hbase.client.Durability");
addMethod("postPut", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Put", "org.apache.hadoop.hbase.regionserver.wal.WALEdit", "org.apache.hadoop.hbase.client.Durability");
addMethod("preDelete", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Delete", "org.apache.hadoop.hbase.regionserver.wal.WALEdit", "org.apache.hadoop.hbase.client.Durability");
addMethod("prePrepareTimeStampForDeleteVersion", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Mutation", "org.apache.hadoop.hbase.Cell", "byte[]", "org.apache.hadoop.hbase.client.Get");
addMethod("postDelete", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Delete", "org.apache.hadoop.hbase.regionserver.wal.WALEdit", "org.apache.hadoop.hbase.client.Durability");
addMethod("preBatchMutate", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress");
addMethod("postBatchMutate", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress");
addMethod("postStartRegionOperation", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Region.Operation");
addMethod("postCloseRegionOperation", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Region.Operation");
addMethod("postBatchMutateIndispensably", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress", "boolean");
addMethod("preCheckAndPut", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", "byte[]", "byte[]", "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", "org.apache.hadoop.hbase.filter.ByteArrayComparable", "org.apache.hadoop.hbase.client.Put", "boolean");
addMethod("preCheckAndPutAfterRowLock", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", "byte[]", "byte[]", "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", "org.apache.hadoop.hbase.filter.ByteArrayComparable", "org.apache.hadoop.hbase.client.Put", "boolean");
addMethod("postCheckAndPut", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", "byte[]", "byte[]", "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", "org.apache.hadoop.hbase.filter.ByteArrayComparable", "org.apache.hadoop.hbase.client.Put", "boolean");
addMethod("preCheckAndDelete", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", "byte[]", "byte[]", "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", "org.apache.hadoop.hbase.filter.ByteArrayComparable", "org.apache.hadoop.hbase.client.Delete", "boolean");
addMethod("preCheckAndDeleteAfterRowLock", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", "byte[]", "byte[]", "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", "org.apache.hadoop.hbase.filter.ByteArrayComparable", "org.apache.hadoop.hbase.client.Delete", "boolean");
addMethod("postCheckAndDelete", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", "byte[]", "byte[]", "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", "org.apache.hadoop.hbase.filter.ByteArrayComparable", "org.apache.hadoop.hbase.client.Delete", "boolean");
addMethod("preIncrementColumnValue", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", "byte[]", "byte[]",
"long", "boolean");
addMethod("postIncrementColumnValue", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", "byte[]", "byte[]", "long", "boolean", "long");
addMethod("preAppend", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Append");
addMethod("preAppendAfterRowLock", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Append");
addMethod("postAppend", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Append", "org.apache.hadoop.hbase.client.Result");
addMethod("preIncrement", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Increment");
addMethod("preIncrementAfterRowLock", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Increment");
addMethod("postIncrement", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Increment", "org.apache.hadoop.hbase.client.Result");
addMethod("preScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Scan", "org.apache.hadoop.hbase.regionserver.RegionScanner");
addMethod("preStoreScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "org.apache.hadoop.hbase.client.Scan", "java.util.NavigableSet", "org.apache.hadoop.hbase.regionserver.KeyValueScanner");
addMethod("postScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Scan", "org.apache.hadoop.hbase.regionserver.RegionScanner");
addMethod("preScannerNext", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.InternalScanner", "java.util.List", "int", "boolean");
addMethod("postScannerNext", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.InternalScanner", "java.util.List", "int", "boolean");
addMethod("postScannerFilterRow", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.InternalScanner", "byte[]", "int", "short", "boolean");
addMethod("preScannerClose", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.InternalScanner");
addMethod("postScannerClose", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.InternalScanner");addMethod("preWALRestore", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.regionserver.wal.HLogKey", "org.apache.hadoop.hbase.regionserver.wal.WALEdit");
addMethod("preWALRestore", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.wal.WALKey", "org.apache.hadoop.hbase.regionserver.wal.WALEdit");addMethod("postWALRestore", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.regionserver.wal.HLogKey", "org.apache.hadoop.hbase.regionserver.wal.WALEdit");
addMethod("postWALRestore", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.wal.WALKey", "org.apache.hadoop.hbase.regionserver.wal.WALEdit");
addMethod("preBulkLoadHFile", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.List");
addMethod("preCommitStoreFile", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", "java.util.List");
addMethod("postCommitStoreFile", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", "org.apache.hadoop.fs.Path", "org.apache.hadoop.fs.Path");
addMethod("postBulkLoadHFile", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.List", "boolean");
addMethod("preStoreFileReaderOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.fs.FileSystem", "org.apache.hadoop.fs.Path", "org.apache.hadoop.hbase.io.FSDataInputStreamWrapper", "long", "org.apache.hadoop.hbase.io.hfile.CacheConfig", "org.apache.hadoop.hbase.io.Reference", "org.apache.hadoop.hbase.regionserver.StoreFile.Reader");
addMethod("postStoreFileReaderOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.fs.FileSystem", "org.apache.hadoop.fs.Path", "org.apache.hadoop.hbase.io.FSDataInputStreamWrapper", "long", "org.apache.hadoop.hbase.io.hfile.CacheConfig", "org.apache.hadoop.hbase.io.Reference", "org.apache.hadoop.hbase.regionserver.StoreFile.Reader");
addMethod("postMutationBeforeWAL", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType", "org.apache.hadoop.hbase.client.Mutation", "org.apache.hadoop.hbase.Cell", "org.apache.hadoop.hbase.Cell");
addMethod("postInstantiateDeleteTracker",
"org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.DeleteTracker");
/* RegionServerObserver */
addMethod("preMerge", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region");
addMethod("preStopRegionServer", "org.apache.hadoop.hbase.coprocessor.ObserverContext");
addMethod("postMerge", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region");
addMethod("preMergeCommit", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region", "java.util.List");
addMethod("postMergeCommit", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region");
addMethod("preRollBackMerge", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Region",
"org.apache.hadoop.hbase.regionserver.Region");
addMethod("postRollBackMerge", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region");
addMethod("preRollWALWriterRequest", "org.apache.hadoop.hbase.coprocessor.ObserverContext");
addMethod("postRollWALWriterRequest", "org.apache.hadoop.hbase.coprocessor.ObserverContext");
addMethod("postCreateReplicationEndPoint", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.replication.ReplicationEndpoint");
addMethod("preReplicateLogEntries", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.List", "org.apache.hadoop.hbase.CellScanner");addMethod("postReplicateLogEntries", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.List", "org.apache.hadoop.hbase.CellScanner");
/* WALObserver */
addMethod("preWALWrite", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.wal.WALKey", "org.apache.hadoop.hbase.regionserver.wal.WALEdit");
addMethod("preWALWrite", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.regionserver.wal.HLogKey", "org.apache.hadoop.hbase.regionserver.wal.WALEdit");
addMethod("postWALWrite", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.regionserver.wal.HLogKey", "org.apache.hadoop.hbase.regionserver.wal.WALEdit");
addMethod("postWALWrite", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.wal.WALKey", "org.apache.hadoop.hbase.regionserver.wal.WALEdit");addMethod("preWALRoll", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.fs.Path", "org.apache.hadoop.fs.Path");
addMethod("postWALRoll", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.fs.Path", "org.apache.hadoop.fs.Path");
} | 3.26 |
hbase_Cluster_isEmpty_rdh | /**
* Returns true if no locations have been added, false otherwise
*/
public boolean isEmpty() {
return nodes.isEmpty();
} | 3.26 |
hbase_Cluster_remove_rdh | /**
* Remove a node from the cluster
*
* @param name
* host name
* @param port
* service port
*/
public Cluster remove(String name, int port) {
StringBuilder sb = new StringBuilder();
sb.append(name);
sb.append(':');
sb.append(port);
return remove(sb.toString());
} | 3.26 |
hbase_Cluster_add_rdh | /**
* Add a node to the cluster
*
* @param name
* host name
* @param port
* service port
*/
public Cluster add(String name, int port) {
StringBuilder sb = new StringBuilder();
sb.append(name);
sb.append(':');
sb.append(port);
return add(sb.toString());
} | 3.26 |
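A brief usage sketch for the add/remove helpers above (hypothetical: the no-arg Cluster constructor and the REST Client class are assumptions, not verified against a specific HBase release):
// Hypothetical sketch: register two REST endpoints, then drop one.
Cluster restCluster = new Cluster();
restCluster.add("rest-node-1.example.com", 8080);
restCluster.add("rest-node-2.example.com", 8080);
restCluster.remove("rest-node-2.example.com", 8080); // same "host:port" key that add() builds
// A REST client (e.g. org.apache.hadoop.hbase.rest.client.Client) can then iterate over restCluster's nodes.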
hbase_BaseEnvironment_startup_rdh | /**
* Initialize the environment
*/
public void startup() throws IOException {
if ((state == State.INSTALLED) || (state == State.STOPPED)) {
state = State.STARTING;
Thread currentThread = Thread.currentThread();
ClassLoader hostClassLoader = currentThread.getContextClassLoader();
try {
currentThread.setContextClassLoader(this.getClassLoader());
impl.start(this);
state = State.ACTIVE;
} finally {
currentThread.setContextClassLoader(hostClassLoader);
}
} else {
LOG.warn(((("Not starting coprocessor " + impl.getClass().getName()) + " because not inactive (state=") + state.toString()) + ")");
}
} | 3.26 |
hbase_BaseEnvironment_getHBaseVersion_rdh | /**
* Returns the HBase release
*/
@Override
public String getHBaseVersion() {
return VersionInfo.getVersion();
} | 3.26 |
hbase_BaseEnvironment_getVersion_rdh | /**
* Returns the coprocessor environment version
*/
@Override
public int getVersion() {
return Coprocessor.VERSION;
} | 3.26 |
hbase_ReflectionUtils_printThreadInfo_rdh | /**
* Print all of the thread's information and stack traces.
*
* @param stream
 * the stream to write to
* @param title
* a string title for the stack trace
*/
static void printThreadInfo(PrintStream stream, String title) {
final int STACK_DEPTH = 20;
boolean contention = threadBean.isThreadContentionMonitoringEnabled();
long[] threadIds = threadBean.getAllThreadIds();
stream.println("Process Thread Dump: " + title);
stream.println(threadIds.length + " active threads");
for (long v16 : threadIds) {
ThreadInfo info = threadBean.getThreadInfo(v16, STACK_DEPTH);
if (info == null) {
stream.println(" Inactive");
continue;
}
stream.println(("Thread " + getTaskName(info.getThreadId(), info.getThreadName())) + ":");
Thread.State state = info.getThreadState();
stream.println(" State: " + state);
stream.println(" Blocked count: " + info.getBlockedCount());
stream.println(" Waited count: " + info.getWaitedCount());
if (contention) {
stream.println(" Blocked time: " + info.getBlockedTime());
stream.println(" Waited time: " + info.getWaitedTime());
}
if (state == Thread.State.WAITING) {
stream.println(" Waiting on " + info.getLockName());
} else if (state == Thread.State.BLOCKED) {
stream.println(" Blocked on " + info.getLockName());
stream.println(" Blocked by " + getTaskName(info.getLockOwnerId(), info.getLockOwnerName()));
}
stream.println(" Stack:");
for (StackTraceElement frame : info.getStackTrace()) {
stream.println(" " + frame.toString());
}
}
stream.flush();
} | 3.26 |
hbase_ReflectionUtils_logThreadInfo_rdh | /**
* Log the current thread stacks at INFO level.
*
* @param log
* the logger that logs the stack trace
* @param title
* a descriptive title for the call stacks
* @param minInterval
 * the minimum time, in seconds, since the last stack dump
*/
public static void logThreadInfo(Logger log, String title, long minInterval) {
boolean dumpStack = false;
if (log.isInfoEnabled()) {
synchronized(ReflectionUtils.class) {
long now = EnvironmentEdgeManager.currentTime();
if ((now - previousLogTime) >= (minInterval * 1000)) {
previousLogTime = now;
dumpStack = true;
}
}
if (dumpStack) {
try {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
printThreadInfo(new PrintStream(buffer, false, "UTF-8"), title);
log.info(buffer.toString(Charset.defaultCharset().name()));
} catch (UnsupportedEncodingException ignored) {
log.warn(("Could not write thread info about '" +
title) + "' due to a string encoding issue.");
} }
}
} | 3.26 |
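A minimal, hypothetical sketch of how logThreadInfo might be called from code that suspects a stall; the latch and logger names are illustrative only:
// Hypothetical: dump all thread stacks at INFO, rate-limited to once per 60 seconds.
if (!shutdownLatch.await(30, TimeUnit.SECONDS)) {
  ReflectionUtils.logThreadInfo(LOG, "still waiting for shutdown", 60);
}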
hbase_ReflectionUtils_invokeMethod_rdh | /**
* Get and invoke the target method from the given object with given parameters
*
* @param obj
* the object to get and invoke method from
* @param methodName
* the name of the method to invoke
* @param params
* the parameters for the method to invoke
* @return the return value of the method invocation
*/
@NonNull
public static Object invokeMethod(Object obj, String methodName, Object... params) {
Method m;
try {
m = obj.getClass().getMethod(methodName, getParameterTypes(params));
m.setAccessible(true);
return m.invoke(obj, params);
} catch (NoSuchMethodException e) {
throw new UnsupportedOperationException("Cannot find specified method " +
methodName, e);
} catch (IllegalAccessException e) {
throw new UnsupportedOperationException("Unable to access specified method " + methodName, e);
} catch (IllegalArgumentException e) {
throw new UnsupportedOperationException("Illegal arguments supplied for method " + methodName, e);
} catch (InvocationTargetException e) {
throw new UnsupportedOperationException("Method threw an exception for " + methodName, e);
}
} | 3.26 |
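A small, hypothetical usage sketch for invokeMethod; the target object and method name are illustrative only:
// Hypothetical: call a public no-arg method by name; primitive results come back boxed.
Object len = ReflectionUtils.invokeMethod("hello", "length"); // Integer 5
// A missing or inaccessible method surfaces as UnsupportedOperationException, per the catch blocks above.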
hbase_ReflectionUtils_getOneArgStaticMethodAsFunction_rdh | /**
* Creates a Function which can be called to performantly execute a reflected static method. The
* creation of the Function itself may not be fast, but executing that method thereafter should be
* much faster than {@link #invokeMethod(Object, String, Object...)}.
*
* @param lookupClazz
* the class to find the static method in
* @param methodName
* the method name
* @param argumentClazz
* the type of the argument
* @param returnValueClass
* the type of the return value
* @return a function which when called executes the requested static method.
* @throws Throwable
* exception types from the underlying reflection
*/
public static <I, R> Function<I, R> getOneArgStaticMethodAsFunction(Class<?> lookupClazz, String methodName, Class<I> argumentClazz, Class<R> returnValueClass) throws Throwable {
MethodHandles.Lookup lookup = MethodHandles.lookup();
MethodHandle methodHandle = lookup.findStatic(lookupClazz, methodName, MethodType.methodType(returnValueClass, argumentClazz));
CallSite site = LambdaMetafactory.metafactory(lookup, "apply", MethodType.methodType(Function.class), methodHandle.type().generic(), methodHandle, methodHandle.type());
return ((Function<I, R>) (site.getTarget().invokeExact()));
} | 3.26 |
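A hypothetical usage sketch: the Function is built once (slow path) and then applied repeatedly on the hot path. Integer.valueOf is used purely as an illustrative one-arg static method; the helper declares throws Throwable, so callers must handle or propagate that:
// Hypothetical: reflect Integer.valueOf(String) into a Function once, reuse it afterwards.
Function<String, Integer> valueOf = ReflectionUtils.getOneArgStaticMethodAsFunction(
  Integer.class, "valueOf", String.class, Integer.class);
Integer ten = valueOf.apply("10"); // no per-call reflection cost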
hbase_RetryCounter_sleepUntilNextRetry_rdh | /**
 * Sleeps for a backoff time as supplied by the backoff policy, and increments the attempt count
*/
public void sleepUntilNextRetry() throws InterruptedException {
int attempts = getAttemptTimes();
long sleepTime = getBackoffTime();
LOG.trace("Sleeping {} ms before retry #{}...", sleepTime, attempts);
retryConfig.getTimeUnit().sleep(sleepTime);
useRetry();
} | 3.26 |
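A hypothetical retry-loop sketch around sleepUntilNextRetry; RetryCounterFactory(maxAttempts, sleepIntervalMs), create() and shouldRetry() are assumed from the surrounding HBase util package, and connectToService() is a stand-in for the flaky call:
// Hypothetical: bounded retries with policy-driven backoff between attempts.
RetryCounter retryCounter = new RetryCounterFactory(5, 200).create();
while (true) {
  try {
    connectToService();
    break;
  } catch (IOException e) {
    if (!retryCounter.shouldRetry()) {
      throw e;
    }
    retryCounter.sleepUntilNextRetry(); // sleeps per the backoff policy, then bumps the attempt count
  }
}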
hbase_KeyValueTestUtil_containsIgnoreMvccVersion_rdh | /**
* Checks whether KeyValues from kvCollection2 are contained in kvCollection1. The comparison is
* made without distinguishing MVCC version of the KeyValues
*
* @return true if KeyValues from kvCollection2 are contained in kvCollection1
*/
public static boolean containsIgnoreMvccVersion(Collection<? extends Cell> kvCollection1, Collection<? extends Cell> kvCollection2) {
for (Cell kv1 : kvCollection1) {
boolean found = false;
for (Cell kv2 : kvCollection2) {
if (PrivateCellUtil.equalsIgnoreMvccVersion(kv1, kv2))
found = true;
}
if (!found)
return false;
}
return true;
} | 3.26 |
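A hypothetical test-style usage of containsIgnoreMvccVersion; kvA, kvB and scannedCells are placeholders for cells built and read back by a test:
// Hypothetical: the scanned cells must contain the expected ones, ignoring MVCC versions set at write time.
List<Cell> expected = Arrays.asList(kvA, kvB);
assertTrue(KeyValueTestUtil.containsIgnoreMvccVersion(scannedCells, expected));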
hbase_KeyValueTestUtil_toStringWithPadding_rdh | /**
* ******************* toString ***********************************
 */
public static String toStringWithPadding(final Collection<? extends KeyValue> kvs, final boolean includeMeta) {
int maxRowStringLength = 0;
int maxFamilyStringLength = 0;
int maxQualifierStringLength = 0;
int maxTimestampLength = 0;
for (KeyValue kv : kvs) {
maxRowStringLength = Math.max(maxRowStringLength, getRowString(kv).length());
maxFamilyStringLength = Math.max(maxFamilyStringLength, getFamilyString(kv).length());
maxQualifierStringLength = Math.max(maxQualifierStringLength, getQualifierString(kv).length());
maxTimestampLength = Math.max(maxTimestampLength, Long.valueOf(kv.getTimestamp()).toString().length());
}
StringBuilder sb = new StringBuilder();
for (KeyValue kv : kvs) {
if (sb.length() > 0) {
sb.append("\n");
}
String row = toStringWithPadding(kv, maxRowStringLength, maxFamilyStringLength, maxQualifierStringLength, maxTimestampLength, includeMeta);
sb.append(row);
}
return sb.toString();
} | 3.26 |
hbase_SizeCachedByteBufferKeyValue_getSerializedSize_rdh | /**
 * Overridden to return the cached length directly, saving the cost of method dispatch. Without
 * this override the call would go through {@link ExtendedCell#getSerializedSize()} first and then
 * forward to {@link SizeCachedKeyValue#getSerializedSize(boolean)}. (See HBASE-21657)
*/
@Override
public int getSerializedSize() {
return this.length;
} | 3.26 |
hbase_CatalogFamilyFormat_isMergeQualifierPrefix_rdh | /**
* Returns True if the column in <code>cell</code> matches the regex 'info:merge.*'.
*/
public static boolean isMergeQualifierPrefix(Cell cell) {
// Check that the cell has the catalog family and a qualifier that starts with the merge prefix 'merge'
return CellUtil.matchingFamily(cell, HConstants.CATALOG_FAMILY) && PrivateCellUtil.qualifierStartsWith(cell, HConstants.MERGE_QUALIFIER_PREFIX);
} | 3.26 |
hbase_CatalogFamilyFormat_getMergeRegions_rdh | /**
 * Returns deserialized RegionInfo values taken from column values that match the regex
 * 'info:merge.*' in the array of <code>cells</code>.
*/
@Nullable
public static List<RegionInfo> getMergeRegions(Cell[] cells) {
Map<String, RegionInfo> mergeRegionsWithName = getMergeRegionsWithName(cells);
return mergeRegionsWithName == null ? null : new ArrayList<>(mergeRegionsWithName.values());
} | 3.26 |
hbase_CatalogFamilyFormat_getRegionInfo_rdh | /**
* Returns RegionInfo object from the column
* HConstants.CATALOG_FAMILY:HConstants.REGIONINFO_QUALIFIER of the catalog table Result.
*
* @param data
* a Result object from the catalog table scan
* @return RegionInfo or null
*/
public static RegionInfo getRegionInfo(Result data) {
return getRegionInfo(data, HConstants.REGIONINFO_QUALIFIER);
} | 3.26 |
hbase_CatalogFamilyFormat_getTableState_rdh | /**
* Decode table state from META Result. Should contain cell from HConstants.TABLE_FAMILY
*
* @return null if not found
*/
@Nullable
public static TableState getTableState(Result r) throws IOException {
Cell cell = r.getColumnLatestCell(HConstants.TABLE_FAMILY, HConstants.TABLE_STATE_QUALIFIER);
if (cell == null) {
return null;
}
try {
return TableState.parseFrom(TableName.valueOf(r.getRow()), Arrays.copyOfRange(cell.getValueArray(), cell.getValueOffset(), cell.getValueOffset() + cell.getValueLength()));
} catch (DeserializationException e) {
throw new IOException(e);
}
} | 3.26 |
hbase_CatalogFamilyFormat_parseRegionInfoFromRegionName_rdh | /**
 * Returns an HRI parsed from this regionName. Not all the fields of the HRI are stored in the
* name, so the returned object should only be used for the fields in the regionName.
* <p/>
* Since the returned object does not contain all the fields, we do not expose this method in
* public API, such as {@link RegionInfo} or {@link RegionInfoBuilder}.
*/
public static RegionInfo parseRegionInfoFromRegionName(byte[] regionName) throws IOException {
byte[][] fields = RegionInfo.parseRegionName(regionName);
long regionId = Long.parseLong(Bytes.toString(fields[2]));
int replicaId = (fields.length > 3) ? Integer.parseInt(Bytes.toString(fields[3]), 16) : 0;
return RegionInfoBuilder.newBuilder(TableName.valueOf(fields[0])).setStartKey(fields[1]).setRegionId(regionId).setReplicaId(replicaId).build();
} | 3.26 |
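A hypothetical sketch of the intended use; only the fields actually encoded in the region name are meaningful on the returned object:
// Hypothetical: recover table, start key, region id and replica id from an encoded region name.
RegionInfo partial = CatalogFamilyFormat.parseRegionInfoFromRegionName(fullRegionInfo.getRegionName());
TableName table = partial.getTable();
byte[] startKey = partial.getStartKey();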
hbase_CatalogFamilyFormat_getMetaKeyForRegion_rdh | /**
* Returns the row key to use for this regionInfo
*/
public static byte[] getMetaKeyForRegion(RegionInfo regionInfo) {
return RegionReplicaUtil.getRegionInfoForDefaultReplica(regionInfo).getRegionName();
} | 3.26 |
hbase_CatalogFamilyFormat_getRegionStateColumn_rdh | /**
* Returns the column qualifier for serialized region state
*
* @param replicaId
* the replicaId of the region
* @return a byte[] for state qualifier
*/
public static byte[] getRegionStateColumn(int replicaId) {
return replicaId == 0 ? HConstants.STATE_QUALIFIER : Bytes.toBytes((HConstants.STATE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER) + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId));
} | 3.26 |
hbase_CatalogFamilyFormat_hasMergeRegions_rdh | /**
* Returns True if any merge regions present in <code>cells</code>; i.e. the column in
* <code>cell</code> matches the regex 'info:merge.*'.
*/
public static boolean hasMergeRegions(Cell[] cells) {
for (Cell v29 : cells) {
if (isMergeQualifierPrefix(v29)) {
return true;
}
}
return false;
} | 3.26 |
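A hypothetical sketch combining hasMergeRegions with getMergeRegions above; metaRow stands for a Result scanned out of hbase:meta:
// Hypothetical: detect and load the parents of a recently merged region.
Cell[] cells = metaRow.rawCells();
if (CatalogFamilyFormat.hasMergeRegions(cells)) {
  List<RegionInfo> parents = CatalogFamilyFormat.getMergeRegions(cells);
  // parents are the regions that were merged into the region this meta row describes
}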
hbase_CatalogFamilyFormat_getServerName_rdh | /**
* Returns a {@link ServerName} from catalog table {@link Result}.
*
* @param r
* Result to pull from
* @return A ServerName instance or null if necessary fields not found or empty.
*/
@Nullable
public static ServerName getServerName(Result r, int replicaId) {
byte[] serverColumn = m0(replicaId);
Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, serverColumn);
if ((cell == null) || (cell.getValueLength() == 0)) {
return null;
}
String hostAndPort = Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
byte[] startcodeColumn = getStartCodeColumn(replicaId);
cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, startcodeColumn);
    if ((cell == null) || (cell.getValueLength() == 0)) {
return null;
}
try {
return ServerName.valueOf(hostAndPort, Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
    } catch (IllegalArgumentException e) {
f0.error((("Ignoring invalid region for server " + hostAndPort) + "; cell=") + cell, e);
return null;
}
} | 3.26 |
hbase_CatalogFamilyFormat_getServerNameColumn_rdh | /**
* Returns the column qualifier for serialized region state
*
* @param replicaId
* the replicaId of the region
* @return a byte[] for sn column qualifier
*/
public static byte[] getServerNameColumn(int replicaId) {
return replicaId == 0 ? HConstants.SERVERNAME_QUALIFIER : Bytes.toBytes((HConstants.SERVERNAME_QUALIFIER_STR + META_REPLICA_ID_DELIMITER) + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId));
} | 3.26 |
hbase_CatalogFamilyFormat_getStartCodeColumn_rdh | /**
* Returns the column qualifier for server start code column for replicaId
*
* @param replicaId
* the replicaId of the region
* @return a byte[] for server start code column qualifier
*/
public static byte[] getStartCodeColumn(int replicaId) {
    return replicaId == 0 ? HConstants.STARTCODE_QUALIFIER : Bytes.toBytes((HConstants.STARTCODE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER) + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId));
} | 3.26 |
hbase_CatalogFamilyFormat_getRegionLocation_rdh | /**
* Returns the HRegionLocation parsed from the given meta row Result for the given regionInfo and
* replicaId. The regionInfo can be the default region info for the replica.
*
* @param r
* the meta row result
* @param regionInfo
* RegionInfo for default replica
* @param replicaId
* the replicaId for the HRegionLocation
* @return HRegionLocation parsed from the given meta row Result for the given replicaId
*/
public static HRegionLocation getRegionLocation(final Result r, final RegionInfo regionInfo, final int replicaId) {
ServerName serverName = getServerName(r, replicaId);
long seqNum = getSeqNumDuringOpen(r, replicaId);
    RegionInfo replicaInfo = RegionReplicaUtil.getRegionInfoForReplica(regionInfo, replicaId);
return new HRegionLocation(replicaInfo, serverName, seqNum);
} | 3.26 |
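getRegionLocation combines getServerName and getSeqNumDuringOpen into a per-replica HRegionLocation. A usage sketch follows; replica id 1 is an arbitrary choice, and a replica with no recorded assignment comes back with a null ServerName.

static void printReplicaOne(Result r) {
  RegionInfo defaultReplica = CatalogFamilyFormat.getRegionInfo(r);
  if (defaultReplica == null) {
    return; // not a region row
  }
  HRegionLocation loc = CatalogFamilyFormat.getRegionLocation(r, defaultReplica, 1);
  System.out.println(loc.getRegion().getEncodedName() + " replica 1 on " + loc.getServerName()
    + " (seqNum=" + loc.getSeqNum() + ")");
}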
hbase_CatalogFamilyFormat_m0_rdh | /**
* Returns the column qualifier for server column for replicaId
*
* @param replicaId
* the replicaId of the region
* @return a byte[] for server column qualifier
*/
public static byte[] m0(int replicaId) {
return replicaId == 0 ? HConstants.SERVER_QUALIFIER : Bytes.toBytes((HConstants.SERVER_QUALIFIER_STR + META_REPLICA_ID_DELIMITER) + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId));
} | 3.26 |
hbase_CatalogFamilyFormat_getSeqNumDuringOpen_rdh | /**
* The latest seqnum that the server writing to meta observed when opening the region. E.g. the
 * seqNum when the result of {@link #getServerName(Result, int)} was written.
*
* @param r
* Result to pull the seqNum from
* @return SeqNum, or HConstants.NO_SEQNUM if there's no value written.
*/
  private static long getSeqNumDuringOpen(final Result r, final int replicaId) {
Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, getSeqNumColumn(replicaId));
if ((cell == null) || (cell.getValueLength() == 0)) {
return HConstants.NO_SEQNUM;
}
return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
} | 3.26 |
hbase_CatalogFamilyFormat_parseReplicaIdFromServerColumn_rdh | /**
* Parses the replicaId from the server column qualifier. See top of the class javadoc for the
* actual meta layout
*
* @param serverColumn
* the column qualifier
* @return an int for the replicaId
*/
static int parseReplicaIdFromServerColumn(byte[] serverColumn) {
String serverStr = Bytes.toString(serverColumn);
Matcher matcher = SERVER_COLUMN_PATTERN.matcher(serverStr);
if (matcher.matches() && (matcher.groupCount() > 0)) {
String group = matcher.group(1);
      if ((group != null) && (group.length() > 0)) {
return Integer.parseInt(group.substring(1), 16);
} else {
return 0;
}
}
return -1;
} | 3.26 |
hbase_CatalogFamilyFormat_getSeqNumColumn_rdh | /**
* Returns the column qualifier for seqNum column for replicaId
*
* @param replicaId
* the replicaId of the region
* @return a byte[] for seqNum column qualifier
*/
public static byte[] getSeqNumColumn(int replicaId) {
return replicaId == 0 ? HConstants.SEQNUM_QUALIFIER : Bytes.toBytes((HConstants.SEQNUM_QUALIFIER_STR + META_REPLICA_ID_DELIMITER) + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId));
} | 3.26 |
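All of the *Column helpers above share one naming scheme: replica 0 uses the bare qualifier, higher replicas append the delimiter plus a formatted replica id, and parseReplicaIdFromServerColumn reverses it. Below is a self-contained sketch of that scheme; the "_" delimiter and the zero-padded hex format are assumptions for illustration, the real values come from HConstants and RegionInfo.REPLICA_ID_FORMAT.

// Assumed format: "_" delimiter plus a "%04X"-style replica id, mirroring the code above.
static String replicaQualifier(String baseQualifier, int replicaId) {
  return replicaId == 0 ? baseQualifier : baseQualifier + "_" + String.format("%04X", replicaId);
}

// replicaQualifier("server", 0) -> "server"
// replicaQualifier("server", 2) -> "server_0002"  (parsed back to 2 via Integer.parseInt("0002", 16))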
hbase_CatalogFamilyFormat_getMergeRegionsWithName_rdh | /**
 * Returns deserialized values of <qualifier, RegionInfo> pairs taken from column values that
 * match the regex 'info:merge.*' in the array of <code>cells</code>.
 */
  @Nullable
  public static Map<String, RegionInfo> getMergeRegionsWithName(Cell[] cells) {
    if (cells == null) {
      return null;
    }
    Map<String, RegionInfo> regionsToMerge = null;
for (Cell cell : cells) {
if (!isMergeQualifierPrefix(cell)) {
continue;
}
// Ok. This cell is that of a info:merge* column.
RegionInfo ri = RegionInfo.parseFromOrNull(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
      if (ri != null) {
if (regionsToMerge == null) {
regionsToMerge = new LinkedHashMap<>();
}
regionsToMerge.put(Bytes.toString(CellUtil.cloneQualifier(cell)), ri);
}
}
return regionsToMerge;
} | 3.26 |
hbase_InnerStoreCellComparator_getInnerStoreCellComparator_rdh | /**
 * Utility method that makes a guess at the comparator to use based on the passed tableName. Use
 * only as a last resort when no comparator is specified.
*
* @return CellComparator to use going off the {@code tableName} passed.
*/
public static CellComparator getInnerStoreCellComparator(byte[] tableName) {
return Bytes.equals(tableName, TableName.META_TABLE_NAME.toBytes()) ? MetaCellComparator.META_COMPARATOR : InnerStoreCellComparator.INNER_STORE_COMPARATOR;
} | 3.26 |
hbase_VersionInfo_getSrcChecksum_rdh | /**
 * Get the checksum of the source files from which HBase was compiled.
*
* @return a string that uniquely identifies the source
*/
public static String getSrcChecksum() {
return Version.srcChecksum;
} | 3.26 |
hbase_VersionInfo_getVersion_rdh | /**
* Get the hbase version.
*
 * @return the hbase version string, e.g. "0.6.3-dev"
*/
public static String getVersion() {
    return Version.version;
} | 3.26 |
hbase_VersionInfo_getUser_rdh | /**
* The user that compiled hbase.
*
* @return the username of the user
*/
public static String getUser() {
return Version.user;
} | 3.26 |
hbase_VersionInfo_getDate_rdh | /**
* The date that hbase was compiled.
*
* @return the compilation date in unix date format
*/
public static String getDate() {
return Version.date;
} | 3.26 |
hbase_VersionInfo_getRevision_rdh | /**
* Get the subversion revision number for the root directory
*
 * @return the revision number, e.g. "451451"
*/
public static String getRevision() {
return Version.revision;
} | 3.26 |
hbase_VersionInfo_getUrl_rdh | /**
* Get the subversion URL for the root hbase directory.
*
* @return the url
*/
public static String getUrl() {
return Version.url;
} | 3.26 |
hbase_VersionInfo_getVersionComponents_rdh | /**
 * Returns the version components as String objects. Examples:
 * "1.4.3" returns ["1", "4", "3"],
 * "4.5.6-SNAPSHOT" returns ["4", "5", "6", "-1"],
 * "4.5.6-beta" returns ["4", "5", "6", "-2"],
 * "4.5.6-alpha" returns ["4", "5", "6", "-3"],
 * "4.5.6-UNKNOWN" returns ["4", "5", "6", "-4"].
 *
* @return the components of the version string
*/
private static String[] getVersionComponents(final String version) {
assert version != null;
List<String> list = Splitter.onPattern("[\\.-]").splitToList(version);
String[] strComps = list.toArray(new String[list.size()]);
assert strComps.length > 0;
String[] comps = new String[strComps.length];
for (int i = 0; i < strComps.length; ++i) {
if (StringUtils.isNumeric(strComps[i])) {
comps[i] = strComps[i];
} else if (StringUtils.isEmpty(strComps[i])) {
comps[i] = String.valueOf(VERY_LARGE_NUMBER);
} else if ("SNAPSHOT".equals(strComps[i])) {
comps[i] = "-1";
} else if ("beta".equals(strComps[i])) {
comps[i] = "-2";
} else if ("alpha".equals(strComps[i])) {
comps[i] = "-3";
} else {
comps[i] = "-4";
}
}
return comps;
} | 3.26 |
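Because SNAPSHOT, beta and alpha map to -1, -2 and -3, pre-releases of a version sort below the release itself once components are compared left to right. A hedged sketch of such a comparison; padding missing trailing components with 0 is an assumption made here for illustration, not necessarily what VersionInfo itself does.

static int compareComponents(String[] a, String[] b) {
  int len = Math.max(a.length, b.length);
  for (int i = 0; i < len; i++) {
    long x = (i < a.length) ? Long.parseLong(a[i]) : 0; // missing component treated as 0 (assumption)
    long y = (i < b.length) ? Long.parseLong(b[i]) : 0;
    if (x != y) {
      return (x < y) ? -1 : 1;
    }
  }
  return 0;
}
// compareComponents for ["2","5","0","-1"] vs ["2","5","0"] is negative,
// i.e. 2.5.0-SNAPSHOT sorts before 2.5.0.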
hbase_CacheConfig_isInMemory_rdh | /**
* Returns true if blocks in this file should be flagged as in-memory
*/
public boolean isInMemory() {
return this.inMemory;
} | 3.26 |
hbase_CacheConfig_enableCacheOnWrite_rdh | /**
 * Enable cache on write, including: cacheDataOnWrite, cacheIndexesOnWrite and cacheBloomsOnWrite.
*/
public void enableCacheOnWrite() {
this.cacheDataOnWrite = true;
this.cacheIndexesOnWrite = true;
this.cacheBloomsOnWrite = true;
} | 3.26 |
hbase_CacheConfig_shouldCacheCompressed_rdh | /**
* Returns true if this {@link BlockCategory} should be compressed in blockcache, false otherwise
*/
public boolean shouldCacheCompressed(BlockCategory category) {
switch (category) {
case DATA :
return this.cacheDataOnRead && this.cacheDataCompressed;
default :
return false;
}
} | 3.26 |
hbase_CacheConfig_shouldCacheDataOnRead_rdh | /**
* Returns whether the DATA blocks of this HFile should be cached on read or not (we always cache
* the meta blocks, the INDEX and BLOOM blocks).
*
* @return true if blocks should be cached on read, false if not
*/
public boolean shouldCacheDataOnRead() {
    return cacheDataOnRead;
} | 3.26 |
hbase_CacheConfig_setCacheDataOnWrite_rdh | /**
*
* @param cacheDataOnWrite
* whether data blocks should be written to the cache when an HFile is
* written
*/
public void setCacheDataOnWrite(boolean cacheDataOnWrite) {
this.cacheDataOnWrite = cacheDataOnWrite;
} | 3.26 |
hbase_CacheConfig_getCacheCompactedBlocksOnWriteThreshold_rdh | /**
* Returns total file size in bytes threshold for caching while writing during compaction
*/
  public long getCacheCompactedBlocksOnWriteThreshold() {
    return this.cacheCompactedDataOnWriteThreshold;
} | 3.26 |
hbase_CacheConfig_shouldReadBlockFromCache_rdh | /**
* Return true if we may find this type of block in block cache.
* <p>
* TODO: today {@code family.isBlockCacheEnabled()} only means {@code cacheDataOnRead}, so here we
* consider lots of other configurations such as {@code cacheDataOnWrite}. We should fix this in
* the future, {@code cacheDataOnWrite} should honor the CF level {@code isBlockCacheEnabled}
* configuration.
*/
public boolean shouldReadBlockFromCache(BlockType blockType) {
if (cacheDataOnRead) {
return true;
}
if (prefetchOnOpen) {
return true;
}
if (cacheDataOnWrite) {
return true;
}
if (blockType == null) {
return true;
    }
    if ((blockType.getCategory() == BlockCategory.BLOOM)
      || (blockType.getCategory() == BlockCategory.INDEX)) {
return true;
}
return false;
} | 3.26 |
hbase_CacheConfig_setEvictOnClose_rdh | /**
* Only used for testing.
*
* @param evictOnClose
* whether blocks should be evicted from the cache when an HFile reader is
* closed
*/
  public void setEvictOnClose(boolean evictOnClose) {
    this.evictOnClose = evictOnClose;
} | 3.26 |
hbase_CacheConfig_m0_rdh | /**
* Should we cache a block of a particular category? We always cache important blocks such as
* index blocks, as long as the block cache is available.
*/
public boolean m0(BlockCategory category) {
return ((cacheDataOnRead || (category == BlockCategory.INDEX)) || (category == BlockCategory.BLOOM)) || (prefetchOnOpen && ((category != BlockCategory.META) && (category != BlockCategory.UNKNOWN)));
} | 3.26 |
hbase_CacheConfig_shouldCacheDataCompressed_rdh | /**
* Returns true if data blocks should be compressed in the cache, false if not
*/
public boolean shouldCacheDataCompressed() {
return this.cacheDataOnRead && this.cacheDataCompressed;
} | 3.26 |
hbase_CacheConfig_shouldPrefetchOnOpen_rdh | /**
* Returns true if blocks should be prefetched into the cache on open, false if not
*/
public boolean shouldPrefetchOnOpen() {
return this.prefetchOnOpen;
} | 3.26 |
hbase_CacheConfig_getBlockCache_rdh | /**
* Returns the block cache.
*
* @return the block cache, or null if caching is completely disabled
*/
public Optional<BlockCache> getBlockCache() {
return Optional.ofNullable(this.blockCache);
} | 3.26 |
hbase_CacheConfig_shouldCacheCompactedBlocksOnWrite_rdh | /**
* Returns true if blocks should be cached while writing during compaction, false if not
*/
public boolean shouldCacheCompactedBlocksOnWrite() {
return this.cacheCompactedDataOnWrite;
} | 3.26 |
hbase_FullTableBackupClient_execute_rdh | /**
* Backup request execution.
*
* @throws IOException
* if the execution of the backup fails
*/
@Override
  public void execute() throws IOException {
    try (Admin admin = conn.getAdmin()) {
// Begin BACKUP
beginBackup(backupManager, backupInfo);
String savedStartCode;
boolean firstBackup;
// do snapshot for full table backup
savedStartCode = backupManager.readBackupStartCode();
firstBackup = (savedStartCode == null) || (Long.parseLong(savedStartCode) == 0L);
if (firstBackup) {
// This is our first backup. Let's put some marker to system table so that we can hold the
// logs while we do the backup.
backupManager.writeBackupStartCode(0L);
}
// We roll log here before we do the snapshot. It is possible there is duplicate data
// in the log that is already in the snapshot. But if we do it after the snapshot, we
// could have data loss.
// A better approach is to do the roll log on each RS in the same global procedure as
// the snapshot.
f0.info("Execute roll log procedure for full backup ...");
Map<String, String> props = new HashMap<>();
props.put("backupRoot", backupInfo.getBackupRootDir());
admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE, LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props);
      newTimestamps = backupManager.readRegionServerLastLogRollResult();
// SNAPSHOT_TABLES:
backupInfo.setPhase(BackupPhase.SNAPSHOT);
for (TableName tableName : tableList) {
        String snapshotName = (((("snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()))
          + "_") + tableName.getNamespaceAsString()) + "_") + tableName.getQualifierAsString();
        snapshotTable(admin, tableName, snapshotName);
        backupInfo.setSnapshotName(tableName, snapshotName);
}
// SNAPSHOT_COPY:
// do snapshot copy
f0.debug("snapshot copy for " + backupId);
snapshotCopy(backupInfo);
// Updates incremental backup table set
backupManager.addIncrementalBackupTableSet(backupInfo.getTables());
// BACKUP_COMPLETE:
// set overall backup status: complete. Here we make sure to complete the backup.
// After this checkpoint, even if entering cancel process, will let the backup finished
backupInfo.setState(BackupState.COMPLETE);
// The table list in backupInfo is good for both full backup and incremental backup.
// For incremental backup, it contains the incremental backup table set.
backupManager.writeRegionServerLogTimestamp(backupInfo.getTables(), newTimestamps);
Map<TableName, Map<String, Long>> newTableSetTimestampMap = backupManager.readLogTimestampMap();
backupInfo.setTableSetTimestampMap(newTableSetTimestampMap);
Long newStartCode = BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap));
backupManager.writeBackupStartCode(newStartCode);
// backup complete
completeBackup(conn, backupInfo, backupManager, BackupType.FULL, conf);
} catch (Exception e) {
failBackup(conn, backupInfo, backupManager, e, "Unexpected BackupException : ",
BackupType.FULL, conf);
throw new IOException(e);
}
} | 3.26 |
hbase_FullTableBackupClient_snapshotCopy_rdh | /**
* Do snapshot copy.
*
* @param backupInfo
* backup info
* @throws Exception
* exception
*/
protected void snapshotCopy(BackupInfo backupInfo) throws Exception {
    f0.info("Snapshot copy is starting.");
    // set overall backup phase: snapshot_copy
    backupInfo.setPhase(BackupPhase.SNAPSHOTCOPY);
    // call ExportSnapshot to copy files based on hbase snapshot for backup
    // ExportSnapshot only supports a single snapshot per export, so loop for the multiple-table case
    BackupCopyJob copyService = BackupRestoreFactory.getBackupCopyJob(conf);
    // number of snapshots matches number of tables
    float numOfSnapshots = backupInfo.getSnapshotNames().size();
    f0.debug(("There are " + ((int) (numOfSnapshots))) + " snapshots to be copied.");
    for (TableName table : backupInfo.getTables()) {
      // Currently we simply set the sub copy tasks by counting the table snapshot number, we can
      // calculate the real files' size for the percentage in the future.
      // backupCopier.setSubTaskPercntgInWholeTask(1f / numOfSnapshots);
      int res;
      ArrayList<String> argsList = new ArrayList<>();
      argsList.add("-snapshot");
      argsList.add(backupInfo.getSnapshotName(table));
      argsList.add("-copy-to");
      argsList.add(backupInfo.getTableBackupDir(table));
      if (backupInfo.getBandwidth() > (-1)) {
        argsList.add("-bandwidth");
        argsList.add(String.valueOf(backupInfo.getBandwidth()));
      }
      if (backupInfo.getWorkers() > (-1)) {
        argsList.add("-mappers");
        argsList.add(String.valueOf(backupInfo.getWorkers()));
      }
      String[] args = argsList.toArray(new String[0]);
      String jobname = (("Full-Backup_" + backupInfo.getBackupId()) + "_") + table.getNameAsString();
      if (f0.isDebugEnabled()) {
        f0.debug("Setting snapshot copy job name to : " + jobname);
      }
      conf.set(JOB_NAME_CONF_KEY, jobname);
      f0.debug((("Copy snapshot " + args[1]) + " to ") + args[3]);
      res = copyService.copy(backupInfo, backupManager, conf, BackupType.FULL, args);
      // if one snapshot export failed, do not continue for the remaining snapshots
      if (res != 0) {
        f0.error(((("Exporting Snapshot " + args[1]) + " failed with return code: ") + res) + ".");
        throw new IOException((((("Failed to export snapshot " + args[1]) + " to ") + args[3])
          + " with reason code ") + res);
      }
      conf.unset(JOB_NAME_CONF_KEY);
      f0.info(("Snapshot copy " + args[1]) + " finished.");
    }
} | 3.26 |
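For a single table, the argument vector handed to the copy job above ends up looking like the following; every value here is a placeholder for illustration only, the real ones come from BackupInfo.

String[] args = {
  "-snapshot", "snapshot_1700000000000_default_mytable",                           // backupInfo.getSnapshotName(table)
  "-copy-to", "hdfs://nn:8020/backup_root/backup_1700000000001/default/mytable/",  // backupInfo.getTableBackupDir(table)
  "-bandwidth", "100",                                                             // only when a bandwidth limit is set
  "-mappers", "4"                                                                  // only when a worker count is set
};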
hbase_KeyValue_getTypeByte_rdh | /**
* Return the KeyValue.TYPE byte representation
*/
  byte getTypeByte(int keyLength) {
    return this.bytes[((this.offset + keyLength) - 1) + ROW_OFFSET];
} | 3.26 |
hbase_KeyValue_toString_rdh | // ---------------------------------------------------------------------------
//
// String representation
//
// ---------------------------------------------------------------------------
@Override
  public String toString() {
    if ((this.bytes == null) || (this.bytes.length == 0)) {
      return "empty";
    }
    return (((keyToString(this.bytes, this.offset + ROW_OFFSET, getKeyLength()) + "/vlen=") + getValueLength()) + "/seqid=") + seqId;
} | 3.26 |
hbase_KeyValue_getSequenceId_rdh | /**
* Here be dragons *
*/
/**
* used to achieve atomic operations in the memstore.
*/
@Override
public long getSequenceId() {
return seqId;
} | 3.26 |
hbase_KeyValue_getDelimiterInReverse_rdh | /**
* Find index of passed delimiter walking from end of buffer backwards.
*
* @param b
* the kv serialized byte[] to process
* @param offset
* the offset in the byte[]
* @param length
* the length in the byte[]
* @param delimiter
 * input delimiter to fetch index from end
* @return Index of delimiter
*/
public static int getDelimiterInReverse(final byte[] b, final int offset, final int length, final int delimiter) {
    if (b == null) {
throw new IllegalArgumentException("Passed buffer is null");
}
int result = -1;
for (int i = (offset + length) - 1; i >= offset; i--) {
      if (b[i] == delimiter) {
result = i;
break;
}
}
return result;
}
/**
* A {@link KVComparator} for <code>hbase:meta</code> catalog table {@link KeyValue}s.
*
* @deprecated : {@link MetaCellComparator#META_COMPARATOR} | 3.26 |
hbase_KeyValue_getKeyOffset_rdh | /**
 * Returns the key offset in the backing buffer.
*/
public int getKeyOffset() {
return this.offset + ROW_OFFSET;
} | 3.26 |
hbase_KeyValue_isValidType_rdh | /**
* True to indicate that the byte b is a valid type.
*
* @param b
* byte to check
* @return true or false
 */
  static boolean isValidType(byte b) {
return codeArray[b & 0xff] != null;
} | 3.26 |
hbase_KeyValue_getQualifierLength_rdh | /**
* Returns Qualifier length
*/
int getQualifierLength(int keyLength, int rlength, int flength) {
return keyLength - ((int) (getKeyDataStructureSize(rlength, flength, 0)));
} | 3.26 |
hbase_KeyValue_isLatestTimestamp_rdh | /**
* Returns True if this KeyValue has a LATEST_TIMESTAMP timestamp.
*/
public boolean isLatestTimestamp() {
return Bytes.equals(getBuffer(), getTimestampOffset(), Bytes.SIZEOF_LONG, HConstants.LATEST_TIMESTAMP_BYTES, 0, Bytes.SIZEOF_LONG);
} | 3.26 |
hbase_KeyValue_createByteArray_rdh | /**
*
* @param qualifier
* can be a ByteBuffer or a byte[], or null.
* @param value
* can be a ByteBuffer or a byte[], or null.
*/
  private static byte[] createByteArray(final byte[] row, final int roffset, final int rlength,
    final byte[] family, final int foffset, int flength, final Object qualifier, final int qoffset,
    int qlength, final long timestamp, final Type type, final Object value, final int voffset,
    int vlength, List<Tag> tags) {
    checkParameters(row, rlength, family, flength, qlength, vlength);
    // Calculate length of tags area
    int tagsLength = 0;
    if ((tags != null) && (!tags.isEmpty())) {
      for (Tag t : tags) {
        tagsLength += t.getValueLength() + Tag.INFRASTRUCTURE_SIZE;
      }
    }
    RawCell.checkForTagsLength(tagsLength);
    // Allocate right-sized byte array.
    int keyLength = ((int) (getKeyDataStructureSize(rlength, flength, qlength)));
    byte[] bytes =
      new byte[((int) (getKeyValueDataStructureSize(rlength, flength, qlength, vlength, tagsLength)))];
    // Write key, value and key row length.
    int pos = 0;
    pos = Bytes.putInt(bytes, pos, keyLength);
    pos = Bytes.putInt(bytes, pos, vlength);
    pos = Bytes.putShort(bytes, pos, ((short) (rlength & 0xffff)));
    pos = Bytes.putBytes(bytes, pos, row, roffset, rlength);
    pos = Bytes.putByte(bytes, pos, ((byte) (flength & 0xff)));
    if (flength != 0) {
      pos = Bytes.putBytes(bytes, pos, family, foffset, flength);
    }
    if (qlength > 0) {
      if (qualifier instanceof ByteBuffer) {
        pos = Bytes.putByteBuffer(bytes, pos, ((ByteBuffer) (qualifier)));
      } else {
        pos = Bytes.putBytes(bytes, pos, ((byte[]) (qualifier)), qoffset, qlength);
      }
    }
    pos = Bytes.putLong(bytes, pos, timestamp);
    pos = Bytes.putByte(bytes, pos, type.getCode());
    if (vlength > 0) {
      if (value instanceof ByteBuffer) {
        pos = Bytes.putByteBuffer(bytes, pos, ((ByteBuffer) (value)));
      } else {
        pos = Bytes.putBytes(bytes, pos, ((byte[]) (value)), voffset, vlength);
      }
    }
    // Add the tags after the value part
    if (tagsLength > 0) {
      pos = Bytes.putAsShort(bytes, pos, tagsLength);
      for (Tag tag : tags) {
        int tlen = tag.getValueLength();
        pos = Bytes.putAsShort(bytes, pos, tlen + Tag.TYPE_LENGTH_SIZE);
        pos = Bytes.putByte(bytes, pos, tag.getType());
        Tag.copyValueTo(tag, bytes, pos);
        pos += tlen;
      }
    }
    return bytes;
} | 3.26 |
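Read straight off the code above, the serialized KeyValue layout produced by createByteArray is:

// keyLength     : 4 bytes (int)
// valueLength   : 4 bytes (int)
// rowLength     : 2 bytes (short)      \
// row           : rowLength bytes       |
// familyLength  : 1 byte                |
// family        : familyLength bytes    |  the "key" portion, keyLength bytes in total
// qualifier     : remaining key bytes   |
// timestamp     : 8 bytes (long)        |
// type          : 1 byte (Type code)   /
// value         : valueLength bytes
// tagsLength    : 2 bytes (written only when tags are present)
// tags          : per tag: 2-byte length, 1-byte type, then the tag value bytes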
hbase_KeyValue_m4_rdh | /**
* Update the timestamp.
*
* @param now
* Time to set into <code>this</code> IFF timestamp ==
 * {@link HConstants#LATEST_TIMESTAMP} (else, it's a no-op).
 * @return True if we modified this.
*/
  public boolean m4(final byte[] now) {
    if (this.isLatestTimestamp()) {
int tsOffset = getTimestampOffset();
System.arraycopy(now, 0, this.bytes, tsOffset, Bytes.SIZEOF_LONG);
// clear cache or else getTimestamp() possibly returns an old value
return true;
}
return false;
} | 3.26 |
hbase_KeyValue_compareFlatKey_rdh | /**
* Compares left to right assuming that left,loffset,llength and right,roffset,rlength are full
* KVs laid out in a flat byte[]s.
*
* @param left
* the left kv serialized byte[] to be compared with
* @param loffset
* the offset in the left byte[]
* @param llength
* the length in the left byte[]
* @param right
* the right kv serialized byte[] to be compared with
* @param roffset
* the offset in the right byte[]
* @param rlength
* the length in the right byte[]
* @return 0 if equal, <0 if left smaller, >0 if right smaller
*/
public int compareFlatKey(byte[] left, int loffset, int llength, byte[] right, int roffset, int rlength) {
// Compare row
short lrowlength = Bytes.toShort(left, loffset);
short rrowlength = Bytes.toShort(right, roffset);
    int compare = compareRows(left, loffset + Bytes.SIZEOF_SHORT, lrowlength, right,
      roffset + Bytes.SIZEOF_SHORT, rrowlength);
if (compare != 0) {
return compare;
}
// Compare the rest of the two KVs without making any assumptions about
// the common prefix. This function will not compare rows anyway, so we
// don't need to tell it that the common prefix includes the row.
return compareWithoutRow(0, left, loffset, llength, right, roffset, rlength, rrowlength);
} | 3.26 |
hbase_KeyValue_matchingRowColumn_rdh | /**
* Compares the row and column of two keyvalues for equality
*
* @param left
* left cell to compare row and column
* @param right
* right cell to compare row and column
* @return True if same row and column.
*/
public boolean matchingRowColumn(final Cell left, final Cell right) {
short lrowlength = left.getRowLength();
short rrowlength = right.getRowLength();
// TsOffset = end of column data. just comparing Row+CF length of each
if (((left.getRowLength() + left.getFamilyLength()) + left.getQualifierLength()) != ((right.getRowLength() + right.getFamilyLength()) + right.getQualifierLength())) {
return false;
}
if (!matchingRows(left, lrowlength, right, rrowlength)) {
return false;
    }
    int lfoffset = left.getFamilyOffset();
int rfoffset = right.getFamilyOffset();
int lclength = left.getQualifierLength();
int rclength = right.getQualifierLength();
int lfamilylength = left.getFamilyLength();
    int rfamilylength = right.getFamilyLength();
int diff = compareFamilies(left.getFamilyArray(), lfoffset, lfamilylength, right.getFamilyArray(), rfoffset, rfamilylength);
if (diff != 0) {
return false;
} else {
diff = compareColumns(left.getQualifierArray(), left.getQualifierOffset(), lclength, right.getQualifierArray(), right.getQualifierOffset(), rclength);
return diff == 0;
}
} | 3.26 |
hbase_KeyValue_getRowArray_rdh | /**
* Returns the backing array of the entire KeyValue (all KeyValue fields are in a single array)
*/
@Override
public byte[] getRowArray() {
return bytes;
} | 3.26 |
hbase_KeyValue_compareRows_rdh | /**
* Get the b[],o,l for left and right rowkey portions and compare.
*
* @param left
* the left kv serialized byte[] to be compared with
* @param loffset
* the offset in the left byte[]
* @param llength
* the length in the left byte[]
* @param right
* the right kv serialized byte[] to be compared with
* @param roffset
* the offset in the right byte[]
* @param rlength
* the length in the right byte[]
* @return 0 if equal, <0 if left smaller, >0 if right smaller
*/
public int compareRows(byte[] left, int loffset, int llength, byte[] right, int roffset, int rlength) {
return Bytes.compareTo(left, loffset, llength, right, roffset, rlength);
} | 3.26 |
hbase_KeyValue_getValueArray_rdh | /**
* Returns the backing array of the entire KeyValue (all KeyValue fields are in a single array)
*/
@Override
public byte[] getValueArray() {
return bytes;
} | 3.26 |
hbase_KeyValue_getTimestampOffset_rdh | /**
* Return the timestamp offset
*/
private int getTimestampOffset(final int keylength) {
return (getKeyOffset() + keylength) - TIMESTAMP_TYPE_SIZE;
} | 3.26 |
hbase_KeyValue_getDelimiter_rdh | /**
* Find index of passed delimiter walking from start of buffer forwards.
*
* @param b
* the kv serialized byte[] to process
* @param delimiter
 * input delimiter to fetch index from start
* @return Index of delimiter having started from start of <code>b</code> moving rightward.
*/
public static int getDelimiter(final byte[] b, int offset, final int length, final int delimiter) {
if (b == null) {
      throw new IllegalArgumentException("Passed buffer is null");
}
int result = -1;
for (int i = offset; i < (length + offset); i++) {
if (b[i] == delimiter) {
result = i;
break;
      }
    }
return result;
} | 3.26 |
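A tiny usage sketch for getDelimiter and its reverse counterpart shown earlier; the region-name-like string is just illustrative data.

byte[] name = Bytes.toBytes("t1,row-17,1700000000000.0123456789abcdef0123456789abcdef.");
int first = KeyValue.getDelimiter(name, 0, name.length, ',');          // index of the first ','
int last = KeyValue.getDelimiterInReverse(name, 0, name.length, ','); // index of the last ','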
hbase_KeyValue_getRowOffset_rdh | /**
* Returns Row offset
*/
@Override
public int getRowOffset() {
return this.offset + ROW_KEY_OFFSET;
} | 3.26 |
hbase_KeyValue_compareIgnoringPrefix_rdh | /**
* Overridden
*
* @param commonPrefix
* location of expected common prefix
* @param left
* the left kv serialized byte[] to be compared with
* @param loffset
* the offset in the left byte[]
* @param llength
* the length in the left byte[]
* @param right
* the right kv serialized byte[] to be compared with
* @param roffset
* the offset in the byte[]
* @param rlength
* the length in the right byte[]
* @return 0 if equal, <0 if left smaller, >0 if right smaller
*/
// SamePrefixComparator
@Override
  public int compareIgnoringPrefix(int commonPrefix, byte[] left, int loffset, int llength,
    byte[] right, int roffset, int rlength) {
    // Compare row
    short lrowlength = Bytes.toShort(left, loffset);
    short rrowlength;
    int comparisonResult = 0;
    if (commonPrefix < ROW_LENGTH_SIZE) {
      // almost nothing in common
      rrowlength = Bytes.toShort(right, roffset);
      comparisonResult = compareRows(left, loffset + ROW_LENGTH_SIZE, lrowlength, right,
        roffset + ROW_LENGTH_SIZE, rrowlength);
    } else {
      // the row length is the same
      rrowlength = lrowlength;
      if (commonPrefix < (ROW_LENGTH_SIZE + rrowlength)) {
        // The rows are not the same. Exclude the common prefix and compare
        // the rest of the two rows.
        int common = commonPrefix - ROW_LENGTH_SIZE;
        comparisonResult = compareRows(left, (loffset + common) + ROW_LENGTH_SIZE,
          lrowlength - common, right, (roffset + common) + ROW_LENGTH_SIZE, rrowlength - common);
      }
    }
    if (comparisonResult != 0) {
      return comparisonResult;
    }
    assert lrowlength == rrowlength;
    return compareWithoutRow(commonPrefix, left, loffset, llength, right, roffset, rlength, lrowlength);
} | 3.26 |
hbase_KeyValue_getLength_rdh | // ---------------------------------------------------------------------------
//
// Length and Offset Calculators
//
// ---------------------------------------------------------------------------
/**
* Determines the total length of the KeyValue stored in the specified byte array and offset.
* Includes all headers.
*
* @param bytes
* byte array
* @param offset
* offset to start of the KeyValue
* @return length of entire KeyValue, in bytes
*/
private static int getLength(byte[] bytes, int offset) {
int klength = ROW_OFFSET + Bytes.toInt(bytes, offset);
int vlength = Bytes.toInt(bytes, offset + Bytes.SIZEOF_INT);
return klength + vlength;
} | 3.26 |
hbase_KeyValue_getValueOffset_rdh | /**
* Returns the value offset
*/
@Override
public int getValueOffset() {
int voffset = getKeyOffset() + getKeyLength();
return voffset;
} | 3.26 |
hbase_KeyValue_hashCode_rdh | /**
* In line with {@link #equals(Object)}, only uses the key portion, not the value.
 */
  @Override
public int hashCode() {
return calculateHashForKey(this);
} | 3.26 |