name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
hbase_ServerCommandLine_logHBaseConfigs_rdh | /**
* Print into log some of the important hbase attributes.
*/
private static void logHBaseConfigs(Configuration conf) {
final String[] keys = new String[]{ // Expand this list as you see fit.
"hbase.tmp.dir", HConstants.HBASE_DIR, HConstants.CLUSTER_DISTRIBUTED, HConstants.ZOOKEEPER_QUORUM };
for (String key : keys) {
LOG.info((key + ": ") + conf.get(key));
}
} | 3.26 |
hbase_ServerCommandLine_logJVMInfo_rdh | /**
* Log information about the currently running JVM.
*/
public static void logJVMInfo() {
// Print out vm stats before starting up.
RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean();
if (runtime != null) {
LOG.info((((("vmName=" + runtime.getVmName()) +
", vmVendor=")
+
runtime.getVmVendor()) + ", vmVersion=") + runtime.getVmVersion());
LOG.info("vmInputArguments=" + runtime.getInputArguments());
}
} | 3.26 |
hbase_BrokenStoreFileCleaner_isCompactedFile_rdh | // Compacted files can still have readers and are cleaned by a separate chore, so they have to
// be skipped here
private boolean isCompactedFile(FileStatus file, HStore store) {
return store.getStoreEngine().getStoreFileManager().getCompactedfiles().stream().anyMatch(sf -> sf.getPath().equals(file.getPath()));
} | 3.26 |
hbase_NamespacesInstanceResource_post_rdh | /**
* Build a response for POST create namespace with properties specified.
*
* @param model
* properties used for create.
* @param uriInfo
* (JAX-RS context variable) request URL
* @return response code.
*/
@POST
@Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF })
public Response post(final NamespacesInstanceModel model, @Context final UriInfo uriInfo) {
return processUpdate(model, false, uriInfo);
} | 3.26 |
hbase_NamespacesInstanceResource_createOrUpdate_rdh | // Do the actual namespace create or alter.
private Response createOrUpdate(final NamespacesInstanceModel model, final UriInfo uriInfo, final Admin admin, final boolean updateExisting) {
NamespaceDescriptor.Builder builder = NamespaceDescriptor.create(namespace);
if (model.getProperties().size() > 0) {
builder.addConfiguration(model.getProperties());
}
NamespaceDescriptor nsd = builder.build();
try {
if (updateExisting) {
admin.modifyNamespace(nsd);
} else {
admin.createNamespace(nsd);
}
} catch (IOException e) {
servlet.getMetrics().incrementFailedPutRequests(1);
return processException(e);
}
servlet.getMetrics().incrementSucessfulPutRequests(1);
return updateExisting ? Response.ok(uriInfo.getAbsolutePath()).build()
: Response.created(uriInfo.getAbsolutePath()).build();
} | 3.26 |
hbase_NamespacesInstanceResource_deleteNoBody_rdh | /**
* Build a response for DELETE delete namespace.
*
* @param message
* value not used.
* @param headers
* value not used.
* @return response code.
*/
@DELETE
public Response deleteNoBody(final byte[] message, @Context final UriInfo uriInfo,
@Context final HttpHeaders headers) {
if (LOG.isTraceEnabled()) {
LOG.trace("DELETE " + uriInfo.getAbsolutePath());
}
if (servlet.isReadOnly()) {
servlet.getMetrics().incrementFailedDeleteRequests(1);
return Response.status(Status.FORBIDDEN).type(MIMETYPE_TEXT).entity("Forbidden" + CRLF).build();
}
try {
Admin admin = servlet.getAdmin();
if (!doesNamespaceExist(admin, namespace)) {
return Response.status(Status.NOT_FOUND).type(MIMETYPE_TEXT).entity("Namespace '" + namespace + "' does not exist. Cannot drop namespace.").build();
}
admin.deleteNamespace(namespace);
servlet.getMetrics().incrementSucessfulDeleteRequests(1);
return Response.ok().build();
} catch (IOException e) {
servlet.getMetrics().incrementFailedDeleteRequests(1);
return processException(e);
}
} | 3.26 |
hbase_NamespacesInstanceResource_processUpdate_rdh | // Check that POST or PUT is valid and then update namespace.
private Response processUpdate(NamespacesInstanceModel model, final boolean updateExisting, final UriInfo uriInfo) {
if (LOG.isTraceEnabled()) {
LOG.trace((updateExisting ? "PUT " : "POST ") + uriInfo.getAbsolutePath());
}
if (model == null) {
try {
model = new NamespacesInstanceModel(namespace);
} catch (IOException ioe) {
servlet.getMetrics().incrementFailedPutRequests(1);
throw new RuntimeException(("Cannot retrieve info for '" + namespace) + "'.");}
}
servlet.getMetrics().incrementRequests(1);
if (servlet.isReadOnly()) {
servlet.getMetrics().incrementFailedPutRequests(1);
return Response.status(Status.FORBIDDEN).type(MIMETYPE_TEXT).entity("Forbidden" +
CRLF).build();
}
Admin admin = null;
boolean namespaceExists = false;
try {
admin = servlet.getAdmin();
namespaceExists = doesNamespaceExist(admin, namespace);
} catch (IOException e) {
servlet.getMetrics().incrementFailedPutRequests(1);
return processException(e);
}
// Do not allow creation if namespace already exists.
if ((!updateExisting) && namespaceExists) {
servlet.getMetrics().incrementFailedPutRequests(1);
return Response.status(Status.FORBIDDEN).type(MIMETYPE_TEXT).entity((("Namespace '" + namespace) + "' already exists. Use REST PUT ") + "to alter the existing namespace.").build();
}
// Do not allow altering if namespace does not exist.
if (updateExisting && (!namespaceExists)) {
servlet.getMetrics().incrementFailedPutRequests(1);
return Response.status(Status.FORBIDDEN).type(MIMETYPE_TEXT).entity((("Namespace '" + namespace) + "' does not exist. Use ") + "REST POST to create the namespace.").build();
}
return createOrUpdate(model, uriInfo, admin, updateExisting);
} | 3.26 |
hbase_NamespacesInstanceResource_getNamespaceInstanceResource_rdh | /**
* Dispatch to NamespaceInstanceResource for getting list of tables.
*/
@Path("tables")
public NamespacesInstanceResource getNamespaceInstanceResource(@PathParam("tables")
final String namespace) throws IOException {
return new NamespacesInstanceResource(this.namespace, true);
} | 3.26 |
hbase_CachedClusterId_setClusterId_rdh | /**
* Succeeds only once, when setting to a non-null value. Overwrites are not allowed.
*/
private void setClusterId(ClusterId id) {
if ((id == null) || isClusterIdSet.get()) {
return;
}
clusterId = id;
isClusterIdSet.set(true);
} | 3.26 |
hbase_CachedClusterId_getClusterId_rdh | /**
* Returns a cached copy of the cluster ID. null if the cache is not populated.
*/
private String getClusterId() {
if (!isClusterIdSet.get()) {
return null;
}
// It is ok to read without a lock since clusterId is immutable once set.
return clusterId.toString();
} | 3.26 |
hbase_HFileArchiveUtil_getTableArchivePath_rdh | /**
* Get the path to the table archive directory based on the configured archive directory.
* <p>
* It is assumed that the table has already been archived.
*
* @param conf
* {@link Configuration} to read the archive directory property. Can be null
* @param tableName
* Name of the table to be archived. Cannot be null.
* @return {@link Path} to the archive directory for the table
*/
public static Path getTableArchivePath(final Configuration conf, final TableName tableName) throws IOException {
return CommonFSUtils.getTableDir(getArchivePath(conf), tableName);
} | 3.26 |
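For orientation, a worked example of the shape of the path returned above, assuming the default archive directory name ("archive") and the standard data layout; the root dir and table name are made up:

hbase.rootdir = hdfs://nn/hbase
tableName     = ns:tbl
getTableArchivePath(conf, tableName) -> hdfs://nn/hbase/archive/data/ns/tbl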
hbase_HFileArchiveUtil_getTableName_rdh | /* @return table name given archive file path */
public static TableName getTableName(Path archivePath) {
Path p = archivePath;
String tbl = null;
// namespace is the 4th parent of file
for (int i = 0; i < 5; i++) {
if (p == null)
return null;
if (i == 3)
tbl = p.getName();
p = p.getParent();
}
if (p == null)
return null;
return TableName.valueOf(p.getName(), tbl);
} | 3.26 |
hbase_HFileArchiveUtil_getArchivePath_rdh | /**
* Get the full path to the archive directory on the configured
* {@link org.apache.hadoop.hbase.master.MasterFileSystem}
*
* @param conf
* to look for archive directory name and root directory. Cannot be null. Notes for
* testing: requires a FileSystem root directory to be specified.
* @return the full {@link Path} to the archive directory, as defined by the configuration
* @throws IOException
* if an unexpected error occurs
*/
public static Path getArchivePath(Configuration conf) throws IOException {
return getArchivePath(CommonFSUtils.getRootDir(conf));
}
/**
* Get the full path to the archive directory on the configured
* {@link org.apache.hadoop.hbase.master.MasterFileSystem}
*
* @param rootdir
* {@link Path} to the root directory where hbase files are stored (for building
* the archive path)
* @return the full {@link Path} | 3.26 |
hbase_ShutdownHook_install_rdh | /**
* Install a shutdown hook that calls stop on the passed Stoppable and then thread joins against
* the passed <code>threadToJoin</code>. When this thread completes, it then runs the hdfs thread
* (This install removes the hdfs shutdown hook keeping a handle on it to run it after
* <code>threadToJoin</code> has stopped).
* <p>
* To suppress all shutdown hook handling -- both the running of the regionserver hook and of the
* hdfs hook code -- set {@link ShutdownHook#RUN_SHUTDOWN_HOOK} in {@link Configuration} to
* <code>false</code>. This configuration value is checked when the hook code runs.
*
* @param fs
* Instance of Filesystem used by the RegionServer
* @param stop
* Installed shutdown hook will call stop against this passed
* <code>Stoppable</code> instance.
* @param threadToJoin
* After calling stop on <code>stop</code> will then join this thread.
*/
public static void install(final Configuration conf, final FileSystem fs, final Stoppable stop, final Thread threadToJoin)
{
Runnable fsShutdownHook = suppressHdfsShutdownHook(fs);
Thread t = new ShutdownHookThread(conf, stop, threadToJoin, fsShutdownHook);
ShutdownHookManager.affixShutdownHook(t, 0);
LOG.debug("Installed shutdown hook thread: " + t.getName());
} | 3.26 |
hbase_ShutdownHook_main_rdh | /**
* Main to test basic functionality. Run with clean hadoop 0.20 and hadoop 0.21 and cloudera
* patched hadoop to make sure our shutdown hook handling works for all combinations. Pass
* '-Dhbase.shutdown.hook=false' to test turning off the running of shutdown hooks.
*/
public static void main(final String[] args) throws IOException {
Configuration conf = HBaseConfiguration.create();
String prop = System.getProperty(RUN_SHUTDOWN_HOOK);
if (prop != null) {
conf.setBoolean(RUN_SHUTDOWN_HOOK, Boolean.parseBoolean(prop));
}
// Instantiate a FileSystem. This will register the fs shutdown hook.
FileSystem fs = FileSystem.get(conf);
Thread donothing = new DoNothingThread();
donothing.start();
ShutdownHook.install(conf, fs, new DoNothingStoppable(), donothing);
} | 3.26 |
hbase_ReplicationSyncUp_listRegionServers_rdh | // Find region servers under wal directory
// Here we only care about the region servers which may still be alive, as we need to add
// replications for them if missing. The dead region servers which have already been processed
// fully do not need to add their replication queues again, as the operation has already been done
// in SCP.
private Set<ServerName> listRegionServers(FileSystem walFs, Path walDir) throws IOException {
FileStatus[] statuses;
try {
statuses = walFs.listStatus(walDir);
} catch (FileNotFoundException e) {
System.out.println(("WAL directory " + walDir) + " does not exists, ignore");
return Collections.emptySet();
}
Set<ServerName> regionServers = new HashSet<>();
for (FileStatus status : statuses) {
// All WAL files under walDir are within their region server's directory
if (!status.isDirectory()) {
continue;
}
ServerName sn = AbstractFSWALProvider.getServerNameFromWALDirectoryName(status.getPath());
if (sn != null) {
regionServers.add(sn);
}
}
return regionServers;
} | 3.26 |
hbase_ReplicationSyncUp_claimReplicationQueues_rdh | // When using this tool, usually the source cluster is unhealthy, so we should try to claim the
// replication queues for the dead region servers first and then replicate the data out.
private void claimReplicationQueues(ReplicationSourceManager mgr, Set<ServerName> regionServers) throws ReplicationException, KeeperException, IOException {
// union the region servers from both places, i.e, from the wal directory, and the records in
// replication queue storage.
Set<ServerName> replicators = new HashSet<>(regionServers);
ReplicationQueueStorage queueStorage = mgr.getQueueStorage();
replicators.addAll(queueStorage.listAllReplicators());
FileSystem fs = CommonFSUtils.getCurrentFileSystem(getConf());
Path infoDir = new Path(CommonFSUtils.getRootDir(getConf()), f1);
for (ServerName sn : replicators) {
List<ReplicationQueueId> replicationQueues = queueStorage.listAllQueueIds(sn);
System.out.println((sn + " is dead, claim its replication queues: ") + replicationQueues);
// record the rs name, so when the master restarts, we will skip claiming its replication queue
fs.createNewFile(new Path(infoDir, sn.getServerName()));
for (ReplicationQueueId queueId : replicationQueues) {
mgr.claimQueue(queueId, true);
}
}
} | 3.26 |
hbase_ReplicationSyncUp_main_rdh | /**
* Main program
*/
public static void main(String[] args) throws Exception {
int ret = ToolRunner.run(HBaseConfiguration.create(), new ReplicationSyncUp(), args);
System.exit(ret);
} | 3.26 |
hbase_AbstractProtobufWALReader_getCodecClsName_rdh | /**
* Returns the cell codec classname
*/
public String getCodecClsName() {
return codecClsName;
} | 3.26 |
hbase_AbstractProtobufWALReader_isWALTrailer_rdh | /**
* This is used to determine whether we have already reached the WALTrailer. As the size and magic
* are at the end of the WAL file, it is possible that these two parts are missing while the file
* is still being written, in which case we consider there to be no trailer. And when we actually
* reach the WALTrailer, we will try to decode it as a WALKey and fail, but the error can vary
* since what we are parsing is actually a WALTrailer.
*
* @return whether this is a WALTrailer and we should throw EOF to the upper layer to signal that
*         the file is done
*/
protected final boolean isWALTrailer(long startPosition) throws IOException {
// We have nothing in the WALTrailer PB message now so its size is just an int length and a
// magic at the end
int trailerSize = PB_WAL_COMPLETE_MAGIC.length + Bytes.SIZEOF_INT;
if ((fileLength - startPosition) >= trailerSize) {
// We still have more than trailerSize bytes before reaching the EOF so this is not a trailer.
// We also test for == here because if this is a valid trailer, we can read it while opening
// the reader so we should not reach here
return false;
}
inputStream.seek(startPosition);
for (int i = 0; i < 4; i++) {
int r = inputStream.read();
if (r == (-1)) {
// we have reached EOF while reading the length, and all bytes read are 0, so we assume this
// is a partial trailer
return true;
}
if (r != 0) {
// the length is not 0, should not be a trailer
return false;
}
}
for (int i = 0; i < PB_WAL_COMPLETE_MAGIC.length; i++) {
int r = inputStream.read();
if (r == (-1)) {
// we have reached EOF while reading the magic, and all bytes read are matched, so we assume
// this is a partial trailer
return true;
}
if (r != (PB_WAL_COMPLETE_MAGIC[i] & 0xff)) {
// does not match magic, should not be a trailer
return false;
}
}
// in fact we should not reach here, as this means the trailer bytes are all matched and
// complete, then we should not call this method...
return true;
} | 3.26 |
hbase_AbstractProtobufWALReader_getWriterClsNames_rdh | /**
* Returns names of the accepted writer classes
*/
public List<String> getWriterClsNames() {
return WRITER_CLS_NAMES;
} | 3.26 |
hbase_AbstractProtobufWALReader_setTrailerIfPresent_rdh | /**
* To check whether a trailer is present in a WAL, it seeks to position (fileLength -
* PB_WAL_COMPLETE_MAGIC.size() - Bytes.SIZEOF_INT). It reads the int value to know the size of
* the trailer, and checks whether the trailer is present at the end or not by comparing the last
* PB_WAL_COMPLETE_MAGIC.size() bytes. In case trailer is not present, it returns false;
* otherwise, sets the trailer and sets this.walEditsStopOffset variable up to the point just
* before the trailer.
* <p/>
* The trailer is ignored in case:
* <ul>
* <li>fileLength is 0 or not correct (when file is under recovery, etc).
* <li>the trailer size is negative.
* </ul>
* In case the trailer size > this.trailerWarnSize, it is read after a WARN message.
*
* @return true if a valid trailer is present
*/
private boolean setTrailerIfPresent(FSDataInputStream stream) throws IOException {
try {
long trailerSizeOffset = this.fileLength - (PB_WAL_COMPLETE_MAGIC.length + Bytes.SIZEOF_INT);
if (trailerSizeOffset <= 0) {
// no trailer possible.
return false;
}
stream.seek(trailerSizeOffset);
// read the int as trailer size.
int trailerSize = stream.readInt();
ByteBuffer buf = ByteBuffer.allocate(PB_WAL_COMPLETE_MAGIC.length);
stream.readFully(buf.array(), buf.arrayOffset(), buf.capacity());
if (!Arrays.equals(buf.array(), PB_WAL_COMPLETE_MAGIC)) {
LOG.trace("No trailer found.");
return false;
}
if (trailerSize < 0) {
LOG.warn(("Invalid trailer Size " + trailerSize) + ", ignoring the trailer");
return false;
} else if (trailerSize > this.trailerWarnSize) {
// continue reading after warning the user.
LOG.warn((("Please investigate WALTrailer usage. Trailer size > maximum configured size : " + trailerSize) + " > ") + this.trailerWarnSize);
}
// seek to the position where trailer starts.
long positionOfTrailer = trailerSizeOffset - trailerSize;
stream.seek(positionOfTrailer);
// read the trailer.
buf = ByteBuffer.allocate(trailerSize);// for trailer.
stream.readFully(buf.array(), buf.arrayOffset(), buf.capacity());
trailer = WALTrailer.parseFrom(buf.array());
this.walEditsStopOffset = positionOfTrailer;
return true;
} catch (IOException ioe) {
LOG.warn("Got IOE while reading the trailer. Continuing as if no trailer is present.", ioe);
}
return false;
} | 3.26 |
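Putting the offsets read above together, the tail of a WAL file that carries a trailer is laid out as follows (a sketch derived from the reads in setTrailerIfPresent, not from a separate format specification):

[ ... WAL entries ... ][ WALTrailer protobuf: trailerSize bytes ][ trailerSize: 4-byte int ][ PB_WAL_COMPLETE_MAGIC ]
walEditsStopOffset = trailerSizeOffset - trailerSize
trailerSizeOffset  = fileLength - (PB_WAL_COMPLETE_MAGIC.length + Bytes.SIZEOF_INT)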
hbase_MiniBatchOperationInProgress_m0_rdh | /**
* Returns The number of operations(Mutations) involved in this batch.
*/
public int m0()
{
return this.lastIndexExclusive - this.firstIndex;
} | 3.26 |
hbase_MiniBatchOperationInProgress_getOperation_rdh | /**
* Returns The operation(Mutation) at the specified position.
*/
public T getOperation(int index) {
return operations[getAbsoluteIndex(index)];
} | 3.26 |
hbase_MiniBatchOperationInProgress_getOperationStatus_rdh | /**
* Returns Gets the status code for the operation(Mutation) at the specified position.
*/
public OperationStatus getOperationStatus(int index) {
return this.retCodeDetails[getAbsoluteIndex(index)];
} | 3.26 |
hbase_MiniBatchOperationInProgress_addOperationsFromCP_rdh | /**
* Add more Mutations corresponding to the Mutation at the given index to be committed atomically
* in the same batch. These mutations are applied to the WAL and applied to the memstore as well.
* The timestamp of the cells in the given Mutations MUST be obtained from the original mutation.
* <b>Note:</b> The durability from CP will be replaced by the durability of corresponding
* mutation. <b>Note:</b> Currently only supports Put and Delete operations.
*
* @param index
* the index that corresponds to the original mutation index in the batch
* @param newOperations
* the Mutations to add
*/
public void addOperationsFromCP(int index, Mutation[] newOperations) {
if (this.operationsFromCoprocessors == null) {
// lazy allocation to save on object allocation in case this is not used
this.operationsFromCoprocessors = new Mutation[operations.length][];
}
this.operationsFromCoprocessors[getAbsoluteIndex(index)] = newOperations;
} | 3.26 |
hbase_MiniBatchOperationInProgress_setWalEdit_rdh | /**
* Sets the walEdit for the operation(Mutation) at the specified position.
*/
public void setWalEdit(int index, WALEdit walEdit) {
this.walEditsFromCoprocessors[getAbsoluteIndex(index)] = walEdit;
} | 3.26 |
hbase_MiniBatchOperationInProgress_setOperationStatus_rdh | /**
* Sets the status code for the operation(Mutation) at the specified position. By setting this
* status, {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} can make HRegion to skip
* Mutations.
*/
public void setOperationStatus(int index, OperationStatus opStatus) {
this.retCodeDetails[getAbsoluteIndex(index)] = opStatus;
} | 3.26 |
hbase_MiniBatchOperationInProgress_getWalEdit_rdh | /**
* Returns Gets the walEdit for the operation(Mutation) at the specified position.
*/
public WALEdit getWalEdit(int index) {
return this.walEditsFromCoprocessors[getAbsoluteIndex(index)];
} | 3.26 |
hbase_BucketAllocator_allocateBlock_rdh | /**
* Allocate a block with specified size. Return the offset
*
* @param blockSize
* size of block
* @return the offset in the IOEngine
*/
public synchronized long allocateBlock(int blockSize) throws CacheFullException, BucketAllocatorException {
assert blockSize > 0;
BucketSizeInfo bsi = roundUpToBucketSizeInfo(blockSize);
if (bsi == null) {
throw new BucketAllocatorException(((("Allocation too big size=" + blockSize) + "; adjust BucketCache sizes ") + BlockCacheFactory.BUCKET_CACHE_BUCKETS_KEY) + " to accomodate if size seems reasonable and you want it cached.");
}
long offset = bsi.allocateBlock(blockSize);
// Ask caller to free up space and try again!
if (offset < 0)
throw new CacheFullException(blockSize, bsi.sizeIndex());
usedSize += f0[bsi.sizeIndex()];
return offset;
} | 3.26 |
hbase_BucketAllocator_itemSize_rdh | /**
* This bucket size can only allocate items of this size, even if the requested allocation size
* is smaller. The rest goes towards {@link #fragmentationBytes()}.
*/
public long itemSize() {
return itemSize;
} | 3.26 |
hbase_BucketAllocator_usedCount_rdh | /**
* How many items are currently taking up space in this bucket size's buckets
*/
public long usedCount() {
return usedCount;
} | 3.26 |
hbase_BucketAllocator_roundUpToBucketSizeInfo_rdh | /**
* Round up the given block size to bucket size, and get the corresponding BucketSizeInfo
*/
public BucketSizeInfo roundUpToBucketSizeInfo(int blockSize) {
for (int i = 0; i < f0.length; ++i)
if (blockSize <= f0[i])
return bucketSizeInfos[i];
return null;
} | 3.26 |
hbase_BucketAllocator_freeBytes_rdh | /**
* How many more bytes can be allocated from the currently claimed blocks of this bucket size
*/
public long freeBytes() {
return f1 * itemSize;
} | 3.26 |
hbase_BucketAllocator_wastedBytes_rdh | /**
* If {@link #bucketCapacity} is not perfectly divisible by this {@link #itemSize()}, the
* remainder will be unusable by buckets of this size. A high value here may be optimized by
* trying to choose bucket sizes which can better divide {@link #bucketCapacity}.
*/
public long wastedBytes() {
return wastedBytes;
} | 3.26 |
hbase_BucketAllocator_freeBlock_rdh | /**
* Free a block with the offset
*
* @param offset
* block's offset
* @return size freed
*/
public synchronized int freeBlock(long offset, int length) {
int bucketNo = (int) (offset / bucketCapacity);
assert (bucketNo >= 0) && (bucketNo < buckets.length);
Bucket targetBucket = buckets[bucketNo];
bucketSizeInfos[targetBucket.sizeIndex()].freeBlock(targetBucket, offset, length);
usedSize -= targetBucket.getItemAllocationSize();
return targetBucket.getItemAllocationSize();
} | 3.26 |
hbase_BucketAllocator_completelyFreeBuckets_rdh | /**
* How many buckets are currently claimed by this bucket size but as yet totally unused. These
* buckets are available for reallocation to other bucket sizes if those fill up.
*/
public int completelyFreeBuckets() {
return completelyFreeBuckets;
} | 3.26 |
hbase_BucketAllocator_usedBytes_rdh | /**
* How many bytes are currently taking up space in this bucket size's buckets. Note: If your
* items are less than the bucket size of this bucket, the actual used bytes by items will be
* lower than this value. But since a bucket size can only allocate items of a single size, this
* value is the true number of used bytes. The difference will be counted in
* {@link #fragmentationBytes()}.
*/
public long usedBytes() {
return usedCount * itemSize;
} | 3.26 |
hbase_BucketAllocator_fullBuckets_rdh | /**
* How many buckets have been completely filled by blocks for this bucket size. These buckets
* can't accept any more blocks unless some existing are freed.
*/
public int fullBuckets() {
return fullBuckets;
} | 3.26 |
hbase_BucketAllocator_getLeastFilledBuckets_rdh | /**
* Returns a set of indices of the buckets that are least filled, excluding the given buckets. We
* also skip uninstantiated buckets and buckets that are the only bucket for their bucket size,
* so that each bucket size keeps one completely free bucket as a reserve.
*
* @param excludedBuckets
* the buckets that need to be excluded due to currently being in used
* @param bucketCount
* max Number of buckets to return
* @return set of bucket indices which could be used for eviction
*/
public Set<Integer> getLeastFilledBuckets(Set<Integer> excludedBuckets, int bucketCount) {
Queue<Integer> queue = MinMaxPriorityQueue.<Integer>orderedBy(new Comparator<Integer>() {
@Override
public int compare(Integer left, Integer right) {
// We will always get instantiated buckets
return Float.compare(((float) (buckets[left].usedCount)) / buckets[left].itemCount, ((float) (buckets[right].usedCount)) / buckets[right].itemCount);
}
}).maximumSize(bucketCount).create();
for (int i = 0; i < buckets.length; i++) {
if (((!excludedBuckets.contains(i)) && (!buckets[i].isUninstantiated())) && // Avoid the buckets that are the only buckets for a sizeIndex
(bucketSizeInfos[buckets[i].sizeIndex()].bucketList.size() != 1))
{
queue.add(i);
}
}
Set<Integer> result = new HashSet<>(bucketCount);
result.addAll(queue);
return result;
} | 3.26 |
hbase_BucketAllocator_totalCount_rdh | /**
* Combined {@link #freeCount()} + {@link #usedCount()}
*/
public long totalCount() {
return totalCount;
} | 3.26 |
hbase_BucketAllocator_totalBytes_rdh | /**
* Combined {@link #totalCount()} * {@link #itemSize()}
*/
public long totalBytes() {
return totalCount * itemSize;
} | 3.26 |
hbase_BucketAllocator_m4_rdh | /**
* Every time you allocate blocks in these buckets where the block size is less than the bucket
* size, fragmentation increases by that difference. You can reduce fragmentation by lowering
* the bucket size so that it is closer to the typical block size. This may have the consequence
* of bumping some blocks to the next larger bucket size, so experimentation may be needed.
*/
public long m4() {
return fragmentationBytes;
} | 3.26 |
hbase_BucketAllocator_allocate_rdh | /**
* Allocate a block in this bucket, return the offset representing the position in physical
* space
*
* @return the offset in the IOEngine
*/
public long allocate() {
assert freeCount > 0; // Else should not have been called
assert sizeIndex != (-1);
++usedCount;
long offset = baseOffset + (freeList[--freeCount] * itemAllocationSize);
assert offset >= 0;
return offset;
} | 3.26 |
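A worked example of the offset computation in allocate(), with made-up numbers; baseOffset, itemAllocationSize and the free-list entry below are hypothetical:

baseOffset            = 4194304   (start of this bucket inside the IOEngine)
itemAllocationSize    = 65536     (slot size for this bucket)
freeList[--freeCount] = 3         (index of a free slot)
offset = 4194304 + 3 * 65536 = 4390912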
hbase_AssignmentVerificationReport_getRegionsWithoutValidFavoredNodes_rdh | /**
* Return the regions without favored nodes
*
* @return regions without favored nodes
*/
List<RegionInfo> getRegionsWithoutValidFavoredNodes() {
return regionsWithoutValidFavoredNodes;
} | 3.26 |
hbase_AssignmentVerificationReport_getUnassignedRegions_rdh | /**
* Return the unassigned regions
*
* @return unassigned regions
*/
List<RegionInfo> getUnassignedRegions() {
return unAssignedRegionsList;
} | 3.26 |
hbase_AssignmentVerificationReport_getNonFavoredAssignedRegions_rdh | /**
* Return the regions not assigned to its favored nodes
*
* @return regions not assigned to its favored nodes
*/
List<RegionInfo> getNonFavoredAssignedRegions() {
return nonFavoredAssignedRegionList;
} | 3.26 |
hbase_AssignmentVerificationReport_getNumRegionsOnFavoredNodeByPosition_rdh | /**
* Return the number of regions based on the position (primary/secondary/ tertiary) assigned to
* their favored nodes
*
* @return the number of regions
*/
int getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position position) {
return favoredNodes[position.ordinal()];
} | 3.26 |
hbase_AssignmentVerificationReport_getDispersionInformation_rdh | /**
* Return a list which contains 3 elements: average dispersion score, max dispersion score and min
* dispersion score as first, second and third elements, respectively.
*/
public List<Float> getDispersionInformation() {
List<Float> dispersion = new ArrayList<>();
dispersion.add(avgDispersionScore);
dispersion.add(maxDispersionScore);
dispersion.add(minDispersionScore);
return dispersion;
} | 3.26 |
hbase_AssignmentVerificationReport_getTotalFavoredAssignments_rdh | /**
* Return the number of regions assigned to their favored nodes
*
* @return number of regions assigned to their favored nodes
*/
int getTotalFavoredAssignments() {
return totalFavoredAssignments;
} | 3.26 |
hbase_AssignmentVerificationReport_fillUpDispersion_rdh | /**
* Use this to project the dispersion scores
*/
public void fillUpDispersion(TableName tableName, SnapshotOfRegionAssignmentFromMeta snapshot,
FavoredNodesPlan newPlan) {
// Set the table name
this.tableName = tableName;
// Get all the regions for this table
List<RegionInfo> regionInfoList = snapshot.getTableToRegionMap().get(tableName);
// Get the total region num for the current table
this.totalRegions = regionInfoList.size();
FavoredNodesPlan plan = null;
if (newPlan == null) {
plan = snapshot.getExistingAssignmentPlan();
} else {
plan = newPlan;
}
// Get the region to region server mapping
Map<ServerName, Integer> primaryRSToRegionCounterMap = new HashMap<>();
Map<ServerName, Set<ServerName>> primaryToSecTerRSMap = new HashMap<>();
// Check the favored nodes and its locality information
// Also keep tracker of the most loaded and least loaded region servers
for (RegionInfo region : regionInfoList) {
try {
// Get the favored nodes from the assignment plan and verify it.
List<ServerName> favoredNodes = plan.getFavoredNodes(region);
if ((favoredNodes == null)
|| (favoredNodes.size() != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM)) {
regionsWithoutValidFavoredNodes.add(region);
continue;
}
// Get the primary, secondary and tertiary region server
ServerName v37 = favoredNodes.get(Position.PRIMARY.ordinal());
ServerName secondaryRS = favoredNodes.get(Position.SECONDARY.ordinal());
ServerName tertiaryRS = favoredNodes.get(Position.TERTIARY.ordinal());
// Update the primary rs to its region set map
Integer v40 = primaryRSToRegionCounterMap.get(v37);
if (v40 == null) {
v40 = Integer.valueOf(0);
}
v40 = v40.intValue() + 1;
primaryRSToRegionCounterMap.put(v37, v40);
// Update the primary rs to secondary and tertiary rs map
Set<ServerName> secAndTerSet = primaryToSecTerRSMap.get(v37);
if (secAndTerSet == null) {
secAndTerSet = new HashSet<>();
}
secAndTerSet.add(secondaryRS);
secAndTerSet.add(tertiaryRS);
primaryToSecTerRSMap.put(v37, secAndTerSet);
} catch (Exception e) {
f0.error((("Cannot verify the region assignment for region " + (region == null ? " null " : region.getRegionNameAsString())) + "because of ") + e);
}
}
float dispersionScoreSummary = 0;
float dispersionNumSummary = 0;
// Calculate the secondary score for each primary region server
for (Map.Entry<ServerName, Integer> entry : primaryRSToRegionCounterMap.entrySet()) {
ServerName primaryRS = entry.getKey();
Integer regionsOnPrimary = entry.getValue();
// Process the dispersion number and score
float dispersionScore = 0;
int dispersionNum = 0;
if ((primaryToSecTerRSMap.get(primaryRS) != null) && (regionsOnPrimary.intValue() != 0)) {
dispersionNum = primaryToSecTerRSMap.get(primaryRS).size();
dispersionScore = dispersionNum / (((float) (regionsOnPrimary.intValue())) * 2);
}
// Update the max dispersion num
if (dispersionNum > this.maxDispersionNum) {
this.maxDispersionNumServerSet.clear();
this.maxDispersionNumServerSet.add(primaryRS);
this.maxDispersionNum = dispersionNum;
} else if (dispersionNum == this.maxDispersionNum) {
this.maxDispersionNumServerSet.add(primaryRS);
}
// Update the min dispersion score
if (dispersionScore < this.minDispersionScore) {
this.minDispersionScoreServerSet.clear();
this.minDispersionScoreServerSet.add(primaryRS);
this.minDispersionScore = dispersionScore;
} else if (dispersionScore == this.minDispersionScore) {
this.minDispersionScoreServerSet.add(primaryRS);
}
// Update the min dispersion num
if (dispersionNum < this.minDispersionNum) {
this.f1.clear();
this.f1.add(primaryRS);
this.minDispersionNum = dispersionNum;
} else if (dispersionNum == this.minDispersionNum) {
this.f1.add(primaryRS);
}
dispersionScoreSummary += dispersionScore;
dispersionNumSummary += dispersionNum;
}
// Update the avg dispersion score
if (primaryRSToRegionCounterMap.keySet().size() != 0) {
this.avgDispersionScore = dispersionScoreSummary / ((float) (primaryRSToRegionCounterMap.keySet().size()));
this.avgDispersionNum = dispersionNumSummary / ((float) (primaryRSToRegionCounterMap.keySet().size()));
}
} | 3.26 |
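As a worked example of the dispersion score computed above (the numbers are made up): if a primary region server hosts 4 primary regions and their secondary and tertiary replicas land on 6 distinct servers, then

dispersionNum   = 6
dispersionScore = 6 / (4 * 2) = 0.75

A score of 1.0 would mean every secondary and tertiary replica sits on a distinct server.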
hbase_Chunk_reset_rdh | /**
* Reset the offset to UNINITIALIZED before reusing an old chunk
*/
void reset() {
if (nextFreeOffset.get() != UNINITIALIZED) {
nextFreeOffset.set(UNINITIALIZED);
allocCount.set(0);
}
} | 3.26 |
hbase_Chunk_alloc_rdh | /**
* Try to allocate <code>size</code> bytes from the chunk. If a chunk is tried to get allocated
* before init() call, the thread doing the allocation will be in busy-wait state as it will keep
* looping till the nextFreeOffset is set.
*
* @return the offset of the successful allocation, or -1 to indicate not-enough-space
*/
public int alloc(int size) {
while (true) {
int oldOffset = nextFreeOffset.get();
if (oldOffset == UNINITIALIZED) {
// The chunk doesn't have its data allocated yet.
// Since we found this in curChunk, we know that whoever
// CAS-ed it there is allocating it right now. So spin-loop
// shouldn't spin long!
Thread.yield();
continue;
}
if (oldOffset == OOM) {
// doh we ran out of ram. return -1 to chuck this away.
return -1;
}
if ((oldOffset + size) > data.capacity()) {
return -1;// alloc doesn't fit
}
// TODO : If seqID is to be written add 8 bytes here for nextFreeOFfset
// Try to atomically claim this chunk
if (nextFreeOffset.compareAndSet(oldOffset, oldOffset + size)) {
// we got the alloc
allocCount.incrementAndGet();
return oldOffset;
}
// we raced and lost alloc, try again
}
} | 3.26 |
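The loop above is a classic compare-and-swap bump-the-pointer allocator. A minimal standalone sketch of the same idiom (illustrative names and capacity, not the HBase Chunk API, and without its uninitialized/OOM sentinel states):

import java.util.concurrent.atomic.AtomicInteger;

class BumpPointerArena {
  private final byte[] data = new byte[4096];   // hypothetical capacity
  private final AtomicInteger nextFree = new AtomicInteger(0);

  /** Returns the offset of the reserved range, or -1 if the arena is full. */
  int alloc(int size) {
    while (true) {
      int old = nextFree.get();
      if (old + size > data.length) {
        return -1;                                // does not fit
      }
      if (nextFree.compareAndSet(old, old + size)) {
        return old;                               // we won the race
      }
      // another thread won the CAS; re-read the offset and retry
    }
  }
}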
hbase_Chunk_init_rdh | /**
* Actually claim the memory for this chunk. This should only be called from the thread that
* constructed the chunk. It is thread-safe against other threads calling alloc(), who will block
* until the allocation is complete.
*/
public void init() {
assert nextFreeOffset.get() == UNINITIALIZED;
try {
allocateDataBuffer();
} catch (OutOfMemoryError e) {
boolean failInit = nextFreeOffset.compareAndSet(UNINITIALIZED, OOM);
assert failInit;// should be true.
throw e;
}
// Mark that it's ready for use
// Move 4 bytes since the first 4 bytes are having the chunkid in it
boolean initted = nextFreeOffset.compareAndSet(UNINITIALIZED, Bytes.SIZEOF_INT);
// We should always succeed the above CAS since only one thread
// calls init()!
Preconditions.checkState(initted, "Multiple threads tried to init same chunk");
} | 3.26 |
hbase_Chunk_getData_rdh | /**
* Returns This chunk's backing data.
*/
ByteBuffer getData() {
return this.data;
} | 3.26 |
hbase_TableIntegrityErrorHandlerImpl_handleHoleInRegionChain_rdh | /**
* {@inheritDoc }
*/
@Override
public void handleHoleInRegionChain(byte[] holeStart, byte[] holeEnd) throws IOException
{
}
/**
* {@inheritDoc } | 3.26 |
hbase_TableIntegrityErrorHandlerImpl_handleRegionEndKeyNotEmpty_rdh | /**
* {@inheritDoc }
*/
@Override
public void handleRegionEndKeyNotEmpty(byte[] curEndKey) throws IOException {
} | 3.26 |
hbase_TableIntegrityErrorHandlerImpl_handleRegionStartKeyNotEmpty_rdh | /**
* {@inheritDoc }
*/
@Override
public void handleRegionStartKeyNotEmpty(HbckRegionInfo hi) throws IOException {
} | 3.26 |
hbase_TableIntegrityErrorHandlerImpl_handleDuplicateStartKeys_rdh | /**
* {@inheritDoc }
*/
@Override
public void handleDuplicateStartKeys(HbckRegionInfo hi1, HbckRegionInfo hi2) throws IOException {
} | 3.26 |
hbase_TableIntegrityErrorHandlerImpl_m0_rdh | /**
* {@inheritDoc }
*/
@Override
public HbckTableInfo m0() {
return ti;
} | 3.26 |
hbase_TableIntegrityErrorHandlerImpl_handleDegenerateRegion_rdh | /**
* {@inheritDoc }
*/
@Override
public void handleDegenerateRegion(HbckRegionInfo hi) throws IOException {
} | 3.26 |
hbase_TableIntegrityErrorHandlerImpl_setTableInfo_rdh | /**
* {@inheritDoc }
*/
@Override
public void setTableInfo(HbckTableInfo ti2) {
this.ti = ti2;
} | 3.26 |
hbase_TableIntegrityErrorHandlerImpl_handleOverlapInRegionChain_rdh | /**
* {@inheritDoc }
*/
@Override
public void handleOverlapInRegionChain(HbckRegionInfo hi1, HbckRegionInfo hi2) throws IOException {
} | 3.26 |
hbase_DictionaryCache_loadFromResource_rdh | // Visible for testing
public static byte[] loadFromResource(final Configuration conf,
final String s, final int maxSize) throws IOException {
if (!s.startsWith(RESOURCE_SCHEME)) {
throw new IOException("Path does not start with " + RESOURCE_SCHEME);
}
final String path = s.substring(RESOURCE_SCHEME.length(), s.length());
LOG.info("Loading resource {}", path);
final InputStream in = DictionaryCache.class.getClassLoader().getResourceAsStream(path);
if (in == null) {throw new FileNotFoundException(("Resource " + path) + " not found");
}
final ByteArrayOutputStream baos = new ByteArrayOutputStream();
try {
final byte[] buffer = new byte[8192];
int n;
int len = 0;
do {
n = in.read(buffer);
if (n > 0) {
len += n;
if (len > maxSize) {
throw new IOException((("Dictionary " + s) + " is too large, limit=") + maxSize);
}
baos.write(buffer, 0, n);
}
} while (n > 0 );
} finally {
in.close();
}
return baos.toByteArray();
} | 3.26 |
hbase_DictionaryCache_getDictionary_rdh | /**
* Load a dictionary or return a previously cached load.
*
* @param conf
* configuration
* @param path
* the hadoop Path where the dictionary is located, as a String
* @return the dictionary bytes if successful, null otherwise
*/
public static byte[] getDictionary(final Configuration conf, final String path) throws IOException {
if ((path == null) || path.isEmpty()) {
return null;
}
// Create the dictionary loading cache if we haven't already
if (CACHE == null) {
synchronized(DictionaryCache.class) {
if (CACHE == null) {
final int maxSize = conf.getInt(DICTIONARY_MAX_SIZE_KEY, DEFAULT_DICTIONARY_MAX_SIZE);
CACHE = CacheBuilder.newBuilder().maximumSize(100).expireAfterAccess(10, TimeUnit.MINUTES).build(new CacheLoader<String, byte[]>() {
@Override
public byte[] load(String s) throws Exception {
byte[] bytes;
if (path.startsWith(RESOURCE_SCHEME)) {
bytes = loadFromResource(conf, path, maxSize);
} else {
bytes = loadFromHadoopFs(conf, path, maxSize);
}
LOG.info("Loaded dictionary from {} (size {})", s, bytes.length);
return bytes;
}
});
}
}
}
// Get or load the dictionary for the given path
try {
return CACHE.get(path);
} catch (ExecutionException e) {
throw new IOException(e);
}
} | 3.26 |
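The method above lazily builds the shared loading cache with double-checked locking before delegating to CACHE.get(path). A minimal standalone sketch of that idiom (illustrative names, not the HBase class; note that the shared field needs to be volatile for the pattern to be safe in general):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class LazyCacheHolder {
  // volatile ensures other threads see a fully constructed cache
  private static volatile Map<String, byte[]> CACHE;

  static Map<String, byte[]> cache() {
    if (CACHE == null) {                        // first check, no lock
      synchronized (LazyCacheHolder.class) {    // lock only on the slow path
        if (CACHE == null) {                    // second check under the lock
          CACHE = new ConcurrentHashMap<>();
        }
      }
    }
    return CACHE;
  }
}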
hbase_SimpleRpcScheduler_onConfigurationChange_rdh | /**
* Resize call queues;
*
* @param conf
* new configuration
*/
@Override
public void onConfigurationChange(Configuration conf) {
callExecutor.resizeQueues(conf);
if (priorityExecutor != null) {
priorityExecutor.resizeQueues(conf);
}
if (replicationExecutor != null) {
replicationExecutor.resizeQueues(conf);
}
if (metaTransitionExecutor != null) {
metaTransitionExecutor.resizeQueues(conf);
}
if (bulkloadExecutor != null) {
bulkloadExecutor.resizeQueues(conf);
}
String callQueueType = conf.get(RpcExecutor.CALL_QUEUE_TYPE_CONF_KEY, RpcExecutor.CALL_QUEUE_TYPE_CONF_DEFAULT);
if (RpcExecutor.isCodelQueueType(callQueueType) || RpcExecutor.isPluggableQueueType(callQueueType)) {
callExecutor.onConfigurationChange(conf);
}
} | 3.26 |
hbase_HeterogeneousRegionCountCostFunction_loadRules_rdh | /**
* used to load the rule files.
*/
@RestrictedApi(explanation = "Should only be called in tests", link = "", allowedOnPath = ".*(/src/test/.*|HeterogeneousRegionCountCostFunction).java")
void loadRules() {
final List<String> lines = readFile(this.rulesPath);
if (null == lines) {
LOG.warn(("cannot load rules file, keeping latest rules file which has " + this.limitPerRule.size()) + " rules");
return;
}
LOG.info(("loading rules file '" + this.rulesPath) + "'"); this.limitPerRule.clear();
for (final String line : lines) {
try {
if (line.length() == 0) {
continue;
}
if (line.startsWith("#")) {
continue;
}
final List<String> splits = Splitter.on(' ').splitToList(line);
if (splits.size() != 2) {
throw new IOException((("line '" + line)
+ "' is malformated, ") + "expected [regexp] [limit]. Skipping line");
}
final Pattern pattern = Pattern.compile(splits.get(0));
final Integer limit = Integer.parseInt(splits.get(1));
this.limitPerRule.put(pattern, limit);
} catch (IOException | NumberFormatException | PatternSyntaxException e) {
LOG.error("error on line: " + e);
}
}
this.rebuildCache();
} | 3.26 |
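For reference, a hypothetical rules file this parser would accept. Blank lines and lines starting with '#' are skipped, and each remaining line is split on a single space, so exactly one space must separate the regexp from the limit (hostnames and limits below are made up):

# [hostname regexp] [region limit]
rs-large-.* 300
rs-small-.* 120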
hbase_HeterogeneousRegionCountCostFunction_findLimitForRS_rdh | /**
* Find the limit for a ServerName. If not found then return the default value
*
* @param serverName
* the server we are looking for
* @return the limit
*/
int findLimitForRS(final ServerName serverName) {
boolean matched = false;
int limit = -1;
for (final Map.Entry<Pattern, Integer> entry : this.limitPerRule.entrySet()) {
if (entry.getKey().matcher(serverName.getHostname()).matches()) {
matched = true;
limit = entry.getValue();
break;
}
}
if (!matched) {
limit = this.defaultNumberOfRegions;
}
// Feeding cache
this.limitPerRS.put(serverName, limit);
return limit;
} | 3.26 |
hbase_HeterogeneousRegionCountCostFunction_readFileFromLocalFS_rdh | /**
* used to read the rule files from local FS
*/
private List<String> readFileFromLocalFS(final String filename) throws IOException {
return Files.readAllLines(Paths.get(filename), StandardCharsets.UTF_8);
} | 3.26 |
hbase_HeterogeneousRegionCountCostFunction_readFile_rdh | /**
* used to read the rule files from either HDFS or local FS
*/
private List<String> readFile(final String filename) {
if (null == filename) {
return null;
}
try {
if (filename.startsWith("file:")) {
return readFileFromLocalFS(filename);
}
return readFileFromHDFS(filename);
} catch (IOException e) {LOG.error((("cannot read rules file located at ' " + filename) + " ':") + e.getMessage());
return null;
}
} | 3.26 |
hbase_HeterogeneousRegionCountCostFunction_readFileFromHDFS_rdh | /**
* used to read the rule files from HDFS
*/
private List<String> readFileFromHDFS(final String filename) throws IOException {
final Path path = new Path(filename);
final FileSystem fs = FileSystem.get(this.conf);
try (BufferedReader reader = new BufferedReader(new InputStreamReader(fs.open(path), StandardCharsets.UTF_8))) {
return CharStreams.readLines(reader);
}
} | 3.26 |
hbase_HeterogeneousRegionCountCostFunction_rebuildCache_rdh | /**
* Rebuild cache matching ServerNames and their capacity.
*/
private void rebuildCache() {
LOG.debug("Rebuilding cache of capacity for each RS");
this.limitPerRS.clear();
this.totalCapacity = 0;
if (null == this.cluster) {
return;
}
for (int i = 0; i < this.cluster.numServers; i++) {
final ServerName sn = this.cluster.servers[i];
final int capacity = this.findLimitForRS(sn);
LOG.debug(((sn.getHostname() + " can hold ") + capacity) + " regions");
this.totalCapacity += capacity;
}
overallUsage = ((double) (this.cluster.numRegions)) / ((double) (this.totalCapacity));
LOG.info(((((("Cluster can hold " + this.cluster.numRegions) + "/") + this.totalCapacity) + " regions (") + Math.round(overallUsage * 100)) + "%)");
if (overallUsage >= 1) {
LOG.warn("Cluster is overused, {}", overallUsage);}
} | 3.26 |
hbase_HeterogeneousRegionCountCostFunction_prepare_rdh | /**
* Called once per LB invocation to give the cost function to initialize it's state, and perform
* any costly calculation.
*/
@Override
void prepare(final BalancerClusterState cluster) {
this.cluster = cluster;
this.loadRules();
} | 3.26 |
hbase_SizeCachedKeyValue_getSerializedSize_rdh | /**
* Override by just returning the length for saving cost of method dispatching. If not, it will
* call {@link ExtendedCell#getSerializedSize()} firstly, then forward to
* {@link SizeCachedKeyValue#getSerializedSize(boolean)}. (See HBASE-21657)
*/
@Override
public int getSerializedSize() {
return this.length;
} | 3.26 |
hbase_TokenProvider_getServices_rdh | // AuthenticationService implementation
@Override
public Iterable<Service> getServices() {
return Collections.singleton(AuthenticationProtos.AuthenticationService.newReflectiveService(this));
} | 3.26 |
hbase_TokenProvider_isAllowedDelegationTokenOp_rdh | /**
*
* @param ugi
* A user group information.
* @return true if delegation token operation is allowed
*/
private boolean isAllowedDelegationTokenOp(UserGroupInformation ugi) throws IOException {
AuthenticationMethod authMethod = ugi.getAuthenticationMethod();
if (authMethod == AuthenticationMethod.PROXY) {
authMethod = ugi.getRealUser().getAuthenticationMethod();
}
if (((authMethod != AuthenticationMethod.KERBEROS) && (authMethod != AuthenticationMethod.KERBEROS_SSL)) && (authMethod != AuthenticationMethod.CERTIFICATE)) {
return false;
}
return true;
} | 3.26 |
hbase_FilterWrapper_toByteArray_rdh | /**
* Returns The filter serialized using pb
*/
@Override
public byte[] toByteArray() throws IOException {
FilterProtos.FilterWrapper.Builder builder = FilterProtos.FilterWrapper.newBuilder();
builder.setFilter(ProtobufUtil.toFilter(this.filter));
return builder.build().toByteArray();
} | 3.26 |
hbase_FilterWrapper_parseFrom_rdh | /**
*
* @param pbBytes
* A pb serialized {@link FilterWrapper} instance
* @return An instance of {@link FilterWrapper} made from <code>bytes</code>
* @throws org.apache.hadoop.hbase.exceptions.DeserializationException
* @see #toByteArray
*/
public static FilterWrapper parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.FilterWrapper proto;
try {
proto = FilterProtos.FilterWrapper.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
try {
return new FilterWrapper(ProtobufUtil.toFilter(proto.getFilter()));
} catch (IOException ioe) {
throw new DeserializationException(ioe);
}
} | 3.26 |
hbase_AccessControlClient_getUserPermissions_rdh | /**
* List all the userPermissions matching the given table pattern, column family and column
* qualifier.
*
* @param connection
* Connection
* @param tableRegex
* The regular expression string to match against. It shouldn't be null,
* empty or a namespace regular expression.
* @param columnFamily
* Column family
* @param columnQualifier
* Column qualifier
* @param userName
* User name, if empty then all user permissions will be retrieved.
* @return List of UserPermissions
* @throws Throwable
* on failure
*/
public static List<UserPermission> getUserPermissions(Connection connection, String tableRegex, byte[] columnFamily, byte[] columnQualifier, String userName) throws Throwable {
if (((tableRegex == null) || tableRegex.isEmpty()) || (tableRegex.charAt(0) == '@')) {
throw new IllegalArgumentException("Table name can't be null or empty or a namespace.");
}
List<UserPermission> permList = new ArrayList<UserPermission>();
try (Admin admin = connection.getAdmin()) {
List<TableDescriptor> htds = admin.listTableDescriptors(Pattern.compile(tableRegex), true);
// Retrieve table permissions
for (TableDescriptor htd : htds) {
permList.addAll(admin.getUserPermissions(GetUserPermissionsRequest.newBuilder(htd.getTableName()).withFamily(columnFamily).withQualifier(columnQualifier).withUserName(userName).build()));
}
}
return permList;
} | 3.26 |
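A hedged usage sketch of the method above: list the permissions of user "alice" on every table whose name matches "demo_.*". The regex, user name, and the assumption that a null family/qualifier means "no column restriction" are illustrative, not taken from the HBase documentation.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class ListPermissionsExample {
  public static void main(String[] args) throws Throwable { // the API is declared to throw Throwable
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      List<UserPermission> perms =
        AccessControlClient.getUserPermissions(conn, "demo_.*", null, null, "alice");
      for (UserPermission p : perms) {
        System.out.println(p);
      }
    }
  }
}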
hbase_AccessControlClient_grant_rdh | /**
* Grant global permissions for the specified user. If permissions for the specified user exists,
* later granted permissions will override previous granted permissions.
*/
public static void grant(Connection connection, final String userName, final Permission... actions) throws Throwable {
grant(connection, userName, true, actions);
} | 3.26 |
hbase_AccessControlClient_revoke_rdh | /**
* Revoke global permissions for the specified user.
*
* @param connection
* The Connection instance to use
*/
public static void revoke(Connection connection, final String userName, final Permission... actions) throws Throwable {
connection.getAdmin().revoke(new UserPermission(userName, Permission.newBuilder().withActions(actions).build()));
} | 3.26 |
hbase_AccessControlClient_isAuthorizationEnabled_rdh | /**
* Return true if authorization is supported and enabled
*
* @param connection
* The connection to use
* @return true if authorization is supported and enabled, false otherwise
*/
public static boolean isAuthorizationEnabled(Connection connection) throws IOException {
return connection.getAdmin().getSecurityCapabilities().contains(SecurityCapability.AUTHORIZATION);
} | 3.26 |
hbase_AccessControlClient_isCellAuthorizationEnabled_rdh | /**
* Return true if cell authorization is supported and enabled
*
* @param connection
* The connection to use
* @return true if cell authorization is supported and enabled, false otherwise
*/
public static boolean isCellAuthorizationEnabled(Connection connection) throws IOException {
return connection.getAdmin().getSecurityCapabilities().contains(SecurityCapability.CELL_AUTHORIZATION);
} | 3.26 |
hbase_AccessControlClient_hasPermission_rdh | /**
* Validates whether specified user has permission to perform actions on the mentioned table,
* column family or column qualifier.
*
* @param connection
* Connection
* @param tableName
* Table name, it shouldn't be null or empty.
* @param columnFamily
* The column family. Optional argument, can be empty. If empty then
* validation will happen at table level.
* @param columnQualifier
* The column qualifier. Optional argument, can be empty. If empty then
* validation will happen at table and column family level. columnQualifier
* will not be considered if columnFamily is passed as null or empty.
* @param userName
* User name, it shouldn't be null or empty.
* @param actions
* Actions
* @return true if access allowed to the specified user, otherwise false.
* @throws Throwable
* on failure
*/
public static boolean hasPermission(Connection connection, String tableName, byte[] columnFamily, byte[] columnQualifier, String userName, Permission... actions) throws Throwable {
if (StringUtils.isEmpty(tableName) || StringUtils.isEmpty(userName)) {
throw new IllegalArgumentException("Table and user name can't be null or empty.");
}
List<Permission> permissions = new ArrayList<>(1);
permissions.add(Permission.newBuilder(TableName.valueOf(tableName)).withFamily(columnFamily).withQualifier(columnQualifier).withActions(actions).build());
return connection.getAdmin().hasUserPermissions(userName, permissions).get(0);
} | 3.26 |
hbase_AccessControlClient_m0_rdh | /**
* Grants permission on the specified table for the specified user
*
* @param connection
* The Connection instance to use
* @param tableName
* the table name
* @param userName
* the user name
* @param family
* the column family
* @param qual
* the column qualifier
* @param mergeExistingPermissions
* If set to false, later granted permissions will override
* previous granted permissions. otherwise, it'll merge with
* previous granted permissions.
* @param actions
* the actions
*/
private static void m0(Connection connection, final TableName tableName, final String userName, final byte[] family, final byte[] qual, boolean mergeExistingPermissions, final Permission... actions) throws Throwable {
connection.getAdmin().grant(new UserPermission(userName, Permission.newBuilder(tableName).withFamily(family).withQualifier(qual).withActions(actions).build()), mergeExistingPermissions);
} | 3.26 |
hbase_ByteBufferUtils_drainInputStreamToBuffer_rdh | /**
* Copy from the InputStream to a new heap ByteBuffer until the InputStream is exhausted.
*/
public static ByteBuffer drainInputStreamToBuffer(InputStream is) throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream(4096);
IOUtils.copyBytes(is, baos, 4096, true);
ByteBuffer buffer = ByteBuffer.wrap(baos.toByteArray());
buffer.rewind();
return buffer;
} | 3.26 |
hbase_ByteBufferUtils_searchDelimiterIndex_rdh | /**
* Find index of passed delimiter.
*
* @return Index of delimiter having started from start of <code>b</code> moving rightward.
*/
public static int searchDelimiterIndex(ByteBuffer b, int offset, final int length, final int delimiter) {
for (int i = offset, n = offset + length; i < n; i++) {
if (b.get(i) == delimiter) {
return i;
}
}
return -1;
} | 3.26 |
hbase_ByteBufferUtils_putInt_rdh | /**
* Put an int value out to the given ByteBuffer's current position in big-endian format. This also
* advances the position in buffer by int size.
*
* @param buffer
* the ByteBuffer to write to
* @param val
* int to write out
*/
public static void putInt(ByteBuffer buffer, int val) {
ConverterHolder.BEST_CONVERTER.putInt(buffer, val);
} | 3.26 |
hbase_ByteBufferUtils_readVLong_rdh | /**
* Similar to {@link WritableUtils#readVLong(java.io.DataInput)} but reads from a
* {@link ByteBuff}.
*/
public static long readVLong(ByteBuff in) {
return readVLong(in::get);
} | 3.26 |
hbase_ByteBufferUtils_toInt_rdh | /**
* Reads an int value at the given buffer's offset.
*
* @param buffer
* input byte buffer to read
* @param offset
* input offset where int is
* @return int value at offset
*/
public static int toInt(ByteBuffer buffer, int offset) {
return ConverterHolder.BEST_CONVERTER.toInt(buffer, offset);
} | 3.26 |
hbase_ByteBufferUtils_putShort_rdh | /**
* Put a short value out to the given ByteBuffer's current position in big-endian format. This
* also advances the position in buffer by short size.
*
* @param buffer
* the ByteBuffer to write to
* @param val
* short to write out
*/
public static void putShort(ByteBuffer buffer, short val)
{
ConverterHolder.BEST_CONVERTER.putShort(buffer, val);
} | 3.26 |
hbase_ByteBufferUtils_copyBufferToStream_rdh | /**
* Copy data from a buffer to an output stream. Does not update the position in the buffer.
*
* @param out
* the output stream to write bytes to
* @param in
* the buffer to read bytes from
* @param offset
* the offset in the buffer (from the buffer's array offset) to start copying bytes
* from
* @param length
* the number of bytes to copy
*/
public static void copyBufferToStream(DataOutput out, ByteBuffer in, int offset, int length) throws IOException {
if (out instanceof ByteBufferWriter) {
((ByteBufferWriter) (out)).write(in, offset, length);
} else if (in.hasArray()) {
out.write(in.array(), in.arrayOffset() + offset, length);
} else {
for (int i = 0; i < length; ++i) {
out.write(toByte(in, offset + i));
}
}
} | 3.26 |
hbase_ByteBufferUtils_findCommonPrefix_rdh | /**
* Find length of common prefix in two arrays.
*
* @param left
* ByteBuffer to be compared.
* @param leftOffset
* Offset in left ByteBuffer.
* @param leftLength
* Length of left ByteBuffer.
* @param right
* Array to be compared
* @param rightOffset
* Offset in right Array.
* @param rightLength
* Length of right Array.
*/
public static int findCommonPrefix(ByteBuffer left, int leftOffset, int leftLength, byte[] right, int rightOffset, int rightLength) {
return CommonPrefixerHolder.BEST_COMMON_PREFIXER.findCommonPrefix(left, leftOffset, leftLength, right, rightOffset, rightLength);
} | 3.26 |
hbase_ByteBufferUtils_copyFromArrayToBuffer_rdh | /**
* Copies bytes from given array's offset to length part into the given buffer. Puts the bytes to
* buffer's given position. This doesn't affect the position of buffer.
*
* @param out
* output bytebuffer to copy to
* @param outOffset
* output buffer offset
* @param in
* input array to copy from
* @param inOffset
* input offset to copy from
* @param length
* the number of bytes to copy
*/
public static void copyFromArrayToBuffer(ByteBuffer out, int outOffset, byte[] in, int inOffset, int length)
{
if (out.hasArray()) {
System.arraycopy(in, inOffset, out.array(), out.arrayOffset() + outOffset, length);
} else if (UNSAFE_AVAIL) {
UnsafeAccess.copy(in, inOffset, out, outOffset, length);
} else {
ByteBuffer outDup = out.duplicate();
outDup.position(outOffset);
outDup.put(in, inOffset, length);
}
} | 3.26 |
hbase_ByteBufferUtils_copyOfRange_rdh | /**
* Similar to {@link Arrays#copyOfRange(byte[], int, int)}
*
* @param original
* the buffer from which the copy has to happen
* @param from
* the starting index
* @param to
* the ending index
* @return a byte[] created out of the copy
*/
public static byte[] copyOfRange(ByteBuffer original, int from, int to) {
int newLength = to - from;
if (newLength < 0) {
throw new IllegalArgumentException(from + " > " + to);
}
byte[] copy = new byte[newLength];
ByteBufferUtils.copyFromBufferToArray(copy, original, from, 0, newLength);
return copy;
} | 3.26 |
hbase_ByteBufferUtils_m3_rdh | /**
* Reads a double value at the given buffer's offset.
*
* @param buffer
* input byte buffer to read
* @param offset
* offset where double is
* @return double value at offset
*/
public static double m3(ByteBuffer buffer, int offset) {
return Double.longBitsToDouble(toLong(buffer, offset));
} | 3.26 |
hbase_ByteBufferUtils_copyFromBufferToArray_rdh | /**
* Copies specified number of bytes from given offset of 'in' ByteBuffer to the array. This
* doesn't affect the position of buffer.
*
* @param out
* output array to copy input bytebuffer to
* @param in
* input bytebuffer to copy from
* @param sourceOffset
* offset of source bytebuffer
* @param destinationOffset
* offset of destination array
* @param length
* the number of bytes to copy
*/
public static void copyFromBufferToArray(byte[] out, ByteBuffer in, int sourceOffset, int destinationOffset, int length) {
if (in.hasArray()) {
System.arraycopy(in.array(), sourceOffset + in.arrayOffset(), out, destinationOffset, length);
} else if (UNSAFE_AVAIL) {
UnsafeAccess.copy(in, sourceOffset, out, destinationOffset, length);
} else {
ByteBuffer inDup = in.duplicate();
inDup.position(sourceOffset);
inDup.get(out, destinationOffset, length);
}
} | 3.26 |
hbase_ByteBufferUtils_intFitsIn_rdh | /**
* Check how many bytes is required to store value.
*
* @param value
* Value which size will be tested.
* @return How many bytes are required to store value.
*/
public static int intFitsIn(final int value) {
if (value < 0) {
return 4;
}
if (value < (1 << (2 * 8))) {
if (value < (1 << (1 * 8))) {
return 1;
}
return 2;
}
if (value <= (1 << (3 * 8))) {
return 3;
}
return 4;
} | 3.26 |
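A few worked values, following the branches of intFitsIn exactly as written above; the wrapper class and import only exist to make the snippet self-contained:

import org.apache.hadoop.hbase.util.ByteBufferUtils;

public class IntFitsInDemo {
  public static void main(String[] args) {
    System.out.println(ByteBufferUtils.intFitsIn(255));   // 1: 255 < (1 << 8)
    System.out.println(ByteBufferUtils.intFitsIn(256));   // 2: 256 < (1 << 16)
    System.out.println(ByteBufferUtils.intFitsIn(70000)); // 3: 70000 <= (1 << 24)
    System.out.println(ByteBufferUtils.intFitsIn(-1));    // 4: negative values always take 4 bytes
  }
}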
hbase_ByteBufferUtils_m2_rdh | /**
* Copy the data to the output stream and update position in buffer.
*
* @param out
* the stream to write bytes to
* @param in
* the buffer to read bytes from
* @param length
* the number of bytes to copy
*/
public static void m2(OutputStream out, ByteBuffer in, int length) throws IOException {
copyBufferToStream(out, in, in.position(), length);
skip(in, length);
} | 3.26 |
hbase_ByteBufferUtils_putCompressedInt_rdh | /**
* Put an integer into the buffer using 7-bit encoding. For each written byte: 7 bits are used to
* store the value and 1 bit is used to indicate whether there is a next byte.
*
* @param value
* Int to be compressed.
* @param out
* Where to put compressed data
* @return Number of bytes written.
* @throws IOException
* on stream error
*/
public static int putCompressedInt(OutputStream out, final int value) throws IOException {
int i = 0;
int tmpvalue = value;
do {
byte b = ((byte) (tmpvalue & VALUE_MASK));
tmpvalue >>>= NEXT_BIT_SHIFT;
if (tmpvalue != 0) {
b |= ((byte) (NEXT_BIT_MASK));
}
out.write(b);
i++;
} while (tmpvalue != 0);
return i;
} | 3.26 |
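For orientation: the value 300 (binary 1 0010 1100) is written by putCompressedInt as the two bytes 0xAC 0x02. A minimal sketch of a matching decoder (not an HBase API; it only assumes the 7-bit payload plus high continuation-bit layout used by the writer above):

import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

public final class CompressedIntReader {
  /** Reads an int written in the 7-bits-per-byte format used by putCompressedInt. */
  public static int readCompressedInt(InputStream in) throws IOException {
    int result = 0;
    int shift = 0;
    int b;
    do {
      b = in.read();
      if (b == -1) {
        throw new EOFException("Stream ended in the middle of a compressed int");
      }
      result |= (b & 0x7f) << shift; // low 7 bits carry payload
      shift += 7;
    } while ((b & 0x80) != 0);       // high bit set means another byte follows
    return result;
  }
}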