name | code_snippet | score
---|---|---|
hbase_ZKProcedureCoordinator_m0_rdh | /**
 * Start monitoring znodes in ZK - subclass hook to start monitoring the znodes it cares about.
 *
 * @return true on success, false if initialization errors were encountered.
*/
@Override
public final boolean m0(final ProcedureCoordinator coordinator)
{
if (this.coordinator != null) {
throw new IllegalStateException("ZKProcedureCoordinator already started and already has listener installed");
}
this.coordinator = coordinator;
try {
      this.zkProc = new ZKProcedureUtil(watcher, procedureType) {
        @Override
public void nodeCreated(String path) {
if (!isInProcedurePath(path))
return;
LOG.debug("Node created: " + path);
logZKTree(this.baseZNode);
if (isAcquiredPathNode(path)) {
// node wasn't present when we created the watch so zk event triggers acquire
coordinator.memberAcquiredBarrier(ZKUtil.getNodeName(ZKUtil.getParent(path)), ZKUtil.getNodeName(path));
          } else if (isReachedPathNode(path)) {
            // node was absent when we created the watch so zk event triggers the finished barrier.
            // TODO Nothing prevents the acquire and reached znodes from showing up in the wrong order.
String procName = ZKUtil.getNodeName(ZKUtil.getParent(path));
String member = ZKUtil.getNodeName(path);
// get the data from the procedure member
try {
byte[] dataFromMember = ZKUtil.getData(watcher, path);
// ProtobufUtil.isPBMagicPrefix will check null
if ((dataFromMember != null) && (dataFromMember.length > 0)) {
if (!ProtobufUtil.isPBMagicPrefix(dataFromMember)) {
ForeignException ee = new ForeignException(coordName, "Failed to get data from finished node or data is illegally formatted:" + path);
coordinator.abortProcedure(procName, ee);
} else {
                  dataFromMember = Arrays.copyOfRange(dataFromMember, ProtobufUtil.lengthOfPBMagic(), dataFromMember.length);
                  LOG.debug("Finished data from procedure '{}' member '{}': {}", procName, member, new String(dataFromMember, StandardCharsets.UTF_8));
                  coordinator.memberFinishedBarrier(procName, member, dataFromMember);
}
} else {
coordinator.memberFinishedBarrier(procName, member, dataFromMember);
}
              } catch (KeeperException e) {
                ForeignException ee = new ForeignException(coordName, e);
                coordinator.abortProcedure(procName, ee);
              } catch (InterruptedException e) {
                ForeignException ee = new ForeignException(coordName, e);
                coordinator.abortProcedure(procName, ee);
              }
} else if (isAbortPathNode(path)) {
abort(path);
} else {
LOG.debug("Ignoring created notification for node:" + path);
}
}
};
zkProc.clearChildZNodes();
} catch (KeeperException e) {
LOG.error("Unable to start the ZK-based Procedure Coordinator rpcs.", e);return false;
}
LOG.debug("Starting controller for procedure member=" + coordName);
return true;
} | 3.26 |
hbase_SequenceIdAccounting_onRegionClose_rdh | /**
* Clear all the records of the given region as it is going to be closed.
* <p/>
 * We will call this once we get the region close marker. We need this because, if we use
 * Durability.ASYNC_WAL, after calling startCacheFlush we may still get some in-flight wal entries
 * that have not been processed yet; these would leave orphan records in
 * lowestUnflushedSequenceIds and then cause too many WAL files.
* <p/>
* See HBASE-23157 for more details.
*/
void onRegionClose(byte[] encodedRegionName) {
synchronized(tieLock) {
this.lowestUnflushedSequenceIds.remove(encodedRegionName);
Map<ImmutableByteArray, Long> flushing = this.flushingSequenceIds.remove(encodedRegionName);
if (flushing != null) {
LOG.warn("Still have flushing records when closing {}, {}", Bytes.toString(encodedRegionName), flushing.entrySet().stream().map(e -> (e.getKey().toString() + "->") + e.getValue()).collect(Collectors.joining(",", "{", "}")));
}
}
this.highestSequenceIds.remove(encodedRegionName);
} | 3.26 |
hbase_SequenceIdAccounting_findLower_rdh | /**
* Iterates over the given Map and compares sequence ids with corresponding entries in
* {@link #lowestUnflushedSequenceIds}. If a region in {@link #lowestUnflushedSequenceIds} has a
 * sequence id less than or equal to that passed in <code>sequenceids</code>, then return it.
*
* @param sequenceids
* Sequenceids keyed by encoded region name.
 * @return stores of regions found in this instance with sequence ids less than or equal to those passed in.
*/
Map<byte[], List<byte[]>> findLower(Map<byte[], Long> sequenceids) {
Map<byte[], List<byte[]>> toFlush = null;
// Keeping the old behavior of iterating unflushedSeqNums under oldestSeqNumsLock.
synchronized(tieLock) {
for (Map.Entry<byte[], Long> e : sequenceids.entrySet()) {
Map<ImmutableByteArray, Long> m = this.lowestUnflushedSequenceIds.get(e.getKey());
if (m == null) {
continue;
}
for (Map.Entry<ImmutableByteArray, Long> me : m.entrySet()) {
if (me.getValue() <= e.getValue()) {
if (toFlush == null) {
            toFlush = new TreeMap<>(Bytes.BYTES_COMPARATOR);
}
toFlush.computeIfAbsent(e.getKey(), k -> new ArrayList<>()).add(Bytes.toBytes(me.getKey().toString()));
}
}
}
}
return toFlush;
} | 3.26 |
hbase_SequenceIdAccounting_areAllLower_rdh | /**
* See if passed <code>sequenceids</code> are lower -- i.e. earlier -- than any outstanding
* sequenceids, sequenceids we are holding on to in this accounting instance.
*
* @param sequenceids
* Keyed by encoded region name. Cannot be null (doesn't make sense for it to
* be null).
* @param keysBlocking
* An optional collection that is used to return the specific keys that are
* causing this method to return false.
* @return true if all sequenceids are lower, older than, the old sequenceids in this instance.
*/
  boolean areAllLower(Map<byte[], Long> sequenceids, Collection<byte[]> keysBlocking) {
    Map<byte[], Long> flushing = null;
Map<byte[], Long> v42 = null;
synchronized(this.tieLock) {
// Get a flattened -- only the oldest sequenceid -- copy of current flushing and unflushed
// data structures to use in tests below.
flushing = flattenToLowestSequenceId(this.flushingSequenceIds);
v42 = flattenToLowestSequenceId(this.lowestUnflushedSequenceIds);
    }
    boolean result = true;
for (Map.Entry<byte[], Long> e : sequenceids.entrySet()) {
long oldestFlushing = Long.MAX_VALUE;
long oldestUnflushed = Long.MAX_VALUE;
      if ((flushing != null) && flushing.containsKey(e.getKey())) {
oldestFlushing = flushing.get(e.getKey());
}
if ((v42 != null) && v42.containsKey(e.getKey())) {
oldestUnflushed = v42.get(e.getKey());
}
long min = Math.min(oldestFlushing, oldestUnflushed);
if (min <= e.getValue()) {
if (keysBlocking == null) {
return false;
}
result = false;
keysBlocking.add(e.getKey());
// Continue examining the map so we could log all regions blocking this WAL.
}
}
    return result;
} | 3.26 |
hbase_SequenceIdAccounting_updateStore_rdh | /**
* Update the store sequence id, e.g., upon executing in-memory compaction
*/
void updateStore(byte[] encodedRegionName, byte[] familyName, Long sequenceId, boolean onlyIfGreater) {
if (sequenceId == null) {
return;
}
Long highest = this.highestSequenceIds.get(encodedRegionName);
if ((highest == null) || (sequenceId > highest)) {
this.highestSequenceIds.put(encodedRegionName, sequenceId);
}
    ImmutableByteArray familyNameWrapper = ImmutableByteArray.wrap(familyName);
    synchronized(this.tieLock) {
      ConcurrentMap<ImmutableByteArray, Long> m = getOrCreateLowestSequenceIds(encodedRegionName);
boolean replaced = false;
while (!replaced) {
Long oldSeqId = m.get(familyNameWrapper);
if (oldSeqId == null) {
m.put(familyNameWrapper, sequenceId);
replaced = true;
} else if (onlyIfGreater) {
if (sequenceId > oldSeqId) {
replaced = m.replace(familyNameWrapper, oldSeqId, sequenceId);
} else {
return;
}
} else {
// replace even if sequence id is not greater than oldSeqId
m.put(familyNameWrapper, sequenceId);
return;
}
}
}
} | 3.26 |
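The loop in updateStore above is a lock-free "keep the newer value" update driven by ConcurrentMap.replace. A minimal JDK-only sketch of the same compare-and-swap retry pattern (illustrative names, not HBase's SequenceIdAccounting):

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class HighestSeqIdTracker {
  private final ConcurrentMap<String, Long> highest = new ConcurrentHashMap<>();

  /** Store sequenceId for the key only if it is greater than the current value. */
  void updateIfGreater(String key, long sequenceId) {
    while (true) {
      Long old = highest.get(key);
      if (old == null) {
        // putIfAbsent returns null when our value won; otherwise retry against the winner.
        if (highest.putIfAbsent(key, sequenceId) == null) {
          return;
        }
      } else if (sequenceId > old) {
        // replace(k, oldVal, newVal) is an atomic compare-and-swap; retry if another thread raced us.
        if (highest.replace(key, old, sequenceId)) {
          return;
        }
      } else {
        return; // existing value is already >= sequenceId
      }
    }
  }
}
```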
hbase_SequenceIdAccounting_getLowestSequenceId_rdh | /**
*
* @param sequenceids
* Map to search for lowest value.
* @return Lowest value found in <code>sequenceids</code>.
*/
  private static long getLowestSequenceId(Map<?, Long> sequenceids) {
long lowest = HConstants.NO_SEQNUM;
for (Map.Entry<?, Long> entry : sequenceids.entrySet()) {
if (entry.getKey().toString().equals("METAFAMILY")) {
continue;
}
Long v19 = entry.getValue();
if ((lowest == HConstants.NO_SEQNUM) || (v19.longValue() < lowest)) {
lowest = v19.longValue();
}
}
return lowest;
} | 3.26 |
hbase_SequenceIdAccounting_update_rdh | /**
 * We've been passed a new sequenceid for the region. Set it as the highest seen for this region
 * and, if we are to record the oldest (i.e. lowest) sequenceids, save it as the oldest seen if
 * nothing currently older.
*
* @param lowest
* Whether to keep running account of oldest sequence id.
*/
void update(byte[] encodedRegionName, Set<byte[]> families, long sequenceid, final boolean lowest) {
Long l = Long.valueOf(sequenceid);
this.highestSequenceIds.put(encodedRegionName, l);
if (lowest) {
ConcurrentMap<ImmutableByteArray, Long> m = getOrCreateLowestSequenceIds(encodedRegionName);
for (byte[] familyName : families) {
m.putIfAbsent(ImmutableByteArray.wrap(familyName), l);
}
}
} | 3.26 |
hbase_RegionReplicaCandidateGenerator_selectCoHostedRegionPerGroup_rdh | /**
* Randomly select one regionIndex out of all region replicas co-hosted in the same group (a group
* is a server, host or rack)
*
* @param colocatedReplicaCountsPerGroup
* either Cluster.colocatedReplicaCountsPerServer,
* colocatedReplicaCountsPerHost or
* colocatedReplicaCountsPerRack
* @param regionsPerGroup
* either Cluster.regionsPerServer, regionsPerHost or
* regionsPerRack
* @param regionIndexToPrimaryIndex
* Cluster.regionsIndexToPrimaryIndex
   * @return a regionIndex for the selected primary or -1 if there is no co-location
   */
  int selectCoHostedRegionPerGroup(Int2IntCounterMap colocatedReplicaCountsPerGroup, int[] regionsPerGroup, int[] regionIndexToPrimaryIndex) {
    final IntArrayList colocated = new IntArrayList(colocatedReplicaCountsPerGroup.size(), -1);
colocatedReplicaCountsPerGroup.forEach((primary, count) -> {
if (count > 1) {
// means consecutive primaries, indicating co-location
colocated.add(primary);
}
    });
    if (!colocated.isEmpty()) {
int rand = ThreadLocalRandom.current().nextInt(colocated.size());
int selectedPrimaryIndex = colocated.get(rand);
// we have found the primary id for the region to move. Now find the actual regionIndex
// with the given primary, prefer to move the secondary region.
      for (int regionIndex : regionsPerGroup) {
        if (selectedPrimaryIndex == regionIndexToPrimaryIndex[regionIndex]) {
          // always move the secondary, not the primary
          if (selectedPrimaryIndex != regionIndex) {
            return regionIndex;
          }
        }
}
}
return -1;
} | 3.26 |
hbase_ClientExceptionsUtil_findException_rdh | /**
* Look for an exception we know in the remote exception: - hadoop.ipc wrapped exceptions - nested
* exceptions Looks for: RegionMovedException / RegionOpeningException / RegionTooBusyException /
* RpcThrottlingException
*
* @return null if we didn't find the exception, the exception otherwise.
*/
public static Throwable findException(Object exception) {
if ((exception == null) || (!(exception instanceof Throwable))) {
return null;
}
Throwable v0 = ((Throwable) (exception));
while (v0 != null) {
if (isSpecialException(v0)) {
return v0;
}
if (v0 instanceof RemoteException) {
RemoteException re = ((RemoteException) (v0));
v0 = re.unwrapRemoteException();
// unwrapRemoteException can return the exception given as a parameter when it cannot
// unwrap it. In this case, there is no need to look further
// noinspection ObjectEquality
if (v0 == re) {
return v0;
}
// When we receive RemoteException which wraps IOException which has a cause as
// RemoteException we can get into infinite loop here; so if the cause of the exception
// is RemoteException, we shouldn't look further.
} else if ((v0.getCause() != null) && (!(v0.getCause() instanceof RemoteException))) {
v0 = v0.getCause();
} else {
return v0;
}
}
return null;
} | 3.26 |
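findException is essentially a guarded walk down the cause chain looking for a fixed set of "special" exception types. A stripped-down sketch of that walk with plain JDK types (the SPECIAL set below is an assumed placeholder, not HBase's actual list):

```java
import java.util.Set;

public final class ExceptionWalker {
  // Hypothetical stand-ins for the "special" exceptions the client looks for.
  private static final Set<Class<? extends Throwable>> SPECIAL =
    Set.of(IllegalStateException.class, java.util.concurrent.TimeoutException.class);

  /** Walk the cause chain and return the first recognized exception, or null if none is found. */
  public static Throwable findSpecial(Throwable t) {
    // Guard against cycles in badly-built cause chains by stopping when a cause points to itself.
    for (Throwable cur = t; cur != null; cur = (cur.getCause() == cur ? null : cur.getCause())) {
      for (Class<? extends Throwable> c : SPECIAL) {
        if (c.isInstance(cur)) {
          return cur;
        }
      }
    }
    return null;
  }
}
```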
hbase_ClientExceptionsUtil_translatePFFE_rdh | /**
* Translates exception for preemptive fast fail checks.
*
* @param t
* exception to check
* @return translated exception
*/
public static Throwable translatePFFE(Throwable t) throws IOException {
if (t instanceof NoSuchMethodError) {
// We probably can't recover from this exception by retrying.
throw ((NoSuchMethodError) (t));
}
if (t instanceof NullPointerException) {
// The same here. This is probably a bug.
throw ((NullPointerException) (t));
}
if (t instanceof UndeclaredThrowableException) {
t = t.getCause();
}
if (t instanceof RemoteException) {
t = ((RemoteException) (t)).unwrapRemoteException();
}
if (t instanceof DoNotRetryIOException) {
throw ((DoNotRetryIOException) (t));
}
if (t instanceof Error) {
throw ((Error) (t));
}
return t;
} | 3.26 |
hbase_ClientExceptionsUtil_isConnectionException_rdh | /**
* Check if the exception is something that indicates that we cannot contact/communicate with the
* server.
*
* @param e
* exception to check
* @return true when exception indicates that the client wasn't able to make contact with server
*/
public static boolean isConnectionException(Throwable e) {
if (e == null) {
return false;
    }
    for (Class<? extends Throwable> clazz : CONNECTION_EXCEPTION_TYPES) {
if (clazz.isAssignableFrom(e.getClass())) {
return true;
}
}
return false;
} | 3.26 |
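isConnectionException reduces to "is the throwable assignable to any class in a fixed list". A self-contained sketch of the same check (the exception list here is an illustrative assumption, not CONNECTION_EXCEPTION_TYPES):

```java
import java.net.ConnectException;
import java.net.SocketTimeoutException;
import java.util.List;

public final class ConnectionExceptionCheck {
  // Illustrative stand-ins for the connection-exception type list.
  private static final List<Class<? extends Throwable>> TYPES =
    List.of(ConnectException.class, SocketTimeoutException.class, java.io.EOFException.class);

  static boolean isConnectionException(Throwable e) {
    if (e == null) {
      return false;
    }
    for (Class<? extends Throwable> clazz : TYPES) {
      // isAssignableFrom matches the listed exception itself and any subclass of it.
      if (clazz.isAssignableFrom(e.getClass())) {
        return true;
      }
    }
    return false;
  }
}
```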
hbase_ServerListener_m0_rdh | /**
* The server was removed from the cluster.
*
* @param serverName
   *          The remote server's name.
*/
default void m0(final ServerName serverName) {
} | 3.26 |
hbase_ServerListener_serverAdded_rdh | /**
* The server has joined the cluster.
*
* @param serverName
   *          The remote server's name.
*/
default void serverAdded(final ServerName serverName) {
} | 3.26 |
hbase_ServerListener_waiting_rdh | /**
* Started waiting on RegionServers to check-in.
*/
default void waiting() {
} | 3.26 |
hbase_ByteBufferIOEngine_write_rdh | /**
* Transfers data from the given {@link ByteBuff} to the buffer array. Position of source will be
* advanced by the {@link ByteBuffer#remaining()}.
*
* @param src
* the given byte buffer from which bytes are to be read.
* @param offset
* The offset in the ByteBufferArray of the first byte to be written
* @throws IOException
* throws IOException if writing to the array throws exception
*/
@Override
public void write(ByteBuff src, long offset) throws IOException {
bufferArray.write(offset, src);
} | 3.26 |
hbase_ByteBufferIOEngine_shutdown_rdh | /**
* No operation for the shutdown in the memory IO engine
*/
@Override
public void shutdown() {
// Nothing to do.
} | 3.26 |
hbase_ByteBufferIOEngine_isPersistent_rdh | /**
* Memory IO engine is always unable to support persistent storage for the cache
*/
  @Override
  public boolean isPersistent() {
return false;
} | 3.26 |
hbase_ByteBufferIOEngine_sync_rdh | /**
* No operation for the sync in the memory IO engine
*/
@Override
public void sync() {
// Nothing to do.
} | 3.26 |
hbase_AccessChecker_requireTablePermission_rdh | /**
* Authorizes that the current user has any of the given permissions for the given table, column
* family and column qualifier.
*
* @param user
* Active user to which authorization checks should be applied
* @param request
* Request type
* @param tableName
* Table requested
* @param family
* Column family param
* @param qualifier
* Column qualifier param
* @throws IOException
* if obtaining the current user fails
* @throws AccessDeniedException
* if user has no authorization
   */
  public void requireTablePermission(User user, String request, TableName tableName, byte[] family, byte[] qualifier, Action... permissions) throws IOException {
AuthResult result = null;
for (Action permission : permissions) {
if (f0.authorizeUserTable(user, tableName, permission)) {
result = AuthResult.allow(request, "Table permission granted", user, permission, tableName, null, null);
result.getParams().setFamily(family).setQualifier(qualifier);
break;
} else {
// rest of the world
result = AuthResult.deny(request, "Insufficient permissions", user, permission, tableName, family, qualifier);
result.getParams().setFamily(family).setQualifier(qualifier);
}
    }
    logResult(result);
    if (!result.isAllowed()) {
throw new AccessDeniedException("Insufficient permissions " + result.toContextString());
}
} | 3.26 |
hbase_AccessChecker_getUserGroups_rdh | /**
* Retrieve the groups of the given user.
*
* @param user
* User name
*/
  public static List<String> getUserGroups(String user) {
try {
return groupService.getGroups(user);
} catch (IOException e) {
LOG.error("Error occurred while retrieving group for " + user, e);return new ArrayList<>();
}} | 3.26 |
hbase_AccessChecker_hasUserPermission_rdh | /**
* Authorizes that if the current user has the given permissions.
*
* @param user
* Active user to which authorization checks should be applied
* @param request
* Request type
* @param permission
* Actions being requested
* @return True if the user has the specific permission
*/
public boolean hasUserPermission(User user, String request, Permission permission) {
    if (permission instanceof TablePermission) {
TablePermission tPerm = ((TablePermission) (permission));
for (Permission.Action action : permission.getActions()) {
AuthResult authResult = permissionGranted(request, user, action, tPerm.getTableName(), tPerm.getFamily(), tPerm.getQualifier());
AccessChecker.logResult(authResult);
if (!authResult.isAllowed()) {
return false;
        }
      }
} else if (permission instanceof NamespacePermission) {
NamespacePermission nsPerm = ((NamespacePermission) (permission));
AuthResult authResult;
for (Action action : nsPerm.getActions()) {
if (getAuthManager().authorizeUserNamespace(user, nsPerm.getNamespace(), action)) {
authResult = AuthResult.allow(request, "Namespace action allowed", user, action, null, null);
} else {
authResult = AuthResult.deny(request, "Namespace action denied", user, action, null, null);
}
        AccessChecker.logResult(authResult);
        if (!authResult.isAllowed()) {
return false;
}
}
} else {
AuthResult authResult;
for (Permission.Action action : permission.getActions()) {
if (getAuthManager().authorizeUserGlobal(user, action)) {
authResult = AuthResult.allow(request, "Global action allowed", user, action, null, null);
} else {
authResult = AuthResult.deny(request, "Global action denied", user, action, null, null);
}
AccessChecker.logResult(authResult);
if (!authResult.isAllowed()) {
return false;
}
}
}
    return true;
} | 3.26 |
hbase_AccessChecker_initGroupService_rdh | /* Initialize the group service. */
private void initGroupService(Configuration conf) {
if (groupService == null) {
if (conf.getBoolean(TestingGroups.TEST_CONF, false)) {
UserProvider.setGroups(new User.TestingGroups(UserProvider.getGroups()));
groupService = UserProvider.getGroups();
} else {
        groupService = Groups.getUserToGroupsMappingService(conf);
      }
}
} | 3.26 |
hbase_AccessChecker_requireGlobalPermission_rdh | /**
* Checks that the user has the given global permission. The generated audit log message will
* contain context information for the operation being authorized, based on the given parameters.
*
* @param user
* Active user to which authorization checks should be applied
* @param request
* Request type
* @param perm
* Action being requested
* @param namespace
* The given namespace
*/
  public void requireGlobalPermission(User user, String request, Action perm, String namespace) throws IOException {
    AuthResult authResult;
if (f0.authorizeUserGlobal(user, perm)) {
authResult = AuthResult.allow(request, "Global check allowed", user, perm, null);
      authResult.getParams().setNamespace(namespace);
      logResult(authResult);
} else {
authResult = AuthResult.deny(request, "Global check failed", user, perm, null);
authResult.getParams().setNamespace(namespace);
logResult(authResult);
      throw new AccessDeniedException(((("Insufficient permissions for user '" + (user != null ? user.getShortName() : "null")) + "' (global, action=") + perm.toString()) + ")");
}
} | 3.26 |
hbase_AccessChecker_requireNamespacePermission_rdh | /**
* Checks that the user has the given global or namespace permission.
*
* @param user
* Active user to which authorization checks should be applied
* @param request
* Request type
* @param namespace
* The given namespace
* @param tableName
* Table requested
* @param familyMap
* Column family map requested
* @param permissions
* Actions being requested
*/
  public void requireNamespacePermission(User user, String request, String namespace, TableName tableName, Map<byte[], ? extends Collection<byte[]>> familyMap, Action... permissions) throws IOException {
AuthResult result = null;
for (Action permission : permissions) {
if (f0.authorizeUserNamespace(user, namespace, permission)) {
result = AuthResult.allow(request, "Namespace permission granted", user, permission, namespace);
result.getParams().setTableName(tableName).setFamilies(familyMap);
break;
      } else {
// rest of the world
result = AuthResult.deny(request, "Insufficient permissions", user, permission, namespace);
result.getParams().setTableName(tableName).setFamilies(familyMap);
}
}
logResult(result);
if (!result.isAllowed()) {
throw new AccessDeniedException("Insufficient permissions " + result.toContextString());
}
} | 3.26 |
hbase_AccessChecker_requirePermission_rdh | /**
* Authorizes that the current user has any of the given permissions for the given table, column
* family and column qualifier.
*
* @param user
* Active user to which authorization checks should be applied
* @param request
* Request type
* @param tableName
* Table requested
* @param family
* Column family requested
* @param qualifier
* Column qualifier requested
* @param filterUser
* User name to be filtered from permission as requested
* @param permissions
* Actions being requested
* @throws IOException
* if obtaining the current user fails
* @throws AccessDeniedException
* if user has no authorization
*/
  public void requirePermission(User user, String request, TableName tableName, byte[] family, byte[] qualifier, String filterUser, Action... permissions) throws IOException {
    AuthResult result = null;
for (Action permission : permissions) {
if (f0.authorizeUserTable(user, tableName, family, qualifier, permission)) {
result = AuthResult.allow(request, "Table permission granted", user, permission, tableName, family, qualifier);
break;
} else {
// rest of the world
result = AuthResult.deny(request, "Insufficient permissions", user, permission, tableName, family, qualifier);
}
}
result.getParams().addExtraParam("filterUser", filterUser);
logResult(result);
    if (!result.isAllowed()) {
throw new AccessDeniedException("Insufficient permissions " + result.toContextString());
}
} | 3.26 |
hbase_AccessChecker_performOnSuperuser_rdh | /**
* Check if caller is granting or revoking superusers's or supergroups's permissions.
*
* @param request
* request name
* @param caller
* caller
* @param userToBeChecked
* target user or group
* @throws IOException
* AccessDeniedException if target user is superuser
*/
public void performOnSuperuser(String request, User caller, String userToBeChecked) throws IOException {
List<String> userGroups = new ArrayList<>();
userGroups.add(userToBeChecked);
if (!AuthUtil.isGroupPrincipal(userToBeChecked)) {
for (String group : getUserGroups(userToBeChecked)) {
userGroups.add(AuthUtil.toGroupEntry(group));
}
}
for (String name : userGroups) {
if (Superusers.isSuperUser(name)) {
AuthResult result = AuthResult.deny(request, "Granting or revoking superusers's or supergroups's permissions is not allowed", caller, Action.ADMIN, NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR);
logResult(result);
throw new AccessDeniedException(result.getReason());
}
}
} | 3.26 |
hbase_AccessChecker_requireAccess_rdh | /**
* Authorizes that the current user has any of the given permissions to access the table.
*
* @param user
* Active user to which authorization checks should be applied
* @param request
* Request type.
* @param tableName
* Table requested
* @param permissions
* Actions being requested
* @throws IOException
* if obtaining the current user fails
* @throws AccessDeniedException
* if user has no authorization
*/
  public void requireAccess(User user, String request, TableName tableName, Action... permissions) throws IOException {
AuthResult result = null;
for (Action permission : permissions) {
      if (f0.accessUserTable(user, tableName, permission)) {
result = AuthResult.allow(request, "Table permission granted", user, permission, tableName, null, null);
break;
} else {
// rest of the world
result = AuthResult.deny(request, "Insufficient permissions", user, permission, tableName, null, null);
}
}
logResult(result);
if (!result.isAllowed()) {
throw new AccessDeniedException("Insufficient permissions " +
result.toContextString());
}
} | 3.26 |
hbase_HealthChecker_init_rdh | /**
* Initialize.
*
* @param location
* the location of the health script
* @param timeout
* the timeout to be used for the health script
*/
public void init(String location, long timeout) {
this.healthCheckScript = location;
this.scriptTimeout = timeout;
ArrayList<String> execScript = new ArrayList<>();
execScript.add(healthCheckScript);
this.shexec = new ShellCommandExecutor(execScript.toArray(new String[execScript.size()]), null, null, scriptTimeout);
LOG.info((("HealthChecker initialized with script at " + this.healthCheckScript) + ", timeout=") + timeout);
} | 3.26 |
hbase_ZKTableArchiveClient_getArchivingEnabled_rdh | /**
* Determine if archiving is enabled (but not necessarily fully propagated) for a table
*
* @param table
* name of the table to check
* @return <tt>true</tt> if it is, <tt>false</tt> otherwise
* @throws IOException
* if an unexpected network issue occurs
* @throws KeeperException
* if zookeeper can't be reached
*/
  public boolean getArchivingEnabled(String table) throws IOException, KeeperException {
    return getArchivingEnabled(Bytes.toBytes(table));
  }
/**
*
* @return A new {@link HFileArchiveManager} | 3.26 |
hbase_ZKTableArchiveClient_enableHFileBackupAsync_rdh | /**
* Turn on backups for all HFiles for the given table.
* <p>
* All deleted hfiles are moved to the archive directory under the table directory, rather than
* being deleted.
* <p>
* If backups are already enabled for this table, does nothing.
* <p>
* If the table does not exist, the archiving the table's hfiles is still enabled as a future
* table with that name may be created shortly.
*
* @param table
* name of the table to start backing up
* @throws IOException
* if an unexpected exception occurs
* @throws KeeperException
* if zookeeper can't be reached
*/
  public void enableHFileBackupAsync(final byte[] table) throws IOException, KeeperException {
createHFileArchiveManager().enableHFileBackup(table).stop();
} | 3.26 |
hbase_ZKTableArchiveClient_disableHFileBackup_rdh | /**
* Disable hfile backups for all tables.
* <p>
* Previously backed up files are still retained (if present).
* <p>
* Asynchronous operation - some extra HFiles may be retained, in the archive directory after
* disable is called, dependent on the latency in zookeeper to the servers.
*
* @throws IOException
* if an unexpected exception occurs
* @throws KeeperException
* if zookeeper can't be reached
*/
public void disableHFileBackup() throws IOException, KeeperException {
createHFileArchiveManager().disableHFileBackup().stop();
} | 3.26 |
hbase_ZKTableArchiveClient_getArchiveZNode_rdh | /**
*
* @param conf
* conf to read for the base archive node
* @param zooKeeper
* zookeeper to used for building the full path
* @return get the znode for long-term archival of a table for
*/
  public static String getArchiveZNode(Configuration conf, ZKWatcher zooKeeper) {
    return ZNodePaths.joinZNode(zooKeeper.getZNodePaths().baseZNode, conf.get(ZOOKEEPER_ZNODE_HFILE_ARCHIVE_KEY, TableHFileArchiveTracker.HFILE_ARCHIVE_ZNODE_PARENT));
} | 3.26 |
hbase_EnabledTableSnapshotHandler_snapshotRegions_rdh | // TODO consider switching over to using regionnames, rather than server names. This would allow
// regions to migrate during a snapshot, and then be involved when they are ready. Still want to
// enforce a snapshot time constraints, but lets us be potentially a bit more robust.
/**
* This method kicks off a snapshot procedure. Other than that it hangs around for various phases
* to complete.
*/
@Override
protected void snapshotRegions(List<Pair<RegionInfo, ServerName>> regions) throws IOException {
Set<String> regionServers = new HashSet<>(regions.size());
for (Pair<RegionInfo, ServerName> region : regions) {
if (((region != null) && (region.getFirst() != null)) && (region.getSecond() != null)) {
RegionInfo hri = region.getFirst();
if (hri.isOffline() && (hri.isSplit() || hri.isSplitParent()))
continue;
regionServers.add(region.getSecond().toString());
}
}
// start the snapshot on the RS
Procedure proc = coordinator.startProcedure(this.monitor, this.snapshot.getName(), this.snapshot.toByteArray(), Lists.newArrayList(regionServers));
if (proc == null) {
String msg = ("Failed to submit distributed procedure for snapshot '" + snapshot.getName()) + "'";
LOG.error(msg);
throw new HBaseSnapshotException(msg);
}
try {
// wait for the snapshot to complete. A timer thread is kicked off that should cancel this
// if it takes too long.
proc.waitForCompleted();
LOG.info("Done waiting - online snapshot for " + this.snapshot.getName());
// Take the offline regions as disabled
      for (Pair<RegionInfo, ServerName> region : regions) {
        RegionInfo regionInfo = region.getFirst();
if ((regionInfo.isOffline() && (regionInfo.isSplit() || regionInfo.isSplitParent())) && RegionReplicaUtil.isDefaultReplica(regionInfo)) {
LOG.info("Take disabled snapshot of offline region=" + regionInfo);
snapshotDisabledRegion(regionInfo);
}
}
// handle the mob files if any.
boolean mobEnabled = MobUtils.hasMobColumns(htd);
if (mobEnabled) {
LOG.info("Taking snapshot for mob files in table " + htd.getTableName());
        // snapshot the mob files as an offline region.
RegionInfo mobRegionInfo = MobUtils.getMobRegionInfo(htd.getTableName());
m0(mobRegionInfo);
}
} catch (InterruptedException e) {
ForeignException ee = new ForeignException("Interrupted while waiting for snapshot to finish", e);
monitor.receive(ee);
Thread.currentThread().interrupt();
} catch (ForeignException e) {
monitor.receive(e);
}
} | 3.26 |
hbase_EnabledTableSnapshotHandler_m0_rdh | /**
* Takes a snapshot of the mob region
   */
  private void m0(final RegionInfo regionInfo) throws IOException {
snapshotManifest.addMobRegion(regionInfo);
monitor.rethrowException();
status.setStatus("Completed referencing HFiles for the mob region of table: " + snapshotTable);
} | 3.26 |
hbase_BlockCache_notifyFileBlockEvicted_rdh | /**
* Notifies the cache implementation that the given file had a block evicted
*
* @param fileName
 *          the file that had a block evicted.
*/
default void notifyFileBlockEvicted(String fileName) {
// noop
} | 3.26 |
hbase_BlockCache_cacheBlock_rdh | /**
* Add block to cache.
*
* @param cacheKey
* The block's cache key.
* @param buf
* The block contents wrapped in a ByteBuffer.
* @param inMemory
* Whether block should be treated as in-memory
* @param waitWhenCache
* Whether to wait for the cache to be flushed mainly when BucketCache is
* configured.
*/
default void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory, boolean waitWhenCache) {
cacheBlock(cacheKey, buf, inMemory);
} | 3.26 |
hbase_BlockCache_isMetaBlock_rdh | /**
* Check if block type is meta or index block
*
* @param blockType
* block type of a given HFile block
* @return true if block type is non-data block
*/
default boolean isMetaBlock(BlockType blockType) {
return (blockType != null) && (blockType.getCategory() != BlockCategory.DATA);
} | 3.26 |
hbase_BlockCache_getBlock_rdh | /**
* Fetch block from cache.
*
* @param cacheKey
* Block to fetch.
* @param caching
* Whether this request has caching enabled (used for stats)
* @param repeat
* Whether this is a repeat lookup for the same block (used to avoid
* double counting cache misses when doing double-check locking)
* @param updateCacheMetrics
* Whether to update cache metrics or not
* @param blockType
* BlockType
 * @return Block or null if block is not in the cache.
*/
  default Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, boolean updateCacheMetrics, BlockType blockType) {
return getBlock(cacheKey, caching, repeat, updateCacheMetrics);
} | 3.26 |
hbase_BlockCache_notifyFileCachingCompleted_rdh | /**
* Notifies the cache implementation that the given file has been fully cached (all its blocks
* made into the cache).
*
* @param fileName
* the file that has been completely cached.
*/
  default void notifyFileCachingCompleted(Path fileName, int totalBlockCount, int dataBlockCount, long size) {
// noop
} | 3.26 |
hbase_EnableTableProcedure_runCoprocessorAction_rdh | /**
* Coprocessor Action.
*
* @param env
* MasterProcedureEnv
* @param state
* the procedure state
*/
private void runCoprocessorAction(final MasterProcedureEnv env, final EnableTableState state) throws IOException, InterruptedException {
final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
if (cpHost != null) {
switch (state) {
        case ENABLE_TABLE_PRE_OPERATION:
          cpHost.preEnableTableAction(getTableName(), getUser());
          break;
        case ENABLE_TABLE_POST_OPERATION:
          cpHost.postCompletedEnableTableAction(getTableName(), getUser());
          break;
        default:
          throw new UnsupportedOperationException((this + " unhandled state=") + state);
}
}
} | 3.26 |
hbase_EnableTableProcedure_setTableStateToEnabled_rdh | /**
* Mark table state to Enabled
*
* @param env
* MasterProcedureEnv
*/
protected static void setTableStateToEnabled(final MasterProcedureEnv env, final TableName tableName) throws IOException {
// Flip the table to Enabled
env.getMasterServices().getTableStateManager().setTableState(tableName, State.ENABLED);
LOG.info(("Table '" +
tableName) + "' was successfully enabled.");
} | 3.26 |
hbase_EnableTableProcedure_prepareEnable_rdh | /**
* Action before any real action of enabling table. Set the exception in the procedure instead of
* throwing it. This approach is to deal with backward compatible with 1.0.
*
* @param env
* MasterProcedureEnv
* @return whether the table passes the necessary checks
*/
  private boolean prepareEnable(final MasterProcedureEnv env) throws IOException {
boolean canTableBeEnabled = true;
// Check whether table exists
if (!env.getMasterServices().getTableDescriptors().exists(tableName)) {
setFailure("master-enable-table", new TableNotFoundException(tableName));
canTableBeEnabled = false;
} else {
// There could be multiple client requests trying to disable or enable
// the table at the same time. Ensure only the first request is honored
// After that, no other requests can be accepted until the table reaches
// DISABLED or ENABLED.
//
// Note: in 1.0 release, we called TableStateManager.setTableStateIfInStates() to set
// the state to ENABLING from DISABLED. The implementation was done before table lock
// was implemented. With table lock, there is no need to set the state here (it will
// set the state later on). A quick state check should be enough for us to move forward.
TableStateManager tsm = env.getMasterServices().getTableStateManager();
TableState ts = tsm.getTableState(tableName);
if (!ts.isDisabled()) {
LOG.info("Not DISABLED tableState={}; skipping enable; {}", ts.getState(), this);
setFailure("master-enable-table", new TableNotDisabledException(ts.toString()));
canTableBeEnabled = false;
}
}
// We are done the check. Future actions in this procedure could be done asynchronously.
releaseSyncLatch();
    return canTableBeEnabled;
  } | 3.26 |
hbase_EnableTableProcedure_postEnable_rdh | /**
* Action after enabling table.
*
* @param env
* MasterProcedureEnv
* @param state
* the procedure state
   */
  private void postEnable(final MasterProcedureEnv env, final EnableTableState state) throws IOException, InterruptedException {
runCoprocessorAction(env, state);
} | 3.26 |
hbase_EnableTableProcedure_preEnable_rdh | /**
* Action before enabling table.
*
* @param env
* MasterProcedureEnv
* @param state
* the procedure state
*/
private void preEnable(final MasterProcedureEnv env, final EnableTableState state) throws IOException, InterruptedException {
runCoprocessorAction(env, state);
} | 3.26 |
hbase_EnableTableProcedure_getMaxReplicaId_rdh | /**
* Returns Maximum region replica id found in passed list of regions.
*/
private static int getMaxReplicaId(List<RegionInfo> regions) {
int max = 0;
for (RegionInfo regionInfo : regions) {
if (regionInfo.getReplicaId() > max) {
// Iterating through all the list to identify the highest replicaID region.
// We can stop after checking with the first set of regions??
max = regionInfo.getReplicaId();
}
}
return max;
} | 3.26 |
hbase_ZKListener_getWatcher_rdh | /**
* Returns The watcher associated with this listener
*/
  public ZKWatcher getWatcher() {
return this.watcher;
} | 3.26 |
hbase_ZKListener_nodeCreated_rdh | /**
* Called when a new node has been created.
*
* @param path
* full path of the new node
   */
  public void nodeCreated(String path) {
// no-op
} | 3.26 |
hbase_ZKListener_nodeDeleted_rdh | /**
* Called when a node has been deleted
*
* @param path
* full path of the deleted node
*/
  public void nodeDeleted(String path) {
// no-op
} | 3.26 |
hbase_ZKListener_nodeChildrenChanged_rdh | /**
* Called when an existing node has a child node added or removed.
*
* @param path
* full path of the node whose children have changed
*/
public void nodeChildrenChanged(String path) {
// no-op
} | 3.26 |
hbase_ZKListener_nodeDataChanged_rdh | /**
* Called when an existing node has changed data.
*
* @param path
* full path of the updated node
*/
public void nodeDataChanged(String path) {
// no-op
} | 3.26 |
hbase_RefCnt_create_rdh | /**
 * Create a {@link RefCnt} with an initial reference count = 1. If the reference count becomes
 * zero, the recycler will do nothing. Usually, a heap {@link ByteBuff} will use this kind of
 * refCnt to track its life cycle; it helps to abstract the code path, although tracking is not
 * really needed for an on-heap ByteBuff.
*/
public static RefCnt create() {
return new RefCnt(ByteBuffAllocator.NONE);
} | 3.26 |
hbase_RefCnt_hasRecycler_rdh | /**
* Returns true if this refCnt has a recycler.
*/
public boolean hasRecycler() {
return recycler != ByteBuffAllocator.NONE;
} | 3.26 |
hbase_ClientSnapshotDescriptionUtils_toString_rdh | /**
* Returns a single line (no \n) representation of snapshot metadata. Use this instead of the
* {@code toString} method of
* {@link org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription}.
* We don't replace
* {@link org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription}'s
* {@code toString}, because it is auto-generated by protoc.
*
* @param snapshot
* description of the snapshot
* @return single line string with a summary of the snapshot parameters
   */
  public static String toString(SnapshotProtos.SnapshotDescription snapshot) {
if (snapshot == null) {
return null;
}
    return new StringBuilder("{ ss=").append(snapshot.getName()).append(" table=").append(snapshot.hasTable() ? TableName.valueOf(snapshot.getTable()) : "").append(" type=").append(snapshot.getType()).append(" ttl=").append(snapshot.getTtl()).append(" }").toString();
} | 3.26 |
hbase_HeapMemoryManager_getHeapOccupancyPercent_rdh | /**
 * Returns heap occupancy percentage, 0 <= n <= 1, or -0.0 if there was an error asking the JVM.
*/
public float getHeapOccupancyPercent() {
return this.heapOccupancyPercent == Float.MAX_VALUE ? HEAP_OCCUPANCY_ERROR_VALUE : this.heapOccupancyPercent;
} | 3.26 |
hbase_HeapMemoryManager_isTunerOn_rdh | // Used by the test cases.
boolean isTunerOn() {
return this.tunerOn;
} | 3.26 |
hbase_ClientIdGenerator_generateClientId_rdh | /**
* Returns a unique ID incorporating IP address, PID, TID and timer. Might be an overkill... Note
* though that new UUID in java by default is just a random number.
*/
  public static byte[] generateClientId() {
    byte[] selfBytes = getIpAddressBytes();
    Long pid = getPid();
    long tid = Thread.currentThread().getId();
    long ts = EnvironmentEdgeManager.currentTime();
    byte[] id = new byte[selfBytes.length + (((pid != null ? 1 : 0) + 2) * Bytes.SIZEOF_LONG)];
    int offset = Bytes.putBytes(id, 0, selfBytes, 0, selfBytes.length);
    if (pid != null) {
offset = Bytes.putLong(id, offset, pid);
}
offset = Bytes.putLong(id, offset, tid);
offset = Bytes.putLong(id, offset, ts);
assert offset == id.length;
return id;
} | 3.26 |
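generateClientId packs IP bytes, PID, thread id and a timestamp into one array. A self-contained sketch of the same idea using only java.nio and ProcessHandle (the exact layout below is an assumption, not HBase's Bytes-based encoding):

```java
import java.net.InetAddress;
import java.nio.ByteBuffer;

public final class SimpleClientId {
  /** Build a best-effort unique id: IP bytes + PID + thread id + current time. */
  public static byte[] generate() throws Exception {
    byte[] ip = InetAddress.getLocalHost().getAddress();
    long pid = ProcessHandle.current().pid(); // JDK 9+ replacement for parsing the MXBean name
    long tid = Thread.currentThread().getId();
    long ts = System.currentTimeMillis();
    ByteBuffer buf = ByteBuffer.allocate(ip.length + 3 * Long.BYTES);
    buf.put(ip).putLong(pid).putLong(tid).putLong(ts);
    return buf.array();
  }
}
```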
hbase_ClientIdGenerator_getPid_rdh | /**
* Returns PID of the current process, if it can be extracted from JVM name, or null.
   */
  public static Long getPid() {
    String name = ManagementFactory.getRuntimeMXBean().getName();
    List<String> nameParts = Splitter.on('@').splitToList(name);
    if (nameParts.size() == 2) {
      // 12345@somewhere
      try {
        return Long.parseLong(Iterators.get(nameParts.iterator(), 0));
} catch (NumberFormatException ex) {
LOG.warn(("Failed to get PID from [" + name) + "]", ex);
}
} else {
LOG.warn(("Don't know how to get PID from [" + name) + "]");
}
return null;
} | 3.26 |
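getPid parses the "12345@hostname" form of the RuntimeMXBean name; on Java 9+ the JDK also exposes the PID directly. A minimal sketch of both approaches (hypothetical helper, not HBase code):

```java
import java.lang.management.ManagementFactory;

public final class PidLookup {
  /** Parse the PID out of the RuntimeMXBean name, e.g. "12345@hostname"; null if unparseable. */
  static Long pidFromMxBeanName() {
    String name = ManagementFactory.getRuntimeMXBean().getName();
    int at = name.indexOf('@');
    if (at > 0) {
      try {
        return Long.parseLong(name.substring(0, at));
      } catch (NumberFormatException ignored) {
        // fall through and report failure below
      }
    }
    return null;
  }

  /** On Java 9+ the JDK exposes the PID directly. */
  static long pidFromProcessHandle() {
    return ProcessHandle.current().pid();
  }
}
```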
hbase_ClientIdGenerator_getIpAddressBytes_rdh | /**
* Returns Some IPv4/IPv6 address available on the current machine that is up, not virtual and not
* a loopback address. Empty array if none can be found or error occurred.
*/
public static byte[] getIpAddressBytes() {
try {
return Addressing.getIpAddress().getAddress();
} catch (IOException ex) {
LOG.warn("Failed to get IP address bytes", ex);
}
return new byte[0];
} | 3.26 |
hbase_StealJobQueue_getStealFromQueue_rdh | /**
* Get a queue whose job might be stolen by the consumer of this original queue
*
* @return the queue whose job could be stolen
*/
public BlockingQueue<T> getStealFromQueue() {
return f0;
} | 3.26 |
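StealJobQueue lets an idle consumer fall back to a secondary "steal-from" queue. A rough sketch of that pattern with plain java.util.concurrent queues (hypothetical names; HBase's implementation is built on PriorityBlockingQueue):

```java
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class StealingConsumer<T> {
  private final BlockingQueue<T> ownQueue = new LinkedBlockingQueue<>();
  private final BlockingQueue<T> stealFromQueue = new LinkedBlockingQueue<>();

  /** Producers with lower-priority work submit to the steal-from queue. */
  public BlockingQueue<T> getStealFromQueue() {
    return stealFromQueue;
  }

  /** Take the next job, preferring our own queue and stealing only when it is empty. */
  public T takeNext() throws InterruptedException {
    while (true) {
      T job = ownQueue.poll(100, TimeUnit.MILLISECONDS); // wait briefly on our own work
      if (job != null) {
        return job;
      }
      job = stealFromQueue.poll(); // nothing local, so steal if anything is available
      if (job != null) {
        return job;
      }
    }
  }
}
```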
hbase_RSProcedureDispatcher_splitAndResolveOperation_rdh | /**
* Fetches {@link org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation}s
* from the given {@code remoteProcedures} and groups them by class of the returned operation.
* Then {@code resolver} is used to dispatch {@link RegionOpenOperation}s and
* {@link RegionCloseOperation}s.
*
* @param serverName
* RegionServer to which the remote operations are sent
* @param operations
* Remote procedures which are dispatched to the given server
* @param resolver
* Used to dispatch remote procedures to given server.
*/
public void splitAndResolveOperation(ServerName serverName, Set<RemoteProcedure> operations, RemoteProcedureResolver resolver) {
MasterProcedureEnv env = master.getMasterProcedureExecutor().getEnvironment();
ArrayListMultimap<Class<?>, RemoteOperation> reqsByType = buildAndGroupRequestByType(env, serverName, operations);
List<RegionOpenOperation> openOps = fetchType(reqsByType, RSProcedureDispatcher.RegionOpenOperation.class);
if (!openOps.isEmpty()) {
resolver.dispatchOpenRequests(env, openOps);
}
List<RegionCloseOperation> closeOps = fetchType(reqsByType, RSProcedureDispatcher.RegionCloseOperation.class);
if (!closeOps.isEmpty()) {
resolver.m0(env, closeOps);
}
List<ServerOperation> refreshOps = fetchType(reqsByType, RSProcedureDispatcher.ServerOperation.class);
if (!refreshOps.isEmpty()) {
resolver.dispatchServerOperations(env, refreshOps);
}
if (!reqsByType.isEmpty()) {
LOG.warn("unknown request type in the queue: " + reqsByType);
}
} | 3.26 |
hbase_RSProcedureDispatcher_sendRequest_rdh | // will be overridden in test.
protected ExecuteProceduresResponse sendRequest(final ServerName serverName, final ExecuteProceduresRequest request) throws IOException {
return FutureUtils.get(getRsAdmin().executeProcedures(request));
} | 3.26 |
hbase_KeyOnlyFilter_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof KeyOnlyFilter)) {
return false;
}
KeyOnlyFilter other = ((KeyOnlyFilter) (o));
return this.lenAsVal == other.lenAsVal;
} | 3.26 |
hbase_KeyOnlyFilter_toByteArray_rdh | /**
* Returns The filter serialized using pb
*/
@Override
public byte[] toByteArray() {
FilterProtos.KeyOnlyFilter.Builder builder = FilterProtos.KeyOnlyFilter.newBuilder();
builder.setLenAsVal(this.lenAsVal);
return builder.build().toByteArray();
} | 3.26 |
hbase_KeyOnlyFilter_m1_rdh | /**
* Parse a serialized representation of {@link KeyOnlyFilter}
*
* @param pbBytes
* A pb serialized {@link KeyOnlyFilter} instance
* @return An instance of {@link KeyOnlyFilter} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static KeyOnlyFilter m1(final byte[] pbBytes) throws DeserializationException {
FilterProtos.KeyOnlyFilter proto;
try {
proto = FilterProtos.KeyOnlyFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
    return new KeyOnlyFilter(proto.getLenAsVal());
  } | 3.26 |
hbase_MonitoredTaskImpl_expireNow_rdh | /**
* Force the completion timestamp backwards so that it expires now.
*/
@Override
public void expireNow() {
stateTime -= 180 * 1000;
} | 3.26 |
hbase_MonitoredTaskImpl_getStatusJournal_rdh | /**
* Returns the status journal. This implementation of status journal is not thread-safe. Currently
* we use this to track various stages of flushes and compactions where we can use this/pretty
* print for post task analysis, by which time we are already done changing states (writing to
* journal)
*/
@Override
  public List<StatusJournalEntry> getStatusJournal() {
if (journal == null) {
return Collections.emptyList();
} else {
return ImmutableList.copyOf(journal);
}
} | 3.26 |
hbase_SlowLogQueueService_consumeEventFromDisruptor_rdh | /**
* This implementation is specific to slowLog event. This consumes slowLog event from disruptor
* and inserts records to EvictingQueue.
*
* @param namedQueuePayload
* namedQueue payload from disruptor ring buffer
*/
@Override
public void consumeEventFromDisruptor(NamedQueuePayload namedQueuePayload) {
    if (!isOnlineLogProviderEnabled) {
      return;
}
if (!(namedQueuePayload instanceof RpcLogDetails)) {
LOG.warn("SlowLogQueueService: NamedQueuePayload is not of type RpcLogDetails.");
return;
}
    final RpcLogDetails rpcLogDetails = ((RpcLogDetails) (namedQueuePayload));
    final RpcCall rpcCall = rpcLogDetails.getRpcCall();
final String clientAddress = rpcLogDetails.getClientAddress();
final long responseSize = rpcLogDetails.getResponseSize();
final long blockBytesScanned = rpcLogDetails.getBlockBytesScanned();
final String className = rpcLogDetails.getClassName();
final TooSlowLog.SlowLogPayload.Type type = getLogType(rpcLogDetails);
if (type == null) {
return;
}
Descriptors.MethodDescriptor methodDescriptor = rpcCall.getMethod();
Message param = rpcLogDetails.getParam();
long receiveTime = rpcCall.getReceiveTime();
long startTime = rpcCall.getStartTime();
long endTime = EnvironmentEdgeManager.currentTime();
int processingTime = ((int) (endTime - startTime));
int qTime = ((int) (startTime - receiveTime));
final SlowLogParams slowLogParams = ProtobufUtil.getSlowLogParams(param, slowLogScanPayloadEnabled);
int numGets = 0;
int numMutations = 0;
int v19 = 0;
    if (param instanceof ClientProtos.MultiRequest) {
ClientProtos.MultiRequest multi = ((ClientProtos.MultiRequest) (param));
for (ClientProtos.RegionAction regionAction : multi.getRegionActionList()) {
for (ClientProtos.Action action : regionAction.getActionList()) {
if (action.hasMutation()) {
numMutations++;
}
if (action.hasGet()) {
            numGets++;
          }
if (action.hasServiceCall()) {
v19++;
          }
        }
}
}
    final String userName = rpcCall.getRequestUserName().orElse(StringUtils.EMPTY);
    final String methodDescriptorName = (methodDescriptor != null) ? methodDescriptor.getName() : StringUtils.EMPTY;
TooSlowLog.SlowLogPayload.Builder slowLogPayloadBuilder = TooSlowLog.SlowLogPayload.newBuilder().setCallDetails(((methodDescriptorName + "(") + param.getClass().getName()) + ")").setClientAddress(clientAddress).setMethodName(methodDescriptorName).setMultiGets(numGets).setMultiMutations(numMutations).setMultiServiceCalls(v19).setParam(slowLogParams != null ? slowLogParams.getParams() : StringUtils.EMPTY).setProcessingTime(processingTime).setQueueTime(qTime).setRegionName(slowLogParams != null ? slowLogParams.getRegionName() : StringUtils.EMPTY).setResponseSize(responseSize).setBlockBytesScanned(blockBytesScanned).setServerClass(className).setStartTime(startTime).setType(type).setUserName(userName).addAllRequestAttribute(buildNameBytesPairs(rpcLogDetails.getRequestAttributes())).addAllConnectionAttribute(buildNameBytesPairs(rpcLogDetails.getConnectionAttributes()));
if ((slowLogParams != null) && (slowLogParams.getScan() != null)) {
slowLogPayloadBuilder.setScan(slowLogParams.getScan());
}
TooSlowLog.SlowLogPayload slowLogPayload = slowLogPayloadBuilder.build();
slowLogQueue.add(slowLogPayload);
    if (isSlowLogTableEnabled) {
if (!slowLogPayload.getRegionName().startsWith("hbase:slowlog")) {
slowLogPersistentService.addToQueueForSysTable(slowLogPayload);
}
}
} | 3.26 |
hbase_MultiRowMutationEndpoint_start_rdh | /**
* Stores a reference to the coprocessor environment provided by the
* {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost} from the region where this
* coprocessor is loaded. Since this is a coprocessor endpoint, it always expects to be loaded on
* a table region, so always expects this to be an instance of
* {@link RegionCoprocessorEnvironment}.
*
* @param env
* the environment provided by the coprocessor host
* @throws IOException
* if the provided environment is not an instance of
* {@code RegionCoprocessorEnvironment}
*/
@Override
public void start(CoprocessorEnvironment env) throws IOException {
if (env instanceof RegionCoprocessorEnvironment) {
this.env = ((RegionCoprocessorEnvironment) (env));
    } else {
      throw new CoprocessorException("Must be loaded on a table region!");
}
} | 3.26 |
hbase_QuotaCache_getTableLimiter_rdh | /**
* Returns the limiter associated to the specified table.
*
* @param table
* the table to limit
* @return the limiter associated to the specified table
*/
public QuotaLimiter getTableLimiter(final TableName table) {
return getQuotaState(this.tableQuotaCache, table).getGlobalLimiter();
} | 3.26 |
hbase_QuotaCache_getNamespaceLimiter_rdh | /**
* Returns the limiter associated to the specified namespace.
*
* @param namespace
* the namespace to limit
* @return the limiter associated to the specified namespace
*/
public QuotaLimiter getNamespaceLimiter(final String namespace) {
return getQuotaState(this.namespaceQuotaCache, namespace).getGlobalLimiter();
} | 3.26 |
hbase_QuotaCache_getQuotaUserName_rdh | /**
* Applies a request attribute user override if available, otherwise returns the UGI's short
* username
*
* @param ugi
* The request's UserGroupInformation
*/
  private String getQuotaUserName(final UserGroupInformation ugi) {
    if (userOverrideRequestAttributeKey == null) {
return ugi.getShortUserName();
}
Optional<RpcCall> rpcCall = RpcServer.getCurrentCall();
if (!rpcCall.isPresent()) {
return ugi.getShortUserName();
}
byte[] override = rpcCall.get().getRequestAttribute(userOverrideRequestAttributeKey);
if (override == null) {
return ugi.getShortUserName();
}
return Bytes.toString(override);
} | 3.26 |
hbase_QuotaCache_getQuotaState_rdh | /**
* Returns the QuotaState requested. If the quota info is not in cache an empty one will be
* returned and the quota request will be enqueued for the next cache refresh.
   */
  private <K> QuotaState getQuotaState(final ConcurrentMap<K, QuotaState> quotasMap, final K key) {
return computeIfAbsent(quotasMap, key, QuotaState::new, this::triggerCacheRefresh);
} | 3.26 |
hbase_QuotaCache_m0_rdh | /**
* Returns the QuotaState associated to the specified user.
*
* @param ugi
* the user
* @return the quota info associated to specified user
*/
public UserQuotaState m0(final UserGroupInformation ugi) {
return computeIfAbsent(userQuotaCache, getQuotaUserName(ugi), UserQuotaState::new, this::triggerCacheRefresh);
} | 3.26 |
hbase_QuotaCache_updateQuotaFactors_rdh | /**
* Update quota factors which is used to divide cluster scope quota into machine scope quota For
* user/namespace/user over namespace quota, use [1 / RSNum] as machine factor. For table/user
* over table quota, use [1 / TotalTableRegionNum * MachineTableRegionNum] as machine factor.
*/
private void updateQuotaFactors() {
// Update machine quota factor
ClusterMetrics clusterMetrics;
try {
clusterMetrics = rsServices.getConnection().getAdmin().getClusterMetrics(EnumSet.of(Option.SERVERS_NAME, Option.TABLE_TO_REGIONS_COUNT));
} catch (IOException e) {
f0.warn("Failed to get cluster metrics needed for updating quotas", e);
return;
}
    int rsSize = clusterMetrics.getServersName().size();
    if (rsSize != 0) {
// TODO if use rs group, the cluster limit should be shared by the rs group
machineQuotaFactor = 1.0 / rsSize;
}
Map<TableName, RegionStatesCount> tableRegionStatesCount = clusterMetrics.getTableRegionStatesCount();
// Update table machine quota factors
    for (TableName tableName : tableQuotaCache.keySet()) {
if (tableRegionStatesCount.containsKey(tableName)) {
double factor = 1;
try {
long regionSize = tableRegionStatesCount.get(tableName).getOpenRegions();
if (regionSize == 0) {
factor = 0;
} else {
int localRegionSize = rsServices.getRegions(tableName).size();
factor = (1.0 * localRegionSize) / regionSize;
}
} catch (IOException e) {
f0.warn("Get table regions failed: {}", tableName, e);
}
tableMachineQuotaFactors.put(tableName, factor);
} else {
// TableName might have already been dropped (outdated)
tableMachineQuotaFactors.remove(tableName);
}
}
} | 3.26 |
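updateQuotaFactors boils down to two ratios: 1/liveRegionServers for machine-scope quotas and localRegions/totalOpenRegions for table-scope quotas. A tiny stand-alone illustration of that arithmetic (hypothetical helper, not HBase code):

```java
public final class QuotaFactors {
  /** Share of a cluster-wide limit this RegionServer should enforce. */
  static double machineFactor(int liveRegionServers) {
    return liveRegionServers == 0 ? 1.0 : 1.0 / liveRegionServers;
  }

  /** Share of a cluster-wide table limit, weighted by how many of the table's regions are local. */
  static double tableMachineFactor(int localRegions, int totalOpenRegions) {
    return totalOpenRegions == 0 ? 0.0 : (double) localRegions / totalOpenRegions;
  }
}
```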
hbase_QuotaCache_getUserLimiter_rdh | /**
* Returns the limiter associated to the specified user/table.
*
* @param ugi
* the user to limit
* @param table
* the table to limit
* @return the limiter associated to the specified user/table
*/
public QuotaLimiter getUserLimiter(final UserGroupInformation ugi, final TableName table) {
if (table.isSystemTable()) {
return NoopQuotaLimiter.get();
}
return m0(ugi).getTableLimiter(table);
} | 3.26 |
hbase_LazyInitializedWALProvider_getProviderNoCreate_rdh | /**
* Get the provider if it already initialized, otherwise just return {@code null} instead of
* creating it.
*/
WALProvider getProviderNoCreate() {
return holder.get();
} | 3.26 |
hbase_MBeanSourceImpl_register_rdh | /**
* Register an mbean with the underlying metrics system
*
* @param serviceName
* Metrics service/system name
* @param metricsName
   *          name of the metrics object to expose
* @param theMbean
* the actual MBean
* @return ObjectName from jmx
*/
@Override
  public ObjectName register(String serviceName, String metricsName, Object theMbean) {
return MBeans.register(serviceName, metricsName, theMbean);
} | 3.26 |
hbase_ByteBuffInputStream_read_rdh | /**
* Reads the next byte of data from this input stream. The value byte is returned as an
* <code>int</code> in the range <code>0</code> to <code>255</code>. If no byte is available
* because the end of the stream has been reached, the value <code>-1</code> is returned.
*
* @return the next byte of data, or <code>-1</code> if the end of the stream has been reached.
*/
  @Override
  public int read() {
if (this.buf.hasRemaining()) {
return this.buf.get() & 0xff;
}
return -1;
} | 3.26 |
hbase_ByteBuffInputStream_skip_rdh | /**
* Skips <code>n</code> bytes of input from this input stream. Fewer bytes might be skipped if the
* end of the input stream is reached. The actual number <code>k</code> of bytes to be skipped is
* equal to the smaller of <code>n</code> and remaining bytes in the stream.
*
* @param n
* the number of bytes to be skipped.
* @return the actual number of bytes skipped.
*/
@Override
public long skip(long n) {
long k = Math.min(n, available());
if (k <= 0) {
return 0;
}
this.buf.skip(((int) (k)));
return k;
} | 3.26 |
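ByteBuffInputStream's read/skip contract can be sketched over a plain java.nio.ByteBuffer, which is roughly what the ByteBuff wrapper delegates to (a simplified stand-in, not the actual ByteBuff API):

```java
import java.io.InputStream;
import java.nio.ByteBuffer;

public class ByteBufferBackedInputStream extends InputStream {
  private final ByteBuffer buf;

  public ByteBufferBackedInputStream(ByteBuffer buf) {
    this.buf = buf;
  }

  @Override
  public int read() {
    // Return the next byte as an unsigned value, or -1 at end of buffer.
    return buf.hasRemaining() ? (buf.get() & 0xff) : -1;
  }

  @Override
  public long skip(long n) {
    // Skip at most the remaining bytes; never a negative amount.
    long k = Math.min(n, buf.remaining());
    if (k <= 0) {
      return 0;
    }
    buf.position(buf.position() + (int) k);
    return k;
  }

  @Override
  public int available() {
    return buf.remaining();
  }
}
```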
hbase_HBaseRpcController_getTableName_rdh | /**
* Returns Region's table name or null if not available or pertinent.
*/
default TableName getTableName() {
return null;
} | 3.26 |
hbase_HBaseRpcController_hasRegionInfo_rdh | /**
* Returns True if this Controller is carrying the RPC target Region's RegionInfo.
*/
default boolean hasRegionInfo() {
return false;
} | 3.26 |
hbase_HBaseRpcController_setTableName_rdh | /**
* Sets Region's table name.
*/
default void setTableName(TableName tableName) {
} | 3.26 |
hbase_HBaseRpcController_getRegionInfo_rdh | /**
* Returns Target Region's RegionInfo or null if not available or pertinent.
*/
default RegionInfo getRegionInfo() {
return null;
} | 3.26 |
hbase_SkipFilter_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof SkipFilter)) {
return false;
}
SkipFilter other = ((SkipFilter) (o));
return getFilter().areSerializedFieldsEqual(other.getFilter());
} | 3.26 |
hbase_SkipFilter_parseFrom_rdh | /**
* Parse a serialized representation of {@link SkipFilter}
*
* @param pbBytes
* A pb serialized {@link SkipFilter} instance
* @return An instance of {@link SkipFilter} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static SkipFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.SkipFilter proto;
try {
proto = FilterProtos.SkipFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
try {
return new SkipFilter(ProtobufUtil.toFilter(proto.getFilter()));
} catch (IOException ioe) {
throw new DeserializationException(ioe);
}
} | 3.26 |
hbase_SkipFilter_toByteArray_rdh | /**
* Returns The filter serialized using pb
*/
@Override
public byte[] toByteArray() throws IOException {
FilterProtos.SkipFilter.Builder builder = FilterProtos.SkipFilter.newBuilder();
builder.setFilter(ProtobufUtil.toFilter(this.filter));
return builder.build().toByteArray();
} | 3.26 |
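For illustration, a hedged round-trip sketch combining toByteArray and parseFrom; the wrapped ValueFilter is an arbitrary choice and exception handling is omitted:

Filter wrapped = new ValueFilter(CompareOperator.NOT_EQUAL, new BinaryComparator(Bytes.toBytes(0)));
SkipFilter original = new SkipFilter(wrapped);
byte[] pb = original.toByteArray();             // protobuf bytes, including the nested filter
SkipFilter restored = SkipFilter.parseFrom(pb); // rebuilds the wrapper and the nested filter
// The restored filter serializes back to the same bytes.
boolean sameBytes = Arrays.equals(pb, restored.toByteArray());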
hbase_MiniZooKeeperCluster_hasValidClientPortInList_rdh | /**
* Check whether the client port in a specific position of the client port list is valid.
*
* @param index
* the specified position
*/
private boolean hasValidClientPortInList(int index) {
return (clientPortList.size() > index) && (clientPortList.get(index) > 0);
} | 3.26 |
hbase_MiniZooKeeperCluster_selectClientPort_rdh | /**
* Selects a ZK client port.
*
* @param seedPort
* the seed port to start with; -1 means first time.
* @return a valid and unused client port
*/
private int selectClientPort(int seedPort) {
int i;
int returnClientPort = seedPort + 1;
if (returnClientPort == 0) {
// If the new port is invalid, find one - starting with the default client port.
// If the default client port is not specified, starting with a random port.
// The random port is selected from the range between 49152 to 65535. These ports cannot be
// registered with IANA and are intended for dynamic allocation (see http://bit.ly/dynports).
if (defaultClientPort > 0) {
returnClientPort = defaultClientPort;
} else {
returnClientPort = 0xc000 + ThreadLocalRandom.current().nextInt(0x3f00);
}
}
// Make sure that the port is unused.
// break when an unused port is found
do {
for (i = 0; i < clientPortList.size(); i++) {
if (returnClientPort == clientPortList.get(i)) {
// Already used. Update the port and retry.
returnClientPort++;
break;
}
}
} while (i != clientPortList.size());
return returnClientPort;
} | 3.26 |
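A simplified, standalone sketch of the same idea (use the seed port if given, otherwise a random port from the dynamic range, then bump past anything already taken); the method name and signature here are illustrative only, not part of MiniZooKeeperCluster:

static int pickUnusedPort(List<Integer> usedPorts, int seedPort) {
  int candidate = seedPort > 0 ? seedPort
    : 0xc000 + ThreadLocalRandom.current().nextInt(0x3f00); // 49152..65535
  while (usedPorts.contains(candidate)) {
    candidate++; // already taken, try the next port
  }
  return candidate;
}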
hbase_MiniZooKeeperCluster_setupTestEnv_rdh | // / XXX: From o.a.zk.t.ClientBase
private static void setupTestEnv() {
// during the tests we run with 100K prealloc in the logs.
// on windows systems prealloc of 64M was seen to take ~15seconds
// resulting in test failure (client timeout on first session).
// set env and directly in order to handle static init/gc issues
System.setProperty("zookeeper.preAllocSize", "100");
FileTxnLog.setPreallocSize(100 * 1024);
// allow all 4 letter words
System.setProperty("zookeeper.4lw.commands.whitelist", "*");
} | 3.26 |
hbase_MiniZooKeeperCluster_addClientPort_rdh | /**
* Add a client port to the list.
*
* @param clientPort
* the specified port
*/
public void addClientPort(int clientPort) {
clientPortList.add(clientPort);
} | 3.26 |
hbase_MiniZooKeeperCluster_waitForServerUp_rdh | // XXX: From o.a.zk.t.ClientBase. It's in the test jar but we don't depend on the zk test jar.
// We remove the SSL/secure bit. Not used in here.
private static boolean waitForServerUp(int port, long timeout) throws IOException {
long start = EnvironmentEdgeManager.currentTime();
while (true) {
try {
String result = send4LetterWord(HOST, port, "stat", false, ((int) (timeout)));
if (result.startsWith("Zookeeper version:") && (!result.contains("READ-ONLY"))) {
return true;
} else {
LOG.debug("Read {}", result);
}
} catch (ConnectException e) {
// ignore as this is expected, do not log stacktrace
LOG.info("{}:{} not up: {}", HOST, port, e.toString());
} catch (IOException | X509Exception e) {
// ignore as this is expected
LOG.info("{}:{} not up", HOST, port, e);
}
if (EnvironmentEdgeManager.currentTime() > (start + timeout)) {
break;
}
try {
Thread.sleep(TIMEOUT);
} catch (InterruptedException e) {
throw ((InterruptedIOException) (new InterruptedIOException().initCause(e)));
}
}
return false;
} | 3.26 |
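A rough illustration of what the send4LetterWord probe boils down to (this helper is hypothetical, not the ZooKeeper test-jar implementation; needs java.net.Socket, java.net.InetSocketAddress, java.io.ByteArrayOutputStream and java.nio.charset.StandardCharsets): open a socket, write the four-letter command, read back the reply.

static String probeStat(String host, int port, int timeoutMs) throws IOException {
  try (Socket sock = new Socket()) {
    sock.connect(new InetSocketAddress(host, port), timeoutMs);
    sock.setSoTimeout(timeoutMs);
    sock.getOutputStream().write("stat".getBytes(StandardCharsets.US_ASCII));
    sock.getOutputStream().flush();
    ByteArrayOutputStream reply = new ByteArrayOutputStream();
    byte[] buf = new byte[512];
    int n;
    while ((n = sock.getInputStream().read(buf)) != -1) {
      reply.write(buf, 0, n);
    }
    return reply.toString("US-ASCII"); // e.g. "Zookeeper version: ..."
  }
}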
hbase_MiniZooKeeperCluster_getAddress_rdh | /**
* Returns Address for this cluster instance.
*/
public Address getAddress() {
return Address.fromParts(HOST, getClientPort());
} | 3.26 |
hbase_MiniZooKeeperCluster_waitForServerDown_rdh | // XXX: From o.a.zk.t.ClientBase. We just dropped the check for ssl/secure.
private static boolean waitForServerDown(int port, long timeout) throws IOException {
long start = EnvironmentEdgeManager.currentTime();
while (true) {
try {
send4LetterWord(HOST, port, "stat", false, ((int) (timeout)));
} catch (IOException | X509Exception e) {
return true;
}
if (EnvironmentEdgeManager.currentTime() > (start + timeout)) {
break;
}
try {
Thread.sleep(TIMEOUT);
} catch (InterruptedException e) {
throw ((InterruptedIOException) (new InterruptedIOException().initCause(e)));
}
}
return false;
} | 3.26 |
hbase_MiniZooKeeperCluster_killOneBackupZooKeeperServer_rdh | /**
 * Kill one backup ZK server.
*
* @throws IOException
* if waiting for the shutdown of a server fails
*/
public void killOneBackupZooKeeperServer() throws IOException, InterruptedException {
if (((!started) || (activeZKServerIndex < 0)) || (standaloneServerFactoryList.size() <= 1)) {
return;
}
int backupZKServerIndex = activeZKServerIndex + 1;
// Shut down the first backup server after the current active one
NIOServerCnxnFactory standaloneServerFactory = standaloneServerFactoryList.get(backupZKServerIndex);
int clientPort = clientPortList.get(backupZKServerIndex);
standaloneServerFactory.shutdown();
if (!waitForServerDown(clientPort, connectionTimeout)) {
throw new IOException("Waiting for shutdown of standalone server");
}
zooKeeperServers.get(backupZKServerIndex).getZKDatabase().close();
// remove this backup zk server
standaloneServerFactoryList.remove(backupZKServerIndex);
clientPortList.remove(backupZKServerIndex);
zooKeeperServers.remove(backupZKServerIndex);
LOG.info("Kill one backup ZK servers in the cluster on client port: {}", clientPort);
} | 3.26 |
hbase_MiniZooKeeperCluster_shutdown_rdh | /**
 * Shut down all the ZooKeeper servers in the cluster.
 *
* @throws IOException
* if waiting for the shutdown of a server fails
*/
public void shutdown() throws IOException {
// shut down all the zk servers
for (int i = 0; i < standaloneServerFactoryList.size(); i++) {
NIOServerCnxnFactory standaloneServerFactory = standaloneServerFactoryList.get(i);
int clientPort = clientPortList.get(i);
standaloneServerFactory.shutdown();
if (!waitForServerDown(clientPort, connectionTimeout)) {
throw new IOException("Waiting for shutdown of standalone server at port=" + clientPort + ", timeout=" + this.connectionTimeout);
}
}
standaloneServerFactoryList.clear();
for (ZooKeeperServer zkServer : zooKeeperServers) {
// Explicitly close ZKDatabase since ZookeeperServer does not close them
zkServer.getZKDatabase().close();
}
zooKeeperServers.clear();
// clear everything
if (started) {
started = false;
activeZKServerIndex = 0;
clientPortList.clear();
LOG.info("Shutdown MiniZK cluster with all ZK servers");
}
} | 3.26 |
hbase_MiniZooKeeperCluster_getClientPortList_rdh | /**
* Get the list of client ports.
*
* @return clientPortList the client port list
*/
@InterfaceAudience.Private
public List<Integer> getClientPortList() {
return clientPortList;
} | 3.26 |
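Putting the cluster methods above together, a hedged lifecycle sketch (the startup(File, int) overload, HBaseConfiguration.create(), and the temporary directory are assumptions about the surrounding class, not shown in these snippets):

Configuration conf = HBaseConfiguration.create();
MiniZooKeeperCluster zkCluster = new MiniZooKeeperCluster(conf);
zkCluster.addClientPort(21818);                                 // optionally pin the first server's port
int activePort = zkCluster.startup(new File("/tmp/minizk"), 3); // 1 active + 2 backup servers
// ... point test clients at zkCluster.getAddress() ...
zkCluster.killOneBackupZooKeeperServer();                       // exercise failure handling
zkCluster.shutdown();                                           // stops all remaining servers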
hbase_RegionSplitRestriction_create_rdh | /**
* Create the RegionSplitRestriction configured for the given table.
*
* @param tableDescriptor
* the table descriptor
* @param conf
* the configuration
* @return a RegionSplitRestriction instance
* @throws IOException
* if an error occurs
*/
public static RegionSplitRestriction create(TableDescriptor tableDescriptor, Configuration conf) throws IOException {
String type = tableDescriptor.getValue(RESTRICTION_TYPE_KEY);
if (type == null) {
type = conf.get(RESTRICTION_TYPE_KEY, RESTRICTION_TYPE_NONE);
}
RegionSplitRestriction ret;
switch (type) {
case RESTRICTION_TYPE_NONE :
ret = new NoRegionSplitRestriction();
break;
case RESTRICTION_TYPE_KEY_PREFIX :
ret = new KeyPrefixRegionSplitRestriction();
break;
case RESTRICTION_TYPE_DELIMITED_KEY_PREFIX :
ret = new DelimitedKeyPrefixRegionSplitRestriction();
break;
default :
LOG.warn("Invalid RegionSplitRestriction type specified: {}. " + "Using the default RegionSplitRestriction", type);
ret = new NoRegionSplitRestriction();
break;
}
ret.initialize(tableDescriptor, conf);
return ret;
} | 3.26 |
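A hedged configuration sketch for the factory above; "conf" is an existing Configuration, and the constant name on KeyPrefixRegionSplitRestriction (and the exact property value) is an assumption that may differ between versions:

TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("test_table"))
  .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
  .setValue(RegionSplitRestriction.RESTRICTION_TYPE_KEY,
    RegionSplitRestriction.RESTRICTION_TYPE_KEY_PREFIX)
  .setValue(KeyPrefixRegionSplitRestriction.PREFIX_LENGTH_KEY, "5")
  .build();
// create() reads the table-level setting first, falls back to the Configuration,
// and here returns an initialized KeyPrefixRegionSplitRestriction.
RegionSplitRestriction restriction = RegionSplitRestriction.create(td, conf);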
hbase_HDFSBlocksDistribution_addHostAndBlockWeight_rdh | /**
 * Add some weight to a specific host
*
* @param host
* the host name
* @param weight
* the weight
* @param weightForSsd
* the weight for ssd
*/
private void addHostAndBlockWeight(String host, long weight, long weightForSsd) {
if (host == null) {
// erroneous data
return;
}
HostAndWeight hostAndWeight = this.hostAndWeights.get(host);
if (hostAndWeight == null) {
hostAndWeight = new HostAndWeight(host, weight, weightForSsd);
this.hostAndWeights.put(host, hostAndWeight);
} else {
hostAndWeight.addWeight(weight, weightForSsd);
}
} | 3.26 |
hbase_HDFSBlocksDistribution_getBlockLocalityIndex_rdh | /**
* Get the block locality index for a given host
*
* @param host
* the host name
* @return the locality index of the given host
*/
public float getBlockLocalityIndex(String host) {
if (uniqueBlocksTotalWeight == 0) {
return 0.0F;
} else {
return ((float) (getBlocksLocalityWeightInternal(host, HostAndWeight::getWeight))) / ((float) (uniqueBlocksTotalWeight));
}
} | 3.26 |
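A short worked example of the two methods above, using the public addHostsAndBlockWeight entry point (assumed from the same class) that feeds addHostAndBlockWeight once per host:

HDFSBlocksDistribution dist = new HDFSBlocksDistribution();
dist.addHostsAndBlockWeight(new String[] { "host1", "host2" }, 100); // 100-byte block replicated on both hosts
dist.addHostsAndBlockWeight(new String[] { "host1" }, 50);           // 50-byte block on host1 only
// unique total weight = 150; host1 holds 150 of it, host2 holds 100
float host1Index = dist.getBlockLocalityIndex("host1"); // 1.0
float host2Index = dist.getBlockLocalityIndex("host2"); // 100/150 ≈ 0.67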
hbase_HDFSBlocksDistribution_getHost_rdh | /**
* Returns the host name
*/
public String getHost() {
return host;
} | 3.26 |