name | code_snippet | score
---|---|---|
hbase_MasterCoprocessorHost_postTruncateRegionAction_rdh | /**
* Invoked after calling the truncate region procedure
*
* @param region
* Region which was truncated
* @param user
* The user
*/
public void postTruncateRegionAction(final RegionInfo region, User user)
throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) {
@Override
public void call(MasterObserver observer) throws IOException {
observer.postTruncateRegionAction(this, region);
}});
} | 3.26 |
hbase_MasterCoprocessorHost_postRollBackMergeRegionsAction_rdh | /**
* Invoked after rollback merge regions operation
*
* @param regionsToMerge
* the regions to merge
* @param user
* the user
*/
public void postRollBackMergeRegionsAction(final RegionInfo[] regionsToMerge, final User user) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) {
@Override
public void call(MasterObserver observer) throws IOException {
observer.postRollBackMergeRegionsAction(this, regionsToMerge);
}
});
} | 3.26 |
hbase_QuotaSettingsFactory_throttleTable_rdh | /**
* Throttle the specified table.
*
* @param tableName
* the table to throttle
* @param type
* the type of throttling
* @param limit
* the allowed number of request/data per timeUnit
* @param timeUnit
* the limit time unit
* @param scope
* the scope of throttling
* @return the quota settings
*/
public static QuotaSettings throttleTable(final TableName tableName, final ThrottleType type, final long limit, final TimeUnit timeUnit, QuotaScope scope) {
return throttle(null, tableName, null, null, type, limit, timeUnit, scope);
} | 3.26 |
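A minimal usage sketch (not part of the dataset row above): the returned QuotaSettings is applied through Admin.setQuota. The connection setup, table name, and limit values are illustrative assumptions.

```java
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.quotas.QuotaScope;
import org.apache.hadoop.hbase.quotas.QuotaSettings;
import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
import org.apache.hadoop.hbase.quotas.ThrottleType;

public class ThrottleTableExample {
  // Limit the (hypothetical) table "t1" to 100 requests per second on each machine.
  static void applyThrottle(Connection connection) throws Exception {
    QuotaSettings settings = QuotaSettingsFactory.throttleTable(TableName.valueOf("t1"),
      ThrottleType.REQUEST_NUMBER, 100, TimeUnit.SECONDS, QuotaScope.MACHINE);
    try (Admin admin = connection.getAdmin()) {
      admin.setQuota(settings); // persists the throttle settings on the cluster
    }
  }
}
```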
hbase_QuotaSettingsFactory_m1_rdh | /**
* Throttle the specified user on the specified table.
*
* @param userName
* the user to throttle
* @param tableName
* the table to throttle
* @param type
* the type of throttling
* @param limit
* the allowed number of request/data per timeUnit
* @param timeUnit
* the limit time unit
* @param scope
* the scope of throttling
* @return the quota settings
*/
public static QuotaSettings m1(final String userName, final TableName tableName, final ThrottleType type, final long limit, final TimeUnit timeUnit, QuotaScope scope) {
return throttle(userName, tableName, null, null, type, limit, timeUnit, scope);
} | 3.26 |
hbase_QuotaSettingsFactory_bypassGlobals_rdh | /* ========================================================================== Global Settings */
/**
* Set the "bypass global settings" for the specified user
*
* @param userName
* the user to throttle
* @param bypassGlobals
* true if the global settings should be bypassed
* @return the quota settings
 */
public static QuotaSettings bypassGlobals(final String userName, final boolean bypassGlobals) {
    return new QuotaGlobalsSettingsBypass(userName, null, null, null, bypassGlobals);
} | 3.26 |
hbase_QuotaSettingsFactory_unthrottleTableByThrottleType_rdh | /**
* Remove the throttling for the specified table.
*
* @param tableName
* the table
* @param type
* the type of throttling
* @return the quota settings
*/
public static QuotaSettings unthrottleTableByThrottleType(final TableName tableName, final ThrottleType type) {
return throttle(null, tableName, null, null, type, 0, null, QuotaScope.MACHINE);
} | 3.26 |
hbase_QuotaSettingsFactory_removeTableSpaceLimit_rdh | /**
* Creates a {@link QuotaSettings} object to remove the FileSystem space quota for the given
* table.
*
* @param tableName
* The name of the table to remove the quota for.
* @return A {@link QuotaSettings} object.
*/
public static QuotaSettings removeTableSpaceLimit(TableName tableName) {
return new SpaceLimitSettings(tableName);
} | 3.26 |
hbase_QuotaSettingsFactory_unthrottleRegionServerByThrottleType_rdh | /**
* Remove the throttling for the specified region server by throttle type.
*
* @param regionServer
the region server
* @param type
* the type of throttling
* @return the quota settings
*/
public static QuotaSettings unthrottleRegionServerByThrottleType(final String regionServer, final ThrottleType type) {
    return throttle(null, null, null, regionServer, type, 0, null, QuotaScope.MACHINE);
} | 3.26 |
hbase_QuotaSettingsFactory_throttle_rdh | /* Throttle helper */
private static QuotaSettings throttle(final String userName, final TableName tableName, final String namespace, final String regionServer,
final ThrottleType type, final long limit, final TimeUnit timeUnit, QuotaScope scope) {
QuotaProtos.ThrottleRequest.Builder builder = QuotaProtos.ThrottleRequest.newBuilder();
if (type != null) {
builder.setType(ProtobufUtil.toProtoThrottleType(type));
}
if (timeUnit != null) {
builder.setTimedQuota(ProtobufUtil.toTimedQuota(limit, timeUnit, scope));
}
return new ThrottleSettings(userName, tableName, namespace,
regionServer, builder.build());
} | 3.26 |
hbase_QuotaSettingsFactory_throttleNamespace_rdh | /**
* Throttle the specified namespace.
*
* @param namespace
* the namespace to throttle
* @param type
* the type of throttling
* @param limit
* the allowed number of request/data per timeUnit
* @param timeUnit
* the limit time unit
* @param scope
* the scope of throttling
* @return the quota settings
*/
public static QuotaSettings throttleNamespace(final String namespace, final ThrottleType type, final long limit, final TimeUnit timeUnit, QuotaScope scope) {
return throttle(null, null, namespace, null, type, limit, timeUnit, scope);
} | 3.26 |
hbase_QuotaSettingsFactory_unthrottleTable_rdh | /**
* Remove the throttling for the specified table.
*
* @param tableName
* the table
* @return the quota settings
*/
public static QuotaSettings unthrottleTable(final TableName tableName) {
return throttle(null, tableName, null, null, null, 0, null, QuotaScope.MACHINE);
} | 3.26 |
hbase_QuotaSettingsFactory_unthrottleNamespace_rdh | /**
* Remove the throttling for the specified namespace.
*
* @param namespace
* the namespace
* @return the quota settings
*/
public static QuotaSettings unthrottleNamespace(final String namespace) {
return throttle(null, null, namespace, null, null, 0, null, QuotaScope.MACHINE);
} | 3.26 |
hbase_QuotaSettingsFactory_limitNamespaceSpace_rdh | /**
* Creates a {@link QuotaSettings} object to limit the FileSystem space usage for the given
* namespace to the given size in bytes. When the space usage is exceeded by all tables in the
* namespace, the provided {@link SpaceViolationPolicy} is enacted on all tables in the namespace.
*
* @param namespace
* The namespace on which the quota should be applied.
* @param sizeLimit
* The limit of the namespace's size in bytes.
* @param violationPolicy
 * The action to take when the quota is exceeded.
* @return An {@link QuotaSettings} object.
*/
public static QuotaSettings limitNamespaceSpace(final String namespace, long sizeLimit, final SpaceViolationPolicy violationPolicy) {
return new SpaceLimitSettings(namespace, sizeLimit, violationPolicy);
}
/**
* Creates a {@link QuotaSettings} object to remove the FileSystem space quota for the given
* namespace.
*
* @param namespace
* The namespace to remove the quota on.
* @return A {@link QuotaSettings} | 3.26 |
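A hedged sketch of applying the namespace space-quota factory method shown above; the namespace name, size, and violation policy are assumptions for illustration.

```java
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.quotas.QuotaSettings;
import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy;

public class NamespaceSpaceQuotaExample {
  // Cap the (hypothetical) namespace "ns1" at 10 GB; once exceeded, further inserts are rejected.
  static void applySpaceQuota(Admin admin) throws Exception {
    QuotaSettings limit = QuotaSettingsFactory.limitNamespaceSpace("ns1",
      10L * 1024 * 1024 * 1024, SpaceViolationPolicy.NO_INSERTS);
    admin.setQuota(limit);
    // The quota can later be dropped again with the corresponding remove* factory method.
  }
}
```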
hbase_QuotaSettingsFactory_unthrottleUser_rdh | /**
* Remove the throttling for the specified user on the specified namespace.
*
* @param userName
* the user
* @param namespace
* the namespace
* @return the quota settings
*/
public static QuotaSettings unthrottleUser(final String userName, final String namespace) {
return throttle(userName, null, namespace, null, null, 0, null, QuotaScope.MACHINE);
} | 3.26 |
hbase_QuotaSettingsFactory_unthrottleRegionServer_rdh | /**
* Remove the throttling for the specified region server.
*
* @param regionServer
the region server
* @return the quota settings
*/
public static QuotaSettings unthrottleRegionServer(final String regionServer) {
return throttle(null, null, null, regionServer, null, 0, null, QuotaScope.MACHINE);
} | 3.26 |
hbase_QuotaSettingsFactory_throttleRegionServer_rdh | /**
* Throttle the specified region server.
*
* @param regionServer
* the region server to throttle
* @param type
* the type of throttling
* @param limit
* the allowed number of request/data per timeUnit
* @param timeUnit
* the limit time unit
* @return the quota settings
*/
public static QuotaSettings throttleRegionServer(final String regionServer, final ThrottleType type, final long limit, final TimeUnit timeUnit) {
return throttle(null, null, null, regionServer, type, limit, timeUnit, QuotaScope.MACHINE);
} | 3.26 |
hbase_QuotaSettingsFactory_unthrottleNamespaceByThrottleType_rdh | /**
* Remove the throttling for the specified namespace by throttle type.
*
* @param namespace
* the namespace
* @param type
* the type of throttling
* @return the quota settings
*/
public static QuotaSettings unthrottleNamespaceByThrottleType(final String namespace, final ThrottleType type) {
return throttle(null, null, namespace, null, type, 0, null, QuotaScope.MACHINE);
} | 3.26 |
hbase_QuotaSettingsFactory_m2_rdh | /**
* Remove the throttling for the specified user on the specified table.
*
* @param userName
* the user
* @param tableName
* the table
* @param type
* the type of throttling
* @return the quota settings
*/
public static QuotaSettings m2(final String userName, final TableName tableName, final ThrottleType type) {
    return throttle(userName, tableName, null, null, type, 0, null, QuotaScope.MACHINE);
} | 3.26 |
hbase_QuotaSettingsFactory_throttleUser_rdh | /**
* Throttle the specified user on the specified namespace.
*
* @param userName
* the user to throttle
* @param namespace
* the namespace to throttle
* @param type
* the type of throttling
* @param limit
* the allowed number of request/data per timeUnit
* @param timeUnit
* the limit time unit
* @param scope
* the scope of throttling
* @return the quota settings
*/
public static QuotaSettings throttleUser(final String userName, final String namespace, final ThrottleType type, final long limit, final TimeUnit timeUnit, QuotaScope scope) {
return throttle(userName, null, namespace, null, type, limit, timeUnit, scope);
} | 3.26 |
hbase_QuotaSettingsFactory_unthrottleUserByThrottleType_rdh | /**
* Remove the throttling for the specified user on the specified namespace.
*
* @param userName
* the user
* @param namespace
* the namespace
* @param type
* the type of throttling
* @return the quota settings
*/
public static QuotaSettings unthrottleUserByThrottleType(final String userName, final String namespace, final ThrottleType type) {
    return throttle(userName, null, namespace, null, type, 0, null, QuotaScope.MACHINE);
} | 3.26 |
hbase_ReusableStreamGzipCodec_writeTrailer_rdh | /**
 * Re-implemented because the corresponding method in the JDK is not visible.
*/
private void writeTrailer(byte[] paramArrayOfByte, int paramInt) throws IOException {
writeInt(((int) (this.crc.getValue())), paramArrayOfByte, paramInt);
writeInt(this.def.getTotalIn(), paramArrayOfByte, paramInt + 4);
} | 3.26 |
hbase_ReusableStreamGzipCodec_writeShort_rdh | /**
 * Re-implemented because the corresponding method in the JDK is not visible.
*/
private void writeShort(int paramInt1, byte[] paramArrayOfByte, int paramInt2) throws IOException {
paramArrayOfByte[paramInt2] = ((byte) (paramInt1 & 0xff));
    paramArrayOfByte[paramInt2 + 1] = ((byte) ((paramInt1 >> 8) & 0xff));
} | 3.26 |
hbase_ReusableStreamGzipCodec_writeInt_rdh | /**
 * Re-implemented because the corresponding method in the JDK is not visible.
*/
private void writeInt(int paramInt1, byte[] paramArrayOfByte, int paramInt2) throws IOException {
writeShort(paramInt1 & 0xffff, paramArrayOfByte, paramInt2);
    writeShort((paramInt1 >> 16) & 0xffff, paramArrayOfByte, paramInt2 + 2);
} | 3.26 |
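The two helpers above write values least-significant byte first, which matches the little-endian GZIP trailer layout (CRC-32 followed by the uncompressed size). A small hedged cross-check, not part of the original class:

```java
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Arrays;

public class TrailerLayoutCheck {
  public static void main(String[] args) {
    int value = 0xCAFEBABE;
    byte[] manual = new byte[4];
    // Same byte order as writeShort/writeInt above: least-significant byte first.
    manual[0] = (byte) (value & 0xff);
    manual[1] = (byte) ((value >> 8) & 0xff);
    manual[2] = (byte) ((value >> 16) & 0xff);
    manual[3] = (byte) ((value >> 24) & 0xff);
    byte[] viaBuffer = ByteBuffer.allocate(4).order(ByteOrder.LITTLE_ENDIAN).putInt(value).array();
    System.out.println(Arrays.equals(manual, viaBuffer)); // prints true
  }
}
```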
hbase_ReusableStreamGzipCodec_finish_rdh | /**
 * Overridden because certain implementations call def.end(), which causes problems when
 * resetting the stream for reuse.
*/
@Override
public void finish() throws IOException {
    if (HAS_BROKEN_FINISH) {
if (!def.finished()) {
def.finish();
while (!def.finished()) {
int i = def.deflate(this.buf, 0, this.buf.length);
if (def.finished() && (i <= (this.buf.length - TRAILER_SIZE))) {
writeTrailer(this.buf, i);
i += TRAILER_SIZE;
out.write(this.buf, 0, i);
return;
}
          if (i > 0) {
out.write(this.buf, 0, i);
}
}
byte[] arrayOfByte = new byte[TRAILER_SIZE];
writeTrailer(arrayOfByte, 0);
out.write(arrayOfByte);
}
} else {
super.finish();
}
} | 3.26 |
hbase_VisibilityUtils_extractVisibilityTags_rdh | /**
* Extract the visibility tags of the given Cell into the given List
*
* @param cell
* - the cell
* @param tags
* - the array that will be populated if visibility tags are present
* @return The visibility tags serialization format
*/
public static Byte extractVisibilityTags(Cell cell, List<Tag> tags) {
Byte serializationFormat = null;
Iterator<Tag> tagsIterator = PrivateCellUtil.tagsIterator(cell);
while (tagsIterator.hasNext()) {
Tag tag = tagsIterator.next();
if (tag.getType() == TagType.VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE) {
serializationFormat = Tag.getValueAsByte(tag);
} else if (tag.getType() == VISIBILITY_TAG_TYPE) {
tags.add(tag);
}
}
    return serializationFormat;
} | 3.26 |
hbase_VisibilityUtils_getActiveUser_rdh | /**
*
* @return User who called RPC method. For non-RPC handling, falls back to system user
* @throws IOException
* When there is IOE in getting the system user (During non-RPC handling).
*/
public static User getActiveUser() throws IOException {
Optional<User> optionalUser = RpcServer.getRequestUser();
User user;
if (optionalUser.isPresent()) {
user = optionalUser.get();
} else {
user = User.getCurrent();
}
if (LOG.isTraceEnabled()) {
LOG.trace("Current active user name is " + user.getShortName());
}
return user;
} | 3.26 |
hbase_VisibilityUtils_readUserAuthsFromZKData_rdh | /**
* Reads back User auth data written to zookeeper.
*
* @return User auth details
*/
public static MultiUserAuthorizations readUserAuthsFromZKData(byte[] data) throws DeserializationException {
if (ProtobufUtil.isPBMagicPrefix(data)) {
int pblen = ProtobufUtil.lengthOfPBMagic();
try {
MultiUserAuthorizations.Builder builder = MultiUserAuthorizations.newBuilder();
ProtobufUtil.mergeFrom(builder, data, pblen, data.length - pblen);
return builder.build();
} catch (IOException e) {
throw new DeserializationException(e);
}
}
return null;
} | 3.26 |
hbase_VisibilityUtils_readLabelsFromZKData_rdh | /**
* Reads back from the zookeeper. The data read here is of the form written by
* writeToZooKeeper(Map<byte[], Integer> entries).
*
* @return Labels and their ordinal details
*/
public static List<VisibilityLabel> readLabelsFromZKData(byte[] data) throws DeserializationException {
if (ProtobufUtil.isPBMagicPrefix(data)) {
int pblen = ProtobufUtil.lengthOfPBMagic();
try {
VisibilityLabelsRequest.Builder builder = VisibilityLabelsRequest.newBuilder();
ProtobufUtil.mergeFrom(builder, data, pblen, data.length - pblen);
return builder.getVisLabelList();
      } catch (IOException e) {
throw new DeserializationException(e);
}
}
return null;
} | 3.26 |
hbase_VisibilityUtils_getDataToWriteToZooKeeper_rdh | /**
* Creates the labels data to be written to zookeeper.
*
* @return Bytes form of labels and their ordinal details to be written to zookeeper.
*/
public static byte[] getDataToWriteToZooKeeper(Map<String, Integer> existingLabels) {
VisibilityLabelsRequest.Builder visReqBuilder = VisibilityLabelsRequest.newBuilder();
for (Entry<String, Integer> entry : existingLabels.entrySet()) {
VisibilityLabel.Builder visLabBuilder = VisibilityLabel.newBuilder();
visLabBuilder.setLabel(ByteString.copyFrom(Bytes.toBytes(entry.getKey())));
      visLabBuilder.setOrdinal(entry.getValue());
      visReqBuilder.addVisLabel(visLabBuilder.build());
}
return ProtobufUtil.prependPBMagic(visReqBuilder.build().toByteArray());
} | 3.26 |
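A hedged round-trip sketch combining getDataToWriteToZooKeeper and readLabelsFromZKData from the rows above; the label names, ordinals, and the protobuf import path are assumptions.

```java
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabel;
import org.apache.hadoop.hbase.security.visibility.VisibilityUtils;

public class VisibilityLabelsRoundTrip {
  public static void main(String[] args) throws Exception {
    Map<String, Integer> labels = new HashMap<>();
    labels.put("secret", 1);
    labels.put("topsecret", 2);
    // Serialize with the PB-magic prefix, as it would be written to ZooKeeper ...
    byte[] zkData = VisibilityUtils.getDataToWriteToZooKeeper(labels);
    // ... and read it back the same way the label cache does.
    for (VisibilityLabel label : VisibilityUtils.readLabelsFromZKData(zkData)) {
      System.out.println(label.getLabel().toStringUtf8() + " -> " + label.getOrdinal());
    }
  }
}
```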
hbase_VisibilityUtils_writeLabelOrdinalsToStream_rdh | /**
 * This will sort the passed label ordinals in ascending order and then write them one after the
 * other to the passed stream.
 *
 * @param labelOrdinals
 * unsorted label ordinals
 * @param dos
 * the stream to write the labels to
 * @throws IOException
 * when an IOE occurs during the writes to the stream
*/
private static void writeLabelOrdinalsToStream(List<Integer> labelOrdinals, DataOutputStream dos) throws IOException {
Collections.sort(labelOrdinals);
for (Integer labelOrdinal : labelOrdinals) {
      StreamUtils.writeRawVInt32(dos, labelOrdinal);
    }
} | 3.26 |
hbase_ResultScanner_next_rdh | // get the pending next item and advance the iterator. returns null if
// there is no next item.
@Override
public Result next() {
// since hasNext() does the real advancing, we call this to determine
// if there is a next before proceeding.
if (!hasNext()) {
return null;
}
// if we get to here, then hasNext() has given us an item to return.
// we want to return the item and then null out the next pointer, so
// we use a temporary variable.
Result temp = next;
next = null;
    return temp;
} | 3.26 |
hbase_ResultScanner_hasNext_rdh | // return true if there is another item pending, false if there isn't.
// this method is where the actual advancing takes place, but you need
// to call next() to consume it. hasNext() will only advance if there
// isn't a pending next().
@Override
public boolean hasNext() {
if (next != null) {
return true;
}
try {
return (next = ResultScanner.this.next()) != null;
} catch (IOException e) {
throw new UncheckedIOException(e);
}
} | 3.26 |
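A hedged usage sketch: because ResultScanner backs Iterable&lt;Result&gt; with the hasNext()/next() pair above, a scan is usually consumed with a for-each loop; the table name and connection below are illustrative, and any IOException raised during iteration surfaces as UncheckedIOException as shown in hasNext().

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanExample {
  // Iterates a scan with for-each; the scanner's iterator is backed by hasNext()/next() above.
  static void printRowKeys(Connection connection, String tableName) throws IOException {
    try (Table table = connection.getTable(TableName.valueOf(tableName));
         ResultScanner scanner = table.getScanner(new Scan())) {
      for (Result result : scanner) {
        System.out.println(Bytes.toStringBinary(result.getRow()));
      }
    } // try-with-resources closes the scanner and releases server-side resources
  }
}
```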
hbase_MasterProcedureScheduler_wakePeerExclusiveLock_rdh | /**
* Wake the procedures waiting for the specified peer
*
* @see #waitPeerExclusiveLock(Procedure, String)
* @param procedure
* the procedure releasing the lock
* @param peerId
* the peer that has the exclusive lock
*/
public void wakePeerExclusiveLock(Procedure<?> procedure, String peerId) {
schedLock();
try {
final LockAndQueue lock = locking.getPeerLock(peerId);
if (lock.releaseExclusiveLock(procedure)) {
addToRunQueue(peerRunQueue, getPeerQueue(peerId), () -> procedure + " released exclusive lock");
int waitingCount = wakeWaitingProcedures(lock);
wakePollIfNeeded(waitingCount);
}
} finally {
schedUnlock();
}
}
// ============================================================================
// Meta Locking Helpers
// ============================================================================
/**
* Try to acquire the exclusive lock on meta.
*
* @see #wakeMetaExclusiveLock(Procedure)
* @param procedure
* the procedure trying to acquire the lock
* @return true if the procedure has to wait for meta to be available
* @deprecated only used for {@link RecoverMetaProcedure}. Should be removed along with
{@link RecoverMetaProcedure} | 3.26 |
hbase_MasterProcedureScheduler_wakeGlobalExclusiveLock_rdh | /**
* Wake the procedures waiting for global.
*
* @see #waitGlobalExclusiveLock(Procedure, String)
* @param procedure
* the procedure releasing the lock
*/
public void wakeGlobalExclusiveLock(Procedure<?> procedure, String globalId) {
    schedLock();
    try {
      final LockAndQueue lock = locking.getGlobalLock(globalId);
      lock.releaseExclusiveLock(procedure);
addToRunQueue(globalRunQueue, getGlobalQueue(globalId), () -> procedure + " released shared lock");
int waitingCount = wakeWaitingProcedures(lock);
wakePollIfNeeded(waitingCount);
} finally {
schedUnlock();
}
} | 3.26 |
hbase_MasterProcedureScheduler_waitTableSharedLock_rdh | /**
 * Suspend the procedure if the specified table is already locked. Other "read" operations in the
 * table-queue may be executed concurrently.
*
* @param procedure
* the procedure trying to acquire the lock
* @param table
* Table to lock
* @return true if the procedure has to wait for the table to be available
*/
public boolean waitTableSharedLock(final Procedure<?> procedure, final TableName table) {
return waitTableQueueSharedLock(procedure, table) == null;
} | 3.26 |
hbase_MasterProcedureScheduler_dumpLocks_rdh | /**
* For debugging. Expensive.
*/
public String dumpLocks() throws IOException {
schedLock();
try {
// TODO: Refactor so we stream out locks for case when millions; i.e. take a PrintWriter
return this.locking.toString();
} finally {
schedUnlock();
}
} | 3.26 |
hbase_MasterProcedureScheduler_getMetaQueue_rdh | // ============================================================================
// Meta Queue Lookup Helpers
// ============================================================================
private MetaQueue getMetaQueue() {
MetaQueue node = AvlTree.get(metaMap, TableName.META_TABLE_NAME, META_QUEUE_KEY_COMPARATOR);
if (node != null) {
return node;
}
node = new MetaQueue(locking.getMetaLock());
metaMap = AvlTree.insert(metaMap, node);
return node;
} | 3.26 |
hbase_MasterProcedureScheduler_wakeServerExclusiveLock_rdh | /**
* Wake the procedures waiting for the specified server
*
* @see #waitServerExclusiveLock(Procedure,ServerName)
* @param procedure
* the procedure releasing the lock
* @param serverName
* the server that has the exclusive lock
*/
public void wakeServerExclusiveLock(final Procedure<?> procedure, final ServerName serverName) {
schedLock();
try {
final LockAndQueue lock = locking.getServerLock(serverName);
// Only SCP will acquire/release server lock so do not need to check the return value here.
lock.releaseExclusiveLock(procedure);
// In tests we may pass procedures other than ServerProcedureInterface, just pass null if
// so.
addToRunQueue(serverRunQueue, getServerQueue(serverName, procedure instanceof ServerProcedureInterface ? ((ServerProcedureInterface) (procedure)) : null),
() -> procedure + " released exclusive lock");
int v64 = wakeWaitingProcedures(lock);
wakePollIfNeeded(v64);
} finally {
schedUnlock();
}
} | 3.26 |
hbase_MasterProcedureScheduler_getPeerQueue_rdh | // ============================================================================
// Peer Queue Lookup Helpers
// ============================================================================
  private PeerQueue getPeerQueue(String peerId) {
    PeerQueue node = AvlTree.get(peerMap, peerId, PEER_QUEUE_KEY_COMPARATOR);
    if (node != null) {
      return node;
    }
node = new PeerQueue(peerId, locking.getPeerLock(peerId));
peerMap = AvlTree.insert(peerMap, node);
return node;
} | 3.26 |
hbase_MasterProcedureScheduler_waitRegions_rdh | /**
* Suspend the procedure if the specified set of regions are already locked.
*
* @param procedure
* the procedure trying to acquire the lock on the regions
* @param table
* the table name of the regions we are trying to lock
* @param regionInfos
* the list of regions we are trying to lock
* @return true if the procedure has to wait for the regions to be available
*/
public boolean waitRegions(final Procedure<?> procedure, final TableName table, final RegionInfo... regionInfos) {
Arrays.sort(regionInfos, RegionInfo.COMPARATOR);
schedLock();
try {
assert table != null;
if (waitTableSharedLock(procedure, table)) {
return true;
}
// acquire region xlocks or wait
boolean hasLock = true;
final LockAndQueue[] regionLocks = new LockAndQueue[regionInfos.length];
for (int i = 0; i < regionInfos.length; ++i) {
assert regionInfos[i] != null;
        assert regionInfos[i].getTable() != null;
        assert regionInfos[i].getTable().equals(table) : (regionInfos[i] + " ") + procedure;
assert (i == 0) || (regionInfos[i] != regionInfos[i - 1]) : "duplicate region: " + regionInfos[i];
regionLocks[i] = locking.getRegionLock(regionInfos[i].getEncodedName());
        if (!regionLocks[i].tryExclusiveLock(procedure)) {
          LOG.info("Waiting on xlock for {} held by pid={}", procedure, regionLocks[i].getExclusiveLockProcIdOwner());
waitProcedure(regionLocks[i], procedure);
hasLock = false;
while ((i--) > 0) {
regionLocks[i].releaseExclusiveLock(procedure);
}
break;
} else {
LOG.info("Took xlock for {}", procedure);
}
}
      if (!hasLock) {
        wakeTableSharedLock(procedure, table);
}
return !hasLock;
} finally {
schedUnlock();
}
} | 3.26 |
hbase_MasterProcedureScheduler_wakeRegions_rdh | /**
* Wake the procedures waiting for the specified regions
*
* @param procedure
* the procedure that was holding the regions
* @param regionInfos
* the list of regions the procedure was holding
*/
  public void wakeRegions(final Procedure<?> procedure, final TableName table, final RegionInfo... regionInfos) {
    Arrays.sort(regionInfos, RegionInfo.COMPARATOR);
schedLock();
try {
int numProcs = 0;
final Procedure<?>[] nextProcs = new Procedure[regionInfos.length];
for (int i = 0; i < regionInfos.length; ++i) {
assert regionInfos[i].getTable().equals(table);
assert (i == 0) || (regionInfos[i] != regionInfos[i - 1]) : "duplicate region: " + regionInfos[i];
LockAndQueue v55 = locking.getRegionLock(regionInfos[i].getEncodedName());
if (v55.releaseExclusiveLock(procedure)) {
if (!v55.isWaitingQueueEmpty()) {
// release one procedure at the time since regions has an xlock
nextProcs[numProcs++] = v55.removeFirst();
} else {
locking.removeRegionLock(regionInfos[i].getEncodedName());
}
}
}
// awake procedures if any
for (int i = numProcs - 1; i >= 0; --i) {
wakeProcedure(nextProcs[i]);
      }
      wakePollIfNeeded(numProcs);
// release the table shared-lock.
wakeTableSharedLock(procedure, table);
    } finally {
schedUnlock();
}
} | 3.26 |
hbase_MasterProcedureScheduler_wakeTableExclusiveLock_rdh | /**
* Wake the procedures waiting for the specified table
*
* @param procedure
* the procedure releasing the lock
* @param table
* the name of the table that has the exclusive lock
*/
public void wakeTableExclusiveLock(final Procedure<?> procedure, final TableName table) {
schedLock();
try {
final LockAndQueue namespaceLock = locking.getNamespaceLock(table.getNamespaceAsString());
final LockAndQueue tableLock = locking.getTableLock(table);
int waitingCount = 0;
if (tableLock.releaseExclusiveLock(procedure)) {
        waitingCount += wakeWaitingProcedures(tableLock);
}
if (namespaceLock.releaseSharedLock()) {
waitingCount += wakeWaitingProcedures(namespaceLock);
}
addToRunQueue(f0, m3(table), () -> procedure + " released the exclusive lock");
wakePollIfNeeded(waitingCount);
} finally {
schedUnlock();
}
} | 3.26 |
hbase_MasterProcedureScheduler_waitPeerExclusiveLock_rdh | // Peer Locking Helpers
// ============================================================================
/**
* Try to acquire the exclusive lock on the specified peer.
*
* @see #wakePeerExclusiveLock(Procedure, String)
* @param procedure
* the procedure trying to acquire the lock
* @param peerId
* peer to lock
* @return true if the procedure has to wait for the peer to be available
*/
public boolean waitPeerExclusiveLock(Procedure<?> procedure, String peerId) {
schedLock();
try {
final LockAndQueue lock = locking.getPeerLock(peerId);
if (lock.tryExclusiveLock(procedure)) {
removeFromRunQueue(peerRunQueue, getPeerQueue(peerId), () -> procedure + " held exclusive lock");
return false;
}
waitProcedure(lock, procedure);
logLockedResource(LockedResourceType.PEER, peerId);
return true;
} finally {
schedUnlock();
}
} | 3.26 |
hbase_MasterProcedureScheduler_getServerQueue_rdh | // ============================================================================
// Server Queue Lookup Helpers
// ============================================================================
private ServerQueue getServerQueue(ServerName serverName, ServerProcedureInterface proc) {
    final int index = getBucketIndex(serverBuckets, serverName.hashCode());
    ServerQueue node = AvlTree.get(serverBuckets[index], serverName, SERVER_QUEUE_KEY_COMPARATOR);
if (node != null) {
return node;
}
int priority;
if (proc != null) {
priority = MasterProcedureUtil.getServerPriority(proc);
} else {
priority = 1;
}
node = new ServerQueue(serverName, priority, locking.getServerLock(serverName));
serverBuckets[index] = AvlTree.insert(serverBuckets[index], node);
return node;
} | 3.26 |
hbase_MasterProcedureScheduler_m3_rdh | // ============================================================================
// Table Queue Lookup Helpers
// ============================================================================
private TableQueue m3(TableName tableName) {
TableQueue node = AvlTree.get(f1, tableName, TABLE_QUEUE_KEY_COMPARATOR);
    if (node != null) {
      return node;
    }
node = new TableQueue(tableName, MasterProcedureUtil.getTablePriority(tableName), locking.getTableLock(tableName), locking.getNamespaceLock(tableName.getNamespaceAsString()));
f1 = AvlTree.insert(f1, node);
return node;
} | 3.26 |
hbase_MasterProcedureScheduler_logLockedResource_rdh | // ============================================================================
// Table Locking Helpers
// ============================================================================
/**
* Get lock info for a resource of specified type and name and log details
*/
private void logLockedResource(LockedResourceType resourceType, String resourceName) {
if (!LOG.isDebugEnabled()) {
return;
}
LockedResource lockedResource = getLockResource(resourceType, resourceName);
if (lockedResource != null) {
String msg = (((resourceType.toString() + " '") + resourceName) + "', shared lock count=") + lockedResource.getSharedLockCount();
Procedure<?> proc = lockedResource.getExclusiveLockOwnerProcedure();
if (proc != null) {
msg += ", exclusively locked by procId=" + proc.getProcId();
}
LOG.debug(msg);
    }
} | 3.26 |
hbase_MasterProcedureScheduler_wakeNamespaceExclusiveLock_rdh | /**
* Wake the procedures waiting for the specified namespace
*
* @see #waitNamespaceExclusiveLock(Procedure,String)
* @param procedure
* the procedure releasing the lock
* @param namespace
* the namespace that has the exclusive lock
*/
public void wakeNamespaceExclusiveLock(final Procedure<?> procedure, final String namespace) {
schedLock();
try {
final LockAndQueue namespaceLock = locking.getNamespaceLock(namespace);
final LockAndQueue systemNamespaceTableLock = locking.getTableLock(TableProcedureInterface.DUMMY_NAMESPACE_TABLE_NAME);
int waitingCount = 0;
if (namespaceLock.releaseExclusiveLock(procedure)) {
waitingCount += wakeWaitingProcedures(namespaceLock);
      }
      if (systemNamespaceTableLock.releaseSharedLock()) {
        addToRunQueue(f0, m3(TableProcedureInterface.DUMMY_NAMESPACE_TABLE_NAME), () -> procedure + " released namespace exclusive lock");
waitingCount += wakeWaitingProcedures(systemNamespaceTableLock);
}
wakePollIfNeeded(waitingCount);
} finally {
schedUnlock();
}
} | 3.26 |
hbase_MasterProcedureScheduler_wakeRegion_rdh | /**
* Wake the procedures waiting for the specified region
*
* @param procedure
* the procedure that was holding the region
* @param regionInfo
* the region the procedure was holding
*/
public void wakeRegion(final Procedure<?> procedure, final RegionInfo regionInfo) {
wakeRegions(procedure, regionInfo.getTable(), regionInfo);
} | 3.26 |
hbase_MasterProcedureScheduler_wakeTableSharedLock_rdh | /**
* Wake the procedures waiting for the specified table
*
* @param procedure
* the procedure releasing the lock
* @param table
* the name of the table that has the shared lock
*/
public void wakeTableSharedLock(final Procedure<?> procedure, final TableName table) {
schedLock();
try {
final LockAndQueue namespaceLock = locking.getNamespaceLock(table.getNamespaceAsString());
final LockAndQueue tableLock = locking.getTableLock(table);
int waitingCount = 0;
if (tableLock.releaseSharedLock()) {
addToRunQueue(f0, m3(table), () -> procedure + " released the shared lock");
waitingCount += wakeWaitingProcedures(tableLock);
}
if (namespaceLock.releaseSharedLock()) {
waitingCount += wakeWaitingProcedures(namespaceLock);
}
wakePollIfNeeded(waitingCount);
} finally {
schedUnlock();
}
} | 3.26 |
hbase_MasterProcedureScheduler_waitNamespaceExclusiveLock_rdh | // ============================================================================
// Namespace Locking Helpers
// ============================================================================
/**
* Suspend the procedure if the specified namespace is already locked.
*
* @see #wakeNamespaceExclusiveLock(Procedure,String)
* @param procedure
* the procedure trying to acquire the lock
* @param namespace
* Namespace to lock
* @return true if the procedure has to wait for the namespace to be available
*/
public boolean waitNamespaceExclusiveLock(Procedure<?> procedure, String namespace) {
schedLock();
try {
final LockAndQueue v57 = locking.getTableLock(TableProcedureInterface.DUMMY_NAMESPACE_TABLE_NAME);
if (!v57.trySharedLock(procedure)) {
waitProcedure(v57, procedure);
logLockedResource(LockedResourceType.TABLE, TableProcedureInterface.DUMMY_NAMESPACE_TABLE_NAME.getNameAsString());
return true;
}
      final LockAndQueue namespaceLock = locking.getNamespaceLock(namespace);
if (!namespaceLock.tryExclusiveLock(procedure)) {
v57.releaseSharedLock();
waitProcedure(namespaceLock, procedure);
logLockedResource(LockedResourceType.NAMESPACE, namespace);
return true;
}
return false;
} finally {
schedUnlock();
}
} | 3.26 |
hbase_MasterProcedureScheduler_waitRegion_rdh | // ============================================================================
// Region Locking Helpers
// ============================================================================
/**
* Suspend the procedure if the specified region is already locked.
*
* @param procedure
* the procedure trying to acquire the lock on the region
* @param regionInfo
* the region we are trying to lock
* @return true if the procedure has to wait for the regions to be available
*/
public boolean waitRegion(final Procedure<?> procedure, final RegionInfo regionInfo) {
return waitRegions(procedure, regionInfo.getTable(), regionInfo);
} | 3.26 |
hbase_MasterProcedureScheduler_getGlobalQueue_rdh | // ============================================================================
// Global Queue Lookup Helpers
// ============================================================================
private GlobalQueue getGlobalQueue(String globalId) {
GlobalQueue node = AvlTree.get(f2, globalId, GLOBAL_QUEUE_KEY_COMPARATOR);
if (node != null) {
return node;
}
node = new GlobalQueue(globalId, locking.getGlobalLock(globalId));
f2 = AvlTree.insert(f2, node);
return node;
} | 3.26 |
hbase_MasterProcedureScheduler_waitGlobalExclusiveLock_rdh | // ============================================================================
// Global Locking Helpers
// ============================================================================
/**
 * Try to acquire the exclusive lock on global.
*
* @see #wakeGlobalExclusiveLock(Procedure, String)
* @param procedure
* the procedure trying to acquire the lock
* @return true if the procedure has to wait for global to be available
*/
public boolean waitGlobalExclusiveLock(Procedure<?> procedure, String globalId) {
schedLock();
try {
final LockAndQueue lock = locking.getGlobalLock(globalId);
if (lock.tryExclusiveLock(procedure)) {
removeFromRunQueue(globalRunQueue, getGlobalQueue(globalId), () -> procedure + " held shared lock");
return false;
}
waitProcedure(lock, procedure);
logLockedResource(LockedResourceType.GLOBAL, HConstants.EMPTY_STRING);
return true;
} finally {
schedUnlock();
}
} | 3.26 |
hbase_MasterProcedureScheduler_waitTableExclusiveLock_rdh | /**
* Suspend the procedure if the specified table is already locked. Other operations in the
* table-queue will be executed after the lock is released.
*
* @param procedure
* the procedure trying to acquire the lock
* @param table
* Table to lock
* @return true if the procedure has to wait for the table to be available
*/
public boolean waitTableExclusiveLock(final Procedure<?> procedure, final TableName table) {
schedLock();
try {
final String namespace = table.getNamespaceAsString();
final LockAndQueue namespaceLock = locking.getNamespaceLock(namespace);
final LockAndQueue tableLock = locking.getTableLock(table);
if (!namespaceLock.trySharedLock(procedure)) {
waitProcedure(namespaceLock, procedure);
logLockedResource(LockedResourceType.NAMESPACE, namespace);
        return true;
}
if (!tableLock.tryExclusiveLock(procedure)) {
namespaceLock.releaseSharedLock();
waitProcedure(tableLock, procedure);
logLockedResource(LockedResourceType.TABLE, table.getNameAsString());
return true;
}
      removeFromRunQueue(f0, m3(table), () -> procedure + " held the exclusive lock");
return false;
} finally {
schedUnlock();
}
} | 3.26 |
hbase_Struct_decode_rdh | /**
* Read the field at {@code index}. {@code src}'s position is not affected.
*/
  public Object decode(PositionedByteRange src, int index) {
    assert index >= 0;
StructIterator it = iterator(src.shallowCopy());
for (; index > 0; index--) {
it.skip();
}
return it.next();
} | 3.26 |
hbase_Struct_iterator_rdh | /**
* Retrieve an {@link Iterator} over the values encoded in {@code src}. {@code src}'s position is
* consumed by consuming this iterator.
*/
public StructIterator iterator(PositionedByteRange src) {
return new StructIterator(src, fields);
} | 3.26 |
hbase_FsDelegationToken_releaseDelegationToken_rdh | /**
* Releases a previously acquired delegation token.
*/
public void releaseDelegationToken() {
if (userProvider.isHadoopSecurityEnabled()) {
      if ((userToken != null) && (!hasForwardedToken)) {
try {
userToken.cancel(this.fs.getConf());
} catch (Exception e) {
LOG.warn("Failed to cancel HDFS delegation token: " + userToken, e);
}
}
this.userToken = null;
this.fs = null;
}
} | 3.26 |
hbase_FsDelegationToken_getRenewer_rdh | /**
* Returns the account name that is allowed to renew the token.
*/
public String getRenewer() {
return renewer;
} | 3.26 |
hbase_FsDelegationToken_getUserToken_rdh | /**
* Returns the delegation token acquired, or null in case it was not acquired
*/
public Token<?> getUserToken() {
return userToken;
} | 3.26 |
hbase_FsDelegationToken_acquireDelegationToken_rdh | /**
* Acquire the delegation token for the specified filesystem and token kind. Before requesting a
* new delegation token, tries to find one already available.
*
* @param tokenKind
* non-null token kind to get delegation token from the {@link UserProvider}
* @param fs
* the filesystem that requires the delegation token
* @throws IOException
* on fs.getDelegationToken() failure
*/
public void acquireDelegationToken(final String tokenKind, final FileSystem fs) throws IOException {
Objects.requireNonNull(tokenKind, "tokenKind:null");
if (userProvider.isHadoopSecurityEnabled()) {
this.fs = fs;
userToken = userProvider.getCurrent().getToken(tokenKind, fs.getCanonicalServiceName());
if (userToken == null) {
hasForwardedToken = false;
userToken = fs.getDelegationToken(renewer);
} else {
hasForwardedToken = true;
LOG.info("Use the existing token: " + userToken);
}
}
} | 3.26 |
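A hedged sketch of the acquire/release lifecycle for the FsDelegationToken methods shown above; the renewer name and token kind are assumptions, and the acquire call is effectively a no-op unless Hadoop security is enabled.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.security.token.FsDelegationToken;

public class DelegationTokenExample {
  // Acquire a token before working against a secured FileSystem, then release it afterwards.
  static void runWithToken(Configuration conf) throws Exception {
    FsDelegationToken token = new FsDelegationToken(UserProvider.instantiate(conf), "hbase");
    FileSystem fs = FileSystem.get(conf);
    try {
      token.acquireDelegationToken("HDFS_DELEGATION_TOKEN", fs);
      // ... perform the bulk-load or snapshot work using token.getUserToken() ...
    } finally {
      token.releaseDelegationToken(); // cancels the token unless it was forwarded from the caller
    }
  }
}
```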
hbase_BulkLoadHFilesTool_tryAtomicRegionLoad_rdh | /**
* Attempts to do an atomic load of many hfiles into a region. If it fails, it returns a list of
* hfiles that need to be retried. If it is successful it will return an empty list. NOTE: To
* maintain row atomicity guarantees, region server side should succeed atomically and fails
* atomically.
*
* @param conn
* Connection to use
* @param tableName
* Table to which these hfiles should be loaded to
* @param copyFiles
* whether replicate to peer cluster while bulkloading
* @param first
* the start key of region
* @param lqis
* hfiles should be loaded
* @return empty list if success, list of items to retry on recoverable failure
*/
@InterfaceAudience.Private
protected CompletableFuture<Collection<LoadQueueItem>> tryAtomicRegionLoad(final AsyncClusterConnection conn, final TableName tableName, boolean copyFiles, final byte[] first, Collection<LoadQueueItem> lqis) {
List<Pair<byte[], String>> familyPaths = lqis.stream().map(lqi -> Pair.newPair(lqi.getFamily(), lqi.getFilePath().toString())).collect(Collectors.toList());
CompletableFuture<Collection<LoadQueueItem>> future = new CompletableFuture<>();
FutureUtils.addListener(conn.bulkLoad(tableName, familyPaths,
first, assignSeqIds, fsDelegationToken.getUserToken(), bulkToken, copyFiles, clusterIds, replicate), (loaded, error) -> {
        if (error != null) {
          LOG.error("Encountered unrecoverable error from region server", error);
          if (getConf().getBoolean(RETRY_ON_IO_EXCEPTION, false) && (numRetries.get() < getConf().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER))) {
LOG.warn("Will attempt to retry loading failed HFiles. Retry #" + numRetries.incrementAndGet());
// return lqi's to retry
future.complete(lqis);
          } else {
LOG.error(RETRY_ON_IO_EXCEPTION + " is disabled or we have reached retry limit. Unable to recover");
future.completeExceptionally(error);
}
} else if (loaded) {
future.complete(Collections.emptyList());
} else {
LOG.warn(((((("Attempt to bulk load region containing " + Bytes.toStringBinary(first)) + " into table ") + tableName) + " with files ") + lqis) + " failed. This is recoverable and they will be retried.");
// return lqi's to retry
future.complete(lqis);
}
});
return future;
} | 3.26 |
hbase_BulkLoadHFilesTool_loadHFileQueue_rdh | /**
* Used by the replication sink to load the hfiles from the source cluster. It does the following,
* <ol>
* <li>{@link #groupOrSplitPhase(AsyncClusterConnection, TableName, ExecutorService, Deque, List)}
* </li>
* <li>{@link #bulkLoadPhase(AsyncClusterConnection, TableName, Deque, Multimap, boolean, Map)}
* </li>
* </ol>
*
* @param conn
* Connection to use
* @param tableName
* Table to which these hfiles should be loaded to
* @param queue
* {@code LoadQueueItem} has hfiles yet to be loaded
*/
public void loadHFileQueue(AsyncClusterConnection conn, TableName tableName, Deque<LoadQueueItem> queue, boolean copyFiles) throws IOException {
ExecutorService pool = createExecutorService();
try {
      Multimap<ByteBuffer, LoadQueueItem> regionGroups = groupOrSplitPhase(conn, tableName, pool, queue, FutureUtils.get(conn.getRegionLocator(tableName).getStartEndKeys())).getFirst();
      bulkLoadPhase(conn, tableName, queue, regionGroups, copyFiles, null);
} finally {
pool.shutdown();
}
} | 3.26 |
hbase_BulkLoadHFilesTool_getUniqueName_rdh | // unique file name for the table
private String getUniqueName() {
return UUID.randomUUID().toString().replaceAll("-", "");
} | 3.26 |
hbase_BulkLoadHFilesTool_getRegionIndex_rdh | /**
*
* @param startEndKeys
* the start/end keys of regions belong to this table, the list in ascending
* order by start key
* @param key
* the key need to find which region belong to
* @return region index
*/
private int getRegionIndex(List<Pair<byte[], byte[]>> startEndKeys, byte[] key) {
int idx = Collections.binarySearch(startEndKeys, Pair.newPair(key, HConstants.EMPTY_END_ROW),
(p1, p2) -> Bytes.compareTo(p1.getFirst(), p2.getFirst()));
if (idx < 0) {
// not on boundary, returns -(insertion index). Calculate region it
// would be in.
      idx = (-(idx + 1)) - 1;
}
return idx;
} | 3.26 |
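A small self-contained illustration (using plain strings instead of HBase byte[] keys) of the insertion-point arithmetic used by getRegionIndex above.

```java
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class RegionIndexExample {
  public static void main(String[] args) {
    // Region start keys in ascending order: "", "d", "m". A row key "g" belongs to the
    // region starting at "d".
    List<String> startKeys = Arrays.asList("", "d", "m");
    int idx = Collections.binarySearch(startKeys, "g");
    if (idx < 0) {
      // binarySearch returns -(insertionPoint) - 1 when the key is not an exact start key;
      // the containing region is the one just before the insertion point.
      idx = (-(idx + 1)) - 1;
    }
    System.out.println(idx); // prints 1 -> region starting at "d"
  }
}
```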
hbase_BulkLoadHFilesTool_validateFamiliesInHFiles_rdh | /**
* Checks whether there is any invalid family name in HFiles to be bulk loaded.
*/
private static void validateFamiliesInHFiles(TableDescriptor tableDesc, Deque<LoadQueueItem> queue, boolean silence) throws IOException {
    Set<String> v2 = Arrays.stream(tableDesc.getColumnFamilies()).map(ColumnFamilyDescriptor::getNameAsString).collect(Collectors.toSet());
List<String> unmatchedFamilies = queue.stream().map(item -> Bytes.toString(item.getFamily())).filter(fn -> !v2.contains(fn)).distinct().collect(Collectors.toList());
if (unmatchedFamilies.size() > 0) {
String msg = (((("Unmatched family names found: unmatched family names in HFiles to be bulkloaded: " + unmatchedFamilies) + "; valid family names of table ") + tableDesc.getTableName()) + " are: ") + v2;
LOG.error(msg);
if (!silence) {
        throw new IOException(msg);
}
}
} | 3.26 |
hbase_BulkLoadHFilesTool_copyHFileHalf_rdh | /**
* Copy half of an HFile into a new HFile with favored nodes.
*/
private static void copyHFileHalf(Configuration conf, Path inFile, Path outFile, Reference reference, ColumnFamilyDescriptor familyDescriptor, AsyncTableRegionLocator loc) throws IOException {
FileSystem fs = inFile.getFileSystem(conf);
CacheConfig cacheConf = CacheConfig.DISABLED;
HalfStoreFileReader halfReader = null;
StoreFileWriter halfWriter = null;
try {
ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, inFile).build();
StoreFileInfo storeFileInfo = new StoreFileInfo(conf, fs, fs.getFileStatus(inFile), reference);
storeFileInfo.initHFileInfo(context);
halfReader = ((HalfStoreFileReader) (storeFileInfo.createReader(context, cacheConf)));
storeFileInfo.getHFileInfo().initMetaAndIndex(halfReader.getHFileReader());
Map<byte[], byte[]> fileInfo = halfReader.loadFileInfo();
      int blocksize = familyDescriptor.getBlocksize();
      Algorithm compression = familyDescriptor.getCompressionType();
BloomType bloomFilterType = familyDescriptor.getBloomFilterType();
HFileContext hFileContext = new HFileContextBuilder().withCompression(compression).withChecksumType(StoreUtils.getChecksumType(conf)).withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)).withBlockSize(blocksize).withDataBlockEncoding(familyDescriptor.getDataBlockEncoding()).withIncludesTags(true).withCreateTime(EnvironmentEdgeManager.currentTime()).build();
HFileScanner v76 = halfReader.getScanner(false, false, false);
v76.seekTo();
do {
        final Cell cell = v76.getCell();
if (null != halfWriter) {
halfWriter.append(cell);
} else {
// init halfwriter
          if (conf.getBoolean(LOCALITY_SENSITIVE_CONF_KEY, DEFAULT_LOCALITY_SENSITIVE)) {
            byte[] rowKey = CellUtil.cloneRow(cell);
HRegionLocation hRegionLocation = FutureUtils.get(loc.getRegionLocation(rowKey));
InetSocketAddress[] favoredNodes = null;
if (null == hRegionLocation) {
LOG.warn("Failed get region location for rowkey {} , Using writer without favoured nodes.", Bytes.toString(rowKey));
halfWriter = new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(outFile).withBloomType(bloomFilterType).withFileContext(hFileContext).build();
} else {
LOG.debug("First rowkey: [{}]", Bytes.toString(rowKey));
InetSocketAddress initialIsa = new InetSocketAddress(hRegionLocation.getHostname(), hRegionLocation.getPort());
if (initialIsa.isUnresolved()) {
LOG.warn("Failed get location for region {} , Using writer without favoured nodes.", hRegionLocation);
halfWriter = new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(outFile).withBloomType(bloomFilterType).withFileContext(hFileContext).build();
} else {
LOG.debug("Use favored nodes writer: {}", initialIsa.getHostString());
favoredNodes = new InetSocketAddress[]{ initialIsa };
halfWriter = new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(outFile).withBloomType(bloomFilterType).withFileContext(hFileContext).withFavoredNodes(favoredNodes).build();
}
}
} else {
halfWriter = new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(outFile).withBloomType(bloomFilterType).withFileContext(hFileContext).build();
}
halfWriter.append(cell);
}
} while (v76.next() );
for (Map.Entry<byte[], byte[]> entry : fileInfo.entrySet()) {
if (shouldCopyHFileMetaKey(entry.getKey())) {
          halfWriter.appendFileInfo(entry.getKey(), entry.getValue());
        }
}
} finally {
if (halfReader != null) {
try {
halfReader.close(cacheConf.shouldEvictOnClose());
} catch (IOException e) {
LOG.warn("failed to close hfile reader for " + inFile, e);
}
}
if (halfWriter != null) {
halfWriter.close();
}
}
} | 3.26 |
hbase_BulkLoadHFilesTool_visitBulkHFiles_rdh | /**
* Iterate over the bulkDir hfiles. Skip reference, HFileLink, files starting with "_". Check and
* skip non-valid hfiles by default, or skip this validation by setting {@link #VALIDATE_HFILES}
* to false.
*/
private static <TFamily> void visitBulkHFiles(FileSystem fs, Path bulkDir, BulkHFileVisitor<TFamily> visitor, boolean validateHFile) throws IOException {
FileStatus[] familyDirStatuses = fs.listStatus(bulkDir);
for (FileStatus familyStat :
familyDirStatuses) {
if (!familyStat.isDirectory())
{
LOG.warn("Skipping non-directory " + familyStat.getPath());
continue;
}
Path familyDir = familyStat.getPath();
byte[] familyName = Bytes.toBytes(familyDir.getName());
// Skip invalid family
try {
ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(familyName);
} catch (IllegalArgumentException e) {
LOG.warn("Skipping invalid " + familyStat.getPath());
continue;
}
TFamily family = visitor.bulkFamily(familyName);
FileStatus[] hfileStatuses = fs.listStatus(familyDir);
for (FileStatus hfileStatus : hfileStatuses) {
if (!fs.isFile(hfileStatus.getPath())) {
LOG.warn("Skipping non-file " + hfileStatus);
continue;
}
Path hfile = hfileStatus.getPath();
// Skip "_", reference, HFileLink
        String fileName = hfile.getName();
if (fileName.startsWith("_")) {
continue;
}
if (StoreFileInfo.isReference(fileName)) {
LOG.warn("Skipping reference " + fileName);
continue;
}
if (HFileLink.isHFileLink(fileName)) {
LOG.warn("Skipping HFileLink " + fileName);
continue;
}
// Validate HFile Format if needed
        if (validateHFile) {
try {
if (!HFile.isHFileFormat(fs, hfile)) {
LOG.warn(("the file " + hfile) + " doesn't seems to be an hfile. skipping");
continue;
}
} catch (FileNotFoundException e) {
LOG.warn(("the file " + hfile) + " was removed");
continue;
}
}
visitor.bulkHFile(family, hfileStatus);
}
}
} | 3.26 |
hbase_BulkLoadHFilesTool_m1_rdh | /**
* Split a storefile into a top and bottom half with favored nodes, maintaining the metadata,
* recreating bloom filters, etc.
*/
@InterfaceAudience.Private
static void m1(AsyncTableRegionLocator loc, Configuration conf, Path inFile, ColumnFamilyDescriptor familyDesc, byte[] splitKey, Path bottomOut, Path topOut) throws IOException {
// Open reader with no block cache, and not in-memory
Reference topReference = Reference.createTopReference(splitKey);
Reference bottomReference = Reference.createBottomReference(splitKey);
copyHFileHalf(conf, inFile, topOut, topReference, familyDesc, loc);
copyHFileHalf(conf, inFile, bottomOut, bottomReference, familyDesc, loc);
} | 3.26 |
hbase_BulkLoadHFilesTool_groupOrSplit_rdh | /**
* Attempt to assign the given load queue item into its target region group. If the hfile boundary
* no longer fits into a region, physically splits the hfile such that the new bottom half will
* fit and returns the list of LQI's corresponding to the resultant hfiles.
* <p/>
* protected for testing
*
* @throws IOException
* if an IO failure is encountered
*/
@InterfaceAudience.Private
protected Pair<List<LoadQueueItem>, String> groupOrSplit(AsyncClusterConnection conn, TableName tableName, Multimap<ByteBuffer, LoadQueueItem> regionGroups, LoadQueueItem item, List<Pair<byte[], byte[]>> startEndKeys) throws IOException {
Path v53 = item.getFilePath();
Optional<byte[]> v54;
Optional<byte[]> last;
    try (HFile.Reader hfr = HFile.createReader(v53.getFileSystem(getConf()), v53, CacheConfig.DISABLED, true, getConf())) {
v54 = hfr.getFirstRowKey();
last = hfr.getLastRowKey();
} catch (FileNotFoundException fnfe) {
LOG.debug("encountered", fnfe);
return new Pair<>(null, v53.getName());
}
LOG.info((((("Trying to load hfile=" +
v53) + " first=") + v54.map(Bytes::toStringBinary)) + " last=") + last.map(Bytes::toStringBinary));
if ((!v54.isPresent()) || (!last.isPresent())) {
assert (!v54.isPresent()) && (!last.isPresent());
// TODO what if this is due to a bad HFile?
LOG.info(("hfile " + v53) + " has no entries, skipping");
return null;
}
    if (Bytes.compareTo(v54.get(), last.get()) > 0) {
throw new IllegalArgumentException((("Invalid range: " + Bytes.toStringBinary(v54.get())) + " > ") + Bytes.toStringBinary(last.get()));
}
int firstKeyRegionIdx = getRegionIndex(startEndKeys, v54.get());
checkRegionIndexValid(firstKeyRegionIdx, startEndKeys, tableName);
boolean lastKeyInRange = (Bytes.compareTo(last.get(), startEndKeys.get(firstKeyRegionIdx).getSecond()) < 0) || Bytes.equals(startEndKeys.get(firstKeyRegionIdx).getSecond(), HConstants.EMPTY_BYTE_ARRAY);
if (!lastKeyInRange) {
if (failIfNeedSplitHFile) {
throw new IOException((((("The key range of hfile=" + v53) + " fits into no region. ") + "And because ") + FAIL_IF_NEED_SPLIT_HFILE) + " was set to true, we just skip the next steps.");
}
int lastKeyRegionIdx = getRegionIndex(startEndKeys, last.get());
      int splitIdx = (firstKeyRegionIdx + lastKeyRegionIdx) / 2;
      // Make sure the splitPoint is valid in case regions overlap; without this check the
      // splitPoint could be bigger than the hfile's end key.
if (splitIdx != firstKeyRegionIdx) {
checkRegionIndexValid(splitIdx, startEndKeys, tableName);
}
byte[] splitPoint = startEndKeys.get(splitIdx).getSecond();
List<LoadQueueItem> lqis = splitStoreFile(conn.getRegionLocator(tableName), item, FutureUtils.get(conn.getAdmin().getDescriptor(tableName)), splitPoint);
return new Pair<>(lqis, null);
}
// group regions.
regionGroups.put(ByteBuffer.wrap(startEndKeys.get(firstKeyRegionIdx).getFirst()), item);
return null;
} | 3.26 |
hbase_BulkLoadHFilesTool_doBulkLoad_rdh | /**
* Perform a bulk load of the given directory into the given pre-existing table. This method is
* not threadsafe.
*
* @param tableName
* table to load the hfiles
* @param hfofDir
* the directory that was provided as the output path of a job using
* HFileOutputFormat
* @param silence
* true to ignore unmatched column families
* @param copyFile
* always copy hfiles if true
*/
private Map<LoadQueueItem, ByteBuffer> doBulkLoad(AsyncClusterConnection conn, TableName tableName, Path hfofDir, boolean silence, boolean copyFile) throws IOException {
tableExists(conn, tableName);
/* Checking hfile format is a time-consuming operation, we should have an option to skip this
step when bulkloading millions of HFiles. See HBASE-13985.
*/
boolean validateHFile = getConf().getBoolean(VALIDATE_HFILES, true);
if (!validateHFile) {
LOG.warn((("You are skipping HFiles validation, it might cause some data loss if files " + "are not correct. If you fail to read data from your table after using this ") + "option, consider removing the files and bulkload again without this option. ") + "See HBASE-13985");
}
// LQI queue does not need to be threadsafe -- all operations on this queue
// happen in this thread
Deque<LoadQueueItem> queue = new ArrayDeque<>();
ExecutorService pool = null;
try {
prepareHFileQueue(getConf(), conn, tableName, hfofDir, queue, validateHFile, silence);
if (queue.isEmpty()) {
LOG.warn("Bulk load operation did not find any files to load in directory {}. " + "Does it contain files in subdirectories that correspond to column family names?", hfofDir != null ? hfofDir.toUri().toString() : "");
        return Collections.emptyMap();
      }
      pool = createExecutorService();
      return performBulkLoad(conn, tableName, queue, pool, copyFile);
} finally {
cleanup(conn, tableName, queue, pool);
}
} | 3.26 |
hbase_BulkLoadHFilesTool_createExecutorService_rdh | // Initialize a thread pool
private ExecutorService createExecutorService() {
ThreadPoolExecutor pool = new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>(), new ThreadFactoryBuilder().setNameFormat("BulkLoadHFilesTool-%1$d").setDaemon(true).build());
pool.allowCoreThreadTimeOut(true);
return pool;
} | 3.26 |
hbase_BulkLoadHFilesTool_tableExists_rdh | /**
*
* @throws TableNotFoundException
* if table does not exist.
*/
  private void tableExists(AsyncClusterConnection conn, TableName tableName) throws IOException {
if (!FutureUtils.get(conn.getAdmin().tableExists(tableName))) {
throwAndLogTableNotFoundException(tableName);
}
} | 3.26 |
hbase_BulkLoadHFilesTool_inferBoundaries_rdh | /**
* Infers region boundaries for a new table.
* <p/>
* Parameter: <br/>
* bdryMap is a map between keys to an integer belonging to {+1, -1}
* <ul>
* <li>If a key is a start key of a file, then it maps to +1</li>
* <li>If a key is an end key of a file, then it maps to -1</li>
* </ul>
* <p>
* Algo:<br/>
* <ol>
* <li>Poll on the keys in order:
* <ol type="a">
* <li>Keep adding the mapped values to these keys (runningSum)</li>
* <li>Each time runningSum reaches 0, add the start Key from when the runningSum had started to a
* boundary list.</li>
* </ol>
* </li>
* <li>Return the boundary list.</li>
* </ol>
*/
public static byte[][] inferBoundaries(SortedMap<byte[], Integer> bdryMap) {
List<byte[]> keysArray = new ArrayList<>();
int runningValue = 0;
byte[] currStartKey = null;
boolean firstBoundary = true;
for (Map.Entry<byte[], Integer> item : bdryMap.entrySet()) {
if (runningValue == 0) {
currStartKey = item.getKey();
}
runningValue += item.getValue();
if (runningValue == 0) {
if (!firstBoundary) {
keysArray.add(currStartKey);
}
firstBoundary = false;
}
}
return keysArray.toArray(new byte[0][]);
} | 3.26 |
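A hedged usage sketch of inferBoundaries with two made-up hfile key ranges; the BulkLoadHFilesTool import path is an assumption.

```java
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.hadoop.hbase.tool.BulkLoadHFilesTool;
import org.apache.hadoop.hbase.util.Bytes;

public class InferBoundariesExample {
  public static void main(String[] args) {
    SortedMap<byte[], Integer> map = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    map.put(Bytes.toBytes("a"), 1);   // hfile1 starts at "a"  -> +1
    map.put(Bytes.toBytes("c"), -1);  // hfile1 ends at "c"    -> -1
    map.put(Bytes.toBytes("g"), 1);   // hfile2 starts at "g"  -> +1
    map.put(Bytes.toBytes("k"), -1);  // hfile2 ends at "k"    -> -1
    byte[][] splitKeys = BulkLoadHFilesTool.inferBoundaries(map);
    for (byte[] key : splitKeys) {
      // The running sum returns to 0 after "c" and again after "k"; only the second
      // range's start key is emitted, so this prints "g".
      System.out.println(Bytes.toString(key));
    }
  }
}
```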
hbase_BulkLoadHFilesTool_prepareHFileQueue_rdh | /**
* Prepare a collection of {@code LoadQueueItem} from list of source hfiles contained in the
* passed directory and validates whether the prepared queue has all the valid table column
* families in it.
*
* @param hfilesDir
* directory containing list of hfiles to be loaded into the table
* @param queue
* queue which needs to be loaded into the table
* @param validateHFile
* if true hfiles will be validated for its format
* @param silence
* true to ignore unmatched column families
* @throws IOException
* If any I/O or network error occurred
*/
  public static void prepareHFileQueue(Configuration conf, AsyncClusterConnection conn, TableName tableName, Path hfilesDir, Deque<LoadQueueItem> queue, boolean validateHFile, boolean silence) throws IOException {
    m0(conf, queue, hfilesDir, validateHFile);
validateFamiliesInHFiles(FutureUtils.get(conn.getAdmin().getDescriptor(tableName)), queue, silence);
} | 3.26 |
hbase_BulkLoadHFilesTool_checkRegionIndexValid_rdh | /**
 * We consider there to be a region hole or overlap under any of the following conditions: 1) idx < 0, meaning
 * the first region info is lost. 2) the end key of a region is not equal to the start key of the
 * next region. 3) the end key of the last region is not empty.
*/
private void checkRegionIndexValid(int idx, List<Pair<byte[], byte[]>> startEndKeys, TableName tableName) throws IOException {
if (idx < 0) {
throw new IOException(("The first region info for table " + tableName) + " can't be found in hbase:meta.Please use hbck tool to fix it first.");
} else if ((idx == (startEndKeys.size() - 1)) && (!Bytes.equals(startEndKeys.get(idx).getSecond(), HConstants.EMPTY_BYTE_ARRAY))) {
throw new IOException(("The last region info for table " + tableName) + " can't be found in hbase:meta.Please use hbck tool to fix it first.");
} else if (((idx + 1) < startEndKeys.size()) && (Bytes.compareTo(startEndKeys.get(idx).getSecond(), startEndKeys.get(idx + 1).getFirst()) != 0)) {
throw new IOException((("The endkey of one region for table " + tableName) + " is not equal to the startkey of the next region in hbase:meta.") + "Please use hbck tool to fix it first.");
}
} | 3.26 |
hbase_BulkLoadHFilesTool_createTable_rdh | /**
 * If the table is created for the first time, then "completebulkload" reads the files twice. More
 * modifications would be necessary if we want to avoid doing it.
*/
private void createTable(TableName tableName, Path hfofDir, AsyncAdmin admin) throws IOException {
final FileSystem fs = hfofDir.getFileSystem(getConf());
// Add column families
// Build a set of keys
List<ColumnFamilyDescriptorBuilder> familyBuilders = new ArrayList<>();
SortedMap<byte[], Integer> map = new TreeMap<>(Bytes.BYTES_COMPARATOR);
visitBulkHFiles(fs, hfofDir, new BulkHFileVisitor<ColumnFamilyDescriptorBuilder>() {
@Override
public ColumnFamilyDescriptorBuilder bulkFamily(byte[] familyName) {
ColumnFamilyDescriptorBuilder builder = ColumnFamilyDescriptorBuilder.newBuilder(familyName);
familyBuilders.add(builder);
return builder;
}
@Override
public void bulkHFile(ColumnFamilyDescriptorBuilder builder, FileStatus hfileStatus) throws IOException {
Path hfile = hfileStatus.getPath();
try (HFile.Reader reader = HFile.createReader(fs, hfile, CacheConfig.DISABLED, true, getConf())) {
if (builder.getCompressionType() != reader.getFileContext().getCompression()) {
builder.setCompressionType(reader.getFileContext().getCompression());
LOG.info((("Setting compression " + reader.getFileContext().getCompression().name()) + " for family ") + builder.getNameAsString());
}
byte[] first = reader.getFirstRowKey().get();
byte[] last = reader.getLastRowKey().get();
LOG.info((((("Trying to figure out region boundaries hfile=" + hfile) + " first=") + Bytes.toStringBinary(first)) + " last=")
+ Bytes.toStringBinary(last));
// To eventually infer start key-end key boundaries
Integer value = map.getOrDefault(first, 0);
map.put(first, value + 1);
value = map.getOrDefault(last, 0);
map.put(last, value - 1);
}
}
}, true);
byte[][] keys = inferBoundaries(map);
TableDescriptorBuilder v98 = TableDescriptorBuilder.newBuilder(tableName);
familyBuilders.stream().map(ColumnFamilyDescriptorBuilder::build).forEachOrdered(v98::setColumnFamily);
FutureUtils.get(admin.createTable(v98.build(), keys));
LOG.info(("Table " + tableName) + " is available!!");
} | 3.26 |
hbase_BulkLoadHFilesTool_groupOrSplitPhase_rdh | /**
*
* @param conn
* the HBase cluster connection
* @param tableName
* the table name of the table to load into
* @param pool
* the ExecutorService
* @param queue
* the queue for LoadQueueItem
* @param startEndKeys
* start and end keys
 * @return a pair of a multimap that groups LQIs by likely bulk load region target, and a set of missing hfiles
*/
private Pair<Multimap<ByteBuffer, LoadQueueItem>, Set<String>> groupOrSplitPhase(AsyncClusterConnection conn, TableName tableName, ExecutorService pool, Deque<LoadQueueItem> queue, List<Pair<byte[], byte[]>> startEndKeys) throws IOException {
// <region start key, LQI> need synchronized only within this scope of this
// phase because of the puts that happen in futures.
Multimap<ByteBuffer, LoadQueueItem> rgs = HashMultimap.create();
final Multimap<ByteBuffer, LoadQueueItem> regionGroups = Multimaps.synchronizedMultimap(rgs);
Set<String> v35 = new HashSet<>();
Pair<Multimap<ByteBuffer, LoadQueueItem>, Set<String>> pair = new Pair<>(regionGroups, v35);
// drain LQIs and figure out bulk load groups
Set<Future<Pair<List<LoadQueueItem>, String>>> splittingFutures = new HashSet<>();
while (!queue.isEmpty()) {
final LoadQueueItem item = queue.remove();
final Callable<Pair<List<LoadQueueItem>, String>> call = () -> groupOrSplit(conn, tableName, regionGroups, item, startEndKeys);
splittingFutures.add(pool.submit(call));
}
// get all the results. All grouping and splitting must finish before
// we can attempt the atomic loads.
for (Future<Pair<List<LoadQueueItem>, String>> lqis : splittingFutures) {
try {
Pair<List<LoadQueueItem>, String> splits = lqis.get();
if (splits != null) {
if (splits.getFirst() != null) {
queue.addAll(splits.getFirst());
} else {
v35.add(splits.getSecond());
}
}
} catch (ExecutionException e1) {
Throwable t = e1.getCause();
if (t instanceof IOException) {
LOG.error("IOException during splitting", e1);
throw ((IOException) (t));// would have been thrown if not parallelized,
}
LOG.error("Unexpected execution exception during splitting", e1);
throw new IllegalStateException(t);
} catch (InterruptedException e1) {
LOG.error("Unexpected interrupted exception during splitting", e1);
throw ((InterruptedIOException) (new InterruptedIOException().initCause(e1)));
}
}
return pair;
} | 3.26 |
hbase_BulkLoadHFilesTool_m0_rdh | /**
* Walk the given directory for all HFiles, and return a Queue containing all such files.
*/
private static void m0(Configuration conf, Deque<LoadQueueItem> ret, Path hfofDir, boolean validateHFile) throws IOException {
visitBulkHFiles(hfofDir.getFileSystem(conf), hfofDir, new BulkHFileVisitor<byte[]>() {
@Override
public byte[] bulkFamily(final byte[] familyName) {
return familyName;
}
@Override
public void bulkHFile(final byte[] family, final FileStatus hfile) {
long v14 = hfile.getLen();
if (v14 > conf.getLong(HConstants.HREGION_MAX_FILESIZE, HConstants.DEFAULT_MAX_FILE_SIZE)) {
LOG.warn(((("Trying to bulk load hfile " + hfile.getPath()) + " with size: ")
+ v14) + " bytes can be problematic as it may lead to oversplitting.");
}
ret.add(new LoadQueueItem(family, hfile.getPath()));
}
}, validateHFile);
} | 3.26 |
hbase_RegionSplitCalculator_calcCoverage_rdh | /**
* Generates a coverage multimap from split key to Regions that start with the split key.
*
* @return coverage multimap
*/
public Multimap<byte[], R> calcCoverage() {
// This needs to be sorted to force the use of the comparator on the values,
// otherwise byte array comparison isn't used
Multimap<byte[], R> regions = TreeMultimap.create(f2, rangeCmp);
// march through all splits from the start points
for (Entry<byte[], Collection<R>> start : starts.asMap().entrySet()) {
byte[] v5 = start.getKey();
for (R r : start.getValue()) {
regions.put(v5, r);
for (byte[] coveredSplit : f0.subSet(r.getStartKey(), specialEndKey(r))) {
regions.put(coveredSplit, r);
}
}
}
return regions;
} | 3.26 |
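A hedged usage sketch for the calculator above. SimpleRange, the constructor/comparator wiring, and the shaded-thirdparty Multimap import are assumptions for illustration; real callers in HBase pass their own KeyRange implementations and comparators.

import java.util.Comparator;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.KeyRange;
import org.apache.hadoop.hbase.util.RegionSplitCalculator;
import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;

// Hypothetical KeyRange implementation used only for this sketch.
class SimpleRange implements KeyRange {
  private final byte[] start, end;
  SimpleRange(String start, String end) {
    this.start = Bytes.toBytes(start);
    this.end = Bytes.toBytes(end);
  }
  @Override public byte[] getStartKey() { return start; }
  @Override public byte[] getEndKey() { return end; }
}

public class CoverageSketch {
  public static void main(String[] args) {
    RegionSplitCalculator<SimpleRange> calc = new RegionSplitCalculator<>(
      Comparator.comparing(SimpleRange::getStartKey, Bytes.BYTES_COMPARATOR));
    calc.add(new SimpleRange("a", "c"));
    calc.add(new SimpleRange("b", "d")); // overlaps the first range
    Multimap<byte[], SimpleRange> coverage = calc.calcCoverage();
    // Split point "b" is covered by both ranges, which exposes the overlap.
    System.out.println(coverage.get(Bytes.toBytes("b")).size()); // prints 2
  }
}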
hbase_RegionSplitCalculator_specialEndKey_rdh | /**
* SPECIAL CASE wrapper for empty end key
*
* @return ENDKEY if end key is empty, else normal endkey.
*/
private static <R extends KeyRange> byte[] specialEndKey(R range) {
byte[] end = range.getEndKey();
if (end.length == 0) {
return f1;
}
return end;
} | 3.26 |
hbase_RegionSplitCalculator_findBigRanges_rdh | /**
* Find specified number of top ranges in a big overlap group. It could return less if there are
* not that many top ranges. Once these top ranges are excluded, the big overlap group will be
* broken into ranges with no overlapping, or smaller overlapped groups, and most likely some
* holes.
*
* @param bigOverlap
* a list of ranges that overlap with each other
* @param count
* the max number of ranges to find
* @return a list of ranges that overlap with most others
*/
public static <R extends KeyRange> List<R> findBigRanges(Collection<R> bigOverlap, int count) {
List<R> bigRanges = new ArrayList<>();
// The key is the count of overlaps,
// The value is a list of ranges that have that many overlaps
TreeMap<Integer, List<R>> overlapRangeMap = new TreeMap<>();
for (R r : bigOverlap) {
// Calculates the # of overlaps for each region
// and populates rangeOverlapMap
byte[] v11 = r.getStartKey();
byte[] endKey = specialEndKey(r);
int overlappedRegions = 0;
for (R rr : bigOverlap) {
byte[] start = rr.getStartKey();
byte[] end = specialEndKey(rr);
if ((f2.compare(v11, end) < 0) && (f2.compare(endKey, start) > 0)) {
overlappedRegions++;
}
}
// One region always overlaps with itself,
// so overlappedRegions should be more than 1
// for actual overlaps.
if (overlappedRegions > 1) {
Integer key = Integer.valueOf(overlappedRegions);
List<R> ranges = overlapRangeMap.get(key);
if (ranges == null) {
ranges = new ArrayList<>();
overlapRangeMap.put(key, ranges);
}
ranges.add(r);
}
}
int toBeAdded = count;
for (Integer key : overlapRangeMap.descendingKeySet()) {
List<R> chunk = overlapRangeMap.get(key);
int chunkSize = chunk.size();
if (chunkSize <= toBeAdded) {
bigRanges.addAll(chunk);
toBeAdded -= chunkSize;
if (toBeAdded > 0)
continue;
} else {
// Try to use the middle chunk in case the overlapping is
// chained, for example: [a, c), [b, e), [d, g), [f h)...
// In such a case, sideline the middle chunk will break
// the group efficiently.
int start = (chunkSize - toBeAdded) / 2;
int v24 = start + toBeAdded;
for (int i = start; i < v24; i++) {
bigRanges.add(chunk.get(i));
}
}
break;
}
return bigRanges;
} | 3.26 |
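A rough continuation of the CoverageSketch above (reusing the hypothetical SimpleRange helper, with java.util.Arrays/List assumed imported) showing how findBigRanges picks the most-overlapped range; the chained ranges here are made up.

// Continuation of the CoverageSketch main method above (hypothetical data).
List<SimpleRange> overlap = Arrays.asList(
  new SimpleRange("a", "c"),
  new SimpleRange("b", "e"),  // overlaps both of its neighbours
  new SimpleRange("d", "g"));
List<SimpleRange> worst = RegionSplitCalculator.findBigRanges(overlap, 1);
// [b, e) overlaps two other ranges while each neighbour overlaps only one,
// so it is the single range returned; sidelining it breaks up the overlap group.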
hbase_RegionSplitCalculator_add_rdh | /**
* Adds an edge to the split calculator
*
 * @return true if the range is included, false if it is backwards/invalid
*/
public boolean add(R range) {
byte[] start = range.getStartKey();
byte[] end = specialEndKey(range);
// No need to use Arrays.equals because ENDKEY is null
if ((end != f1) && (Bytes.compareTo(start, end) > 0)) {
// don't allow backwards edges
LOG.debug((("attempted to add backwards edge: " + Bytes.toString(start)) + " ") + Bytes.toString(end));
return false;
}
f0.add(start);
f0.add(end);
starts.put(start, range);
return true;
} | 3.26 |
hbase_DeletionListener_hasException_rdh | /**
* Check if an exception has occurred when re-setting the watch.
*
* @return True if we were unable to re-set a watch on a ZNode due to an exception.
*/
public boolean hasException() {
return exception != null;
} | 3.26 |
hbase_DeletionListener_getException_rdh | /**
* Get the last exception which has occurred when re-setting the watch. Use hasException() to
* check whether or not an exception has occurred.
*
* @return The last exception observed when re-setting the watch.
*/
public Throwable getException() {
return exception;
} | 3.26 |
hbase_ForeignExceptionUtil_toProtoStackTraceElement_rdh | /**
 * Convert a stack trace to a list of protobuf {@link StackTraceElementMessage}s.
*
* @param trace
* the stack trace to convert to protobuf message
* @return <tt>null</tt> if the passed stack is <tt>null</tt>.
*/
public static List<StackTraceElementMessage> toProtoStackTraceElement(StackTraceElement[] trace) {
// if there is no stack trace, ignore it and just return the message
if (trace == null) {
return null;
}
// build the stack trace for the message
List<StackTraceElementMessage> pbTrace = new ArrayList<>(trace.length);
for (StackTraceElement elem : trace) {
StackTraceElementMessage.Builder stackBuilder = StackTraceElementMessage.newBuilder();
stackBuilder.setDeclaringClass(elem.getClassName());
if (elem.getFileName() != null) {
stackBuilder.setFileName(elem.getFileName());
}
stackBuilder.setLineNumber(elem.getLineNumber());
stackBuilder.setMethodName(elem.getMethodName());
pbTrace.add(stackBuilder.build());
}
return pbTrace;
}
/**
* Unwind a serialized array of {@link StackTraceElementMessage}s to a {@link StackTraceElement} | 3.26 |
hbase_BoundedRecoveredHFilesOutputSink_writeRemainingEntryBuffers_rdh | /**
* Write out the remaining RegionEntryBuffers and close the writers.
*
* @return true when there is no error.
*/
private boolean writeRemainingEntryBuffers() throws IOException {
for (EntryBuffers.RegionEntryBuffer buffer : entryBuffers.buffers.values()) {
closeCompletionService.submit(() -> {
append(buffer);
return null;
});
}
boolean progressFailed = false;
try {
for (int i = 0, v17 = entryBuffers.buffers.size(); i < v17; i++) {
Future<Void> future = closeCompletionService.take();
future.get();
if (((!progressFailed) && (reporter != null)) && (!reporter.progress())) {
progressFailed = true;
}
}
} catch (InterruptedException e) {
IOException iie = new InterruptedIOException();
iie.initCause(e);
throw iie;
} catch (ExecutionException e) {
throw new IOException(e.getCause());
} finally {
closeThreadPool.shutdownNow();
}
return !progressFailed;
} | 3.26 |
hbase_ProcedureExecutor_getProcedure_rdh | // ==========================================================================
// Executor query helpers
// ==========================================================================
public Procedure<TEnvironment> getProcedure(final long procId) {
return procedures.get(procId);
} | 3.26 |
hbase_ProcedureExecutor_isFinished_rdh | /**
 * Return true if the procedure is finished. The state may be "completed successfully" or "failed
 * and rolled back". Use getResult() to check the state or get the result data.
*
* @param procId
* the ID of the procedure to check
* @return true if the procedure execution is finished, otherwise false.
*/
public boolean isFinished(final long procId) {
return !procedures.containsKey(procId);
} | 3.26 |
hbase_ProcedureExecutor_createNonceKey_rdh | // ==========================================================================
// Nonce Procedure helpers
// ==========================================================================
/**
* Create a NonceKey from the specified nonceGroup and nonce.
*
* @param nonceGroup
* the group to use for the {@link NonceKey}
* @param nonce
* the nonce to use in the {@link NonceKey}
* @return the generated NonceKey
*/
public NonceKey createNonceKey(final long nonceGroup, final long nonce) {
return nonce == HConstants.NO_NONCE ? null : new NonceKey(nonceGroup, nonce);
} | 3.26 |
hbase_ProcedureExecutor_submitProcedure_rdh | /**
* Add a new root-procedure to the executor.
*
* @param proc
* the new procedure to execute.
* @param nonceKey
* the registered unique identifier for this operation from the client or process.
* @return the procedure id, that can be used to monitor the operation
*/
@SuppressWarnings(value = "NP_NULL_ON_SOME_PATH", justification = "FindBugs is blind to the check-for-null")
public long submitProcedure(Procedure<TEnvironment> proc, NonceKey nonceKey) {
Preconditions.checkArgument(lastProcId.get() >= 0);
prepareProcedure(proc);
final Long currentProcId;
if (nonceKey != null) {
currentProcId = nonceKeysToProcIdsMap.get(nonceKey);
Preconditions.checkArgument(currentProcId != null, (("Expected nonceKey=" + nonceKey) + " to be reserved, use registerNonce(); proc=") + proc);
} else {
currentProcId = nextProcId();
}
// Initialize the procedure
proc.setNonceKey(nonceKey);
proc.setProcId(currentProcId.longValue());
// Commit the transaction
store.insert(proc, null);
LOG.debug("Stored {}", proc);
// Add the procedure to the executor
return pushProcedure(proc);
} | 3.26 |
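A hedged client-side sketch of submitting a root procedure and monitoring it. The helper class is hypothetical, the nonce is omitted (null), and getResult() is assumed from the wider ProcedureExecutor API that the isFinished() Javadoc above refers to.

import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;

// Hypothetical helper, not part of HBase: submit a procedure and block until it finishes.
final class SubmitAndWait {
  static <TEnv> Procedure<TEnv> run(ProcedureExecutor<TEnv> executor, Procedure<TEnv> proc)
      throws InterruptedException {
    long procId = executor.submitProcedure(proc, null); // no nonce
    while (!executor.isFinished(procId)) {              // see isFinished() above
      Thread.sleep(100);
    }
    return executor.getResult(procId); // assumed: returns the completed/failed procedure
  }
}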
hbase_ProcedureExecutor_init_rdh | /**
* Initialize the procedure executor, but do not start workers. We will start them later.
* <p/>
* It calls ProcedureStore.recoverLease() and ProcedureStore.load() to recover the lease, and
* ensure a single executor, and start the procedure replay to resume and recover the previous
* pending and in-progress procedures.
*
* @param numThreads
* number of threads available for procedure execution.
* @param abortOnCorruption
* true if you want to abort your service in case a corrupted procedure
* is found on replay. otherwise false.
*/
public void init(int numThreads, boolean abortOnCorruption) throws IOException {
// We have numThreads executor + one timer thread used for timing out
// procedures and triggering periodic procedures.
this.corePoolSize = numThreads;
this.maxPoolSize = 10 * numThreads;
LOG.info("Starting {} core workers (bigger of cpus/4 or 16) with max (burst) worker count={}", corePoolSize, maxPoolSize);
this.threadGroup = new ThreadGroup("PEWorkerGroup");
this.timeoutExecutor = new TimeoutExecutorThread<>(this, threadGroup, "ProcExecTimeout");
this.workerMonitorExecutor = new TimeoutExecutorThread<>(this, threadGroup, "WorkerMonitor");
// Create the workers
workerId.set(0);
workerThreads = new CopyOnWriteArrayList<>();
for (int i = 0; i < corePoolSize; ++i) {
workerThreads.add(new WorkerThread(threadGroup));
}
long st;
long et;
// Acquire the store lease.
st = System.nanoTime();
store.recoverLease();
et = System.nanoTime();
LOG.info("Recovered {} lease in {}", store.getClass().getSimpleName(),
StringUtils.humanTimeDiff(TimeUnit.NANOSECONDS.toMillis(et - st)));
// start the procedure scheduler
scheduler.start();
// TODO: Split in two steps.
// TODO: Handle corrupted procedures (currently just a warn)
// The first one will make sure that we have the latest id,
// so we can start the threads and accept new procedures.
// The second step will do the actual load of old procedures.
st = System.nanoTime();
load(abortOnCorruption);
et = System.nanoTime();
LOG.info("Loaded {} in {}", store.getClass().getSimpleName(), StringUtils.humanTimeDiff(TimeUnit.NANOSECONDS.toMillis(et - st)));
} | 3.26 |
hbase_ProcedureExecutor_registerNonce_rdh | /**
 * Register a nonce for a procedure that is going to be submitted. A procId will be reserved and,
 * on submitProcedure(), the procedure with the specified nonce will take the reserved procId. If
 * someone already reserved the nonce, this method will return the reserved procId; otherwise an
 * invalid procId will be returned and the caller should proceed and submit the procedure.
*
* @param nonceKey
* A unique identifier for this operation from the client or process.
* @return the procId associated with the nonce, if any otherwise an invalid procId.
*/
public long registerNonce(final NonceKey nonceKey) {
if (nonceKey == null) {
return -1;
}
// check if we have already a Reserved ID for the nonce
Long oldProcId = nonceKeysToProcIdsMap.get(nonceKey);
if (oldProcId == null) {
// reserve a new Procedure ID, this will be associated with the nonce
// and the procedure submitted with the specified nonce will use this ID.
final long newProcId = nextProcId();
oldProcId = nonceKeysToProcIdsMap.putIfAbsent(nonceKey, newProcId);
if (oldProcId == null) {
return -1;
}
}
// we found a registered nonce, but the procedure may not have been submitted yet.
// since the client expects the procedure to be submitted, spin here until it is.
final boolean traceEnabled = LOG.isTraceEnabled();
while ((isRunning() && (!(procedures.containsKey(oldProcId) || completed.containsKey(oldProcId)))) && nonceKeysToProcIdsMap.containsKey(nonceKey)) {
if (traceEnabled) {
LOG.trace(("Waiting for pid=" + oldProcId.longValue())
+ " to be submitted");
}
Threads.sleep(100);
}
return oldProcId.longValue();
} | 3.26 |
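A hedged sketch of the reserve-then-submit workflow described above. The helper class, the procName string and the null owner are illustrative; createNonceKey and submitProcedure appear earlier in this dump, and setFailureResultForNonce appears in the next snippet.

import java.io.IOException;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.util.NonceKey;

// Hypothetical helper showing the reserve-then-submit nonce pattern.
final class NonceSubmit {
  static <TEnv> long submitOnce(ProcedureExecutor<TEnv> executor, Procedure<TEnv> proc,
      long nonceGroup, long nonce) {
    NonceKey nonceKey = executor.createNonceKey(nonceGroup, nonce);
    long procId = executor.registerNonce(nonceKey);
    if (procId >= 0) {
      // Someone already submitted a procedure for this nonce: reuse its id.
      return procId;
    }
    try {
      return executor.submitProcedure(proc, nonceKey);
    } catch (RuntimeException e) {
      // Report a pre-submit failure to concurrent retries that share the nonce
      // (see setFailureResultForNonce in the next snippet).
      executor.setFailureResultForNonce(nonceKey, "example-op", null, new IOException(e));
      throw e;
    }
  }
}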
hbase_ProcedureExecutor_setFailureResultForNonce_rdh | /**
 * If the operation failed before the procedure was submitted, we may want to give back the same
 * error to the requests with the same nonceKey.
*
* @param nonceKey
* A unique identifier for this operation from the client or process
* @param procName
* name of the procedure, used to inform the user
* @param procOwner
* name of the owner of the procedure, used to inform the user
* @param exception
* the failure to report to the user
*/
public void setFailureResultForNonce(NonceKey nonceKey, String procName, User procOwner, IOException exception) {
if (nonceKey == null) {
return;
}
Long procId = nonceKeysToProcIdsMap.get(nonceKey);
if ((procId == null) || completed.containsKey(procId)) {
return;
}
completed.computeIfAbsent(procId, key -> {
Procedure<TEnvironment> proc = new FailedProcedure<>(procId.longValue(), procName, procOwner,
nonceKey, exception);
return new CompletedProcedureRetainer<>(proc);
});
} | 3.26 |
hbase_ProcedureExecutor_submitProcedures_rdh | /**
* Add a set of new root-procedure to the executor.
*
* @param procs
* the new procedures to execute.
*/
// TODO: Do we need to take nonces here?
public void submitProcedures(Procedure<TEnvironment>[] procs) {
Preconditions.checkArgument(lastProcId.get() >= 0);
if ((procs == null) || (procs.length <= 0)) {
return;
}
// Prepare procedure
for (int i = 0; i < procs.length; ++i) {
prepareProcedure(procs[i]).setProcId(nextProcId());
}
// Commit the transaction
store.insert(procs);
if (LOG.isDebugEnabled()) {
LOG.debug("Stored " + Arrays.toString(procs));
}
// Add the procedure to the executor
for (int v48 = 0; v48 < procs.length; ++v48) {
pushProcedure(procs[v48]);
}
} | 3.26 |
hbase_ProcedureExecutor_runProcedure_rdh | /**
* Encapsulates execution of the current {@link #activeProcedure} for easy tracing.
*/
private long runProcedure() throws IOException {
final Procedure<TEnvironment> proc = this.activeProcedure;
int activeCount = activeExecutorCount.incrementAndGet();
int runningCount = store.setRunningProcedureCount(activeCount);
LOG.trace("Execute pid={} runningCount={}, activeCount={}", proc.getProcId(), runningCount, activeCount);
executionStartTime.set(EnvironmentEdgeManager.currentTime());
IdLock.Entry lockEntry = procExecutionLock.getLockEntry(proc.getProcId());
try {
executeProcedure(proc);
} catch (AssertionError e) {
LOG.info("ASSERT pid=" + proc.getProcId(), e);
throw e;
} finally {
procExecutionLock.releaseLockEntry(lockEntry);
activeCount = activeExecutorCount.decrementAndGet();
runningCount = store.setRunningProcedureCount(activeCount);
LOG.trace("Halt pid={} runningCount={}, activeCount={}", proc.getProcId(), runningCount, activeCount);
this.activeProcedure = null;
executionStartTime.set(Long.MAX_VALUE);
}
return EnvironmentEdgeManager.currentTime();
} | 3.26 |
hbase_ProcedureExecutor_getCurrentRunTime_rdh | /**
 * Returns how long the current procedure has been running
*/
public long getCurrentRunTime() {
return EnvironmentEdgeManager.currentTime() - executionStartTime.get();
} | 3.26 |
hbase_ProcedureExecutor_restoreLocks_rdh | // Restore the locks for all the procedures.
// Notice that we need to restore the locks starting from the root proc, otherwise there will be
// problem that a sub procedure may hold the exclusive lock first and then we are stuck when
// calling the acquireLock method for the parent procedure.
// The algorithm is straightforward:
// 1. Use a set to record the procedures which locks have already been restored.
// 2. Use a stack to store the hierarchy of the procedures
// 3. For all the procedure, we will first try to find its parent and push it into the stack,
// unless
// a. We have no parent, i.e., we are the root procedure
// b. The lock has already been restored (by checking the set introduced in #1)
// then we start to pop the stack and call acquireLock for each procedure.
// Notice that this should be done for all procedures, not only the ones in runnableList.
private void restoreLocks() {
Set<Long> restored = new HashSet<>();
Deque<Procedure<TEnvironment>> stack = new ArrayDeque<>();
procedures.values().forEach(proc -> {
for (; ;) {
if (restored.contains(proc.getProcId())) {
restoreLocks(stack, restored);
return;
}
if (!proc.hasParent()) {
restoreLock(proc, restored);
restoreLocks(stack, restored);
return;
}
stack.push(proc);
proc = procedures.get(proc.getParentProcId());
}
});
} | 3.26 |
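A toy, standalone illustration (not HBase code) of the parent-first ordering the comment above describes: walk each procedure up to its root, then unwind the stack so parents are always handled before children. The procedure ids and iteration order are made up.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class ParentFirstOrderSketch {
  public static void main(String[] args) {
    Map<Long, Long> parentOf = Map.of(3L, 2L, 2L, 1L); // 3 -> 2 -> 1 (root)
    Set<Long> restored = new HashSet<>();
    Deque<Long> stack = new ArrayDeque<>();
    for (long proc : List.of(3L, 1L, 2L)) { // arbitrary iteration order
      long cur = proc;
      while (!restored.contains(cur) && parentOf.containsKey(cur)) {
        stack.push(cur);
        cur = parentOf.get(cur);
      }
      if (restored.add(cur)) {
        System.out.println("acquire lock for pid=" + cur); // root or first unrestored ancestor
      }
      while (!stack.isEmpty()) {
        long pid = stack.pop();
        if (restored.add(pid)) {
          System.out.println("acquire lock for pid=" + pid);
        }
      }
    }
    // Prints 1, 2, 3: a parent's lock is always restored before its children's.
  }
}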
hbase_ProcedureExecutor_startWorkers_rdh | /**
* Start the workers.
 */
public void startWorkers() throws IOException {
if (!running.compareAndSet(false, true)) {
LOG.warn("Already running");
return;
}
// Start the executors. Here we must have the lastProcId set.
LOG.trace("Start workers {}", workerThreads.size());timeoutExecutor.start();
workerMonitorExecutor.start();
for (WorkerThread worker : workerThreads) {
worker.start();
}
// Internal chores
workerMonitorExecutor.add(new WorkerMonitor());
// Add completed cleaner chore
addChore(new CompletedProcedureCleaner<>(conf, store, procExecutionLock, completed, nonceKeysToProcIdsMap));
} | 3.26 |
hbase_ProcedureExecutor_execProcedure_rdh | /**
* Executes <code>procedure</code>
* <ul>
* <li>Calls the doExecute() of the procedure
* <li>If the procedure execution didn't fail (i.e. valid user input)
* <ul>
* <li>...and returned subprocedures
* <ul>
* <li>The subprocedures are initialized.
* <li>The subprocedures are added to the store
* <li>The subprocedures are added to the runnable queue
* <li>The procedure is now in a WAITING state, waiting for the subprocedures to complete
* </ul>
* </li>
* <li>...if there are no subprocedure
* <ul>
* <li>the procedure completed successfully
* <li>if there is a parent (WAITING)
* <li>the parent state will be set to RUNNABLE
* </ul>
* </li>
* </ul>
* </li>
* <li>In case of failure
* <ul>
* <li>The store is updated with the new state</li>
* <li>The executor (caller of this method) will start the rollback of the procedure</li>
* </ul>
* </li>
* </ul>
*/
private void execProcedure(RootProcedureState<TEnvironment> procStack, Procedure<TEnvironment> procedure) {
Preconditions.checkArgument(procedure.getState() == ProcedureState.RUNNABLE, "NOT RUNNABLE! " + procedure.toString());
// Procedures can suspend themselves. They skip out by throwing a ProcedureSuspendedException.
// The exception is caught below and then we hurry to the exit without disturbing state. The
// idea is that the processing of this procedure will be unsuspended later by an external event
// such as the report of a region open.
boolean v79 = false;
// Whether to 're-' -execute; run through the loop again.
boolean reExecute = false;
Procedure<TEnvironment>[] subprocs = null;
do {
reExecute = false;
procedure.resetPersistence();
try {
subprocs = procedure.doExecute(getEnvironment());
if ((subprocs != null) && (subprocs.length == 0)) {
subprocs = null;
}
} catch (ProcedureSuspendedException e) {
LOG.trace("Suspend {}", procedure);
v79 = true;
} catch (ProcedureYieldException e) {
LOG.trace("Yield {}", procedure, e);
yieldProcedure(procedure);
return;
} catch (InterruptedException e) {
LOG.trace("Yield interrupt {}", procedure, e);
m2(procedure, e);
yieldProcedure(procedure);
return;
} catch (Throwable e) {
// Catch NullPointerExceptions or similar errors...
String msg = "CODE-BUG: Uncaught runtime exception: " + procedure;
LOG.error(msg, e);
procedure.setFailure(new RemoteProcedureException(msg, e));
}
if (!procedure.isFailed()) {
if (subprocs != null) {
if ((subprocs.length == 1) && (subprocs[0] == procedure)) {
// Procedure returned itself. Quick-shortcut for a state machine-like procedure;
// i.e. we go around this loop again rather than go back out on the scheduler queue.
subprocs = null;
reExecute = true;
LOG.trace("Short-circuit to next step on pid={}", procedure.getProcId());
} else {
// Yield the current procedure, and make the subprocedure runnable
// subprocs may come back 'null'.
subprocs = initializeChildren(procStack, procedure, subprocs);
LOG.info("Initialized subprocedures=" + (subprocs == null ? null : Stream.of(subprocs).map(e -> ("{" + e.toString()) + "}").collect(Collectors.toList()).toString()));
}
} else if (procedure.getState() == ProcedureState.WAITING_TIMEOUT) {
LOG.trace("Added to timeoutExecutor {}", procedure);timeoutExecutor.add(procedure);
} else if (!v79) {
// No subtask, so we are done
procedure.setState(ProcedureState.SUCCESS);
}
}
// allows to kill the executor before something is stored to the wal.
// useful to test the procedure recovery.
if ((testing != null) && testing.shouldKillBeforeStoreUpdate(v79, procedure.hasParent())) {
kill("TESTING: Kill BEFORE store update: " + procedure);
}
// TODO: The code here doesn't check if store is running before persisting to the store as
// it relies on the method call below to throw RuntimeException to wind up the stack and
// executor thread to stop. The statement following the method call below seems to check if
// store is not running, to prevent scheduling children procedures, re-execution or yield
// of this procedure. This may need more scrutiny and subsequent cleanup in future
//
// Commit the transaction even if a suspend (state may have changed). Note this append
// can take a bunch of time to complete.
if (procedure.needPersistence()) {
// Add the procedure to the stack
// See HBASE-28210 on why we need synchronized here
synchronized(procStack) {
procStack.addRollbackStep(procedure);
m1(procStack, procedure, subprocs);
}
}
// if the store is not running we are aborting
if (!store.isRunning()) {
return;
}
// if the procedure is kind enough to pass the slot to someone else, yield
if ((procedure.isRunnable() && (!v79)) && procedure.isYieldAfterExecutionStep(getEnvironment())) {
yieldProcedure(procedure);
return;
}
assert (reExecute && (subprocs == null)) || (!reExecute);
} while (reExecute);
// Allows to kill the executor after something is stored to the WAL but before the below
// state settings are done -- in particular the one on the end where we make parent
// RUNNABLE again when its children are done; see countDownChildren.
if ((testing != null) && testing.shouldKillAfterStoreUpdate(v79)) {
kill("TESTING: Kill AFTER store update: " + procedure);
}
// Submit the new subprocedures
if ((subprocs != null) && (!procedure.isFailed())) {
submitChildrenProcedures(subprocs);
}
// we need to log the release lock operation before waking up the parent procedure, as there
// could be a race where the parent procedure calls updateStoreOnExec ahead of us and removes all
// the sub procedures from store and cause problems...
releaseLock(procedure, false);
// if the procedure is complete and has a parent, count down the children latch.
// If 'suspended', do nothing to change state -- let other threads handle unsuspend event.
if (((!v79) && procedure.isFinished()) && procedure.hasParent()) {
countDownChildren(procStack, procedure);
}
} | 3.26 |
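A hedged fragment (not a complete class) showing the execute() contract the loop above drives: returning {this} triggers the re-execute shortcut, returning other procedures schedules children and parks the parent in WAITING, and returning null ends in SUCCESS. Step, MyEnv and ChildProcedure are made-up names, and the rollback/abort/serialization overrides a real Procedure subclass needs are omitted.

@Override
protected Procedure<MyEnv>[] execute(MyEnv env)
    throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException {
  switch (step) {
    case PREPARE:
      step = Step.RUN_CHILDREN;
      return new Procedure[] { this }; // re-execute shortcut: the do/while loops again
    case RUN_CHILDREN:
      step = Step.FINISH;
      // Parent goes to WAITING until both children finish.
      return new Procedure[] { new ChildProcedure(), new ChildProcedure() };
    default:
      return null; // no subprocedures left: state becomes SUCCESS
  }
}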
hbase_ProcedureExecutor_keepAlive_rdh | // core worker never timeout
protected boolean keepAlive(long lastUpdate) {
return true;
} | 3.26 |