name (string, 12 to 178 chars) | code_snippet (string, 8 to 36.5k chars) | score (float64, 3.26 to 3.68)
---|---|---|
hbase_FileArchiverNotifierImpl_bucketFilesToSnapshot_rdh | /**
* For the given snapshot, find all files which this {@code snapshotName} references. After a file
* is found to be referenced by the snapshot, it is removed from {@code filesToUpdate} and
* {@code snapshotSizeChanges} is updated in concert.
*
* @param snapshotName
* The snapshot to check
* @param filesToUpdate
* A mapping of archived files to their size
* @param snapshotSizeChanges
* A mapping of snapshots and their change in size
*/
void bucketFilesToSnapshot(String snapshotName, Map<String, Long> filesToUpdate, Map<String, Long> snapshotSizeChanges) throws
IOException {
// A quick check to avoid doing work if the caller unnecessarily invoked this method.
if (filesToUpdate.isEmpty()) {
return;
}
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, CommonFSUtils.getRootDir(conf));
SnapshotDescription sd = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
SnapshotManifest v8 = SnapshotManifest.open(conf, fs, snapshotDir, sd);
// For each region referenced by the snapshot
for (SnapshotRegionManifest rm : v8.getRegionManifests()) {
// For each column family in this region
for (FamilyFiles ff : rm.getFamilyFilesList()) {
// And each store file in that family
for (StoreFile sf : ff.getStoreFilesList()) {
Long valueOrNull = filesToUpdate.remove(sf.getName());
if (valueOrNull != null) {
// This storefile was recently archived, we should update this snapshot with its size
snapshotSizeChanges.merge(snapshotName, valueOrNull, Long::sum);
}
// Short-circuit, if we have no more files that were archived, we don't need to iterate
// over the rest of the snapshot.
if (filesToUpdate.isEmpty()) {
return;
}
}
}
}
} | 3.26 |
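A minimal, self-contained sketch of the remove-and-merge accumulation pattern this method relies on, using only plain java.util maps; the snapshot and file names below are made up for illustration:

```java
import java.util.HashMap;
import java.util.Map;

public class BucketingSketch {
  public static void main(String[] args) {
    // Archived files still waiting to be attributed to a snapshot, with their sizes.
    Map<String, Long> filesToUpdate = new HashMap<>();
    filesToUpdate.put("hfile-a", 100L);
    filesToUpdate.put("hfile-b", 250L);

    // Net size change per snapshot, accumulated as files are claimed.
    Map<String, Long> snapshotSizeChanges = new HashMap<>();

    // Pretend the snapshot manifest references these store files.
    for (String referenced : new String[] { "hfile-a", "hfile-b", "hfile-c" }) {
      Long size = filesToUpdate.remove(referenced);            // claim the file, if pending
      if (size != null) {
        snapshotSizeChanges.merge("snap1", size, Long::sum);   // accumulate its size
      }
      if (filesToUpdate.isEmpty()) {
        break; // short-circuit once every pending file has been claimed
      }
    }
    System.out.println(snapshotSizeChanges); // {snap1=350}
  }
}
```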
hbase_CryptoAES_unwrap_rdh | /**
 * Decrypts input data. The input consists of (msg, padding if needed, mac) and the sequence num. The
* result is msg.
*
* @param data
* the input byte array
* @param offset
* the offset in input where the input starts
* @param len
* the input length
* @return the new decrypted byte array.
* @throws SaslException
* if error happens
*/
public byte[] unwrap(byte[] data, int offset, int len) throws SaslException {
// get plaintext and seqNum
byte[] v8 = new byte[len - 4];
byte[] peerSeqNum = new byte[4];
try {
decryptor.update(data, offset, len - 4, v8, 0);
} catch (ShortBufferException sbe) {
// this should not happen
throw new SaslException("Error happens during decrypt data", sbe);
}
System.arraycopy(data, offset + v8.length, peerSeqNum, 0, 4);
// get msg and mac
byte[] msg = new byte[v8.length - 10];
byte[] mac = new byte[10];
System.arraycopy(v8, 0, msg, 0, msg.length);
System.arraycopy(v8, msg.length, mac, 0, 10);
// check mac integrity and msg sequence
if (!integrity.compareHMAC(mac, peerSeqNum, msg, 0, msg.length)) {
throw new SaslException("Unmatched MAC");
}
if (!integrity.comparePeerSeqNum(peerSeqNum)) {
throw new SaslException((("Out of order sequencing of messages. Got: " + integrity.byteToInt(peerSeqNum)) + " Expected: ") + integrity.peerSeqNum);
}
integrity.incPeerSeqNum();
return msg;
} | 3.26 |
hbase_CryptoAES_wrap_rdh | /**
 * Encrypts input data. The result consists of (msg, padding if needed, mac) and the sequence num.
*
* @param data
* the input byte array
* @param offset
* the offset in input where the input starts
* @param len
* the input length
* @return the new encrypted byte array.
* @throws SaslException
* if error happens
*/
public byte[] wrap(byte[] data, int offset, int len) throws SaslException {
// mac
byte[] mac = integrity.m0(data, offset, len);
integrity.incMySeqNum();
// encrypt
byte[] encrypted = new byte[len + 10];
try {
int n = encryptor.update(data, offset, len, encrypted, 0);
encryptor.update(mac, 0, 10, encrypted, n);
} catch (ShortBufferException sbe) {
// this should not happen
throw new SaslException("Error happens during encrypt data", sbe);
}
// append seqNum used for mac
byte[] wrapped = new byte[encrypted.length + 4];
System.arraycopy(encrypted, 0, wrapped, 0, encrypted.length);
System.arraycopy(integrity.getSeqNum(), 0, wrapped, encrypted.length, 4);
return wrapped;
} | 3.26 |
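As a rough illustration of the wire layout the wrap/unwrap pair agrees on (msg || 10-byte mac || 4-byte sequence number), here is a minimal sketch that frames and then unframes a payload with plain array copies; the mac and sequence-number bytes are dummies, not real HMAC or cipher output:

```java
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class WrapLayoutSketch {
  public static void main(String[] args) {
    byte[] msg = "hello".getBytes(StandardCharsets.UTF_8);
    byte[] mac = new byte[10];      // placeholder for the 10-byte integrity code
    byte[] seqNum = {0, 0, 0, 1};   // placeholder for the 4-byte sequence number

    // wrap: msg || mac || seqNum
    byte[] wrapped = new byte[msg.length + mac.length + seqNum.length];
    System.arraycopy(msg, 0, wrapped, 0, msg.length);
    System.arraycopy(mac, 0, wrapped, msg.length, mac.length);
    System.arraycopy(seqNum, 0, wrapped, msg.length + mac.length, seqNum.length);

    // unwrap: peel the trailing 4-byte seqNum, then the trailing 10-byte mac
    byte[] body = Arrays.copyOfRange(wrapped, 0, wrapped.length - 4);
    byte[] peerSeqNum = Arrays.copyOfRange(wrapped, wrapped.length - 4, wrapped.length);
    byte[] recoveredMsg = Arrays.copyOfRange(body, 0, body.length - 10);

    System.out.println(new String(recoveredMsg, StandardCharsets.UTF_8)); // hello
    System.out.println(Arrays.toString(peerSeqNum));                      // [0, 0, 0, 1]
  }
}
```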
hbase_RegionSizeCalculator_getRegionSize_rdh | /**
* Returns size of given region in bytes. Returns 0 if region was not found.
*/
public long getRegionSize(byte[] regionId) {
Long v7 = sizeMap.get(regionId);
if (v7 == null) {
LOG.debug("Unknown region:" + Arrays.toString(regionId));
return 0;
} else {
return v7;
}
} | 3.26 |
hbase_MasterRpcServices_isProcedureDone_rdh | /**
* Checks if the specified procedure is done.
*
* @return true if the procedure is done, false if the procedure is in the process of completing
* @throws ServiceException
* if invalid procedure or failed procedure with progress failure reason.
*/
@Override
public IsProcedureDoneResponse isProcedureDone(RpcController controller, IsProcedureDoneRequest request) throws ServiceException {
try {
server.checkInitialized();
ProcedureDescription desc = request.getProcedure();
MasterProcedureManager mpm = server.getMasterProcedureManagerHost().getProcedureManager(desc.getSignature());
if (mpm == null) {
throw new ServiceException("The procedure is not registered: " + desc.getSignature());}
LOG.debug(("Checking to see if procedure from request:" + desc.getSignature()) + " is done");
IsProcedureDoneResponse.Builder builder = IsProcedureDoneResponse.newBuilder();
boolean done = mpm.isProcedureDone(desc);
builder.setDone(done);
return builder.build();
} catch (ForeignException e) {
throw new ServiceException(e.getCause());
} catch (IOException e) {
throw new ServiceException(e);
}
} | 3.26 |
hbase_MasterRpcServices_assigns_rdh | /**
* A 'raw' version of assign that does bulk and can skirt Master state checks if override is set;
* i.e. assigns can be forced during Master startup or if RegionState is unclean. Used by HBCK2.
*/
@Override
public AssignsResponse assigns(RpcController controller, MasterProtos.AssignsRequest request) throws ServiceException {
checkMasterProcedureExecutor();
final ProcedureExecutor<MasterProcedureEnv> pe = server.getMasterProcedureExecutor();
final AssignmentManager am = server.getAssignmentManager();
MasterProtos.AssignsResponse.Builder responseBuilder = MasterProtos.AssignsResponse.newBuilder();
final boolean override = request.getOverride();
LOG.info("{} assigns, override={}", server.getClientIdAuditPrefix(), override);
for (HBaseProtos.RegionSpecifier rs : request.getRegionList()) {
final RegionInfo info = getRegionInfo(rs);
if (info == null) {
LOG.info("Unknown region {}", rs);
continue;
}
responseBuilder.addPid(Optional.ofNullable(am.createOneAssignProcedure(info, override)).map(pe::submitProcedure).orElse(Procedure.NO_PROC_ID));
}
return responseBuilder.build();
} | 3.26 |
hbase_MasterRpcServices_getCompletedSnapshots_rdh | /**
* List the currently available/stored snapshots. Any in-progress snapshots are ignored
*/
@Override
public GetCompletedSnapshotsResponse getCompletedSnapshots(RpcController controller, GetCompletedSnapshotsRequest request)
throws ServiceException {
try {
server.checkInitialized();
GetCompletedSnapshotsResponse.Builder builder = GetCompletedSnapshotsResponse.newBuilder();
List<SnapshotDescription> snapshots = server.snapshotManager.getCompletedSnapshots();
// convert to protobuf
for (SnapshotDescription snapshot : snapshots) {
builder.addSnapshots(snapshot);
}
return builder.build();
} catch (IOException e) {
throw new ServiceException(e);
}
} | 3.26 |
hbase_MasterRpcServices_hasAccessControlServiceCoprocessor_rdh | /**
* Determines if there is a MasterCoprocessor deployed which implements
* {@link AccessControlService.Interface}.
*/
boolean hasAccessControlServiceCoprocessor(MasterCoprocessorHost cpHost) {
return checkCoprocessorWithService(cpHost.findCoprocessors(MasterCoprocessor.class), Interface.class);
} | 3.26 |
hbase_MasterRpcServices_hasVisibilityLabelsServiceCoprocessor_rdh | /**
* Determines if there is a MasterCoprocessor deployed which implements
* {@link VisibilityLabelsService.Interface}.
*/
boolean hasVisibilityLabelsServiceCoprocessor(MasterCoprocessorHost cpHost) {
return checkCoprocessorWithService(cpHost.findCoprocessors(MasterCoprocessor.class), Interface.class);
} | 3.26 |
hbase_MasterRpcServices_rpcPreCheck_rdh | /**
* Checks for the following pre-checks in order:
* <ol>
* <li>Master is initialized</li>
* <li>Rpc caller has admin permissions</li>
* </ol>
*
* @param requestName
* name of rpc request. Used in reporting failures to provide context.
* @throws ServiceException
* If any of the above listed pre-check fails.
*/
private void rpcPreCheck(String requestName) throws ServiceException {
try {
server.checkInitialized();
requirePermission(requestName, Action.ADMIN);
} catch (IOException ioe) {
throw new ServiceException(ioe);
}
} | 3.26 |
hbase_MasterRpcServices_getTableDescriptors_rdh | /**
* Get list of TableDescriptors for requested tables.
*
* @param c
* Unused (set to null).
* @param req
* GetTableDescriptorsRequest that contains: - tableNames: requested tables, or if
* empty, all are requested.
*/
@Override
public GetTableDescriptorsResponse getTableDescriptors(RpcController c, GetTableDescriptorsRequest req) throws ServiceException {
try {
server.checkInitialized();
final String regex = (req.hasRegex()) ? req.getRegex() : null;
final String namespace = (req.hasNamespace()) ? req.getNamespace() : null;
List<TableName> tableNameList = null;
if (req.getTableNamesCount() > 0) {
tableNameList = new ArrayList<TableName>(req.getTableNamesCount());
for (HBaseProtos.TableName tableNamePB : req.getTableNamesList()) {
tableNameList.add(ProtobufUtil.toTableName(tableNamePB));
}
}
List<TableDescriptor> descriptors = server.listTableDescriptors(namespace, regex, tableNameList, req.getIncludeSysTables());
GetTableDescriptorsResponse.Builder builder = GetTableDescriptorsResponse.newBuilder();
if ((descriptors != null) && (descriptors.size() > 0)) {
// Add the table descriptors to the response
for (TableDescriptor htd : descriptors) {
builder.addTableSchema(ProtobufUtil.toTableSchema(htd));
}
}
return builder.build();
} catch (IOException ioe) {
throw new ServiceException(ioe);
}
} | 3.26 |
hbase_MasterRpcServices_lockHeartbeat_rdh | /**
*
* @return LOCKED, if procedure is found and it has the lock; else UNLOCKED.
* @throws ServiceException
* if given proc id is found but it is not a LockProcedure.
*/
@Override
public LockHeartbeatResponse lockHeartbeat(RpcController controller, LockHeartbeatRequest request) throws ServiceException {
try {
if (server.getLockManager().remoteLocks().lockHeartbeat(request.getProcId(), request.getKeepAlive())) {
return LockHeartbeatResponse.newBuilder().setTimeoutMs(server.getConfiguration().getInt(LockProcedure.REMOTE_LOCKS_TIMEOUT_MS_CONF, LockProcedure.DEFAULT_REMOTE_LOCKS_TIMEOUT_MS)).setLockStatus(LockStatus.LOCKED).build();
} else {
return LockHeartbeatResponse.newBuilder().setLockStatus(LockStatus.UNLOCKED).build();
}
} catch (IOException e) {
throw new ServiceException(e);
}
} | 3.26 |
hbase_MasterRpcServices_getServices_rdh | /**
* Returns list of blocking services and their security info classes that this server supports
*/
@Override
protected List<BlockingServiceAndInterface> getServices() {
List<BlockingServiceAndInterface> bssi = new ArrayList<>(5);
bssi.add(new BlockingServiceAndInterface(MasterService.newReflectiveBlockingService(this), BlockingInterface.class));
bssi.add(new BlockingServiceAndInterface(RegionServerStatusService.newReflectiveBlockingService(this), BlockingInterface.class));
bssi.add(new BlockingServiceAndInterface(LockService.newReflectiveBlockingService(this), BlockingInterface.class));
bssi.add(new BlockingServiceAndInterface(HbckService.newReflectiveBlockingService(this), BlockingInterface.class));
bssi.add(new BlockingServiceAndInterface(ClientMetaService.newReflectiveBlockingService(this), BlockingInterface.class));
bssi.add(new BlockingServiceAndInterface(AdminService.newReflectiveBlockingService(this), BlockingInterface.class));
return bssi;
} | 3.26 |
hbase_MasterRpcServices_switchBalancer_rdh | /**
* Assigns balancer switch according to BalanceSwitchMode
*
* @param b
* new balancer switch
* @param mode
* BalanceSwitchMode
* @return old balancer switch
*/
boolean switchBalancer(final boolean b, BalanceSwitchMode mode) throws IOException {
boolean oldValue = server.loadBalancerStateStore.get();
boolean v3 = b;
try {
if (server.cpHost != null) {
server.cpHost.preBalanceSwitch(v3);
}
if (mode == BalanceSwitchMode.SYNC) {
synchronized(server.getLoadBalancer()) {
server.loadBalancerStateStore.set(v3);
}
} else {
server.loadBalancerStateStore.set(v3);
}
LOG.info((server.getClientIdAuditPrefix() + " set balanceSwitch=") + v3);
if (server.cpHost != null) {
server.cpHost.postBalanceSwitch(oldValue, v3);
}
server.getLoadBalancer().updateBalancerStatus(v3);
} catch (IOException ioe) {
LOG.warn("Error flipping balance switch", ioe);
}
return oldValue;
} | 3.26 |
hbase_MasterRpcServices_setTableStateInMeta_rdh | /**
* Update state of the table in meta only. This is required by hbck in some situations to cleanup
 * stuck assign/unassign region procedures for the table.
*
* @return previous state of the table
*/
@Override
public GetTableStateResponse setTableStateInMeta(RpcController controller, SetTableStateInMetaRequest request) throws ServiceException {
rpcPreCheck("setTableStateInMeta");
TableName tn = ProtobufUtil.toTableName(request.getTableName());
try {
TableState prevState = this.server.getTableStateManager().getTableState(tn);
TableState newState = TableState.convert(tn, request.getTableState());
LOG.info("{} set table={} state from {} to {}", server.getClientIdAuditPrefix(), tn, prevState.getState(), newState.getState());
this.server.getTableStateManager().setTableState(tn, newState.getState());
return GetTableStateResponse.newBuilder().setTableState(prevState.convert()).build();
} catch (Exception e) {
throw new ServiceException(e);
}
} | 3.26 |
hbase_MasterRpcServices_setRegionStateInMeta_rdh | /**
* Update state of the region in meta only. This is required by hbck in some situations to cleanup
 * stuck assign/unassign region procedures for the table.
*
* @return previous states of the regions
*/
@Override
public SetRegionStateInMetaResponse setRegionStateInMeta(RpcController controller, SetRegionStateInMetaRequest request) throws ServiceException {
rpcPreCheck("setRegionStateInMeta");
SetRegionStateInMetaResponse.Builder builder = SetRegionStateInMetaResponse.newBuilder();
final AssignmentManager am = server.getAssignmentManager();
try {
for (RegionSpecifierAndState s : request.getStatesList()) {
final RegionSpecifier spec = s.getRegionSpecifier();
final RegionInfo targetRegionInfo = getRegionInfo(spec);
final RegionState.State targetState = RegionState.State.convert(s.getState());
final RegionState.State currentState = Optional.ofNullable(targetRegionInfo).map(info -> am.getRegionStates().getRegionState(info)).map(RegionState::getState).orElseThrow(() -> new ServiceException(("No existing state known for region '" + spec) + "'."));
LOG.info("{} set region={} state from {} to {}", server.getClientIdAuditPrefix(), targetRegionInfo, currentState, targetState);
if (currentState == targetState) {
LOG.debug("Proposed state matches current state. {}, {}", targetRegionInfo, currentState);
continue;
}
MetaTableAccessor.updateRegionState(server.getConnection(), targetRegionInfo, targetState);
// Loads from meta again to refresh AM cache with the new region state
am.populateRegionStatesFromMeta(targetRegionInfo);
builder.addStates(RegionSpecifierAndState.newBuilder().setRegionSpecifier(spec).setState(currentState.convert()));
}
} catch (IOException e) {
throw new ServiceException(e);
}
return builder.build();
}
/**
* Get {@link RegionInfo} from Master using content of {@link RegionSpecifier} as key.
*
* @return {@link RegionInfo} found by decoding {@code rs} or {@code null} if {@code rs} | 3.26 |
hbase_MasterRpcServices_execProcedureWithRet_rdh | /**
* Triggers a synchronous attempt to run a distributed procedure and sets return data in response.
* {@inheritDoc }
*/
@Override
public ExecProcedureResponse execProcedureWithRet(RpcController controller, ExecProcedureRequest request) throws ServiceException {
rpcPreCheck("execProcedureWithRet");
try {
ProcedureDescription desc = request.getProcedure();
MasterProcedureManager mpm = server.getMasterProcedureManagerHost().getProcedureManager(desc.getSignature());
if (mpm == null) {
throw new ServiceException("The procedure is not registered: " + desc.getSignature());
}
LOG.info((server.getClientIdAuditPrefix() + " procedure request for: ") + desc.getSignature());
byte[] data = mpm.execProcedureWithRet(desc);
ExecProcedureResponse.Builder builder = ExecProcedureResponse.newBuilder();
// set return data if available
if (data != null) {
builder.setReturnData(UnsafeByteOperations.unsafeWrap(data));
}
return builder.build();
} catch (IOException e) {
throw new ServiceException(e);
}
} | 3.26 |
hbase_MasterRpcServices_switchSnapshotCleanup_rdh | /**
* Turn on/off snapshot auto-cleanup based on TTL
*
* @param enabledNewVal
* Set to <code>true</code> to enable, <code>false</code> to disable
* @param synchronous
* If <code>true</code>, it waits until current snapshot cleanup is
* completed, if outstanding
* @return previous snapshot auto-cleanup mode
*/
private synchronized boolean switchSnapshotCleanup(final boolean enabledNewVal, final boolean synchronous) throws IOException {
final boolean oldValue = server.snapshotCleanupStateStore.get();
server.switchSnapshotCleanup(enabledNewVal, synchronous);
LOG.info("{} Successfully set snapshot cleanup to {}", server.getClientIdAuditPrefix(), enabledNewVal);
return oldValue;
} | 3.26 |
hbase_MasterRpcServices_checkCoprocessorWithService_rdh | /**
* Determines if there is a coprocessor implementation in the provided argument which extends or
* implements the provided {@code service}.
*/
boolean checkCoprocessorWithService(List<MasterCoprocessor> coprocessorsToCheck, Class<?> service) {
if ((coprocessorsToCheck == null) || coprocessorsToCheck.isEmpty()) {
return false;
}
for (MasterCoprocessor cp : coprocessorsToCheck) {
if (service.isAssignableFrom(cp.getClass())) {
return true;
}
}
return false;
} | 3.26 |
hbase_MasterRpcServices_offlineRegion_rdh | /**
* Offline specified region from master's in-memory state. It will not attempt to reassign the
* region as in unassign. This is a special method that should be used by experts or hbck.
*/
@Override
public OfflineRegionResponse offlineRegion(RpcController controller, OfflineRegionRequest request) throws ServiceException {
try {
server.checkInitialized();
final RegionSpecifierType type = request.getRegion().getType();
if (type != RegionSpecifierType.REGION_NAME) {
LOG.warn((("moveRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME) + " actual: ") + type);}
final byte[] regionName = request.getRegion().getValue().toByteArray();
final RegionInfo hri = server.getAssignmentManager().getRegionInfo(regionName);
if (hri == null) {
throw new UnknownRegionException(Bytes.toStringBinary(regionName));
}
if (server.cpHost != null) {
server.cpHost.preRegionOffline(hri);
}
LOG.info((server.getClientIdAuditPrefix() + " offline ") + hri.getRegionNameAsString());
server.getAssignmentManager().offlineRegion(hri);
if (server.cpHost != null) {
server.cpHost.postRegionOffline(hri);
}
} catch (IOException ioe) {
throw new ServiceException(ioe);
}
return OfflineRegionResponse.newBuilder().build();
} | 3.26 |
hbase_MasterRpcServices_snapshot_rdh | /**
* Triggers an asynchronous attempt to take a snapshot. {@inheritDoc }
*/
@Override
public SnapshotResponse snapshot(RpcController controller, SnapshotRequest request) throws ServiceException {
try {
server.checkInitialized();
server.snapshotManager.checkSnapshotSupport();
LOG.info((server.getClientIdAuditPrefix() + " snapshot request for:") + ClientSnapshotDescriptionUtils.toString(request.getSnapshot()));
// get the snapshot information
SnapshotDescription snapshot = SnapshotDescriptionUtils.validate(request.getSnapshot(), server.getConfiguration());
// send back the max amount of time the client should wait for the snapshot to complete
long waitTime = SnapshotDescriptionUtils.getMaxMasterTimeout(server.getConfiguration(), snapshot.getType(), SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME);
SnapshotResponse.Builder builder = SnapshotResponse.newBuilder().setExpectedTimeout(waitTime);
// If there is nonce group and nonce in the snapshot request, then the client can
// handle snapshot procedure procId. And if enable the snapshot procedure, we
// will do the snapshot work with proc-v2, otherwise we will fall back to zk proc.
if ((request.hasNonceGroup() && request.hasNonce()) && server.snapshotManager.snapshotProcedureEnabled()) {
long nonceGroup = request.getNonceGroup();
long nonce = request.getNonce();
long procId = server.snapshotManager.takeSnapshot(snapshot, nonceGroup, nonce);
return builder.setProcId(procId).build();
} else {
server.snapshotManager.takeSnapshot(snapshot);
return builder.build();
}
} catch (ForeignException e) {
throw new ServiceException(e.getCause());
} catch (IOException e) {
throw new ServiceException(e);
}
} | 3.26 |
hbase_MasterRpcServices_checkMasterProcedureExecutor_rdh | /**
*
* @throws ServiceException
* If no MasterProcedureExecutor
*/
private void checkMasterProcedureExecutor() throws ServiceException {
if (this.server.getMasterProcedureExecutor() == null) {
throw new ServiceException("Master's ProcedureExecutor not initialized; retry later");
}
} | 3.26 |
hbase_MasterRpcServices_runHbckChore_rdh | // HBCK Services
@Override
public RunHbckChoreResponse runHbckChore(RpcController c, RunHbckChoreRequest req) throws ServiceException {
rpcPreCheck("runHbckChore");
LOG.info("{} request HBCK chore to run", server.getClientIdAuditPrefix());
HbckChore hbckChore = server.getHbckChore();
boolean ran = hbckChore.runChore();
return RunHbckChoreResponse.newBuilder().setRan(ran).build();
} | 3.26 |
hbase_MasterRpcServices_getTableNames_rdh | /**
* Get list of userspace table names
*
* @param controller
* Unused (set to null).
* @param req
* GetTableNamesRequest
*/
@Override
public GetTableNamesResponse getTableNames(RpcController controller, GetTableNamesRequest req) throws ServiceException {
try {
server.checkServiceStarted();
final String regex = (req.hasRegex()) ? req.getRegex() : null;
final String namespace = (req.hasNamespace()) ? req.getNamespace() : null;
List<TableName> tableNames = server.listTableNames(namespace, regex, req.getIncludeSysTables());
GetTableNamesResponse.Builder builder = GetTableNamesResponse.newBuilder();
if ((tableNames != null) && (tableNames.size() > 0)) {
// Add the table names to the response
for (TableName table : tableNames) {
builder.addTableNames(ProtobufUtil.toProtoTableName(table));
}
}
return builder.build();
} catch (IOException e) {
throw new ServiceException(e);
}
} | 3.26 |
hbase_MasterRpcServices_restoreSnapshot_rdh | /**
* Execute Restore/Clone snapshot operation.
* <p>
* If the specified table exists a "Restore" is executed, replacing the table schema and directory
* data with the content of the snapshot. The table must be disabled, or a
* UnsupportedOperationException will be thrown.
* <p>
* If the table doesn't exist a "Clone" is executed, a new table is created using the schema at
* the time of the snapshot, and the content of the snapshot.
* <p>
* The restore/clone operation does not require copying HFiles. Since HFiles are immutable the
* table can point to and use the same files as the original one.
*/
@Override
public RestoreSnapshotResponse restoreSnapshot(RpcController controller, RestoreSnapshotRequest request)
throws ServiceException {
try {
long procId = server.restoreSnapshot(request.getSnapshot(), request.getNonceGroup(), request.getNonce(), request.getRestoreACL(), request.getCustomSFT());
return RestoreSnapshotResponse.newBuilder().setProcId(procId).build();
} catch (ForeignException e) {
throw new ServiceException(e.getCause());
} catch (IOException e) {
throw new ServiceException(e);
}
} | 3.26 |
hbase_MasterRpcServices_bypassProcedure_rdh | /**
* Bypass specified procedure to completion. Procedure is marked completed but no actual work is
* done from the current state/ step onwards. Parents of the procedure are also marked for bypass.
* NOTE: this is a dangerous operation and may be used to unstuck buggy procedures. This may leave
* system in incoherent state. This may need to be followed by some cleanup steps/ actions by
* operator.
*
* @return BypassProcedureToCompletionResponse indicating success or failure
*/
@Override
public BypassProcedureResponse bypassProcedure(RpcController controller, MasterProtos.BypassProcedureRequest request) throws ServiceException {
try {
LOG.info("{} bypass procedures={}, waitTime={}, override={}, recursive={}", server.getClientIdAuditPrefix(), request.getProcIdList(), request.getWaitTime(), request.getOverride(), request.getRecursive());
List<Boolean> ret = server.getMasterProcedureExecutor().bypassProcedure(request.getProcIdList(), request.getWaitTime(), request.getOverride(), request.getRecursive());
return MasterProtos.BypassProcedureResponse.newBuilder().addAllBypassed(ret).build();
} catch (IOException e) {
throw new ServiceException(e);
}
} | 3.26 |
hbase_MasterRpcServices_getSecurityCapabilities_rdh | /**
* Returns the security capabilities in effect on the cluster
*/
@Override
public SecurityCapabilitiesResponse getSecurityCapabilities(RpcController controller, SecurityCapabilitiesRequest request) throws ServiceException {
SecurityCapabilitiesResponse.Builder response = SecurityCapabilitiesResponse.newBuilder();
try {
server.checkInitialized();
Set<SecurityCapabilitiesResponse.Capability> v170 = new HashSet<>();
// Authentication
if (User.isHBaseSecurityEnabled(server.getConfiguration())) {
v170.add(Capability.SECURE_AUTHENTICATION);
} else {
v170.add(Capability.SIMPLE_AUTHENTICATION);
}
// A coprocessor that implements AccessControlService can provide AUTHORIZATION and
// CELL_AUTHORIZATION
if ((server.cpHost != null) && hasAccessControlServiceCoprocessor(server.cpHost)) {
if (AccessChecker.isAuthorizationSupported(server.getConfiguration())) {
v170.add(Capability.AUTHORIZATION);
}
if (AccessController.isCellAuthorizationSupported(server.getConfiguration())) {
v170.add(Capability.CELL_AUTHORIZATION);
}
}
// A coprocessor that implements VisibilityLabelsService can provide CELL_VISIBILITY.
if ((server.cpHost != null) && hasVisibilityLabelsServiceCoprocessor(server.cpHost)) {
if (VisibilityController.isCellAuthorizationSupported(server.getConfiguration())) {
v170.add(Capability.CELL_VISIBILITY);
}
}
response.addAllCapabilities(v170);
} catch (IOException e) {
throw new ServiceException(e);
}
return response.build();
} | 3.26 |
hbase_MobFileName_getDateFromName_rdh | /**
* get date from MobFileName.
*
* @param fileName
* file name.
*/
public static String getDateFromName(final String fileName) {
return fileName.substring(STARTKEY_END_INDEX, DATE_END_INDEX);
} | 3.26 |
hbase_MobFileName_m0_rdh | /**
* Creates an instance of MobFileName
*
* @param startKey
* The md5 hex string of the start key.
* @param date
* The string of the latest timestamp of cells in this file, the format is
* yyyymmdd.
* @param uuid
* The uuid.
* @param regionName
* name of a region, where this file was created during flush or compaction.
* @return An instance of a MobFileName.
*/
public static MobFileName m0(byte[] startKey, String date, String uuid, String regionName) {
return new MobFileName(startKey, date, uuid, regionName);
} | 3.26 |
hbase_MobFileName_getDate_rdh | /**
* Gets the date string. Its format is yyyymmdd.
*
* @return The date string.
*/
public String getDate() {
return this.date;
} | 3.26 |
hbase_MobFileName_create_rdh | /**
* Creates an instance of MobFileName.
*
* @param fileName
* The string format of a file name.
* @return An instance of a MobFileName.
*/
public static MobFileName create(String fileName) {
// The format of a file name is md5HexString(0-31bytes) + date(32-39bytes) + UUID
// + "_" + region
// The date format is yyyyMMdd
String startKey = fileName.substring(0, STARTKEY_END_INDEX);
String date = fileName.substring(STARTKEY_END_INDEX, DATE_END_INDEX);
String uuid = fileName.substring(DATE_END_INDEX, UUID_END_INDEX);
String regionName = fileName.substring(UUID_END_INDEX + 1);
return new MobFileName(startKey, date, uuid, regionName);
} | 3.26 |
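The create javadoc describes the name layout: a 32-character md5 hex start key, an 8-character yyyyMMdd date, a UUID, then "_" and the region name. Below is a small self-contained sketch of that substring parsing; the index constants are assumptions chosen to mirror the ones referenced above (32/40/72, with a 32-character hex UUID), not values taken from the real class:

```java
public class MobFileNameSketch {
  // Assumed layout offsets: 32-char md5 hex, 8-char date, 32-char uuid, "_", region name.
  static final int STARTKEY_END_INDEX = 32;
  static final int DATE_END_INDEX = 40;
  static final int UUID_END_INDEX = 72;

  public static void main(String[] args) {
    String startKey = "0123456789abcdef0123456789abcdef";    // 32 hex chars
    String date = "20240115";                                 // yyyyMMdd
    String uuid = "fedcba9876543210fedcba9876543210";         // 32 hex chars (dashes stripped)
    String region = "regionA";
    String fileName = startKey + date + uuid + "_" + region;

    // The same slicing the create() method performs.
    System.out.println(fileName.substring(0, STARTKEY_END_INDEX));              // start key
    System.out.println(fileName.substring(STARTKEY_END_INDEX, DATE_END_INDEX)); // date
    System.out.println(fileName.substring(DATE_END_INDEX, UUID_END_INDEX));     // uuid
    System.out.println(fileName.substring(UUID_END_INDEX + 1));                 // region (skips the "_")
  }
}
```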
hbase_MobFileName_m1_rdh | /**
* Gets region name
*
* @return name of a region, where this file was created during flush or compaction.
*/
public String m1() {
return regionName;
} | 3.26 |
hbase_MobFileName_getStartKey_rdh | /**
* Gets the hex string of the md5 for a start key.
*
* @return The hex string of the md5 for a start key.
*/
public String getStartKey() {
return startKey;
} | 3.26 |
hbase_MobFileName_getFileName_rdh | /**
* Gets the file name.
*
* @return The file name.
*/
public String getFileName() {
return this.fileName;
} | 3.26 |
hbase_MobFileName_getStartKeyFromName_rdh | /**
* get startKey from MobFileName.
*
* @param fileName
* file name.
*/
public static String getStartKeyFromName(final String fileName) {
return fileName.substring(0, STARTKEY_END_INDEX);
} | 3.26 |
hbase_NettyRpcFrameDecoder_readRawVarint32_rdh | /**
 * Reads a variable-length 32-bit int from the buffer. This method is from ProtobufVarint32FrameDecoder in
 * Netty, modified a little bit to pass the checkstyle rule.
*
* @return decoded int if buffers readerIndex has been forwarded else nonsense value
*/
private static int readRawVarint32(ByteBuf buffer) {
if (!buffer.isReadable()) {
return 0;
}
buffer.markReaderIndex();
byte tmp = buffer.readByte();
if (tmp >= 0) {
return tmp;
} else {
int result = tmp & 127;
if (!buffer.isReadable()) {
buffer.resetReaderIndex();
return 0;
}
tmp = buffer.readByte();
if (tmp >= 0) {
result |= tmp << 7;
} else {
result |= (tmp & 127) << 7;
if (!buffer.isReadable()) {
buffer.resetReaderIndex();
return 0;
}
tmp = buffer.readByte();
if (tmp >= 0) {
result |= tmp << 14;
} else {
result |= (tmp & 127) << 14;
if (!buffer.isReadable()) {
buffer.resetReaderIndex();
return 0;
}
tmp = buffer.readByte();
if (tmp >= 0) {
result |= tmp << 21;
} else {
result |= (tmp & 127) << 21;
if (!buffer.isReadable()) {
buffer.resetReaderIndex();
return 0;
}
tmp = buffer.readByte();
result |= tmp << 28;
if (tmp < 0) {
throw new CorruptedFrameException("malformed varint.");
}
}
}
}
return result;
}
} | 3.26 |
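For context, a minimal sketch of the same base-128 varint scheme the decoder above reads (7 payload bits per byte, continuation bit set on all but the last byte), written against plain byte arrays rather than a Netty ByteBuf:

```java
import java.io.ByteArrayOutputStream;
import java.util.Arrays;

public class VarintSketch {
  // Encode a non-negative int as a base-128 varint: 7 payload bits per byte,
  // continuation bit (0x80) set on every byte except the last.
  static byte[] encodeVarint32(int value) {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    while ((value & ~0x7F) != 0) {
      out.write((value & 0x7F) | 0x80);
      value >>>= 7;
    }
    out.write(value);
    return out.toByteArray();
  }

  // Decode it back: accumulate 7 bits per byte until a byte without the continuation bit.
  static int decodeVarint32(byte[] buf) {
    int result = 0;
    for (int i = 0, shift = 0; i < buf.length; i++, shift += 7) {
      byte b = buf[i];
      result |= (b & 0x7F) << shift;
      if (b >= 0) {       // high bit clear: this was the last byte
        break;
      }
    }
    return result;
  }

  public static void main(String[] args) {
    int length = 300;                              // e.g. a frame length prefix
    byte[] encoded = encodeVarint32(length);
    System.out.println(Arrays.toString(encoded));  // [-84, 2] -> 0xAC 0x02
    System.out.println(decodeVarint32(encoded));   // 300
  }
}
```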
hbase_FavoredNodeAssignmentHelper_getOneRandomServer_rdh | /**
* Gets a random server from the specified rack and skips anything specified.
*
* @param rack
* rack from a server is needed
* @param skipServerSet
* the server shouldn't belong to this set
*/
protected ServerName getOneRandomServer(String rack, Set<ServerName> skipServerSet) {
// Is the rack valid? Do we recognize it?
if (((rack == null) || (getServersFromRack(rack) == null)) || getServersFromRack(rack).isEmpty()) {
return null;
}
// Lets use a set so we can eliminate duplicates
Set<StartcodeAgnosticServerName> serversToChooseFrom = Sets.newHashSet();
for (ServerName sn : getServersFromRack(rack)) {
serversToChooseFrom.add(StartcodeAgnosticServerName.valueOf(sn));
}
if ((skipServerSet != null) && (skipServerSet.size() > 0)) {
for (ServerName sn : skipServerSet) {
serversToChooseFrom.remove(StartcodeAgnosticServerName.valueOf(sn));
}
// Do we have any servers left to choose from?
if (serversToChooseFrom.isEmpty()) {
return null;
}
}
ServerName randomServer = null;
int randomIndex = ThreadLocalRandom.current().nextInt(serversToChooseFrom.size());
int j = 0;
for (StartcodeAgnosticServerName sn : serversToChooseFrom) {
if (j == randomIndex) {
randomServer = sn;
break;
}
j++;
}
if (randomServer != null) {
return ServerName.valueOf(randomServer.getAddress(), randomServer.getStartcode());
} else {
return null;
}
} | 3.26 |
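A minimal sketch of the pick-by-random-index pattern used above for choosing one element of a Set after removing excluded entries, using only ThreadLocalRandom and plain collections; the server names are made up:

```java
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;

public class RandomPickSketch {
  // Pick a random element from candidates, skipping anything in excluded; null if nothing is left.
  static String pickOneRandom(Set<String> candidates, Set<String> excluded) {
    Set<String> pool = new HashSet<>(candidates);
    pool.removeAll(excluded);
    if (pool.isEmpty()) {
      return null;
    }
    int randomIndex = ThreadLocalRandom.current().nextInt(pool.size());
    int j = 0;
    for (String candidate : pool) {   // walk to the randomly chosen position
      if (j == randomIndex) {
        return candidate;
      }
      j++;
    }
    return null;                      // unreachable, kept for completeness
  }

  public static void main(String[] args) {
    Set<String> rackServers = Set.of("rs1", "rs2", "rs3");
    Set<String> skip = Set.of("rs2");
    System.out.println(pickOneRandom(rackServers, skip)); // rs1 or rs3
  }
}
```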
hbase_FavoredNodeAssignmentHelper_initialize_rdh | // Always initialize() when FavoredNodeAssignmentHelper is constructed.
public void initialize() {
for (ServerName sn : this.servers) {
String rackName = getRackOfServer(sn);
List<ServerName> serverList = this.rackToRegionServerMap.get(rackName);
if (serverList == null) {
serverList = Lists.newArrayList();
// Add the current rack to the unique rack list
this.uniqueRackList.add(rackName);
this.rackToRegionServerMap.put(rackName, serverList);
}
for (ServerName serverName : serverList) {
if (ServerName.isSameAddress(sn, serverName)) {
// The server is already present, ignore.
break;
}
}
serverList.add(sn);
this.regionServerToRackMap.put(sn.getHostname(), rackName);
}
} | 3.26 |
hbase_FavoredNodeAssignmentHelper_placePrimaryRSAsRoundRobin_rdh | // Place the regions round-robin across the racks picking one server from each
// rack at a time. Start with a random rack, and a random server from every rack.
// If a rack doesn't have enough servers it will go to the next rack and so on.
// for choosing a primary.
// For example, if 4 racks (r1 .. r4) with 8 servers (s1..s8) each, one possible
// placement could be r2:s5, r3:s5, r4:s5, r1:s5, r2:s6, r3:s6..
// If there were fewer servers in one rack, say r3, which had 3 servers, one possible
// placement could be r2:s5, <skip-r3>, r4:s5, r1:s5, r2:s6, <skip-r3> ...
// The regions should be distributed proportionately to the racksizes
public void placePrimaryRSAsRoundRobin(Map<ServerName, List<RegionInfo>> assignmentMap, Map<RegionInfo, ServerName> primaryRSMap, List<RegionInfo> regions) {
List<String> rackList = new ArrayList<>(rackToRegionServerMap.size());
rackList.addAll(rackToRegionServerMap.keySet());
int rackIndex = ThreadLocalRandom.current().nextInt(rackList.size());
int maxRackSize = 0;
for (Map.Entry<String, List<ServerName>> r : rackToRegionServerMap.entrySet()) {
if (r.getValue().size() > maxRackSize) {
maxRackSize = r.getValue().size();
}
}
int numIterations = 0;
// Initialize the current processing host index.
int serverIndex = ThreadLocalRandom.current().nextInt(maxRackSize);
for (RegionInfo regionInfo : regions) {
List<ServerName> currentServerList;
String rackName;
while (true) {
rackName = rackList.get(rackIndex);
numIterations++;
// Get the server list for the current rack
currentServerList = rackToRegionServerMap.get(rackName);
if (serverIndex >= currentServerList.size()) {
// not enough machines in this rack
if ((numIterations % rackList.size()) == 0) {
if ((++serverIndex) >= maxRackSize) serverIndex = 0;
}
if ((++rackIndex) >= rackList.size()) {
rackIndex = 0;// reset the rack index to 0
}
} else break;
}
// Get the current process region server
ServerName currentServer = currentServerList.get(serverIndex);
// Place the current region with the current primary region server
primaryRSMap.put(regionInfo, currentServer);
if (assignmentMap != null) {
List<RegionInfo> regionsForServer = assignmentMap.get(currentServer);
if (regionsForServer == null) {
regionsForServer = new ArrayList<>();
assignmentMap.put(currentServer, regionsForServer);
}
regionsForServer.add(regionInfo);
}
// Set the next processing index
if ((numIterations % rackList.size()) == 0) {
++serverIndex;
}
if ((++rackIndex) >= rackList.size()) {
rackIndex = 0;// reset the rack index to 0
}
}
} | 3.26 |
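A stripped-down sketch of the round-robin-across-groups idea described in the comment above: iterate racks in order, take the server at the current slot from each, and skip racks that have run out of servers until the slot index wraps. The rack/server names are invented and the skipping logic is simplified relative to the real method:

```java
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class RoundRobinPlacementSketch {
  public static void main(String[] args) {
    // Racks with uneven numbers of servers.
    Map<String, List<String>> racks = new LinkedHashMap<>();
    racks.put("r1", List.of("r1s1", "r1s2", "r1s3"));
    racks.put("r2", List.of("r2s1", "r2s2"));
    racks.put("r3", List.of("r3s1", "r3s2", "r3s3"));

    List<String> rackNames = new ArrayList<>(racks.keySet());
    int maxRackSize = racks.values().stream().mapToInt(List::size).max().orElse(0);

    // Assign 8 regions: one server per rack per pass, skipping racks that are exhausted.
    int rackIndex = 0, serverIndex = 0, assigned = 0;
    List<String> placements = new ArrayList<>();
    while (assigned < 8) {
      List<String> servers = racks.get(rackNames.get(rackIndex));
      if (serverIndex < servers.size()) {
        placements.add(servers.get(serverIndex));
        assigned++;
      }
      rackIndex++;
      if (rackIndex == rackNames.size()) {             // finished one pass over the racks
        rackIndex = 0;
        serverIndex = (serverIndex + 1) % maxRackSize; // move to the next server slot
      }
    }
    System.out.println(placements);
    // [r1s1, r2s1, r3s1, r1s2, r2s2, r3s2, r1s3, r3s3]
  }
}
```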
hbase_FavoredNodeAssignmentHelper_getFavoredNodes_rdh | /**
* Returns PB'ed bytes of {@link FavoredNodes} generated by the server list.
*/
public static byte[] getFavoredNodes(List<ServerName> serverAddrList) {
FavoredNodes.Builder f = FavoredNodes.newBuilder();
for (ServerName s : serverAddrList) {
HBaseProtos.ServerName.Builder b = HBaseProtos.ServerName.newBuilder();
b.setHostName(s.getHostname());
b.setPort(s.getPort());
b.setStartCode(ServerName.NON_STARTCODE);
f.addFavoredNode(b.build());
}
return f.build().toByteArray();
} | 3.26 |
hbase_FavoredNodeAssignmentHelper_m1_rdh | /**
 * Place secondary and tertiary nodes in a multi rack case. If there are only two racks, then we
 * try to place the secondary and tertiary on a different rack than the primary. But if the other rack
 * has only one region server, then we place primary and tertiary on one rack and secondary on
 * another. The aim is to distribute the three favored nodes on >= 2 racks. TODO: see how we can
* use generateMissingFavoredNodeMultiRack API here
*
* @param primaryRS
* The primary favored node.
* @param primaryRack
* The rack of the primary favored node.
* @return Array containing secondary and tertiary favored nodes.
* @throws IOException
* Signals that an I/O exception has occurred.
*/
private ServerName[] m1(ServerName primaryRS, String primaryRack) throws IOException {
List<ServerName> favoredNodes = Lists.newArrayList(primaryRS);
// Create the secondary and tertiary pair
ServerName secondaryRS = generateMissingFavoredNodeMultiRack(favoredNodes);
favoredNodes.add(secondaryRS);
String secondaryRack = getRackOfServer(secondaryRS);
ServerName tertiaryRS;
if (primaryRack.equals(secondaryRack)) {
tertiaryRS = generateMissingFavoredNode(favoredNodes);
} else {
// Try to place tertiary in secondary RS rack else place on primary rack.
tertiaryRS = getOneRandomServer(secondaryRack, Sets.newHashSet(secondaryRS));
if (tertiaryRS == null) {
tertiaryRS = getOneRandomServer(primaryRack, Sets.newHashSet(primaryRS));
}
// We couldn't find anything in secondary rack, get any FN
if (tertiaryRS == null) {
tertiaryRS = generateMissingFavoredNode(Lists.newArrayList(primaryRS, secondaryRS));
}
}
return new ServerName[]{ secondaryRS, tertiaryRS };
} | 3.26 |
hbase_FavoredNodeAssignmentHelper_generateFavoredNodes_rdh | /* Generate favored nodes for a set of regions when we know where they are currently hosted. */
private Map<RegionInfo, List<ServerName>> generateFavoredNodes(Map<RegionInfo, ServerName> primaryRSMap) {
Map<RegionInfo, List<ServerName>> generatedFavNodes = new HashMap<>();
Map<RegionInfo, ServerName[]> secondaryAndTertiaryRSMap = m0(primaryRSMap);
for (Entry<RegionInfo, ServerName> entry : primaryRSMap.entrySet()) {
List<ServerName> favoredNodesForRegion = new ArrayList<>(FAVORED_NODES_NUM);
RegionInfo region = entry.getKey();
ServerName primarySN = entry.getValue();
favoredNodesForRegion.add(ServerName.valueOf(primarySN.getHostname(), primarySN.getPort(), NON_STARTCODE));
ServerName[] secondaryAndTertiaryNodes = secondaryAndTertiaryRSMap.get(region);
if (secondaryAndTertiaryNodes != null) {
favoredNodesForRegion.add(ServerName.valueOf(secondaryAndTertiaryNodes[0].getHostname(), secondaryAndTertiaryNodes[0].getPort(), NON_STARTCODE));
favoredNodesForRegion.add(ServerName.valueOf(secondaryAndTertiaryNodes[1].getHostname(), secondaryAndTertiaryNodes[1].getPort(), NON_STARTCODE));
}
generatedFavNodes.put(region, favoredNodesForRegion);
}
return generatedFavNodes;
} | 3.26 |
hbase_FavoredNodeAssignmentHelper_updateMetaWithFavoredNodesInfo_rdh | /**
* Update meta table with favored nodes info
*/
public static void updateMetaWithFavoredNodesInfo(Map<RegionInfo, List<ServerName>> regionToFavoredNodes, Configuration conf) throws IOException {
// Write the region assignments to the meta table.
// TODO: See above overrides take a Connection rather than a Configuration only the
// Connection is a short circuit connection. That is not going to good in all cases, when
// master and meta are not colocated. Fix when this favored nodes feature is actually used
// someday.
try (Connection conn = ConnectionFactory.createConnection(conf)) {
updateMetaWithFavoredNodesInfo(regionToFavoredNodes, conn);
}
} | 3.26 |
hbase_FavoredNodeAssignmentHelper_getRackOfServer_rdh | /**
* Get the rack of server from local mapping when present, saves lookup by the RackManager.
*/
private String getRackOfServer(ServerName sn) {
if (this.regionServerToRackMap.containsKey(sn.getHostname())) {
return this.regionServerToRackMap.get(sn.getHostname());
} else {
String rack = this.f0.getRack(sn);
this.regionServerToRackMap.put(sn.getHostname(), rack);
return rack;
}
} | 3.26 |
hbase_FavoredNodeAssignmentHelper_getFavoredNodesList_rdh | /**
* Convert PB bytes to ServerName.
*
* @param favoredNodes
* The PB'ed bytes of favored nodes
* @return the array of {@link ServerName} for the byte array of favored nodes.
*/
public static ServerName[] getFavoredNodesList(byte[] favoredNodes) throws IOException {
FavoredNodes f = FavoredNodes.parseFrom(favoredNodes);
List<HBaseProtos.ServerName> protoNodes = f.getFavoredNodeList();
ServerName[] servers = new ServerName[protoNodes.size()];
int i = 0;
for (HBaseProtos.ServerName v16 : protoNodes) {
servers[i++] = ProtobufUtil.toServerName(v16);
}
return servers;
} | 3.26 |
hbase_FavoredNodeAssignmentHelper_placeSecondaryAndTertiaryWithRestrictions_rdh | /**
* For regions that share the primary, avoid placing the secondary and tertiary on a same RS. Used
* for generating new assignments for the primary/secondary/tertiary RegionServers
*
* @return the map of regions to the servers the region-files should be hosted on
*/
public Map<RegionInfo, ServerName[]> placeSecondaryAndTertiaryWithRestrictions(Map<RegionInfo, ServerName> primaryRSMap) {
Map<ServerName, Set<RegionInfo>> serverToPrimaries = mapRSToPrimaries(primaryRSMap);
Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap = new HashMap<>();
for (Entry<RegionInfo, ServerName> entry : primaryRSMap.entrySet()) {
// Get the target region and its primary region server rack
RegionInfo regionInfo = entry.getKey();
ServerName v45 = entry.getValue();
try {
// Get the rack for the primary region server
String primaryRack = getRackOfServer(v45);
ServerName[] favoredNodes = null;
if (getTotalNumberOfRacks() == 1) {
// Single rack case: have to pick the secondary and tertiary
// from the same rack
favoredNodes = singleRackCase(regionInfo, v45, primaryRack);
} else {
favoredNodes = multiRackCaseWithRestrictions(serverToPrimaries, secondaryAndTertiaryMap, primaryRack, v45, regionInfo);
}
if (favoredNodes != null) {
secondaryAndTertiaryMap.put(regionInfo, favoredNodes);
LOG.debug("Place the secondary and tertiary region server for region " + regionInfo.getRegionNameAsString());
}
} catch (Exception e) {
LOG.warn((("Cannot place the favored nodes for region " + regionInfo.getRegionNameAsString()) + " because ") + e, e); continue;
}
}
return secondaryAndTertiaryMap;
} | 3.26 |
hbase_SaslChallengeDecoder_tryDecodeError_rdh | // will throw a RemoteException out if data is enough, so do not need to return anything.
private void tryDecodeError(ByteBuf in, int offset, int readableBytes) throws IOException {
if (readableBytes < 4) {
return;
}
int classLen = in.getInt(offset);
if (classLen <= 0) {
throw new IOException("Invalid exception class name length " + classLen);
}
if (classLen > MAX_CHALLENGE_SIZE) {
throw new IOException((("Exception class name length too large(" + classLen) + "), max allowed is ") + MAX_CHALLENGE_SIZE);
}
if (readableBytes < ((4 + classLen) + 4)) {
return;
}
int msgLen = in.getInt((offset + 4) + classLen);
if (msgLen <= 0) {
throw new IOException("Invalid exception message length " + msgLen);
}
if (msgLen > MAX_CHALLENGE_SIZE) {
throw new IOException((("Exception message length too large(" + msgLen) + "), max allowed is ") + MAX_CHALLENGE_SIZE);
}
int totalLen = (classLen + msgLen) + 8;
if (readableBytes < totalLen) {
return;
}
String className = in.toString(offset + 4, classLen, HConstants.UTF8_CHARSET);
String msg = in.toString((offset + classLen) + 8, msgLen, HConstants.UTF8_CHARSET);
in.readerIndex(offset + totalLen);
throw new RemoteException(className, msg);
} | 3.26 |
hbase_MultiTableInputFormat_getConf_rdh | /**
* Returns the current configuration.
*
* @return The current configuration.
* @see org.apache.hadoop.conf.Configurable#getConf()
*/
@Override
public Configuration getConf() {
return conf;
} | 3.26 |
hbase_MultiTableInputFormat_setConf_rdh | /**
* Sets the configuration. This is used to set the details for the tables to be scanned.
*
* @param configuration
* The configuration to set.
* @see org.apache.hadoop.conf.Configurable#setConf( org.apache.hadoop.conf.Configuration)
*/
@Override
public void setConf(Configuration configuration) {
this.conf = configuration;
String[] rawScans = conf.getStrings(SCANS);
if (rawScans.length <= 0) {
throw new IllegalArgumentException("There must be at least 1 scan configuration set to : " + SCANS);
}
List<Scan> scans = new ArrayList<>();
for (int i = 0; i < rawScans.length; i++) {
try {
scans.add(TableMapReduceUtil.convertStringToScan(rawScans[i]));
} catch (IOException e) {
throw new RuntimeException(("Failed to convert Scan : " + rawScans[i]) + " to string", e);
}
}
this.setScans(scans);
} | 3.26 |
hbase_ReversedStoreScanner_seekAsDirection_rdh | /**
* Do a backwardSeek in a reversed StoreScanner(scan backward)
*/
@Override
protected boolean seekAsDirection(Cell kv) throws IOException {
return backwardSeek(kv);
} | 3.26 |
hbase_HFileCleaner_countDeletedFiles_rdh | // Currently only for testing purpose
private void countDeletedFiles(boolean isLargeFile, boolean fromLargeQueue) {
if (isLargeFile) {
if (deletedLargeFiles.get() == Long.MAX_VALUE) {
LOG.debug("Deleted more than Long.MAX_VALUE large files, reset counter to 0");
deletedLargeFiles.set(0L);
}
deletedLargeFiles.incrementAndGet();
} else {
if (deletedSmallFiles.get() == Long.MAX_VALUE) {
LOG.debug("Deleted more than Long.MAX_VALUE small files, reset counter to 0");
deletedSmallFiles.set(0L);
}
if (fromLargeQueue) {
LOG.trace("Stolen a small file deletion task in large file thread");
}
deletedSmallFiles.incrementAndGet();
}
} | 3.26 |
hbase_HFileCleaner_checkAndUpdateConfigurations_rdh | /**
* Check new configuration and update settings if value changed
*
* @param conf
* The new configuration
* @return true if any configuration for HFileCleaner changes, false if no change
*/
private boolean checkAndUpdateConfigurations(Configuration conf) {
boolean updated = false;
int throttlePoint = conf.getInt(f0, f1);
if (throttlePoint != this.throttlePoint) {
LOG.debug("Updating throttle point, from {} to {}", this.throttlePoint, throttlePoint);
this.throttlePoint = throttlePoint;
updated = true;
}
int largeQueueInitSize = conf.getInt(LARGE_HFILE_QUEUE_INIT_SIZE, DEFAULT_LARGE_HFILE_QUEUE_INIT_SIZE);
if (largeQueueInitSize != this.largeQueueInitSize) {
LOG.debug("Updating largeQueueInitSize, from {} to {}", this.largeQueueInitSize, largeQueueInitSize);
this.largeQueueInitSize = largeQueueInitSize;
updated = true;
}
int smallQueueInitSize = conf.getInt(SMALL_HFILE_QUEUE_INIT_SIZE, DEFAULT_SMALL_HFILE_QUEUE_INIT_SIZE);
if (smallQueueInitSize != this.smallQueueInitSize) {
LOG.debug("Updating smallQueueInitSize, from {} to {}", this.smallQueueInitSize, smallQueueInitSize);
this.smallQueueInitSize = smallQueueInitSize;
updated = true;
}
int largeFileDeleteThreadNumber = conf.getInt(LARGE_HFILE_DELETE_THREAD_NUMBER, DEFAULT_LARGE_HFILE_DELETE_THREAD_NUMBER);
if (largeFileDeleteThreadNumber != this.largeFileDeleteThreadNumber) {
LOG.debug("Updating largeFileDeleteThreadNumber, from {} to {}", this.largeFileDeleteThreadNumber, largeFileDeleteThreadNumber);
this.largeFileDeleteThreadNumber = largeFileDeleteThreadNumber;
updated = true;
}
int smallFileDeleteThreadNumber = conf.getInt(SMALL_HFILE_DELETE_THREAD_NUMBER, DEFAULT_SMALL_HFILE_DELETE_THREAD_NUMBER);
if (smallFileDeleteThreadNumber != this.smallFileDeleteThreadNumber) {
LOG.debug("Updating smallFileDeleteThreadNumber, from {} to {}", this.smallFileDeleteThreadNumber, smallFileDeleteThreadNumber);
this.smallFileDeleteThreadNumber = smallFileDeleteThreadNumber;
updated = true;
}
long cleanerThreadTimeoutMsec = conf.getLong(f2, f3);
if (cleanerThreadTimeoutMsec != this.cleanerThreadTimeoutMsec) {
this.cleanerThreadTimeoutMsec = cleanerThreadTimeoutMsec;
updated = true;
}
long cleanerThreadCheckIntervalMsec = conf.getLong(HFILE_DELETE_THREAD_CHECK_INTERVAL_MSEC, DEFAULT_HFILE_DELETE_THREAD_CHECK_INTERVAL_MSEC);
if (cleanerThreadCheckIntervalMsec != this.cleanerThreadCheckIntervalMsec) {
this.cleanerThreadCheckIntervalMsec = cleanerThreadCheckIntervalMsec;
updated = true;
}
return updated;
} | 3.26 |
hbase_HFileCleaner_m1_rdh | /**
* Stop threads for hfile deletion
*/
private void m1() {
running = false;
LOG.debug("Stopping file delete threads");
for (Thread thread : threads) {
thread.interrupt();
}
} | 3.26 |
hbase_HFileCleaner_startHFileDeleteThreads_rdh | /**
* Start threads for hfile deletion
*/
private void startHFileDeleteThreads() {
final String n = Thread.currentThread().getName();
running = true;
// start thread for large file deletion
for (int i = 0; i < largeFileDeleteThreadNumber; i++) {
Thread large = new Thread() {
@Override
public void run() {
m0(largeFileQueue);
}
};
large.setDaemon(true);
large.setName((((n + "-HFileCleaner.large.") + i) + "-") + EnvironmentEdgeManager.currentTime());
large.start();
LOG.debug("Starting for large file={}", large);
threads.add(large);
}
// start thread for small file deletion
for (int v10 = 0; v10 < smallFileDeleteThreadNumber; v10++) {
Thread small = new Thread() {
@Override
public void run() {
m0(smallFileQueue);
}
};
small.setDaemon(true);
small.setName((((n + "-HFileCleaner.small.") + v10) + "-") + EnvironmentEdgeManager.currentTime());
small.start();
LOG.debug("Starting for small files={}", small);
threads.add(small);
}
} | 3.26 |
hbase_HFileCleaner_getDelegatesForTesting_rdh | /**
* Exposed for TESTING!
*/
public List<BaseHFileCleanerDelegate> getDelegatesForTesting() {
return this.cleanersChain;
} | 3.26 |
hbase_HFileCleaner_deleteFile_rdh | /**
* Construct an {@link HFileDeleteTask} for each file to delete and add into the correct queue
*
* @param file
* the file to delete
* @return HFileDeleteTask to track progress
*/
private HFileDeleteTask deleteFile(FileStatus file) {
HFileDeleteTask task = new HFileDeleteTask(file, cleanerThreadTimeoutMsec);
boolean enqueued = dispatch(task);
return enqueued ? task : null;
} | 3.26 |
hbase_Random64_seedUniquifier_rdh | /**
* Copy from {@link Random#seedUniquifier()}
*/
private static long seedUniquifier() {
for (; ;) {
long current = seedUniquifier.get();
long next = current * 181783497276652981L;
if (seedUniquifier.compareAndSet(current, next)) {
return next;
}
}
} | 3.26 |
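A minimal standalone sketch of the same compare-and-set retry loop over an AtomicLong; the multiplier matches the constant in the snippet above, while the initial seed here is an arbitrary value chosen just for the example:

```java
import java.util.concurrent.atomic.AtomicLong;

public class SeedUniquifierSketch {
  private static final AtomicLong seedUniquifier = new AtomicLong(8682522807148012L); // illustrative start value

  // Multiply the shared state by a fixed constant; retry until our CAS wins the race.
  static long seedUniquifier() {
    for (;;) {
      long current = seedUniquifier.get();
      long next = current * 181783497276652981L;
      if (seedUniquifier.compareAndSet(current, next)) {
        return next;
      }
    }
  }

  public static void main(String[] args) {
    // Each call observes the latest state, so concurrent callers get distinct seeds.
    System.out.println(seedUniquifier());
    System.out.println(seedUniquifier());
  }
}
```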
hbase_Random64_main_rdh | /**
* Random64 is a pseudorandom algorithm(LCG). Therefore, we will get same sequence if seeds are
* the same. This main will test how many calls nextLong() it will get the same seed. We do not
* need to save all numbers (that is too large). We could save once every 100000 calls nextLong().
* If it get a same seed, we can detect this by calling nextLong() 100000 times continuously.
*/
public static void main(String[] args) {
long defaultTotalTestCnt = 1000000000000L; // 1 trillion
if (args.length == 1) {
defaultTotalTestCnt = Long.parseLong(args[0]);
}
Preconditions.checkArgument(defaultTotalTestCnt > 0, "totalTestCnt <= 0");
final int precision = 100000;
final long totalTestCnt = defaultTotalTestCnt + precision;
final int reportPeriod = 100 * precision;
final long startTime = EnvironmentEdgeManager.currentTime();
System.out.println("Do collision test, totalTestCnt=" + totalTestCnt);
Random64 rand = new Random64();
Set<Long> longSet = new HashSet<>();
for (long cnt = 1; cnt <= totalTestCnt; cnt++) {
final long randLong = rand.nextLong();
if (longSet.contains(randLong)) {
System.err.println("Conflict! count=" + cnt);
System.exit(1);
}
if ((cnt % precision) == 0) {
if (!longSet.add(randLong)) {
System.err.println("Conflict! count=" + cnt);
System.exit(1);
}
if ((cnt % reportPeriod) == 0) {
long cost = EnvironmentEdgeManager.currentTime() - startTime;
long remainingMs = ((long) (((1.0 * (totalTestCnt - cnt)) * cost) / cnt));
System.out.println(String.format("Progress: %.3f%%, remaining %d minutes", (100.0 * cnt) / totalTestCnt, remainingMs / 60000));
}
}
}
System.out.println("No collision!");
} | 3.26 |
hbase_NettyRpcClientConfigHelper_setEventLoopConfig_rdh | /**
* Set the EventLoopGroup and channel class for {@code AsyncRpcClient}.
*/
public static void setEventLoopConfig(Configuration conf, EventLoopGroup group, Class<? extends Channel> channelClass) {
Preconditions.checkNotNull(group, "group is null");
Preconditions.checkNotNull(channelClass, "channel class is null");
conf.set(EVENT_LOOP_CONFIG, CONFIG_NAME);
EVENT_LOOP_CONFIG_MAP.put(CONFIG_NAME, Pair.<EventLoopGroup, Class<? extends Channel>>newPair(group, channelClass));
} | 3.26 |
hbase_NettyRpcClientConfigHelper_createEventLoopPerClient_rdh | /**
* The {@link NettyRpcClient} will create its own {@code NioEventLoopGroup}.
*/
public static void createEventLoopPerClient(Configuration conf) {
conf.set(EVENT_LOOP_CONFIG, "");
EVENT_LOOP_CONFIG_MAP.clear();
} | 3.26 |
hbase_WindowMovingAverage_getNumberOfStatistics_rdh | /**
* Returns number of statistics
*/
protected int getNumberOfStatistics() {
return lastN.length;
} | 3.26 |
hbase_WindowMovingAverage_moveForwardMostRecentPosition_rdh | /**
* Move forward the most recent index.
*
* @return the most recent index
*/
protected int moveForwardMostRecentPosition() {
int index = ++mostRecent;
if ((!oneRound) && (index == getNumberOfStatistics())) {
// Back to the head of the lastN, from now on will
// start to evict oldest value.
oneRound = true;
}
mostRecent = index % getNumberOfStatistics();
return mostRecent;
} | 3.26 |
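A small self-contained sketch of the circular-window bookkeeping this class implements: an index that advances modulo the window size, plus a flag that flips once the first full pass starts evicting the oldest entries. The class and field names below are invented for illustration:

```java
public class WindowAverageSketch {
  private final long[] lastN;
  private int mostRecent = -1;   // index of the most recently written slot
  private boolean oneRound;      // true once the window has wrapped at least once

  WindowAverageSketch(int size) {
    lastN = new long[size];
  }

  void add(long value) {
    int index = ++mostRecent;
    if (!oneRound && index == lastN.length) {
      oneRound = true;           // from now on, new values evict the oldest one
    }
    mostRecent = index % lastN.length;
    lastN[mostRecent] = value;
  }

  double average() {
    int count = oneRound ? lastN.length : mostRecent + 1;
    long sum = 0;
    for (int i = 0; i < count; i++) {
      sum += lastN[i];
    }
    return count == 0 ? 0.0 : (double) sum / count;
  }

  public static void main(String[] args) {
    WindowAverageSketch avg = new WindowAverageSketch(3);
    for (long v : new long[] { 10, 20, 30, 100 }) {   // 100 overwrites 10
      avg.add(v);
    }
    System.out.println(avg.average());                // (100 + 20 + 30) / 3 = 50.0
  }
}
```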
hbase_WindowMovingAverage_getMostRecentPosition_rdh | /**
* Returns index of most recent
*/
protected int getMostRecentPosition() {
return mostRecent;
} | 3.26 |
hbase_WindowMovingAverage_enoughStatistics_rdh | /**
* Check if there are enough statistics.
*
* @return true if lastN is full
*/
protected boolean enoughStatistics() {
return oneRound;
} | 3.26 |
hbase_WindowMovingAverage_getStatisticsAtIndex_rdh | /**
* Get statistics at index.
*
* @param index
* index of bar
*/
protected long getStatisticsAtIndex(int index) {
if ((index < 0) || (index >= getNumberOfStatistics())) { // This case should not happen, but a prudent check.
throw new IndexOutOfBoundsException();
}
return lastN[index];
} | 3.26 |
hbase_RowIndexSeekerV1_copyFromNext_rdh | /**
* Copy the state from the next one into this instance (the previous state placeholder). Used to
* save the previous state when we are advancing the seeker to the next key/value.
*/
protected void copyFromNext(SeekerState nextState) {
f2 = nextState.f2;
currentKey.setKey(nextState.f2, nextState.currentKey.getRowPosition() - Bytes.SIZEOF_SHORT, nextState.keyLength);
startOffset = nextState.startOffset;
valueOffset = nextState.valueOffset;
keyLength = nextState.keyLength;
f1 = nextState.f1;
nextKvOffset = nextState.nextKvOffset;
memstoreTS = nextState.memstoreTS;
currentBuffer = nextState.currentBuffer;
tagsOffset = nextState.tagsOffset;
tagsLength = nextState.tagsLength;
} | 3.26 |
hbase_SnapshotFileCache_getFiles_rdh | /**
* Returns the hfiles in the snapshot when <tt>this</tt> was made.
*/
public Collection<String> getFiles() {
return this.f2;
} | 3.26 |
hbase_SnapshotFileCache_getUnreferencedFiles_rdh | /**
* Check to see if any of the passed file names is contained in any of the snapshots. First checks
* an in-memory cache of the files to keep. If its not in the cache, then the cache is refreshed
* and the cache checked again for that file. This ensures that we never return files that exist.
* <p>
* Note this may lead to periodic false positives for the file being referenced. Periodically, the
* cache is refreshed even if there are no requests to ensure that the false negatives get removed
* eventually. For instance, suppose you have a file in the snapshot and it gets loaded into the
* cache. Then at some point later that snapshot is deleted. If the cache has not been refreshed
* at that point, cache will still think the file system contains that file and return
* <tt>true</tt>, even if it is no longer present (false positive). However, if the file never was
* on the filesystem, we will never find it and always return <tt>false</tt>.
*
* @param files
* file to check
* @return <tt>unReferencedFiles</tt> the collection of files that do not have snapshot references
* @throws IOException
* if there is an unexpected error reaching the filesystem.
*/
public Iterable<FileStatus> getUnreferencedFiles(List<FileStatus> files, final SnapshotManager snapshotManager) throws IOException {
List<FileStatus> unReferencedFiles = Lists.newArrayList();
List<String> snapshotsInProgress = null;
boolean refreshed = false;
Lock lock = null;
if (snapshotManager != null) {
lock = snapshotManager.getTakingSnapshotLock().writeLock();
}
try {
if ((lock == null) || lock.tryLock(LOCK_TIMEOUT_MS, TimeUnit.MILLISECONDS)) {
try {
if ((snapshotManager != null) && snapshotManager.isTakingAnySnapshot()) {
f0.warn("Not checking unreferenced files since snapshot is running, it will " + "skip to clean the HFiles this time");
return unReferencedFiles;
}
ImmutableSet<String> v4 = cache;
for (FileStatus file : files) {
String fileName = file.getPath().getName();
if ((!refreshed) && (!v4.contains(fileName))) {
synchronized(this) {
refreshCache();
v4 = cache;
refreshed = true;
}
}
if (v4.contains(fileName)) {
continue;
}
if (snapshotsInProgress == null) {
snapshotsInProgress = getSnapshotsInProgress();
}
if (snapshotsInProgress.contains(fileName)) {
continue;
}
unReferencedFiles.add(file);
}
} finally {
if (lock != null) {
lock.unlock();
}
}
} else {
f0.warn("Failed to acquire write lock on taking snapshot after waiting {}ms", LOCK_TIMEOUT_MS);
      }
    } catch (InterruptedException e) {
      f0.warn("Interrupted while acquiring write lock on taking snapshot");
      Thread.currentThread().interrupt(); // restore the interrupt flag
}
return unReferencedFiles;
} | 3.26 |
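The javadoc above boils down to a check-cache, refresh-at-most-once, re-check loop. A standalone, hedged sketch of that pattern follows; it is not the HBase class, and loadCurrentEntries() is an assumed stand-in for refreshCache().

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

/** Sketch of the "check cache, refresh once on a miss, re-check" filtering pattern. */
public abstract class RefreshOnMissFilter {

  private volatile Set<String> cache = new HashSet<>();

  /** Assumed stand-in for refreshCache(): reload the authoritative set of names to keep. */
  protected abstract Set<String> loadCurrentEntries() throws IOException;

  public synchronized List<String> getUnreferenced(List<String> candidates) throws IOException {
    List<String> unreferenced = new ArrayList<>();
    boolean refreshed = false;
    Set<String> snapshot = cache;
    for (String name : candidates) {
      if (!refreshed && !snapshot.contains(name)) {
        // Miss: refresh at most once per call, then check again against the fresh view.
        cache = loadCurrentEntries();
        snapshot = cache;
        refreshed = true;
      }
      if (!snapshot.contains(name)) {
        unreferenced.add(name);
      }
    }
    return unreferenced;
  }
}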
hbase_SnapshotFileCache_triggerCacheRefreshForTesting_rdh | /**
 * Trigger a cache refresh, even if it's before the next scheduled cache refresh. Does not affect pending
* cache refreshes.
* <p/>
* Blocks until the cache is refreshed.
* <p/>
* Exposed for TESTING.
*/
public synchronized void triggerCacheRefreshForTesting() {
try {
refreshCache();
} catch (IOException e) {
f0.warn("Failed to refresh snapshot hfile cache!", e);
}
f0.debug("Current cache:" + cache);
} | 3.26 |
hbase_DrainingServerTracker_start_rdh | /**
* Starts the tracking of draining RegionServers.
* <p>
* All Draining RSs will be tracked after this method is called.
*/
public void start() throws KeeperException, IOException {
watcher.registerListener(this);
// Add a ServerListener to check if a server is draining when it's added.
serverManager.registerListener(new ServerListener() {
@Override
public void serverAdded(ServerName sn) {
if (f0.contains(sn)) {
serverManager.addServerToDrainList(sn);
}
}
});
List<String> servers = ZKUtil.listChildrenAndWatchThem(watcher, watcher.getZNodePaths().drainingZNode);
add(servers);
} | 3.26 |
hbase_GroupingTableMap_createGroupKey_rdh | /**
* Create a key by concatenating multiple column values. Override this function in order to
* produce different types of keys.
*
* @return key generated by concatenating multiple column values
*/
protected ImmutableBytesWritable createGroupKey(byte[][] vals) {
if (vals == null) {
return null;
}
StringBuilder v10 = new StringBuilder();
for (int i = 0; i < vals.length; i++) {
if (i > 0) {
v10.append(" ");
}
v10.append(Bytes.toString(vals[i]));
}
return new ImmutableBytesWritable(Bytes.toBytesBinary(v10.toString()));
} | 3.26 |
hbase_GroupingTableMap_map_rdh | /**
* Extract the grouping columns from value to construct a new key. Pass the new key and value to
* reduce. If any of the grouping columns are not found in the value, the record is skipped.
*/
public void map(ImmutableBytesWritable key, Result value, OutputCollector<ImmutableBytesWritable, Result> output, Reporter reporter) throws IOException {
byte[][] keyVals = extractKeyValues(value);
if (keyVals != null) {
      ImmutableBytesWritable tKey = createGroupKey(keyVals);
output.collect(tKey, value);
}
} | 3.26 |
hbase_GroupingTableMap_initJob_rdh | /**
* Use this before submitting a TableMap job. It will appropriately set up the JobConf.
*
* @param table
* table to be processed
* @param columns
* space separated list of columns to fetch
* @param groupColumns
* space separated list of columns used to form the key used in collect
* @param mapper
* map class
* @param job
* job configuration object
*/
@SuppressWarnings("unchecked")
public static void initJob(String table, String columns, String groupColumns, Class<? extends TableMap> mapper, JobConf job) {
TableMapReduceUtil.initTableMapJob(table, columns, mapper, ImmutableBytesWritable.class, Result.class, job);
job.set(GROUP_COLUMNS, groupColumns);
} | 3.26 |
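Assuming the old org.apache.hadoop.mapred API that this class targets, a driver might wire the job roughly as follows; the table name, column lists, and output path are placeholders rather than values from the source, and the job is kept map-only to keep the sketch simple.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.mapred.GroupingTableMap;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;

public class GroupingJobDriver {
  public static void main(String[] args) throws Exception {
    JobConf job = new JobConf(GroupingJobDriver.class);
    job.setJobName("grouping-example");
    // Fetch two columns and use both of them (space separated) to build the group key.
    GroupingTableMap.initJob("my_table", "cf:a cf:b", "cf:a cf:b", GroupingTableMap.class, job);
    job.setNumReduceTasks(0); // map-only for this sketch; a real job would add a reducer
    FileOutputFormat.setOutputPath(job, new Path("/tmp/grouping-out"));
    JobClient.runJob(job);
  }
}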
hbase_GroupingTableMap_extractKeyValues_rdh | /**
* Extract columns values from the current record. This method returns null if any of the columns
* are not found. Override this method if you want to deal with nulls differently.
*
* @return array of byte values
*/
protected byte[][] extractKeyValues(Result r) {
byte[][] keyVals = null;
ArrayList<byte[]> foundList = new ArrayList<>();
int numCols = columns.length;
if (numCols > 0) {
for (Cell value : r.listCells()) {
byte[] column = CellUtil.makeColumn(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value));
        for (int i = 0; i < numCols; i++) {
if (Bytes.equals(column, columns[i])) {
foundList.add(CellUtil.cloneValue(value));
break;
}
}
}
if (foundList.size() == numCols) {
keyVals = foundList.toArray(new byte[numCols][]);
      }
    }
return keyVals;
} | 3.26 |
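To make the two-step grouping contract concrete (extract the configured columns, skip the row if any is missing, otherwise join the values with a space), here is a plain-Java sketch that mirrors extractKeyValues() and createGroupKey() on a toy map instead of a Result; all names and values are illustrative.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

/** Plain-Java sketch of the grouping flow: extract the configured columns, then join them. */
public class GroupKeyDemo {

  /** Returns the values for the requested columns, or null if any column is missing. */
  static List<String> extractValues(Map<String, String> row, String[] groupColumns) {
    List<String> found = new ArrayList<>();
    for (String col : groupColumns) {
      String value = row.get(col);
      if (value == null) {
        return null; // mirrors extractKeyValues(): rows missing any grouping column are skipped
      }
      found.add(value);
    }
    return found;
  }

  static String createGroupKey(List<String> values) {
    return values == null ? null : String.join(" ", values); // mirrors the space-joined key
  }

  public static void main(String[] args) {
    Map<String, String> row = new LinkedHashMap<>();
    row.put("cf:a", "v1");
    row.put("cf:b", "v2");
    // Prints "v1 v2"; a row missing cf:a or cf:b would produce null and be skipped by map().
    System.out.println(createGroupKey(extractValues(row, new String[] { "cf:a", "cf:b" })));
  }
}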
hbase_Connection_getClusterId_rdh | /**
* Returns the cluster ID unique to this HBase cluster. <br>
* The default implementation is added to keep client compatibility.
*/
default String getClusterId() {
return null;
} | 3.26 |
hbase_Connection_getHbck_rdh | /**
* Retrieve an Hbck implementation to fix an HBase cluster. The returned Hbck is not guaranteed to
* be thread-safe. A new instance should be created by each thread. This is a lightweight
* operation. Pooling or caching of the returned Hbck instance is not recommended. <br>
* The caller is responsible for calling {@link Hbck#close()} on the returned Hbck instance. <br>
 * This will be used mostly by the hbck tool. It may only be used to bypass fetching the registered
 * master from ZK. In situations where ZK is not available, or the active master is not registered
 * with ZK, and the user can obtain the master address by other means, the master can be specified
 * explicitly.
*
* @param masterServer
* explicit {@link ServerName} for master server
* @return an Hbck instance for a specified master server
*/
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.HBCK)
default Hbck getHbck(ServerName masterServer) throws IOException {
return toAsyncConnection().getHbck(masterServer);
} | 3.26 |
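A hedged usage sketch for the contract described above: the caller supplies an explicit master ServerName and closes the Hbck instance itself. The host, port, and start code below are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Hbck;

public class HbckExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Placeholder master location; in practice this comes from operator input when ZK is unusable.
    ServerName master = ServerName.valueOf("master.example.com", 16000, System.currentTimeMillis());
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Hbck hbck = connection.getHbck(master)) {
      // Hbck operations go here; the instance is per-thread and must be closed by the caller.
    }
  }
}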
hbase_Connection_getTable_rdh | /**
 * Retrieve a Table implementation for accessing a table. The returned Table is not thread safe; a
 * new instance should be created for each thread that uses it. This is a lightweight operation; pooling
* or caching of the returned Table is neither required nor desired.
* <p>
* The caller is responsible for calling {@link Table#close()} on the returned table instance.
* <p>
* Since 0.98.1 this method no longer checks table existence. An exception will be thrown if the
* table does not exist only when the first operation is attempted.
*
* @param tableName
* the name of the table
* @param pool
* The thread pool to use for batch operations, null to use a default pool.
* @return a Table to use for interactions with this table
*/
default Table getTable(TableName tableName, ExecutorService pool) throws IOException {
return m1(tableName, pool).build();
}
/**
* <p>
* Retrieve a {@link BufferedMutator} for performing client-side buffering of writes. The
* {@link BufferedMutator} returned by this method is thread-safe. This BufferedMutator will use
* the Connection's ExecutorService. This object can be used for long lived operations.
* </p>
* <p>
* The caller is responsible for calling {@link BufferedMutator#close()} on the returned
* {@link BufferedMutator} instance.
* </p>
* <p>
* This accessor will use the connection's ExecutorService and will throw an exception in the main
* thread when an asynchronous exception occurs.
*
* @param tableName
* the name of the table
* @return a {@link BufferedMutator} | 3.26 |
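A short usage sketch of the per-thread Table contract described above: the Connection is long-lived, while each Table is created, used, and closed locally. The table, row, and column names are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class GetExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The Connection is heavyweight and shared; each Table is cheap and closed after use.
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Table table = connection.getTable(TableName.valueOf("my_table"))) {
      Result result = table.get(new Get(Bytes.toBytes("row-1")));
      byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"));
      System.out.println(value == null ? "missing" : Bytes.toString(value));
    }
  }
}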
hbase_OpenRegionHandler_updateMeta_rdh | /**
* Update ZK or META. This can take a while if for example the hbase:meta is not available -- if
* server hosting hbase:meta crashed and we are waiting on it to come back -- so run in a thread
* and keep updating znode state meantime so master doesn't timeout our region-in-transition.
* Caller must cleanup region if this fails.
*/
private boolean updateMeta(final HRegion r, long masterSystemTime) {
if (this.server.isStopped() || this.rsServices.isStopping()) {
return false;
}
// Object we do wait/notify on. Make it boolean. If set, we're done.
// Else, wait.
final AtomicBoolean signaller = new AtomicBoolean(false);
PostOpenDeployTasksThread t = new PostOpenDeployTasksThread(r, this.server, this.rsServices, signaller,
masterSystemTime);
t.start();
// Post open deploy task:
// meta => update meta location in ZK
// other region => update meta
while (((((!signaller.get()) && t.isAlive()) && (!this.server.isStopped())) && (!this.rsServices.isStopping())) && isRegionStillOpening()) {
synchronized(signaller) {
try {
// Wait for 10 seconds, so that server shutdown
// won't take too long if this thread happens to run.
if (!signaller.get())
signaller.wait(10000);
} catch (InterruptedException e) {
// Go to the loop check.
}
}
}
// Is thread still alive? We may have left above loop because server is
    // stopping or we timed out the edit. If so, interrupt it.
if (t.isAlive()) {
if (!signaller.get()) {
// Thread still running; interrupt
LOG.debug("Interrupting thread " + t);
t.interrupt();
}
try {
t.join();
} catch (InterruptedException ie) {
LOG.warn("Interrupted joining " + r.getRegionInfo().getRegionNameAsString(), ie);
Thread.currentThread().interrupt();
}
}
// Was there an exception opening the region? This should trigger on
// InterruptedException too. If so, we failed.
return (!Thread.interrupted()) && (t.getException() == null);
} | 3.26 |
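The loop above coordinates with PostOpenDeployTasksThread through an AtomicBoolean used both as a completion flag and as a wait/notify monitor. Below is a self-contained, hedged sketch of that handshake with generic names; it is not the HBase code itself.

import java.util.concurrent.atomic.AtomicBoolean;

/** Sketch of the flag-plus-monitor handshake used to bound the wait for a background task. */
public class SignallerDemo {
  public static void main(String[] args) throws InterruptedException {
    final AtomicBoolean signaller = new AtomicBoolean(false);

    Thread worker = new Thread(() -> {
      try {
        Thread.sleep(500); // stand-in for the post-open deploy work
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
      } finally {
        // Always flip the flag and wake the waiter, even if the work failed.
        synchronized (signaller) {
          signaller.set(true);
          signaller.notifyAll();
        }
      }
    });
    worker.start();

    // Wait in bounded slices so the waiter can also observe shutdown conditions between waits.
    while (!signaller.get() && worker.isAlive()) {
      synchronized (signaller) {
        if (!signaller.get()) {
          signaller.wait(1000);
        }
      }
    }
    worker.join();
    System.out.println("worker done, signalled=" + signaller.get());
  }
}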
hbase_OpenRegionHandler_openRegion_rdh | /**
* Returns Instance of HRegion if successful open else null.
*/
private HRegion openRegion() {
HRegion region = null;
    boolean compactionEnabled = ((HRegionServer) (server)).getCompactSplitThread().isCompactionsEnabled();
this.server.getConfiguration().setBoolean(HBASE_REGION_SERVER_ENABLE_COMPACTION, compactionEnabled);
try {
// Instantiate the region. This also periodically tickles OPENING
// state so master doesn't timeout this region in transition.
region = HRegion.openHRegion(this.regionInfo, this.htd, this.rsServices.getWAL(this.regionInfo), this.server.getConfiguration(), this.rsServices, new CancelableProgressable() {
@Override
public boolean progress() {
if (!isRegionStillOpening()) {
LOG.warn("Open region aborted since it isn't opening any more");
return false;
}
return true;
}
});
} catch (Throwable t) {
// We failed open. Our caller will see the 'null' return value
// and transition the node back to FAILED_OPEN. If that fails,
// we rely on the Timeout Monitor in the master to reassign.
      LOG.error("Failed open of region=" + this.regionInfo.getRegionNameAsString(), t);
    }
return region;
} | 3.26 |
hbase_OpenRegionHandler_getException_rdh | /**
* Returns Null or the run exception; call this method after thread is done.
*/
  Throwable getException() {
    return this.exception;
} | 3.26 |
hbase_MemoryBoundedLogMessageBuffer_dumpTo_rdh | /**
* Dump the contents of the buffer to the given stream.
*/
public synchronized void dumpTo(PrintWriter out) {
SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
for (LogMessage msg : messages) {
out.write(df.format(new Date(msg.timestamp)));
out.write(" ");
      out.println(new String(msg.message, Charsets.UTF_8));
    }
} | 3.26 |
hbase_MemoryBoundedLogMessageBuffer_estimateHeapUsage_rdh | /**
* Estimate the number of bytes this buffer is currently using.
*/
synchronized long estimateHeapUsage() {
return usage;
} | 3.26 |
hbase_MemoryBoundedLogMessageBuffer_add_rdh | /**
* Append the given message to this buffer, automatically evicting older messages until the
* desired memory limit is achieved.
*/
public synchronized void add(String messageText) {
LogMessage message = new LogMessage(messageText, EnvironmentEdgeManager.currentTime());
usage += message.estimateHeapUsage();
messages.add(message);
while (usage > maxSizeBytes) {
LogMessage removed = messages.remove();
usage -= removed.estimateHeapUsage();
assert usage >= 0;
}
} | 3.26 |
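The add() method above implements an append-then-evict loop bounded by an estimated memory budget. A standalone, hedged sketch of the same idea for plain strings follows; the size accounting is a rough character count, not the HBase heap estimate.

import java.util.ArrayDeque;
import java.util.Deque;

/** Sketch of an append-and-evict buffer that keeps memory usage under a fixed budget. */
public class BoundedMessageBuffer {
  private final Deque<String> messages = new ArrayDeque<>();
  private final long maxSizeChars;
  private long usage = 0;

  public BoundedMessageBuffer(long maxSizeChars) {
    this.maxSizeChars = maxSizeChars;
  }

  public synchronized void add(String message) {
    usage += message.length();
    messages.addLast(message);
    // Evict the oldest entries until we are back under the budget.
    while (usage > maxSizeChars && !messages.isEmpty()) {
      usage -= messages.removeFirst().length();
    }
  }

  public synchronized long estimateUsage() {
    return usage;
  }

  public static void main(String[] args) {
    BoundedMessageBuffer buffer = new BoundedMessageBuffer(10);
    buffer.add("hello");
    buffer.add("world!!");
    // "hello" (5 chars) was evicted to keep 5 + 7 = 12 under the 10-char budget.
    System.out.println(buffer.estimateUsage()); // prints 7
  }
}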
hbase_Procedure_bypass_rdh | /**
* Set the bypass to true. Only called in
* {@link ProcedureExecutor#bypassProcedure(long, long, boolean, boolean)} for now. DO NOT use
* this method alone, since we can't just bypass one single procedure. We need to bypass its
* ancestor too. If your Procedure has set state, it needs to undo it in here.
*
* @param env
* Current environment. May be null because of context; e.g. pretty-printing procedure
* WALs where there is no 'environment' (and where Procedures that require an
* 'environment' won't be run.
*/
protected void bypass(TEnvironment env) {
this.bypass = true;
} | 3.26 |
hbase_Procedure_doExecute_rdh | // ==========================================================================
// Internal methods - called by the ProcedureExecutor
// ==========================================================================
/**
* Internal method called by the ProcedureExecutor that starts the user-level code execute().
*
* @throws ProcedureSuspendedException
* This is used when procedure wants to halt processing and
* skip out without changing states or releasing any locks
* held.
*/
protected Procedure<TEnvironment>[] doExecute(TEnvironment env) throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException {
try {
updateTimestamp();
      if (bypass) {
        LOG.info("{} bypassed, returning null to finish it", this);
return null;
}
return execute(env);
} finally {
updateTimestamp();
}
} | 3.26 |
hbase_Procedure_tryRunnable_rdh | /**
* Try to set this procedure into RUNNABLE state. Succeeds if all subprocedures/children are done.
*
* @return True if we were able to move procedure to RUNNABLE state.
*/
synchronized boolean tryRunnable() {
// Don't use isWaiting in the below; it returns true for WAITING and WAITING_TIMEOUT
    if ((getState() == ProcedureState.WAITING) && childrenCountDown()) {
      setState(ProcedureState.RUNNABLE);
return true;
} else {
return false;
}
} | 3.26 |
hbase_Procedure_setProcId_rdh | /**
* Called by the ProcedureExecutor to assign the ID to the newly created procedure.
*/
protected void setProcId(long procId) {
this.procId = procId;
this.submittedTime = EnvironmentEdgeManager.currentTime();
setState(ProcedureState.RUNNABLE);
} | 3.26 |
hbase_Procedure_isRunnable_rdh | // ==============================================================================================
// Runtime state, updated every operation by the ProcedureExecutor
//
// There is always 1 thread at a time operating on the state of the procedure.
// The ProcedureExecutor may check and set states, or some Procedure may
// update its own state, but there are no concurrent updates. We use synchronized here
// just because the procedure can get scheduled on different executor threads on each step.
// ==============================================================================================
/**
* Returns true if the procedure is in a RUNNABLE state.
*/
public synchronized boolean isRunnable() {
return state == ProcedureState.RUNNABLE;
} | 3.26 |
hbase_Procedure_getTimeout_rdh | /**
* Returns the timeout in msec
*/
public int getTimeout() {
return timeout;
} | 3.26 |
hbase_Procedure_toStringSimpleSB_rdh | /**
* Build the StringBuilder for the simple form of procedure string.
*
* @return the StringBuilder
*/
protected StringBuilder toStringSimpleSB() {
final StringBuilder sb = new StringBuilder();
sb.append("pid=");
sb.append(getProcId());
if (hasParent()) {
      sb.append(", ppid=");
      sb.append(getParentProcId());
}
/* TODO Enable later when this is being used. Currently owner not used. if (hasOwner()) {
sb.append(", owner="); sb.append(getOwner()); }
*/
sb.append(", state=");// pState for Procedure State as opposed to any other kind.
toStringState(sb);
sb.append(", hasLock=").append(locked);
if (bypass) {
sb.append(", bypass=").append(bypass);
}
if (hasException()) {
sb.append(", exception=" + getException());
}
sb.append("; ");
toStringClassDetails(sb);
return sb;
} | 3.26 |
hbase_Procedure_addStackIndex_rdh | /**
* Called by the RootProcedureState on procedure execution. Each procedure store its stack-index
* positions.
*/
protected synchronized void addStackIndex(final int index) {
if (stackIndexes == null) {
stackIndexes = new int[]{ index };
} else {
int count = stackIndexes.length;
stackIndexes = Arrays.copyOf(stackIndexes, count + 1);
stackIndexes[count] = index;
}
} | 3.26 |
hbase_Procedure_getResult_rdh | /**
* Returns the serialized result if any, otherwise null
*/
public byte[] getResult() {
return result;
} | 3.26 |
hbase_Procedure_setSubmittedTime_rdh | /**
* Called on store load to initialize the Procedure internals after the creation/deserialization.
*/
  protected void setSubmittedTime(long submittedTime) {
    this.submittedTime = submittedTime;
} | 3.26 |
hbase_Procedure_releaseLock_rdh | /**
* The user should override this method, and release lock if necessary.
*/
protected void releaseLock(TEnvironment env) {
// no-op
} | 3.26 |
hbase_Procedure_toStringDetails_rdh | /**
* Extend the toString() information with more procedure details
*/
public String toStringDetails() {
final StringBuilder sb = toStringSimpleSB();
    sb.append(" submittedTime=");
    sb.append(getSubmittedTime());
sb.append(", lastUpdate=");
sb.append(getLastUpdate());
final int[] v7 = getStackIndexes();
    if (v7 != null) {
      sb.append("\n");
sb.append("stackIndexes=");
sb.append(Arrays.toString(v7));
}
    return sb.toString();
  } | 3.26
hbase_Procedure_doReleaseLock_rdh | /**
* Internal method called by the ProcedureExecutor that starts the user-level code releaseLock().
*/
final void doReleaseLock(TEnvironment env, ProcedureStore store) {
locked = false;
// persist that we have released the lock. This must be done before we actually release the
// lock. Another procedure may take this lock immediately after we release the lock, and if we
// crash before persist the information that we have already released the lock, then when
// restarting there will be two procedures which both have the lock and cause problems.
if (getState() != ProcedureState.ROLLEDBACK) {
// If the state is ROLLEDBACK, it means that we have already deleted the procedure from
// procedure store, so do not need to log the release operation any more.
store.update(this);
}
releaseLock(env);
} | 3.26 |
hbase_Procedure_elapsedTime_rdh | // ==========================================================================
// runtime state
// ==========================================================================
/**
* Returns the time elapsed between the last update and the start time of the procedure.
*/
public long elapsedTime() {
return getLastUpdate() - getSubmittedTime();
} | 3.26 |
hbase_Procedure_setStackIndexes_rdh | /**
* Called on store load to initialize the Procedure internals after the creation/deserialization.
*/
protected synchronized void setStackIndexes(final List<Integer> stackIndexes) {
this.stackIndexes = new int[stackIndexes.size()];
    for (int i = 0; i < this.stackIndexes.length; ++i) {
      this.stackIndexes[i] = stackIndexes.get(i);
    }
} | 3.26 |
hbase_Procedure_hasLock_rdh | /**
* This is used in conjunction with {@link #holdLock(Object)}. If {@link #holdLock(Object)}
* returns true, the procedure executor will call acquireLock() once and thereafter not call
* {@link #releaseLock(Object)} until the Procedure is done (Normally, it calls release/acquire
* around each invocation of {@link #execute(Object)}.
*
* @see #holdLock(Object)
* @return true if the procedure has the lock, false otherwise.
*/
public final boolean hasLock() {
return locked;
} | 3.26 |
hbase_Procedure_setTimeout_rdh | // ==========================================================================
// runtime state - timeout related
// ==========================================================================
/**
*
* @param timeout
* timeout interval in msec
*/
protected void setTimeout(int timeout) {
this.timeout = timeout;
} | 3.26 |