name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68)
---|---|---
hbase_HMaster_getMaxBalancingTime_rdh | /**
 * Returns the maximum time we should run the balancer for.
 */
private int getMaxBalancingTime() {
  // if the max balancing time isn't set, default it to the balancer period
  int maxBalancingTime = getConfiguration().getInt(HConstants.HBASE_BALANCER_MAX_BALANCING, getConfiguration().getInt(HConstants.HBASE_BALANCER_PERIOD, HConstants.DEFAULT_HBASE_BALANCER_PERIOD));
  return maxBalancingTime;
} | 3.26 |
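
A minimal sketch of the nested-default lookup above, using plain Hadoop Configuration; the key strings and the default value are illustrative stand-ins for the HConstants fields:

```java
import org.apache.hadoop.conf.Configuration;

public class BalancerTimeDefaults {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Inner lookup: the balancer period, falling back to a hard-coded default (assumed key/value).
    int period = conf.getInt("hbase.balancer.period", 300_000);
    // Outer lookup: the max balancing time, falling back to whatever the period resolved to.
    int maxBalancingTime = conf.getInt("hbase.balancer.max.balancing", period);
    System.out.println(maxBalancingTime); // 300000 when neither key is configured
  }
}
```
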
hbase_HMaster_balanceOrUpdateMetrics_rdh | /**
 * Trigger a normal balance, see {@link HMaster#balance()}. If the balance is not executed this
 * time, the metrics related to the balance will still be updated. When the balance is running,
 * related metrics will be updated at the same time. But if some checking logic fails and causes
 * the balancer to exit early, we would lose the chance to update the balancer metrics, leaving
 * users without the latest balancer info.
*/
public BalanceResponse balanceOrUpdateMetrics() throws IOException {
synchronized(this.balancer) {
    BalanceResponse response = balance();
    if (!response.isBalancerRan()) {
      Map<TableName, Map<ServerName, List<RegionInfo>>> assignments = this.assignmentManager.getRegionStates().getAssignmentsForBalancer(this.tableStateManager, this.serverManager.getOnlineServersList());
      for (Map<ServerName, List<RegionInfo>> serverMap : assignments.values()) {
        serverMap.keySet().removeAll(this.serverManager.getDrainingServersList());
      }
      this.balancer.updateBalancerLoadInfo(assignments);
}
return response;
}
} | 3.26 |
hbase_HMaster_listTableNames_rdh | /**
 * Returns the list of table names that match the specified request.
 *
 * @param namespace
 *          the namespace to query, or null if querying for all
 * @param regex
 *          the regular expression to match against, or null if querying for all
 * @param includeSysTables
 *          false to match only against userspace tables
 * @return the list of table names
 */
public List<TableName> listTableNames(final String namespace, final String regex, final boolean includeSysTables) throws IOException {
List<TableDescriptor> htds = new ArrayList<>();
if (cpHost != null) {
cpHost.preGetTableNames(htds, regex);
}
htds = getTableDescriptors(htds, namespace, regex, null, includeSysTables);
if (cpHost != null) {
cpHost.postGetTableNames(htds, regex);
}
List<TableName> result = new ArrayList<>(htds.size());
  for (TableDescriptor htd : htds) {
    result.add(htd.getTableName());
  }
return result;
} | 3.26 |
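
Several methods in this class bracket their work with pre/post coprocessor hooks, as above. A stripped-down sketch of that bracketing under a hypothetical Observer interface (not the real MasterObserver API):

```java
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

// Hypothetical illustration of pre/post hook bracketing: observers may veto
// (throw) before the operation runs and inspect or rewrite the result after it.
class HookBracketDemo {
  interface Observer {
    void preList(List<String> names) throws IOException;
    void postList(List<String> names) throws IOException;
  }

  final List<Observer> observers = new ArrayList<>();

  List<String> listNames() throws IOException {
    List<String> names = new ArrayList<>();
    for (Observer o : observers) {
      o.preList(names); // may throw access-control style errors
    }
    names.add("example-table"); // the actual lookup (elided in this sketch)
    for (Observer o : observers) {
      o.postList(names); // may filter what the caller gets to see
    }
    return names;
  }
}
```
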
hbase_HMaster_deleteNamespace_rdh | /**
* Delete an existing Namespace. Only empty Namespaces (no tables) can be removed.
*
 * @param name
 *          Name of the namespace to delete.
 * @param nonceGroup
 *          Identifier for the source of the request, a client or process.
 * @param nonce
 *          A unique identifier for this operation from the client or process identified
 *          by <code>nonceGroup</code> (the source must ensure each operation gets a
 *          unique id).
* @return procedure id
*/
long deleteNamespace(final String name, final long nonceGroup, final long nonce) throws IOException {
  checkInitialized();
  return MasterProcedureUtil.submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
    @Override
    protected void run() throws IOException {
getMaster().getMasterCoprocessorHost().preDeleteNamespace(name);
LOG.info((getClientIdAuditPrefix() + " delete ") + name);
// Execute the operation synchronously - wait for the operation to complete before
// continuing.
//
// We need to wait for the procedure to potentially fail due to "prepare" sanity
// checks. This will block only the beginning of the procedure. See HBASE-19953.
ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch();
setProcId(submitProcedure(new DeleteNamespaceProcedure(procedureExecutor.getEnvironment(), name, latch)));
latch.await();
// Will not be invoked in the face of Exception thrown by the Procedure's execution
getMaster().getMasterCoprocessorHost().postDeleteNamespace(name);
}
@Override
protected String getDescription() {
return "DeleteNamespaceProcedure";
}
});
} | 3.26 |
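
The prepare-latch idiom above recurs in the namespace operations below. A condensed, hypothetical sketch of the idea using a plain CountDownLatch; the real ProcedurePrepareLatch plumbing differs:

```java
import java.util.concurrent.CountDownLatch;

// Hypothetical illustration: the caller submits a procedure, then blocks on a
// latch that the procedure releases once its "prepare" sanity checks have run,
// so prepare-time failures surface to the client synchronously (cf. HBASE-19953).
class PrepareLatchDemo {
  static final CountDownLatch latch = new CountDownLatch(1);
  static volatile Exception prepareError;

  static void runProcedureAsync() {
    new Thread(() -> {
      try {
        // prepare-phase checks, e.g. "namespace is empty" (elided)
      } catch (Exception e) {
        prepareError = e;
      } finally {
        latch.countDown(); // unblock the submitter either way
      }
      // ...long-running execute phase continues in the background
    }).start();
  }

  static void submit() throws Exception {
    runProcedureAsync();
    latch.await(); // block only through the prepare phase
    if (prepareError != null) throw prepareError;
  }
}
```
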
hbase_HMaster_initializeZKBasedSystemTrackers_rdh | /**
 * Initialize all ZK-based system trackers. But do not include {@link RegionServerTracker}; it
 * should have already been initialized along with {@link ServerManager}.
*/
private void initializeZKBasedSystemTrackers() throws IOException, KeeperException, ReplicationException, DeserializationException {
if (maintenanceMode) {
// in maintenance mode, always use MaintenanceLoadBalancer.
conf.unset(LoadBalancer.HBASE_RSGROUP_LOADBALANCER_CLASS);
conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, MaintenanceLoadBalancer.class, LoadBalancer.class);
}
this.balancer = new RSGroupBasedLoadBalancer();
this.loadBalancerStateStore = new LoadBalancerStateStore(f8, zooKeeper);
this.regionNormalizerManager = RegionNormalizerFactory.createNormalizerManager(conf, f8, zooKeeper, this);
this.configurationManager.registerObserver(regionNormalizerManager);
this.regionNormalizerManager.start();
this.splitOrMergeStateStore = new SplitOrMergeStateStore(f8, zooKeeper, conf);
// This is for backwards compatibility. We no longer need the CP for rs group, but if a user
// wants to load it, we need to enable rs group.
String[] cpClasses = conf.getStrings(MasterCoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
if (cpClasses != null) {
for (String cpClass : cpClasses) {
if (RSGroupAdminEndpoint.class.getName().equals(cpClass)) {
RSGroupUtil.enableRSGroup(conf);
break;
}
}
}
this.f1 = RSGroupInfoManager.create(this);
this.replicationPeerManager = ReplicationPeerManager.create(this, clusterId);
this.configurationManager.registerObserver(replicationPeerManager);
this.replicationPeerModificationStateStore = new ReplicationPeerModificationStateStore(f8);
this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this, this.serverManager);
this.drainingServerTracker.start();
this.snapshotCleanupStateStore = new SnapshotCleanupStateStore(f8, zooKeeper);
String clientQuorumServers = conf.get(HConstants.CLIENT_ZOOKEEPER_QUORUM);
boolean clientZkObserverMode = conf.getBoolean(HConstants.CLIENT_ZOOKEEPER_OBSERVER_MODE, HConstants.DEFAULT_CLIENT_ZOOKEEPER_OBSERVER_MODE);
if ((clientQuorumServers != null) && (!clientZkObserverMode)) {
// we need to take care of the ZK information synchronization
// if the given client ZK nodes are not observer nodes
ZKWatcher clientZkWatcher = new ZKWatcher(conf, ((getProcessName() + ":") + rpcServices.getSocketAddress().getPort()) + "-clientZK", this, false, true);
this.metaLocationSyncer = new MetaLocationSyncer(zooKeeper, clientZkWatcher, this);
this.metaLocationSyncer.start();
this.masterAddressSyncer = new MasterAddressSyncer(zooKeeper, clientZkWatcher, this);
this.masterAddressSyncer.start();
// setting the cluster id is a one-time effort
ZKClusterId.setClusterId(clientZkWatcher, f0.getClusterId());
}
// Set the cluster as up. If new RSs, they'll be waiting on this before
// going ahead with their startup.
boolean wasUp = this.clusterStatusTracker.isClusterUp();
if (!wasUp) {
  this.clusterStatusTracker.setClusterUp();
}
LOG.info(((((("Active/primary master=" + this.serverName) + ", sessionid=0x") + Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId())) + ", setting cluster-up flag (Was=") + wasUp) + ")");
// create/initialize the snapshot manager and other procedure managers
this.snapshotManager = new SnapshotManager();
this.mpmHost = new MasterProcedureManagerHost();
this.mpmHost.register(this.snapshotManager);
this.mpmHost.register(new MasterFlushTableProcedureManager());
this.mpmHost.loadProcedures(conf);
this.mpmHost.initialize(this, this.metricsMaster);
} | 3.26 |
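
A hedged example of the client-ZooKeeper configuration that activates the syncers above; the key strings mirror the HConstants fields referenced in the snippet, and the quorum hosts are placeholders:

```java
import org.apache.hadoop.conf.Configuration;

public class ClientZkSetup {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // A separate, client-facing ZK quorum (placeholder hosts).
    conf.set("hbase.client.zookeeper.quorum", "czk1.example.com,czk2.example.com");
    // false means the client ZK nodes are NOT observers of the server quorum,
    // so the master must actively sync the meta location and master address to them.
    conf.setBoolean("hbase.client.zookeeper.observer.mode", false);
  }
}
```
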
hbase_HMaster_getActiveMasterInfoPort_rdh | /**
* Returns info port of active master or 0 if any exception occurs.
*/
public int getActiveMasterInfoPort() {
return activeMasterManager.getActiveMasterInfoPort();
} | 3.26 |
hbase_HMaster_setCatalogJanitorEnabled_rdh | /**
* Switch for the background CatalogJanitor thread. Used for testing. The thread will continue to
* run. It will just be a noop if disabled.
*
* @param b
* If false, the catalog janitor won't do anything.
 */
public void setCatalogJanitorEnabled(final boolean b) {
  this.f3.setEnabled(b);
} | 3.26 |
hbase_HMaster_initClusterSchemaService_rdh | // Will be overridden in tests
@InterfaceAudience.Private
protected void initClusterSchemaService() throws IOException, InterruptedException {
this.clusterSchemaService = new ClusterSchemaServiceImpl(this);
this.clusterSchemaService.startAsync();
try {
this.clusterSchemaService.awaitRunning(getConfiguration().getInt(HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS, DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS), TimeUnit.SECONDS);
} catch (TimeoutException toe) {
throw new IOException("Timedout starting ClusterSchemaService", toe);
}
} | 3.26 |
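
A small sketch of the bounded-startup pattern above, assuming Guava's AbstractIdleService in place of the real ClusterSchemaServiceImpl:

```java
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import com.google.common.util.concurrent.AbstractIdleService;

public class BoundedStartupDemo {
  // Hypothetical stand-in for ClusterSchemaServiceImpl.
  static class DemoService extends AbstractIdleService {
    @Override protected void startUp() { /* init work */ }
    @Override protected void shutDown() { }
  }

  public static void main(String[] args) throws IOException {
    DemoService service = new DemoService();
    service.startAsync();
    try {
      // Fail fast instead of hanging forever if startup stalls.
      service.awaitRunning(60, TimeUnit.SECONDS);
    } catch (TimeoutException toe) {
      throw new IOException("Timed out starting DemoService", toe);
    }
  }
}
```
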
hbase_HMaster_main_rdh | /**
*
* @see org.apache.hadoop.hbase.master.HMasterCommandLine
*/
public static void main(String[] args) {
LOG.info("STARTING service " + HMaster.class.getSimpleName());
VersionInfo.logVersion();
new HMasterCommandLine(HMaster.class).doMain(args);
} | 3.26 |
hbase_HMaster_shutdown_rdh | /**
* Shutdown the cluster. Master runs a coordinated stop of all RegionServers and then itself.
*/
public void shutdown() throws IOException {
TraceUtil.trace(() -> {
if (cpHost != null) {
cpHost.preShutdown();
}
// Tell the servermanager cluster shutdown has been called. This makes it so when Master is
// the last running server, it'll stop itself. Next, we broadcast the cluster shutdown by
// setting the cluster status as down. RegionServers will notice this change in state and
// will start shutting themselves down. When the last one has exited, the Master can go down.
if (this.serverManager != null) {
  this.serverManager.shutdownCluster();
}
if (this.clusterStatusTracker != null) {
try {
this.clusterStatusTracker.setClusterDown();
} catch (KeeperException e) {
LOG.error("ZooKeeper exception trying to set cluster as down in ZK", e);
}
}
// Stop the procedure executor. Will stop any ongoing assign, unassign, server crash etc.,
// processing so we can go down.
if (this.procedureExecutor != null) {
  this.procedureExecutor.stop();
}
// Shutdown our cluster connection. This will kill any hosted RPCs that might be going on;
// this is what we want especially if the Master is in startup phase doing call outs to
// hbase:meta, etc. when the cluster is down. Without this connection close, we'd have to wait on
// the rpc to timeout.
if (this.asyncClusterConnection != null) {
this.asyncClusterConnection.close();
}
}, "HMaster.shutdown");
} | 3.26 |
hbase_HMaster_listTableDescriptors_rdh | /**
* Returns the list of table descriptors that match the specified request
*
* @param namespace
* the namespace to query, or null if querying for all
* @param regex
* The regular expression to match against, or null if querying for all
* @param tableNameList
* the list of table names, or null if querying for all
* @param includeSysTables
* False to match only against userspace tables
* @return the list of table descriptors
*/
public List<TableDescriptor> listTableDescriptors(final String namespace, final String regex, final List<TableName> tableNameList, final boolean includeSysTables) throws IOException {
  List<TableDescriptor> htds = new ArrayList<>();
if (cpHost != null) {
cpHost.preGetTableDescriptors(tableNameList, htds, regex);
}
  htds = getTableDescriptors(htds, namespace, regex, tableNameList, includeSysTables);
  if (cpHost != null) {
cpHost.postGetTableDescriptors(tableNameList, htds, regex);
}
return htds;
} | 3.26 |
hbase_HMaster_createActiveMasterManager_rdh | /**
* Protected to have custom implementations in tests override the default ActiveMaster
* implementation.
*/
protected ActiveMasterManager createActiveMasterManager(ZKWatcher zk, ServerName sn, Server server) throws InterruptedIOException {
return new ActiveMasterManager(zk, sn, server);
} | 3.26 |
hbase_HMaster_createNamespace_rdh | /**
* Create a new Namespace.
*
* @param namespaceDescriptor
* descriptor for new Namespace
* @param nonceGroup
* Identifier for the source of the request, a client or process.
* @param nonce
* A unique identifier for this operation from the client or process
* identified by <code>nonceGroup</code> (the source must ensure each
* operation gets a unique id).
* @return procedure id
*/
long createNamespace(final NamespaceDescriptor namespaceDescriptor, final long nonceGroup, final long nonce) throws IOException {
checkInitialized();
TableName.isLegalNamespaceName(Bytes.toBytes(namespaceDescriptor.getName()));
return MasterProcedureUtil.submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
@Override
protected void run() throws IOException {
getMaster().getMasterCoprocessorHost().preCreateNamespace(namespaceDescriptor);
// We need to wait for the procedure to potentially fail due to "prepare" sanity
// checks. This will block only the beginning of the procedure. See HBASE-19953.
ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch();
LOG.info((getClientIdAuditPrefix() + " creating ") + namespaceDescriptor);
// Execute the operation synchronously - wait for the operation to complete before
// continuing.
setProcId(getClusterSchema().createNamespace(namespaceDescriptor, getNonceKey(), latch));
latch.await();
getMaster().getMasterCoprocessorHost().postCreateNamespace(namespaceDescriptor);
}
@Override
protected String getDescription() {
return "CreateNamespaceProcedure";
}
});
} | 3.26 |
hbase_HMaster_executeRegionPlansWithThrottling_rdh | /**
* Execute region plans with throttling
*
* @param plans
* to execute
* @return succeeded plans
*/
public List<RegionPlan> executeRegionPlansWithThrottling(List<RegionPlan> plans) {
List<RegionPlan> successRegionPlans = new ArrayList<>();
  int maxRegionsInTransition = m2();
  long balanceStartTime = EnvironmentEdgeManager.currentTime();
  long cutoffTime = balanceStartTime + this.maxBalancingTime;
  int rpCount = 0; // number of RegionPlans balanced so far
if ((plans != null) && (!plans.isEmpty())) {
int balanceInterval = this.maxBalancingTime / plans.size();
LOG.info((((("Balancer plans size is " + plans.size()) + ", the balance interval is ") + balanceInterval) + " ms, and the max number regions in transition is ") + maxRegionsInTransition);
for (RegionPlan plan : plans) {
LOG.info("balance " + plan);
// TODO: bulk assign
try {
this.assignmentManager.balance(plan);
} catch (HBaseIOException hioe) {
        // ignore failed plans here to avoid aborting the whole set of balance plans;
        // later calls of balance() can pick up the failed and skipped plans
LOG.warn("Failed balance plan {}, skipping...", plan, hioe);
}
// rpCount records balance plans processed, does not care if a plan succeeds
      rpCount++;
      successRegionPlans.add(plan);
if (this.maxBalancingTime > 0) {
balanceThrottling(balanceStartTime + (rpCount * balanceInterval), maxRegionsInTransition, cutoffTime);
}
// if performing next balance exceeds cutoff time, exit the loop
if (((this.maxBalancingTime > 0) && (rpCount < plans.size())) && (EnvironmentEdgeManager.currentTime() > cutoffTime)) {
// TODO: After balance, there should not be a cutoff time (keeping it as
// a security net for now)
LOG.debug("No more balancing till next balance run; maxBalanceTime=" + this.maxBalancingTime);
break;
}
}
}
LOG.debug("Balancer is going into sleep until next period in {}ms", getConfiguration().getInt(HConstants.HBASE_BALANCER_PERIOD, HConstants.DEFAULT_HBASE_BALANCER_PERIOD));
return successRegionPlans;
} | 3.26 |
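
A toy illustration of the interval arithmetic in the loop above, with a hypothetical time budget and plan count:

```java
public class BalanceThrottleMath {
  public static void main(String[] args) {
    long maxBalancingTime = 300_000; // hypothetical 5-minute budget, in ms
    int planCount = 50;
    // Each plan gets an equal slice of the budget...
    long balanceInterval = maxBalancingTime / planCount; // 6000 ms per plan
    long balanceStartTime = System.currentTimeMillis();
    long cutoffTime = balanceStartTime + maxBalancingTime;
    // ...and after executing plan k, the loop throttles until slice k+1 begins:
    int rpCount = 3; // say three plans have been executed
    long nextSliceStart = balanceStartTime + rpCount * balanceInterval;
    System.out.println("sleep until " + nextSliceStart + ", hard stop at " + cutoffTime);
  }
}
```
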
hbase_HMaster_getMasterFinishedInitializationTime_rdh | /**
* Returns timestamp in millis when HMaster finished becoming the active master
*/
public long getMasterFinishedInitializationTime() {
return masterFinishedInitializationTime;
} | 3.26 |
hbase_HMaster_getMasterCoprocessors_rdh | /**
* Returns array of coprocessor SimpleNames.
*/
public String[] getMasterCoprocessors() {
Set<String> masterCoprocessors = getMasterCoprocessorHost().getCoprocessors();
return masterCoprocessors.toArray(new String[masterCoprocessors.size()]);
} | 3.26 |
hbase_HMaster_finishActiveMasterInitialization_rdh | /**
* Finish initialization of HMaster after becoming the primary master.
* <p/>
* The startup order is a bit complicated but very important, do not change it unless you know
* what you are doing.
* <ol>
* <li>Initialize file system based components - file system manager, wal manager, table
* descriptors, etc</li>
* <li>Publish cluster id</li>
* <li>Here comes the most complicated part - initialize server manager, assignment manager and
* region server tracker
* <ol type='i'>
* <li>Create server manager</li>
* <li>Create master local region</li>
* <li>Create procedure executor, load the procedures, but do not start workers. We will start it
* later after we finish scheduling SCPs to avoid scheduling duplicated SCPs for the same
* server</li>
* <li>Create assignment manager and start it, load the meta region state, but do not load data
* from meta region</li>
* <li>Start region server tracker, construct the online servers set and find out dead servers and
* schedule SCP for them. The online servers will be constructed by scanning zk, and we will also
* scan the wal directory and load from master local region to find out possible live region
* servers, and the differences between these two sets are the dead servers</li>
* </ol>
* </li>
* <li>If this is a new deploy, schedule a InitMetaProcedure to initialize meta</li>
* <li>Start necessary service threads - balancer, catalog janitor, executor services, and also
* the procedure executor, etc. Notice that the balancer must be created first as assignment
* manager may use it when assigning regions.</li>
* <li>Wait for meta to be initialized if necessary, start table state manager.</li>
* <li>Wait for enough region servers to check-in</li>
* <li>Let assignment manager load data from meta and construct region states</li>
* <li>Start all other things such as chore services, etc</li>
* </ol>
* <p/>
 * Notice that now we will not schedule a special procedure to make meta online (unless this is
 * the first time and meta has not been created yet); we will rely on SCP to bring meta online.
*/
private void finishActiveMasterInitialization() throws IOException, InterruptedException, KeeperException, ReplicationException, DeserializationException {
/* We are active master now... go initialize components we need to run. */
startupTaskGroup.addTask("Initializing Master file system");
this.masterActiveTime = EnvironmentEdgeManager.currentTime();
// TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
// always initialize the MemStoreLAB as we use a region to store data in master now, see
// localStore.
initializeMemStoreChunkCreator(null);
this.f0 = new MasterFileSystem(conf);
this.walManager = new MasterWalManager(this);
// warm-up HTDs cache on master initialization
if (preLoadTableDescriptors) {
startupTaskGroup.addTask("Pre-loading table descriptors");
this.tableDescriptors.getAll();
  }
  // Publish cluster ID; set it in Master too. The superclass RegionServer does this later but
// only after it has checked in with the Master. At least a few tests ask Master for clusterId
// before it has called its run method and before RegionServer has done the reportForDuty.
ClusterId clusterId = f0.getClusterId();
startupTaskGroup.addTask(("Publishing Cluster ID " + clusterId) + " in ZooKeeper");
ZKClusterId.setClusterId(this.zooKeeper, f0.getClusterId());
this.clusterId = clusterId.toString();
// Precaution. Put in place the old hbck1 lock file to fence out old hbase1s running their
// hbck1s against an hbase2 cluster; it could do damage. To skip this behavior, set
// hbase.write.hbck1.lock.file to false.
if (this.conf.getBoolean("hbase.write.hbck1.lock.file", true)) {
    Pair<Path, FSDataOutputStream> result = null;
    try {
result = HBaseFsck.checkAndMarkRunningHbck(this.conf, HBaseFsck.createLockRetryCounterFactory(this.conf).create());
} finally {
if (result != null) {
Closeables.close(result.getSecond(), true);
}
}
}
startupTaskGroup.addTask("Initialize ServerManager and schedule SCP for crash servers");
// The below two managers must be created before loading procedures, as they will be used during
// loading.
// initialize master local region
f8 = MasterRegionFactory.create(this);
rsListStorage = new MasterRegionServerList(f8, this);
this.serverManager = createServerManager(this, rsListStorage);
this.syncReplicationReplayWALManager = new SyncReplicationReplayWALManager(this);
if (!conf.getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK)) {
this.splitWALManager = new SplitWALManager(this);
}
tryMigrateMetaLocationsFromZooKeeper();
createProcedureExecutor();
Map<Class<?>, List<Procedure<MasterProcedureEnv>>> procsByType = procedureExecutor.getActiveProceduresNoCopy().stream().collect(Collectors.groupingBy(p -> p.getClass()));
// Create Assignment Manager
this.assignmentManager = createAssignmentManager(this, f8);
this.assignmentManager.start();
// TODO: TRSP can perform as the sub procedure for other procedures, so even if it is marked as
// completed, it could still be in the procedure list. This is a bit strange but is another
// story, need to verify the implementation for ProcedureExecutor and ProcedureStore.
  List<TransitRegionStateProcedure> ritList = procsByType.getOrDefault(TransitRegionStateProcedure.class, Collections.emptyList()).stream().filter(p -> !p.isFinished()).map(p -> ((TransitRegionStateProcedure) (p))).collect(Collectors.toList());
this.assignmentManager.setupRIT(ritList);
  // Start RegionServerTracker with listing of servers found with existing SCPs -- these should
// be registered in the deadServers set -- and the servernames loaded from the WAL directory
// and master local region that COULD BE 'alive'(we'll schedule SCPs for each and let SCP figure
// it out).
// We also pass dirs that are already 'splitting'... so we can do some checks down in tracker.
// TODO: Generate the splitting and live Set in one pass instead of two as we currently do.
this.regionServerTracker.upgrade(procsByType.getOrDefault(ServerCrashProcedure.class, Collections.emptyList()).stream().map(p -> ((ServerCrashProcedure) (p))).map(p -> p.getServerName()).collect(Collectors.toSet()), Sets.union(rsListStorage.getAll(), walManager.getLiveServersFromWALDir()), walManager.getSplittingServersFromWALDir());
  // This manager must be accessed AFTER hbase:meta is confirmed online.
this.tableStateManager = new TableStateManager(this);
startupTaskGroup.addTask("Initializing ZK system trackers");
initializeZKBasedSystemTrackers();
startupTaskGroup.addTask("Loading last flushed sequence id of regions");
try {
this.serverManager.loadLastFlushedSequenceIds();
} catch (IOException e) {
LOG.info("Failed to load last flushed sequence id of regions" + " from file system", e);
}
// Set ourselves as active Master now our claim has succeeded up in zk.
this.activeMaster = true;
// Start the Zombie master detector after setting master as active, see HBASE-21535
  Thread zombieDetector = new Thread(new MasterInitializationMonitor(this), "ActiveMasterInitializationMonitor-" + EnvironmentEdgeManager.currentTime());
  zombieDetector.setDaemon(true);
zombieDetector.start();
if (!maintenanceMode) {
startupTaskGroup.addTask("Initializing master coprocessors");
setQuotasObserver(conf);
initializeCoprocessorHost(conf);
  } else {
// start an in process region server for carrying system regions
maintenanceRegionServer = JVMClusterUtil.createRegionServerThread(getConfiguration(), HRegionServer.class, 0);
maintenanceRegionServer.start();
}
// Checking if meta needs initializing.
startupTaskGroup.addTask("Initializing meta table if this is a new deploy");
InitMetaProcedure initMetaProc = null;
// Print out state of hbase:meta on startup; helps debugging.
if (!this.assignmentManager.getRegionStates().hasTableRegionStates(TableName.META_TABLE_NAME)) {
    Optional<InitMetaProcedure> optProc = procedureExecutor.getProcedures().stream().filter(p -> p instanceof InitMetaProcedure).map(o -> ((InitMetaProcedure) (o))).findAny();
    initMetaProc = optProc.orElseGet(() -> {
      // schedule an init meta procedure if meta has not been deployed yet
      InitMetaProcedure proc = new InitMetaProcedure();
      procedureExecutor.submitProcedure(proc);
      return proc;
    });
}
// initialize load balancer
this.balancer.setMasterServices(this);
this.balancer.initialize();
this.balancer.updateClusterMetrics(getClusterMetricsWithoutCoprocessor());
// try migrate replication data
ZKReplicationQueueStorageForMigration oldReplicationQueueStorage = new ZKReplicationQueueStorageForMigration(zooKeeper, conf);
// check whether there are something to migrate and we haven't scheduled a migration procedure
// yet
if (oldReplicationQueueStorage.hasData() && procedureExecutor.getProcedures().stream().allMatch(p -> !(p instanceof MigrateReplicationQueueFromZkToTableProcedure))) {
procedureExecutor.submitProcedure(new MigrateReplicationQueueFromZkToTableProcedure());
  }
  // start up all service threads.
startupTaskGroup.addTask("Initializing master service threads");
startServiceThreads();
// wait meta to be initialized after we start procedure executor
if (initMetaProc != null) {
initMetaProc.await();
}
// Wake up this server to check in
sleeper.skipSleepCycle();
// Wait for region servers to report in.
// With this as part of master initialization, it precludes our being able to start a single
// server that is both Master and RegionServer. Needs more thought. TODO.
String statusStr = "Wait for region servers to report in";
MonitoredTask waitRegionServer = startupTaskGroup.addTask(statusStr);
LOG.info(Objects.toString(waitRegionServer));
waitForRegionServers(waitRegionServer);
// Check if master is shutting down because issue initializing regionservers or balancer.
if (isStopped()) {
return;
}
startupTaskGroup.addTask("Starting assignment manager");
// FIRST HBASE:META READ!!!!
// The below cannot make progress w/o hbase:meta being online.
// This is the FIRST attempt at going to hbase:meta. Meta on-lining is going on in background
  // as procedures run -- in particular SCPs for crashed servers... One should put up hbase:meta
  // if it is down. It may take a while to come online. So, wait here until meta is for sure
  // available. That's what waitForMetaOnline does.
if (!waitForMetaOnline()) {
return;
}
TableDescriptor metaDescriptor = tableDescriptors.get(TableName.META_TABLE_NAME);
final ColumnFamilyDescriptor tableFamilyDesc = metaDescriptor.getColumnFamily(HConstants.TABLE_FAMILY);
  final ColumnFamilyDescriptor replBarrierFamilyDesc = metaDescriptor.getColumnFamily(HConstants.REPLICATION_BARRIER_FAMILY);
this.assignmentManager.joinCluster();
// The below depends on hbase:meta being online.
this.assignmentManager.processOfflineRegions();
// this must be called after the above processOfflineRegions to prevent race
this.assignmentManager.wakeMetaLoadedEvent();
// for migrating from a version without HBASE-25099, and also for honoring the configuration
// first.
  if (conf.get(HConstants.META_REPLICAS_NUM) != null) {
    int replicasNumInConf = conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM);
TableDescriptor metaDesc = tableDescriptors.get(TableName.META_TABLE_NAME);
if (metaDesc.getRegionReplication() != replicasNumInConf) {
// it is possible that we already have some replicas before upgrading, so we must set the
// region replication number in meta TableDescriptor directly first, without creating a
// ModifyTableProcedure, otherwise it may cause a double assign for the meta replicas.
int existingReplicasCount = assignmentManager.getRegionStates().getRegionsOfTable(TableName.META_TABLE_NAME).size();
if (existingReplicasCount > metaDesc.getRegionReplication()) {
LOG.info("Update replica count of hbase:meta from {}(in TableDescriptor)" + " to {}(existing ZNodes)", metaDesc.getRegionReplication(), existingReplicasCount);
metaDesc = TableDescriptorBuilder.newBuilder(metaDesc).setRegionReplication(existingReplicasCount).build();
tableDescriptors.update(metaDesc);
}
// check again, and issue a ModifyTableProcedure if needed
if (metaDesc.getRegionReplication() != replicasNumInConf) {
LOG.info("The {} config is {} while the replica count in TableDescriptor is {}" +
" for hbase:meta, altering...", HConstants.META_REPLICAS_NUM, replicasNumInConf, metaDesc.getRegionReplication());
procedureExecutor.submitProcedure(new ModifyTableProcedure(procedureExecutor.getEnvironment(), TableDescriptorBuilder.newBuilder(metaDesc).setRegionReplication(replicasNumInConf).build(), null, metaDesc, false, true));
}
}
}
// Initialize after meta is up as below scans meta
FavoredNodesManager fnm = getFavoredNodesManager();
if (fnm != null) {
fnm.initializeFromMeta();
}
// set cluster status again after user regions are assigned
  this.balancer.updateClusterMetrics(getClusterMetricsWithoutCoprocessor());
  // Start balancer and meta catalog janitor after meta and regions have been assigned.
startupTaskGroup.addTask("Starting balancer and catalog janitor");
  this.clusterStatusChore = new ClusterStatusChore(this, balancer);
getChoreService().scheduleChore(clusterStatusChore);
  this.balancerChore = new BalancerChore(this);
  if (!disableBalancerChoreForTest) {
getChoreService().scheduleChore(balancerChore);
}
if (regionNormalizerManager != null) {
getChoreService().scheduleChore(regionNormalizerManager.getRegionNormalizerChore());
}
this.f3 = new CatalogJanitor(this);
getChoreService().scheduleChore(f3);
this.hbckChore = new HbckChore(this);
getChoreService().scheduleChore(hbckChore);
this.serverManager.startChore();
// Only for rolling upgrade, where we need to migrate the data in namespace table to meta table.
if (!m0()) {
return;
}
startupTaskGroup.addTask("Starting cluster schema service");
try {
initClusterSchemaService();
} catch (IllegalStateException e) {
if ((((e.getCause() != null) && (e.getCause() instanceof NoSuchColumnFamilyException)) && (tableFamilyDesc == null)) && (replBarrierFamilyDesc == null)) {
LOG.info("ClusterSchema service could not be initialized. This is " + "expected during HBase 1 to 2 upgrade", e);
} else {
throw e;
}
}
if (this.cpHost != null) {
try {
this.cpHost.preMasterInitialization();
} catch (IOException e) {
LOG.error("Coprocessor preMasterInitialization() hook failed", e);
}
}
LOG.info(String.format("Master has completed initialization %.3fsec", (EnvironmentEdgeManager.currentTime() - masterActiveTime) / 1000.0F));
this.masterFinishedInitializationTime = EnvironmentEdgeManager.currentTime();
configurationManager.registerObserver(this.balancer);
configurationManager.registerObserver(this.f4);
configurationManager.registerObserver(this.logCleaner);
configurationManager.registerObserver(this.regionsRecoveryConfigManager);
configurationManager.registerObserver(this.exclusiveHFileCleanerPool);
  if (this.sharedHFileCleanerPool != null) {
configurationManager.registerObserver(this.sharedHFileCleanerPool);
}
if (this.hfileCleaners != null) {
for (HFileCleaner cleaner : hfileCleaners) {
      configurationManager.registerObserver(cleaner);
    }
}
// Set master as 'initialized'.
setInitialized(true);
startupTaskGroup.markComplete("Initialization successful");
MonitoredTask status = TaskMonitor.get().createStatus("Progress after master initialized",
false, true);
if ((tableFamilyDesc == null) && (replBarrierFamilyDesc == null)) {
// create missing CFs in meta table after master is set to 'initialized'.
createMissingCFsInMetaDuringUpgrade(metaDescriptor);
// Throwing this Exception to abort active master is painful but this
// seems the only way to add missing CFs in meta while upgrading from
// HBase 1 to 2 (where HBase 2 has HBASE-23055 & HBASE-23782 checked-in).
// So, why do we abort active master after adding missing CFs in meta?
// When we reach here, we would have already bypassed NoSuchColumnFamilyException
// in initClusterSchemaService(), meaning ClusterSchemaService is not
// correctly initialized but we bypassed it. Similarly, we bypassed
// tableStateManager.start() as well. Hence, we should better abort
// current active master because our main task - adding missing CFs
// in meta table is done (possible only after master state is set as
// initialized) at the expense of bypassing few important tasks as part
// of active master init routine. So now we abort active master so that
// next active master init will not face any issues and all mandatory
// services will be started during master init phase.
throw new PleaseRestartMasterException(("Aborting active master after missing" + " CFs are successfully added in meta. Subsequent active master ") + "initialization should be uninterrupted");
}
if (maintenanceMode) {
LOG.info("Detected repair mode, skipping final initialization steps.");
return;
}
assignmentManager.checkIfShouldMoveSystemRegionAsync();
status.setStatus("Starting quota manager");
initQuotaManager();
if (QuotaUtil.isQuotaEnabled(conf)) {
// Create the quota snapshot notifier
    spaceQuotaSnapshotNotifier = createQuotaSnapshotNotifier();
spaceQuotaSnapshotNotifier.initialize(getConnection());
    this.quotaObserverChore = new QuotaObserverChore(this, getMasterMetrics());
    // Start the chore to read the region FS space reports and act on them
getChoreService().scheduleChore(quotaObserverChore);
this.snapshotQuotaChore = new SnapshotQuotaObserverChore(this, getMasterMetrics());
// Start the chore to read snapshots and add their usage to table/NS quotas
getChoreService().scheduleChore(snapshotQuotaChore);
}
final SlowLogMasterService slowLogMasterService = new SlowLogMasterService(conf, this);
slowLogMasterService.init();
WALEventTrackerTableCreator.createIfNeededAndNotExists(conf, this);
// Create REPLICATION.SINK_TRACKER table if needed.
ReplicationSinkTrackerTableCreator.createIfNeededAndNotExists(conf, this);
// clear the dead servers with same host name and port of online server because we are not
// removing dead server with same hostname and port of rs which is trying to check in before
// master initialization. See HBASE-5916.
this.serverManager.clearDeadServersWithSameHostNameAndPortOfOnlineServer();
// Check and set the znode ACLs if needed in case we are overtaking a non-secure configuration
status.setStatus("Checking ZNode ACLs");zooKeeper.checkAndSetZNodeAcls();
status.setStatus("Initializing MOB Cleaner");
initMobCleaner();
// delete the stale data for replication sync up tool if necessary
status.setStatus("Cleanup ReplicationSyncUp status if necessary");
Path replicationSyncUpInfoFile = new Path(new Path(dataRootDir, ReplicationSyncUp.INFO_DIR), ReplicationSyncUp.INFO_FILE);
if (dataFs.exists(replicationSyncUpInfoFile)) {
// info file is available, load the timestamp and use it to clean up stale data in replication
// queue storage.
byte[] data;
    try (FSDataInputStream in = dataFs.open(replicationSyncUpInfoFile)) {
      data = ByteStreams.toByteArray(in);
    }
ReplicationSyncUpToolInfo info = null;
try {
info = JsonMapper.fromJson(Bytes.toString(data), ReplicationSyncUpToolInfo.class);
} catch (JsonParseException e) {
// usually this should be a partial file, which means the ReplicationSyncUp tool did not
// finish properly, so not a problem. Here we do not clean up the status as we do not know
// the reason why the tool did not finish properly, so let users clean the status up
// manually
LOG.warn("failed to parse replication sync up info file, ignore and continue...", e);
}
    if (info != null) {
LOG.info("Remove last sequence ids and hfile references which are written before {}({})", info.getStartTimeMs(), DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.systemDefault()).format(Instant.ofEpochMilli(info.getStartTimeMs())));
replicationPeerManager.getQueueStorage().removeLastSequenceIdsAndHFileRefsBefore(info.getStartTimeMs());
// delete the file after removing the stale data, so next time we do not need to do this
// again.
dataFs.delete(replicationSyncUpInfoFile, false);
}
}
status.setStatus("Calling postStartMaster coprocessors");
if (this.cpHost != null) {
// don't let cp initialization errors kill the master
try {
this.cpHost.postStartMaster();
} catch (IOException ioe) {
LOG.error("Coprocessor postStartMaster() hook failed", ioe);
}
}
zombieDetector.interrupt();
  /* After master has started up, let's do balancer post startup initialization. Since this runs
  in the activeMasterManager thread, it should be fine. */
  long start = EnvironmentEdgeManager.currentTime();
  this.balancer.postMasterStartupInitialize();
  if (LOG.isDebugEnabled()) {
LOG.debug(("Balancer post startup initialization complete, took " + ((EnvironmentEdgeManager.currentTime() - start) / 1000)) + " seconds");
}
this.rollingUpgradeChore = new RollingUpgradeChore(this);
getChoreService().scheduleChore(rollingUpgradeChore);
status.markComplete("Progress after master initialized complete");
} | 3.26 |
hbase_HMaster_m2_rdh | /**
* Returns Maximum number of regions in transition
*/
private int m2() {
  int numRegions = this.assignmentManager.getRegionStates().getRegionAssignments().size();
return Math.max(((int) (Math.floor(numRegions * this.maxRitPercent))), 1);
} | 3.26 |
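
A quick worked example of the formula above, with a hypothetical maxRitPercent:

```java
public class MaxRitDemo {
  public static void main(String[] args) {
    int numRegions = 1000;
    double maxRitPercent = 0.01; // hypothetical 1% cap
    // floor(1000 * 0.01) = 10 regions may be in transition at once;
    // the Math.max guard guarantees at least 1 even for tiny clusters.
    int maxRegionsInTransition = Math.max((int) Math.floor(numRegions * maxRitPercent), 1);
    System.out.println(maxRegionsInTransition); // 10
  }
}
```
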
hbase_HMaster_isNormalizerOn_rdh | /**
* Queries the state of the {@link RegionNormalizerStateStore}. If it's not initialized, false is
* returned.
*/
public boolean isNormalizerOn() {
return (!isInMaintenanceMode()) && getRegionNormalizerManager().isNormalizerOn();
} | 3.26 |
hbase_HMaster_reopenRegions_rdh | /**
* Reopen regions provided in the argument
*
* @param tableName
* The current table name
* @param regionNames
* The region names of the regions to reopen
* @param nonceGroup
* Identifier for the source of the request, a client or process
* @param nonce
* A unique identifier for this operation from the client or process identified
* by <code>nonceGroup</code> (the source must ensure each operation gets a
* unique id).
* @return procedure Id
* @throws IOException
* if reopening region fails while running procedure
*/
long reopenRegions(final TableName tableName, final List<byte[]> regionNames, final long nonceGroup, final long nonce) throws IOException {
return MasterProcedureUtil.submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
@Override
      protected void run() throws IOException {
        submitProcedure(new ReopenTableRegionsProcedure(tableName, regionNames));
}
@Override
protected String getDescription() {
return "ReopenTableRegionsProcedure";
}
});
} | 3.26 |
hbase_HMaster_getClusterMetrics_rdh | /**
* Returns cluster status
*/
public ClusterMetrics getClusterMetrics() throws IOException {
  return getClusterMetrics(EnumSet.allOf(Option.class));
} | 3.26 |
hbase_HMaster_decommissionRegionServers_rdh | /**
* Mark region server(s) as decommissioned (previously called 'draining') to prevent additional
 * regions from getting assigned to them. Also unload the regions on the servers asynchronously.
*
* @param servers
* Region servers to decommission.
*/
public void decommissionRegionServers(final List<ServerName> servers, final boolean offload) throws IOException {
List<ServerName> serversAdded = new ArrayList<>(servers.size());
// Place the decommission marker first.
  String parentZnode = getZooKeeper().getZNodePaths().drainingZNode;
for (ServerName server : servers) {
try {
String node = ZNodePaths.joinZNode(parentZnode, server.getServerName());
ZKUtil.createAndFailSilent(getZooKeeper(), node);
} catch (KeeperException ke) {
throw new HBaseIOException(this.zooKeeper.prefix(("Unable to decommission '" + server.getServerName()) + "'."), ke);
}
if (this.serverManager.addServerToDrainList(server)) {
serversAdded.add(server);
    }
  }
// Move the regions off the decommissioned servers.
if (offload) {
final List<ServerName> destServers = this.serverManager.createDestinationServersList();
for (ServerName server : serversAdded) {
final List<RegionInfo> regionsOnServer = this.assignmentManager.getRegionsOnServer(server);
for (RegionInfo hri : regionsOnServer) {
ServerName dest = balancer.randomAssignment(hri, destServers);
if (dest == null) {
throw new HBaseIOException("Unable to determine a plan to move " + hri);}
RegionPlan rp = new RegionPlan(hri, server, dest);
this.assignmentManager.moveAsync(rp);
}
}
}
} | 3.26 |
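
A condensed, hypothetical sketch of the two-phase shape of the method above (durable marker first, then optional offload); a plain Set stands in for the ZK draining znodes:

```java
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Hypothetical illustration of "place the marker first, then offload":
// markers are durable (ZK in the real code), so a master restart still
// sees which servers are decommissioned even if offloading was cut short.
class DecommissionSketch {
  final Set<String> drainMarkers = new HashSet<>(); // stand-in for ZK draining znodes

  List<String> decommission(List<String> servers, boolean offload) {
    List<String> added = new ArrayList<>();
    for (String server : servers) {
      drainMarkers.add(server); // phase 1: idempotent marker
      added.add(server);
    }
    if (offload) {
      for (String server : added) {
        // phase 2: asynchronously move each region off the server (elided)
      }
    }
    return added;
  }
}
```
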
hbase_HMaster_getCompactionState_rdh | /**
* Get the compaction state of the table
*
* @param tableName
* The table name
* @return CompactionState Compaction state of the table
*/
public CompactionState getCompactionState(final TableName tableName) {
CompactionState compactionState = CompactionState.NONE;
try {
    List<RegionInfo> regions = assignmentManager.getRegionStates().getRegionsOfTable(tableName);
    for (RegionInfo regionInfo : regions) {
ServerName serverName = assignmentManager.getRegionStates().getRegionServerOfRegion(regionInfo);
if (serverName == null) {
continue;
}
ServerMetrics sl = serverManager.getLoad(serverName);
if (sl == null) {
continue;
}
RegionMetrics regionMetrics = sl.getRegionMetrics().get(regionInfo.getRegionName());
if (regionMetrics == null) {
LOG.warn("Can not get compaction details for the region: {} , it may be not online.", regionInfo.getRegionNameAsString());
continue;
}
if (regionMetrics.getCompactionState() == CompactionState.MAJOR) {
if (compactionState == CompactionState.MINOR) {
compactionState = CompactionState.MAJOR_AND_MINOR;
} else {
compactionState = CompactionState.MAJOR;
}
} else if (regionMetrics.getCompactionState() == CompactionState.MINOR) {
if (compactionState == CompactionState.MAJOR) {
compactionState = CompactionState.MAJOR_AND_MINOR;
} else {
compactionState = CompactionState.MINOR;
}
}
}
} catch (Exception e) {
compactionState = null;
LOG.error("Exception when get compaction state for " + tableName.getNameAsString(), e);
}
return compactionState;
} | 3.26 |
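
The per-region merging above can be read as a small state-combination table. A standalone sketch mirroring the branch structure, with a local enum standing in for the HBase CompactionState:

```java
public class CompactionStateMerge {
  // Local stand-in for org.apache.hadoop.hbase.client.CompactionState.
  enum State { NONE, MINOR, MAJOR, MAJOR_AND_MINOR }

  // Mirrors the branches in the loop above: a MAJOR region seen after a MINOR
  // one (or vice versa) yields MAJOR_AND_MINOR; a NONE region changes nothing.
  static State merge(State table, State region) {
    if (region == State.MAJOR) {
      return table == State.MINOR ? State.MAJOR_AND_MINOR : State.MAJOR;
    } else if (region == State.MINOR) {
      return table == State.MAJOR ? State.MAJOR_AND_MINOR : State.MINOR;
    }
    return table;
  }

  public static void main(String[] args) {
    State s = State.NONE;
    s = merge(s, State.MINOR);
    s = merge(s, State.MAJOR);
    System.out.println(s); // MAJOR_AND_MINOR
  }
}
```
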
hbase_HMaster_putUpJettyServer_rdh | // return the actual infoPort, -1 means disable info server.
private int putUpJettyServer() throws IOException {
if (!conf.getBoolean("hbase.master.infoserver.redirect", true)) {
return -1;
}
final int infoPort = conf.getInt("hbase.master.info.port.orig", HConstants.DEFAULT_MASTER_INFOPORT);
// -1 is for disabling info server, so no redirecting
if ((infoPort < 0) || (infoServer == null)) {
return -1;
}
if (infoPort == infoServer.getPort()) {
// server is already running
return infoPort;
}
final String addr = conf.get("hbase.master.info.bindAddress", "0.0.0.0");if (!Addressing.isLocalAddress(InetAddress.getByName(addr))) {
String msg = (("Failed to start redirecting jetty server. Address " + addr) + " does not belong to this host. Correct configuration parameter: ") + "hbase.master.info.bindAddress";
LOG.error(msg);
throw new IOException(msg);
}
// TODO I'm pretty sure we could just add another binding to the InfoServer run by
// the RegionServer and have it run the RedirectServlet instead of standing up
// a second entire stack here.
masterJettyServer = new Server();
final ServerConnector connector = new ServerConnector(masterJettyServer);
connector.setHost(addr);
connector.setPort(infoPort);
  masterJettyServer.addConnector(connector);
  masterJettyServer.setStopAtShutdown(true);
masterJettyServer.setHandler(HttpServer.buildGzipHandler(masterJettyServer.getHandler()));
final String redirectHostname = (StringUtils.isBlank(useThisHostnameInstead)) ? null : useThisHostnameInstead;
final MasterRedirectServlet redirect = new MasterRedirectServlet(infoServer, redirectHostname);
  final WebAppContext context = new WebAppContext(null, "/", null, null, null, null, WebAppContext.NO_SESSIONS);
  context.addServlet(new ServletHolder(redirect), "/*");
context.setServer(masterJettyServer);
try {
masterJettyServer.start();
  } catch (Exception e) {
throw new IOException("Failed to start redirecting jetty server", e);
}
return connector.getLocalPort();
} | 3.26 |
hbase_HMaster_getNamespaces_rdh | /**
* Get all Namespaces
*
* @return All Namespace descriptors
*/
List<NamespaceDescriptor> getNamespaces() throws IOException {
checkInitialized();
final List<NamespaceDescriptor> nsds = new ArrayList<>();
if (cpHost != null) {
cpHost.preListNamespaceDescriptors(nsds);
  }
  nsds.addAll(this.clusterSchemaService.getNamespaces());
if (this.cpHost != null) {
this.cpHost.postListNamespaceDescriptors(nsds);
}
return nsds;
} | 3.26 |
hbase_HMaster_isOnline_rdh | /**
 * Report whether this master is started. This method is used for testing.
*
* @return true if master is ready to go, false if not.
*/
public boolean isOnline() {
return serviceStarted;
} | 3.26 |
hbase_HMaster_constructMaster_rdh | /**
* Utility for constructing an instance of the passed HMaster class.
*
* @return HMaster instance.
*/
public static HMaster constructMaster(Class<? extends HMaster> masterClass, final Configuration conf) {
try {
Constructor<? extends HMaster> c = masterClass.getConstructor(Configuration.class);
return c.newInstance(conf);
} catch (Exception e) {
Throwable error = e;
if ((e instanceof InvocationTargetException) && (((InvocationTargetException) (e)).getTargetException() != null)) {
error = ((InvocationTargetException) (e)).getTargetException();
}
throw new RuntimeException(("Failed construction of Master: " + masterClass.toString()) + ". ", error);
}
} | 3.26 |
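
A generic sketch of the reflective construction-with-unwrapping pattern above, using a hypothetical Widget class in place of HMaster:

```java
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;

public class ReflectiveFactory {
  // Hypothetical class hierarchy for illustration.
  public static class Widget {
    public Widget(String config) { }
  }

  public static <T extends Widget> T construct(Class<T> clazz, String config) {
    try {
      Constructor<T> c = clazz.getConstructor(String.class);
      return c.newInstance(config);
    } catch (Exception e) {
      Throwable error = e;
      // newInstance wraps constructor failures; unwrap to report the real cause.
      if (e instanceof InvocationTargetException
          && ((InvocationTargetException) e).getTargetException() != null) {
        error = ((InvocationTargetException) e).getTargetException();
      }
      throw new RuntimeException("Failed construction of " + clazz + ".", error);
    }
  }

  public static void main(String[] args) {
    Widget w = construct(Widget.class, "demo");
    System.out.println(w != null);
  }
}
```
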
hbase_HMaster_getTableDescriptors_rdh | /**
 * Return a list of table descriptors after applying any provided filter parameters. Note
* that the user-facing description of this filter logic is presented on the class-level javadoc
* of {@link NormalizeTableFilterParams}.
*/
private List<TableDescriptor> getTableDescriptors(final List<TableDescriptor> htds, final String namespace, final String regex, final List<TableName> tableNameList, final boolean includeSysTables) throws IOException {
if ((tableNameList == null) || tableNameList.isEmpty()) {
// request for all TableDescriptors
Collection<TableDescriptor> allHtds;
if ((namespace != null) && (namespace.length() > 0)) {
// Do a check on the namespace existence. Will fail if does not exist.
this.clusterSchemaService.getNamespace(namespace);
allHtds = tableDescriptors.getByNamespace(namespace).values();
} else {
allHtds = tableDescriptors.getAll().values();
}
for (TableDescriptor desc : allHtds) {
if (tableStateManager.isTablePresent(desc.getTableName()) && (includeSysTables || (!desc.getTableName().isSystemTable()))) {
htds.add(desc);
}
}
} else {
for (TableName s : tableNameList) {
if (tableStateManager.isTablePresent(s)) {
        TableDescriptor desc = tableDescriptors.get(s);
if (desc != null) {
htds.add(desc);
}
}
}
}
// Retains only those matched by regular expression.
  if (regex != null) {
    filterTablesByRegex(htds, Pattern.compile(regex));
  }
return htds;
} | 3.26 |
hbase_HMaster_getMobCompactionState_rdh | /**
 * Gets the mob file compaction state for a specific table. Whether all the mob files are selected
 * is known during compaction execution, but the statistic is taken just before the compaction
 * starts, so it is hard to know the compaction type at that time; the rough statistics are
 * therefore used for mob file compaction. Only two compaction states are available,
 * CompactionState.MAJOR_AND_MINOR and CompactionState.NONE.
 *
 * @param tableName
 *          The current table name.
 * @return Whether a given table is in mob file compaction now.
 */
public CompactionState getMobCompactionState(TableName tableName) {
AtomicInteger compactionsCount = f7.get(tableName);
if ((compactionsCount != null) && (compactionsCount.get() != 0)) {
return CompactionState.MAJOR_AND_MINOR;
}
return CompactionState.NONE;
} | 3.26 |
hbase_HMaster_modifyNamespace_rdh | /**
* Modify an existing Namespace.
*
* @param nonceGroup
* Identifier for the source of the request, a client or process.
* @param nonce
* A unique identifier for this operation from the client or process identified
* by <code>nonceGroup</code> (the source must ensure each operation gets a
* unique id).
* @return procedure id
*/
long modifyNamespace(final NamespaceDescriptor newNsDescriptor, final long nonceGroup, final long nonce) throws IOException {
checkInitialized();
TableName.isLegalNamespaceName(Bytes.toBytes(newNsDescriptor.getName()));
return MasterProcedureUtil.submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
@Override
protected void run() throws IOException {
NamespaceDescriptor oldNsDescriptor = getNamespace(newNsDescriptor.getName());
      getMaster().getMasterCoprocessorHost().preModifyNamespace(oldNsDescriptor, newNsDescriptor);
      // We need to wait for the procedure to potentially fail due to "prepare" sanity
// checks. This will block only the beginning of the procedure. See HBASE-19953.
ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch();
LOG.info((getClientIdAuditPrefix() + " modify ") + newNsDescriptor);
// Execute the operation synchronously - wait for the operation to complete before
// continuing.
setProcId(getClusterSchema().modifyNamespace(newNsDescriptor, getNonceKey(), latch));
latch.await();
getMaster().getMasterCoprocessorHost().postModifyNamespace(oldNsDescriptor, newNsDescriptor);
}
@Override
protected String getDescription() {
return "ModifyNamespaceProcedure";
}
});
} | 3.26 |
hbase_HMaster_createServerManager_rdh | /**
* <p>
* Create a {@link ServerManager} instance.
* </p>
* <p>
* Will be overridden in tests.
* </p>
*/
@InterfaceAudience.Private
protected ServerManager createServerManager(MasterServices master, RegionServerList storage) throws IOException {
// We put this out here in a method so can do a Mockito.spy and stub it out
// w/ a mocked up ServerManager.
setupClusterConnection();
  return new ServerManager(master, storage);
} | 3.26 |
hbase_HMaster_isSplitOrMergeEnabled_rdh | /**
* Queries the state of the {@link SplitOrMergeStateStore}. If it is not initialized, false is
 * returned. If switchType is illegal, false will be returned.
*
* @param switchType
* see {@link org.apache.hadoop.hbase.client.MasterSwitchType}
* @return The state of the switch
*/
@Override
public boolean isSplitOrMergeEnabled(MasterSwitchType switchType) {
return ((!isInMaintenanceMode()) && (splitOrMergeStateStore != null)) && splitOrMergeStateStore.isSplitOrMergeEnabled(switchType);
} | 3.26 |
hbase_HMaster_isBalancerOn_rdh | /**
* Queries the state of the {@link LoadBalancerStateStore}. If the balancer is not initialized,
* false is returned.
*
* @return The state of the load balancer, or false if the load balancer isn't defined.
*/
public boolean isBalancerOn() {
  return ((!isInMaintenanceMode()) && (loadBalancerStateStore != null)) && loadBalancerStateStore.get();
} | 3.26 |
hbase_HMaster_m0_rdh | /**
* Check hbase:namespace table is assigned. If not, startup will hang looking for the ns table
* <p/>
* This is for rolling upgrading, later we will migrate the data in ns table to the ns family of
* meta table. And if this is a new cluster, this method will return immediately as there will be
* no namespace table/region.
*
* @return True if namespace table is up/online.
*/
private boolean m0() throws IOException {
TableState nsTableState = MetaTableAccessor.getTableState(getConnection(), TableName.NAMESPACE_TABLE_NAME);
if ((nsTableState == null) || nsTableState.isDisabled()) {
// this means we have already migrated the data and disabled or deleted the namespace table,
// or this is a new deploy which does not have a namespace table from the beginning.
return true;
  }
  List<RegionInfo> ris = this.assignmentManager.getRegionStates().getRegionsOfTable(TableName.NAMESPACE_TABLE_NAME);
if (ris.isEmpty()) {
// maybe this will not happen any more, but anyway, no harm to add a check here...
return true;
}
// Else there are namespace regions up in meta. Ensure they are assigned before we go on.
for (RegionInfo ri : ris) {
if (!isRegionOnline(ri)) {
return false;
    }
  }
return true;
} | 3.26 |
hbase_HMaster_decorateMasterConfiguration_rdh | /**
* This method modifies the master's configuration in order to inject replication-related features
*/
@InterfaceAudience.Private
public static void decorateMasterConfiguration(Configuration conf) {
String plugins = conf.get(HBASE_MASTER_LOGCLEANER_PLUGINS);
String cleanerClass = ReplicationLogCleaner.class.getCanonicalName();
if ((plugins == null) || (!plugins.contains(cleanerClass))) {
conf.set(HBASE_MASTER_LOGCLEANER_PLUGINS, (plugins + ",") + cleanerClass);
}
  if (ReplicationUtils.isReplicationForBulkLoadDataEnabled(conf)) {
plugins = conf.get(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
cleanerClass = ReplicationHFileCleaner.class.getCanonicalName();
if (!plugins.contains(cleanerClass)) {
conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, (plugins + ",") + cleanerClass);
}
}
} | 3.26 |
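
A standalone sketch of the plugin-list append pattern above, assuming Hadoop's Configuration; unlike the snippet, it guards the null case explicitly (the snippet presumably relies on the log-cleaner key having a default, so plugins is never null in practice). Key and class names are hypothetical:

```java
import org.apache.hadoop.conf.Configuration;

public class PluginListAppend {
  // Appends a class name to a comma-separated plugin list if not already present.
  static void appendPlugin(Configuration conf, String key, String cleanerClass) {
    String plugins = conf.get(key);
    if (plugins == null || plugins.isEmpty()) {
      conf.set(key, cleanerClass);
    } else if (!plugins.contains(cleanerClass)) {
      conf.set(key, plugins + "," + cleanerClass);
    }
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("demo.cleaner.plugins", "a.B"); // hypothetical key and class names
    appendPlugin(conf, "demo.cleaner.plugins", "c.D");
    System.out.println(conf.get("demo.cleaner.plugins")); // a.B,c.D
  }
}
```
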
hbase_HMaster_getMasterProcedureManagerHost_rdh | /**
* Returns the underlying MasterProcedureManagerHost
*/
@Override
public MasterProcedureManagerHost getMasterProcedureManagerHost() {
return mpmHost;
} | 3.26 |
hbase_HMaster_recommissionRegionServer_rdh | /**
 * Remove decommission marker (previously called 'draining') from a region server to allow region
 * assignments. Load regions onto the server asynchronously if a list of regions is given.
*
* @param server
* Region server to remove decommission marker from.
*/
public void recommissionRegionServer(final ServerName server, final List<byte[]> encodedRegionNames) throws IOException {
// Remove the server from decommissioned (draining) server list.
String parentZnode = getZooKeeper().getZNodePaths().drainingZNode;
String node = ZNodePaths.joinZNode(parentZnode, server.getServerName());
try {
ZKUtil.deleteNodeFailSilent(getZooKeeper(), node);
} catch (KeeperException ke) {
throw new HBaseIOException(this.zooKeeper.prefix(("Unable to recommission '" + server.getServerName()) + "'."), ke);
}
this.serverManager.removeServerFromDrainList(server);
// Load the regions onto the server if we are given a list of regions.
if ((encodedRegionNames == null) || encodedRegionNames.isEmpty()) {
return;
}
if (!this.serverManager.isServerOnline(server)) {
return;
}
for (byte[] encodedRegionName : encodedRegionNames) {
RegionState regionState = assignmentManager.getRegionStates().getRegionState(Bytes.toString(encodedRegionName));
if (regionState == null) {
LOG.warn("Unknown region " + Bytes.toStringBinary(encodedRegionName));continue;
}
    RegionInfo hri = regionState.getRegion();
    if (server.equals(regionState.getServerName())) {
LOG.info(((("Skipping move of region " + hri.getRegionNameAsString()) + " because region already assigned to the same server ") + server) + ".");
continue;
}
RegionPlan rp = new RegionPlan(hri, regionState.getServerName(), server);
this.assignmentManager.moveAsync(rp);
}
} | 3.26 |
hbase_HMaster_getMasterActiveTime_rdh | /**
* Returns timestamp in millis when HMaster became the active master.
*/
public long getMasterActiveTime() {
return masterActiveTime;
} | 3.26 |
hbase_HMaster_getClientIdAuditPrefix_rdh | /**
* Returns Client info for use as prefix on an audit log string; who did an action
*/
@Override
public String getClientIdAuditPrefix() {
  return (("Client=" + RpcServer.getRequestUserName().orElse(null)) + "/") + RpcServer.getRemoteAddress().orElse(null);
} | 3.26 |
hbase_HMaster_getBackupMasterInfoPort_rdh | /**
*
* @param sn
 *          ServerName of the backup master
* @return info port of backup master or 0 if any exception occurs.
*/
public int getBackupMasterInfoPort(final ServerName sn) {
return activeMasterManager.getBackupMasterInfoPort(sn);
} | 3.26 |
hbase_HMaster_m1_rdh | /**
* Adds the {@code MasterQuotasObserver} to the list of configured Master observers to
* automatically remove quotas for a table when that table is deleted.
*/
@InterfaceAudience.Private
public void m1(Configuration conf) {
// We're configured to not delete quotas on table deletion, so we don't need to add the obs.
if (!conf.getBoolean(MasterQuotasObserver.REMOVE_QUOTA_ON_TABLE_DELETE, MasterQuotasObserver.REMOVE_QUOTA_ON_TABLE_DELETE_DEFAULT)) {
return;
}
  String[] masterCoprocs = conf.getStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
final int length = (null == masterCoprocs) ? 0 : masterCoprocs.length;
String[] updatedCoprocs = new String[length + 1];
if (length > 0) {
System.arraycopy(masterCoprocs, 0, updatedCoprocs, 0, masterCoprocs.length);
}
updatedCoprocs[length] = MasterQuotasObserver.class.getName();
conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, updatedCoprocs);
} | 3.26 |
hbase_HMaster_move_rdh | // Public so can be accessed by tests. Blocks until move is done.
// Replace with an async implementation from which you can get
// a success/failure result.
@InterfaceAudience.Private
public void move(final byte[] encodedRegionName, byte[] destServerName) throws IOException {
RegionState regionState = assignmentManager.getRegionStates().getRegionState(Bytes.toString(encodedRegionName));
RegionInfo hri;
if (regionState != null) {
hri = regionState.getRegion();
} else {
throw new UnknownRegionException(Bytes.toStringBinary(encodedRegionName));
}
ServerName dest;
List<ServerName> exclude = (hri.getTable().isSystemTable()) ? assignmentManager.getExcludedServersForSystemTable() : new ArrayList<>(1);
    if (destServerName != null
      && exclude.contains(ServerName.valueOf(Bytes.toString(destServerName)))) {
      LOG.info(Bytes.toString(encodedRegionName) + " can not move to "
        + Bytes.toString(destServerName) + " because the server is in exclude list");
destServerName = null;
}
if ((destServerName == null) || (destServerName.length == 0)) {
LOG.info("Passed destination servername is null/empty so " + "choosing a server at random");
exclude.add(regionState.getServerName());
final List<ServerName> destServers = this.serverManager.createDestinationServersList(exclude);
dest = balancer.randomAssignment(hri, destServers);
if (dest == null) {
LOG.debug("Unable to determine a plan to assign " + hri);
return;
}
} else {
ServerName candidate = ServerName.valueOf(Bytes.toString(destServerName));
dest = balancer.randomAssignment(hri, Lists.newArrayList(candidate));
if (dest == null) {
LOG.debug("Unable to determine a plan to assign " + hri);
return;
}
// TODO: deal with table on master for rs group.
if (dest.equals(serverName)) {
// To avoid unnecessary region moving later by balancer. Don't put user
// regions on master.
LOG.debug((("Skipping move of region " + hri.getRegionNameAsString()) + " to avoid unnecessary region moving later by load balancer,") + " because it should not be on master");
return;
}
}
if (dest.equals(regionState.getServerName())) {
LOG.debug(((("Skipping move of region " + hri.getRegionNameAsString()) + " because region already assigned to the same server ") + dest) + ".");
return;
}
// Now we can do the move
    RegionPlan rp = new RegionPlan(hri, regionState.getServerName(), dest);
assert rp.getDestination() != null : (rp.toString() + " ") + dest;
    try {
      checkInitialized();
if (this.cpHost != null) {
this.cpHost.preMove(hri, rp.getSource(), rp.getDestination());
}
      TransitRegionStateProcedure proc =
        this.assignmentManager.createMoveRegionProcedure(rp.getRegionInfo(), rp.getDestination());
if (conf.getBoolean(WARMUP_BEFORE_MOVE, DEFAULT_WARMUP_BEFORE_MOVE)) {
// Warmup the region on the destination before initiating the move.
// A region server could reject the close request because it either does not
// have the specified region or the region is being split.
LOG.info((((getClientIdAuditPrefix() + " move ") + rp) + ", warming up region on ") + rp.getDestination());
warmUpRegion(rp.getDestination(), hri);
}
LOG.info(((getClientIdAuditPrefix() + " move ") + rp) + ", running balancer");
      Future<byte[]> future = ProcedureSyncWait.submitProcedure(this.procedureExecutor, proc);
      try {
// Is this going to work? Will we throw exception on error?
// TODO: CompletableFuture rather than this stunted Future.
future.get();
} catch (InterruptedException | ExecutionException e) {
        throw new HBaseIOException(e);
}
if (this.cpHost != null) {
this.cpHost.postMove(hri, rp.getSource(), rp.getDestination());
      }
    } catch (IOException ioe) {
if (ioe instanceof HBaseIOException) {
throw ((HBaseIOException) (ioe));
}
throw new HBaseIOException(ioe);
}
} | 3.26 |
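The warm-up step above is gated by WARMUP_BEFORE_MOVE. A minimal sketch of turning it off, assuming the constant resolves to the key below and defaults to true:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WarmupConfDemo {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Skip warming up the region on the destination server before a move.
    conf.setBoolean("hbase.master.warmup.before.move", false);
  }
}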
hbase_HMaster_getMasterStartTime_rdh | /**
* Returns timestamp in millis when HMaster was started.
*/
public long getMasterStartTime() {
return startcode;
} | 3.26 |
hbase_HMaster_getLoadedCoprocessors_rdh | /**
* The set of loaded coprocessors is stored in a static set. Since it's statically allocated, it
* does not require that HMaster's cpHost be initialized prior to accessing it.
*
* @return a String representation of the set of names of the loaded coprocessors.
*/
public static String getLoadedCoprocessors() {
return CoprocessorHost.getLoadedCoprocessors().toString();
} | 3.26 |
hbase_HMaster_getAverageLoad_rdh | /**
* Compute the average load across all region servers. Currently, this uses a very naive
* computation - just uses the number of regions being served, ignoring stats about number of
* requests.
*
* @return the average load
*/
public double getAverageLoad() {
if (this.assignmentManager == null) {
return 0;
}
RegionStates regionStates = this.assignmentManager.getRegionStates();
if (regionStates == null) {
      return 0;
    }
return regionStates.getAverageLoad();
} | 3.26 |
hbase_HMaster_balanceThrottling_rdh | /**
 * It first sleeps until the next balance plan start time. Meanwhile, it throttles by the max
 * number of regions in transition to protect availability.
*
* @param nextBalanceStartTime
* The next balance plan start time
* @param maxRegionsInTransition
* max number of regions in transition
* @param cutoffTime
* when to exit balancer
*/
private void balanceThrottling(long nextBalanceStartTime, int maxRegionsInTransition, long cutoffTime) {
    boolean interrupted = false;
// Sleep to next balance plan start time
// But if there are zero regions in transition, it can skip sleep to speed up.
while (((!interrupted) && (EnvironmentEdgeManager.currentTime() < nextBalanceStartTime)) && this.assignmentManager.getRegionStates().hasRegionsInTransition()) {
try {
Thread.sleep(100);
      } catch (InterruptedException ie) {
interrupted = true;
}
}
    // Throttle by the max number of regions in transition
    while (!interrupted && maxRegionsInTransition > 0
      && this.assignmentManager.getRegionStates().getRegionsInTransitionCount()
        >= maxRegionsInTransition
      && EnvironmentEdgeManager.currentTime() <= cutoffTime) {
try {
// sleep if the number of regions in transition exceeds the limit
Thread.sleep(100);
} catch (InterruptedException ie) {
interrupted = true;
}
    }
    if (interrupted) {
      Thread.currentThread().interrupt();
    }
} | 3.26 |
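A self-contained sketch of the same two-phase throttling pattern, with a generic in-flight counter standing in for the assignment manager's regions-in-transition count (illustrative, not HMaster code):

import java.util.function.IntSupplier;

public class ThrottleDemo {
  static void throttle(long nextStartTime, int maxInFlight, long cutoffTime,
      IntSupplier inFlight) {
    boolean interrupted = false;
    // Phase 1: wait for the planned start time, but only while work is in flight.
    while (!interrupted && System.currentTimeMillis() < nextStartTime
        && inFlight.getAsInt() > 0) {
      try {
        Thread.sleep(100);
      } catch (InterruptedException ie) {
        interrupted = true;
      }
    }
    // Phase 2: hold while too many items are in flight, up to the cutoff.
    while (!interrupted && maxInFlight > 0 && inFlight.getAsInt() >= maxInFlight
        && System.currentTimeMillis() <= cutoffTime) {
      try {
        Thread.sleep(100);
      } catch (InterruptedException ie) {
        interrupted = true;
      }
    }
    if (interrupted) {
      Thread.currentThread().interrupt(); // restore the interrupt flag
    }
  }

  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    throttle(now + 200, 5, now + 1000, () -> 0); // returns quickly: nothing in flight
  }
}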
hbase_HMaster_isInMaintenanceMode_rdh | /**
* Report whether this master is in maintenance mode.
*
* @return true if master is in maintenanceMode
*/
@Override
public boolean isInMaintenanceMode() {
return maintenanceMode;
} | 3.26 |
hbase_HMaster_run_rdh | // Main run loop. Calls through to the regionserver run loop AFTER becoming active Master; will
// block in here until then.
@Override
public void run() {
try {
installShutdownHook();
registerConfigurationObservers();
Threads.setDaemonThreadRunning(new Thread(TraceUtil.tracedRunnable(() -> {
try {
int infoPort = putUpJettyServer();
startActiveMasterManager(infoPort);
} catch (Throwable t) {
// Make sure we log the exception.
String error = "Failed to become Active Master";
LOG.error(error, t);
// Abort should have been called already.
if (!isAborted()) {
abort(error, t);
}
}
}, "HMaster.becomeActiveMaster")), getName() + ":becomeActiveMaster");
while ((!isStopped()) && (!isAborted())) {
sleeper.sleep();
}
final Span span = TraceUtil.createSpan("HMaster exiting main loop");
try (Scope ignored = span.makeCurrent()) {
stopInfoServer();
closeClusterConnection();
stopServiceThreads();
if (this.rpcServices != null) {
this.rpcServices.stop();
}
closeZooKeeper();
        closeTableDescriptors();
        span.setStatus(StatusCode.OK);
} finally {
span.end();
}
} finally {
if (this.clusterSchemaService != null) {
// If on way out, then we are no longer active master.
this.clusterSchemaService.stopAsync();
try {
this.clusterSchemaService.awaitTerminated(getConfiguration().getInt(HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS, DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS), TimeUnit.SECONDS);
} catch (TimeoutException te) {
LOG.warn("Failed shutdown of clusterSchemaService", te);
}
      }
      this.activeMaster = false;
}
} | 3.26 |
hbase_HMaster_getRemoteInetAddress_rdh | /**
 * Returns the remote side's InetAddress.
*/
InetAddress getRemoteInetAddress(final int port, final long serverStartCode) throws UnknownHostException {
// Do it out here in its own little method so can fake an address when
// mocking up in tests.
InetAddress ia = RpcServer.getRemoteIp();
// The call could be from the local regionserver,
// in which case, there is no remote address.
if ((ia == null) && (serverStartCode == startcode)) {
InetSocketAddress isa = rpcServices.getSocketAddress();
if ((isa != null) && (isa.getPort() == port)) {
ia = isa.getAddress();
}
}
return ia;
} | 3.26 |
hbase_HMaster_getLoadBalancerClassName_rdh | /**
* Fetch the configured {@link LoadBalancer} class name. If none is set, a default is returned.
* <p/>
* Notice that, the base load balancer will always be {@link RSGroupBasedLoadBalancer} now, so
* this method will return the balancer used inside each rs group.
*
* @return The name of the {@link LoadBalancer} in use.
*/
public String getLoadBalancerClassName() {
return conf.get(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, LoadBalancerFactory.getDefaultLoadBalancerClass().getName());
} | 3.26 |
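A sketch of choosing a balancer through this key; both the key string and the StochasticLoadBalancer class name are assumptions based on stock HBase, so check them against your version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerConfDemo {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // HConstants.HBASE_MASTER_LOADBALANCER_CLASS is assumed to be this key:
    conf.set("hbase.master.loadbalancer.class",
      "org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer");
  }
}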
hbase_HMaster_startProcedureExecutor_rdh | // will be override in UT
protected void startProcedureExecutor() throws IOException {
procedureExecutor.startWorkers();
} | 3.26 |
hbase_HMaster_skipRegionManagementAction_rdh | /**
* Checks master state before initiating action over region topology.
*
* @param action
* the name of the action under consideration, for logging.
* @return {@code true} when the caller should exit early, {@code false} otherwise.
*/
@Override
public boolean skipRegionManagementAction(final String action) {
    // Note: this method could be `default` on MasterServices but for the logging.
if (!isInitialized()) {
LOG.debug("Master has not been initialized, don't run {}.", action);
return true;
}
if (this.getServerManager().isClusterShutdown()) {
LOG.info("Cluster is shutting down, don't run {}.", action);
return true;
}
if (isInMaintenanceMode()) {
LOG.info("Master is in maintenance mode, don't run {}.", action);
return true;
}
return false;
} | 3.26 |
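A minimal, self-contained sketch of the early-exit guard pattern this method enables; the RegionActions interface below is a hypothetical stand-in for MasterServices.

public class GuardDemo {
  interface RegionActions {
    boolean skipRegionManagementAction(String action);
  }

  static void runChore(RegionActions master) {
    if (master.skipRegionManagementAction("demo-chore")) {
      return; // master uninitialized, cluster shutting down, or maintenance mode
    }
    System.out.println("running region-topology action");
  }

  public static void main(String[] args) {
    runChore(action -> false); // stand-in master that never skips
  }
}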
hbase_HMaster_createAssignmentManager_rdh | // Will be overriden in test to inject customized AssignmentManager
@InterfaceAudience.Private
protected AssignmentManager createAssignmentManager(MasterServices master, MasterRegion masterRegion) {
return new AssignmentManager(master, masterRegion);
} | 3.26 |
hbase_HMaster_getSnapshotManager_rdh | /**
* Returns the underlying snapshot manager
*/
@Override
public SnapshotManager getSnapshotManager() {
    return this.snapshotManager;
} | 3.26 |
hbase_HMaster_getNamespace_rdh | /**
* Get a Namespace
*
* @param name
* Name of the Namespace
* @return Namespace descriptor for <code>name</code>
*/
NamespaceDescriptor getNamespace(String name) throws IOException {
checkInitialized();
    if (this.cpHost != null) {
      this.cpHost.preGetNamespaceDescriptor(name);
    }
    NamespaceDescriptor nsd = this.clusterSchemaService.getNamespace(name);
    if (this.cpHost != null) {
      this.cpHost.postGetNamespaceDescriptor(nsd);
    }
return nsd;
} | 3.26 |
hbase_MetaFixer_createRegionInfosForHoles_rdh | /**
* Create a new {@link RegionInfo} corresponding to each provided "hole" pair.
*/
private static List<RegionInfo> createRegionInfosForHoles(final List<Pair<RegionInfo, RegionInfo>> holes) {
    final List<RegionInfo> newRegionInfos = holes.stream().map(MetaFixer::getHoleCover)
      .filter(Optional::isPresent).map(Optional::get).collect(Collectors.toList());
    f0.debug("Constructed {}/{} RegionInfo descriptors corresponding to identified holes.",
      newRegionInfos.size(), holes.size());
    return newRegionInfos;
}
/**
*
* @return Attempts to calculate a new {@link RegionInfo} that covers the region range described
in {@code hole} | 3.26 |
hbase_MetaFixer_calculateMerges_rdh | /**
* Run through <code>overlaps</code> and return a list of merges to run. Presumes overlaps are
* ordered (which they are coming out of the CatalogJanitor consistency report).
*
* @param maxMergeCount
* Maximum regions to merge at a time (avoid merging 100k regions in one go!)
*/
static List<SortedSet<RegionInfo>> calculateMerges(int maxMergeCount, List<Pair<RegionInfo, RegionInfo>> overlaps) {
if (overlaps.isEmpty()) {
f0.debug("No overlaps.");
return Collections.emptyList();
}
List<SortedSet<RegionInfo>> merges = new ArrayList<>();
// First group overlaps by table then calculate merge table by table.
ListMultimap<TableName, Pair<RegionInfo, RegionInfo>> overlapGroups = ArrayListMultimap.create();
for (Pair<RegionInfo, RegionInfo> pair : overlaps) {
overlapGroups.put(pair.getFirst().getTable(), pair);
}
for (Map.Entry<TableName, Collection<Pair<RegionInfo, RegionInfo>>> v23 : overlapGroups.asMap().entrySet()) {
calculateTableMerges(maxMergeCount, merges, v23.getValue());
}
return merges;
} | 3.26 |
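To see the per-table grouping step in isolation, here is a stand-alone sketch using plain Guava (HBase itself uses its shaded thirdparty copy) with strings in place of RegionInfo pairs.

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.ListMultimap;

public class OverlapGroupingDemo {
  public static void main(String[] args) {
    ListMultimap<String, String> byTable = ArrayListMultimap.create();
    byTable.put("t1", "r1~r2");
    byTable.put("t1", "r2~r3");
    byTable.put("t2", "rA~rB");
    // Each table's overlaps are then handed to a per-table merge calculation.
    byTable.asMap().forEach((table, overlaps) ->
      System.out.println(table + " -> " + overlaps));
  }
}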
hbase_MetaFixer_m0_rdh | /**
 * If there is a hole, papers it over by adding a region to the filesystem and to hbase:meta.
 * Does not assign.
*/
void m0(CatalogJanitorReport report) {
final List<Pair<RegionInfo, RegionInfo>> holes = report.getHoles();
if (holes.isEmpty()) {
f0.info("CatalogJanitor Report contains no holes to fix. Skipping.");
return;
}
f0.info("Identified {} region holes to fix. Detailed fixup progress logged at DEBUG.", holes.size());
final List<RegionInfo> newRegionInfos = createRegionInfosForHoles(holes);
final List<RegionInfo> newMetaEntries = createMetaEntries(masterServices, newRegionInfos);
final TransitRegionStateProcedure[] assignProcedures = masterServices.getAssignmentManager().createRoundRobinAssignProcedures(newMetaEntries);
masterServices.getMasterProcedureExecutor().submitProcedures(assignProcedures);
f0.info("Scheduled {}/{} new regions for assignment.", assignProcedures.length, holes.size());
} | 3.26 |
hbase_MetaFixer_fixOverlaps_rdh | /**
 * Fix overlaps noted in the CatalogJanitor consistency report.
*/
List<Long> fixOverlaps(CatalogJanitorReport report) throws IOException {
List<Long> pidList = new ArrayList<>();
for (Set<RegionInfo> regions : calculateMerges(maxMergeCount, report.getOverlaps())) {
      RegionInfo[] v19 = regions.toArray(new RegionInfo[0]);
try {
pidList.add(this.masterServices.mergeRegions(v19, true, HConstants.NO_NONCE, HConstants.NO_NONCE));
} catch (MergeRegionException mre) {
f0.warn("Failed overlap fix of {}", v19, mre);
}
}
return pidList;
} | 3.26 |
hbase_ByteBufferArray_next_rdh | /**
 * The returned ByteBuffer is a sliced one; it won't affect the position or limit of the
 * original one.
*/
@Override
  public ByteBuffer next() {
    ByteBuffer bb = buffers[curIndex].duplicate();
if (curIndex == startBuffer) {
bb.position(f0).limit(Math.min(bufferSize, f0 + len));
} else if (curIndex == endBuffer) {
      bb.position(0).limit(endOffset);
    } else {
bb.position(0).limit(bufferSize);
}
curIndex++;
sum += bb.remaining();
// Make sure that its pos is zero, it's important because MBB will count from zero for all nio
// ByteBuffers.
return bb.slice();
} | 3.26 |
hbase_ByteBufferArray_internalTransfer_rdh | /**
* Transferring all remaining bytes from b to the buffers array starting at offset, or
 * transferring bytes from the buffers array at offset to b until b is filled. Note that the
 * position of ByteBuff b will be advanced.
*
* @param offset
* where we start in the big logical array.
* @param b
* the ByteBuff to transfer from or to
* @param transfer
* the transfer interface.
* @return the length of bytes we transferred.
*/
private int internalTransfer(long offset, ByteBuff b, BiConsumer<ByteBuffer, ByteBuff> transfer) {
int expectedTransferLen = b.remaining();
if (expectedTransferLen == 0) {
return 0;
    }
    BufferIterator it = new BufferIterator(offset, expectedTransferLen);
while (it.hasNext()) {
ByteBuffer a = it.next();
transfer.accept(a, b);
assert !a.hasRemaining();
}
    assert expectedTransferLen == it.getSum() : "Expected transfer length (=" + expectedTransferLen
      + ") doesn't match the actual transfer length (=" + it.getSum() + ")";
return expectedTransferLen;
} | 3.26 |
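The READER/WRITER BiConsumers referenced above are not shown in this excerpt; the following stand-alone sketch illustrates the same transfer idea with plain ByteBuffers.

import java.nio.ByteBuffer;
import java.util.function.BiConsumer;

public class TransferDemo {
  // Copies as many bytes as fit from src into the chunk (a simplified WRITER).
  static final BiConsumer<ByteBuffer, ByteBuffer> WRITER = (chunk, src) -> {
    int n = Math.min(chunk.remaining(), src.remaining());
    for (int i = 0; i < n; i++) {
      chunk.put(src.get());
    }
  };

  public static void main(String[] args) {
    ByteBuffer chunk = ByteBuffer.allocate(8);
    ByteBuffer src = ByteBuffer.wrap(new byte[] { 1, 2, 3 });
    WRITER.accept(chunk, src);
    System.out.println("transferred " + chunk.position() + " bytes"); // prints 3
  }
}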
hbase_ByteBufferArray_read_rdh | /**
* Transfers bytes from this buffers array into the given destination {@link ByteBuff}
*
* @param offset
* start position in this big logical array.
* @param dst
* the destination ByteBuff. Notice that its position will be advanced.
* @return number of bytes read
*/
public int read(long offset, ByteBuff dst) {
return internalTransfer(offset, dst, READER);
} | 3.26 |
hbase_ByteBufferArray_write_rdh | /**
* Transfers bytes from the given source {@link ByteBuff} into this buffer array
*
* @param offset
* start offset of this big logical array.
* @param src
* the source ByteBuff. Notice that its position will be advanced.
* @return number of bytes write
*/
public int write(long offset, ByteBuff src) {
    return internalTransfer(offset, src, WRITER);
  }
hbase_TableState_isEnabled_rdh | /**
* Returns True if table is {@link State#ENABLED}.
*/
public boolean isEnabled() {
return isInStates(State.ENABLED);
} | 3.26 |
hbase_TableState_isDisabled_rdh | /**
* Returns True if table is disabled.
*/
public boolean isDisabled() {
return isInStates(State.DISABLED);
} | 3.26 |
hbase_TableState_isDisabledOrDisabling_rdh | /**
 * Returns True if {@link State#DISABLED} or {@link State#DISABLING}
*/
public boolean isDisabledOrDisabling() {
return isInStates(State.DISABLED, State.DISABLING);
} | 3.26 |
hbase_TableState_getState_rdh | /**
* Returns table state
*/
public State getState() {
return state;
} | 3.26 |
hbase_TableState_isInStates_rdh | /**
 * Check whether the current state equals any of the given states
 *
 * @param target
 *          states to compare against
 * @return true if the current state matches any of <code>target</code>
*/
public boolean isInStates(State... target) {
for (State tableState : target) {
if (this.state.equals(tableState)) {
return true;
}
}
return false;
} | 3.26 |
hbase_TableState_getTableName_rdh | /**
* Table name for state
*/
public TableName getTableName() {
return tableName;
} | 3.26 |
hbase_TableState_isDisabling_rdh | /**
* Returns True if table is disabling.
*/
public boolean isDisabling() {
return isInStates(State.DISABLING);
} | 3.26 |
hbase_TableState_convert_rdh | /**
* Covert from PB version of TableState
*
* @param tableName
* table this state of
* @param tableState
* convert from
*/
public static TableState convert(TableName tableName, HBaseProtos.TableState tableState) {
    TableState.State state = State.convert(tableState.getState());
return new TableState(tableName, state);
} | 3.26 |
hbase_TableState_isEnabling_rdh | /**
* Returns True if table is {@link State#ENABLING}.
*/
public boolean isEnabling() {
return isInStates(State.ENABLING);
} | 3.26 |
hbase_TableState_inStates_rdh | /**
 * Check whether the table is in any of the given states
 *
 * @param states
 *          states to check against
 * @return true if the table state matches any of them
*/
public boolean inStates(State... states) {
for (State s : states) {
      if (s.equals(this.state)) {
        return true;
      }
    }
    return false;
  }
hbase_TableState_isEnabledOrEnabling_rdh | /**
* Returns True if {@link State#ENABLED} or {@link State#ENABLING}
*/
public boolean isEnabledOrEnabling() {
return isInStates(State.ENABLED, State.ENABLING);
} | 3.26 |
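A short caller-side sketch of these state checks; the two-argument TableState constructor is assumed from its use in convert above.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableState;

public class TableStateDemo {
  public static void main(String[] args) {
    // Constructor shape taken from convert(...) in the snippet above.
    TableState ts = new TableState(TableName.valueOf("demo"), TableState.State.ENABLED);
    System.out.println(ts.isEnabledOrEnabling()); // true
    System.out.println(ts.isInStates(TableState.State.DISABLED,
      TableState.State.DISABLING)); // false
  }
}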
hbase_RegionReplicationFlushRequester_recordFlush_rdh | /**
* Record that we have already finished a flush with the given {@code sequenceId}.
* <p/>
* We can cancel the pending flush request if the failed sequence id is less than the given
* {@code sequenceId}.
*/
synchronized void recordFlush(long sequenceId) {
this.lastFlushedSequenceId = sequenceId;
// cancel the pending flush request if it is necessary, i.e, we have already finished a flush
// with higher sequence id.
if ((sequenceId > pendingFlushRequestSequenceId) && (pendingFlushRequest != null)) {
pendingFlushRequest.cancel();
pendingFlushRequest = null;
}
} | 3.26 |
hbase_ZKProcedureUtil_getReachedBarrierNode_rdh | /**
* Get the full znode path for the node used by the coordinator to trigger a global barrier
* execution and release on each subprocedure.
*
* @param controller
* controller running the procedure
* @param opInstanceName
* name of the running procedure instance (not the procedure description).
* @return full znode path to the commit barrier
*/
public static String getReachedBarrierNode(ZKProcedureUtil controller, String opInstanceName) {
return ZNodePaths.joinZNode(controller.reachedZnode, opInstanceName);
} | 3.26 |
hbase_ZKProcedureUtil_getAbortNode_rdh | /**
* Get the full znode path for the node used by the coordinator or member to trigger an abort of
* the global barrier acquisition or execution in subprocedures.
*
* @param controller
* controller running the procedure
* @param opInstanceName
* name of the running procedure instance (not the procedure description).
* @return full znode path to the abort znode
*/
public static String getAbortNode(ZKProcedureUtil controller, String opInstanceName) {
return ZNodePaths.joinZNode(controller.abortZnode, opInstanceName);
} | 3.26 |
hbase_ZKProcedureUtil_isAbortPathNode_rdh | /**
* Is this in the procedure barrier abort znode path
*/
public boolean isAbortPathNode(String path) {
    return path.startsWith(this.abortZnode) && !path.equals(abortZnode);
  }
hbase_ZKProcedureUtil_isAcquiredNode_rdh | /**
* Is this the exact procedure barrier acquired znode
*/
boolean isAcquiredNode(String path) {
return path.equals(acquiredZnode);
} | 3.26 |
hbase_ZKProcedureUtil_getAcquireBarrierNode_rdh | /**
* Get the full znode path for the node used by the coordinator to trigger a global barrier
* acquire on each subprocedure.
*
* @param controller
* controller running the procedure
* @param opInstanceName
* name of the running procedure instance (not the procedure description).
* @return full znode path to the prepare barrier/start node
*/
  public static String getAcquireBarrierNode(ZKProcedureUtil controller, String opInstanceName) {
    return ZNodePaths.joinZNode(controller.acquiredZnode, opInstanceName);
} | 3.26 |
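Put together, the three path helpers lay out one znode subtree per procedure type. An illustrative layout for a procedure instance named "myTable" (the base path is hypothetical):

// <baseZNode>             e.g. /hbase/some-proc-type (hypothetical)
//   acquired/myTable   <- getAcquireBarrierNode(controller, "myTable")
//   reached/myTable    <- getReachedBarrierNode(controller, "myTable")
//   abort/myTable      <- getAbortNode(controller, "myTable")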
hbase_ZKProcedureUtil_logZKTree_rdh | /**
* Helper method to print the current state of the ZK tree.
*
* @see #logZKTree(String)
* @throws KeeperException
* if an unexpected exception occurs
*/
protected void logZKTree(String root, String prefix) throws KeeperException {
List<String> children = ZKUtil.listChildrenNoWatch(watcher, root);
    if (children == null) {
      return;
    }
for (String child : children) {
LOG.debug(prefix + child);
String node = ZNodePaths.joinZNode(root.equals("/") ? "" : root, child);
logZKTree(node, prefix + "---");
}
} | 3.26 |
hbase_ZKProcedureUtil_isReachedNode_rdh | /**
* Is this the exact procedure barrier reached znode
*/
  boolean isReachedNode(String path) {
    return path.equals(reachedZnode);
} | 3.26 |
hbase_ZKProcedureUtil_isAcquiredPathNode_rdh | /**
* Is this in the procedure barrier acquired znode path
*/
boolean isAcquiredPathNode(String path) {
    return path.startsWith(this.acquiredZnode) && !path.equals(acquiredZnode)
      && isMemberNode(path, acquiredZnode);
} | 3.26 |
hbase_ZKProcedureUtil_isInProcedurePath_rdh | /**
 * Is this a procedure-related znode path? TODO: this is not strict; it can return true if the
 * node name merely starts with the same prefix but belongs to a different zdir.
*
* @return true if starts with baseZnode
*/
boolean isInProcedurePath(String path) {
return path.startsWith(baseZNode);
} | 3.26 |
hbase_ZKProcedureUtil_isReachedPathNode_rdh | /**
* Is this in the procedure barrier reached znode path
*/
boolean isReachedPathNode(String path) {
    return path.startsWith(this.reachedZnode) && !path.equals(reachedZnode)
      && isMemberNode(path, reachedZnode);
} | 3.26 |
hbase_ZKProcedureUtil_isAbortNode_rdh | /**
 * Is this the exact procedure barrier abort znode
*/
boolean isAbortNode(String path) {
return path.equals(abortZnode);
} | 3.26 |
hbase_VersionModel_setOSVersion_rdh | /**
*
* @param version
* the OS version string
*/
public void setOSVersion(String version) {
this.osVersion = version;
} | 3.26 |
hbase_VersionModel_setJVMVersion_rdh | /**
*
* @param version
* the JVM version string
*/
public void setJVMVersion(String version) {
this.jvmVersion = version;
} | 3.26 |
hbase_VersionModel_setRESTVersion_rdh | /**
*
* @param version
* the REST gateway version string
*/
public void setRESTVersion(String version) {
this.restVersion = version;
} | 3.26 |
hbase_VersionModel_toString_rdh | /* (non-Javadoc)
@see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("rest ");
sb.append(restVersion);
sb.append(" [JVM: ");
sb.append(jvmVersion);
sb.append("] [OS: ");
sb.append(osVersion);
sb.append("] [Server: ");
sb.append(serverVersion);
sb.append("] [Jersey: ");sb.append(jerseyVersion);
sb.append("]\n");
return sb.toString();
} | 3.26 |
hbase_VersionModel_getServerVersion_rdh | /**
* Returns the servlet container version
*/
@XmlAttribute(name = "Server")
public String getServerVersion() {
return serverVersion;
} | 3.26 |
hbase_VersionModel_getJerseyVersion_rdh | /**
* Returns the version of the embedded Jersey framework
*/
@XmlAttribute(name = "Jersey")
public String getJerseyVersion() {
return jerseyVersion;
} | 3.26 |
hbase_VersionModel_getJVMVersion_rdh | /**
* Returns the JVM vendor and version
*/
@XmlAttribute(name = "JVM")
public String getJVMVersion() {
return jvmVersion;
} | 3.26 |
hbase_VersionModel_getRESTVersion_rdh | /**
* Returns the REST gateway version
*/
@XmlAttribute(name = "REST")
public String getRESTVersion() {
return restVersion;
} | 3.26 |
hbase_VersionModel_getOSVersion_rdh | /**
* Returns the OS name, version, and hardware architecture
*/
@XmlAttribute(name = "OS")
public String getOSVersion() {
return osVersion;
} | 3.26 |
hbase_VersionModel_setServerVersion_rdh | /**
*
* @param version
* the servlet container version string
*/
public void setServerVersion(String version) {
this.serverVersion = version;
} | 3.26 |
hbase_VersionModel_setJerseyVersion_rdh | /**
*
* @param version
* the Jersey framework version string
*/
public void setJerseyVersion(String version) {
this.jerseyVersion = version;
} | 3.26 |
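A sketch that populates the model end to end and prints the bracketed summary from toString. Values are illustrative, and the no-arg constructor is assumed from the class being a JAXB model.

import org.apache.hadoop.hbase.rest.model.VersionModel;

public class VersionModelDemo {
  public static void main(String[] args) {
    VersionModel v = new VersionModel();
    v.setRESTVersion("0.0.3");
    v.setJVMVersion(System.getProperty("java.vm.vendor") + " "
      + System.getProperty("java.version"));
    v.setOSVersion(System.getProperty("os.name") + " "
      + System.getProperty("os.version"));
    v.setServerVersion("jetty/9.4"); // illustrative
    v.setJerseyVersion("2.35");      // illustrative
    System.out.println(v); // "rest 0.0.3 [JVM: ...] [OS: ...] [Server: ...] [Jersey: ...]"
  }
}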
hbase_Result_isEmpty_rdh | /**
* Check if the underlying Cell [] is empty or not
*
* @return true if empty
*/
public boolean isEmpty() {
return (this.cells == null) || (this.cells.length == 0);
} | 3.26 |
hbase_Result_getRow_rdh | /**
* Method for retrieving the row key that corresponds to the row from which this Result was
* created.
*/
public byte[] getRow() {
if (this.row == null) {
this.row = ((this.cells == null) || (this.cells.length == 0)) ? null : CellUtil.cloneRow(this.cells[0]);
}
return this.row;
}
/**
* Return the array of Cells backing this Result instance. The array is sorted from smallest ->
* largest using the {@link CellComparator}. The array only contains what your Get or Scan
 * specifies and no more. For example, if you request column "A" with 1 version you will have at
 * most 1 Cell in the array. If you request column "A" with 2 versions you will have at most 2
 * Cells, with
* the first one being the newer timestamp and the second being the older timestamp (this is the
* sort order defined by {@link CellComparator} | 3.26 |
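A caller-side sketch tying isEmpty, getRow and rawCells together; Result.EMPTY_RESULT is assumed public, as the copyFrom javadoc below implies.

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Result;

public class ResultDemo {
  // Sketch: consume a Result returned by a Get or Scan.
  static void dump(Result r) {
    if (r.isEmpty()) {
      return;
    }
    byte[] row = r.getRow();
    System.out.println("row key bytes: " + row.length
      + ", cells: " + r.rawCells().length);
    for (Cell cell : r.rawCells()) {
      // cells are sorted per CellComparator; newer versions of a column come first
    }
  }

  public static void main(String[] args) {
    dump(Result.EMPTY_RESULT); // immutable empty result, referenced by copyFrom's javadoc
  }
}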
hbase_Result_copyFrom_rdh | /**
* Copy another Result into this one. Needed for the old Mapred framework
*
* @throws UnsupportedOperationException
* if invoked on instance of EMPTY_RESULT (which is supposed
* to be immutable).
*/
public void copyFrom(Result other) {
m1();
this.row = null;
    this.familyMap = null;
    this.cells = other.cells;
  }