name | code_snippet | score
---|---|---
hbase_HRegionServer_closeRegionIgnoreErrors_rdh | /**
* Try to close the region, logs a warning on failure but continues.
*
* @param region
* Region to close
*/
private void closeRegionIgnoreErrors(RegionInfo region, final boolean abort) {
try {
    if (!closeRegion(region.getEncodedName(), abort, null)) {
      LOG.warn("Failed to close " + region.getRegionNameAsString() + " - ignoring and continuing");
}
} catch (IOException e) {
LOG.warn(("Failed to close " + region.getRegionNameAsString()) + " - ignoring and continuing", e);
}
} | 3.26 |
hbase_HRegionServer_buildRegionSpaceUseReportRequest_rdh | /**
* Builds a {@link RegionSpaceUseReportRequest} protobuf message from the region size map.
*
* @param regionSizes
* The size in bytes of regions
* @return The corresponding protocol buffer message.
*/
RegionSpaceUseReportRequest buildRegionSpaceUseReportRequest(RegionSizeStore regionSizes) {
RegionSpaceUseReportRequest.Builder request = RegionSpaceUseReportRequest.newBuilder();
for (Entry<RegionInfo, RegionSize> entry : regionSizes) {
request.addSpaceUse(convertRegionSize(entry.getKey(), entry.getValue().getSize()));
}
return request.build();
} | 3.26 |
hbase_HRegionServer_getRegion_rdh | /**
 * Protected utility method for safely obtaining an HRegion handle.
*
* @param regionName
* Name of online {@link HRegion} to return
* @return {@link HRegion} for <code>regionName</code>
*/
protected HRegion getRegion(final byte[] regionName) throws NotServingRegionException {
String encodedRegionName = RegionInfo.encodeRegionName(regionName);
return getRegionByEncodedName(regionName, encodedRegionName);
} | 3.26 |
hbase_HRegionServer_getCompactSplitThread_rdh | /**
 * Returns the underlying {@link CompactSplit} for the server
*/
public CompactSplit getCompactSplitThread() {
return this.compactSplitThread;
} | 3.26 |
hbase_HRegionServer_preRegistrationInitialization_rdh | /**
* All initialization needed before we go register with Master.<br>
* Do bare minimum. Do bulk of initializations AFTER we've connected to the Master.<br>
* In here we just put up the RpcServer, setup Connection, and ZooKeeper.
*/
private void preRegistrationInitialization() {
final Span span = TraceUtil.createSpan("HRegionServer.preRegistrationInitialization");
try (Scope ignored = span.makeCurrent()) {
m0();
setupClusterConnection();
bootstrapNodeManager = new BootstrapNodeManager(asyncClusterConnection, masterAddressTracker);
regionReplicationBufferManager = new RegionReplicationBufferManager(this);
// Setup RPC client for master communication
this.rpcClient = asyncClusterConnection.getRpcClient();
span.setStatus(StatusCode.OK);
} catch (Throwable t) {
// Call stop if error or process will stick around for ever since server
// puts up non-daemon threads.
TraceUtil.setError(span, t);
this.rpcServices.stop();
abort("Initialization of RS failed. Hence aborting RS.",
t);
} finally {span.end();
}
} | 3.26 |
hbase_HRegionServer_abort_rdh | /**
 * Cause the server to exit without closing the regions it is serving, the log it is using and
 * without notifying the master. Used for unit testing and on catastrophic events such as HDFS
 * being yanked out from under hbase or an OOME.
 *
 * @param reason
 *          the reason we are aborting
 * @param cause
 *          the exception that caused the abort, or null
*/
@Override
public void abort(String reason, Throwable cause) {
if (!setAbortRequested()) {
// Abort already in progress, ignore the new request.
LOG.debug("Abort already in progress. Ignoring the current request with reason: {}", reason);
return;
}
    String msg = "***** ABORTING region server " + this + ": " + reason + " *****";
    if (cause != null) {
LOG.error(HBaseMarkers.FATAL, msg, cause);
} else {
LOG.error(HBaseMarkers.FATAL, msg);
}
// HBASE-4014: show list of coprocessors that were loaded to help debug
    // regionserver crashes. Note that we're implicitly using
// java.util.HashSet's toString() method to print the coprocessor names.
LOG.error(HBaseMarkers.FATAL, "RegionServer abort: loaded coprocessors are: " + CoprocessorHost.getLoadedCoprocessors());
// Try and dump metrics if abort -- might give clue as to how fatal came about....
try {
LOG.info("Dump of metrics as JSON on abort: " + DumpRegionServerMetrics.dumpMetrics());
    } catch (MalformedObjectNameException | IOException e) {
LOG.warn("Failed dumping metrics", e);
}
// Do our best to report our abort to the master, but this may not work
try {
if (cause != null) {
msg += "\nCause:\n" + Throwables.getStackTraceAsString(cause);
}
// Report to the master but only if we have already registered with the master.
RegionServerStatusService.BlockingInterface rss = f0;
if ((rss != null) && (this.serverName != null)) {
ReportRSFatalErrorRequest.Builder builder = ReportRSFatalErrorRequest.newBuilder();
builder.setServer(ProtobufUtil.toServerName(this.serverName));
builder.setErrorMessage(msg);
rss.reportRSFatalError(null, builder.build());
}
} catch (Throwable t) {
LOG.warn("Unable to report fatal error to master", t);
}
scheduleAbortTimer();
// shutdown should be run as the internal user
stop(reason, true, null);
} | 3.26 |
hbase_HRegionServer_createNewReplicationInstance_rdh | //
// Main program and support routines
//
/**
* Load the replication executorService objects, if any
*/
private static void createNewReplicationInstance(Configuration conf, HRegionServer server, FileSystem walFs, Path walDir, Path oldWALDir, WALFactory walFactory) throws IOException {
// read in the name of the source replication class from the config file.
String sourceClassname = conf.get(HConstants.REPLICATION_SOURCE_SERVICE_CLASSNAME, HConstants.REPLICATION_SERVICE_CLASSNAME_DEFAULT);
// read in the name of the sink replication class from the config file.
String v231 = conf.get(HConstants.REPLICATION_SINK_SERVICE_CLASSNAME, HConstants.REPLICATION_SINK_SERVICE_CLASSNAME_DEFAULT);
// If both the sink and the source class names are the same, then instantiate
// only one object.
if (sourceClassname.equals(v231)) {
server.replicationSourceHandler = newReplicationInstance(sourceClassname, ReplicationSourceService.class, conf, server, walFs, walDir,
oldWALDir, walFactory);
server.replicationSinkHandler = ((ReplicationSinkService) (server.replicationSourceHandler));
server.sameReplicationSourceAndSink = true;
} else {
server.replicationSourceHandler = newReplicationInstance(sourceClassname, ReplicationSourceService.class, conf, server, walFs, walDir, oldWALDir, walFactory);
server.replicationSinkHandler = newReplicationInstance(v231, ReplicationSinkService.class, conf, server, walFs, walDir, oldWALDir, walFactory);
server.sameReplicationSourceAndSink = false;
}} | 3.26 |
hbase_HRegionServer_getWriteRequestCount_rdh | /**
* Returns Current write count for all online regions.
*/
private long getWriteRequestCount() {
long writeCount = 0;
for (Map.Entry<String, HRegion> e : this.onlineRegions.entrySet()) {
      writeCount += e.getValue().getWriteRequestsCount();
    }
return writeCount;
} | 3.26 |
hbase_HRegionServer_getMobFileCache_rdh | /**
 * May be empty if this is a master which does not carry tables.
*
* @return The cache for mob files used by the regionserver.
*/
@Override
public Optional<MobFileCache> getMobFileCache() {
return Optional.ofNullable(this.mobFileCache);
} | 3.26 |
hbase_HRegionServer_checkFileSystem_rdh | /**
* Checks to see if the file system is still accessible. If not, sets abortRequested and
* stopRequested
*
* @return false if file system is not available
*/
boolean checkFileSystem() {
    if (this.dataFsOk && (this.dataFs != null)) {
try {
FSUtils.checkFileSystemAvailable(this.dataFs);
} catch (IOException e) {
abort("File System not available",
e);
this.dataFsOk = false;
}
}
return this.dataFsOk;
} | 3.26 |
hbase_HRegionServer_startReplicationService_rdh | /**
* Start up replication source and sink handlers.
*/
private void startReplicationService() throws IOException {
if (sameReplicationSourceAndSink && (this.replicationSourceHandler != null)) {
this.replicationSourceHandler.startReplicationService();
} else {
      if (this.replicationSourceHandler != null) {
        this.replicationSourceHandler.startReplicationService();
}
if (this.replicationSinkHandler != null) {
this.replicationSinkHandler.startReplicationService();
}
}
} | 3.26 |
hbase_HRegionServer_scheduleAbortTimer_rdh | // Limits the time spent in the shutdown process.
private void scheduleAbortTimer() {
if (this.abortMonitor == null) {
this.abortMonitor = new Timer("Abort regionserver monitor", true);
TimerTask abortTimeoutTask = null;
try {
Constructor<? extends TimerTask> timerTaskCtor =
Class.forName(conf.get(ABORT_TIMEOUT_TASK, HRegionServer.SystemExitWhenAbortTimeout.class.getName())).asSubclass(TimerTask.class).getDeclaredConstructor();
timerTaskCtor.setAccessible(true);
abortTimeoutTask = timerTaskCtor.newInstance();
} catch (Exception e) {
LOG.warn("Initialize abort timeout task failed", e);
}
if (abortTimeoutTask != null) {
        abortMonitor.schedule(abortTimeoutTask, conf.getLong(ABORT_TIMEOUT, DEFAULT_ABORT_TIMEOUT));
      }
}
} | 3.26 |
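The pattern above, loading a TimerTask subclass by name via reflection and scheduling it on a daemon Timer, can be shown in isolation. A minimal sketch, assuming a hypothetical default task class and a made-up delay; none of the names below are HBase configuration keys.

```java
import java.lang.reflect.Constructor;
import java.util.Timer;
import java.util.TimerTask;

public class AbortTimerSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical default; in HBase the class name comes from configuration.
    String taskClassName = args.length > 0 ? args[0] : DefaultTimeoutTask.class.getName();

    // Resolve the configured class and make sure it really is a TimerTask.
    Constructor<? extends TimerTask> ctor =
      Class.forName(taskClassName).asSubclass(TimerTask.class).getDeclaredConstructor();
    ctor.setAccessible(true);
    TimerTask task = ctor.newInstance();

    // Daemon timer so the monitor cannot keep the JVM alive on its own.
    Timer abortMonitor = new Timer("Abort monitor", true);
    abortMonitor.schedule(task, 1200L); // delay in ms; hypothetical timeout

    Thread.sleep(2000L); // keep the demo alive long enough for the task to fire
    abortMonitor.cancel();
  }

  /** Hypothetical task that would run when the abort timeout expires. */
  public static class DefaultTimeoutTask extends TimerTask {
    @Override
    public void run() {
      System.out.println("Abort timed out; a real implementation might call System.exit");
    }
  }
}
```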
hbase_HRegionServer_stop_rdh | /**
* Stops the regionserver.
*
* @param msg
* Status message
* @param force
* True if this is a regionserver abort
* @param user
* The user executing the stop request, or null if no user is associated
 */
public void stop(final String msg, final boolean force, final User user) {
if (!this.stopped) {
LOG.info(("***** STOPPING region server '" + this) + "' *****");
    if (this.rsHost != null) {
// when forced via abort don't allow CPs to override
try {
this.rsHost.preStop(msg, user);
} catch (IOException ioe) {
if (!force) {
LOG.warn("The region server did not stop", ioe);
return;
}
LOG.warn("Skipping coprocessor exception on preStop() due to forced shutdown",
ioe);
}
}
this.stopped = true;
LOG.info("STOPPED: " + msg);
// Wakes run() if it is sleeping
sleeper.skipSleepCycle();
}
} | 3.26 |
hbase_HRegionServer_isDataFileSystemOk_rdh | /**
* Returns {@code true} when the data file system is available, {@code false} otherwise.
*/
boolean isDataFileSystemOk() {
return this.dataFsOk;
} | 3.26 |
hbase_HRegionServer_convertRegionSize_rdh | /**
* Converts a pair of {@link RegionInfo} and {@code long} into a {@link RegionSpaceUse} protobuf
* message.
*
* @param regionInfo
* The RegionInfo
* @param sizeInBytes
* The size in bytes of the Region
* @return The protocol buffer
*/
RegionSpaceUse convertRegionSize(RegionInfo regionInfo, Long sizeInBytes) {
return RegionSpaceUse.newBuilder().setRegionInfo(ProtobufUtil.toRegionInfo(Objects.requireNonNull(regionInfo))).setRegionSize(Objects.requireNonNull(sizeInBytes)).build();
} | 3.26 |
hbase_HRegionServer_startServices_rdh | /**
* Start maintenance Threads, Server, Worker and lease checker threads. Start all threads we need
* to run. This is called after we've successfully registered with the Master. Install an
* UncaughtExceptionHandler that calls abort of RegionServer if we get an unhandled exception. We
* cannot set the handler on all threads. Server's internal Listener thread is off limits. For
* Server, if an OOME, it waits a while then retries. Meantime, a flush or a compaction that tries
* to run should trigger same critical condition and the shutdown will run. On its way out, this
 * server will shut down Server. Leases are sort of in between. It has an internal thread that,
 * while it inherits from Chore, keeps its own internal stop mechanism, so it needs to be stopped
 * by this hosting server. Worker logs the exception and exits.
*/
private void startServices() throws IOException {
if ((!isStopped()) && (!isAborted())) {
initializeThreads();
}
    this.secureBulkLoadManager = new SecureBulkLoadManager(this.conf, asyncClusterConnection);
    this.secureBulkLoadManager.start();
// Health checker thread.
if (isHealthCheckerConfigured()) {
int sleepTime = this.conf.getInt(HConstants.HEALTH_CHORE_WAKE_FREQ, HConstants.DEFAULT_THREAD_WAKE_FREQUENCY);
      healthCheckChore = new HealthCheckChore(sleepTime, this, getConfiguration());
}
// Executor status collect thread.
if (this.conf.getBoolean(HConstants.EXECUTOR_STATUS_COLLECT_ENABLED, HConstants.DEFAULT_EXECUTOR_STATUS_COLLECT_ENABLED)) {
int sleepTime = this.conf.getInt(ExecutorStatusChore.WAKE_FREQ, ExecutorStatusChore.DEFAULT_WAKE_FREQ);
executorStatusChore = new ExecutorStatusChore(sleepTime, this, this.getExecutorService(), this.metricsRegionServer.getMetricsSource());
}
this.walRoller = new LogRoller(this);
this.flushThroughputController = FlushThroughputControllerFactory.create(this, conf);
this.procedureResultReporter = new RemoteProcedureResultReporter(this);
// Create the CompactedFileDischarger chore executorService. This chore helps to
// remove the compacted files that will no longer be used in reads.
// Default is 2 mins. The default value for TTLCleaner is 5 mins so we set this to
// 2 mins so that compacted files can be archived before the TTLCleaner runs
    int cleanerInterval = conf.getInt("hbase.hfile.compaction.discharger.interval", (2 * 60) * 1000);
this.compactedFileDischarger = new CompactedHFilesDischarger(cleanerInterval, this, this);
choreService.scheduleChore(compactedFileDischarger);
// Start executor services
final int openRegionThreads = conf.getInt("hbase.regionserver.executor.openregion.threads", 3);
executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_OPEN_REGION).setCorePoolSize(openRegionThreads));
    final int openMetaThreads = conf.getInt("hbase.regionserver.executor.openmeta.threads", 1);
    executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_OPEN_META).setCorePoolSize(openMetaThreads));
final int openPriorityRegionThreads = conf.getInt("hbase.regionserver.executor.openpriorityregion.threads", 3);
executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_OPEN_PRIORITY_REGION).setCorePoolSize(openPriorityRegionThreads));
    final int closeRegionThreads = conf.getInt("hbase.regionserver.executor.closeregion.threads", 3);
executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_CLOSE_REGION).setCorePoolSize(closeRegionThreads));
final int closeMetaThreads = conf.getInt("hbase.regionserver.executor.closemeta.threads", 1);
executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_CLOSE_META).setCorePoolSize(closeMetaThreads));
if (conf.getBoolean(StoreScanner.STORESCANNER_PARALLEL_SEEK_ENABLE, false)) {
final int storeScannerParallelSeekThreads =
conf.getInt("hbase.storescanner.parallel.seek.threads", 10);
executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_PARALLEL_SEEK).setCorePoolSize(storeScannerParallelSeekThreads).setAllowCoreThreadTimeout(true));
}
final int logReplayOpsThreads = conf.getInt(HBASE_SPLIT_WAL_MAX_SPLITTER, DEFAULT_HBASE_SPLIT_WAL_MAX_SPLITTER);
    executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_LOG_REPLAY_OPS).setCorePoolSize(logReplayOpsThreads).setAllowCoreThreadTimeout(true));
    // Start the threads for compacted files discharger
final int compactionDischargerThreads = conf.getInt(CompactionConfiguration.HBASE_HFILE_COMPACTION_DISCHARGER_THREAD_COUNT, 10);
    executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_COMPACTED_FILES_DISCHARGER).setCorePoolSize(compactionDischargerThreads));
    if (ServerRegionReplicaUtil.isRegionReplicaWaitForPrimaryFlushEnabled(conf)) {
final int regionReplicaFlushThreads = conf.getInt("hbase.regionserver.region.replica.flusher.threads", conf.getInt("hbase.regionserver.executor.openregion.threads", 3));
executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_REGION_REPLICA_FLUSH_OPS).setCorePoolSize(regionReplicaFlushThreads));
}
final int refreshPeerThreads = conf.getInt("hbase.regionserver.executor.refresh.peer.threads", 2);
executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_REFRESH_PEER).setCorePoolSize(refreshPeerThreads));
final int replaySyncReplicationWALThreads = conf.getInt("hbase.regionserver.executor.replay.sync.replication.wal.threads", 1);
executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_REPLAY_SYNC_REPLICATION_WAL).setCorePoolSize(replaySyncReplicationWALThreads));
final int switchRpcThrottleThreads = conf.getInt("hbase.regionserver.executor.switch.rpc.throttle.threads", 1);
executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_SWITCH_RPC_THROTTLE).setCorePoolSize(switchRpcThrottleThreads));
final int claimReplicationQueueThreads = conf.getInt("hbase.regionserver.executor.claim.replication.queue.threads", 1);
executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_CLAIM_REPLICATION_QUEUE).setCorePoolSize(claimReplicationQueueThreads));
final int rsSnapshotOperationThreads = conf.getInt("hbase.regionserver.executor.snapshot.operations.threads", 3);
executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_SNAPSHOT_OPERATIONS).setCorePoolSize(rsSnapshotOperationThreads));
final int v158 = conf.getInt("hbase.regionserver.executor.flush.operations.threads", 3);
executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_FLUSH_OPERATIONS).setCorePoolSize(v158));
Threads.setDaemonThreadRunning(this.walRoller, getName() + ".logRoller", uncaughtExceptionHandler);
if (this.cacheFlusher != null) {
this.cacheFlusher.start(uncaughtExceptionHandler);
}
Threads.setDaemonThreadRunning(this.procedureResultReporter, getName() + ".procedureResultReporter", uncaughtExceptionHandler);
    if (this.compactionChecker != null) {
      choreService.scheduleChore(compactionChecker);
}
if (this.periodicFlusher != null) {
choreService.scheduleChore(periodicFlusher);
}
if (this.healthCheckChore != null) {
choreService.scheduleChore(healthCheckChore);
}
if (this.executorStatusChore != null) {
choreService.scheduleChore(executorStatusChore);
}
if (this.nonceManagerChore != null) {
choreService.scheduleChore(nonceManagerChore);
}
if (this.storefileRefresher != null) {
choreService.scheduleChore(storefileRefresher);
    }
    if (this.fsUtilizationChore != null) {
choreService.scheduleChore(fsUtilizationChore);
}
if (this.namedQueueServiceChore != null) {
choreService.scheduleChore(namedQueueServiceChore);
}
if (this.brokenStoreFileCleaner != null) {
choreService.scheduleChore(brokenStoreFileCleaner);
}
    if (this.rsMobFileCleanerChore != null) {
choreService.scheduleChore(rsMobFileCleanerChore);
}
if (f1 != null) {
LOG.info("Starting replication marker chore");
choreService.scheduleChore(f1);
}
// Leases is not a Thread. Internally it runs a daemon thread. If it gets
// an unhandled exception, it will just exit.
Threads.setDaemonThreadRunning(this.leaseManager, getName() + ".leaseChecker", uncaughtExceptionHandler);
// Create the log splitting worker and start it
// set a smaller retries to fast fail otherwise splitlogworker could be blocked for
// quite a while inside Connection layer. The worker won't be available for other
// tasks even after current task is preempted after a split task times out.
Configuration v159 = HBaseConfiguration.create(conf);
v159.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, conf.getInt("hbase.log.replay.retries.number", 8));// 8 retries take about 23 seconds
v159.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, conf.getInt("hbase.log.replay.rpc.timeout", 30000));// default 30 seconds
v159.setInt(HConstants.HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER, 1);
    // SplitLogWorker needs csm. If none, don't start this.
    if ((this.csm != null) && conf.getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK)) {
this.splitLogWorker = new SplitLogWorker(v159, this, this, walFactory);
splitLogWorker.start();
LOG.debug("SplitLogWorker started");
}
// Memstore services.
startHeapMemoryManager();
// Call it after starting HeapMemoryManager.
initializeMemStoreChunkCreator(hMemManager);
} | 3.26 |
hbase_HRegionServer_isOnline_rdh | /**
* Report the status of the server. A server is online once all the startup is completed (setting
* up filesystem, starting executorService threads, etc.). This method is designed mostly to be
* useful in tests.
*
* @return true if online, false if not.
*/
public boolean isOnline() {
return online.get();
} | 3.26 |
hbase_HRegionServer_getMasterAddressTracker_rdh | /**
* Returns Master address tracker instance.
*/
public MasterAddressTracker getMasterAddressTracker() {
return this.masterAddressTracker;
} | 3.26 |
hbase_HRegionServer_isHealthy_rdh | /* Verify that server is healthy */
private boolean isHealthy() {
if (!dataFsOk) {
// File system problem
return false;
}
// Verify that all threads are alive
    boolean healthy = ((this.leaseManager == null) || this.leaseManager.isAlive())
      && ((this.cacheFlusher == null) || this.cacheFlusher.isAlive())
      && ((this.walRoller == null) || this.walRoller.isAlive())
      && ((this.compactionChecker == null) || this.compactionChecker.isScheduled())
      && ((this.periodicFlusher == null) || this.periodicFlusher.isScheduled());
if (!healthy) {
stop("One or more threads are no longer alive -- stop");
}
return healthy;
} | 3.26 |
hbase_HRegionServer_isClusterUp_rdh | /**
* Returns True if the cluster is up.
*/
@Override
public boolean isClusterUp() {
return this.masterless || ((this.clusterStatusTracker != null) && this.clusterStatusTracker.isClusterUp());
} | 3.26 |
hbase_HRegionServer_finishRegionProcedure_rdh | /**
* See {@link #submitRegionProcedure(long)}.
*
* @param procId
* the id of the open/close region procedure
*/
public void finishRegionProcedure(long procId) {
executedRegionProcedures.put(procId, procId);
submittedRegionProcedures.remove(procId);
} | 3.26 |
hbase_HRegionServer_skipReportingTransition_rdh | /**
* Helper method for use in tests. Skip the region transition report when there's no master around
* to receive it.
*/
private boolean skipReportingTransition(final RegionStateTransitionContext context) {
final TransitionCode code = context.getCode();
final long openSeqNum = context.getOpenSeqNum();
long masterSystemTime = context.getMasterSystemTime();
final RegionInfo[] hris = context.getHris();
    if (code == TransitionCode.OPENED) {
Preconditions.checkArgument((hris != null) && (hris.length == 1));
if (hris[0].isMetaRegion()) {
LOG.warn("meta table location is stored in master local store, so we can not skip reporting");
return false;
} else {
try {
MetaTableAccessor.updateRegionLocation(asyncClusterConnection.toConnection(), hris[0], serverName, openSeqNum, masterSystemTime);
} catch (IOException e) {
LOG.info("Failed to update meta", e);
return false;
}
}
}
return true;
} | 3.26 |
hbase_JMXJsonServlet_init_rdh | /**
* Initialize this servlet.
*/
@Override
public void init() throws ServletException {
// Retrieve the MBean server
mBeanServer = ManagementFactory.getPlatformMBeanServer();
this.jsonBeanWriter = new JSONBean();
} | 3.26 |
hbase_JMXJsonServlet_doGet_rdh | /**
 * Process a GET request for the specified resource.
 *
 * @param request
 *          The servlet request we are processing
 * @param response
 *          The servlet response we are creating
*/
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException {
try {
if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), request, response)) {
return;
}
String jsonpcb = null;
PrintWriter writer = null;
JSONBean.Writer beanWriter = null;
try {
jsonpcb = checkCallbackName(request.getParameter(CALLBACK_PARAM));
writer = response.getWriter();
// "callback" parameter implies JSONP outpout
if (jsonpcb != null) {
response.setContentType("application/javascript; charset=utf8");
writer.write(jsonpcb + "(");
} else {
response.setContentType("application/json; charset=utf8");
}
beanWriter = this.jsonBeanWriter.open(writer);
// Should we output description on each attribute and bean?
boolean description = "true".equals(request.getParameter(INCLUDE_DESCRIPTION));
// query per mbean attribute
String getmethod = request.getParameter("get");
if (getmethod != null) {
List<String> splitStrings = Splitter.onPattern("\\:\\:").splitToList(getmethod);
        if (splitStrings.size() != 2) {
beanWriter.write("result", "ERROR");
beanWriter.write("message", "query format is not as expected.");
          beanWriter.flush();
          response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
          return;
        }
Iterator<String> i = splitStrings.iterator();
if (beanWriter.write(this.mBeanServer, new ObjectName(i.next()), i.next(), description) != 0) {
beanWriter.flush();
response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
}
return;
}
// query per mbean
String qry = request.getParameter("qry");
if (qry == null) {
qry = "*:*";
}
String excl = request.getParameter("excl");
ObjectName excluded = (excl == null) ? null : new ObjectName(excl);
if (beanWriter.write(this.mBeanServer, new ObjectName(qry), null, description, excluded) != 0) {
beanWriter.flush();
response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
}
} finally {
if (beanWriter != null) {
beanWriter.close();
}
if (jsonpcb != null) {
writer.write(");");
}
        if (writer != null) {
writer.close();
}
}
} catch (IOException e) {
LOG.error("Caught an exception while processing JMX request", e);
response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
} catch (MalformedObjectNameException e) {
LOG.error("Caught an exception while processing JMX request", e);
response.sendError(HttpServletResponse.SC_BAD_REQUEST);
}
} | 3.26 |
hbase_JMXJsonServlet_checkCallbackName_rdh | /**
* Verifies that the callback property, if provided, is purely alphanumeric. This prevents a
* malicious callback name (that is javascript code) from being returned by the UI to an
* unsuspecting user.
*
* @param callbackName
* The callback name, can be null.
* @return The callback name
* @throws IOException
* If the name is disallowed.
*/
private String checkCallbackName(String callbackName) throws IOException {
    if (null == callbackName) {
return null;
}
    if (callbackName.matches("[A-Za-z0-9_]+")) {
return callbackName;
}
throw new IOException("'callback' must be alphanumeric");
} | 3.26 |
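The whitelist-style validation above is easy to demonstrate on its own: only alphanumerics and underscores pass, anything else is rejected so a malicious JSONP callback cannot smuggle script into the response. A standalone sketch with illustrative messages, not the servlet's actual API:

```java
import java.io.IOException;

public class CallbackNameCheck {
  /** Accepts null (no callback) or a purely alphanumeric/underscore name; rejects anything else. */
  static String checkCallbackName(String callbackName) throws IOException {
    if (callbackName == null) {
      return null;
    }
    if (callbackName.matches("[A-Za-z0-9_]+")) {
      return callbackName;
    }
    throw new IOException("'callback' must be alphanumeric");
  }

  public static void main(String[] args) throws IOException {
    System.out.println(checkCallbackName("renderJmx_1")); // ok
    try {
      checkCallbackName("alert(1)//"); // rejected: would allow script injection
    } catch (IOException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}
```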
hbase_FirstKeyOnlyFilter_hasFoundKV_rdh | /**
* Returns true if first KV has been found.
*/
protected boolean hasFoundKV() {
return this.foundKV;
} | 3.26 |
hbase_FirstKeyOnlyFilter_parseFrom_rdh | /**
* Parse a serialized representation of {@link FirstKeyOnlyFilter}
*
* @param pbBytes
* A pb serialized {@link FirstKeyOnlyFilter} instance
* @return An instance of {@link FirstKeyOnlyFilter} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static FirstKeyOnlyFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
// There is nothing to deserialize. Why do this at all?
try {
FilterProtos.FirstKeyOnlyFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
// Just return a new instance.
return new FirstKeyOnlyFilter();
} | 3.26 |
hbase_FirstKeyOnlyFilter_toByteArray_rdh | /**
* Returns The filter serialized using pb
*/
@Override
public byte[] toByteArray() {
FilterProtos.FirstKeyOnlyFilter.Builder builder = FilterProtos.FirstKeyOnlyFilter.newBuilder();
return builder.build().toByteArray();
} | 3.26 |
hbase_FirstKeyOnlyFilter_m0_rdh | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean m0(Filter o) {
    if (o == this) {
return true;
}
if (!(o instanceof FirstKeyOnlyFilter)) {
return false;
}
return true;
} | 3.26 |
hbase_FirstKeyOnlyFilter_setFoundKV_rdh | /**
* Set or clear the indication if the first KV has been found.
*
* @param value
* update {@link #foundKV} flag with value.
*/
protected void setFoundKV(boolean value) {
this.foundKV = value;
} | 3.26 |
hbase_WALCellCodec_create_rdh | /**
* Create and setup a {@link WALCellCodec} from the CompressionContext. Cell Codec classname is
* read from {@link Configuration}. Fully prepares the codec for use.
*
* @param conf
* {@link Configuration} to read for the user-specified codec. If none is
* specified, uses a {@link WALCellCodec}.
* @param compression
* compression the codec should use
* @return a {@link WALCellCodec} ready for use.
* @throws UnsupportedOperationException
* if the codec cannot be instantiated
*/
public static WALCellCodec create(Configuration conf, CompressionContext compression) throws UnsupportedOperationException {
String cellCodecClsName = getWALCellCodecClass(conf).getName();
return ReflectionUtils.instantiateWithCustomCtor(cellCodecClsName, new Class[]{ Configuration.class, CompressionContext.class }, new Object[]{ conf, compression });
} | 3.26 |
hbase_RpcExecutor_m0_rdh | /**
* Start up our handlers.
*/
protected void m0(final String nameSuffix, final int numHandlers,
    final List<BlockingQueue<CallRunner>> callQueues, final int qindex, final int qsize,
    final int port, final AtomicInteger activeHandlerCount) {
    final String threadPrefix = name + Strings.nullToEmpty(nameSuffix);
double v9 = (f1 == null) ? 1.0 : f1.getDouble(HConstants.REGION_SERVER_HANDLER_ABORT_ON_ERROR_PERCENT, HConstants.DEFAULT_REGION_SERVER_HANDLER_ABORT_ON_ERROR_PERCENT);
for (int i = 0; i < numHandlers; i++) {
      final int index = qindex + (i % qsize);
String name = (((((("RpcServer." + threadPrefix) + ".handler=") + handlers.size()) + ",queue=") + index) + ",port=") + port;
RpcHandler handler = getHandler(name, v9, handlerCount, callQueues.get(index), activeHandlerCount, failedHandlerCount, abortable);
handler.start();
handlers.add(handler);
}
LOG.debug("Started handlerCount={} with threadPrefix={}, numCallQueues={}, port={}", handlers.size(), threadPrefix, qsize, port);
} | 3.26 |
hbase_RpcExecutor_getRpcCallSize_rdh | /**
* Return the {@link RpcCall#getSize()} from {@code callRunner} or 0L.
 */
private static long getRpcCallSize(final CallRunner callRunner) {
return Optional.ofNullable(callRunner).map(CallRunner::getRpcCall).map(RpcCall::getSize).orElse(0L);
} | 3.26 |
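The null-safe extraction above is just Optional.map chaining with a default. A self-contained sketch using hypothetical placeholder records in place of the real CallRunner/RpcCall types:

```java
import java.util.Optional;

public class OptionalChainSketch {
  // Hypothetical stand-ins for CallRunner and RpcCall.
  record Call(long size) {}
  record Runner(Call call) {}

  static long getCallSize(Runner runner) {
    // Each map() step is skipped if the previous step produced null/empty.
    return Optional.ofNullable(runner).map(Runner::call).map(Call::size).orElse(0L);
  }

  public static void main(String[] args) {
    System.out.println(getCallSize(new Runner(new Call(128)))); // 128
    System.out.println(getCallSize(new Runner(null)));          // 0 (no call attached)
    System.out.println(getCallSize(null));                      // 0 (no runner at all)
  }
}
```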
hbase_RpcExecutor_getHandler_rdh | /**
* Override if providing alternate Handler implementation.
*/
protected RpcHandler getHandler(final String name, final double handlerFailureThreshhold,
    final int handlerCount, final BlockingQueue<CallRunner> q,
    final AtomicInteger activeHandlerCount, final AtomicInteger failedHandlerCount,
    final Abortable abortable) {
return new RpcHandler(name, handlerFailureThreshhold, handlerCount, q, activeHandlerCount, failedHandlerCount, abortable);
} | 3.26 |
hbase_RpcExecutor_getMethodName_rdh | /**
* Return the {@link Descriptors.MethodDescriptor#getName()} from {@code callRunner} or "Unknown".
*/
private static String getMethodName(final CallRunner callRunner) {
return Optional.ofNullable(callRunner).map(CallRunner::getRpcCall).map(RpcCall::getMethod).map(Descriptors.MethodDescriptor::getName).orElse("Unknown");
} | 3.26 |
hbase_RpcExecutor_getQueues_rdh | /**
* Returns the list of request queues
*/
protected List<BlockingQueue<CallRunner>> getQueues() {
return f0;
} | 3.26 |
hbase_RpcExecutor_resizeQueues_rdh | /**
* Update current soft limit for executor's call queues
*
* @param conf
* updated configuration
*/
public void resizeQueues(Configuration conf) {
String configKey = RpcScheduler.IPC_SERVER_MAX_CALLQUEUE_LENGTH;
if (name != null) {
if (name.toLowerCase(Locale.ROOT).contains("priority")) {
configKey = RpcScheduler.IPC_SERVER_PRIORITY_MAX_CALLQUEUE_LENGTH;
} else if (name.toLowerCase(Locale.ROOT).contains("replication")) {
configKey = RpcScheduler.IPC_SERVER_REPLICATION_MAX_CALLQUEUE_LENGTH;
} else if (name.toLowerCase(Locale.ROOT).contains("bulkload")) {
configKey = RpcScheduler.IPC_SERVER_BULKLOAD_MAX_CALLQUEUE_LENGTH;
      }
    }
final int queueLimit = currentQueueLimit;
currentQueueLimit = conf.getInt(configKey, queueLimit);
} | 3.26 |
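The key selection above is plain substring matching on the executor's name. A standalone sketch with hypothetical property names standing in for the RpcScheduler constants:

```java
import java.util.Locale;

public class QueueLimitKeySketch {
  // Hypothetical property names; the real constants live in RpcScheduler.
  static final String DEFAULT_KEY = "ipc.server.max.callqueue.length";
  static final String PRIORITY_KEY = "ipc.server.priority.max.callqueue.length";
  static final String REPLICATION_KEY = "ipc.server.replication.max.callqueue.length";

  /** Picks the config key whose queue family matches the executor name. */
  static String configKeyFor(String executorName) {
    String key = DEFAULT_KEY;
    if (executorName != null) {
      String lower = executorName.toLowerCase(Locale.ROOT);
      if (lower.contains("priority")) {
        key = PRIORITY_KEY;
      } else if (lower.contains("replication")) {
        key = REPLICATION_KEY;
      }
    }
    return key;
  }

  public static void main(String[] args) {
    System.out.println(configKeyFor("priority.RWQ.Fifo")); // priority key
    System.out.println(configKeyFor("default.FPBQ.Fifo")); // default key
  }
}
```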
hbase_RpcExecutor_getQueueLength_rdh | /**
* Returns the length of the pending queue
*/
public int getQueueLength() {
int length = 0;
for (final BlockingQueue<CallRunner> queue : f0) {
length += queue.size();
}
return length;
} | 3.26 |
hbase_RpcCallContext_getRequestUserName_rdh | /**
* Returns Current request's user name or not present if none ongoing.
*/
default Optional<String> getRequestUserName() {
return getRequestUser().map(User::getShortName);
} | 3.26 |
hbase_RpcServer_getService_rdh | /**
*
* @param serviceName
* Some arbitrary string that represents a 'service'.
* @param services
* Available services and their service interfaces.
* @return BlockingService that goes with the passed <code>serviceName</code>
*/
protected static BlockingService getService(final List<BlockingServiceAndInterface> services, final String serviceName) {
BlockingServiceAndInterface v54 = getServiceAndInterface(services, serviceName);
return v54 == null ? null : v54.getBlockingService();
} | 3.26 |
hbase_RpcServer_getMetrics_rdh | /**
* Returns the metrics instance for reporting RPC call statistics
*/
@Override
public MetricsHBaseServer getMetrics() {
return metrics;
} | 3.26 |
hbase_RpcServer_getCurrentServerCallWithCellScanner_rdh | /**
* Just return the current rpc call if it is a {@link ServerCall} and also has {@link CellScanner}
* attached.
* <p/>
* Mainly used for reference counting as {@link CellScanner} may reference non heap memory.
*/
public static Optional<ServerCall<?>> getCurrentServerCallWithCellScanner() {
    return getCurrentCall().filter(c -> c instanceof ServerCall)
      .filter(c -> c.getCellScanner() != null).map(c -> (ServerCall<?>) c);
  } | 3.26 |
hbase_RpcServer_setErrorHandler_rdh | /**
* Set the handler for calling out of RPC for error conditions.
*
* @param handler
* the handler implementation
*/
@Override
public void setErrorHandler(HBaseRPCErrorHandler handler) {
this.errorHandler = handler;
} | 3.26 |
hbase_RpcServer_setCurrentCall_rdh | /**
* Used by {@link org.apache.hadoop.hbase.procedure2.store.region.RegionProcedureStore}. Set the
* rpc call back after mutate region.
*/
public static void setCurrentCall(RpcCall rpcCall) {
    CurCall.set(rpcCall);
} | 3.26 |
hbase_RpcServer_truncateTraceLog_rdh | /**
 * Truncate to the number of chars decided by the conf hbase.ipc.trace.log.max.length if TRACE is
 * on, else to 150 chars. Refer to HBASE-20826 and HBASE-20942.
*
* @param strParam
* stringifiedParam to be truncated
* @return truncated trace log string
*/
String truncateTraceLog(String strParam) {
if (f0.isTraceEnabled()) {
      int traceLogMaxLength = getConf().getInt(TRACE_LOG_MAX_LENGTH, DEFAULT_TRACE_LOG_MAX_LENGTH);
      int truncatedLength = (strParam.length() < traceLogMaxLength) ? strParam.length() : traceLogMaxLength;
String truncatedFlag = (truncatedLength == strParam.length()) ? "" : KEY_WORD_TRUNCATED;
return strParam.subSequence(0, truncatedLength) + truncatedFlag;
}
return strParam.subSequence(0, 150) + KEY_WORD_TRUNCATED;
} | 3.26 |
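The truncation itself is simple string slicing: cap the stringified parameter at a limit and append a marker only when something was actually cut. A self-contained sketch; the limit and marker text below are illustrative, not the real constants.

```java
public class TruncateSketch {
  static final String TRUNCATED_MARKER = " <TRUNCATED>"; // hypothetical marker text

  /** Returns at most maxLength chars of s, appending a marker only if chars were dropped. */
  static String truncate(String s, int maxLength) {
    int keep = Math.min(s.length(), maxLength);
    String marker = (keep == s.length()) ? "" : TRUNCATED_MARKER;
    return s.substring(0, keep) + marker;
  }

  public static void main(String[] args) {
    System.out.println(truncate("short param", 150));            // unchanged, no marker
    System.out.println(truncate("x".repeat(200), 150).length()); // 150 + marker length
  }
}
```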
hbase_RpcServer_call_rdh | /**
* This is a server side method, which is invoked over RPC. On success the return response has
* protobuf response payload. On failure, the exception name and the stack trace are returned in
* the protobuf response.
*/
@Override
public Pair<Message, CellScanner> call(RpcCall call, MonitoredRPCHandler status) throws IOException {
try {
MethodDescriptor md = call.getMethod();
Message param = call.getParam();
status.setRPC(md.getName(), new Object[]{ param }, call.getReceiveTime());
// TODO: Review after we add in encoded data blocks.
status.setRPCPacket(param);
status.resume("Servicing call");
// get an instance of the method arg type
HBaseRpcController controller = new HBaseRpcControllerImpl(call.getCellScanner());
controller.setCallTimeout(call.getTimeout());
      Message result = call.getService().callBlockingMethod(md, controller, param);
long receiveTime = call.getReceiveTime();
long startTime = call.getStartTime();
long endTime = EnvironmentEdgeManager.currentTime();
int processingTime = ((int) (endTime - startTime));
int qTime = ((int) (startTime - receiveTime));
int totalTime = ((int) (endTime - receiveTime));
if (f0.isTraceEnabled()) {
f0.trace("{}, response: {}, receiveTime: {}, queueTime: {}, processingTime: {}, totalTime: {}", CurCall.get().toString(), TextFormat.shortDebugString(result), CurCall.get().getReceiveTime(), qTime, processingTime, totalTime);
}
// Use the raw request call size for now.
long requestSize = call.getSize();
long responseSize = result.getSerializedSize();
long responseBlockSize = call.getBlockBytesScanned();
if (call.isClientCellBlockSupported()) {
// Include the payload size in HBaseRpcController
responseSize += call.getResponseCellSize();
      }
      metrics.dequeuedCall(qTime);
metrics.processedCall(processingTime);
metrics.totalCall(totalTime);
metrics.receivedRequest(requestSize);
metrics.sentResponse(responseSize);
// log any RPC responses that are slower than the configured warn
// response time or larger than configured warning size
boolean tooSlow = isTooSlow(call, processingTime);
boolean tooLarge = isTooLarge(call, responseSize, responseBlockSize);
if (tooSlow || tooLarge) {
final String userName = call.getRequestUserName().orElse(StringUtils.EMPTY);
// when tagging, we let TooLarge trump TooSmall to keep output simple
// note that large responses will often also be slow.
logResponse(param, md.getName(), ((md.getName() + "(") + param.getClass().getName()) + ")", tooLarge, tooSlow, status.getClient(), startTime, processingTime, qTime, responseSize, responseBlockSize, userName);
if ((this.namedQueueRecorder != null) && this.isOnlineLogProviderEnabled) {
// send logs to ring buffer owned by slowLogRecorder
final String className = (server == null) ? StringUtils.EMPTY : server.getClass().getSimpleName();
this.namedQueueRecorder.addRecord(new RpcLogDetails(call, param, status.getClient(), responseSize, responseBlockSize, className, tooSlow, tooLarge));
}
}
return new Pair<>(result, controller.cellScanner());
} catch (Throwable e) {
// The above callBlockingMethod will always return a SE. Strip the SE wrapper before
      // putting it on the wire. It's needed to adhere to the pb Service Interface but we don't
// need to pass it over the wire.
if (e instanceof ServiceException) {
if (e.getCause() == null) {
f0.debug("Caught a ServiceException with null cause", e);
} else {
e = e.getCause();
}
}
// increment the number of requests that were exceptions.
metrics.exception(e);
      if (e instanceof LinkageError) {
        throw new DoNotRetryIOException(e);
      }
      if (e instanceof IOException) {
        throw (IOException) e;
      }
f0.error("Unexpected throwable object ", e);
      throw new IOException(e.getMessage(), e);
}
} | 3.26 |
hbase_RpcServer_getRequestUserName_rdh | /**
* Returns the username for any user associated with the current RPC request or not present if no
* user is set.
*/
public static Optional<String> getRequestUserName() {
return getRequestUser().map(User::getShortName);
} | 3.26 |
hbase_RpcServer_logResponse_rdh | /**
* Logs an RPC response to the LOG file, producing valid JSON objects for client Operations.
*
* @param param
* The parameters received in the call.
* @param methodName
* The name of the method invoked
* @param call
* The string representation of the call
* @param tooLarge
* To indicate if the event is tooLarge
* @param tooSlow
* To indicate if the event is tooSlow
* @param clientAddress
* The address of the client who made this call.
* @param startTime
* The time that the call was initiated, in ms.
* @param processingTime
* The duration that the call took to run, in ms.
* @param qTime
* The duration that the call spent on the queue prior to being
* initiated, in ms.
* @param responseSize
* The size in bytes of the response buffer.
* @param blockBytesScanned
* The size of block bytes scanned to retrieve the response.
* @param userName
* UserName of the current RPC Call
*/
void logResponse(Message param, String methodName, String call, boolean tooLarge, boolean tooSlow,
    String clientAddress, long startTime, int processingTime, int qTime, long responseSize,
    long blockBytesScanned, String userName) {
    final String className = (server == null) ? StringUtils.EMPTY : server.getClass().getSimpleName();
// base information that is reported regardless of type of call
Map<String, Object> responseInfo = new HashMap<>();
responseInfo.put("starttimems", startTime);
responseInfo.put("processingtimems", processingTime);
responseInfo.put("queuetimems", qTime);
responseInfo.put("responsesize", responseSize);
responseInfo.put("blockbytesscanned", blockBytesScanned);
responseInfo.put("client", clientAddress);
responseInfo.put("class", className);
responseInfo.put("method", methodName);
responseInfo.put("call", call);
// The params could be really big, make sure they don't kill us at WARN
    String stringifiedParam = ProtobufUtil.getShortTextFormat(param);
if (stringifiedParam.length() > 150) {
// Truncate to 1000 chars if TRACE is on, else to 150 chars
stringifiedParam = truncateTraceLog(stringifiedParam);
}
responseInfo.put("param", stringifiedParam);
if ((param instanceof ClientProtos.ScanRequest) && (rsRpcServices != null)) {
      ClientProtos.ScanRequest request = (ClientProtos.ScanRequest) param;
      String scanDetails;
if (request.hasScannerId()) {
long scannerId = request.getScannerId();
scanDetails = rsRpcServices.getScanDetailsWithId(scannerId);
} else {
scanDetails = rsRpcServices.getScanDetailsWithRequest(request);
}
if (scanDetails != null) {
responseInfo.put("scandetails",
scanDetails);
}
}
if (param instanceof ClientProtos.MultiRequest) {
      int numGets = 0;
int numMutations = 0;
int numServiceCalls = 0;
ClientProtos.MultiRequest multi = ((ClientProtos.MultiRequest) (param));
for (ClientProtos.RegionAction regionAction : multi.getRegionActionList()) {
        for (ClientProtos.Action action : regionAction.getActionList()) {
          if (action.hasMutation()) {
numMutations++;
          }
          if (action.hasGet()) {
numGets++;
}
if (action.hasServiceCall()) {
numServiceCalls++;
}
}
}
responseInfo.put(MULTI_GETS, numGets);
responseInfo.put(MULTI_MUTATIONS, numMutations);
responseInfo.put(MULTI_SERVICE_CALLS, numServiceCalls);
}
final String tag = (tooLarge && tooSlow) ? "TooLarge & TooSlow" : tooSlow ? "TooSlow" : "TooLarge";
f0.warn((("(response" + tag) + "): ") + GSON.toJson(responseInfo));
} | 3.26 |
hbase_RpcServer_channelIO_rdh | /**
* Helper for {@link #channelRead(java.nio.channels.ReadableByteChannel, java.nio.ByteBuffer)}.
* Only one of readCh or writeCh should be non-null.
*
* @param readCh
* read channel
* @param writeCh
* write channel
* @param buf
* buffer to read or write into/out of
* @return bytes written
* @throws java.io.IOException
* e
* @see #channelRead(java.nio.channels.ReadableByteChannel, java.nio.ByteBuffer)
*/
private static int channelIO(ReadableByteChannel readCh, WritableByteChannel writeCh, ByteBuffer buf) throws IOException {
int originalLimit = buf.limit();
int initialRemaining = buf.remaining();
int ret = 0;
while (buf.remaining() > 0) {
try {
int ioSize = Math.min(buf.remaining(), NIO_BUFFER_LIMIT);
buf.limit(buf.position() + ioSize);
ret = (readCh == null) ? writeCh.write(buf) : readCh.read(buf);
if (ret < ioSize) {
break;
}
} finally {
buf.limit(originalLimit);
}
    }
    int nBytes = initialRemaining - buf.remaining();
return nBytes > 0 ? nBytes : ret;
} | 3.26 |
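The chunking trick above can be shown in isolation: temporarily shrink the buffer's limit so each call hands the channel at most a fixed-size slice, then restore the original limit. The 8 KB chunk size and the file destination below are arbitrary choices for the demo, not the server's NIO_BUFFER_LIMIT.

```java
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.WritableByteChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class ChunkedWriteSketch {
  static final int CHUNK_LIMIT = 8 * 1024; // arbitrary stand-in for NIO_BUFFER_LIMIT

  /** Writes buf to the channel in chunks of at most CHUNK_LIMIT bytes; returns bytes written. */
  static int chunkedWrite(WritableByteChannel ch, ByteBuffer buf) throws IOException {
    int originalLimit = buf.limit();
    int initialRemaining = buf.remaining();
    while (buf.remaining() > 0) {
      try {
        int ioSize = Math.min(buf.remaining(), CHUNK_LIMIT);
        buf.limit(buf.position() + ioSize); // expose only one chunk to the channel
        if (ch.write(buf) < ioSize) {
          break; // partial write; caller may retry later
        }
      } finally {
        buf.limit(originalLimit); // always restore the real limit
      }
    }
    return initialRemaining - buf.remaining();
  }

  public static void main(String[] args) throws IOException {
    ByteBuffer data = ByteBuffer.allocate(64 * 1024); // 64 KB of zeroes
    try (FileChannel out = FileChannel.open(Path.of("chunked-demo.bin"),
        StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING)) {
      System.out.println("wrote " + chunkedWrite(out, data) + " bytes");
    }
  }
}
```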
hbase_RpcServer_getRequestUser_rdh | /**
* Returns the user credentials associated with the current RPC request or not present if no
* credentials were provided.
*
* @return A User
*/
public static Optional<User> getRequestUser() {
Optional<RpcCall> ctx = getCurrentCall();
return ctx.isPresent() ? ctx.get().getRequestUser() : Optional.empty();
} | 3.26 |
hbase_RpcServer_authorize_rdh | /**
* Authorize the incoming client connection.
*
* @param user
* client user
* @param connection
* incoming connection
* @param addr
* InetAddress of incoming connection
* @throws AuthorizationException
* when the client isn't authorized to talk the protocol
 */
public synchronized void authorize(UserGroupInformation user, ConnectionHeader connection,
    InetAddress addr) throws AuthorizationException {
if (authorize) {
Class<?> c = getServiceInterface(services, connection.getServiceName());
authManager.authorize(user, c, getConf(), addr);
}
} | 3.26 |
hbase_RpcServer_channelRead_rdh | /**
* This is a wrapper around
* {@link java.nio.channels.ReadableByteChannel#read(java.nio.ByteBuffer)}. If the amount of data
* is large, it writes to channel in smaller chunks. This is to avoid jdk from creating many
* direct buffers as the size of ByteBuffer increases. There should not be any performance
 * degradation.
*
* @param channel
* writable byte channel to write on
* @param buffer
* buffer to write
* @return number of bytes written
* @throws java.io.IOException
* e
* @see java.nio.channels.ReadableByteChannel#read(java.nio.ByteBuffer)
*/
protected int channelRead(ReadableByteChannel channel, ByteBuffer buffer) throws IOException {
    int count = (buffer.remaining() <= NIO_BUFFER_LIMIT) ? channel.read(buffer) : channelIO(channel, null, buffer);
if (count > 0) {
metrics.receivedBytes(count);
}
return count;
} | 3.26 |
hbase_RpcServer_getServiceAndInterface_rdh | /**
*
* @param serviceName
* Some arbitrary string that represents a 'service'.
* @param services
* Available service instances
* @return Matching BlockingServiceAndInterface pair
*/
protected static BlockingServiceAndInterface getServiceAndInterface(final List<BlockingServiceAndInterface> services, final String serviceName) {
    for (BlockingServiceAndInterface bs : services) {
if (bs.getBlockingService().getDescriptorForType().getName().equals(serviceName)) {
return bs;
}
}
return null;
} | 3.26 |
hbase_RpcServer_getRemoteAddress_rdh | /**
* Returns Address of remote client if a request is ongoing, else null
*/
public static Optional<InetAddress> getRemoteAddress() {
return getCurrentCall().map(RpcCall::getRemoteAddress);
} | 3.26 |
hbase_RpcServer_getCurrentCall_rdh | /**
* Needed for features such as delayed calls. We need to be able to store the current call so that
* we can complete it later or ask questions of what is supported by the current ongoing call.
*
* @return An RpcCallContext backed by the currently ongoing call (gotten from a thread local)
*/
public static Optional<RpcCall> getCurrentCall() {
return Optional.ofNullable(CurCall.get());
} | 3.26 |
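The "current call" is simply a ThreadLocal that handler threads set before dispatch and that any code further down the stack can read. A standalone sketch of the pattern with a hypothetical placeholder call type:

```java
import java.util.Optional;

public class CurrentCallSketch {
  // Hypothetical stand-in for RpcCall; holds whatever per-request context is needed.
  record Call(String method, String user) {}

  private static final ThreadLocal<Call> CUR_CALL = new ThreadLocal<>();

  static Optional<Call> getCurrentCall() {
    return Optional.ofNullable(CUR_CALL.get());
  }

  static void handle(Call call, Runnable body) {
    CUR_CALL.set(call); // make the call visible to everything on this thread
    try {
      body.run();
    } finally {
      CUR_CALL.remove(); // never leak the call to the next request on a pooled thread
    }
  }

  public static void main(String[] args) {
    handle(new Call("Scan", "alice"),
      () -> System.out.println("user = " + getCurrentCall().map(Call::user).orElse("<none>")));
    System.out.println("outside handler: " + getCurrentCall().isPresent()); // false
  }
}
```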
hbase_Response_getLocation_rdh | /**
* Returns the value of the Location header
*/
public String getLocation() {
return getHeader("Location");
} | 3.26 |
hbase_Response_hasBody_rdh | /**
* Returns true if a response body was sent
*/
public boolean hasBody() {
return body != null;
} | 3.26 |
hbase_Response_setBody_rdh | /**
*
* @param body
* the response body
*/
public void setBody(byte[] body) {
this.body = body;
} | 3.26 |
hbase_Response_setCode_rdh | /**
*
* @param code
* the HTTP response code
*/
public void setCode(int code) {
this.code = code;
} | 3.26 |
hbase_Response_setHeaders_rdh | /**
*
* @param headers
* the HTTP response headers
*/
public void setHeaders(Header[] headers) {
this.headers = headers;
} | 3.26 |
hbase_Response_getHeaders_rdh | /**
* Returns the HTTP response headers
 */
public Header[] getHeaders() {
return headers;
} | 3.26 |
hbase_Response_getBody_rdh | /**
* Returns the HTTP response body
*/
public byte[] getBody() {
if (body == null) {
      try {
        body = Client.getResponseBody(resp);
} catch (IOException ioe) {
LOG.debug("encountered ioe when obtaining body", ioe);
}
}
    return body;
} | 3.26 |
hbase_Response_getStream_rdh | /**
* Gets the input stream instance.
*
* @return an instance of InputStream class.
*/
public InputStream getStream() {
return this.stream;
} | 3.26 |
hbase_ServerSideScanMetrics_getMetricsMap_rdh | /**
 * Get all of the values. If reset is true, we will reset all the AtomicLongs back to 0.
*
* @param reset
* whether to reset the AtomicLongs to 0.
* @return A Map of String -> Long for metrics
*/
public Map<String, Long> getMetricsMap(boolean reset) {
// Create a builder
ImmutableMap.Builder<String, Long> builder = ImmutableMap.builder();
for (Map.Entry<String, AtomicLong> e : this.counters.entrySet()) {
long value = (reset) ? e.getValue().getAndSet(0) : e.getValue().get();
builder.put(e.getKey(), value);
}
// Build the immutable map so that people can't mess around with it.
return builder.build();
} | 3.26 |
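The snapshot-and-optionally-reset behaviour above can be demonstrated with plain JDK types. Where the original uses Guava's ImmutableMap, this sketch uses Map.copyOf as a stand-in; the counter name is made up.

```java
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

public class CounterSnapshotSketch {
  private final Map<String, AtomicLong> counters = new ConcurrentHashMap<>();

  AtomicLong counter(String name) {
    return counters.computeIfAbsent(name, k -> new AtomicLong());
  }

  /** Returns an immutable snapshot; if reset is true, each counter is atomically zeroed. */
  Map<String, Long> snapshot(boolean reset) {
    Map<String, Long> out = new HashMap<>();
    for (Map.Entry<String, AtomicLong> e : counters.entrySet()) {
      long value = reset ? e.getValue().getAndSet(0) : e.getValue().get();
      out.put(e.getKey(), value);
    }
    return Map.copyOf(out); // callers cannot mutate the snapshot
  }

  public static void main(String[] args) {
    CounterSnapshotSketch m = new CounterSnapshotSketch();
    m.counter("ROWS_SCANNED").addAndGet(42);
    System.out.println(m.snapshot(true));  // {ROWS_SCANNED=42}, counter reset to 0
    System.out.println(m.snapshot(false)); // {ROWS_SCANNED=0}
  }
}
```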
hbase_ServerSideScanMetrics_hasCounter_rdh | /**
* Returns true if a counter exists with the counterName
 */
public boolean hasCounter(String counterName) {
return this.counters.containsKey(counterName);
} | 3.26 |
hbase_ServerSideScanMetrics_getCounter_rdh | /**
* Returns {@link AtomicLong} instance for this counter name, null if counter does not exist.
*/
public AtomicLong getCounter(String counterName) {
return this.counters.get(counterName);
} | 3.26 |
hbase_ServerSideScanMetrics_createCounter_rdh | /**
* Create a new counter with the specified name
*
* @return {@link AtomicLong} instance for the counter with counterName
*/
protected AtomicLong createCounter(String counterName) {
AtomicLong c = new AtomicLong(0);
counters.put(counterName, c);
return c;
} | 3.26 |
hbase_ColumnCountGetFilter_parseFrom_rdh | /**
* Parse a serialized representation of {@link ColumnCountGetFilter}
*
* @param pbBytes
* A pb serialized {@link ColumnCountGetFilter} instance
* @return An instance of {@link ColumnCountGetFilter} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static ColumnCountGetFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.ColumnCountGetFilter proto;
try {
proto = FilterProtos.ColumnCountGetFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
return new ColumnCountGetFilter(proto.getLimit());
} | 3.26 |
hbase_ColumnCountGetFilter_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof ColumnCountGetFilter)) {
return false;
}
    ColumnCountGetFilter other = (ColumnCountGetFilter) o;
    return this.getLimit() == other.getLimit();
} | 3.26 |
hbase_ColumnCountGetFilter_toByteArray_rdh | /**
* Returns The filter serialized using pb
*/
@Override
public byte[] toByteArray() {
FilterProtos.ColumnCountGetFilter.Builder builder = FilterProtos.ColumnCountGetFilter.newBuilder();
builder.setLimit(this.limit);
return builder.build().toByteArray();
} | 3.26 |
hbase_ReplicationMarkerChore_getRowKey_rdh | /**
* Creates a rowkey with region server name and timestamp.
*
* @param serverName
* region server name
* @param timestamp
* timestamp
*/
public static byte[] getRowKey(String serverName, long timestamp) {
// converting to string since this will help seeing the timestamp in string format using
// hbase shell commands.
String timestampStr = String.valueOf(timestamp);
final String rowKeyStr = (serverName + DELIMITER) + timestampStr;
return Bytes.toBytes(rowKeyStr);
} | 3.26 |
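A tiny sketch of the rowkey layout above: server name, a delimiter, then the timestamp rendered as text so it stays human-readable in shell output. The delimiter character below is an assumption for the demo; the real one is the class's DELIMITER constant.

```java
import java.nio.charset.StandardCharsets;

public class ReplicationMarkerRowKeySketch {
  private static final String DELIMITER = "_"; // assumption; the real constant lives in the chore class

  static byte[] getRowKey(String serverName, long timestamp) {
    // Keep the timestamp as a string so it is readable from the HBase shell.
    String rowKeyStr = serverName + DELIMITER + timestamp;
    return rowKeyStr.getBytes(StandardCharsets.UTF_8);
  }

  public static void main(String[] args) {
    byte[] key = getRowKey("rs1.example.com,16020,1700000000000", 1700000003000L);
    System.out.println(new String(key, StandardCharsets.UTF_8));
  }
}
```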
hbase_SplitTableRegionProcedure_prepareSplitRegion_rdh | /**
* Prepare to Split region.
*
* @param env
* MasterProcedureEnv
*/
public boolean prepareSplitRegion(final MasterProcedureEnv env) throws IOException {
    // Fail if we are taking snapshot for the given table
if (env.getMasterServices().getSnapshotManager().isTakingSnapshot(getParentRegion().getTable())) {
      setFailure(new IOException("Skip splitting region " + getParentRegion().getShortNameToLog()
        + ", because we are taking snapshot for the table " + getParentRegion().getTable()));
return false;
}
// Check whether the region is splittable
RegionStateNode node = env.getAssignmentManager().getRegionStates().getRegionStateNode(getParentRegion());
if (node == null) {
throw new UnknownRegionException(getParentRegion().getRegionNameAsString());
}
RegionInfo parentHRI = node.getRegionInfo();
    if (parentHRI == null) {
LOG.info("Unsplittable; parent region is null; node={}", node);
return false;
}
// Lookup the parent HRI state from the AM, which has the latest updated info.
// Protect against the case where concurrent SPLIT requests came in and succeeded
// just before us.
if (node.isInState(State.SPLIT)) {
LOG.info(("Split of " + parentHRI) + " skipped; state is already SPLIT");
return false;
}
if (parentHRI.isSplit() || parentHRI.isOffline()) {
LOG.info(("Split of " + parentHRI) + " skipped because offline/split.");
      return false;
    }
// expected parent to be online or closed
if (!node.isInState(EXPECTED_SPLIT_STATES)) {
// We may have SPLIT already?
setFailure(new IOException((((("Split " + parentHRI.getRegionNameAsString()) + " FAILED because state=") + node.getState()) + "; expected ") + Arrays.toString(EXPECTED_SPLIT_STATES)));
return false;
}
// Mostly this check is not used because we already check the switch before submit a split
// procedure. Just for safe, check the switch again. This procedure can be rollbacked if
// the switch was set to false after submit.
if (!env.getMasterServices().isSplitOrMergeEnabled(MasterSwitchType.SPLIT)) {
LOG.warn((("pid=" +
getProcId()) + " split switch is off! skip split of ") + parentHRI);
setFailure(new IOException(("Split region " + parentHRI.getRegionNameAsString()) + " failed due to split switch off"));
return false;
}
if (!env.getMasterServices().getTableDescriptors().get(getTableName()).isSplitEnabled()) {
LOG.warn("pid={}, split is disabled for the table! Skipping split of {}", getProcId(), parentHRI);
setFailure(new IOException(("Split region " + parentHRI.getRegionNameAsString()) + " failed as region split is disabled for the table"));
return false;
}
// set node state as SPLITTING
node.setState(State.SPLITTING);
// Since we have the lock and the master is coordinating the operation
// we are always able to split the region
return true;
} | 3.26 |
hbase_SplitTableRegionProcedure_postRollBackSplitRegion_rdh | /**
* Action after rollback a split table region action.
*
* @param env
* MasterProcedureEnv
*/
private void postRollBackSplitRegion(final MasterProcedureEnv env) throws IOException {
    final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
if (cpHost != null) {
cpHost.postRollBackSplitRegionAction(getUser());
}
} | 3.26 |
hbase_SplitTableRegionProcedure_isRollbackSupported_rdh | /* Check whether we are in a state that can be rolled back */
@Override
protected boolean isRollbackSupported(final SplitTableRegionState state) {
switch (state) {
case SPLIT_TABLE_REGION_POST_OPERATION :
case SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS :
case SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META :
case SPLIT_TABLE_REGION_UPDATE_META :
// It is not safe to rollback if we reach to these states.
return false;
default :
break;
}
return true;
} | 3.26 |
hbase_SplitTableRegionProcedure_preSplitRegionAfterMETA_rdh | /**
* Pre split region actions after the Point-of-No-Return step
*
* @param env
* MasterProcedureEnv
*/
private void preSplitRegionAfterMETA(final MasterProcedureEnv env) throws IOException, InterruptedException {
final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
if (cpHost != null) {
cpHost.preSplitAfterMETAAction(getUser());
}
} | 3.26 |
hbase_SplitTableRegionProcedure_openParentRegion_rdh | /**
* Rollback close parent region
*/
private void openParentRegion(MasterProcedureEnv env) throws IOException {
AssignmentManagerUtil.reopenRegionsForRollback(env, Collections.singletonList(getParentRegion()), getRegionReplication(env), getParentRegionServerName(env));
} | 3.26 |
hbase_SplitTableRegionProcedure_updateMeta_rdh | /**
* Add daughter regions to META
*
* @param env
* MasterProcedureEnv
*/
private void updateMeta(final MasterProcedureEnv env) throws IOException {
env.getAssignmentManager().markRegionAsSplit(getParentRegion(), getParentRegionServerName(env), daughterOneRI, daughterTwoRI);
} | 3.26 |
hbase_SplitTableRegionProcedure_createDaughterRegions_rdh | /**
* Create daughter regions
*/
public void createDaughterRegions(final MasterProcedureEnv env) throws IOException {
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
final Path tabledir = CommonFSUtils.getTableDir(mfs.getRootDir(), getTableName());
final FileSystem fs = mfs.getFileSystem();
HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(env.getMasterConfiguration(), fs, tabledir, getParentRegion(), false);
regionFs.createSplitsDir(daughterOneRI, daughterTwoRI);
Pair<List<Path>, List<Path>> expectedReferences = splitStoreFiles(env, regionFs);
assertSplitResultFilesCount(fs, expectedReferences.getFirst().size(), regionFs.getSplitsDir(daughterOneRI));
regionFs.commitDaughterRegion(daughterOneRI, expectedReferences.getFirst(), env);
assertSplitResultFilesCount(fs, expectedReferences.getFirst().size(), new Path(tabledir, daughterOneRI.getEncodedName()));
assertSplitResultFilesCount(fs, expectedReferences.getSecond().size(), regionFs.getSplitsDir(daughterTwoRI));
regionFs.commitDaughterRegion(daughterTwoRI, expectedReferences.getSecond(), env);
assertSplitResultFilesCount(fs, expectedReferences.getSecond().size(), new Path(tabledir, daughterTwoRI.getEncodedName()));
} | 3.26 |
hbase_SplitTableRegionProcedure_splitStoreFiles_rdh | /**
* Create Split directory
*
* @param env
* MasterProcedureEnv
*/
private Pair<List<Path>, List<Path>> splitStoreFiles(final MasterProcedureEnv env, final HRegionFileSystem regionFs) throws IOException
{
final Configuration conf = env.getMasterConfiguration();
TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
// The following code sets up a thread pool executor with as many slots as
    // there are files to split. It then fires up everything, waits for
// completion and finally checks for any exception
//
// Note: From HBASE-26187, splitStoreFiles now creates daughter region dirs straight under the
// table dir. In case of failure, the proc would go through this again, already existing
// region dirs and split files would just be ignored, new split files should get created.
int nbFiles = 0;
final Map<String, Collection<StoreFileInfo>> files = new HashMap<String, Collection<StoreFileInfo>>(htd.getColumnFamilyCount());
    for (ColumnFamilyDescriptor cfd : htd.getColumnFamilies()) {
      String family = cfd.getNameAsString();
StoreFileTracker tracker = StoreFileTrackerFactory.create(env.getMasterConfiguration(), htd, cfd, regionFs);
Collection<StoreFileInfo> sfis = tracker.load();
if (sfis == null) {
continue;
}
Collection<StoreFileInfo> filteredSfis = null;
for (StoreFileInfo sfi : sfis) {
// Filter. There is a lag cleaning up compacted reference files. They get cleared
// after a delay in case outstanding Scanners still have references. Because of this,
// the listing of the Store content may have straggler reference files. Skip these.
// It should be safe to skip references at this point because we checked above with
        // the region whether it thinks it is splittable and, since we are here, it thinks it is
        // splittable.
if (sfi.isReference()) {
LOG.info(("Skipping split of " + sfi) + "; presuming ready for archiving.");
continue;
        }
        if (filteredSfis == null) {
filteredSfis = new ArrayList<StoreFileInfo>(sfis.size());
files.put(family, filteredSfis);
}
filteredSfis.add(sfi);
nbFiles++;
}
}
if (nbFiles == 0) {
      // no files need to be split.
return new Pair<>(Collections.emptyList(), Collections.emptyList());
}
// Max #threads is the smaller of the number of storefiles or the default max determined above.
int maxThreads = Math.min(conf.getInt(HConstants.REGION_SPLIT_THREADS_MAX, conf.getInt(HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT)), nbFiles);
LOG.info((((((("pid=" + getProcId()) + " splitting ") + nbFiles) + " storefiles, region=") + getParentRegion().getShortNameToLog()) + ", threads=") + maxThreads);
final ExecutorService threadPool = Executors.newFixedThreadPool(maxThreads, new ThreadFactoryBuilder().setNameFormat("StoreFileSplitter-pool-%d").setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
final List<Future<Pair<Path, Path>>> futures = new ArrayList<Future<Pair<Path, Path>>>(nbFiles);
// Split each store file.
    for (Map.Entry<String, Collection<StoreFileInfo>> e : files.entrySet()) {
byte[] familyName = Bytes.toBytes(e.getKey());
final ColumnFamilyDescriptor hcd = htd.getColumnFamily(familyName);
final Collection<StoreFileInfo> storeFiles = e.getValue();
if ((storeFiles != null) && (storeFiles.size() > 0)) {
final Configuration storeConfiguration = StoreUtils.createStoreConfiguration(env.getMasterConfiguration(), htd, hcd);
        for (StoreFileInfo storeFileInfo : storeFiles) {
// As this procedure is running on master, use CacheConfig.DISABLED means
// don't cache any block.
// We also need to pass through a suitable CompoundConfiguration as if this
// is running in a regionserver's Store context, or we might not be able
// to read the hfiles.
          storeFileInfo.setConf(storeConfiguration);
          StoreFileSplitter sfs = new StoreFileSplitter(regionFs, familyName, new HStoreFile(storeFileInfo, hcd.getBloomFilterType(), CacheConfig.DISABLED));
futures.add(threadPool.submit(sfs));
}
}
    }
    // Shutdown the pool
threadPool.shutdown();
// Wait for all the tasks to finish.
// When splits ran on the RegionServer, how-long-to-wait-configuration was named
// hbase.regionserver.fileSplitTimeout. If set, use its value.
long fileSplitTimeout = conf.getLong("hbase.master.fileSplitTimeout", conf.getLong("hbase.regionserver.fileSplitTimeout", 600000));
try {
boolean stillRunning = !threadPool.awaitTermination(fileSplitTimeout, TimeUnit.MILLISECONDS);
if (stillRunning) {
threadPool.shutdownNow();
// wait for the thread to shutdown completely.
while (!threadPool.isTerminated()) {
Thread.sleep(50);
}
throw new IOException("Took too long to split the" + " files and create the references, aborting split");
}
} catch (InterruptedException e) {
throw ((InterruptedIOException) (new InterruptedIOException().initCause(e)));
}
List<Path> daughterA = new ArrayList<>();
List<Path> daughterB = new ArrayList<>();
// Look for any exception
    for (Future<Pair<Path, Path>> future : futures) {
try {
Pair<Path, Path> p = future.get();
if (p.getFirst() != null) {
daughterA.add(p.getFirst());
}
        if (p.getSecond() != null) {
daughterB.add(p.getSecond());
}
} catch (InterruptedException e) {
throw ((InterruptedIOException) (new InterruptedIOException().initCause(e)));
} catch (ExecutionException e) {
throw new IOException(e);
}
    }
    if (LOG.isDebugEnabled()) {
LOG.debug(((((((("pid=" + getProcId()) + " split storefiles for region ") + getParentRegion().getShortNameToLog()) + " Daughter A: ") + daughterA) + " storefiles, Daughter B: ") + daughterB) + " storefiles.");
}
return new Pair<>(daughterA, daughterB);
} | 3.26 |
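The method above fans the per-store-file work out over a bounded thread pool, shuts the pool down, waits with a deadline, and then harvests the futures. A minimal, self-contained sketch of that submit / shutdown / awaitTermination / collect pattern; the task bodies and names are illustrative stand-ins for StoreFileSplitter, not HBase API.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class ParallelSplitSketch {
  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(4);
    List<Future<String>> futures = new ArrayList<>();
    for (int i = 0; i < 8; i++) {
      final int id = i;
      // each task stands in for one StoreFileSplitter producing a reference path
      futures.add(pool.submit(() -> "reference-" + id));
    }
    pool.shutdown();
    if (!pool.awaitTermination(10, TimeUnit.SECONDS)) {
      pool.shutdownNow();
      throw new IOException("Took too long to split the files, aborting");
    }
    for (Future<String> f : futures) {
      // get() surfaces any task failure as an ExecutionException
      System.out.println(f.get());
    }
  }
}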
hbase_SplitTableRegionProcedure_checkSplittable_rdh | /**
* Check whether the region is splittable
*
* @param env
* MasterProcedureEnv
* @param regionToSplit
* parent Region to be split
*/
private void checkSplittable(final MasterProcedureEnv env, final RegionInfo regionToSplit) throws IOException {
// Ask the remote RS if this region is splittable.
// If we get an IOE, report it along w/ the failure so can see why we are not splittable at
// this time.
if (regionToSplit.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
throw new IllegalArgumentException("Can't invoke split on non-default regions directly");
}
RegionStateNode node = env.getAssignmentManager().getRegionStates().getRegionStateNode(getParentRegion());
IOException splittableCheckIOE = null;
    boolean splittable = false;
if (node != null) {
try {
GetRegionInfoResponse response;
if (!hasBestSplitRow()) {
LOG.info("{} splitKey isn't explicitly specified, will try to find a best split key from RS {}", node.getRegionInfo().getRegionNameAsString(), node.getRegionLocation());
response = AssignmentManagerUtil.getRegionInfoResponse(env, node.getRegionLocation(), node.getRegionInfo(), true);
bestSplitRow = (response.hasBestSplitRow()) ? response.getBestSplitRow().toByteArray() : null;
} else {
response = AssignmentManagerUtil.getRegionInfoResponse(env, node.getRegionLocation(), node.getRegionInfo(), false);
}
        splittable = response.hasSplittable() && response.getSplittable();
if (LOG.isDebugEnabled()) {
          LOG.debug("Splittable=" + splittable + " " + node.toShortString());
}
} catch (IOException e) {
splittableCheckIOE = e;
}
}
    if (!splittable) {
      IOException e = new DoNotRetryIOException(regionToSplit.getShortNameToLog() + " NOT splittable");
if (splittableCheckIOE != null) {
e.initCause(splittableCheckIOE);
}
throw e;
}
if (!hasBestSplitRow()) {
throw new DoNotRetryIOException("Region not splittable because bestSplitPoint = null, " + "maybe table is too small for auto split. For force split, try specifying split row");
}
    if (Bytes.equals(regionToSplit.getStartKey(), bestSplitRow)) {
      throw new DoNotRetryIOException("Split row is equal to startkey: " + Bytes.toStringBinary(bestSplitRow));
}
if (!regionToSplit.containsRow(bestSplitRow)) {
throw new DoNotRetryIOException((("Split row is not inside region key range splitKey:" + Bytes.toStringBinary(bestSplitRow)) + " region: ") + regionToSplit);
}
} | 3.26 |
hbase_SplitTableRegionProcedure_getDaughterRegionIdTimestamp_rdh | /**
* Calculate daughter regionid to use.
*
* @param hri
* Parent {@link RegionInfo}
* @return Daughter region id (timestamp) to use.
*/
private static long getDaughterRegionIdTimestamp(final RegionInfo hri) {
long rid = EnvironmentEdgeManager.currentTime();
// Regionid is timestamp. Can't be less than that of parent else will insert
// at wrong location in hbase:meta (See HBASE-710).
if (rid < hri.getRegionId()) {
LOG.warn((("Clock skew; parent regions id is " + hri.getRegionId()) + " but current time here is ") + rid);
rid = hri.getRegionId() + 1;
}
return rid;
} | 3.26 |
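A tiny stand-alone illustration of the clock-skew guard above: the daughter region id is the current timestamp, but it must never be smaller than the parent's region id, otherwise the daughters would sort before the parent in hbase:meta. The numbers below are made up.

public class DaughterRegionIdSketch {
  static long daughterRegionId(long parentRegionId, long now) {
    // Region id is a timestamp; never let it fall behind the parent's.
    return now < parentRegionId ? parentRegionId + 1 : now;
  }

  public static void main(String[] args) {
    long parentId = 1_700_000_000_500L;          // assumed parent region id
    System.out.println(daughterRegionId(parentId, 1_700_000_000_123L)); // parentId + 1 (clock lags)
    System.out.println(daughterRegionId(parentId, 1_700_000_001_000L)); // the current timestamp
  }
}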
hbase_SplitTableRegionProcedure_postSplitRegion_rdh | /**
* Post split region actions
*
* @param env
* MasterProcedureEnv
*/
  private void postSplitRegion(final MasterProcedureEnv env) throws IOException {
final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
if (cpHost != null) {
cpHost.postCompletedSplitRegionAction(daughterOneRI, daughterTwoRI, getUser());
}
} | 3.26 |
hbase_SplitTableRegionProcedure_preSplitRegion_rdh | /**
* Action before splitting region in a table.
*
* @param env
* MasterProcedureEnv
*/
private void preSplitRegion(final MasterProcedureEnv env) throws IOException, InterruptedException {
final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
if (cpHost != null) {
cpHost.preSplitRegionAction(getTableName(), getSplitRow(), getUser());
}
// TODO: Clean up split and merge. Currently all over the place.
// Notify QuotaManager and RegionNormalizer
try {
env.getMasterServices().getMasterQuotaManager().onRegionSplit(this.getParentRegion());
} catch (QuotaExceededException e) {
// TODO: why is this here? split requests can be submitted by actors other than the normalizer
env.getMasterServices().getRegionNormalizerManager().planSkipped(PlanType.SPLIT);
      throw e;
}
} | 3.26 |
hbase_SplitTableRegionProcedure_m0_rdh | /**
* Post split region actions before the Point-of-No-Return step
*
* @param env
* MasterProcedureEnv
*/
private void m0(final MasterProcedureEnv env) throws IOException, InterruptedException {
final List<Mutation> metaEntries = new ArrayList<Mutation>();
final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
if (cpHost != null) {
      cpHost.preSplitBeforeMETAAction(getSplitRow(), metaEntries, getUser());
      try {
for (Mutation p : metaEntries) {
RegionInfo.parseRegionName(p.getRow());
}
} catch (IOException e) {
        LOG.error("pid=" + getProcId() + " row key of mutation from coprocessor not parsable as "
          + "region name. Mutations from coprocessor should only be for the hbase:meta table.");
throw e;
}
}
} | 3.26 |
hbase_CatalogReplicaLoadBalanceSelectorFactory_createSelector_rdh | /**
 * Create a CatalogReplicaLoadBalanceSelector based on input config.
*
* @param replicaSelectorClass
* Selector classname.
* @param tableName
* System table name.
* @param conn
 * {@link AsyncConnectionImpl}
 * @param getReplicaCount
 * supplier of the current replica count for the table
 * @return {@link CatalogReplicaLoadBalanceSelector}
*/
public static CatalogReplicaLoadBalanceSelector createSelector(String replicaSelectorClass, TableName tableName, AsyncConnectionImpl conn, IntSupplier getReplicaCount) {
return ReflectionUtils.instantiateWithCustomCtor(replicaSelectorClass, new Class[]{ TableName.class, AsyncConnectionImpl.class, IntSupplier.class }, new Object[]{ tableName, conn, getReplicaCount });
} | 3.26 |
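The factory above is instantiate-by-classname through a specific constructor. A self-contained sketch of the same idea with plain java.lang.reflect; the SimpleSelector class and its constructor are hypothetical and only illustrate the pattern, not the HBase API.

import java.lang.reflect.Constructor;

public class CustomCtorSketch {
  // Hypothetical class standing in for a CatalogReplicaLoadBalanceSelector implementation.
  public static class SimpleSelector {
    private final String tableName;
    public SimpleSelector(String tableName) { this.tableName = tableName; }
    @Override public String toString() { return "selector for " + tableName; }
  }

  public static void main(String[] args) throws Exception {
    // Resolve the class by name, find the constructor matching the argument types, invoke it.
    String className = CustomCtorSketch.class.getName() + "$SimpleSelector";
    Class<?> clazz = Class.forName(className);
    Constructor<?> ctor = clazz.getConstructor(String.class);
    Object selector = ctor.newInstance("hbase:meta");
    System.out.println(selector); // selector for hbase:meta
  }
}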
hbase_ZKNodeTracker_checkIfBaseNodeAvailable_rdh | /**
 * Checks if the base znode set via the property 'zookeeper.znode.parent' exists.
 *
 * @return true if the base znode exists, false if it does not.
*/
public boolean checkIfBaseNodeAvailable() {
    try {
      if (ZKUtil.checkExists(watcher, watcher.getZNodePaths().baseZNode) == -1) {
return false;
}
} catch (KeeperException e) {
abortable.abort(("Exception while checking if basenode (" + watcher.getZNodePaths().baseZNode) + ") exists in ZooKeeper.", e);
}
return true;
} | 3.26 |
hbase_ZKNodeTracker_postStart_rdh | /**
 * Called after start is called. Subclasses can override this method to load more data from ZooKeeper.
*/
protected void postStart()
{
} | 3.26 |
hbase_ZKNodeTracker_getData_rdh | /**
* Gets the data of the node.
* <p>
* If the node is currently available, the most up-to-date known version of the data is returned.
* If the node is not currently available, null is returned.
*
* @param refresh
* whether to refresh the data by calling ZK directly.
* @return data of the node, null if unavailable
*/
public synchronized byte[] getData(boolean refresh) {
if (refresh) {
try {
this.data = ZKUtil.getDataAndWatch(watcher, node);
} catch (KeeperException e) {
abortable.abort("Unexpected exception handling getData", e);
}
}
return this.data;
} | 3.26 |
hbase_ZKNodeTracker_blockUntilAvailable_rdh | /**
* Gets the data of the node, blocking until the node is available or the specified timeout has
* elapsed.
*
* @param timeout
 * maximum time to wait for the node data to be available, in milliseconds. Pass 0
 * for no timeout.
 * @param refresh
 * whether to refresh the data from ZooKeeper before waiting
 * @return data of the node
* @throws InterruptedException
* if the waiting thread is interrupted
*/
public synchronized byte[] blockUntilAvailable(long timeout, boolean refresh) throws InterruptedException {
if (timeout < 0) {
throw new IllegalArgumentException();
}
boolean notimeout = timeout == 0;
long startTime = EnvironmentEdgeManager.currentTime();
long remaining = timeout;
if (refresh) {
try {
// This does not create a watch if the node does not exists
this.data = ZKUtil.getDataAndWatch(watcher, node);
} catch (KeeperException e) {
        // We used to abort here, but in some cases the abort is ignored
        // (empty Abortable), so it's better to log...
LOG.warn("Unexpected exception handling blockUntilAvailable", e);
abortable.abort("Unexpected exception handling blockUntilAvailable", e);
}
}
boolean nodeExistsChecked = (!refresh) || (data != null);
while (((!this.stopped) && (notimeout || (remaining > 0))) && (this.data == null)) {
if (!nodeExistsChecked) {
try {
          nodeExistsChecked = ZKUtil.checkExists(watcher, node) != -1;
        } catch (KeeperException e) {
          LOG.warn("Got exception while trying to check existence in ZooKeeper"
            + " of the node: " + node + ", retrying if timeout not reached", e);
        }
        // It did not exist, and now it does.
if (nodeExistsChecked) {
LOG.debug("Node {} now exists, resetting a watcher", node);
try {
// This does not create a watch if the node does not exists
this.data = ZKUtil.getDataAndWatch(watcher, node);
} catch (KeeperException e) {
LOG.warn("Unexpected exception handling blockUntilAvailable", e);
abortable.abort("Unexpected exception handling blockUntilAvailable", e);
}
}
}
      // We expect a notification, but we wait with
      // a timeout to lower the impact of a race condition if any
wait(100);
remaining = timeout - (EnvironmentEdgeManager.currentTime() - startTime);
}
return this.data;
} | 3.26 |
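A minimal, self-contained sketch of the bounded-wait loop above: wait on a monitor in short slices and recompute the remaining time on every pass, so spurious wakeups or missed notifications cannot extend the deadline. All names here are illustrative, not HBase API.

public class BoundedWaitSketch {
  private final Object lock = new Object();
  private volatile byte[] data;      // set by some watcher/producer thread
  private volatile boolean stopped;

  byte[] blockUntilAvailable(long timeoutMs) throws InterruptedException {
    long start = System.currentTimeMillis();
    long remaining = timeoutMs;
    boolean noTimeout = timeoutMs == 0; // 0 means wait forever, as in the tracker above
    synchronized (lock) {
      while (!stopped && data == null && (noTimeout || remaining > 0)) {
        lock.wait(100); // short slice bounds the cost of a missed notification
        remaining = timeoutMs - (System.currentTimeMillis() - start);
      }
      return data;
    }
  }
}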
hbase_ZKNodeTracker_start_rdh | /**
* Starts the tracking of the node in ZooKeeper.
* <p/>
* Use {@link #blockUntilAvailable()} to block until the node is available or
* {@link #getData(boolean)} to get the data of the node if it is available.
*/
  public synchronized void start() {
    this.watcher.registerListener(this);
    try {
      if (ZKUtil.watchAndCheckExists(watcher, node)) {
        byte[] data = ZKUtil.getDataAndWatch(watcher, node);
if (data != null) {
this.data = data;
} else {
// It existed but now does not, try again to ensure a watch is set
LOG.debug("Try starting again because there is no data from {}", node);
start();
}
}
    } catch (KeeperException e) {
abortable.abort("Unexpected exception during initialization, aborting", e);
}
postStart();
} | 3.26 |
hbase_MobFileCleanupUtil_archiveMobFiles_rdh | /**
* Archives the mob files.
*
* @param conf
* The current configuration.
* @param tableName
* The table name.
* @param family
* The name of the column family.
* @param storeFiles
* The files to be archived.
* @throws IOException
* exception
*/
private static void archiveMobFiles(Configuration conf, TableName tableName, Admin admin, byte[] family, List<Path> storeFiles) throws IOException {
if (storeFiles.size() == 0) {
// nothing to remove
LOG.debug("Skipping archiving old MOB files - no files found for table={} cf={}", tableName, Bytes.toString(family));
return;
}
Path mobTableDir = CommonFSUtils.getTableDir(MobUtils.getMobHome(conf), tableName);
FileSystem fs = storeFiles.get(0).getFileSystem(conf);
for (Path p : storeFiles) {
LOG.debug("MOB Cleaner is archiving: {}", p);
HFileArchiver.archiveStoreFile(conf, fs, MobUtils.getMobRegionInfo(tableName), mobTableDir, family, p);
}
} | 3.26 |
hbase_MobFileCleanupUtil_m0_rdh | /**
* Performs housekeeping file cleaning (called by MOB Cleaner chore)
*
* @param conf
* configuration
* @param table
* table name
* @throws IOException
* exception
*/
public static void m0(Configuration conf, TableName table, Admin admin) throws IOException {
long minAgeToArchive = conf.getLong(MobConstants.MIN_AGE_TO_ARCHIVE_KEY, MobConstants.DEFAULT_MIN_AGE_TO_ARCHIVE);
    // We only check MOB files whose creation time is less than
    // maxCreationTimeToArchive, i.e. current time minus the minimum age (1 hour by default).
    // The gap gives us full confidence that all corresponding store files will
    // exist at the time the cleaning procedure begins and will be examined.
    // So, if a MOB file's creation time is greater than maxCreationTimeToArchive,
    // it will be skipped and won't be archived.
long maxCreationTimeToArchive = EnvironmentEdgeManager.currentTime() - minAgeToArchive;
    TableDescriptor htd = admin.getDescriptor(table);
    List<ColumnFamilyDescriptor> list = MobUtils.getMobColumnFamilies(htd);
if (list.size() == 0) {
LOG.info("Skipping non-MOB table [{}]", table);
return;
} else {
      LOG.info("Only MOB files whose creation time is older than {} will be archived, table={}", maxCreationTimeToArchive, table);
}
FileSystem fs = FileSystem.get(conf);
    Set<String> regionNames = new HashSet<>();
Path rootDir = CommonFSUtils.getRootDir(conf);
Path tableDir = CommonFSUtils.getTableDir(rootDir, table);
List<Path> regionDirs = FSUtils.getRegionDirs(fs, tableDir);
Set<String> allActiveMobFileName = new HashSet<String>();
    for (Path regionPath : regionDirs) {
      regionNames.add(regionPath.getName());
for (ColumnFamilyDescriptor hcd : list) {
String family = hcd.getNameAsString();
Path storePath = new Path(regionPath, family);
boolean succeed = false;
Set<String> regionMobs = new HashSet<String>();
while (!succeed) {
if (!fs.exists(storePath)) {
String errMsg = String.format("Directory %s was deleted during MOB file cleaner chore" + " execution, aborting MOB file cleaner chore.", storePath);
throw new IOException(errMsg);
}
RemoteIterator<LocatedFileStatus> rit = fs.listLocatedStatus(storePath);
List<Path> storeFiles = new ArrayList<Path>();
// Load list of store files first
while (rit.hasNext()) {
Path p = rit.next().getPath();
if (fs.isFile(p)) {
storeFiles.add(p);
}
}
LOG.info("Found {} store files in: {}", storeFiles.size(), storePath);
Path currentPath = null;
try {
for (Path pp : storeFiles) {
currentPath = pp;
LOG.trace("Store file: {}", pp);
HStoreFile sf = null;
            byte[] mobRefData = null;
byte[] bulkloadMarkerData = null;
try {
sf = new HStoreFile(fs, pp, conf, CacheConfig.DISABLED, BloomType.NONE, true);
sf.initReader();
mobRefData = sf.getMetadataValue(HStoreFile.MOB_FILE_REFS);
bulkloadMarkerData = sf.getMetadataValue(HStoreFile.BULKLOAD_TASK_KEY);
// close store file to avoid memory leaks
sf.closeStoreFile(true);
} catch (IOException ex) {
// When FileBased SFT is active the store dir can contain corrupted or incomplete
// files. So read errors are expected. We just skip these files.
if (ex instanceof FileNotFoundException) {
throw ex;
}
LOG.debug("Failed to get mob data from file: {} due to error.", pp.toString(), ex);
continue;
}
if (mobRefData == null) {
              if (bulkloadMarkerData == null) {
LOG.warn("Found old store file with no MOB_FILE_REFS: {} - " + "can not proceed until all old files will be MOB-compacted.", pp);
return;
} else {
LOG.debug("Skipping file without MOB references (bulkloaded file):{}", pp);
continue;
}
}
// file may or may not have MOB references, but was created by the distributed
// mob compaction code.
try {
SetMultimap<TableName, String> mobs = MobUtils.deserializeMobFileRefs(mobRefData).build();
LOG.debug("Found {} mob references for store={}", mobs.size(), sf);
              LOG.trace("Specific mob references found for store={} : {}", sf, mobs);
              regionMobs.addAll(mobs.values());
} catch (RuntimeException exception) {
throw new IOException("failure getting mob references for hfile " + sf, exception);
}}
} catch (FileNotFoundException e) {
LOG.warn("Missing file:{} Starting MOB cleaning cycle from the beginning" + " due to error", currentPath, e);
regionMobs.clear();
continue;
}
succeed = true;
}
// Add MOB references for current region/family
allActiveMobFileName.addAll(regionMobs);
      } // END column families
    } // END regions
// Check if number of MOB files too big (over 1M)
if (allActiveMobFileName.size() > 1000000) {
LOG.warn("Found too many active MOB files: {}, table={}, " + "this may result in high memory pressure.", allActiveMobFileName.size(), table);
}
LOG.debug("Found: {} active mob refs for table={}", allActiveMobFileName.size(), table);
allActiveMobFileName.stream().forEach(LOG::trace);
// Now scan MOB directories and find MOB files with no references to them
for (ColumnFamilyDescriptor hcd : list) {
      checkColumnFamilyDescriptor(conf, table, fs, admin, hcd, regionNames, maxCreationTimeToArchive);
}
} | 3.26 |
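Stripped of the filesystem and HFile plumbing, the cleaner above boils down to a set difference: anything on disk that is not in the set of active MOB references and is older than the age threshold gets archived. A self-contained sketch of that bookkeeping with made-up file names and timestamps.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class ObsoleteFileSketch {
  public static void main(String[] args) {
    Set<String> activeRefs = new HashSet<>(Arrays.asList("mobfile-a", "mobfile-b"));
    Map<String, Long> onDisk = new HashMap<>();   // file name -> creation time
    onDisk.put("mobfile-a", 1_000L);
    onDisk.put("mobfile-c", 2_000L);
    onDisk.put("mobfile-d", 9_000L);
    long maxCreationTimeToArchive = 5_000L;
    List<String> toArchive = new ArrayList<>();
    for (Map.Entry<String, Long> e : onDisk.entrySet()) {
      // unreferenced AND old enough that all referencing store files have been seen
      if (!activeRefs.contains(e.getKey()) && e.getValue() < maxCreationTimeToArchive) {
        toArchive.add(e.getKey());
      }
    }
    System.out.println(toArchive); // [mobfile-c]
  }
}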
hbase_BufferedDataBlockEncoder_copyFromNext_rdh | /**
* Copy the state from the next one into this instance (the previous state placeholder). Used to
* save the previous state when we are advancing the seeker to the next key/value.
*/
protected void copyFromNext(SeekerState nextState) {
if (keyBuffer.length != nextState.keyBuffer.length) {
keyBuffer = nextState.keyBuffer.clone();
} else if (!isValid()) {
// Note: we can only call isValid before we override our state, so this
// comes before all the assignments at the end of this method.
System.arraycopy(nextState.keyBuffer, 0, keyBuffer, 0, nextState.keyLength);
} else {
// don't copy the common prefix between this key and the previous one
System.arraycopy(nextState.keyBuffer, nextState.lastCommonPrefix, keyBuffer, nextState.lastCommonPrefix, nextState.keyLength - nextState.lastCommonPrefix);
}
currentKey.set(nextState.currentKey);
valueOffset = nextState.valueOffset;
keyLength = nextState.keyLength;
valueLength = nextState.valueLength;
lastCommonPrefix = nextState.lastCommonPrefix;
nextKvOffset = nextState.nextKvOffset;
memstoreTS = nextState.memstoreTS;
    currentBuffer = nextState.currentBuffer;
tagsOffset = nextState.tagsOffset;
tagsLength = nextState.tagsLength;
if (nextState.tagCompressionContext != null) {
tagCompressionContext = nextState.tagCompressionContext;
}
} | 3.26 |
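A self-contained sketch of the prefix-reuse copy above: when the previous key buffer is still valid (and the lengths match), only the bytes after the known common prefix need to be copied in. The keys and prefix length here are illustrative.

public class PrefixCopySketch {
  public static void main(String[] args) {
    byte[] prevKey = "row-000123/cf:q1".getBytes();
    byte[] nextKey = "row-000124/cf:q2".getBytes();
    int commonPrefix = 9;                       // bytes already shared with the previous key
    byte[] buffer = prevKey.clone();            // reused buffer holding the previous key
    // copy only the non-shared suffix of the next key over the old contents
    System.arraycopy(nextKey, commonPrefix, buffer, commonPrefix, nextKey.length - commonPrefix);
    System.out.println(new String(buffer));     // row-000124/cf:q2
  }
}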
hbase_BufferedDataBlockEncoder_afterEncodingKeyValue_rdh | /**
* Returns unencoded size added
*/
protected final int afterEncodingKeyValue(Cell cell, DataOutputStream out, HFileBlockDefaultEncodingContext encodingCtx) throws IOException {
int size = 0;
if (encodingCtx.getHFileContext().isIncludesTags())
{
int tagsLength = cell.getTagsLength();
ByteBufferUtils.putCompressedInt(out, tagsLength);
// There are some tags to be written
if (tagsLength > 0) {
TagCompressionContext tagCompressionContext = encodingCtx.getTagCompressionContext();
// When tag compression is enabled, tagCompressionContext will have a not null value. Write
// the tags using Dictionary compression in such a case
if (tagCompressionContext != null) {
// Not passing tagsLength considering that parsing of the tagsLength is not costly
PrivateCellUtil.compressTags(out, cell, tagCompressionContext);
} else {
PrivateCellUtil.writeTags(out, cell, tagsLength);
}
}
size += tagsLength + KeyValue.TAGS_LENGTH_SIZE;
}
if (encodingCtx.getHFileContext().isIncludesMvcc()) {
// Copy memstore timestamp from the byte buffer to the output stream.
long memstoreTS = cell.getSequenceId();
WritableUtils.writeVLong(out, memstoreTS);
// TODO use a writeVLong which returns the #bytes written so that 2 time parsing can be
// avoided.
size += WritableUtils.getVIntSize(memstoreTS);
}
return size;
} | 3.26 |
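The memstore timestamp above is written with Hadoop's variable-length long encoding, so small sequence ids cost a single byte and getVIntSize reports the written size without re-parsing. A small sketch, assuming Hadoop's WritableUtils is on the classpath.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import org.apache.hadoop.io.WritableUtils;

public class VLongSketch {
  public static void main(String[] args) throws Exception {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bos);
    long memstoreTS = 42L;
    WritableUtils.writeVLong(out, memstoreTS);
    System.out.println("bytes written: " + bos.size());                   // 1 for small values
    System.out.println("predicted size: " + WritableUtils.getVIntSize(memstoreTS));
  }
}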
hbase_BufferedDataBlockEncoder_findCommonPrefixInRowPart_rdh | // These findCommonPrefix* methods rely on the fact that keyOnlyKv is the "right" cell argument
// and always on-heap
  private static int findCommonPrefixInRowPart(Cell left, KeyValue.KeyOnlyKeyValue right, int rowCommonPrefix) {
if (left instanceof ByteBufferExtendedCell) {
ByteBufferExtendedCell bbLeft = ((ByteBufferExtendedCell) (left));
return ByteBufferUtils.findCommonPrefix(bbLeft.getRowByteBuffer(), bbLeft.getRowPosition() + rowCommonPrefix, left.getRowLength() - rowCommonPrefix, right.getRowArray(), right.getRowOffset() + rowCommonPrefix, right.getRowLength() - rowCommonPrefix);
} else {
      return Bytes.findCommonPrefix(left.getRowArray(), right.getRowArray(), left.getRowLength() - rowCommonPrefix, right.getRowLength() - rowCommonPrefix, left.getRowOffset() + rowCommonPrefix, right.getRowOffset() + rowCommonPrefix);
}
} | 3.26 |
hbase_BufferedDataBlockEncoder_compareCommonRowPrefix_rdh | /**
* ******************* common prefixes ************************
*/
// Having this as static is fine but if META is having DBE then we should
// change this.
public static int compareCommonRowPrefix(Cell left, Cell right, int rowCommonPrefix) {
if (left instanceof ByteBufferExtendedCell) {
ByteBufferExtendedCell bbLeft = ((ByteBufferExtendedCell) (left));
if (right instanceof ByteBufferExtendedCell) {
ByteBufferExtendedCell bbRight = ((ByteBufferExtendedCell) (right));
return ByteBufferUtils.compareTo(bbLeft.getRowByteBuffer(), bbLeft.getRowPosition() + rowCommonPrefix, left.getRowLength() - rowCommonPrefix, bbRight.getRowByteBuffer(), bbRight.getRowPosition() + rowCommonPrefix, right.getRowLength() - rowCommonPrefix);
} else {
return ByteBufferUtils.compareTo(bbLeft.getRowByteBuffer(), bbLeft.getRowPosition() + rowCommonPrefix, left.getRowLength() - rowCommonPrefix, right.getRowArray(), right.getRowOffset() + rowCommonPrefix, right.getRowLength() - rowCommonPrefix);
}
    } else if (right instanceof ByteBufferExtendedCell) {
      ByteBufferExtendedCell bbRight = (ByteBufferExtendedCell) right;
      return ByteBufferUtils.compareTo(left.getRowArray(), left.getRowOffset() + rowCommonPrefix, left.getRowLength() - rowCommonPrefix, bbRight.getRowByteBuffer(), bbRight.getRowPosition() + rowCommonPrefix, right.getRowLength() - rowCommonPrefix);
} else {
return Bytes.compareTo(left.getRowArray(), left.getRowOffset() + rowCommonPrefix, left.getRowLength() - rowCommonPrefix, right.getRowArray(), right.getRowOffset() + rowCommonPrefix, right.getRowLength() - rowCommonPrefix);
}
} | 3.26 |
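The comparators above all do the same thing under different buffer types: skip the bytes already known to be equal and compare only the remainders. A self-contained sketch of that idea on plain byte arrays (Java 9+ Arrays.compare; names and keys are illustrative).

import java.util.Arrays;

public class CommonPrefixCompareSketch {
  // Compare two row keys, skipping the first `commonPrefix` bytes that are
  // already known to be equal — the same trick the encoder uses above.
  static int compareSkippingPrefix(byte[] left, byte[] right, int commonPrefix) {
    return Arrays.compare(left, commonPrefix, left.length, right, commonPrefix, right.length);
  }

  public static void main(String[] args) {
    byte[] a = "row-000123".getBytes();
    byte[] b = "row-000145".getBytes();
    System.out.println(compareSkippingPrefix(a, b, 8)); // negative: '2' < '4'
  }
}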
hbase_MultiTableHFileOutputFormat_createCompositeKey_rdh | /**
* Alternate api which accepts a String for the tableName and ImmutableBytesWritable for the
* suffix
*
* @see MultiTableHFileOutputFormat#createCompositeKey(byte[], byte[])
*/
public static byte[] createCompositeKey(String tableName, ImmutableBytesWritable suffix) {
return combineTableNameSuffix(tableName.getBytes(Charset.forName("UTF-8")), suffix.get());
} | 3.26 |
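A hedged sketch of what building a composite key amounts to: concatenating the table name, a separator, and the per-table row suffix. The ';' separator below is only an assumption for illustration; the real combineTableNameSuffix uses its own internal delimiter.

import java.nio.charset.StandardCharsets;

public class CompositeKeySketch {
  // Assumed separator; not necessarily the byte HBase uses internally.
  private static final byte[] SEP = ";".getBytes(StandardCharsets.UTF_8);

  static byte[] createCompositeKey(String tableName, byte[] rowKeySuffix) {
    byte[] t = tableName.getBytes(StandardCharsets.UTF_8);
    byte[] out = new byte[t.length + SEP.length + rowKeySuffix.length];
    System.arraycopy(t, 0, out, 0, t.length);
    System.arraycopy(SEP, 0, out, t.length, SEP.length);
    System.arraycopy(rowKeySuffix, 0, out, t.length + SEP.length, rowKeySuffix.length);
    return out;
  }

  public static void main(String[] args) {
    byte[] key = createCompositeKey("ns:events", "row-0001".getBytes(StandardCharsets.UTF_8));
    System.out.println(new String(key, StandardCharsets.UTF_8)); // ns:events;row-0001
  }
}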
hbase_MultiTableHFileOutputFormat_configureIncrementalLoad_rdh | /**
* Analogous to
* {@link HFileOutputFormat2#configureIncrementalLoad(Job, TableDescriptor, RegionLocator)}, this
 * function will configure the requisite number of reducers to write HFiles for multiple tables
* simultaneously
*
* @param job
* See {@link org.apache.hadoop.mapreduce.Job}
* @param multiTableDescriptors
* Table descriptor and region locator pairs
*/
public static void configureIncrementalLoad(Job job, List<TableInfo> multiTableDescriptors) throws IOException {
MultiTableHFileOutputFormat.configureIncrementalLoad(job, multiTableDescriptors, MultiTableHFileOutputFormat.class);
} | 3.26 |
hbase_ThrottledInputStream_toString_rdh | /**
* {@inheritDoc }
*/
@Override
public String toString() {
return (((((((("ThrottledInputStream{" + "bytesRead=") + bytesRead) + ", maxBytesPerSec=") + maxBytesPerSec) + ", bytesPerSec=") + getBytesPerSec()) + ", totalSleepTime=") + totalSleepTime) + '}';
} | 3.26 |
hbase_ThrottledInputStream_read_rdh | /**
* Read bytes starting from the specified position. This requires rawStream is an instance of
* {@link PositionedReadable}.
*
* @return the number of bytes read
*/
public int read(long position, byte[] buffer, int offset, int length) throws IOException {
if (!(f0 instanceof PositionedReadable)) {
      throw new UnsupportedOperationException("positioned read is not supported by the internal stream");
    }
    throttle();
    int readLen = ((PositionedReadable) (f0)).read(position, buffer, offset, length);
    if (readLen != -1) {
      bytesRead += readLen;
    }
    return readLen;
} | 3.26 |
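The throttling behind this stream only needs the running byte count and the elapsed time: when the observed rate exceeds maxBytesPerSec, sleep until the average drops back under the cap. A self-contained sketch with made-up numbers; this is the general idea, not the exact HBase sleep policy.

public class ThrottleSketch {
  public static void main(String[] args) throws InterruptedException {
    long maxBytesPerSec = 1024;       // target cap
    long bytesRead = 0;
    long start = System.currentTimeMillis();
    for (int i = 0; i < 10; i++) {
      bytesRead += 512;               // pretend we just read 512 bytes
      long elapsedMs = Math.max(1, System.currentTimeMillis() - start);
      double rate = bytesRead * 1000.0 / elapsedMs;
      if (rate > maxBytesPerSec) {
        // sleep just long enough to bring the average rate back under the cap
        long targetElapsedMs = bytesRead * 1000 / maxBytesPerSec;
        Thread.sleep(targetElapsedMs - elapsedMs);
      }
    }
    System.out.printf("effective rate: %.1f B/s%n",
        bytesRead * 1000.0 / (System.currentTimeMillis() - start));
  }
}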