name (string, 12–178 chars) | code_snippet (string, 8–36.5k chars) | score (float64, 3.26–3.68)
---|---|---|
hbase_Procedure_updateTimestamp_rdh | /**
* Called by ProcedureExecutor after each time a procedure step is executed.
 */
protected void updateTimestamp() {
this.lastUpdate = EnvironmentEdgeManager.currentTime();
} | 3.26 |
hbase_Procedure_setChildrenLatch_rdh | /**
* Called by the ProcedureExecutor on procedure-load to restore the latch state
*/
protected synchronized void setChildrenLatch(int numChildren) {
  this.childrenLatch = numChildren;
  if (LOG.isTraceEnabled()) {
    LOG.trace("CHILD LATCH INCREMENT SET " + this.childrenLatch, new Throwable(this.toString()));
  }
} | 3.26 |
hbase_Procedure_setNonceKey_rdh | /**
* Called by the ProcedureExecutor to set the value to the newly created procedure.
*/
protected void setNonceKey(NonceKey nonceKey) {
this.nonceKey = nonceKey;
} | 3.26 |
hbase_Procedure_getTimeoutTimestamp_rdh | /**
 * Timestamp of the next timeout. Called by the ProcedureExecutor if the procedure has timeout set
* and the procedure is in the waiting queue.
*
* @return the timestamp of the next timeout.
*/
protected long getTimeoutTimestamp() {
  return getLastUpdate() + getTimeout();
} | 3.26 |
hbase_Procedure_afterReplay_rdh | /**
* Called when the procedure is ready to be added to the queue after the loading/replay operation.
*/
protected void afterReplay(TEnvironment env) {
// no-op
} | 3.26 |
hbase_Procedure_isFailed_rdh | /**
* Returns true if the procedure has failed. It may or may not have rolled back.
*/
public synchronized boolean isFailed() {
return (state == ProcedureState.FAILED) || (state == ProcedureState.ROLLEDBACK);
} | 3.26 |
hbase_Procedure_getProcIdHashCode_rdh | // ==========================================================================
// misc utils
// ==========================================================================
/**
 * Get a hashcode for the specified Procedure ID
*
* @return the hashcode for the specified procId
*/
public static long getProcIdHashCode(long procId) {
long h = procId;
  h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
h *= 0xc2b2ae35;
h ^= h >> 16;
return h;
} | 3.26 |
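Note: the two multiplier constants above are the finalization ("avalanche") constants from MurmurHash3's 32-bit finalizer, so consecutive procedure IDs map to well-spread hash values. Below is a minimal, self-contained sketch of one plausible use — spreading procedure IDs across N run queues. The demo class, queue count, and queue-selection use case are illustrative assumptions, not HBase code.

// Hypothetical demo; the mixing steps mirror getProcIdHashCode above.
public class ProcIdHashDemo {
  static long getProcIdHashCode(long procId) {
    long h = procId;
    h ^= h >> 16;
    h *= 0x85ebca6b; // MurmurHash3 avalanche constant
    h ^= h >> 13;
    h *= 0xc2b2ae35; // MurmurHash3 avalanche constant
    h ^= h >> 16;
    return h;
  }

  public static void main(String[] args) {
    int numQueues = 8; // assumed queue count, for illustration only
    for (long procId = 1; procId <= 4; procId++) {
      // floorMod keeps the index non-negative even when the hash is negative
      int queue = (int) Math.floorMod(getProcIdHashCode(procId), (long) numQueues);
      System.out.println("procId=" + procId + " -> queue " + queue);
    }
  }
}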
hbase_Procedure_childrenCountDown_rdh | /**
* Called by the ProcedureExecutor to notify that one of the sub-procedures has completed.
*/
private synchronized boolean childrenCountDown() {
assert childrenLatch > 0 : this;
boolean b = (--childrenLatch) == 0;
if (LOG.isTraceEnabled()) {
LOG.trace("CHILD LATCH DECREMENT " + childrenLatch, new Throwable(this.toString()));
}
return b;
} | 3.26 |
hbase_Procedure_waitInitialized_rdh | /**
* The {@link #doAcquireLock(Object, ProcedureStore)} will be split into two steps, first, it will
* call us to determine whether we need to wait for initialization, second, it will call
* {@link #acquireLock(Object)} to actually handle the lock for this procedure.
* <p/>
 * This is because when the master restarts, we need to restore the lock state for all the
 * procedures so as not to break the semantics if {@link #holdLock(Object)} is true. But the
 * {@link ProcedureExecutor} will be started before the master finishes initialization (as it is
 * part of the initialization!), so we need to split the code into two steps, and when restoring,
 * we just restore the lock part and ignore the waitInitialized part. Otherwise there will be a
 * deadlock.
 *
 * @return true means we need to wait until the environment has been initialized, otherwise false.
*/
protected boolean waitInitialized(TEnvironment env) {
  return false;
} | 3.26 |
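A minimal sketch of how a subclass might override this hook, assuming the environment exposes an initialization flag; the getMasterServices().isInitialized() accessor chain is an assumption for illustration, not a confirmed API.

// Hedged sketch: park the procedure until the (assumed) environment reports it is initialized.
@Override
protected boolean waitInitialized(MasterProcedureEnv env) {
  // isInitialized() is an assumed accessor, used here only to illustrate the contract:
  // return true to wait, false to proceed to acquireLock().
  return !env.getMasterServices().isInitialized();
}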
hbase_Procedure_haveSameParent_rdh | /**
*
* @param a
* the first procedure to be compared.
* @param b
* the second procedure to be compared.
* @return true if the two procedures have the same parent
*/
public static boolean haveSameParent(Procedure<?> a, Procedure<?> b) {
return (a.hasParent() && b.hasParent()) && (a.getParentProcId() == b.getParentProcId());
} | 3.26 |
hbase_Procedure_toStringClassDetails_rdh | /**
* Extend the toString() information with the procedure details e.g. className and parameters
*
* @param builder
* the string builder to use to append the proc specific information
*/
protected void toStringClassDetails(StringBuilder builder) {
  builder.append(getClass().getName());
} | 3.26 |
hbase_Procedure_updateMetricsOnFinish_rdh | /**
* This function will be called just after procedure execution is finished. Override this method
* to update metrics at the end of the procedure. If {@link #getProcedureMetrics(Object)} returns
* non-null {@link ProcedureMetrics}, the default implementation adds runtime of a procedure to a
* time histogram for successfully completed procedures. Increments failed counter for failed
* procedures.
* <p/>
* TODO: As any of the sub-procedures on failure rolls back all procedures in the stack, including
* successfully finished siblings, this function may get called twice in certain cases for certain
* procedures. Explore further if this can be called once.
*
* @param env
* The environment passed to the procedure executor
* @param runtime
* Runtime of the procedure in milliseconds
* @param success
* true if procedure is completed successfully
*/
protected void updateMetricsOnFinish(TEnvironment env, long runtime, boolean success) {
ProcedureMetrics metrics = getProcedureMetrics(env);
if (metrics == null) {
return;
}
    if (success) {
      Histogram timeHisto = metrics.getTimeHisto();
if (timeHisto != null) {
timeHisto.update(runtime);
}
} else {
Counter failedCounter = metrics.getFailedCounter();
if (failedCounter != null) {
failedCounter.increment();
}
}
} | 3.26 |
hbase_Procedure_setLastUpdate_rdh | /**
* Called on store load to initialize the Procedure internals after the creation/deserialization.
*/
protected void setLastUpdate(long lastUpdate) {
this.lastUpdate = lastUpdate;
} | 3.26 |
hbase_Procedure_isSuccess_rdh | /**
* Returns true if the procedure is finished successfully.
*/
public synchronized boolean isSuccess() {
  return (state == ProcedureState.SUCCESS) && (!hasException());
} | 3.26 |
hbase_Procedure_completionCleanup_rdh | /**
* Called when the procedure is marked as completed (success or rollback). The procedure
* implementor may use this method to cleanup in-memory states. This operation will not be retried
* on failure. If a procedure took a lock, it will have been released when this method runs.
*/
protected void completionCleanup(TEnvironment env) {
// no-op
} | 3.26 |
hbase_Procedure_getRootProcedureId_rdh | /**
* Helper to lookup the root Procedure ID given a specified procedure.
*/
protected static <T> Long getRootProcedureId(Map<Long, Procedure<T>> procedures, Procedure<T> proc) {
while (proc.hasParent()) {
proc = procedures.get(proc.getParentProcId());
if (proc == null) {
return null;
}
}
  return proc.getProcId();
} | 3.26 |
hbase_Procedure_lockedWhenLoading_rdh | /**
* Will only be called when loading procedures from procedure store, where we need to record
* whether the procedure has already held a lock. Later we will call {@link #restoreLock(Object)}
* to actually acquire the lock.
*/
final void lockedWhenLoading() {
this.lockedWhenLoading = true;
} | 3.26 |
hbase_Procedure_doRollback_rdh | /**
* Internal method called by the ProcedureExecutor that starts the user-level code rollback().
*/
protected void doRollback(TEnvironment env) throws IOException, InterruptedException {
try {
updateTimestamp();
if (bypass) {
LOG.info("{} bypassed, skipping rollback", this);
return;
}
rollback(env);
} finally {
updateTimestamp();
  }
} | 3.26 |
hbase_Procedure_isLockedWhenLoading_rdh | /**
 * Can only be called when restarting, before the procedure is actually executed, as after we
* actually call the {@link #doAcquireLock(Object, ProcedureStore)} method, we will reset
* {@link #lockedWhenLoading} to false.
* <p/>
* Now it is only used in the ProcedureScheduler to determine whether we should put a Procedure in
* front of a queue.
*/
public boolean isLockedWhenLoading() {
return lockedWhenLoading;
} | 3.26 |
hbase_Procedure_incChildrenLatch_rdh | /**
* Called by the ProcedureExecutor on procedure-load to restore the latch state
*/
protected synchronized void incChildrenLatch() {
// TODO: can this be inferred from the stack? I think so...
this.childrenLatch++;
if (LOG.isTraceEnabled()) {
LOG.trace("CHILD LATCH INCREMENT " + this.childrenLatch, new Throwable(this.toString()));
}
} | 3.26 |
hbase_Procedure_isWaiting_rdh | /**
* Returns true if the procedure is waiting for a child to finish or for an external event.
*/
public synchronized boolean isWaiting() {
switch (state) {
case WAITING :
case WAITING_TIMEOUT :
return true;
default :
break;
}
return false;
} | 3.26 |
hbase_Procedure_acquireLock_rdh | /**
* The user should override this method if they need a lock on an Entity. A lock can be anything,
* and it is up to the implementor. The Procedure Framework will call this method just before it
* invokes {@link #execute(Object)}. It calls {@link #releaseLock(Object)} after the call to
* execute.
* <p/>
* If you need to hold the lock for the life of the Procedure -- i.e. you do not want any other
* Procedure interfering while this Procedure is running, see {@link #holdLock(Object)}.
* <p/>
 * Example: in our Master we can execute requests in parallel for different tables. We can create
 * t1 and create t2, and these creates can be executed at the same time. Anything else on t1/t2 is
 * queued waiting for that specific table create to happen.
* <p/>
* There are 3 LockState:
* <ul>
* <li>LOCK_ACQUIRED should be returned when the proc has the lock and the proc is ready to
* execute.</li>
 * <li>LOCK_YIELD_WAIT should be returned when the proc does not have the lock and the framework
 * should take care of re-adding the procedure back to the runnable set for retry</li>
 * <li>LOCK_EVENT_WAIT should be returned when the proc does not have the lock and someone will
 * take care of re-adding the procedure back to the runnable set when the lock is available.</li>
* </ul>
*
* @return the lock state as described above.
*/
protected LockState acquireLock(TEnvironment env) {
  return LockState.LOCK_ACQUIRED;
} | 3.26 |
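A small standalone sketch of the three-state protocol described above, using an invented in-memory table-lock registry. The LockStateDemo class, tableLocks map, and the table-name keying are hypothetical illustrations, not HBase APIs.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical demo of the LOCK_ACQUIRED / LOCK_EVENT_WAIT decision described above.
public class LockStateDemo {
  enum LockState { LOCK_ACQUIRED, LOCK_YIELD_WAIT, LOCK_EVENT_WAIT }

  // Invented registry: one exclusive holder (procId) per table name.
  static final Map<String, Long> tableLocks = new ConcurrentHashMap<>();

  static LockState acquireLock(String table, long procId) {
    // putIfAbsent returns null when this procedure won the lock
    Long holder = tableLocks.putIfAbsent(table, procId);
    if (holder == null || holder == procId) {
      return LockState.LOCK_ACQUIRED;
    }
    // Someone else holds it; in a real scheduler the release path would
    // re-add this procedure to the runnable set, hence LOCK_EVENT_WAIT.
    return LockState.LOCK_EVENT_WAIT;
  }

  public static void main(String[] args) {
    System.out.println(acquireLock("t1", 1)); // LOCK_ACQUIRED
    System.out.println(acquireLock("t1", 2)); // LOCK_EVENT_WAIT
    System.out.println(acquireLock("t2", 2)); // LOCK_ACQUIRED
  }
}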
hbase_Procedure_getProcedureMetrics_rdh | /**
* Override this method to provide procedure specific counters for submitted count, failed count
* and time histogram.
*
* @param env
* The environment passed to the procedure executor
* @return Container object for procedure related metric
*/
protected ProcedureMetrics getProcedureMetrics(TEnvironment env) {
return null;
} | 3.26 |
hbase_Procedure_beforeReplay_rdh | /**
* Called when the procedure is loaded for replay. The procedure implementor may use this method
* to perform some quick operation before replay. e.g. failing the procedure if the state on
* replay may be unknown.
*/
protected void beforeReplay(TEnvironment env) {
// no-op
} | 3.26 |
hbase_Procedure_doAcquireLock_rdh | /**
* Internal method called by the ProcedureExecutor that starts the user-level code acquireLock().
*/
final LockState doAcquireLock(TEnvironment env, ProcedureStore store) {
if (waitInitialized(env)) {
return LockState.LOCK_EVENT_WAIT;
}
if (lockedWhenLoading) {
// reset it so we will not consider it anymore
    lockedWhenLoading = false;
    locked = true;
    // Here we return without persisting the locked state, as lockedWhenLoading being true means
    // that the locked field of the procedure stored in the procedure store is already true, so we
    // do not need to store it again.
return LockState.LOCK_ACQUIRED;
}
LockState state = acquireLock(env);
if (state == LockState.LOCK_ACQUIRED) {
locked = true;
    // Persist that we have held the lock. This must be done before we actually execute the
    // procedure, otherwise when restarting, we may consider that the procedure does not hold a
    // lock, even though it may already have made some changes as we have already executed it;
    // and if another procedure gets the lock, the semantics will be broken if holdLock is true,
    // as we do not expect another procedure to be executed in the middle.
store.update(this);
}
return state;
} | 3.26 |
hbase_Procedure_holdLock_rdh | /**
* Used to keep the procedure lock even when the procedure is yielding or suspended.
*
 * @return true if the procedure should hold on to the lock until completionCleanup()
*/
protected boolean holdLock(TEnvironment env) {
return false;
} | 3.26 |
hbase_Procedure_getProcId_rdh | // ==========================================================================
// Those fields are unchanged after initialization.
//
// Each procedure will get created from the user or during
// ProcedureExecutor.start() during the load() phase and then submitted
// to the executor. These fields will never be changed after initialization.
// ==========================================================================
public long getProcId() {
return procId;
} | 3.26 |
hbase_Procedure_toStringState_rdh | /**
* Called from {@link #toString()} when interpolating {@link Procedure} State. Allows decorating
* generic Procedure State with Procedure particulars.
*
* @param builder
* Append current {@link ProcedureState}
*/
protected void toStringState(StringBuilder builder) {
builder.append(getState());
} | 3.26 |
hbase_Procedure_setParentProcId_rdh | /**
* Called by the ProcedureExecutor to assign the parent to the newly created procedure.
*/
protected void setParentProcId(long parentProcId) {
this.parentProcId = parentProcId;
} | 3.26 |
hbase_Procedure_setResult_rdh | /**
* The procedure may leave a "result" on completion.
*
* @param result
* the serialized result that will be passed to the client
*/
protected void setResult(byte[] result) {
this.result = result;
} | 3.26 |
hbase_Procedure_updateMetricsOnSubmit_rdh | /**
 * This function will be called when the procedure is submitted for execution. Override this
* method to update the metrics at the beginning of the procedure. The default implementation
* updates submitted counter if {@link #getProcedureMetrics(Object)} returns non-null
* {@link ProcedureMetrics}.
*/
protected void updateMetricsOnSubmit(TEnvironment env) {
ProcedureMetrics metrics = getProcedureMetrics(env);
if (metrics == null) {
return;
}
Counter submittedCounter = metrics.getSubmittedCounter();
if (submittedCounter != null) {
submittedCounter.increment();
}
} | 3.26 |
hbase_SnappyCodec_isLoaded_rdh | /**
* Return true if the native shared libraries were loaded; false otherwise.
*/
public static boolean isLoaded() {
return loaded;
} | 3.26 |
hbase_SnappyCodec_getBufferSize_rdh | // Package private
static int getBufferSize(Configuration conf) {
  return conf.getInt(SNAPPY_BUFFER_SIZE_KEY,
    conf.getInt(CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY,
      CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT));
} | 3.26 |
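A brief usage sketch of the fallback chain above: the codec-specific key wins, then the Hadoop-wide Snappy buffer key, then the Hadoop default. This is a hedged fragment; it assumes access to the same constants the method itself reads.

import org.apache.hadoop.conf.Configuration;

Configuration conf = new Configuration();
int a = getBufferSize(conf); // no keys set: falls through to the Hadoop-wide default
conf.setInt(CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY, 32 * 1024);
int b = getBufferSize(conf); // 32768: Hadoop-wide key used when the codec key is absent
conf.setInt(SNAPPY_BUFFER_SIZE_KEY, 64 * 1024);
int c = getBufferSize(conf); // 65536: the codec-specific key takes precedence over both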
hbase_DisableTableProcedure_holdLock_rdh | // For disabling a table, we does not care whether a region can be online so hold the table xlock
// for ever. This will simplify the logic as we will not be conflict with procedures other than
// SCP.
@Overrideprotected boolean holdLock(MasterProcedureEnv env) {
return true;
} | 3.26 |
hbase_DisableTableProcedure_prepareDisable_rdh | /**
* Action before any real action of disabling table. Set the exception in the procedure instead of
 * throwing it. This approach is for backward compatibility with 1.0.
*
* @param env
* MasterProcedureEnv
*/
private boolean prepareDisable(final MasterProcedureEnv env) throws IOException {
boolean canTableBeDisabled = true;
if (tableName.equals(TableName.META_TABLE_NAME)) {
setFailure("master-disable-table", new ConstraintException("Cannot disable " + this.tableName));
canTableBeDisabled = false;
} else if (!env.getMasterServices().getTableDescriptors().exists(tableName)) {
setFailure("master-disable-table", new TableNotFoundException(tableName));
canTableBeDisabled = false;
} else if (!skipTableStateCheck) {
// There could be multiple client requests trying to disable or enable
// the table at the same time. Ensure only the first request is honored
// After that, no other requests can be accepted until the table reaches
// DISABLED or ENABLED.
//
// Note: in 1.0 release, we called TableStateManager.setTableStateIfInStates() to set
// the state to DISABLING from ENABLED. The implementation was done before table lock
// was implemented. With table lock, there is no need to set the state here (it will
// set the state later on). A quick state check should be enough for us to move forward.
TableStateManager tsm = env.getMasterServices().getTableStateManager();
TableState ts = tsm.getTableState(tableName);
if (!ts.isEnabled()) {
LOG.info("Not ENABLED, state={}, skipping disable; {}", ts.getState(), this);
setFailure("master-disable-table", new TableNotEnabledException(ts.toString()));
canTableBeDisabled = false;
}
}
    // We are done with the check. Future actions in this procedure can be done asynchronously.
releaseSyncLatch();
return canTableBeDisabled;
} | 3.26 |
hbase_DisableTableProcedure_postDisable_rdh | /**
* Action after disabling table.
*
* @param env
* MasterProcedureEnv
* @param state
* the procedure state
*/
protected void postDisable(final MasterProcedureEnv env, final DisableTableState state) throws IOException, InterruptedException {
runCoprocessorAction(env, state);
} | 3.26 |
hbase_DisableTableProcedure_runCoprocessorAction_rdh | /**
* Coprocessor Action.
*
* @param env
* MasterProcedureEnv
* @param state
* the procedure state
*/
private void runCoprocessorAction(final MasterProcedureEnv env, final DisableTableState state) throws IOException, InterruptedException {
final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
if (cpHost != null) {
switch (state) {
        case DISABLE_TABLE_PRE_OPERATION:
cpHost.preDisableTableAction(tableName, getUser());
break;
case DISABLE_TABLE_POST_OPERATION :
cpHost.postCompletedDisableTableAction(tableName, getUser());
break;
default :
throw new UnsupportedOperationException((this + " unhandled state=") + state);
}
}
} | 3.26 |
hbase_DisableTableProcedure_preDisable_rdh | /**
* Action before disabling table.
*
* @param env
* MasterProcedureEnv
* @param state
* the procedure state
*/
protected void preDisable(final MasterProcedureEnv env, final DisableTableState state) throws IOException, InterruptedException {
runCoprocessorAction(env, state);
} | 3.26 |
hbase_DisableTableProcedure_setTableStateToDisabled_rdh | /**
* Mark table state to Disabled
*
* @param env
* MasterProcedureEnv
*/
protected static void setTableStateToDisabled(final MasterProcedureEnv env, final TableName tableName) throws IOException {
// Flip the table to disabled
env.getMasterServices().getTableStateManager().setTableState(tableName, State.DISABLED);
LOG.info("Set {} to state={}", tableName, State.DISABLED);
} | 3.26 |
hbase_DisableTableProcedure_setTableStateToDisabling_rdh | /**
* Mark table state to Disabling
*
* @param env
* MasterProcedureEnv
*/
private static void setTableStateToDisabling(final MasterProcedureEnv env,
  final TableName tableName) throws IOException {
// Set table disabling flag up in zk.
env.getMasterServices().getTableStateManager().setTableState(tableName, State.DISABLING);
LOG.info("Set {} to state={}", tableName, State.DISABLING); } | 3.26 |
hbase_ChoreService_printChoreDetails_rdh | /**
* Prints a summary of important details about the chore. Used for debugging purposes
*/
private void printChoreDetails(final String header, ScheduledChore chore) {
if (!LOG.isTraceEnabled()) {
return;
}
LinkedHashMap<String, String> output = new LinkedHashMap<>();
output.put(header, "");
output.put("Chore name: ", chore.getName());
output.put("Chore period: ", Integer.toString(chore.getPeriod()));
output.put("Chore timeBetweenRuns: ", Long.toString(chore.getTimeBetweenRuns()));
for (Entry<String, String> entry : output.entrySet()) {
LOG.trace(entry.getKey() + entry.getValue());
}
} | 3.26 |
hbase_ChoreService_requestCorePoolIncrease_rdh | /**
* Represents a request to increase the number of core pool threads. Typically a request
* originates from the fact that the current core pool size is not sufficient to service all of
* the currently running Chores
*
* @return true when the request to increase the core pool size succeeds
*/
private synchronized boolean requestCorePoolIncrease() {
// There is no point in creating more threads than scheduledChores.size since scheduled runs
// of the same chore cannot run concurrently (i.e. happen-before behavior is enforced
// amongst occurrences of the same chore).
    if (scheduler.getCorePoolSize() < scheduledChores.size()) {
      scheduler.setCorePoolSize(scheduler.getCorePoolSize() + 1);
printChoreServiceDetails("requestCorePoolIncrease");
return true;
}
return false;
} | 3.26 |
hbase_ChoreService_cancelChore_rdh | /**
* Cancel any ongoing schedules that this chore has with the implementer of this interface.
* <p/>
* Call {@link ScheduledChore#cancel(boolean)} to cancel a {@link ScheduledChore}, in
* {@link ScheduledChore#cancel(boolean)} method we will call this method to remove the
* {@link ScheduledChore} from this {@link ChoreService}.
*/
@RestrictedApi(explanation = "Should only be called in ScheduledChore", link = "", allowedOnPath = ".*/org/apache/hadoop/hbase/(ScheduledChore|ChoreService).java")
synchronized void cancelChore(ScheduledChore chore, boolean mayInterruptIfRunning) {
if (scheduledChores.containsKey(chore)) {
ScheduledFuture<?> future = scheduledChores.get(chore);
future.cancel(mayInterruptIfRunning);
scheduledChores.remove(chore);
// Removing a chore that was missing its start time means it may be possible
// to reduce the number of threads
if (choresMissingStartTime.containsKey(chore)) {
choresMissingStartTime.remove(chore);
requestCorePoolDecrease();
}
}
} | 3.26 |
hbase_ChoreService_printChoreServiceDetails_rdh | /**
* Prints a summary of important details about the service. Used for debugging purposes
*/
private void printChoreServiceDetails(final String header) {
if (!LOG.isTraceEnabled()) {
return;
}
    LinkedHashMap<String, String> output = new LinkedHashMap<>();
output.put(header, "");
output.put("ChoreService corePoolSize: ", Integer.toString(getCorePoolSize()));
output.put("ChoreService scheduledChores: ", Integer.toString(getNumberOfScheduledChores()));
output.put("ChoreService missingStartTimeCount: ", Integer.toString(getNumberOfChoresMissingStartTime()));
for (Entry<String, String> entry : output.entrySet()) {
      LOG.trace(entry.getKey() + entry.getValue());
    }
} | 3.26 |
hbase_ChoreService_requestCorePoolDecrease_rdh | /**
* Represents a request to decrease the number of core pool threads. Typically a request
* originates from the fact that the current core pool size is more than sufficient to service the
* running Chores.
*/
private synchronized void requestCorePoolDecrease() {
    if (scheduler.getCorePoolSize() > MIN_CORE_POOL_SIZE) {
scheduler.setCorePoolSize(scheduler.getCorePoolSize() - 1);
printChoreServiceDetails("requestCorePoolDecrease");
}
} | 3.26 |
hbase_ChoreService_getNumberOfScheduledChores_rdh | /**
* Returns number of chores that this service currently has scheduled
*/
int getNumberOfScheduledChores() {
return scheduledChores.size();
} | 3.26 |
hbase_ChoreService_isChoreScheduled_rdh | /**
* Returns true when the chore is scheduled with the implementer of this interface
*/
@InterfaceAudience.Private
public synchronized boolean isChoreScheduled(ScheduledChore chore) {
    return ((chore != null) && scheduledChores.containsKey(chore))
      && (!scheduledChores.get(chore).isDone());
} | 3.26 |
hbase_ChoreService_onChoreMissedStartTime_rdh | /**
* A callback that tells the implementer of this interface that one of the scheduled chores is
* missing its start time. The implication of a chore missing its start time is that the service's
* current means of scheduling may not be sufficient to handle the number of ongoing chores (the
* other explanation is that the chore's execution time is greater than its scheduled period). The
* service should try to increase its concurrency when this callback is received.
*
* @param chore
* The chore that missed its start time
*/
@RestrictedApi(explanation = "Should only be called in ScheduledChore", link = "", allowedOnPath = ".*/org/apache/hadoop/hbase/ScheduledChore.java")
synchronized void onChoreMissedStartTime(ScheduledChore chore) {
if (!scheduledChores.containsKey(chore)) {
return;
}
// If the chore has not caused an increase in the size of the core thread pool then request an
// increase. This allows each chore missing its start time to increase the core pool size by
// at most 1.
if ((!choresMissingStartTime.containsKey(chore)) || (!choresMissingStartTime.get(chore))) {
choresMissingStartTime.put(chore, requestCorePoolIncrease());
}
// Must reschedule the chore to prevent unnecessary delays of chores in the scheduler. If
// the chore is NOT rescheduled, future executions of this chore will be delayed more and
// more on each iteration. This hurts us because the ScheduledThreadPoolExecutor allocates
// idle threads to chores based on how delayed they are.
rescheduleChore(chore, false);
printChoreDetails("onChoreMissedStartTime", chore);
} | 3.26 |
hbase_ChoreService_isTerminated_rdh | /**
* Returns true when the service is shutdown and all threads have terminated
*/
public boolean isTerminated() {
  return scheduler.isTerminated();
} | 3.26 |
hbase_ChoreService_getNumberOfChoresMissingStartTime_rdh | /**
* Return number of chores that this service currently has scheduled that are missing their
* scheduled start time
*/
int getNumberOfChoresMissingStartTime() {
  return choresMissingStartTime.size();
} | 3.26 |
hbase_ChoreService_shutdown_rdh | /**
* Shut down the service. Any chores that are scheduled for execution will be cancelled. Any
* chores in the middle of execution will be interrupted and shutdown. This service will be
* unusable after this method has been called (i.e. future scheduling attempts will fail).
* <p/>
* Notice that, this will only clean the chore from this ChoreService but you could still schedule
* the chore with other ChoreService.
*/
public synchronized void shutdown() {
if (isShutdown()) {
return;
}
scheduler.shutdownNow();
LOG.info("Chore service for: {} had {} on shutdown", coreThreadPoolPrefix, scheduledChores.keySet());
cancelAllChores(true);
scheduledChores.clear();
choresMissingStartTime.clear();
} | 3.26 |
hbase_ChoreService_getCorePoolSize_rdh | /**
* Returns number of threads in the core pool of the underlying ScheduledThreadPoolExecutor
*/
int getCorePoolSize() {
  return scheduler.getCorePoolSize();
} | 3.26 |
hbase_ChoreService_isShutdown_rdh | /**
* Returns true when the service is shutdown and thus cannot be used anymore
*/
public boolean isShutdown() {
return scheduler.isShutdown();
} | 3.26 |
hbase_ChoreService_rescheduleChore_rdh | /**
*
 * @param chore
 *          The Chore to be rescheduled. If the chore is not scheduled with this ChoreService
 *          yet then this call is equivalent to a call to scheduleChore.
 * @param immediately
 *          Whether the chore should be rescheduled to run immediately (initial delay of 0) or
 *          only after one full period has elapsed.
 */
private void rescheduleChore(ScheduledChore chore, boolean immediately) {
if (scheduledChores.containsKey(chore)) {
ScheduledFuture<?> future = scheduledChores.get(chore);
future.cancel(false);
}
    // if immediately is requested, use an initial delay of 0 so the chore runs right away;
    // otherwise wait one full period before the first run
ScheduledFuture<?> future = scheduler.scheduleAtFixedRate(chore, immediately ? 0 : chore.getPeriod(), chore.getPeriod(), chore.getTimeUnit());
scheduledChores.put(chore, future);
} | 3.26 |
hbase_FastPathBalancedQueueRpcExecutor_popReadyHandler_rdh | /**
 * Pops a ready-to-go Handler instance if one is available, or else returns null.
*/
private FastPathRpcHandler popReadyHandler() {
return this.fastPathHandlerStack.poll();
} | 3.26 |
hbase_BackupManager_close_rdh | /**
* Stop all the work of backup.
*/
@Override
public void close() {
    if (systemTable != null) {
      try {
systemTable.close();
} catch (Exception e) {
LOG.error(e.toString(), e);
}
}
} | 3.26 |
hbase_BackupManager_getBackupHistory_rdh | /**
* Get all completed backup information (in desc order by time)
*
* @return history info of BackupCompleteData
* @throws IOException
* exception
*/
public List<BackupInfo> getBackupHistory() throws IOException {
  return systemTable.getBackupHistory();
} | 3.26 |
hbase_BackupManager_readBackupStartCode_rdh | /**
* Read the last backup start code (timestamp) of last successful backup. Will return null if
* there is no startcode stored in backup system table or the value is of length 0. These two
* cases indicate there is no successful backup completed so far.
*
* @return the timestamp of a last successful backup
* @throws IOException
* exception
*/
public String readBackupStartCode() throws IOException {
return systemTable.readBackupStartCode(backupInfo.getBackupRootDir());
} | 3.26 |
hbase_BackupManager_readRegionServerLastLogRollResult_rdh | /**
* Get the RS log information after the last log roll from backup system table.
*
* @return RS log info
* @throws IOException
* exception
*/
public HashMap<String, Long> readRegionServerLastLogRollResult() throws IOException {
return systemTable.readRegionServerLastLogRollResult(backupInfo.getBackupRootDir());
} | 3.26 |
hbase_BackupManager_writeBackupStartCode_rdh | /**
* Write the start code (timestamp) to backup system table. If passed in null, then write 0 byte.
*
* @param startCode
* start code
* @throws IOException
* exception
*/
public void writeBackupStartCode(Long startCode) throws IOException {
systemTable.writeBackupStartCode(startCode, backupInfo.getBackupRootDir());
} | 3.26 |
hbase_BackupManager_initialize_rdh | /**
* Start the backup manager service.
*
* @throws IOException
* exception
*/
public void initialize() throws IOException {
String ongoingBackupId = this.getOngoingBackupId();
if (ongoingBackupId != null) {
LOG.info("There is a ongoing backup {}" + ". Can not launch new backup until no ongoing backup remains.", ongoingBackupId);
throw new BackupException("There is ongoing backup seesion.");
}
} | 3.26 |
hbase_BackupManager_getBackupInfo_rdh | /**
* Returns backup info
*/
protected BackupInfo getBackupInfo() {
return backupInfo;
} | 3.26 |
hbase_BackupManager_m2_rdh | /**
 * Adds a set of tables to the overall incremental backup table set
*
* @param tables
* tables
* @throws IOException
* exception
*/
public void m2(Set<TableName> tables) throws IOException {
systemTable.addIncrementalBackupTableSet(tables, backupInfo.getBackupRootDir());
} | 3.26 |
hbase_BackupManager_finishBackupSession_rdh | /**
* Finishes active backup session
*
* @throws IOException
* if no active session
*/
public void finishBackupSession() throws IOException {
  systemTable.finishBackupExclusiveOperation();
} | 3.26 |
hbase_BackupManager_m0_rdh | /**
* Get the direct ancestors of this backup for one table involved.
*
* @param backupInfo
* backup info
* @param table
* table
* @return backupImages on the dependency list
* @throws IOException
* exception
*/
public ArrayList<BackupImage> m0(BackupInfo backupInfo, TableName table) throws IOException {
  ArrayList<BackupImage> ancestors = getAncestors(backupInfo);
ArrayList<BackupImage> tableAncestors = new ArrayList<>();
for (BackupImage image : ancestors) {
if (image.hasTable(table)) {
tableAncestors.add(image);
if (image.getType() == BackupType.FULL) {
break;
}
}
}
return tableAncestors;
} | 3.26 |
hbase_BackupManager_updateBackupInfo_rdh | /* backup system table operations */
/**
* Updates status (state) of a backup session in a persistent store
*
* @param context
* context
* @throws IOException
* exception
*/
public void updateBackupInfo(BackupInfo context) throws IOException {
systemTable.updateBackupInfo(context);
} | 3.26 |
hbase_BackupManager_getConf_rdh | /**
* Get configuration
*/
Configuration getConf() {
return conf;
} | 3.26 |
hbase_BackupManager_startBackupSession_rdh | /**
* Starts new backup session
*
* @throws IOException
* if active session already exists
*/
public void startBackupSession() throws IOException {
long startTime = EnvironmentEdgeManager.currentTime();
long timeout = conf.getInt(BACKUP_EXCLUSIVE_OPERATION_TIMEOUT_SECONDS_KEY, DEFAULT_BACKUP_EXCLUSIVE_OPERATION_TIMEOUT) * 1000L;
long lastWarningOutputTime = 0;
while ((EnvironmentEdgeManager.currentTime() - startTime) < timeout) {
try {
systemTable.startBackupExclusiveOperation();
return;
} catch (IOException e) {
        if (e instanceof ExclusiveOperationException) {
          // sleep, then repeat
          try {
            Thread.sleep(1000);
          } catch (InterruptedException e1) {
            // Restore the interrupted status
            Thread.currentThread().interrupt();
          }
          if ((lastWarningOutputTime == 0)
            || ((EnvironmentEdgeManager.currentTime() - lastWarningOutputTime) > 60000)) {
            lastWarningOutputTime = EnvironmentEdgeManager.currentTime();
            LOG.warn("Waiting to acquire backup exclusive lock for {}s",
              (lastWarningOutputTime - startTime) / 1000);
          }
        } else {
          throw e;
        }
      }
}
throw new IOException(("Failed to acquire backup system table exclusive lock after " + (timeout / 1000)) + "s");
} | 3.26 |
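The loop above is a bounded retry pattern: attempt the exclusive operation, sleep one second between attempts, warn at most once a minute, and give up after the configured timeout. Below is a stripped-down, self-contained version of the same pattern; the tryAcquire supplier is a hypothetical stand-in for startBackupExclusiveOperation.

import java.io.IOException;
import java.util.function.BooleanSupplier;

// Generic retry-until-timeout sketch mirroring startBackupSession above.
public class RetryUntilTimeout {
  static void acquire(BooleanSupplier tryAcquire, long timeoutMs) throws IOException {
    long start = System.currentTimeMillis();
    while (System.currentTimeMillis() - start < timeoutMs) {
      if (tryAcquire.getAsBoolean()) {
        return; // acquired
      }
      try {
        Thread.sleep(1000); // back off before retrying
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore interrupt status, as above
      }
    }
    throw new IOException("Failed to acquire lock after " + timeoutMs / 1000 + "s");
  }
}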
hbase_BackupManager_createBackupInfo_rdh | /**
* Creates a backup info based on input backup request.
*
* @param backupId
* backup id
* @param type
* type
* @param tableList
* table list
* @param targetRootDir
* root dir
* @param workers
* number of parallel workers
* @param bandwidth
* bandwidth per worker in MB per sec
* @throws BackupException
* exception
*/
public BackupInfo createBackupInfo(String backupId, BackupType type, List<TableName> tableList,
    String targetRootDir, int workers, long bandwidth) throws BackupException {
if (targetRootDir == null) {
throw new BackupException("Wrong backup request parameter: target backup root directory");
}
if ((type == BackupType.FULL) && ((tableList == null) || tableList.isEmpty())) {
      // If table list is null for full backup, it means backup all tables. Then fill the table
      // list with all user tables from meta. If no table is available, throw the request
      // exception.
List<TableDescriptor> htds = null;
      try (Admin admin = conn.getAdmin()) {
        htds = admin.listTableDescriptors();
      } catch (Exception e) {
        throw new BackupException(e);
      }
      if (htds == null) {
throw new BackupException("No table exists for full backup of all tables.");
} else {
tableList = new ArrayList<>();
for (TableDescriptor hTableDescriptor : htds) {
TableName tn = hTableDescriptor.getTableName();
if (tn.equals(BackupSystemTable.getTableName(conf))) {
// skip backup system table
continue;
}
tableList.add(hTableDescriptor.getTableName());
}
LOG.info("Full backup all the tables available in the cluster: {}", tableList);
}
}
// there are one or more tables in the table list
backupInfo = new BackupInfo(backupId, type, tableList.toArray(new TableName[tableList.size()]), targetRootDir);
backupInfo.setBandwidth(bandwidth);
backupInfo.setWorkers(workers);
return backupInfo;
} | 3.26 |
hbase_BackupManager_getAncestors_rdh | /**
* Get direct ancestors of the current backup.
*
* @param backupInfo
* The backup info for the current backup
* @return The ancestors for the current backup
* @throws IOException
* exception
*/
public ArrayList<BackupImage> getAncestors(BackupInfo backupInfo) throws IOException {
LOG.debug("Getting the direct ancestors of the current backup {}", backupInfo.getBackupId());
ArrayList<BackupImage> ancestors = new ArrayList<>();
// full backup does not have ancestor
if (backupInfo.getType() == BackupType.FULL) {
LOG.debug("Current backup is a full backup, no direct ancestor for it.");
return ancestors;
}
// get all backup history list in descending order
ArrayList<BackupInfo> allHistoryList = getBackupHistory(true);
for (BackupInfo backup : allHistoryList) {
BackupImage.Builder builder = BackupImage.newBuilder();
      BackupImage image = builder.withBackupId(backup.getBackupId()).withType(backup.getType())
        .withRootDir(backup.getBackupRootDir()).withTableList(backup.getTableNames())
        .withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build();
// Only direct ancestors for a backup are required and not entire history of backup for this
// table resulting in verifying all of the previous backups which is unnecessary and backup
// paths need not be valid beyond the lifetime of a backup.
//
// RootDir is way of grouping a single backup including one full and many incremental backups
if (!image.getRootDir().equals(backupInfo.getBackupRootDir())) {
continue;
}
// add the full backup image as an ancestor until the last incremental backup
if (backup.getType().equals(BackupType.FULL)) {
// check the backup image coverage, if previous image could be covered by the newer ones,
// then no need to add
if (!BackupManifest.canCoverImage(ancestors, image)) {
ancestors.add(image);
}
      } else {
        // Found the last incremental backup. If previously added full backup ancestor images can
        // cover it, then this incremental ancestor is not a dependent of the current incremental
        // backup, that is to say, this is the backup scope boundary of the current table set.
        // Otherwise, this incremental backup ancestor is a dependent ancestor of the ongoing
        // incremental backup.
        if (BackupManifest.canCoverImage(ancestors, image)) {
LOG.debug("Met the backup boundary of the current table set:");
          for (BackupImage image1 : ancestors) {
LOG.debug(" BackupID={}, BackupDir={}", image1.getBackupId(), image1.getRootDir());
}
} else {
          Path logBackupPath =
            HBackupFileSystem.getBackupPath(backup.getBackupRootDir(), backup.getBackupId());
          LOG.debug("Current backup has an incremental backup ancestor, "
            + "touching its image manifest in {} to construct the dependency.",
            logBackupPath.toString());
BackupManifest lastIncrImgManifest = new BackupManifest(conf, logBackupPath);
BackupImage lastIncrImage = lastIncrImgManifest.getBackupImage();
ancestors.add(lastIncrImage);
LOG.debug("Last dependent incremental backup image: {BackupID={}" + "BackupDir={}}", lastIncrImage.getBackupId(), lastIncrImage.getRootDir());
        }
      }
}
LOG.debug("Got {} ancestors for the current backup.", ancestors.size());
return ancestors;
} | 3.26 |
hbase_BackupManager_writeRegionServerLogTimestamp_rdh | /**
* Write the current timestamps for each regionserver to backup system table after a successful
* full or incremental backup. Each table may have a different set of log timestamps. The saved
* timestamp is of the last log file that was backed up already.
*
* @param tables
* tables
* @throws IOException
* exception
*/
public void writeRegionServerLogTimestamp(Set<TableName> tables, Map<String, Long> newTimestamps) throws IOException {
systemTable.writeRegionServerLogTimestamp(tables, newTimestamps, backupInfo.getBackupRootDir());
} | 3.26 |
hbase_BackupManager_decorateMasterConfiguration_rdh | /**
* This method modifies the master's configuration in order to inject backup-related features
* (TESTs only)
*
* @param conf
* configuration
*/
public static void decorateMasterConfiguration(Configuration conf) {
if (!isBackupEnabled(conf)) {
return;
}
// Add WAL archive cleaner plug-in
String v0 = conf.get(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS);
String cleanerClass = BackupLogCleaner.class.getCanonicalName();
if (!v0.contains(cleanerClass)) {
conf.set(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS,
(v0 + ",") + cleanerClass);
}
String classes = conf.get(ProcedureManagerHost.MASTER_PROCEDURE_CONF_KEY);
String masterProcedureClass = LogRollMasterProcedureManager.class.getName();
if (classes == null) {
conf.set(ProcedureManagerHost.MASTER_PROCEDURE_CONF_KEY, masterProcedureClass);
} else if (!classes.contains(masterProcedureClass)) {
conf.set(ProcedureManagerHost.MASTER_PROCEDURE_CONF_KEY, (classes + ",") +
masterProcedureClass);
}
    v0 = conf.get(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
    conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
      (v0 == null ? "" : v0 + ",") + BackupHFileCleaner.class.getName());
if (LOG.isDebugEnabled()) {
LOG.debug("Added log cleaner: {}. Added master procedure manager: {}." + "Added master procedure manager: {}", cleanerClass, masterProcedureClass, BackupHFileCleaner.class.getName());
}
} | 3.26 |
hbase_BackupManager_decorateRegionServerConfiguration_rdh | /**
* This method modifies the Region Server configuration in order to inject backup-related features
* TESTs only.
*
* @param conf
* configuration
*/
public static void decorateRegionServerConfiguration(Configuration conf) {
if (!isBackupEnabled(conf)) {
return;
}
String classes = conf.get(ProcedureManagerHost.REGIONSERVER_PROCEDURE_CONF_KEY);
String regionProcedureClass = LogRollRegionServerProcedureManager.class.getName();
if (classes == null) {
conf.set(ProcedureManagerHost.REGIONSERVER_PROCEDURE_CONF_KEY, regionProcedureClass);
} else if (!classes.contains(regionProcedureClass)) {
conf.set(ProcedureManagerHost.REGIONSERVER_PROCEDURE_CONF_KEY, (classes + ",") + regionProcedureClass);
}
String coproc = conf.get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY);
String regionObserverClass = BackupObserver.class.getName();
    conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      (coproc == null ? "" : coproc + ",") + regionObserverClass);
if (LOG.isDebugEnabled()) {
LOG.debug("Added region procedure manager: {}. Added region observer: {}", regionProcedureClass, regionObserverClass);
}
} | 3.26 |
hbase_BackupManager_getOngoingBackupId_rdh | /**
 * Check if there is any ongoing backup. Currently, we only rely on checking status in the backup
 * system table. We need to consider handling the case of orphan records in the future. Otherwise,
 * all incoming requests will fail.
*
* @return the ongoing backup id if on going backup exists, otherwise null
* @throws IOException
* exception
*/
private String getOngoingBackupId() throws IOException {
    ArrayList<BackupInfo> sessions = systemTable.getBackupInfos(BackupState.RUNNING);
if (sessions.size() == 0) {
return null;
}
return sessions.get(0).getBackupId();
} | 3.26 |
hbase_RegionMetrics_getNameAsString_rdh | /**
* Returns the region name as a string
*/
default String getNameAsString() {
return Bytes.toStringBinary(getRegionName());
} | 3.26 |
hbase_RegionMetrics_getRequestCount_rdh | /**
 * Returns the total number of read requests, write requests and coprocessor service requests made
 * to the region
*/
default long getRequestCount() {
return (getReadRequestCount() + getWriteRequestCount()) + getCpRequestCount();
} | 3.26 |
hbase_MetricsHBaseServerSourceFactory_createContextName_rdh | /**
* From the name of the class that's starting up create the context that an IPC source should
* register itself.
*
* @param serverName
* The name of the class that's starting up.
* @return The Camel Cased context name.
 */
protected static String createContextName(String serverName) {
if (serverName.startsWith("HMaster") || serverName.startsWith("master")) {
return "Master";
} else if (serverName.startsWith("HRegion") || serverName.startsWith("regionserver")) {
return "RegionServer";
}
return "IPC";
} | 3.26 |
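For instance, a few example inputs and their expected contexts (a small assumed demo of the prefix matching above; the server-name strings are illustrative):

createContextName("HMaster");          // "Master"
createContextName("master,16000,1");   // "Master"
createContextName("HRegionServer");    // "RegionServer"
createContextName("ThriftServer");     // "IPC" (fallback)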
hbase_SnapshotVerifyProcedure_m1_rdh | // we will wrap remote exception into a RemoteProcedureException,
// here we try to unwrap it
private Throwable m1(RemoteProcedureException e) {
return e.getCause();
} | 3.26 |
hbase_BulkLoadCellFilter_filterCell_rdh | /**
* Filters the bulk load cell using the supplied predicate.
*
* @param cell
* The WAL cell to filter.
* @param famPredicate
* Returns true of given family should be removed.
* @return The filtered cell.
*/
public Cell filterCell(Cell cell, Predicate<byte[]> famPredicate) {
byte[] fam;
BulkLoadDescriptor bld = null;
try {
bld = WALEdit.getBulkLoadDescriptor(cell);
} catch (IOException e) {
LOG.warn("Failed to get bulk load events information from the WAL file.", e);
return cell;
}
List<StoreDescriptor> storesList = bld.getStoresList();
    // Copy the StoreDescriptor list and update it, as storesList is an unmodifiableList
List<StoreDescriptor> copiedStoresList = new ArrayList<>(storesList);
Iterator<StoreDescriptor> copiedStoresListIterator = copiedStoresList.iterator();
boolean anyStoreRemoved = false;
while (copiedStoresListIterator.hasNext()) {
StoreDescriptor sd = copiedStoresListIterator.next();
fam = sd.getFamilyName().toByteArray();
if (famPredicate.apply(fam)) {
copiedStoresListIterator.remove();
anyStoreRemoved = true;
}
}
if (!anyStoreRemoved) {
return cell;
} else if (copiedStoresList.isEmpty()) {
return null;
}
    BulkLoadDescriptor.Builder newDesc = BulkLoadDescriptor.newBuilder()
      .setTableName(bld.getTableName()).setEncodedRegionName(bld.getEncodedRegionName())
      .setBulkloadSeqNum(bld.getBulkloadSeqNum());
newDesc.addAllStores(copiedStoresList);
BulkLoadDescriptor newBulkLoadDescriptor = newDesc.build();
    return cellBuilder.clear().setRow(CellUtil.cloneRow(cell)).setFamily(WALEdit.METAFAMILY)
      .setQualifier(WALEdit.BULK_LOAD).setTimestamp(cell.getTimestamp())
      .setType(cell.getTypeByte()).setValue(newBulkLoadDescriptor.toByteArray()).build();
} | 3.26 |
hbase_MetricSampleQuantiles_clear_rdh | /**
* Resets the estimator, clearing out all previously inserted items
*/
public synchronized void clear() {
count = 0;
bufferCount = 0;
samples.clear();
} | 3.26 |
hbase_MetricSampleQuantiles_allowableError_rdh | /**
* Specifies the allowable error for this rank, depending on which quantiles are being targeted.
* This is the f(r_i, n) function from the CKMS paper. It's basically how wide the range of this
 * rank can be.
 *
 * @param rank
 *          the index in the list of samples
 */
private double allowableError(int rank) {
int size = samples.size();
double minError = size + 1;
for (MetricQuantile q : quantiles) {
double v3;
if (rank <= (q.quantile * size)) {
v3 = ((2.0 * q.error) * (size - rank)) / (1.0 - q.quantile);
} else {
v3 = ((2.0 * q.error) * rank) / q.quantile;
}
if (v3 < minError) {
minError = v3;
}
}
return minError;
} | 3.26 |
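A worked example of the f(r_i, n) bound above, under the assumption of a single target quantile q = 0.99 with error 0.001 and 1000 retained samples: for a rank at or below 0.99 · 1000 = 990 the bound is 2ε(n − r)/(1 − q), and above it the bound is 2ε·r/q. At rank 500 that gives 2 · 0.001 · (1000 − 500) / 0.01 = 100, so samples far below the target quantile may be spaced loosely; at rank 995 it gives 2 · 0.001 · 995 / 0.99 ≈ 2.01, so samples near the target are kept almost exact. A small check of that arithmetic (the values are assumed, not from the file):

// Worked check of the rank-error bound above: one quantile q=0.99, error=0.001, n=1000.
double q = 0.99, eps = 0.001;
int n = 1000;
double below = (2.0 * eps) * (n - 500) / (1.0 - q); // rank 500 <= q*n -> 100.0
double above = (2.0 * eps) * 995 / q;               // rank 995  > q*n -> ~2.01
System.out.println(below + " " + above);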
hbase_MetricSampleQuantiles_getCount_rdh | /**
* Returns the number of items that the estimator has processed
*
* @return count total number of items processed
*/
public synchronized long getCount() {
return count;
} | 3.26 |
hbase_MetricSampleQuantiles_getSampleCount_rdh | /**
* Returns the number of samples kept by the estimator
*
* @return count current number of samples
*/
public synchronized int getSampleCount() {
return samples.size();
} | 3.26 |
hbase_MetricSampleQuantiles_query_rdh | /**
* Get the estimated value at the specified quantile.
*
* @param quantile
* Queried quantile, e.g. 0.50 or 0.99.
* @return Estimated value at that quantile.
*/
private long query(double quantile) throws IOException {
if (samples.isEmpty()) {
throw new IOException("No samples present");
}
int rankMin = 0;
    int desired = (int) (quantile * count);
for (int i = 1; i < samples.size(); i++) {
SampleItem prev = samples.get(i - 1);
SampleItem cur = samples.get(i);
      rankMin += prev.g;
      if (((rankMin + cur.g) + cur.delta) > (desired + (allowableError(i) / 2))) {
        return prev.value;
      }
}
// edge case of wanting max value
return samples.get(samples.size() - 1).value;
} | 3.26 |
hbase_MetricSampleQuantiles_compress_rdh | /**
* Try to remove extraneous items from the set of sampled items. This checks if an item is
* unnecessary based on the desired error bounds, and merges it with the adjacent item if it is.
*/
private void compress() {
if (samples.size() < 2) {
return;
}
ListIterator<SampleItem> it = samples.listIterator();
SampleItem prev = null;
SampleItem next = it.next();
while (it.hasNext()) {
prev = next;
      next = it.next();
if (((prev.g + next.g) + next.delta) <= allowableError(it.previousIndex())) {
next.g += prev.g;
// Remove prev. it.remove() kills the last thing returned.
it.previous();
it.previous();
it.remove();
// it.next() is now equal to next, skip it back forward again
it.next();
}
}
} | 3.26 |
hbase_MetricSampleQuantiles_insert_rdh | /**
* Add a new value from the stream.
*
* @param v
* the value to insert
*/
public synchronized void insert(long v) {
buffer[bufferCount] = v;
bufferCount++;
count++;
if (bufferCount == buffer.length) {
insertBatch();
compress();
}
} | 3.26 |
hbase_MetricSampleQuantiles_snapshot_rdh | /**
* Get a snapshot of the current values of all the tracked quantiles.
*
 * @return snapshot of the tracked quantiles
 * @throws IOException
 *           if no items have been added to the estimator
*/
public synchronized Map<MetricQuantile, Long> snapshot() throws IOException {
// flush the buffer first for best results
insertBatch();
Map<MetricQuantile, Long> values = new HashMap<>(quantiles.length);
for (int i = 0; i < quantiles.length; i++) {
values.put(quantiles[i], query(quantiles[i].quantile));
}
return values;
} | 3.26 |
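An end-to-end usage sketch of this estimator. The MetricSampleQuantiles(MetricQuantile[]) constructor and MetricQuantile(double quantile, double error) signatures are assumptions inferred from how the fields are used above, not confirmed APIs.

import java.util.Map;

MetricSampleQuantiles estimator = new MetricSampleQuantiles(new MetricQuantile[] {
  new MetricQuantile(0.50, 0.050), // median, tracked loosely
  new MetricQuantile(0.99, 0.001)  // tail, tracked nearly exactly
});
for (long v = 1; v <= 100_000; v++) {
  estimator.insert(v); // buffered, then periodically batch-inserted and compressed
}
Map<MetricQuantile, Long> snap = estimator.snapshot();
// For a uniform 1..100000 stream, the 0.99 entry should land near 99000.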
hbase_CellFlatMap_navigableKeySet_rdh | // -------------------------------- Sub-Sets --------------------------------
@Override
public NavigableSet<Cell> navigableKeySet() {
throw new UnsupportedOperationException();
} | 3.26 |
hbase_CellFlatMap_put_rdh | // -------------------------------- Updates --------------------------------
// All updating methods below are unsupported.
// Assuming an array of Cells will be allocated externally,
// fill up with Cells and provided in construction time.
// Later the structure is immutable.
@Override
public Cell put(Cell k, Cell v) {
throw new UnsupportedOperationException();
} | 3.26 |
hbase_CellFlatMap_firstKey_rdh | // -------------------------------- Key's getters --------------------------------
@Override
public Cell firstKey() {
if (isEmpty()) {
return null;
}
return descending ? getCell(maxCellIdx - 1) : getCell(minCellIdx);
} | 3.26 |
hbase_CellFlatMap_pollFirstEntry_rdh | // The following 2 methods (pollFirstEntry, pollLastEntry) are unsupported because these are
// updating methods.
@Override
public Entry<Cell, Cell> pollFirstEntry() {
throw new UnsupportedOperationException();
} | 3.26 |
hbase_CellFlatMap_find_rdh | /**
* Binary search for a given key in between given boundaries of the array. Positive returned
* numbers mean the index. Negative returned numbers means the key not found. The absolute value
* of the output is the possible insert index for the searched key In twos-complement, (-1 *
* insertion point)-1 is the bitwise not of the insert point.
*
* @param needle
* The key to look for in all of the entries
* @return Same return value as Arrays.binarySearch.
*/
private int find(Cell needle) {
int begin = minCellIdx;
int end = maxCellIdx - 1;
while (begin <= end) {
int mid = begin + ((end - begin) >> 1);
Cell midCell = getCell(mid);
int compareRes = comparator.compare(midCell, needle);
      if (compareRes == 0) {
        return mid; // 0 means equals. We found the key
}
// Key not found. Check the comparison results; reverse the meaning of
// the comparison in case the order is descending (using XOR)
if ((compareRes < 0) ^ descending) {
// midCell is less than needle so we need to look at farther up
begin = mid + 1;
} else {
// midCell is greater than needle so we need to look down
end = mid - 1;
}
}
return ((-1) * begin) - 1;
} | 3.26 |
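Since the Javadoc pins the contract to Arrays.binarySearch, the same decoding applies to find(). A minimal standalone illustration of reading back the negative return value:

import java.util.Arrays;

long[] keys = {10, 20, 30};
int hit = Arrays.binarySearch(keys, 20);   // 1: found at index 1
int miss = Arrays.binarySearch(keys, 25);  // -3: not found
int insertionPoint = -miss - 1;            // 2: where 25 would be inserted
// Equivalently, ~miss == insertionPoint (the bitwise-not form mentioned above).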
hbase_CellFlatMap_subMap_rdh | // ---------------- Sub-Maps ----------------
@Override
public NavigableMap<Cell, Cell> subMap(Cell fromKey, boolean fromInclusive, Cell toKey, boolean toInclusive) {
final int lessCellIndex = getValidIndex(fromKey, fromInclusive, true);
final int greaterCellIndex = getValidIndex(toKey, toInclusive, false);
if (descending) {
return createSubCellFlatMap(greaterCellIndex, lessCellIndex, descending);
} else {
return createSubCellFlatMap(lessCellIndex, greaterCellIndex, descending);
}
} | 3.26 |
hbase_CellFlatMap_getValidIndex_rdh | /**
 * Get the index of the given anchor key for creating a subsequent set. It doesn't matter whether
 * the given key exists in the set or not, taking into consideration whether the key should be
 * inclusive or exclusive.
*/
private int getValidIndex(Cell key, boolean inclusive, boolean tail) {
final int index = find(key);
// get the valid (positive) insertion point from the output of the find() method
int insertionPoint = (index < 0) ? ~index : index;
// correct the insertion point in case the given anchor key DOES EXIST in the set
if (index >= 0) {
if (descending && (!(tail ^ inclusive))) {
// for the descending case
// if anchor for head set (tail=false) AND anchor is not inclusive -> move the insertion pt
// if anchor for tail set (tail=true) AND the keys is inclusive -> move the insertion point
// because the end index of a set is the index of the cell after the maximal cell
insertionPoint += 1;
} else if ((!descending) && (tail ^ inclusive)) {
// for the ascending case
// if anchor for head set (tail=false) AND anchor is inclusive -> move the insertion point
// because the end index of a set is the index of the cell after the maximal cell
// if anchor for tail set (tail=true) AND the keys is not inclusive -> move the insertion pt
        insertionPoint += 1;
}
}
    // clamp the insertion point into the valid range,
    // as we may have enlarged it too much in the above correction
return Math.min(Math.max(insertionPoint, minCellIdx), maxCellIdx);
} | 3.26 |
hbase_BaseReplicationEndpoint_getScopeWALEntryFilter_rdh | /**
* Returns a WALEntryFilter for checking the scope. Subclasses can return null if they don't want
* this filter
 */
protected WALEntryFilter getScopeWALEntryFilter() {
return new ScopeWALEntryFilter();
} | 3.26 |
hbase_BaseReplicationEndpoint_getNamespaceTableCfWALEntryFilter_rdh | /**
* Returns a WALEntryFilter for checking replication per table and CF. Subclasses can return null
* if they don't want this filter
*/
protected WALEntryFilter getNamespaceTableCfWALEntryFilter() {
return new NamespaceTableCfWALEntryFilter(f0.getReplicationPeer());
} | 3.26 |
hbase_BaseReplicationEndpoint_getWALEntryfilter_rdh | /**
* Returns a default set of filters
*/
@Override
public WALEntryFilter getWALEntryfilter() {
ArrayList<WALEntryFilter> filters = Lists.newArrayList();
WALEntryFilter scopeFilter = getScopeWALEntryFilter();
if (scopeFilter != null) {
filters.add(scopeFilter);
}
WALEntryFilter tableCfFilter = getNamespaceTableCfWALEntryFilter();
if (tableCfFilter != null) {
filters.add(tableCfFilter);
}
if ((f0 != null) && (f0.getPeerConfig() != null)) {
String filterNameCSV = f0.getPeerConfig().getConfiguration().get(REPLICATION_WALENTRYFILTER_CONFIG_KEY);
if ((filterNameCSV != null) && (!filterNameCSV.isEmpty())) {
String[] filterNames = filterNameCSV.split(",");
for (String filterName : filterNames) {
try {
Class<?> clazz = Class.forName(filterName);
filters.add(((WALEntryFilter) (clazz.getDeclaredConstructor().newInstance())));
} catch (Exception e) {
LOG.error("Unable to create WALEntryFilter " + filterName, e);
}
}
}
}
return filters.isEmpty() ? null : new ChainWALEntryFilter(filters);
} | 3.26 |
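The CSV branch above instantiates extra filters reflectively via no-arg constructors. Below is a standalone sketch of that pattern; the class names are stand-ins (a real entry would be a WALEntryFilter implementation on the classpath).

import java.util.ArrayList;
import java.util.List;

// Reflectively instantiate a comma-separated list of class names, as the CSV branch above does.
String filterNameCSV = "java.util.ArrayList,java.util.LinkedList"; // stand-in class names
List<Object> instances = new ArrayList<>();
for (String filterName : filterNameCSV.split(",")) {
  try {
    Class<?> clazz = Class.forName(filterName);
    instances.add(clazz.getDeclaredConstructor().newInstance());
  } catch (Exception e) {
    // Mirror the method above: log and skip rather than failing the whole chain.
    System.err.println("Unable to create WALEntryFilter " + filterName + ": " + e);
  }
}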
hbase_BaseReplicationEndpoint_peerConfigUpdated_rdh | /**
* No-op implementation for subclasses to override if they wish to execute logic if their config
* changes
*/
@Override
public void peerConfigUpdated(ReplicationPeerConfig rpc) {
} | 3.26 |
hbase_RegionServerObserver_postClearCompactionQueues_rdh | /**
* This will be called after clearing compaction queues
*
* @param ctx
* the environment to interact with the framework and region server.
*/
default void postClearCompactionQueues(final ObserverContext<RegionServerCoprocessorEnvironment> ctx) throws IOException {
} | 3.26 |
hbase_RegionServerObserver_preRollWALWriterRequest_rdh | /**
* This will be called before executing user request to roll a region server WAL.
*
* @param ctx
* the environment to interact with the framework and region server.
*/
default void preRollWALWriterRequest(final ObserverContext<RegionServerCoprocessorEnvironment> ctx) throws IOException {
} | 3.26 |