name | code_snippet | score
---|---|---|
hbase_ProcedureExecutor_bypassProcedure_rdh | /**
 * Bypass a procedure. If the procedure is set to bypass, all the logic in execute/rollback will
 * be ignored and it will simply return success. It is used to recover buggy stuck procedures,
 * releasing the lock resources and letting other procedures run. Bypassing one procedure (its
 * ancestors will be bypassed automatically) may leave the cluster in an intermediate state, e.g.
 * a region not assigned, or some hdfs files left behind. After getting rid of those stuck
 * procedures, the operators may have to do some clean up on hdfs or schedule some assign
 * procedures to bring regions online. DO AT YOUR OWN RISK.
 * <p>
 * A procedure can be bypassed only if 1. The procedure is in state of RUNNABLE, WAITING,
 * WAITING_TIMEOUT or it is a root procedure without any child. 2. No other worker thread is
 * executing it 3. No child procedure has been submitted
 * <p>
 * If all the requirements are met, the procedure and its ancestors will be bypassed and
 * persisted to WAL.
 * <p>
 * If the procedure is in WAITING state, it will be set to RUNNABLE and added to the run queue.
 * TODO: What about WAITING_TIMEOUT?
 *
 * @param pids
 *          the procedure ids
 * @param lockWait
 *          time to wait for the lock
 * @param force
 *          if force is set to true, we will bypass the procedure even if it is executing.
 *          This is for procedures which can't break out during execution (due to a bug,
 *          mostly). In this case, bypassing the procedure is not enough, since it is
 *          already stuck there. We need to restart the master after bypassing, and let
 *          the problematic procedure execute with bypass=true, so in that condition the
 *          procedure can be successfully bypassed.
 * @param recursive
 *          We will do an expensive search for children of each pid. EXPENSIVE!
 * @return true for each procedure that was successfully bypassed
 * @throws IOException
 *           IOException
 */
public List<Boolean> bypassProcedure(List<Long> pids, long lockWait, boolean force, boolean recursive) throws IOException {
  List<Boolean> result = new ArrayList<Boolean>(pids.size());
  for (long pid : pids) {
    result.add(bypassProcedure(pid, lockWait, force, recursive));
  }
  return result;
} | 3.26 |
hbase_ProcedureExecutor_removeResult_rdh | /**
 * Mark the specified completed procedure as ready to remove.
*
* @param procId
* the ID of the procedure to remove
*/
public void removeResult(long procId) {
  CompletedProcedureRetainer<TEnvironment> retainer = completed.get(procId);
  if (retainer == null) {
    assert !procedures.containsKey(procId) : ("pid=" + procId) + " is still running";
    LOG.debug("pid={} already removed by the cleaner.", procId);
    return;
  }
  // The CompletedProcedureCleaner will take care of deletion, once the TTL is expired.
  retainer.setClientAckTime(EnvironmentEdgeManager.currentTime());
} | 3.26 |
hbase_ProcedureExecutor_nextProcId_rdh | // Procedure IDs helpers
// ==========================================================================
private long nextProcId() {
  long procId = lastProcId.incrementAndGet();
  if (procId < 0) {
    while (!lastProcId.compareAndSet(procId, 0)) {
      procId = lastProcId.get();
      if (procId >= 0) {
        break;
      }
    }
    while (procedures.containsKey(procId)) {
      procId = lastProcId.incrementAndGet();
    }
  }
  assert procId >= 0 : "Invalid procId " + procId;
  return procId;
} | 3.26 |
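The method above handles overflow of the id counter: when `incrementAndGet()` wraps negative, it races to reset the counter to zero and then skips any ids still held by live procedures. A self-contained sketch of the same technique (all names hypothetical):

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

class IdAllocator {
  private final AtomicLong lastId = new AtomicLong(-1);
  private final Set<Long> liveIds = ConcurrentHashMap.newKeySet();

  long nextId() {
    long id = lastId.incrementAndGet();
    while (id < 0 || liveIds.contains(id)) {
      if (id < 0) {
        // Counter wrapped past Long.MAX_VALUE: race to reset it to zero.
        // Only one thread's CAS succeeds; all threads then re-increment.
        lastId.compareAndSet(id, 0);
      }
      id = lastId.incrementAndGet(); // skip ids still in use after the wrap
    }
    return id;
  }
}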
hbase_ProcedureExecutor_addChore_rdh | // ==========================================================================
// Submit/Remove Chores
// ==========================================================================
/**
* Add a chore procedure to the executor
*
* @param chore
* the chore to add
*/
public void addChore(@Nullable ProcedureInMemoryChore<TEnvironment> chore) {
  if (chore == null) {
    return;
  }
  chore.setState(ProcedureState.WAITING_TIMEOUT);
  timeoutExecutor.add(chore);
} | 3.26 |
hbase_ProcedureExecutor_getCorePoolSize_rdh | /**
* Returns the core pool size settings.
*/
public int getCorePoolSize() {
return corePoolSize;
} | 3.26 |
hbase_ProcedureExecutor_executeProcedure_rdh | // ==========================================================================
// Executions
// ==========================================================================
private void executeProcedure(Procedure<TEnvironment> proc) {
  if (proc.isFinished()) {
    LOG.debug("{} is already finished, skipping execution", proc);
    return;
  }
  final Long rootProcId = getRootProcedureId(proc);
  if (rootProcId == null) {
    // The 'proc' was ready to run but the root procedure was rolled back
    LOG.warn("Rollback because parent is done/rolledback proc=" + proc);
    executeRollback(proc);
    return;
  }
  RootProcedureState<TEnvironment> procStack = rollbackStack.get(rootProcId);
  if (procStack == null) {
    LOG.warn("RootProcedureState is null for " + proc.getProcId());
    return;
  }
  do {
    // Try to acquire the execution
    if (!procStack.acquire(proc)) {
      if (procStack.setRollback()) {
        // we have the 'rollback-lock' so we can start rolling back
        switch (executeRollback(rootProcId, procStack)) {
          case LOCK_ACQUIRED:
            break;
          case LOCK_YIELD_WAIT:
            procStack.unsetRollback();
            scheduler.yield(proc);
            break;
          case LOCK_EVENT_WAIT:
            LOG.info("LOCK_EVENT_WAIT rollback..." + proc);
            procStack.unsetRollback();
            break;
          default:
            throw new UnsupportedOperationException();
        }
      } else {
        // If we can't roll back, it means that some child is still running.
        // The rollback will be executed after all the children are done.
        // If the procedure was never executed, remove it and mark it as rolled back.
        if (!proc.wasExecuted()) {
          switch (executeRollback(proc)) {
            case LOCK_ACQUIRED:
              break;
            case LOCK_YIELD_WAIT:
              scheduler.yield(proc);
              break;
            case LOCK_EVENT_WAIT:
              LOG.info("LOCK_EVENT_WAIT can't rollback child running?..." + proc);
              break;
            default:
              throw new UnsupportedOperationException();
          }
        }
      }
      break;
    }
    // Execute the procedure
    assert proc.getState() == ProcedureState.RUNNABLE : proc;
    // Note that lock is NOT about concurrency but rather about ensuring
    // ownership of a procedure of an entity such as a region or table
    LockState lockState = acquireLock(proc);
    switch (lockState) {
      case LOCK_ACQUIRED:
        execProcedure(procStack, proc);
        break;
      case LOCK_YIELD_WAIT:
        LOG.info((lockState + " ") + proc);
        scheduler.yield(proc);
        break;
      case LOCK_EVENT_WAIT:
        // Someone will wake us up when the lock is available
        LOG.debug((lockState + " ") + proc);
        break;
      default:
        throw new UnsupportedOperationException();
    }
    procStack.release(proc);
    if (proc.isSuccess()) {
      // update metrics on finishing the procedure
      proc.updateMetricsOnFinish(getEnvironment(), proc.elapsedTime(), true);
      LOG.info((("Finished " + proc) + " in ") + StringUtils.humanTimeDiff(proc.elapsedTime()));
      // Finalize the procedure state
      if (proc.getProcId() == rootProcId) {
        procedureFinished(proc);
      } else {
        execCompletionCleanup(proc);
      }
      break;
    }
  } while (procStack.isFailed());
} | 3.26 |
hbase_ProcedureExecutor_registerListener_rdh | // ==========================================================================
// Listeners helpers
// ==========================================================================
public void registerListener(ProcedureExecutorListener listener) {
this.listeners.add(listener);
} | 3.26 |
hbase_ProcedureExecutor_getActiveProceduresNoCopy_rdh | /**
* Should only be used when starting up, where the procedure workers have not been started.
* <p/>
 * If the procedure workers have been started, the returned values may be changed while you are
 * processing them, so usually this is not safe. Use {@link #getProcedures()} below for most
 * cases as it will do a copy, and also include the finished procedures.
*/
public Collection<Procedure<TEnvironment>> getActiveProceduresNoCopy() {
return procedures.values();
} | 3.26 |
hbase_ProcedureExecutor_getWorkerThreadCount_rdh | /**
* Returns the current number of worker threads.
*/
public int getWorkerThreadCount() {
return workerThreads.size();
} | 3.26 |
hbase_ProcedureExecutor_executeRollback_rdh | /**
 * Execute the rollback of the procedure step. It updates the store with the new state (stack
 * index) or will completely remove the procedure in case it is a child.
*/
private LockState executeRollback(Procedure<TEnvironment> proc) {
  try {
    proc.doRollback(getEnvironment());
  } catch (IOException e) {
    LOG.debug("Roll back attempt failed for {}", proc, e);
    return LockState.LOCK_YIELD_WAIT;
  } catch (InterruptedException e) {
    m2(proc, e);
    return LockState.LOCK_YIELD_WAIT;
  } catch (Throwable e) {
    // Catch NullPointerExceptions or similar errors...
    LOG.error(HBaseMarkers.FATAL, "CODE-BUG: Uncaught runtime exception for " + proc, e);
  }
  // allows to kill the executor before something is stored to the wal.
  // useful to test the procedure recovery.
  if ((testing != null) && testing.shouldKillBeforeStoreUpdate()) {
    String msg = "TESTING: Kill before store update";
    LOG.debug(msg);
    stop();
    throw new RuntimeException(msg);
  }
  cleanupAfterRollbackOneStep(proc);
  return LockState.LOCK_ACQUIRED;
} | 3.26 |
hbase_ProcedureExecutor_isRunning_rdh | // ==========================================================================
public boolean isRunning() {
return running.get();
} | 3.26 |
hbase_ProcedureExecutor_abort_rdh | /**
* Send an abort notification to the specified procedure. Depending on the procedure
* implementation, the abort can be considered or ignored.
*
* @param procId
* the procedure to abort
* @param mayInterruptIfRunning
* if the proc completed at least one step, should it be aborted?
* @return true if the procedure exists and has received the abort, otherwise false.
*/
public boolean abort(long procId, boolean mayInterruptIfRunning) {
  Procedure<TEnvironment> proc = procedures.get(procId);
  if (proc != null) {
    if ((!mayInterruptIfRunning) && proc.wasExecuted()) {
      return false;
    }
    return proc.abort(getEnvironment());
  }
  return false;
} | 3.26 |
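A brief usage sketch; the `executor` handle and the procedure id are hypothetical:

// Hypothetical: ask procedure 42 to abort, but only if it has not yet
// executed any step (mayInterruptIfRunning = false).
boolean delivered = executor.abort(42L, false);
if (!delivered) {
  // procedure is unknown, already finished, or already past its first step
}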
hbase_ProcedureExecutor_isStarted_rdh | /**
* Return true if the procedure is started.
*
* @param procId
* the ID of the procedure to check
* @return true if the procedure execution is started, otherwise false.
*/
public boolean isStarted(long procId) {
  Procedure<?> proc = procedures.get(procId);
  if (proc == null) {
    return completed.get(procId) != null;
  }
  return proc.wasExecuted();
} | 3.26 |
hbase_ProcedureExecutor_getProcedures_rdh | /**
* Get procedures.
*
* @return the procedures in a list
*/
public List<Procedure<TEnvironment>> getProcedures() {
  List<Procedure<TEnvironment>> procedureList = new ArrayList<>(procedures.size() + completed.size());
  procedureList.addAll(procedures.values());
  // Note: The procedure could show up twice in the list with different state, as
  // it could complete after we walk through procedures list and insert into
  // procedureList - it is ok, as we will use the information in the Procedure
  // to figure it out; to prevent this would increase the complexity of the logic.
  completed.values().stream().map(CompletedProcedureRetainer::getProcedure).forEach(procedureList::add);
  return procedureList;
} | 3.26 |
hbase_ProcedureExecutor_removeChore_rdh | /**
* Remove a chore procedure from the executor
*
* @param chore
* the chore to remove
* @return whether the chore is removed, or it will be removed later
*/
public boolean removeChore(@Nullable ProcedureInMemoryChore<TEnvironment> chore) {
if (chore == null) {
return true;
}
chore.setState(ProcedureState.SUCCESS);
return timeoutExecutor.remove(chore);
} | 3.26 |
hbase_ProcedureExecutor_unregisterNonceIfProcedureWasNotSubmitted_rdh | /**
* Remove the NonceKey if the procedure was not submitted to the executor.
*
* @param nonceKey
* A unique identifier for this operation from the client or process.
*/
public void unregisterNonceIfProcedureWasNotSubmitted(final NonceKey nonceKey) {
if (nonceKey == null) {
return;
}
final Long procId = nonceKeysToProcIdsMap.get(nonceKey);
  if (procId == null) {
return;
}
// if the procedure was not submitted, remove the nonce
if (!(procedures.containsKey(procId) || completed.containsKey(procId))) {
nonceKeysToProcIdsMap.remove(nonceKey);
}
} | 3.26 |
hbase_Sleeper_getPeriod_rdh | /**
* Returns the sleep period in milliseconds
*/
public final int getPeriod() {
return f0;
} | 3.26 |
hbase_Sleeper_sleep_rdh | /**
* Sleep for period.
*/
public void sleep() {
  sleep(this.f0);
} | 3.26 |
hbase_Sleeper_skipSleepCycle_rdh | /**
* If currently asleep, stops sleeping; if not asleep, will skip the next sleep cycle.
*/
public void skipSleepCycle() {
synchronized(sleepLock) {
f1 = true;
sleepLock.notifyAll();
}
} | 3.26 |
hbase_EntityLock_shutdown_rdh | /**
 * Shuts down the thread cleanly and quietly.
 */
Thread shutdown() {
shutdown = true;
interrupt();
return this;
} | 3.26 |
hbase_EntityLock_requestLock_rdh | /**
 * Sends an rpc to the master to request the lock. The lock request is queued with other lock
 * requests. Call {@link #await()} to wait on the lock. Always call {@link #unlock()} afterwards,
 * even on error.
*/
public void requestLock() throws IOException {
if (procId == null) {
try {
procId = stub.requestLock(null, lockRequest).getProcId();
} catch (Exception e) {
throw ProtobufUtil.handleRemoteException(e);
}
worker.start();
} else {
LOG.info("Lock already queued : " + toString());
}
} | 3.26 |
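A sketch of the acquire/use/release pattern the Javadoc above prescribes; the `lock` instance is hypothetical, and `await()`/`unlock()` are assumed to behave as referenced in the Javadoc (`await()` blocking until the master grants the lock):

// Hypothetical EntityLock usage: always release, even if request/await fails.
void doWithEntityLock(EntityLock lock) throws IOException, InterruptedException {
  lock.requestLock(); // queues the request on the master
  try {
    lock.await(); // wait until the lock is actually held
    // ... critical section requiring the entity lock ...
  } finally {
    lock.unlock(); // always release, even on error
  }
}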
hbase_AsyncTableResultScanner_isSuspended_rdh | // used in tests to test whether the scanner has been suspended
synchronized boolean isSuspended() {
return resumer != null;
} | 3.26 |
hbase_UserMetrics_getNameAsString_rdh | /**
* Returns the user name as a string
*/
default String getNameAsString() {
return Bytes.toStringBinary(getUserName());
} | 3.26 |
hbase_UserMetrics_getRequestCount_rdh | /**
 * Returns the number of write requests, read requests and coprocessor service requests made
 * by the user
*/
default long getRequestCount() {
return getReadRequestCount() + getWriteRequestCount();
} | 3.26 |
hbase_RegistryEndpointsRefresher_mainLoop_rdh | // The main loop for the refresh thread.
private void mainLoop() {
long lastRefreshTime = EnvironmentEdgeManager.currentTime();
boolean firstRefresh = true;
for (;;) {
  synchronized(this) {
    for (;;) {
if (stopped) {
LOG.info("Registry end points refresher loop exited.");
return;
}
// if refreshNow is true, then we will wait until minTimeBetweenRefreshesMs elapsed,
// otherwise wait until periodicRefreshMs elapsed
long waitTime = getRefreshIntervalMs(firstRefresh) - (EnvironmentEdgeManager.currentTime() - lastRefreshTime);
if (waitTime <= 0) {
// we are going to refresh, reset this flag
firstRefresh = false;
refreshNow = false;
break;
}
try {
wait(waitTime);
} catch (InterruptedException e) {
LOG.warn("Interrupted during wait", e);
Thread.currentThread().interrupt();
continue;
}
}
}
LOG.debug("Attempting to refresh registry end points");try {
refresher.refresh();
} catch (IOException e) {
LOG.warn("Error refresh registry end points", e);
}
// We do not think it is a big deal to fail one time, so no matter what is refresh result, we
// just update this refresh time and wait for the next round. If later this becomes critical,
// could change to only update this value when we have done a successful refreshing.
lastRefreshTime = EnvironmentEdgeManager.currentTime();
LOG.debug("Finished refreshing registry end points");
}
} | 3.26 |
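The loop above is the classic guarded-wait pattern: sleep under the monitor until either the refresh interval elapses or `refreshNow()` wakes the thread early. A self-contained miniature of the same pattern (all names hypothetical):

class PeriodicWorker implements Runnable {
  private final long periodMs;
  private boolean wakeRequested;
  private volatile boolean stopped;

  PeriodicWorker(long periodMs) {
    this.periodMs = periodMs;
  }

  synchronized void wakeNow() { // like refreshNow(): run the action early
    wakeRequested = true;
    notifyAll();
  }

  synchronized void stop() {
    stopped = true;
    notifyAll();
  }

  @Override
  public void run() {
    long lastRun = System.currentTimeMillis();
    while (!stopped) {
      synchronized (this) {
        long waitTime = periodMs - (System.currentTimeMillis() - lastRun);
        while (waitTime > 0 && !wakeRequested && !stopped) {
          try {
            wait(waitTime); // released early by wakeNow() or stop()
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return;
          }
          waitTime = periodMs - (System.currentTimeMillis() - lastRun);
        }
        wakeRequested = false;
      }
      if (stopped) {
        return;
      }
      doWork(); // the periodic action, performed outside the monitor
      lastRun = System.currentTimeMillis();
    }
  }

  void doWork() { /* refresh endpoints, reload config, ... */ }
}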
hbase_RegistryEndpointsRefresher_create_rdh | /**
* Create a {@link RegistryEndpointsRefresher}. If the interval secs configured via
* {@code intervalSecsConfigName} is less than zero, will return null here, which means disable
* refreshing of endpoints.
*/
static RegistryEndpointsRefresher create(Configuration conf, String initialDelaySecsConfigName, String intervalSecsConfigName, String minIntervalSecsConfigName, Refresher refresher) {
  long periodicRefreshMs = TimeUnit.SECONDS.toMillis(conf.getLong(intervalSecsConfigName, PERIODIC_REFRESH_INTERVAL_SECS_DEFAULT));
if (periodicRefreshMs <= 0) {
return null;
}
long initialDelayMs = Math.max(1, TimeUnit.SECONDS.toMillis(conf.getLong(initialDelaySecsConfigName, periodicRefreshMs / 10)));
long minTimeBetweenRefreshesMs = TimeUnit.SECONDS.toMillis(conf.getLong(minIntervalSecsConfigName, MIN_SECS_BETWEEN_REFRESHES_DEFAULT));
Preconditions.checkArgument(minTimeBetweenRefreshesMs < periodicRefreshMs);
return new RegistryEndpointsRefresher(initialDelayMs, periodicRefreshMs, minTimeBetweenRefreshesMs, refresher);
} | 3.26 |
hbase_RegistryEndpointsRefresher_refreshNow_rdh | /**
* Notifies the refresher thread to refresh the configuration. This does not guarantee a refresh.
* See class comment for details.
*/
synchronized void refreshNow() {
  refreshNow = true;
notifyAll();
} | 3.26 |
hbase_AbstractMemStore_maybeCloneWithAllocator_rdh | /**
 * If the segment has a memory allocator, the cell is cloned into that space and the clone is
 * returned; otherwise the given cell is returned. When a cell's size is too big (bigger than
 * maxAlloc), it is not allocated on MSLAB. Since the process of flattening to CellChunkMap
 * assumes that all cells are allocated on MSLAB, during this process the input parameter
 * forceCloneOfBigCell is set to 'true' and the cell is copied into MSLAB.
 *
 * @param currentActive
 *          the active mutable segment to clone into
 * @param cell
 *          the cell to clone
 * @param forceCloneOfBigCell
 *          true only during the process of flattening to CellChunkMap.
 * @return either the given cell or its clone
 */
private Cell maybeCloneWithAllocator(MutableSegment currentActive, Cell cell, boolean forceCloneOfBigCell) {
return currentActive.maybeCloneWithAllocator(cell, forceCloneOfBigCell);
} | 3.26 |
hbase_AbstractMemStore_getLowest_rdh | /* @return the lowest of a or b, or null if both a and b are null */
protected Cell getLowest(final Cell a, final Cell b) {
  if (a == null) {
    return b;
  }
  if (b == null) {
    return a;
  }
  return comparator.compareRows(a, b) <= 0 ? a : b;
} | 3.26 |
hbase_AbstractMemStore_timeOfOldestEdit_rdh | /**
* Returns Oldest timestamp of all the Cells in the MemStore
*/
@Override
public long timeOfOldestEdit() {
  return timeOfOldestEdit;
} | 3.26 |
hbase_AbstractMemStore_clearSnapshot_rdh | /**
* This method is protected under {@link HStore#lock} write lock,<br/>
* and this method is used by {@link HStore#updateStorefiles} after flushing is completed.<br/>
* The passed snapshot was successfully persisted; it can be let go.
*
* @param id
* Id of the snapshot to clean out.
* @see MemStore#snapshot()
*/
@Override
public void clearSnapshot(long id) throws UnexpectedStateException {
  if (this.snapshotId == (-1)) {
    return; // already cleared
  }
if (this.snapshotId != id) {
throw new UnexpectedStateException((("Current snapshot id is " + this.snapshotId) + ",passed ") + id);
}
// OK. Passed in snapshot is same as current snapshot. If not-empty,
// create a new snapshot and let the old one go.
doClearSnapShot();
} | 3.26 |
hbase_StructBuilder_reset_rdh | /**
* Reset the sequence of accumulated fields.
*/
public StructBuilder reset() {
fields.clear();
return this;
} | 3.26 |
hbase_StructBuilder_toStruct_rdh | /**
* Retrieve the {@link Struct} represented by {@code this}.
*/
public Struct toStruct() {
return new Struct(fields.toArray(new DataType<?>[fields.size()]));
} | 3.26 |
hbase_StructBuilder_add_rdh | /**
* Append {@code field} to the sequence of accumulated fields.
*/
public StructBuilder add(DataType<?> field) {
fields.add(field);
return this;
} | 3.26 |
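A usage sketch combining the three StructBuilder methods above; the concrete DataType implementations shown (OrderedInt32, OrderedString from org.apache.hadoop.hbase.types) are an assumption for illustration:

// Build a two-field struct: an order-preserving int32 followed by a string.
StructBuilder builder = new StructBuilder();
Struct pointType = builder
  .add(OrderedInt32.ASCENDING)
  .add(OrderedString.ASCENDING)
  .toStruct();
builder.reset(); // the builder can now accumulate fields for another Struct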
hbase_EnvironmentEdgeManager_getDelegate_rdh | /**
* Retrieves the singleton instance of the {@link EnvironmentEdge} that is being managed.
*
* @return the edge.
*/
public static EnvironmentEdge getDelegate() {
  return delegate;
} | 3.26 |
hbase_EnvironmentEdgeManager_injectEdge_rdh | /**
* Injects the given edge such that it becomes the managed entity. If null is passed to this
* method, the default type is assigned to the delegate.
*
* @param edge
* the new edge.
*/
public static void injectEdge(EnvironmentEdge edge) {
  if (edge == null) {
reset();
} else {
delegate = edge;
}
} | 3.26 |
hbase_EnvironmentEdgeManager_reset_rdh | /**
* Resets the managed instance to the default instance: {@link DefaultEnvironmentEdge}.
*/
public static void reset() {
injectEdge(new DefaultEnvironmentEdge());
} | 3.26 |
hbase_EnvironmentEdgeManager_currentTime_rdh | /**
* Defers to the delegate and calls the {@link EnvironmentEdge#currentTime()} method.
*
* @return current time in millis according to the delegate.
*/
public static long currentTime() {
return getDelegate().currentTime();
} | 3.26 |
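A sketch of the intended use in tests, assuming the ManualEnvironmentEdge test helper with a settable clock value; resetting in a finally block ensures other tests see the default edge again:

// Hypothetical test: freeze and advance time deterministically.
ManualEnvironmentEdge clock = new ManualEnvironmentEdge();
clock.setValue(1_000L);
EnvironmentEdgeManager.injectEdge(clock);
try {
  long t0 = EnvironmentEdgeManager.currentTime(); // 1000
  clock.incValue(5_000L);
  long t1 = EnvironmentEdgeManager.currentTime(); // 6000
  assert t1 - t0 == 5_000L;
} finally {
  EnvironmentEdgeManager.reset(); // restore DefaultEnvironmentEdge
}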
hbase_PrefixFilter_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof PrefixFilter)) {
return false;
}PrefixFilter other = ((PrefixFilter) (o));
return Bytes.equals(this.getPrefix(), other.getPrefix());
} | 3.26 |
hbase_PrefixFilter_parseFrom_rdh | /**
* Parse a serialized representation of {@link PrefixFilter}
*
* @param pbBytes
* A pb serialized {@link PrefixFilter} instance
* @return An instance of {@link PrefixFilter} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
 */
public static PrefixFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.PrefixFilter proto;
try {
proto = FilterProtos.PrefixFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
    throw new DeserializationException(e);
  }
return new PrefixFilter(proto.hasPrefix() ? proto.getPrefix().toByteArray() : null);
} | 3.26 |
hbase_PrefixFilter_toByteArray_rdh | /**
* Returns The filter serialized using pb
*/
@Override
public byte[] toByteArray() {
FilterProtos.PrefixFilter.Builder builder = FilterProtos.PrefixFilter.newBuilder();
  if (this.prefix != null) {
    builder.setPrefix(UnsafeByteOperations.unsafeWrap(this.prefix));
  }
return builder.build().toByteArray();
} | 3.26 |
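A round-trip sketch combining the two serialization methods above (Bytes is org.apache.hadoop.hbase.util.Bytes):

// Serialize a PrefixFilter to protobuf bytes and parse it back.
PrefixFilter original = new PrefixFilter(Bytes.toBytes("row-"));
byte[] pb = original.toByteArray();
PrefixFilter copy = PrefixFilter.parseFrom(pb);
// The deserialized copy carries the same prefix.
assert Bytes.equals(original.getPrefix(), copy.getPrefix());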
hbase_DefaultMobStoreFlusher_performMobFlush_rdh | /**
 * Flushes the cells in the mob store.
 * <ol>
 * In the mob store, the cells with PUT type may or may not have mob tags.
 * <li>If a cell does not have a mob tag, where the cell is flushed depends on the value length.
 * If the length is larger than a threshold, it is flushed to a mob file and the mob file is
 * flushed to a store file in HBase. Otherwise, the cell is flushed directly to a store file in
 * HBase.</li>
 * <li>If a cell has a mob tag, its value is a mob file name, and it is flushed directly to a
 * store file in HBase.</li>
 * </ol>
 *
 * @param snapshot
 *          Memstore snapshot.
 * @param cacheFlushId
 *          Log cache flush sequence number.
 * @param scanner
 *          The scanner of memstore snapshot.
 * @param writer
 *          The store file writer.
 * @param status
 *          Task that represents the flush operation and may be updated with status.
 * @param throughputController
 *          A controller to avoid flushing too fast.
 */
protected void performMobFlush(MemStoreSnapshot snapshot, long cacheFlushId, InternalScanner scanner, StoreFileWriter writer, MonitoredTask status, ThroughputController throughputController, Consumer<Path> writerCreationTracker) throws IOException {
  StoreFileWriter mobFileWriter = null;
  int compactionKVMax = conf.getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT);
  long mobCount = 0;
  long mobSize = 0;
  long time = snapshot.getTimeRangeTracker().getMax();
  mobFileWriter = (mobStore.getStoreEngine().requireWritingToTmpDirFirst()) ? mobStore.createWriterInTmp(new Date(time), snapshot.getCellsCount(), store.getColumnFamilyDescriptor().getCompressionType(), store.getRegionInfo().getStartKey(), false) : mobStore.createWriter(new Date(time), snapshot.getCellsCount(), store.getColumnFamilyDescriptor().getCompressionType(), store.getRegionInfo().getStartKey(), false, writerCreationTracker);
  // the target path is {tableName}/.mob/{cfName}/mobFiles
  // the relative path is mobFiles
  byte[] fileName = Bytes.toBytes(mobFileWriter.getPath().getName());
  ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
  List<Cell> cells = new ArrayList<>();
  boolean hasMore;
  String flushName = ThroughputControlUtil.getNameForThrottling(store, "flush");
  boolean control = (throughputController != null) && (!store.getRegionInfo().getTable().isSystemTable());
  if (control) {
    throughputController.start(flushName);
  }
  IOException ioe = null;
  // Clear all past MOB references
  mobRefSet.get().clear();
  try {
    do {
      hasMore = scanner.next(cells, scannerContext);
      if (!cells.isEmpty()) {
        for (Cell c : cells) {
          // If we know that this KV is going to be included always, then let us
          // set its memstoreTS to 0. This will help us save space when writing to
          // disk.
          if (((c.getValueLength() <= mobCellValueSizeThreshold) || MobUtils.isMobReferenceCell(c)) || (c.getTypeByte() != Type.Put.getCode())) {
            writer.append(c);
          } else {
            // append the original keyValue in the mob file.
            mobFileWriter.append(c);
            mobSize += c.getValueLength();
            mobCount++;
            // append the tags to the KeyValue.
            // The key is same, the value is the filename of the mob file
            Cell reference = MobUtils.createMobRefCell(c, fileName, this.mobStore.getRefCellTags());
            writer.append(reference);
          }
          if (control) {
            throughputController.control(flushName, c.getSerializedSize());
          }
        }
        cells.clear();
      }
    } while (hasMore);
  } catch (InterruptedException e) {
    ioe = new InterruptedIOException("Interrupted while control throughput of flushing " + flushName);
    throw ioe;
  } catch (IOException e) {
    ioe = e;
    throw e;
  } finally {
    if (control) {
      throughputController.finish(flushName);
    }
    if (ioe != null) {
      mobFileWriter.close();
    }
  }
  if (mobCount > 0) {
    // commit the mob file from temp folder to target folder.
    // If the mob file is committed successfully but the store file is not,
    // the committed mob file will be handled by the sweep tool as an unused
    // file.
    status.setStatus(("Flushing mob file " + store) + ": appending metadata");
    mobFileWriter.appendMetadata(cacheFlushId, false, mobCount);
    status.setStatus(("Flushing mob file " + store) + ": closing flushed file");
    mobFileWriter.close();
    mobStore.commitFile(mobFileWriter.getPath(), targetPath);
    LOG.debug("Flush store file: {}, store: {}", writer.getPath(), getStoreInfo());
    mobStore.updateMobFlushCount();
    mobStore.updateMobFlushedCellsCount(mobCount);
    mobStore.updateMobFlushedCellsSize(mobSize);
    // Add mob reference to store file metadata
    mobRefSet.get().add(mobFileWriter.getPath().getName());
  } else {
    try {
      status.setStatus(("Flushing mob file " + store) + ": no mob cells, closing flushed file");
      // If the mob file is empty, delete it instead of committing.
      mobFileWriter.close();
      store.getFileSystem().delete(mobFileWriter.getPath(), true);
    } catch (IOException e) {
      LOG.error("Failed to delete the temp mob file", e);
    }
  }
} | 3.26 |
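The core routing decision in the loop above can be read as a single predicate; a minimal restatement under the same assumptions (helper name hypothetical, Type from KeyValue):

// A cell's value goes to the MOB file only when it is larger than the
// threshold, is not already a MOB reference, and is a Put; everything else
// is appended to the normal store file.
static boolean flushValueToMobFile(Cell c, long mobCellValueSizeThreshold) {
  return c.getValueLength() > mobCellValueSizeThreshold
    && !MobUtils.isMobReferenceCell(c)
    && c.getTypeByte() == KeyValue.Type.Put.getCode();
}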
hbase_DefaultMobStoreFlusher_flushSnapshot_rdh | /**
* Flushes the snapshot of the MemStore. If this store is not a mob store, flush the cells in the
* snapshot to store files of HBase. If the store is a mob one, the flusher flushes the MemStore
* into two places. One is the store files of HBase, the other is the mob files.
* <ol>
* <li>Cells that are not PUT type or have the delete mark will be directly flushed to HBase.</li>
* <li>If the size of a cell value is larger than a threshold, it'll be flushed to a mob file,
* another cell with the path of this file will be flushed to HBase.</li>
 * <li>If the size of a cell value is smaller than or equal to a threshold, it'll be flushed to
 * HBase directly.</li>
* </ol>
*/
@Override
public List<Path> flushSnapshot(MemStoreSnapshot snapshot, long cacheFlushId, MonitoredTask status, ThroughputController throughputController, FlushLifeCycleTracker tracker, Consumer<Path> writerCreationTracker) throws IOException {
  ArrayList<Path> result = new ArrayList<>();
  long cellsCount = snapshot.getCellsCount();
  if (cellsCount == 0) {
    return result; // don't flush if there are no entries
  }
  // Use a store scanner to find which rows to flush.
  InternalScanner scanner = createScanner(snapshot.getScanners(), tracker);
  StoreFileWriter writer;
  try {
    // TODO: We can fail in the below block before we complete adding this flush to
    // list of store files. Add cleanup of anything put on filesystem if we fail.
    synchronized(flushLock) {
      status.setStatus(("Flushing " + store) + ": creating writer");
      // Write the map out to the disk
      writer = createWriter(snapshot, true, writerCreationTracker);
      IOException e = null;
      try {
        // It's a mob store, flush the cells in a mob way. This is the difference of flushing
        // between a normal and a mob store.
        performMobFlush(snapshot, cacheFlushId, scanner, writer, status, throughputController, writerCreationTracker);
      } catch (IOException ioe) {
        e = ioe;
        // throw the exception out
        throw ioe;
      } finally {
        if (e != null) {
          writer.close();
        } else {
          finalizeWriter(writer, cacheFlushId, status);
        }
      }
    }
  } finally {
    scanner.close();
  }
  LOG.info((((((("Mob store is flushed, sequenceid=" + cacheFlushId) + ", memsize=") + StringUtils.TraditionalBinaryPrefix.long2String(snapshot.getDataSize(), "", 1)) + ", hasBloomFilter=") + writer.hasGeneralBloom()) + ", into tmp file ") + writer.getPath());
  result.add(writer.getPath());
  return result;
} | 3.26 |
hbase_HBaseMetrics2HadoopMetricsAdapter_m0_rdh | /**
* Iterates over the MetricRegistry and adds them to the {@code builder}.
*
* @param builder
* A record builder
*/
public void m0(MetricRegistry metricRegistry, MetricsRecordBuilder builder) {
Map<String, Metric> metrics = metricRegistry.getMetrics();
for (Map.Entry<String, Metric> e : metrics.entrySet()) {
// Always capitalize the name
String name = StringUtils.capitalize(e.getKey());
Metric metric = e.getValue();
if (metric instanceof Gauge) {
addGauge(name, ((Gauge<?>) (metric)), builder);
} else if (metric instanceof Counter) {
addCounter(name, ((Counter) (metric)), builder);
} else if (metric instanceof Histogram) {
addHistogram(name, ((Histogram) (metric)), builder);
} else if (metric instanceof Meter) {
addMeter(name, ((Meter) (metric)), builder);
} else if (metric instanceof Timer) {
addTimer(name, ((Timer) (metric)), builder);
} else {
LOG.info("Ignoring unknown Metric class " + metric.getClass().getName());
}
}
} | 3.26 |
hbase_HBaseMetrics2HadoopMetricsAdapter_addHistogram_rdh | /**
 * Add Histogram value-distribution data to a Hadoop-Metrics2 record builder.
*
* @param name
* A base name for this record.
* @param histogram
* A histogram to measure distribution of values.
* @param builder
* A Hadoop-Metrics2 record builder.
*/
private void addHistogram(String name, Histogram histogram, MetricsRecordBuilder builder) {
MutableHistogram.snapshot(name, EMPTY_STRING, histogram, builder, true);
} | 3.26 |
hbase_HBaseMetrics2HadoopMetricsAdapter_snapshotAllMetrics_rdh | /**
* Iterates over the MetricRegistry and adds them to the {@code collector}.
*
* @param collector
* A metrics collector
*/
public void snapshotAllMetrics(MetricRegistry metricRegistry, MetricsCollector collector) {
MetricRegistryInfo info = metricRegistry.getMetricRegistryInfo();
  MetricsRecordBuilder builder = collector.addRecord(Interns.info(info.getMetricsName(), info.getMetricsDescription()));
builder.setContext(info.getMetricsContext());
snapshotAllMetrics(metricRegistry, builder);
} | 3.26 |
hbase_HBaseMetrics2HadoopMetricsAdapter_addMeter_rdh | /**
 * Add Dropwizard-Metrics rate information to a Hadoop-Metrics2 record builder, converting the
 * rates to the appropriate unit.
 *
 * @param name
 *          A base name for this record.
 * @param meter
 *          The meter whose rates are recorded.
 * @param builder
 *          A Hadoop-Metrics2 record builder.
 */
private void addMeter(String name, Meter meter, MetricsRecordBuilder builder) {
  builder.addGauge(Interns.info(name + "_count", EMPTY_STRING), meter.getCount());
builder.addGauge(Interns.info(name + "_mean_rate", EMPTY_STRING), meter.getMeanRate());
builder.addGauge(Interns.info(name + "_1min_rate", EMPTY_STRING), meter.getOneMinuteRate());
builder.addGauge(Interns.info(name + "_5min_rate", EMPTY_STRING), meter.getFiveMinuteRate());
builder.addGauge(Interns.info(name + "_15min_rate", EMPTY_STRING), meter.getFifteenMinuteRate());
} | 3.26 |
hbase_PreemptiveFastFailException_getFirstFailureAt_rdh | /**
 * Returns time of the first failure
*/
public long getFirstFailureAt() {
return timeOfFirstFailureMilliSec;
} | 3.26 |
hbase_PreemptiveFastFailException_isGuaranteedClientSideOnly_rdh | /**
* Returns true if we know no mutation made it to the server, false otherwise.
*/
public boolean isGuaranteedClientSideOnly() {
return guaranteedClientSideOnly;
} | 3.26 |
hbase_PreemptiveFastFailException_getFailureCount_rdh | /**
* Returns failure count
*/
public long getFailureCount() {
return failureCount;
} | 3.26 |
hbase_PreemptiveFastFailException_getLastAttemptAt_rdh | /**
* Returns time of the latest attempt
 */
public long getLastAttemptAt() {
return timeOfLatestAttemptMilliSec;
} | 3.26 |
hbase_PreemptiveFastFailException_wasOperationAttemptedByServer_rdh | /**
* Returns true if operation was attempted by server, false otherwise.
*/
public boolean wasOperationAttemptedByServer() {
return false;
} | 3.26 |
hbase_HRegionFileSystem_getStoreHomedir_rdh | /**
*
* @param tabledir
* {@link Path} to where the table is being stored
* @param encodedName
* Encoded region name.
* @param family
* {@link ColumnFamilyDescriptor} describing the column family
* @return Path to family/Store home directory.
*/
public static Path getStoreHomedir(final Path tabledir, final String encodedName, final byte[] family) {
return new Path(tabledir, new Path(encodedName, Bytes.toString(family)));
} | 3.26 |
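A quick illustration of the resulting layout; the table dir and encoded region name are hypothetical:

// For table dir /hbase/data/default/t1, encoded region name "5d0e1c1a9f2b4c3d"
// and family "cf", the store home dir is /hbase/data/default/t1/5d0e1c1a9f2b4c3d/cf
Path tableDir = new Path("/hbase/data/default/t1");
Path storeDir = HRegionFileSystem.getStoreHomedir(tableDir, "5d0e1c1a9f2b4c3d", Bytes.toBytes("cf"));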
hbase_HRegionFileSystem_bulkLoadStoreFile_rdh | /**
 * Bulk load: Add a specified store file to the specified family. If the source file is on the
 * same file-system, it is moved from the source location to the destination location; otherwise
 * it is copied over.
*
* @param familyName
* Family that will gain the file
* @param srcPath
* {@link Path} to the file to import
* @param seqNum
* Bulk Load sequence number
* @return The destination {@link Path} of the bulk loaded file
*/
Pair<Path, Path> bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum) throws IOException {
// Copy the file if it's on another filesystem
FileSystem srcFs = srcPath.getFileSystem(conf);
  srcPath = srcFs.resolvePath(srcPath);
  FileSystem realSrcFs = srcPath.getFileSystem(conf);
FileSystem desFs = (fs instanceof HFileSystem) ? ((HFileSystem) (fs)).getBackingFs() : fs;
// We can't compare FileSystem instances as equals() includes UGI instance
// as part of the comparison and won't work when doing SecureBulkLoad
// TODO deal with viewFS
if (!FSUtils.isSameHdfs(conf, realSrcFs, desFs)) {
LOG.info((("Bulk-load file " + srcPath) + " is on different filesystem than ") + "the destination store. Copying file over to destination filesystem.");
Path tmpPath = createTempName();
FileUtil.copy(realSrcFs, srcPath, fs, tmpPath, false, conf);
LOG.info((("Copied " + srcPath) + " to temporary path on destination filesystem: ") + tmpPath);
srcPath = tmpPath;
}
return new Pair<>(srcPath, preCommitStoreFile(familyName, srcPath, seqNum, true));
} | 3.26 |
hbase_HRegionFileSystem_getSplitsDir_rdh | // ===========================================================================
// Splits Helpers
// ===========================================================================
public Path getSplitsDir(final RegionInfo hri) {
return new Path(getTableDir(), hri.getEncodedName());
} | 3.26 |
hbase_HRegionFileSystem_cleanupTempDir_rdh | /**
* Clean up any temp detritus that may have been left around from previous operation attempts.
*/
void cleanupTempDir() throws IOException {
deleteDir(getTempDir());
} | 3.26 |
hbase_HRegionFileSystem_generateUniqueName_rdh | /**
* Generate a unique file name, used by createTempName() and commitStoreFile()
*
* @param suffix
* extra information to append to the generated name
* @return Unique file name
*/
private static String generateUniqueName(final String suffix) {
  String name = UUID.randomUUID().toString().replaceAll("-", "");
  if (suffix != null) {
    name += suffix;
  }
  return name;
} | 3.26 |
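For illustration, the names this produces are 32 hex characters plus an optional suffix; the seqNum suffix shown is the one preCommitStoreFile appends later in this file, and the hex value is an example only:

String base = generateUniqueName(null);            // e.g. "3f2504e04f8941d39a0c0305e82c3301"
String withSeq = generateUniqueName("_SeqId_42_"); // same form plus the "_SeqId_42_" suffix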
hbase_HRegionFileSystem_getTempDir_rdh | // ===========================================================================
// Temp Helpers
// ===========================================================================
/**
* Returns {@link Path} to the region's temp directory, used for file creations
*/
public Path getTempDir() {
  return new Path(getRegionDir(), REGION_TEMP_DIR);
} | 3.26 |
hbase_HRegionFileSystem_setStoragePolicy_rdh | /**
* Set storage policy for a whole region. <br>
* <i>"LAZY_PERSIST"</i>, <i>"ALL_SSD"</i>, <i>"ONE_SSD"</i>, <i>"HOT"</i>, <i>"WARM"</i>,
* <i>"COLD"</i> <br>
* <br>
* See {@link org.apache.hadoop.hdfs.protocol.HdfsConstants} for more details.
*
* @param policyName
* The name of the storage policy: 'HOT', 'COLD', etc. See hadoop 2.6+
* org.apache.hadoop.hdfs.protocol.HdfsConstants for possible list e.g 'COLD',
* 'WARM', 'HOT', 'ONE_SSD', 'ALL_SSD', 'LAZY_PERSIST'.
*/
public void setStoragePolicy(String policyName) {
  CommonFSUtils.setStoragePolicy(this.fs, getRegionDir(), policyName);
}
/**
* Get the storage policy of the directory of CF.
*
* @param familyName
* The name of column family.
* @return Storage policy name, or {@code null} if not using {@link HFileSystem} | 3.26 |
hbase_HRegionFileSystem_preCommitStoreFile_rdh | /**
* Generate the filename in the main family store directory for moving the file from a build/temp
* location.
*
* @param familyName
* Family that will gain the file
* @param buildPath
* {@link Path} to the file to commit.
 * @param seqNum
 *          Sequence Number to append to the file name (less than 0 if no sequence
 *          number)
* @param generateNewName
* False if you want to keep the buildPath name
* @return The new {@link Path} of the to be committed file
*/
private Path preCommitStoreFile(final String familyName, final Path buildPath, final long seqNum, final boolean generateNewName) throws IOException {
  Path storeDir = getStoreDir(familyName);
  if ((!fs.exists(storeDir)) && (!createDir(storeDir))) {
    throw new IOException("Failed creating " + storeDir);
  }
String name = buildPath.getName();
if (generateNewName) {
name = generateUniqueName(seqNum < 0 ? null : ("_SeqId_" + seqNum) + "_");
}
Path dstPath = new Path(storeDir, name);
if (!fs.exists(buildPath)) {
throw new FileNotFoundException(buildPath.toString());
}
if (LOG.isDebugEnabled()) {
LOG.debug((("Committing " + buildPath) + " as ") + dstPath);
}
return dstPath;
} | 3.26 |
hbase_HRegionFileSystem_getTableDir_rdh | /**
* Returns {@link Path} to the region's root directory.
*/
public Path getTableDir() {
return this.tableDir;
} | 3.26 |
hbase_HRegionFileSystem_createTempName_rdh | /**
 * Generate a unique temporary Path. Used in conjunction with commitStoreFile() to get a safer file
* creation. <code>
* Path file = fs.createTempName();
* ...StoreFile.Writer(file)...
* fs.commitStoreFile("family", file);
* </code>
*
* @param suffix
* extra information to append to the generated name
* @return Unique {@link Path} of the temporary file
*/
public Path createTempName(final String suffix) {
return new Path(getTempDir(), generateUniqueName(suffix));
} | 3.26 |
hbase_HRegionFileSystem_getMergesDir_rdh | // ===========================================================================
// Merge Helpers
// ===========================================================================
Path getMergesDir(final RegionInfo hri) {
return new Path(getTableDir(), hri.getEncodedName());
} | 3.26 |
hbase_HRegionFileSystem_checkRegionInfoOnFilesystem_rdh | /**
 * Write out an info file under the stored region directory. Useful for recovering mangled
 * regions. If the regionInfo already exists on-disk, then we fast exit.
*/
void checkRegionInfoOnFilesystem() throws IOException {
// Compose the content of the file so we can compare to length in filesystem. If not same,
// rewrite it (it may have been written in the old format using Writables instead of pb). The
// pb version is much shorter -- we write now w/o the toString version -- so checking length
// only should be sufficient. I don't want to read the file every time to check if it pb
// serialized.
byte[] content = getRegionInfoFileContent(regionInfoForFs);
// Verify if the region directory exists before opening a region. We need to do this since if
// the region directory doesn't exist we will re-create the region directory and a new HRI
// when HRegion.openHRegion() is called.
  try {
    FileStatus status = fs.getFileStatus(getRegionDir());
  } catch (FileNotFoundException e) {
    LOG.warn((((getRegionDir() + " doesn't exist for region: ") + regionInfoForFs.getEncodedName()) + " on table ") + regionInfo.getTable());
  }
try {
Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);
FileStatus status = fs.getFileStatus(regionInfoFile);
    if ((status != null) && (status.getLen() == content.length)) {
      // Then assume the content good and move on.
      // NOTE: the length alone is not sufficient to prove the content matches.
return;
}
LOG.info("Rewriting .regioninfo file at: " + regionInfoFile);
if (!fs.delete(regionInfoFile, false)) {
throw new IOException("Unable to remove existing " + regionInfoFile);
}
} catch (FileNotFoundException e) {
LOG.warn((((REGION_INFO_FILE + " file not found for region: ") + regionInfoForFs.getEncodedName()) + " on table ") + regionInfo.getTable());
}
// Write HRI to a file in case we need to recover hbase:meta
writeRegionInfoOnFilesystem(content, true);
} | 3.26 |
hbase_HRegionFileSystem_getRegionInfoFileContent_rdh | // ===========================================================================
// Create/Open/Delete Helpers
// ===========================================================================
/**
* Returns Content of the file we write out to the filesystem under a region
*/
private static byte[] getRegionInfoFileContent(final RegionInfo hri) throws IOException {
return RegionInfo.toDelimitedByteArray(hri);
} | 3.26 |
hbase_HRegionFileSystem_removeStoreFiles_rdh | /**
* Closes and archives the specified store files from the specified family.
*
* @param familyName
* Family that contains the store files
* @param storeFiles
* set of store files to remove
* @throws IOException
* if the archiving fails
*/
public void removeStoreFiles(String familyName, Collection<HStoreFile> storeFiles) throws IOException {
HFileArchiver.archiveStoreFiles(this.conf, this.fs, this.regionInfoForFs, this.tableDir, Bytes.toBytes(familyName), storeFiles);
} | 3.26 |
hbase_HRegionFileSystem_createRegionOnFileSystem_rdh | /**
* Create a new Region on file-system.
*
* @param conf
* the {@link Configuration} to use
* @param fs
* {@link FileSystem} from which to add the region
* @param tableDir
* {@link Path} to where the table is being stored
* @param regionInfo
* {@link RegionInfo} for region to be added
* @throws IOException
* if the region creation fails due to a FileSystem exception.
*/
public static HRegionFileSystem createRegionOnFileSystem(final Configuration conf, final FileSystem fs, final Path tableDir, final RegionInfo regionInfo) throws IOException {
  HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
  // We only create a .regioninfo and the region directory if this is the default region replica
  if (regionInfo.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
    Path regionDir = regionFs.getRegionDir();
    if (fs.exists(regionDir)) {
      LOG.warn("Trying to create a region that already exists on disk: " + regionDir);
    } else if (!createDirOnFileSystem(fs, conf, regionDir)) {
      // Create the region directory failed
      LOG.warn("Unable to create the region directory: " + regionDir);
      throw new IOException("Unable to create region directory: " + regionDir);
    }
    // Write HRI to a file in case we need to recover hbase:meta
    regionFs.writeRegionInfoOnFilesystem(false);
  } else if (LOG.isDebugEnabled()) {
    LOG.debug("Skipping creation of .regioninfo file for " + regionInfo);
  }
  return regionFs;
} | 3.26 |
hbase_HRegionFileSystem_rename_rdh | /**
* Renames a directory. Assumes the user has already checked for this directory existence.
*
* @return true if rename is successful.
*/
boolean rename(Path srcpath, Path dstPath) throws IOException {
  IOException v85 = null;
  int i = 0;
  do {
    try {
      return fs.rename(srcpath, dstPath);
    } catch (IOException ioe) {
      v85 = ioe;
      if ((!fs.exists(srcpath)) && fs.exists(dstPath)) {
        return true; // successful move
      }
      // dir is not there, retry after some time.
      try {
        sleepBeforeRetry("Rename Directory", i + 1);
      } catch (InterruptedException e) {
        throw ((InterruptedIOException) (new InterruptedIOException().initCause(e)));
      }
    }
  } while ((++i) <= hdfsClientRetriesNumber);
  throw new IOException("Exception in rename", v85);
} | 3.26 |
hbase_HRegionFileSystem_sleepBeforeRetry_rdh | /**
* sleeping logic for static methods; handles the interrupt exception. Keeping a static version
* for this to avoid re-looking for the integer values.
*/
private static void sleepBeforeRetry(String msg, int sleepMultiplier, int baseSleepBeforeRetries, int hdfsClientRetriesNumber) throws InterruptedException {
if (sleepMultiplier > hdfsClientRetriesNumber) {
if (LOG.isDebugEnabled()) {
LOG.debug(msg + ", retries exhausted");
}
return;
}
if (LOG.isDebugEnabled()) {
LOG.debug((((msg + ", sleeping ") + baseSleepBeforeRetries) + " times ") + sleepMultiplier);
}
Thread.sleep(((long) (baseSleepBeforeRetries)) * sleepMultiplier);
} | 3.26 |
hbase_HRegionFileSystem_getStoreFilePath_rdh | /**
* Return Qualified Path of the specified family/file
*
* @param familyName
* Column Family Name
* @param fileName
* File Name
* @return The qualified Path for the specified family/file
*/
Path getStoreFilePath(final String familyName, final String fileName) {
  Path familyDir = getStoreDir(familyName);
  return new Path(familyDir, fileName).makeQualified(fs.getUri(), fs.getWorkingDirectory());
}
/**
* Return the store file information of the specified family/file.
*
* @param familyName
* Column Family Name
* @param fileName
* File Name
* @return The {@link StoreFileInfo} | 3.26 |
hbase_HRegionFileSystem_deleteFamily_rdh | /**
* Remove the region family from disk, archiving the store files.
*
* @param familyName
* Column Family Name
 * @throws IOException
 *           if an error occurs during the archiving
 */
public void deleteFamily(final String familyName) throws IOException {
// archive family store files
HFileArchiver.archiveFamily(fs, conf, regionInfoForFs, tableDir, Bytes.toBytes(familyName));
// delete the family folder
Path familyDir = getStoreDir(familyName);
  if (fs.exists(familyDir) && (!deleteDir(familyDir))) {
    throw new IOException(((((("Could not delete family " + familyName) + " from FileSystem for region ") + regionInfoForFs.getRegionNameAsString()) + "(") + regionInfoForFs.getEncodedName()) + ")");
  }
} | 3.26 |
hbase_HRegionFileSystem_getStoreFiles_rdh | /**
* Returns the store files available for the family. This methods performs the filtering based on
* the valid store files.
*
* @param familyName
* Column Family Name
* @return a set of {@link StoreFileInfo} for the specified family.
*/
public List<StoreFileInfo> getStoreFiles(final String familyName, final boolean validate) throws IOException {
  Path familyDir = getStoreDir(familyName);
  FileStatus[] files = CommonFSUtils.listStatus(this.fs, familyDir);
  if (files == null) {
    if (LOG.isTraceEnabled()) {
      LOG.trace("No StoreFiles for: " + familyDir);
    }
    return null;
  }
  ArrayList<StoreFileInfo> storeFiles = new ArrayList<>(files.length);
  for (FileStatus status : files) {
    if (validate && (!StoreFileInfo.isValid(status))) {
      // recovered.hfiles directory is expected inside CF path when hbase.wal.split.to.hfile to
      // true, refer HBASE-23740
      if (!HConstants.RECOVERED_HFILES_DIR.equals(status.getPath().getName())) {
        LOG.warn("Invalid StoreFile: {}", status.getPath());
      }
      continue;
    }
    StoreFileInfo info = ServerRegionReplicaUtil.getStoreFileInfo(conf, fs, regionInfo, regionInfoForFs, familyName, status.getPath());
    storeFiles.add(info);
  }
  return storeFiles;
} | 3.26 |
hbase_HRegionFileSystem_createSplitsDir_rdh | /**
* Creates region split daughter directories under the table dir. If the daughter regions already
* exist, for example, in the case of a recovery from a previous failed split procedure, this
* method deletes the given region dir recursively, then recreates it again.
 */
public void createSplitsDir(RegionInfo daughterA, RegionInfo daughterB) throws IOException {
Path daughterADir = getSplitsDir(daughterA);
if (fs.exists(daughterADir) && (!deleteDir(daughterADir))) {
throw new IOException(("Failed deletion of " + daughterADir) + " before creating them again.");
}
if (!createDir(daughterADir)) {
throw new IOException("Failed create of " + daughterADir);
}
Path daughterBDir = getSplitsDir(daughterB);
if (fs.exists(daughterBDir) && (!deleteDir(daughterBDir))) {
throw new IOException(("Failed deletion of " + daughterBDir) + " before creating them again.");
}
if (!createDir(daughterBDir)) {
throw new IOException("Failed create of " + daughterBDir);
}
} | 3.26 |
hbase_HRegionFileSystem_deleteRegionFromFileSystem_rdh | /**
* Remove the region from the table directory, archiving the region's hfiles.
*
* @param conf
* the {@link Configuration} to use
* @param fs
* {@link FileSystem} from which to remove the region
* @param tableDir
* {@link Path} to where the table is being stored
* @param regionInfo
* {@link RegionInfo} for region to be deleted
* @throws IOException
* if the request cannot be completed
*/
public static void deleteRegionFromFileSystem(final Configuration conf, final FileSystem fs, final Path tableDir, final RegionInfo regionInfo) throws IOException {
  HRegionFileSystem v80 = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
  Path regionDir = v80.getRegionDir();
  if (!fs.exists(regionDir)) {
    LOG.warn("Trying to delete a region that do not exists on disk: " + regionDir);
    return;
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("DELETING region " + regionDir);
  }
  // Archive region
  Path rootDir = CommonFSUtils.getRootDir(conf);
  HFileArchiver.archiveRegion(fs, rootDir, tableDir, regionDir);
  // Delete empty region dir
  if (!fs.delete(regionDir, true)) {
    LOG.warn("Failed delete of " + regionDir);
  }
} | 3.26 |
hbase_HRegionFileSystem_getStoreDir_rdh | // ===========================================================================
// Store/StoreFile Helpers
// ===========================================================================
/**
* Returns the directory path of the specified family
*
* @param familyName
* Column Family Name
* @return {@link Path} to the directory of the specified family
*/
public Path getStoreDir(final String familyName) {
return new Path(this.getRegionDir(), familyName);
} | 3.26 |
hbase_HRegionFileSystem_getStoreFilesLocatedStatus_rdh | /**
* Returns the store files' LocatedFileStatus which available for the family. This methods
* performs the filtering based on the valid store files.
*
* @param familyName
* Column Family Name
* @return a list of store files' LocatedFileStatus for the specified family.
 */
public static List<LocatedFileStatus> getStoreFilesLocatedStatus(final HRegionFileSystem regionfs, final String familyName, final boolean validate) throws IOException {
Path familyDir = regionfs.getStoreDir(familyName);
List<LocatedFileStatus> locatedFileStatuses = CommonFSUtils.listLocatedStatus(regionfs.getFileSystem(), familyDir);
if (locatedFileStatuses == null) {
if (LOG.isTraceEnabled()) {
LOG.trace("No StoreFiles for: " + familyDir);
}
    return null;
}
List<LocatedFileStatus> validStoreFiles = Lists.newArrayList();
for (LocatedFileStatus status : locatedFileStatuses) {
if (validate && (!StoreFileInfo.isValid(status))) {
// recovered.hfiles directory is expected inside CF path when hbase.wal.split.to.hfile to
// true, refer HBASE-23740
if (!HConstants.RECOVERED_HFILES_DIR.equals(status.getPath().getName())) {
LOG.warn("Invalid StoreFile: {}", status.getPath());
}
} else {
validStoreFiles.add(status);
}
}
return validStoreFiles;
} | 3.26 |
hbase_HRegionFileSystem_m0_rdh | /**
* Archives the specified store file from the specified family.
*
* @param familyName
* Family that contains the store files
* @param filePath
* {@link Path} to the store file to remove
* @throws IOException
* if the archiving fails
*/
public void m0(final String familyName, final Path filePath) throws IOException {
HFileArchiver.archiveStoreFile(this.conf, this.fs, this.regionInfoForFs, this.tableDir, Bytes.toBytes(familyName), filePath);
} | 3.26 |
hbase_HRegionFileSystem_writeRegionInfoOnFilesystem_rdh | /**
 * Write out an info file under the region directory. Useful for recovering mangled regions.
*
* @param regionInfoContent
* serialized version of the {@link RegionInfo}
* @param useTempDir
* indicate whether or not using the region .tmp dir for a safer file
* creation.
*/
private void writeRegionInfoOnFilesystem(final byte[] regionInfoContent, final boolean useTempDir) throws IOException {
Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);
if (useTempDir) {
// Create in tmpDir and then move into place in case we crash after
// create but before close. If we don't successfully close the file,
// subsequent region reopens will fail the below because create is
// registered in NN.
// And then create the file
    Path tmpPath = new Path(getTempDir(), REGION_INFO_FILE);
// If datanode crashes or if the RS goes down just before the close is called while trying to
// close the created regioninfo file in the .tmp directory then on next
// creation we will be getting AlreadyCreatedException.
// Hence delete and create the file if exists.
if (CommonFSUtils.isExists(fs, tmpPath)) {
CommonFSUtils.delete(fs, tmpPath, true);
}
// Write HRI to a file in case we need to recover hbase:meta
writeRegionInfoFileContent(conf, fs, tmpPath, regionInfoContent);
// Move the created file to the original path
    if (fs.exists(tmpPath) && (!rename(tmpPath, regionInfoFile))) {
      throw new IOException((("Unable to rename " + tmpPath) + " to ") + regionInfoFile);
}
} else {
// Write HRI to a file in case we need to recover hbase:meta
writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);
}
} | 3.26 |
hbase_HRegionFileSystem_openRegionFromFileSystem_rdh | /**
* Open Region from file-system.
*
* @param conf
* the {@link Configuration} to use
* @param fs
* {@link FileSystem} from which to add the region
* @param tableDir
* {@link Path} to where the table is being stored
* @param regionInfo
* {@link RegionInfo} for region to be added
* @param readOnly
* True if you don't want to edit the region data
* @throws IOException
* if the region creation fails due to a FileSystem exception.
*/
public static HRegionFileSystem openRegionFromFileSystem(final Configuration conf, final FileSystem fs, final Path tableDir, final RegionInfo regionInfo, boolean readOnly) throws IOException {
HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
Path regionDir = regionFs.getRegionDir();
if (!fs.exists(regionDir)) {
LOG.warn("Trying to open a region that do not exists on disk: " + regionDir);
throw new IOException("The specified region do not exists on disk: " + regionDir);
}
if (!readOnly) {
// Cleanup temporary directories
regionFs.cleanupTempDir();
// If it doesn't exists, Write HRI to a file, in case we need to recover hbase:meta
// Only create HRI if we are the default replica
if (regionInfo.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
regionFs.checkRegionInfoOnFilesystem();
    } else if (LOG.isDebugEnabled()) {
      LOG.debug("Skipping creation of .regioninfo file for " + regionInfo);
}
}
return regionFs;
} | 3.26 |
hbase_HRegionFileSystem_cleanupDaughterRegion_rdh | /**
* Remove daughter region
*
* @param regionInfo
* daughter {@link RegionInfo}
*/
void cleanupDaughterRegion(final RegionInfo regionInfo) throws IOException {
Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName());
if (this.fs.exists(regionDir) && (!deleteDir(regionDir))) {
throw new IOException("Failed delete of " + regionDir);
}
} | 3.26 |
hbase_HRegionFileSystem_createStoreDir_rdh | /**
* Create the store directory for the specified family name
*
* @param familyName
* Column Family Name
* @return {@link Path} to the directory of the specified family
* @throws IOException
* if the directory creation fails.
*/
Path createStoreDir(final String familyName) throws IOException {
Path storeDir = getStoreDir(familyName);
  if ((!fs.exists(storeDir)) && (!createDir(storeDir))) {
    throw new IOException("Failed creating " + storeDir);
  }
return storeDir;
} | 3.26 |
hbase_HRegionFileSystem_cleanupMergedRegion_rdh | /**
* Remove merged region
*
* @param mergedRegion
* {@link RegionInfo}
*/
public void cleanupMergedRegion(final RegionInfo mergedRegion) throws IOException {
Path regionDir = new Path(this.tableDir, mergedRegion.getEncodedName());
if (this.fs.exists(regionDir) && (!this.fs.delete(regionDir, true))) {
throw new IOException("Failed delete of " + regionDir);
}
} | 3.26 |
hbase_HRegionFileSystem_getRegionDir_rdh | /**
* Returns {@link Path} to the region directory.
*/
public Path getRegionDir() {
return regionDir;
} | 3.26 |
hbase_HRegionFileSystem_commitStoreFile_rdh | /* Moves file from staging dir to region dir
@param buildPath {@link Path} to the file to commit.
@param dstPath {@link Path} to the file under region dir
@return The {@link Path} of the committed file
*/
Path commitStoreFile(final Path buildPath, Path dstPath) throws IOException {
// rename is not necessary in case of direct-insert stores
if (buildPath.equals(dstPath)) {
return dstPath;
    }
    // buildPath exists, therefore not doing an exists() check.
    if (!rename(buildPath, dstPath)) {
      throw new IOException("Failed rename of " + buildPath + " to " + dstPath);
    }
    return dstPath;
  } | 3.26 |
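A hedged restatement of the commit step in isolation, using plain Hadoop FileSystem.rename for illustration (the class and method names here are made up): the rename is attempted only when the build path differs from the destination, since direct-insert stores already write in place.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class CommitSketch {
  // Mirrors the logic above: skip the rename when the file is already final.
  static Path commit(FileSystem fs, Path buildPath, Path dstPath) throws IOException {
    if (buildPath.equals(dstPath)) {
      return dstPath; // direct-insert store: file already in its final place
    }
    if (!fs.rename(buildPath, dstPath)) {
      throw new IOException("Failed rename of " + buildPath + " to " + dstPath);
    }
    return dstPath;
  }
}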
hbase_HRegionFileSystem_commitDaughterRegion_rdh | /**
* Commit a daughter region, moving it from the split temporary directory to the proper location
* in the filesystem.
*
 * @param regionInfo
 * daughter {@link org.apache.hadoop.hbase.client.RegionInfo}
 * @param allRegionFiles
 * store files to register with the daughter's store file tracker
 * @param env
 * the master procedure environment
 * @return the daughter region directory
 */
public Path commitDaughterRegion(final RegionInfo regionInfo, List<Path> allRegionFiles, MasterProcedureEnv env) throws IOException {
Path regionDir = this.getSplitsDir(regionInfo);
if (fs.exists(regionDir)) {
// Write HRI to a file in case we need to recover hbase:meta
Path regionInfoFile = new Path(regionDir, REGION_INFO_FILE);
byte[] regionInfoContent = getRegionInfoFileContent(regionInfo);
writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);
      HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
        env.getMasterConfiguration(), fs, getTableDir(), regionInfo, false);
      insertRegionFilesIntoStoreTracker(allRegionFiles, env, regionFs);
    }
return regionDir;
} | 3.26 |
hbase_HRegionFileSystem_splitStoreFile_rdh | /**
 * Write out a split reference. Package local so it doesn't leak out of regionserver.
*
* @param hri
* {@link RegionInfo} of the destination
* @param familyName
* Column Family Name
* @param f
* File to split.
* @param splitRow
* Split Row
* @param top
* True if we are referring to the top half of the hfile.
* @param splitPolicy
* A split policy instance; be careful! May not be full populated; e.g. if this
* method is invoked on the Master side, then the RegionSplitPolicy will NOT
* have a reference to a Region.
* @return Path to created reference.
*/
public Path splitStoreFile(RegionInfo hri, String familyName, HStoreFile f, byte[] splitRow, boolean top, RegionSplitPolicy splitPolicy) throws IOException {
Path splitDir = new Path(getSplitsDir(hri), familyName);
// Add the referred-to regions name as a dot separated suffix.
// See REF_NAME_REGEX regex above. The referred-to regions name is
// up in the path of the passed in <code>f</code> -- parentdir is family,
// then the directory above is the region name.
String parentRegionName = regionInfoForFs.getEncodedName();
// Write reference with same file id only with the other region name as
// suffix and into the new region location (under same family).
Path p = new Path(splitDir, (f.getPath().getName() + ".") + parentRegionName);
if (fs.exists(p)) {
LOG.warn("Found an already existing split file for {}. Assuming this is a recovery.", p);
return p;
}
    boolean createLinkFile = false;
if ((splitPolicy == null) || (!splitPolicy.skipStoreFileRangeCheck(familyName))) {
// Check whether the split row lies in the range of the store file
// If it is outside the range, return directly.
f.initReader();
try {
Cell splitKey = PrivateCellUtil.createFirstOnRow(splitRow);
Optional<Cell> lastKey = f.getLastKey();
Optional<Cell> firstKey = f.getFirstKey();
if (top) {
// check if larger than last key.
// If lastKey is null means storefile is empty.
if (!lastKey.isPresent()) {
            return null;
          }
if (f.getComparator().compare(splitKey, lastKey.get()) > 0) {
return null;
}
if (firstKey.isPresent() && (f.getComparator().compare(splitKey, firstKey.get()) <= 0)) {
LOG.debug("Will create HFileLink file for {}, top=true", f.getPath());
            createLinkFile = true;
}
        } else {
          // check if smaller than first key
// If firstKey is null means storefile is empty.
if (!firstKey.isPresent()) {
return null;
}
if (f.getComparator().compare(splitKey, firstKey.get()) < 0) {
return null;
}
if (lastKey.isPresent() && (f.getComparator().compare(splitKey, lastKey.get()) >= 0)) {
LOG.debug("Will create HFileLink file for {}, top=false", f.getPath());
            createLinkFile = true;
}
}
} finally {
f.closeStoreFile(f.getCacheConf() != null ? f.getCacheConf().shouldEvictOnClose() : true);
}
}
    if (createLinkFile) {
// create HFileLink file instead of Reference file for child
String hfileName = f.getPath().getName();
TableName linkedTable = regionInfoForFs.getTable();
String linkedRegion = regionInfoForFs.getEncodedName();
try {
if (HFileLink.isHFileLink(hfileName)) {
Matcher m = LINK_NAME_PATTERN.matcher(hfileName);
if (!m.matches()) {
throw new IllegalArgumentException(hfileName + " is not a valid HFileLink name!");
}
linkedTable = TableName.valueOf(m.group(1), m.group(2));
linkedRegion = m.group(3);
hfileName = m.group(4);
}
// must create back reference here
HFileLink.create(conf, fs, splitDir, familyName, hri.getTable().getNameAsString(), hri.getEncodedName(), linkedTable, linkedRegion, hfileName, true);
        Path path =
          new Path(splitDir, HFileLink.createHFileLinkName(linkedTable, linkedRegion, hfileName));
        LOG.info("Created linkFile:" + path.toString() + " for child: " + hri.getEncodedName()
          + ", parent: " + regionInfoForFs.getEncodedName());
return path;
} catch (IOException e) {
// if create HFileLink file failed, then just skip the error and create Reference file
LOG.error(((("Create link file for " + hfileName) + " for child ") + hri.getEncodedName()) + "failed, will create Reference file", e);
}
    }
    // A reference to the top or bottom half of the store file.
    Reference r =
      top ? Reference.createTopReference(splitRow) : Reference.createBottomReference(splitRow);
return r.write(fs, p);
} | 3.26 |
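The naming convention described in the comments above, shown with made-up encoded names: the daughter's reference (or link) file keeps the parent hfile's name and appends the parent region's encoded name as a dot-separated suffix.

public class ReferenceNameDemo {
  public static void main(String[] args) {
    // Hypothetical hfile and encoded region names, for illustration only.
    String hfileName = "b0c5d4f1a2e34567890abcdef1234567";
    String parentEncodedName = "d6f2dd5212d04f7cb2796af15d4e5eab";
    System.out.println(hfileName + "." + parentEncodedName);
    // -> b0c5d4f1a2e34567890abcdef1234567.d6f2dd5212d04f7cb2796af15d4e5eab
  }
}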
hbase_HRegionFileSystem_getFileSystem_rdh | /**
* Returns the underlying {@link FileSystem}
*/
public FileSystem getFileSystem() {
return this.fs;
} | 3.26 |
hbase_HRegionFileSystem_getFamilies_rdh | /**
 * Returns the set of families present on disk.
*/
public Collection<String> getFamilies() throws IOException {
    FileStatus[] fds =
      CommonFSUtils.listStatus(fs, getRegionDir(), new FSUtils.FamilyDirFilter(fs));
    if (fds == null) {
      return null;
    }
ArrayList<String> families = new ArrayList<>(fds.length);
for (FileStatus status : fds) {
families.add(status.getPath().getName());
}
return families;
} | 3.26 |
hbase_HRegionFileSystem_mergeStoreFile_rdh | /**
* Write out a merge reference under the given merges directory.
*
* @param mergingRegion
* {@link RegionInfo} for one of the regions being merged.
* @param familyName
* Column Family Name
* @param f
* File to create reference.
* @return Path to created reference.
* @throws IOException
* if the merge write fails.
 */
public Path mergeStoreFile(RegionInfo mergingRegion, String familyName, HStoreFile f)
  throws IOException {
Path referenceDir = new Path(getMergesDir(regionInfoForFs), familyName);
// A whole reference to the store file.
Reference r = Reference.createTopReference(mergingRegion.getStartKey());
// Add the referred-to regions name as a dot separated suffix.
// See REF_NAME_REGEX regex above. The referred-to regions name is
// up in the path of the passed in <code>f</code> -- parentdir is family,
// then the directory above is the region name.
String mergingRegionName = mergingRegion.getEncodedName();
// Write reference with same file id only with the other region name as
// suffix and into the new region location (under same family).
Path p = new Path(referenceDir, (f.getPath().getName() + ".") + mergingRegionName);
return r.write(fs, p);
} | 3.26 |
hbase_HRegionFileSystem_hasReferences_rdh | /**
* Check whether region has Reference file
*
* @param htd
 * table descriptor of the region
* @return true if region has reference file
*/
public boolean hasReferences(final TableDescriptor htd) throws IOException {
for (ColumnFamilyDescriptor family : htd.getColumnFamilies()) {
if (hasReferences(family.getNameAsString())) {
return true;
}
}
return false;
} | 3.26 |
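A common reason to call this, shown as a sketch under stated assumptions: the HRegionFileSystem and TableDescriptor come from the caller, and the wrapper class is hypothetical. A daughter created by a split still holds Reference files until compaction rewrites them, and a region with live references cannot be split again.

import java.io.IOException;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;

final class SplitGate {
  // References are cleaned up by compaction; until then, defer further splits.
  static boolean canSplitAgain(HRegionFileSystem regionFs, TableDescriptor htd)
      throws IOException {
    return !regionFs.hasReferences(htd);
  }
}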
hbase_HRegionFileSystem_getRegionInfo_rdh | /**
* Returns the {@link RegionInfo} that describe this on-disk region view
*/
public RegionInfo getRegionInfo() {
return this.regionInfo;
} | 3.26 |
hbase_HRegionFileSystem_commitMergedRegion_rdh | /**
* Commit a merged region, making it ready for use.
*/
  public void commitMergedRegion(List<Path> allMergedFiles, MasterProcedureEnv env)
    throws IOException {
    Path mergesDir = getMergesDir(regionInfoForFs);
    if (mergesDir != null && fs.exists(mergesDir)) {
      // Write HRI to a file in case we need to recover hbase:meta
      Path regionInfoFile = new Path(mergesDir, REGION_INFO_FILE);
byte[] regionInfoContent = getRegionInfoFileContent(regionInfo);
writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);
insertRegionFilesIntoStoreTracker(allMergedFiles, env, this);
}
} | 3.26 |
hbase_HRegionFileSystem_loadRegionInfoFileContent_rdh | /**
* Create a {@link RegionInfo} from the serialized version on-disk.
*
* @param fs
* {@link FileSystem} that contains the Region Info file
* @param regionDir
* {@link Path} to the Region Directory that contains the Info file
 * @return A {@link RegionInfo} instance read from the Region Info file.
* @throws IOException
* if an error occurred during file open/read operation.
*/
public static RegionInfo loadRegionInfoFileContent(final FileSystem fs, final Path regionDir) throws IOException {
    try (FSDataInputStream in = fs.open(new Path(regionDir, REGION_INFO_FILE))) {
      return RegionInfo.parseFrom(in);
    }
} | 3.26 |
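A small recovery-style sketch built on the static method above; the class name is hypothetical and the region directory is assumed to be known to the caller (e.g. /hbase/data/default/t1/&lt;encoded-region-name&gt;).

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;

final class RecoverRegionInfo {
  static RegionInfo read(Path regionDir) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = regionDir.getFileSystem(conf);
    // Reads <regionDir>/.regioninfo and deserializes it.
    return HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
  }
}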
hbase_HMaster_filterTablesByRegex_rdh | /**
* Removes the table descriptors that don't match the pattern.
*
* @param descriptors
* list of table descriptors to filter
* @param pattern
* the regex to use
*/
private static void filterTablesByRegex(final Collection<TableDescriptor> descriptors, final Pattern pattern) {
final String defaultNS = NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR;
Iterator<TableDescriptor> itr = descriptors.iterator();
while (itr.hasNext()) {
TableDescriptor htd
= itr.next();
String tableName = htd.getTableName().getNameAsString();
boolean matched = pattern.matcher(tableName).matches();
if
((!matched) && htd.getTableName().getNamespaceAsString().equals(defaultNS)) {
matched = pattern.matcher((defaultNS + TableName.NAMESPACE_DELIM) + tableName).matches();
}
if (!matched) {
itr.remove();
}
}
} | 3.26 |
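The double match attempt above means a pattern qualified with the default namespace still matches a bare table name. A self-contained demonstration with made-up names, using only the JDK:

import java.util.regex.Pattern;

public class NamespaceRegexDemo {
  public static void main(String[] args) {
    Pattern pattern = Pattern.compile("default:my_.*");
    String tableName = "my_table"; // a table living in the default namespace
    boolean matched = pattern.matcher(tableName).matches(); // false: no prefix
    if (!matched) {
      // second attempt, qualified the way the method above does it
      matched = pattern.matcher("default:" + tableName).matches(); // true
    }
    System.out.println(matched); // prints: true
  }
}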
hbase_HMaster_listNamespaces_rdh | /**
* List namespace names
*
* @return All namespace names
*/
public List<String> listNamespaces() throws IOException {
checkInitialized();
List<String> namespaces = new ArrayList<>();
if (cpHost != null) {
cpHost.preListNamespaces(namespaces);
    }
    for (NamespaceDescriptor namespace : clusterSchemaService.getNamespaces()) {
      namespaces.add(namespace.getName());
    }
    if (cpHost != null) {
cpHost.postListNamespaces(namespaces);
}
return namespaces;
} | 3.26 |
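From a client, this surfaces through the Admin API; a hedged sketch assuming Admin#listNamespaces is available (it exists in recent HBase releases) and standard connection setup:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ListNamespacesClient {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = conn.getAdmin()) {
      // RPCs to the master-side method shown above.
      for (String ns : admin.listNamespaces()) {
        System.out.println(ns);
      }
    }
  }
}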
hbase_HMaster_login_rdh | /**
* For compatibility, if failed with regionserver credentials, try the master one
*/
@Override
protected void login(UserProvider user, String host) throws IOException {
try {
user.login(SecurityConstants.REGIONSERVER_KRB_KEYTAB_FILE, SecurityConstants.REGIONSERVER_KRB_PRINCIPAL, host);
    } catch (IOException ie) {
      user.login(SecurityConstants.MASTER_KRB_KEYTAB_FILE, SecurityConstants.MASTER_KRB_PRINCIPAL,
        host);
}
} | 3.26 |
hbase_HMaster_isActiveMaster_rdh | /**
* Report whether this master is currently the active master or not. If not active master, we are
* parked on ZK waiting to become active. This method is used for testing.
*
* @return true if active master, false if not.
*/
@Override
public boolean isActiveMaster() {
return activeMaster;
} | 3.26 |
hbase_HMaster_listDecommissionedRegionServers_rdh | /**
* List region servers marked as decommissioned (previously called 'draining') to not get regions
* assigned to them.
*
* @return List of decommissioned servers.
*/
public List<ServerName> listDecommissionedRegionServers() {
return this.serverManager.getDrainingServersList();
} | 3.26 |
hbase_HMaster_switchSnapshotCleanup_rdh | /**
* Turn on/off Snapshot Cleanup Chore
*
* @param on
* indicates whether Snapshot Cleanup Chore is to be run
*/
void switchSnapshotCleanup(final boolean on, final boolean synchronous) throws IOException {
if (synchronous) {
      synchronized (this.snapshotCleanerChore) {
switchSnapshotCleanup(on);
}
} else {
switchSnapshotCleanup(on);
}
} | 3.26 |
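Operators typically toggle this through the Admin API or the shell; a hedged sketch assuming Admin#snapshotCleanupSwitch (added alongside this chore in recent HBase releases) with the class name made up here:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Admin;

final class SnapshotCleanupToggle {
  // synchronous=true corresponds to the synchronized branch in the method above.
  static void disable(Admin admin) throws IOException {
    boolean wasOn = admin.snapshotCleanupSwitch(false, true);
    System.out.println("Snapshot cleanup previously " + (wasOn ? "enabled" : "disabled"));
  }
}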
hbase_HMaster_isInitialized_rdh | /**
* Report whether this master has completed with its initialization and is ready. If ready, the
* master is also the active master. A standby master is never ready. This method is used for
* testing.
*
* @return true if master is ready to go, false if not.
*/
@Override
public boolean isInitialized() {
return initialized.isReady();
} | 3.26 |