name (string, 12–178 chars) | code_snippet (string, 8–36.5k chars) | score (float64, 3.26–3.68)
---|---|---|
hbase_RestoreSnapshotProcedure_getMonitorStatus_rdh | /**
* Set up monitor status if it is not created.
*/
private MonitoredTask getMonitorStatus() {
  if (monitorStatus == null) {
    monitorStatus = TaskMonitor.get().createStatus(
      "Restoring snapshot '" + snapshot.getName() + "' to table " + getTableName());
}
return monitorStatus;
} | 3.26 |
hbase_RestoreSnapshotProcedure_prepareRestore_rdh | /**
* Action before any real action of restoring from snapshot.
*
* @param env
* MasterProcedureEnv
*/
private void prepareRestore(final MasterProcedureEnv env)
throws IOException
{
final TableName tableName = getTableName();
// Checks whether the table exists
  if (!env.getMasterServices().getTableDescriptors().exists(tableName)) {
    throw new TableNotFoundException(tableName);
  }
  // check whether the TTL has expired for this snapshot
  if (SnapshotDescriptionUtils.isExpiredSnapshot(snapshot.getTtl(), snapshot.getCreationTime(),
    EnvironmentEdgeManager.currentTime())) {
    throw new SnapshotTTLExpiredException(ProtobufUtil.createSnapshotDesc(snapshot));
  }
  // Check whether the table is disabled.
  env.getMasterServices().checkTableModifiable(tableName);
  // Check that we have at least 1 CF
  if (modifiedTableDescriptor.getColumnFamilyCount() == 0) {
    throw new DoNotRetryIOException(
      "Table " + getTableName().toString() + " should have at least one column family.");
  }
  if (!getTableName().isSystemTable()) {
    // Table already exists. Check and update the region quota for this table namespace.
    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
    SnapshotManifest manifest = SnapshotManifest.open(env.getMasterConfiguration(),
      mfs.getFileSystem(),
      SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, mfs.getRootDir()), snapshot);
int snapshotRegionCount = manifest.getRegionManifestsMap().size();
int tableRegionCount = ProcedureSyncWait.getMasterQuotaManager(env).getRegionCountOfTable(tableName);
if ((snapshotRegionCount > 0) && (tableRegionCount != snapshotRegionCount)) {
ProcedureSyncWait.getMasterQuotaManager(env).checkAndUpdateNamespaceRegionQuota(tableName, snapshotRegionCount);
}
}
} | 3.26 |
hbase_ModeStrategyUtils_aggregateRecords_rdh | /**
* Group by records on the basis of supplied groupBy field and Aggregate records using
* {@link Record#combine(Record)}
*
* @param records
* records needs to be processed
* @param groupBy
* Field to be used for group by
* @return aggregated records
 */
public static List<Record> aggregateRecords(List<Record> records, Field groupBy) {
  return records.stream()
    .collect(Collectors.groupingBy(r -> r.get(groupBy)))
    .entrySet().stream()
    .flatMap(e -> e.getValue().stream().reduce(Record::combine).map(Stream::of).orElse(Stream.empty()))
    .collect(Collectors.toList());
} | 3.26 |
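The group-then-combine stream pattern above is easier to follow with a small stand-in type. The sketch below uses a hypothetical `Row` record (Java 16+) instead of the hbtop `Record`/`Field` classes, but the `groupingBy` plus `reduce(::combine)` shape is the same:

```java
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class AggregateSketch {
  // Illustrative stand-in type, not the hbtop Record class.
  record Row(String group, long value) {
    Row combine(Row other) {
      // Rows in the same group are merged by summing their values.
      return new Row(group, value + other.value);
    }
  }

  static List<Row> aggregate(List<Row> rows) {
    return rows.stream()
      .collect(Collectors.groupingBy(Row::group))               // group by key
      .entrySet().stream()
      .flatMap(e -> e.getValue().stream().reduce(Row::combine)  // fold each group into one row
        .map(Stream::of).orElse(Stream.empty()))
      .collect(Collectors.toList());
  }

  public static void main(String[] args) {
    List<Row> rows = Arrays.asList(new Row("ns1", 2), new Row("ns1", 3), new Row("ns2", 5));
    System.out.println(aggregate(rows)); // e.g. [Row[group=ns1, value=5], Row[group=ns2, value=5]] (group order not guaranteed)
  }
}
```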
hbase_RESTServletContainer_service_rdh | /**
 * This container is used only if authentication and impersonation are enabled. The remote request
* user is used as a proxy user for impersonation in invoking any REST service.
*/
@Override
public void service(final HttpServletRequest request, final HttpServletResponse response) throws ServletException, IOException {
final HttpServletRequest lowerCaseRequest = toLowerCase(request);
final String doAsUserFromQuery = lowerCaseRequest.getParameter("doas");
RESTServlet servlet = RESTServlet.getInstance();
if (doAsUserFromQuery != null) {
Configuration conf = servlet.getConfiguration();
if (!servlet.supportsProxyuser()) {
throw new ServletException("Support for proxyuser is not configured");
}
// Authenticated remote user is attempting to do 'doAs' proxy user.
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(request.getRemoteUser());
// create and attempt to authorize a proxy user (the client is attempting
// to do proxy user)
ugi = UserGroupInformation.createProxyUser(doAsUserFromQuery, ugi);
// validate the proxy user authorization
try {
ProxyUsers.authorize(ugi, request.getRemoteAddr(), conf);
} catch (AuthorizationException e) {
throw new ServletException(e.getMessage());
}
servlet.setEffectiveUser(doAsUserFromQuery);
} else {
String effectiveUser = request.getRemoteUser();
servlet.setEffectiveUser(effectiveUser);
}
super.service(request, response);
} | 3.26 |
hbase_RSMobFileCleanerChore_archiveMobFiles_rdh | /**
* Archives the mob files.
*
* @param conf
* The current configuration.
* @param tableName
* The table name.
* @param family
* The name of the column family.
* @param storeFiles
* The files to be archived.
* @throws IOException
* exception
 */
public void archiveMobFiles(Configuration conf, TableName tableName, byte[] family,
  List<Path> storeFiles) throws IOException {
if (storeFiles.size() == 0) {
// nothing to remove
LOG.debug("Skipping archiving old MOB files - no files found for table={} cf={}", tableName, Bytes.toString(family));
return;
}
Path mobTableDir = CommonFSUtils.getTableDir(MobUtils.getMobHome(conf), tableName);
FileSystem fs = storeFiles.get(0).getFileSystem(conf);
for (Path p : storeFiles) {
LOG.debug("MOB Cleaner is archiving: {}", p);
    HFileArchiver.archiveStoreFile(conf, fs, MobUtils.getMobRegionInfo(tableName), mobTableDir,
      family, p);
  }
} | 3.26 |
hbase_NoRegionSplitRestriction_initialize_rdh | /**
* A {@link RegionSplitRestriction} implementation that does nothing.
*/
@InterfaceAudience.Private
public class NoRegionSplitRestriction extends RegionSplitRestriction {
@Override
public void initialize(TableDescriptor tableDescriptor, Configuration conf) throws IOException {
} | 3.26 |
hbase_BloomContext_m0_rdh | /**
 * Adds bloom information from the cell to the bloom filter, but only for a new, unique key.
*/
public void m0(Cell cell) throws IOException {
// only add to the bloom filter on a new, unique key
if (isNewKey(cell)) {
sanityCheck(cell);
f0.append(cell);
}
} | 3.26 |
hbase_Threads_setDaemonThreadRunning_rdh | /**
* Utility method that sets name, daemon status and starts passed thread.
*
* @param t
* thread to frob
* @param name
* new name
* @param handler
* A handler to set on the thread. Pass null if want to use default handler.
* @return Returns the passed Thread <code>t</code>.
*/
public static <T extends Thread> T setDaemonThreadRunning(T t, String name, UncaughtExceptionHandler handler) {
t.setName(name);
if (handler != null) {
t.setUncaughtExceptionHandler(handler);
}
t.setDaemon(true);
t.start();
return t;
} | 3.26 |
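A minimal usage sketch of the helper above; the thread name, the handler, and the task are made up for illustration:

```java
import java.lang.Thread.UncaughtExceptionHandler;
import org.apache.hadoop.hbase.util.Threads;

public class DaemonThreadSketch {
  public static void main(String[] args) {
    // Made-up handler: just report which thread died and why.
    UncaughtExceptionHandler handler =
      (t, e) -> System.err.println(t.getName() + " died: " + e);
    // Name the worker, mark it daemon, install the handler, and start it in one call.
    Thread worker = Threads.setDaemonThreadRunning(
      new Thread(() -> System.out.println("doing background work")), "cache-refresher", handler);
  }
}
```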
hbase_Threads_sleepWithoutInterrupt_rdh | /**
* Sleeps for the given amount of time even if interrupted. Preserves the interrupt status.
*
* @param msToWait
* the amount of time to sleep in milliseconds
*/
public static void sleepWithoutInterrupt(final long msToWait) {
long timeMillis = EnvironmentEdgeManager.currentTime();
long endTime = timeMillis + msToWait;
boolean interrupted = false;
while (timeMillis < endTime) {
try {
Thread.sleep(endTime - timeMillis);
} catch (InterruptedException ex) {
interrupted = true;
}
timeMillis = EnvironmentEdgeManager.currentTime();
}
if (interrupted) {
    Thread.currentThread().interrupt();
  }
} | 3.26 |
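A hedged fragment showing the intended calling pattern: the full wait always elapses, and the interrupt flag survives so the caller can still honor it afterwards.

```java
// Inside some worker loop:
Threads.sleepWithoutInterrupt(5_000);
if (Thread.currentThread().isInterrupted()) {
  // an interrupt arrived during the sleep and was not swallowed; clean up and exit here
}
```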
hbase_Threads_printThreadInfo_rdh | /**
* Print all of the thread's information and stack traces. Wrapper around Hadoop's method.
*
* @param stream
 * the stream to print to
* @param title
* a string title for the stack trace
*/
public static void printThreadInfo(PrintStream stream, String title) {
  ReflectionUtils.printThreadInfo(stream, title);
} | 3.26 |
hbase_Threads_isNonDaemonThreadRunning_rdh | /**
* Checks whether any non-daemon thread is running.
*
* @return true if there are non daemon threads running, otherwise false
*/
public static boolean isNonDaemonThreadRunning() {
AtomicInteger nonDaemonThreadCount = new AtomicInteger();
Set<Thread> threads = Thread.getAllStackTraces().keySet();
threads.forEach(t -> {
// Exclude current thread
if ((t.getId() != Thread.currentThread().getId())
&& (!t.isDaemon())) {
nonDaemonThreadCount.getAndIncrement();
LOG.info("Non daemon thread {} is still alive", t.getName());
LOG.info(printStackTrace(t));
}
});
return nonDaemonThreadCount.get() > 0;
} | 3.26 |
hbase_Threads_sleep_rdh | /**
* If interrupted, just prints out the interrupt on STDOUT, resets interrupt and returns
*
* @param millis
* How long to sleep for in milliseconds.
*/
public static void sleep(long millis) {
try {
Thread.sleep(millis);
  } catch (InterruptedException e) {
    LOG.warn("sleep interrupted", e);
Thread.currentThread().interrupt();
}
} | 3.26 |
hbase_Threads_shutdown_rdh | /**
* Shutdown passed thread using isAlive and join.
*
* @param joinwait
* Pass 0 if we're to wait forever.
* @param t
* Thread to shutdown
*/
public static void shutdown(final Thread t, final long joinwait) {
if (t == null)
return;
while (t.isAlive()) {
try {
t.join(joinwait);
} catch (InterruptedException e) {
LOG.warn((t.getName() + "; joinwait=") + joinwait, e);
}
}
} | 3.26 |
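A small fragment combining the Threads helpers above; `worker` is assumed to be a thread started earlier, for example via setDaemonThreadRunning.

```java
worker.interrupt();          // ask the worker to stop
Threads.shutdown(worker, 0); // join until it exits; 0 means wait forever between join attempts
```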
hbase_Threads_threadDumpingIsAlive_rdh | /**
* Waits on the passed thread to die dumping a threaddump every minute while its up.
*/
public static void threadDumpingIsAlive(final Thread t) throws InterruptedException {
if (t == null) {
return;
}
while (t.isAlive()) {
t.join(60 * 1000);
if (t.isAlive()) {
printThreadInfo(System.out, "Automatic Stack Trace every 60 seconds waiting on " + t.getName());
}
}
} | 3.26 |
hbase_Threads_setLoggingUncaughtExceptionHandler_rdh | /**
* Sets an UncaughtExceptionHandler for the thread which logs the Exception stack if the thread
* dies.
*/
public static void setLoggingUncaughtExceptionHandler(Thread t) {
t.setUncaughtExceptionHandler(LOGGING_EXCEPTION_HANDLER);
} | 3.26 |
hbase_RegionModeStrategy_selectModeFieldsAndAddCountField_rdh | /**
 * Form a new record list containing only the fields provided through fieldInfos, and add a count
 * field with value 1 to each record. Selecting fields and adding the count field are done in one
 * pass to save the CPU cycles of rebuilding each record twice.
*
* @param fieldInfos
* List of FieldInfos required in the record
* @param records
* List of records which needs to be processed
* @param countField
* Field which needs to be added with value 1 for each record
* @return records after selecting required fields and adding count field
*/
List<Record> selectModeFieldsAndAddCountField(List<FieldInfo> fieldInfos, List<Record> records, Field countField) {
  return records.stream()
    .map(record -> Record.ofEntries(fieldInfos.stream()
      .filter(fi -> record.containsKey(fi.getField()))
      .map(fi -> Record.entry(fi.getField(), record.get(fi.getField())))))
    .map(record -> Record.builder().putAll(record).put(countField, 1).build())
    .collect(Collectors.toList());
} | 3.26 |
hbase_TableDescriptorUtils_computeDelta_rdh | /**
* Compares two {@link TableDescriptor} and indicate which columns were added, deleted, or
* modified from oldTD to newTD
*
* @return a TableDescriptorDelta that contains the added/deleted/modified column names
*/
public static TableDescriptorDelta computeDelta(TableDescriptor oldTD, TableDescriptor newTD) {
return new TableDescriptorDelta(oldTD, newTD);
} | 3.26 |
hbase_MultiVersionConcurrencyControl_advanceTo_rdh | /**
* Step the MVCC forward on to a new read/write basis.
*/
public void advanceTo(long newStartPoint) {
while (true) {
long seqId = this.getWritePoint();
if (seqId >= newStartPoint) {
break;
}
if (this.tryAdvanceTo(newStartPoint, seqId)) {
break;
}
}
} | 3.26 |
hbase_MultiVersionConcurrencyControl_begin_rdh | /**
* Start a write transaction. Create a new {@link WriteEntry} with a new write number and add it
* to our queue of ongoing writes. Return this WriteEntry instance. To complete the write
* transaction and wait for it to be visible, call {@link #completeAndWait(WriteEntry)}. If the
* write failed, call {@link #complete(WriteEntry)} so we can clean up AFTER removing ALL trace of
* the failed write transaction.
* <p>
* The {@code action} will be executed under the lock which means it can keep the same order with
* mvcc.
*
* @see #complete(WriteEntry)
* @see #completeAndWait(WriteEntry)
*/
public WriteEntry begin(Runnable action) {
synchronized(writeQueue) {
long nextWriteNumber = writePoint.incrementAndGet();
    WriteEntry e = new WriteEntry(nextWriteNumber);
writeQueue.add(e);
action.run();
return e;
}
} | 3.26 |
hbase_MultiVersionConcurrencyControl_completeAndWait_rdh | /**
* Complete a {@link WriteEntry} that was created by {@link #begin()} then wait until the read
* point catches up to our write. At the end of this call, the global read point is at least as
* large as the write point of the passed in WriteEntry. Thus, the write is visible to MVCC
* readers.
*/
public void completeAndWait(WriteEntry e) {
if (!complete(e)) {
waitForRead(e);
}
} | 3.26 |
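Taken together, begin(), completeAndWait(WriteEntry) and complete(WriteEntry) in the next row form the write-transaction protocol described in begin()'s javadoc. A minimal sketch, assuming MultiVersionConcurrencyControl and its WriteEntry inner class are accessible as in the HBase regionserver package:

```java
import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;

public class MvccWriteSketch {
  public static void main(String[] args) {
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    MultiVersionConcurrencyControl.WriteEntry e = mvcc.begin(); // start a write transaction
    boolean succeeded = false;
    try {
      // ... apply the edits tagged with e.getWriteNumber() to the memstore here ...
      succeeded = true;
    } finally {
      if (succeeded) {
        mvcc.completeAndWait(e); // block until the write is visible to MVCC readers
      } else {
        // back out every trace of the failed write first, then release the entry
        mvcc.complete(e);
      }
    }
  }
}
```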
hbase_MultiVersionConcurrencyControl_complete_rdh | /**
* Mark the {@link WriteEntry} as complete and advance the read point as much as possible. Call
* this even if the write has FAILED (AFTER backing out the write transaction changes completely)
* so we can clean up the outstanding transaction. How much is the read point advanced? Let S be
* the set of all write numbers that are completed. Set the read point to the highest numbered
* write of S.
*
* @return true if e is visible to MVCC readers (that is, readpoint >= e.writeNumber)
*/
public boolean complete(WriteEntry writeEntry) {
synchronized(writeQueue) {
writeEntry.markCompleted();
    long nextReadValue = NONE;
boolean ranOnce = false;
while (!writeQueue.isEmpty()) {
ranOnce = true;
WriteEntry v7 = writeQueue.getFirst();
if (nextReadValue > 0) {
if ((nextReadValue + 1) != v7.getWriteNumber()) {
throw new RuntimeException((("Invariant in complete violated, nextReadValue=" + nextReadValue) + ", writeNumber=") + v7.getWriteNumber());
}
}
if (v7.isCompleted()) {
nextReadValue = v7.getWriteNumber();
writeQueue.removeFirst();
v7.runCompletionAction();
} else {
break;
}
}
if (!ranOnce) {
throw new RuntimeException("There is no first!");
    }
    if (nextReadValue > 0) {
synchronized(readWaiters) {
readPoint.set(nextReadValue);
readWaiters.notifyAll();
}
}
return readPoint.get() >= writeEntry.getWriteNumber();
}
} | 3.26 |
hbase_MultiVersionConcurrencyControl_waitForRead_rdh | /**
* Wait for the global readPoint to advance up to the passed in write entry number.
*/
void waitForRead(WriteEntry e) {
boolean interrupted = false;
int count = 0;
synchronized(readWaiters) {
while (readPoint.get() < e.getWriteNumber()) {
if (((count % 100) == 0) && (count > 0)) {
      long totalWaitTillNow = READPOINT_ADVANCE_WAIT_TIME * count;
      f0.warn("STUCK for : " + totalWaitTillNow + " millis. " + this);
}
count++;
try {
readWaiters.wait(READPOINT_ADVANCE_WAIT_TIME);
} catch (InterruptedException ie) {
// We were interrupted... finish the loop -- i.e. cleanup --and then
// on our way out, reset the interrupt flag.
interrupted = true;
}
}
}
if (interrupted) {
Thread.currentThread().interrupt();
}
} | 3.26 |
hbase_MultiVersionConcurrencyControl_await_rdh | /**
* Wait until the read point catches up to the write point; i.e. wait on all outstanding mvccs to
* complete.
*/
public void await() {
// Add a write and then wait on reads to catch up to it.
completeAndWait(begin());
} | 3.26 |
hbase_QuotaRetriever_open_rdh | /**
* Open a QuotaRetriever with the specified filter.
*
* @param conf
* Configuration object to use.
* @param filter
* the QuotaFilter
* @return the QuotaRetriever
* @throws IOException
* if a remote or network exception occurs
*/
public static QuotaRetriever open(final Configuration conf, final QuotaFilter filter) throws IOException {
Scan scan = QuotaTableUtil.makeScan(filter);
QuotaRetriever scanner = new QuotaRetriever();
scanner.init(conf, scan);
return scanner;
} | 3.26 |
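A hedged usage sketch for the factory above, assuming QuotaRetriever is Iterable over QuotaSettings and Closeable as in the HBase client API; the user name is made up.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.quotas.QuotaFilter;
import org.apache.hadoop.hbase.quotas.QuotaRetriever;
import org.apache.hadoop.hbase.quotas.QuotaSettings;

public class QuotaScanSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    QuotaFilter filter = new QuotaFilter().setUserFilter("bob"); // "bob" is a made-up user
    try (QuotaRetriever retriever = QuotaRetriever.open(conf, filter)) {
      for (QuotaSettings settings : retriever) {
        System.out.println(settings); // print each quota matching the filter
      }
    }
  }
}
```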
hbase_CellComparator_getInstance_rdh | /**
* A comparator for ordering cells in user-space tables. Useful when writing cells in sorted order
* as necessary for bulk import (i.e. via MapReduce).
* <p>
* CAUTION: This comparator may provide inaccurate ordering for cells from system tables, and
* should not be relied upon in that case.
*/
// For internal use, see CellComparatorImpl utility methods.
static CellComparator getInstance() {
return CellComparatorImpl.COMPARATOR;
} | 3.26 |
hbase_HFileSystem_maybeWrapFileSystem_rdh | /**
* Returns an instance of Filesystem wrapped into the class specified in hbase.fs.wrapper
* property, if one is set in the configuration, returns unmodified FS instance passed in as an
* argument otherwise.
*
* @param base
* Filesystem instance to wrap
* @param conf
* Configuration
* @return wrapped instance of FS, or the same instance if no wrapping configured.
*/
private FileSystem maybeWrapFileSystem(FileSystem base, Configuration conf) {
try {
Class<?> clazz = conf.getClass("hbase.fs.wrapper", null);
if (clazz != null) {
return ((FileSystem) (clazz.getConstructor(FileSystem.class, Configuration.class).newInstance(base, conf)));
}
} catch (Exception e) {
LOG.error("Failed to wrap filesystem: " + e);
}
return base;
} | 3.26 |
hbase_HFileSystem_close_rdh | /**
* Close this filesystem object
*/
@Override
public void close() throws IOException {
super.close();
if (this.noChecksumFs != fs) {
    this.noChecksumFs.close();
  }
} | 3.26 |
hbase_HFileSystem_useHBaseChecksum_rdh | /**
* Are we verifying checksums in HBase?
*
* @return True, if hbase is configured to verify checksums, otherwise false.
*/
public boolean useHBaseChecksum() {
return useHBaseChecksum;
} | 3.26 |
hbase_HFileSystem_getStoragePolicyForOldHDFSVersion_rdh | /**
* Before Hadoop 2.8.0, there's no getStoragePolicy method for FileSystem interface, and we need
* to keep compatible with it. See HADOOP-12161 for more details.
*
* @param path
* Path to get storage policy against
* @return the storage policy name
*/
private String getStoragePolicyForOldHDFSVersion(Path path) {
try {
if (this.fs instanceof DistributedFileSystem) {
DistributedFileSystem dfs = ((DistributedFileSystem) (this.fs));
HdfsFileStatus status = dfs.getClient().getFileInfo(path.toUri().getPath());
if (null != status) {
if (unspecifiedStoragePolicyId < 0) {
// Get the unspecified id field through reflection to avoid compilation error.
// In later version BlockStoragePolicySuite#ID_UNSPECIFIED is moved to
// HdfsConstants#BLOCK_STORAGE_POLICY_ID_UNSPECIFIED
Field idUnspecified = BlockStoragePolicySuite.class.getField("ID_UNSPECIFIED");
unspecifiedStoragePolicyId = idUnspecified.getByte(BlockStoragePolicySuite.class);
}
byte storagePolicyId = status.getStoragePolicy();
if (storagePolicyId != unspecifiedStoragePolicyId) {
BlockStoragePolicy[] policies = dfs.getStoragePolicies();
for (BlockStoragePolicy policy : policies) {
if (policy.getId() == storagePolicyId) {
return policy.getName();
}
}
}
}
}
  } catch (Throwable e) {
    LOG.warn("failed to get block storage policy of [" + path + "]", e);
}
return null;
} | 3.26 |
hbase_HFileSystem_setStoragePolicy_rdh | /**
* Set the source path (directory/file) to the specified storage policy.
*
* @param path
* The source path (directory/file).
* @param policyName
 * The name of the storage policy: 'HOT', 'COLD', etc. See hadoop 2.6+
 * org.apache.hadoop.hdfs.protocol.HdfsConstants for the possible list, e.g. 'COLD',
 * 'WARM', 'HOT', 'ONE_SSD', 'ALL_SSD', 'LAZY_PERSIST'.
*/
public void setStoragePolicy(Path path, String policyName) {
CommonFSUtils.setStoragePolicy(this.fs, path, policyName);
}
/**
* Get the storage policy of the source path (directory/file).
*
* @param path
* The source path (directory/file).
* @return Storage policy name, or {@code null} if not using {@link DistributedFileSystem} | 3.26 |
hbase_HFileSystem_getNoChecksumFs_rdh | /**
* Returns the filesystem that is specially setup for doing reads from storage. This object avoids
* doing checksum verifications for reads.
*
* @return The FileSystem object that can be used to read data from files.
*/
public FileSystem getNoChecksumFs() {
return noChecksumFs;
} | 3.26 |
hbase_HFileSystem_createNonRecursive_rdh | /**
* The org.apache.hadoop.fs.FilterFileSystem does not yet support createNonRecursive. This is a
* hadoop bug and when it is fixed in Hadoop, this definition will go away.
*/
@Override
@SuppressWarnings("deprecation")
public FSDataOutputStream createNonRecursive(Path f, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException {
return fs.createNonRecursive(f, overwrite, bufferSize, replication, blockSize, progress);
} | 3.26 |
hbase_HFileSystem_newInstanceFileSystem_rdh | /**
* Returns a brand new instance of the FileSystem. It does not use the FileSystem.Cache. In newer
* versions of HDFS, we can directly invoke FileSystem.newInstance(Configuration).
*
* @param conf
* Configuration
* @return A new instance of the filesystem
*/
private static FileSystem newInstanceFileSystem(Configuration conf) throws IOException {
  URI uri = FileSystem.getDefaultUri(conf);
  FileSystem fs = null;
  Class<?> clazz = conf.getClass("fs." + uri.getScheme() + ".impl", null);
if (clazz != null) {
// This will be true for Hadoop 1.0, or 0.20.
fs = ((FileSystem) (util.ReflectionUtils.newInstance(clazz, conf)));
    fs.initialize(uri, conf);
} else {
// For Hadoop 2.0, we have to go through FileSystem for the filesystem
// implementation to be loaded by the service loader in case it has not
// been loaded yet.
Configuration clone = new Configuration(conf);
clone.setBoolean(("fs." + uri.getScheme()) + ".impl.disable.cache", true);
fs = FileSystem.get(uri, clone);
}
if (fs == null) {
throw new IOException("No FileSystem for scheme: " + uri.getScheme());
}
return fs;
} | 3.26 |
hbase_HFileSystem_addLocationsOrderInterceptor_rdh | /**
* Add an interceptor on the calls to the namenode#getBlockLocations from the DFSClient linked to
* this FileSystem. See HBASE-6435 for the background.
* <p/>
* There should be no reason, except testing, to create a specific ReorderBlocks.
*
* @return true if the interceptor was added, false otherwise.
*/
static boolean addLocationsOrderInterceptor(Configuration conf, final ReorderBlocks lrb) {
if (!conf.getBoolean("hbase.filesystem.reorder.blocks", true)) {
// activated by default
LOG.debug("addLocationsOrderInterceptor configured to false");
return false;
}
FileSystem fs;
try {
fs = FileSystem.get(conf);
} catch (IOException e) {
LOG.warn("Can't get the file system from the conf.", e);
return false;
}
if (!(fs instanceof DistributedFileSystem)) {
LOG.debug("The file system is not a DistributedFileSystem. " + "Skipping on block location reordering");
return false;
}
DistributedFileSystem dfs = ((DistributedFileSystem) (fs));
DFSClient dfsc = dfs.getClient();
if (dfsc == null) {
LOG.warn("The DistributedFileSystem does not contain a DFSClient. Can't add the location " + "block reordering interceptor. Continuing, but this is unexpected.");
return false;
}
try {
Field nf = DFSClient.class.getDeclaredField("namenode");
nf.setAccessible(true);
Field modifiersField = ReflectionUtils.getModifiersField();
    modifiersField.setAccessible(true);
    modifiersField.setInt(nf, nf.getModifiers() & ~Modifier.FINAL);
ClientProtocol namenode = ((ClientProtocol) (nf.get(dfsc)));
if (namenode == null) {
LOG.warn("The DFSClient is not linked to a namenode. Can't add the location block" + " reordering interceptor. Continuing, but this is unexpected.");
return false;
}
ClientProtocol cp1 = m0(namenode, lrb, conf);
nf.set(dfsc, cp1);
LOG.info(("Added intercepting call to namenode#getBlockLocations so can do block reordering" + " using class ") + lrb.getClass().getName());
  } catch (NoSuchFieldException e) {
    LOG.warn("Can't modify the DFSClient#namenode field to add the location reorder.", e);
    return false;
  } catch (IllegalAccessException e) {
    LOG.warn("Can't modify the DFSClient#namenode field to add the location reorder.", e);
return false;
}
return true;
} | 3.26 |
hbase_HFileSystem_getBackingFs_rdh | /**
* Returns the underlying filesystem
*
* @return The underlying FileSystem for this FilterFileSystem object.
*/
public FileSystem getBackingFs() throws IOException {
return fs;
} | 3.26 |
hbase_ShutdownHookManager_addShutdownHook_rdh | // priority is ignored in hadoop versions earlier than 2.0
@Override
public void addShutdownHook(Thread shutdownHookThread, int priority) {
Runtime.getRuntime().addShutdownHook(shutdownHookThread);
} | 3.26 |
hbase_ActiveMasterManager_getBackupMasters_rdh | /**
* Returns list of registered backup masters.
*/
public List<ServerName> getBackupMasters() {
return backupMasters;
} | 3.26 |
hbase_ActiveMasterManager_hasActiveMaster_rdh | /**
* Returns True if cluster has an active master.
*/
boolean hasActiveMaster() {
try {
if (ZKUtil.checkExists(watcher, watcher.getZNodePaths().masterAddressZNode) >= 0) {
return true;
}
  } catch (KeeperException ke) {
    LOG.info("Received an unexpected KeeperException when checking "
      + "isActiveMaster : " + ke);
}
return false;
} | 3.26 |
hbase_ActiveMasterManager_handleMasterNodeChange_rdh | /**
* Handle a change in the master node. Doesn't matter whether this was called from a nodeCreated
* or nodeDeleted event because there are no guarantees that the current state of the master node
* matches the event at the time of our next ZK request.
* <p>
* Uses the watchAndCheckExists method which watches the master address node regardless of whether
* it exists or not. If it does exist (there is an active master), it returns true. Otherwise it
* returns false.
* <p>
* A watcher is set which guarantees that this method will get called again if there is another
* change in the master node.
*/
private void handleMasterNodeChange() {
// Watch the node and check if it exists.
try {
synchronized(clusterHasActiveMaster) {
if (ZKUtil.watchAndCheckExists(watcher, watcher.getZNodePaths().masterAddressZNode)) {
// A master node exists, there is an active master
LOG.trace("A master is now available");
clusterHasActiveMaster.set(true);
} else {
// Node is no longer there, cluster does not have an active master
LOG.debug("No master available. Notifying waiting threads");
clusterHasActiveMaster.set(false);
// Notify any thread waiting to become the active master
clusterHasActiveMaster.notifyAll();
}
// Reset the active master sn. Will be re-fetched later if needed.
// We don't want to make a synchronous RPC under a monitor.
activeMasterServerName = null;
}
} catch (KeeperException ke) {
master.abort("Received an unexpected KeeperException, aborting", ke);
}
} | 3.26 |
hbase_ActiveMasterManager_setInfoPort_rdh | // will be set after jetty server is started
public void setInfoPort(int infoPort) {
  this.infoPort = infoPort;
} | 3.26 |
hbase_ActiveMasterManager_fetchAndSetActiveMasterServerName_rdh | /**
* Fetches the active master's ServerName from zookeeper.
*/
private void fetchAndSetActiveMasterServerName() {
  LOG.debug("Attempting to fetch active master sn from zk");
try {
activeMasterServerName = MasterAddressTracker.getMasterAddress(watcher);
} catch (IOException | KeeperException e) {
// Log and ignore for now and re-fetch later if needed.
LOG.error("Error fetching active master information", e);
}
} | 3.26 |
hbase_AggregationHelper_validateParameters_rdh | /**
*
* @param scan
* the HBase scan object to use to read data from HBase
* @param canFamilyBeAbsent
* whether column family can be absent in familyMap of scan
*/
private static void validateParameters(Scan scan, boolean canFamilyBeAbsent) throws IOException {
  if (scan == null
    || (Bytes.equals(scan.getStartRow(), scan.getStopRow())
      && !Bytes.equals(scan.getStartRow(), HConstants.EMPTY_START_ROW))
    || (Bytes.compareTo(scan.getStartRow(), scan.getStopRow()) > 0
      && !Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW))) {
    throw new IOException("Agg client Exception: Startrow should be smaller than Stoprow");
  } else if (!canFamilyBeAbsent) {
    if (scan.getFamilyMap().size() != 1) {
      throw new IOException("There must be only one family.");
    }
  }
} | 3.26 |
hbase_AggregationHelper_getParsedGenericInstance_rdh | /**
* Get an instance of the argument type declared in a class's signature. The argument type is
* assumed to be a PB Message subclass, and the instance is created using parseFrom method on the
* passed ByteString.
*
* @param runtimeClass
* the runtime type of the class
* @param position
* the position of the argument in the class declaration
* @param b
* the ByteString which should be parsed to get the instance created
* @return the instance
* @throws IOException
* Either we couldn't instantiate the method object, or "parseFrom" failed.
*/
// Used server-side too by Aggregation Coprocesor Endpoint. Undo this interdependence. TODO.
@SuppressWarnings({ "unchecked", "TypeParameterUnusedInFormals" })
public static <T extends Message> T getParsedGenericInstance(Class<?> runtimeClass, int position,
  ByteString b) throws IOException {
  Type type = runtimeClass.getGenericSuperclass();
  Type argType = ((ParameterizedType) type).getActualTypeArguments()[position];
  Class<T> classType = (Class<T>) argType;
  T inst;
  try {
    Method m = classType.getMethod("parseFrom", ByteString.class);
    inst = (T) m.invoke(null, b);
    return inst;
  } catch (SecurityException | NoSuchMethodException | IllegalArgumentException
    | InvocationTargetException | IllegalAccessException e) {
    throw new IOException(e);
  }
} | 3.26 |
hbase_HBaseCluster_getServerHoldingMeta_rdh | /**
* Get the ServerName of region server serving the first hbase:meta region
 */
public ServerName getServerHoldingMeta() throws IOException {
return getServerHoldingRegion(TableName.META_TABLE_NAME, RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName());
} | 3.26 |
hbase_HBaseCluster_getInitialClusterMetrics_rdh | /**
* Returns a ClusterStatus for this HBase cluster as observed at the starting of the HBaseCluster
*/
public ClusterMetrics getInitialClusterMetrics() throws IOException {
return initialClusterStatus;
} | 3.26 |
hbase_HBaseCluster_waitForRegionServerToStart_rdh | /**
* Wait for the specified region server to join the cluster
*
* @throws IOException
* if something goes wrong or timeout occurs
*/
public void waitForRegionServerToStart(String hostname, int port, long timeout) throws IOException {
long start = EnvironmentEdgeManager.currentTime();
while ((EnvironmentEdgeManager.currentTime() - start) < timeout) {
for (ServerName server : getClusterMetrics().getLiveServerMetrics().keySet()) {
if (server.getHostname().equals(hostname) && (server.getPort() == port)) {
return;
}
}
Threads.sleep(100);
}
throw new IOException((("did timeout " + timeout) + "ms waiting for region server to start: ") + hostname);
} | 3.26 |
hbase_HBaseCluster_waitForNamenodeAvailable_rdh | /**
* Wait for the namenode.
*/
public void waitForNamenodeAvailable() throws InterruptedException {
} | 3.26 |
hbase_HBaseCluster_waitForActiveAndReadyMaster_rdh | /**
* Blocks until there is an active master and that master has completed initialization.
*
* @return true if an active master becomes available. false if there are no masters left.
* @throws IOException
* if something goes wrong or timeout occurs
*/
public boolean waitForActiveAndReadyMaster() throws IOException {
  return waitForActiveAndReadyMaster(Long.MAX_VALUE);
} | 3.26 |
hbase_HBaseCluster_m2_rdh | /**
* Restores the cluster to given state if this is a real cluster, otherwise does nothing. This is
* a best effort restore. If the servers are not reachable, or insufficient permissions, etc.
* restoration might be partial.
*
* @return whether restoration is complete
*/
public boolean m2(ClusterMetrics desiredStatus) throws IOException {
return true;
} | 3.26 |
hbase_HBaseCluster_restoreInitialStatus_rdh | /**
 * Restores the cluster to its initial state if this is a real cluster, otherwise does nothing.
* This is a best effort restore. If the servers are not reachable, or insufficient permissions,
* etc. restoration might be partial.
*
* @return whether restoration is complete
*/
public boolean restoreInitialStatus() throws IOException {
return m2(getInitialClusterMetrics());
} | 3.26 |
hbase_MetaBrowser_addParam_rdh | /**
* Adds {@code value} to {@code encoder} under {@code paramName} when {@code value} is non-null.
*/
private void addParam(final QueryStringEncoder encoder, final String paramName, final Object value) {
if (value != null) {
encoder.addParam(paramName, value.toString());
}
} | 3.26 |
hbase_RackManager_getRack_rdh | /**
* Same as {@link #getRack(ServerName)} except that a list is passed
*
* @param servers
* list of servers we're requesting racks information for
* @return list of racks for the given list of servers
*/
public List<String> getRack(List<ServerName> servers) {
// just a note - switchMapping caches results (at least the implementation should unless the
// resolution is really a lightweight process)
List<String> serversAsString = new ArrayList<>(servers.size());
for (ServerName server : servers) {
serversAsString.add(server.getHostname());
}
  return switchMapping.resolve(serversAsString);
} | 3.26 |
hbase_OrderedNumeric_encodeDouble_rdh | /**
* Write instance {@code val} into buffer {@code dst}.
*
* @param dst
* the {@link PositionedByteRange} to write to
* @param val
* the value to write to {@code dst}
* @return the number of bytes written
*/
public int encodeDouble(PositionedByteRange dst, double val) {
return OrderedBytes.encodeNumeric(dst, val, order);
} | 3.26 |
hbase_OrderedNumeric_decodeDouble_rdh | /**
* Read a {@code double} value from the buffer {@code src}.
*
* @param src
* the {@link PositionedByteRange} to read the {@code double} from
* @return the {@code double} read from the buffer
*/
public double decodeDouble(PositionedByteRange src) {
  return OrderedBytes.decodeNumericAsDouble(src);
} | 3.26 |
hbase_OrderedNumeric_decodeLong_rdh | /**
* Read a {@code long} value from the buffer {@code src}.
*
* @param src
* the {@link PositionedByteRange} to read the {@code long} from
* @return the {@code long} read from the buffer
*/
public long decodeLong(PositionedByteRange src) {
return OrderedBytes.decodeNumericAsLong(src);
} | 3.26 |
hbase_OrderedNumeric_encodeLong_rdh | /**
* Write instance {@code val} into buffer {@code dst}.
*
* @param dst
* the {@link PositionedByteRange} to write to
* @param val
* the value to write to {@code dst}
* @return the number of bytes written
*/
public int encodeLong(PositionedByteRange dst, long val) {
return OrderedBytes.encodeNumeric(dst, val, order);
} | 3.26 |
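A round-trip sketch for the encode/decode pair in the rows above, assuming SimplePositionedMutableByteRange from org.apache.hadoop.hbase.util as the PositionedByteRange implementation and the static ASCENDING instance of OrderedNumeric:

```java
import org.apache.hadoop.hbase.types.OrderedNumeric;
import org.apache.hadoop.hbase.util.PositionedByteRange;
import org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;

public class OrderedNumericRoundTrip {
  public static void main(String[] args) {
    // Encode a long in order-preserving form, rewind the buffer, and decode it back.
    PositionedByteRange buf = new SimplePositionedMutableByteRange(16);
    OrderedNumeric.ASCENDING.encodeLong(buf, 42L);
    buf.setPosition(0);
    System.out.println(OrderedNumeric.ASCENDING.decodeLong(buf)); // prints 42
  }
}
```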
hbase_SnapshotScannerHDFSAclHelper_getGlobalRootPaths_rdh | /**
 * Return the paths that a user with global permission will visit
*
* @return the path list
*/
List<Path> getGlobalRootPaths() {
return Lists.newArrayList(pathHelper.getTmpDataDir(), pathHelper.getDataDir(), pathHelper.getMobDataDir(), pathHelper.getArchiveDataDir(), pathHelper.getSnapshotRootDir());
} | 3.26 |
hbase_SnapshotScannerHDFSAclHelper_removeTableAcl_rdh | /**
* Remove table acls when modify table
*
* @param tableName
* the table
* @param users
* the table users with READ permission
* @return false if an error occurred, otherwise true
*/
public boolean removeTableAcl(TableName tableName, Set<String> users) {
try {
      long start = EnvironmentEdgeManager.currentTime();
if (users.size() > 0) {
handleTableAcl(Sets.newHashSet(tableName), users, new HashSet<>(0), new HashSet<>(0), HDFSAclOperation.OperationType.REMOVE);
}
LOG.info("Set HDFS acl when create or modify table {}, cost {} ms", tableName, EnvironmentEdgeManager.currentTime() - start);
return true;
} catch (Exception e) {
LOG.error("Set HDFS acl error when create or modify table {}", tableName, e);
      return false;
    }
} | 3.26 |
hbase_SnapshotScannerHDFSAclHelper_addTableAcl_rdh | /**
* Add table user acls
*
* @param tableName
* the table
* @param users
* the table users with READ permission
* @return false if an error occurred, otherwise true
*/
public boolean addTableAcl(TableName tableName, Set<String> users, String operation) {
try {
long start = EnvironmentEdgeManager.currentTime();
      if (users.size() > 0) {
SnapshotScannerHDFSAclHelper.HDFSAclOperation.OperationType operationType = HDFSAclOperation.OperationType.MODIFY;
handleNamespaceAccessAcl(tableName.getNamespaceAsString(), users, operationType);
handleTableAcl(Sets.newHashSet(tableName), users, new HashSet<>(0), new HashSet<>(0), operationType);
}
LOG.info("Set HDFS acl when {} table {}, cost {} ms", operation, tableName,
EnvironmentEdgeManager.currentTime() - start);
return true;
} catch (Exception e) {
LOG.error("Set HDFS acl error when {} table {}", operation, tableName, e);
return false;
}
} | 3.26 |
hbase_SnapshotScannerHDFSAclHelper_grantAcl_rdh | /**
* Set acl when grant user permission
*
* @param userPermission
* the user and permission
* @param skipNamespaces
* the namespace set to skip set acl because already set
* @param skipTables
* the table set to skip set acl because already set
* @return false if an error occurred, otherwise true
*/
public boolean grantAcl(UserPermission userPermission, Set<String> skipNamespaces, Set<TableName> skipTables) {
try {
long start = EnvironmentEdgeManager.currentTime();
handleGrantOrRevokeAcl(userPermission, HDFSAclOperation.OperationType.MODIFY, skipNamespaces, skipTables);
LOG.info("Set HDFS acl when grant {}, skipNamespaces: {}, skipTables: {}, cost {} ms", userPermission, skipNamespaces, skipTables, EnvironmentEdgeManager.currentTime() - start);
      return true;
    } catch (Exception e) {
LOG.error("Set HDFS acl error when grant: {}, skipNamespaces: {}, skipTables: {}", userPermission, skipNamespaces, skipTables, e);
return false;
}
} | 3.26 |
hbase_SnapshotScannerHDFSAclHelper_getTableRootPaths_rdh | /**
 * Return the paths that a user with table permission will visit
*
* @param tableName
* the table
* @param includeSnapshotPath
* true if return table snapshots paths, otherwise false
* @return the path list
* @throws IOException
* if an error occurred
*/
List<Path> getTableRootPaths(TableName tableName, boolean includeSnapshotPath) throws IOException {
  List<Path> paths = Lists.newArrayList(pathHelper.getDataTableDir(tableName), pathHelper.getMobTableDir(tableName), pathHelper.getArchiveTableDir(tableName));
if (includeSnapshotPath) {
paths.addAll(getTableSnapshotPaths(tableName));
  }
  return paths;
} | 3.26 |
hbase_SnapshotScannerHDFSAclHelper_getUsersWithGlobalReadAction_rdh | /**
* Return users with global read permission
*
* @return users with global read permission
* @throws IOException
* if an error occurred
*/
private Set<String> getUsersWithGlobalReadAction() throws IOException {
return getUsersWithReadAction(PermissionStorage.getGlobalPermissions(conf));
} | 3.26 |
hbase_SnapshotScannerHDFSAclHelper_revokeAcl_rdh | /**
* Remove acl when grant or revoke user permission
*
* @param userPermission
* the user and permission
* @param skipNamespaces
* the namespace set to skip remove acl
* @param skipTables
* the table set to skip remove acl
* @return false if an error occurred, otherwise true
*/
public boolean revokeAcl(UserPermission userPermission, Set<String> skipNamespaces, Set<TableName> skipTables) {
try {
      long start = EnvironmentEdgeManager.currentTime();
handleGrantOrRevokeAcl(userPermission, HDFSAclOperation.OperationType.REMOVE, skipNamespaces, skipTables);
LOG.info("Set HDFS acl when revoke {}, skipNamespaces: {}, skipTables: {}, cost {} ms", userPermission, skipNamespaces, skipTables, EnvironmentEdgeManager.currentTime() - start);
return true;
} catch (Exception e) {
LOG.error("Set HDFS acl error when revoke: {}, skipNamespaces: {}, skipTables: {}", userPermission, skipNamespaces, skipTables, e);
return false;
}
} | 3.26 |
hbase_SnapshotScannerHDFSAclHelper_getNamespaceRootPaths_rdh | /**
 * Return the paths that a user with namespace permission will visit
*
* @param namespace
* the namespace
* @return the path list
*/
List<Path> getNamespaceRootPaths(String namespace) {
return Lists.newArrayList(pathHelper.getTmpNsDir(namespace), pathHelper.getDataNsDir(namespace), pathHelper.getMobDataNsDir(namespace), pathHelper.getArchiveNsDir(namespace));
} | 3.26 |
hbase_SnapshotScannerHDFSAclHelper_removeNamespaceDefaultAcl_rdh | /**
* Remove default acl from namespace archive dir when delete namespace
*
* @param namespace
* the namespace
* @param removeUsers
* the users whose default acl will be removed
* @return false if an error occurred, otherwise true
*/
public boolean removeNamespaceDefaultAcl(String namespace, Set<String> removeUsers) {
try {
      long start = EnvironmentEdgeManager.currentTime();
      Path archiveNsDir = pathHelper.getArchiveNsDir(namespace);
HDFSAclOperation operation = new HDFSAclOperation(fs, archiveNsDir, removeUsers, HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT);
operation.handleAcl();
LOG.info("Remove HDFS acl when delete namespace {}, cost {} ms", namespace, EnvironmentEdgeManager.currentTime() - start);
return true;
} catch (Exception e) {
LOG.error("Remove HDFS acl error when delete namespace {}", namespace, e);
return false;
}
} | 3.26 |
hbase_SnapshotScannerHDFSAclHelper_getUsersWithTableReadAction_rdh | /**
* Return users with table read permission
*
* @param tableName
* the table
* @param includeNamespace
* true if include users with namespace read action
* @param includeGlobal
* true if include users with global read action
* @return users with table read permission
* @throws IOException
* if an error occurred
*/
Set<String> getUsersWithTableReadAction(TableName tableName, boolean includeNamespace, boolean includeGlobal) throws IOException {
Set<String> users = getUsersWithReadAction(PermissionStorage.getTablePermissions(conf, tableName));
  if (includeNamespace) {
users.addAll(getUsersWithNamespaceReadAction(tableName.getNamespaceAsString(), includeGlobal));
}
return users;
} | 3.26 |
hbase_SnapshotScannerHDFSAclHelper_getUsersWithNamespaceReadAction_rdh | /**
* Return users with namespace read permission
*
* @param namespace
* the namespace
* @param includeGlobal
* true if include users with global read action
* @return users with namespace read permission
* @throws IOException
* if an error occurred
*/
Set<String> getUsersWithNamespaceReadAction(String namespace, boolean includeGlobal) throws IOException {
Set<String> users = getUsersWithReadAction(PermissionStorage.getNamespacePermissions(conf, namespace));
if (includeGlobal) {
users.addAll(getUsersWithGlobalReadAction());
}
return users;
} | 3.26 |
hbase_SnapshotScannerHDFSAclHelper_removeNamespaceAccessAcl_rdh | /**
* Remove table access acl from namespace dir when delete table
*
* @param tableName
* the table
* @param removeUsers
* the users whose access acl will be removed
* @return false if an error occurred, otherwise true
*/
public boolean removeNamespaceAccessAcl(TableName tableName, Set<String> removeUsers, String operation) {
    try {
      long start = EnvironmentEdgeManager.currentTime();
if (removeUsers.size() > 0) {
handleNamespaceAccessAcl(tableName.getNamespaceAsString(), removeUsers, HDFSAclOperation.OperationType.REMOVE);
}
      LOG.info("Remove HDFS acl when {} table {}, cost {} ms", operation, tableName,
        EnvironmentEdgeManager.currentTime() - start);
return true;
} catch (Exception e) {
LOG.error("Remove HDFS acl error when {} table {}", operation, tableName, e);
return false;
}
} | 3.26 |
hbase_SnapshotScannerHDFSAclHelper_removeTableDefaultAcl_rdh | /**
* Remove default acl from table archive dir when delete table
*
* @param tableName
* the table name
* @param removeUsers
* the users whose default acl will be removed
* @return false if an error occurred, otherwise true
*/
public boolean removeTableDefaultAcl(TableName tableName, Set<String> removeUsers) {
try {
      long v13 = EnvironmentEdgeManager.currentTime();
      Path archiveTableDir = pathHelper.getArchiveTableDir(tableName);
HDFSAclOperation operation = new HDFSAclOperation(fs, archiveTableDir, removeUsers, HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT);
operation.handleAcl();
LOG.info("Remove HDFS acl when delete table {}, cost {} ms", tableName, EnvironmentEdgeManager.currentTime() - v13);
return true;
} catch (Exception e) {
LOG.error("Remove HDFS acl error when delete table {}", tableName, e);
return false;
}
} | 3.26 |
hbase_Put_addColumn_rdh | /**
* Add the specified column and value, with the specified timestamp as its version to this Put
* operation.
*
* @param family
* family name
* @param qualifier
* column qualifier
* @param ts
* version timestamp
* @param value
* column value
*/
public Put addColumn(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer value) {
  if (ts < 0) {
    throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + ts);
  }
  List<Cell> list = getCellList(family);
  KeyValue kv = createPutKeyValue(family, qualifier, ts, value, null);
list.add(kv);
return this;
} | 3.26 |
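A short usage fragment for the ByteBuffer overload above; the row, family, qualifier and value are made up, and `table` is assumed to be an already-obtained org.apache.hadoop.hbase.client.Table.

```java
Put put = new Put(Bytes.toBytes("row-1"));
put.addColumn(Bytes.toBytes("cf"), ByteBuffer.wrap(Bytes.toBytes("qual")),
  EnvironmentEdgeManager.currentTime(), ByteBuffer.wrap(Bytes.toBytes("value")));
table.put(put); // send the mutation; "table" must be obtained from a Connection beforehand
```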
hbase_Put_add_rdh | /**
* Add the specified KeyValue to this Put operation. Operation assumes that the passed KeyValue is
* immutable and its backing array will not be modified for the duration of this Put.
*
* @param cell
* individual cell
* @throws java.io.IOException
* e
*/
@Override
public Put add(Cell cell) throws IOException {
super.add(cell);
return this;
} | 3.26 |
hbase_MajorCompactionTTLRequest_getColFamilyCutoffTime_rdh | // If the CF has no TTL, return -1, else return the current time - TTL.
private long getColFamilyCutoffTime(ColumnFamilyDescriptor colDesc) {
if (colDesc.getTimeToLive() == HConstants.FOREVER) {
return -1;
}
return EnvironmentEdgeManager.currentTime() - (colDesc.getTimeToLive() * 1000L);
} | 3.26 |
hbase_MasterSnapshotVerifier_verifyTableInfo_rdh | /**
* Check that the table descriptor for the snapshot is a valid table descriptor
*
* @param manifest
* snapshot manifest to inspect
*/
private void verifyTableInfo(final SnapshotManifest manifest) throws IOException {
  TableDescriptor htd = manifest.getTableDescriptor();
if (htd == null) {
throw new CorruptedSnapshotException("Missing Table Descriptor", ProtobufUtil.createSnapshotDesc(snapshot));
}
if (!htd.getTableName().getNameAsString().equals(snapshot.getTable())) {
throw new CorruptedSnapshotException((("Invalid Table Descriptor. Expected " + snapshot.getTable()) + " name, got ") + htd.getTableName().getNameAsString(), ProtobufUtil.createSnapshotDesc(snapshot));
}
} | 3.26 |
hbase_MasterSnapshotVerifier_verifySnapshotDescription_rdh | /**
* Check that the snapshot description written in the filesystem matches the current snapshot
*
* @param snapshotDir
* snapshot directory to check
*/
private void verifySnapshotDescription(Path snapshotDir) throws CorruptedSnapshotException {
SnapshotDescription found = SnapshotDescriptionUtils.readSnapshotInfo(workingDirFs, snapshotDir);
if (!this.snapshot.equals(found)) {
throw new CorruptedSnapshotException(((("Snapshot read (" + found) + ") doesn't equal snapshot we ran (") + snapshot) + ").", ProtobufUtil.createSnapshotDesc(snapshot));
}
} | 3.26 |
hbase_MasterSnapshotVerifier_verifyRegionInfo_rdh | /**
* Verify that the regionInfo is valid
*
* @param region
* the region to check
* @param manifest
* snapshot manifest to inspect
*/
private void verifyRegionInfo(final RegionInfo region, final SnapshotRegionManifest manifest) throws IOException {
RegionInfo manifestRegionInfo = ProtobufUtil.toRegionInfo(manifest.getRegionInfo());
if (RegionInfo.COMPARATOR.compare(region, manifestRegionInfo) != 0) {
    String msg = "Manifest region info " + manifestRegionInfo + " doesn't match expected region: "
      + region;
throw new CorruptedSnapshotException(msg, ProtobufUtil.createSnapshotDesc(snapshot));
}
} | 3.26 |
hbase_MasterSnapshotVerifier_verifySnapshot_rdh | /**
* Verify that the snapshot in the directory is a valid snapshot
*
* @param snapshotDir
* snapshot directory to check
* @throws CorruptedSnapshotException
* if the snapshot is invalid
* @throws IOException
* if there is an unexpected connection issue to the filesystem
*/
public void verifySnapshot(Path snapshotDir, boolean verifyRegions)
  throws CorruptedSnapshotException, IOException {
  SnapshotManifest manifest =
    SnapshotManifest.open(services.getConfiguration(), workingDirFs, snapshotDir, snapshot);
  // verify snapshot info matches
verifySnapshotDescription(snapshotDir);
// check that tableinfo is a valid table description
verifyTableInfo(manifest);
// check that each region is valid
verifyRegions(manifest, verifyRegions);
} | 3.26 |
hbase_MasterSnapshotVerifier_verifyRegions_rdh | /**
* Check that all the regions in the snapshot are valid, and accounted for.
*
* @param manifest
* snapshot manifest to inspect
* @throws IOException
* if we can't reach hbase:meta or read the files from the FS
*/
private void verifyRegions(SnapshotManifest manifest, boolean verifyRegions) throws IOException {
List<RegionInfo> regions = services.getAssignmentManager().getTableRegions(tableName, false);
// Remove the non-default regions
RegionReplicaUtil.removeNonDefaultRegions(regions);
Map<String, SnapshotRegionManifest> regionManifests = manifest.getRegionManifestsMap();
if (regionManifests == null) {
String msg = ("Snapshot " + ClientSnapshotDescriptionUtils.toString(snapshot)) + " looks empty";
LOG.error(msg);
throw new CorruptedSnapshotException(msg);
}
String errorMsg = "";
  boolean hasMobStore = false;
// the mob region is a dummy region, it's not a real region in HBase.
// the mob region has a special name, it could be found by the region name.
if (regionManifests.get(MobUtils.getMobRegionInfo(tableName).getEncodedName()) != null) {
hasMobStore = true;
}
  int realRegionCount = hasMobStore ? regionManifests.size() - 1 : regionManifests.size();
if (realRegionCount != regions.size()) {
errorMsg = ((((("Regions moved during the snapshot '" + ClientSnapshotDescriptionUtils.toString(snapshot)) + "'. expected=") + regions.size()) + " snapshotted=") + realRegionCount) + ".";
LOG.error(errorMsg);
}
// Verify RegionInfo
if (verifyRegions) {
    for (RegionInfo region : regions) {
SnapshotRegionManifest regionManifest = regionManifests.get(region.getEncodedName());
      if (regionManifest == null) {
        // could happen due to a move or split race.
        String mesg = " No snapshot region directory found for region:" + region;
        if (errorMsg.isEmpty()) {
errorMsg = mesg;
}
LOG.error(mesg);
continue;
}
verifyRegionInfo(region, regionManifest);
}
if (!errorMsg.isEmpty()) {
throw new CorruptedSnapshotException(errorMsg);
    }
    // Verify Snapshot HFiles
// Requires the root directory file system as HFiles are stored in the root directory
SnapshotReferenceUtil.verifySnapshot(services.getConfiguration(), CommonFSUtils.getRootDirFileSystem(services.getConfiguration()), manifest);
}
} | 3.26 |
hbase_RemoteProcedureException_serialize_rdh | /**
* Converts a RemoteProcedureException to an array of bytes.
*
* @param source
* the name of the external exception source
* @param t
* the "local" external exception (local)
* @return protobuf serialized version of RemoteProcedureException
 */
public static byte[] serialize(String source, Throwable t) {
  return toProto(source, t).toByteArray();
} | 3.26 |
hbase_RemoteProcedureException_deserialize_rdh | /**
* Takes a series of bytes and tries to generate an RemoteProcedureException instance for it.
*
* @param bytes
* the bytes to generate the {@link RemoteProcedureException} from
* @return the ForeignExcpetion instance
* @throws IOException
* if there was deserialization problem this is thrown.
*/
public static RemoteProcedureException deserialize(byte[] bytes) throws IOException {
return fromProto(ForeignExceptionMessage.parseFrom(bytes));
} | 3.26 |
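A round-trip sketch using the two helpers above; the source name and cause are invented, and the fragment assumes the enclosing method declares IOException for deserialize (unwrapRemoteIOException appears in the next row).

```java
// Ship a failure across processes as bytes and unwrap it on the receiving side.
byte[] wireBytes = RemoteProcedureException.serialize("rs-1.example.com",
  new IllegalStateException("region offline"));      // made-up source and cause
RemoteProcedureException remote = RemoteProcedureException.deserialize(wireBytes);
IOException ioe = remote.unwrapRemoteIOException();  // rethrow-friendly form of the remote cause
```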
hbase_RemoteProcedureException_unwrapRemoteIOException_rdh | // NOTE: Does not throw DoNotRetryIOE because it does not
// have access (DNRIOE is in the client module). Use
// MasterProcedureUtil.unwrapRemoteIOException if need to
// throw DNRIOE.
public IOException unwrapRemoteIOException() {
final Exception cause = unwrapRemoteException();
if (cause instanceof IOException) {
return ((IOException) (cause));
}
return new IOException(cause);
} | 3.26 |
hbase_ServerNonceManager_createCleanupScheduledChore_rdh | /**
* Creates a scheduled chore that is used to clean up old nonces.
*
* @param stoppable
* Stoppable for the chore.
* @return ScheduledChore; the scheduled chore is not started.
*/
public ScheduledChore createCleanupScheduledChore(Stoppable stoppable) {
// By default, it will run every 6 minutes (30 / 5).
  return new ScheduledChore("nonceCleaner", stoppable, deleteNonceGracePeriod / 5) {
@Override
protected void chore() {
cleanUpOldNonces();
}
};
} | 3.26 |
hbase_ServerNonceManager_reportOperationFromWal_rdh | /**
* Reports the operation from WAL during replay.
*
* @param group
* Nonce group.
* @param nonce
* Nonce.
* @param writeTime
* Entry write time, used to ignore entries that are too old.
 */
public void reportOperationFromWal(long group, long nonce, long writeTime) {
  if (nonce == HConstants.NO_NONCE) {
    return;
  }
  // Give the write time some slack in case the clocks are not synchronized.
  long now = EnvironmentEdgeManager.currentTime();
  if (now > (writeTime + (deleteNonceGracePeriod * 1.5))) {
    return;
  }
OperationContext newResult = new OperationContext();
newResult.setState(OperationContext.DONT_PROCEED);
NonceKey nk = new NonceKey(group, nonce);
OperationContext oldResult = nonces.putIfAbsent(nk, newResult);
if (oldResult != null) {
// Some schemes can have collisions (for example, expiring hashes), so just log it.
// We have no idea about the semantics here, so this is the least of many evils.
f0.warn((((("Nonce collision during WAL recovery: " + nk) + ", ") + oldResult) + " with ") + newResult);
}
} | 3.26 |
hbase_ServerNonceManager_endOperation_rdh | /**
* Ends the operation started by startOperation.
*
* @param group
* Nonce group.
* @param nonce
* Nonce.
* @param success
* Whether the operation has succeeded.
*/
public void endOperation(long group, long nonce, boolean success) {
  if (nonce == HConstants.NO_NONCE) {
    return;
  }
NonceKey nk = new NonceKey(group, nonce);
OperationContext newResult = nonces.get(nk);
assert newResult != null;
synchronized(newResult) {
assert newResult.getState() == OperationContext.WAIT;
// If we failed, other retries can proceed.
newResult.setState(success ? OperationContext.DONT_PROCEED : OperationContext.PROCEED);
    if (success) {
      newResult.reportActivity(); // Set time to use for cleanup.
} else {
OperationContext val = nonces.remove(nk);
assert val == newResult;
}
    if (newResult.hasWait()) {
      f0.debug("Conflict with running op ended: " + nk + ", " + newResult);
      newResult.notifyAll();
    }
}
} | 3.26 |
hbase_ServerNonceManager_addMvccToOperationContext_rdh | /**
* Store the write point in OperationContext when the operation succeed.
*
* @param group
* Nonce group.
* @param nonce
* Nonce.
* @param mvcc
* Write point of the succeed operation.
*/
public void addMvccToOperationContext(long group, long nonce, long mvcc) {
if (nonce == HConstants.NO_NONCE) {
return;
}
NonceKey nk = new NonceKey(group, nonce);
OperationContext result = nonces.get(nk);
assert result != null;
  synchronized (result) {
    result.setMvcc(mvcc);
}
} | 3.26 |
hbase_ServerNonceManager_startOperation_rdh | /**
* Starts the operation if operation with such nonce has not already succeeded. If the operation
* is in progress, waits for it to end and checks whether it has succeeded.
*
* @param group
* Nonce group.
* @param nonce
* Nonce.
* @param stoppable
* Stoppable that terminates waiting (if any) when the server is stopped.
* @return true if the operation has not already succeeded and can proceed; false otherwise.
 */
public boolean startOperation(long group, long nonce, Stoppable stoppable)
  throws InterruptedException {
  if (nonce == HConstants.NO_NONCE) {
    return true;
  }
NonceKey nk = new NonceKey(group, nonce);
OperationContext ctx = new OperationContext();
while (true) {
OperationContext oldResult = nonces.putIfAbsent(nk, ctx);
    if (oldResult == null) {
      return true;
    }
// Collision with some operation - should be extremely rare.
synchronized(oldResult) {
int oldState = oldResult.getState();
f0.debug((("Conflict detected by nonce: " + nk) + ", ") + oldResult);
if (oldState != OperationContext.WAIT) {
return oldState == OperationContext.PROCEED;// operation ended
}
oldResult.setHasWait();
oldResult.wait(this.conflictWaitIterationMs);// operation is still active... wait and loop
      if (stoppable.isStopped()) {
throw new InterruptedException("Server stopped");
}
}
}
} | 3.26 |
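A hedged sketch of the nonce protocol these methods describe: only the first attempt with a given (group, nonce) runs the operation, while retries observe the recorded outcome. `nonceManager`, `group`, `nonce` and `stoppable` are placeholders, and the enclosing method is assumed to handle the InterruptedException from startOperation.

```java
if (nonceManager.startOperation(group, nonce, stoppable)) {
  boolean success = false;
  try {
    // ... perform the mutation exactly once here ...
    success = true;
  } finally {
    nonceManager.endOperation(group, nonce, success); // record the outcome for later retries
  }
}
// else: an earlier attempt already succeeded, so skip re-executing the operation.
```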
hbase_ServerNonceManager_getMvccFromOperationContext_rdh | /**
* Return the write point of the previous succeed operation.
*
* @param group
* Nonce group.
* @param nonce
* Nonce.
* @return write point of the previous succeed operation.
*/
public long getMvccFromOperationContext(long group, long nonce) {
  if (nonce == HConstants.NO_NONCE) {
    return Long.MAX_VALUE;
  }
  NonceKey nk = new NonceKey(group, nonce);
OperationContext result = nonces.get(nk);
return result == null ? Long.MAX_VALUE : result.getMvcc();
} | 3.26 |
hbase_FSHLogProvider_createWriter_rdh | /**
* Public because of FSHLog. Should be package-private
*/
public static Writer createWriter(final Configuration conf, final FileSystem fs, final Path path,
  final boolean overwritable, long blocksize) throws IOException {
  // Configuration already does caching for the Class lookup.
  Class<? extends Writer> logWriterClass =
    conf.getClass(WRITER_IMPL, ProtobufLogWriter.class, FSHLogProvider.Writer.class);
  Writer writer = null;
  try {
    writer = logWriterClass.getDeclaredConstructor().newInstance();
    FileSystem rootFs = FileSystem.get(path.toUri(), conf);
    writer.init(rootFs, path, conf, overwritable, blocksize,
      StreamSlowMonitor.create(conf, path.getName()));
    return writer;
  } catch (Exception e) {
    if (e instanceof CommonFSUtils.StreamLacksCapabilityException) {
      LOG.error("The RegionServer write ahead log provider for FileSystem implementations "
        + "relies on the ability to call " + e.getMessage() + " for proper operation during "
        + "component failures, but the current FileSystem does not support doing so. Please "
        + "check the config value of '" + CommonFSUtils.HBASE_WAL_DIR + "' and ensure "
        + "it points to a FileSystem mount that has suitable capabilities for output streams.");
    } else {
      LOG.debug("Error instantiating log writer.", e);
    }
    throw new IOException("cannot get log writer", e);
  }
} | 3.26
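A minimal sketch of calling the factory above; the WAL path and the choice of the filesystem's default block size are illustrative assumptions rather than values taken from FSHLogProvider.

static FSHLogProvider.Writer openExampleWriter(Configuration conf) throws IOException {
  FileSystem fs = FileSystem.get(conf);
  Path walPath = new Path("/hbase/WALs/example-server/example-wal"); // hypothetical WAL file
  long blocksize = fs.getDefaultBlockSize(walPath); // assumption: the default block size is fine
  // overwritable = false: fail instead of silently replacing an existing WAL file.
  return FSHLogProvider.createWriter(conf, fs, walPath, false, blocksize);
}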
hbase_ZKReplicationQueueStorageForMigration_listAllHFileRefs_rdh | /**
 * Iterates Pair<PeerId, List<HFileRefs>> entries, one per replication peer.
 */
@SuppressWarnings("unchecked")
public MigrationIterator<Pair<String, List<String>>> listAllHFileRefs() throws KeeperException {
List<String> peerIds = ZKUtil.listChildrenNoWatch(zookeeper, hfileRefsZNode);
if ((peerIds == null) || peerIds.isEmpty()) {
ZKUtil.deleteNodeRecursively(zookeeper, hfileRefsZNode);
return EMPTY_ITER;
}
Iterator<String> iter = peerIds.iterator();
return new MigrationIterator<Pair<String, List<String>>>() {
private String previousPeerId;
@Override
public Pair<String, List<String>> next() throws KeeperException {
if (previousPeerId != null) {
ZKUtil.deleteNodeRecursively(zookeeper, m1(previousPeerId));
}
if (!iter.hasNext()) {
ZKUtil.deleteNodeRecursively(zookeeper, hfileRefsZNode);
return null;
}
String peerId = iter.next();
List<String> refs = ZKUtil.listChildrenNoWatch(zookeeper, m1(peerId));
previousPeerId = peerId;
return Pair.newPair(peerId, refs != null ? refs : Collections.emptyList());
}
};
} | 3.26 |
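The iterator above deletes the znodes it has already handed out, so each peer's refs can be read only once. A minimal draining sketch; the storage parameter and the hand-off comment are assumptions, not part of the snippet.

static void drainHFileRefs(ZKReplicationQueueStorageForMigration storage) throws KeeperException {
  MigrationIterator<Pair<String, List<String>>> iter = storage.listAllHFileRefs();
  Pair<String, List<String>> entry;
  while ((entry = iter.next()) != null) {
    String peerId = entry.getFirst();
    List<String> hfileRefs = entry.getSecond();
    // hand peerId/hfileRefs off to the new queue storage here; this peer's znodes are
    // removed on the next call to iter.next()
  }
}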
hbase_PBType_inputStreamFromByteRange_rdh | /**
* Create a {@link CodedInputStream} from a {@link PositionedByteRange}. Be sure to update
* {@code src}'s position after consuming from the stream.
* <p/>
* For example:
*
* <pre>
* Foo.Builder builder = ...
* CodedInputStream is = inputStreamFromByteRange(src);
* Foo ret = builder.mergeFrom(is).build();
* src.setPosition(src.getPosition() + is.getTotalBytesRead());
* </pre>
*/
public static CodedInputStream inputStreamFromByteRange(PositionedByteRange src) {
return CodedInputStream.newInstance(src.getBytes(), src.getOffset() + src.getPosition(), src.getRemaining());
} | 3.26 |
hbase_PBType_outputStreamFromByteRange_rdh | /**
* Create a {@link CodedOutputStream} from a {@link PositionedByteRange}. Be sure to update
* {@code dst}'s position after writing to the stream.
* <p/>
* For example:
*
* <pre>
* CodedOutputStream os = outputStreamFromByteRange(dst);
* int before = os.spaceLeft(), after, written;
* val.writeTo(os);
* after = os.spaceLeft();
* written = before - after;
* dst.setPosition(dst.getPosition() + written);
* </pre>
*/
public static CodedOutputStream outputStreamFromByteRange(PositionedByteRange dst) {
return CodedOutputStream.newInstance(dst.getBytes(), dst.getOffset() + dst.getPosition(), dst.getRemaining());
} | 3.26 |
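Combining the two helpers above gives a full encode/decode round trip through a PositionedByteRange. This sketch assumes a protobuf message type named Foo, a 64-byte buffer, and that it sits alongside the helpers so they can be called unqualified; none of that comes from PBType itself.

static Foo roundTrip(Foo value) throws IOException {
  PositionedByteRange dst = new SimplePositionedMutableByteRange(64); // assumed capacity
  CodedOutputStream os = outputStreamFromByteRange(dst);
  int before = os.spaceLeft();
  value.writeTo(os);
  os.flush();
  dst.setPosition(dst.getPosition() + (before - os.spaceLeft()));

  PositionedByteRange src =
    new SimplePositionedMutableByteRange(dst.getBytes(), dst.getOffset(), dst.getPosition());
  CodedInputStream is = inputStreamFromByteRange(src);
  Foo decoded = Foo.newBuilder().mergeFrom(is).build();
  src.setPosition(src.getPosition() + is.getTotalBytesRead());
  return decoded;
}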
hbase_IpcClientSpanBuilder_getRpcPackageAndService_rdh | /**
* Retrieve the combined {@code $package.$service} value from {@code sd}.
*/
public static String getRpcPackageAndService(final Descriptors.ServiceDescriptor sd) {
// it happens that `getFullName` returns a string in the $package.$service format required by
// the otel RPC specification. Use it for now; might have to parse the value in the future.
return sd.getFullName();
} | 3.26 |
hbase_IpcClientSpanBuilder_populateMethodDescriptorAttributes_rdh | /**
* Static utility method that performs the primary logic of this builder. It is visible to other
* classes in this package so that other builders can use this functionality as a mix-in.
*
* @param attributes
* the attributes map to be populated.
* @param md
* the source of the RPC attribute values.
*/
static void populateMethodDescriptorAttributes(final Map<AttributeKey<?>, Object> attributes, final Descriptors.MethodDescriptor md) {
final String packageAndService = getRpcPackageAndService(md.getService());
final String method = getRpcName(md);
attributes.put(RPC_SYSTEM, RpcSystem.HBASE_RPC.name());
attributes.put(RPC_SERVICE, packageAndService);
attributes.put(RPC_METHOD, method);
} | 3.26 |
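For illustration, this is roughly how a builder in the same package might use the mix-in above; the ClientService/Get descriptor and the values shown in the comment are assumptions inferred from the attribute keys in the snippet, not output captured from HBase.

static Map<AttributeKey<?>, Object> exampleAttributes() {
  Map<AttributeKey<?>, Object> attributes = new HashMap<>();
  Descriptors.MethodDescriptor md =
    ClientProtos.ClientService.getDescriptor().findMethodByName("Get"); // assumed descriptor
  populateMethodDescriptorAttributes(attributes, md);
  // Expected contents (approximately):
  //   rpc.system  -> "HBASE_RPC"
  //   rpc.service -> "hbase.pb.ClientService"
  //   rpc.method  -> "Get"
  return attributes;
}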
hbase_IpcClientSpanBuilder_getRpcName_rdh | /**
* Retrieve the {@code $method} value from {@code md}.
*/
public static String getRpcName(final Descriptors.MethodDescriptor md) {
  return md.getName();
} | 3.26
hbase_ReplicationStorageFactory_getReplicationPeerStorage_rdh | /**
* Create a new {@link ReplicationPeerStorage}.
*/
public static ReplicationPeerStorage getReplicationPeerStorage(FileSystem fs, ZKWatcher zk,
  Configuration conf) {
  Class<? extends ReplicationPeerStorage> clazz = getReplicationPeerStorageClass(conf);
  for (Constructor<?> c : clazz.getConstructors()) {
    if (c.getParameterCount() != 2) {
      continue;
    }
    if (c.getParameterTypes()[0].isAssignableFrom(FileSystem.class)) {
      return ReflectionUtils.newInstance(clazz, fs, conf);
    } else if (c.getParameterTypes()[0].isAssignableFrom(ZKWatcher.class)) {
      return ReflectionUtils.newInstance(clazz, zk, conf);
    }
  }
  throw new IllegalArgumentException("Can not create replication peer storage with type " + clazz);
} | 3.26
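A minimal sketch of obtaining a peer storage through the factory above; the ZKWatcher identifier string is arbitrary, and which constructor branch is taken depends on the configured ReplicationPeerStorage implementation.

static ReplicationPeerStorage examplePeerStorage(Configuration conf) throws IOException {
  FileSystem fs = FileSystem.get(conf);
  // null Abortable: acceptable for a short-lived example watcher
  ZKWatcher zk = new ZKWatcher(conf, "replication-peer-storage-example", null);
  return ReplicationStorageFactory.getReplicationPeerStorage(fs, zk, conf);
}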
hbase_ReplicationStorageFactory_getReplicationQueueStorage_rdh | /**
* Create a new {@link ReplicationQueueStorage}.
*/
public static ReplicationQueueStorage getReplicationQueueStorage(Connection conn,
  Configuration conf, TableName tableName) {
  Class<? extends ReplicationQueueStorage> clazz = conf.getClass(REPLICATION_QUEUE_IMPL,
    TableReplicationQueueStorage.class, ReplicationQueueStorage.class);
  try {
    Constructor<? extends ReplicationQueueStorage> c =
      clazz.getConstructor(Connection.class, TableName.class);
    return c.newInstance(conn, tableName);
  } catch (Exception e) {
    LOG.debug("failed to create ReplicationQueueStorage with Connection, "
      + "try creating with Configuration", e);
    return ReflectionUtils.newInstance(clazz, conf, tableName);
  }
} | 3.26
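And a companion sketch for the queue storage; the table name used here is an assumption about the factory's default rather than something shown in the snippet, and the concrete implementation returned is controlled by the REPLICATION_QUEUE_IMPL configuration key.

static ReplicationQueueStorage exampleQueueStorage(Connection conn, Configuration conf) {
  // "hbase:replication" is assumed to be the default queue table; substitute the real one in use.
  TableName queueTable = TableName.valueOf("hbase:replication");
  return ReplicationStorageFactory.getReplicationQueueStorage(conn, conf, queueTable);
}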
hbase_TableDescriptors_update_rdh | /**
* Add or update descriptor. Just call {@link #update(TableDescriptor, boolean)} with
* {@code cacheOnly} as {@code false}.
*/
default void update(TableDescriptor htd) throws IOException {
m0(htd, false);
} | 3.26 |
hbase_TableDescriptors_exists_rdh | /**
* Get, remove and modify table descriptors.
*/
@InterfaceAudience.Private
public interface TableDescriptors extends Closeable {
/**
* Test whether a given table exists, i.e, has a table descriptor.
*/
default boolean exists(TableName tableName) throws IOException {
return get(tableName) != null;
} | 3.26 |
hbase_AsyncConnectionImpl_getNonceGenerator_rdh | // ditto
NonceGenerator getNonceGenerator() {
return nonceGenerator;
} | 3.26 |
hbase_AsyncConnectionImpl_getChoreService_rdh | /**
* If choreService has not been created yet, create the ChoreService.
*/
synchronized ChoreService getChoreService() {
  if (isClosed()) {
    throw new IllegalStateException("connection is already closed");
  }
  if (choreService == null) {
    choreService = new ChoreService("AsyncConn Chore Service");
  }
  return choreService;
} | 3.26
hbase_AsyncConnectionImpl_getLocator_rdh | // we will override this method for testing retry caller, so do not remove this method.
AsyncRegionLocator getLocator() {
return locator;
} | 3.26 |