name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, 3.26–3.68)
---|---|---|
hbase_CompactSplit_isUnderCompaction_rdh | /**
* Check if this store is under compaction
*/
public boolean isUnderCompaction(final HStore s) {
return underCompactionStores.contains(getStoreNameForUnderCompaction(s));
} | 3.26 |
hbase_CompactSplit_getLongCompactions_rdh | /**
* Returns the longCompactions thread pool executor
*/
ThreadPoolExecutor getLongCompactions() {
return longCompactions;
} | 3.26 |
hbase_CompactSplit_onConfigurationChange_rdh | /**
* {@inheritDoc }
*/
@Override
public void onConfigurationChange(Configuration newConf) {
// Check if number of large / small compaction threads has changed, and then
// adjust the core pool size of the thread pools, by using the
// setCorePoolSize() method. According to the javadocs, it is safe to
// change the core pool size on-the-fly. We need to reset the maximum
// pool size, as well.
int largeThreads = Math.max(1, newConf.getInt(LARGE_COMPACTION_THREADS, LARGE_COMPACTION_THREADS_DEFAULT));
if (this.longCompactions.getCorePoolSize() != largeThreads) {
LOG.info((((("Changing the value of " + LARGE_COMPACTION_THREADS) + " from ") + this.longCompactions.getCorePoolSize()) + " to ") + largeThreads);
if (this.longCompactions.getCorePoolSize() < largeThreads) {
this.longCompactions.setMaximumPoolSize(largeThreads);
this.longCompactions.setCorePoolSize(largeThreads);
} else {
this.longCompactions.setCorePoolSize(largeThreads);
this.longCompactions.setMaximumPoolSize(largeThreads);
}
}
int smallThreads = newConf.getInt(SMALL_COMPACTION_THREADS, SMALL_COMPACTION_THREADS_DEFAULT);
if (this.shortCompactions.getCorePoolSize() != smallThreads) {
LOG.info((((("Changing the value of " + SMALL_COMPACTION_THREADS) + " from ") + this.shortCompactions.getCorePoolSize()) + " to ") + smallThreads);
if (this.shortCompactions.getCorePoolSize() < smallThreads) {
this.shortCompactions.setMaximumPoolSize(smallThreads);
this.shortCompactions.setCorePoolSize(smallThreads);
} else {
this.shortCompactions.setCorePoolSize(smallThreads);
this.shortCompactions.setMaximumPoolSize(smallThreads);
}
}
int splitThreads = newConf.getInt(SPLIT_THREADS, SPLIT_THREADS_DEFAULT);
if (this.splits.getCorePoolSize() != splitThreads) {
LOG.info((((("Changing the value of " + SPLIT_THREADS) + " from ") + this.splits.getCorePoolSize()) + " to ") + splitThreads);
if (this.splits.getCorePoolSize() < splitThreads) {
this.splits.setMaximumPoolSize(splitThreads);
this.splits.setCorePoolSize(splitThreads);
} else {
this.splits.setCorePoolSize(splitThreads);
this.splits.setMaximumPoolSize(splitThreads);
}
}
ThroughputController old = this.compactionThroughputController;
if (old != null) {
old.stop("configuration change");
}
this.compactionThroughputController = CompactionThroughputControllerFactory.create(server, newConf);
// We change this atomically here instead of reloading the config in order that upstream
// would be the only one with the flexibility to reload the config.
this.conf.reloadConfiguration();
} | 3.26 |
hbase_CompactSplit_requestSplit_rdh | /* The User parameter allows the split thread to assume the correct user identity */
private synchronized void requestSplit(final Region r, byte[] midKey, User user) {
if (midKey == null) {
LOG.debug(("Region " + r.getRegionInfo().getRegionNameAsString()) + " not splittable because midkey=null");
return;
}
try {
this.splits.execute(new SplitRequest(r, midKey, this.server, user));
if (LOG.isDebugEnabled()) {
LOG.debug((("Splitting "
+ r) + ", ") + this);
}
} catch (RejectedExecutionException ree) {
LOG.info("Could not execute split for "
+ r, ree);
}
} | 3.26 |
hbase_CompactSplit_shutdownLongCompactions_rdh | /**
* Shutdown the long compaction thread pool. Should only be used in unit test to prevent long
* compaction thread pool from stealing job from short compaction queue
*/
void shutdownLongCompactions() {
this.longCompactions.shutdown();
} | 3.26 |
hbase_CompactSplit_interruptIfNecessary_rdh | /**
* Only interrupt once it's done with a run through the work loop.
*/
void interruptIfNecessary() {
splits.shutdown();
longCompactions.shutdown();
shortCompactions.shutdown();
} | 3.26 |
hbase_CompactSplit_requestCompactionInternal_rdh | // set protected for test
protected void requestCompactionInternal(HRegion region, HStore store, String why, int priority, boolean selectNow, CompactionLifeCycleTracker tracker, CompactionCompleteTracker completeTracker, User user) throws IOException {
if (!this.isCompactionsEnabled()) {
LOG.info(("Ignoring compaction request for " + region) + ",because compaction is disabled.");
return;
}
if (this.server.isStopped() || ((region.getTableDescriptor() != null) && (!region.getTableDescriptor().isCompactionEnabled()))) {
return;
}
RegionServerSpaceQuotaManager spaceQuotaManager = this.server.getRegionServerSpaceQuotaManager();
if ((((user != null) && (!Superusers.isSuperUser(user))) && (spaceQuotaManager != null)) && spaceQuotaManager.areCompactionsDisabled(region.getTableDescriptor().getTableName())) {
// Enter here only when:
// It's a user generated req, the user is not a super user, quotas enabled, compactions disabled.
String reason = (("Ignoring compaction request for " + region) + " as an active space quota violation ") + " policy disallows compactions.";
tracker.notExecuted(store, reason);
completeTracker.completed(store);
LOG.debug(reason);
return;
}
CompactionContext compaction;
if (selectNow) {
Optional<CompactionContext> c = selectCompaction(region, store, priority, tracker, completeTracker, user);
if (!c.isPresent()) {
// message logged inside
return;
}
compaction = c.get();
} else {
compaction = null;
}
ThreadPoolExecutor pool;
if (selectNow) {
// compaction.get is safe as we will just return if selectNow is true but no compaction is
// selected
pool = (store.throttleCompaction(compaction.getRequest().getSize())) ? longCompactions : shortCompactions;
} else {
// We assume that most compactions are small. So, put system compactions into small
// pool; we will do selection there, and move to large pool if necessary.
pool = shortCompactions;
}
// A simple implementation for under compaction marks.
// Since this method is always called in the synchronized methods, we do not need to use the
// boolean result to make sure that exactly the one that added here will be removed
// in the next steps.
underCompactionStores.add(getStoreNameForUnderCompaction(store));
pool.execute(new CompactionRunner(store, region, compaction, tracker, completeTracker, pool, user));
if (LOG.isDebugEnabled()) {
LOG.debug("Add compact mark for store {}, priority={}, current under compaction " + "store size is {}", getStoreNameForUnderCompaction(store), priority, underCompactionStores.size());
}
region.incrementCompactionsQueuedCount();
if (LOG.isDebugEnabled()) {
String type = (pool == shortCompactions) ? "Small " : "Large ";
LOG.debug(((((type + "Compaction requested: ") + (selectNow ? compaction.toString() : "system")) + ((why != null) && (!why.isEmpty()) ? "; Because: " + why : "")) + "; ")
+ this);
}
} | 3.26 |
hbase_ByteBufferKeyOnlyKeyValue_m0_rdh | // The position in BB where the family length is added.
private int m0() {
return getFamilyLengthPosition(getRowLength());
} | 3.26 |
hbase_ByteBufferKeyOnlyKeyValue_setKey_rdh | /**
* A setter that helps to avoid object creation every time and whenever there is a need to create
* new OffheapKeyOnlyKeyValue.
*
* @param key
* - the key part of the cell
* @param offset
* - offset of the cell
* @param length
* - length of the cell
* @param rowLen
* - the rowlen part of the cell
*/
public void setKey(ByteBuffer key, int offset, int length, short rowLen) {
this.buf = key;
this.offset = offset;
this.length = length;
this.rowLen = rowLen;
} | 3.26 |
hbase_MetricsMasterQuotaSourceImpl_m0_rdh | /**
* Summarizes the usage and limit for many targets (table or namespace) into JSON.
*/
private String m0(Iterable<Entry<String, Entry<Long, Long>>> data, String target) {
StringBuilder sb = new StringBuilder();
for (Entry<String, Entry<Long, Long>> tableUsage : data) {
String tableName = tableUsage.getKey();
long usage = tableUsage.getValue().getKey();
long limit = tableUsage.getValue().getValue();
if (sb.length() > 0) {
sb.append(", ");
}
sb.append("{").append(target).append("=").append(tableName).append(", usage=").append(usage).append(", limit=").append(limit).append("}");
}
sb.insert(0, "[").append("]");
return sb.toString();
} | 3.26 |
hbase_AssignReplicationQueuesProcedure_shouldSkip_rdh | // check whether ReplicationSyncUp has already done the work for us, if so, we should skip
// claiming the replication queues and deleting them instead.
private boolean shouldSkip(MasterProcedureEnv env) throws IOException {
MasterFileSystem v14 = env.getMasterFileSystem();
Path syncUpDir = new Path(v14.getRootDir(), ReplicationSyncUp.INFO_DIR);
return v14.getFileSystem().exists(new Path(syncUpDir, crashedServer.getServerName()));
} | 3.26 |
hbase_SecurityInfo_getInfo_rdh | /**
* Returns the security configuration associated with the given service name.
*/
public static SecurityInfo getInfo(String serviceName) {
return infos.get(serviceName);
} | 3.26 |
hbase_SecurityInfo_addInfo_rdh | /**
* Adds a security configuration for a new service name. Note that this will have no effect if the
* service name was already registered.
*/
public static void addInfo(String serviceName, SecurityInfo securityInfo) {
infos.putIfAbsent(serviceName, securityInfo);
} | 3.26 |
hbase_LockAndQueue_releaseExclusiveLock_rdh | /**
* Returns whether we should wake the procedures waiting on the lock here.
*/
public boolean releaseExclusiveLock(Procedure<?> proc) {
if ((exclusiveLockOwnerProcedure == null) || (exclusiveLockOwnerProcedure.getProcId() != proc.getProcId())) {
// We are not the lock owner, it is probably inherited from the parent procedures.
return false;
}
exclusiveLockOwnerProcedure = null;
// This may be a bit strange so let me explain. We allow acquiring a shared lock while the parent
// proc or we have already held the xlock, and also allow releasing the locks in any order, so
// it could happen that the xlock is released but there are still some procs holding the shared
// lock.
// In HBase, this could happen when a proc whose holdLock is false schedules sub procs which
// acquire the shared lock on the same lock. This is because we will schedule the sub procs
// before releasing the lock, so the sub procs could call acquire lock before we release the
// xlock.
return sharedLock == 0;
} | 3.26 |
hbase_LockAndQueue_releaseSharedLock_rdh | /**
* Returns whether we should wake the procedures waiting on the lock here.
*/
public boolean releaseSharedLock() {
// hasExclusiveLock could be true, it usually means we acquire shared lock while we or our
// parent have held the xlock. And since there is still an exclusive lock, we do not need to
// wake any procedures.
return ((--sharedLock) == 0) && (!hasExclusiveLock());
} | 3.26 |
hbase_LockAndQueue_trySharedLock_rdh | // ======================================================================
// try/release Shared/Exclusive lock
// ======================================================================
/**
* Returns whether we have successfully acquired the shared lock.
*/
public boolean trySharedLock(Procedure<?> proc) {
if (hasExclusiveLock() && (!hasLockAccess(proc))) {
return false;
}
// If no one holds the xlock, then we are free to hold the sharedLock
// If the parent proc or we have already held the xlock, then we return true here as
// xlock is more powerful than shared lock.
sharedLock++;
return true;
} | 3.26 |
hbase_NamespaceStateManager_checkAndUpdateNamespaceRegionCount_rdh | /**
* Check and update region count for an existing table, to handle scenarios like restore snapshot.
*
* @param name
* name of the table for region count needs to be checked and updated
* @param incr
* count of regions
* @throws QuotaExceededException
* if quota exceeds for the number of regions allowed in a
* namespace
* @throws IOException
* Signals that an I/O exception has occurred.
*/
synchronized void checkAndUpdateNamespaceRegionCount(TableName name, int incr) throws IOException {
String namespace = name.getNamespaceAsString();
NamespaceDescriptor nspdesc = getNamespaceDescriptor(namespace);
if (nspdesc != null) {
NamespaceTableAndRegionInfo currentStatus = getState(namespace);
int regionCountOfTable = currentStatus.getRegionCountOfTable(name);
if (((currentStatus.getRegionCount() - regionCountOfTable) + incr) > TableNamespaceManager.getMaxRegions(nspdesc)) {
throw new QuotaExceededException(((("The table " + name.getNameAsString()) + " region count cannot be updated as it would exceed maximum number ") + "of regions allowed in the namespace. The total number of regions permitted is ") + TableNamespaceManager.getMaxRegions(nspdesc));
}
currentStatus.removeTable(name);
currentStatus.addTable(name, incr);
}
} | 3.26 |
hbase_NamespaceStateManager_deleteNamespace_rdh | /**
* Delete the namespace state.
*
* @param namespace
* the name of the namespace to delete
*/
void deleteNamespace(String namespace) {
this.nsStateCache.remove(namespace);
} | 3.26 |
hbase_NamespaceStateManager_initialize_rdh | /**
* Initialize namespace state cache by scanning meta table.
*/
private void initialize() throws IOException {
List<NamespaceDescriptor> namespaces = this.f0.getClusterSchema().getNamespaces();
for (NamespaceDescriptor namespace : namespaces) {
addNamespace(namespace.getName());
List<TableName> tables = this.f0.listTableNamesByNamespace(namespace.getName());
for (TableName table : tables) {
if (table.isSystemTable()) {
continue;
}
List<RegionInfo> regions = MetaTableAccessor.getTableRegions(this.f0.getConnection(), table, true);
addTable(table, regions.size());
}
}
LOG.info(("Finished updating state of " + nsStateCache.size()) + " namespaces. ");
initialized = true;
} | 3.26 |
hbase_NamespaceStateManager_start_rdh | /**
* Starts the NamespaceStateManager. The bootstrap of the cache is done in the post master start hook
* of the NamespaceAuditor class.
*
* @throws IOException
* Signals that an I/O exception has occurred.
*/
public void start() throws IOException {
LOG.info("Namespace State Manager started.");
initialize();
} | 3.26 |
hbase_NamespaceStateManager_getState_rdh | /**
* Gets an instance of NamespaceTableAndRegionInfo associated with namespace.
*
* @param name
* The name of the namespace
* @return An instance of NamespaceTableAndRegionInfo.
*/
public NamespaceTableAndRegionInfo getState(String name) {
return nsStateCache.get(name);
} | 3.26 |
hbase_IncreasingToUpperBoundRegionSplitPolicy_getCountOfCommonTableRegions_rdh | /**
* Returns Count of regions on this server that share the table this.region belongs to
*/
private int getCountOfCommonTableRegions() {
RegionServerServices rss = region.getRegionServerServices();
// Can be null in tests
if (rss == null) {
return 0;
}
TableName tablename = region.getTableDescriptor().getTableName();
int tableRegionsCount = 0;
try {
List<? extends Region> hri = rss.getRegions(tablename);
tableRegionsCount = ((hri == null) || hri.isEmpty()) ? 0 : hri.size();
}
catch (IOException e) {
LOG.debug("Failed getOnlineRegions " + tablename, e);
}
return tableRegionsCount;
}
/**
*
* @return Region max size or {@code count of regions cubed * 2 * flushsize} | 3.26 |
hbase_CommonFSUtils_getWALRegionDir_rdh | /**
* Returns the WAL region directory based on the given table name and region name
*
* @param conf
* configuration to determine WALRootDir
* @param tableName
* Table that the region is under
* @param encodedRegionName
* Region name used for creating the final region directory
* @return the region directory used to store WALs under the WALRootDir
* @throws IOException
* if there is an exception determining the WALRootDir
*/
public static Path getWALRegionDir(final Configuration conf, final TableName tableName, final String encodedRegionName) throws IOException {
return new Path(getWALTableDir(conf, tableName), encodedRegionName);
} | 3.26 |
hbase_CommonFSUtils_getDefaultBlockSize_rdh | /**
* Return the number of bytes that large input files should optimally be split into to minimize
* i/o time.
*
* @param fs
* filesystem object
* @return the default block size for the path's filesystem
*/
public static long getDefaultBlockSize(final FileSystem fs, final Path path) {
return fs.getDefaultBlockSize(path);
} | 3.26 |
hbase_CommonFSUtils_getWrongWALRegionDir_rdh | /**
* For backward compatibility with HBASE-20734, where we store recovered edits in a wrong
* directory without BASE_NAMESPACE_DIR. See HBASE-22617 for more details.
*
* @deprecated For compatibility, will be removed in 4.0.0.
*/
@Deprecated
public static Path getWrongWALRegionDir(final Configuration conf, final TableName tableName, final String encodedRegionName) throws IOException {
Path wrongTableDir = new Path(new Path(getWALRootDir(conf), tableName.getNamespaceAsString()), tableName.getQualifierAsString());
return new Path(wrongTableDir, encodedRegionName);
} | 3.26 |
hbase_CommonFSUtils_getDefaultReplication_rdh | /* Get the default replication.
@param fs filesystem object
@param f path of file
@return default replication for the path's filesystem
*/
public static short getDefaultReplication(final FileSystem fs, final Path path) {
return fs.getDefaultReplication(path);
} | 3.26 |
hbase_CommonFSUtils_isHDFS_rdh | /**
* Return true if this is a filesystem whose scheme is 'hdfs'.
*
* @throws IOException
* from underlying FileSystem
*/
public static boolean isHDFS(final Configuration conf) throws IOException {
FileSystem fs = FileSystem.get(conf);
String scheme = fs.getUri().getScheme();
return scheme.equalsIgnoreCase("hdfs");
} | 3.26 |
hbase_CommonFSUtils_removeWALRootPath_rdh | /**
* Checks for the presence of the WAL log root path (using the provided conf object) in the given
* path. If it exists, this method removes it and returns the String representation of remaining
* relative path.
*
* @param path
* must not be null
* @param conf
* must not be null
* @return String representation of the remaining relative path
* @throws IOException
* from underlying filesystem
*/
public static String removeWALRootPath(Path path, final Configuration conf) throws IOException {
Path root = getWALRootDir(conf);
String v14 = path.toString();
// Check that the path is absolute, i.e. it has the WAL root path in it.
if (!v14.startsWith(root.toString())) {
// If not, return it as it is.
return v14;
}
return v14.substring(root.toString().length() + 1);// remove the "/" too.
} | 3.26 |
hbase_CommonFSUtils_listLocatedStatus_rdh | /**
* Calls fs.listFiles() to get FileStatus and BlockLocations together for reducing rpc call
*
* @param fs
* file system
* @param dir
* directory
* @return LocatedFileStatus list
*/
public static List<LocatedFileStatus> listLocatedStatus(final FileSystem fs, final Path dir) throws IOException {
List<LocatedFileStatus> status = null;
try {
RemoteIterator<LocatedFileStatus> locatedFileStatusRemoteIterator = fs.listFiles(dir, false);
while (locatedFileStatusRemoteIterator.hasNext()) {
if (status == null) {
status = Lists.newArrayList();
}
status.add(locatedFileStatusRemoteIterator.next());
}
} catch (FileNotFoundException fnfe) {
// if directory doesn't exist, return null
if (LOG.isTraceEnabled()) {
LOG.trace("{} doesn't exist", dir);
}
}
return status;
} | 3.26 |
hbase_CommonFSUtils_delete_rdh | /**
* Calls fs.delete() and returns the value returned by the fs.delete()
*
* @param fs
* must not be null
* @param path
* must not be null
* @param recursive
* delete tree rooted at path
* @return the value returned by the fs.delete()
* @throws IOException
* from underlying FileSystem
*/
public static boolean delete(final FileSystem fs, final Path path, final boolean recursive) throws IOException {
return fs.delete(path, recursive);
} | 3.26 |
hbase_CommonFSUtils_listStatus_rdh | /**
 * Calls fs.listStatus() and treats FileNotFoundException as non-fatal. This accommodates
 * differences between hadoop versions
*
* @param fs
* file system
* @param dir
* directory
* @return null if dir is empty or doesn't exist, otherwise FileStatus array
*/
public static FileStatus[] listStatus(final FileSystem fs, final Path dir) throws IOException {
return listStatus(fs, dir, null);
} | 3.26 |
hbase_CommonFSUtils_isExists_rdh | /**
* Calls fs.exists(). Checks if the specified path exists
*
* @param fs
* must not be null
* @param path
* must not be null
* @return the value returned by fs.exists()
* @throws IOException
* from underlying FileSystem
*/
public static boolean isExists(final FileSystem fs, final Path path) throws IOException {
return fs.exists(path);
} | 3.26 |
hbase_CommonFSUtils_create_rdh | /**
* Create the specified file on the filesystem. By default, this will:
* <ol>
* <li>apply the umask in the configuration (if it is enabled)</li>
* <li>use the fs configured buffer size (or 4096 if not set)</li>
* <li>use the default replication</li>
* <li>use the default block size</li>
* <li>not track progress</li>
* </ol>
*
* @param fs
* {@link FileSystem} on which to write the file
* @param path
* {@link Path} to the file to write
* @param perm
* initial permissions
* @param overwrite
* Whether or not the created file should be overwritten.
* @return output stream to the created file
* @throws IOException
* if the file cannot be created
*/
public static FSDataOutputStream create(FileSystem fs, Path path, FsPermission perm, boolean overwrite) throws IOException {
if (LOG.isTraceEnabled()) {
LOG.trace("Creating file={} with permission={}, overwrite={}", path, perm, overwrite);
}
return fs.create(path, perm, overwrite, getDefaultBufferSize(fs), getDefaultReplication(fs, path), getDefaultBlockSize(fs, path), null);
} | 3.26 |
hbase_CommonFSUtils_invokeSetStoragePolicy_rdh | /* All args have been checked and are good. Run the setStoragePolicy invocation. */
private static void invokeSetStoragePolicy(final FileSystem fs, final Path path, final String storagePolicy) throws IOException {
Exception toThrow = null;
try {
fs.setStoragePolicy(path, storagePolicy);
LOG.debug("Set storagePolicy={} for path={}", storagePolicy, path);
} catch (Exception e) {
toThrow = e;
// This swallows FNFE, should we be throwing it? seems more likely to indicate dev
// misuse than a runtime problem with HDFS.
if (!warningMap.containsKey(fs)) {
warningMap.put(fs, true);
LOG.warn((((("Unable to set storagePolicy=" + storagePolicy) + " for path=") + path) + ". ") + "DEBUG log level might have more details.", e);
} else if (LOG.isDebugEnabled()) {
LOG.debug((("Unable to set storagePolicy=" + storagePolicy) + " for path=") + path, e);
}
// Hadoop 2.8+, 3.0-a1+ added FileSystem.setStoragePolicy with a default implementation
// that throws UnsupportedOperationException
if (e instanceof UnsupportedOperationException) {
if (LOG.isDebugEnabled()) {
LOG.debug((((((("The underlying FileSystem implementation doesn't support " + "setStoragePolicy. This is probably intentional on their part, since HDFS-9345 ") + "appears to be present in your version of Hadoop. For more information check ") + "the Hadoop documentation on 'ArchivalStorage', the Hadoop FileSystem ") +
"specification docs from HADOOP-11981, and/or related documentation from the ") + "provider of the underlying FileSystem (its name should appear in the ") + "stacktrace that accompanies this message). Note in particular that Hadoop's ") + "local filesystem implementation doesn't support storage policies.", e);
}
}
}
if (toThrow != null) {
throw new IOException(toThrow);
}
}
hbase_CommonFSUtils_getDirUri_rdh | /**
* Returns the URI in the string format
*
* @param c
* configuration
* @param p
* path
* @return - the URI's to string format
*/
public static String getDirUri(final Configuration c, Path p) throws IOException {
if (p.toUri().getScheme() != null) {
return p.toUri().toString();
}
return null;
} | 3.26 |
hbase_CommonFSUtils_validateRootPath_rdh | /**
* Verifies root directory path is a valid URI with a scheme
*
* @param root
* root directory path
* @return Passed <code>root</code> argument.
* @throws IOException
* if not a valid URI with a scheme
*/
public static Path validateRootPath(Path root) throws IOException {
try {
URI rootURI = new URI(root.toString());
String scheme = rootURI.getScheme();
if (scheme == null) {
throw new IOException("Root directory does not have a scheme");
}
return root;
} catch (URISyntaxException e) {
throw new IOException((("Root directory path is not a valid " + "URI -- check your ") + HConstants.HBASE_DIR) + " configuration", e);
}
} | 3.26 |
hbase_CommonFSUtils_isRecoveredEdits_rdh | /**
* Checks if the given path is the one with 'recovered.edits' dir.
*
* @param path
* must not be null
* @return True if we recovered edits
*/
public static boolean isRecoveredEdits(Path path) {
return path.toString().contains(HConstants.RECOVERED_EDITS_DIR);
} | 3.26 |
hbase_CommonFSUtils_getPath_rdh | /**
* Return the 'path' component of a Path. In Hadoop, Path is a URI. This method returns the 'path'
* component of a Path's URI: e.g. If a Path is
* <code>hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir</code>, this method returns
* <code>/hbase_trunk/TestTable/compaction.dir</code>. This method is useful if you want to print
* out a Path without qualifying Filesystem instance.
*
* @param p
* Filesystem Path whose 'path' component we are to return.
* @return Path portion of the Filesystem
*/
public static String getPath(Path p) {
return p.toUri().getPath();
} | 3.26 |
hbase_CommonFSUtils_getTableDir_rdh | /**
* Returns the {@link org.apache.hadoop.fs.Path} object representing the table directory under
* path rootdir
*
* @param rootdir
* qualified path of HBase root directory
* @param tableName
* name of table
* @return {@link org.apache.hadoop.fs.Path} for table
*/
public static Path getTableDir(Path rootdir, final TableName tableName) {
return new Path(getNamespaceDir(rootdir, tableName.getNamespaceAsString()), tableName.getQualifierAsString());
}
/**
* Returns the {@link org.apache.hadoop.fs.Path} object representing the region directory under
* path rootdir
*
* @param rootdir
* qualified path of HBase root directory
* @param tableName
* name of table
* @param regionName
* The encoded region name
* @return {@link org.apache.hadoop.fs.Path} | 3.26 |
hbase_CommonFSUtils_m1_rdh | /**
* Sets storage policy for given path. If the passed path is a directory, we'll set the storage
* policy for all files created in the future in said directory. Note that this change in storage
* policy takes place at the FileSystem level; it will persist beyond this RS's lifecycle. If
* we're running on a version of FileSystem that doesn't support the given storage policy (or
* storage policies at all), then we'll issue a log message and continue. See
* http://hadoop.apache.org/docs/r2.6.0/hadoop-project-dist/hadoop-hdfs/ArchivalStorage.html
*
* @param fs
* We only do anything if it implements a setStoragePolicy method
* @param path
* the Path whose storage policy is to be set
* @param storagePolicy
* Policy to set on <code>path</code>; see hadoop 2.6+
* org.apache.hadoop.hdfs.protocol.HdfsConstants for possible list e.g
* 'COLD', 'WARM', 'HOT', 'ONE_SSD', 'ALL_SSD', 'LAZY_PERSIST'.
*/
public static void m1(final FileSystem fs, final Path path, final String storagePolicy) {
try {
setStoragePolicy(fs, path, storagePolicy, false);
} catch (IOException e) {
// should never arrive here
LOG.warn("We have chosen not to throw exception but some unexpectedly thrown out", e);
}
} | 3.26 |
hbase_CommonFSUtils_logFSTree_rdh | /**
* Recursive helper to log the state of the FS
*
* @see #logFileSystemState(FileSystem, Path, Logger)
*/
private static void logFSTree(Logger log, final FileSystem fs, final Path root, String prefix) throws IOException {
FileStatus[] files = listStatus(fs, root, null);
if (files == null) {
return;
}
for (FileStatus file : files) {
if (file.isDirectory()) {
log.debug((prefix + file.getPath().getName()) + "/");
logFSTree(log, fs, file.getPath(), prefix + "---");
} else {
log.debug(prefix + file.getPath().getName());
}
}
} | 3.26 |
hbase_CommonFSUtils_getRootDir_rdh | /**
* Get the path for the root data directory
*
* @param c
* configuration
* @return {@link Path} to hbase root directory from configuration as a qualified Path.
* @throws IOException
* e
*/
public static Path getRootDir(final Configuration c) throws IOException {
Path p = new Path(c.get(HConstants.HBASE_DIR));
FileSystem fs = p.getFileSystem(c);
return p.makeQualified(fs.getUri(), fs.getWorkingDirectory());
} | 3.26 |
hbase_CommonFSUtils_checkShortCircuitReadBufferSize_rdh | /**
* Check if short circuit read buffer size is set and if not, set it to hbase value.
*
* @param conf
* must not be null
*/
public static void checkShortCircuitReadBufferSize(final Configuration conf) {
final int defaultSize = HConstants.DEFAULT_BLOCKSIZE * 2;
final int notSet = -1;
// DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_BUFFER_SIZE_KEY is only defined in h2
final String dfsKey = "dfs.client.read.shortcircuit.buffer.size";
int size = conf.getInt(dfsKey, notSet);
// If a size is set, return -- we will use it.
if (size != notSet) {
return;
}
// But short circuit buffer size is normally not set. Put in place the hbase wanted size.
int hbaseSize = conf.getInt("hbase." + dfsKey, defaultSize);
conf.setIfUnset(dfsKey, Integer.toString(hbaseSize));
} | 3.26 |
hbase_CommonFSUtils_isMatchingTail_rdh | /**
* Compare path component of the Path URI; e.g. if hdfs://a/b/c and /a/b/c, it will compare the
* '/a/b/c' part. If you passed in 'hdfs://a/b/c and b/c, it would return true. Does not consider
* schema; i.e. if schemas different but path or subpath matches, the two will equate.
*
* @param pathToSearch
* Path we will be trying to match against
* @param pathTail
* what to match
* @return True if <code>pathTail</code> is tail on the path of <code>pathToSearch</code>
*/
public static boolean isMatchingTail(final Path pathToSearch, final Path pathTail) {
if (pathToSearch.depth() != pathTail.depth()) {
return false;
}
Path tailPath = pathTail;
String tailName;
Path toSearch = pathToSearch;
String toSearchName;
boolean result = false;
do {
tailName = tailPath.getName();
if ((tailName == null) || (tailName.length() <= 0)) {
result = true;
break;
}
toSearchName = toSearch.getName();
if ((toSearchName == null) || (toSearchName.length() <= 0)) {
break;
}
// Move up a parent on each path for next go around. Path doesn't let us go off the end.
tailPath = tailPath.getParent();
toSearch = toSearch.getParent();
} while (tailName.equals(toSearchName) );
return result;
} | 3.26 |
hbase_CommonFSUtils_deleteDirectory_rdh | /**
* Delete if exists.
*
* @param fs
* filesystem object
* @param dir
* directory to delete
* @return True if deleted <code>dir</code>
* @throws IOException
* e
*/
public static boolean deleteDirectory(final FileSystem fs, final Path dir) throws IOException {
return fs.exists(dir) && fs.delete(dir, true);
} | 3.26 |
hbase_CommonFSUtils_logFileSystemState_rdh | /**
* Log the current state of the filesystem from a certain root directory
*
* @param fs
* filesystem to investigate
* @param root
* root file/directory to start logging from
* @param log
* log to output information
* @throws IOException
* if an unexpected exception occurs
*/
public static void logFileSystemState(final FileSystem fs, final Path root, Logger log) throws IOException {
log.debug("File system contents for path {}", root);
logFSTree(log, fs, root, "|-");
} | 3.26 |
hbase_CommonFSUtils_getTableName_rdh | /**
* Returns the {@link org.apache.hadoop.hbase.TableName} object representing the table directory
* under path rootdir
*
* @param tablePath
* path of table
* @return {@link org.apache.hadoop.hbase.TableName} for table
*/
public static TableName getTableName(Path tablePath) {
return TableName.valueOf(tablePath.getParent().getName(), tablePath.getName());
}
/**
* Returns the {@link org.apache.hadoop.fs.Path} object representing the namespace directory under
* path rootdir
*
* @param rootdir
* qualified path of HBase root directory
* @param namespace
* namespace name
* @return {@link org.apache.hadoop.fs.Path} | 3.26 |
hbase_CommonFSUtils_m0_rdh | /**
* Compare of path component. Does not consider schema; i.e. if schemas different but
* <code>path</code> starts with <code>rootPath</code>, then the function returns true
*
* @param rootPath
* value to check for
* @param path
* subject to check
* @return True if <code>path</code> starts with <code>rootPath</code>
*/
public static boolean m0(final Path rootPath, final String path) {
String uriRootPath = rootPath.toUri().getPath();
String tailUriPath = new Path(path).toUri().getPath();
return tailUriPath.startsWith(uriRootPath);
} | 3.26 |
hbase_CommonFSUtils_getWALTableDir_rdh | /**
* Returns the Table directory under the WALRootDir for the specified table name
*
* @param conf
* configuration used to get the WALRootDir
* @param tableName
* Table to get the directory for
* @return a path to the WAL table directory for the specified table
* @throws IOException
* if there is an exception determining the WALRootDir
*/
public static Path getWALTableDir(final Configuration conf, final TableName tableName) throws IOException {
Path baseDir = new Path(getWALRootDir(conf), HConstants.BASE_NAMESPACE_DIR);
return new Path(new Path(baseDir, tableName.getNamespaceAsString()), tableName.getQualifierAsString());
} | 3.26 |
hbase_CommonFSUtils_getCurrentFileSystem_rdh | /**
* Returns the filesystem of the hbase rootdir.
*
* @throws IOException
* from underlying FileSystem
*/
public static FileSystem getCurrentFileSystem(Configuration conf) throws IOException {
return getRootDir(conf).getFileSystem(conf);
} | 3.26 |
hbase_CommonFSUtils_getDefaultBufferSize_rdh | /**
* Returns the default buffer size to use during writes. The size of the buffer should probably be
* a multiple of hardware page size (4096 on Intel x86), and it determines how much data is
* buffered during read and write operations.
*
* @param fs
* filesystem object
* @return default buffer size to use during writes
*/
public static int getDefaultBufferSize(final FileSystem fs) {
return fs.getConf().getInt("io.file.buffer.size", 4096);
} | 3.26 |
hbase_DirScanPool_onConfigurationChange_rdh | /**
* Checks if pool can be updated. If so, mark for update later.
*
* @param conf
* configuration
*/
@Override
public synchronized void onConfigurationChange(Configuration conf) {
int newSize = CleanerChore.calculatePoolSize(conf.get(f0.cleanerPoolSizeConfigName, f0.cleanerPoolSizeConfigDefault));
if (newSize == size) {
LOG.trace("{} Cleaner Size from configuration is same as previous={}, no need to update.", name, newSize);
return;
}
size = newSize;
// Chore is working, update it later.
reconfigNotification = true;
} | 3.26 |
hbase_SegmentScanner_getHighest_rdh | /**
* Private internal method that returns the higher of the two key values, or null if they are both
* null
*/
private Cell getHighest(Cell first, Cell second) {
if ((first == null) && (second == null)) {
return null;
}
if ((first != null) && (second != null)) {
int compare = segment.compare(first, second);
return compare > 0 ? first : second;
}
return first != null ? first : second;
} | 3.26 |
hbase_SegmentScanner_reseek_rdh | /**
* Reseek the scanner at or after the specified KeyValue. This method is guaranteed to seek at or
* after the required key only if the key comes after the current position of the scanner. Should
* not be used to seek to a key which may come before the current position.
*
* @param cell
* seek value (should be non-null)
* @return true if scanner has values left, false if end of scanner
*/
@Override
public boolean reseek(Cell cell) throws IOException {
if (closed) {
return false;
}
/* See HBASE-4195 & HBASE-3855 & HBASE-6591 for the background on this implementation. This code
is executed concurrently with flush and puts, without locks. The ideal implementation for
performance would use the sub skip list implicitly pointed by the iterator. Unfortunately the
Java API does not offer a method to get it. So we remember the last keys we iterated to and
restore the reseeked set to at least that point.
*/
f0 = getIterator(getHighest(cell, last));
updateCurrent();
return current != null;
} | 3.26 |
hbase_SegmentScanner_toString_rdh | // debug method
@Override
public String toString() {
String res = ("Store segment scanner of type " + this.getClass().getName()) + "; ";
res += ("Scanner order " + getScannerOrder()) + "; ";
res += getSegment().toString();
return res;
} | 3.26 |
hbase_SegmentScanner_realSeekDone_rdh | /**
* This scanner is working solely on the in-memory MemStore and doesn't work on store files,
* MutableCellSetSegmentScanner always does the seek, therefore always returning true.
*/
@Override
public boolean realSeekDone() {
return true;
} | 3.26 |
hbase_SegmentScanner_seek_rdh | /**
* Seek the scanner at or after the specified Cell.
*
* @param cell
* seek value
* @return true if scanner has values left, false if end of scanner
*/
@Override
public boolean seek(Cell cell) throws IOException {
if (closed) {
return false;
}
if (cell == null) {
close();
return false;
}
// restart the iterator from new key
f0 = getIterator(cell);
// last is going to be reinitialized in the next getNext() call
last = null;
updateCurrent();
return current != null;
} | 3.26 |
hbase_SegmentScanner_getSegment_rdh | /**
* ******************* Private Methods *********************
*/
private Segment getSegment() {
return segment;
} | 3.26 |
hbase_SegmentScanner_backwardSeek_rdh | /**
* Seek the scanner at or before the row of specified Cell, it firstly tries to seek the scanner
* at or after the specified Cell, return if peek KeyValue of scanner has the same row with
* specified Cell, otherwise seek the scanner at the first Cell of the row which is the previous
* row of specified KeyValue
*
* @param key
* seek Cell
* @return true if the scanner is at the valid KeyValue, false if such Cell does not exist
*/
@Override
public boolean backwardSeek(Cell key) throws IOException {
if (closed) {
return false;
}
seek(key);// seek forward then go backward
if ((m0() == null) || (segment.compareRows(m0(), key) > 0)) {
return seekToPreviousRow(key);
}
return true;
} | 3.26 |
hbase_SegmentScanner_shipped_rdh | /**
* Called after a batch of rows scanned (RPC) and set to be returned to client. Any in between
* cleanup can be done here. Nothing to be done for MutableCellSetSegmentScanner.
*/
@Override
public void shipped() throws IOException {
// do nothing
} | 3.26 |
hbase_SegmentScanner_m0_rdh | /**
* Look at the next Cell in this scanner, but do not iterate the scanner
*
* @return the currently observed Cell
*/
@Override
public Cell m0() {
// sanity check, the current should be always valid
if (closed) {
return null;
}
if ((current != null) && (current.getSequenceId() > readPoint)) {
throw new RuntimeException(((("current is invalid: read point is " + readPoint) + ", ") + "while current sequence id is ") + current.getSequenceId());
}
return current;
} | 3.26 |
hbase_SegmentScanner_seekToLastRow_rdh | /**
* Seek the scanner at the first KeyValue of last row
*
* @return true if scanner has values left, false if the underlying data is empty
*/
@Override
public boolean seekToLastRow() throws IOException {
if (closed) {
return false;
}
Cell v7 = (segment.isEmpty()) ? null : segment.last();
if (v7 == null) {
return false;
}
Cell firstCellOnLastRow = PrivateCellUtil.createFirstOnRow(v7);
if (seek(firstCellOnLastRow)) {
return true;
} else {
return seekToPreviousRow(v7);
}
} | 3.26 |
hbase_SegmentScanner_updateCurrent_rdh | /**
* Private internal method for iterating over the segment, skipping the cells with irrelevant MVCC
*/
protected void updateCurrent() {
Cell next = null;
try {
while (f0.hasNext()) {
next = f0.next();
if (next.getSequenceId() <= this.readPoint) {
current = next;
return;// skip irrelevant versions
}
// for backwardSeek() stay in the boundaries of a single row
if (stopSkippingKVsIfNextRow && (segment.compareRows(next, stopSkippingKVsRow) > 0)) {
current = null;
return;
}
} // end of while
current = null; // nothing found
} finally {
if (next != null) {
// in all cases, remember the last KV we iterated to, needed for reseek()
last = next;
}
}
} | 3.26 |
hbase_SegmentScanner_isFileScanner_rdh | /**
* Returns true if this is a file scanner. Otherwise a memory scanner is assumed.
*/
@Override
public boolean isFileScanner() {
return false;
} | 3.26 |
hbase_SegmentScanner_close_rdh | /**
* Close the KeyValue scanner.
*/
@Override
public void close() {
if (closed) {
return;
}
getSegment().decScannerCount();
closed = true;
} | 3.26 |
hbase_SegmentScanner_enforceSeek_rdh | /**
* This function should be never called on scanners that always do real seek operations (i.e. most
* of the scanners and also this one). The easiest way to achieve this is to call
* {@link #realSeekDone()} first.
*/
@Override
public void enforceSeek() throws IOException {
throw new NotImplementedException("enforceSeek cannot be called on a SegmentScanner");
} | 3.26 |
hbase_SegmentScanner_shouldUseScanner_rdh | /**
* This functionality should be resolved in the higher level which is MemStoreScanner, currently
* returns true as default. Doesn't throw IllegalStateException in order not to change the
* signature of the overridden method
*/
@Override
public boolean shouldUseScanner(Scan scan, HStore store, long oldestUnexpiredTS) {
return getSegment().shouldSeek(scan.getColumnFamilyTimeRange().getOrDefault(store.getColumnFamilyDescriptor().getName(), scan.getTimeRange()), oldestUnexpiredTS);
} | 3.26 |
hbase_SnapshotDescriptionUtils_getCorruptedFlagFileForSnapshot_rdh | /**
* Get the flag file path if the snapshot is corrupted
*
* @param workingDir
* the directory where we build the specific snapshot
* @return {@link Path} snapshot corrupted flag file path
*/
public static Path getCorruptedFlagFileForSnapshot(final Path workingDir) {
return new Path(workingDir, SNAPSHOT_CORRUPTED_FILE);
} | 3.26 |
hbase_SnapshotDescriptionUtils_getSpecifiedSnapshotDir_rdh | /**
* Get the directory within the given filepath to store the snapshot instance
*
* @param snapshotsDir
* directory to store snapshot directory within
* @param snapshotName
* name of the snapshot to take
* @return the final directory for the snapshot in the given filepath
*/
private static final Path getSpecifiedSnapshotDir(final Path snapshotsDir, String snapshotName) {
return new Path(snapshotsDir, snapshotName);
} | 3.26 |
hbase_SnapshotDescriptionUtils_getCompletedSnapshotDir_rdh | /**
* Get the directory for a completed snapshot. This directory is a sub-directory of snapshot root
* directory and all the data files for a snapshot are kept under this directory.
*
* @param snapshotName
* name of the snapshot being taken
* @param rootDir
* hbase root directory
* @return the final directory for the completed snapshot
*/
public static Path getCompletedSnapshotDir(final String snapshotName, final Path rootDir) {
return getSpecifiedSnapshotDir(getSnapshotsDir(rootDir), snapshotName);
} | 3.26 |
hbase_SnapshotDescriptionUtils_getSnapshotsDir_rdh | /**
*
* @param rootDir
* hbase root directory
* @return the directory for all completed snapshots;
*/
public static final Path getSnapshotsDir(Path rootDir) {
return new Path(rootDir, HConstants.SNAPSHOT_DIR_NAME);
} | 3.26 |
hbase_SnapshotDescriptionUtils_isExpiredSnapshot_rdh | /**
* Method to check whether TTL has expired for specified snapshot creation time and snapshot ttl.
* NOTE: For backward compatibility (after the patch deployment on HMaster), any snapshot with ttl
* 0 is to be considered as snapshot to keep FOREVER. Default ttl value specified by
* {@link HConstants#DEFAULT_SNAPSHOT_TTL}
*
* @return true if ttl has expired, or, false, otherwise
*/
public static boolean isExpiredSnapshot(long snapshotTtl, long snapshotCreatedTime, long currentTime) {
return (((snapshotCreatedTime > 0) && (snapshotTtl > HConstants.DEFAULT_SNAPSHOT_TTL)) && (snapshotTtl < TimeUnit.MILLISECONDS.toSeconds(Long.MAX_VALUE))) && ((snapshotCreatedTime + TimeUnit.SECONDS.toMillis(snapshotTtl)) < currentTime);
} | 3.26 |
hbase_SnapshotDescriptionUtils_getDefaultWorkingSnapshotDir_rdh | /**
* Get the default working directory for snapshots - where they are built, where they are
* temporarily copied on export, etc.
*
* @param rootDir
* root directory of the HBase installation
* @return Path to the default snapshot tmp directory, relative to the passed root directory
*/
private static Path getDefaultWorkingSnapshotDir(final Path rootDir) {
return new Path(getSnapshotsDir(rootDir), SNAPSHOT_TMP_DIR_NAME);
} | 3.26 |
hbase_SnapshotDescriptionUtils_readSnapshotInfo_rdh | /**
* Read in the {@link SnapshotDescription} stored for the snapshot in the passed directory
*
* @param fs
* filesystem where the snapshot was taken
* @param snapshotDir
* directory where the snapshot was stored
* @return the stored snapshot description
* @throws CorruptedSnapshotException
* if the snapshot cannot be read
*/
public static SnapshotDescription readSnapshotInfo(FileSystem fs, Path snapshotDir) throws CorruptedSnapshotException {
Path snapshotInfo = new Path(snapshotDir, SNAPSHOTINFO_FILE);
try (FSDataInputStream in = fs.open(snapshotInfo)) {
return SnapshotDescription.parseFrom(in);
} catch (IOException e) {
throw new CorruptedSnapshotException("Couldn't read snapshot info from:" + snapshotInfo, e);
}
} | 3.26 |
hbase_SnapshotDescriptionUtils_getSnapshotRootDir_rdh | /**
* Get the snapshot root directory. All the snapshots are kept under this directory, i.e.
* ${hbase.rootdir}/.snapshot
*
* @param rootDir
* hbase root directory
* @return the base directory in which all snapshots are kept
*/
public static Path getSnapshotRootDir(final Path rootDir) {
return new Path(rootDir, HConstants.SNAPSHOT_DIR_NAME);
} | 3.26 |
hbase_SnapshotDescriptionUtils_completeSnapshot_rdh | /**
* Commits the snapshot process by moving the working snapshot to the finalized filepath
*
* @param snapshotDir
* The file path of the completed snapshots
* @param workingDir
* The file path of the in progress snapshots
* @param fs
* The file system of the completed snapshots
* @param workingDirFs
* The file system of the in progress snapshots
* @param conf
* Configuration
* @throws SnapshotCreationException
* if the snapshot could not be moved
* @throws IOException
* the filesystem could not be reached
*/
public static void completeSnapshot(Path snapshotDir, Path workingDir, FileSystem fs, FileSystem workingDirFs, final Configuration conf) throws SnapshotCreationException, IOException {
LOG.debug((("Sentinel is done, just moving the snapshot from " + workingDir) + " to ") + snapshotDir);
// If the working and completed snapshot directory are on the same file system, attempt
// to rename the working snapshot directory to the completed location. If that fails,
// or the file systems differ, attempt to copy the directory over, throwing an exception
// if this fails
URI workingURI = workingDirFs.getUri();
URI rootURI = fs.getUri();
if ((shouldSkipRenameSnapshotDirectories(workingURI, rootURI) || (!fs.rename(workingDir, snapshotDir))) && (!FileUtil.copy(workingDirFs, workingDir, fs, snapshotDir, true, true, conf))) {
throw new SnapshotCreationException(((("Failed to copy working directory(" + workingDir) + ") to completed directory(") + snapshotDir) + ").");
}
} | 3.26 |
hbase_SnapshotDescriptionUtils_getMaxMasterTimeout_rdh | /**
*
* @param conf
* {@link Configuration} from which to check for the timeout
* @param type
* type of snapshot being taken
* @param defaultMaxWaitTime
* Default amount of time to wait, if none is in the configuration
* @return the max amount of time the master should wait for a snapshot to complete
*/
public static long getMaxMasterTimeout(Configuration conf, SnapshotDescription.Type type, long defaultMaxWaitTime) {
String confKey;
switch (type) {
case DISABLED :
default :
confKey = f0;
}
return Math.max(conf.getLong(confKey, defaultMaxWaitTime), conf.getLong(f0, defaultMaxWaitTime));
} | 3.26 |
hbase_SnapshotDescriptionUtils_validate_rdh | /**
* Convert the passed snapshot description into a 'full' snapshot description based on default
* parameters, if none have been supplied. This resolves any 'optional' parameters that aren't
* supplied to their default values.
*
* @param snapshot
* general snapshot descriptor
* @param conf
* Configuration to read configured snapshot defaults if snapshot is not complete
* @return a valid snapshot description
* @throws IllegalArgumentException
* if the {@link SnapshotDescription} is not a complete
* {@link SnapshotDescription}.
*/
public static SnapshotDescription validate(SnapshotDescription snapshot, Configuration conf) throws IllegalArgumentException, IOException {
if (!snapshot.hasTable()) {
throw new IllegalArgumentException("Descriptor doesn't apply to a table, so we can't build it.");
}
SnapshotDescription.Builder builder = snapshot.toBuilder();
// set the creation time, if one hasn't been set
long time = snapshot.getCreationTime();
if (time == SnapshotDescriptionUtils.NO_SNAPSHOT_START_TIME_SPECIFIED) {
time = EnvironmentEdgeManager.currentTime();
LOG.debug(((("Creation time not specified, setting to:" + time) + " (current time:") + EnvironmentEdgeManager.currentTime()) + ").");
builder.setCreationTime(time);
}
long ttl = snapshot.getTtl();
// set default ttl(sec) if it is not set already or the value is out of the range
if ((ttl == SnapshotDescriptionUtils.NO_SNAPSHOT_TTL_SPECIFIED) || (ttl > TimeUnit.MILLISECONDS.toSeconds(Long.MAX_VALUE))) {
final long v5 = conf.getLong(HConstants.DEFAULT_SNAPSHOT_TTL_CONFIG_KEY, HConstants.DEFAULT_SNAPSHOT_TTL);
if (LOG.isDebugEnabled()) {
LOG.debug("Snapshot current TTL value: {} resetting it to default value: {}", ttl, v5);
}
ttl = v5;
}
builder.setTtl(ttl);
if (!snapshot.hasVersion()) {
builder.setVersion(SnapshotDescriptionUtils.SNAPSHOT_LAYOUT_VERSION);
LOG.debug("Snapshot {} VERSION not specified, setting to {}", snapshot.getName(), SnapshotDescriptionUtils.SNAPSHOT_LAYOUT_VERSION);
}
RpcServer.getRequestUser().ifPresent(user -> {
if (AccessChecker.isAuthorizationSupported(conf)) {
builder.setOwner(user.getShortName());
LOG.debug("Set {} as owner of Snapshot", user.getShortName());
}
});
snapshot = builder.build();
// set the acl to snapshot if security feature is enabled.
if (isSecurityAvailable(conf)) {
snapshot = writeAclToSnapshotDescription(snapshot, conf);
}
return snapshot;
}
/**
* Write the snapshot description into the working directory of a snapshot
*
* @param snapshot
* description of the snapshot being taken
* @param workingDir
* working directory of the snapshot
* @param fs
* {@link FileSystem} | 3.26 |
hbase_SnapshotDescriptionUtils_getWorkingSnapshotDir_rdh | /**
* Get the directory to build a snapshot, before it is finalized
*
* @param snapshotName
* name of the snapshot
* @param rootDir
* root directory of the hbase installation
* @param conf
* Configuration of the HBase instance
* @return {@link Path} where one can build a snapshot
*/
public static Path getWorkingSnapshotDir(String snapshotName, final Path rootDir, Configuration conf) {
return getSpecifiedSnapshotDir(getWorkingSnapshotDir(rootDir, conf), snapshotName);
} | 3.26 |
hbase_Server_getFileSystem_rdh | /**
* Returns the FileSystem object used (can return null!).
*/
// TODO: Distinguish between "dataFs" and "walFs".
default FileSystem getFileSystem() {
// This default is pretty dodgy!
Configuration c = getConfiguration();
FileSystem fs = null;
try {
if (c != null) {
fs = FileSystem.get(c);
}
} catch (IOException e) {
// If an exception, just return null
}
return fs;
} | 3.26 |
hbase_Server_isStopping_rdh | /**
* Returns true if the server is stopping
*/
// Note: This method is not part of the Stoppable Interface.
default boolean isStopping() {
return false;
} | 3.26 |
hbase_Server_getConnection_rdh | /**
* Returns a reference to the servers' connection. Important note: this method returns a reference
* to Connection which is managed by Server itself, so callers must NOT attempt to close
* connection obtained.
*/
default Connection getConnection() {
return getAsyncConnection().toConnection();
} | 3.26 |
hbase_Server_getAsyncConnection_rdh | /**
* Returns a reference to the servers' async connection.
* <p/>
* Important note: this method returns a reference to Connection which is managed by Server
* itself, so callers must NOT attempt to close connection obtained.
*/
default AsyncConnection getAsyncConnection() {
return getAsyncClusterConnection();
} | 3.26 |
hbase_SyncFuture_reset_rdh | /**
* Call this method to clear old usage and get it ready for new deploy.
*
* @param txid
* the new transaction id
*/
SyncFuture reset(long txid, boolean forceSync) {
if ((t != null) && (t != Thread.currentThread())) {
throw new IllegalStateException();
}
t = Thread.currentThread();
if (!isDone()) {
throw new IllegalStateException((("" + txid) + " ") + Thread.currentThread());
}
this.doneTxid = NOT_DONE;
this.forceSync = forceSync;
this.txid = txid;
this.throwable = null;
return this;
} | 3.26 |
hbase_SyncFuture_getThread_rdh | /**
* Returns the thread that owned this sync future, use with caution as we return the reference to
* the actual thread object.
*
* @return the associated thread instance.
*/
Thread getThread() {
return t;
} | 3.26 |
hbase_FileLink_getAvailablePath_rdh | /**
* Returns the path of the first available link.
*/
public Path getAvailablePath(FileSystem fs) throws IOException {
for (int v10 = 0; v10 < locations.length; ++v10) {
if (fs.exists(locations[v10])) {
return locations[v10];
}
}
throw new FileNotFoundException(toString());
}
/**
* Get the FileStatus of the referenced file.
*
* @param fs
* {@link FileSystem} | 3.26 |
hbase_FileLink_getBackReferencesDir_rdh | /**
* Get the directory to store the link back references
* <p>
* To simplify the reference count process, during the FileLink creation a back-reference is added
* to the back-reference directory of the specified file.
*
* @param storeDir
* Root directory for the link reference folder
* @param fileName
* File Name with links
* @return Path for the link back references.
*/
public static Path getBackReferencesDir(final Path storeDir, final String fileName) {
return new Path(storeDir, BACK_REFERENCES_DIRECTORY_PREFIX + fileName);
} | 3.26 |
hbase_FileLink_getBackReferenceFileName_rdh | /**
* Get the referenced file name from the reference link directory path.
*
* @param dirPath
* Link references directory path
* @return Name of the file referenced
*/
public static String getBackReferenceFileName(final Path dirPath) {
return dirPath.getName().substring(BACK_REFERENCES_DIRECTORY_PREFIX.length());
} | 3.26 |
hbase_FileLink_setLocations_rdh | /**
* NOTE: This method must be used only in the constructor! It creates a List with the specified
* locations for the link.
*/
protected void setLocations(Path originPath, Path... alternativePaths) {
assert this.locations == null : "Link locations already set";
List<Path> paths = new ArrayList<>(alternativePaths.length + 1);
if (originPath != null) {
paths.add(originPath);
}
for (int i = 0; i < alternativePaths.length; i++) {
if (alternativePaths[i] != null) {
paths.add(alternativePaths[i]);
}
}
this.locations = paths.toArray(new Path[0]);
} | 3.26 |
hbase_FileLink_isBackReferencesDir_rdh | /**
* Checks if the specified directory path is a back reference links folder.
*
* @param dirPath
* Directory path to verify
* @return True if the specified directory is a link references folder
*/
public static boolean isBackReferencesDir(final Path dirPath) {
if (dirPath == null) {
return false;
}
return dirPath.getName().startsWith(BACK_REFERENCES_DIRECTORY_PREFIX);
} | 3.26 |
hbase_FileLink_exists_rdh | /**
* Returns true if the file pointed by the link exists
*/
public boolean exists(final FileSystem fs) throws IOException {
for (int i = 0; i < locations.length; ++i) {
if (fs.exists(locations[i])) {
return true;
}
}
return false;
} | 3.26 |
hbase_FileLink_getLocations_rdh | /**
* Returns the locations to look for the linked file.
*/
public Path[] getLocations() {
return locations;
} | 3.26 |
hbase_FileLink_open_rdh | /**
* Open the FileLink for read.
* <p>
* It uses a wrapper of FSDataInputStream that is agnostic to the location of the file, even if
* the file switches between locations.
*
* @param fs
* {@link FileSystem} on which to open the FileLink
* @param bufferSize
* the size of the buffer to be used.
* @return InputStream for reading the file link.
* @throws IOException
* on unexpected error.
*/
public FSDataInputStream open(final FileSystem fs, int bufferSize) throws IOException {
return new FSDataInputStream(new FileLinkInputStream(fs, this, bufferSize));
} | 3.26 |
hbase_FileLink_tryOpen_rdh | /**
* Try to open the file from one of the available locations.
*
* @return FSDataInputStream stream of the opened file link
* @throws IOException
* on unexpected error, or file not found.
*/
private FSDataInputStream tryOpen() throws IOException {
IOException exception = null;
for (Path path : fileLink.getLocations()) {
if (path.equals(currentPath))
continue;
try {
in = fs.open(path, bufferSize);
if (pos != 0)
in.seek(pos);
assert in.getPos() == pos : "Link unable to seek to the right position=" + pos;
if (LOG.isTraceEnabled()) {
if (currentPath == null) {
LOG.debug("link open path=" + path);
} else {
LOG.trace((("link switch from path=" + currentPath) + " to path=") + path);
}
}
currentPath = path;
return in;
} catch (FileNotFoundException | AccessControlException | RemoteException e) {
exception = FileLink.handleAccessLocationException(fileLink, e, exception);
}}
throw exception;
} | 3.26 |
hbase_FileLink_getUnderlyingFileLinkInputStream_rdh | /**
* If the passed FSDataInputStream is backed by a FileLink, returns the underlying InputStream for
* the resolved link target. Otherwise, returns null.
*/
public static FSDataInputStream getUnderlyingFileLinkInputStream(FSDataInputStream stream) {
if (stream.getWrappedStream() instanceof FileLinkInputStream) {
return ((FileLinkInputStream) (stream.getWrappedStream())).getUnderlyingInputStream();
}
return null;
} | 3.26 |
hbase_StoreFlusher_performFlush_rdh | /**
* Performs memstore flush, writing data from scanner into sink.
*
* @param scanner
* Scanner to get data from.
* @param sink
* Sink to write data to. Could be StoreFile.Writer.
* @param throughputController
* A controller to avoid flush too fast
*/
protected void performFlush(InternalScanner scanner, CellSink sink, ThroughputController throughputController) throws IOException {
int compactionKVMax = conf.getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT);
ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
List<Cell> kvs = new ArrayList<>();
boolean hasMore;
String flushName = ThroughputControlUtil.getNameForThrottling(store, "flush");
// no control on system table (such as meta, namespace, etc) flush
boolean control = (throughputController != null) && (!store.getRegionInfo().getTable().isSystemTable());
if (control) {
throughputController.start(flushName);
}
try {
do {
hasMore = scanner.next(kvs, scannerContext);
if (!kvs.isEmpty()) {
for (Cell c : kvs) {
sink.append(c);
if (control) {
throughputController.control(flushName, c.getSerializedSize());
}
}
kvs.clear();
}
} while (hasMore );
} catch (InterruptedException e) {
throw new InterruptedIOException("Interrupted while control throughput of flushing " + flushName);
} finally {
if (control) {
throughputController.finish(flushName);
}
}
} | 3.26 |
hbase_StoreFlusher_createScanner_rdh | /**
* Creates the scanner for flushing snapshot. Also calls coprocessors.
*
* @return The scanner; null if coprocessor is canceling the flush.
*/
protected final InternalScanner createScanner(List<KeyValueScanner> snapshotScanners, FlushLifeCycleTracker tracker) throws IOException {
ScanInfo scanInfo;
if (store.getCoprocessorHost() != null) {
scanInfo = store.getCoprocessorHost().preFlushScannerOpen(store, tracker);
} else {
scanInfo = store.getScanInfo();
}
final long smallestReadPoint = store.getSmallestReadPoint();
InternalScanner scanner = new StoreScanner(store, scanInfo, snapshotScanners, ScanType.COMPACT_RETAIN_DELETES, smallestReadPoint, PrivateConstants.OLDEST_TIMESTAMP);
if (store.getCoprocessorHost() != null) {
try {
return store.getCoprocessorHost().preFlush(store, scanner, tracker);
} catch (IOException ioe) {
scanner.close();
throw ioe;
}
}
return scanner;
} | 3.26 |
hbase_ConcurrentMapUtils_computeIfAbsentEx_rdh | /**
* In HBASE-16648 we found that ConcurrentHashMap.get is much faster than computeIfAbsent if the
* value already exists. So here we copy the implementation of
* {@link ConcurrentMap#computeIfAbsent(Object, java.util.function.Function)}. It uses get and
* putIfAbsent to implement computeIfAbsent. And notice that the implementation does not guarantee
* that the supplier will only be executed once.
*/
public static <K, V> V computeIfAbsentEx(ConcurrentMap<K, V> map, K key, IOExceptionSupplier<V> supplier) throws IOException {
V v;
V newValue;
return (((v = map.get(key)) == null) && ((newValue = supplier.get()) != null)) && ((v = map.putIfAbsent(key, newValue)) == null) ? newValue : v;
} | 3.26 |
hbase_ConcurrentMapUtils_computeIfAbsent_rdh | /**
* In HBASE-16648 we found that ConcurrentHashMap.get is much faster than computeIfAbsent if the
* value already exists. Notice that the implementation does not guarantee that the supplier will
* only be executed once.
*/
public static <K, V> V computeIfAbsent(ConcurrentMap<K, V> map, K key, Supplier<V> supplier) {
return computeIfAbsent(map, key, supplier, () -> {
});
} | 3.26 |
hbase_LogRollRegionServerProcedureManager_start_rdh | /**
* Start accepting backup procedure requests.
*/
@Override
public void start() {
if (!BackupManager.isBackupEnabled(rss.getConfiguration())) {
LOG.warn(("Backup is not enabled. Check your " + BackupRestoreConstants.BACKUP_ENABLE_KEY) + " setting");
return;
}
this.memberRpcs.start(rss.getServerName().toString(), member);
started = true;
LOG.info("Started region server backup manager.");
} | 3.26 |