name (string, 12–178 chars) | code_snippet (string, 8–36.5k chars) | score (float64, 3.26–3.68)
---|---|---|
hbase_ReplicationBarrierCleaner_chore_rdh | // Public so can be run out of MasterRpcServices. Synchronized so only one
// running instance at a time.
@Override
public synchronized void chore() {
long totalRows = 0;
long cleanedRows = 0;
long deletedRows = 0;
long deletedBarriers = 0;
long deletedLastPushedSeqIds = 0;
TableName tableName = null;
List<String> peerIds = null;
try (Table metaTable = conn.getTable(TableName.META_TABLE_NAME);ResultScanner scanner = metaTable.getScanner(new Scan().addFamily(HConstants.REPLICATION_BARRIER_FAMILY).readAllVersions())) {
for (; ;) {
Result result = scanner.next();
if (result == null) {
break;
}
totalRows++;
long[] barriers = ReplicationBarrierFamilyFormat.getReplicationBarriers(result);
if (barriers.length == 0) {
continue;
}
byte[] regionName = result.getRow();
TableName tn = RegionInfo.getTable(regionName);
if (!tn.equals(tableName)) {
tableName = tn;
peerIds = peerManager.getSerialPeerIdsBelongsTo(tableName);
}
if (peerIds.isEmpty()) {
// no serial replication
// check if the region has already been removed, i.e, no catalog family
if (metaTable.exists(new Get(regionName).addFamily(HConstants.CATALOG_FAMILY))) {
// exists, then only keep the newest barrier
Cell cell = result.getColumnLatestCell(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER);
metaTable.delete(new Delete(regionName).addFamily(HConstants.REPLICATION_BARRIER_FAMILY, cell.getTimestamp() - 1));
deletedBarriers += barriers.length - 1;
} else {
// not exists, delete all the barriers
metaTable.delete(new Delete(regionName).addFamily(HConstants.REPLICATION_BARRIER_FAMILY));
deletedBarriers += barriers.length;
}
cleanedRows++;
continue;
}
String encodedRegionName = RegionInfo.encodeRegionName(regionName);
long pushedSeqId = Long.MAX_VALUE;
for (String peerId : peerIds) {
pushedSeqId = Math.min(pushedSeqId, peerManager.getQueueStorage().getLastSequenceId(encodedRegionName, peerId));
}
int index = Arrays.binarySearch(barriers, pushedSeqId);
if (index == (-1)) {
// beyond the first barrier, usually this should not happen but anyway let's add a check
// for it.
continue;
}
if (index < 0) {
index = (-index) - 1;
} else {
index++;
}
// A special case for merged/split region, and also deleted tables, where we are in the last
// closed range and the pushedSeqId is the last barrier minus 1.
if ((index == (barriers.length - 1)) && (pushedSeqId == (barriers[barriers.length - 1] - 1))) {
// check if the region has already been removed, i.e, no catalog family
if (!metaTable.exists(new Get(regionName).addFamily(HConstants.CATALOG_FAMILY))) {
ReplicationQueueStorage queueStorage = peerManager.getQueueStorage();
for (String peerId : peerIds) {
queueStorage.removeLastSequenceIds(peerId, Arrays.asList(encodedRegionName));
deletedLastPushedSeqIds++;
}
metaTable.delete(new Delete(regionName).addFamily(HConstants.REPLICATION_BARRIER_FAMILY));
deletedRows++;
deletedBarriers += barriers.length;
continue;
}
}
// the barrier before 'index - 1'(exclusive) can be safely removed. See the algorithm in
// SerialReplicationChecker for more details.
if ((index - 1) > 0) {
List<Cell> v20 = result.getColumnCells(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER);
// All barriers before this cell(exclusive) can be removed
Cell cell = v20.get(v20.size() - index);
metaTable.delete(new Delete(regionName).addFamily(HConstants.REPLICATION_BARRIER_FAMILY, cell.getTimestamp() - 1));
cleanedRows++;
deletedBarriers += index - 1;
}
}
} catch (ReplicationException | IOException e) {
LOG.warn("Failed to clean up replication barrier", e);
}
if (totalRows > 0) {
LOG.info("TotalRows={}, cleanedRows={}, deletedRows={}, deletedBarriers={}, " + "deletedLastPushedSeqIds={}", totalRows, cleanedRows, deletedRows, deletedBarriers, deletedLastPushedSeqIds);
}
} | 3.26 |
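The barrier pruning above hinges on the `Arrays.binarySearch` insertion-point convention (a negative result encodes `-(insertionPoint) - 1`). Below is a minimal standalone sketch of just that index arithmetic on plain arrays, not HBase code, to make the `index == -1` / `index < 0` branches easier to follow:

```java
import java.util.Arrays;

public class BarrierIndexDemo {
  // Mirrors the index arithmetic in chore(): a negative binarySearch result encodes the
  // insertion point as -(insertionPoint) - 1; an exact hit is advanced by one, so 'index'
  // always ends up pointing at the first barrier strictly greater than pushedSeqId.
  static int firstBarrierAfter(long[] barriers, long pushedSeqId) {
    int index = Arrays.binarySearch(barriers, pushedSeqId);
    if (index == -1) {
      return -1; // pushedSeqId is below the first barrier; nothing can be cleaned yet
    }
    return index < 0 ? -index - 1 : index + 1;
  }

  public static void main(String[] args) {
    long[] barriers = {10, 20, 30};
    System.out.println(firstBarrierAfter(barriers, 5));  // -1 (before the first barrier)
    System.out.println(firstBarrierAfter(barriers, 20)); // 2  (exact hit on 20)
    System.out.println(firstBarrierAfter(barriers, 25)); // 2  (between 20 and 30)
  }
}
```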
hbase_CatalogJanitor_getLastReport_rdh | /**
* Returns the last published report from the last successful scan of hbase:meta.
*/
public CatalogJanitorReport getLastReport() {
return this.lastReport;
} | 3.26 |
hbase_CatalogJanitor_hasNoReferences_rdh | /**
*
* @param p
* A pair where the first boolean says whether or not the daughter region directory
* exists in the filesystem and then the second boolean says whether the daughter has
* references to the parent.
* @return True if the passed <code>p</code> signifies no references.
*/
private static boolean hasNoReferences(final Pair<Boolean, Boolean> p) {
return (!p.getFirst()) || (!p.getSecond());
} | 3.26 |
hbase_CatalogJanitor_cleanParent_rdh | /**
* If daughters no longer hold reference to the parents, delete the parent.
*
* @param parent
* RegionInfo of split offlined parent
* @param rowContent
* Content of <code>parent</code> row in <code>metaRegionName</code>
* @return True if we removed <code>parent</code> from meta table and from the filesystem.
*/
private boolean cleanParent(final RegionInfo parent, Result rowContent) throws IOException {
return cleanParent(services, parent, rowContent);
} | 3.26 |
hbase_CatalogJanitor_scanForReport_rdh | /**
* Scan hbase:meta.
*
* @return The generated {@link CatalogJanitorReport}
*/
// will be overridden in tests.
protected CatalogJanitorReport scanForReport() throws IOException {
ReportMakingVisitor visitor = new ReportMakingVisitor(this.services);
// Null tablename means scan all of meta.
MetaTableAccessor.scanMetaForTableRegions(this.services.getConnection(), visitor, null);
return visitor.getReport();
} | 3.26 |
hbase_CatalogJanitor_main_rdh | /**
* For testing against a cluster. Doesn't have a MasterServices context so does not report on good
* vs bad servers.
*/
public static void main(String[] args) throws IOException {
checkLog4jProperties();
ReportMakingVisitor visitor = new ReportMakingVisitor(null);
Configuration configuration = HBaseConfiguration.create();
configuration.setBoolean("hbase.defaults.for.version.skip", true);
try (Connection connection = ConnectionFactory.createConnection(configuration)) {
/* Used to generate an overlap. */
Get g = new Get(Bytes.toBytes("t2,40,1564119846424.1db8c57d64e0733e0f027aaeae7a0bf0."));
g.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
try (Table t = connection.getTable(TableName.META_TABLE_NAME)) {
Result v36 = t.get(g);
byte[] row = g.getRow();
row[row.length - 2] <<= row[row.length - 2];
Put p = new Put(g.getRow());
p.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, v36.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER));
t.put(p);
}
MetaTableAccessor.scanMetaForTableRegions(connection, visitor, null);
CatalogJanitorReport report = visitor.getReport();
LOG.info(report != null ? report.toString() : "empty");
}
} | 3.26 |
hbase_ServerRegionReplicaUtil_isRegionReplicaWaitForPrimaryFlushEnabled_rdh | /**
* Returns True if wait for primary to flush is enabled for user-space tables.
*/
public static boolean isRegionReplicaWaitForPrimaryFlushEnabled(Configuration conf) {
return conf.getBoolean(REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY, DEFAULT_REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH);
} | 3.26 |
hbase_ServerRegionReplicaUtil_shouldReplayRecoveredEdits_rdh | /**
* Returns whether to replay the recovered edits to flush the results. Currently secondary region
* replicas do not replay the edits, since it would cause flushes which might affect the primary
* region. Primary regions even opened in read only mode should replay the edits.
*
* @param region
* the HRegion object
* @return whether recovered edits should be replayed.
*/
public static boolean shouldReplayRecoveredEdits(HRegion region) {
return isDefaultReplica(region.getRegionInfo());
} | 3.26 |
hbase_ServerRegionReplicaUtil_getRegionInfoForFs_rdh | /**
* Returns the regionInfo object to use for interacting with the file system.
*
* @return A RegionInfo object to interact with the filesystem
*/
public static RegionInfo getRegionInfoForFs(RegionInfo regionInfo) {
if (regionInfo == null) {
return null;
}
return RegionReplicaUtil.getRegionInfoForDefaultReplica(regionInfo);
} | 3.26 |
hbase_ServerRegionReplicaUtil_isMetaRegionReplicaReplicationEnabled_rdh | /**
* Returns True if hbase:meta Region Read Replica is enabled.
*/
public static boolean isMetaRegionReplicaReplicationEnabled(Configuration conf, TableName tn) {
return TableName.isMetaTableName(tn) &&
conf.getBoolean(REGION_REPLICA_REPLICATION_CATALOG_CONF_KEY, DEFAULT_REGION_REPLICA_REPLICATION_CATALOG);
} | 3.26 |
hbase_ServerRegionReplicaUtil_getStoreFileInfo_rdh | /**
* Returns a StoreFileInfo from the given FileStatus. Secondary replicas refer to the files of the
* primary region, so an HFileLink is used to construct the StoreFileInfo. This way ensures that
* the secondary will be able to continue reading the store files even if they are moved to
* archive after compaction
*/
public static StoreFileInfo getStoreFileInfo(Configuration conf, FileSystem fs, RegionInfo regionInfo, RegionInfo regionInfoForFs, String familyName, Path path) throws IOException {
// if this is a primary region, just return the StoreFileInfo constructed from path
if (RegionInfo.COMPARATOR.compare(regionInfo, regionInfoForFs) == 0) {
return new StoreFileInfo(conf, fs, path, true);
}
// else create a store file link. The link file does not exist on the filesystem though.
if (HFileLink.isHFileLink(path) || StoreFileInfo.isHFile(path)) {
HFileLink link = HFileLink.build(conf, regionInfoForFs.getTable(), regionInfoForFs.getEncodedName(), familyName, path.getName());
return new StoreFileInfo(conf, fs, link.getFileStatus(fs), link);
} else if (StoreFileInfo.isReference(path)) {
Reference reference = Reference.read(fs, path);
Path referencePath = StoreFileInfo.getReferredToFile(path);
if (HFileLink.isHFileLink(referencePath)) {
// HFileLink Reference
HFileLink link = HFileLink.buildFromHFileLinkPattern(conf, referencePath);
return new StoreFileInfo(conf, fs, link.getFileStatus(fs), reference, link);
} else {
// Reference
HFileLink link = HFileLink.build(conf, regionInfoForFs.getTable(), regionInfoForFs.getEncodedName(), familyName, path.getName());
return new StoreFileInfo(conf, fs, link.getFileStatus(fs), reference);
}
} else {
throw new IOException(("path=" + path) + " doesn't look like a valid StoreFile");}
} | 3.26 |
hbase_ServerRegionReplicaUtil_isRegionReplicaStoreFileRefreshEnabled_rdh | /**
* Returns True if we are to refresh user-space hfiles in Region Read Replicas.
*/
public static boolean isRegionReplicaStoreFileRefreshEnabled(Configuration conf) {
return conf.getBoolean(REGION_REPLICA_STORE_FILE_REFRESH, DEFAULT_REGION_REPLICA_STORE_FILE_REFRESH);
} | 3.26 |
hbase_ServerRegionReplicaUtil_isReadOnly_rdh | /**
* Returns whether this region replica can accept writes.
*
* @param region
* the HRegion object
* @return whether the replica is read only
*/
public static boolean isReadOnly(HRegion region) {
return region.getTableDescriptor().isReadOnly() || (!isDefaultReplica(region.getRegionInfo()));
} | 3.26 |
hbase_ServerRegionReplicaUtil_isRegionReplicaReplicationEnabled_rdh | /**
* Returns True if Region Read Replica is enabled for user-space tables.
*/
private static boolean isRegionReplicaReplicationEnabled(Configuration conf) {
return conf.getBoolean(REGION_REPLICA_REPLICATION_CONF_KEY, DEFAULT_REGION_REPLICA_REPLICATION);
} | 3.26 |
hbase_ExportSnapshot_getSnapshotFiles_rdh | // ==========================================================================
// Input Format
// ==========================================================================
/**
* Extract the list of files (HFiles/WALs) to copy using Map-Reduce.
*
* @return list of files referenced by the snapshot (pair of path and size)
*/
private static List<Pair<SnapshotFileInfo, Long>> getSnapshotFiles(final Configuration conf, final FileSystem fs, final Path snapshotDir) throws IOException {
SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
final List<Pair<SnapshotFileInfo, Long>> files = new ArrayList<>();
final TableName table = TableName.valueOf(snapshotDesc.getTable());
// Get snapshot files
LOG.info(("Loading Snapshot '" + snapshotDesc.getName()) + "' hfile list");
SnapshotReferenceUtil.visitReferencedFiles(conf, fs, snapshotDir, snapshotDesc, new SnapshotReferenceUtil.SnapshotVisitor() {
@Override
public void storeFile(final RegionInfo regionInfo, final String family, final SnapshotRegionManifest.StoreFile storeFile)
throws IOException {
Pair<SnapshotFileInfo, Long> snapshotFileAndSize = null;
if (!storeFile.hasReference()) {
String region = regionInfo.getEncodedName();
String hfile = storeFile.getName();
snapshotFileAndSize = getSnapshotFileAndSize(fs, conf, table, region,
family, hfile, storeFile.hasFileSize() ? storeFile.getFileSize() : -1);
} else {
Pair<String, String> referredToRegionAndFile = StoreFileInfo.getReferredToRegionAndFile(storeFile.getName());
String referencedRegion = referredToRegionAndFile.getFirst();
String referencedHFile = referredToRegionAndFile.getSecond();
snapshotFileAndSize = getSnapshotFileAndSize(fs, conf, table, referencedRegion, family, referencedHFile, storeFile.hasFileSize() ? storeFile.getFileSize() : -1);
}
files.add(snapshotFileAndSize);
}
});
return files;
} | 3.26 |
hbase_ExportSnapshot_doWork_rdh | /**
* Execute the export snapshot by copying the snapshot metadata, hfiles and wals.
*
* @return 0 on success, and != 0 upon failure.
*/
@Override
public int doWork() throws IOException {
Configuration conf = getConf();
// Check user options
if (snapshotName == null) {
System.err.println("Snapshot name not provided.");
LOG.error("Use -h or --help for usage instructions.");
return 0;
}
if (outputRoot == null) {
System.err.println(("Destination file-system (--" + Options.COPY_TO.getLongOpt()) + ") not provided.");
LOG.error("Use -h or --help for usage instructions.");
return 0;
}
if (targetName == null) {
targetName = snapshotName;
}
if (inputRoot == null) {
inputRoot = CommonFSUtils.getRootDir(conf);
} else {
CommonFSUtils.setRootDir(conf, inputRoot);
}
Configuration srcConf = HBaseConfiguration.createClusterConf(conf, null, CONF_SOURCE_PREFIX);
srcConf.setBoolean(("fs." + inputRoot.toUri().getScheme()) + ".impl.disable.cache", true);
FileSystem inputFs = FileSystem.get(inputRoot.toUri(), srcConf);
Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX);
destConf.setBoolean(("fs." + outputRoot.toUri().getScheme()) + ".impl.disable.cache", true);
FileSystem outputFs = FileSystem.get(outputRoot.toUri(), destConf);
boolean skipTmp = conf.getBoolean(CONF_SKIP_TMP, false) || (conf.get(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR) != null);
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, inputRoot);
Path snapshotTmpDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(targetName, outputRoot, destConf);
Path outputSnapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(targetName, outputRoot);
Path initialOutputSnapshotDir = (skipTmp) ? outputSnapshotDir : snapshotTmpDir;
LOG.debug("inputFs={}, inputRoot={}", inputFs.getUri().toString(), inputRoot);
LOG.debug("outputFs={}, outputRoot={}, skipTmp={}, initialOutputSnapshotDir={}", outputFs,
outputRoot.toString(), skipTmp, initialOutputSnapshotDir); // Verify snapshot source before copying files
if (verifySource) {
LOG.info("Verify snapshot source, inputFs={}, inputRoot={}, snapshotDir={}.", inputFs.getUri(), inputRoot, snapshotDir);
verifySnapshot(srcConf, inputFs, inputRoot, snapshotDir);
}
// Find the necessary directory which need to change owner and group
Path v102 = SnapshotDescriptionUtils.getSnapshotRootDir(outputRoot);
if (outputFs.exists(v102)) {
if (skipTmp) {
v102 = outputSnapshotDir;
} else {
v102 = SnapshotDescriptionUtils.getWorkingSnapshotDir(outputRoot, destConf);
if (outputFs.exists(v102)) {
v102 = snapshotTmpDir;
}
}
}
// Check if the snapshot already exists
if (outputFs.exists(outputSnapshotDir)) {
if (overwrite) {
if (!outputFs.delete(outputSnapshotDir, true)) {
System.err.println("Unable to remove existing snapshot directory: " + outputSnapshotDir);
return 1;
}
} else {
System.err.println((("The snapshot '" + targetName) + "' already exists in the destination: ") + outputSnapshotDir);
return 1;
}
}
if (!skipTmp) {
// Check if the snapshot already in-progress
if (outputFs.exists(snapshotTmpDir)) {
if (overwrite) {
if (!outputFs.delete(snapshotTmpDir, true)) {
System.err.println("Unable to remove existing snapshot tmp directory: " + snapshotTmpDir);
return 1;
}
} else {
System.err.println(("A snapshot with the same name '" + targetName) + "' may be in-progress");
System.err.println(("Please check " + snapshotTmpDir) + ". If the snapshot has completed, ");System.err.println(("consider removing " + snapshotTmpDir) + " by using the -overwrite option");
return 1;
}
}
}
// Step 1 - Copy fs1:/.snapshot/<snapshot> to fs2:/.snapshot/.tmp/<snapshot>
// The snapshot references must be copied before the hfiles otherwise the cleaner
// will remove them because they are unreferenced.
List<Path> travesedPaths = new ArrayList<>();
boolean copySucceeded = false;
try {
LOG.info((("Copy Snapshot Manifest from " + snapshotDir) + " to ") + initialOutputSnapshotDir);
travesedPaths = FSUtils.copyFilesParallel(inputFs, snapshotDir, outputFs, initialOutputSnapshotDir, conf, conf.getInt(CONF_COPY_MANIFEST_THREADS, DEFAULT_COPY_MANIFEST_THREADS));
copySucceeded = true;
} catch (IOException e) {
throw new ExportSnapshotException((("Failed to copy the snapshot directory: from=" + snapshotDir) + " to=") + initialOutputSnapshotDir, e);
} finally {
if (copySucceeded) {
if ((f1 != null) || (filesGroup != null)) {
LOG.warn((f1 == null ? "" : (("Change the owner of " + v102) + " to ") + f1) + (filesGroup == null ? "" : ((", Change the group of " + v102) + " to ") + filesGroup));
setOwnerParallel(outputFs, f1, filesGroup, conf, travesedPaths);
}
if (f2 > 0) {
LOG.warn((("Change the permission of " + v102) + " to ") + f2);
setPermissionParallel(outputFs, ((short) (f2)),
travesedPaths, conf);
}
}
}
// Write a new .snapshotinfo if the target name is different from the source name or we want to
// reset TTL for target snapshot.
if ((!targetName.equals(snapshotName)) || resetTtl) {
SnapshotDescription.Builder snapshotDescBuilder = SnapshotDescriptionUtils.readSnapshotInfo(inputFs, snapshotDir).toBuilder();
if (!targetName.equals(snapshotName)) {
snapshotDescBuilder.setName(targetName);
}
if (resetTtl) {
snapshotDescBuilder.setTtl(HConstants.DEFAULT_SNAPSHOT_TTL);
}
SnapshotDescriptionUtils.writeSnapshotInfo(snapshotDescBuilder.build(), initialOutputSnapshotDir, outputFs);
if ((f1 != null) || (filesGroup != null)) {
outputFs.setOwner(new Path(initialOutputSnapshotDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE), f1, filesGroup);
}
if (f2 > 0) {
outputFs.setPermission(new Path(initialOutputSnapshotDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE), new FsPermission(((short) (f2))));
}
}
// Step 2 - Start MR Job to copy files
// The snapshot references must be copied before the files otherwise the files gets removed
// by the HFileArchiver, since they have no references.
try {
runCopyJob(inputRoot, outputRoot, snapshotName, snapshotDir, verifyChecksum, f1, filesGroup, f2, mappers, bandwidthMB);
LOG.info("Finalize the Snapshot Export");
if (!skipTmp) {
// Step 3 - Rename fs2:/.snapshot/.tmp/<snapshot> fs2:/.snapshot/<snapshot>
if (!outputFs.rename(snapshotTmpDir, outputSnapshotDir)) {
throw new ExportSnapshotException((("Unable to rename snapshot directory from=" + snapshotTmpDir) + " to=") + outputSnapshotDir);
}
}
// Step 4 - Verify snapshot integrity
if (verifyTarget) {
LOG.info("Verify snapshot integrity");
verifySnapshot(destConf, outputFs, outputRoot, outputSnapshotDir);
}
LOG.info("Export Completed: " + targetName);
return 0;
} catch (Exception e) {
LOG.error("Snapshot export failed", e);
if (!skipTmp) {
outputFs.delete(snapshotTmpDir, true);
}
outputFs.delete(outputSnapshotDir, true);
return 1;
} finally {
IOUtils.closeStream(inputFs);
IOUtils.closeStream(outputFs);
}
} | 3.26 |
hbase_ExportSnapshot_createOutputPath_rdh | /**
* Create the output folder and optionally set ownership.
*/
private void createOutputPath(final Path path) throws IOException {
if ((filesUser == null) && (filesGroup == null)) {
outputFs.mkdirs(path);
} else {
Path parent = path.getParent();
if ((!outputFs.exists(parent)) && (!parent.isRoot())) {
createOutputPath(parent);
}
outputFs.mkdirs(path);
if ((filesUser != null) || (filesGroup != null)) {
// override the owner when non-null user/group is specified
outputFs.setOwner(path, filesUser, filesGroup);
}
if (filesMode > 0) {
outputFs.setPermission(path, new FsPermission(filesMode));
}
}
} | 3.26 |
hbase_ExportSnapshot_runCopyJob_rdh | // ==========================================================================
// Tool
// ==========================================================================
/**
* Run Map-Reduce Job to perform the files copy.
*/
private void runCopyJob(final Path inputRoot, final Path outputRoot, final String snapshotName, final Path snapshotDir, final boolean verifyChecksum, final String filesUser, final String filesGroup, final int filesMode, final int mappers, final int bandwidthMB) throws IOException, InterruptedException, ClassNotFoundException
{
Configuration conf = getConf();
if (filesGroup != null)
conf.set(CONF_FILES_GROUP, filesGroup);
if (filesUser != null)
conf.set(CONF_FILES_USER, filesUser);
if (mappers > 0) {
conf.setInt(CONF_NUM_SPLITS, mappers);
conf.setInt(MR_NUM_MAPS, mappers);
}
conf.setInt(CONF_FILES_MODE, filesMode);
conf.setBoolean(CONF_CHECKSUM_VERIFY, verifyChecksum);
conf.set(CONF_OUTPUT_ROOT, outputRoot.toString());
conf.set(CONF_INPUT_ROOT, inputRoot.toString());
conf.setInt(CONF_BANDWIDTH_MB, bandwidthMB);
conf.set(CONF_SNAPSHOT_NAME, snapshotName);
conf.set(CONF_SNAPSHOT_DIR, snapshotDir.toString());
String jobname = conf.get(CONF_MR_JOB_NAME, "ExportSnapshot-" + snapshotName);
Job job = new Job(conf);
job.setJobName(jobname);
job.setJarByClass(ExportSnapshot.class);
TableMapReduceUtil.addDependencyJars(job);
job.setMapperClass(ExportSnapshot.ExportMapper.class);
job.setInputFormatClass(ExportSnapshot.ExportSnapshotInputFormat.class);
job.setOutputFormatClass(NullOutputFormat.class);
job.setMapSpeculativeExecution(false);
job.setNumReduceTasks(0);
// Acquire the delegation Tokens
Configuration srcConf = HBaseConfiguration.createClusterConf(conf, null, CONF_SOURCE_PREFIX);
TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[]{ inputRoot }, srcConf);
Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX);
TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[]{ outputRoot }, destConf);
// Run the MR Job
if (!job.waitForCompletion(true)) {
throw new ExportSnapshotException(job.getStatus().getFailureInfo());
}
} | 3.26 |
hbase_ExportSnapshot_getBalancedSplits_rdh | /**
* Given a list of file paths and sizes, create around ngroups in as balanced a way as possible.
* The groups created will have similar amounts of bytes.
* <p>
* The algorithm used is pretty straightforward; the file list is sorted by size, and then each
* group fetches the biggest file available, iterating through the groups and alternating direction.
*/
static List<List<Pair<SnapshotFileInfo, Long>>> getBalancedSplits(final List<Pair<SnapshotFileInfo, Long>> files, final int ngroups) {
// Sort files by size, from small to big
Collections.sort(files, new Comparator<Pair<SnapshotFileInfo, Long>>() {
public int compare(Pair<SnapshotFileInfo, Long> a, Pair<SnapshotFileInfo, Long> b) {
long r = a.getSecond() - b.getSecond();
return r < 0 ? -1 : r > 0 ? 1 : 0;
}
});
// create balanced groups
List<List<Pair<SnapshotFileInfo, Long>>> fileGroups = new LinkedList<>();
long[] v56 = new long[ngroups];
int hi = files.size() - 1;
int lo = 0;
List<Pair<SnapshotFileInfo, Long>> group;
int dir = 1;
int g = 0;
while (hi >= lo) {
if (g == fileGroups.size()) {
group = new LinkedList<>();
fileGroups.add(group);
} else {
group = fileGroups.get(g);
}
Pair<SnapshotFileInfo, Long> fileInfo = files.get(hi--);
// add the hi one
v56[g] += fileInfo.getSecond();
group.add(fileInfo);
// change direction when at the end or the beginning
g += dir;
if (g == ngroups) {
dir = -1;
g = ngroups - 1;
} else if (g < 0) {
dir = 1;
g = 0;
}
}
if (LOG.isDebugEnabled()) {
for (int v63 = 0; v63 < v56.length; ++v63) {
LOG.debug((("export split="
+
v63) + " size=") + StringUtils.humanReadableInt(v56[v63]));
}
}
return fileGroups;
} | 3.26 |
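A minimal, self-contained sketch of the same zig-zag grouping idea on plain name/size pairs (not the HBase `SnapshotFileInfo` types, and with the groups pre-created rather than created lazily as in the method above):

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class BalancedSplitsDemo {
  // Same idea as getBalancedSplits(): sort by size, then repeatedly hand the largest
  // remaining file to the groups while sweeping the group index back and forth so that
  // the total bytes per group stay roughly even.
  static List<List<Map.Entry<String, Long>>> balance(List<Map.Entry<String, Long>> files, int ngroups) {
    files.sort(Map.Entry.comparingByValue());
    List<List<Map.Entry<String, Long>>> groups = new ArrayList<>();
    for (int i = 0; i < ngroups; i++) {
      groups.add(new ArrayList<>());
    }
    int g = 0, dir = 1;
    for (int hi = files.size() - 1; hi >= 0; hi--) {
      groups.get(g).add(files.get(hi)); // always take the biggest remaining file
      g += dir;
      if (g == ngroups) { dir = -1; g = ngroups - 1; }
      else if (g < 0)   { dir = 1;  g = 0; }
    }
    return groups;
  }

  public static void main(String[] args) {
    List<Map.Entry<String, Long>> files = new ArrayList<>(List.of(
      Map.entry("a", 90L), Map.entry("b", 70L), Map.entry("c", 40L),
      Map.entry("d", 30L), Map.entry("e", 10L)));
    balance(files, 2).forEach(g ->
      System.out.println(g + " total=" + g.stream().mapToLong(Map.Entry::getValue).sum()));
  }
}
```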
hbase_ExportSnapshot_sameFile_rdh | /**
* Check if the two files are equal by looking at the file length, and at the checksum (if user
* has specified the verifyChecksum flag).
*/
private boolean sameFile(final FileStatus inputStat, final FileStatus outputStat)
{
// Not matching length
if (inputStat.getLen() != outputStat.getLen())
return false;
// Mark files as equals, since user asked for no checksum verification
if (!verifyChecksum)
return true;
// If checksums are not available, files are not the same.
FileChecksum inChecksum = getFileChecksum(inputFs, inputStat.getPath());
if (inChecksum == null)
return false;
FileChecksum outChecksum = getFileChecksum(outputFs, outputStat.getPath());
if (outChecksum == null)
return false;
return inChecksum.equals(outChecksum);
} | 3.26 |
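A standalone sketch of the same length-then-checksum comparison using the public Hadoop `FileSystem` API directly; the snippet's private `getFileChecksum` wrapper and its `inputFs`/`outputFs`/`verifyChecksum` fields are modeled here as explicit parameters:

```java
import java.io.IOException;

import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SameFileSketch {
  // Same decision order as sameFile() above: compare lengths first, then (optionally) checksums.
  static boolean sameFile(FileSystem srcFs, Path src, FileSystem dstFs, Path dst,
      boolean verifyChecksum) throws IOException {
    FileStatus in = srcFs.getFileStatus(src);
    FileStatus out = dstFs.getFileStatus(dst);
    if (in.getLen() != out.getLen()) {
      return false; // different lengths, definitely different files
    }
    if (!verifyChecksum) {
      return true; // caller opted out of checksum verification
    }
    FileChecksum inSum = srcFs.getFileChecksum(src);
    FileChecksum outSum = dstFs.getFileChecksum(dst);
    // getFileChecksum may return null (e.g. on the local filesystem); mirror the
    // snippet above and treat "no checksum available" as not equal
    return inSum != null && outSum != null && inSum.equals(outSum);
  }
}
```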
hbase_ExportSnapshot_getOutputPath_rdh | /**
* Returns the location where the inputPath will be copied.
*/
private Path getOutputPath(final SnapshotFileInfo inputInfo) throws IOException {
Path path = null;
switch (inputInfo.getType()) {
case HFILE :
Path inputPath = new Path(inputInfo.getHfile());
String family = inputPath.getParent().getName();
TableName table = HFileLink.getReferencedTableName(inputPath.getName());
String region = HFileLink.getReferencedRegionName(inputPath.getName());
String hfile = HFileLink.getReferencedHFileName(inputPath.getName());
path = new Path(CommonFSUtils.getTableDir(new Path("./"), table), new Path(region, new Path(family, hfile)));
break;
case WAL :
LOG.warn("snapshot does not keeps WALs: " + inputInfo);
break;
default :
throw new IOException("Invalid File Type: " + inputInfo.getType().toString());
}
return new Path(outputArchive, path);
} | 3.26 |
hbase_ExportSnapshot_injectTestFailure_rdh | /**
* Used by TestExportSnapshot to test for retries when failures happen. Failure is injected in
* {@link #copyFile(Mapper.Context, org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotFileInfo, Path)}.
*/
@SuppressWarnings("checkstyle:linelength")
private void injectTestFailure(final Context context, final SnapshotFileInfo inputInfo) throws IOException {
if (!context.getConfiguration().getBoolean(Testing.CONF_TEST_FAILURE, false))
return;
if (testing.injectedFailureCount >= testing.failuresCountToInject)
return;
testing.injectedFailureCount++;
context.getCounter(Counter.COPY_FAILED).increment(1);
LOG.debug("Injecting failure. Count: " + testing.injectedFailureCount);
throw new IOException(String.format("TEST FAILURE (%d of max %d): Unable to copy input=%s", testing.injectedFailureCount, testing.failuresCountToInject, inputInfo));
} | 3.26 |
hbase_ExportSnapshot_openSourceFile_rdh | /**
* Try to open the "source" file. Throws an IOException if the communication with the inputFs
* fail or if the file is not found.
*/
private FSDataInputStream openSourceFile(Context context, final SnapshotFileInfo fileInfo) throws IOException {
try {
Configuration conf = context.getConfiguration();
FileLink link = null;
switch (fileInfo.getType()) {
case HFILE :
Path inputPath = new Path(fileInfo.getHfile());
link = getFileLink(inputPath, conf);
break;
case WAL :
String serverName = fileInfo.getWalServer();
String logName = fileInfo.getWalName();
link = new WALLink(inputRoot, serverName, logName);
break;
default :
throw new IOException("Invalid File Type: " + fileInfo.getType().toString()); }
return link.open(inputFs);
} catch (IOException e) {
context.getCounter(Counter.MISSING_FILES).increment(1);
LOG.error("Unable to open source file=" + fileInfo.toString(), e);
throw e;
}
} | 3.26 |
hbase_BootstrapNodeManager_getFromRegionServer_rdh | // this method is also used to test whether a given region server is still alive.
private void getFromRegionServer() {
if ((EnvironmentEdgeManager.currentTime() - lastRequestMasterTime) >= TimeUnit.SECONDS.toMillis(requestMasterIntervalSecs)) {
// schedule a get from master task immediately if haven't request master for more than
// requestMasterIntervalSecs
executor.execute(this::getFromMaster);
return;
}
List<ServerName> currentList = this.nodes;
ServerName peer = currentList.get(ThreadLocalRandom.current().nextInt(currentList.size()));
List<ServerName> otherList;
try {
otherList = FutureUtils.get(conn.getAllBootstrapNodes(peer));
} catch (IOException e) {
f0.warn("failed to request region server {}", peer, e);
// remove this region server from the list since it can not respond successfully
List<ServerName> newList = currentList.stream().filter(sn -> sn != peer).collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
this.nodes = newList;
if (newList.size() < maxNodeCount) {
// schedule a get from master task immediately
executor.execute(this::getFromMaster);
} else {
executor.schedule(this::getFromRegionServer, getDelay(requestRegionServerIntervalSecs), TimeUnit.SECONDS);
}
return;
}
// randomly select new live region server list
Set<ServerName> newRegionServers = new HashSet<ServerName>(currentList);
newRegionServers.addAll(otherList);
List<ServerName> newList = new ArrayList<ServerName>(newRegionServers);
Collections.shuffle(newList, ThreadLocalRandom.current());
int expectedListSize = maxNodeCount * 2;
if (newList.size() <= expectedListSize) {
this.nodes = Collections.unmodifiableList(newList);
} else {
this.nodes = Collections.unmodifiableList(new ArrayList<>(newList.subList(0, expectedListSize)));
}
// schedule a new get from region server task
executor.schedule(this::getFromRegionServer, requestRegionServerIntervalSecs, TimeUnit.SECONDS);
} | 3.26 |
hbase_ProcedureWALFormat_load_rdh | /**
* Load all the procedures in these ProcedureWALFiles, and rebuild the given {@code tracker} if
* needed, i.e, the {@code tracker} is a partial one.
* <p/>
* The method in the give {@code loader} will be called at the end after we load all the
* procedures and construct the hierarchy.
* <p/>
* And we will call the {@link ProcedureStoreTracker#resetModified()} method for the given
* {@code tracker} before returning, as it will be used to track the next proc wal file's modified
* procedures.
*/
public static void load(Iterator<ProcedureWALFile> logs, ProcedureStoreTracker tracker, Loader loader) throws IOException {
ProcedureWALFormatReader reader = new ProcedureWALFormatReader(tracker, loader);
tracker.setKeepDeletes(true);
// Ignore the last log which is current active log.
while (logs.hasNext()) {
ProcedureWALFile log = logs.next();
log.open();
try {
reader.read(log);
} finally {
log.close();
}
}
reader.finish();
// The tracker is now updated with all the procedures read from the logs
if (tracker.isPartial()) {
tracker.setPartialFlag(false);
}
tracker.resetModified();
tracker.setKeepDeletes(false);
} | 3.26 |
hbase_SnapshotHFileCleaner_getFileCacheForTesting_rdh | /**
* Exposed for Testing!
*
* @return the cache of all hfiles
*/
public SnapshotFileCache getFileCacheForTesting() {
return this.cache;
} | 3.26 |
hbase_ReplicationLoad_sourceToString_rdh | /**
* sourceToString
*
* @return a string containing sourceReplicationLoad information
*/
public String sourceToString() {
StringBuilder sb = new StringBuilder();
for (ClusterStatusProtos.ReplicationLoadSource rls : this.replicationLoadSourceEntries) {
sb = Strings.appendKeyValue(sb, "\n PeerID", rls.getPeerID());
sb = Strings.appendKeyValue(sb, "AgeOfLastShippedOp", rls.getAgeOfLastShippedOp());
sb = Strings.appendKeyValue(sb, "SizeOfLogQueue", rls.getSizeOfLogQueue());
sb = Strings.appendKeyValue(sb, "TimestampsOfLastShippedOp", new Date(rls.getTimeStampOfLastShippedOp()).toString());
sb = Strings.appendKeyValue(sb, "Replication Lag", rls.getReplicationLag());
}
return sb.toString();
} | 3.26 |
hbase_ReplicationLoad_toString_rdh | /**
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return (this.sourceToString() + System.getProperty("line.separator")) + this.sinkToString();
} | 3.26 |
hbase_ReplicationLoad_buildReplicationLoad_rdh | /**
* buildReplicationLoad
*
* @param sources
* List of ReplicationSource instances for which metrics should be reported
* @param sinkMetrics
* metrics of the replication sink
*/
public void buildReplicationLoad(final List<ReplicationSourceInterface> sources, final MetricsSink sinkMetrics) {
if (sinkMetrics != null) {
// build the SinkLoad
ClusterStatusProtos.ReplicationLoadSink.Builder rLoadSinkBuild = ClusterStatusProtos.ReplicationLoadSink.newBuilder();
rLoadSinkBuild.setAgeOfLastAppliedOp(sinkMetrics.getAgeOfLastAppliedOp());
rLoadSinkBuild.setTimeStampsOfLastAppliedOp(sinkMetrics.getTimestampOfLastAppliedOp());
rLoadSinkBuild.setTimestampStarted(sinkMetrics.getStartTimestamp());
rLoadSinkBuild.setTotalOpsProcessed(sinkMetrics.getAppliedOps());
this.replicationLoadSink = rLoadSinkBuild.build();
}
this.replicationLoadSourceEntries = new ArrayList<>();
for (ReplicationSourceInterface source : sources) {
MetricsSource sm = source.getSourceMetrics();
// Get the actual peer id
String peerId = sm.getPeerID();
String[] parts = peerId.split("-", 2);
peerId = (parts.length != 1) ? parts[0] : peerId;
long v5 = sm.getAgeOfLastShippedOp();
int sizeOfLogQueue = sm.getSizeOfLogQueue();
long editsRead = sm.getReplicableEdits();
long oPsShipped = sm.getOpsShipped();
long timeStampOfLastShippedOp = sm.getTimestampOfLastShippedOp();
long timeStampOfNextToReplicate = sm.getTimeStampNextToReplicate();
long replicationLag = sm.getReplicationDelay();
ClusterStatusProtos.ReplicationLoadSource.Builder rLoadSourceBuild = ClusterStatusProtos.ReplicationLoadSource.newBuilder();
rLoadSourceBuild.setPeerID(peerId);
rLoadSourceBuild.setAgeOfLastShippedOp(v5);
rLoadSourceBuild.setSizeOfLogQueue(sizeOfLogQueue);
rLoadSourceBuild.setTimeStampOfLastShippedOp(timeStampOfLastShippedOp);
rLoadSourceBuild.setReplicationLag(replicationLag);
rLoadSourceBuild.setTimeStampOfNextToReplicate(timeStampOfNextToReplicate);
rLoadSourceBuild.setEditsRead(editsRead);
rLoadSourceBuild.setOPsShipped(oPsShipped);
if (source instanceof ReplicationSource) {
ReplicationSource replSource = ((ReplicationSource) (source));
rLoadSourceBuild.setRecovered(replSource.getQueueId().isRecovered());
rLoadSourceBuild.setQueueId(replSource.getQueueId().toString());
rLoadSourceBuild.setRunning(replSource.isWorkerRunning());
rLoadSourceBuild.setEditsSinceRestart(timeStampOfNextToReplicate > 0);
}
this.replicationLoadSourceEntries.add(rLoadSourceBuild.build());
}
} | 3.26 |
hbase_ReplicationLoad_sinkToString_rdh | /**
* sinkToString
*
* @return a string containing sinkReplicationLoad information
*/
public String sinkToString() {
if (this.replicationLoadSink == null)
return null;
StringBuilder sb = new StringBuilder();
sb = Strings.appendKeyValue(sb, "AgeOfLastAppliedOp", this.replicationLoadSink.getAgeOfLastAppliedOp());
sb = Strings.appendKeyValue(sb, "TimestampsOfLastAppliedOp",
new
Date(this.replicationLoadSink.getTimeStampsOfLastAppliedOp()).toString());
return sb.toString();
} | 3.26 |
hbase_WALEdit_readFromCells_rdh | /**
* Reads WALEdit from cells.
*
* @param cellDecoder
* Cell decoder.
* @param expectedCount
* Expected cell count.
* @return Number of KVs read.
*/
public int readFromCells(Codec.Decoder cellDecoder, int expectedCount) throws IOException {
cells.clear();
cells.ensureCapacity(expectedCount);
while ((cells.size() < expectedCount) && cellDecoder.advance()) {
add(cellDecoder.current());
}
return cells.size();
} | 3.26 |
hbase_WALEdit_m0_rdh | /**
* Replaying WALs can read Cell-at-a-time so need this method in those cases.
*/
public static boolean m0(Cell cell) {
return CellUtil.matchingFamily(cell, METAFAMILY);
}
/**
*
* @return True if this is a meta edit; has one edit only and its columnfamily is
{@link #METAFAMILY} | 3.26 |
hbase_WALEdit_getCompaction_rdh | /**
* Deserializes and returns a CompactionDescriptor if the KeyValue contains one.
*
* @param kv
* the key value
* @return deserialized CompactionDescriptor or null.
*/
public static CompactionDescriptor getCompaction(Cell kv) throws IOException {
return isCompactionMarker(kv) ? CompactionDescriptor.parseFrom(CellUtil.cloneValue(kv)) : null;
}
/**
* Returns true if the given cell is a serialized {@link CompactionDescriptor} | 3.26 |
hbase_WALEdit_setCells_rdh | /**
* This is not thread safe. This will change the WALEdit and shouldn't be used unless you are sure
* that nothing else depends on the contents being immutable.
*
* @param cells
* the list of cells that this WALEdit now contains.
*/
// Used by replay.
@InterfaceAudience.Private
public void setCells(ArrayList<Cell> cells) {
this.cells = cells;
this.families = null;
} | 3.26 |
hbase_WALEdit_add_rdh | /**
* Append the given map of family->edits to a WALEdit data structure. This does not write to the
* WAL itself. Note that as an optimization, we will stamp the Set of column families into the
* WALEdit to save on our having to calculate column families subsequently down in the actual WAL
* writing.
*
* @param familyMap
* map of family->edits
*/
public void add(Map<byte[], List<Cell>> familyMap) {
for (Map.Entry<byte[], List<Cell>> e :
familyMap.entrySet()) {
// 'foreach' loop NOT used. See HBASE-12023 "...creates too many iterator objects."
int listSize = e.getValue().size();
// Add all Cells first and then at end, add the family rather than call {@link #add(Cell)}
// and have it clone family each time. Optimization!
for (int i = 0; i < listSize; i++) {
addCell(e.getValue().get(i));
}
addFamily(e.getKey());
}
} | 3.26 |
hbase_WALEdit_createBulkLoadEvent_rdh | /**
* Create a bulk loader WALEdit
*
* @param hri
* The RegionInfo for the region in which we are bulk loading
* @param bulkLoadDescriptor
* The descriptor for the Bulk Loader
* @return The WALEdit for the BulkLoad
*/
public static WALEdit createBulkLoadEvent(RegionInfo hri, WALProtos.BulkLoadDescriptor bulkLoadDescriptor) {
KeyValue kv = new KeyValue(getRowForRegion(hri), METAFAMILY, BULK_LOAD, EnvironmentEdgeManager.currentTime(), bulkLoadDescriptor.toByteArray());
return new WALEdit().add(kv, METAFAMILY);
} | 3.26 |
hbase_WALEdit_getBulkLoadDescriptor_rdh | /**
* Deserialized and returns a BulkLoadDescriptor from the passed in Cell
*
* @param cell
* the key value
* @return deserialized BulkLoadDescriptor or null.
*/
public static BulkLoadDescriptor getBulkLoadDescriptor(Cell cell) throws IOException {
return CellUtil.matchingColumn(cell, METAFAMILY, BULK_LOAD) ? WALProtos.BulkLoadDescriptor.parseFrom(CellUtil.cloneValue(cell)) : null;
} | 3.26 |
hbase_WALEdit_createReplicationMarkerEdit_rdh | /**
* Creates a replication tracker edit with {@link #METAFAMILY} family and
* {@link #REPLICATION_MARKER} qualifier and has null value.
*
* @param rowKey
* rowkey
* @param timestamp
* timestamp
*/
public static WALEdit createReplicationMarkerEdit(byte[] rowKey, long timestamp) {
KeyValue kv = new KeyValue(rowKey, METAFAMILY, REPLICATION_MARKER, timestamp, Type.Put);
return new WALEdit().add(kv);
} | 3.26 |
hbase_WALEdit_getFamilies_rdh | /**
* For use by FSWALEntry ONLY. An optimization.
*
* @return All families in {@link #getCells()}; may be null.
*/
public Set<byte[]> getFamilies() {
return this.families;
} | 3.26 |
hbase_WALEdit_createCompaction_rdh | /**
* Returns A Marker WALEdit that has <code>c</code> serialized as its value
*/
public static WALEdit createCompaction(final RegionInfo hri, final CompactionDescriptor c) {
byte[] pbbytes = c.toByteArray();
KeyValue kv = new KeyValue(getRowForRegion(hri), METAFAMILY, COMPACTION, EnvironmentEdgeManager.currentTime(), pbbytes);
return new WALEdit().add(kv, METAFAMILY);// replication scope null so this won't be replicated
} | 3.26 |
hbase_WALEdit_isRegionCloseMarker_rdh | /**
* Public so can be accessed from regionserver.wal package.
*
* @return True if this is a Marker Edit and it is a RegionClose type.
*/
public boolean isRegionCloseMarker() {
return isMetaEdit() && PrivateCellUtil.matchingQualifier(this.cells.get(0), REGION_EVENT_CLOSE, 0, REGION_EVENT_CLOSE.length);
} | 3.26 |
hbase_WALEdit_isMetaEditFamily_rdh | /**
*
* @return True if <code>f</code> is {@link #METAFAMILY}
* @deprecated Since 2.3.0. Do not expose. Make protected.
*/
@Deprecated
public static boolean isMetaEditFamily(final byte[] f) {
return Bytes.equals(METAFAMILY, f);
} | 3.26 |
hbase_FIFOCompactionPolicy_isEmptyStoreFile_rdh | /**
* The FIFOCompactionPolicy only chooses the TTL-expired store files as the compaction candidates.
* If all the store files are TTL expired, then the compaction will generate a new empty file,
* whose max timestamp will be Long.MAX_VALUE. If not considered separately, that store file
* will never be archived because its TTL will never expire. So we'll check the empty store
* file separately (See HBASE-21504).
*/
private boolean isEmptyStoreFile(HStoreFile sf) {
return sf.getReader().getEntries() == 0;
} | 3.26 |
hbase_TableListModel_toString_rdh | /* (non-Javadoc)
@see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
for (TableModel aTable : tables) {
sb.append(aTable.toString());
sb.append('\n');
}
return sb.toString();
} | 3.26 |
hbase_TableListModel_setTables_rdh | /**
*
* @param tables
* the tables to set
*/
public void setTables(List<TableModel> tables) {
this.tables = tables;
} | 3.26 |
hbase_TableListModel_get_rdh | /**
*
* @param index
* the index
* @return the table model
*/
public TableModel get(int index) {
return tables.get(index);
} | 3.26 |
hbase_TableListModel_add_rdh | /**
* Add the table name model to the list
*
* @param table
* the table model
*/
public void add(TableModel table) {
tables.add(table);
} | 3.26 |
hbase_ThriftTable_createClosestRowAfter_rdh | /**
* Create the closest row after the specified row
*/
protected byte[] createClosestRowAfter(byte[] row) {
if (row == null) {
throw new RuntimeException("The passed row is null");
}
return Arrays.copyOf(row, row.length + 1);
} | 3.26 |
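A tiny demonstration of why `Arrays.copyOf(row, row.length + 1)` works here: appending a single `0x00` byte produces the smallest byte array that sorts strictly after `row` under unsigned lexicographic comparison, which is exactly what a "closest row after" scan boundary needs:

```java
import java.util.Arrays;

public class ClosestRowAfterDemo {
  public static void main(String[] args) {
    byte[] row = {'r', 'o', 'w', '1'};
    // copyOf pads the extra slot with 0x00, the smallest possible byte value
    byte[] next = Arrays.copyOf(row, row.length + 1);
    System.out.println(Arrays.toString(next)); // [114, 111, 119, 49, 0]
  }
}
```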
hbase_MobFile_getFileName_rdh | /**
* Gets the file name.
*
* @return The file name.
*/
public String getFileName() {
return sf.getPath().getName();
} | 3.26 |
hbase_MobFile_readCell_rdh | /**
* Reads a cell from the mob file.
*
* @param search
* The cell need to be searched in the mob file.
* @param cacheMobBlocks
* Should this scanner cache blocks.
* @param readPt
* the read point.
* @return The cell in the mob file.
*/
public MobCell readCell(Cell search, boolean cacheMobBlocks, long readPt) throws IOException {
StoreFileScanner scanner = null;
boolean v3 = false;
try {
List<StoreFileScanner> sfScanners = StoreFileScanner.getScannersForStoreFiles(Collections.singletonList(sf), cacheMobBlocks, true, false, false, readPt);
if (!sfScanners.isEmpty()) {
scanner = sfScanners.get(0);
if (scanner.seek(search)) {
MobCell mobCell = new MobCell(scanner.peek(), scanner);
v3 = true;
return mobCell;
}
}
return null;
} finally {
if ((scanner != null) && (!v3)) {
scanner.close();
}
}
} | 3.26 |
hbase_MobFile_create_rdh | /**
* Creates an instance of the MobFile.
*
* @param fs
* The file system.
* @param path
* The path of the underlying StoreFile.
* @param conf
* The configuration.
* @param cacheConf
* The CacheConfig.
* @return An instance of the MobFile.
*/
public static MobFile create(FileSystem fs, Path path, Configuration conf, CacheConfig cacheConf) throws IOException {
// XXX: primaryReplica is only used for constructing the key of block cache so it is not a
// critical problem if we pass the wrong value, so here we always pass true. Need to fix later.
HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true);
return new MobFile(sf);
} | 3.26 |
hbase_MobFile_close_rdh | /**
* Closes the underlying reader, but does not evict blocks belonging to this file. It's not
* thread-safe. Use MobFileCache.closeFile() instead.
*/
public void close() throws IOException {
if (sf != null) {
sf.closeStoreFile(false);
sf = null;
}
} | 3.26 |
hbase_MobFile_getScanner_rdh | /**
* Internal use only. This is used by the sweeper.
*
* @return The store file scanner.
*/
public StoreFileScanner getScanner() throws IOException {
List<HStoreFile> sfs = new ArrayList<>();
sfs.add(sf);
List<StoreFileScanner> sfScanners = StoreFileScanner.getScannersForStoreFiles(sfs, false, true, false, false, sf.getMaxMemStoreTS());
return sfScanners.get(0);
} | 3.26 |
hbase_TableQueue_requireTableExclusiveLock_rdh | /**
*
* @param proc
* must not be null
*/
private static boolean requireTableExclusiveLock(TableProcedureInterface proc) {
switch (proc.getTableOperationType()) {
case CREATE :
case DELETE :
case DISABLE :
case ENABLE :
return true;
case EDIT :
// we allow concurrent edit on the ns family in meta table
return !proc.getTableName().equals(TableProcedureInterface.DUMMY_NAMESPACE_TABLE_NAME);
case READ :
case FLUSH :
case SNAPSHOT :
return false;
// region operations are using the shared-lock on the table
// and then they will grab an xlock on the region.
case REGION_SPLIT :
case REGION_MERGE :
case REGION_ASSIGN :
case REGION_UNASSIGN :
case REGION_EDIT :
case REGION_GC :
case MERGED_REGIONS_GC :
case REGION_SNAPSHOT :
case REGION_TRUNCATE :
return false;
default :
break;
}
throw new UnsupportedOperationException("unexpected type " + proc.getTableOperationType());} | 3.26 |
hbase_ChainWALEmptyEntryFilter_setFilterEmptyEntry_rdh | /**
* To allow the empty entries to get filtered, we want to set this optional flag to decide if we
* want to filter the entries which have no cells, or whose cells were all filtered out through
* {@link WALCellFilter}.
*
* @param filterEmptyEntry
* flag
*/
@InterfaceAudience.Private
public void setFilterEmptyEntry(final boolean filterEmptyEntry) {
this.filterEmptyEntry = filterEmptyEntry;
} | 3.26 |
hbase_RegionSizeReportingChore_getTimeUnit_rdh | /**
* Extracts the time unit for the chore period and initial delay from the configuration. The
* configuration value for {@link #REGION_SIZE_REPORTING_CHORE_TIMEUNIT_KEY} must correspond to a
* {@link TimeUnit} value.
*
* @param conf
* The configuration object.
* @return The configured time unit for the chore period and initial delay or the default value.
*/
static TimeUnit getTimeUnit(Configuration conf) {
return TimeUnit.valueOf(conf.get(REGION_SIZE_REPORTING_CHORE_TIMEUNIT_KEY, REGION_SIZE_REPORTING_CHORE_TIMEUNIT_DEFAULT));
} | 3.26 |
hbase_RegionSizeReportingChore_getInitialDelay_rdh | /**
* Extracts the initial delay for the chore from the configuration.
*
* @param conf
* The configuration object.
* @return The configured chore initial delay or the default value.
*/
static long getInitialDelay(Configuration conf) {
return conf.getLong(REGION_SIZE_REPORTING_CHORE_DELAY_KEY, REGION_SIZE_REPORTING_CHORE_DELAY_DEFAULT);
} | 3.26 |
hbase_RegionSizeReportingChore_getPeriod_rdh | /**
* Extracts the period for the chore from the configuration.
*
* @param conf
* The configuration object.
* @return The configured chore period or the default value.
*/
static int getPeriod(Configuration conf) {
return conf.getInt(REGION_SIZE_REPORTING_CHORE_PERIOD_KEY, REGION_SIZE_REPORTING_CHORE_PERIOD_DEFAULT);
} | 3.26 |
hbase_RootProcedureState_addRollbackStep_rdh | /**
* Called by the ProcedureExecutor after the procedure step is completed, to add the step to the
* rollback list (or procedure stack)
*/
protected synchronized void addRollbackStep(Procedure<TEnvironment> proc) {
if (proc.isFailed()) {
state = State.FAILED;
}
if (subprocStack == null) {
subprocStack = new ArrayList<>();
}
proc.addStackIndex(subprocStack.size());
LOG.trace("Add procedure {} as the {}th rollback step", proc, subprocStack.size());
subprocStack.add(proc);
} | 3.26 |
hbase_RootProcedureState_isValid_rdh | /**
* Called on store load by the ProcedureExecutor to validate the procedure stack.
*/
protected synchronized boolean isValid() {
if (subprocStack != null) {
for (Procedure<TEnvironment> proc : subprocStack) {
if (proc == null) {
return false;
}
}
}
return true;
} | 3.26 |
hbase_RootProcedureState_release_rdh | /**
* Called by the ProcedureExecutor to mark the procedure step as finished.
*/
protected synchronized void release(Procedure<TEnvironment> proc) {
running--;
} | 3.26 |
hbase_RootProcedureState_acquire_rdh | /**
* Called by the ProcedureExecutor to mark the procedure step as running.
*/
protected synchronized boolean acquire(Procedure<TEnvironment> proc) {
if (state != State.f0) {
return false;
}
running++;
return true;
} | 3.26 |
hbase_RootProcedureState_unsetRollback_rdh | /**
* Called by the ProcedureExecutor to mark rollback execution
*/
protected synchronized void unsetRollback() {
assert state == State.ROLLINGBACK;
state = State.FAILED;
} | 3.26 |
hbase_RootProcedureState_setRollback_rdh | /**
* Called by the ProcedureExecutor to mark rollback execution
*/
protected synchronized boolean setRollback() {
if ((running == 0) && (state == State.FAILED)) {
state = State.ROLLINGBACK;
return true;
}
return false;
} | 3.26 |
hbase_RootProcedureState_loadStack_rdh | /**
* Called on store load by the ProcedureExecutor to load part of the stack. Each procedure has its
* own stack-positions. Which means we have to write to the store only the Procedure we executed,
* and nothing else. on load we recreate the full stack by aggregating each procedure
* stack-positions.
*/
protected synchronized void loadStack(Procedure<TEnvironment> proc) {
addSubProcedure(proc);
int[] stackIndexes = proc.getStackIndexes();
if (stackIndexes != null) {
if (subprocStack == null) {
subprocStack = new ArrayList<>();
}
int diff = (1 + stackIndexes[stackIndexes.length - 1]) - subprocStack.size();
if (diff > 0) {
subprocStack.ensureCapacity(1 + stackIndexes[stackIndexes.length - 1]);
while ((diff--) > 0) {
subprocStack.add(null);
}
}
for (int i = 0; i < stackIndexes.length; ++i) {
subprocStack.set(stackIndexes[i], proc);
}
}
if (proc.getState() == ProcedureState.ROLLEDBACK) {
state = State.ROLLINGBACK;
} else if (proc.isFailed()) {
state = State.FAILED;
}
} | 3.26 |
hbase_NettyUnsafeUtils_getTotalPendingOutboundBytes_rdh | /**
* Get total bytes pending write to socket
*/
public static long getTotalPendingOutboundBytes(Channel channel) {
ChannelOutboundBuffer outboundBuffer = channel.unsafe().outboundBuffer();
// can be null when the channel is closing
if (outboundBuffer == null) {
return 0;
}
return outboundBuffer.totalPendingWriteBytes();
} | 3.26 |
hbase_NettyUnsafeUtils_closeImmediately_rdh | /**
* Directly closes the channel, setting SO_LINGER to 0 and skipping any handlers in the pipeline.
* This is useful for cases where it's important to immediately close without any delay.
* Otherwise, pipeline handlers and even general TCP flows can cause a normal close to take
* upwards of a few seconds or more. This will likely cause the client side to see either a
* "Connection reset by peer" or unexpected ConnectionClosedException.
* <p>
* <b>It's necessary to call this from within the channel's eventLoop!</b>
*/
public static void closeImmediately(Channel channel) {
assert channel.eventLoop().inEventLoop();
channel.config().setOption(ChannelOption.SO_LINGER, 0);
channel.unsafe().close(channel.voidPromise());
} | 3.26 |
hbase_ReplicationThrottler_isEnabled_rdh | /**
* If throttling is enabled
*
* @return true if throttling is enabled
*/
public boolean isEnabled() {
return this.enabled;
} | 3.26 |
hbase_ReplicationThrottler_getNextSleepInterval_rdh | /**
* Get how long the caller should sleep according to the current size and current cycle's total
* push size and start tick, return the sleep interval for throttling control.
*
* @param size
* is the size of edits to be pushed
* @return sleep interval for throttling control
*/
public long getNextSleepInterval(final int size) {
if (!this.enabled) {
return 0;
}
long sleepTicks = 0;
long now = EnvironmentEdgeManager.currentTime();
// 1. if cyclePushSize exceeds bandwidth, we need to sleep some
// following cycles to amortize, this case can occur when a single push
// exceeds the bandwidth
if (((double) (this.cyclePushSize)) > bandwidth) {
double cycles = Math.ceil(((double) (this.cyclePushSize)) / bandwidth);
long shouldTillTo = this.cycleStartTick + ((long) (cycles * 100));
if (shouldTillTo > now) {
sleepTicks = shouldTillTo - now;
} else {
// no reset in shipEdits since no sleep, so we need to reset cycleStartTick here!
this.cycleStartTick = now;
}
this.cyclePushSize = 0;
} else {
long nextCycleTick = this.cycleStartTick + 100;// a cycle is 100ms
if (now >= nextCycleTick) {
// 2. switch to next cycle if the current cycle has passed
this.cycleStartTick = now;
this.cyclePushSize = 0;
} else if ((this.cyclePushSize > 0) && (((double) (this.cyclePushSize + size)) >= bandwidth)) {
// 3. delay the push to next cycle if exceeds throttling bandwidth.
// enforcing cyclePushSize > 0 to avoid the unnecessary sleep for case
// where a cycle's first push size(currentSize) > bandwidth
sleepTicks = nextCycleTick - now;
this.cyclePushSize = 0;
}
}
return sleepTicks;
} | 3.26 |
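A hedged usage sketch showing how a shipping loop might drive the throttler methods shown in these rows (`getNextSleepInterval`, `resetStartTick`, `addPushSize`). The batch iteration and the `shipBatch` call are hypothetical stand-ins, not HBase code; the import path assumes the usual `replication.regionserver` package, and construction of the throttler is left to the caller:

```java
import java.util.List;

import org.apache.hadoop.hbase.replication.regionserver.ReplicationThrottler;

public class ThrottledShipperSketch {
  // Hypothetical shipping loop around the throttler API shown above.
  static void shipAll(ReplicationThrottler throttler, List<byte[]> batches)
      throws InterruptedException {
    for (byte[] batch : batches) {
      long sleepMs = throttler.getNextSleepInterval(batch.length);
      if (sleepMs > 0) {
        Thread.sleep(sleepMs);
        throttler.resetStartTick(); // a fresh 100ms cycle starts after sleeping
      }
      shipBatch(batch);
      throttler.addPushSize(batch.length);
    }
  }

  static void shipBatch(byte[] batch) {
    // stand-in: real code would push the batch to the peer cluster here
  }
}
```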
hbase_ReplicationThrottler_addPushSize_rdh | /**
* Add current size to the current cycle's total push size
*
* @param size
* is the current size added to the current cycle's total push size
*/
public void addPushSize(final long size) {
if (this.enabled) {
this.cyclePushSize += size;
}
} | 3.26 |
hbase_ReplicationThrottler_resetStartTick_rdh | /**
* Reset the cycle start tick to NOW
*/
public void resetStartTick() {
if (this.enabled) {
this.cycleStartTick = EnvironmentEdgeManager.currentTime();
}
} | 3.26 |
hbase_LeaseManager_closeAfterLeasesExpire_rdh | /**
* Shuts down this lease instance when all outstanding leases expire. Like {@link #close()} but
* rather than violently end all leases, waits first on extant leases to finish. Use this method
* if the lease holders could lose data, leak locks, etc. Presumes client has shutdown allocation
* of new leases.
*/
public void closeAfterLeasesExpire() {
this.stopRequested = true;
} | 3.26 |
hbase_LeaseManager_getListener_rdh | /**
* Returns listener
*/
public LeaseListener getListener() {
return this.listener;
} | 3.26 |
hbase_LeaseManager_addLease_rdh | /**
* Inserts lease. Resets expiration before insertion.
*/
public void addLease(final Lease lease) throws LeaseStillHeldException {
if (this.stopRequested) {
return;
}
if (f0.containsKey(lease.getLeaseName())) {
throw new LeaseStillHeldException(lease.getLeaseName());
}
lease.resetExpirationTime();
f0.put(lease.getLeaseName(), lease);
} | 3.26 |
hbase_LeaseManager_getLeaseName_rdh | /**
* Returns the lease name
*/
public String getLeaseName() {
return leaseName;
} | 3.26 |
hbase_LeaseManager_resetExpirationTime_rdh | /**
* Resets the expiration time of the lease.
*/
public void resetExpirationTime() {
this.expirationTime = EnvironmentEdgeManager.currentTime() + this.leaseTimeoutPeriod;
} | 3.26 |
hbase_LeaseManager_cancelLease_rdh | /**
* Client explicitly cancels a lease.
*
* @param leaseName
* name of lease
*/
public void cancelLease(final String leaseName) throws LeaseException {
removeLease(leaseName);
} | 3.26 |
hbase_LeaseManager_createLease_rdh | /**
* Create a lease and insert it to the map of leases.
*
* @param leaseName
* name of the lease
* @param leaseTimeoutPeriod
* length of the lease in milliseconds
* @param listener
* listener that will process lease expirations
* @return The lease created.
*/
public Lease createLease(String leaseName, int leaseTimeoutPeriod, final LeaseListener listener) throws
LeaseStillHeldException {
Lease lease = new Lease(leaseName, leaseTimeoutPeriod, listener);
addLease(lease);
return lease;
} | 3.26 |
hbase_LeaseManager_renewLease_rdh | /**
* Renew a lease
*
* @param leaseName
* name of the lease
*/
public void renewLease(final String leaseName) throws LeaseException {
if (this.stopRequested) {
return;
}
Lease lease = f0.get(leaseName);
if (lease == null) {
throw new LeaseException(("lease '" + leaseName) + "' does not exist or has already expired");
}
lease.resetExpirationTime();
} | 3.26 |
hbase_LeaseManager_removeLease_rdh | /**
* Remove named lease. Lease is removed from the map of leases.
*
* @param leaseName
* name of lease
* @return Removed lease
*/
Lease removeLease(final String leaseName) throws LeaseException {
Lease lease = f0.remove(leaseName);
if (lease == null) {
throw new LeaseException(("lease '" + leaseName) + "' does not exist");
}
return lease;
} | 3.26 |
hbase_LeaseManager_close_rdh | /**
* Shut down this Leases instance. All pending leases will be destroyed, without any cancellation
* calls.
*/
public void close() {
this.stopRequested = true;
f0.clear();
LOG.info("Closed leases");
} | 3.26 |
hbase_LeaseManager_getName_rdh | /**
* Returns name of lease
*/
  public String getName() {
    return this.f1;
} | 3.26 |
hbase_NamespacesResource_get_rdh | /**
* Build a response for a list of all namespaces request.
*
* @param context
* servlet context
* @param uriInfo
* (JAX-RS context variable) request URL
 * @return a response for a list of all namespaces request
*/
@GET
@Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF })
  public Response get(@Context final ServletContext context,
    @Context final UriInfo uriInfo) {
if (LOG.isTraceEnabled()) {
LOG.trace("GET " + uriInfo.getAbsolutePath());
}
servlet.getMetrics().incrementRequests(1);
try {
      NamespacesModel rowModel = null;
rowModel = new NamespacesModel(servlet.getAdmin());
servlet.getMetrics().incrementSucessfulGetRequests(1);
return Response.ok(rowModel).build();
} catch (IOException e) {
servlet.getMetrics().incrementFailedGetRequests(1);
throw new RuntimeException("Cannot retrieve list of namespaces.");
}
} | 3.26 |
hbase_NamespacesResource_getNamespaceInstanceResource_rdh | /**
* Dispatch to NamespaceInstanceResource
*/
@Path("{namespace}")
public NamespacesInstanceResource getNamespaceInstanceResource(@PathParam("namespace")
final String namespace) throws IOException {
return new NamespacesInstanceResource(namespace);
} | 3.26 |
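The {namespace} method above is a JAX-RS sub-resource locator: it carries a @Path but no HTTP-method annotation, so the runtime dispatches /namespaces/{namespace} to the object it returns. Below is a standalone sketch of the same pattern with hypothetical classes (not the HBase REST module, and assuming the javax.ws.rs API is on the classpath).

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;

@Path("/namespaces")
class NamespacesSketch {
  @GET
  @Produces("text/plain")
  public String list() {
    return "default\nhbase\n"; // a real resource would build this from cluster metadata
  }

  // Locator: no @GET/@PUT here, the returned object handles the verbs for one namespace.
  @Path("{namespace}")
  public NamespaceInstanceSketch one(@PathParam("namespace") String namespace) {
    return new NamespaceInstanceSketch(namespace);
  }
}

class NamespaceInstanceSketch {
  private final String namespace;

  NamespaceInstanceSketch(String namespace) {
    this.namespace = namespace;
  }

  @GET
  @Produces("text/plain")
  public String describe() {
    return "namespace: " + namespace;
  }
}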
hbase_CompactionThroughputControllerFactory_resolveDeprecatedClassName_rdh | /**
 * Resolve deprecated class name to keep backward compatibility
*
* @param oldName
* old name of the class
* @return the new name if there is any
*/
private static String resolveDeprecatedClassName(String oldName) {
String className = oldName.trim();
if (className.equals(DEPRECATED_NAME_OF_PRESSURE_AWARE_THROUGHPUT_CONTROLLER_CLASS)) {
className = PressureAwareCompactionThroughputController.class.getName();
} else if (className.equals(DEPRECATED_NAME_OF_NO_LIMIT_THROUGHPUT_CONTROLLER_CLASS)) {
className = NoLimitThroughputController.class.getName();
}
    if (!className.equals(oldName)) {
LOG.warn(((oldName + " is deprecated, please use ") + className) + " instead");
}
return className;
} | 3.26 |
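The same backward-compatibility idea in a standalone form: translate deprecated configuration values into current class names before loading them, and warn when a translation happens. The class names and the map-based lookup below are hypothetical, not the HBase implementation.

import java.util.Map;

final class DeprecatedNameResolver {
  private static final Map<String, String> RENAMES = Map.of(
    "org.example.compact.OldThroughputController", "org.example.compact.PressureAwareThroughputController",
    "org.example.compact.OldNoLimitController", "org.example.compact.NoLimitThroughputController");

  static String resolve(String configuredName) {
    String trimmed = configuredName.trim();
    String current = RENAMES.getOrDefault(trimmed, trimmed);
    if (!current.equals(trimmed)) {
      System.out.println(trimmed + " is deprecated, please use " + current + " instead");
    }
    return current;
  }
}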
hbase_RegionNormalizerWorkQueue_putFirst_rdh | /**
* Inserts the specified element at the head of the queue.
*
* @param e
* the element to add
*/
public void putFirst(E e) {
if (e == null) {
throw new NullPointerException();
}
putAllFirst(Collections.singleton(e));
} | 3.26 |
hbase_RegionNormalizerWorkQueue_take_rdh | /**
* Retrieves and removes the head of this queue, waiting if necessary until an element becomes
* available.
*
* @return the head of this queue
* @throws InterruptedException
* if interrupted while waiting
*/
public E take() throws InterruptedException {
    E x;
    // Take a write lock. If the delegate's queue is empty we need it to await(), which will
// drop the lock, then reacquire it; or if the queue is not empty we will use an iterator
// to mutate the head.
lock.writeLock().lockInterruptibly();
try {
while (delegate.isEmpty()) {
        notEmpty.await(); // await drops the lock, then reacquires it
}
final Iterator<E> iter = delegate.iterator();
x = iter.next();
iter.remove();
if (!delegate.isEmpty()) {
notEmpty.signal();
}
} finally {
lock.writeLock().unlock();
}
return x;
} | 3.26 |
hbase_RegionNormalizerWorkQueue_put_rdh | /**
* Inserts the specified element at the tail of the queue, if it's not already present.
*
* @param e
* the element to add
*/
public void put(E e) {
    if (e == null) {
      throw new NullPointerException();
    }
lock.writeLock().lock();
try {
delegate.add(e);
if (!delegate.isEmpty()) {
notEmpty.signal();
      }
    } finally {
lock.writeLock().unlock();
}
} | 3.26 |
hbase_RegionNormalizerWorkQueue_clear_rdh | /**
* Atomically removes all of the elements from this queue. The queue will be empty after this call
* returns.
*/
public void clear() {
lock.writeLock().lock();
    try {
      delegate.clear();
} finally {
lock.writeLock().unlock();
}
} | 3.26 |
hbase_RegionNormalizerWorkQueue_putAll_rdh | /**
* Inserts the specified elements at the tail of the queue. Any elements already present in the
* queue are ignored.
*
* @param c
* the elements to add
*/
public void putAll(Collection<? extends E> c) {
if (c == null) {
throw new NullPointerException();
}
lock.writeLock().lock();
try {
delegate.addAll(c);
if (!delegate.isEmpty()) {
notEmpty.signal();
}
} finally {
lock.writeLock().unlock();
}
} | 3.26 |
hbase_RegionNormalizerWorkQueue_size_rdh | /**
* Returns the number of elements in this queue.
*
* @return the number of elements in this queue
*/
public int size() {
lock.readLock().lock();
try {
      return delegate.size();
    } finally {
      lock.readLock().unlock();
    }
} | 3.26 |
hbase_RegionNormalizerWorkQueue_putAllFirst_rdh | /**
* Inserts the specified elements at the head of the queue.
*
* @param c
* the elements to add
*/
  public void putAllFirst(Collection<? extends E> c) {
    if (c == null) {
      throw new NullPointerException();
    }
lock.writeLock().lock();
try {
final LinkedHashSet<E> copy = new LinkedHashSet<>(c.size() + delegate.size());
copy.addAll(c);
copy.addAll(delegate);
delegate = copy;
if (!delegate.isEmpty()) {
notEmpty.signal();
}
} finally {
lock.writeLock().unlock();
}
} | 3.26 |
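A hedged usage sketch of the queue's semantics as implemented above: the backing LinkedHashSet drops duplicate submissions, putFirst/putAllFirst move work to the head, and take() blocks until something is available. The no-arg constructor and the visibility of the class are assumptions.

final class WorkQueueSketch {
  static void demo() throws InterruptedException {
    RegionNormalizerWorkQueue<String> queue = new RegionNormalizerWorkQueue<>(); // assumed no-arg ctor
    queue.put("table-a");
    queue.put("table-a");           // duplicate: ignored, the backing set already holds it
    queue.put("table-b");
    queue.putFirst("urgent-table"); // jumps to the head of the queue
    String next = queue.take();     // returns "urgent-table"; would block if the queue were empty
    System.out.println(next + ", " + queue.size() + " item(s) left"); // urgent-table, 2 item(s) left
  }
}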
hbase_HRegionWALFileSystem_archiveRecoveredEdits_rdh | /**
* Closes and archives the specified store files from the specified family.
*
* @param familyName
 * Family that contains the store files
* @param storeFiles
* set of store files to remove
* @throws IOException
* if the archiving fails
*/
public void archiveRecoveredEdits(String familyName, Collection<HStoreFile> storeFiles) throws IOException {
HFileArchiver.archiveRecoveredEdits(this.conf, this.fs, this.regionInfoForFs, Bytes.toBytes(familyName), storeFiles);
} | 3.26 |
hbase_MobFileCache_getMissCount_rdh | /**
* Gets the count of misses to the mob file cache.
*
* @return The count of misses to the mob file cache.
*/
public long getMissCount() {
return miss.sum();
} | 3.26 |
hbase_MobFileCache_hashFileName_rdh | /**
 * Use murmurhash to reduce collisions among hashed file names. Note that hash collisions can
 * lead to deadlocks when opening mob files while evicting some other files, as
* described in HBASE-28047.
*/
private long hashFileName(String fileName) {
    return Hashing.murmur3_128().hashString(fileName, StandardCharsets.UTF_8).asLong();
  } | 3.26 |
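One sketch of why a low-collision 64-bit name hash matters here: if the hash selects a lock stripe, two file names that collide end up contending on (or, in the worst case, deadlocking over) the same stripe. The striping class below is hypothetical; it only reuses the Guava murmur3 call from the snippet above, with an unshaded Guava import (HBase itself ships a relocated copy).

import java.nio.charset.StandardCharsets;
import java.util.concurrent.locks.ReentrantLock;
import com.google.common.hash.Hashing;

final class FileNameStripes {
  private final ReentrantLock[] stripes = new ReentrantLock[64];

  FileNameStripes() {
    for (int i = 0; i < stripes.length; i++) {
      stripes[i] = new ReentrantLock();
    }
  }

  ReentrantLock lockFor(String fileName) {
    long h = Hashing.murmur3_128().hashString(fileName, StandardCharsets.UTF_8).asLong();
    return stripes[(int) Math.floorMod(h, (long) stripes.length)];
  }
}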
hbase_MobFileCache_evict_rdh | /**
* Evicts the lru cached mob files when the count of the cached files is larger than the
* threshold.
*/
public void evict() {
if (isCacheEnabled) {
// Ensure only one eviction at a time
if (!evictionLock.tryLock()) {
return;
}
printStatistics();
List<CachedMobFile> evictedFiles = new ArrayList<>();
      try {
        if (map.size() <= mobFileMaxCacheSize) {
          return;
        }
List<CachedMobFile> files = new ArrayList<>(map.values());
Collections.sort(files);
        int start = ((int) (mobFileMaxCacheSize * evictRemainRatio));
if (start >= 0) {
for (int i = start; i < files.size(); i++) {
String name = files.get(i).getFileName();
CachedMobFile evictedFile = map.remove(name);
if (evictedFile != null) {
evictedFiles.add(evictedFile);
}
}
}
} finally {
evictionLock.unlock();
}
// EvictionLock is released. Close the evicted files one by one.
// The closes are sync in the closeFile method.
for (CachedMobFile evictedFile : evictedFiles) {
closeFile(evictedFile);
}
evictedFileCount.add(evictedFiles.size());
}
} | 3.26 |
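A simplified, standalone sketch of the eviction shape above: rank the cached entries, keep only maxCacheSize * remainRatio of the best-ranked ones, remove the rest from the map, and let the caller close them after any lock is released. The name-to-last-access map and the ranking below are hypothetical simplifications of CachedMobFile's ordering.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

final class CacheTrimSketch {
  private final Map<String, Long> lastAccess = new ConcurrentHashMap<>();
  private final int maxCacheSize = 1000;
  private final double remainRatio = 0.5;

  // Returns the evicted names; the caller closes the corresponding files outside any lock.
  List<String> trim() {
    if (lastAccess.size() <= maxCacheSize) {
      return List.of();
    }
    List<Map.Entry<String, Long>> ranked = new ArrayList<>(lastAccess.entrySet());
    ranked.sort(Map.Entry.<String, Long>comparingByValue(Comparator.reverseOrder())); // most recent first
    int keep = (int) (maxCacheSize * remainRatio);
    List<String> evicted = new ArrayList<>();
    for (int i = keep; i < ranked.size(); i++) {
      String name = ranked.get(i).getKey();
      if (lastAccess.remove(name) != null) {
        evicted.add(name);
      }
    }
    return evicted;
  }
}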
hbase_MobFileCache_getAccessCount_rdh | /**
* Gets the count of accesses to the mob file cache.
*
* @return The count of accesses to the mob file cache.
*/
  public long getAccessCount() {
    return count.get();
} | 3.26 |
hbase_MobFileCache_getCacheSize_rdh | /**
* Gets the count of cached mob files.
*
* @return The count of the cached mob files.
*/
public int getCacheSize() {
return map == null ? 0 : map.size();
} | 3.26 |
hbase_MobFileCache_getEvictedFileCount_rdh | /**
* Gets the number of items evicted from the mob file cache.
*
* @return The number of items evicted from the mob file cache.
*/
public long getEvictedFileCount() {
return evictedFileCount.sum();
} | 3.26 |
hbase_MobFileCache_getHitRatio_rdh | /**
* Gets the hit ratio to the mob file cache.
*
* @return The hit ratio to the mob file cache.
*/
public double getHitRatio() {
return count.get() == 0 ? 0 : ((float) (count.get() - miss.sum())) / ((float) (count.get()));
} | 3.26 |
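A worked example of the ratio above: 200 accesses with 50 misses gives (200 - 50) / 200 = 0.75, and a cache that has never been accessed reports 0 rather than dividing by zero.

public class HitRatioExample {
  public static void main(String[] args) {
    long accesses = 200;
    long misses = 50;
    double hitRatio = accesses == 0 ? 0 : (double) (accesses - misses) / accesses;
    System.out.println(hitRatio); // prints 0.75
  }
}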