name | code_snippet | score
---|---|---
hbase_PrettyPrinter_valueOf_rdh | /**
* Convert a human readable string to its value.
*
* @see org.apache.hadoop.hbase.util.PrettyPrinter#format(String, Unit)
* @return the value corresponding to the human readable string
*/
public static String valueOf(final String pretty, final Unit unit) throws HBaseException {
StringBuilder value = new StringBuilder();
switch (unit) {
case TIME_INTERVAL:
value.append(humanReadableIntervalToSec(pretty));
break;
case BYTE:
value.append(humanReadableSizeToBytes(pretty));
break;
default:
value.append(pretty);
}
return value.toString();
} | 3.26 |
hbase_PrettyPrinter_toString_rdh | /**
* Pretty prints a collection of any type to a string. Relies on toString() implementation of the
* object type.
*
* @param collection
* collection to pretty print.
* @return Pretty printed string for the collection.
*/
public static String toString(Collection<?> collection) {
List<String> stringList = new ArrayList<>();
for (Object o : collection) {
stringList.add(Objects.toString(o));
}
return ("[" + String.join(",", stringList)) + "]";
} | 3.26 |
hbase_PrettyPrinter_humanReadableIntervalToSec_rdh | /**
 * Convert a human readable time interval to seconds. Examples of human readable time
 * intervals are: 50 DAYS 1 HOUR 30 MINUTES, 25000 SECONDS, etc. The units of time may be
 * specified in uppercase or lowercase. If a single number is given without any time unit,
 * it is assumed to be in seconds.
*
* @return value in seconds
*/
private static long humanReadableIntervalToSec(final String humanReadableInterval) throws HBaseException {
if ((humanReadableInterval == null) || humanReadableInterval.equalsIgnoreCase("FOREVER")) {
return HConstants.FOREVER;
}
try {
return Long.parseLong(humanReadableInterval);
} catch (NumberFormatException ex) {
LOG.debug("Given interval value is not a number, parsing for human readable format");
}
String days = null;
String hours = null;
String minutes = null;
String v12 = null;
String expectedTtl = null;
long ttl;
Matcher matcher = PrettyPrinter.INTERVAL_PATTERN.matcher(humanReadableInterval);
if (matcher.matches()) {
expectedTtl = matcher.group(2);
days = matcher.group(4);
hours = matcher.group(6);
minutes = matcher.group(8);
v12 = matcher.group(10);
} else {
LOG.warn("Given interval value '{}' is not a number and does not match human readable format," + " value will be set to 0.", humanReadableInterval);}
ttl = 0;
ttl += (days != null) ? Long.parseLong(days) * HConstants.DAY_IN_SECONDS : 0;
ttl += (hours != null) ? Long.parseLong(hours) * HConstants.HOUR_IN_SECONDS : 0;
ttl += (minutes != null) ? Long.parseLong(minutes) * HConstants.MINUTE_IN_SECONDS : 0;
ttl += (v12 != null) ? Long.parseLong(v12) : 0;
if ((expectedTtl != null) && (Long.parseLong(expectedTtl) != ttl)) {
throw new HBaseException("Malformed TTL string: TTL values in seconds and human readable" + "format do not match");
}
return ttl;
} | 3.26 |
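The interval grammar described above is easiest to see through the public `valueOf` entry point from the first snippet. A minimal, hedged sketch (the import paths are the usual HBase ones and may differ between versions; the expected values follow from the DAY/HOUR/MINUTE-in-seconds constants referenced above):

```java
import org.apache.hadoop.hbase.exceptions.HBaseException;
import org.apache.hadoop.hbase.util.PrettyPrinter;
import org.apache.hadoop.hbase.util.PrettyPrinter.Unit;

public class IntervalExample {
  public static void main(String[] args) throws HBaseException {
    // "50 DAYS 1 HOUR 30 MINUTES" -> 50*86400 + 1*3600 + 30*60 = 4,325,400 seconds
    String seconds = PrettyPrinter.valueOf("50 DAYS 1 HOUR 30 MINUTES", Unit.TIME_INTERVAL);
    System.out.println(seconds); // expected: 4325400

    // A bare number is treated as seconds, and "FOREVER" maps to HConstants.FOREVER.
    System.out.println(PrettyPrinter.valueOf("25000", Unit.TIME_INTERVAL)); // 25000
  }
}
```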
hbase_RestoreSnapshotHelper_hasRegionsToRestore_rdh | /**
 * Returns true if there are regions to restore
*/
public boolean hasRegionsToRestore() {
return (this.regionsToRestore != null) && (this.regionsToRestore.size() > 0);
} | 3.26 |
hbase_RestoreSnapshotHelper_hasRegionsToRemove_rdh | /**
 * Returns true if there are regions to remove
*/
public boolean hasRegionsToRemove() {
return (this.regionsToRemove != null) && (this.regionsToRemove.size() > 0);
} | 3.26 |
hbase_RestoreSnapshotHelper_getRegionsToRestore_rdh | /**
 * Returns the list of 'restored regions' during the on-disk restore. The caller is responsible
 * for adding the regions to hbase:meta if they are not already present.
*
* @return the list of regions restored
*/
public List<RegionInfo> getRegionsToRestore() {
return this.regionsToRestore;
} | 3.26 |
hbase_RestoreSnapshotHelper_getRegionsToRemove_rdh | /**
 * Returns the list of regions removed during the on-disk restore. The caller is responsible for
 * removing the regions from META, e.g. MetaTableAccessor.deleteRegions(...)
*
* @return the list of regions to remove from META
*/
public List<RegionInfo> getRegionsToRemove() {
return this.regionsToRemove;
} | 3.26 |
hbase_RestoreSnapshotHelper_removeHdfsRegions_rdh | /**
* Remove specified regions from the file-system, using the archiver.
*/
private void removeHdfsRegions(final ThreadPoolExecutor exec, final List<RegionInfo> regions) throws IOException {
if ((regions == null) || regions.isEmpty())
return;
ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() {
@Override
public void editRegion(final RegionInfo hri) throws IOException {
HFileArchiver.archiveRegion(conf, f0, hri);
}
});
} | 3.26 |
hbase_RestoreSnapshotHelper_restoreStoreFile_rdh | /**
* Create a new {@link HFileLink} to reference the store file.
* <p>
* The store file in the snapshot can be a simple hfile, an HFileLink or a reference.
* <ul>
* <li>hfile: abc -> table=region-abc
* <li>reference: abc.1234 -> table=region-abc.1234
* <li>hfilelink: table=region-hfile -> table=region-hfile
* </ul>
*
* @param familyDir
* destination directory for the store file
* @param regionInfo
* destination region info for the table
* @param createBackRef
 * whether a back reference should be created. Defaults to true.
* @param storeFile
* store file name (can be a Reference, HFileLink or simple HFile)
*/
private String restoreStoreFile(final Path familyDir, final RegionInfo regionInfo, final SnapshotRegionManifest.StoreFile storeFile, final boolean createBackRef) throws IOException {
String hfileName = storeFile.getName();
if (HFileLink.isHFileLink(hfileName)) {
return HFileLink.createFromHFileLink(conf, f0, familyDir, hfileName, createBackRef);
} else if (StoreFileInfo.isReference(hfileName)) {
return restoreReferenceFile(familyDir, regionInfo, storeFile);
} else {
return HFileLink.create(conf, f0, familyDir, regionInfo, hfileName, createBackRef);
}
} | 3.26 |
hbase_RestoreSnapshotHelper_cloneRegion_rdh | /**
* Clone region directory content from the snapshot info. Each region is encoded with the table
 * name, so the cloned region will have a different region name. Instead of copying the hfiles,
 * an HFileLink is created.
*
* @param region
* {@link HRegion} cloned
*/
private void cloneRegion(final HRegion region, final RegionInfo snapshotRegionInfo, final SnapshotRegionManifest manifest) throws IOException {
cloneRegion(region.getRegionInfo(), new Path(tableDir, region.getRegionInfo().getEncodedName()), snapshotRegionInfo, manifest);
} | 3.26 |
hbase_RestoreSnapshotHelper_copySnapshotForScanner_rdh | /**
* Copy the snapshot files for a snapshot scanner, discards meta changes.
*/
public static RestoreMetaChanges copySnapshotForScanner(Configuration conf, FileSystem fs, Path rootDir, Path restoreDir, String snapshotName) throws IOException {
// ensure that restore dir is not under root dir
if (!restoreDir.getFileSystem(conf).getUri().equals(rootDir.getFileSystem(conf).getUri())) {
throw new IllegalArgumentException("Filesystems for restore directory and HBase root " + "directory should be the same");
}
if (restoreDir.toUri().getPath().startsWith(rootDir.toUri().getPath() + "/")) {
throw new IllegalArgumentException(((("Restore directory cannot be a sub directory of HBase " + "root directory. RootDir: ") + rootDir) + ", restoreDir: ") + restoreDir);
}
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
SnapshotManifest v88 = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc);
MonitoredTask status = TaskMonitor.get().createStatus((("Restoring snapshot '" + snapshotName) + "' to directory ") + restoreDir);
ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher();
// we send createBackRefs=false so that restored hfiles do not create back reference links
// in the base hbase root dir.
RestoreSnapshotHelper helper = new RestoreSnapshotHelper(conf, fs, v88, v88.getTableDescriptor(), restoreDir, monitor, status, false);
RestoreMetaChanges metaChanges = helper.restoreHdfsRegions(); // TODO: parallelize.
if (LOG.isDebugEnabled()) {
LOG.debug("Restored table dir:" + restoreDir);
CommonFSUtils.logFileSystemState(fs, restoreDir, LOG);
}
return metaChanges;
} | 3.26 |
hbase_RestoreSnapshotHelper_restoreRegion_rdh | /**
* Restore region by removing files not in the snapshot and adding the missing ones from the
* snapshot.
*/
private void restoreRegion(final RegionInfo regionInfo, final SnapshotRegionManifest regionManifest,
Path regionDir) throws IOException {
Map<String, List<SnapshotRegionManifest.StoreFile>> snapshotFiles = getRegionHFileReferences(regionManifest);
String tableName = tableDesc.getTableName().getNameAsString();
final String snapshotName = snapshotDesc.getName();
Path v22 = new Path(tableDir, regionInfo.getEncodedName());
HRegionFileSystem regionFS = (f0.exists(v22)) ? HRegionFileSystem.openRegionFromFileSystem(conf, f0, tableDir, regionInfo, false) : HRegionFileSystem.createRegionOnFileSystem(conf, f0, tableDir, regionInfo);
// Restore families present in the table
for (Path familyDir : FSUtils.getFamilyDirs(f0, regionDir)) {
byte[] v25 = Bytes.toBytes(familyDir.getName());
Set<String> familyFiles = getTableRegionFamilyFiles(familyDir);
List<SnapshotRegionManifest.StoreFile> snapshotFamilyFiles = snapshotFiles.remove(familyDir.getName());
List<StoreFileInfo> filesToTrack = new ArrayList<>();
if (snapshotFamilyFiles != null) {
List<SnapshotRegionManifest.StoreFile> hfilesToAdd = new ArrayList<>();
for (SnapshotRegionManifest.StoreFile storeFile : snapshotFamilyFiles) {
if (familyFiles.contains(storeFile.getName())) {
// HFile already present
familyFiles.remove(storeFile.getName());
// no need to restore already present files, but we need to add those to tracker
filesToTrack.add(new StoreFileInfo(conf, f0, new Path(familyDir, storeFile.getName()), true));
} else {
// HFile missing
hfilesToAdd.add(storeFile);
}
}
// Remove hfiles not present in the snapshot
for (String hfileName : familyFiles) {
Path hfile = new Path(familyDir, hfileName);
if (!f0.getFileStatus(hfile).isDirectory()) {
LOG.trace((((((("Removing HFile=" + hfileName) + " not present in snapshot=") + snapshotName) + " from region=")
+ regionInfo.getEncodedName()) +
" table=") + tableName);
HFileArchiver.archiveStoreFile(conf, f0, regionInfo, tableDir, v25, hfile);
}
}
// Restore Missing files
for (SnapshotRegionManifest.StoreFile storeFile : hfilesToAdd) {
LOG.debug((((((("Restoring missing HFileLink " + storeFile.getName()) + " of snapshot=") + snapshotName) + " to region=") + regionInfo.getEncodedName()) + " table=") + tableName);String fileName = restoreStoreFile(familyDir, regionInfo, storeFile, createBackRefs);
// mark the reference file to be added to tracker
filesToTrack.add(new StoreFileInfo(conf, f0, new Path(familyDir, fileName), true));
}
} else {
// Family doesn't exist in the snapshot
LOG.trace("Removing family=" + Bytes.toString(v25) + " in snapshot=" + snapshotName
+ " from region=" + regionInfo.getEncodedName() + " table=" + tableName);
HFileArchiver.archiveFamilyByFamilyDir(f0, conf, regionInfo, familyDir, v25);
f0.delete(familyDir, true);
}
StoreFileTracker tracker = StoreFileTrackerFactory.create(conf, true, StoreContext.getBuilder().withFamilyStoreDirectoryPath(familyDir).withRegionFileSystem(regionFS).build());
// simply reset list of tracked files with the matching files
// and the extra one present in the snapshot
tracker.set(filesToTrack);
}
// Add families not present in the table
for (Map.Entry<String, List<SnapshotRegionManifest.StoreFile>> familyEntry :
snapshotFiles.entrySet()) {
Path familyDir = new Path(regionDir, familyEntry.getKey());
StoreFileTracker tracker = StoreFileTrackerFactory.create(conf, true, StoreContext.getBuilder().withFamilyStoreDirectoryPath(familyDir).withRegionFileSystem(regionFS).build());
List<StoreFileInfo> files = new ArrayList<>();
if (!f0.mkdirs(familyDir)) {
throw new IOException("Unable to create familyDir=" + familyDir);
}
for (SnapshotRegionManifest.StoreFile v40 : familyEntry.getValue()) {
LOG.trace((((("Adding HFileLink (Not present in the table) " + v40.getName()) + " of snapshot ") +
snapshotName) + " to table=") + tableName);
String v41 = restoreStoreFile(familyDir, regionInfo, v40, createBackRefs);
files.add(new StoreFileInfo(conf, f0, new Path(familyDir, v41), true));
}
tracker.set(files);
}
} | 3.26 |
hbase_RestoreSnapshotHelper_getRegionsToAdd_rdh | /**
 * Returns the list of new regions added during the on-disk restore. The caller is responsible
 * for adding the regions to META, e.g. MetaTableAccessor.addRegionsToMeta(...)
*
* @return the list of regions to add to META
*/
public List<RegionInfo> getRegionsToAdd() {
return this.regionsToAdd;
} | 3.26 |
hbase_RestoreSnapshotHelper_getTableRegionFamilyFiles_rdh | /**
* Returns The set of files in the specified family directory.
*/
private Set<String> getTableRegionFamilyFiles(final Path familyDir) throws
IOException {
FileStatus[] hfiles = CommonFSUtils.listStatus(f0, familyDir);
if (hfiles == null) {
return Collections.emptySet();
}
Set<String> familyFiles = new HashSet<>(hfiles.length);
for (int i = 0; i < hfiles.length; ++i) {
String hfileName = hfiles[i].getPath().getName();
familyFiles.add(hfileName);
}
return familyFiles;
} | 3.26 |
hbase_RestoreSnapshotHelper_restoreMobRegion_rdh | /**
* Restore mob region by removing files not in the snapshot and adding the missing ones from the
* snapshot.
*/
private void restoreMobRegion(final RegionInfo regionInfo, final SnapshotRegionManifest regionManifest) throws IOException {
if (regionManifest == null) {
return;
}
restoreRegion(regionInfo, regionManifest, MobUtils.getMobRegionPath(conf, tableDesc.getTableName()));
} | 3.26 |
hbase_RestoreSnapshotHelper_restoreReferenceFile_rdh | /**
* Create a new {@link Reference} as copy of the source one.
* <p>
* <blockquote>
*
* <pre>
* The source table looks like:
* 1234/abc (original file)
* 5678/abc.1234 (reference file)
*
* After the clone operation looks like:
* wxyz/table=1234-abc
* stuv/table=1234-abc.wxyz
*
* NOTE that the region name in the clone changes (md5 of regioninfo)
* and the reference should reflect that change.
* </pre>
*
* </blockquote>
*
* @param familyDir
* destination directory for the store file
* @param regionInfo
* destination region info for the table
* @param storeFile
* reference file name
*/
private String restoreReferenceFile(final Path familyDir, final RegionInfo regionInfo, final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
String hfileName = storeFile.getName();
// Extract the referred information (hfile name and parent region)
Path refPath = StoreFileInfo.getReferredToFile(new Path(new Path(new Path(new Path(snapshotTable.getNamespaceAsString(), snapshotTable.getQualifierAsString()), regionInfo.getEncodedName()), familyDir.getName()), hfileName));
String snapshotRegionName = refPath.getParent().getParent().getName();
String fileName = refPath.getName();
// The new reference should have the cloned region name as parent, if it is a clone.
String clonedRegionName = Bytes.toString(regionsMap.get(Bytes.toBytes(snapshotRegionName)));
if (clonedRegionName == null)
clonedRegionName = snapshotRegionName;
// The output file should be a reference link table=snapshotRegion-fileName.clonedRegionName
Path linkPath = null;
String refLink = fileName;
if (!HFileLink.isHFileLink(fileName)) {
refLink = HFileLink.createHFileLinkName(snapshotTable, snapshotRegionName, fileName);
linkPath = new Path(familyDir, HFileLink.createHFileLinkName(snapshotTable, regionInfo.getEncodedName(), hfileName));
}
Path outPath = new Path(familyDir, (refLink + '.') + clonedRegionName);
// Create the new reference
if (storeFile.hasReference()) {
Reference reference = Reference.convert(storeFile.getReference());
reference.write(f0, outPath);
} else {
InputStream in;
if (linkPath != null) {
in = HFileLink.buildFromHFileLinkPattern(conf, linkPath).open(f0);
} else {
linkPath = new Path(new Path(HRegion.getRegionDir(snapshotManifest.getSnapshotDir(), regionInfo.getEncodedName()), familyDir.getName()), hfileName);
in = f0.open(linkPath);
}
OutputStream out = f0.create(outPath);
IOUtils.copyBytes(in, out, conf);
}
// Add the daughter region to the map
String regionName = Bytes.toString(regionsMap.get(regionInfo.getEncodedNameAsBytes()));
if (regionName == null) {
regionName = regionInfo.getEncodedName();
}
LOG.debug((("Restore reference " + regionName) + " to ") + clonedRegionName);
synchronized(parentsMap) {
Pair<String, String> daughters = parentsMap.get(clonedRegionName);
if (daughters == null) {
// In case one side of the split is already compacted, regionName is put as both first and
// second of Pair
daughters = new Pair<>(regionName, regionName);
parentsMap.put(clonedRegionName, daughters);
} else if (!regionName.equals(daughters.getFirst())) {
daughters.setSecond(regionName);
}
}
return outPath.getName();
} | 3.26 |
hbase_RestoreSnapshotHelper_getTableRegions_rdh | /**
 * Returns the list of regions contained in the table
 */
private List<RegionInfo> getTableRegions() throws IOException {
LOG.debug("get table regions: " + tableDir);
FileStatus[] regionDirs = CommonFSUtils.listStatus(f0, tableDir, new FSUtils.RegionDirFilter(f0));
if (regionDirs == null) {
return null;
}
List<RegionInfo> regions = new ArrayList<>(regionDirs.length);
for (int i = 0; i < regionDirs.length; ++i) {
RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(f0, regionDirs[i].getPath());
regions.add(hri);
}
LOG.debug((("found " + regions.size()) + " regions for table=") + tableDesc.getTableName().getNameAsString());
return regions;
} | 3.26 |
hbase_RestoreSnapshotHelper_restoreHdfsMobRegions_rdh | /**
* Restore specified mob regions by restoring content to the snapshot state.
*/
private void restoreHdfsMobRegions(final ThreadPoolExecutor exec, final
Map<String, SnapshotRegionManifest> regionManifests, final List<RegionInfo> regions) throws IOException {
if ((regions == null) || regions.isEmpty())
return;
ModifyRegionUtils.editRegions(exec, regions,
new ModifyRegionUtils.RegionEditTask() {
@Override
public void editRegion(final RegionInfo hri) throws IOException {
restoreMobRegion(hri, regionManifests.get(hri.getEncodedName()));
}
});
} | 3.26 |
hbase_RestoreSnapshotHelper_getParentToChildrenPairMap_rdh | /**
 * Returns the map of parent region to the pair of its children regions.
*
* @return the map
*/
public Map<String, Pair<String, String>> getParentToChildrenPairMap() {
return this.parentsMap;
} | 3.26 |
hbase_RestoreSnapshotHelper_m0_rdh | /**
* Create a new {@link RegionInfo} from the snapshot region info. Keep the same startKey, endKey,
* regionId and split information but change the table name.
*
* @param snapshotRegionInfo
* Info for region to clone.
 * @return the new RegionInfo instance
*/
public RegionInfo m0(final RegionInfo snapshotRegionInfo) {
return cloneRegionInfo(tableDesc.getTableName(), snapshotRegionInfo);
} | 3.26 |
hbase_RestoreSnapshotHelper_cloneHdfsMobRegion_rdh | /**
 * Clone the mob region. For the region, create a new region and an HFileLink for each hfile.
*/
private void cloneHdfsMobRegion(final Map<String, SnapshotRegionManifest> regionManifests, final RegionInfo region) throws IOException {
// clone region info (change embedded tableName with the new one)
Path clonedRegionPath = MobUtils.getMobRegionPath(rootDir, tableDesc.getTableName());
cloneRegion(MobUtils.getMobRegionInfo(tableDesc.getTableName()), clonedRegionPath, region, regionManifests.get(region.getEncodedName()));
} | 3.26 |
hbase_RestoreSnapshotHelper_cloneHdfsRegions_rdh | /**
 * Clone the specified regions. For each region, create a new region and an HFileLink for each
 * hfile.
*/
private RegionInfo[] cloneHdfsRegions(final ThreadPoolExecutor exec, final
Map<String, SnapshotRegionManifest> regionManifests, final List<RegionInfo> regions) throws IOException {
if ((regions == null) || regions.isEmpty())
return null;
final Map<String, RegionInfo> snapshotRegions = new HashMap<>(regions.size());
final String snapshotName = snapshotDesc.getName();
// clone region info (change embedded tableName with the new one)
RegionInfo[] clonedRegionsInfo = new RegionInfo[regions.size()];
for (int i = 0; i < clonedRegionsInfo.length; ++i) {
// clone the region info from the snapshot region info
RegionInfo snapshotRegionInfo = regions.get(i);
clonedRegionsInfo[i] = m0(snapshotRegionInfo);
// add the region name mapping between snapshot and cloned
String v51 = snapshotRegionInfo.getEncodedName();
String clonedRegionName = clonedRegionsInfo[i].getEncodedName();
regionsMap.put(Bytes.toBytes(v51), Bytes.toBytes(clonedRegionName));
LOG.info("clone region=" + v51 + " as " + clonedRegionName + " in snapshot " + snapshotName);
// Add mapping between cloned region name and snapshot region info
snapshotRegions.put(clonedRegionName, snapshotRegionInfo);
}
// create the regions on disk
ModifyRegionUtils.createRegions(exec, conf, rootDir, tableDesc, clonedRegionsInfo, new ModifyRegionUtils.RegionFillTask() {
@Override
public void fillRegion(final HRegion region) throws IOException {
RegionInfo snapshotHri = snapshotRegions.get(region.getRegionInfo().getEncodedName());
cloneRegion(region, snapshotHri, regionManifests.get(snapshotHri.getEncodedName()));
}
});
return clonedRegionsInfo;
} | 3.26 |
hbase_RestoreSnapshotHelper_restoreHdfsRegions_rdh | /**
* Restore specified regions by restoring content to the snapshot state.
*/
private void restoreHdfsRegions(final ThreadPoolExecutor exec, final Map<String, SnapshotRegionManifest> regionManifests, final List<RegionInfo> regions) throws IOException {
if ((regions == null) || regions.isEmpty())
return;
ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() {
@Override
public void editRegion(final RegionInfo hri) throws IOException {
restoreRegion(hri, regionManifests.get(hri.getEncodedName()));
}
});
} | 3.26 |
hbase_RestoreSnapshotHelper_hasRegionsToAdd_rdh | /**
 * Returns true if there are new regions to add
*/
public boolean hasRegionsToAdd() {
return (this.regionsToAdd != null) && (this.regionsToAdd.size() > 0);
} | 3.26 |
hbase_IndexBlockEncoding_getId_rdh | /**
 * Returns the id of this index block encoding.
*/
public short getId() {
return f0;
} | 3.26 |
hbase_IndexBlockEncoding_getNameInBytes_rdh | /**
* Returns name converted to bytes.
*/
public byte[] getNameInBytes() {
return Bytes.toBytes(toString());
} | 3.26 |
hbase_IndexBlockEncoding_getNameFromId_rdh | /**
 * Find and return the name of the index block encoding for the given id.
 *
 * @param encoderId
 * id of the index block encoding
* @return name, same as used in options in column family
*/
public static String getNameFromId(short encoderId) {
return getEncodingById(encoderId).toString();
} | 3.26 |
hbase_IndexBlockEncoding_writeIdInBytes_rdh | /**
* Writes id bytes to the given array starting from offset.
*
* @param dest
* output array
* @param offset
* starting offset of the output array
*/
public void writeIdInBytes(byte[] dest, int offset) throws IOException {
System.arraycopy(idInBytes, 0, dest, offset, ID_SIZE);
} | 3.26 |
hbase_FanOutOneBlockAsyncDFSOutputHelper_createOutput_rdh | /**
 * Create a {@link FanOutOneBlockAsyncDFSOutput}. The method may block, so do not call it
* inside an {@link EventLoop}.
*/
public static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, Path f, boolean overwrite, boolean createParent, short replication, long blockSize, EventLoopGroup eventLoopGroup, Class<? extends Channel> channelClass, final StreamSlowMonitor monitor) throws IOException {
return new FileSystemLinkResolver<FanOutOneBlockAsyncDFSOutput>() {
@Override
public FanOutOneBlockAsyncDFSOutput doCall(Path p) throws IOException, UnresolvedLinkException {
return createOutput(dfs, p.toUri().getPath(), overwrite, createParent, replication, blockSize, eventLoopGroup, channelClass, monitor);
}
@Override
public FanOutOneBlockAsyncDFSOutput next(FileSystem fs, Path p) throws IOException {
throw new UnsupportedOperationException();
}
}.resolve(dfs, f);
} | 3.26 |
hbase_WALProcedureStore_getMaxLogId_rdh | /**
 * Make sure that the file set is obtained by calling {@link #getLogFiles()}, which sorts
 * the files by log id.
*
* @return Max-LogID of the specified log file set
*/
private static long getMaxLogId(FileStatus[] logFiles) {
if ((logFiles == null) || (logFiles.length == 0)) {
return 0L;
}
return getLogIdFromName(logFiles[logFiles.length - 1].getPath().getName());
} | 3.26 |
hbase_WALProcedureStore_removeInactiveLogs_rdh | // ==========================================================================
// Log Files cleaner helpers
// ==========================================================================
private void removeInactiveLogs() throws IOException {
// We keep track of which procedures are holding the oldest WAL in 'holdingCleanupTracker'.
// Once there is nothing holding the oldest WAL we can remove it.
while ((logs.size() > 1) && holdingCleanupTracker.isEmpty()) {
LOG.info("Remove the oldest log {}", logs.getFirst());
removeLogFile(logs.getFirst(), walArchiveDir);
buildHoldingCleanupTracker();
}
// TODO: In case we are holding up a lot of logs for long time we should
// rewrite old procedures (in theory parent procs) to the new WAL.
} | 3.26 |
hbase_WALProcedureStore_initTrackerFromOldLogs_rdh | /**
* If last log's tracker is not null, use it as {@link #storeTracker}. Otherwise, set storeTracker
* as partial, and let {@link ProcedureWALFormatReader} rebuild it using entries in the log.
*/
private void initTrackerFromOldLogs() {
if (logs.isEmpty() || (!isRunning())) {
return;
}
ProcedureWALFile log = logs.getLast();
if (!log.getTracker().isPartial()) {
storeTracker.resetTo(log.getTracker());
} else {
storeTracker.reset();
storeTracker.setPartialFlag(true);
}
} | 3.26 |
hbase_WALProcedureStore_removeAllLogs_rdh | /**
* Remove all logs with logId <= {@code lastLogId}.
*/
private void removeAllLogs(long lastLogId, String why) {
if (logs.size() <= 1) {
return;
}
LOG.info("Remove all state logs with ID less than {}, since {}",
lastLogId, why);
boolean removed = false;
while (logs.size() > 1) {
ProcedureWALFile log = logs.getFirst();
if (lastLogId < log.getLogId()) {
break;
}
removeLogFile(log, walArchiveDir);
removed = true;
}
if (removed) {
buildHoldingCleanupTracker();
}
} | 3.26 |
hbase_WALProcedureStore_initOldLog_rdh | /**
 * Loads the given log file and its tracker.
*/
private ProcedureWALFile initOldLog(final FileStatus logFile, final Path walArchiveDir) throws IOException {
final ProcedureWALFile log = new ProcedureWALFile(fs, logFile);
if (logFile.getLen() == 0) {
LOG.warn("Remove uninitialized log: {}", logFile);
log.removeFile(walArchiveDir);
return null;
}
LOG.debug("Opening Pv2 {}",
logFile);
try {
log.open();
} catch (ProcedureWALFormat.InvalidWALDataException e) {
LOG.warn("Remove uninitialized log: {}", logFile, e);
log.removeFile(walArchiveDir);
return null;
} catch (IOException e) {
String msg = "Unable to read state log: " + logFile;
LOG.error(msg, e);
throw new IOException(msg, e);
}
try {
log.readTracker();
} catch (IOException e) {
log.getTracker().reset();
log.getTracker().setPartialFlag(true);
LOG.warn("Unable to read tracker for {}", log, e);
}
log.close();
return log;
} | 3.26 |
hbase_WALProcedureStore_main_rdh | /**
* Parses a directory of WALs building up ProcedureState. For testing parse and profiling.
*
* @param args
* Include pointer to directory of WAL files for a store instance to parse & load.
*/
public static void main(String[] args) throws IOException {
Configuration conf = HBaseConfiguration.create();
if ((args == null) || (args.length != 1)) {
System.out.println("ERROR: Empty arguments list; pass path to MASTERPROCWALS_DIR.");
System.out.println("Usage: WALProcedureStore MASTERPROCWALS_DIR");
System.exit(-1);
}
WALProcedureStore store = new WALProcedureStore(conf, new Path(args[0]), null, new LeaseRecovery() {
@Override
public void recoverFileLease(FileSystem fs, Path path) throws IOException {
// no-op
}
});
try {
store.start(16);
ProcedureExecutor<?> pe = /* Pass anything */
new ProcedureExecutor<>(conf, new Object(), store);
pe.init(1, true);
} finally {
store.stop(true);
}
} | 3.26 |
hbase_WALProcedureStore_initOldLogs_rdh | /**
 * Make sure that the file set is obtained by calling {@link #getLogFiles()}, which sorts
 * the files by log id.
*
* @return Max-LogID of the specified log file set
*/
private long initOldLogs(FileStatus[] logFiles) throws IOException {
if ((logFiles == null) || (logFiles.length == 0)) {
return 0L;
}
long maxLogId = 0;
for (int i = 0; i < logFiles.length; ++i) {
final Path logPath = logFiles[i].getPath();
leaseRecovery.recoverFileLease(fs, logPath);
if (!isRunning()) {
throw new IOException("wal aborting");
}
maxLogId = Math.max(maxLogId, getLogIdFromName(logPath.getName()));
ProcedureWALFile log = initOldLog(logFiles[i], this.walArchiveDir);
if (log != null) {
this.logs.add(log);
}
}
initTrackerFromOldLogs();
return maxLogId;
} | 3.26 |
hbase_TableMapReduceUtil_getJar_rdh | /**
* Invoke 'getJar' on a custom JarFinder implementation. Useful for some job configuration
* contexts (HBASE-8140) and also for testing on MRv2. check if we have HADOOP-9426.
*
* @param my_class
* the class to find.
* @return a jar file that contains the class, or null.
 */
private static String getJar(Class<?> my_class) {
String ret = null;
try {
ret = JarFinder.getJar(my_class);
} catch (Exception e) {
// toss all other exceptions, related to reflection failure
throw new RuntimeException("getJar invocation failed.", e);
}
return ret;
} | 3.26 |
hbase_TableMapReduceUtil_findContainingJar_rdh | /**
* Find a jar that contains a class of the same name, if any. It will return a jar file, even if
* that is not the first thing on the class path that has a class with the same name. Looks first
* on the classpath and then in the <code>packagedClasses</code> map.
*
* @param my_class
* the class to find.
* @return a jar file that contains the class, or null.
*/
private static String findContainingJar(Class<?> my_class, Map<String, String> packagedClasses) throws IOException {
ClassLoader loader = my_class.getClassLoader();
String class_file = my_class.getName().replaceAll("\\.", "/") + ".class";
if (loader != null) {
// first search the classpath
for (Enumeration<URL> itr = loader.getResources(class_file); itr.hasMoreElements();) {
URL v35 = itr.nextElement();
if ("jar".equals(v35.getProtocol())) {
String toReturn = v35.getPath();
if (toReturn.startsWith("file:")) {
toReturn = toReturn.substring("file:".length());
}
// URLDecoder is a misnamed class, since it actually decodes
// x-www-form-urlencoded MIME type rather than actual
// URL encoding (which the file path has). Therefore it would
// decode +s to ' 's which is incorrect (spaces are actually
// either unencoded or encoded as "%20"). Replace +s first, so
// that they are kept sacred during the decoding process.
toReturn = toReturn.replaceAll("\\+", "%2B");
toReturn = URLDecoder.decode(toReturn, "UTF-8");
return toReturn.replaceAll("!.*$", "");}}
}
// now look in any jars we've packaged using JarFinder. Returns null when
// no jar is found.
return packagedClasses.get(class_file);
} | 3.26 |
hbase_TableMapReduceUtil_addHBaseDependencyJars_rdh | /**
* Add HBase and its dependencies (only) to the job configuration.
* <p>
* This is intended as a low-level API, facilitating code reuse between this class and its mapred
 * counterpart. It is also of use to external tools that need to build a MapReduce job that
 * interacts with HBase but want fine-grained control over the jars shipped to the cluster.
* </p>
*
* @param conf
* The Configuration object to extend with dependencies.
* @see org.apache.hadoop.hbase.mapred.TableMapReduceUtil
* @see <a href="https://issues.apache.org/jira/browse/PIG-3285">PIG-3285</a>
*/
public static void addHBaseDependencyJars(Configuration conf) throws IOException {
// explicitly pull a class from each module
// hbase-common
// hbase-protocol-shaded
// hbase-client
// hbase-server
// hbase-hadoop-compat
// hbase-hadoop2-compat
// hbase-mapreduce
// hbase-metrics
// hbase-metrics-api
// hbase-replication
// hbase-http
// hbase-procedure
// hbase-zookeeper
// hb-shaded-miscellaneous
// hbase-shaded-gson
// hb-sh-protobuf
// hbase-shaded-netty
// hbase-unsafe
// zookeeper
// metrics-core
// commons-lang
// opentelemetry-api
// opentelemetry-semconv
addDependencyJarsForClasses(conf, HConstants.class, ClientProtos.class, Put.class, RpcServer.class, CompatibilityFactory.class, JobUtil.class, TableMapper.class, FastLongHistogram.class, Snapshot.class, ReplicationUtils.class,
HttpServer.class, Procedure.class, ZKWatcher.class, Lists.class, GsonBuilder.class, UnsafeByteOperations.class, Channel.class, HBasePlatformDependent.class, ZooKeeper.class, MetricRegistry.class, ArrayUtils.class, Span.class, SemanticAttributes.class, Context.class);// opentelemetry-context
} | 3.26 |
hbase_TableMapReduceUtil_updateMap_rdh | /**
* Add entries to <code>packagedClasses</code> corresponding to class files contained in
* <code>jar</code>.
*
* @param jar
 * The jar whose contents to list.
* @param packagedClasses
* map[class -> jar]
*/
private static void updateMap(String jar, Map<String, String> packagedClasses) throws IOException {
if ((null == jar) || jar.isEmpty()) {
return;
}
ZipFile zip = null;
try {
zip = new ZipFile(jar);
for (Enumeration<? extends ZipEntry> iter = zip.entries(); iter.hasMoreElements();) {
ZipEntry entry = iter.nextElement();
if (entry.getName().endsWith("class")) {
packagedClasses.put(entry.getName(), jar);
}
}
} finally {
if (null != zip)
zip.close();
}
} | 3.26 |
hbase_TableMapReduceUtil_setNumReduceTasks_rdh | /**
* Sets the number of reduce tasks for the given job configuration to the number of regions the
* given table has.
*
* @param table
* The table to get the region count for.
* @param job
* The current job to adjust.
* @throws IOException
* When retrieving the table details fails.
*/
public static void setNumReduceTasks(String table, Job job) throws IOException {
job.setNumReduceTasks(getRegionCount(job.getConfiguration(), TableName.valueOf(table)));
} | 3.26 |
hbase_TableMapReduceUtil_buildDependencyClasspath_rdh | /**
* Returns a classpath string built from the content of the "tmpjars" value in {@code conf}. Also
* exposed to shell scripts via `bin/hbase mapredcp`.
*/
public static String buildDependencyClasspath(Configuration conf) {
if (conf == null) {
throw new IllegalArgumentException("Must provide a configuration object.");
}
Set<String> paths = new HashSet<>(conf.getStringCollection("tmpjars"));
if (paths.isEmpty()) {
throw new IllegalArgumentException("Configuration contains no tmpjars.");
}
StringBuilder sb = new StringBuilder();
for (String s : paths) {
// entries can take the form 'file:/path/to/file.jar'.
int idx = s.indexOf(":");
if (idx != -1)
s = s.substring(idx + 1);
if (sb.length() > 0)
sb.append(File.pathSeparator);
sb.append(s);
}
return sb.toString();
} | 3.26 |
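A small usage sketch of the method above (the jar paths are made up for illustration; the owning class is `org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil`, as the snippet names suggest):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;

public class ClasspathExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // "tmpjars" entries may carry a scheme prefix; the method strips everything up to ':'.
    conf.setStrings("tmpjars",
      "file:/opt/hbase/lib/hbase-client.jar",
      "file:/opt/hbase/lib/hbase-common.jar");
    // Prints something like "/opt/hbase/lib/hbase-client.jar:/opt/hbase/lib/hbase-common.jar"
    // (entries separated by File.pathSeparator; order is unspecified because a HashSet is used).
    System.out.println(TableMapReduceUtil.buildDependencyClasspath(conf));
  }
}
```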
hbase_TableMapReduceUtil_initMultiTableSnapshotMapperJob_rdh | /**
* Sets up the job for reading from one or more table snapshots, with one or more scans per
 * snapshot. It bypasses hbase servers and reads directly from snapshot files.
*
* @param snapshotScans
* map of snapshot name to scans on that snapshot.
* @param mapper
* The mapper class to use.
* @param outputKeyClass
* The class of the output key.
* @param outputValueClass
* The class of the output value.
* @param job
* The current job to adjust. Make sure the passed job is carrying all
* necessary HBase configuration.
* @param addDependencyJars
* upload HBase jars and jars for any of the configured job classes via
* the distributed cache (tmpjars).
*/
public static void initMultiTableSnapshotMapperJob(Map<String, Collection<Scan>> snapshotScans, Class<? extends TableMapper> mapper, Class<?> outputKeyClass, Class<?> outputValueClass, Job job, boolean addDependencyJars, Path tmpRestoreDir) throws IOException {
MultiTableSnapshotInputFormat.setInput(job.getConfiguration(), snapshotScans, tmpRestoreDir);
job.setInputFormatClass(MultiTableSnapshotInputFormat.class);
if (outputValueClass != null) {
job.setMapOutputValueClass(outputValueClass);
}
if (outputKeyClass != null) {
job.setMapOutputKeyClass(outputKeyClass);
}
job.setMapperClass(mapper);
Configuration v1 = job.getConfiguration();
HBaseConfiguration.merge(v1, HBaseConfiguration.create(v1));
if (addDependencyJars) {
addDependencyJars(job);
addDependencyJarsForClasses(job.getConfiguration(), MetricRegistry.class);
}
resetCacheConfig(job.getConfiguration());
} | 3.26 |
hbase_TableMapReduceUtil_resetCacheConfig_rdh | /**
* Enable a basic on-heap cache for these jobs. Any BlockCache implementation based on direct
* memory will likely cause the map tasks to OOM when opening the region. This is done here
* instead of in TableSnapshotRegionRecordReader in case an advanced user wants to override this
* behavior in their job.
 */
public static void resetCacheConfig(Configuration conf) {
conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT);
conf.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 0.0F);
conf.unset(HConstants.BUCKET_CACHE_IOENGINE_KEY);
} | 3.26 |
hbase_TableMapReduceUtil_convertStringToScan_rdh | /**
* Converts the given Base64 string back into a Scan instance.
*
* @param base64
* The scan details.
* @return The newly created Scan instance.
* @throws IOException
* When reading the scan instance fails.
*/
public static Scan convertStringToScan(String base64) throws IOException {
byte[] decoded = Base64.getDecoder().decode(base64);
return ProtobufUtil.toScan(ClientProtos.Scan.parseFrom(decoded));
} | 3.26 |
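This method pairs with `convertScanToString`, which appears later in this listing. A minimal round-trip sketch (the column family and caching value are arbitrary examples):

```java
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanRoundTrip {
  public static void main(String[] args) throws Exception {
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("cf"));
    scan.setCaching(500);

    // Serialize the Scan to a Base64 string (e.g. to store it in a job Configuration) ...
    String encoded = TableMapReduceUtil.convertScanToString(scan);
    // ... and rebuild an equivalent Scan from it on the other side.
    Scan decoded = TableMapReduceUtil.convertStringToScan(encoded);
    System.out.println(decoded.getCaching()); // 500
  }
}
```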
hbase_TableMapReduceUtil_limitNumReduceTasks_rdh | /**
* Ensures that the given number of reduce tasks for the given job configuration does not exceed
* the number of regions for the given table.
*
* @param table
* The table to get the region count for.
* @param job
* The current job to adjust.
* @throws IOException
* When retrieving the table details fails.
*/
public static void limitNumReduceTasks(String table, Job job) throws IOException {
int regions = getRegionCount(job.getConfiguration(), TableName.valueOf(table));
if (job.getNumReduceTasks() > regions) {
job.setNumReduceTasks(regions);
}
} | 3.26 |
hbase_TableMapReduceUtil_initCredentialsForCluster_rdh | /**
* Obtain an authentication token, for the specified cluster, on behalf of the current user and
* add it to the credentials for the given map reduce job.
*
* @param job
* The job that requires the permission.
* @param conf
* The configuration to use in connecting to the peer cluster
* @throws IOException
* When the authentication token cannot be obtained.
 */
public static void initCredentialsForCluster(Job job, Configuration conf) throws IOException {
UserProvider userProvider = UserProvider.instantiate(conf);
if (userProvider.isHBaseSecurityEnabled()) {
try {
Connection peerConn = ConnectionFactory.createConnection(conf);
try {
TokenUtil.addTokenForJob(peerConn, userProvider.getCurrent(), job);
} finally {
peerConn.close();
}
} catch (InterruptedException e) {
LOG.info("Interrupted obtaining user authentication token");Thread.interrupted();
}
}
} | 3.26 |
hbase_TableMapReduceUtil_addDependencyJarsForClasses_rdh | /**
* Add the jars containing the given classes to the job's configuration such that JobClient will
* ship them to the cluster and add them to the DistributedCache. N.B. that this method at most
* adds one jar per class given. If there is more than one jar available containing a class with
* the same name as a given class, we don't define which of those jars might be chosen.
*
* @param conf
* The Hadoop Configuration to modify
* @param classes
* will add just those dependencies needed to find the given classes
* @throws IOException
* if an underlying library call fails.
*/
@InterfaceAudience.Private
public static void addDependencyJarsForClasses(Configuration conf, Class<?>... classes) throws IOException {
FileSystem localFs = FileSystem.getLocal(conf);
Set<String> jars = new HashSet<>();
// Add jars that are already in the tmpjars variable
jars.addAll(conf.getStringCollection("tmpjars"));
// add jars as we find them to a map of contents jar name so that we can avoid
// creating new jars for classes that have already been packaged.
Map<String, String> packagedClasses = new HashMap<>();
// Add jars containing the specified classes
for (Class<?> clazz : classes) {
if (clazz == null)
continue;
Path path = findOrCreateJar(clazz, localFs, packagedClasses);
if (path == null) {
LOG.warn("Could not find jar for class " + clazz + " in order to ship it to the cluster.");
continue;
}
if (!localFs.exists(path)) {
LOG.warn("Could not validate jar file " + path + " for class " + clazz);
continue;
}
jars.add(path.toString());
}
if (jars.isEmpty())
return;
conf.set("tmpjars", StringUtils.arrayToString(jars.toArray(new String[jars.size()])));
} | 3.26 |
hbase_TableMapReduceUtil_initTableReducerJob_rdh | /**
* Use this before submitting a TableReduce job. It will appropriately set up the JobConf.
*
* @param table
* The output table.
* @param reducer
* The reducer class to use.
* @param job
* The current job to adjust. Make sure the passed job is carrying all
* necessary HBase configuration.
* @param partitioner
* Partitioner to use. Pass <code>null</code> to use default partitioner.
* @param quorumAddress
* Distant cluster to write to; default is null for output to the cluster
* that is designated in <code>hbase-site.xml</code>. Set this String to
* the zookeeper ensemble of an alternate remote cluster when you would
* have the reduce write a cluster that is other than the default; e.g.
* copying tables between clusters, the source would be designated by
* <code>hbase-site.xml</code> and this param would have the ensemble
* address of the remote cluster. The format to pass is particular. Pass
 * <code><hbase.zookeeper.quorum>:<hbase.zookeeper.client.port>:<zookeeper.znode.parent></code>,
 * such as <code>server,server2,server3:2181:/hbase</code>.
* @param serverClass
* redefined hbase.regionserver.class
* @param serverImpl
* redefined hbase.regionserver.impl
* @param addDependencyJars
* upload HBase jars and jars for any of the configured job classes via
* the distributed cache (tmpjars).
* @throws IOException
* When determining the region count fails.
*/
public static void initTableReducerJob(String table, Class<? extends TableReducer> reducer, Job job, Class partitioner, String quorumAddress, String serverClass, String serverImpl, boolean addDependencyJars) throws IOException {
Configuration conf = job.getConfiguration();
HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf));
job.setOutputFormatClass(TableOutputFormat.class);
if (reducer != null)
job.setReducerClass(reducer);
conf.set(TableOutputFormat.OUTPUT_TABLE, table);
conf.setStrings("io.serializations", conf.get("io.serializations"), MutationSerialization.class.getName(), ResultSerialization.class.getName());
// If passed a quorum/ensemble address, pass it on to TableOutputFormat.
if (quorumAddress != null) {
// Calling this will validate the format
ZKConfig.validateClusterKey(quorumAddress);
conf.set(TableOutputFormat.QUORUM_ADDRESS, quorumAddress);
}
if ((serverClass != null) && (serverImpl != null)) {
conf.set(TableOutputFormat.REGION_SERVER_CLASS, serverClass);
conf.set(TableOutputFormat.REGION_SERVER_IMPL, serverImpl);
}
job.setOutputKeyClass(ImmutableBytesWritable.class);
job.setOutputValueClass(Writable.class);
if (partitioner == HRegionPartitioner.class) {
job.setPartitionerClass(HRegionPartitioner.class);
int v17 = getRegionCount(conf, TableName.valueOf(table));
if (job.getNumReduceTasks() > v17) {
job.setNumReduceTasks(v17);
}
} else if (partitioner != null) {
job.setPartitionerClass(partitioner);
}
if (addDependencyJars) {
addDependencyJars(job);
}
initCredentials(job);
} | 3.26 |
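A hedged usage sketch of the overload above, writing to a table on another cluster via the documented quorum-address form (the table name is hypothetical; `IdentityTableReducer` is the stock pass-through reducer shipped with the HBase mapreduce module):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.IdentityTableReducer;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class ReducerJobExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "copy-to-remote-cluster");
    // Pass the remote ensemble as "quorum:clientPort:znodeParent"; pass null to write locally.
    TableMapReduceUtil.initTableReducerJob(
      "target_table",                       // hypothetical output table name
      IdentityTableReducer.class,           // reducer that passes mutations through
      job,
      null,                                 // default partitioner
      "server,server2,server3:2181:/hbase", // remote quorum address (example from the javadoc)
      null, null,                           // keep default region server class/impl
      true);                                // ship HBase dependency jars with the job
  }
}
```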
hbase_TableMapReduceUtil_findOrCreateJar_rdh | /**
* Finds the Jar for a class or creates it if it doesn't exist. If the class is in a directory in
* the classpath, it creates a Jar on the fly with the contents of the directory and returns the
* path to that Jar. If a Jar is created, it is created in the system temporary directory.
* Otherwise, returns an existing jar that contains a class of the same name. Maintains a mapping
* from jar contents to the tmp jar created.
*
* @param my_class
* the class to find.
* @param fs
* the FileSystem with which to qualify the returned path.
* @param packagedClasses
* a map of class name to path.
* @return a jar file that contains the class.
*/
private static Path findOrCreateJar(Class<?> my_class, FileSystem fs, Map<String, String> packagedClasses) throws IOException {
// attempt to locate an existing jar for the class.
String jar = findContainingJar(my_class, packagedClasses);
if ((null == jar) || jar.isEmpty()) {
jar = getJar(my_class);updateMap(jar, packagedClasses);
}
if ((null ==
jar) || jar.isEmpty()) {
return null;
}
LOG.debug(String.format("For class %s, using jar %s", my_class.getName(), jar));
return new Path(jar).makeQualified(fs.getUri(), fs.getWorkingDirectory());
} | 3.26 |
hbase_TableMapReduceUtil_initTableSnapshotMapperJob_rdh | /**
 * Sets up the job for reading from a table snapshot. It bypasses hbase servers and reads directly
* from snapshot files.
*
* @param snapshotName
* The name of the snapshot (of a table) to read from.
* @param scan
* The scan instance with the columns, time range etc.
* @param mapper
* The mapper class to use.
* @param outputKeyClass
* The class of the output key.
* @param outputValueClass
* The class of the output value.
* @param job
* The current job to adjust. Make sure the passed job is carrying all
* necessary HBase configuration.
* @param addDependencyJars
* upload HBase jars and jars for any of the configured job classes via
* the distributed cache (tmpjars).
* @param tmpRestoreDir
* a temporary directory to copy the snapshot files into. Current user
* should have write permissions to this directory, and this should not
* be a subdirectory of rootdir. After the job is finished, restore
* directory can be deleted.
* @param splitAlgo
* algorithm to split
* @param numSplitsPerRegion
* how many input splits to generate per one region
* @throws IOException
* When setting up the details fails.
* @see TableSnapshotInputFormat
*/
public static void initTableSnapshotMapperJob(String snapshotName, Scan scan, Class<? extends TableMapper> mapper, Class<?> outputKeyClass, Class<?> outputValueClass, Job job, boolean addDependencyJars, Path tmpRestoreDir, RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) throws IOException {
TableSnapshotInputFormat.setInput(job, snapshotName, tmpRestoreDir, splitAlgo, numSplitsPerRegion);
initTableMapperJob(snapshotName, scan, mapper, outputKeyClass, outputValueClass, job, addDependencyJars, false, TableSnapshotInputFormat.class);
resetCacheConfig(job.getConfiguration());
} | 3.26 |
hbase_TableMapReduceUtil_addDependencyJars_rdh | /**
* Add the HBase dependency jars as well as jars for any of the configured job classes to the job
* configuration, so that JobClient will ship them to the cluster and add them to the
* DistributedCache.
*/
public static void addDependencyJars(Job job) throws IOException {
addHBaseDependencyJars(job.getConfiguration());
try {
// when making changes here, consider also mapred.TableMapReduceUtil
// pull job classes
addDependencyJarsForClasses(job.getConfiguration(), job.getMapOutputKeyClass(), job.getMapOutputValueClass(), job.getInputFormatClass(), job.getOutputKeyClass(), job.getOutputValueClass(), job.getOutputFormatClass(),
job.getPartitionerClass(), job.getCombinerClass());
} catch (ClassNotFoundException e) {
throw new IOException(e);
}
}
/**
* Add the jars containing the given classes to the job's configuration such that JobClient will
* ship them to the cluster and add them to the DistributedCache.
*
* @deprecated since 1.3.0 and will be removed in 3.0.0. Use {@link #addDependencyJars(Job)} | 3.26 |
hbase_TableMapReduceUtil_convertScanToString_rdh | /**
* Writes the given scan into a Base64 encoded string.
*
* @param scan
* The scan to write out.
* @return The scan saved in a Base64 encoded string.
* @throws IOException
* When writing the scan fails.
*/
public static String convertScanToString(Scan scan) throws IOException {
ClientProtos.Scan proto = ProtobufUtil.toScan(scan);
return Bytes.toString(Base64.getEncoder().encode(proto.toByteArray()));
} | 3.26 |
hbase_TableMapReduceUtil_setScannerCaching_rdh | /**
* Sets the number of rows to return and cache with each scanner iteration. Higher caching values
* will enable faster mapreduce jobs at the expense of requiring more heap to contain the cached
* rows.
*
* @param job
* The current job to adjust.
* @param batchSize
* The number of rows to return in batch with each scanner iteration.
*/
public static void setScannerCaching(Job job, int batchSize) {
job.getConfiguration().setInt("hbase.client.scanner.caching", batchSize);
} | 3.26 |
hbase_TableMapReduceUtil_initTableMapperJob_rdh | /**
* Use this before submitting a Multi TableMap job. It will appropriately set up the job.
*
* @param scans
* The list of {@link Scan} objects to read from.
* @param mapper
* The mapper class to use.
* @param outputKeyClass
* The class of the output key.
* @param outputValueClass
* The class of the output value.
* @param job
* The current job to adjust. Make sure the passed job is carrying all
* necessary HBase configuration.
* @param addDependencyJars
* upload HBase jars and jars for any of the configured job classes via
* the distributed cache (tmpjars).
* @param initCredentials
* whether to initialize hbase auth credentials for the job
* @throws IOException
* When setting up the details fails.
*/
public static void initTableMapperJob(List<Scan> scans, Class<? extends TableMapper> mapper, Class<?> outputKeyClass, Class<?>
outputValueClass, Job job, boolean addDependencyJars, boolean initCredentials) throws IOException {
job.setInputFormatClass(MultiTableInputFormat.class);
if (outputValueClass != null) {
job.setMapOutputValueClass(outputValueClass);
}
if (outputKeyClass != null) {
job.setMapOutputKeyClass(outputKeyClass);
}
job.setMapperClass(mapper);
Configuration conf = job.getConfiguration();
HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf));
List<String> scanStrings = new ArrayList<>();
for (Scan scan : scans) {
scanStrings.add(convertScanToString(scan));
}
job.getConfiguration().setStrings(MultiTableInputFormat.SCANS, scanStrings.toArray(new String[scanStrings.size()]));
if (addDependencyJars) {
addDependencyJars(job);
}
if (initCredentials) {
initCredentials(job);
}
} | 3.26 |
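A hedged sketch of preparing the list of scans for the multi-table overload above. The table names are illustrative; tagging each Scan with `Scan.SCAN_ATTRIBUTES_TABLE_NAME` is how MultiTableInputFormat associates a serialized scan with its table (an assumption based on that input format's API, which is not shown in this listing):

```java
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.Job;

public class MultiScanJob {
  static class PassThroughMapper extends TableMapper<ImmutableBytesWritable, Result> {
  }

  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(HBaseConfiguration.create(), "multi-scan");
    Scan scanA = new Scan();
    scanA.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes("table_a"));
    Scan scanB = new Scan();
    scanB.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes("table_b"));
    List<Scan> scans = Arrays.asList(scanA, scanB);

    TableMapReduceUtil.initTableMapperJob(scans, PassThroughMapper.class,
      ImmutableBytesWritable.class, Result.class, job,
      true,  // ship dependency jars
      true); // initialize HBase auth credentials
  }
}
```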
hbase_TaskGroup_addTask_rdh | /**
 * Add a new task to the group, optionally completing the last task in the group beforehand
*
* @param description
* the description of the new task
* @param withCompleteLast
* whether to complete the last task in the group
* @return the added new task
*/
public synchronized MonitoredTask addTask(String description, boolean withCompleteLast) {
if (withCompleteLast) {
MonitoredTask previousTask = this.tasks.peekLast();
if (((previousTask != null) && (previousTask.getState() != State.COMPLETE)) && (previousTask.getState() != State.ABORTED)) {
previousTask.markComplete("Completed");
}
}
MonitoredTask task = TaskMonitor.get().createStatus(description, ignoreSubTasksInTaskMonitor,
true);
this.setStatus(description);
this.tasks.addLast(task);
delegate.setStatus(description);
return task;
} | 3.26 |
hbase_ProcedureMember_close_rdh | /**
* Best effort attempt to close the threadpool via Thread.interrupt.
*/
@Override
public void close() throws IOException {
// have to use shutdown now to break any latch waiting
f0.shutdownNow();
} | 3.26 |
hbase_ProcedureMember_receivedReachedGlobalBarrier_rdh | /**
* Notification that procedure coordinator has reached the global barrier
*
* @param procName
* name of the subprocedure that should start running the in-barrier phase
*/
public void receivedReachedGlobalBarrier(String procName) {
Subprocedure subproc = subprocs.get(procName);
if (subproc == null) {
LOG.warn(("Unexpected reached globa barrier message for Sub-Procedure '" +
procName) + "'");
return;
}
if (LOG.isTraceEnabled()) {
LOG.trace(("reached global barrier message for Sub-Procedure '" + procName) + "'");
}
subproc.receiveReachedGlobalBarrier();
} | 3.26 |
hbase_ProcedureMember_defaultPool_rdh | /**
* Default thread pool for the procedure
*
* @param procThreads
* the maximum number of threads to allow in the pool
* @param keepAliveMillis
* the maximum time (ms) that excess idle threads will wait for new tasks
*/
public static ThreadPoolExecutor defaultPool(String memberName, int procThreads, long keepAliveMillis) {
return new ThreadPoolExecutor(1, procThreads, keepAliveMillis, TimeUnit.MILLISECONDS, new SynchronousQueue<>(), new ThreadFactoryBuilder().setNameFormat(("member: '" + memberName) + "' subprocedure-pool-%d").setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
} | 3.26 |
hbase_ProcedureMember_controllerConnectionFailure_rdh | /**
* The connection to the rest of the procedure group (member and coordinator) has been
* broken/lost/failed. This should fail any interested subprocedure, but not attempt to notify
* other members since we cannot reach them anymore.
*
* @param message
* description of the error
* @param cause
* the actual cause of the failure
* @param procName
* the name of the procedure we'd cancel due to the error.
*/
public void controllerConnectionFailure(final String message, final Throwable cause, final String procName) {
LOG.error(message, cause);
if (procName == null) {
return;
}
Subprocedure toNotify = subprocs.get(procName);
if (toNotify != null) {
toNotify.cancel(message, cause);
}
} | 3.26 |
hbase_ProcedureMember_createSubprocedure_rdh | /**
* This is separated from execution so that we can detect and handle the case where the
* subprocedure is invalid and inactionable due to bad info (like DISABLED snapshot type being
* sent here)
*/
public Subprocedure createSubprocedure(String opName, byte[] data) {
return builder.buildSubprocedure(opName, data);
} | 3.26 |
hbase_ProcedureMember_getRpcs_rdh | /**
* Package exposed. Not for public use.
*
* @return reference to the Procedure member's rpcs object
*/
ProcedureMemberRpcs getRpcs() {
return rpcs;
} | 3.26 |
hbase_ProcedureMember_receiveAbortProcedure_rdh | /**
* Send abort to the specified procedure
*
* @param procName
 * name of the procedure to abort
* @param ee
* exception information about the abort
*/
public void receiveAbortProcedure(String procName, ForeignException ee) {
LOG.debug("Request received to abort procedure " + procName, ee);
// if we know about the procedure, notify it
Subprocedure sub = subprocs.get(procName);
if (sub == null) {
LOG.info(("Received abort on procedure with no local subprocedure " + procName) + ", ignoring it.", ee);
return;// Procedure has already completed
}
String msg = "Propagating foreign exception to subprocedure " + sub.getName();
LOG.error(msg, ee);
sub.cancel(msg, ee);
} | 3.26 |
hbase_ProcedureMember_closeAndWait_rdh | /**
 * Shut down the threadpool, and wait for up to timeoutMs millis before bailing
 *
 * @param timeoutMs
 * timeout limit in millis
 * @return true if shut down successfully, false if it bailed due to timeout.
*/
boolean closeAndWait(long timeoutMs) throws InterruptedException {
f0.shutdown();
return f0.awaitTermination(timeoutMs, TimeUnit.MILLISECONDS);
} | 3.26 |
hbase_SecurityUtil_getPrincipalWithoutRealm_rdh | /**
* Get the user name from a principal
*/
public static String getPrincipalWithoutRealm(final String principal) {
int i = principal.indexOf("@");
return i > (-1) ? principal.substring(0, i) : principal;
} | 3.26 |
hbase_Mutation_getCellBuilder_rdh | /**
* get a CellBuilder instance that already has relevant Type and Row set.
*
* @param cellBuilderType
* e.g CellBuilderType.SHALLOW_COPY
* @param cellType
* e.g Cell.Type.Put
* @return CellBuilder which already has relevant Type and Row set.
*/
protected CellBuilder getCellBuilder(CellBuilderType cellBuilderType, Cell.Type cellType) {
CellBuilder builder = CellBuilderFactory.create(cellBuilderType).setRow(row).setType(cellType);
return new CellBuilder() {
@Override
public CellBuilder m3(byte[] row) {
return this;
}
@Override
public CellBuilder setType(Cell.Type type) {
return this;
}
@Override
public CellBuilder setRow(byte[] row, int rOffset, int rLength) {
return this;
}
@Override
public CellBuilder setFamily(byte[] family) {
builder.setFamily(family);
return this;
}
@Override
public CellBuilder setFamily(byte[] family, int fOffset, int fLength) {
builder.setFamily(family, fOffset, fLength);
return this;
}
@Override
public CellBuilder setQualifier(byte[] qualifier) {
builder.setQualifier(qualifier);
return this;
}
@Override
public CellBuilder setQualifier(byte[] qualifier, int qOffset, int qLength) {
builder.setQualifier(qualifier, qOffset, qLength);
return this;
}
@Override
public CellBuilder setTimestamp(long timestamp) {
builder.setTimestamp(timestamp);
return this;
}
@Override
public CellBuilder setValue(byte[] value) {
builder.setValue(value);
return this;
}
@Override
public CellBuilder setValue(byte[] value, int vOffset, int vLength) {
builder.setValue(value, vOffset, vLength);
return this;
}
@Override
public Cell build() {
return builder.build();
}
@Override
public CellBuilder clear() {
builder.clear();
// reset the row and type
builder.setRow(row);
builder.setType(cellType);
return this;
}
};
} | 3.26 |
hbase_Mutation_setClusterIds_rdh | /**
* Marks that the clusters with the given clusterIds have consumed the mutation
*
* @param clusterIds
* of the clusters that have consumed the mutation
*/
public Mutation setClusterIds(List<UUID> clusterIds) {
ByteArrayDataOutput out = ByteStreams.newDataOutput();
out.writeInt(clusterIds.size());
for (UUID clusterId : clusterIds) {
out.writeLong(clusterId.getMostSignificantBits());
out.writeLong(clusterId.getLeastSignificantBits());
}
setAttribute(CONSUMED_CLUSTER_IDS, out.toByteArray());
return this;
} | 3.26 |
hbase_Mutation_getACL_rdh | /**
 * Returns the serialized ACL for this operation, or null if none.
*/
public byte[] getACL() {
return getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL);
} | 3.26 |
hbase_Mutation_getFingerprint_rdh | /**
* Compile the column family (i.e. schema) information into a Map. Useful for parsing and
* aggregation by debugging, logging, and administration tools.
*/
@Override
public Map<String, Object> getFingerprint() {
Map<String, Object> map = new HashMap<>();
List<String> families = new ArrayList<>(getFamilyCellMap().entrySet().size());
// ideally, we would also include table information, but that information
// is not stored in each Operation instance.
map.put("families", families);
for (Map.Entry<byte[], List<Cell>> entry : getFamilyCellMap().entrySet()) {
families.add(Bytes.toStringBinary(entry.getKey()));
}
return map;
} | 3.26 |
hbase_Mutation_size_rdh | /**
* Number of KeyValues carried by this Mutation.
*
* @return the total number of KeyValues
*/
public int size() {
int size = 0;
for (List<Cell> cells : getFamilyCellMap().values()) {
size += cells.size();
}
return size;
} | 3.26 |
hbase_Mutation_getCellVisibility_rdh | /**
 * Returns the CellVisibility associated with cells in this Mutation, or null if none was set.
 */
public CellVisibility getCellVisibility() throws DeserializationException {
byte[] cellVisibilityBytes = this.getAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY);
if (cellVisibilityBytes == null)
return null;
return toCellVisibility(cellVisibilityBytes);
} | 3.26 |
hbase_Mutation_getFamilyCellMap_rdh | /**
 * Method for retrieving the mutation's family map
*/
public NavigableMap<byte[], List<Cell>> getFamilyCellMap() {
return this.familyMap;
} | 3.26 |
hbase_Mutation_checkRow_rdh | /**
*
* @param row
* Row to check
* @throws IllegalArgumentException
* Thrown if <code>row</code> is empty or null or >
* {@link HConstants#MAX_ROW_LENGTH}
* @return <code>row</code>
*/
static byte[] checkRow(final byte[] row, final int offset, final int length) {
if (row == null) {
throw new IllegalArgumentException("Row buffer is null");
}
if (length == 0) {
throw new IllegalArgumentException("Row length is 0");
}
if (length > HConstants.MAX_ROW_LENGTH) {
throw new IllegalArgumentException("Row length " + length + " is > " + HConstants.MAX_ROW_LENGTH);
}
return row;
} | 3.26 |
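A small standalone sketch of the same validation rules, re-implemented here rather than calling the package-private helper (the Short.MAX_VALUE limit is assumed to match HConstants.MAX_ROW_LENGTH):

public class RowCheckExample {
  // Mirrors checkRow: null, empty, and over-long rows are rejected.
  static byte[] checkRow(byte[] row, int maxRowLength) {
    if (row == null) {
      throw new IllegalArgumentException("Row buffer is null");
    }
    if (row.length == 0) {
      throw new IllegalArgumentException("Row length is 0");
    }
    if (row.length > maxRowLength) {
      throw new IllegalArgumentException("Row length " + row.length + " is > " + maxRowLength);
    }
    return row;
  }

  public static void main(String[] args) {
    int maxRowLength = Short.MAX_VALUE; // assumed value of HConstants.MAX_ROW_LENGTH
    System.out.println(checkRow(new byte[] { 'r', '1' }, maxRowLength).length); // 2
    try {
      checkRow(new byte[0], maxRowLength);
    } catch (IllegalArgumentException expected) {
      System.out.println(expected.getMessage()); // "Row length is 0"
    }
  }
}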
hbase_Mutation_getDurability_rdh | /**
* Get the current durability
*/
public Durability getDurability() {
return this.durability;
} | 3.26 |
hbase_Mutation_toMap_rdh | /**
* Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a
* Map along with the fingerprinted information. Useful for debugging, logging, and administration
* tools.
*
* @param maxCols
* a limit on the number of columns output prior to truncation
*/
@Override
public Map<String, Object> toMap(int maxCols) {
// we start with the fingerprint map and build on top of it.
Map<String, Object> map = getFingerprint();
// replace the fingerprint's simple list of families with a
// map from column families to lists of qualifiers and kv details
Map<String, List<Map<String, Object>>> columns = new HashMap<>();
map.put("families", columns);
map.put("row", Bytes.toStringBinary(this.row));
int colCount = 0;
// iterate through all column families affected
for (Map.Entry<byte[], List<Cell>> entry : getFamilyCellMap().entrySet()) {
// map from this family to details for each cell affected within the family
List<Map<String, Object>> qualifierDetails = new ArrayList<>();
columns.put(Bytes.toStringBinary(entry.getKey()), qualifierDetails);
colCount += entry.getValue().size();
if (maxCols <= 0) {
continue;
}
// add details for each cell
for (Cell cell : entry.getValue()) {
if ((--maxCols) <= 0) {
continue;
}
Map<String, Object> cellMap = cellToStringMap(cell);
qualifierDetails.add(cellMap);
}
}
map.put("totalColumns", colCount);
// add the id if set
if (getId() != null) {
map.put("id", getId());
}
// Add the TTL if set
// Long.MAX_VALUE is the default, and is interpreted to mean this attribute
// has not been set.
if (getTTL() != Long.MAX_VALUE) {
map.put("ttl", getTTL());
}
map.put("ts", this.ts);
return map;
} | 3.26 |
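A hedged usage sketch showing how getFingerprint and toMap differ on a concrete Put; the column names are examples, assuming the standard org.apache.hadoop.hbase.client.Put API:

import java.util.Map;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class MutationMapExample {
  public static void main(String[] args) {
    Put put = new Put(Bytes.toBytes("row-1"));
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("v1"));
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q2"), Bytes.toBytes("v2"));

    // Fingerprint: column families only, no row or per-cell detail.
    Map<String, Object> fingerprint = put.getFingerprint();
    // Full detail map: row, per-cell info (capped at 10 columns here), totalColumns, ts.
    Map<String, Object> details = put.toMap(10);

    System.out.println(fingerprint);
    System.out.println(details);
  }
}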
hbase_Mutation_m2_rdh | /**
* Private method to determine if this object's familyMap contains the given value assigned to the
* given family, qualifier and timestamp, respecting the 2 boolean arguments.
*/
protected boolean m2(byte[] family, byte[] qualifier, long ts, byte[] value, boolean ignoreTS, boolean ignoreValue) {
List<Cell> list = getCellList(family);
if (list.isEmpty()) {
return false;
}
// Boolean analysis of ignoreTS/ignoreValue.
// T T => 2
// T F => 3 (first is always true)
// F T => 2
// F F => 1
if ((!ignoreTS) && (!ignoreValue)) {
for (Cell cell : list) {
if (((CellUtil.matchingFamily(cell, family) && CellUtil.matchingQualifier(cell, qualifier)) && CellUtil.matchingValue(cell, value)) && (cell.getTimestamp() == ts)) {
return true;
}
}
} else if (ignoreValue && (!ignoreTS)) {
for (Cell cell : list) {
if (CellUtil.matchingFamily(cell, family) && CellUtil.matchingQualifier(cell, qualifier) && (cell.getTimestamp() == ts)) {
return true;
}
}
} else if ((!ignoreValue) && ignoreTS) {
for (Cell cell : list) {
if ((CellUtil.matchingFamily(cell, family) && CellUtil.matchingQualifier(cell, qualifier)) && CellUtil.matchingValue(cell, value)) {
return true;
}
}
} else {
for (Cell cell : list) {
if (CellUtil.matchingFamily(cell, family) && CellUtil.matchingQualifier(cell, qualifier)) {
return true;
}
}
}
return false;
} | 3.26 |
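The four branches above back Put's public has(...) overloads, which differ only in whether the timestamp and/or value take part in the match; a hedged usage sketch with example column names, assuming those overloads:

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class PutHasExample {
  public static void main(String[] args) {
    Put put = new Put(Bytes.toBytes("row-1"));
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), 100L, Bytes.toBytes("v"));

    byte[] cf = Bytes.toBytes("cf");
    byte[] q = Bytes.toBytes("q");
    byte[] v = Bytes.toBytes("v");

    System.out.println(put.has(cf, q));        // family + qualifier only -> true
    System.out.println(put.has(cf, q, v));     // also match value -> true
    System.out.println(put.has(cf, q, 100L));  // also match timestamp -> true
    System.out.println(put.has(cf, q, 1L, v)); // wrong timestamp -> false
  }
}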
hbase_Mutation_isEmpty_rdh | /**
* Method to check if the familyMap is empty
*
* @return true if empty, false otherwise
*/
public boolean isEmpty() {
return getFamilyCellMap().isEmpty();
} | 3.26 |
hbase_Mutation_getClusterIds_rdh | /**
* Returns the set of clusterIds that have consumed the mutation
*/
public List<UUID> getClusterIds() {
List<UUID> clusterIds = new ArrayList<>();
byte[] bytes = getAttribute(CONSUMED_CLUSTER_IDS);
if (bytes != null) {
ByteArrayDataInput in = ByteStreams.newDataInput(bytes);
int numClusters = in.readInt();
for (int i = 0; i < numClusters; i++) {
clusterIds.add(new UUID(in.readLong(), in.readLong()));
}
}
return clusterIds;
} | 3.26 |
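The attribute written by setClusterIds and read back here is just a count followed by the two longs of each UUID; a self-contained sketch of that byte layout using plain java.io streams (the production code uses the shaded Guava ByteStreams, but the layout is the same):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.UUID;

public class ClusterIdsLayoutExample {
  static byte[] encode(List<UUID> clusterIds) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    out.writeInt(clusterIds.size());
    for (UUID id : clusterIds) {
      out.writeLong(id.getMostSignificantBits());
      out.writeLong(id.getLeastSignificantBits());
    }
    return bytes.toByteArray();
  }

  static List<UUID> decode(byte[] data) throws IOException {
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(data));
    int n = in.readInt();
    List<UUID> ids = new ArrayList<>(n);
    for (int i = 0; i < n; i++) {
      ids.add(new UUID(in.readLong(), in.readLong()));
    }
    return ids;
  }

  public static void main(String[] args) throws IOException {
    List<UUID> original = Arrays.asList(UUID.randomUUID(), UUID.randomUUID());
    System.out.println(decode(encode(original)).equals(original)); // true
  }
}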
hbase_Mutation_setACL_rdh | /**
* Set the ACL for this operation.
*
* @param perms
* A map of permissions for a user or users
*/
public Mutation setACL(Map<String, Permission> perms) {
ListMultimap<String, Permission> permMap = ArrayListMultimap.create();
for (Map.Entry<String, Permission> entry : perms.entrySet()) {
permMap.put(entry.getKey(), entry.getValue());
}
setAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL, AccessControlUtil.toUsersAndPermissions(permMap).toByteArray());
return this;
} | 3.26 |
hbase_Mutation_numFamilies_rdh | /**
* Returns the number of different families
*/
public int numFamilies() {
return getFamilyCellMap().size();
} | 3.26 |
hbase_Mutation_toCellVisibility_rdh | /**
 * Convert protocol buffer CellVisibility bytes to a client CellVisibility
*
* @return the converted client CellVisibility
*/
private static CellVisibility toCellVisibility(byte[] protoBytes) throws DeserializationException {
if (protoBytes == null)
return null;
ClientProtos.CellVisibility.Builder builder = ClientProtos.CellVisibility.newBuilder();
ClientProtos.CellVisibility proto = null;
try {
ProtobufUtil.mergeFrom(builder, protoBytes);
proto = builder.build();
} catch (IOException e) {
throw new DeserializationException(e);
}
return toCellVisibility(proto);
} | 3.26 |
hbase_Mutation_m0_rdh | /**
* Method for retrieving the timestamp.
*/
public long m0() {
return this.ts;
} | 3.26 |
hbase_Mutation_getRow_rdh | /**
 * Method for retrieving the mutation's row
 */
@Override
public byte[] getRow() {
return this.row;
} | 3.26 |
hbase_Mutation_setReturnResults_rdh | // Used by Increment and Append only.
@InterfaceAudience.Private
protected Mutation setReturnResults(boolean returnResults) {
setAttribute(RETURN_RESULTS, Bytes.toBytes(returnResults));
return this;
} | 3.26 |
hbase_Mutation_createPutKeyValue_rdh | /**
 * Create a KeyValue with this object's row key and the Put identifier.
 *
 * @return a KeyValue with this object's row key and the Put identifier.
*/
KeyValue createPutKeyValue(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer value, Tag[] tags) {
return new KeyValue(this.row, 0, this.row == null ? 0 : this.row.length, family, 0,
family == null ? 0 : family.length, qualifier, ts, Type.Put, value,
tags != null ? Arrays.asList(tags) : null);
} | 3.26 |
hbase_Mutation_m1_rdh | /**
* Subclasses should override this method to add the heap size of their own fields.
*
* @return the heap size to add (will be aligned).
*/
protected long m1() {
return 0L;
} | 3.26 |
hbase_Mutation_getCellList_rdh | /**
* Creates an empty list if one doesn't exist for the given column family or else it returns the
* associated list of Cell objects.
*
* @param family
* column family
* @return a list of Cell objects, returns an empty list if one doesn't exist.
*/
List<Cell> getCellList(byte[] family) {
List<Cell> v0 = getFamilyCellMap().get(family);
if (v0 == null) {
v0 = new ArrayList<>();
getFamilyCellMap().put(family, v0);
}
return v0;
} | 3.26 |
hbase_Mutation_setTimestamp_rdh | /**
 * Set the timestamp of the mutation.
*/
public Mutation setTimestamp(long timestamp) {
if (timestamp < 0) {
throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp);
}
this.ts = timestamp;
return this;
}
hbase_Mutation_setCellVisibility_rdh | /**
* Sets the visibility expression associated with cells in this Mutation.
*/
public Mutation setCellVisibility(CellVisibility expression) {
this.setAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY, toCellVisibility(expression).toByteArray());
return this;
} | 3.26 |
hbase_Mutation_setTTL_rdh | /**
* Set the TTL desired for the result of the mutation, in milliseconds.
*
* @param ttl
* the TTL desired for the result of the mutation, in milliseconds
 */
public Mutation setTTL(long ttl) {
setAttribute(OP_ATTRIBUTE_TTL, Bytes.toBytes(ttl));
return this;
} | 3.26 |
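A hedged sketch of how these attribute setters are typically chained on a concrete mutation before submission; the values and visibility labels are examples, assuming the standard Put, Durability, and CellVisibility client classes:

import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.security.visibility.CellVisibility;
import org.apache.hadoop.hbase.util.Bytes;

public class MutationAttributesExample {
  public static void main(String[] args) {
    Put put = new Put(Bytes.toBytes("row-1"));
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    put.setDurability(Durability.SYNC_WAL);       // force a WAL sync for this mutation
    put.setTTL(60 * 60 * 1000L);                  // result expires after one hour (millis)
    put.setCellVisibility(new CellVisibility("secret|admin")); // needs visibility labels enabled server-side
    put.setTimestamp(System.currentTimeMillis());
    // table.put(put);  // submit via a Table obtained from a Connection
    System.out.println(put.toMap(5));
  }
}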
hbase_Mutation_heapSize_rdh | /**
 * Calculate what this Mutation adds to the class heap size.
*/
@Override
public long heapSize() {
long heapsize = MUTATION_OVERHEAD;
// Adding row
heapsize += ClassSize.align(ClassSize.ARRAY + this.row.length);
// Adding map overhead
heapsize += ClassSize.align(getFamilyCellMap().size() * ClassSize.MAP_ENTRY);
for (Map.Entry<byte[], List<Cell>> entry : getFamilyCellMap().entrySet()) {
// Adding key overhead
heapsize += ClassSize.align(ClassSize.ARRAY + entry.getKey().length);
// This part is kind of tricky since the JVM can reuse references if you
// store the same value, but have a good match with SizeOf at the moment
// Adding value overhead
heapsize += ClassSize.align(ClassSize.ARRAYLIST);
int size = entry.getValue().size();
heapsize += ClassSize.align(ClassSize.ARRAY + (size * ClassSize.REFERENCE));
for (Cell cell : entry.getValue()) {
heapsize += cell.heapSize();
}
}
heapsize += getAttributeSize();
heapsize += m1();
return ClassSize.align(heapsize);
} | 3.26 |
hbase_Mutation_setDurability_rdh | /**
* Set the durability for this mutation
*/
public Mutation setDurability(Durability d) {
this.durability = d;
return this;
} | 3.26 |
hbase_LocalHBaseCluster_m0_rdh | /**
*
* @param c
* Configuration to check.
 * @return True if the configuration specifies local (non-distributed) mode.
*/
public static boolean m0(final Configuration c) {
boolean mode = c.getBoolean(HConstants.CLUSTER_DISTRIBUTED, HConstants.DEFAULT_CLUSTER_DISTRIBUTED);
return mode == HConstants.CLUSTER_IS_LOCAL;
} | 3.26 |
hbase_LocalHBaseCluster_getRegionServer_rdh | /**
 * Returns the region server at the given index
*/
public HRegionServer getRegionServer(int serverNumber) {
return regionThreads.get(serverNumber).getRegionServer();
} | 3.26 |
hbase_LocalHBaseCluster_getActiveMaster_rdh | /**
* Gets the current active master, if available. If no active master, returns null.
*
* @return the HMaster for the active master
*/
public HMaster getActiveMaster() {
for (JVMClusterUtil.MasterThread mt : masterThreads) {
// Ensure that the current active master is not stopped.
// We don't want to return a stopping master as an active master.
if (mt.getMaster().isActiveMaster() && (!mt.getMaster().isStopped())) {
return mt.getMaster();
}
}
return null;
} | 3.26 |
hbase_LocalHBaseCluster_getMasters_rdh | /**
 * Returns a read-only list of master threads.
*/
public List<JVMClusterUtil.MasterThread> getMasters() {
return Collections.unmodifiableList(this.masterThreads);
} | 3.26 |
hbase_LocalHBaseCluster_getMaster_rdh | /**
 * Returns the HMaster at the given index
*/
public HMaster getMaster(int serverNumber) {
return masterThreads.get(serverNumber).getMaster();
} | 3.26 |
hbase_LocalHBaseCluster_join_rdh | /**
* Wait for Mini HBase Cluster to shut down. Presumes you've already called {@link #shutdown()}.
*/
public void join() {
if (this.regionThreads != null) {
for (Thread t : this.regionThreads) {
if (t.isAlive()) {
try {
Threads.threadDumpingIsAlive(t);
} catch (InterruptedException e) {
LOG.debug("Interrupted", e);
}
}
}
}
if (this.masterThreads != null) {
for (Thread t : this.masterThreads) {
if (t.isAlive()) {
try {
Threads.threadDumpingIsAlive(t);
} catch (InterruptedException e) {
LOG.debug("Interrupted", e);
}
}
}
}
} | 3.26 |
hbase_LocalHBaseCluster_startup_rdh | /**
* Start the cluster.
*/
public void startup() throws IOException {
JVMClusterUtil.startup(this.masterThreads, this.regionThreads);
} | 3.26 |
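A hedged sketch of the local-cluster lifecycle these methods support, for test-style usage; it assumes the LocalHBaseCluster(Configuration) constructor and the shutdown() method defined elsewhere in this class, and that the default is a single master and region server:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.LocalHBaseCluster;

public class LocalClusterExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    LocalHBaseCluster cluster = new LocalHBaseCluster(conf);
    cluster.startup();
    try {
      // May still be null briefly while the master finishes becoming active.
      System.out.println("active master: " + cluster.getActiveMaster());
    } finally {
      cluster.shutdown(); // ask every master/region server thread to stop...
      cluster.join();     // ...then wait for them to exit
    }
  }
}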
hbase_LocalHBaseCluster_getRegionServers_rdh | /**
 * Returns a read-only list of region server threads.
*/
public List<JVMClusterUtil.RegionServerThread> getRegionServers() {
return Collections.unmodifiableList(this.regionThreads);
} | 3.26 |