name | code_snippet | score |
---|---|---|
hbase_SnapshotOfRegionAssignmentFromMeta_getTableToRegionMap_rdh | /**
* Get regions for tables
*
* @return a mapping from table to regions
*/
public Map<TableName, List<RegionInfo>> getTableToRegionMap() {
return tableToRegionMap;
} | 3.26 |
hbase_SnapshotOfRegionAssignmentFromMeta_initialize_rdh | /**
* Initialize the region assignment snapshot by scanning the hbase:meta table
*/
public void initialize() throws IOException {
LOG.info("Start to scan the hbase:meta for the current region assignment " + "snapshot");
// Scan hbase:meta to pick up user regions
try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME);
ResultScanner scanner = metaTable.getScanner(HConstants.CATALOG_FAMILY)) {
for (;;) {
Result result = scanner.next();
if (result == null) {
break;
}
try {
processMetaRecord(result);
} catch (RuntimeException e) {
LOG.error("Caught runtime exception " + e.getMessage() + " when processing " + result);
throw e;
}
}
}
LOG.info("Finished scanning the hbase:meta for the current region assignment " + "snapshot");
} | 3.26 |
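The scan loop above follows the standard HBase client pattern for reading hbase:meta. A minimal, hedged sketch of the same pattern from user code (it assumes an already-opened Connection named `connection`; error handling is omitted):

```java
import java.io.IOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Table;

public class MetaScanSketch {
  // Iterate every row of hbase:meta's catalog family, the same way initialize() does.
  static void scanMeta(Connection connection) throws IOException {
    try (Table meta = connection.getTable(TableName.META_TABLE_NAME);
         ResultScanner scanner = meta.getScanner(HConstants.CATALOG_FAMILY)) {
      for (Result result : scanner) { // ResultScanner is Iterable<Result>
        // process each catalog row here, e.g. extract RegionInfo and server location
      }
    }
  }
}
```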
hbase_SnapshotOfRegionAssignmentFromMeta_getRegionToRegionServerMap_rdh | /**
* Get region to region server map
*
* @return region to region server map
*/
public Map<RegionInfo, ServerName> getRegionToRegionServerMap() {
return regionToRegionServerMap;
} | 3.26 |
hbase_SnapshotOfRegionAssignmentFromMeta_getRegionNameToRegionInfoMap_rdh | /**
* Get the region name to region info mapping
*
* @return a map from region name to its RegionInfo
*/
public Map<String, RegionInfo> getRegionNameToRegionInfoMap() {
return this.regionNameToRegionInfoMap;
} | 3.26 |
hbase_WALSplitUtil_moveAsideBadEditsFile_rdh | /**
* Move aside a bad edits file.
*
* @param fs
* the file system used to rename bad edits file.
* @param edits
* Edits file to move aside.
* @return The name of the moved aside file.
*/
public static Path moveAsideBadEditsFile(final FileSystem fs, final Path edits) throws IOException {
Path moveAsideName = new Path(edits.getParent(), (edits.getName() + ".") + EnvironmentEdgeManager.currentTime());
if (!fs.rename(edits, moveAsideName)) {
LOG.warn("Rename failed from {} to {}", edits, moveAsideName);
}
return moveAsideName;
} | 3.26 |
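A hedged usage sketch for moveAsideBadEditsFile; the filesystem setup and the edits path are placeholders, not taken from the snippet:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.wal.WALSplitUtil;

public class MoveAsideSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical path to a recovered-edits file that failed to parse.
    Path badEdits = new Path(args[0]);
    Path movedTo = WALSplitUtil.moveAsideBadEditsFile(fs, badEdits);
    // The new name is the original name plus "." and the current timestamp.
    System.out.println("Moved to " + movedTo);
  }
}
```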
hbase_WALSplitUtil_finishSplitLogFile_rdh | /**
* Completes the work done by splitLogFile by archiving logs
* <p>
* It is invoked by SplitLogManager once it knows that one of the SplitLogWorkers has completed
* the splitLogFile() part. If the master crashes then this function might get called multiple
* times.
* <p>
*/
public static void finishSplitLogFile(String logfile, Configuration conf) throws IOException {
Path walRootDir = CommonFSUtils.getWALRootDir(conf);
Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
Path walPath;
if (CommonFSUtils.isStartingWithPath(walRootDir, logfile)) {
walPath = new Path(logfile);
} else {
walPath = new Path(walRootDir, logfile);
}
FileSystem walFS = walRootDir.getFileSystem(conf);
boolean corrupt = ZKSplitLog.isCorrupted(walRootDir, walPath.getName(), walFS);
archive(walPath, corrupt, oldLogDir, walFS, conf);
Path stagingDir = ZKSplitLog.getSplitLogDir(walRootDir, walPath.getName());
walFS.delete(stagingDir, true);
} | 3.26 |
hbase_WALSplitUtil_getCompletedRecoveredEditsFilePath_rdh | /**
* Get the completed recovered edits file path, renaming it so that its name carries the last
* edit's sequence id in the file rather than the first. The name can then be used to skip
* recovered edits when doing HRegion#replayRecoveredEditsIfAny(Map, CancelableProgressable, MonitoredTask).
*
* @return dstPath named with the file's last edit log sequence number
*/
static Path getCompletedRecoveredEditsFilePath(Path srcPath, long maximumEditWALSeqNum) {
String fileName = formatRecoveredEditsFileName(maximumEditWALSeqNum);
return new Path(srcPath.getParent(), fileName);
} | 3.26 |
hbase_WALSplitUtil_archive_rdh | /**
* Moves processed logs to the oldLogDir after successful processing. Moves corrupted logs (any
* log that couldn't be successfully parsed) to the corrupt dir (.corrupt) for later investigation.
*/
static void archive(final Path wal, final boolean corrupt, final Path oldWALDir, final FileSystem walFS, final Configuration conf) throws IOException {
Path dir;
Path target;
if (corrupt) {
dir = new Path(CommonFSUtils.getWALRootDir(conf), HConstants.CORRUPT_DIR_NAME);
if (conf.get("hbase.regionserver.hlog.splitlog.corrupt.dir") != null) {
LOG.warn("hbase.regionserver.hlog.splitlog.corrupt.dir is deprecated. Default to {}", dir);
}
target = new Path(dir, wal.getName());
} else {
dir = oldWALDir;
target = AbstractFSWAL.getWALArchivePath(oldWALDir, wal);
}
mkdir(walFS, dir);
moveWAL(walFS, wal, target);
} | 3.26 |
hbase_WALSplitUtil_getMutationsFromWALEntry_rdh | /**
* This function is used to construct mutations from a WALEntry. It also reconstructs WALKey &
* WALEdit from the passed in WALEntry
*
* @param logEntry
* pair of WALKey and WALEdit instance stores WALKey and WALEdit instances
* extracted from the passed in WALEntry.
* @return list of Pair<MutationType, Mutation> to be replayed
* @deprecated Since 3.0.0, will be removed in 4.0.0.
*/
@Deprecated
public static List<MutationReplay> getMutationsFromWALEntry(AdminProtos.WALEntry entry, CellScanner cells, Pair<WALKey, WALEdit> logEntry, Durability durability) throws IOException {
if (entry == null) {
// return an empty array
return Collections.emptyList();
}
long replaySeqId = (entry.getKey().hasOrigSequenceNumber()) ? entry.getKey().getOrigSequenceNumber() : entry.getKey().getLogSequenceNumber();
int count = entry.getAssociatedCellCount();
List<MutationReplay> mutations = new ArrayList<>();
Cell previousCell = null;
Mutation m = null;
WALKeyImpl key = null;
WALEdit val = null;
if (logEntry != null) {
val = new WALEdit();
}
for (int i = 0; i < count; i++) {
// Throw index out of bounds if our cell count is off
if (!cells.advance()) {
throw new ArrayIndexOutOfBoundsException((("Expected=" + count) + ", index=") + i);
}
Cell cell = cells.current();
if (val != null) {
val.add(cell);
}
boolean isNewRowOrType = ((previousCell == null) || (previousCell.getTypeByte() != cell.getTypeByte())) || (!CellUtil.matchingRows(previousCell, cell));
if (isNewRowOrType) {
// Create new mutation
if (CellUtil.isDelete(cell)) {
m = new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
// Deletes don't have nonces.
mutations.add(new MutationReplay(MutationType.DELETE, m, HConstants.NO_NONCE, HConstants.NO_NONCE));
} else {
m = new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
// Puts might come from increment or append, thus we need nonces.
long nonceGroup = (entry.getKey().hasNonceGroup()) ? entry.getKey().getNonceGroup() : HConstants.NO_NONCE;
long nonce = (entry.getKey().hasNonce()) ? entry.getKey().getNonce() : HConstants.NO_NONCE;
mutations.add(new MutationReplay(MutationType.PUT, m, nonceGroup, nonce));
}
}
if (CellUtil.isDelete(cell)) {
((Delete) (m)).add(cell);
} else {
((Put) (m)).add(cell);
}
m.setDurability(durability);
previousCell = cell;
}
// reconstruct WALKey
if (logEntry != null) {
WALKey walKeyProto = entry.getKey();
List<UUID> clusterIds = new ArrayList<>(walKeyProto.getClusterIdsCount());
for (HBaseProtos.UUID uuid : entry.getKey().getClusterIdsList()) {
clusterIds.add(new UUID(uuid.getMostSigBits(), uuid.getLeastSigBits()));
}
key = new WALKeyImpl(walKeyProto.getEncodedRegionName().toByteArray(), TableName.valueOf(walKeyProto.getTableName().toByteArray()), replaySeqId, walKeyProto.getWriteTime(), clusterIds, walKeyProto.getNonceGroup(), walKeyProto.getNonce(), null);
logEntry.setFirst(key);
logEntry.setSecond(val);
}
return mutations;
} | 3.26 |
hbase_WALSplitUtil_hasRecoveredEdits_rdh | /**
* Check whether there is recovered.edits in the region dir
*
* @param conf
* conf
* @param regionInfo
* the region to check
* @return true if recovered.edits exist in the region dir
*/
public static boolean hasRecoveredEdits(final Configuration conf, final RegionInfo regionInfo) throws IOException {
// No recovered.edits for non default replica regions
if (regionInfo.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
return false;
}
// Only default replica region can reach here, so we can use regioninfo
// directly without converting it to default replica's regioninfo.
Path regionWALDir = CommonFSUtils.getWALRegionDir(conf, regionInfo.getTable(), regionInfo.getEncodedName());
Path regionDir = FSUtils.getRegionDirFromRootDir(CommonFSUtils.getRootDir(conf), regionInfo);
Path wrongRegionWALDir = CommonFSUtils.getWrongWALRegionDir(conf, regionInfo.getTable(), regionInfo.getEncodedName());
FileSystem walFs = CommonFSUtils.getWALFileSystem(conf);
FileSystem rootFs = CommonFSUtils.getRootDirFileSystem(conf);
NavigableSet<Path> files = getSplitEditFilesSorted(walFs, regionWALDir);
if (!files.isEmpty()) {
return true;
}
files = getSplitEditFilesSorted(rootFs, regionDir);
if (!files.isEmpty()) {
return true;
}
files = getSplitEditFilesSorted(walFs, wrongRegionWALDir);
return !files.isEmpty();
}
/**
* This method will check 3 places for finding the max sequence id file. One is the expected
* place, another is the old place under the region directory, and the last one is the wrong one
* we introduced in HBASE-20734. See HBASE-22617 for more details.
* <p/>
* Notice that, you should always call this method instead of
* {@link #getMaxRegionSequenceId(FileSystem, Path)} | 3.26 |
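A hedged sketch of calling hasRecoveredEdits from above; the table name is illustrative, and a hypothetical RegionInfo is built with RegionInfoBuilder just for the example (real callers would pass an existing RegionInfo):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.wal.WALSplitUtil;

public class HasRecoveredEditsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical region of table "t1"; only the default replica can have recovered edits.
    RegionInfo region = RegionInfoBuilder.newBuilder(TableName.valueOf("t1")).build();
    boolean pending = WALSplitUtil.hasRecoveredEdits(conf, region);
    System.out.println("recovered.edits pending: " + pending);
  }
}
```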
hbase_WALSplitUtil_moveWAL_rdh | /**
* Move WAL. Used to move processed WALs to archive or bad WALs to corrupt WAL dir. WAL may have
* already been moved; makes allowance.
*/
public static void moveWAL(FileSystem fs, Path p, Path targetDir) throws IOException {
if (fs.exists(p)) {
if (!CommonFSUtils.renameAndSetModifyTime(fs, p, targetDir)) {
LOG.warn("Failed move of {} to {}", p, targetDir);
} else {
LOG.info("Moved {} to {}", p, targetDir);}
}
} | 3.26 |
hbase_WALSplitUtil_writeRegionSequenceIdFile_rdh | /**
* Create a file with name as region's max sequence id
*/
public static void writeRegionSequenceIdFile(FileSystem walFS, Path regionDir, long newMaxSeqId) throws IOException {
FileStatus[] files = getSequenceIdFiles(walFS, regionDir);
long maxSeqId = getMaxSequenceId(files);
if (maxSeqId > newMaxSeqId) {
throw new IOException((("The new max sequence id " + newMaxSeqId) + " is less than the old max sequence id ") + maxSeqId);
}
// write a new seqId file
Path newSeqIdFile = new Path(getRegionDirRecoveredEditsDir(regionDir), newMaxSeqId + SEQUENCE_ID_FILE_SUFFIX);
if (newMaxSeqId != maxSeqId) {
try {
if ((!walFS.createNewFile(newSeqIdFile)) && (!walFS.exists(newSeqIdFile))) {
throw new IOException("Failed to create SeqId file:" + newSeqIdFile);
}
LOG.debug("Wrote file={}, newMaxSeqId={}, maxSeqId={}", newSeqIdFile, newMaxSeqId, maxSeqId);
} catch (FileAlreadyExistsException ignored) {
// latest hdfs throws this exception. it's all right if newSeqIdFile already exists
}
}
// remove old ones
for (FileStatus status : files) {
if (!newSeqIdFile.equals(status.getPath())) {
walFS.delete(status.getPath(), false);
}
}
} | 3.26 |
hbase_WALSplitUtil_getMaxRegionSequenceId_rdh | /**
* Get the max sequence id which is stored in the region directory. -1 if none.
*/
public static long getMaxRegionSequenceId(FileSystem walFS, Path regionDir) throws IOException {
return getMaxSequenceId(getSequenceIdFiles(walFS, regionDir));
} | 3.26 |
hbase_WALSplitUtil_getSplitEditFilesSorted_rdh | /**
* Returns sorted set of edit files made by splitter, excluding files with '.temp' suffix.
*
* @param walFS
* WAL FileSystem used to retrieving split edits files.
* @param regionDir
* WAL region dir to look for recovered edits files under.
* @return Files in passed <code>regionDir</code> as a sorted set.
*/
public static NavigableSet<Path> getSplitEditFilesSorted(final FileSystem walFS, final Path regionDir) throws IOException {
NavigableSet<Path> filesSorted = new TreeSet<>();
Path editsdir = getRegionDirRecoveredEditsDir(regionDir);
if (!walFS.exists(editsdir)) {
return filesSorted;
}
FileStatus[] files = CommonFSUtils.listStatus(walFS, editsdir, new PathFilter() {
@Override
public boolean accept(Path p) {
boolean result = false;
try {
// Return files and only files that match the editfile names pattern.
// There can be other files in this directory other than edit files.
// In particular, on error, we'll move aside the bad edit file giving
// it a timestamp suffix. See moveAsideBadEditsFile.
Matcher m = f0.matcher(p.getName());
result = walFS.isFile(p) && m.matches();
// Skip the file whose name ends with RECOVERED_LOG_TMPFILE_SUFFIX,
// because it means the splitwal thread is writing this file.
if (p.getName().endsWith(RECOVERED_LOG_TMPFILE_SUFFIX)) {
result = false;
}
// Skip SeqId Files
if (isSequenceIdFile(p)) {
result = false;
}
} catch (IOException e) {
LOG.warn("Failed isFile check on {}", p, e);
}
return result;
}
});
if (ArrayUtils.isNotEmpty(files)) {
Arrays.asList(files).forEach(status -> filesSorted.add(status.getPath()));
}
return filesSorted;
} | 3.26 |
hbase_WALSplitUtil_tryCreateRecoveredHFilesDir_rdh | /**
* Return path to recovered.hfiles directory of the region's column family: e.g.
* /hbase/some_table/2323432434/cf/recovered.hfiles/. This method also ensures existence of
* recovered.hfiles directory under the region's column family, creating it if necessary.
*
* @param rootFS
* the root file system
* @param conf
* configuration
* @param tableName
* the table name
* @param encodedRegionName
* the encoded region name
* @param familyName
* the column family name
* @return Path to recovered.hfiles directory of the region's column family.
*/
static Path tryCreateRecoveredHFilesDir(FileSystem rootFS, Configuration conf, TableName tableName, String encodedRegionName, String familyName) throws IOException {
Path rootDir = CommonFSUtils.getRootDir(conf);
Path regionDir = FSUtils.getRegionDirFromTableDir(CommonFSUtils.getTableDir(rootDir, tableName), encodedRegionName);
Path dir = getRecoveredHFilesDir(regionDir, familyName);
if ((!rootFS.exists(dir)) && (!rootFS.mkdirs(dir))) {
LOG.warn("mkdir failed on {}, region {}, column family {}", dir, encodedRegionName, familyName);
}
return dir;
} | 3.26 |
hbase_WALSplitUtil_isSequenceIdFile_rdh | /**
* Is the given file a region open sequence id file.
*/
public static boolean isSequenceIdFile(final Path file) {
return file.getName().endsWith(SEQUENCE_ID_FILE_SUFFIX) || file.getName().endsWith(OLD_SEQUENCE_ID_FILE_SUFFIX);
} | 3.26 |
hbase_WALSplitUtil_getRegionSplitEditsPath_rdh | /**
* Path to a file under RECOVERED_EDITS_DIR directory of the region found in <code>logEntry</code>
* named for the sequenceid in the passed <code>logEntry</code>: e.g.
* /hbase/some_table/2323432434/recovered.edits/2332. This method also ensures the existence of
* RECOVERED_EDITS_DIR under the region, creating it if necessary, and sets the storage policy
* for RECOVERED_EDITS_DIR if WAL_STORAGE_POLICY is configured.
*
* @param tableName
* the table name
* @param encodedRegionName
* the encoded region name
* @param seqId
* the sequence id which used to generate file name
* @param fileNameBeingSplit
* the file being split currently. Used to generate tmp file name.
* @param tmpDirName
* of the directory used to sideline old recovered edits file
* @param conf
* configuration
* @return Path to file into which to dump split log edits.
*/
@SuppressWarnings("deprecation")
static Path getRegionSplitEditsPath(TableName tableName, byte[] encodedRegionName, long seqId, String fileNameBeingSplit, String tmpDirName, Configuration conf) throws IOException {
FileSystem walFS = CommonFSUtils.getWALFileSystem(conf);
Path tableDir = CommonFSUtils.getWALTableDir(conf, tableName);
String encodedRegionNameStr = Bytes.toString(encodedRegionName);
Path regionDir = HRegion.getRegionDir(tableDir, encodedRegionNameStr);
Path dir = getRegionDirRecoveredEditsDir(regionDir);
if (walFS.exists(dir) && walFS.isFile(dir)) {
Path tmp = new Path(tmpDirName);
if (!walFS.exists(tmp)) {
walFS.mkdirs(tmp);
}
tmp = new Path(tmp, (HConstants.RECOVERED_EDITS_DIR + "_") + encodedRegionNameStr);
LOG.warn(("Found existing old file: {}. It could be some " + "leftover of an old installation. It should be a folder instead. ") + "So moving it to {}", dir, tmp);
if (!walFS.rename(dir, tmp)) {
LOG.warn("Failed to sideline old file {}", dir);
}
}
if ((!walFS.exists(dir)) && (!walFS.mkdirs(dir))) {
LOG.warn("mkdir failed on {}", dir);
} else {
String storagePolicy = conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY);
CommonFSUtils.setStoragePolicy(walFS, dir, storagePolicy);
}
// Append fileBeingSplit to prevent name conflict since we may have duplicate wal entries now.
// Append file name ends with RECOVERED_LOG_TMPFILE_SUFFIX to ensure
// region's replayRecoveredEdits will not delete it
String fileName = formatRecoveredEditsFileName(seqId);
fileName = getTmpRecoveredEditsFileName((fileName + "-") + fileNameBeingSplit);
return new Path(dir, fileName);
} | 3.26 |
hbase_ColumnPrefixFilter_toByteArray_rdh | /**
* Returns The filter serialized using pb
*/
@Override
public byte[] toByteArray() {
FilterProtos.ColumnPrefixFilter.Builder builder = FilterProtos.ColumnPrefixFilter.newBuilder();
if (this.prefix != null)
builder.setPrefix(UnsafeByteOperations.unsafeWrap(this.prefix));
return builder.build().toByteArray();
} | 3.26 |
hbase_ColumnPrefixFilter_parseFrom_rdh | /**
* Parses a serialized representation of the {@link ColumnPrefixFilter}
*
* @param pbBytes
* A pb serialized {@link ColumnPrefixFilter} instance
* @return An instance of {@link ColumnPrefixFilter} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static ColumnPrefixFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.ColumnPrefixFilter proto;
try {
proto = FilterProtos.ColumnPrefixFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
return new ColumnPrefixFilter(proto.getPrefix().toByteArray());
} | 3.26 |
hbase_ColumnPrefixFilter_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof ColumnPrefixFilter)) {
return false;
}
ColumnPrefixFilter other = ((ColumnPrefixFilter) (o));
return Bytes.equals(this.getPrefix(), other.getPrefix());
} | 3.26 |
hbase_HFileContentValidator_validateHFileContent_rdh | /**
* Check HFile contents are readable by HBase 2.
*
* @param conf
* used configuration
* @return true if no corrupted HFiles were found, false otherwise
* @throws IOException
* if a remote or network exception occurs
*/
private boolean validateHFileContent(Configuration conf) throws IOException {
FileSystem fileSystem = CommonFSUtils.getCurrentFileSystem(conf);
ExecutorService threadPool = createThreadPool(conf);
HFileCorruptionChecker checker;
try {
checker = new HFileCorruptionChecker(conf, threadPool, false);
Path rootDir = CommonFSUtils.getRootDir(conf);
LOG.info("Validating HFile contents under {}", rootDir);Collection<Path> tableDirs = FSUtils.getTableDirs(fileSystem, rootDir);
checker.checkTables(tableDirs);
Path archiveRootDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
LOG.info("Validating HFile contents under {}", archiveRootDir);
List<Path> archiveTableDirs = FSUtils.getTableDirs(fileSystem, archiveRootDir);
checker.checkTables(archiveTableDirs);
} finally {
threadPool.shutdown();
try {
threadPool.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
int checkedFiles = checker.getHFilesChecked();
Collection<Path> corrupted = checker.getCorrupted();
if (corrupted.isEmpty()) {
LOG.info("Checked {} HFiles, none of them are corrupted.", checkedFiles);
LOG.info("There are no incompatible HFiles.");
return true;
} else {
LOG.info("Checked {} HFiles, {} are corrupted.",
checkedFiles, corrupted.size());
for (Path path : corrupted) {
LOG.info("Corrupted file: {}", path);
}
LOG.info("Change data block encodings before upgrading. " + "Check https://s.apache.org/prefixtree for instructions.");
return false;
}
} | 3.26 |
hbase_SecureBulkLoadManager_isFile_rdh | /**
* Check if the path is referencing a file. This is mainly needed to avoid symlinks.
*
* @return true if the p is a file
*/
private boolean isFile(Path p) throws IOException {
FileStatus status = srcFs.getFileStatus(p);
boolean isFile = !status.isDirectory();
try {
isFile = isFile && (!((Boolean) (Methods.call(FileStatus.class, status, "isSymlink", null, null))));
} catch (Exception e) {
// Ignore: isSymlink may not be available on this Hadoop version; fall back to the directory check.
}
return isFile;
} | 3.26 |
hbase_BinaryComponentComparator_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the comparator that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(ByteArrayComparable other) {
if (other == this) {
return true;
}
if (!(other instanceof BinaryComponentComparator)) {
return false;
}
return super.areSerializedFieldsEqual(other);
} | 3.26 |
hbase_BinaryComponentComparator_m0_rdh | /**
* Parse a serialized representation of {@link BinaryComponentComparator}
*
* @param pbBytes
* A pb serialized {@link BinaryComponentComparator} instance
* @return An instance of {@link BinaryComponentComparator} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static BinaryComponentComparator m0(final byte[] pbBytes) throws DeserializationException {
ComparatorProtos.BinaryComponentComparator proto;
try {
proto = ComparatorProtos.BinaryComponentComparator.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
return new BinaryComponentComparator(proto.getValue().toByteArray(), proto.getOffset());
} | 3.26 |
hbase_BinaryComponentComparator_toByteArray_rdh | /**
* Returns The comparator serialized using pb
*/
@Override
public byte[] toByteArray() {
ComparatorProtos.BinaryComponentComparator.Builder builder = ComparatorProtos.BinaryComponentComparator.newBuilder();
builder.setValue(ByteString.copyFrom(this.value));
builder.setOffset(this.offset);
return builder.build().toByteArray();
} | 3.26 |
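A round-trip sketch for the comparator above. Note the parse method appears here under the obfuscated name m0; in upstream HBase the equivalent method is conventionally named parseFrom. The value and offset are illustrative:

```java
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.filter.BinaryComponentComparator;
import org.apache.hadoop.hbase.util.Bytes;

public class BinaryComponentComparatorRoundTrip {
  public static void main(String[] args) throws DeserializationException {
    // Compare a 4-byte int component located at offset 8 of the compared byte array.
    BinaryComponentComparator original = new BinaryComponentComparator(Bytes.toBytes(42), 8);
    byte[] pb = original.toByteArray();
    BinaryComponentComparator restored = BinaryComponentComparator.m0(pb);
    System.out.println("parsed value: " + Bytes.toStringBinary(restored.getValue()));
  }
}
```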
hbase_BlockType_isData_rdh | /**
* Returns whether this block type is encoded or unencoded data block
*/
public final boolean isData() {
return (this == DATA) || (this == ENCODED_DATA);
} | 3.26 |
hbase_BlockType_getId_rdh | /**
* Use this instead of {@link #ordinal()}. They work exactly the same, except DATA and
* ENCODED_DATA get the same id using this method (overridden for {@link #ENCODED_DATA}).
*
* @return block type id from 0 to the number of block types - 1
*/
public int getId() {
// Default implementation, can be overridden for individual enum members.
return ordinal();
} | 3.26 |
hbase_BlockType_readAndCheck_rdh | /**
* Reads a magic record of the length {@link #MAGIC_LENGTH} from the given stream and expects it
* to match this block type.
*/
public void readAndCheck(DataInputStream in) throws IOException {
byte[] buf = new byte[MAGIC_LENGTH];
in.readFully(buf);
if (Bytes.compareTo(buf, f0) != 0) {
throw new IOException((("Invalid magic: expected " + Bytes.toStringBinary(f0)) + ", got ") + Bytes.toStringBinary(buf));
}
}
/**
* Reads a magic record of the length {@link #MAGIC_LENGTH} | 3.26 |
hbase_BlockType_isIndex_rdh | /**
* Returns whether this block category is index
*/
public final boolean isIndex() {
return this.getCategory() == BlockCategory.INDEX;
} | 3.26 |
hbase_BlockType_expectSpecific_rdh | /**
* Throws an exception if the block category passed is the special category meaning "all
* categories".
*/
public void expectSpecific() {
if (this == ALL_CATEGORIES) {
throw new IllegalArgumentException(("Expected a specific block " + "category but got ") + this);
}
} | 3.26 |
hbase_BlockType_isBloom_rdh | /**
* Returns whether this block category is bloom filter
*/
public final boolean isBloom() {
return this.getCategory() == BlockCategory.BLOOM;
} | 3.26 |
hbase_BlockType_put_rdh | /**
* Put the magic record out to the specified byte array position.
*
* @param bytes
* the byte array
* @param offset
* position in the array
* @return incremented offset
*/
// System.arraycopy is static native. We can't do anything about this until minimum JDK is 9.
@SuppressWarnings("UnsafeFinalization")
public int put(byte[] bytes, int offset) {
System.arraycopy(f0, 0, bytes, offset, MAGIC_LENGTH);
return offset + MAGIC_LENGTH;
} | 3.26 |
hbase_ZKReplicationStorageBase_toByteArray_rdh | /**
* Serialized protobuf of <code>state</code> with pb magic prefix prepended suitable for use as
* content of a peer-state znode under a peer cluster id as in
* /hbase/replication/peers/PEER_ID/peer-state.
*/
protected static byte[] toByteArray(final ReplicationProtos.ReplicationState.State state) {
ReplicationProtos.ReplicationState msg = ReplicationProtos.ReplicationState.newBuilder().setState(state).build();
// There is no toByteArray on this pb Message?
// 32 bytes is default which seems fair enough here.
try (ByteArrayOutputStream baos = new ByteArrayOutputStream()) {
CodedOutputStream cos = CodedOutputStream.newInstance(baos, 16);
msg.writeTo(cos);
cos.flush();
baos.flush();
return ProtobufUtil.prependPBMagic(baos.toByteArray());
} catch (IOException e) {
throw new RuntimeException(e);
}
} | 3.26 |
hbase_ChunkCreator_getChunk_rdh | /**
* Poll a chunk from the pool, reset it if not null, else create a new chunk to return if we
* have not yet created max allowed chunks count. When we have already created max allowed
* chunks and no free chunks as of now, return null. It is the responsibility of the caller to
* make a chunk then. Note: Chunks returned by this pool must be put back to the pool after its
* use.
*
* @return a chunk
* @see #putbackChunks(Chunk)
*/
Chunk getChunk() {
Chunk chunk = reclaimedChunks.poll();
if (chunk != null) {
chunk.reset();
reusedChunkCount.increment();
} else {
// Make a chunk iff we have not yet created the maxCount chunks
while (true) {
long created = this.chunkCount.get();
if (created < this.maxCount) {
if (this.chunkCount.compareAndSet(created, created + 1))
{
chunk = createChunkForPool(chunkType, chunkSize);
break;
}
} else {
break;
}
}
}
return chunk;
} | 3.26 |
hbase_ChunkCreator_createChunk_rdh | /**
* Creates the chunk either onheap or offheap
*
* @param pool
* indicates if the chunks have to be created which will be used by the Pool
* @param chunkType
* whether the requested chunk is data chunk or index chunk.
* @param size
* the size of the chunk to be allocated, in bytes
* @return the chunk
*/
private Chunk createChunk(boolean pool, ChunkType chunkType, int size) {
Chunk chunk = null;
int id = chunkID.getAndIncrement();
assert id > 0;
// do not create offheap chunk on demand
if (pool && this.f1) {
chunk = new OffheapChunk(size, id, chunkType, pool);
} else {
chunk = new OnheapChunk(size, id, chunkType, pool);
}
/**
* Here we always put the chunk into the {@link ChunkCreator#chunkIdMap} no matter whether the
* chunk is pooled or not. <br/>
* For {@link CompactingMemStore},because the chunk could only be acquired from
* {@link ChunkCreator} through {@link MemStoreLABImpl}, and
* {@link CompactingMemStore#indexType} could only be {@link IndexType.CHUNK_MAP} when using
* {@link MemStoreLABImpl}, so we must put chunk into this {@link ChunkCreator#chunkIdMap} to
* make sure the chunk could be got by chunkId.
* <p>
* For {@link DefaultMemStore},it is also reasonable to put the chunk in
* {@link ChunkCreator#chunkIdMap} because: <br/>
* 1.When the {@link MemStoreLAB} which created the chunk is not closed, this chunk is used by
* the {@link Segment} which references this {@link MemStoreLAB}, so this chunk certainly should
* not be GC-ed, putting the chunk in {@link ChunkCreator#chunkIdMap} does not prevent useless
* chunk to be GC-ed. <br/>
* 2.When the {@link MemStoreLAB} which created the chunk is closed, and if the chunk is not
* pooled, {@link ChunkCreator#removeChunk} is invoked to remove the chunk from this
* {@link ChunkCreator#chunkIdMap}, so there is no memory leak.
*/
this.chunkIdMap.put(chunk.getId(), chunk);
return chunk;
} | 3.26 |
hbase_ChunkCreator_putbackChunks_rdh | /**
* Add the chunks to the pool, when the pool achieves the max size, it will skip the remaining
* chunks
*/
private void putbackChunks(Chunk c) {
int remainingCapacity = this.maxCount - reclaimedChunks.size();
if ((c.isFromPool() && (c.size == chunkSize)) && (remainingCapacity > 0)) {
reclaimedChunks.add(c);
} else {
// remove the chunk (that is not going to pool)
// though it is initially from the pool or not
ChunkCreator.this.removeChunk(c.getId());
}
} | 3.26 |
hbase_ChunkCreator_numberOfMappedChunks_rdh | // the chunks in the chunkIdMap may already be released so we shouldn't relay
// on this counting for strong correctness. This method is used only in testing.
int numberOfMappedChunks() {
return this.chunkIdMap.size();
} | 3.26 |
hbase_ChunkCreator_getJumboChunk_rdh | /**
* Creates and inits a chunk of a special size, bigger than a regular chunk size. Such a chunk
* will never come from pool and will always be on demand allocated.
*
* @return the chunk that was initialized
* @param jumboSize
* the special size to be used
*/
Chunk getJumboChunk(int jumboSize) {
int allocSize = jumboSize + SIZEOF_CHUNK_HEADER;
if (allocSize <= this.getChunkSize(ChunkType.DATA_CHUNK)) {
f0.warn(((("Jumbo chunk size " + jumboSize) + " must be more than regular chunk size ") + this.getChunkSize(ChunkType.DATA_CHUNK)) + ". Converting to regular chunk.");
return getChunk();
}
// the new chunk is going to hold the jumbo cell data and needs to be referenced by
// a strong map.
return getChunk(ChunkType.JUMBO_CHUNK, allocSize);
} | 3.26 |
hbase_ChunkCreator_createChunkForPool_rdh | // Chunks from pool are created covered with strong references anyway.
private Chunk createChunkForPool(ChunkType chunkType, int chunkSize) {
if ((chunkSize != dataChunksPool.getChunkSize()) && (chunkSize != indexChunksPool.getChunkSize())) {
return null;
}
return createChunk(true, chunkType, chunkSize);
} | 3.26 |
hbase_ChunkCreator_initialize_rdh | /**
* Initializes the instance of ChunkCreator
*
* @param chunkSize
* the chunkSize
* @param offheap
* indicates if the chunk is to be created offheap or not
* @param globalMemStoreSize
* the global memstore size
* @param poolSizePercentage
* pool size percentage
* @param initialCountPercentage
* the initial count of the chunk pool if any
* @param heapMemoryManager
* the heapmemory manager
* @return singleton MSLABChunkCreator
*/
@SuppressWarnings(value = { "LI_LAZY_INIT_STATIC", "MS_EXPOSE_REP" }, justification = "Method is called by single thread at the starting of RS")
public static ChunkCreator initialize(int chunkSize, boolean offheap, long globalMemStoreSize, float poolSizePercentage, float initialCountPercentage, HeapMemoryManager heapMemoryManager, float indexChunkSizePercent) {
if (f2 != null) {
return f2;
}
f2 = new ChunkCreator(chunkSize, offheap, globalMemStoreSize, poolSizePercentage, initialCountPercentage, heapMemoryManager, indexChunkSizePercent);
return f2;
} | 3.26 |
hbase_WALObserver_postWALRoll_rdh | /**
* Called after rolling the current WAL
*
* @param oldPath
* the path of the wal that we replaced
* @param newPath
* the path of the wal we have created and now is the current
*/
default void postWALRoll(ObserverContext<? extends WALCoprocessorEnvironment> ctx, Path oldPath, Path newPath) throws IOException {
} | 3.26 |
hbase_WALObserver_preWALRoll_rdh | /**
* Called before rolling the current WAL
*
* @param oldPath
* the path of the current wal that we are replacing
* @param newPath
* the path of the wal we are going to create
*/
default void preWALRoll(ObserverContext<? extends WALCoprocessorEnvironment> ctx, Path oldPath, Path newPath) throws IOException {
} | 3.26 |
hbase_SubstringComparator_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the comparator that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(ByteArrayComparable other) {
if (other == this) {
return true;
}
if (!(other instanceof SubstringComparator)) {
return false;
}
SubstringComparator comparator = ((SubstringComparator) (other));
return super.areSerializedFieldsEqual(comparator) && this.substr.equals(comparator.substr);
} | 3.26 |
hbase_SubstringComparator_parseFrom_rdh | /**
* Parse a serialized representation of {@link SubstringComparator}
*
* @param pbBytes
* A pb serialized {@link SubstringComparator} instance
* @return An instance of {@link SubstringComparator} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static SubstringComparator parseFrom(final byte[] pbBytes) throws DeserializationException {
ComparatorProtos.SubstringComparator proto;
try {
proto = ComparatorProtos.SubstringComparator.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
return new SubstringComparator(proto.getSubstr());
} | 3.26 |
hbase_SubstringComparator_toByteArray_rdh | /**
* Returns The comparator serialized using pb
*/
@Override
public byte[] toByteArray() {
ComparatorProtos.SubstringComparator.Builder builder = ComparatorProtos.SubstringComparator.newBuilder();
builder.setSubstr(this.substr);
return builder.build().toByteArray();
} | 3.26 |
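A protobuf round-trip sketch for SubstringComparator, attached to a SingleColumnValueFilter as a typical use; the family and qualifier names are illustrative:

```java
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.filter.SubstringComparator;
import org.apache.hadoop.hbase.util.Bytes;

public class SubstringComparatorRoundTrip {
  public static void main(String[] args) throws DeserializationException {
    SubstringComparator original = new SubstringComparator("needle");
    byte[] pb = original.toByteArray();
    SubstringComparator restored = SubstringComparator.parseFrom(pb);
    // Typical use: match rows whose cf:q value contains the substring.
    SingleColumnValueFilter filter = new SingleColumnValueFilter(
        Bytes.toBytes("cf"), Bytes.toBytes("q"), CompareOperator.EQUAL, restored);
    System.out.println(filter);
  }
}
```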
hbase_TsvImporterMapper_setup_rdh | /**
* Handles initializing this class with objects specific to it (i.e., the parser). Common
* initialization that might be leveraged by a subclass is done in <code>doSetup</code>. Hence a
* subclass may choose to override this method and call <code>doSetup</code> as well before
* handling its own custom params.
*/
@Override
protected void setup(Context context) {
doSetup(context);
conf = context.getConfiguration();
parser = new ImportTsv.TsvParser(conf.get(ImportTsv.COLUMNS_CONF_KEY), f1);
if (parser.getRowKeyColumnIndex() == (-1)) {
throw new RuntimeException("No row key column specified");}
this.kvCreator = new CellCreator(conf);
tags = new ArrayList<>();
} | 3.26 |
hbase_TsvImporterMapper_doSetup_rdh | /**
* Handles common parameter initialization that a subclass might want to leverage.
*/
protected void doSetup(Context context) {
Configuration conf = context.getConfiguration();
// If a custom separator has been used,
// decode it back from Base64 encoding.
f1 = conf.get(ImportTsv.SEPARATOR_CONF_KEY);
if (f1 == null) {
f1 = ImportTsv.DEFAULT_SEPARATOR;
} else {
f1 = new String(Base64.getDecoder().decode(f1));
}
// Should never get 0 as we are setting this to a valid value in job
// configuration.
f0 = conf.getLong(ImportTsv.TIMESTAMP_CONF_KEY, 0);
skipEmptyColumns = context.getConfiguration().getBoolean(ImportTsv.SKIP_EMPTY_COLUMNS, false);
skipBadLines = context.getConfiguration().getBoolean(ImportTsv.SKIP_LINES_CONF_KEY, true);
badLineCount = context.getCounter("ImportTsv", "Bad Lines");
logBadLines = context.getConfiguration().getBoolean(ImportTsv.LOG_BAD_LINES_CONF_KEY, false);
hfileOutPath = conf.get(ImportTsv.BULK_OUTPUT_CONF_KEY);
} | 3.26 |
hbase_TsvImporterMapper_map_rdh | /**
* Convert a line of TSV text into an HBase table row.
*/
@Override
public void map(LongWritable offset, Text value, Context context) throws IOException {
byte[] lineBytes = value.getBytes();
try {
ImportTsv.TsvParser.ParsedLine parsed = parser.parse(lineBytes, value.getLength());
ImmutableBytesWritable rowKey = new ImmutableBytesWritable(lineBytes, parsed.getRowKeyOffset(), parsed.getRowKeyLength());
// Retrieve timestamp if exists
f0 = parsed.getTimestamp(f0);
f2 = parsed.getCellVisibility();
ttl = parsed.getCellTTL();
// create tags for the parsed line
if (hfileOutPath != null) {
tags.clear();
if (f2 != null) {
tags.addAll(kvCreator.getVisibilityExpressionResolver().createVisibilityExpTags(f2));
}
// Add TTL directly to the KV so we can vary them when packing more than one KV
// into puts
if (ttl > 0) {
tags.add(new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(ttl)));
}
}
Put put = new Put(rowKey.copyBytes());
for (int i = 0; i < parsed.getColumnCount(); i++) {
if ((((((i == parser.getRowKeyColumnIndex()) || (i == parser.getTimestampKeyColumnIndex())) || (i == parser.getAttributesKeyColumnIndex())) || (i == parser.getCellVisibilityColumnIndex())) || (i == parser.getCellTTLColumnIndex())) || (skipEmptyColumns && (parsed.getColumnLength(i) == 0))) {
continue;
}
populatePut(lineBytes, parsed, put, i);
}
context.write(rowKey, put);
} catch (ImportTsv.TsvParser.BadTsvLineException | IllegalArgumentException | InvalidLabelException badLine) {
if (logBadLines) {
System.err.println(value);
}
System.err.println((("Bad line at offset: " + offset.get()) + ":\n") + badLine.getMessage());
if (skipBadLines) {
incrementBadLineCount(1);
return;
}
throw new IOException(badLine);
} catch (InterruptedException e) {
LOG.error("Interrupted while emitting put", e);
Thread.currentThread().interrupt();
}
} | 3.26 |
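For context, a hedged sketch of the driver-side configuration that setup()/doSetup() above read back. The constant names come from the snippet (their visibility and exact string values are assumed to match upstream HBase), "d" is a hypothetical column family, and the separator is Base64-encoded because doSetup() decodes it:

```java
import java.util.Base64;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.ImportTsv;

public class TsvImportConfSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // First column is the row key, remaining columns land in family "d".
    conf.set(ImportTsv.COLUMNS_CONF_KEY, "HBASE_ROW_KEY,d:c1,d:c2");
    // doSetup() Base64-decodes the separator, so encode it here.
    conf.set(ImportTsv.SEPARATOR_CONF_KEY,
        Base64.getEncoder().encodeToString("\t".getBytes()));
    conf.setBoolean(ImportTsv.SKIP_LINES_CONF_KEY, true);
    return conf;
  }
}
```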
hbase_MetricsHeapMemoryManager_increaseAboveHeapOccupancyLowWatermarkCounter_rdh | /**
* Increase the counter for heap occupancy percent above low watermark
*/
public void increaseAboveHeapOccupancyLowWatermarkCounter() {
source.increaseAboveHeapOccupancyLowWatermarkCounter();
} | 3.26 |
hbase_MetricsHeapMemoryManager_setCurMemStoreSizeGauge_rdh | /**
* Set the current global memstore size used gauge
*
* @param memStoreSize
* the current memory usage in memstore, in bytes.
*/
public void setCurMemStoreSizeGauge(final long memStoreSize) {
source.setCurMemStoreSizeGauge(memStoreSize);
} | 3.26 |
hbase_MetricsHeapMemoryManager_increaseTunerDoNothingCounter_rdh | /**
* Increase the counter for tuner neither expanding memstore global size limit nor expanding
* blockcache max size.
*/
public void increaseTunerDoNothingCounter() {
source.increaseTunerDoNothingCounter();
} | 3.26 |
hbase_MetricsHeapMemoryManager_updateUnblockedFlushCount_rdh | /**
* Update/Set the unblocked flush count histogram/gauge
*
* @param unblockedFlushCount
* the number of unblocked memstore flush since last tuning.
*/
public void updateUnblockedFlushCount(final long unblockedFlushCount) {
source.updateUnblockedFlushCount(unblockedFlushCount);
} | 3.26 |
hbase_MetricsHeapMemoryManager_updateBlockedFlushCount_rdh | /**
* Update/Set the blocked flush count histogram/gauge
*
* @param blockedFlushCount
* the number of blocked memstore flush since last tuning.
*/
public void updateBlockedFlushCount(final long blockedFlushCount) {
source.updateBlockedFlushCount(blockedFlushCount);
} | 3.26 |
hbase_MetricsHeapMemoryManager_setCurMemStoreOffHeapSizeGauge_rdh | /**
* Set the current global memstore off-heap size gauge
*
* @param memStoreOffHeapSize
* the current memory off-heap size in memstore, in bytes.
*/
public void setCurMemStoreOffHeapSizeGauge(final long memStoreOffHeapSize) {
source.setCurMemStoreOffHeapSizeGauge(memStoreOffHeapSize);
} | 3.26 |
hbase_MetricsHeapMemoryManager_updateMemStoreDeltaSizeHistogram_rdh | /**
* Update the increase/decrease memstore size histogram
*
* @param memStoreDeltaSize
* the tuning result of memstore.
*/
public void updateMemStoreDeltaSizeHistogram(final int memStoreDeltaSize) {
source.updateMemStoreDeltaSizeHistogram(memStoreDeltaSize);
} | 3.26 |
hbase_MetricsHeapMemoryManager_updateBlockCacheDeltaSizeHistogram_rdh | /**
* Update the increase/decrease blockcache size histogram
*
* @param blockCacheDeltaSize
* the tuning result of blockcache.
*/
public void updateBlockCacheDeltaSizeHistogram(final int blockCacheDeltaSize) {
source.updateBlockCacheDeltaSizeHistogram(blockCacheDeltaSize);
} | 3.26 |
hbase_MetricsHeapMemoryManager_setCurBlockCacheSizeGauge_rdh | /**
* Set the current blockcache size used gauge
*
* @param blockCacheSize
* the current memory usage in blockcache, in bytes.
*/
public void setCurBlockCacheSizeGauge(final long blockCacheSize) {
source.setCurBlockCacheSizeGauge(blockCacheSize);
} | 3.26 |
hbase_MetricsHeapMemoryManager_setCurMemStoreOnHeapSizeGauge_rdh | /**
* Set the current global memstore on-heap size gauge
*
* @param memStoreOnHeapSize
* the current memory on-heap size in memstore, in bytes.
*/
public void setCurMemStoreOnHeapSizeGauge(final long memStoreOnHeapSize) {
source.setCurMemStoreOnHeapSizeGauge(memStoreOnHeapSize);
} | 3.26 |
hbase_CompactionTool_compactStoreFiles_rdh | /**
* Execute the actual compaction job. If the compact once flag is not specified, execute the
* compaction until no more compactions are needed. Uses the Configuration settings provided.
*/
private void compactStoreFiles(final Path tableDir, final TableDescriptor htd, final RegionInfo hri, final String familyName, final boolean compactOnce, final boolean major) throws IOException {
HStore store = getStore(conf, fs, tableDir, htd, hri, familyName);
LOG.info((((("Compact table=" + htd.getTableName()) + " region=") + hri.getRegionNameAsString()) + " family=") + familyName);
if (major) {
store.triggerMajorCompaction();
}
do {
Optional<CompactionContext> compaction = store.requestCompaction(PRIORITY_USER, CompactionLifeCycleTracker.DUMMY, null);
if (!compaction.isPresent()) {
break;
}
List<HStoreFile> storeFiles = store.compact(compaction.get(), NoLimitThroughputController.INSTANCE, null);
if ((storeFiles != null) && (!storeFiles.isEmpty())) {
if (deleteCompacted) {
for (HStoreFile storeFile : storeFiles) {
fs.delete(storeFile.getPath(), false);
}
}
}
} while (store.needsCompaction() && (!compactOnce) );
// We need to close the store properly, to make sure it will archive compacted files
store.close();
} | 3.26 |
hbase_CompactionTool_getSplits_rdh | /**
* Returns a split for each store files directory using the block location of each file as
* locality reference.
*/
@Override
public List<InputSplit> getSplits(JobContext job) throws IOException {
List<InputSplit> splits = new ArrayList<>();
List<FileStatus> files = listStatus(job);
Text key = new Text();
for (FileStatus file : files) {
Path path = file.getPath();
FileSystem fs = path.getFileSystem(job.getConfiguration());
LineReader reader = new LineReader(fs.open(path));
long pos = 0;
int n;
try {
while ((n = reader.readLine(key)) > 0) {
String[] hosts = getStoreDirHosts(fs, path);
splits.add(new FileSplit(path, pos, n, hosts));
pos += n;
}
} finally {
reader.close();
}
}
return splits;
} | 3.26 |
hbase_CompactionTool_compact_rdh | /**
* Execute the compaction on the specified path.
*
* @param path
* Directory path on which to run compaction.
* @param compactOnce
* Execute just a single step of compaction.
* @param major
* Request major compaction.
*/
public void compact(final Path path, final boolean compactOnce, final boolean major) throws IOException {
if (isFamilyDir(fs, path)) {
Path regionDir = path.getParent();
Path tableDir = regionDir.getParent();
TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
compactStoreFiles(tableDir, htd, hri, path.getName(), compactOnce, major);
} else if (isRegionDir(fs, path)) {
Path tableDir = path.getParent();
TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
compactRegion(tableDir, htd, path, compactOnce, major);
} else if (isTableDir(fs, path)) {
compactTable(path, compactOnce, major);
} else {
throw new IOException("Specified path is not a table, region or family directory. path=" + path);
}
} | 3.26 |
hbase_CompactionTool_createInputFile_rdh | /**
* Create the input file for the given directories to compact. The file is a TextFile with each
* line corresponding to a store files directory to compact.
*/
public static List<Path> createInputFile(final FileSystem fs, final FileSystem stagingFs, final Path path, final Set<Path> toCompactDirs) throws IOException {
// Extract the list of store dirs
List<Path> storeDirs = new LinkedList<>();
for (Path compactDir : toCompactDirs) {
if (isFamilyDir(fs, compactDir)) {
storeDirs.add(compactDir);
} else if (isRegionDir(fs, compactDir)) {
storeDirs.addAll(FSUtils.getFamilyDirs(fs, compactDir));
} else if (isTableDir(fs, compactDir)) {
// Lookup regions
for (Path regionDir : FSUtils.getRegionDirs(fs, compactDir)) {
storeDirs.addAll(FSUtils.getFamilyDirs(fs, regionDir));
}
} else {
throw new IOException("Specified path is not a table, region or family directory. path=" + compactDir);
}
}
// Write Input File
FSDataOutputStream stream = stagingFs.create(path);
LOG.info(((("Create input file=" + path) + " with ") + storeDirs.size()) + " dirs to compact.");
try {
final byte[] newLine = Bytes.toBytes("\n");
for (Path storeDir : storeDirs) {
stream.write(Bytes.toBytes(storeDir.toString()));
stream.write(newLine);
}
} finally {
stream.close();
}
return storeDirs;
} | 3.26 |
hbase_CompactionTool_getStoreDirHosts_rdh | /**
* return the top hosts of the store files, used by the Split
*/
private static String[] getStoreDirHosts(final FileSystem fs, final Path path) throws IOException {
FileStatus[] files = CommonFSUtils.listStatus(fs, path);
if (files == null) {
return new String[]{ };
}
HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
for (FileStatus hfileStatus : files) {
HDFSBlocksDistribution storeFileBlocksDistribution = FSUtils.computeHDFSBlocksDistribution(fs, hfileStatus, 0, hfileStatus.getLen());
hdfsBlocksDistribution.add(storeFileBlocksDistribution);
}
List<String> hosts = hdfsBlocksDistribution.getTopHosts();
return hosts.toArray(new String[hosts.size()]);
} | 3.26 |
hbase_CompactionTool_doClient_rdh | /**
* Execute compaction, from this client, one path at the time.
*/
private int doClient(final FileSystem fs, final Set<Path> toCompactDirs, final boolean compactOnce, final boolean major) throws IOException {
CompactionWorker worker = new CompactionWorker(fs, getConf());
for (Path path : toCompactDirs) {
worker.compact(path, compactOnce, major);
}
return 0;
} | 3.26 |
hbase_MasterFileSystem_createInitialFileSystemLayout_rdh | /**
* Create initial layout in filesystem.
* <ol>
* <li>Check if the meta region exists and is readable, if not create it. Create the hbase.version
* file and the hbase:meta directory if they do not exist.</li>
* </ol>
* Idempotent.
*/
private void createInitialFileSystemLayout() throws IOException {
final String[] protectedSubDirs = new String[]{ HConstants.BASE_NAMESPACE_DIR, HConstants.HFILE_ARCHIVE_DIRECTORY, HConstants.HBCK_SIDELINEDIR_NAME, MobConstants.MOB_DIR_NAME };
// With the introduction of RegionProcedureStore,
// there's no need to create MasterProcWAL dir here anymore. See HBASE-23715
final String[] protectedSubLogDirs = new String[]{ HConstants.HREGION_LOGDIR_NAME, HConstants.HREGION_OLDLOGDIR_NAME,
HConstants.CORRUPT_DIR_NAME, ReplicationUtils.REMOTE_WAL_DIR_NAME };
// check if the root directory exists
checkRootDir(this.rootdir, conf, this.fs);
// Check the directories under rootdir.
checkTempDir(this.tempdir, conf, this.fs);
for (String subDir : protectedSubDirs)
{
checkSubDir(new Path(this.rootdir, subDir), HBASE_DIR_PERMS);
}
final String perms;
if (!this.walRootDir.equals(this.rootdir)) {
perms = HBASE_WAL_DIR_PERMS;
} else {
perms = HBASE_DIR_PERMS;
}
for (String subLogDir : protectedSubLogDirs) {
checkSubDir(new Path(this.walRootDir, subLogDir), perms);
}
checkStagingDir();
// Handle the last few special files and set the final rootDir permissions
// rootDir needs 'x' for all to support bulk load staging dir
if (isSecurityEnabled) {
fs.setPermission(new Path(rootdir, HConstants.VERSION_FILE_NAME), secureRootFilePerms);
fs.setPermission(new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME), secureRootFilePerms);
}
FsPermission currentRootPerms = fs.getFileStatus(this.rootdir).getPermission();
if (((!currentRootPerms.getUserAction().implies(FsAction.EXECUTE)) || (!currentRootPerms.getGroupAction().implies(FsAction.EXECUTE))) || (!currentRootPerms.getOtherAction().implies(FsAction.EXECUTE))) {
LOG.warn("rootdir permissions do not contain 'excute' for user, group or other. " + "Automatically adding 'excute' permission for all");
fs.setPermission(this.rootdir, new FsPermission(currentRootPerms.getUserAction().or(FsAction.EXECUTE), currentRootPerms.getGroupAction().or(FsAction.EXECUTE), currentRootPerms.getOtherAction().or(FsAction.EXECUTE)));
}
} | 3.26 |
hbase_MasterFileSystem_getClusterId_rdh | /**
* Returns The unique identifier generated for this cluster
*/
public ClusterId getClusterId() {
return clusterId;
} | 3.26 |
hbase_MasterFileSystem_getWALRootDir_rdh | /**
* Returns HBase root log dir.
*/
public Path getWALRootDir() {
return this.walRootDir;
} | 3.26 |
hbase_MasterFileSystem_getRootDir_rdh | /**
* Returns HBase root dir.
*/
public Path getRootDir() {
return this.rootdir;
} | 3.26 |
hbase_MasterFileSystem_getRegionDir_rdh | /**
* Returns the directory for a give {@code region}.
*/
public Path getRegionDir(RegionInfo region) {
return FSUtils.getRegionDirFromRootDir(getRootDir(), region);
} | 3.26 |
hbase_MasterFileSystem_getTempDir_rdh | /**
* Returns HBase temp dir.
*/
public Path getTempDir() {
return this.tempdir;
} | 3.26 |
hbase_MasterFileSystem_checkStagingDir_rdh | /**
* Check permissions for bulk load staging directory. This directory has special hidden
* permissions. Create it if necessary.
*/
private void checkStagingDir() throws IOException {
Path p = new Path(this.rootdir, HConstants.BULKLOAD_STAGING_DIR_NAME);
try {
if (!this.fs.exists(p)) {
if (!this.fs.mkdirs(p, HiddenDirPerms)) {
throw new IOException("Failed to create staging directory " + p.toString());
}
}
this.fs.setPermission(p, HiddenDirPerms);
} catch (IOException e) {
LOG.error("Failed to create or set permission on staging directory " + p.toString());throw new IOException("Failed to create or set permission on staging directory " + p.toString(), e);
}} | 3.26 |
hbase_MasterFileSystem_checkSubDir_rdh | /**
* Make sure the directories under rootDir have good permissions. Create if necessary.
*/
private void checkSubDir(final Path p, final String dirPermsConfName) throws IOException {
FileSystem fs = p.getFileSystem(conf);
FsPermission dirPerms = new FsPermission(conf.get(dirPermsConfName, "700"));
if (!fs.exists(p)) {
if (isSecurityEnabled) {
if (!fs.mkdirs(p, secureRootSubDirPerms)) {
throw new IOException(("HBase directory '" + p) + "' creation failure.");
}
} else if (!fs.mkdirs(p)) {
throw new IOException(("HBase directory '" + p) + "' creation failure.");
}
}
if (isSecurityEnabled && (!dirPerms.equals(fs.getFileStatus(p).getPermission()))) {
// check whether the permissions match
LOG.warn(((((((((("Found HBase directory permissions NOT matching expected permissions for " + p.toString()) + " permissions=") + fs.getFileStatus(p).getPermission()) + ", expecting ") + dirPerms) + ". Automatically setting the permissions. ") + "You can change the permissions by setting \"") + dirPermsConfName) + "\" in hbase-site.xml ") + "and restarting the master");
fs.setPermission(p, dirPerms);
}
} | 3.26 |
hbase_MasterFileSystem_checkTempDir_rdh | /**
* Make sure the hbase temp directory exists and is empty. NOTE that this method is only executed
* once just after the master becomes the active one.
*/
void checkTempDir(final Path tmpdir, final Configuration c, final FileSystem fs) throws IOException {
// If the temp directory exists, clear the content (left over, from the previous run)
if (fs.exists(tmpdir)) {
// Archive table in temp, maybe left over from failed deletion,
// if not the cleaner will take care of them.
for (Path tableDir : FSUtils.getTableDirs(fs, tmpdir)) {
HFileArchiver.archiveRegions(c, fs, this.rootdir, tableDir, FSUtils.getRegionDirs(fs, tableDir));
if (!FSUtils.getRegionDirs(fs, tableDir).isEmpty()) {
LOG.warn("Found regions in tmp dir after archiving table regions, {}", tableDir);
}
}
// if acl sync to hdfs is enabled, then skip delete tmp dir because ACLs are set
if ((!SnapshotScannerHDFSAclHelper.isAclSyncToHdfsEnabled(c)) && (!fs.delete(tmpdir, true))) {
throw new IOException("Unable to clean the temp directory: " + tmpdir);
}
}
// Create the temp directory
if (!fs.exists(tmpdir)) {
if (isSecurityEnabled) {
if (!fs.mkdirs(tmpdir, secureRootSubDirPerms)) {
throw new IOException(("HBase temp directory '" + tmpdir) + "' creation failure.");
}
} else if (!fs.mkdirs(tmpdir)) {
throw new IOException(("HBase temp directory '" + tmpdir) + "' creation failure.");
}
}
} | 3.26 |
hbase_NullComparator_parseFrom_rdh | /**
* Parse the serialized representation of {@link NullComparator}
*
* @param pbBytes
* A pb serialized {@link NullComparator} instance
* @return An instance of {@link NullComparator} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static NullComparator parseFrom(final byte[] pbBytes) throws DeserializationException {
try {
// Just parse. Don't use what we parse since on end we are returning new NullComparator.
ComparatorProtos.NullComparator.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
return new NullComparator();
} | 3.26 |
hbase_NullComparator_toByteArray_rdh | /**
* Returns The comparator serialized using pb
*/
@Override
public byte[] toByteArray() {
ComparatorProtos.NullComparator.Builder builder = ComparatorProtos.NullComparator.newBuilder();
return builder.build().toByteArray();
} | 3.26 |
hbase_NullComparator_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the comparator that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(ByteArrayComparable other) {
if (other == this) {
return true;
}
if (!(other instanceof NullComparator)) {
return false;
}
return super.areSerializedFieldsEqual(other);
} | 3.26 |
hbase_RegionServerRpcQuotaManager_checkQuota_rdh | /**
* Check the quota for the current (rpc-context) user. Returns the OperationQuota used to get the
* available quota and to report the data/usage of the operation.
*
* @param region
* the region where the operation will be performed
* @param numWrites
* number of writes to perform
* @param numReads
* number of short-reads to perform
* @param numScans
* number of scan to perform
* @return the OperationQuota
* @throws RpcThrottlingException
* if the operation cannot be executed due to quota exceeded.
*/
private OperationQuota checkQuota(final Region region, final int numWrites, final int numReads, final int numScans) throws IOException, RpcThrottlingException {
Optional<User> user = RpcServer.getRequestUser();
UserGroupInformation ugi;
if (user.isPresent()) {
ugi = user.get().getUGI();
} else {
ugi = User.getCurrent().getUGI();
}
TableName table = region.getTableDescriptor().getTableName();
OperationQuota quota = getQuota(ugi, table);
try {
quota.checkQuota(numWrites, numReads, numScans);
} catch (RpcThrottlingException e) {
LOG.debug((((((((((("Throttling exception for user=" + ugi.getUserName()) + " table=") + table) + " numWrites=") + numWrites) + " numReads=") + numReads) + " numScans=") + numScans) + ": ") + e.getMessage());
throw e;
}
return quota;
} | 3.26 |
hbase_RegionServerRpcQuotaManager_getQuota_rdh | /**
* Returns the quota for an operation.
*
* @param ugi
* the user that is executing the operation
* @param table
* the table where the operation will be executed
* @return the OperationQuota
*/
public OperationQuota getQuota(final UserGroupInformation ugi, final TableName table) {
if ((isQuotaEnabled() && (!table.isSystemTable())) && isRpcThrottleEnabled()) {
UserQuotaState userQuotaState = quotaCache.getUserQuotaState(ugi);
QuotaLimiter userLimiter = userQuotaState.getTableLimiter(table);
boolean useNoop = userLimiter.isBypass();
if (userQuotaState.hasBypassGlobals()) {
if (LOG.isTraceEnabled()) {
LOG.trace((((("get quota for ugi=" + ugi) + " table=") + table) + " userLimiter=") + userLimiter);
}
if (!useNoop) {
return new DefaultOperationQuota(this.rsServices.getConfiguration(), userLimiter);
}
} else {
QuotaLimiter nsLimiter = quotaCache.getNamespaceLimiter(table.getNamespaceAsString());
QuotaLimiter tableLimiter = quotaCache.getTableLimiter(table);
QuotaLimiter rsLimiter = quotaCache.getRegionServerQuotaLimiter(QuotaTableUtil.QUOTA_REGION_SERVER_ROW_KEY);
useNoop &= (tableLimiter.isBypass() && nsLimiter.isBypass()) && rsLimiter.isBypass();
boolean exceedThrottleQuotaEnabled = quotaCache.isExceedThrottleQuotaEnabled();
if (LOG.isTraceEnabled()) {
LOG.trace((((((((((((("get quota for ugi=" + ugi) + " table=") + table) + " userLimiter=") + userLimiter) + " tableLimiter=") + tableLimiter) + " nsLimiter=") + nsLimiter) + " rsLimiter=") + rsLimiter) + " exceedThrottleQuotaEnabled=") + exceedThrottleQuotaEnabled);
}
if (!useNoop) {
if (exceedThrottleQuotaEnabled) {
return new ExceedOperationQuota(this.rsServices.getConfiguration(), rsLimiter, userLimiter, tableLimiter, nsLimiter);
} else {
return new DefaultOperationQuota(this.rsServices.getConfiguration(), userLimiter, tableLimiter, nsLimiter, rsLimiter);
}
}
}
}
return NoopOperationQuota.get();
} | 3.26 |
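For context, a hedged sketch of how a caller would combine getQuota with the returned OperationQuota; it mirrors the private checkQuota above, and the quotaManager/region references are assumptions standing in for an initialized RegionServerRpcQuotaManager and a Region hosted by this server (imports omitted to match the snippet style of this listing).
// Hypothetical call site; quotaManager and region are assumed to be in scope.
UserGroupInformation ugi = User.getCurrent().getUGI();
TableName table = region.getTableDescriptor().getTableName();
OperationQuota quota = quotaManager.getQuota(ugi, table);
quota.checkQuota(1, 0, 0); // 1 write, 0 reads, 0 scans; throws RpcThrottlingException when exceeded
quota.close(); // assumption: report/release usage once the operation completes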
hbase_RESTServer_loginServerPrincipal_rdh | // login the server principal (if using secure Hadoop)
  private static Pair<FilterHolder, Class<? extends ServletContainer>>
    loginServerPrincipal(UserProvider userProvider, Configuration conf) throws Exception {
Class<? extends ServletContainer> v5 = ServletContainer.class;
if (userProvider.isHadoopSecurityEnabled() && userProvider.isHBaseSecurityEnabled()) {
String machineName = Strings.domainNamePointerToHostName(DNS.getDefaultHost(conf.get(REST_DNS_INTERFACE, "default"), conf.get(REST_DNS_NAMESERVER, "default")));
String keytabFilename = conf.get(REST_KEYTAB_FILE);
Preconditions.checkArgument((keytabFilename != null) && (!keytabFilename.isEmpty()), REST_KEYTAB_FILE + " should be set if security is enabled");
String principalConfig = conf.get(REST_KERBEROS_PRINCIPAL);
Preconditions.checkArgument((principalConfig != null) && (!principalConfig.isEmpty()), REST_KERBEROS_PRINCIPAL + " should be set if security is enabled");
// Hook for unit tests, this will log out any other user and mess up tests.
      if (!conf.getBoolean(SKIP_LOGIN_KEY, false)) {
        userProvider.login(REST_KEYTAB_FILE, REST_KERBEROS_PRINCIPAL, machineName);
}
if (conf.get(REST_AUTHENTICATION_TYPE) != null) {
v5 = RESTServletContainer.class;
FilterHolder authFilter = new FilterHolder();
authFilter.setClassName(AuthFilter.class.getName());
authFilter.setName("AuthenticationFilter");
return new Pair<>(authFilter, v5);
}
}
return new Pair<>(null, v5);
} | 3.26 |
hbase_RESTServer_main_rdh | /**
* The main method for the HBase rest server.
*
* @param args
* command-line arguments
* @throws Exception
* exception
*/
public static void main(String[] args) throws Exception {
LOG.info(("***** STARTING service '" + RESTServer.class.getSimpleName()) + "' *****");
VersionInfo.logVersion();
final Configuration conf = HBaseConfiguration.create();
parseCommandLine(args, conf);
RESTServer server = new RESTServer(conf);
try {
server.run();
server.join();
} catch (Exception e) {
LOG.error(HBaseMarkers.FATAL, "Failed to start server", e);
System.exit(1);
    }
    LOG.info(("***** STOPPING service '" + RESTServer.class.getSimpleName()) + "' *****");
} | 3.26 |
hbase_RESTServer_run_rdh | /**
* Runs the REST server.
 */
  public synchronized void run() throws Exception {
Pair<FilterHolder, Class<? extends ServletContainer>> pair = loginServerPrincipal(userProvider, conf);
FilterHolder authFilter = pair.getFirst();
Class<? extends ServletContainer> containerClass = pair.getSecond();
RESTServlet servlet = RESTServlet.getInstance(conf, userProvider);
// set up the Jersey servlet container for Jetty
ResourceConfig application = new ResourceConfig().packages("org.apache.hadoop.hbase.rest").register(JacksonJaxbJsonProvider.class);
// Using our custom ServletContainer is tremendously important. This is what makes sure the
// UGI.doAs() is done for the remoteUser, and calls are not made as the REST server itself.
ServletContainer servletContainer = ReflectionUtils.newInstance(containerClass, application);
    ServletHolder sh = new ServletHolder(servletContainer);
// Set the default max thread number to 100 to limit
// the number of concurrent requests so that REST server doesn't OOM easily.
// Jetty set the default max thread number to 250, if we don't set it.
//
// Our default min thread number 2 is the same as that used by Jetty.
int maxThreads = servlet.getConfiguration().getInt(REST_THREAD_POOL_THREADS_MAX, 100);
int minThreads = servlet.getConfiguration().getInt(REST_THREAD_POOL_THREADS_MIN, 2);
// Use the default queue (unbounded with Jetty 9.3) if the queue size is negative, otherwise use
// bounded {@link ArrayBlockingQueue} with the given size
int queueSize = servlet.getConfiguration().getInt(REST_THREAD_POOL_TASK_QUEUE_SIZE, -1);
int idleTimeout = servlet.getConfiguration().getInt(REST_THREAD_POOL_THREAD_IDLE_TIMEOUT, 60000);
QueuedThreadPool threadPool = (queueSize > 0) ? new QueuedThreadPool(maxThreads, minThreads, idleTimeout, new ArrayBlockingQueue<>(queueSize)) : new QueuedThreadPool(maxThreads, minThreads, idleTimeout);
this.server = new Server(threadPool);
// Setup JMX
MBeanContainer mbContainer = new MBeanContainer(ManagementFactory.getPlatformMBeanServer());
server.addEventListener(mbContainer);
server.addBean(mbContainer);
String host = servlet.getConfiguration().get("hbase.rest.host", "0.0.0.0");
int servicePort = servlet.getConfiguration().getInt("hbase.rest.port", 8080);
int httpHeaderCacheSize = servlet.getConfiguration().getInt(HTTP_HEADER_CACHE_SIZE, DEFAULT_HTTP_HEADER_CACHE_SIZE);
HttpConfiguration httpConfig = new HttpConfiguration();
httpConfig.setSecureScheme("https");
httpConfig.setSecurePort(servicePort);
httpConfig.setHeaderCacheSize(httpHeaderCacheSize);
httpConfig.setRequestHeaderSize(DEFAULT_HTTP_MAX_HEADER_SIZE);
httpConfig.setResponseHeaderSize(DEFAULT_HTTP_MAX_HEADER_SIZE);
httpConfig.setSendServerVersion(false);
httpConfig.setSendDateHeader(false);
ServerConnector serverConnector;
boolean isSecure = false;
if (conf.getBoolean(REST_SSL_ENABLED, false)) {
isSecure = true;
HttpConfiguration httpsConfig = new HttpConfiguration(httpConfig);
httpsConfig.addCustomizer(new SecureRequestCustomizer());
SslContextFactory.Server sslCtxFactory = new SslContextFactory.Server();
String keystore = conf.get(REST_SSL_KEYSTORE_STORE);
String keystoreType = conf.get(REST_SSL_KEYSTORE_TYPE);
String password = HBaseConfiguration.getPassword(conf, REST_SSL_KEYSTORE_PASSWORD, null);
      String keyPassword = HBaseConfiguration.getPassword(conf, REST_SSL_KEYSTORE_KEYPASSWORD, password);
sslCtxFactory.setKeyStorePath(keystore);
if (StringUtils.isNotBlank(keystoreType)) {
sslCtxFactory.setKeyStoreType(keystoreType);
}
sslCtxFactory.setKeyStorePassword(password);
sslCtxFactory.setKeyManagerPassword(keyPassword);
String trustStore = conf.get(REST_SSL_TRUSTSTORE_STORE);
if (StringUtils.isNotBlank(trustStore)) {
sslCtxFactory.setTrustStorePath(trustStore);
}
      String trustStorePassword = HBaseConfiguration.getPassword(conf, REST_SSL_TRUSTSTORE_PASSWORD, null);
      if (StringUtils.isNotBlank(trustStorePassword)) {
sslCtxFactory.setTrustStorePassword(trustStorePassword);
}
String trustStoreType = conf.get(REST_SSL_TRUSTSTORE_TYPE);
if (StringUtils.isNotBlank(trustStoreType)) {
sslCtxFactory.setTrustStoreType(trustStoreType);
}
String[] excludeCiphers = servlet.getConfiguration().getStrings(REST_SSL_EXCLUDE_CIPHER_SUITES, ArrayUtils.EMPTY_STRING_ARRAY);
if (excludeCiphers.length != 0) {
sslCtxFactory.setExcludeCipherSuites(excludeCiphers);
}
String[] includeCiphers = servlet.getConfiguration().getStrings(REST_SSL_INCLUDE_CIPHER_SUITES, ArrayUtils.EMPTY_STRING_ARRAY);
if (includeCiphers.length != 0) {
sslCtxFactory.setIncludeCipherSuites(includeCiphers);
}
      String[] excludeProtocols = servlet.getConfiguration().getStrings(REST_SSL_EXCLUDE_PROTOCOLS, ArrayUtils.EMPTY_STRING_ARRAY);
if (excludeProtocols.length != 0) {
sslCtxFactory.setExcludeProtocols(excludeProtocols);
}
String[] includeProtocols = servlet.getConfiguration().getStrings(REST_SSL_INCLUDE_PROTOCOLS, ArrayUtils.EMPTY_STRING_ARRAY);
if (includeProtocols.length != 0) {
sslCtxFactory.setIncludeProtocols(includeProtocols);
}
serverConnector = new ServerConnector(server, new SslConnectionFactory(sslCtxFactory, HttpVersion.HTTP_1_1.toString()), new HttpConnectionFactory(httpsConfig));
} else {
serverConnector = new ServerConnector(server, new HttpConnectionFactory(httpConfig));
}
int acceptQueueSize = servlet.getConfiguration().getInt(REST_CONNECTOR_ACCEPT_QUEUE_SIZE, -1);
if (acceptQueueSize >= 0) {
serverConnector.setAcceptQueueSize(acceptQueueSize);
}
serverConnector.setPort(servicePort);
serverConnector.setHost(host);
server.addConnector(serverConnector);
server.setStopAtShutdown(true);
// set up context
ServletContextHandler ctxHandler = new ServletContextHandler(server, "/", ServletContextHandler.SESSIONS);
ctxHandler.addServlet(sh, PATH_SPEC_ANY);
if (authFilter != null) {
ctxHandler.addFilter(authFilter, PATH_SPEC_ANY, EnumSet.of(DispatcherType.REQUEST));
}
// Load filters from configuration.
String[] filterClasses = servlet.getConfiguration().getStrings(FILTER_CLASSES, GzipFilter.class.getName());
for (String filter : filterClasses) {
filter = filter.trim();
ctxHandler.addFilter(filter, PATH_SPEC_ANY, EnumSet.of(DispatcherType.REQUEST));
}
addCSRFFilter(ctxHandler, conf);
addClickjackingPreventionFilter(ctxHandler, conf);
addSecurityHeadersFilter(ctxHandler, conf, isSecure);
HttpServerUtil.constrainHttpMethods(ctxHandler, servlet.getConfiguration().getBoolean(REST_HTTP_ALLOW_OPTIONS_METHOD, REST_HTTP_ALLOW_OPTIONS_METHOD_DEFAULT));
// Put up info server.
int port = conf.getInt("hbase.rest.info.port", 8085);
if (port >= 0) {
conf.setLong("startcode", EnvironmentEdgeManager.currentTime());
String a = conf.get("hbase.rest.info.bindAddress", "0.0.0.0");
this.infoServer = new InfoServer("rest", a, port, false, conf);
this.infoServer.setAttribute("hbase.conf", conf);
this.infoServer.start();
}
// start server
server.start();
} | 3.26 |
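A minimal, hedged sketch of starting the server programmatically, using only the literal configuration keys read in run() above; SSL and thread-pool settings are left at their defaults, and imports are omitted to match the snippet style of this listing.
Configuration conf = HBaseConfiguration.create();
conf.set("hbase.rest.host", "0.0.0.0"); // bind address, default shown in run()
conf.setInt("hbase.rest.port", 8080); // REST service port, default shown in run()
conf.setInt("hbase.rest.info.port", -1); // a negative port skips the info server block
RESTServer server = new RESTServer(conf);
server.run(); // builds and starts the embedded Jetty server
server.join(); // blocks until the server stops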
hbase_ZKConfig_makeZKProps_rdh | /**
* Make a Properties object holding ZooKeeper config. Parses the corresponding config options from
* the HBase XML configs and generates the appropriate ZooKeeper properties.
*
* @param conf
* Configuration to read from.
* @return Properties holding mappings representing ZooKeeper config file.
*/
public static Properties makeZKProps(Configuration conf) {
return makeZKPropsFromHbaseConfig(conf);
} | 3.26 |
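As a rough illustration of the mapping this produces (a sketch; the exact property names and default ports come from HConstants and from makeZKPropsFromHbaseConfig shown below):
Configuration conf = HBaseConfiguration.create();
conf.set(HConstants.ZOOKEEPER_QUORUM, "zk1,zk2,zk3"); // hypothetical three-node quorum
Properties props = ZKConfig.makeZKProps(conf);
// props now holds the client-port property (default 2181) plus one entry per quorum member,
// e.g. server.0=zk1:2888:3888, server.1=zk2:2888:3888, server.2=zk3:2888:3888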
hbase_ZKConfig_standardizeZKQuorumServerString_rdh | /**
* Standardize the ZK quorum string: make it a "server:clientport" list, separated by ','
*
* @param quorumStringInput
 *          a string containing a list of servers for the ZK quorum
* @param clientPort
* the default client port
* @return the string for a list of "server:port" separated by ","
*/
  public static String standardizeZKQuorumServerString(String quorumStringInput, String clientPort) {
    String[] serverHosts = quorumStringInput.split(",");
return buildZKQuorumServerString(serverHosts, clientPort);
} | 3.26 |
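A small sketch of the normalization this performs: hosts that already carry a port keep it, the rest get the supplied default (see buildZKQuorumServerString further down).
String standardized = ZKConfig.standardizeZKQuorumServerString("zk1,zk2:2182,zk3", "2181");
// standardized is "zk1:2181,zk2:2182,zk3:2181" -- zk2 keeps its explicit port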
hbase_ZKConfig_makeZKPropsFromHbaseConfig_rdh | /**
* Make a Properties object holding ZooKeeper config. Parses the corresponding config options from
* the HBase XML configs and generates the appropriate ZooKeeper properties.
*
* @param conf
* Configuration to read from.
* @return Properties holding mappings representing ZooKeeper config file.
*/
private static Properties makeZKPropsFromHbaseConfig(Configuration conf) {
Properties zkProperties = new Properties();
// Directly map all of the hbase.zookeeper.property.KEY properties.
// Synchronize on conf so no loading of configs while we iterate
synchronized(conf) {
for (Entry<String, String> entry : conf) {
String key = entry.getKey();
if (key.startsWith(HConstants.ZK_CFG_PROPERTY_PREFIX)) {
String zkKey = key.substring(HConstants.ZK_CFG_PROPERTY_PREFIX_LEN);
String value = entry.getValue();
// If the value has variables substitutions, need to do a get.
if (value.contains(VARIABLE_START)) {
value = conf.get(key);
}
zkProperties.setProperty(zkKey, value);
}
}
}
// If clientPort is not set, assign the default.
if (zkProperties.getProperty(HConstants.CLIENT_PORT_STR) == null) {
zkProperties.put(HConstants.CLIENT_PORT_STR, HConstants.DEFAULT_ZOOKEEPER_CLIENT_PORT);
}
// Create the server.X properties.
int peerPort = conf.getInt("hbase.zookeeper.peerport", 2888);
int leaderPort = conf.getInt("hbase.zookeeper.leaderport", 3888);
final String[] serverHosts = conf.getStrings(HConstants.ZOOKEEPER_QUORUM, HConstants.LOCALHOST);
String serverHost;
String address;
    String key;
    for (int i = 0; i < serverHosts.length; ++i) {
if (serverHosts[i].contains(":")) {
serverHost = serverHosts[i].substring(0, serverHosts[i].indexOf(':'));
} else {
serverHost = serverHosts[i];
}
address = (((serverHost + ":") + peerPort) + ":") + leaderPort;
key = "server." + i;
zkProperties.put(key, address);
}
return zkProperties;
} | 3.26 |
hbase_ZKConfig_getZKQuorumServersStringFromHbaseConfig_rdh | /**
* Return the ZK Quorum servers string given the specified configuration
*
* @return Quorum servers String
*/
private static String getZKQuorumServersStringFromHbaseConfig(Configuration conf) {
String defaultClientPort = Integer.toString(conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, HConstants.DEFAULT_ZOOKEEPER_CLIENT_PORT));
// Build the ZK quorum server string with "server:clientport" list, separated by ','
final String[] serverHosts = conf.getStrings(HConstants.ZOOKEEPER_QUORUM, HConstants.LOCALHOST);
return buildZKQuorumServerString(serverHosts, defaultClientPort);
} | 3.26 |
hbase_ZKConfig_transformClusterKey_rdh | /**
* Separate the given key into the three configurations it should contain: hbase.zookeeper.quorum,
* hbase.zookeeper.client.port and zookeeper.znode.parent
*
* @return the three configuration in the described order
*/
public static ZKClusterKey transformClusterKey(String key) throws IOException {
List<String> v20 = Splitter.on(':').splitToList(key);
String[] partsArray = v20.toArray(new String[v20.size()]);
if (partsArray.length == 3) {
      if (!partsArray[2].matches("/.*[^/]")) {
        throw new IOException((((((("Cluster key passed " + key) + " is invalid, the format should be:") + HConstants.ZOOKEEPER_QUORUM) + ":") + HConstants.ZOOKEEPER_CLIENT_PORT) + ":") + HConstants.ZOOKEEPER_ZNODE_PARENT);
}
return new ZKClusterKey(partsArray[0], Integer.parseInt(partsArray[1]), partsArray[2]);
}
    if (partsArray.length > 3) {
      // The quorum could contain client port in server:clientport format, try to transform more.
String zNodeParent = partsArray[partsArray.length - 1];
if (!zNodeParent.matches("/.*[^/]")) {
        throw new IOException((((((("Cluster key passed " + key) + " is invalid, the format should be:") + HConstants.ZOOKEEPER_QUORUM) + ":") + HConstants.ZOOKEEPER_CLIENT_PORT) + ":") + HConstants.ZOOKEEPER_ZNODE_PARENT);
}
String clientPort = partsArray[partsArray.length - 2];
// The first part length is the total length minus the lengths of other parts and minus 2 ":"
int endQuorumIndex = ((key.length() - zNodeParent.length()) - clientPort.length()) - 2;
String quorumStringInput = key.substring(0, endQuorumIndex);
String[] serverHosts = quorumStringInput.split(",");
// The common case is that every server has its own client port specified - this means
// that (total parts - the ZNodeParent part - the ClientPort part) is equal to
// (the number of "," + 1) - "+ 1" because the last server has no ",".
if ((partsArray.length - 2) == (serverHosts.length + 1)) {
        return new ZKClusterKey(quorumStringInput, Integer.parseInt(clientPort), zNodeParent);
      }
// For the uncommon case that some servers has no port specified, we need to build the
// server:clientport list using default client port for servers without specified port.
return new ZKClusterKey(buildZKQuorumServerString(serverHosts, clientPort), Integer.parseInt(clientPort), zNodeParent);
}
    throw new IOException((((((("Cluster key passed " + key) + " is invalid, the format should be:") + HConstants.ZOOKEEPER_QUORUM) + ":") + HConstants.ZOOKEEPER_CLIENT_PORT) + ":") + HConstants.ZOOKEEPER_ZNODE_PARENT);
} | 3.26 |
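For illustration, a hedged sketch of the two accepted shapes handled above; the hostnames are assumptions.
// Three-part key: shared client port and znode parent.
ZKClusterKey k1 = ZKConfig.transformClusterKey("zk1,zk2,zk3:2181:/hbase");
// -> quorum "zk1,zk2,zk3", client port 2181, znode parent "/hbase"
// More than three parts: per-server ports are folded back into the quorum string.
ZKClusterKey k2 = ZKConfig.transformClusterKey("zk1:2181,zk2,zk3:2181:/hbase");
// -> quorum "zk1:2181,zk2:2181,zk3:2181", client port 2181, znode parent "/hbase"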
hbase_ZKConfig_buildZKQuorumServerString_rdh | /**
* Build the ZK quorum server string with "server:clientport" list, separated by ','
*
* @param serverHosts
* a list of servers for ZK quorum
* @param clientPort
* the default client port
* @return the string for a list of "server:port" separated by ","
*/
public static String buildZKQuorumServerString(String[] serverHosts, String clientPort) {
StringBuilder quorumStringBuilder = new StringBuilder();
String serverHost;
InetAddressValidator validator = new InetAddressValidator();
for (int i = 0; i < serverHosts.length; ++i) {
      if (serverHosts[i].startsWith("[")) {
        int index = serverHosts[i].indexOf("]");
if (index < 0) {
throw new IllegalArgumentException(serverHosts[i] + " starts with '[' but has no matching ']:'");
}
if ((index + 2) == serverHosts[i].length()) {
throw new IllegalArgumentException(serverHosts[i] + " doesn't have a port after colon");
        }
        // check the IPv6 address e.g. [2001:db8::1]
String serverHostWithoutBracket = serverHosts[i].substring(1, index);
if (!validator.isValidInet6Address(serverHostWithoutBracket)) {
          throw new IllegalArgumentException(serverHosts[i] + " is not a valid IPv6 address");
}
serverHost = serverHosts[i];
if ((index + 1) == serverHosts[i].length()) {
serverHost = (serverHosts[i] + ":") + clientPort;
}
} else if (serverHosts[i].contains(":")) {
        serverHost = serverHosts[i]; // just use the port specified from the input
} else {
serverHost = (serverHosts[i] + ":") + clientPort;
}
if (i > 0) {
quorumStringBuilder.append(',');
}
quorumStringBuilder.append(serverHost);
}
return quorumStringBuilder.toString();
} | 3.26 |
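A brief sketch of the bracketed-IPv6 handling above; the addresses are illustrative.
String[] hosts = { "zk1", "[2001:db8::1]", "[2001:db8::2]:2182" };
String quorum = ZKConfig.buildZKQuorumServerString(hosts, "2181");
// quorum is "zk1:2181,[2001:db8::1]:2181,[2001:db8::2]:2182"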
hbase_ZKConfig_getZooKeeperClusterKey_rdh | /**
* Get the key to the ZK ensemble for this configuration and append a name at the end
*
* @param conf
* Configuration to use to build the key
* @param name
* Name that should be appended at the end if not empty or null
* @return ensemble key with a name (if any)
*/
public static String getZooKeeperClusterKey(Configuration conf, String name) {
String ensemble = conf.get(HConstants.ZOOKEEPER_QUORUM).replaceAll("[\\t\\n\\x0B\\f\\r]", "");
StringBuilder builder = new StringBuilder(ensemble);
builder.append(":");
    builder.append(conf.get(HConstants.ZOOKEEPER_CLIENT_PORT));
    builder.append(":");
    builder.append(conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT));
    if ((name != null) && (!name.isEmpty())) {
      builder.append(",");
      builder.append(name);
    }
    return builder.toString();
  } | 3.26 |
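A quick sketch of the key this builds; the configuration values are assumed.
// Assuming conf holds quorum "zk1,zk2,zk3", client port 2181 and znode parent "/hbase":
String key = ZKConfig.getZooKeeperClusterKey(conf, "replication");
// key is "zk1,zk2,zk3:2181:/hbase,replication"
String bareKey = ZKConfig.m0(conf); // same key without the trailing name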
hbase_ZKConfig_m0_rdh | /**
* Get the key to the ZK ensemble for this configuration without adding a name at the end
*
* @param conf
* Configuration to use to build the key
* @return ensemble key without a name
*/
public static String m0(Configuration conf) {
return getZooKeeperClusterKey(conf, null);
} | 3.26 |
hbase_ZKConfig_getZKQuorumServersString_rdh | /**
* Return the ZK Quorum servers string given the specified configuration.
*
* @return Quorum servers
*/
public static String getZKQuorumServersString(Configuration conf) {
m2(HConstants.ZK_CFG_PROPERTY_PREFIX, conf);
return getZKQuorumServersStringFromHbaseConfig(conf);
} | 3.26 |
hbase_ZKConfig_validateClusterKey_rdh | /**
* Verifies that the given key matches the expected format for a ZooKeeper cluster key. The Quorum
 * for the ZK cluster can have one of the following formats (see examples below):
* <ol>
* <li>s1,s2,s3 (no client port in the list, the client port could be obtained from
* clientPort)</li>
* <li>s1:p1,s2:p2,s3:p3 (with client port, which could be same or different for each server, in
* this case, the clientPort would be ignored)</li>
* <li>s1:p1,s2,s3:p3 (mix of (1) and (2) - if port is not specified in a server, it would use the
* clientPort; otherwise, it would use the specified port)</li>
* </ol>
*
* @param key
* the cluster key to validate
* @throws IOException
* if the key could not be parsed
*/
public static void validateClusterKey(String key) throws
IOException {
transformClusterKey(key);
} | 3.26 |
hbase_Interns_tag_rdh | /**
* Get a metrics tag
*
* @param info
* of the tag
* @param value
* of the tag
* @return an interned metrics tag
*/
  public static MetricsTag tag(MetricsInfo info, String value) {
    Map<String, MetricsTag> map = tagCache.getUnchecked(info);
MetricsTag tag = map.get(value);
if (tag == null) {
tag = new MetricsTag(info, value);
map.put(value, tag);
}
return tag;
} | 3.26 |
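Because tags are cached per MetricsInfo and value, repeated lookups hand back the same instance; a small sketch (Interns.info(...) is assumed available in this class, as used by m0 below).
MetricsInfo contextInfo = Interns.info("context", "Metrics context"); // hypothetical info
MetricsTag first = Interns.tag(contextInfo, "regionserver");
MetricsTag second = Interns.tag(contextInfo, "regionserver");
// first == second: the second call finds the cached tag instead of allocating a new one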
hbase_Interns_m0_rdh | /**
* Get a metrics tag
*
* @param name
* of the tag
* @param description
* of the tag
* @param value
* of the tag
* @return an interned metrics tag
*/
  public static MetricsTag m0(String name, String description, String value) {
    return tag(info(name, description), value);
} | 3.26 |
hbase_LogEventHandler_persistAll_rdh | /**
 * Add all in-memory queue records to the system table. Implementors can use a system table, a
 * direct HDFS file, or ZK as the persistence system.
*/
void persistAll(NamedQueuePayload.NamedQueueEvent namedQueueEvent, Connection connection) {
namedQueueServices.get(namedQueueEvent).persistAll(connection);
} | 3.26 |
hbase_LogEventHandler_clearNamedQueue_rdh | /**
* Cleans up queues maintained by services.
*
* @param namedQueueEvent
* type of queue to clear
* @return true if queue is cleaned up, false otherwise
*/
boolean clearNamedQueue(NamedQueuePayload.NamedQueueEvent namedQueueEvent) {
return namedQueueServices.get(namedQueueEvent).clearNamedQueue();
} | 3.26 |
hbase_LogEventHandler_onEvent_rdh | /**
 * Called when a publisher has published an event to the {@link RingBuffer}. This is a generic
 * consumer of the disruptor ring buffer; for each new namedQueue that we add, we should also
 * provide specific consumer logic here.
*
* @param event
* published to the {@link RingBuffer}
* @param sequence
* of the event being processed
* @param endOfBatch
* flag to indicate if this is the last event in a batch from the
* {@link RingBuffer}
*/
@Override
  public void onEvent(RingBufferEnvelope event, long sequence, boolean endOfBatch) {
    final NamedQueuePayload namedQueuePayload = event.getPayload();
// consume ringbuffer payload based on event type
namedQueueServices.get(namedQueuePayload.getNamedQueueEvent()).consumeEventFromDisruptor(namedQueuePayload);
} | 3.26 |
hbase_AsyncScanSingleRegionRpcRetryingCaller_destroy_rdh | // return the current state, and set the state to DESTROYED.
ScanControllerState destroy() {
ScanControllerState state = this.state;
this.state = ScanControllerState.DESTROYED;
return state;
} | 3.26 |
hbase_AsyncScanSingleRegionRpcRetryingCaller_prepare_rdh | // return false if the scan has already been resumed. See the comment above for ScanResumerImpl
// for more details.
synchronized boolean prepare(ScanResponse resp, int numberOfCompleteRows) {
    if (state == ScanResumerState.RESUMED) {
// user calls resume before we actually suspend the scan, just continue;
return false;
}
state = ScanResumerState.SUSPENDED;
this.resp = resp;
    this.numberOfCompleteRows = numberOfCompleteRows;
// if there are no more results in region then the scanner at RS side will be closed
// automatically so we do not need to renew lease.
if (resp.getMoreResultsInRegion()) {
// schedule renew lease task
scheduleRenewLeaseTask();
}
return true;
} | 3.26 |
hbase_AsyncScanSingleRegionRpcRetryingCaller_start_rdh | /**
* Now we will also fetch some cells along with the scanner id when opening a scanner, so we also
* need to process the ScanResponse for the open scanner request. The HBaseRpcController for the
* open scanner request is also needed because we may have some data in the CellScanner which is
* contained in the controller.
*
* @return {@code true} if we should continue, otherwise {@code false}.
*/
  public CompletableFuture<Boolean> start(HBaseRpcController controller, ScanResponse respWhenOpen) {
onComplete(controller, respWhenOpen);
return future;
} | 3.26 |
hbase_Union4_decodeD_rdh | /**
* Read an instance of the fourth type parameter from buffer {@code src}.
*/
public D decodeD(PositionedByteRange src) {
return ((D) (decode(src)));
} | 3.26 |