name | code_snippet | score |
---|---|---|
hbase_TableBackupClient_addManifest_rdh | /**
* Add manifest for the current backup. The manifest is stored within the table backup directory.
*
* @param backupInfo
* The current backup info
* @throws IOException
* exception
*/
protected void addManifest(BackupInfo backupInfo, BackupManager backupManager, BackupType type, Configuration conf) throws IOException {
// set the overall backup phase : store manifest
backupInfo.setPhase(BackupPhase.STORE_MANIFEST);
BackupManifest manifest;
// Since we have each table's backup in its own directory structure,
// we'll store its manifest with the table directory.
for (TableName table : backupInfo.getTables()) {
manifest = new BackupManifest(backupInfo, table);
ArrayList<BackupImage> ancestors = backupManager.getAncestors(backupInfo, table);
for (BackupImage image : ancestors) {
manifest.addDependentImage(image);
}
if (type == BackupType.INCREMENTAL) {
// We'll store the log timestamps for this table only in its manifest.
Map<TableName, Map<String, Long>> tableTimestampMap = new HashMap<>();
tableTimestampMap.put(table, backupInfo.getIncrTimestampMap().get(table));
manifest.setIncrTimestampMap(tableTimestampMap);
ArrayList<BackupImage> ancestorss = backupManager.getAncestors(backupInfo);
for (BackupImage image : ancestorss) {
manifest.addDependentImage(image);
}
}
manifest.store(conf);
}
// For incremental backup, we store an overall manifest in
// <backup-root-dir>/WALs/<backup-id>
// This is used when creating the next incremental backup
if (type == BackupType.INCREMENTAL) {
manifest = new BackupManifest(backupInfo);
// set the table region server start and end timestamps for incremental backup
manifest.setIncrTimestampMap(backupInfo.getIncrTimestampMap());
ArrayList<BackupImage> ancestors = backupManager.getAncestors(backupInfo);
for (BackupImage image : ancestors) {
manifest.addDependentImage(image);
}
manifest.store(conf);
}
} | 3.26 |
hbase_TableBackupClient_cleanupTargetDir_rdh | /**
* Clean up the uncompleted data at target directory if the ongoing backup has already entered the
* copy phase.
*/
protected static void cleanupTargetDir(BackupInfo backupInfo, Configuration conf) {
try {
// clean up the uncompleted data at target directory if the ongoing backup has already entered
// the copy phase
f1.debug("Trying to clean up target dir. Current backup phase: " + backupInfo.getPhase());
if ((backupInfo.getPhase().equals(BackupPhase.SNAPSHOTCOPY) || backupInfo.getPhase().equals(BackupPhase.INCREMENTAL_COPY)) || backupInfo.getPhase().equals(BackupPhase.STORE_MANIFEST)) {
FileSystem v8 = FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf);
// now treat one backup as a transaction, clean up data that has been partially copied at
// table level
for (TableName table : backupInfo.getTables()) {
Path targetDirPath = new Path(HBackupFileSystem.getTableBackupDir(backupInfo.getBackupRootDir(), backupInfo.getBackupId(), table));
if (v8.delete(targetDirPath, true)) {
f1.debug(("Cleaning up uncompleted backup data at " + targetDirPath.toString()) + " done.");
} else {
f1.debug(("No data has been copied to " + targetDirPath.toString())
+ ".");
}
Path v11 = targetDirPath.getParent();
FileStatus[] backups = CommonFSUtils.listStatus(v8, v11);
if ((backups == null) || (backups.length == 0)) {
v8.delete(v11, true);
f1.debug(v11.toString() + " is empty, remove it.");
}
}
}
} catch (IOException e1) {
f1.error(((((("Cleaning up uncompleted backup data of " + backupInfo.getBackupId()) + " at ") + backupInfo.getBackupRootDir()) + " failed due to ") + e1.getMessage()) + ".");
}
} | 3.26 |
hbase_TableBackupClient_completeBackup_rdh | /**
* Complete the overall backup.
*
* @param backupInfo
* backup info
* @throws IOException
* exception
 */
protected void completeBackup(final Connection conn, BackupInfo backupInfo, BackupManager backupManager, BackupType type, Configuration conf) throws IOException {
// set the complete timestamp of the overall backup
backupInfo.setCompleteTs(EnvironmentEdgeManager.currentTime());
// set overall backup status: complete
backupInfo.setState(BackupState.COMPLETE);
backupInfo.setProgress(100);
// add and store the manifest for the backup
addManifest(backupInfo, backupManager, type, conf);
// compose the backup complete data
String backupCompleteData = (((((obtainBackupMetaDataStr(backupInfo) + ",startts=") + backupInfo.getStartTs()) + ",completets=") + backupInfo.getCompleteTs()) + ",bytescopied=") + backupInfo.getTotalBytesCopied();
if (f1.isDebugEnabled()) {
f1.debug((("Backup " + backupInfo.getBackupId()) + " finished: ") + backupCompleteData);
}
// when full backup is done:
// - delete HBase snapshot
// - clean up directories with prefix "exportSnapshot-", which are generated when exporting
// snapshots
// incremental backups use distcp, which handles cleaning up its own directories
if (type == BackupType.FULL) {
deleteSnapshots(conn, backupInfo, conf);
cleanupExportSnapshotLog(conf);
}
BackupSystemTable.deleteSnapshot(conn);
backupManager.updateBackupInfo(backupInfo);
// Finish active session
backupManager.finishBackupSession();
f1.info(("Backup " + backupInfo.getBackupId())
+ " completed.");
} | 3.26 |
hbase_ImmutableSegment_getNumOfSegments_rdh | // /////////////////// PUBLIC METHODS /////////////////////
public int getNumOfSegments() {
return 1;
} | 3.26 |
hbase_ImmutableSegment_getSnapshotScanners_rdh | /**
* We create a new {@link SnapshotSegmentScanner} to increase the reference count of
* {@link MemStoreLABImpl} used by this segment.
 */
List<KeyValueScanner> getSnapshotScanners() {
return Collections.singletonList(new SnapshotSegmentScanner(this));
} | 3.26 |
hbase_AbstractStateMachineNamespaceProcedure_m0_rdh | /**
* Insert/update the row into the ns family of meta table.
*
* @param env
* MasterProcedureEnv
*/
protected static void m0(MasterProcedureEnv env, NamespaceDescriptor ns) throws IOException {
getTableNamespaceManager(env).addOrUpdateNamespace(ns);
} | 3.26 |
hbase_AbstractStateMachineNamespaceProcedure_createDirectory_rdh | /**
* Create the namespace directory
*
* @param env
* MasterProcedureEnv
* @param nsDescriptor
* NamespaceDescriptor
*/
protected static void createDirectory(MasterProcedureEnv env, NamespaceDescriptor nsDescriptor) throws IOException {
createDirectory(env.getMasterServices().getMasterFileSystem(), nsDescriptor);
} | 3.26 |
hbase_FSDataInputStreamWrapper_unbuffer_rdh | /**
* This will free sockets and file descriptors held by the stream only when the stream implements
* org.apache.hadoop.fs.CanUnbuffer. NOT THREAD SAFE. Must be called only when all the clients
 * using this stream to read the blocks have finished reading. If by chance the stream is
 * unbuffered and there are clients still holding this stream for read, then on the next client read
 * request a new socket will be opened by the DataNode without the client knowing about it, and it
 * will serve the read request. Note: If this socket is idle for some time then the DataNode will
 * close the socket, the socket will move into CLOSE_WAIT state, and on the next client request on this
* stream, the current socket will be closed and a new socket will be opened to serve the
* requests.
*/
@SuppressWarnings({ "rawtypes" })
public void unbuffer() {
FSDataInputStream stream = this.getStream(this.shouldUseHBaseChecksum());
if (stream != null) {
InputStream wrappedStream = stream.getWrappedStream();
// CanUnbuffer interface was added as part of HDFS-7694 and the fix is available in Hadoop
// 2.6.4+ and 2.7.1+ versions only so check whether the stream object implements the
// CanUnbuffer interface or not and based on that call the unbuffer api.
final Class<? extends InputStream> streamClass = wrappedStream.getClass();
if (this.instanceOfCanUnbuffer == null) {
// To ensure we compute whether the stream is instance of CanUnbuffer only once.
this.instanceOfCanUnbuffer = false;
if (wrappedStream instanceof CanUnbuffer) {
this.unbuffer = ((CanUnbuffer) (wrappedStream));
this.instanceOfCanUnbuffer = true;
}
}
if (this.instanceOfCanUnbuffer) {
try {
this.unbuffer.unbuffer();
} catch (UnsupportedOperationException e) {
if (isLogTraceEnabled) {
LOG.trace(("Failed to invoke 'unbuffer' method in class " + streamClass) + ". The stream may not support unbuffering.", e);
}
}
} else if (isLogTraceEnabled) {
LOG.trace("Failed to find 'unbuffer' method in class " + streamClass);
}
}
} | 3.26 |
hbase_FSDataInputStreamWrapper_fallbackToFsChecksum_rdh | /**
* Read from non-checksum stream failed, fall back to FS checksum. Thread-safe.
*
* @param offCount
* For how many checksumOk calls to turn off the HBase checksum.
*/
public FSDataInputStream fallbackToFsChecksum(int offCount) throws IOException {
// checksumOffCount is speculative, but let's try to reset it less.
boolean partOfConvoy = false;
if (this.stream == null) {
synchronized(streamNoFsChecksumFirstCreateLock) {
partOfConvoy = this.stream != null;
if (!partOfConvoy) {
this.stream = (link != null) ? link.open(hfs) : hfs.open(path);
}
}
}
if (!partOfConvoy) {
this.useHBaseChecksum = false;
this.hbaseChecksumOffCount.set(offCount);
}
return this.stream;
} | 3.26 |
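The interesting part of `fallbackToFsChecksum` is how it limits a "convoy" of threads all reacting to the same checksum failure: the stream is created exactly once under a lock, and threads that lose the race inside that lock skip resetting the state. Below is a minimal, self-contained JDK-only sketch of that lazy-create-once pattern; the class and field names are hypothetical stand-ins for the wrapper's `stream` and `useHBaseChecksum` fields, not HBase API.

```java
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

public class LazyFallbackStream {
  private final Object createLock = new Object();
  private final File file;
  private volatile InputStream stream;         // fallback stream, opened at most once
  private volatile boolean useFastPath = true; // analogue of useHBaseChecksum

  LazyFallbackStream(File file) {
    this.file = file;
  }

  /** Called when the fast (non-checksum) read path failed; switch to the fallback stream. */
  InputStream fallback() throws IOException {
    boolean partOfConvoy = false; // did another thread beat us to creating the stream?
    if (stream == null) {
      synchronized (createLock) {
        partOfConvoy = stream != null;
        if (!partOfConvoy) {
          stream = new FileInputStream(file); // open the fallback stream exactly once
        }
      }
    }
    if (!partOfConvoy) {
      // Threads that lost the race inside the lock (the "convoy") skip this,
      // so a burst of concurrent failures does not keep resetting the flag.
      useFastPath = false;
    }
    return stream;
  }
}
```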
hbase_FSDataInputStreamWrapper_prepareForBlockReader_rdh | /**
* Prepares the streams for block reader. NOT THREAD SAFE. Must be called once, after any reads
* finish and before any other reads start (what happens in reality is we read the tail, then call
* this based on what's in the tail, then read blocks).
*
* @param forceNoHBaseChecksum
* Force not using HBase checksum.
*/
public void prepareForBlockReader(boolean forceNoHBaseChecksum) throws IOException {
if (hfs == null)
return;
assert (this.stream != null) && (!this.useHBaseChecksumConfigured);
boolean useHBaseChecksum
= ((!forceNoHBaseChecksum) && hfs.useHBaseChecksum()) && (hfs.getNoChecksumFs() != hfs);
if (useHBaseChecksum) {
FileSystem fsNc = hfs.getNoChecksumFs();
this.streamNoFsChecksum = (link != null) ? link.open(fsNc) : fsNc.open(path);
setStreamOptions(streamNoFsChecksum);
this.useHBaseChecksumConfigured = this.useHBaseChecksum
= useHBaseChecksum;
// Close the checksum stream; we will reopen it if we get an HBase checksum failure.
this.stream.close();
this.stream = null;
}
} | 3.26 |
hbase_FSDataInputStreamWrapper_shouldUseHBaseChecksum_rdh | /**
* Returns Whether we are presently using HBase checksum.
*/
public boolean shouldUseHBaseChecksum() {
return this.useHBaseChecksum;
} | 3.26 |
hbase_FSDataInputStreamWrapper_getStream_rdh | /**
* Get the stream to use. Thread-safe.
*
* @param useHBaseChecksum
* must be the value that shouldUseHBaseChecksum has returned at some
* point in the past, otherwise the result is undefined.
*/
public FSDataInputStream getStream(boolean useHBaseChecksum) {
return useHBaseChecksum ? this.streamNoFsChecksum : this.stream;
} | 3.26 |
hbase_FSDataInputStreamWrapper_close_rdh | /**
 * Close stream(s) if necessary.
 */
@Override
public void close() {
if (!doCloseStreams) {
return;
}
updateInputStreamStatistics(this.streamNoFsChecksum);
// we do not care about the close exception as it is for reading, no data loss issue.
Closeables.closeQuietly(streamNoFsChecksum);
updateInputStreamStatistics(stream);
Closeables.closeQuietly(stream);
} | 3.26 |
hbase_FSDataInputStreamWrapper_checksumOk_rdh | /**
* Report that checksum was ok, so we may ponder going back to HBase checksum.
*/
public void checksumOk() {
if ((this.useHBaseChecksumConfigured && (!this.useHBaseChecksum)) && (this.hbaseChecksumOffCount.getAndDecrement() < 0)) {
// The stream we need is already open (because we were using HBase checksum in the past).
assert this.streamNoFsChecksum != null;
this.useHBaseChecksum = true;
}
} | 3.26 |
hbase_HStoreFile_m0_rdh | /**
*
* @param key
* to look up
* @return value associated with the metadata key
*/
public byte[] m0(byte[] key) {
return metadataMap.get(key);
} | 3.26 |
hbase_HStoreFile_initReader_rdh | /**
* Initialize the reader used for pread.
*/
public void initReader() throws IOException {
if (initialReader == null) {
synchronized(this) {
if (initialReader == null) {
try {
open();
} catch (Exception e) {
try {
boolean evictOnClose = (cacheConf != null) ? cacheConf.shouldEvictOnClose() : true;
this.closeStoreFile(evictOnClose);
} catch (IOException ee) {
LOG.warn("failed to close reader", ee);
}
throw e;
}
}
}
}
} | 3.26 |
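`initReader` is a textbook double-checked lazy initialization with cleanup on failure: check, lock, re-check, and if `open()` throws, close whatever was partially opened before rethrowing. A minimal sketch of the same pattern with a generic resource follows; the names are hypothetical and this is not the HStoreFile API.

```java
public class LazyResourceHolder {
  private volatile AutoCloseable resource; // created at most once

  /** Double-checked locking: initialize once; on failure, clean up and rethrow. */
  public void init() throws Exception {
    if (resource == null) {
      synchronized (this) {
        if (resource == null) {
          try {
            resource = open();
          } catch (Exception e) {
            try {
              close(); // release anything partially opened (HStoreFile closes its reader here)
            } catch (Exception ee) {
              // swallow: keep the original failure as the primary exception
            }
            throw e;
          }
        }
      }
    }
  }

  private AutoCloseable open() throws Exception {
    return () -> { }; // stand-in for the expensive open()/reader creation
  }

  public synchronized void close() throws Exception {
    if (resource != null) {
      resource.close();
      resource = null;
    }
  }
}
```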
hbase_HStoreFile_getReader_rdh | /**
*
* @return Current reader. Must call initReader first else returns null.
* @see #initReader()
*/
public StoreFileReader getReader() {
return this.initialReader;
} | 3.26 |
hbase_HStoreFile_closeStoreFile_rdh | /**
*
* @param evictOnClose
* whether to evict blocks belonging to this file
*/
public synchronized void closeStoreFile(boolean evictOnClose) throws IOException {
if (this.initialReader != null) {
this.initialReader.close(evictOnClose);
this.initialReader = null;
}
} | 3.26 |
hbase_HStoreFile_open_rdh | /**
* Opens reader on this store file. Called by Constructor.
*
* @see #closeStoreFile(boolean)
*/
private void open() throws IOException {
fileInfo.initHDFSBlocksDistribution();
long readahead = (fileInfo.isNoReadahead()) ? 0L : -1L;
ReaderContext context = fileInfo.createReaderContext(false, readahead,
ReaderType.PREAD);
fileInfo.initHFileInfo(context);
StoreFileReader reader = fileInfo.preStoreFileReaderOpen(context, cacheConf);
if (reader == null) {
reader = fileInfo.createReader(context, cacheConf);
fileInfo.getHFileInfo().initMetaAndIndex(reader.getHFileReader());
}
this.initialReader = fileInfo.postStoreFileReaderOpen(context, cacheConf, reader);
if (InputStreamBlockDistribution.isEnabled(fileInfo.getConf())) {
boolean useHBaseChecksum = context.getInputStreamWrapper().shouldUseHBaseChecksum();
FSDataInputStream stream = context.getInputStreamWrapper().getStream(useHBaseChecksum);
this.initialReaderBlockDistribution = new InputStreamBlockDistribution(stream, fileInfo);
}
// Load up indices and fileinfo. This also loads Bloom filter type.
metadataMap = Collections.unmodifiableMap(initialReader.loadFileInfo());
// Read in our metadata.
byte[] b = metadataMap.get(MAX_SEQ_ID_KEY);
if (b != null) {
// By convention, if halfhfile, top half has a sequence number > bottom
// half. That's why we add one in below. It's done in case the two halves
// are ever merged back together --rare. Without it, on open of store,
// since store files are distinguished by sequence id, the one half would
// subsume the other.
this.sequenceid = Bytes.toLong(b);
if (fileInfo.isTopReference()) {
this.sequenceid += 1;
}
}
if (isBulkLoadResult()) {
// generate the sequenceId from the fileName
// fileName is of the form <randomName>_SeqId_<id-when-loaded>_
String fileName = this.getPath().getName();
// Use lastIndexOf() to get the last, most recent bulk load seqId.
int startPos = fileName.lastIndexOf("SeqId_");
if (startPos != (-1)) {
this.sequenceid = Long.parseLong(fileName.substring(startPos + 6, fileName.indexOf('_', startPos + 6)));
// Handle reference files as done above.
if (fileInfo.isTopReference()) {
this.sequenceid += 1;
}}
// SKIP_RESET_SEQ_ID only works in bulk loaded file.
// In mob compaction, the hfile where the cells contain the path of a new mob file is bulk
// loaded to hbase, these cells have the same seqIds with the old ones. We do not want
// to reset new seqIds for them since this might make a mess of the visibility of cells that
// have the same row key but different seqIds.
boolean skipResetSeqId = isSkipResetSeqId(metadataMap.get(SKIP_RESET_SEQ_ID));
if (skipResetSeqId) {
// increase the seqId when it is a bulk loaded file from mob compaction.
this.sequenceid += 1;
}
initialReader.setSkipResetSeqId(skipResetSeqId);
initialReader.setBulkLoaded(true);
}
initialReader.setSequenceID(this.sequenceid);
b = metadataMap.get(Writer.MAX_MEMSTORE_TS_KEY);
if (b != null) {
this.maxMemstoreTS = Bytes.toLong(b);
}
b = metadataMap.get(MAJOR_COMPACTION_KEY);
if (b != null) {
boolean mc = Bytes.toBoolean(b);
if (this.majorCompaction == null) {
this.majorCompaction = new AtomicBoolean(mc);
} else {
this.majorCompaction.set(mc);
}
} else {
// Presume it is not major compacted if it doesn't explicitly say so
// HFileOutputFormat explicitly sets the major compacted key.
this.majorCompaction = new AtomicBoolean(false);
}
b = metadataMap.get(EXCLUDE_FROM_MINOR_COMPACTION_KEY);
this.excludeFromMinorCompaction = (b != null) && Bytes.toBoolean(b);
BloomType hfileBloomType = initialReader.getBloomFilterType();
if (cfBloomType != BloomType.NONE) {
initialReader.loadBloomfilter(BlockType.GENERAL_BLOOM_META, metrics);
if (hfileBloomType != cfBloomType) {
LOG.debug((((((("HFile Bloom filter type for " + initialReader.getHFileReader().getName()) + ": ") + hfileBloomType) + ", but ") + cfBloomType) +
" specified in column family ") + "configuration");
}
} else if (hfileBloomType != BloomType.NONE) {
LOG.info("Bloom filter turned off by CF config for " + initialReader.getHFileReader().getName());
}
// load delete family bloom filter
initialReader.loadBloomfilter(BlockType.DELETE_FAMILY_BLOOM_META, metrics);
try {
byte[] data = metadataMap.get(TIMERANGE_KEY);
initialReader.timeRange = (data == null) ? null : TimeRangeTracker.parseFrom(data).toTimeRange();
} catch (IllegalArgumentException e) {
LOG.error("Error reading timestamp range data from meta -- "
+ "proceeding without", e);
this.initialReader.timeRange = null;
}
try {
byte[] data = metadataMap.get(COMPACTION_EVENT_KEY);
this.compactedStoreFiles.addAll(ProtobufUtil.toCompactedStoreFiles(data));
} catch (IOException e) {
LOG.error("Error reading compacted storefiles from meta data", e);
}
// initialize so we can reuse them after reader closed.
f1 = initialReader.getFirstKey();
lastKey = initialReader.getLastKey();
comparator = initialReader.getComparator();
} | 3.26 |
hbase_HStoreFile_isSkipResetSeqId_rdh | /**
* Gets whether to skip resetting the sequence id for cells.
*
* @param skipResetSeqId
* The byte array of boolean.
* @return Whether to skip resetting the sequence id.
*/
private boolean isSkipResetSeqId(byte[] skipResetSeqId) {
if ((skipResetSeqId != null) && (skipResetSeqId.length == 1))
{
return Bytes.toBoolean(skipResetSeqId);
}
return false;
} | 3.26 |
hbase_HStoreFile_getStreamScanner_rdh | /**
* Get a scanner which uses streaming read.
* <p>
* Must be called after initReader.
*/
public StoreFileScanner getStreamScanner(boolean canUseDropBehind, boolean cacheBlocks, boolean
isCompaction, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn) throws IOException {
return createStreamReader(canUseDropBehind).getStoreFileScanner(cacheBlocks, false, isCompaction, readPt, scannerOrder, canOptimizeForNonNullColumn);} | 3.26 |
hbase_HStoreFile_deleteStoreFile_rdh | /**
* Delete this file
*/
public void deleteStoreFile() throws IOException {
boolean evictOnClose = (cacheConf != null) ? cacheConf.shouldEvictOnClose() : true;
closeStoreFile(evictOnClose);
this.fileInfo.getFileSystem().delete(getPath(), true);
} | 3.26 |
hbase_HStoreFile_getPreadScanner_rdh | /**
* Get a scanner which uses pread.
* <p>
* Must be called after initReader.
*/
public StoreFileScanner getPreadScanner(boolean cacheBlocks, long
readPt, long scannerOrder, boolean canOptimizeForNonNullColumn) {
return getReader().getStoreFileScanner(cacheBlocks, true, false, readPt, scannerOrder, canOptimizeForNonNullColumn);
} | 3.26 |
hbase_HStoreFile_isReferencedInReads_rdh | /**
* Returns true if the file is still used in reads
*/
public boolean isReferencedInReads() {
int rc = fileInfo.getRefCount();
assert rc
>= 0;// we should not go negative.
return rc > 0;
} | 3.26 |
hbase_StoreFileListFile_update_rdh | /**
 * We will set the timestamp in this method, so just pass the builder in.
*/
void update(StoreFileList.Builder builder) throws IOException {
if (nextTrackFile < 0) {
// we need to call load first to load the prevTimestamp and also the next file
// we are already in the update method, which is not read only, so pass false
load(false);
}
long timestamp = Math.max(prevTimestamp + 1, EnvironmentEdgeManager.currentTime());
byte[] actualData = builder.setTimestamp(timestamp).build().toByteArray();
CRC32 crc32 = new CRC32();
crc32.update(actualData);
int checksum = ((int) (crc32.getValue()));
// 4 bytes length at the beginning, plus 4 bytes checksum
FileSystem fs = ctx.getRegionFileSystem().getFileSystem();
try (FSDataOutputStream out = fs.create(f0[nextTrackFile], true)) {
out.writeInt(actualData.length);
out.write(actualData);
out.writeInt(checksum);
}
// record timestamp
prevTimestamp = timestamp;
// rotate the file
nextTrackFile = 1 - nextTrackFile;
try {
fs.delete(f0[nextTrackFile], false);
} catch (IOException e) {
// we will create new file with overwrite = true, so not a big deal here, only for speed up
// loading as we do not need to read this file when loading
LOG.debug("Failed to delete old track file {}, ignoring the exception", f0[nextTrackFile], e);
}
} | 3.26 |
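What `update` persists is a small framed record: a 4-byte length, the serialized payload, then a CRC32 of the payload, written to one of two alternating track files while the now-stale twin is deleted on a best-effort basis. Here is a minimal JDK-only sketch of that framing and rotation; the paths, class name, and `main` driver are illustrative, and the real code serializes a protobuf `StoreFileList` with the timestamp set on the builder rather than a raw byte array.

```java
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.zip.CRC32;

public class RotatingTrackFile {
  private final Path[] trackFiles; // two alternating files
  private int next = 0;
  private long prevTimestamp = -1;

  RotatingTrackFile(Path dir) {
    trackFiles = new Path[] { dir.resolve("track.0"), dir.resolve("track.1") };
  }

  /** Write [length][payload][crc32] to the next track file, then rotate and drop the stale one. */
  void update(byte[] payload) throws IOException {
    // Monotonically increasing timestamp, as in StoreFileListFile (there it goes into the protobuf).
    long timestamp = Math.max(prevTimestamp + 1, System.currentTimeMillis());
    CRC32 crc32 = new CRC32();
    crc32.update(payload);
    try (DataOutputStream out = new DataOutputStream(Files.newOutputStream(trackFiles[next]))) {
      out.writeInt(payload.length);         // 4-byte length prefix
      out.write(payload);
      out.writeInt((int) crc32.getValue()); // 4-byte checksum suffix
    }
    prevTimestamp = timestamp;
    next = 1 - next; // rotate
    try {
      Files.deleteIfExists(trackFiles[next]); // best effort: the stale file only speeds up loading
    } catch (IOException e) {
      // ignore, mirroring the "not a big deal" handling in the original
    }
  }

  public static void main(String[] args) throws IOException {
    RotatingTrackFile tracker = new RotatingTrackFile(Files.createTempDirectory("track"));
    tracker.update("file-list-v1".getBytes());
    tracker.update("file-list-v2".getBytes());
  }
}
```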
hbase_StoreFileListFile_listFiles_rdh | // file sequence id to path
private NavigableMap<Long, List<Path>> listFiles() throws IOException {
FileSystem fs = ctx.getRegionFileSystem().getFileSystem();
FileStatus[] statuses;
try {
statuses = fs.listStatus(trackFileDir);
} catch (FileNotFoundException e) {
LOG.debug("Track file directory {} does not exist", trackFileDir, e);
return Collections.emptyNavigableMap();
}
if ((statuses == null) || (statuses.length == 0)) {
return Collections.emptyNavigableMap();
}
TreeMap<Long, List<Path>> map = new TreeMap<>((l1, l2) -> l2.compareTo(l1));
for (FileStatus status : statuses) {
Path file = status.getPath();
if (!status.isFile()) {
LOG.warn("Found invalid track file {}, which is not a file", file);
continue;
}
if (!TRACK_FILE_PATTERN.matcher(file.getName()).matches()) {
LOG.warn("Found invalid track file {}, skip", file);
continue;
}
List<String> parts = Splitter.on(TRACK_FILE_SEPARATOR).splitToList(file.getName());
map.computeIfAbsent(Long.parseLong(parts.get(1)), k -> new ArrayList<>()).add(file);
}
return map;
} | 3.26 |
hbase_SimpleLoadBalancer_balanceOverall_rdh | /**
 * If we need to balance overall, we need to add one more round to peel off one region from each
 * max. Together with other regions left to be assigned, we distribute all regionsToMove to the RSes
 * that have fewer regions in the whole cluster scope.
*/
private void balanceOverall(List<RegionPlan> regionsToReturn, Map<ServerName, BalanceInfo> serverBalanceInfo,
boolean fetchFromTail, MinMaxPriorityQueue<RegionPlan> regionsToMove, int
max, int min) {
// Step 1.
// A map to record the plan we have already got as status quo, in order to resolve a cyclic
// assignment pair,
// e.g. plan 1: A -> B, plan 2: B ->C => resolve plan1 to A -> C, remove plan2
Map<ServerName, List<Integer>> returnMap = new HashMap<>();
for (int i = 0; i <
regionsToReturn.size(); i++) {
List<Integer> pos = returnMap.get(regionsToReturn.get(i).getDestination());
if (pos == null) {
pos = new ArrayList<>();
returnMap.put(regionsToReturn.get(i).getDestination(), pos);
}
pos.add(i);
}
// Step 2.
// Peel off one region from each RS which has max number of regions now.
// Each RS should have either max or min numbers of regions for this table.
for (int i = 0; i < serverLoadList.size(); i++) {
ServerAndLoad serverload = serverLoadList.get(i);
BalanceInfo balanceInfo
= serverBalanceInfo.get(serverload.getServerName());
if (balanceInfo == null)
{
continue;
}
setLoad(serverLoadList, i, balanceInfo.getNumRegionsAdded());
if ((balanceInfo.m0().size() + balanceInfo.getNumRegionsAdded()) == max) {
RegionInfo v71;
if (balanceInfo.m0().isEmpty()) {
LOG.debug(("During balanceOverall, we found " + serverload.getServerName()) + " has no RegionInfo, no operation needed");
continue;
} else if (balanceInfo.getNextRegionForUnload() >= balanceInfo.m0().size()) {
continue;
} else {
v71 = balanceInfo.m0().get(balanceInfo.getNextRegionForUnload());
}
RegionPlan maxPlan = new RegionPlan(v71, serverload.getServerName(), null);
regionsToMove.add(maxPlan);
setLoad(serverLoadList, i, -1);
} else if (((balanceInfo.m0().size() + balanceInfo.getNumRegionsAdded()) > max) || ((balanceInfo.m0().size() + balanceInfo.getNumRegionsAdded()) < min)) {
LOG.warn((((((((((("Encounter incorrect region numbers after calculating move plan during balanceOverall, " + "for this table, ") + serverload.getServerName())
+ " originally has ") + balanceInfo.m0().size()) + " regions and ") + balanceInfo.getNumRegionsAdded()) + " regions have been added. Yet, max =") + max) + ", min =") + min) + ". Thus stop balance for this table");// should not happen
return;
}
}
// Step 3. sort the ServerLoadList, the ArrayList hold overall load for each server.
// We only need to assign the regionsToMove to
// the first n = regionsToMove.size() RS that has least load.
Collections.sort(serverLoadList, new Comparator<ServerAndLoad>() {
@Override
public int compare(ServerAndLoad s1, ServerAndLoad s2) {
if (s1.getLoad() == s2.getLoad()) {
return 0;
} else {
return s1.getLoad() > s2.getLoad() ? 1 : -1;
}
}
});
// Step 4.
// Preparation before assign out all regionsToMove.
// We need to remove the plan that has the source RS equals to destination RS,
// since the source RS belongs to the least n loaded RS.
int assignLength = regionsToMove.size();
// A structure help to map ServerName to it's load and index in ServerLoadList
Map<ServerName, Pair<ServerAndLoad, Integer>> SnLoadMap = new HashMap<>();
for (int i = 0; i < serverLoadList.size(); i++) {
SnLoadMap.put(serverLoadList.get(i).getServerName(), new Pair<>(serverLoadList.get(i), i));
}
Pair<ServerAndLoad, Integer> shredLoad;
// A List to help mark the plan in regionsToMove that should be removed
List<RegionPlan> planToRemoveList = new ArrayList<>();
// A structure to record how many times a server becomes the source of a plan, from
// regionsToMove.
Map<ServerName, Integer> v78 = new HashMap<>();
// We remove one of the plan which would cause source RS equals destination RS.
// But we should keep in mind that the second plan from such RS should be kept.
for (RegionPlan plan : regionsToMove) {
// the source RS's load and index in ServerLoadList
shredLoad = SnLoadMap.get(plan.getSource());
if (!v78.containsKey(plan.getSource())) {
v78.put(plan.getSource(), 0);
}
v78.put(plan.getSource(), v78.get(plan.getSource()) + 1);
if ((shredLoad.getSecond() < assignLength) && (v78.get(plan.getSource()) == 1)) {
planToRemoveList.add(plan);
// While marked as to be removed, the count should be add back to the source RS
setLoad(serverLoadList, shredLoad.getSecond(), 1);
}
}
// Remove those marked plans from regionsToMove,
// we cannot direct remove them during iterating through
// regionsToMove, due to the fact that regionsToMove is a MinMaxPriorityQueue.
for (RegionPlan planToRemove : planToRemoveList) {
regionsToMove.remove(planToRemove);
}
// Step 5.
// We only need to assign the regionsToMove to
// the first n = regionsToMove.size() of them, with least load.
// With this strategy adopted, we can gradually achieve the overall balance,
// while keeping table level balanced.
for (int i = 0; i < assignLength; i++) {
// skip the RS that is also the source, we have removed them from regionsToMove in previous
// step
if (v78.containsKey(serverLoadList.get(i).getServerName())) {
continue;
}
addRegionPlan(regionsToMove, fetchFromTail, serverLoadList.get(i).getServerName(), regionsToReturn);
setLoad(serverLoadList, i, 1);
// resolve a possible cyclic assignment pair if we just produced one:
// e.g. plan1: A -> B, plan2: B -> C => resolve plan1 to A -> C and remove plan2
List<Integer> pos = returnMap.get(regionsToReturn.get(regionsToReturn.size() - 1).getSource());
if ((pos != null) && (pos.size() !=
0)) {
regionsToReturn.get(pos.get(pos.size() - 1)).setDestination(regionsToReturn.get(regionsToReturn.size() - 1).getDestination());
pos.remove(pos.size() - 1);
regionsToReturn.remove(regionsToReturn.size() - 1);
}
}
// Done balance overall
} | 3.26 |
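Steps 1 and 5 together implement a small but easy-to-miss trick: the `returnMap` index lets the balancer collapse a cyclic pair of plans (A -> B followed by B -> C) into a single A -> C move, so server B never receives a region only to give it straight back. A self-contained sketch of just that resolution follows; the class and field names are hypothetical.

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class CyclicPlanResolution {
  static final class MovePlan {
    String source, destination;
    MovePlan(String s, String d) { source = s; destination = d; }
    @Override public String toString() { return source + " -> " + destination; }
  }

  public static void main(String[] args) {
    List<MovePlan> plans = new ArrayList<>();
    plans.add(new MovePlan("A", "B"));

    // Index existing plans by destination, as balanceOverall does in step 1.
    Map<String, List<Integer>> byDestination = new HashMap<>();
    for (int i = 0; i < plans.size(); i++) {
      byDestination.computeIfAbsent(plans.get(i).destination, k -> new ArrayList<>()).add(i);
    }

    // A newly produced plan whose source already appears as a destination.
    MovePlan newPlan = new MovePlan("B", "C");
    plans.add(newPlan);

    List<Integer> hits = byDestination.get(newPlan.source);
    if (hits != null && !hits.isEmpty()) {
      // Redirect the earlier plan (A -> B) to the new destination (C) and drop B -> C.
      plans.get(hits.get(hits.size() - 1)).destination = newPlan.destination;
      hits.remove(hits.size() - 1);
      plans.remove(plans.size() - 1);
    }
    System.out.println(plans); // prints [A -> C]
  }
}
```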
hbase_SimpleLoadBalancer_m1_rdh | /**
 * A checker function to decide, when we want to balance overall and a certain table has already
 * been balanced, whether we still need to re-distribute regions of this table to achieve the state
 * of overall balance.
*
* @return true if this table should be balanced.
*/
private boolean m1() {
int floor = ((int) (Math.floor(avgLoadOverall * (1 - overallSlop))));
int ceiling = ((int) (Math.ceil(avgLoadOverall * (1 + overallSlop))));
int max = 0;
int min = Integer.MAX_VALUE;
for (ServerAndLoad server : serverLoadList) {
max = Math.max(server.getLoad(), max);
min = Math.min(server.getLoad(), min);
}
if ((max <= ceiling) && (min >= floor)) {
if (LOG.isTraceEnabled()) {
// If nothing to balance, then don't say anything unless trace-level logging.
LOG.trace("Skipping load balancing because cluster is balanced at overall level");
}
return false;
}
return true;
} | 3.26 |
hbase_SimpleLoadBalancer_addRegionPlan_rdh | /**
* Add a region from the head or tail to the List of regions to return.
*/
private void addRegionPlan(final MinMaxPriorityQueue<RegionPlan> regionsToMove, final boolean fetchFromTail, final ServerName sn, List<RegionPlan> regionsToReturn) {
RegionPlan rp = null;
if (!fetchFromTail) {
rp = regionsToMove.remove();
} else {
rp = regionsToMove.removeLast();
}
rp.setDestination(sn);
regionsToReturn.add(rp);
} | 3.26 |
hbase_IdLock_releaseLockEntry_rdh | /**
* Must be called in a finally block to decrease the internal counter and remove the monitor
* object for the given id if the caller is the last client.
*
* @param entry
* the return value of {@link #getLockEntry(long)}
*/
public void releaseLockEntry(Entry entry) {
Thread currentThread = Thread.currentThread();
synchronized(entry) {
if (entry.holder != currentThread) {
LOG.warn("{} is trying to release lock entry {}, but it is not the holder.", currentThread, entry);
}
entry.locked = false;
if (entry.numWaiters > 0) {
entry.notify();
} else {
map.remove(entry.id);}
}
} | 3.26 |
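Since the javadoc insists that `releaseLockEntry` run in a `finally` block, a hedged usage sketch of the acquire/release pair may help. It assumes the class shown in these snippets is `org.apache.hadoop.hbase.util.IdLock` with a public no-arg constructor, which this dump does not show; the id value is purely illustrative.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.util.IdLock; // assumed location of the class shown above

public class IdLockUsage {
  public static void main(String[] args) throws IOException {
    IdLock idLock = new IdLock(); // assuming the no-arg constructor
    long id = 42L;                // hypothetical id, e.g. an HFile block offset
    IdLock.Entry entry = idLock.getLockEntry(id);
    try {
      // ... work that must be exclusive per id ...
    } finally {
      // Per the javadoc above, release must happen in a finally block.
      idLock.releaseLockEntry(entry);
    }
  }
}
```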
hbase_IdLock_getLockEntry_rdh | /**
* Blocks until the lock corresponding to the given id is acquired.
*
* @param id
* an arbitrary number to lock on
* @return an "entry" to pass to {@link #releaseLockEntry(Entry)} to release the lock
* @throws IOException
* if interrupted
*/
public Entry getLockEntry(long id) throws IOException {
Thread currentThread = Thread.currentThread();
Entry entry = new Entry(id, currentThread);
Entry existing;
while ((existing = map.putIfAbsent(entry.id, entry)) != null) {
synchronized(existing) {
if (existing.locked) {
++existing.numWaiters;// Add ourselves to waiters.
while (existing.locked) {
try {
existing.wait();
} catch (InterruptedException e) {
--existing.numWaiters;// Remove ourselves from waiters.
// HBASE-21292
// There is a rare case that interrupting and the lock owner thread call
// releaseLockEntry at the same time. Since the owner thread found there
// still one waiting, it won't remove the entry from the map. If the interrupted
// thread is the last one waiting on the lock, and since an exception is thrown,
// the 'existing' entry will stay in the map forever. Later threads which try to
// get this lock will be stuck in an infinite loop because
// existing = map.putIfAbsent(entry.id, entry)) != null and existing.locked=false.
if ((!existing.locked) && (existing.numWaiters == 0)) {
map.remove(existing.id);
}
throw new InterruptedIOException("Interrupted waiting to acquire sparse lock");
}
}
--existing.numWaiters;// Remove ourselves from waiters.
existing.locked = true;
existing.holder = currentThread;
return existing;
}
// If the entry is not locked, it might already be deleted from the
// map, so we cannot return it. We need to get our entry into the map
// or get someone else's locked entry.
}
}
return entry;
} | 3.26 |
hbase_IdLock_tryLockEntry_rdh | /**
* Blocks until the lock corresponding to the given id is acquired.
*
* @param id
* an arbitrary number to lock on
* @param time
* time to wait in ms
* @return an "entry" to pass to {@link #releaseLockEntry(Entry)} to release the lock
* @throws IOException
* if interrupted
*/
public Entry tryLockEntry(long id, long time) throws IOException {
Preconditions.checkArgument(time >= 0);
Thread currentThread = Thread.currentThread();
Entry entry = new Entry(id, currentThread);
Entry existing;
long waitUtilTS = EnvironmentEdgeManager.currentTime() + time;
long remaining = time;
while ((existing = map.putIfAbsent(entry.id, entry)) != null) {
synchronized(existing) {
if (existing.locked) {
++existing.numWaiters;// Add ourselves to waiters.
try {
while (existing.locked) {
existing.wait(remaining);
if (existing.locked) {
long currentTS = EnvironmentEdgeManager.currentTime();
if (currentTS >= waitUtilTS) {
// time is up
return null;
} else {
// our wait was woken, but the lock is still taken; this can happen
// due to JDK Object's wait/notify mechanism.
// Calculate the new remaining time to wait
remaining = waitUtilTS - currentTS;
}
}
}
} catch (InterruptedException e) {
// HBASE-21292
// Please refer to the comments in getLockEntry()
// the difference here is that we decrease numWaiters in finally block
if ((!existing.locked) && (existing.numWaiters == 1)) {
map.remove(existing.id);
}
throw new InterruptedIOException("Interrupted waiting to acquire sparse lock");
} finally {
--existing.numWaiters;// Remove ourselves from waiters.
}
existing.locked = true;
existing.holder = currentThread;
return existing;
}
// If the entry is not locked, it might already be deleted from the
// map, so we cannot return it. We need to get our entry into the map
// or get someone else's locked entry.
}
}
return entry;
} | 3.26 |
hbase_IdLock_isHeldByCurrentThread_rdh | /**
* Test whether the given id is already locked by the current thread.
 */
public boolean isHeldByCurrentThread(long id) {
Thread currentThread = Thread.currentThread();
Entry entry = map.get(id);
if (entry == null) {
return false;
}
synchronized(entry) {
return currentThread.equals(entry.holder);
}
} | 3.26 |
hbase_JSONBean_open_rdh | /**
 * Notice that closing the returned {@link Writer} will not close the {@code writer} passed in; you
 * still need to close the {@code writer} yourself.
 * <p/>
 * This is because we can only finish the json after you call {@link Writer#close()}. So if we
 * closed the {@code writer} too, you could not write anything after the json is finished.
*/
public Writer open(final PrintWriter writer) throws IOException {
JsonWriter jsonWriter = f0.newJsonWriter(new Writer() {
@Override
public void write(char[] cbuf, int off, int len) throws IOException {
writer.write(cbuf, off, len);
}
@Override
public void flush() throws IOException {
writer.flush();
}
@Override
public void close() throws IOException { // do nothing
}
});
jsonWriter.setIndent(" ");
jsonWriter.beginObject();
return new Writer() {
@Override
public void flush() throws IOException
{
jsonWriter.flush();
}
@Override
public void close() throws IOException {
jsonWriter.endObject();
jsonWriter.close();
}
@Override
public void write(String key, String value) throws IOException {
jsonWriter.name(key).value(value);
}
@Override
public int write(MBeanServer mBeanServer, ObjectName qry, String attribute, boolean description, ObjectName excluded) throws IOException {
return JSONBean.write(jsonWriter, mBeanServer, qry, attribute, description, excluded);
}
};
} | 3.26 |
hbase_JSONBean_dumpAllBeans_rdh | /**
* Dump out all registered mbeans as json on System.out.
 */
public static void dumpAllBeans() throws IOException, MalformedObjectNameException {
try (PrintWriter writer = new PrintWriter(new OutputStreamWriter(System.out, StandardCharsets.UTF_8))) {
JSONBean dumper = new JSONBean();
try (JSONBean.Writer jsonBeanWriter = dumper.open(writer)) {
MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
jsonBeanWriter.write(mbeanServer, new ObjectName("*:*"), null, false);
}
}
} | 3.26 |
hbase_JSONBean_write_rdh | /**
 * Returns non-zero if it failed to find the bean, 0 otherwise.
 */
private static int write(JsonWriter writer, MBeanServer mBeanServer, ObjectName qry, String attribute, boolean description, ObjectName excluded) throws IOException {
LOG.debug("Listing beans for {}", qry);
Set<ObjectName> names = mBeanServer.queryNames(qry, null);
writer.name("beans").beginArray();
Iterator<ObjectName> it = names.iterator();
Pattern[] v3 = null;
while (it.hasNext()) {
ObjectName oname = it.next();
if ((excluded != null) && excluded.apply(oname)) {
continue;
}
MBeanInfo minfo;
String code = "";
String descriptionStr = null;
Object attributeinfo = null;
try {
minfo = mBeanServer.getMBeanInfo(oname);
code = minfo.getClassName();
if (description) {
descriptionStr = minfo.getDescription();
}
String prs = "";
try {
if ("org.apache.commons.modeler.BaseModelMBean".equals(code)) {
prs = "modelerType";
code = ((String) (mBeanServer.getAttribute(oname, prs)));
}
if (attribute != null) {
String[] patternAttr = null;
if (attribute.contains(ASTERICK)) {
if (attribute.contains(COMMA)) {
patternAttr = attribute.split(COMMA);
} else {
patternAttr = new String[1];
patternAttr[0] = attribute;
}
v3 = new Pattern[patternAttr.length];
for (int i = 0; i < patternAttr.length; i++) {
v3[i] = Pattern.compile(patternAttr[i]);
}
// nullify the attribute
attribute = null;
} else {
prs = attribute;
attributeinfo = mBeanServer.getAttribute(oname, prs);
}
}
} catch (RuntimeMBeanException e) {
// UnsupportedOperationExceptions happen in the normal course of business,
// so no need to log them as errors all the time.
if (e.getCause() instanceof UnsupportedOperationException) {
if (LOG.isTraceEnabled()) {
LOG.trace((((("Getting attribute " + prs) + " of ") + oname) +
" threw ") + e);
}
} else {
LOG.error(((("Getting attribute " + prs) + " of ") + oname) + " threw an exception", e);
}
return 0;
} catch (AttributeNotFoundException e) {
// If the modelerType attribute was not found, the class name is used
// instead.
LOG.error(((("getting attribute " + prs) + " of ") + oname) + " threw an exception", e);
} catch (MBeanException e) {
// The code inside the attribute getter threw an exception so log it,
// and fall back on the class name
LOG.error(((("getting attribute " + prs) + " of ") + oname) + " threw an exception", e);
} catch (RuntimeException e) {
// For some reason even with an MBeanException available to them
// Runtime exceptions can still find their way through, so treat them
// the same as MBeanException
LOG.error(((("getting attribute " + prs) + " of ") + oname) + " threw an exception", e);
} catch (ReflectionException e) {
// This happens when the code inside the JMX bean (setter?? from the
// java docs) threw an exception, so log it and fall back on the
// class name
LOG.error(((("getting attribute " + prs) + " of ") + oname) + " threw an exception", e);
}
} catch (InstanceNotFoundException e) {
// Ignored for some reason the bean was not found so don't output it
continue;
} catch (IntrospectionException e) {
// This is an internal error, something odd happened with reflection so
// log it and don't output the bean.
LOG.error((("Problem while trying to process JMX query: " + qry) + " with MBean ") + oname, e);
continue;
} catch (ReflectionException e) {
// This happens when the code inside the JMX bean threw an exception, so
// log it and don't output the bean.
LOG.error((("Problem while trying to process JMX query: " + qry) + " with MBean ") +
oname, e);
continue;
}
writer.beginObject();
writer.name("name").value(oname.toString());
if ((description && (descriptionStr != null)) && (descriptionStr.length() > 0)) {
writer.name("description").value(descriptionStr);
}
writer.name("modelerType").value(code);
if ((attribute != null) && (attributeinfo == null))
{
writer.name("result").value("ERROR");
writer.name("message").value(("No attribute with name " + attribute) + " was found.");
writer.endObject();
writer.endArray();
writer.close();
return -1;
}
if (attribute != null) {
writeAttribute(writer, attribute, descriptionStr, attributeinfo);
} else {
MBeanAttributeInfo[] attrs = minfo.getAttributes();
for (int i = 0; i < attrs.length; i++) {
writeAttribute(writer, mBeanServer, oname, description, v3, attrs[i]);
}
}
writer.endObject();
}
writer.endArray();
return 0;
} | 3.26 |
hbase_TableRecordReaderImpl_next_rdh | /**
*
* @param key
* HStoreKey as input key.
* @param value
* MapWritable as input value
* @return true if there was more data
*/
public boolean next(ImmutableBytesWritable key, Result value) throws IOException {
Result result;
try {
try {
result = this.scanner.next();
if (logScannerActivity) {
rowcount++;
if (rowcount >= logPerRowCount) {
long now = EnvironmentEdgeManager.currentTime();
LOG.info(((("Mapper took " + (now - timestamp)) + "ms to process ") + rowcount) + " rows");
timestamp = now;
rowcount = 0;
}
}
} catch (IOException e) {
// do not retry if the exception tells us not to do so
if (e instanceof DoNotRetryIOException) {
throw e;
}
// try to handle all other IOExceptions by restarting
// the scanner, if the second call fails, it will be rethrown
LOG.debug("recovered from " + StringUtils.stringifyException(e));
if (lastSuccessfulRow == null) {
LOG.warn((("We are restarting the first next() invocation," + " if your mapper has restarted a few other times like this") + " then you should consider killing this job and investigate") + " why it's taking so long.");
}
if (lastSuccessfulRow == null) {
restart(startRow);
} else {
restart(lastSuccessfulRow);
this.scanner.next();// skip presumed already mapped row
}
result = this.scanner.next();
}
if ((result != null)
&& (result.size() > 0)) {
key.set(result.getRow());
lastSuccessfulRow = key.get();
value.copyFrom(result);
return true;
}
return false;
} catch (IOException ioe) {
if (logScannerActivity) {
long now = EnvironmentEdgeManager.currentTime();
LOG.info(((("Mapper took " + (now - timestamp)) + "ms to process ") + rowcount) + " rows");
LOG.info(ioe.toString(), ioe);
String lastRow = (lastSuccessfulRow == null) ? "null" : Bytes.toStringBinary(lastSuccessfulRow);
LOG.info("lastSuccessfulRow=" + lastRow);
}
throw ioe;
}
} | 3.26 |
hbase_TableRecordReaderImpl_setRowFilter_rdh | /**
*
* @param rowFilter
* the {@link Filter} to be used.
*/
public void setRowFilter(Filter rowFilter) {
this.trrRowFilter = rowFilter;} | 3.26 |
hbase_TableRecordReaderImpl_setHTable_rdh | /**
*
* @param htable
* the table to scan.
*/
public void setHTable(Table htable) {
Configuration conf = htable.getConfiguration();
logScannerActivity = conf.getBoolean(ConnectionConfiguration.LOG_SCANNER_ACTIVITY, false);
logPerRowCount = conf.getInt(LOG_PER_ROW_COUNT, 100);
this.htable = htable;} | 3.26 |
hbase_TableRecordReaderImpl_restart_rdh | /**
* Restart from survivable exceptions by creating a new scanner.
*/
public void restart(byte[] firstRow) throws IOException {
Scan v0;
if ((endRow != null) && (endRow.length > 0)) {
if (trrRowFilter != null) {
Scan scan = new Scan().withStartRow(firstRow).withStopRow(endRow);
TableInputFormat.addColumns(scan, trrInputColumns);
scan.setFilter(trrRowFilter);
scan.setCacheBlocks(false);
this.scanner = this.htable.getScanner(scan);
v0 = scan;
} else {
LOG.debug((("TIFB.restart, firstRow: " + Bytes.toStringBinary(firstRow)) + ", endRow: ") + Bytes.toStringBinary(endRow));
Scan scan = new Scan().withStartRow(firstRow).withStopRow(endRow);
TableInputFormat.addColumns(scan, trrInputColumns);
this.scanner = this.htable.getScanner(scan);
v0 = scan;
}
} else {
LOG.debug(("TIFB.restart, firstRow: " + Bytes.toStringBinary(firstRow)) + ", no endRow");
Scan scan = new Scan().withStartRow(firstRow);
TableInputFormat.addColumns(scan, trrInputColumns);
scan.setFilter(trrRowFilter);
this.scanner = this.htable.getScanner(scan);
v0 = scan;
}
if (logScannerActivity) {
LOG.info("Current scan=" + v0.toString());
timestamp = EnvironmentEdgeManager.currentTime();
rowcount = 0;
}
} | 3.26 |
hbase_TableRecordReaderImpl_setStartRow_rdh | /**
*
* @param startRow
* the first row in the split
*/
public void setStartRow(final byte[] startRow) {
this.startRow = startRow;
} | 3.26 |
hbase_TableRecordReaderImpl_init_rdh | /**
* Build the scanner. Not done in constructor to allow for extension.
 */
public void init() throws IOException {
restart(startRow);
} | 3.26 |
hbase_TableRecordReaderImpl_setEndRow_rdh | /**
*
* @param endRow
* the last row in the split
*/
public void setEndRow(final byte[] endRow) {
this.endRow = endRow;
} | 3.26 |
hbase_TableRecordReaderImpl_createValue_rdh | /**
*
* @see org.apache.hadoop.mapred.RecordReader#createValue()
*/
public Result
createValue() {
return new Result();
} | 3.26 |
hbase_TableRecordReaderImpl_setInputColumns_rdh | /**
*
* @param inputColumns
* the columns to be placed in {@link Result}.
*/
public void setInputColumns(final byte[][] inputColumns) {
this.trrInputColumns = inputColumns;
} | 3.26 |
hbase_TableRecordReaderImpl_createKey_rdh | /**
*
* @see org.apache.hadoop.mapred.RecordReader#createKey()
*/
public ImmutableBytesWritable createKey() {
return new ImmutableBytesWritable();
} | 3.26 |
hbase_ZKClusterId_getUUIDForCluster_rdh | /**
* Get the UUID for the provided ZK watcher. Doesn't handle any ZK exceptions
*
* @param zkw
* watcher connected to an ensemble
* @return the UUID read from zookeeper
* @throws KeeperException
* if a ZooKeeper operation fails
*/
public static UUID getUUIDForCluster(ZKWatcher zkw) throws KeeperException {
String uuid = readClusterIdZNode(zkw);
return uuid == null ? null : UUID.fromString(uuid);
} | 3.26 |
hbase_ChecksumUtil_generateExceptionForChecksumFailureForTest_rdh | /**
* Mechanism to throw an exception in case of hbase checksum failure. This is used by unit tests
* only.
*
* @param value
* Setting this to true will cause hbase checksum verification failures to generate
* exceptions.
*/
public static void generateExceptionForChecksumFailureForTest(boolean value) {
generateExceptions = value;
} | 3.26 |
hbase_ChecksumUtil_numBytes_rdh | /**
* Returns the number of bytes needed to store the checksums for a specified data size
*
* @param datasize
* number of bytes of data
* @param bytesPerChecksum
* number of bytes in a checksum chunk
* @return The number of bytes needed to store the checksum values
*/
static long numBytes(long datasize, int bytesPerChecksum) {
return numChunks(datasize, bytesPerChecksum) * HFileBlock.CHECKSUM_SIZE;
} | 3.26 |
hbase_ChecksumUtil_validateChecksum_rdh | /**
* Validates that the data in the specified HFileBlock matches the checksum. Generates the
* checksums for the data and then validate that it matches those stored in the end of the data.
*
* @param buf
* Contains the data in following order: HFileBlock header, data, checksums.
* @param pathName
* Path of the HFile to which the {@code data} belongs. Only used for logging.
* @param offset
* offset of the data being validated. Only used for logging.
* @param hdrSize
* Size of the block header in {@code data}. Only used for logging.
* @return True if checksum matches, else false.
*/
static boolean validateChecksum(ByteBuff buf, String pathName, long offset, int hdrSize) {
ChecksumType ctype = ChecksumType.codeToType(buf.get(Header.CHECKSUM_TYPE_INDEX));
if (ctype == ChecksumType.NULL) {
return true;// No checksum validations needed for this block.
}
// read in the stored value of the checksum size from the header.
int bytesPerChecksum = buf.getInt(Header.BYTES_PER_CHECKSUM_INDEX);
DataChecksum v16 = DataChecksum.newDataChecksum(ctype.getDataChecksumType(), bytesPerChecksum);
assert v16 != null;
int onDiskDataSizeWithHeader = buf.getInt(Header.ON_DISK_DATA_SIZE_WITH_HEADER_INDEX);
LOG.trace("dataLength={}, sizeWithHeader={}, checksumType={}, file={}, " + "offset={}, headerSize={}, bytesPerChecksum={}", buf.capacity(), onDiskDataSizeWithHeader, ctype.getName(), pathName, offset, hdrSize, bytesPerChecksum);
ByteBuff data = buf.duplicate().position(0).limit(onDiskDataSizeWithHeader);
ByteBuff checksums = buf.duplicate().position(onDiskDataSizeWithHeader).limit(buf.limit());
return verifyChunkedSums(v16, data, checksums, pathName);
} | 3.26 |
hbase_ChecksumUtil_generateChecksums_rdh | /**
* Generates a checksum for all the data in indata. The checksum is written to outdata.
*
* @param indata
* input data stream
* @param startOffset
* starting offset in the indata stream from where to compute checkums
* from
* @param endOffset
* ending offset in the indata stream upto which checksums needs to be
* computed
* @param outdata
* the output buffer where checksum values are written
* @param outOffset
* the starting offset in the outdata where the checksum values are
* written
* @param checksumType
* type of checksum
* @param bytesPerChecksum
* number of bytes per checksum value
*/
static void generateChecksums(byte[] indata, int startOffset, int endOffset, byte[] outdata, int outOffset, ChecksumType checksumType, int bytesPerChecksum) throws IOException {
if (checksumType == ChecksumType.NULL) {
return;// No checksum for this block.
}
DataChecksum checksum = DataChecksum.newDataChecksum(checksumType.getDataChecksumType(), bytesPerChecksum);
checksum.calculateChunkedSums(ByteBuffer.wrap(indata, startOffset, endOffset - startOffset), ByteBuffer.wrap(outdata, outOffset, outdata.length - outOffset));
}
/**
* Like the hadoop's {@link DataChecksum#verifyChunkedSums(ByteBuffer, ByteBuffer, String, long)},
* this method will also verify checksum of each chunk in data. the difference is: this method can
* accept {@link ByteBuff} | 3.26 |
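To make the chunking concrete, here is a small JDK-only sketch that writes one CRC32 value per `bytesPerChecksum`-sized chunk of the input, which is the same shape of computation the Hadoop `DataChecksum` calls above perform; the helper and `main` are illustrative, not HBase or Hadoop API.

```java
import java.nio.ByteBuffer;
import java.util.zip.CRC32;

public class ChunkedChecksums {
  /** Write one 4-byte CRC32 value per bytesPerChecksum-sized chunk of data into out. */
  static void generateChecksums(byte[] data, int bytesPerChecksum, ByteBuffer out) {
    CRC32 crc = new CRC32();
    for (int off = 0; off < data.length; off += bytesPerChecksum) {
      int len = Math.min(bytesPerChecksum, data.length - off);
      crc.reset();
      crc.update(data, off, len);
      out.putInt((int) crc.getValue());
    }
  }

  public static void main(String[] args) {
    byte[] data = new byte[40_000];
    int bytesPerChecksum = 16_384; // 40,000 bytes -> 3 chunks
    ByteBuffer checksums = ByteBuffer.allocate(3 * 4);
    generateChecksums(data, bytesPerChecksum, checksums);
    System.out.println("checksum bytes written: " + checksums.position()); // 12
  }
}
```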
hbase_ChecksumUtil_numChunks_rdh | /**
* Returns the number of checksum chunks needed to store the checksums for a specified data size
*
* @param datasize
* number of bytes of data
* @param bytesPerChecksum
* number of bytes in a checksum chunk
* @return The number of checksum chunks
*/
static long numChunks(long datasize, int bytesPerChecksum) {
long numChunks = datasize / bytesPerChecksum;
if ((datasize % bytesPerChecksum) != 0) {
numChunks++;
}
return numChunks;
} | 3.26 |
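A quick worked example of the sizing math in `numChunks` and `numBytes`: the only assumption below is that each chunk's checksum occupies 4 bytes, i.e. `HFileBlock.CHECKSUM_SIZE` in the snippet above is taken to be 4.

```java
public class ChecksumSizing {
  static final int CHECKSUM_SIZE = 4; // assumption: one 4-byte checksum per chunk

  static long numChunks(long datasize, int bytesPerChecksum) {
    long chunks = datasize / bytesPerChecksum;
    if (datasize % bytesPerChecksum != 0) {
      chunks++;
    }
    return chunks;
  }

  public static void main(String[] args) {
    // 66,560 bytes of data with 16 KiB chunks needs ceil(66560/16384) = 5 chunks,
    // i.e. 5 * 4 = 20 bytes of checksum data.
    System.out.println(numChunks(66_560, 16_384));                 // 5
    System.out.println(numChunks(66_560, 16_384) * CHECKSUM_SIZE); // 20
  }
}
```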
hbase_HFileCorruptionChecker_checkMobColFamDir_rdh | /**
 * Check all files in a mob column family dir.
 *
 * @param cfDir
 * mob column family directory
 */
protected void checkMobColFamDir(Path cfDir) throws IOException {
FileStatus[] statuses = null;
try {
statuses = fs.listStatus(cfDir);// use same filter as scanner.
} catch (FileNotFoundException fnfe) {
// Hadoop 0.23+ listStatus semantics throws an exception if the path does not exist.
LOG.warn(("Mob colfam Directory " + cfDir) + " does not exist. Likely the table is deleted. Skipping.");
missedMobFiles.add(cfDir);
return;
}
List<FileStatus> hfs = FSUtils.filterFileStatuses(statuses, new HFileFilter(fs));
// Hadoop 1.0 listStatus does not throw an exception if the path does not exist.
if (hfs.isEmpty() && (!fs.exists(cfDir))) {
LOG.warn(("Mob colfam Directory " + cfDir) + " does not exist. Likely the table is deleted. Skipping.");
missedMobFiles.add(cfDir);
return;
}
LOG.info("Checking MOB Column Family Directory {}. Number of entries = {}", cfDir, hfs.size());
for (FileStatus hfFs : hfs) {
Path hf = hfFs.getPath();
checkMobFile(hf);
}
} | 3.26 |
hbase_HFileCorruptionChecker_getQuarantined_rdh | /**
* Returns the set of successfully quarantined paths after checkTables is called.
*/
public Collection<Path> getQuarantined() {
return new HashSet<>(quarantined);
} | 3.26 |
hbase_HFileCorruptionChecker_checkMobFile_rdh | /**
 * Checks a path to see if it is a valid mob file.
 *
 * @param p
 * full Path to a mob file
 * @throws IOException
 * This is a connectivity related exception
 */
protected void checkMobFile(Path p) throws IOException {
HFile.Reader r = null;
try {
r = HFile.createReader(fs, p, cacheConf, true, conf);
} catch (CorruptHFileException che) {
LOG.warn("Found corrupt mob file " + p, che);
corruptedMobFiles.add(p);
if (inQuarantineMode) {
Path dest = createQuarantinePath(p);
LOG.warn((("Quarantining corrupt mob file " + p) + " into ") + dest);
boolean success = fs.mkdirs(dest.getParent());
success = (success) ? fs.rename(p, dest) : false;
if (!success) {
failureMobFiles.add(p);
} else {
quarantinedMobFiles.add(dest);
}
}
return;
} catch (FileNotFoundException fnfe) {
LOG.warn(("Mob file " + p) + " was missing. Likely removed due to compaction?");
missedMobFiles.add(p);
} finally {
mobFilesChecked.addAndGet(1);
if (r != null) {
r.close(true);
}
}
} | 3.26 |
hbase_HFileCorruptionChecker_getHFilesChecked_rdh | /**
* Returns number of hfiles checked in the last HfileCorruptionChecker run
 */
public int getHFilesChecked() {
return hfilesChecked.get();
} | 3.26 |
hbase_HFileCorruptionChecker_getCorruptedMobFiles_rdh | /**
* Returns the set of corrupted mob file paths after checkTables is called.
*/
public Collection<Path> getCorruptedMobFiles() {
return new HashSet<>(corruptedMobFiles);
} | 3.26 |
hbase_HFileCorruptionChecker_checkMobRegionDir_rdh | /**
* Checks all the mob files of a table.
*
* @param regionDir
* The mob region directory
*/
private void checkMobRegionDir(Path regionDir)
throws IOException {
if (!fs.exists(regionDir)) {
return;
}
FileStatus[] v22 = null;
try {
v22 = fs.listStatus(regionDir, new FamilyDirFilter(fs));
}
catch (FileNotFoundException fnfe) {
// Hadoop 0.23+ listStatus semantics throws an exception if the path does not exist.
LOG.warn(("Mob directory " + regionDir) + " does not exist. Likely the table is deleted. Skipping.");
missedMobFiles.add(regionDir);
return;
}
// Hadoop 1.0 listStatus does not throw an exception if the path does not exist.
if ((v22.length == 0) && (!fs.exists(regionDir))) {
LOG.warn(("Mob directory "
+ regionDir) + " does not exist. Likely the table is deleted. Skipping.");
missedMobFiles.add(regionDir);
return;
}
LOG.info("Checking MOB Region Directory {}. Number of entries = {}", regionDir, v22.length);
for (FileStatus hfFs : v22) {
Path
hf = hfFs.getPath();
checkMobColFamDir(hf);
}} | 3.26 |
hbase_HFileCorruptionChecker_m0_rdh | /**
 * Check all files in a column family dir.
 *
 * @param cfDir
 * column family directory
 */
protected void m0(Path cfDir) throws IOException {
FileStatus[] statuses = null;
try {
statuses = fs.listStatus(cfDir);// use same filter as scanner.
} catch (FileNotFoundException fnfe) {
// Hadoop 0.23+ listStatus semantics throws an exception if the path does not exist.
LOG.warn(("Colfam Directory " + cfDir) + " does not exist. Likely due to concurrent split/compaction. Skipping.");
missing.add(cfDir);
return;
}
List<FileStatus> hfs = FSUtils.filterFileStatuses(statuses, new HFileFilter(fs));
// Hadoop 1.0 listStatus does not throw an exception if the path does not exist.
if (hfs.isEmpty() && (!fs.exists(cfDir))) {
LOG.warn(("Colfam Directory " + cfDir) + " does not exist. Likely due to concurrent split/compaction. Skipping.");
missing.add(cfDir);
return;
}
LOG.info("Checking Column Family Directory {}. Number of entries = {}", cfDir, hfs.size());
for (FileStatus hfFs : hfs) {
Path hf = hfFs.getPath();
checkHFile(hf);
}
} | 3.26 |
hbase_HFileCorruptionChecker_checkTables_rdh | /**
* Check the specified table dirs for bad hfiles.
*/
public void checkTables(Collection<Path> tables) throws IOException {
for (Path t : tables) {
checkTableDir(t);
}
} | 3.26 |
hbase_HFileCorruptionChecker_getQuarantinedMobFiles_rdh | /**
* Returns the set of successfully quarantined paths after checkTables is called.
*/
public Collection<Path> getQuarantinedMobFiles() {
return new
HashSet<>(quarantinedMobFiles);
} | 3.26 |
hbase_HFileCorruptionChecker_getFailures_rdh | /**
* Returns the set of check failure file paths after checkTables is called.
*/
public Collection<Path> getFailures() {
return new HashSet<>(failures);
} | 3.26 |
hbase_HFileCorruptionChecker_createMobRegionDirChecker_rdh | /**
* Creates an instance of MobRegionDirChecker.
*
* @param tableDir
* The current table directory.
* @return An instance of MobRegionDirChecker.
*/
private MobRegionDirChecker createMobRegionDirChecker(Path tableDir) {
TableName tableName = CommonFSUtils.getTableName(tableDir);
Path mobDir = MobUtils.getMobRegionPath(conf, tableName);
return new MobRegionDirChecker(mobDir);
} | 3.26 |
hbase_HFileCorruptionChecker_getMobFilesChecked_rdh | /**
* Returns number of mob files checked in the last HfileCorruptionChecker run
*/
public int getMobFilesChecked() {
return mobFilesChecked.get();
} | 3.26 |
hbase_HFileCorruptionChecker_checkTableDir_rdh | /**
 * Check all the regiondirs in the specified tableDir.
 *
 * @param tableDir
 * path to a table
 */
void checkTableDir(Path tableDir) throws IOException {
List<FileStatus> rds = FSUtils.listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs));
if (rds == null) {
if (!fs.exists(tableDir)) {
LOG.warn(("Table Directory " + tableDir) + " does not exist. Likely due to concurrent delete. Skipping.");
missing.add(tableDir);
}
return;
}
LOG.info("Checking Table Directory {}. Number of entries (including mob) = {}", tableDir, rds.size() + 1);
// Parallelize check at the region dir level
List<RegionDirChecker>
rdcs = new ArrayList<>(rds.size() + 1);
List<Future<Void>> rdFutures;
for (FileStatus rdFs : rds) {
Path rdDir = rdFs.getPath();
RegionDirChecker work = new RegionDirChecker(rdDir);
rdcs.add(work);
}
// add mob region
rdcs.add(createMobRegionDirChecker(tableDir));
// Submit and wait for completion
try {
rdFutures = executor.invokeAll(rdcs);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
LOG.warn("Region dirs checking interrupted!", ie);
return;
}
for (int i = 0; i < rdFutures.size(); i++) {
Future<Void> f = rdFutures.get(i);
try {
f.get();
} catch (ExecutionException e) {
LOG.warn("Failed to quarantine an HFile in regiondir " + rdcs.get(i).regionDir, e.getCause());
// rethrow IOExceptions
if (e.getCause() instanceof IOException) {
throw ((IOException) (e.getCause()));
}
// rethrow RuntimeExceptions
if (e.getCause() instanceof RuntimeException) {
throw ((RuntimeException) (e.getCause()));
}
// this should never happen
LOG.error("Unexpected exception encountered", e);
return;// bailing out.
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
LOG.warn("Region dirs check interrupted!", ie);
// bailing out
return;
}
}
} | 3.26 |
hbase_HFileCorruptionChecker_checkHFile_rdh | /**
 * Checks a path to see if it is a valid hfile.
 *
 * @param p
 * full Path to an HFile
 * @throws IOException
 * This is a connectivity related exception
*/
protected void checkHFile(Path p) throws IOException {
HFile.Reader r = null;
try {
r = HFile.createReader(fs, p, cacheConf, true, conf);
} catch (CorruptHFileException che) {
LOG.warn("Found corrupt HFile " + p, che);
corrupted.add(p);
if (inQuarantineMode) {
Path dest = createQuarantinePath(p);
LOG.warn((("Quarantining corrupt HFile " + p) + " into ") + dest);
boolean success = fs.mkdirs(dest.getParent());
success = (success) ? fs.rename(p, dest) : false;
if (!success) {
failures.add(p);
} else {
quarantined.add(dest);
}
}
return;
} catch (FileNotFoundException fnfe) {
LOG.warn(("HFile " + p) + " was missing. Likely removed due to compaction/split?");
missing.add(p);
} finally {
hfilesChecked.addAndGet(1);
if (r != null) {
r.close(true);
}
}
} | 3.26 |
hbase_HFileCorruptionChecker_checkRegionDir_rdh | /**
 * Check all column families in a region dir.
 *
 * @param regionDir
 * region directory
*/
protected void checkRegionDir(Path regionDir) throws IOException {
FileStatus[] statuses = null;
try {
statuses = fs.listStatus(regionDir);
} catch (FileNotFoundException fnfe) {
// Hadoop 0.23+ listStatus semantics throws an exception if the path does not exist.
LOG.warn(("Region Directory " + regionDir) + " does not exist. Likely due to concurrent split/compaction. Skipping.");
missing.add(regionDir);
return;
}
List<FileStatus> cfs = FSUtils.filterFileStatuses(statuses, new FamilyDirFilter(fs));
// Hadoop 1.0 listStatus does not throw an exception if the path does not exist.
if (cfs.isEmpty() && (!fs.exists(regionDir))) {
LOG.warn(("Region Directory " + regionDir) +
" does not exist. Likely due to concurrent split/compaction. Skipping.");
missing.add(regionDir);
return;
}
LOG.info("Checking Region Directory {}. Number of entries = {}", regionDir, cfs.size());
for (FileStatus cfFs : cfs) {
Path cfDir = cfFs.getPath();
m0(cfDir);
}
} | 3.26 |
hbase_HFileCorruptionChecker_m1_rdh | /**
* Returns the set of check failure mob file paths after checkTables is called.
*/
public Collection<Path> m1() {
return new HashSet<>(failureMobFiles);
} | 3.26 |
hbase_AuthManager_authorizeUserTable_rdh | /**
* Check if user has given action privilige in table:family:qualifier scope.
*
* @param user
* user name
* @param table
* table name
* @param family
* family name
* @param qualifier
* qualifier name
* @param action
* one of action in [Read, Write, Create, Exec, Admin]
* @return true if user has, false otherwise
*/
public boolean authorizeUserTable(User user, TableName table, byte[] family, byte[] qualifier, Permission.Action action) {
if (user == null) {
return false;
}
if (table == null) {
table = PermissionStorage.ACL_TABLE_NAME;
}
if (authorizeUserNamespace(user, table.getNamespaceAsString(), action)) {
return true;
}
PermissionCache<TablePermission> tblPermissions = tableCache.getOrDefault(table, TBL_NO_PERMISSION);
if (authorizeTable(tblPermissions.get(user.getShortName()), table, family, qualifier, action)) {
return true;
}
for (String group : user.getGroupNames()) {
if (authorizeTable(tblPermissions.get(AuthUtil.toGroupEntry(group)), table, family, qualifier, action)) {
return true;
}
}
return false;
} | 3.26 |
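A short caller-side sketch (names other than authorizeUserTable are placeholders): the check falls through from namespace scope down to per-group table permissions, so a single call is enough to gate a read.

// Illustrative only; authManager, user and table are assumed to come from the surrounding context.
static boolean canRead(AuthManager authManager, User user, TableName table,
    byte[] family, byte[] qualifier) {
  return authManager.authorizeUserTable(user, table, family, qualifier, Permission.Action.READ);
}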
hbase_AuthManager_getMTime_rdh | /**
* Last modification logical time
*/
public long getMTime() {
return mtime.get();
} | 3.26 |
hbase_AuthManager_authorizeUserNamespace_rdh | /**
* Check if user has given action privilige in namespace scope.
*
* @param user
* user name
* @param namespace
* namespace
* @param action
* one of action in [Read, Write, Create, Exec, Admin]
* @return true if user has, false otherwise
*/
public boolean authorizeUserNamespace(User user, String namespace, Permission.Action action) {
if (user == null) {
return false;
}
if (authorizeUserGlobal(user, action)) {
return true;
}
PermissionCache<NamespacePermission> nsPermissions = namespaceCache.getOrDefault(namespace, NS_NO_PERMISSION);
if (authorizeNamespace(nsPermissions.get(user.getShortName()), namespace, action)) {
return true;
}
for (String group : user.getGroupNames()) {
if (authorizeNamespace(nsPermissions.get(AuthUtil.toGroupEntry(group)), namespace, action)) {
return true;
}
}
return false;
} | 3.26 |
hbase_AuthManager_removeNamespace_rdh | /**
* Remove given namespace from AuthManager's namespace cache.
*
* @param ns
* namespace
*/
public void removeNamespace(byte[] ns) {
namespaceCache.remove(Bytes.toString(ns));
} | 3.26 |
hbase_AuthManager_authorizeCell_rdh | /**
* Check if user has given action privilige in cell scope.
*
* @param user
* user name
* @param table
* table name
* @param cell
* cell to be checked
* @param action
* one of action in [Read, Write, Create, Exec, Admin]
* @return true if user has, false otherwise
*/
public boolean authorizeCell(User user, TableName table, Cell cell, Permission.Action action) {
try {
List<Permission> perms = PermissionStorage.getCellPermissionsForUser(user, cell);
if (LOG.isTraceEnabled()) {
LOG.trace("Perms for user {} in table {} in cell {}: {}", user.getShortName(), table, cell, perms != null ? perms : "");
}
if (perms != null) {
for (Permission p : perms) {
if (p.implies(action)) {
return true;
}
}
}
} catch (IOException e) {
// We failed to parse the KV tag
LOG.error("Failed parse of ACL tag in cell " +
cell);
// Fall through to check with the table and CF perms we were able
// to collect regardless
}
return false;
} | 3.26 |
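As a rough illustration of how the cell-scope check complements the table-scope check above, the hedged sketch below grants read access when either level passes; the helper name and wiring are assumptions.

// Illustrative only: combines the table- and cell-scope checks defined in this class.
static boolean canReadCell(AuthManager authManager, User user, TableName table, Cell cell) {
  return authManager.authorizeUserTable(user, table, CellUtil.cloneFamily(cell),
      CellUtil.cloneQualifier(cell), Permission.Action.READ)
      || authManager.authorizeCell(user, table, cell, Permission.Action.READ);
}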
hbase_AuthManager_refreshNamespaceCacheFromWritable_rdh | /**
* Update acl info for namespace.
*
* @param namespace
* namespace
* @param data
* updated acl data
* @throws IOException
 * exception when deserializing data
*/
public void refreshNamespaceCacheFromWritable(String namespace, byte[] data) throws IOException {
if ((data != null) && (data.length > 0)) {
try {
ListMultimap<String, Permission> perms = PermissionStorage.readPermissions(data, conf);
if (perms != null) {
updateNamespaceCache(namespace, perms);
}
} catch (DeserializationException e) {
throw new IOException(e);
}
} else {
LOG.debug("Skipping permission cache refresh because writable data is empty");
}
} | 3.26 |
hbase_AuthManager_accessUserTable_rdh | /**
* Checks if the user has access to the full table or at least a family/qualifier for the
* specified action.
*
* @param user
* user name
* @param table
* table name
* @param action
* action in one of [Read, Write, Create, Exec, Admin]
* @return true if the user has access to the table, false otherwise
*/
public boolean accessUserTable(User user, TableName table, Permission.Action action) {
if (user == null) {
return false;
}
if (table == null) {
table = PermissionStorage.ACL_TABLE_NAME;
}
if (authorizeUserNamespace(user, table.getNamespaceAsString(), action)) {
return true;
}
PermissionCache<TablePermission> tblPermissions = tableCache.getOrDefault(table, TBL_NO_PERMISSION);
if (hasAccessTable(tblPermissions.get(user.getShortName()), action)) {
return true;
}
for (String group : user.getGroupNames()) {
if (hasAccessTable(tblPermissions.get(AuthUtil.toGroupEntry(group)), action)) {
return true;
}
}
return false;
} | 3.26 |
hbase_AuthManager_updateTableCache_rdh | /**
* Updates the internal table permissions cache for specified table.
*
* @param table
* updated table name
* @param tablePerms
* new table permissions
*/
private void updateTableCache(TableName table, ListMultimap<String, Permission> tablePerms) {
PermissionCache<TablePermission> cacheToUpdate = tableCache.getOrDefault(table, new PermissionCache<>());
clearCache(cacheToUpdate);
updateCache(tablePerms, cacheToUpdate);
tableCache.put(table, cacheToUpdate);
mtime.incrementAndGet();
} | 3.26 |
hbase_AuthManager_authorizeUserGlobal_rdh | /**
* Check if user has given action privilige in global scope.
*
* @param user
* user name
* @param action
* one of action in [Read, Write, Create, Exec, Admin]
* @return true if user has, false otherwise
*/
public boolean authorizeUserGlobal(User user, Permission.Action action) {
if (user == null) {
return false;
}
if (Superusers.isSuperUser(user)) {
return true;
}
if (authorizeGlobal(globalCache.get(user.getShortName()), action)) {
return true;
}
for (String group : user.getGroupNames()) {
if (authorizeGlobal(globalCache.get(AuthUtil.toGroupEntry(group)), action)) {
return true;
}
}
return false;
} | 3.26 |
hbase_AuthManager_authorizeUserFamily_rdh | /**
* Check if user has given action privilige in table:family scope. This method is for backward
* compatibility.
*
* @param user
* user name
* @param table
* table name
* @param family
* family names
* @param action
* one of action in [Read, Write, Create, Exec, Admin]
* @return true if user has, false otherwise
*/
public boolean authorizeUserFamily(User user, TableName table, byte[] family, Permission.Action action) {
PermissionCache<TablePermission> tblPermissions = tableCache.getOrDefault(table, TBL_NO_PERMISSION);
if (authorizeFamily(tblPermissions.get(user.getShortName()), table, family, action)) {
return true;
}
for (String group : user.getGroupNames()) {
if (authorizeFamily(tblPermissions.get(AuthUtil.toGroupEntry(group)), table, family, action)) {
return true;
}
}
return false;
} | 3.26 |
hbase_AuthManager_updateGlobalCache_rdh | /**
* Updates the internal global permissions cache.
*
* @param globalPerms
* new global permissions
*/
private void updateGlobalCache(ListMultimap<String, Permission> globalPerms) {
globalCache.clear();
for (String name : globalPerms.keySet()) {
for (Permission permission : globalPerms.get(name)) {
// Before 2.2, the global permission which storage in zk is not right. It was saved as a
// table permission. So here need to handle this for compatibility. See HBASE-22503.
if (permission instanceof TablePermission) {
globalCache.put(name, new GlobalPermission(permission.getActions()));
} else {
globalCache.put(name, ((GlobalPermission) (permission)));
}
}
}
mtime.incrementAndGet();
} | 3.26 |
hbase_AuthManager_updateNamespaceCache_rdh | /**
* Updates the internal namespace permissions cache for specified namespace.
*
* @param namespace
* updated namespace
* @param nsPerms
* new namespace permissions
*/
private void updateNamespaceCache(String namespace, ListMultimap<String, Permission> nsPerms) {
PermissionCache<NamespacePermission> cacheToUpdate = namespaceCache.getOrDefault(namespace, new PermissionCache<>());
clearCache(cacheToUpdate);
updateCache(nsPerms, cacheToUpdate);
namespaceCache.put(namespace, cacheToUpdate);
mtime.incrementAndGet();
} | 3.26 |
hbase_AuthManager_refreshTableCacheFromWritable_rdh | /**
* Update acl info for table.
*
* @param table
* name of table
* @param data
* updated acl data
* @throws IOException
 * exception when deserializing data
*/
public void refreshTableCacheFromWritable(TableName table, byte[] data) throws IOException {
if ((data != null) && (data.length > 0)) {
try {
ListMultimap<String, Permission> perms = PermissionStorage.readPermissions(data, conf);
if (perms != null) {
if (Bytes.equals(table.getName(), PermissionStorage.ACL_GLOBAL_NAME)) {
updateGlobalCache(perms);
} else {
updateTableCache(table, perms);
}
}
} catch (DeserializationException e) {
throw new IOException(e);
}
} else {
LOG.info("Skipping permission cache refresh because writable data is empty");
}
} | 3.26 |
hbase_ZstdCompressor_maxCompressedLength_rdh | // Package private
static int maxCompressedLength(final int len) {
return ((int) (Zstd.compressBound(len)));
} | 3.26 |
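A sketch of how the bound is typically used (assuming same-package access to maxCompressedLength and the one-shot Zstd.compress API from zstd-jni; the compression level is arbitrary):

// Sketch: size the destination buffer from the bound, compress, then trim to the written length.
static byte[] compressOnce(byte[] src) {
  byte[] dst = new byte[maxCompressedLength(src.length)];
  long written = Zstd.compress(dst, src, 3); // level 3 picked only for the example
  return java.util.Arrays.copyOf(dst, (int) written);
}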
hbase_CoprocessorClassLoader_clearCache_rdh | // This method is used in unit test
public static void clearCache() {
classLoadersCache.clear();
} | 3.26 |
hbase_CoprocessorClassLoader_getAllCached_rdh | // This method is used in unit test
public static Collection<? extends ClassLoader> getAllCached() {
return classLoadersCache.values();
} | 3.26 |
hbase_CoprocessorClassLoader_getClassLoader_rdh | /**
* Get a CoprocessorClassLoader for a coprocessor jar path from cache. If not in cache, create
* one.
*
* @param path
* the path to the coprocessor jar file to load classes from
* @param parent
* the parent class loader for exempted classes
* @param pathPrefix
* a prefix used in temp path name to store the jar file locally
* @param conf
* the configuration used to create the class loader, if needed
* @return a CoprocessorClassLoader for the coprocessor jar path
*/
public static CoprocessorClassLoader getClassLoader(final Path path, final ClassLoader parent,
final String pathPrefix, final Configuration conf) throws IOException {
CoprocessorClassLoader cl = getIfCached(path);
String pathStr = path.toString();
if (cl != null) {
LOG.debug((("Found classloader " + cl) + " for ") + pathStr);
return cl;
}
if (path.getFileSystem(conf).isFile(path) && (!pathStr.endsWith(".jar"))) {
throw new IOException(pathStr + ": not a jar file?");
}
Lock lock = locker.acquireLock(pathStr);
try {
cl = getIfCached(path);
if (cl != null) {
LOG.debug((("Found classloader " + cl) + " for ") + pathStr);
return cl;
}
cl = AccessController.doPrivileged(new PrivilegedAction<CoprocessorClassLoader>() {
@Override
public CoprocessorClassLoader run() {
return new CoprocessorClassLoader(parent);
}
});
cl.init(path, pathPrefix, conf);
// Cache class loader as a weak value, will be GC'ed when no reference left
CoprocessorClassLoader prev = classLoadersCache.putIfAbsent(path, cl);
if (prev != null) {
// Lost update race, use already added class loader
LOG.warn(("THIS SHOULD NOT HAPPEN, a class loader" + " is already cached for ") + pathStr);
cl = prev;
}
return cl;
} finally {
lock.unlock();
}
} | 3.26 |
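A minimal sketch of resolving a coprocessor class through the cached loader; the jar path, class name and path prefix are placeholders for this example.

// Illustrative only: fetch (or create) the cached loader for a jar, then load a class from it.
static Class<?> loadCoprocessorClass(Path jarPath, String className, Configuration conf)
    throws Exception {
  ClassLoader parent = CoprocessorClassLoader.class.getClassLoader();
  CoprocessorClassLoader cl =
      CoprocessorClassLoader.getClassLoader(jarPath, parent, "example-prefix", conf);
  return Class.forName(className, true, cl);
}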
hbase_CoprocessorClassLoader_getIfCached_rdh | // This method is used in unit test
public static CoprocessorClassLoader getIfCached(final Path path) {
Preconditions.checkNotNull(path, "The jar path is null!");
return classLoadersCache.get(path);
} | 3.26 |
hbase_CoprocessorClassLoader_isClassExempt_rdh | /**
* Determines whether the given class should be exempt from being loaded by this ClassLoader.
*
* @param name
* the name of the class to test.
* @return true if the class should *not* be loaded by this ClassLoader; false otherwise.
*/
protected boolean isClassExempt(String name, String[] includedClassPrefixes) {
if (includedClassPrefixes != null) {
for (String clsName : includedClassPrefixes) {
if (name.startsWith(clsName)) {
return false;
}
}
}
for (String exemptPrefix : CLASS_PREFIX_EXEMPTIONS) {
if (name.startsWith(exemptPrefix)) {
return true;
}
}
return false;
} | 3.26 |
hbase_LogLevel_main_rdh | /**
* A command line implementation
*/
public static void main(String[] args) throws Exception {
CLI cli = new CLI(new Configuration());
System.exit(cli.m0(args));
} | 3.26 |
hbase_LogLevel_doSetLevel_rdh | /**
* Send HTTP request to set log level.
*
* @throws HadoopIllegalArgumentException
* if arguments are invalid.
* @throws Exception
* if unable to connect
*/
private void doSetLevel() throws Exception {
process((((((protocol + "://") + hostName) + "/logLevel?log=") + className) + "&level=") + level);
} | 3.26 |
hbase_LogLevel_process_rdh | /**
* Configures the client to send HTTP request to the URL. Supports SPENGO for authentication.
*
* @param urlString
* URL and query string to the daemon's web UI
* @throws Exception
* if unable to connect
*/
private void process(String urlString) throws Exception {
URL url = new URL(urlString);
System.out.println("Connecting to " + url);
HttpURLConnection connection = connect(url);
HttpExceptionUtils.validateResponse(connection, 200);
// read from the servlet
try (InputStreamReader streamReader = new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8);
BufferedReader bufferedReader = new BufferedReader(streamReader)) {
bufferedReader.lines().filter(Objects::nonNull).filter(line -> line.startsWith(MARKER)).forEach(line -> System.out.println(TAG.matcher(line).replaceAll("")));
} catch (IOException ioe) {
System.err.println("" + ioe);
}
} | 3.26 |
hbase_LogLevel_doGetLevel_rdh | /**
* Send HTTP request to get log level.
*
* @throws HadoopIllegalArgumentException
* if arguments are invalid.
* @throws Exception
* if unable to connect
*/
private void doGetLevel() throws Exception {
process((((protocol + "://") + hostName) + "/logLevel?log=") + className);
} | 3.26 |
hbase_LogLevel_sendLogLevelRequest_rdh | /**
* Send HTTP request to the daemon.
*
* @throws HadoopIllegalArgumentException
* if arguments are invalid.
* @throws Exception
* if unable to connect
*/
private void sendLogLevelRequest() throws HadoopIllegalArgumentException, Exception {
switch (operation) {
case GETLEVEL :
doGetLevel();
break;
case SETLEVEL :
doSetLevel();
break;
default :
throw new HadoopIllegalArgumentException("Expect either -getlevel or -setlevel");
}
} | 3.26 |
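For illustration, the main entry point shown earlier drives these requests from the command line; the argument order below (operation flag, host:port, logger name, then level for -setlevel) is an assumption based on the fields used here, and main calls System.exit, so it is meant to run as its own process.

// Hypothetical invocation; only the -getlevel/-setlevel flags are confirmed by the code above.
public static void demo() throws Exception {
  LogLevel.main(new String[] { "-setlevel", "localhost:16010", "org.apache.hadoop.hbase", "DEBUG" });
}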
hbase_AuthResult_concatenateExtraParams_rdh | /**
* Returns extra parameter key/value string
*/
private String concatenateExtraParams() {
final StringBuilder sb = new StringBuilder();
boolean first = true;
for (Entry<String, String> entry : extraParams.entrySet()) {
if ((entry.getKey() != null) && (entry.getValue() != null)) {
if (!first) {
sb.append(',');
}
first = false;
sb.append(entry.getKey() + '=');
sb.append(entry.getValue());
}
}
return sb.toString();
} | 3.26 |
hbase_IdReadWriteLockWithObjectPool_getLock_rdh | /**
* Get the ReentrantReadWriteLock corresponding to the given id
*
* @param id
* an arbitrary number to identify the lock
*/
@Override
public ReentrantReadWriteLock getLock(T id) {
lockPool.purge();
ReentrantReadWriteLock readWriteLock = lockPool.get(id);
return readWriteLock;
} | 3.26 |
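A usage sketch for the per-id lock (the lock instance is assumed to be supplied by the caller): fetch the ReentrantReadWriteLock for an id and guard the critical section with its write side.

// Illustrative only: per-id exclusive section guarded by the pooled lock.
static void withWriteLock(IdReadWriteLockWithObjectPool<Long> idLock, long id, Runnable action) {
  ReentrantReadWriteLock lock = idLock.getLock(id);
  lock.writeLock().lock();
  try {
    action.run(); // work that must be exclusive per id
  } finally {
    lock.writeLock().unlock();
  }
}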
hbase_IdReadWriteLockWithObjectPool_purgeAndGetEntryPoolSize_rdh | /**
* For testing
*/
int purgeAndGetEntryPoolSize() {
gc();
Threads.sleep(200);
lockPool.purge();
return lockPool.size();
} | 3.26 |
hbase_MemStoreSnapshot_getId_rdh | /**
* Returns snapshot's identifier.
*/
public long getId() {
return id;
} | 3.26 |
hbase_MemStoreSnapshot_getTimeRangeTracker_rdh | /**
* Returns {@link TimeRangeTracker} for all the Cells in the snapshot.
*/
public TimeRangeTracker getTimeRangeTracker() {
return timeRangeTracker;
}
/**
* Create new {@link SnapshotSegmentScanner}s for iterating over the snapshot. <br/>
* NOTE:Here when create new {@link SnapshotSegmentScanner}s, {@link Segment#incScannerCount} is
* invoked in the {@link SnapshotSegmentScanner} ctor,so after we use these
* {@link SnapshotSegmentScanner}s, we must call {@link SnapshotSegmentScanner#close} to invoke
* {@link Segment#decScannerCount}.
*
* @return {@link KeyValueScanner}s(Which type is {@link SnapshotSegmentScanner} | 3.26 |
hbase_MemStoreSnapshot_getCellsCount_rdh | /**
* Returns Number of Cells in this snapshot.
*/
public int getCellsCount() {
return cellsCount;
} | 3.26 |
hbase_MemStoreSnapshot_isTagsPresent_rdh | /**
* Returns true if tags are present in this snapshot
*/
public boolean isTagsPresent() {
return this.tagsPresent;
} | 3.26 |
hbase_NewVersionBehaviorTracker_isDeleted_rdh | /**
* This method is not idempotent, we will save some info to judge VERSION_MASKED.
*
* @param cell
* - current cell to check if deleted by a previously seen delete
* @return We don't distinguish DeleteColumn and DeleteFamily. We only return code for column.
*/
@Override
public DeleteResult isDeleted(Cell cell) {
long duplicateMvcc = prepare(cell);
for (Map.Entry<Long, DeleteVersionsNode> e : delColMap.tailMap(cell.getSequenceId()).entrySet()) {
DeleteVersionsNode node = e.getValue();
long deleteMvcc = Long.MAX_VALUE;
SortedSet<Long> deleteVersionMvccs = node.deletesMap.get(cell.getTimestamp());
if (deleteVersionMvccs != null) {
SortedSet<Long> tail = deleteVersionMvccs.tailSet(cell.getSequenceId());
if (!tail.isEmpty()) {
deleteMvcc = tail.first();
}
}
SortedMap<Long, SortedSet<Long>> subMap = node.mvccCountingMap.subMap(cell.getSequenceId(), true, Math.min(duplicateMvcc, deleteMvcc), true);
for (Map.Entry<Long, SortedSet<Long>> v17 : subMap.entrySet()) {
if (v17.getValue().size() >= maxVersions) {
return DeleteResult.VERSION_MASKED;
}
v17.getValue().add(cell.getSequenceId());
}
if (deleteMvcc < Long.MAX_VALUE) {
return DeleteResult.VERSION_DELETED;
}
if (cell.getTimestamp() <= node.ts) {
return DeleteResult.COLUMN_DELETED;
}
}
if (duplicateMvcc < Long.MAX_VALUE) {
return DeleteResult.VERSION_MASKED;
}
return DeleteResult.NOT_DELETED;
} | 3.26 |
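To make the return codes concrete, a scanner-side caller would typically branch on the DeleteResult as in the hedged sketch below; only the codes produced by this method are handled, and the mapping to visibility is an assumption for illustration.

// Sketch: interpreting the codes returned by isDeleted above.
static boolean isCellVisible(DeleteResult result) {
  switch (result) {
    case VERSION_DELETED: // an exact-timestamp delete with a newer sequence id covers the cell
    case COLUMN_DELETED:  // a column/family delete whose ts is >= the cell's ts covers it
    case VERSION_MASKED:  // enough newer puts exist, so max-versions hides this version
      return false;
    case NOT_DELETED:
    default:
      return true;
  }
}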