name (stringlengths 12–178) | code_snippet (stringlengths 8–36.5k) | score (float64 3.26–3.68) |
---|---|---|
hbase_RegionStates_getRegionsOfTable_rdh | /**
 * Returns the regions of the table after applying the given filter.
*/
private List<RegionInfo> getRegionsOfTable(TableName table, Predicate<RegionStateNode> filter) {
return getTableRegionStateNodes(table).stream().filter(filter).map(n -> n.getRegionInfo()).collect(Collectors.toList());
} | 3.26 |
hbase_RegionStates_logSplit_rdh | /**
* Called after we've split all logs on a crashed Server.
*
* @see #logSplitting(ServerName)
*/
public void logSplit(final ServerName serverName) {
setServerState(serverName, ServerState.OFFLINE);
} | 3.26 |
hbase_RegionStates_getRegionStates_rdh | /**
 * Returns a snapshot of region state nodes for all the regions.
*/
public ArrayList<RegionState> getRegionStates() {
final ArrayList<RegionState> regions = new ArrayList<>(regionsMap.size());
for (RegionStateNode node : regionsMap.values()) {
regions.add(node.toRegionState());
}
return regions;
} | 3.26 |
hbase_RegionStates_createRegionStateNode_rdh | // ==========================================================================
// RegionStateNode helpers
// ==========================================================================
RegionStateNode createRegionStateNode(RegionInfo regionInfo) {
synchronized (regionsMapLock) {
RegionStateNode node = regionsMap.computeIfAbsent(regionInfo.getRegionName(), key -> new RegionStateNode(regionInfo, regionInTransition));
if (encodedRegionsMap.get(regionInfo.getEncodedName()) != node) {
encodedRegionsMap.put(regionInfo.getEncodedName(), node);
}
return node;
}
} | 3.26 |
hbase_RegionStates_getOrCreateServer_rdh | // ==========================================================================
// Servers
// ==========================================================================
/**
 * Be judicious calling this method. Call it on server registration ONLY; otherwise you could mess
 * up online server accounting. TODO: Review usage and convert to {@link #getServerNode(ServerName)}
* where we can.
*/
public ServerStateNode getOrCreateServer(final ServerName serverName) {
return serverMap.computeIfAbsent(serverName, key -> new ServerStateNode(key));
} | 3.26 |
hbase_RegionStates_m0_rdh | /**
 * Returns a view of region state nodes for all the regions.
*/
public Collection<RegionStateNode> m0() {
return Collections.unmodifiableCollection(regionsMap.values());
} | 3.26 |
hbase_CellSetModel_addRow_rdh | /**
* Add a row to this cell set
*
* @param row
* the row
*/
public void addRow(RowModel row) {
rows.add(row);
} | 3.26 |
hbase_CellSetModel_getRows_rdh | /**
* Returns the rows
*/
public List<RowModel> getRows() {
return rows;
} | 3.26 |
hbase_Dictionary_write_rdh | /**
 * Helper method to write the dictionary data to the OutputStream
*
* @param out
* the outputstream to which data needs to be written
* @param data
* the data to be written in ByteBuffer
* @param offset
* the offset
* @param length
* length to be written
* @param dict
 * the dictionary whose contents are to be written
*/
public static void write(OutputStream out, ByteBuffer data, int offset, int length, Dictionary dict) throws IOException {
short dictIdx = Dictionary.NOT_IN_DICTIONARY;
if (dict != null) {
dictIdx = dict.findEntry(data, offset, length);
}
if (dictIdx == Dictionary.NOT_IN_DICTIONARY) {
out.write(Dictionary.NOT_IN_DICTIONARY);
StreamUtils.writeRawVInt32(out, length);
ByteBufferUtils.copyBufferToStream(out, data, offset, length);
} else {
StreamUtils.writeShort(out, dictIdx);
}
} | 3.26 |
hbase_ExploringCompactionPolicy_m0_rdh | /**
* Find the total size of a list of store files.
*
* @param potentialMatchFiles
* StoreFile list.
* @return Sum of StoreFile.getReader().length();
*/
private long m0(List<HStoreFile> potentialMatchFiles) {
return potentialMatchFiles.stream().mapToLong(sf -> sf.getReader().length()).sum();
} | 3.26 |
hbase_ExploringCompactionPolicy_filesInRatio_rdh | /**
* Check that all files satisfy the constraint
*
* <pre>
* FileSize(i) <= ( Sum(0,N,FileSize(_)) - FileSize(i)) * Ratio.
* </pre>
*
* @param files
* List of store files to consider as a compaction candidate.
* @param currentRatio
* The ratio to use.
 * @return true if these files satisfy the ratio constraints.
*/
private boolean filesInRatio(List<HStoreFile> files, double currentRatio) {
if (files.size() < 2) {
return true;
}
long totalFileSize = m0(files);
for (HStoreFile file : files) {
long singleFileSize = file.getReader().length();
long sumAllOtherFileSizes = totalFileSize - singleFileSize;
if (singleFileSize > (sumAllOtherFileSizes * currentRatio)) {
return false;
}
}
return true;
} | 3.26 |
hbase_ExploringCompactionPolicy_selectCompactFiles_rdh | /**
 * Select at least one file in the candidates list to compact, by choosing files from the head of
 * the list up to the index at which the accumulated length exceeds the max compaction size. This
 * method supplements the selectSimpleCompaction() method and aims to make sure at least one file
 * can be selected to compact, for compactions like L0 files, which need to compact all files as
 * soon as possible.
*/
public List<HStoreFile> selectCompactFiles(final List<HStoreFile> candidates, int maxFiles, boolean isOffpeak) {
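// Accumulate sizes from the head of the candidate list until the max compaction size is exceeded.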
long selectedSize = 0L;
for (int end = 0; end < Math.min(candidates.size(), maxFiles); end++) {
selectedSize += candidates.get(end).getReader().length();
if (selectedSize >= comConf.getMaxCompactSize(isOffpeak)) {
return candidates.subList(0, end + 1);
}
}
return candidates;
} | 3.26 |
hbase_InternalScan_isCheckOnlyMemStore_rdh | /**
* Returns true if only the MemStore should be checked. False if not.
*
* @return true to only check MemStore
*/
public boolean isCheckOnlyMemStore() {
return memOnly;
} | 3.26 |
hbase_InternalScan_m0_rdh | /**
* StoreFiles will not be scanned. Only MemStore will be scanned.
*/
public void m0() {
memOnly = true;
filesOnly = false;
} | 3.26 |
hbase_InternalScan_checkOnlyStoreFiles_rdh | /**
* MemStore will not be scanned. Only StoreFiles will be scanned.
*/
public void checkOnlyStoreFiles() {
memOnly = false;
filesOnly = true;
} | 3.26 |
hbase_ScannerContext_hasTimeLimit_rdh | /**
* Returns true if the time limit can be enforced in the checker's scope
*/
boolean hasTimeLimit(LimitScope checkerScope) {
return limits.canEnforceTimeLimitFromScope(checkerScope) && ((limits.getTime() > 0) || returnImmediately);
} | 3.26 |
hbase_ScannerContext_incrementBlockProgress_rdh | /**
* Progress towards the block limit has been made. Increment internal track of block progress
*/
void incrementBlockProgress(int blockSize) {
if (blockSize > 0) {
long curBlockSize = progress.getBlockSize();
progress.setBlockSize(curBlockSize + blockSize);
}
} | 3.26 |
hbase_ScannerContext_m0_rdh | /**
* Get the metrics instance. Should only be called after a call to {@link #isTrackingMetrics()}
* has been made to confirm that metrics are indeed being tracked.
*
* @return {@link ServerSideScanMetrics} instance that is tracking metrics for this scan
*/
public ServerSideScanMetrics m0() {
assert isTrackingMetrics();
return this.metrics;
}
/**
*
* @return true if the progress tracked so far in this instance will be considered during an
 *         invocation of {@link InternalScanner#next(java.util.List)} or
 *         {@link RegionScanner#next(java.util.List)}. false when the progress tracked so far
 *         should not be considered and should instead be wiped away via {@link #clearProgress()} | 3.26 |
hbase_ScannerContext_canEnforceTimeLimitFromScope_rdh | /**
* Returns true when the limit can be enforced from the scope of the checker
*/
boolean canEnforceTimeLimitFromScope(LimitScope checkerScope) {
return this.timeScope.canEnforceLimitFromScope(checkerScope);
} | 3.26 |
hbase_ScannerContext_setSizeLimitScope_rdh | /**
*
* @param scope
* The scope in which the size limit will be enforced
*/
void setSizeLimitScope(LimitScope scope) {
limits.setSizeScope(scope);
} | 3.26 |
hbase_ScannerContext_m1_rdh | /**
* Progress towards the batch limit has been made. Increment internal tracking of batch progress
*/
void m1(int batch) {
if (skippingRow) {
return;
}
int currentBatch = progress.getBatch();
progress.setBatch(currentBatch + batch);
} | 3.26 |
hbase_ScannerContext_checkBatchLimit_rdh | /**
*
* @param checkerScope
* The scope that the limit is being checked from
* @return true when the limit is enforceable from the checker's scope and it has been reached
*/
boolean checkBatchLimit(LimitScope checkerScope) {
return ((!skippingRow) && hasBatchLimit(checkerScope)) && (progress.getBatch() >= limits.getBatch());
} | 3.26 |
hbase_ScannerContext_checkSizeLimit_rdh | /**
*
* @param checkerScope
* The scope that the limit is being checked from
* @return true when the limit is enforceable from the checker's scope and it has been reached
*/
boolean checkSizeLimit(LimitScope checkerScope) {
return ((!skippingRow) && hasSizeLimit(checkerScope)) && (((progress.getDataSize() >= limits.getDataSize()) || (progress.getHeapSize() >= limits.getHeapSize())) || (progress.getBlockSize() >= limits.getBlockSize()));
} | 3.26 |
hbase_ScannerContext_incrementSizeProgress_rdh | /**
* Progress towards the size limit has been made. Increment internal tracking of size progress
*/
void incrementSizeProgress(long dataSize, long heapSize) {
if (skippingRow) {
return;
}
long curDataSize = progress.getDataSize();
progress.m4(curDataSize + dataSize);
long curHeapSize = progress.getHeapSize();
progress.setHeapSize(curHeapSize + heapSize);
} | 3.26 |
hbase_ScannerContext_setSizeScope_rdh | /**
* Change the scope in which the size limit is enforced
*/
void setSizeScope(LimitScope scope) {
this.f0 = scope;
} | 3.26 |
hbase_ScannerContext_getSkippingRow_rdh | /**
* In this mode, only block size progress is tracked, and limits are ignored. We set this mode
 * when skipping to next row, in which case all cells returned are thrown away so should not count
* towards progress.
*
* @return true if we are in skipping row mode.
*/
public boolean getSkippingRow() {
return skippingRow;
} | 3.26 |
hbase_ScannerContext_limitReached_rdh | /**
* Returns true when the state indicates that a limit has been reached and scan should stop
*/
public boolean limitReached() {
return this.limitReached;
} | 3.26 |
hbase_ScannerContext_setScannerState_rdh | /**
* Note that this is not a typical setter. This setter returns the {@link NextState} that was
* passed in so that methods can be invoked against the new state. Furthermore, this pattern
* allows the {@link NoLimitScannerContext} to cleanly override this setter and simply return the
* new state, thus preserving the immutability of {@link NoLimitScannerContext}
*
* @return The state that was passed in.
*/
NextState setScannerState(NextState state) {
if (!NextState.isValidState(state)) {
throw new IllegalArgumentException("Cannot set to invalid state: " + state);
}
this.scannerState = state;
return state;
} | 3.26 |
hbase_ScannerContext_m3_rdh | /**
* Returns {@link LimitScope} indicating scope in which the time limit is enforced
*/
LimitScope m3() {
return this.timeScope;
} | 3.26 |
hbase_ScannerContext_checkAnyLimitReached_rdh | /**
*
* @param checkerScope
* The scope that the limits are being checked from
* @return true when some limit is enforceable from the checker's scope and it has been reached
*/
boolean checkAnyLimitReached(LimitScope checkerScope) {
return (checkSizeLimit(checkerScope) || checkBatchLimit(checkerScope)) || checkTimeLimit(checkerScope);
} | 3.26 |
hbase_ScannerContext_clearProgress_rdh | /**
* Clear away any progress that has been made so far. All progress fields are reset to initial
* values. Only clears progress that should reset between rows. {@link #getBlockSizeProgress()} is
* not reset because it increments for all blocks scanned whether the result is included or
* filtered.
*/
void clearProgress() {
progress.setFields(0, 0, 0, getBlockSizeProgress());
} | 3.26 |
hbase_ScannerContext_setTimeScope_rdh | /**
* Change the scope in which the time limit is enforced
*/
void setTimeScope(LimitScope scope) {
this.timeScope = scope;
} | 3.26 |
hbase_ScannerContext_checkTimeLimit_rdh | /**
*
* @param checkerScope
* The scope that the limit is being checked from. The time limit is always
 * checked against {@link EnvironmentEdgeManager#currentTime()}
* @return true when the limit is enforceable from the checker's scope and it has been reached
*/
boolean checkTimeLimit(LimitScope checkerScope) {
return ((!skippingRow) && hasTimeLimit(checkerScope)) && (returnImmediately || (EnvironmentEdgeManager.currentTime() >= limits.getTime()));
} | 3.26 |
hbase_ScannerContext_canEnforceSizeLimitFromScope_rdh | /**
* Returns true when the limit can be enforced from the scope of the checker
*/
boolean canEnforceSizeLimitFromScope(LimitScope checkerScope) {
return this.f0.canEnforceLimitFromScope(checkerScope);
} | 3.26 |
hbase_ScannerContext_getSizeScope_rdh | /**
* Returns {@link LimitScope} indicating scope in which the size limit is enforced
*/
LimitScope getSizeScope() {
return this.f0;
} | 3.26 |
hbase_ScannerContext_hasAnyLimit_rdh | /**
* Returns true if any limit can be enforced within the checker's scope
*/
boolean hasAnyLimit(LimitScope checkerScope) {
return (hasBatchLimit(checkerScope) || hasSizeLimit(checkerScope)) || hasTimeLimit(checkerScope);
} | 3.26 |
hbase_ScannerContext_hasBatchLimit_rdh | /**
* Returns true if the batch limit can be enforced in the checker's scope
*/
boolean hasBatchLimit(LimitScope checkerScope) {
return limits.canEnforceBatchLimitFromScope(checkerScope) && (limits.getBatch() > 0);
} | 3.26 |
hbase_ScannerContext_setSkippingRow_rdh | /**
*
* @param skippingRow
* set true to cause disabling of collecting per-cell progress or enforcing any
* limits. This is used when trying to skip over all cells in a row, in which
* case those cells are thrown away so should not count towards progress.
*/
void setSkippingRow(boolean skippingRow) {
this.skippingRow = skippingRow;
} | 3.26 |
hbase_ScannerContext_setTimeLimitScope_rdh | /**
*
* @param scope
* The scope in which the time limit will be enforced
*/
void setTimeLimitScope(LimitScope scope) {
limits.setTimeScope(scope);
} | 3.26 |
hbase_ScannerContext_canEnforceBatchLimitFromScope_rdh | /**
* Returns true when the limit can be enforced from the scope of the checker
*/
boolean canEnforceBatchLimitFromScope(LimitScope checkerScope) {
return LimitScope.BETWEEN_CELLS.canEnforceLimitFromScope(checkerScope);
} | 3.26 |
hbase_ScannerContext_hasSizeLimit_rdh | /**
* Returns true if the size limit can be enforced in the checker's scope
*/
boolean hasSizeLimit(LimitScope checkerScope) {
return limits.canEnforceSizeLimitFromScope(checkerScope) && (((limits.getDataSize() > 0)
|| (limits.getHeapSize() > 0)) || (limits.getBlockSize() > 0));
} | 3.26 |
hbase_ScannerContext_setFields_rdh | /**
* Set all fields together.
*/
void setFields(int batch, long dataSize, long heapSize, long blockSize) {
setBatch(batch);
m4(dataSize);
setHeapSize(heapSize);
setBlockSize(blockSize);
} | 3.26 |
hbase_HBaseZKTestingUtility_setupClusterTestDir_rdh | /**
* Creates a directory for the cluster, under the test data
*/
protected void setupClusterTestDir() {
if (clusterTestDir != null) {
return;
}
// Using randomUUID ensures that multiple clusters can be launched by
// a same test, if it stops & starts them
Path testDir = getDataTestDir("cluster_" + getRandomUUID().toString());
clusterTestDir = new File(testDir.toString()).getAbsoluteFile();
// Have it cleaned up on exit
boolean b = deleteOnExit();
if (b) {
clusterTestDir.deleteOnExit();
}
LOG.info((("Created new mini-cluster data directory: " + clusterTestDir) + ", deleteOnExit=") + b);
} | 3.26 |
hbase_HBaseZKTestingUtility_shutdownMiniZKCluster_rdh | /**
* Shuts down zk cluster created by call to {@link #startMiniZKCluster()} or does nothing.
*
* @see #startMiniZKCluster()
*/
public void shutdownMiniZKCluster() throws IOException {
if ((!f0) && (this.zkCluster != null)) {
this.zkCluster.shutdown();
this.zkCluster = null;
}
} | 3.26 |
hbase_HBaseZKTestingUtility_getZooKeeperWatcher_rdh | /**
* Gets a ZKWatcher.
*/
public static ZKWatcher getZooKeeperWatcher(HBaseZKTestingUtility testUtil) throws IOException {
return new ZKWatcher(testUtil.getConfiguration(), "unittest", new Abortable() {
boolean aborted = false;
@Override
public void abort(String why, Throwable e) {
aborted = true;
throw new RuntimeException("Fatal ZK error, why=" + why, e);
}
@Override
public boolean isAborted() {
return aborted;
}
});
} | 3.26 |
hbase_HBaseZKTestingUtility_m1_rdh | /**
* Returns True if we removed the test dirs
*/
@Override
public boolean m1() {
boolean ret = super.cleanupTestDir();
if (deleteDir(this.clusterTestDir)) {
this.clusterTestDir = null;
return ret;
}
return false;
} | 3.26 |
hbase_HBaseZKTestingUtility_startMiniZKCluster_rdh | /**
* Call this if you only want a zk cluster.
*
* @see #shutdownMiniZKCluster()
* @return zk cluster started.
*/
public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum, int... clientPortList) throws Exception {
setupClusterTestDir();
return m0(clusterTestDir, zooKeeperServerNum, clientPortList);
} | 3.26 |
hbase_HBaseZKTestingUtility_m0_rdh | /**
* Start a mini ZK cluster. If the property "test.hbase.zookeeper.property.clientPort" is set the
* port mentioned is used as the default port for ZooKeeper.
*/
private MiniZooKeeperCluster m0(File dir, int zooKeeperServerNum, int[] clientPortList) throws Exception {
if (this.zkCluster != null) {
throw new IOException("Cluster already running at " + dir);
}
this.f0 = false;
this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
int defPort = this.conf.getInt("test.hbase.zookeeper.property.clientPort", 0);
if (defPort > 0) {
// If there is a port in the config file, we use it.
this.zkCluster.setDefaultClientPort(defPort);
}
if (clientPortList != null) {
// Ignore extra client ports
int clientPortListSize = Math.min(clientPortList.length, zooKeeperServerNum);
for (int i = 0; i < clientPortListSize; i++) {
this.zkCluster.addClientPort(clientPortList[i]);
}
}
int clientPort = this.zkCluster.startup(dir, zooKeeperServerNum);
this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, Integer.toString(clientPort));
return this.zkCluster;
} | 3.26 |
hbase_StoreUtils_getMaxMemStoreTSInList_rdh | /**
* Return the largest memstoreTS found across all storefiles in the given list. Store files that
* were created by a mapreduce bulk load are ignored, as they do not correspond to any specific
* put operation, and thus do not have a memstoreTS associated with them.
*/
public static OptionalLong getMaxMemStoreTSInList(Collection<HStoreFile> sfs) {
return sfs.stream().filter(sf -> !sf.isBulkLoadResult()).mapToLong(HStoreFile::getMaxMemStoreTS).max();
} | 3.26 |
hbase_StoreUtils_getChecksumType_rdh | /**
* Returns the configured checksum algorithm.
*
* @param conf
* The configuration
* @return The checksum algorithm that is set in the configuration
*/
public static ChecksumType getChecksumType(Configuration conf) {
return ChecksumType.nameToType(conf.get(HConstants.CHECKSUM_TYPE_NAME, ChecksumType.getDefaultChecksumType().getName()));
} | 3.26 |
hbase_StoreUtils_getLowestTimestamp_rdh | /**
* Gets lowest timestamp from candidate StoreFiles
*/
public static long getLowestTimestamp(Collection<HStoreFile> candidates) throws IOException {
long minTs = Long.MAX_VALUE;
for (HStoreFile storeFile : candidates) {
minTs = Math.min(minTs, storeFile.getModificationTimestamp());
}
return minTs;
} | 3.26 |
hbase_StoreUtils_getFileSplitPoint_rdh | /**
* Gets the approximate mid-point of the given file that is optimal for use in splitting it.
*
* @param file
* the store file
* @param comparator
* Comparator used to compare KVs.
* @return The split point row, or null if splitting is not possible, or reader is null.
*/
static Optional<byte[]> getFileSplitPoint(HStoreFile file, CellComparator comparator) throws IOException {
StoreFileReader reader = file.getReader();
if (reader == null) {
LOG.warn(("Storefile " + file) + " Reader is null; cannot get split point");
return Optional.empty();
}
// Get first, last, and mid keys. Midkey is the key that starts block
// in middle of hfile. Has column and timestamp. Need to return just
// the row we want to split on as midkey.
Optional<Cell> optionalMidKey = reader.midKey();
if (!optionalMidKey.isPresent()) {
return Optional.empty();
}
Cell midKey = optionalMidKey.get();
Cell firstKey = reader.getFirstKey().get();
Cell lastKey = reader.getLastKey().get();
// if the midkey is the same as the first or last keys, we cannot (ever) split this region.
if ((comparator.compareRows(midKey, firstKey) == 0) || (comparator.compareRows(midKey, lastKey) == 0)) {
if (LOG.isDebugEnabled()) {
LOG.debug("cannot split {} because midkey is the same as first or last row", file);
}
return Optional.empty();
}
return Optional.of(CellUtil.cloneRow(midKey));
} | 3.26 |
hbase_StoreUtils_getBytesPerChecksum_rdh | /**
* Returns the configured bytesPerChecksum value.
*
* @param conf
* The configuration
* @return The bytesPerChecksum that is set in the configuration
*/
public static int getBytesPerChecksum(Configuration conf) {
return conf.getInt(HConstants.BYTES_PER_CHECKSUM, HFile.DEFAULT_BYTES_PER_CHECKSUM);
} | 3.26 |
hbase_StoreUtils_getLargestFile_rdh | /**
* Gets the largest file (with reader) out of the list of files.
*
* @param candidates
* The files to choose from.
* @return The largest file; null if no file has a reader.
*/
static Optional<HStoreFile> getLargestFile(Collection<HStoreFile> candidates) {
return candidates.stream().filter(f -> f.getReader() != null).max((f1, f2) -> Long.compare(f1.getReader().length(), f2.getReader().length()));
} | 3.26 |
hbase_StoreUtils_getMaxSequenceIdInList_rdh | /**
* Return the highest sequence ID found across all storefiles in the given list.
*/
public static OptionalLong getMaxSequenceIdInList(Collection<HStoreFile> sfs) {
return sfs.stream().mapToLong(HStoreFile::getMaxSequenceId).max();
} | 3.26 |
hbase_StoreUtils_getDeterministicRandomSeed_rdh | /**
* Creates a deterministic hash code for store file collection.
*/
public static OptionalInt getDeterministicRandomSeed(Collection<HStoreFile> files) {
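// Seed is the hash code of the first store file's name, so the same leading file always yields the same seed.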
return files.stream().mapToInt(f -> f.getPath().getName().hashCode()).findFirst();
} | 3.26 |
hbase_StoreUtils_getSplitPoint_rdh | /**
* Gets the mid point of the largest file passed in as split point.
*/
static Optional<byte[]> getSplitPoint(Collection<HStoreFile> storefiles, CellComparator comparator) throws IOException {
Optional<HStoreFile> largestFile = StoreUtils.getLargestFile(storefiles);
return largestFile.isPresent() ? StoreUtils.getFileSplitPoint(largestFile.get(), comparator) : Optional.empty();
} | 3.26 |
hbase_FSWALEntry_getFamilyNames_rdh | /**
 * Returns the family names which are affected by this edit.
*/
Set<byte[]> getFamilyNames() {
return familyNames;
} | 3.26 |
hbase_FSWALEntry_getTxid_rdh | /**
* Returns The transaction id of this edit.
*/
long getTxid() {
return this.txid;
} | 3.26 |
hbase_FSWALEntry_m1_rdh | /**
* Here is where a WAL edit gets its sequenceid. SIDE-EFFECT is our stamping the sequenceid into
* every Cell AND setting the sequenceid into the MVCC WriteEntry!!!!
*
* @return The sequenceid we stamped on this edit.
*/
long m1(MultiVersionConcurrencyControl.WriteEntry we) throws IOException {
long v3 = we.getWriteNumber();
if ((!this.getEdit().isReplay()) && inMemstore) {
for (Cell c : getEdit().getCells()) {
PrivateCellUtil.setSequenceId(c, v3);
}
}
getKey().setWriteEntry(we);
return v3;
} | 3.26 |
hbase_ReplicationSourceWALActionListener_scopeWALEdits_rdh | /**
* Utility method used to set the correct scopes on each log key. Doesn't set a scope on keys from
 * compaction WAL edits, or if the scope is local.
*
* @param logKey
* Key that may get scoped according to its edits
* @param logEdit
* Edits used to lookup the scopes
*/
static void scopeWALEdits(WALKey logKey, WALEdit logEdit, Configuration conf) {
// For bulk load replication we need meta family to know the file we want to replicate.
if (ReplicationUtils.isReplicationForBulkLoadDataEnabled(conf)) {
return;
}
// Allow replication marker row to pass through.
if (WALEdit.isReplicationMarkerEdit(logEdit)) {
return;
}
// For replay, or if all the cells are markers, do not need to store replication scope.
if (logEdit.isReplay() || logEdit.getCells().stream().allMatch(c -> WALEdit.isMetaEditFamily(c))) {
((WALKeyImpl) (logKey)).clearReplicationScope();
}
} | 3.26 |
hbase_TinyLfuBlockCache_asReferencedHeapBlock_rdh | /**
 * The block cached in TinyLfuBlockCache will always be a heap block: on one hand, heap access
 * is faster than off-heap, so the small index or meta blocks cached in CombinedBlockCache
 * benefit a lot; on the other hand, the TinyLfuBlockCache size is always calculated based on
 * the total heap size, so caching an off-heap block in TinyLfuBlockCache would skew the heap
 * accounting. Here we clone the block into a heap block if it is an off-heap block, otherwise
 * we just use the original block. The key point is to maintain the refCnt of the block
 * (HBASE-22127): <br>
 * 1. if we cache the cloned heap block, its refCnt is a totally new one, which is easy to handle; <br>
 * 2. if we cache the original heap block, we are sure it won't be tracked in ByteBuffAllocator's
 * reservoir, so if both RPC and TinyLfuBlockCache release the block it can be garbage collected
 * by the JVM, hence the retain here.
*
* @param buf
* the original block
 * @return a block with a heap memory backend.
*/
private Cacheable asReferencedHeapBlock(Cacheable buf) {
if (buf instanceof HFileBlock) {
HFileBlock blk = ((HFileBlock) (buf));
if (blk.isSharedMem()) {
return HFileBlock.deepCloneOnHeap(blk);
}
}
// The block will be referenced by this TinyLfuBlockCache, so should increase its refCnt here.
return buf.retain();
} | 3.26 |
hbase_TinyLfuBlockCache_recordEviction_rdh | /**
* Records an eviction. The number of eviction operations and evicted blocks are identical, as an
* eviction is triggered immediately when the capacity has been exceeded. An eviction is performed
* asynchronously. See the library's documentation for details on write buffers, batching, and
* maintenance behavior.
*/
private void recordEviction() {
// FIXME: Currently does not capture the insertion time
stats.evicted(Long.MAX_VALUE, true);
stats.evict();
} | 3.26 |
hbase_RSGroupAdminClient_moveTables_rdh | /**
* Move given set of tables to the specified target RegionServer group. This will unassign all of
* a table's region so it can be reassigned to the correct group.
*/
public void moveTables(Set<TableName> tables, String targetGroup) throws IOException {
MoveTablesRequest.Builder builder = MoveTablesRequest.newBuilder().setTargetGroup(targetGroup);
for (TableName tableName : tables) {
builder.addTableName(ProtobufUtil.toProtoTableName(tableName));
if (!admin.tableExists(tableName)) {
throw new TableNotFoundException(tableName);
}
}
try {
stub.moveTables(null, builder.build());
} catch (ServiceException e) {
throw ProtobufUtil.handleRemoteException(e);
}
} | 3.26 |
hbase_RSGroupAdminClient_moveServersAndTables_rdh | /**
* Move given set of servers and tables to the specified target RegionServer group.
*
* @param servers
* set of servers to move
* @param tables
* set of tables to move
* @param targetGroup
* the target group name
* @throws IOException
* if moving the server and tables fail
*/
public void moveServersAndTables(Set<Address> servers, Set<TableName> tables, String targetGroup) throws IOException {
MoveServersAndTablesRequest.Builder builder = MoveServersAndTablesRequest.newBuilder().setTargetGroup(targetGroup);
for (Address el : servers) {
builder.addServers(HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname()).setPort(el.getPort()).build());
}
for (TableName tableName : tables) {
builder.addTableName(ProtobufUtil.toProtoTableName(tableName));
if (!admin.tableExists(tableName)) {
throw new TableNotFoundException(tableName);
}
}
try {
stub.moveServersAndTables(null, builder.build());
} catch (ServiceException e) {
throw ProtobufUtil.handleRemoteException(e);
}
} | 3.26 |
hbase_RSGroupAdminClient_getRSGroupInfoOfTable_rdh | /**
* Gets {@code RSGroupInfo} for the given table's group.
*/
public RSGroupInfo getRSGroupInfoOfTable(TableName tableName) throws IOException {
GetRSGroupInfoOfTableRequest request = GetRSGroupInfoOfTableRequest.newBuilder().setTableName(ProtobufUtil.toProtoTableName(tableName)).build();
try {
GetRSGroupInfoOfTableResponse resp = stub.getRSGroupInfoOfTable(null, request);
if (resp.hasRSGroupInfo()) {
return ProtobufUtil.toGroupInfo(resp.getRSGroupInfo());
}
return null;
} catch (ServiceException e) {
throw ProtobufUtil.handleRemoteException(e);
}
} | 3.26 |
hbase_RSGroupAdminClient_removeRSGroup_rdh | /**
* Removes RegionServer group associated with the given name.
*/
public void removeRSGroup(String name) throws IOException {
RemoveRSGroupRequest request = RemoveRSGroupRequest.newBuilder().setRSGroupName(name).build();
try {
stub.removeRSGroup(null, request);
} catch (ServiceException e) {
throw ProtobufUtil.handleRemoteException(e);
}
} | 3.26 |
hbase_RSGroupAdminClient_removeServers_rdh | /**
 * Remove decommissioned servers from rsgroup.
 * 1. Sometimes we may find the server aborted due to some hardware failure and we must offline
 * the server for repairing. Or we need to move some servers to join other clusters. So we need
 * to remove these servers from the rsgroup.
 * 2. Dead/recovering/live servers will be disallowed.
*
* @param servers
* set of servers to remove
*/
public void removeServers(Set<Address> servers) throws IOException {
Set<HBaseProtos.ServerName> hostPorts = Sets.newHashSet();
for (Address el : servers) {
hostPorts.add(HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname()).setPort(el.getPort()).build());
}
RemoveServersRequest request = RemoveServersRequest.newBuilder().addAllServers(hostPorts).build();
try {
stub.removeServers(null, request);
} catch (ServiceException e) {
throw ProtobufUtil.handleRemoteException(e);
}
} | 3.26 |
hbase_RSGroupAdminClient_balanceRSGroup_rdh | /**
* Balance regions in the given RegionServer group.
*
* @return BalanceResponse details about the balancer run
*/
public BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) throws IOException {
try {
BalanceRSGroupRequest req = ProtobufUtil.createBalanceRSGroupRequest(groupName, request);
return ProtobufUtil.toBalanceResponse(stub.balanceRSGroup(null, req));
} catch (ServiceException e) {
throw ProtobufUtil.handleRemoteException(e);
}
} | 3.26 |
hbase_RSGroupAdminClient_getRSGroupOfServer_rdh | /**
* Retrieve the RSGroupInfo a server is affiliated to
*
* @param hostPort
* HostPort to get RSGroupInfo for
*/
public RSGroupInfo getRSGroupOfServer(Address hostPort) throws IOException {
GetRSGroupInfoOfServerRequest request = GetRSGroupInfoOfServerRequest.newBuilder().setServer(HBaseProtos.ServerName.newBuilder().setHostName(hostPort.getHostname()).setPort(hostPort.getPort()).build()).build();
try {
GetRSGroupInfoOfServerResponse resp = stub.getRSGroupInfoOfServer(null, request);
if (resp.hasRSGroupInfo()) {
return ProtobufUtil.toGroupInfo(resp.getRSGroupInfo());
}
return null;
} catch (ServiceException e) {
throw ProtobufUtil.handleRemoteException(e);
}
} | 3.26 |
hbase_RSGroupAdminClient_getRSGroupInfo_rdh | /**
* Gets {@code RSGroupInfo} for given group name.
*/
public RSGroupInfo getRSGroupInfo(String groupName) throws IOException {
try {
GetRSGroupInfoResponse resp = stub.getRSGroupInfo(null, GetRSGroupInfoRequest.newBuilder().setRSGroupName(groupName).build());
if (resp.hasRSGroupInfo()) {
return ProtobufUtil.toGroupInfo(resp.getRSGroupInfo());
}
return null;
} catch (ServiceException e) {
throw ProtobufUtil.handleRemoteException(e);
}
} | 3.26 |
hbase_RSGroupAdminClient_listRSGroups_rdh | /**
* Lists current set of RegionServer groups.
*/
public List<RSGroupInfo> listRSGroups() throws IOException {
try {
List<RSGroupProtos.RSGroupInfo> v11 = stub.listRSGroupInfos(null, ListRSGroupInfosRequest.getDefaultInstance()).getRSGroupInfoList();
List<RSGroupInfo> result = new ArrayList<>(v11.size());
for (RSGroupProtos.RSGroupInfo entry : v11) {
result.add(ProtobufUtil.toGroupInfo(entry));
}
return result;
} catch (ServiceException e) {
throw ProtobufUtil.handleRemoteException(e);
}
} | 3.26 |
hbase_RSGroupAdminClient_moveServers_rdh | /**
* Move given set of servers to the specified target RegionServer group.
*/
public void moveServers(Set<Address> servers, String targetGroup) throws IOException {
Set<HBaseProtos.ServerName> hostPorts = Sets.newHashSet();
for (Address el : servers) {
hostPorts.add(HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname()).setPort(el.getPort()).build());
}
MoveServersRequest request = MoveServersRequest.newBuilder().setTargetGroup(targetGroup).addAllServers(hostPorts).build();
try {
stub.moveServers(null, request);
} catch (ServiceException e) {
throw ProtobufUtil.handleRemoteException(e);
}
} | 3.26 |
hbase_NamespacesModel_toString_rdh | /* (non-Javadoc)
@see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
for (String namespace : namespaces) {
sb.append(namespace);
sb.append("\n");
}
return sb.toString();
} | 3.26 |
hbase_NamespacesModel_getNamespaces_rdh | /**
* Returns all namespaces
 */
public List<String> getNamespaces() {
return namespaces;
} | 3.26 |
hbase_NamespacesModel_setNamespaces_rdh | /**
*
* @param namespaces
* the namespace name array
 */
public void setNamespaces(List<String> namespaces) {
this.namespaces = namespaces;
} | 3.26 |
hbase_ProcedureWALFile_addToSize_rdh | /**
 * Used to update in-progress log sizes. The FileStatus will report 0 otherwise.
*/
void addToSize(long size) {
this.logSize += size;
} | 3.26 |
hbase_NamespaceDescriptor_setConfiguration_rdh | /**
* Setter for storing a configuration setting in {@link #configuration} map.
*
* @param key
* Config key. Same as XML config key e.g. hbase.something.or.other.
* @param value
* String value. If null, removes the setting.
*/
public void setConfiguration(String key, String value) {
if (value == null) {
removeConfiguration(key);
} else {
configuration.put(key, value);
}
} | 3.26 |
hbase_NamespaceDescriptor_getConfigurationValue_rdh | /**
* Getter for accessing the configuration value by key
*/
public String getConfigurationValue(String key) {
return configuration.get(key);
} | 3.26 |
hbase_NamespaceDescriptor_removeConfiguration_rdh | /**
* Remove a config setting represented by the key from the {@link #configuration} map
*/
public void removeConfiguration(final String key) {
configuration.remove(key);
} | 3.26 |
hbase_NamespaceDescriptor_getConfiguration_rdh | /**
* Getter for fetching an unmodifiable {@link #configuration} map.
*/
public Map<String, String> getConfiguration() {
// shallow pointer copy
return Collections.unmodifiableMap(configuration);
} | 3.26 |
hbase_ZKAuthentication_login_rdh | /**
* Log in the current process using the given configuration keys for the credential file and login
* principal.
* <p>
* <strong>This is only applicable when running on secure hbase</strong> On regular HBase (without
* security features), this will safely be ignored.
* </p>
*
* @param conf
* The configuration data to use
* @param keytabFileKey
* Property key used to configure the path to the credential file
* @param userNameKey
* Property key used to configure the login principal
* @param hostname
* Current hostname to use in any credentials
* @param loginContextProperty
* property name to expose the entry name
* @param loginContextName
* jaas entry name
* @throws IOException
* underlying exception from SecurityUtil.login() call
*/
private static void login(Configuration conf, String keytabFileKey, String userNameKey, String hostname, String loginContextProperty, String loginContextName) throws IOException {
if (!isSecureZooKeeper(conf)) {
return;
}
// User has specified a jaas.conf, keep this one as the good one.
// HBASE_OPTS="-Djava.security.auth.login.config=jaas.conf"
if (System.getProperty("java.security.auth.login.config") != null) {
return;
}
// No keytab specified, no auth
String keytabFilename = conf.get(keytabFileKey);
if (keytabFilename == null) {
LOG.warn("no keytab specified for: {}", keytabFileKey);
return;
}
String principalConfig = conf.get(userNameKey, System.getProperty("user.name"));
String principalName = SecurityUtil.getServerPrincipal(principalConfig, hostname);
// Initialize the "jaas.conf" for keyTab/principal,
// If keyTab is not specified use the Ticket Cache.
// and set the zookeeper login context name.
JaasConfiguration jaasConf = new JaasConfiguration(loginContextName, principalName, keytabFilename);
Configuration.setConfiguration(jaasConf);
System.setProperty(loginContextProperty, loginContextName);
} | 3.26 |
hbase_ZKAuthentication_isSecureZooKeeper_rdh | /**
* Returns {@code true} when secure authentication is enabled (whether
* {@code hbase.security.authentication} is set to "{@code kerberos}").
*/
public static boolean isSecureZooKeeper(Configuration conf) {
// Detection for embedded HBase client with jaas configuration
// defined for third party programs.
try {
Configuration testConfig = Configuration.getConfiguration();
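// If no JAAS login entries and no ZooKeeper kerberos principals are configured, treat the setup as insecure.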
if (((((testConfig.getAppConfigurationEntry("Client") == null) && (testConfig.getAppConfigurationEntry(JaasConfiguration.CLIENT_KEYTAB_KERBEROS_CONFIG_NAME) == null)) && (testConfig.getAppConfigurationEntry(JaasConfiguration.SERVER_KEYTAB_KERBEROS_CONFIG_NAME) == null)) && (conf.get(HConstants.ZK_CLIENT_KERBEROS_PRINCIPAL) == null)) && (conf.get(HConstants.ZK_SERVER_KERBEROS_PRINCIPAL) == null)) {
return false;
}
} catch (Exception e) {
// No Jaas configuration defined.
return false;
}
// Master & RSs uses hbase.zookeeper.client.*
return "kerberos".equalsIgnoreCase(conf.get("hbase.security.authentication"));
} | 3.26 |
hbase_ZKAuthentication_loginClient_rdh | /**
* Log in the current zookeeper client using the given configuration keys for the credential file
* and login principal.
* <p>
* <strong>This is only applicable when running on secure hbase</strong> On regular HBase (without
* security features), this will safely be ignored.
* </p>
*
* @param conf
* The configuration data to use
* @param keytabFileKey
* Property key used to configure the path to the credential file
* @param userNameKey
* Property key used to configure the login principal
* @param hostname
* Current hostname to use in any credentials
* @throws IOException
* underlying exception from SecurityUtil.login() call
*/
public static void loginClient(Configuration conf, String keytabFileKey, String userNameKey, String hostname) throws IOException {
login(conf, keytabFileKey, userNameKey, hostname, ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY, JaasConfiguration.CLIENT_KEYTAB_KERBEROS_CONFIG_NAME);
} | 3.26 |
hbase_ZKAuthentication_loginServer_rdh | /**
* Log in the current zookeeper server process using the given configuration keys for the
* credential file and login principal.
* <p>
* <strong>This is only applicable when running on secure hbase</strong> On regular HBase (without
* security features), this will safely be ignored.
* </p>
*
* @param conf
* The configuration data to use
* @param keytabFileKey
* Property key used to configure the path to the credential file
* @param userNameKey
* Property key used to configure the login principal
* @param hostname
* Current hostname to use in any credentials
* @throws IOException
* underlying exception from SecurityUtil.login() call
*/
public static void loginServer(Configuration conf, String keytabFileKey, String userNameKey, String hostname) throws IOException {
login(conf, keytabFileKey, userNameKey, hostname, ZooKeeperSaslServer.LOGIN_CONTEXT_NAME_KEY, JaasConfiguration.SERVER_KEYTAB_KERBEROS_CONFIG_NAME);
} | 3.26 |
hbase_TableInputFormat_addColumn_rdh | /**
* Parses a combined family and qualifier and adds either both or just the family in case there is
* no qualifier. This assumes the older colon divided notation, e.g. "family:qualifier".
*
* @param scan
* The Scan to update.
* @param familyAndQualifier
* family and qualifier
* @throws IllegalArgumentException
* When familyAndQualifier is invalid.
*/
private static void addColumn(Scan scan, byte[] familyAndQualifier) {
byte[][] fq = CellUtil.parseColumn(familyAndQualifier);
if (fq.length == 1) {
scan.addFamily(fq[0]);
} else if (fq.length == 2) {
scan.addColumn(fq[0], fq[1]);
} else {
throw new IllegalArgumentException("Invalid familyAndQualifier provided.");
}
} | 3.26 |
hbase_TableInputFormat_configureSplitTable_rdh | /**
* Sets split table in map-reduce job.
*/
public static void configureSplitTable(Job job, TableName tableName) {
job.getConfiguration().set(SPLIT_TABLE, tableName.getNameAsString());
} | 3.26 |
hbase_TableInputFormat_createScanFromConfiguration_rdh | /**
* Sets up a {@link Scan} instance, applying settings from the configuration property constants
* defined in {@code TableInputFormat}. This allows specifying things such as:
* <ul>
* <li>start and stop rows</li>
* <li>column qualifiers or families</li>
* <li>timestamps or timerange</li>
* <li>scanner caching and batch size</li>
* </ul>
*/
public static Scan createScanFromConfiguration(Configuration conf) throws IOException {
Scan scan = new Scan();
if (conf.get(SCAN_ROW_START) != null) {
scan.withStartRow(Bytes.toBytesBinary(conf.get(SCAN_ROW_START)));
}
if (conf.get(SCAN_ROW_STOP) != null) {
scan.withStopRow(Bytes.toBytesBinary(conf.get(SCAN_ROW_STOP)));
}
if (conf.get(SCAN_COLUMNS) != null) {
addColumns(scan, conf.get(SCAN_COLUMNS));
}
for (String columnFamily : conf.getTrimmedStrings(SCAN_COLUMN_FAMILY)) {
scan.addFamily(Bytes.toBytes(columnFamily));
}
if (conf.get(SCAN_TIMESTAMP) != null) {
scan.setTimestamp(Long.parseLong(conf.get(SCAN_TIMESTAMP)));
}
if ((conf.get(SCAN_TIMERANGE_START) != null) && (conf.get(SCAN_TIMERANGE_END) != null)) {
scan.setTimeRange(Long.parseLong(conf.get(SCAN_TIMERANGE_START)), Long.parseLong(conf.get(SCAN_TIMERANGE_END)));
}
if (conf.get(SCAN_MAXVERSIONS) != null) {
scan.readVersions(Integer.parseInt(conf.get(SCAN_MAXVERSIONS)));
}
if (conf.get(SCAN_CACHEDROWS) != null) {
scan.setCaching(Integer.parseInt(conf.get(SCAN_CACHEDROWS)));
}
if (conf.get(SCAN_BATCHSIZE) != null) {
scan.setBatch(Integer.parseInt(conf.get(SCAN_BATCHSIZE)));
}
// false by default, full table scans generate too much BC churn
scan.setCacheBlocks(conf.getBoolean(SCAN_CACHEBLOCKS, false));
return scan;
} | 3.26 |
hbase_TableInputFormat_getSplits_rdh | /**
* Calculates the splits that will serve as input for the map tasks. The number of splits matches
* the number of regions in a table. Splits are shuffled if required.
*
* @param context
* The current job context.
* @return The list of input splits.
* @throws IOException
* When creating the list of splits fails.
* @see org.apache.hadoop.mapreduce.InputFormat#getSplits( org.apache.hadoop.mapreduce.JobContext)
*/
@Override
public List<InputSplit> getSplits(JobContext context) throws IOException {
List<InputSplit> splits = super.getSplits(context);
if ((conf.get(SHUFFLE_MAPS) != null) && "true".equals(conf.get(SHUFFLE_MAPS).toLowerCase(Locale.ROOT))) {
Collections.shuffle(splits);
}
return splits;
} | 3.26 |
hbase_TableInputFormat_setConf_rdh | /**
* Sets the configuration. This is used to set the details for the table to be scanned.
*
* @param configuration
* The configuration to set.
* @see org.apache.hadoop.conf.Configurable#setConf( org.apache.hadoop.conf.Configuration)
*/
@Override
@SuppressWarnings(value = "REC_CATCH_EXCEPTION", justification = "Intentional")
public void setConf(Configuration configuration) {
this.conf = configuration;
Scan scan = null;
if (conf.get(SCAN) != null) {
try {
scan = TableMapReduceUtil.convertStringToScan(conf.get(SCAN));
} catch (IOException e) {
LOG.error("An error occurred.", e);
}
} else {
try {
scan = createScanFromConfiguration(conf);
} catch (Exception e) {
LOG.error(StringUtils.stringifyException(e));
}
}
setScan(scan);
} | 3.26 |
hbase_TableInputFormat_addColumns_rdh | /**
* Convenience method to parse a string representation of an array of column specifiers.
*
* @param scan
* The Scan to update.
* @param columns
* The columns to parse.
*/
private static void addColumns(Scan scan, String columns) {
String[] cols = columns.split(" ");
for (String col : cols) {
addColumn(scan, Bytes.toBytes(col));
}
} | 3.26 |
hbase_TableInputFormat_getConf_rdh | /**
* Returns the current configuration.
*
* @return The current configuration.
* @see org.apache.hadoop.conf.Configurable#getConf()
*/
@Override
public Configuration getConf() {
return conf;
} | 3.26 |
hbase_ObserverContextImpl_createAndPrepare_rdh | /**
* Instantiates a new ObserverContext instance if the passed reference is <code>null</code> and
* sets the environment in the new or existing instance. This allows deferring the instantiation
* of a ObserverContext until it is actually needed.
*
* @param <E>
* The environment type for the context
* @param env
* The coprocessor environment to set
* @return An instance of <code>ObserverContext</code> with the environment set
*/
// TODO: Remove this method, ObserverContext should not depend on RpcServer
@Deprecated
public static <E extends CoprocessorEnvironment> ObserverContext<E> createAndPrepare(E env) {
ObserverContextImpl<E> ctx = new ObserverContextImpl<>(RpcServer.getRequestUser().orElse(null));
ctx.prepare(env);
return ctx;
} | 3.26 |
hbase_RawAsyncTableImpl_mutateRow_rdh | // We need the MultiRequest when constructing the org.apache.hadoop.hbase.client.MultiResponse,
// so here I write a new method as I do not want to change the abstraction of call method.
@SuppressWarnings("unchecked")
private <RES, RESP> CompletableFuture<RESP> mutateRow(HBaseRpcController controller, HRegionLocation loc, ClientService.Interface stub, RowMutations mutation, Converter<MultiRequest, byte[], RowMutations> reqConvert, Function<RES, RESP> respConverter) {
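// Future completed (normally or exceptionally) by the multi() RPC callback below.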
CompletableFuture<RESP> v24 = new CompletableFuture<>();
try {
byte[] regionName = loc.getRegion().getRegionName();
MultiRequest req = reqConvert.convert(regionName, mutation);
stub.multi(controller, req, new RpcCallback<MultiResponse>() {
@Override
public void run(MultiResponse resp) {
if (controller.failed()) {
v24.completeExceptionally(controller.getFailed());
} else {
try {
MultiResponse multiResp = ResponseConverter.getResults(req, resp, controller.cellScanner());
ConnectionUtils.updateStats(f0.getStatisticsTracker(), f0.getConnectionMetrics(), loc.getServerName(), multiResp);
Throwable ex = multiResp.getException(regionName);
if (ex != null) {
v24.completeExceptionally(ex instanceof IOException ? ex : new IOException("Failed to mutate row: " + Bytes.toStringBinary(mutation.getRow()), ex));
} else {
v24.complete(respConverter.apply(((RES) (multiResp.getResults().get(regionName).result.get(0)))));
}
} catch (IOException e) {
v24.completeExceptionally(e);
}
}
}
});
} catch (IOException e) {
v24.completeExceptionally(e);
}
return v24;
} | 3.26 |
hbase_RestoreSnapshotProcedure_updateTableDescriptor_rdh | /**
* Update descriptor
*
* @param env
* MasterProcedureEnv
*/
private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException {
env.getMasterServices().getTableDescriptors().update(modifiedTableDescriptor);
} | 3.26 |
hbase_RestoreSnapshotProcedure_restoreSnapshot_rdh | /**
* Execute the on-disk Restore
*
* @param env
* MasterProcedureEnv
*/
private void restoreSnapshot(final MasterProcedureEnv env) throws IOException {
MasterFileSystem fileSystemManager = env.getMasterServices().getMasterFileSystem();
FileSystem fs = fileSystemManager.getFileSystem();
Path rootDir = fileSystemManager.getRootDir();
final ForeignExceptionDispatcher monitorException = new ForeignExceptionDispatcher();
final Configuration conf = new Configuration(env.getMasterConfiguration());
LOG.info("Starting restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot));
try {
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshot);
RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper(conf, fs, manifest, modifiedTableDescriptor, rootDir, monitorException, getMonitorStatus());
RestoreSnapshotHelper.RestoreMetaChanges metaChanges = restoreHelper.restoreHdfsRegions();
regionsToRestore = metaChanges.getRegionsToRestore();
regionsToRemove = metaChanges.getRegionsToRemove();
regionsToAdd = metaChanges.getRegionsToAdd();
parentsToChildrenPairMap = metaChanges.getParentToChildrenPairMap();
} catch (IOException e) {
String msg = ("restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot)) + " failed in on-disk restore. Try re-running the restore command.";
LOG.error(msg, e);
monitorException.receive(new ForeignException(env.getMasterServices().getServerName().toString(), e));
throw new IOException(msg, e);
}
} | 3.26 |
hbase_RestoreSnapshotProcedure_updateMETA_rdh | /**
* Apply changes to hbase:meta
*/
private void updateMETA(final MasterProcedureEnv env) throws IOException {
try {
Connection conn = env.getMasterServices().getConnection();
RegionStateStore regionStateStore = env.getAssignmentManager().getRegionStateStore();
int regionReplication = modifiedTableDescriptor.getRegionReplication();
// 1. Prepare to restore
getMonitorStatus().setStatus("Preparing to restore each region");
// 2. Applies changes to hbase:meta and in-memory states
// (2.1). Removes the current set of regions from META and in-memory states
//
// By removing also the regions to restore (the ones present both in the snapshot
// and in the current state) we ensure that no extra fields are present in META
// e.g. with a simple addRegionToMeta() the splitA and splitB attributes are
// not overwritten/removed, so you end up with old information
// that is not correct after the restore.
if (regionsToRemove != null) {
regionStateStore.deleteRegions(regionsToRemove);
deleteRegionsFromInMemoryStates(regionsToRemove, env, regionReplication);
}
// (2.2). Add the new set of regions to META and in-memory states
//
// At this point the old regions are no longer present in META.
// and the set of regions present in the snapshot will be written to META.
// All the information in hbase:meta are coming from the .regioninfo of each region present
// in the snapshot folder.
if (regionsToAdd != null) {
MetaTableAccessor.addRegionsToMeta(conn, regionsToAdd, regionReplication);
addRegionsToInMemoryStates(regionsToAdd, env, regionReplication);
}
if (regionsToRestore != null) {
regionStateStore.overwriteRegions(regionsToRestore, regionReplication);
deleteRegionsFromInMemoryStates(regionsToRestore, env, regionReplication);
addRegionsToInMemoryStates(regionsToRestore, env, regionReplication);
}
RestoreSnapshotHelper.RestoreMetaChanges metaChanges = new RestoreSnapshotHelper.RestoreMetaChanges(modifiedTableDescriptor, parentsToChildrenPairMap);
metaChanges.updateMetaParentRegions(conn, regionsToAdd);
// At this point the restore is complete.
LOG.info(((("Restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot)) + " on table=") + getTableName()) + " completed!");
} catch (IOException e) {
final ForeignExceptionDispatcher monitorException = new ForeignExceptionDispatcher();
String msg = ("restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot)) + " failed in meta update. Try re-running the restore command.";
LOG.error(msg, e);
monitorException.receive(new ForeignException(env.getMasterServices().getServerName().toString(), e));
throw new IOException(msg, e);
}
monitorStatus.markComplete(("Restore snapshot '" + snapshot.getName()) + "'!");
MetricsSnapshot metricsSnapshot = new MetricsSnapshot();
metricsSnapshot.addSnapshotRestore(monitorStatus.getCompletionTimestamp() - monitorStatus.getStartTime());
} | 3.26 |
hbase_RestoreSnapshotProcedure_addRegionsToInMemoryStates_rdh | /**
* Add regions to in-memory states
*
* @param regionInfos
* regions to add
* @param env
* MasterProcedureEnv
* @param regionReplication
* the number of region replications
*/
private void addRegionsToInMemoryStates(List<RegionInfo> regionInfos, MasterProcedureEnv env, int regionReplication) {
AssignmentManager am = env.getAssignmentManager();
for (RegionInfo regionInfo : regionInfos) {
if (regionInfo.isSplit()) {
am.getRegionStates().updateRegionState(regionInfo, State.SPLIT);
} else {
am.getRegionStates().updateRegionState(regionInfo, State.CLOSED);
// For region replicas
for (int i = 1; i < regionReplication; i++) {
RegionInfo regionInfoForReplica = RegionReplicaUtil.getRegionInfoForReplica(regionInfo, i);
am.getRegionStates().updateRegionState(regionInfoForReplica, State.CLOSED);
}
}
}
} | 3.26 |
hbase_RestoreSnapshotProcedure_deleteRegionsFromInMemoryStates_rdh | /**
* Delete regions from in-memory states
*
* @param regionInfos
* regions to delete
* @param env
* MasterProcedureEnv
* @param regionReplication
* the number of region replications
*/
private void deleteRegionsFromInMemoryStates(List<RegionInfo> regionInfos, MasterProcedureEnv env, int regionReplication) {
FavoredNodesManager fnm = env.getMasterServices().getFavoredNodesManager();
env.getAssignmentManager().getRegionStates().deleteRegions(regionInfos);
env.getMasterServices().getServerManager().removeRegions(regionInfos);
if (fnm != null) {
fnm.deleteFavoredNodesForRegions(regionInfos);
}
// For region replicas
if (regionReplication > 1) {
for (RegionInfo regionInfo : regionInfos) {
for (int i = 1; i < regionReplication; i++) {
RegionInfo regionInfoForReplica = RegionReplicaUtil.getRegionInfoForReplica(regionInfo, i);
env.getAssignmentManager().getRegionStates().deleteRegion(regionInfoForReplica);
env.getMasterServices().getServerManager().removeRegion(regionInfoForReplica);
if (fnm != null) {
fnm.deleteFavoredNodesForRegion(regionInfoForReplica);
}
}
}
}
} | 3.26 |
hbase_RestoreSnapshotProcedure_getRestoreAcl_rdh | /**
* Exposed for Testing: HBASE-26462
*/
@RestrictedApi(explanation = "Should only be called in tests", link = "", allowedOnPath = ".*/src/test/.*")
public boolean getRestoreAcl() {
return restoreAcl;
} | 3.26 |