name | code_snippet | score |
---|---|---|
hbase_ServerManager_removeRegions_rdh | /**
* Called by delete table and similar to notify the ServerManager that a region was removed.
*/
public void removeRegions(final List<RegionInfo> regions) {
for (RegionInfo hri : regions) {
m2(hri);
}
} | 3.26 |
hbase_ServerManager_getLoad_rdh | /**
* Returns ServerMetrics if serverName is known else null
*/
public ServerMetrics getLoad(final ServerName serverName) {
return this.onlineServers.get(serverName);
} | 3.26 |
hbase_ServerManager_waitForRegionServers_rdh | /**
* Wait for the region servers to report in. We will wait until one of these conditions is met:
* - the master is stopped
* - the 'hbase.master.wait.on.regionservers.maxtostart' number of region servers is reached
* - the 'hbase.master.wait.on.regionservers.mintostart' is reached AND there have been no new
*   region servers checking in for 'hbase.master.wait.on.regionservers.interval' time AND the
*   'hbase.master.wait.on.regionservers.timeout' is reached
*/
public void waitForRegionServers(MonitoredTask status) throws InterruptedException {
final long interval = this.master.getConfiguration().getLong(WAIT_ON_REGIONSERVERS_INTERVAL, 1500);
final long timeout = this.master.getConfiguration().getLong(WAIT_ON_REGIONSERVERS_TIMEOUT, 4500);
// Min is not an absolute; just a friction making us wait longer on server checkin.
int minToStart = getMinToStart();
int maxToStart = this.master.getConfiguration().getInt(WAIT_ON_REGIONSERVERS_MAXTOSTART, Integer.MAX_VALUE);
if (maxToStart < minToStart) {
LOG.warn(String.format("The value of '%s' (%d) is set less than '%s' (%d), ignoring.", WAIT_ON_REGIONSERVERS_MAXTOSTART, maxToStart, WAIT_ON_REGIONSERVERS_MINTOSTART, minToStart));
maxToStart = Integer.MAX_VALUE;
}
long now = EnvironmentEdgeManager.currentTime();
final long startTime = now;
long slept = 0;
long lastLogTime = 0;
long lastCountChange = startTime;
int count = countOfRegionServers();
int oldCount = 0;
// This while test is a little hard to read. We try to comment it in below but in essence:
// Wait if Master is not stopped and the number of regionservers that have checked-in is
// less than the maxToStart. Both of these conditions will be true near universally.
// Next, we will keep cycling if ANY of the following three conditions are true:
// 1. The time since a regionserver registered is < interval (means servers are actively
// checking in).
// 2. We are under the total timeout.
// 3. The count of servers is < minimum.
for (ServerListener listener : this.f2) {
listener.waiting();
}
while ((((!this.master.isStopped()) && (!isClusterShutdown())) && (count < maxToStart)) && ((((lastCountChange + interval) > now) || (timeout > slept)) || (count < minToStart))) {
// Log some info at every interval time or if there is a change
if ((oldCount != count) || ((lastLogTime + interval) < now)) {
lastLogTime = now;
String msg = (((((((((((("Waiting on regionserver count=" + count) + "; waited=") + slept) + "ms, expecting min=") + minToStart) + " server(s), max=") + getStrForMax(maxToStart)) + " server(s), ") + "timeout=") + timeout) + "ms, lastChange=") + (now - lastCountChange)) + "ms";
LOG.info(msg);
status.setStatus(msg);
}
// We sleep for some time
final long sleepTime = 50;
Thread.sleep(sleepTime);
now = EnvironmentEdgeManager.currentTime();
slept = now - startTime;
oldCount = count;
count = countOfRegionServers();
if (count != oldCount) {
lastCountChange = now;
}
}
// Did we exit the loop because cluster is going down?
if (isClusterShutdown()) {
this.master.stop("Cluster shutdown");
}
LOG.info((((((((((("Finished waiting on RegionServer count=" + count) + "; waited=") + slept) + "ms,") + " expected min=") + minToStart) + " server(s), max=") + getStrForMax(maxToStart)) + " server(s),") + " master is ") + (this.master.isStopped() ? "stopped." : "running"));
} | 3.26 |
hbase_ServerManager_addServerToDrainList_rdh | /**
* Add the server to the drain list.
*
* @return True if the server is added or the server is already on the drain list.
*/
public synchronized boolean addServerToDrainList(final ServerName sn) {
// Warn if the server (sn) is not online. ServerName is of the form:
// <hostname> , <port> , <startcode>
if (!this.isServerOnline(sn)) {
LOG.warn((("Server " + sn) + " is not currently online. ") + "Ignoring request to add it to draining list.");
return false;
}
// Add the server to the draining servers lists, if it's not already in
// it.
if (this.drainingServers.contains(sn)) {
LOG.warn((("Server " + sn) + " is already in the draining server list.") + "Ignoring request to add it again.");
return true;
}
LOG.info(("Server " + sn) + " added to draining server list.");
return this.drainingServers.add(sn);
} | 3.26 |
hbase_ServerManager_findDeadServersAndProcess_rdh | /**
* Find out the region servers crashed between the crash of the previous master instance and the
* current master instance and schedule SCP for them.
* <p/>
* Since the {@code RegionServerTracker} has already helped us to construct the online servers set
* by scanning zookeeper, now we can compare the online servers with {@code liveServersFromWALDir}
* to find out whether there are servers which are already dead.
* <p/>
* Must be called inside the initialization method of {@code RegionServerTracker} to avoid
* concurrency issue.
*
* @param deadServersFromPE
* the region servers which already have a SCP associated.
* @param liveServersFromWALDir
* the live region servers from wal directory.
*/
void findDeadServersAndProcess(Set<ServerName> deadServersFromPE, Set<ServerName> liveServersFromWALDir) {
deadServersFromPE.forEach(f1::putIfAbsent);
liveServersFromWALDir.stream().filter(sn -> !onlineServers.containsKey(sn)).forEach(this::expireServer);
} | 3.26 |
hbase_ServerManager_getAverageLoad_rdh | /**
* Compute the average load across all region servers. Currently, this uses a very naive
* computation - just uses the number of regions being served, ignoring stats about number of
* requests.
*
* @return the average load
*/
public double getAverageLoad()
{
int totalLoad = 0;
int numServers = 0;
for (ServerMetrics sl : this.onlineServers.values()) {
numServers++;
totalLoad += sl.getRegionMetrics().size();
}
return numServers == 0 ? 0 : ((double) (totalLoad)) / ((double) (numServers));
} | 3.26 |
hbase_ServerManager_m3_rdh | /**
* Persist last flushed sequence id of each region to HDFS
*
* @throws IOException
* if persisting to HDFS fails
*/
private void m3() throws IOException {
if (isFlushSeqIdPersistInProgress) {
return;
}
isFlushSeqIdPersistInProgress = true;
try {
Configuration conf = master.getConfiguration();
Path rootDir = CommonFSUtils.getRootDir(conf);
Path lastFlushedSeqIdPath = new Path(rootDir, LAST_FLUSHED_SEQ_ID_FILE);
FileSystem fs = FileSystem.get(conf);
if (fs.exists(lastFlushedSeqIdPath)) {
LOG.info("Rewriting .lastflushedseqids file at: " + lastFlushedSeqIdPath);
if (!fs.delete(lastFlushedSeqIdPath, false)) {
throw new IOException("Unable to remove existing " + lastFlushedSeqIdPath);
}
} else {
LOG.info("Writing .lastflushedseqids file at: " + lastFlushedSeqIdPath);
}
FSDataOutputStream out = fs.create(lastFlushedSeqIdPath);
FlushedSequenceId.Builder flushedSequenceIdBuilder = FlushedSequenceId.newBuilder();
try {
for (Entry<byte[], Long> entry : flushedSequenceIdByRegion.entrySet()) {
FlushedRegionSequenceId.Builder flushedRegionSequenceIdBuilder = FlushedRegionSequenceId.newBuilder();
flushedRegionSequenceIdBuilder.setRegionEncodedName(ByteString.copyFrom(entry.getKey()));
flushedRegionSequenceIdBuilder.setSeqId(entry.getValue());
ConcurrentNavigableMap<byte[], Long> storeSeqIds = storeFlushedSequenceIdsByRegion.get(entry.getKey());
if (storeSeqIds != null) {
for (Entry<byte[], Long> store : storeSeqIds.entrySet()) {
FlushedStoreSequenceId.Builder flushedStoreSequenceIdBuilder = FlushedStoreSequenceId.newBuilder();
flushedStoreSequenceIdBuilder.setFamily(ByteString.copyFrom(store.getKey()));
flushedStoreSequenceIdBuilder.setSeqId(store.getValue());
flushedRegionSequenceIdBuilder.addStores(flushedStoreSequenceIdBuilder);
}
}
flushedSequenceIdBuilder.addRegionSequenceId(flushedRegionSequenceIdBuilder);
}
flushedSequenceIdBuilder.build().writeDelimitedTo(out);
} finally {
if (out != null) {
out.close();
}
}
} finally {
isFlushSeqIdPersistInProgress = false;
}
} | 3.26 |
hbase_ServerManager_isServerDead_rdh | /**
* Check if a server is known to be dead. A server can be online, or known to be dead, or unknown
* to this manager (i.e., not online, not known to be dead either; it is simply not tracked by the
* master any more, for example, a very old previous instance).
*/
public synchronized boolean isServerDead(ServerName serverName) {
return (serverName == null) || f1.isDeadServer(serverName);
} | 3.26 |
hbase_ServerManager_unregisterListener_rdh | /**
* Remove the listener from the notification list.
*
* @param listener
* The ServerListener to unregister
*/
public boolean unregisterListener(final ServerListener listener) {
return this.f2.remove(listener);
} | 3.26 |
hbase_ServerManager_getOnlineServers_rdh | /**
* Returns Read-only map of servers to serverinfo
*/
public Map<ServerName, ServerMetrics> getOnlineServers() {
// Presumption is that iterating the returned Map is OK.
synchronized(this.onlineServers) {
return Collections.unmodifiableMap(this.onlineServers);
}
} | 3.26 |
hbase_ServerManager_moveFromOnlineToDeadServers_rdh | /**
* Called when server has expired.
*/
// Locking in this class needs cleanup.
public synchronized void moveFromOnlineToDeadServers(final ServerName sn) {
synchronized(this.onlineServers) {
boolean v37 = this.onlineServers.containsKey(sn);
if (v37) {
// Remove the server from the known servers lists and update load info BUT
// add to deadservers first; do this so it'll show in dead servers list if
// not in online servers list.
this.f1.putIfAbsent(sn);
this.onlineServers.remove(sn);
onlineServers.notifyAll();
} else {
// If not online, that is odd but may happen if 'Unknown Servers' -- where meta
// has references to servers not online nor in dead servers list. If
// 'Unknown Server', don't add to DeadServers else will be there for ever.
LOG.trace("Expiration of {} but server not online", sn);
}
}
} | 3.26 |
hbase_ServerManager_getVersion_rdh | /**
* May return "0.0.0" when server is not online
*/
public String getVersion(ServerName serverName) {
ServerMetrics serverMetrics = onlineServers.get(serverName);
return serverMetrics != null ? serverMetrics.getVersion() : "0.0.0";
} | 3.26 |
hbase_ProcedureStoreTracker_setDeletedIfDeletedByThem_rdh | /**
* For the global tracker, we will use this method to build the holdingCleanupTracker, as the
* modified flags will be cleared after rolling so we only need to test the deleted flags.
*
* @see #setDeletedIfModifiedInBoth(ProcedureStoreTracker)
*/
public void setDeletedIfDeletedByThem(ProcedureStoreTracker tracker) {
setDeleteIf(tracker, (node, procId) -> ((node == null) || (!node.contains(procId))) || (node.isDeleted(procId) == DeleteState.YES));
} | 3.26 |
hbase_ProcedureStoreTracker_resetModified_rdh | /**
* Clears the list of updated procedure ids. This doesn't affect global list of active procedure
* ids.
*/
public void resetModified() {
for (Map.Entry<Long, BitSetNode> v27 : map.entrySet()) {
v27.getValue().resetModified();
}
minModifiedProcId = Long.MAX_VALUE;
maxModifiedProcId = Long.MIN_VALUE;
} | 3.26 |
hbase_ProcedureStoreTracker_setDeletedIfModifiedInBoth_rdh | /**
* Similar to {@link #setDeletedIfModified(long...)}, but here the {@code procId} are given by
* the {@code tracker}. If a procedure is modified by us, and also by the given {@code tracker},
* then we mark it as deleted.
*
* @see #setDeletedIfModified(long...)
*/
public void setDeletedIfModifiedInBoth(ProcedureStoreTracker tracker) {
setDeleteIf(tracker, (node, procId) -> (node != null) && node.isModified(procId));
} | 3.26 |
hbase_ProcedureStoreTracker_growNode_rdh | /**
* Grows {@code node} to contain {@code procId} and updates the map.
*
* @return {@link BitSetNode} instance which contains {@code procId}.
*/
private BitSetNode growNode(BitSetNode node, long procId) {
map.remove(node.getStart());
node.grow(procId);
map.put(node.getStart(), node);
return node;
} | 3.26 |
hbase_ProcedureStoreTracker_resetTo_rdh | /**
* Resets internal state to same as given {@code tracker}, and change the deleted flag according
* to the modified flag if {@code resetDelete} is true. Does deep copy of the bitmap.
* <p/>
* The {@code resetDelete} will be set to true when building cleanup tracker, please see the
* comments in {@link BitSetNode#BitSetNode(BitSetNode, boolean)} to learn how we change the
* deleted flag if {@code resetDelete} is true.
*/
public void resetTo(ProcedureStoreTracker tracker, boolean resetDelete) {
reset();
// resetDelete will be true if we are building the cleanup tracker, as we will reset deleted flags
// for all the unmodified bits to 1; the partial flag is useless then, so set it to false to avoid
// confusing developers when debugging.
this.partial = (resetDelete) ? false : tracker.partial;
this.minModifiedProcId = tracker.minModifiedProcId;
this.maxModifiedProcId = tracker.maxModifiedProcId;
this.keepDeletes = tracker.keepDeletes;
for (Map.Entry<Long, BitSetNode> entry : tracker.map.entrySet()) {
map.put(entry.getKey(), new BitSetNode(entry.getValue(), resetDelete));
}
} | 3.26 |
hbase_ProcedureStoreTracker_toProto_rdh | // ========================================================================
// Convert to/from Protocol Buffer.
// ========================================================================
/**
* Builds org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker
* protocol buffer from current state.
*/
public ProcedureStoreTracker toProto() throws IOException {
ProcedureProtos.ProcedureStoreTracker.Builder builder = ProcedureProtos.ProcedureStoreTracker.newBuilder();
for (Map.Entry<Long, BitSetNode> entry : map.entrySet()) {
builder.addNode(entry.getValue().convert());
}
return builder.build();
} | 3.26 |
hbase_ProcedureStoreTracker_mergeNodes_rdh | /**
* Merges {@code leftNode} & {@code rightNode} and updates the map.
*/
private BitSetNode mergeNodes(BitSetNode leftNode, BitSetNode rightNode) {
assert leftNode.getStart() < rightNode.getStart();
leftNode.merge(rightNode);
map.remove(rightNode.getStart());
return leftNode;
} | 3.26 |
hbase_ProcedureStoreTracker_setDeleted_rdh | /**
* This method is used when restarting where we need to rebuild the ProcedureStoreTracker. The
* {@link #delete(long)} method above assumes that the {@link BitSetNode} exists, but when restarting
* this is not true, as we will read the wal files in reverse order so a delete may come first.
*/
public void setDeleted(long procId, boolean isDeleted) {
BitSetNode node = getOrCreateNode(procId);
assert node.contains(procId) : (("expected procId=" + procId) + " in the node=") + node;
node.updateState(procId, isDeleted);
trackProcIds(procId);
} | 3.26 |
hbase_ProcedureStoreTracker_setMinMaxModifiedProcIds_rdh | /**
* Will be called when restarting where we need to rebuild the ProcedureStoreTracker.
*/
public void setMinMaxModifiedProcIds(long min, long max) {
this.minModifiedProcId = min;
this.maxModifiedProcId = max;
} | 3.26 |
hbase_ProcedureStoreTracker_isEmpty_rdh | /**
* Returns true, if no procedure is active, else false.
*/
public boolean isEmpty() {
for (Map.Entry<Long, BitSetNode> entry : map.entrySet()) {
if (!entry.getValue().isEmpty()) {
return false;
}
}
return true;
}
/**
*
* @return true if all procedures were modified or deleted since the last call to
{@link #resetModified()} | 3.26 |
hbase_ProcedureStoreTracker_isDeleted_rdh | /**
* If {@link #partial} is false, returns state from the bitmap. If no state is found for
* {@code procId}, returns YES. If partial is true, tracker doesn't have complete view of system
* state, so it returns MAYBE if there is no update for the procedure or if it doesn't have a
* state in bitmap. Otherwise, returns state from the bitmap.
*/
public DeleteState isDeleted(long procId) {
Map.Entry<Long, BitSetNode> entry = map.floorEntry(procId);
if ((entry != null) && entry.getValue().contains(procId)) {
BitSetNode node = entry.getValue();
DeleteState state = node.isDeleted(procId);
return partial && (!node.isModified(procId)) ? DeleteState.MAYBE : state;
}
return partial ? DeleteState.MAYBE : DeleteState.YES;
} | 3.26 |
hbase_ProcedureStoreTracker_getAllActiveProcIds_rdh | /**
* Will be used when there are too many proc wal files. We will rewrite the states of the active
* procedures in the oldest proc wal file so that we can delete it.
*
* @return all the active procedure ids in this tracker.
*/
public long[] getAllActiveProcIds() {
return map.values().stream().map(BitSetNode::getActiveProcIds).filter(p -> p.length > 0).flatMapToLong(LongStream::of).toArray();
} | 3.26 |
hbase_ProcedureStoreTracker_lookupClosestNode_rdh | /**
* lookup the node containing the specified procId.
*
* @param node
* cached node to check before doing a lookup
* @param procId
* the procId to lookup
* @return the node that may contain the procId, or null
*/
private BitSetNode lookupClosestNode(final BitSetNode node, final long procId) {
if ((node != null) && node.contains(procId)) {
return node;
}
final Map.Entry<Long, BitSetNode> entry = map.floorEntry(procId);
return entry != null ? entry.getValue() : null;
} | 3.26 |
hbase_ProcedureStoreTracker_setDeletedIfModified_rdh | /**
* Set the given bit for the procId to delete if it was modified before.
* <p/>
* This method is used to test whether a procedure wal file can be safely deleted, as if all the
* procedures in the given procedure wal file have been modified in the new procedure wal files,
* then we can delete it.
*/
public void setDeletedIfModified(long... procId)
{
BitSetNode node = null;
for (int i = 0; i < procId.length; ++i) {
node = lookupClosestNode(node, procId[i]);
if ((node != null) && node.isModified(procId[i])) {
node.delete(procId[i]);
}
}
} | 3.26 |
hbase_RegionLocator_getStartEndKeys_rdh | /**
* Gets the starting and ending row keys for every region in the currently open table.
* <p>
* This is mainly useful for the MapReduce integration.
*
* @return Pair of arrays of region starting and ending row keys
* @throws IOException
* if a remote or network exception occurs
*/
default Pair<byte[][], byte[][]> getStartEndKeys() throws IOException {
List<HRegionLocation> v0 = getAllRegionLocations().stream().filter(loc -> RegionReplicaUtil.isDefaultReplica(loc.getRegion())).collect(Collectors.toList());
byte[][] startKeys = new byte[v0.size()][];
byte[][] v2 = new byte[v0.size()][];
for (int i = 0, n = v0.size(); i < n; i++) {
RegionInfo region = v0.get(i).getRegion();
startKeys[i] = region.getStartKey();
v2[i] = region.getEndKey();
}
return Pair.newPair(startKeys, v2);
} | 3.26 |
hbase_RegionLocator_getEndKeys_rdh | /**
* Gets the ending row key for every region in the currently open table.
* <p>
* This is mainly useful for the MapReduce integration.
*
* @return Array of region ending row keys
* @throws IOException
* if a remote or network exception occurs
*/
default byte[][] getEndKeys() throws IOException {
return getStartEndKeys().getSecond();
} | 3.26 |
hbase_RegionLocator_getStartKeys_rdh | /**
* Gets the starting row key for every region in the currently open table.
* <p>
* This is mainly useful for the MapReduce integration.
*
* @return Array of region starting row keys
* @throws IOException
* if a remote or network exception occurs
*/
default byte[][] getStartKeys() throws IOException {
return getStartEndKeys().getFirst();
} | 3.26 |
hbase_RegionLocator_getRegionLocation_rdh | /**
* Finds the region with the given replica id on which the given row is being served.
*
* @param row
* Row to find.
* @param replicaId
* the replica id
* @return Location of the row.
* @throws IOException
* if a remote or network exception occurs
*/
default HRegionLocation getRegionLocation(byte[] row, int replicaId) throws IOException {
return getRegionLocation(row, replicaId, false);
} | 3.26 |
hbase_RegionLocator_getRegionLocations_rdh | /**
* Find all the replicas for the region on which the given row is being served.
*
* @param row
* Row to find.
* @return Locations for all the replicas of the row.
* @throws IOException
* if a remote or network exception occurs
*/
default List<HRegionLocation> getRegionLocations(byte[] row) throws IOException {
return getRegionLocations(row, false);
} | 3.26 |
hbase_DefaultHeapMemoryTuner_addToRollingStats_rdh | /**
* Add the given context to the rolling tuner stats.
*
* @param context
* The tuner context.
*/
private void addToRollingStats(TunerContext context) {
f0.insertDataValue(context.getCacheMissCount());
rollingStatsForFlushes.insertDataValue(context.getBlockedFlushCount() + context.getUnblockedFlushCount());
rollingStatsForEvictions.insertDataValue(context.getEvictCount());
} | 3.26 |
hbase_DefaultHeapMemoryTuner_getTuneDirection_rdh | /**
* Determine the best tuning direction based on the given context.
*
* @param context
* The tuner context.
* @return tuning direction.
*/
private StepDirection getTuneDirection(TunerContext context) {
StepDirection newTuneDirection = StepDirection.NEUTRAL;
long v11 = context.getBlockedFlushCount();
long unblockedFlushCount = context.getUnblockedFlushCount();
long evictCount = context.getEvictCount();
long cacheMissCount = context.getCacheMissCount();
long totalFlushCount = v11 + unblockedFlushCount;
float curMemstoreSize = context.getCurMemStoreSize();
float curBlockCacheSize = context.getCurBlockCacheSize();
StringBuilder tunerLog = new StringBuilder();
// We can consider memstore or block cache to be sufficient if
// we are using only a minor fraction of what have been already provided to it.
boolean earlyMemstoreSufficientCheck = (totalFlushCount == 0) || (context.getCurMemStoreUsed() < (curMemstoreSize * sufficientMemoryLevel));
boolean earlyBlockCacheSufficientCheck = (evictCount == 0) || (context.getCurBlockCacheUsed() < (curBlockCacheSize * sufficientMemoryLevel));
if (earlyMemstoreSufficientCheck && earlyBlockCacheSufficientCheck) {
// Both memstore and block cache memory seems to be sufficient. No operation required.
newTuneDirection = StepDirection.NEUTRAL;
tunerLog.append("Going to do nothing because no changes are needed.");} else if (earlyMemstoreSufficientCheck) {
// Increase the block cache size and corresponding decrease in memstore size.
newTuneDirection = StepDirection.INCREASE_BLOCK_CACHE_SIZE;
tunerLog.append("Going to increase the block cache size.");
} else if (earlyBlockCacheSufficientCheck) {
// Increase the memstore size and corresponding decrease in block cache size.
newTuneDirection = StepDirection.INCREASE_MEMSTORE_SIZE;
tunerLog.append("Going to increase the memstore size.");
} else {
// Early checks for sufficient memory failed. Tuning memory based on past statistics.
// Boolean indicator to show if we need to revert previous step or not.
boolean isReverting = false;
switch (prevTuneDirection) {
// Here we are using number of evictions rather than cache misses because it is more
// strong indicator for deficient cache size. Improving caching is what we
// would like to optimize for in steady state.
case INCREASE_BLOCK_CACHE_SIZE :
if ((((double) (evictCount)) > rollingStatsForEvictions.getMean()) || (((double) (totalFlushCount)) > (rollingStatsForFlushes.getMean() + (rollingStatsForFlushes.getDeviation() / 2.0)))) {
// Reverting previous step as it was not useful.
// Tuning failed to decrease evictions or tuning resulted in large number of flushes.
newTuneDirection = StepDirection.INCREASE_MEMSTORE_SIZE;
tunerLog.append("We will revert previous tuning");
if (((double) (evictCount)) > rollingStatsForEvictions.getMean()) {
tunerLog.append(" because we could not decrease evictions sufficiently.");
}
else {
tunerLog.append(" because the number of flushes rose significantly.");
}
isReverting = true;
}
break;
case INCREASE_MEMSTORE_SIZE :
if ((((double) (totalFlushCount)) > rollingStatsForFlushes.getMean()) || (((double) (evictCount)) > (rollingStatsForEvictions.getMean() + (rollingStatsForEvictions.getDeviation() / 2.0)))) {
// Reverting previous step as it was not useful.
// Tuning failed to decrease flushes or tuning resulted in large number of evictions.
newTuneDirection = StepDirection.INCREASE_BLOCK_CACHE_SIZE;
tunerLog.append("We will revert previous tuning");
if (((double) (totalFlushCount)) > rollingStatsForFlushes.getMean()) {
tunerLog.append(" because we could not decrease flushes sufficiently.");
} else {
tunerLog.append(" because number of evictions rose significantly.");
}
isReverting = true;
}
break;
default :
// Last step was neutral, so revert does not apply here.
break;
}
// If we are not reverting. We try to tune memory sizes by looking at cache misses / flushes.
if (!isReverting) {
// mean +- deviation*0.8 is considered to be normal;
// below it is considered low and above it is considered high.
// We can safely assume that the number of cache misses and flushes are normally distributed over
// past periods, and hence the above mentioned classes (normal, high and low)
// are likely to occur with probability 56%, 22%, 22% respectively. Hence there is at
// least ~10% probability that we will not fall in the NEUTRAL step.
// This optimization solution is feedback based and we revert when we
// don't find our steps helpful. Hence we want to do tuning only when we have clear
// indications, because too much unnecessary tuning may affect the performance of the cluster.
if ((((double) (cacheMissCount)) < (f0.getMean() - (f0.getDeviation() * 0.8))) && (((double) (totalFlushCount)) < (rollingStatsForFlushes.getMean() - (rollingStatsForFlushes.getDeviation() * 0.8)))) {
// Everything is fine no tuning required
newTuneDirection = StepDirection.NEUTRAL;
} else if ((((double) (cacheMissCount)) > (f0.getMean() + (f0.getDeviation() * 0.8))) && (((double) (totalFlushCount)) < (rollingStatsForFlushes.getMean() - (rollingStatsForFlushes.getDeviation() * 0.8)))) {
// more misses , increasing cache size
newTuneDirection = StepDirection.INCREASE_BLOCK_CACHE_SIZE;
tunerLog.append("Going to increase block cache size due to increase in number of cache misses.");
} else if ((((double) (cacheMissCount)) < (f0.getMean() - (f0.getDeviation() * 0.8))) && (((double) (totalFlushCount)) > (rollingStatsForFlushes.getMean() + (rollingStatsForFlushes.getDeviation() * 0.8)))) {
// more flushes , increasing memstore size
newTuneDirection = StepDirection.INCREASE_MEMSTORE_SIZE;
tunerLog.append("Going to increase memstore size due to increase in number of flushes.");
} else if ((v11 > 0) && (prevTuneDirection == StepDirection.NEUTRAL)) {
// we do not want blocked flushes
newTuneDirection = StepDirection.INCREASE_MEMSTORE_SIZE;
tunerLog.append(("Going to increase memstore size due to" + v11) + " blocked flushes.");
} else {
// Default. Not enough facts to do tuning.
tunerLog.append("Going to do nothing because we " + "could not determine best tuning direction");
newTuneDirection = StepDirection.NEUTRAL;
}
}
}
// Log NEUTRAL decisions at DEBUG, because they are the most frequent and not that interesting.
// Log other decisions at INFO because they are making meaningful operational changes.
switch (newTuneDirection) {
case NEUTRAL :
if (LOG.isDebugEnabled()) {
LOG.debug(tunerLog.toString());
}
break;
default :
LOG.info(tunerLog.toString());
break;
}
return newTuneDirection;
} | 3.26 |
hbase_ReplicationGroupOffset_getOffset_rdh | /**
* A negative value means this file has already been fully replicated out
*/
public long getOffset()
{
return offset;
} | 3.26 |
hbase_HBaseRpcControllerImpl_cellScanner_rdh | /**
* Returns One-shot cell scanner (you cannot back it up and restart)
*/
@Override
public CellScanner cellScanner() {
return cellScanner;
} | 3.26 |
hbase_CompressionState_readKey_rdh | /**
* Analyze the key and fill the state assuming we know previous state. Uses mark() and reset() in
* ByteBuffer to avoid moving the position.
* <p>
* This method overrides all the fields of this instance, except {@link #prevOffset}, which is
* usually manipulated directly by encoders and decoders.
*
* @param in
* Buffer at the position where key starts
* @param keyLength
* Length of key in bytes
* @param valueLength
* Length of values in bytes
* @param commonPrefix
* how many first bytes are common with previous KeyValue
* @param previousState
* State from previous KeyValue
*/
void readKey(ByteBuffer in, int keyLength, int valueLength, int commonPrefix, CompressionState previousState) {
this.keyLength = keyLength;
this.valueLength = valueLength;
// fill the state
in.mark();// mark beginning of key
if (commonPrefix < KeyValue.ROW_LENGTH_SIZE) {
f0 = in.getShort();
ByteBufferUtils.skip(in, f0);
familyLength = in.get();
qualifierLength = ((keyLength - f0) - familyLength) - KeyValue.KEY_INFRASTRUCTURE_SIZE;
ByteBufferUtils.skip(in, familyLength + qualifierLength);
} else {
f0 = previousState.f0;
familyLength = previousState.familyLength;
qualifierLength = (previousState.qualifierLength + keyLength) - previousState.keyLength;
ByteBufferUtils.skip(in, (((KeyValue.ROW_LENGTH_SIZE + KeyValue.FAMILY_LENGTH_SIZE) + f0) + familyLength) + qualifierLength);
}
readTimestamp(in);
type = in.get();
in.reset();
} | 3.26 |
hbase_AsyncRegionLocatorHelper_removeRegionLocation_rdh | /**
* Create a new {@link RegionLocations} based on the given {@code oldLocs}, and remove the
* location for the given {@code replicaId}.
* <p/>
* All the {@link RegionLocations} in async locator related class are immutable because we want to
* access them concurrently, so here we need to create a new one, instead of calling
* {@link RegionLocations#remove(int)}.
*/
static RegionLocations removeRegionLocation(RegionLocations oldLocs, int replicaId) {
HRegionLocation[] v8 = oldLocs.getRegionLocations();
if (v8.length < (replicaId + 1)) {
// Here we do not modify the oldLocs so it is safe to return it.
return oldLocs;
}
v8 = Arrays.copyOf(v8, v8.length);
v8[replicaId] = null;
if (ObjectUtils.firstNonNull(v8) != null) {
return new RegionLocations(v8);
} else {
// if all the locations are null, just return null
return null;
}
} | 3.26 |
hbase_AsyncRegionLocatorHelper_replaceRegionLocation_rdh | /**
* Create a new {@link RegionLocations} based on the given {@code oldLocs}, and replace the
* location for the given {@code replicaId} with the given {@code loc}.
* <p/>
* All the {@link RegionLocations} in async locator related class are immutable because we want to
* access them concurrently, so here we need to create a new one, instead of calling
* {@link RegionLocations#updateLocation(HRegionLocation, boolean, boolean)}.
*/
static RegionLocations replaceRegionLocation(RegionLocations oldLocs, HRegionLocation loc) {
int replicaId = loc.getRegion().getReplicaId();
HRegionLocation[] locs = oldLocs.getRegionLocations();
locs = Arrays.copyOf(locs, Math.max(replicaId + 1, locs.length));
locs[replicaId] = loc;
return new RegionLocations(locs);
} | 3.26 |
hbase_SplitWALManager_releaseSplitWALWorker_rdh | /**
* After the worker finishes the split WAL task, it will release the worker and wake up all the
* suspended procedures in the ProcedureEvent
*
* @param worker
* worker which is about to release
* @param scheduler
* scheduler which is to wake up the procedure event
*/
public void releaseSplitWALWorker(ServerName worker, MasterProcedureScheduler scheduler) {
LOG.debug("Release split WAL worker={}", worker);
splitWorkerAssigner.release(worker);
splitWorkerAssigner.wake(scheduler);
} | 3.26 |
hbase_SplitWALManager_addUsedSplitWALWorker_rdh | /**
* When the master restarts, there will be a new splitWorkerAssigner. But if there are splitting WAL
* tasks running on the region server side, they will not be counted by the new splitWorkerAssigner.
* Thus we should add the workers of running tasks to the assigner when we load the procedures
* from MasterProcWALs.
*
* @param worker
* region server which is executing a split WAL task
*/
public void addUsedSplitWALWorker(ServerName worker) {
splitWorkerAssigner.addUsedWorker(worker);
} | 3.26 |
hbase_SplitWALManager_archive_rdh | /**
* Archive processed WAL
*/
public void archive(String wal) throws IOException {
WALSplitUtil.moveWAL(this.fs, new Path(wal), this.walArchiveDir);
} | 3.26 |
hbase_SplitWALManager_acquireSplitWALWorker_rdh | /**
* Acquire a split WAL worker
*
* @param procedure
* split WAL task
* @return an available region server which could execute this task
* @throws ProcedureSuspendedException
* if there is no available worker, it will throw this
* exception to WAIT the procedure.
*/
public ServerName acquireSplitWALWorker(Procedure<?> procedure) throws ProcedureSuspendedException {
Optional<ServerName> worker = splitWorkerAssigner.acquire();
if (worker.isPresent()) {
LOG.debug("Acquired split WAL worker={}", worker.get());
return worker.get();
}
splitWorkerAssigner.suspend(procedure);
throw new ProcedureSuspendedException();
} | 3.26 |
hbase_SyncTable_nextRow_rdh | /**
* Advance to the next row and return its row key. Returns null iff there are no more rows.
*/
public byte[] nextRow() {
if (f4 == null) {
// no cached row - check scanner for more
while (results.hasNext()) {
f4 = results.next();
Cell v27 = f4.rawCells()[0];
if ((currentRow == null) || (!Bytes.equals(currentRow, 0, currentRow.length, v27.getRowArray(), v27.getRowOffset(), v27.getRowLength()))) {
// found next row
break;
} else {
// found another result from current row, keep scanning
f4 = null;
}
}
if (f4 == null) {
// end of data, no more rows
f3 = null;
currentRow = null;
return null;
}
}
// advance to cached result for next row
f3 = f4;
nextCellInRow = 0;
currentRow = f3.getRow();
f4 = null;
return currentRow;
} | 3.26 |
hbase_SyncTable_compareRowKeys_rdh | /**
* Compare row keys of the given Result objects. Nulls are after non-nulls
*/
private static int compareRowKeys(byte[] r1, byte[] r2)
{
if (r1 == null) {
return 1;// source missing row
} else if (r2 == null) {
return -1;// target missing row
} else {
// Sync on no META tables only. We can directly do what CellComparator is doing inside.
// Never the call going to MetaCellComparator.
return Bytes.compareTo(r1, 0, r1.length, r2, 0, r2.length);
}
} | 3.26 |
hbase_SyncTable_compareCellKeysWithinRow_rdh | /**
* Compare families, qualifiers, and timestamps of the given Cells. They are assumed to be of
* the same row. Nulls are after non-nulls.
*/
private int compareCellKeysWithinRow(Cell c1, Cell c2) {
if (c1 == null) {
return 1;// source missing cell
}
if (c2 == null) {
return -1;// target missing cell
}
int result = CellComparator.getInstance().compareFamilies(c1, c2);
if (result != 0) {
return result; }
result = CellComparator.getInstance().compareQualifiers(c1, c2);
if (result != 0) {
return result;
}
if (this.ignoreTimestamp) {
return 0;
} else {
// note timestamp comparison is inverted - more recent cells first
return CellComparator.getInstance().compareTimestamps(c1, c2);
}
} | 3.26 |
hbase_SyncTable_finishBatchAndCompareHashes_rdh | /**
* Finish the currently open hash batch. Compare the target hash to the given source hash. If
* they do not match, then sync the covered key range.
*/
private void finishBatchAndCompareHashes(Context context) throws IOException, InterruptedException {
targetHasher.finishBatch();
context.getCounter(Counter.BATCHES).increment(1);
if (targetHasher.getBatchSize() == 0) {
context.getCounter(Counter.EMPTY_BATCHES).increment(1);
}
ImmutableBytesWritable targetHash = targetHasher.getBatchHash();
if (targetHash.equals(f2)) {
context.getCounter(Counter.HASHES_MATCHED).increment(1);
} else {
context.getCounter(Counter.HASHES_NOT_MATCHED).increment(1);
ImmutableBytesWritable stopRow = (nextSourceKey == null) ? new ImmutableBytesWritable(sourceTableHash.stopRow) : nextSourceKey;
if (LOG.isDebugEnabled()) {
LOG.debug((((((("Hash mismatch. Key range: " + toHex(targetHasher.getBatchStartKey())) + " to ") + toHex(stopRow)) + " sourceHash: ") + toHex(f2)) + " targetHash: ") + toHex(targetHash));
}
syncRange(context, targetHasher.getBatchStartKey(), stopRow);
}
} | 3.26 |
hbase_SyncTable_syncRange_rdh | /**
* Rescan the given range directly from the source and target tables. Count and log differences,
* and if this is not a dry run, output Puts and Deletes to make the target table match the
* source table for this range
*/
private void syncRange(Context context, ImmutableBytesWritable startRow, ImmutableBytesWritable stopRow) throws IOException, InterruptedException {
Scan scan = sourceTableHash.initScan();
scan.withStartRow(startRow.copyBytes());
scan.withStopRow(stopRow.copyBytes());
ResultScanner sourceScanner = sourceTable.getScanner(scan);
CellScanner sourceCells = new CellScanner(sourceScanner.iterator());
ResultScanner v20 = targetTable.getScanner(new Scan(scan));
CellScanner targetCells = new CellScanner(v20.iterator());
boolean rangeMatched = true;
byte[] nextSourceRow = sourceCells.nextRow();
byte[] nextTargetRow = targetCells.nextRow();
while ((nextSourceRow != null) || (nextTargetRow != null)) {
boolean rowMatched;
int rowComparison = compareRowKeys(nextSourceRow, nextTargetRow);
if (rowComparison < 0) {
if (LOG.isDebugEnabled()) {
LOG.debug("Target missing row: " + Bytes.toString(nextSourceRow));}
context.getCounter(Counter.TARGETMISSINGROWS).increment(1);
rowMatched = syncRowCells(context, nextSourceRow, sourceCells, EMPTY_CELL_SCANNER);
nextSourceRow = sourceCells.nextRow();// advance only source to next row
} else if (rowComparison > 0) {
if (LOG.isDebugEnabled()) {
LOG.debug("Source missing row: " + Bytes.toString(nextTargetRow));
}
context.getCounter(Counter.SOURCEMISSINGROWS).increment(1);
rowMatched = syncRowCells(context, nextTargetRow, EMPTY_CELL_SCANNER, targetCells);
nextTargetRow = targetCells.nextRow();// advance only target to next row
} else {
// current row is the same on both sides, compare cell by cell
rowMatched = syncRowCells(context, nextSourceRow, sourceCells, targetCells);
nextSourceRow = sourceCells.nextRow();
nextTargetRow = targetCells.nextRow();
}
if (!rowMatched) {
rangeMatched = false;
}
}
sourceScanner.close();
v20.close();
context.getCounter(rangeMatched ? Counter.RANGESMATCHED : Counter.RANGESNOTMATCHED).increment(1);
} | 3.26 |
hbase_SyncTable_nextCellInRow_rdh | /**
* Returns the next Cell in the current row or null iff none remain.
*/
public Cell nextCellInRow() {
if (f3 == null) {
// nothing left in current row
return null;
}
Cell nextCell = f3.rawCells()[nextCellInRow];
nextCellInRow++;
if (nextCellInRow == f3.size()) {
if (results.hasNext()) {
Result v29 = results.next();
Cell cell = v29.rawCells()[0];
if (Bytes.equals(currentRow, 0, currentRow.length, cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())) {
// result is part of current row
f3 = v29;
nextCellInRow = 0;
} else {
// result is part of next row, cache it
f4 = v29;
// current row is complete
f3 = null;
}
} else {
// end of data
f3 = null;
}
}
return nextCell;
} | 3.26 |
hbase_SyncTable_main_rdh | /**
* Main entry point.
*/
public static void main(String[] args) throws Exception {
int ret = ToolRunner.run(new SyncTable(HBaseConfiguration.create()), args);
System.exit(ret);
} | 3.26 |
hbase_SyncTable_syncRowCells_rdh | /**
* Compare the cells for the given row from the source and target tables. Count and log any
* differences. If not a dry run, output a Put and/or Delete needed to sync the target table to
* match the source table.
*/
private boolean syncRowCells(Context context, byte[] rowKey, CellScanner sourceCells, CellScanner targetCells) throws IOException, InterruptedException {
Put put = null;
Delete delete = null;
long matchingCells = 0;
boolean matchingRow = true;
Cell sourceCell = sourceCells.nextCellInRow();
Cell targetCell = targetCells.nextCellInRow();
while ((sourceCell != null) || (targetCell != null)) {
int cellKeyComparison = compareCellKeysWithinRow(sourceCell, targetCell);
if (cellKeyComparison < 0) {
if (LOG.isDebugEnabled()) {
LOG.debug("Target missing cell: " + sourceCell);
}
context.getCounter(Counter.TARGETMISSINGCELLS).increment(1);
matchingRow = false;
if ((!f1) && doPuts) {
if (put == null) {
put = new Put(rowKey);
}
sourceCell = checkAndResetTimestamp(sourceCell);
put.add(sourceCell);
}
sourceCell = sourceCells.nextCellInRow();
} else if (cellKeyComparison > 0) {
if (LOG.isDebugEnabled()) {
LOG.debug("Source missing cell: " + targetCell);
}
context.getCounter(Counter.SOURCEMISSINGCELLS).increment(1);
matchingRow = false;
if ((!f1) && doDeletes) {
if (delete == null) {
delete = new Delete(rowKey);
}
// add a tombstone to exactly match the target cell that is missing on the source
delete.addColumn(CellUtil.cloneFamily(targetCell), CellUtil.cloneQualifier(targetCell), targetCell.getTimestamp());
}
targetCell = targetCells.nextCellInRow();
} else {
// the cell keys are equal, now check values
if (CellUtil.matchingValue(sourceCell, targetCell)) {
matchingCells++;
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Different values: ");
LOG.debug(((" source cell: " + sourceCell) + " value: ") + Bytes.toString(sourceCell.getValueArray(), sourceCell.getValueOffset(), sourceCell.getValueLength()));
LOG.debug(((" target cell: " + targetCell) + " value: ") + Bytes.toString(targetCell.getValueArray(), targetCell.getValueOffset(), targetCell.getValueLength()));
}
context.getCounter(Counter.DIFFERENTCELLVALUES).increment(1);
matchingRow = false;
if ((!f1) && doPuts) {
// overwrite target cell
if (put == null) {
put = new Put(rowKey);
}
sourceCell = checkAndResetTimestamp(sourceCell);
put.add(sourceCell);
}
}
sourceCell = sourceCells.nextCellInRow();
targetCell = targetCells.nextCellInRow();
}
if ((!f1) && (sourceTableHash.scanBatch > 0)) {
if ((put != null) && (put.size() >= sourceTableHash.scanBatch)) {
context.write(new ImmutableBytesWritable(rowKey), put);
put = null;
}
if ((delete != null) && (delete.size() >= sourceTableHash.scanBatch)) {
context.write(new ImmutableBytesWritable(rowKey), delete);
delete = null;
}
}
}
if (!f1) {
if (put != null) {
context.write(new ImmutableBytesWritable(rowKey), put);
}
if (delete != null) {
context.write(new ImmutableBytesWritable(rowKey), delete);
}
}
if (matchingCells > 0) {
context.getCounter(Counter.MATCHINGCELLS).increment(matchingCells);
}
if (matchingRow) {
context.getCounter(Counter.MATCHINGROWS).increment(1);
return true;
} else {
context.getCounter(Counter.ROWSWITHDIFFS).increment(1);
return false;
}
} | 3.26 |
hbase_SyncTable_findNextKeyHashPair_rdh | /**
* Attempt to read the next source key/hash pair. If there are no more, set nextSourceKey to
* null
*/
private void findNextKeyHashPair() throws IOException {
boolean hasNext = sourceHashReader.next();
if (hasNext) {
nextSourceKey = sourceHashReader.getCurrentKey();
} else {
// no more keys - last hash goes to the end
nextSourceKey = null;
}
} | 3.26 |
hbase_SyncTable_moveToNextBatch_rdh | /**
* If there is an open hash batch, complete it and sync if there are diffs. Start a new batch,
* and seek to read the next source key/hash pair.
*/
private void moveToNextBatch(Context context) throws IOException, InterruptedException {
if (targetHasher.isBatchStarted()) {
finishBatchAndCompareHashes(context);
}
targetHasher.startBatch(nextSourceKey);
f2 = sourceHashReader.getCurrentHash();
findNextKeyHashPair();
} | 3.26 |
hbase_TagCompressionContext_uncompressTags_rdh | /**
* Uncompresses tags from the InputStream and writes them to the destination buffer.
*
* @param src
* Stream where the compressed tags are available
* @param dest
* Destination buffer where to write the uncompressed tags
* @param length
* Length of all tag bytes
* @throws IOException
* when the dictionary does not have the entry
*/
public void uncompressTags(InputStream src, ByteBuffer dest, int length) throws IOException {
if (dest.hasArray()) {
uncompressTags(src, dest.array(), dest.arrayOffset() + dest.position(), length);
} else {
byte[] tagBuf = new byte[length];
uncompressTags(src, tagBuf, 0, length);
dest.put(tagBuf);
}
} | 3.26 |
hbase_TagCompressionContext_compressTags_rdh | /**
* Compress tags one by one and writes to the OutputStream.
*
* @param out
* Stream to which the compressed tags to be written
* @param in
* Source buffer where tags are available
* @param offset
* Offset for the tags byte buffer
* @param length
* Length of all tag bytes
*/
public void compressTags(OutputStream out, ByteBuffer in, int offset, int length) throws IOException {
if (in.hasArray()) {
// Offset we are given is relative to ByteBuffer#arrayOffset
compressTags(out, in.array(), in.arrayOffset() + offset, length);
} else {
int pos = offset;
int endOffset = pos + length;
assert pos < endOffset;
while (pos < endOffset) {
int tagLen = ByteBufferUtils.readAsInt(in, pos, Tag.TAG_LENGTH_SIZE);
pos += Tag.TAG_LENGTH_SIZE;
Dictionary.write(out, in, pos, tagLen, tagDict);
pos += tagLen;
}
}
} | 3.26 |
hbase_HBaseServerBase_isClusterUp_rdh | /**
* Returns True if the cluster is up.
*/
public boolean isClusterUp() {
return (!clusterMode()) || this.clusterStatusTracker.isClusterUp();
} | 3.26 |
hbase_HBaseServerBase_getTableDescriptors_rdh | /**
* Returns the table descriptors implementation.
*/
public TableDescriptors getTableDescriptors() {
return this.tableDescriptors;
} | 3.26 |
hbase_HBaseServerBase_getNamedQueueRecorder_rdh | /**
* get NamedQueue Provider to add different logs to ringbuffer
*/
public NamedQueueRecorder getNamedQueueRecorder() {
return this.namedQueueRecorder;
} | 3.26 |
hbase_HBaseServerBase_getWALFileSystem_rdh | /**
* Returns the walFs.
*/
public FileSystem getWALFileSystem() {
return walFs;
} | 3.26 |
hbase_HBaseServerBase_installShutdownHook_rdh | /**
* In order to register ShutdownHook, this method is called when HMaster and HRegionServer are
* started. For details, please refer to HBASE-26951
*/
protected final void installShutdownHook() {
ShutdownHook.install(conf, dataFs, this, Thread.currentThread());
isShutdownHookInstalled = true;
} | 3.26 |
hbase_HBaseServerBase_getDataRootDir_rdh | /**
* Returns the rootDir.
*/
public Path getDataRootDir() {
return dataRootDir;
} | 3.26 |
hbase_HBaseServerBase_setupWindows_rdh | /**
* If running on Windows, do windows-specific setup.
*/
private static void setupWindows(final Configuration conf, ConfigurationManager cm) {
if (!SystemUtils.IS_OS_WINDOWS) {
HBasePlatformDependent.handle("HUP", (number, name) -> {conf.reloadConfiguration();
cm.notifyAllObservers(conf);
});
}
} | 3.26 |
hbase_HBaseServerBase_getStartcode_rdh | /**
* Returns time stamp in millis of when this server was started
*/
public long getStartcode() {
return this.startcode;
} | 3.26 |
hbase_HBaseServerBase_setupClusterConnection_rdh | /**
* Setup our cluster connection if not already initialized.
*/
protected synchronized final void setupClusterConnection() throws IOException
{
if (asyncClusterConnection == null) {
InetSocketAddress localAddress = new InetSocketAddress(rpcServices.getSocketAddress().getAddress(), 0);
User user = userProvider.getCurrent();
asyncClusterConnection = ClusterConnectionFactory.createAsyncClusterConnection(this, conf, localAddress, user);
}
} | 3.26 |
hbase_WALStreamReader_next_rdh | /**
* Read the next entry in WAL.
* <p/>
* In most cases you should just use this method, especially when reading a closed wal file for
* splitting or printing.
*/
default Entry next() throws IOException {
return next(null);
} | 3.26 |
hbase_LossyCounting_sweep_rdh | /**
* sweep low frequency data
*/
public void sweep() {
for (Map.Entry<T, Integer> entry : data.entrySet()) {
if (entry.getValue() < currentTerm) {
T metric = entry.getKey();
data.remove(metric);
if (listener != null) {
listener.sweep(metric);
}
}
}
} | 3.26 |
hbase_LossyCounting_calculateCurrentTerm_rdh | /**
* Calculate and set current term
*/
private void calculateCurrentTerm() {
this.currentTerm = ((int) (Math.ceil((1.0 * totalDataCount) / ((double) (bucketSize)))));
} | 3.26 |
hbase_RowModel_getCells_rdh | /**
* Returns the cells
*/
public List<CellModel> getCells() {
return f1;
} | 3.26 |
hbase_RowModel_setKey_rdh | /**
*
* @param key
* the row key
*/
public void setKey(byte[] key) {
this.f0 = key;
} | 3.26 |
hbase_RowModel_getKey_rdh | /**
* Returns the row key
*/
public byte[] getKey() {
return f0;
} | 3.26 |
hbase_SimpleRpcServer_stop_rdh | /**
* Stops the service. No new calls will be handled after this is called.
*/
@Override
public synchronized void stop() {
LOG.info("Stopping server on " + port);
running = false;
if (authTokenSecretMgr != null) {
authTokenSecretMgr.stop();
authTokenSecretMgr = null;
}
listener.interrupt();
listener.doStop();
responder.interrupt();
scheduler.stop();
notifyAll();
} | 3.26 |
hbase_SimpleRpcServer_closeIdle_rdh | // synch'ed to avoid explicit invocation upon OOM from colliding with
// timer task firing
synchronized void closeIdle(boolean scanAll) {
long minLastContact = EnvironmentEdgeManager.currentTime() - f0;
// concurrent iterator might miss new connections added
// during the iteration, but that's ok because they won't
// be idle yet anyway and will be caught on next scan
int closed = 0;
for (SimpleServerRpcConnection connection : connections) {
// stop if connections dropped below threshold unless scanning all
if ((!scanAll) && (size() < idleScanThreshold)) {
break;
}
// stop if not scanning all and max connections are closed
if ((((connection.isIdle() && (connection.getLastContact() < minLastContact)) && close(connection)) && (!scanAll)) && ((++closed) == maxIdleToClose)) {
break;
}
}
} | 3.26 |
hbase_SimpleRpcServer_getNumOpenConnections_rdh | /**
* The number of open RPC connections
*
* @return the number of open rpc connections
*/
@Override
public int getNumOpenConnections() {
return connectionManager.size();
} | 3.26 |
hbase_SimpleRpcServer_addConnection_rdh | /**
* Updating the readSelector while it's being used is not thread-safe, so the connection must
* be queued. The reader will drain the queue and update its readSelector before performing
* the next select
*/
public void addConnection(SimpleServerRpcConnection conn) throws IOException {
pendingConnections.add(conn);
readSelector.wakeup();
} | 3.26 |
hbase_SimpleRpcServer_getConnection_rdh | /**
* Subclasses of HBaseServer can override this to provide their own Connection implementations.
*/
protected SimpleServerRpcConnection getConnection(SocketChannel channel, long time) {
return new SimpleServerRpcConnection(this, channel, time);
} | 3.26 |
hbase_SimpleRpcServer_join_rdh | /**
* Wait for the server to be stopped. Does not wait for all subthreads to finish.
*
* @see #stop()
*/
@Override
public synchronized void join() throws InterruptedException {
while (running) {
wait();
}
} | 3.26 |
hbase_SimpleRpcServer_start_rdh | /**
* Starts the service. Must be called before any calls will be handled.
*/
@Override
public synchronized void start() {
if (started) {
return;
}
authTokenSecretMgr = createSecretManager();
if (authTokenSecretMgr != null) {
// Start AuthenticationTokenSecretManager in synchronized way to avoid race conditions in
// LeaderElector start. See HBASE-25875
synchronized(authTokenSecretMgr) {
setSecretManager(authTokenSecretMgr);
authTokenSecretMgr.start();
}
}
this.authManager = new ServiceAuthorizationManager();
HBasePolicyProvider.init(conf, authManager);
responder.start();
listener.start();
scheduler.start();
started = true;
} | 3.26 |
hbase_SimpleRpcServer_setSocketSendBufSize_rdh | /**
* Sets the socket buffer size used for responding to RPCs.
*
* @param size
* send size
*/
@Override
public void setSocketSendBufSize(int size) {
this.socketSendBufferSize = size;
} | 3.26 |
hbase_Cipher_getProvider_rdh | /**
* Return the provider for this Cipher
*/
public CipherProvider getProvider() {
return provider;
} | 3.26 |
hbase_HFileContext_isCompressedOrEncrypted_rdh | /**
* Returns true when on-disk blocks are compressed, and/or encrypted; false otherwise.
*/
public boolean isCompressedOrEncrypted() {
Compression.Algorithm v0 = m0();
boolean compressed = (v0 != null) && (v0 != Algorithm.NONE);
Encryption.Context cryptoContext = getEncryptionContext();
boolean encrypted = (cryptoContext != null) && (cryptoContext != Context.NONE);
return compressed || encrypted;
} | 3.26 |
hbase_BufferedMutatorOverAsyncBufferedMutator_getHostnameAndPort_rdh | // not always work, so may return an empty string
private String getHostnameAndPort(Throwable error) {
Matcher matcher = ADDR_MSG_MATCHER.matcher(error.getMessage());
if (matcher.matches()) {
return matcher.group(1);
} else {
return "";
}
} | 3.26 |
hbase_RatioBasedCompactionPolicy_m1_rdh | /**
* A heuristic method to decide whether to schedule a compaction request
*
* @param storeFiles
* files in the store.
* @param filesCompacting
* files being scheduled to compact.
* @return true to schedule a request.
*/
@Override
public boolean m1(Collection<HStoreFile> storeFiles, List<HStoreFile> filesCompacting) {
int numCandidates = storeFiles.size() - filesCompacting.size();
return numCandidates >= comConf.getMinFilesToCompact();
} | 3.26 |
hbase_RatioBasedCompactionPolicy_shouldPerformMajorCompaction_rdh | /* @param filesToCompact Files to compact. Can be null.
@return True if we should run a major compaction.
*/
@Override
public boolean shouldPerformMajorCompaction(Collection<HStoreFile> filesToCompact) throws IOException {
boolean result = false;
long mcTime = getNextMajorCompactTime(filesToCompact);
if (((filesToCompact == null) || filesToCompact.isEmpty()) || (mcTime == 0)) {
return result;
}
// TODO: Use better method for determining stamp of last major (HBASE-2990)
long lowTimestamp = StoreUtils.getLowestTimestamp(filesToCompact);
long now = EnvironmentEdgeManager.currentTime();
if ((lowTimestamp > 0L) && (lowTimestamp < (now - mcTime))) {
String regionInfo;
if ((this.storeConfigInfo != null) && (this.storeConfigInfo instanceof HStore)) {
regionInfo = ((HStore) (this.storeConfigInfo)).getRegionInfo().getRegionNameAsString();
} else {
regionInfo = this.toString();
}
// Major compaction time has elapsed.
long cfTTL = HConstants.FOREVER;
if (this.storeConfigInfo != null) {
cfTTL = this.storeConfigInfo.getStoreFileTtl();
}
if (filesToCompact.size() == 1) {
// Single file
HStoreFile sf = filesToCompact.iterator().next();
OptionalLong minTimestamp = sf.getMinimumTimestamp();
long oldest = (minTimestamp.isPresent()) ? now - minTimestamp.getAsLong() : Long.MIN_VALUE;
if (sf.isMajorCompactionResult() && ((cfTTL == Long.MAX_VALUE) || (oldest < cfTTL))) {
float blockLocalityIndex = sf.getHDFSBlockDistribution().getBlockLocalityIndex(DNS.getHostname(comConf.conf, ServerType.REGIONSERVER));
if (blockLocalityIndex < comConf.getMinLocalityToForceCompact()) {
LOG.debug(((((("Major compaction triggered on only store " + regionInfo) + "; to make hdfs blocks local, current blockLocalityIndex is ") + blockLocalityIndex) + " (min ") + comConf.getMinLocalityToForceCompact()) + ")");
result = true;
} else {
LOG.debug(((((((((("Skipping major compaction of " + regionInfo) + " because one (major) compacted file only, oldestTime ") + oldest) + "ms is < TTL=") + cfTTL) + " and blockLocalityIndex is ") + blockLocalityIndex) + " (min ") + comConf.getMinLocalityToForceCompact()) + ")");
}
} else if ((cfTTL != HConstants.FOREVER) && (oldest > cfTTL)) {
LOG.debug(((("Major compaction triggered on store " + regionInfo) + ", because keyvalues outdated; time since last major compaction ") + (now - lowTimestamp)) + "ms");
result = true;
}
} else {
LOG.debug(((("Major compaction triggered on store " + regionInfo) + "; time since last major compaction ") + (now - lowTimestamp)) + "ms");
result = true;
}
}
return result;
} | 3.26 |
hbase_RatioBasedCompactionPolicy_applyCompactionPolicy_rdh | /**
* -- Default minor compaction selection algorithm: choose CompactSelection from candidates --
* First exclude bulk-load files if indicated in configuration. Start at the oldest file and stop
* when you find the first file that meets compaction criteria: (1) a recently-flushed, small file
* (i.e. <= minCompactSize) OR (2) within the compactRatio of sum(newer_files) Given normal skew,
* any newer files will also meet this criteria
* <p/>
* Additional Note: If fileSizes.size() >> maxFilesToCompact, we will recurse on compact().
* Consider the oldest files first to avoid a situation where we always compact
* [end-threshold,end). Then, the last file becomes an aggregate of the previous compactions.
* normal skew: older ----> newer (increasing seqID) _ | | _ | | | | _ --|-|- |-|-
* |-|---_-------_------- minCompactSize | | | | | | | | _ | | | | | | | | | | | | | | | | | | | |
* | | | | | |
*
* @param candidates
* pre-filtrate
* @return filtered subset
*/
protected ArrayList<HStoreFile> applyCompactionPolicy(ArrayList<HStoreFile> candidates, boolean mayUseOffPeak, boolean mayBeStuck) throws IOException {
  if (candidates.isEmpty()) {
    return candidates;
  }
  // we're doing a minor compaction, let's see what files are applicable
  int start = 0;
  double ratio = comConf.getCompactionRatio();
  if (mayUseOffPeak) {
    ratio = comConf.getCompactionRatioOffPeak();
    LOG.info("Running an off-peak compaction, selection ratio = " + ratio);
  }
  // get store file sizes for incremental compacting selection.
  final int countOfFiles = candidates.size();
  long[] fileSizes = new long[countOfFiles];
  long[] sumSize = new long[countOfFiles];
  for (int i = countOfFiles - 1; i >= 0; --i) {
    HStoreFile file = candidates.get(i);
    fileSizes[i] = file.getReader().length();
    // calculate the sum of fileSizes[i,i+maxFilesToCompact-1) for algo
    int tooFar = (i + comConf.getMaxFilesToCompact()) - 1;
    sumSize[i] = (fileSizes[i] + ((i + 1) < countOfFiles ? sumSize[i + 1] : 0))
      - (tooFar < countOfFiles ? fileSizes[tooFar] : 0);
  }
  while ((countOfFiles - start) >= comConf.getMinFilesToCompact()
    && fileSizes[start] > Math.max(comConf.getMinCompactSize(),
      (long) (sumSize[start + 1] * ratio))) {
    ++start;
  }
  if (start < countOfFiles) {
    LOG.info("Default compaction algorithm has selected " + (countOfFiles - start)
      + " files from " + countOfFiles + " candidates");
  } else if (mayBeStuck) {
    // We may be stuck. Compact the latest files if we can.
    int filesToLeave = candidates.size() - comConf.getMinFilesToCompact();
    if (filesToLeave >= 0) {
      start = filesToLeave;
    }
  }
  candidates.subList(0, start).clear();
  return candidates;
} | 3.26
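The while-loop above implements the ratio test; the standalone helper below is a hedged restatement (not HBase code), where sumOfNewerFiles plays the role of sumSize[start + 1]:

// Illustrative only: a file is excluded while it is bigger than both the small-file
// bound and ratio times the (windowed) total size of the newer files.
static boolean excludedFromMinorCompaction(long fileSize, long sumOfNewerFiles,
    double ratio, long minCompactSize) {
  return fileSize > Math.max(minCompactSize, (long) (sumOfNewerFiles * ratio));
}

For example (made-up numbers), with file sizes {100, 30, 20, 10} MB from oldest to newest, ratio 1.2 and minCompactSize 16 MB: 100 > 1.2 * 60 = 72, so the oldest file is excluded; 30 <= 1.2 * 30 = 36, so selection starts there and {30, 20, 10} are compacted together.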
hbase_RatioBasedCompactionPolicy_setMinThreshold_rdh | /**
* Overwrite min threshold for compaction
*/
public void setMinThreshold(int minThreshold) {
comConf.setMinFilesToCompact(minThreshold);
} | 3.26 |
hbase_FilterListBase_filterRowCells_rdh | /**
* Filters that never filter by modifying the returned List of Cells can inherit this
* implementation that does nothing. {@inheritDoc }
 */
@Override
public void filterRowCells(List<Cell> cells) throws IOException {
for (int i = 0, n = filters.size(); i < n; i++) {
filters.get(i).filterRowCells(cells);
}
} | 3.26 |
hbase_FilterListBase_transformCell_rdh | /**
 * For FilterList, we can consider a filter list as a node in a tree, and its sub-filters as the
 * children of that node. The logic of transforming a cell through a filter list is then a
 * post-order tree traversal: for each node, before visiting the current child, we use the
 * traversal result (the transformed cell) of the previous child(ren) as the initial value.
 * (HBASE-18879)
*
* @param c
* The cell in question.
* @return the transformed cell.
*/
@Override
public Cell transformCell(Cell c) throws IOException {
if (isEmpty()) {
return super.transformCell(c);
}
Cell transformed = c;
for (int i = 0, n = filters.size(); i < n; i++) {
if (subFiltersIncludedCell.get(i)) {
transformed = filters.get(i).transformCell(transformed);
}
}
return transformed;
} | 3.26 |
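A possible (illustrative) composition of the chaining described above: with MUST_PASS_ALL and two sub-filters that both include a cell, the second filter's transformCell receives the cell already transformed by the first, so the scan result carries key-only cells.

// Classes come from org.apache.hadoop.hbase.filter and org.apache.hadoop.hbase.util.
FilterList list = new FilterList(FilterList.Operator.MUST_PASS_ALL,
  new KeyOnlyFilter(),                              // transformCell strips the value
  new ColumnPrefixFilter(Bytes.toBytes("col")));    // transformCell is the identity here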
hbase_FutureUtils_get_rdh | /**
 * A helper for getting the result of a {@link Future} with a timeout, converting any error to an
 * {@link IOException}.
*/
public static <T> T get(Future<T> future, long timeout, TimeUnit unit) throws IOException {
try {
return future.get(timeout, unit);
  } catch (InterruptedException e) {
    throw (IOException) new InterruptedIOException().initCause(e);
  } catch (ExecutionException e) {
    throw rethrow(e.getCause());
  } catch (TimeoutException e) {
throw new TimeoutIOException(e);
}
} | 3.26 |
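A hypothetical call site for the timed get above (the async admin handle is an assumption, not taken from this document):

// Block for at most 30 seconds; failures surface as IOException, timeouts as TimeoutIOException.
CompletableFuture<Void> op = asyncAdmin.flush(tableName); // hypothetical async handle
FutureUtils.get(op, 30, TimeUnit.SECONDS);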
hbase_FutureUtils_allOf_rdh | /**
* Returns a new CompletableFuture that is completed when all of the given CompletableFutures
* complete. If any of the given CompletableFutures complete exceptionally, then the returned
* CompletableFuture also does so, with a CompletionException holding this exception as its cause.
* Otherwise, the results of all given CompletableFutures could be obtained by the new returned
* CompletableFuture.
*/
public static <T> CompletableFuture<List<T>> allOf(List<CompletableFuture<T>> futures) {
return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).thenApply(v -> futures.stream().map(f -> f.getNow(null)).collect(toList()));
} | 3.26 |
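One way allOf might be used (the table handles and row key are illustrative assumptions): fan out one asynchronous get per table and wait for all of the results.

List<CompletableFuture<Result>> gets = asyncTables.stream()   // hypothetical AsyncTable handles
  .map(t -> t.get(new Get(row)))
  .collect(Collectors.toList());
CompletableFuture<List<Result>> allResults = FutureUtils.allOf(gets);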
hbase_FutureUtils_consume_rdh | /**
* Log the error if the future indicates any failure.
*/
public static void consume(CompletableFuture<?> future) {
addListener(future, (r, e) -> {
if (e != null) {
LOG.warn("Async operation fails", e);
}
});
} | 3.26 |
hbase_FutureUtils_wrapFuture_rdh | /**
 * Return a {@link CompletableFuture} which is the same as the given {@code future}, but executes all
* the callbacks in the given {@code executor}.
*/
public static <T> CompletableFuture<T> wrapFuture(CompletableFuture<T> future, Executor executor) {
CompletableFuture<T> wrappedFuture = new CompletableFuture<>();
addListener(future, (r, e) -> {
if (e != null) {
wrappedFuture.completeExceptionally(e);
} else {
wrappedFuture.complete(r);
}
}, executor);
return wrappedFuture;
} | 3.26 |
hbase_FutureUtils_failedFuture_rdh | /**
* Returns a CompletableFuture that is already completed exceptionally with the given exception.
*/
public static <T> CompletableFuture<T> failedFuture(Throwable e) {
    CompletableFuture<T> future = new CompletableFuture<>();
future.completeExceptionally(e);
return future;
} | 3.26 |
hbase_FutureUtils_addListener_rdh | /**
 * Almost the same as the {@link #addListener(CompletableFuture, BiConsumer)} method above; the only
* exception is that we will call
* {@link CompletableFuture#whenCompleteAsync(BiConsumer, Executor)}.
*
* @see #addListener(CompletableFuture, BiConsumer)
*/
@SuppressWarnings("FutureReturnValueIgnored")
public static <T> void addListener(CompletableFuture<T> future,
    BiConsumer<? super T, ? super Throwable> action, Executor executor) {
future.whenCompleteAsync((resp, error) -> {
try {
action.accept(resp, unwrapCompletionException(error));
} catch (Throwable t) {
LOG.error("Unexpected error caught when processing CompletableFuture", t);
}
}, executor);
} | 3.26 |
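An illustrative registration against the executor-based overload above; the future, handler and executor names are assumptions:

FutureUtils.addListener(scanFuture, (results, error) -> {
  if (error != null) {
    LOG.warn("Scan failed", error);
  } else {
    handleResults(results);     // hypothetical handler
  }
}, callbackExecutor);           // callbacks run on this executor, not on the completing thread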
hbase_FutureUtils_setStackTrace_rdh | // the CompletableFuture will have a stack trace starting from the root of the retry timer. If we
// just throw this exception out when calling future.get(by unwrapping the ExecutionException),
// the upper layer even can not know where is the exception thrown...
// See HBASE-22316.
private static void setStackTrace(Throwable error) {
StackTraceElement[] localStackTrace = Thread.currentThread().getStackTrace();
StackTraceElement[] originalStackTrace = error.getStackTrace();
    StackTraceElement[] newStackTrace =
      new StackTraceElement[localStackTrace.length + originalStackTrace.length + 1];
    System.arraycopy(localStackTrace, 0, newStackTrace, 0, localStackTrace.length);
    newStackTrace[localStackTrace.length] =
      new StackTraceElement("--------Future", "get--------", null, -1);
System.arraycopy(originalStackTrace, 0, newStackTrace, localStackTrace.length + 1, originalStackTrace.length);
error.setStackTrace(newStackTrace);
}
/**
* If we could propagate the given {@code error} | 3.26 |
hbase_FutureUtils_unwrapCompletionException_rdh | /**
* Get the cause of the {@link Throwable} if it is a {@link CompletionException}.
*/
public static Throwable unwrapCompletionException(Throwable error) {
if (error instanceof CompletionException) {
    Throwable cause = error.getCause();
    if (cause != null) {
      return cause;
}
}
return error;
} | 3.26 |
hbase_WALPlayer_main_rdh | /**
* Main entry point.
*
* @param args
* The command line parameters.
* @throws Exception
* When running the job fails.
*/
public static void main(String[] args) throws Exception {
int ret = ToolRunner.run(new WALPlayer(HBaseConfiguration.create()), args);
System.exit(ret);
} | 3.26 |
hbase_WALPlayer_createSubmittableJob_rdh | /**
* Sets up the actual job.
*
* @param args
* The command line parameters.
* @return The newly created job.
* @throws IOException
* When setting up the job fails.
*/
public Job createSubmittableJob(String[] args) throws IOException {
  Configuration conf = getConf();
setupTime(conf, WALInputFormat.START_TIME_KEY);
setupTime(conf, WALInputFormat.END_TIME_KEY);
String inputDirs = args[0];
String[] tables = (args.length == 1) ? new String[]{ } : args[1].split(",");
String[] tableMap;
if (args.length > 2) {
tableMap = args[2].split(",");
    if (tableMap.length != tables.length) {
      throw new IOException("The same number of tables and mapping must be provided.");
}
} else {
// if no mapping is specified, map each table to itself
tableMap = tables;
}
conf.setStrings(TABLES_KEY, tables);
conf.setStrings(TABLE_MAP_KEY, tableMap);
conf.set(FileInputFormat.INPUT_DIR, inputDirs);
Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, (NAME + "_") + EnvironmentEdgeManager.currentTime()));
job.setJarByClass(WALPlayer.class);
job.setInputFormatClass(WALInputFormat.class);
job.setMapOutputKeyClass(ImmutableBytesWritable.class);
  String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY);
if (hfileOutPath != null) {
LOG.debug((("add incremental job :" + hfileOutPath) + " from ") + inputDirs);
// WALPlayer needs ExtendedCellSerialization so that sequenceId can be propagated when
// sorting cells in CellSortReducer
job.getConfiguration().setBoolean(HFileOutputFormat2.EXTENDED_CELL_SERIALIZATION_ENABLED_KEY, true);
// the bulk HFile case
List<TableName> tableNames = getTableNameList(tables);
job.setMapperClass(WALPlayer.WALKeyValueMapper.class);
job.setReducerClass(CellSortReducer.class);
Path outputDir = new Path(hfileOutPath);
FileOutputFormat.setOutputPath(job, outputDir);
job.setMapOutputValueClass(MapReduceExtendedCell.class);
try (Connection conn = ConnectionFactory.createConnection(conf)) {
List<TableInfo> tableInfoList = new ArrayList<TableInfo>();
for (TableName tableName : tableNames) {
Table table = conn.getTable(tableName);
        RegionLocator regionLocator = conn.getRegionLocator(tableName);
        tableInfoList.add(new TableInfo(table.getDescriptor(), regionLocator));
}
      MultiTableHFileOutputFormat.configureIncrementalLoad(job, tableInfoList);
}
TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), Preconditions.class);
} else {
// output to live cluster
job.setMapperClass(WALPlayer.WALMapper.class);
job.setOutputFormatClass(MultiTableOutputFormat.class);
TableMapReduceUtil.addDependencyJars(job);
TableMapReduceUtil.initCredentials(job);
// No reducers.
job.setNumReduceTasks(0);
}
String codecCls = WALCellCodec.getWALCellCodecClass(conf).getName();
try {
TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), Class.forName(codecCls));
} catch (Exception e) {
throw new IOException("Cannot determine wal codec class " + codecCls, e);
}
return job;
} | 3.26 |
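A sketch of programmatic submission mirroring what main/run do; the WAL directory and table names below are made up:

String[] jobArgs = { "/hbase/oldWALs", "orders", "orders_restored" };
Job job = new WALPlayer(HBaseConfiguration.create()).createSubmittableJob(jobArgs);
boolean succeeded = job.waitForCompletion(true);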
hbase_WALPlayer_usage_rdh | /**
* Print usage
*
* @param errorMsg
* Error message. Can be null.
*/
private void usage(final String errorMsg) {
if ((errorMsg != null) && (errorMsg.length() > 0)) {
System.err.println("ERROR: " + errorMsg);
}
System.err.println(("Usage: " + NAME) + " [options] <WAL inputdir> [<tables> <tableMappings>]");
System.err.println(" <WAL inputdir> directory of WALs to replay.");
System.err.println(" <tables> comma separated list of tables. If no tables specified,");
System.err.println(" all are imported (even hbase:meta if present).");
System.err.println(" <tableMappings> WAL entries can be mapped to a new set of tables by " + "passing");
System.err.println(" <tableMappings>, a comma separated list of target " + "tables.");
System.err.println(" If specified, each table in <tables> must have a " + "mapping.");
System.err.println("To generate HFiles to bulk load instead of loading HBase directly, pass:");
System.err.println((" -D" + BULK_OUTPUT_CONF_KEY) + "=/path/for/output");
System.err.println(" Only one table can be specified, and no mapping allowed!");
System.err.println("To specify a time range, pass:");
System.err.println((" -D" + WALInputFormat.START_TIME_KEY) + "=[date|ms]");System.err.println((" -D" + WALInputFormat.END_TIME_KEY) + "=[date|ms]");
System.err.println(" The start and the end date of timerange (inclusive). The dates can be"); System.err.println(" expressed in milliseconds-since-epoch or yyyy-MM-dd'T'HH:mm:ss.SS " + "format.");
System.err.println(" E.g. 1234567890120 or 2009-02-13T23:32:30.12");
System.err.println("Other options:");
System.err.println((" -D" + JOB_NAME_CONF_KEY) + "=jobName");
System.err.println(" Use the specified mapreduce job name for the wal player");
System.err.println(" -Dwal.input.separator=' '");
System.err.println(" Change WAL filename separator (WAL dir names use default ','.)");
System.err.println(("For performance also consider the following options:\n" + " -Dmapreduce.map.speculative=false\n") + " -Dmapreduce.reduce.speculative=false");
} | 3.26 |
hbase_ScanWildcardColumnTracker_done_rdh | /**
* We can never know a-priori if we are done, so always return false.
*/
@Override
public boolean done() {
return false;
} | 3.26 |
hbase_ScanWildcardColumnTracker_checkColumn_rdh | /**
* {@inheritDoc } This receives puts *and* deletes.
*/
@Override
public MatchCode checkColumn(Cell cell, byte type) throws IOException {
return MatchCode.INCLUDE;
} | 3.26 |
hbase_ScanWildcardColumnTracker_checkVersion_rdh | /**
 * Check whether this version should be retained. There are 4 variables considered: if this
 * version is past max versions, skip it; if this kv has expired or was deleted, check min
 * versions to decide whether to skip it or not. Increase the version counter unless this is a
 * delete.
*/
private MatchCode checkVersion(byte type, long timestamp) {
if (!PrivateCellUtil.isDelete(type)) {
currentCount++;
}
if (currentCount > maxVersions) {
      return MatchCode.SEEK_NEXT_COL; // skip to next col
}
// keep the KV if required by minversions or it is not expired, yet
if ((currentCount <= minVersions) || (!isExpired(timestamp))) {
setTSAndType(timestamp, type);
return MatchCode.INCLUDE;
} else {
      return MatchCode.SEEK_NEXT_COL;
    }
} | 3.26 |
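A hedged restatement of the retention rule above as a standalone predicate (names and the millisecond-based expiry check are illustrative simplifications):

static boolean keepVersion(int versionsSeen, int maxVersions, int minVersions,
    long timestamp, long ttlMs, long now) {
  if (versionsSeen > maxVersions) {
    return false;                              // past max versions: seek to the next column
  }
  boolean expired = timestamp < now - ttlMs;   // simplified form of isExpired(timestamp)
  return versionsSeen <= minVersions || !expired;
}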