name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
hbase_RegionStateStore_getMergeRegions_rdh | /**
 * Returns all RegionInfos listed in the 'info:merge*' columns of the given {@code region}.
*/
public List<RegionInfo> getMergeRegions(RegionInfo region) throws IOException {
return CatalogFamilyFormat.getMergeRegions(getRegionCatalogResult(region).rawCells());
} | 3.26 |
hbase_RegionStateStore_deleteRegions_rdh | /**
* Deletes the specified regions.
*/
public void deleteRegions(final List<RegionInfo> regions)
throws IOException {
deleteRegions(regions, EnvironmentEdgeManager.currentTime());
} | 3.26 |
hbase_RegionStateStore_m0_rdh | /**
* Performs an atomic multi-mutate operation against the given table. Used by the likes of merge
* and split as these want to make atomic mutations across multiple rows.
*/
private void m0(RegionInfo ri, List<Mutation> mutations) throws IOException {
debugLogMutations(mutations);
byte[] row = Bytes.toBytes(RegionReplicaUtil.getRegionInfoForDefaultReplica(ri).getRegionNameAsString() + HConstants.DELIMITER);
MutateRowsRequest.Builder builder = MutateRowsRequest.newBuilder();
for (Mutation mutation : mutations) {
if (mutation instanceof Put) {
builder.addMutationRequest(ProtobufUtil.toMutation(MutationType.PUT, mutation));
    } else if (mutation instanceof Delete) {
builder.addMutationRequest(ProtobufUtil.toMutation(MutationType.DELETE, mutation));
} else {
throw new DoNotRetryIOException("multi in MetaEditor doesn't support " + mutation.getClass().getName());
}
}
MutateRowsRequest request = builder.build();
AsyncTable<?> table = master.getConnection().toAsyncConnection().getTable(TableName.META_TABLE_NAME);
    CompletableFuture<MutateRowsResponse> future = table.<MultiRowMutationService, MutateRowsResponse>coprocessorService(MultiRowMutationService::newStub, (stub, controller, done) -> stub.mutateRows(controller, request, done), row);
FutureUtils.get(future);
} | 3.26 |
hbase_RegionStateStore_splitRegion_rdh | // ============================================================================================
// Update Region Splitting State helpers
// ============================================================================================
/**
* Splits the region into two in an atomic operation. Offlines the parent region with the
* information that it is split into two, and also adds the daughter regions. Does not add the
* location information to the daughter regions since they are not open yet.
*/
public void splitRegion(RegionInfo parent, RegionInfo splitA, RegionInfo splitB, ServerName serverName, TableDescriptor htd) throws IOException {
long parentOpenSeqNum = HConstants.NO_SEQNUM;
if (htd.hasGlobalReplicationScope()) {
parentOpenSeqNum = getOpenSeqNumForParentRegion(parent);
}
long time = EnvironmentEdgeManager.currentTime();
// Put for parent
    Put putParent = MetaTableAccessor.makePutFromRegionInfo(RegionInfoBuilder.newBuilder(parent).setOffline(true).setSplit(true).build(), time);
    MetaTableAccessor.addDaughtersToPut(putParent, splitA, splitB);
// Puts for daughters
Put putA = MetaTableAccessor.makePutFromRegionInfo(splitA, time);
Put putB = MetaTableAccessor.makePutFromRegionInfo(splitB, time);
    if (parentOpenSeqNum > 0) {
ReplicationBarrierFamilyFormat.addReplicationBarrier(putParent, parentOpenSeqNum);
ReplicationBarrierFamilyFormat.addReplicationParent(putA, Collections.singletonList(parent));
ReplicationBarrierFamilyFormat.addReplicationParent(putB, Collections.singletonList(parent));
}
// Set initial state to CLOSED
// NOTE: If initial state is not set to CLOSED then daughter regions get added with the
// default OFFLINE state. If Master gets restarted after this step, start up sequence of
// master tries to assign these offline regions. This is followed by re-assignments of the
// daughter regions from resumed {@link SplitTableRegionProcedure}
MetaTableAccessor.addRegionStateToPut(putA, RegionInfo.DEFAULT_REPLICA_ID, State.CLOSED);
MetaTableAccessor.addRegionStateToPut(putB, RegionInfo.DEFAULT_REPLICA_ID, State.CLOSED);
// new regions, openSeqNum = 1 is fine.
addSequenceNum(putA, 1, splitA.getReplicaId());
addSequenceNum(putB, 1, splitB.getReplicaId());
// Add empty locations for region replicas of daughters so that number of replicas can be
// cached whenever the primary region is looked up from meta
int regionReplication = getRegionReplication(htd);
for (int i = 1; i < regionReplication; i++) {
      MetaTableAccessor.addEmptyLocation(putA, i);
      MetaTableAccessor.addEmptyLocation(putB, i);
}
m0(parent, Arrays.asList(putParent, putA, putB));
} | 3.26 |
hbase_RegionStateStore_deleteMergeQualifiers_rdh | /**
* Deletes merge qualifiers for the specified merge region.
*
* @param mergeRegion
* the merged region
*/
public void deleteMergeQualifiers(RegionInfo mergeRegion) throws IOException {
// NOTE: We are doing a new hbase:meta read here.
Cell[] cells = getRegionCatalogResult(mergeRegion).rawCells();
if ((cells == null) || (cells.length == 0)) {
return;
}
Delete delete = new Delete(mergeRegion.getRegionName());
List<byte[]> qualifiers = new ArrayList<>();
for (Cell cell : cells) {
if (!CatalogFamilyFormat.isMergeQualifierPrefix(cell)) {
        continue;
      }
byte[] qualifier = CellUtil.cloneQualifier(cell);
qualifiers.add(qualifier);
      delete.addColumns(HConstants.CATALOG_FAMILY, qualifier, HConstants.LATEST_TIMESTAMP);
    }
    // There is a race condition where a GCMultipleMergedRegionsProcedure may be scheduled while
    // the previous GCMultipleMergedRegionsProcedure is still going on; in this case, the second
    // GCMultipleMergedRegionsProcedure could delete the merged region by accident!
    if (qualifiers.isEmpty()) {
      LOG.info(("No merged qualifiers for region " + mergeRegion.getRegionNameAsString()) + " in meta table, they are cleaned up already, Skip.");
return;
}
try (Table table = master.getConnection().getTable(TableName.META_TABLE_NAME)) {
table.delete(delete);
}
LOG.info((("Deleted merge references in " + mergeRegion.getRegionNameAsString()) + ", deleted qualifiers ") + qualifiers.stream().map(Bytes::toStringBinary).collect(Collectors.joining(", ")));
} | 3.26 |
hbase_RegionStateStore_visitMetaForRegion_rdh | /**
* Queries META table for the passed region encoded name, delegating action upon results to the
* {@code RegionStateVisitor} passed as second parameter.
*
* @param regionEncodedName
* encoded name for the Region we want to query META for.
* @param visitor
* The {@code RegionStateVisitor} instance to react over the query
* results.
* @throws IOException
* If some error occurs while querying META or parsing results.
*/
  public void visitMetaForRegion(final String regionEncodedName, final RegionStateVisitor visitor) throws IOException {
Result result = MetaTableAccessor.scanByRegionEncodedName(master.getConnection(), regionEncodedName);
    if (result != null) {
      visitMetaEntry(visitor, result);
}
} | 3.26 |
hbase_RegionStateStore_overwriteRegions_rdh | /**
* Overwrites the specified regions from hbase:meta. Deletes old rows for the given regions and
* adds new ones. Regions added back have state CLOSED.
*
* @param regionInfos
* list of regions to be added to META
*/
public void overwriteRegions(List<RegionInfo> regionInfos, int regionReplication) throws IOException {
// use master time for delete marker and the Put
long now = EnvironmentEdgeManager.currentTime();
deleteRegions(regionInfos, now);
    // Why sleep? This is the easiest way to ensure that the previous deletes do not
// eclipse the following puts, that might happen in the same ts from the server.
// See HBASE-9906, and HBASE-9879. Once either HBASE-9879, HBASE-8770 is fixed,
// or HBASE-9905 is fixed and meta uses seqIds, we do not need the sleep.
//
// HBASE-13875 uses master timestamp for the mutations. The 20ms sleep is not needed
MetaTableAccessor.addRegionsToMeta(master.getConnection(), regionInfos, regionReplication, now + 1);
LOG.info(("Overwritten " + regionInfos.size()) + " regions to Meta");
LOG.debug("Overwritten regions: {} ", regionInfos);
} | 3.26 |
hbase_RegionStateStore_getRegionState_rdh | // ==========================================================================
// Region State
// ==========================================================================
/**
* Pull the region state from a catalog table {@link Result}.
*
* @return the region state, or null if unknown.
*/
public static State getRegionState(final Result r, RegionInfo regionInfo) {
Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, getStateColumn(regionInfo.getReplicaId()));
    if ((cell == null) || (cell.getValueLength() == 0)) {
      return null;
}
String state = Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
try {
return State.valueOf(state);
} catch (IllegalArgumentException e) {
LOG.warn("BAD value {} in hbase:meta info:state column for region {} , " + "Consider using HBCK2 setRegionState ENCODED_REGION_NAME STATE", state, regionInfo.getEncodedName());
return null;
}
} | 3.26 |
hbase_MasterRegionFlusherAndCompactor_setupConf_rdh | // inject our flush related configurations
static void setupConf(Configuration conf,
long flushSize, long flushPerChanges, long flushIntervalMs) {
conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSize);
conf.setLong(HRegion.MEMSTORE_FLUSH_PER_CHANGES, flushPerChanges);
conf.setLong(HRegion.MEMSTORE_PERIODIC_FLUSH_INTERVAL, flushIntervalMs);
LOG.info("Injected flushSize={}, flushPerChanges={}, flushIntervalMs={}", flushSize, flushPerChanges, flushIntervalMs);
} | 3.26 |
hbase_AdaptiveLifoCoDelCallQueue_updateTunables_rdh | /**
* Update tunables.
*
* @param newCodelTargetDelay
* new CoDel target delay
* @param newCodelInterval
* new CoDel interval
* @param newLifoThreshold
* new Adaptive Lifo threshold
*/
public void updateTunables(int newCodelTargetDelay, int newCodelInterval, double newLifoThreshold) {
this.codelTargetDelay = newCodelTargetDelay;
this.codelInterval = newCodelInterval;
this.lifoThreshold = newLifoThreshold;
} | 3.26 |
hbase_AdaptiveLifoCoDelCallQueue_offer_rdh | // Generic BlockingQueue methods we support
@Override
public boolean offer(CallRunner callRunner) {
return queue.offer(callRunner);
} | 3.26 |
hbase_AdaptiveLifoCoDelCallQueue_poll_rdh | // This class does NOT provide generic purpose BlockingQueue implementation,
// so to prevent misuse all other methods throw UnsupportedOperationException.
@Override
public CallRunner poll(long timeout, TimeUnit unit) throws InterruptedException {
throw new UnsupportedOperationException("This class doesn't support anything," + " but take() and offer() methods");
} | 3.26 |
hbase_AdaptiveLifoCoDelCallQueue_m0_rdh | /**
* Behaves as {@link LinkedBlockingQueue#take()}, except it will silently skip all calls which it
* thinks should be dropped.
*
* @return the head of this queue
* @throws InterruptedException
* if interrupted while waiting
*/
@Override
public CallRunner m0() throws InterruptedException {
    CallRunner cr;
    while (true) {
if ((((double) (queue.size())) / this.maxCapacity) > lifoThreshold) {
numLifoModeSwitches.increment();
cr = queue.takeLast();
} else {
cr = queue.takeFirst();
}
if (needToDrop(cr)) {
numGeneralCallsDropped.increment();
cr.drop();
} else {
return cr;
}
}
} | 3.26 |
hbase_RegionReplicaGroupingCostFunction_costPerGroup_rdh | /**
* For each primary region, it computes the total number of replicas in the array (numReplicas)
* and returns a sum of numReplicas-1 squared. For example, if the server hosts regions a, b, c,
 * d, e, f where a and b are replicas of the same region, and c, d, e are replicas of the same
 * region, it returns (2-1) * (2-1)
* + (3-1) * (3-1) + (1-1) * (1-1).
*
* @param colocatedReplicaCounts
 * a sorted array of primary region ids for the regions hosted
* @return a sum of numReplicas-1 squared for each primary region in the group.
*/
protected final long costPerGroup(Int2IntCounterMap colocatedReplicaCounts) {
    final AtomicLong cost = new AtomicLong(0);
// colocatedReplicaCounts is a sorted array of primary ids of regions. Replicas of regions
// sharing the same primary will have consecutive numbers in the array.
colocatedReplicaCounts.forEach((primary, count) -> {
if (count > 1) {
// means consecutive primaries, indicating co-location
cost.getAndAdd((count - 1) * (count - 1));
}
});
return cost.longValue();
} | 3.26 |
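To make the cost formula described in the row above concrete, here is a small standalone sketch (illustrative only, not part of the HBase sources; the class and method names are made up) that reproduces the worked example of replica groups of size 2, 3 and 1:

```java
// Illustrative sketch: sum of (numReplicas - 1)^2 over replica groups.
public class CostPerGroupExample {

  // Mirrors the penalty described above: groups with a single replica cost 0,
  // larger co-located groups cost (count - 1)^2.
  static long costPerGroup(int[] replicaCountsPerPrimary) {
    long cost = 0;
    for (int count : replicaCountsPerPrimary) {
      if (count > 1) {
        cost += (long) (count - 1) * (count - 1);
      }
    }
    return cost;
  }

  public static void main(String[] args) {
    // Groups {a,b}=2, {c,d,e}=3, {f}=1 -> (2-1)^2 + (3-1)^2 + (1-1)^2 = 5
    System.out.println(costPerGroup(new int[] { 2, 3, 1 }));
  }
}
```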
hbase_HBaseCommonTestingUtility_deleteOnExit_rdh | /**
* Returns True if we should delete testing dirs on exit.
*/
boolean deleteOnExit() {
String v = System.getProperty("hbase.testing.preserve.testdir");// Let default be true, to delete on exit.
return v == null ? true : !Boolean.parseBoolean(v);
} | 3.26 |
hbase_HBaseCommonTestingUtility_waitFor_rdh | /**
* Wrapper method for {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate)}.
*/
public <E extends Exception> long waitFor(long timeout, long interval, boolean failIfTimeout, Predicate<E> predicate) throws E {
return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate);
} | 3.26 |
hbase_HBaseCommonTestingUtility_randomFreePort_rdh | /**
* Returns a random free port and marks that port as taken. Not thread-safe. Expected to be
 * called from single-threaded test setup code.
*/
  public int randomFreePort() {
    int port = 0;
    do {
      port = randomPort();
      if (takenRandomPorts.contains(port)) {
        port = 0;
        continue;
      }
      takenRandomPorts.add(port);
      if (!portChecker.available(port)) {
        port = 0;
      }
    } while (port == 0);
return port;
} | 3.26 |
hbase_HBaseCommonTestingUtility_getConfiguration_rdh | /**
 * Returns this class's instance of {@link Configuration}.
*
* @return Instance of Configuration.
*/
public Configuration getConfiguration() {
return this.conf;
} | 3.26 |
hbase_HBaseCommonTestingUtility_deleteDir_rdh | /**
*
* @param dir
* Directory to delete
* @return True if we deleted it.
*/
boolean deleteDir(final File dir) {
if ((dir == null) || (!dir.exists())) {
return true;
}
int ntries = 0;
do {
ntries += 1;
      try {
        if (deleteOnExit()) {
          FileUtils.deleteDirectory(dir);
        }
        return true;
      } catch (IOException ex) {
        LOG.warn("Failed to delete " + dir.getAbsolutePath());
      } catch (IllegalArgumentException ex) {
        LOG.warn("Failed to delete " + dir.getAbsolutePath(), ex);
      }
    } while (ntries < 30);
return false;
} | 3.26 |
hbase_HBaseCommonTestingUtility_cleanupTestDir_rdh | /**
*
* @param subdir
* Test subdir name.
* @return True if we removed the test dir
*/
public boolean cleanupTestDir(final String subdir) {
if (this.dataTestDir == null) {
return false;
}
return deleteDir(new File(this.dataTestDir, subdir));
}
/**
*
* @return Where to write test data on local filesystem; usually
{@link #DEFAULT_BASE_TEST_DIRECTORY} | 3.26 |
hbase_HBaseCommonTestingUtility_randomPort_rdh | /**
* Returns a random port. These ports cannot be registered with IANA and are intended for
* dynamic allocation (see http://bit.ly/dynports).
*/
private int randomPort() {
return MIN_RANDOM_PORT + ThreadLocalRandom.current().nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
} | 3.26 |
hbase_HBaseCommonTestingUtility_getRandomDir_rdh | /**
*
* @return A dir with a random (uuid) name under the test dir
* @see #getBaseTestDir()
*/
public Path getRandomDir() {
return new Path(getBaseTestDir(), getRandomUUID().toString());
} | 3.26 |
hbase_HBaseCommonTestingUtility_setupDataTestDir_rdh | /**
* Sets up a directory for a test to use.
*
* @return New directory path, if created.
*/
protected Path setupDataTestDir() {
if (this.dataTestDir != null) {
LOG.warn("Data test dir already setup in " + dataTestDir.getAbsolutePath());
return null;
}
Path testPath = getRandomDir();
this.dataTestDir = new File(testPath.toString()).getAbsoluteFile();
// Set this property so if mapreduce jobs run, they will use this as their home dir.
System.setProperty("test.build.dir", this.dataTestDir.toString());
if (deleteOnExit()) {
this.dataTestDir.deleteOnExit();
}
createSubDir("hbase.local.dir", testPath, "hbase-local-dir");
return testPath;
} | 3.26 |
hbase_EntryBuffers_appendEntry_rdh | /**
* Append a log entry into the corresponding region buffer. Blocks if the total heap usage has
* crossed the specified threshold.
*/
void appendEntry(WAL.Entry entry) throws InterruptedException, IOException {
WALKey key = entry.getKey();
RegionEntryBuffer buffer;
long incrHeap;
synchronized(this) {
buffer = buffers.get(key.getEncodedRegionName());
if (buffer == null) {
buffer = new RegionEntryBuffer(key.getTableName(), key.getEncodedRegionName());
        buffers.put(key.getEncodedRegionName(), buffer);
      }
incrHeap = buffer.appendEntry(entry);
}
// If we crossed the chunk threshold, wait for more space to be available
synchronized(controller.dataAvailable) {
totalBuffered += incrHeap;
while ((totalBuffered > maxHeapUsage) && (controller.thrown.get() == null)) {
LOG.debug("Used {} bytes of buffered edits, waiting for IO threads", totalBuffered);
controller.dataAvailable.wait(2000);
}
controller.dataAvailable.notifyAll();
}
controller.checkForErrors();
} | 3.26 |
hbase_EntryBuffers_getChunkToWrite_rdh | /**
* Returns RegionEntryBuffer a buffer of edits to be written.
*/
synchronized RegionEntryBuffer getChunkToWrite() {
long biggestSize = 0;
byte[] v4 = null;
for (Map.Entry<byte[], RegionEntryBuffer> entry : buffers.entrySet()) {
long v6 = entry.getValue().heapSize();
if ((v6 > biggestSize) && (!currentlyWriting.contains(entry.getKey()))) {
biggestSize = v6;
v4 = entry.getKey();
}
}
if (v4 == null) {
return null;
}
RegionEntryBuffer buffer = buffers.remove(v4);
currentlyWriting.add(v4);
return buffer;
} | 3.26 |
hbase_MetricsRegionAggregateSourceImpl_getMetrics_rdh | /**
* Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all
* expectations of java programmers. Instead of returning anything Hadoop metrics expects
* getMetrics to push the metrics into the collector.
*
* @param collector
* the collector
* @param all
* get all the metrics regardless of when they last changed.
*/
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
MetricsRecordBuilder mrb = collector.addRecord(metricsName);
if (regionSources != null) {
for (MetricsRegionSource regionMetricSource : regionSources) {
if (regionMetricSource instanceof MetricsRegionSourceImpl) {
((MetricsRegionSourceImpl) (regionMetricSource)).snapshot(mrb, all);
}
}
metricsRegistry.snapshot(mrb, all);
}
} | 3.26 |
hbase_ByteBufferInputStream_skip_rdh | /**
* Skips <code>n</code> bytes of input from this input stream. Fewer bytes might be skipped if the
* end of the input stream is reached. The actual number <code>k</code> of bytes to be skipped is
* equal to the smaller of <code>n</code> and remaining bytes in the stream.
*
* @param n
* the number of bytes to be skipped.
* @return the actual number of bytes skipped.
*/
@Override
public long skip(long n) {
long k = Math.min(n, available());
if (k < 0) {
k = 0;
}
this.buf.position(((int) (this.buf.position() + k)));
return k;
} | 3.26 |
hbase_ByteBufferInputStream_read_rdh | /**
* Reads the next byte of data from this input stream. The value byte is returned as an
* <code>int</code> in the range <code>0</code> to <code>255</code>. If no byte is available
* because the end of the stream has been reached, the value <code>-1</code> is returned.
*
* @return the next byte of data, or <code>-1</code> if the end of the stream has been reached.
*/
@Override
public int read() {
if (this.buf.hasRemaining()) {
return this.buf.get() & 0xff;
}
return -1;
} | 3.26 |
hbase_SnapshotReferenceUtil_m0_rdh | /**
* Iterate over the snapshot store files, restored.edits and logs
*
* @param conf
* The current {@link Configuration} instance.
* @param fs
* {@link FileSystem}
* @param snapshotDir
* {@link Path} to the Snapshot directory
* @param desc
* the {@link SnapshotDescription} of the snapshot to verify
* @param visitor
* callback object to get the referenced files
* @throws IOException
* if an error occurred while scanning the directory
*/
public static void m0(final Configuration conf, final FileSystem fs, final Path snapshotDir, final SnapshotDescription desc, final SnapshotVisitor visitor) throws IOException {
visitTableStoreFiles(conf, fs, snapshotDir, desc, visitor);
} | 3.26 |
hbase_SnapshotReferenceUtil_getHFileNames_rdh | /**
* Returns the store file names in the snapshot.
*
* @param conf
* The current {@link Configuration} instance.
* @param fs
* {@link FileSystem}
* @param snapshotDir
* {@link Path} to the Snapshot directory
* @param snapshotDesc
* the {@link SnapshotDescription} of the snapshot to inspect
* @throws IOException
* if an error occurred while scanning the directory
 * @return the names of hfiles in the specified snapshot
*/
private static Set<String> getHFileNames(final Configuration conf, final FileSystem fs, final Path snapshotDir, final SnapshotDescription snapshotDesc) throws IOException {
final Set<String> names = new HashSet<>();
visitTableStoreFiles(conf, fs, snapshotDir, snapshotDesc, new StoreFileVisitor() {
@Override
public void storeFile(final RegionInfo regionInfo, final String family, final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
String hfile = storeFile.getName();
if (HFileLink.isHFileLink(hfile)) {
names.add(HFileLink.getReferencedHFileName(hfile));
} else if (StoreFileInfo.isReference(hfile)) {
Path refPath = StoreFileInfo.getReferredToFile(new Path(new Path(new Path(new Path(regionInfo.getTable().getNamespaceAsString(), regionInfo.getTable().getQualifierAsString()), regionInfo.getEncodedName()), family), hfile));
names.add(hfile);
          names.add(refPath.getName());
          if (HFileLink.isHFileLink(refPath.getName())) {
            names.add(HFileLink.getReferencedHFileName(refPath.getName()));
          }
        } else {
names.add(hfile);
}
}
});
return names;
} | 3.26 |
hbase_SnapshotReferenceUtil_verifyStoreFile_rdh | /**
* Verify the validity of the snapshot store file
*
* @param conf
* The current {@link Configuration} instance.
* @param fs
* {@link FileSystem}
* @param snapshotDir
* {@link Path} to the Snapshot directory of the snapshot to verify
* @param snapshot
* the {@link SnapshotDescription} of the snapshot to verify
* @param regionInfo
* {@link RegionInfo} of the region that contains the store file
* @param family
* family that contains the store file
* @param storeFile
* the store file to verify
* @throws CorruptedSnapshotException
* if the snapshot is corrupted
* @throws IOException
* if an error occurred while scanning the directory
*/
public static void verifyStoreFile(final Configuration conf, final FileSystem fs, final Path snapshotDir, final SnapshotDescription snapshot, final RegionInfo regionInfo, final String family, final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
TableName table = TableName.valueOf(snapshot.getTable());
String fileName = storeFile.getName();
Path refPath = null;
if (StoreFileInfo.isReference(fileName)) {
// If is a reference file check if the parent file is present in the snapshot
      refPath = new Path(new Path(regionInfo.getEncodedName(), family), fileName);
refPath = StoreFileInfo.getReferredToFile(refPath);
String refRegion = refPath.getParent().getParent().getName();
refPath = HFileLink.createPath(table, refRegion, family,
refPath.getName());
if (!HFileLink.buildFromHFileLinkPattern(conf, refPath).exists(fs)) {
throw new CorruptedSnapshotException((("Missing parent hfile for: " + fileName) + " path=") + refPath, ProtobufUtil.createSnapshotDesc(snapshot));
}
      if (storeFile.hasReference()) {
// We don't really need to look for the file on-disk
// we already have the Reference information embedded here.
return;
}
}
Path linkPath;
if ((refPath != null) && HFileLink.isHFileLink(refPath)) {
linkPath = new Path(family, refPath.getName());
} else if (HFileLink.isHFileLink(fileName)) {
linkPath = new Path(family, fileName);
} else {
linkPath = new Path(family, HFileLink.createHFileLinkName(table, regionInfo.getEncodedName(), fileName));
}
// check if the linked file exists (in the archive, or in the table dir)
HFileLink link = null;
if (MobUtils.isMobRegionInfo(regionInfo)) {
// for mob region
      link = HFileLink.buildFromHFileLinkPattern(MobUtils.getQualifiedMobRootDir(conf), HFileArchiveUtil.getArchivePath(conf), linkPath);
} else {
// not mob region
link = HFileLink.buildFromHFileLinkPattern(conf, linkPath);
}
try {
FileStatus fstat = link.getFileStatus(fs);
if (storeFile.hasFileSize() && (storeFile.getFileSize() != fstat.getLen())) {
String msg = ((((("hfile: " + fileName) + " size does not match with the expected one. ") + " found=") + fstat.getLen()) + " expected=") + storeFile.getFileSize();
LOG.error(msg);
        throw new CorruptedSnapshotException(msg, ProtobufUtil.createSnapshotDesc(snapshot));
      }
} catch (FileNotFoundException e) {
String msg = ((((("Can't find hfile: " + fileName) + " in the real (") + link.getOriginPath()) + ") or archive (") + link.getArchivePath()) + ") directory for the primary table.";
LOG.error(msg);
throw new CorruptedSnapshotException(msg, ProtobufUtil.createSnapshotDesc(snapshot));
}
} | 3.26 |
hbase_SnapshotReferenceUtil_m1_rdh | /**
* Returns the store file names in the snapshot.
*
* @param conf
* The current {@link Configuration} instance.
* @param fs
* {@link FileSystem}
* @param snapshotDir
* {@link Path} to the Snapshot directory
* @throws IOException
* if an error occurred while scanning the directory
 * @return the names of hfiles in the specified snapshot
*/
public static Set<String> m1(final Configuration conf, final FileSystem fs, final Path snapshotDir) throws IOException {
SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
return getHFileNames(conf, fs, snapshotDir, desc);
} | 3.26 |
hbase_SnapshotReferenceUtil_visitTableStoreFiles_rdh | /**
 * Iterate over the snapshot store files
*
* @param conf
* The current {@link Configuration} instance.
* @param fs
* {@link FileSystem}
* @param snapshotDir
* {@link Path} to the Snapshot directory
* @param desc
* the {@link SnapshotDescription} of the snapshot to verify
* @param visitor
* callback object to get the store files
* @throws IOException
* if an error occurred while scanning the directory
*/
static void visitTableStoreFiles(final Configuration conf, final FileSystem fs, final Path snapshotDir, final SnapshotDescription desc, final StoreFileVisitor visitor) throws IOException {
SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, desc);
List<SnapshotRegionManifest> regionManifests = manifest.getRegionManifests();
if ((regionManifests == null) || regionManifests.isEmpty()) {
LOG.debug("No manifest files present: " + snapshotDir);
return;
}
for (SnapshotRegionManifest regionManifest : regionManifests) {
visitRegionStoreFiles(regionManifest, visitor);
}
} | 3.26 |
hbase_SnapshotReferenceUtil_verifySnapshot_rdh | /**
* Verify the validity of the snapshot
*
* @param conf
* The current {@link Configuration} instance.
* @param fs
* {@link FileSystem}
* @param manifest
* snapshot manifest to inspect
* @throws CorruptedSnapshotException
* if the snapshot is corrupted
* @throws IOException
* if an error occurred while scanning the directory
*/
public static void verifySnapshot(final Configuration conf, final FileSystem fs, final SnapshotManifest manifest) throws IOException {
final SnapshotDescription snapshotDesc = manifest.getSnapshotDescription();
final Path snapshotDir = manifest.getSnapshotDir();
concurrentVisitReferencedFiles(conf, fs, manifest, "VerifySnapshot", new StoreFileVisitor() {
@Override
public void storeFile(final RegionInfo regionInfo, final String family, final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
verifyStoreFile(conf, fs, snapshotDir, snapshotDesc, regionInfo, family, storeFile);
}
});
} | 3.26 |
hbase_SnapshotReferenceUtil_visitRegionStoreFiles_rdh | /**
* Iterate over the snapshot store files in the specified region
*
* @param manifest
* snapshot manifest to inspect
* @param visitor
* callback object to get the store files
* @throws IOException
* if an error occurred while scanning the directory
*/
public static void visitRegionStoreFiles(final SnapshotRegionManifest manifest, final StoreFileVisitor visitor) throws IOException {
RegionInfo regionInfo = ProtobufUtil.toRegionInfo(manifest.getRegionInfo());
    for (SnapshotRegionManifest.FamilyFiles familyFiles : manifest.getFamilyFilesList()) {
      String familyName = familyFiles.getFamilyName().toStringUtf8();
for (SnapshotRegionManifest.StoreFile storeFile : familyFiles.getStoreFilesList()) {
visitor.storeFile(regionInfo, familyName, storeFile);
}
}
} | 3.26 |
hbase_SnapshotReferenceUtil_visitReferencedFiles_rdh | /**
* Iterate over the snapshot store files
*
* @param conf
* The current {@link Configuration} instance.
* @param fs
* {@link FileSystem}
* @param snapshotDir
* {@link Path} to the Snapshot directory
* @param visitor
* callback object to get the referenced files
* @throws IOException
* if an error occurred while scanning the directory
*/
public static void visitReferencedFiles(final Configuration conf, final FileSystem fs, final Path snapshotDir, final SnapshotVisitor visitor) throws IOException {
SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
m0(conf, fs, snapshotDir, desc, visitor);
} | 3.26 |
hbase_HBaseSaslRpcServer_unwrap_rdh | /**
* Unwrap InvalidToken exception, otherwise return the one passed in.
*/
public static Throwable unwrap(Throwable e) {
    Throwable cause = e;
while (cause != null) {
if (cause instanceof InvalidToken) {
return cause;
}
cause = cause.getCause();
}
return e;
} | 3.26 |
hbase_HFileArchiveTableMonitor_shouldArchiveTable_rdh | /**
* Determine if the given table should or should not allow its hfiles to be deleted in the archive
*
* @param tableName
* name of the table to check
* @return <tt>true</tt> if its store files should be retained, <tt>false</tt> otherwise
*/
public synchronized boolean shouldArchiveTable(String tableName) {
return archivedTables.contains(tableName);
} | 3.26 |
hbase_HFileArchiveTableMonitor_addTable_rdh | /**
* Add the named table to be those being archived. Attempts to register the table
*
* @param table
* name of the table to be registered
*/
public synchronized void addTable(String table) {
if (this.shouldArchiveTable(table)) {
LOG.debug(("Already archiving table: " + table) + ", ignoring it");
return;
}
    archivedTables.add(table);
  }
hbase_HFileArchiveTableMonitor_setArchiveTables_rdh | /**
* Set the tables to be archived. Internally adds each table and attempts to register it.
* <p>
* <b>Note: All previous tables will be removed in favor of these tables.</b>
*
* @param tables
* add each of the tables to be archived.
*/
public synchronized void setArchiveTables(List<String> tables) {
archivedTables.clear();
archivedTables.addAll(tables);
} | 3.26 |
hbase_NettyAsyncFSWALConfigHelper_m0_rdh | /**
* Set the EventLoopGroup and channel class for {@code AsyncFSWALProvider}.
*/
public static void m0(Configuration conf, EventLoopGroup group, Class<? extends Channel> channelClass) {
Preconditions.checkNotNull(group, "group is null");
Preconditions.checkNotNull(channelClass,
"channel class is null");
conf.set(EVENT_LOOP_CONFIG, CONFIG_NAME);
EVENT_LOOP_CONFIG_MAP.put(CONFIG_NAME, Pair.<EventLoopGroup, Class<? extends Channel>>newPair(group, channelClass));
} | 3.26 |
hbase_ExportUtils_usage_rdh | /**
* Common usage for other export tools.
*
* @param errorMsg
* Error message. Can be null.
*/
public static void usage(final String errorMsg) {
if ((errorMsg != null) && (errorMsg.length() > 0)) {
System.err.println("ERROR: " + errorMsg);
}
System.err.println("Usage: Export [-D <property=value>]* <tablename> <outputdir> [<versions> " + "[<starttime> [<endtime>]] [^[regex pattern] or [Prefix] to filter]]\n");
System.err.println(" Note: -D properties will be applied to the conf used. ");
System.err.println(" For example: ");
System.err.println((" -D " + FileOutputFormat.COMPRESS) + "=true");
System.err.println((" -D " + FileOutputFormat.COMPRESS_CODEC) + "=org.apache.hadoop.io.compress.GzipCodec");
System.err.println((" -D " + FileOutputFormat.COMPRESS_TYPE) + "=BLOCK");System.err.println(" Additionally, the following SCAN properties can be specified");
System.err.println(" to control/limit what is exported..");
System.err.println((" -D " + TableInputFormat.SCAN_COLUMN_FAMILY) + "=<family1>,<family2>, ...");
System.err.println((" -D " + RAW_SCAN) + "=true");
System.err.println((" -D " + TableInputFormat.SCAN_ROW_START) + "=<ROWSTART>");
System.err.println((" -D " + TableInputFormat.SCAN_ROW_STOP) + "=<ROWSTOP>");
System.err.println((" -D " + HConstants.HBASE_CLIENT_SCANNER_CACHING) + "=100");
System.err.println((" -D " + EXPORT_VISIBILITY_LABELS) + "=<labels>");
System.err.println(((((("For tables with very wide rows consider setting the batch size as below:\n" + " -D ") + EXPORT_BATCHING) + "=10\n") + " -D ") + EXPORT_CACHING) + "=100");
} | 3.26 |
hbase_TextSortReducer_doSetup_rdh | /**
* Handles common parameter initialization that a subclass might want to leverage.
*/
protected void doSetup(Context context, Configuration conf) {
// If a custom separator has been used,
// decode it back from Base64 encoding.
separator = conf.get(ImportTsv.SEPARATOR_CONF_KEY);
if (separator == null) {
separator = ImportTsv.DEFAULT_SEPARATOR;
} else {
separator = Bytes.toString(Base64.getDecoder().decode(separator));
}
// Should never get 0 as we are setting this to a valid value in job configuration.
ts = conf.getLong(ImportTsv.TIMESTAMP_CONF_KEY, 0);
    skipBadLines = context.getConfiguration().getBoolean(ImportTsv.SKIP_LINES_CONF_KEY, true);
    badLineCount = context.getCounter("ImportTsv", "Bad Lines");
} | 3.26 |
hbase_TextSortReducer_setup_rdh | /**
* Handles initializing this class with objects specific to it (i.e., the parser). Common
 * initialization that might be leveraged by a subclass is done in <code>doSetup</code>. Hence a
 * subclass may choose to override this method and call <code>doSetup</code> as well before
 * handling its own custom params.
*/
@Override
protected void setup(Context context) {
    Configuration conf = context.getConfiguration();
doSetup(context, conf);
parser = new ImportTsv.TsvParser(conf.get(ImportTsv.COLUMNS_CONF_KEY), separator);
if (parser.getRowKeyColumnIndex() == (-1)) {
throw new RuntimeException("No row key column specified");
}
this.kvCreator = new CellCreator(conf);
} | 3.26 |
hbase_BalancerClusterState_registerRegion_rdh | /**
* Helper for Cluster constructor to handle a region
*/
private void registerRegion(RegionInfo region, int regionIndex, int serverIndex, Map<String, Deque<BalancerRegionLoad>> loads, RegionHDFSBlockLocationFinder regionFinder) {
String tableName = region.getTable().getNameAsString();
if (!tablesToIndex.containsKey(tableName)) {
tables.add(tableName);
tablesToIndex.put(tableName, tablesToIndex.size());
}
int tableIndex = tablesToIndex.get(tableName);
regionsToIndex.put(region, regionIndex);
regions[regionIndex] = region;
regionIndexToServerIndex[regionIndex] = serverIndex;
initialRegionIndexToServerIndex[regionIndex] = serverIndex;
    regionIndexToTableIndex[regionIndex] = tableIndex;
// region load
if (loads != null) {
Deque<BalancerRegionLoad> rl = loads.get(region.getRegionNameAsString());
// That could have failed if the RegionLoad is using the other regionName
if (rl == null) {
// Try getting the region load using encoded name.
rl = loads.get(region.getEncodedName());
}
regionLoads[regionIndex] = rl;
}
    if (regionFinder != null) {
// region location
List<ServerName> loc = regionFinder.getTopBlockLocations(region);
regionLocations[regionIndex] = new int[loc.size()];
for (int v48 = 0; v48 < loc.size(); v48++) {
regionLocations[regionIndex][v48] = (loc.get(v48) == null) ? -1 : serversToIndex.get(loc.get(v48).getAddress()) == null ? -1 : serversToIndex.get(loc.get(v48).getAddress());
}
}
} | 3.26 |
hbase_BalancerClusterState_getOrComputeLocality_rdh | /**
* Looks up locality from cache of localities. Will create cache if it does not already exist.
*/
public float getOrComputeLocality(int region, int entity, BalancerClusterState.LocalityType type) {
    switch (type) {
      case SERVER:
        return getLocalityOfRegion(region, entity);
      case RACK:
        return getOrComputeRackLocalities()[region][entity];
      default:
        throw new IllegalArgumentException("Unsupported LocalityType: " + type);
}
} | 3.26 |
hbase_BalancerClusterState_getOrComputeRackLocalities_rdh | /**
* Retrieves and lazily initializes a field storing the locality of every region/server
* combination
*/
public float[][] getOrComputeRackLocalities() {
if ((rackLocalities == null) || (regionsToMostLocalEntities == null)) {
computeCachedLocalities();
}
return rackLocalities;
} | 3.26 |
hbase_BalancerClusterState_getOrComputeWeightedRegionCacheRatio_rdh | /**
* Returns the weighted cache ratio of a region on the given region server
*/
public float getOrComputeWeightedRegionCacheRatio(int region, int server) {
return getTotalRegionHFileSizeMB(region) * getOrComputeRegionCacheRatio(region, server);
} | 3.26 |
hbase_BalancerClusterState_getOrComputeWeightedLocality_rdh | /**
* Returns locality weighted by region size in MB. Will create locality cache if it does not
* already exist.
*/
public double getOrComputeWeightedLocality(int region, int server, BalancerClusterState.LocalityType type) {
return getRegionSizeMB(region) * getOrComputeLocality(region, server, type);
} | 3.26 |
hbase_BalancerClusterState_computeRegionServerRegionCacheRatio_rdh | /**
* Populate the maps containing information about how much a region is cached on a region server.
*/
private void computeRegionServerRegionCacheRatio() {
regionIndexServerIndexRegionCachedRatio = new HashMap<>();
f1 = new int[numRegions];
for (int region = 0; region < numRegions; region++) {
float bestRegionCacheRatio = 0.0F;
int serverWithBestRegionCacheRatio = 0;
for (int server = 0; server < numServers; server++) {
float regionCacheRatio = getRegionCacheRatioOnRegionServer(region, server);
if ((regionCacheRatio > 0.0F) || (server == regionIndexToServerIndex[region])) {
// A region with cache ratio 0 on a server means nothing. Hence, just make a note of
// cache ratio only if the cache ratio is greater than 0.
Pair<Integer, Integer> regionServerPair = new Pair<>(region, server);
          regionIndexServerIndexRegionCachedRatio.put(regionServerPair, regionCacheRatio);
        }
        if (regionCacheRatio > bestRegionCacheRatio) {
          serverWithBestRegionCacheRatio = server;
          // If the server currently hosting the region has equal cache ratio to a historical
          // server, consider the current server to keep hosting the region
          bestRegionCacheRatio = regionCacheRatio;
        } else if ((regionCacheRatio == bestRegionCacheRatio) && (server == regionIndexToServerIndex[region])) {
          // If two servers have same region cache ratio, then the server currently hosting the region
          // should retain the region
          serverWithBestRegionCacheRatio = server;
}
}
f1[region] = serverWithBestRegionCacheRatio;
Pair<Integer, Integer> regionServerPair = new Pair<>(region, regionIndexToServerIndex[region]);
float tempRegionCacheRatio = regionIndexServerIndexRegionCachedRatio.get(regionServerPair);
if (tempRegionCacheRatio > bestRegionCacheRatio) {
LOG.warn("INVALID CONDITION: region {} on server {} cache ratio {} is greater than the " + "best region cache ratio {} on server {}", regions[region].getEncodedName(), servers[regionIndexToServerIndex[region]], tempRegionCacheRatio, bestRegionCacheRatio, servers[serverWithBestRegionCacheRatio]);
}
}
} | 3.26 |
hbase_BalancerClusterState_wouldLowerAvailability_rdh | /**
* Return true if the placement of region on server would lower the availability of the region in
* question
*
* @return true or false
*/
boolean wouldLowerAvailability(RegionInfo regionInfo, ServerName serverName) {
if (!serversToIndex.containsKey(serverName.getAddress())) {
      // safeguard against race between cluster.servers and servers from LB method args
      return false;
    }
int server = serversToIndex.get(serverName.getAddress());
int v83 = regionsToIndex.get(regionInfo);
// Region replicas for same region should better assign to different servers
for (int i : regionsPerServer[server]) {
RegionInfo otherRegionInfo = regions[i];
if (RegionReplicaUtil.isReplicasForSameRegion(regionInfo, otherRegionInfo)) {
return true;
}
}
int primary = regionIndexToPrimaryIndex[v83];
if (primary == (-1)) {
      return false;
}
// there is a subset relation for server < host < rack
// check server first
int result = checkLocationForPrimary(server, colocatedReplicaCountsPerServer, primary);
if (result != 0) {
return result > 0;
}
// check host
if (multiServersPerHost) {
result = checkLocationForPrimary(serverIndexToHostIndex[server], colocatedReplicaCountsPerHost, primary);
if (result != 0) {
return result > 0;
}
}
// check rack
if (numRacks > 1) {
result = checkLocationForPrimary(serverIndexToRackIndex[server], colocatedReplicaCountsPerRack, primary);
if (result != 0) {
return result > 0;
}
}
return false;
} | 3.26 |
hbase_BalancerClusterState_computeCachedLocalities_rdh | /**
* Computes and caches the locality for each region/rack combinations, as well as storing a
* mapping of region -> server and region -> rack such that server and rack have the highest
* locality for region
*/
private void computeCachedLocalities() {
rackLocalities = new float[numRegions][numRacks];
regionsToMostLocalEntities = new int[LocalityType.values().length][numRegions];
// Compute localities and find most local server per region
for (int region = 0; region < numRegions; region++) {
int serverWithBestLocality = 0;
float bestLocalityForRegion = 0;
for (int server = 0; server < numServers; server++) {
// Aggregate per-rack locality
        float locality = getLocalityOfRegion(region, server);
        int rack = serverIndexToRackIndex[server];
int numServersInRack = serversPerRack[rack].length;
rackLocalities[region][rack] += locality / numServersInRack;
if (locality > bestLocalityForRegion) {
serverWithBestLocality = server;
bestLocalityForRegion = locality;
}
}
regionsToMostLocalEntities[LocalityType.SERVER.ordinal()][region] = serverWithBestLocality;
// Find most local rack per region
int rackWithBestLocality = 0;
float v60 = 0.0F;
for (int rack = 0; rack < numRacks; rack++) {
float rackLocality = rackLocalities[region][rack];
        if (rackLocality > v60) {
v60 = rackLocality;
rackWithBestLocality = rack;
}
}
regionsToMostLocalEntities[LocalityType.RACK.ordinal()][region] = rackWithBestLocality;
}
} | 3.26 |
hbase_BalancerClusterState_getRegionCacheRatioOnRegionServer_rdh | /**
* Returns the amount by which a region is cached on a given region server. If the region is not
* currently hosted on the given region server, then find out if it was previously hosted there
* and return the old cache ratio.
*/
protected float getRegionCacheRatioOnRegionServer(int region, int regionServerIndex) {
float regionCacheRatio = 0.0F;
// Get the current region cache ratio if the region is hosted on the server regionServerIndex
    for (int regionIndex : regionsPerServer[regionServerIndex]) {
if (region != regionIndex) {
continue;
}
      Deque<BalancerRegionLoad> regionLoadList = regionLoads[regionIndex];
// The region is currently hosted on this region server. Get the region cache ratio for this
// region on this server
regionCacheRatio = (regionLoadList == null) ? 0.0F : regionLoadList.getLast().getCurrentRegionCacheRatio();
return regionCacheRatio;
}
// Region is not currently hosted on this server. Check if the region was cached on this
// server earlier. This can happen when the server was shutdown and the cache was persisted.
// Search using the region name and server name and not the index id and server id as these ids
// may change when a server is marked as dead or a new server is added.
String regionEncodedName = regions[region].getEncodedName();
ServerName serverName = servers[regionServerIndex];
if ((regionCacheRatioOnOldServerMap != null) && regionCacheRatioOnOldServerMap.containsKey(regionEncodedName)) {
Pair<ServerName, Float> cacheRatioOfRegionOnServer = regionCacheRatioOnOldServerMap.get(regionEncodedName);
      if (ServerName.isSameAddress(cacheRatioOfRegionOnServer.getFirst(), serverName)) {
        regionCacheRatio = cacheRatioOfRegionOnServer.getSecond();
if (LOG.isDebugEnabled()) {
LOG.debug("Old cache ratio found for region {} on server {}: {}", regionEncodedName, serverName, regionCacheRatio);
}
}
}
return regionCacheRatio;
} | 3.26 |
hbase_BalancerClusterState_getRegionSizeMB_rdh | /**
* Returns the size in MB from the most recent RegionLoad for region
*/
public int getRegionSizeMB(int region) {
Deque<BalancerRegionLoad> load = regionLoads[region];
// This means regions have no actual data on disk
if (load == null) {
return 0;
}
return regionLoads[region].getLast().getStorefileSizeMB();
} | 3.26 |
hbase_BalancerClusterState_getRackForRegion_rdh | /**
* Maps region index to rack index
*/
public int getRackForRegion(int region) {
return serverIndexToRackIndex[regionIndexToServerIndex[region]];
} | 3.26 |
hbase_BalancerClusterState_getOrComputeRegionsToMostLocalEntities_rdh | /**
 * Lazily initializes and retrieves a mapping of region -> server for which the region has the
 * highest locality
*/
public int[] getOrComputeRegionsToMostLocalEntities(BalancerClusterState.LocalityType type) {
    if ((rackLocalities == null) || (regionsToMostLocalEntities == null)) {
computeCachedLocalities();
}
return regionsToMostLocalEntities[type.ordinal()];
} | 3.26 |
hbase_BalancerClusterState_updateForLocation_rdh | /**
* Common method for per host and per Location region index updates when a region is moved.
*
* @param serverIndexToLocation
* serverIndexToHostIndex or serverIndexToLocationIndex
* @param regionsPerLocation
* regionsPerHost or regionsPerLocation
* @param colocatedReplicaCountsPerLocation
* colocatedReplicaCountsPerHost or
* colocatedReplicaCountsPerRack
*/
private void updateForLocation(int[] serverIndexToLocation, int[][] regionsPerLocation, Int2IntCounterMap[] colocatedReplicaCountsPerLocation, int oldServer, int newServer, int primary, int region) {
    int oldLocation = (oldServer >= 0) ? serverIndexToLocation[oldServer] : -1;
    int newLocation = serverIndexToLocation[newServer];
if (newLocation != oldLocation) {
regionsPerLocation[newLocation] = addRegion(regionsPerLocation[newLocation], region);
colocatedReplicaCountsPerLocation[newLocation].getAndIncrement(primary);
if (oldLocation >= 0) {
regionsPerLocation[oldLocation] = removeRegion(regionsPerLocation[oldLocation], region);
colocatedReplicaCountsPerLocation[oldLocation].getAndDecrement(primary);
}
}
} | 3.26 |
hbase_BalancerClusterState_checkLocationForPrimary_rdh | /**
* Common method for better solution check.
*
* @param colocatedReplicaCountsPerLocation
* colocatedReplicaCountsPerHost or
* colocatedReplicaCountsPerRack
* @return 1 for better, -1 for no better, 0 for unknown
*/
private int checkLocationForPrimary(int location, Int2IntCounterMap[] colocatedReplicaCountsPerLocation, int primary) {
if (colocatedReplicaCountsPerLocation[location].containsKey(primary)) {
// check for whether there are other Locations that we can place this region
for (int i = 0; i < colocatedReplicaCountsPerLocation.length; i++) {
if ((i != location) && (!colocatedReplicaCountsPerLocation[i].containsKey(primary))) {
return 1;// meaning there is a better Location
}
}
return -1;// there is not a better Location to place this
}
return 0;
} | 3.26 |
hbase_BalancerClusterState_serverHasTooFewRegions_rdh | /**
 * Returns true iff a given server has fewer regions than the balanced amount
*/
public boolean serverHasTooFewRegions(int server) {
    int minLoad = this.numRegions / numServers;
int numRegions = getNumRegions(server);
return numRegions < minLoad;
} | 3.26 |
hbase_BalancerClusterState_getTotalRegionHFileSizeMB_rdh | /**
* Returns the size of hFiles from the most recent RegionLoad for region
*/
public int getTotalRegionHFileSizeMB(int region) {
Deque<BalancerRegionLoad> load = regionLoads[region];
if (load == null) {
// This means, that the region has no actual data on disk
return 0;
}
return regionLoads[region].getLast().getRegionSizeMB();
} | 3.26 |
hbase_ServerMetrics_getVersion_rdh | /**
* Returns the string type version of a regionserver.
*/
  default String getVersion() {
return "0.0.0";
} | 3.26 |
hbase_ServerMetrics_getVersionNumber_rdh | /**
* Returns the version number of a regionserver.
*/
default int getVersionNumber() {
return 0;
} | 3.26 |
hbase_ReplicationPeer_isPeerEnabled_rdh | /**
* Test whether the peer is enabled.
*
* @return {@code true} if enabled, otherwise {@code false}.
*/
default boolean isPeerEnabled() {
return getPeerState() == PeerState.ENABLED;
} | 3.26 |
hbase_HBackupFileSystem_getBackupTmpDirPath_rdh | /**
* Get backup temporary directory
*
* @param backupRootDir
* backup root
* @return backup tmp directory path
*/
public static Path getBackupTmpDirPath(String backupRootDir) {
return new Path(backupRootDir, ".tmp");
} | 3.26 |
hbase_HBackupFileSystem_checkImageManifestExist_rdh | /**
 * Check whether the backup image path exists and whether there is a manifest file in the path.
*
* @param backupManifestMap
* If all the manifests are found, then they are put into this map
* @param tableArray
* the tables involved
* @throws IOException
* exception
*/
public static void checkImageManifestExist(HashMap<TableName, BackupManifest> backupManifestMap, TableName[] tableArray, Configuration conf, Path backupRootPath, String backupId) throws IOException {
for (TableName tableName : tableArray) {
BackupManifest manifest = getManifest(conf, backupRootPath, backupId);
backupManifestMap.put(tableName, manifest);
}
} | 3.26 |
hbase_HBackupFileSystem_getManifestPath_rdh | // TODO we do not keep WAL files anymore
// Move manifest file to other place
private static Path getManifestPath(Configuration conf, Path backupRootPath, String backupId) throws IOException {
FileSystem v0 = backupRootPath.getFileSystem(conf);
Path manifestPath = new Path((getBackupPath(backupRootPath.toString(), backupId) + Path.SEPARATOR) + BackupManifest.MANIFEST_FILE_NAME);
if (!v0.exists(manifestPath)) {
String errorMsg = ((((((("Could not find backup manifest " + BackupManifest.MANIFEST_FILE_NAME) + " for ") + backupId) + ". File ") + manifestPath) + " does not exists. Did ") + backupId) + " correspond to previously taken backup ?";
throw new IOException(errorMsg);
}
return manifestPath;
} | 3.26 |
hbase_HBackupFileSystem_getTableBackupDir_rdh | /**
* Given the backup root dir, backup id and the table name, return the backup image location,
 * which is also where the backup manifest file is. The return value looks like:
* "hdfs://backup.hbase.org:9000/user/biadmin/backup/backup_1396650096738/default/t1_dn/", where
* "hdfs://backup.hbase.org:9000/user/biadmin/backup" is a backup root directory
*
* @param backupRootDir
* backup root directory
* @param backupId
* backup id
* @param tableName
* table name
* @return backupPath String for the particular table
*/
public static String getTableBackupDir(String backupRootDir, String backupId, TableName tableName) {
return ((((((backupRootDir + Path.SEPARATOR) + backupId) + Path.SEPARATOR) + tableName.getNamespaceAsString()) + Path.SEPARATOR) + tableName.getQualifierAsString()) + Path.SEPARATOR;
} | 3.26 |
hbase_HBackupFileSystem_getTableBackupPath_rdh | /**
* Given the backup root dir, backup id and the table name, return the backup image location,
 * which is also where the backup manifest file is. The return value looks like:
* "hdfs://backup.hbase.org:9000/user/biadmin/backup/backup_1396650096738/default/t1_dn/", where
* "hdfs://backup.hbase.org:9000/user/biadmin/backup" is a backup root directory
*
* @param backupRootPath
* backup root path
* @param tableName
* table name
* @param backupId
* backup Id
* @return backupPath for the particular table
 */
  public static Path getTableBackupPath(TableName tableName, Path backupRootPath, String backupId) {
return new Path(getTableBackupDir(backupRootPath.toString(),
backupId, tableName));
} | 3.26 |
hbase_HBackupFileSystem_getBackupTmpDirPathForBackupId_rdh | /**
* Get backup tmp directory for backupId
*
* @param backupRoot
* backup root
* @param backupId
* backup id
* @return backup tmp directory path
*/
public static Path getBackupTmpDirPathForBackupId(String backupRoot, String backupId) {
return new Path(getBackupTmpDirPath(backupRoot), backupId);
} | 3.26 |
hbase_FlushNonSloppyStoresFirstPolicy_selectStoresToFlush_rdh | /**
* Returns the stores need to be flushed.
*/
@Override
public Collection<HStore> selectStoresToFlush() {
Collection<HStore> specificStoresToFlush = new HashSet<>();
for (HStore store : regularStores) {
if (shouldFlush(store) || region.shouldFlushStore(store)) {
specificStoresToFlush.add(store);
}
}
if (!specificStoresToFlush.isEmpty()) {
return specificStoresToFlush;
}
for (HStore store : sloppyStores) {
if (shouldFlush(store)) {
specificStoresToFlush.add(store);
}
}
if (!specificStoresToFlush.isEmpty()) {
return specificStoresToFlush;
}
return region.stores.values();
} | 3.26 |
hbase_ExponentialClientBackoffPolicy_scale_rdh | /**
* Scale valueIn in the range [baseMin,baseMax] to the range [limitMin,limitMax]
*/
private static double scale(double valueIn, double baseMin, double baseMax, double limitMin, double limitMax) {
Preconditions.checkArgument(baseMin <= baseMax, "Illegal source range [%s,%s]", baseMin, baseMax);
    Preconditions.checkArgument(limitMin <= limitMax, "Illegal target range [%s,%s]", limitMin, limitMax);
    Preconditions.checkArgument((valueIn >= baseMin) && (valueIn <= baseMax), "Value %s must be within the range [%s,%s]", valueIn, baseMin, baseMax);
return (((limitMax - limitMin) * (valueIn - baseMin)) / (baseMax - baseMin)) + limitMin;
} | 3.26 |
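As a quick sanity check of the linear rescaling in the row above, here is a hedged standalone sketch (illustrative only, not HBase code; the class name and the example values are made up) that maps a value from one range onto another with the same formula:

```java
// Illustrative only: maps valueIn from [baseMin, baseMax] onto [limitMin, limitMax]
// using the same linear formula as the scale() method above.
public class ScaleExample {
  static double scale(double valueIn, double baseMin, double baseMax, double limitMin, double limitMax) {
    return ((limitMax - limitMin) * (valueIn - baseMin)) / (baseMax - baseMin) + limitMin;
  }

  public static void main(String[] args) {
    // A value of 0.25 in [0, 1] maps to 25.0 in [0, 100].
    System.out.println(scale(0.25, 0.0, 1.0, 0.0, 100.0)); // prints 25.0
  }
}
```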
hbase_AsyncTableRegionLocator_getEndKeys_rdh | /**
* Gets the ending row key for every region in the currently open table.
* <p>
* This is mainly useful for the MapReduce integration.
*
* @return Array of region ending row keys
 */
  default CompletableFuture<List<byte[]>> getEndKeys() {
return getStartEndKeys().thenApply(startEndKeys -> startEndKeys.stream().map(Pair::getSecond).collect(Collectors.toList()));
} | 3.26 |
hbase_AsyncTableRegionLocator_getStartKeys_rdh | /**
* Gets the starting row key for every region in the currently open table.
* <p>
* This is mainly useful for the MapReduce integration.
*
* @return Array of region starting row keys
*/
default CompletableFuture<List<byte[]>> getStartKeys() {
return getStartEndKeys().thenApply(startEndKeys -> startEndKeys.stream().map(Pair::getFirst).collect(Collectors.toList()));
} | 3.26 |
hbase_AsyncTableRegionLocator_getStartEndKeys_rdh | /**
* Gets the starting and ending row keys for every region in the currently open table.
* <p>
* This is mainly useful for the MapReduce integration.
*
* @return Pair of arrays of region starting and ending row keys
*/
default CompletableFuture<List<Pair<byte[], byte[]>>> getStartEndKeys() {
return getAllRegionLocations().thenApply(locs -> locs.stream().filter(loc -> RegionReplicaUtil.isDefaultReplica(loc.getRegion())).map(HRegionLocation::getRegion).map(r -> Pair.newPair(r.getStartKey(), r.getEndKey())).collect(Collectors.toList()));
} | 3.26 |
hbase_AsyncTableRegionLocator_getRegionLocations_rdh | /**
* Find all the replicas for the region on which the given row is being served.
*
* @param row
* Row to find.
* @return Locations for all the replicas of the row.
 */
  default CompletableFuture<List<HRegionLocation>> getRegionLocations(byte[] row) {
return getRegionLocations(row, false);
} | 3.26 |
hbase_AsyncTableRegionLocator_getRegionLocation_rdh | /**
* Finds the region with the given <code>replicaId</code> on which the given row is being served.
* <p/>
* Returns the location of the region with the given <code>replicaId</code> to which the row
* belongs.
*
* @param row
* Row to find.
* @param replicaId
* the replica id of the region
*/
default CompletableFuture<HRegionLocation> getRegionLocation(byte[] row, int replicaId) {
return getRegionLocation(row, replicaId, false);
} | 3.26 |
hbase_ServerManager_getOnlineServersList_rdh | /**
* Returns A copy of the internal list of online servers.
*/
public List<ServerName> getOnlineServersList() {
// TODO: optimize the load balancer call so we don't need to make a new list
// TODO: FIX. THIS IS POPULAR CALL.
return new ArrayList<>(this.onlineServers.keySet());
} | 3.26 |
hbase_ServerManager_checkAndRecordNewServer_rdh | /**
* Check is a server of same host and port already exists, if not, or the existed one got a
* smaller start code, record it.
*
* @param serverName
* the server to check and record
* @param sl
* the server load on the server
* @return true if the server is recorded, otherwise, false
*/
boolean checkAndRecordNewServer(final ServerName serverName, final ServerMetrics sl) {
ServerName existingServer = null;
synchronized(this.onlineServers) {
existingServer = findServerWithSameHostnamePortWithLock(serverName);
      if ((existingServer != null) && (existingServer.getStartcode() > serverName.getStartcode())) {
        LOG.info(((("Server serverName=" + serverName) + " rejected; we already have ")
          + existingServer.toString()) + " registered with same hostname and port");
        return false;
      }
      m0(serverName, sl);
}
// Tell our listeners that a server was added
if (!this.f2.isEmpty()) {
for (ServerListener listener : this.f2) {
listener.serverAdded(serverName);
}
}
// Note that we assume that same ts means same server, and don't expire in that case.
// TODO: ts can theoretically collide due to clock shifts, so this is a bit hacky.
if ((existingServer != null) && (existingServer.getStartcode() < serverName.getStartcode())) {
LOG.info((("Triggering server recovery; existingServer " + existingServer) + " looks stale, new server:") + serverName);
expireServer(existingServer);
}
return true;
} | 3.26 |
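The rejection logic above hinges on ServerName start codes: a report from the same host and port with an older start code is refused, while a newer start code triggers expiry of the stale entry. A small standalone sketch of that comparison; the host and port are hypothetical.

import org.apache.hadoop.hbase.ServerName;

public class StartCodeExample {
  public static void main(String[] args) {
    // Same host/port, different start codes: the larger start code is the newer incarnation.
    ServerName old = ServerName.valueOf("rs1.example.com", 16020, 1000L);
    ServerName fresh = ServerName.valueOf("rs1.example.com", 16020, 2000L);
    System.out.println(ServerName.isSameAddress(old, fresh));         // true: same host and port
    System.out.println(old.getStartcode() < fresh.getStartcode());    // true: old entry is stale
  }
}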
hbase_ServerManager_clearDeadServersWithSameHostNameAndPortOfOnlineServer_rdh | /**
 * Clears any dead server that has the same host name and port as an online server.
*/
void clearDeadServersWithSameHostNameAndPortOfOnlineServer() {
for (ServerName serverName : getOnlineServersList()) {
f1.cleanAllPreviousInstances(serverName);
}
} | 3.26 |
hbase_ServerManager_regionServerStartup_rdh | /**
* Let the server manager know a new regionserver has come online
*
* @param request
* the startup request
* @param versionNumber
* the version number of the new regionserver
* @param version
* the version of the new regionserver, could contain strings like "SNAPSHOT"
* @param ia
* the InetAddress from which request is received
* @return The ServerName we know this server as.
*/
ServerName regionServerStartup(RegionServerStartupRequest request, int versionNumber, String version, InetAddress ia) throws IOException {
// Test for case where we get a region startup message from a regionserver
// that has been quickly restarted but whose znode expiration handler has
// not yet run, or from a server whose fail we are currently processing.
// Test its host+port combo is present in serverAddressToServerInfo. If it
// is, reject the server and trigger its expiration. The next time it comes
// in, it should have been removed from serverAddressToServerInfo and queued
// for processing by ProcessServerShutdown.
// if use-ip is enabled, we will use ip to expose Master/RS service for client,
// see HBASE-27304 for details.
boolean useIp = master.getConfiguration().getBoolean(HConstants.HBASE_SERVER_USEIP_ENABLED_KEY, HConstants.HBASE_SERVER_USEIP_ENABLED_DEFAULT);
String isaHostName = (useIp) ? ia.getHostAddress() : ia.getHostName();
final String hostname = (request.hasUseThisHostnameInstead()) ? request.getUseThisHostnameInstead() : isaHostName;
ServerName sn = ServerName.valueOf(hostname, request.getPort(), request.getServerStartCode());
checkClockSkew(sn, request.getServerCurrentTime());
checkIsDead(sn, "STARTUP");
if (!checkAndRecordNewServer(sn, ServerMetricsBuilder.of(sn, versionNumber, version))) {
LOG.warn(("THIS SHOULD NOT HAPPEN, RegionServerStartup" + " could not record the server: ") + sn);
}
storage.started(sn);
return sn;
} | 3.26 |
hbase_ServerManager_removeDeletedRegionFromLoadedFlushedSequenceIds_rdh | /**
 * Regions may have been removed between the latest persist of FlushedSequenceIds and a master
 * abort. So after loading FlushedSequenceIds from the file, and after meta is loaded, we need to
 * remove the deleted regions according to RegionStates.
*/
public void removeDeletedRegionFromLoadedFlushedSequenceIds() {
RegionStates regionStates = master.getAssignmentManager().getRegionStates();
Iterator<byte[]> it = flushedSequenceIdByRegion.keySet().iterator();
while (it.hasNext()) {
      byte[] regionEncodedName = it.next();
      if (regionStates.getRegionState(Bytes.toStringBinary(regionEncodedName)) == null) {
it.remove();
storeFlushedSequenceIdsByRegion.remove(regionEncodedName);
}
}
} | 3.26 |
hbase_ServerManager_closeRegionSilentlyAndWait_rdh | /**
* Contacts a region server and waits up to timeout ms to close the region. This bypasses the
* active hmaster. Pass -1 as timeout if you do not want to wait on result.
*/
  public static void closeRegionSilentlyAndWait(AsyncClusterConnection connection,
    ServerName server, RegionInfo region, long timeout) throws IOException, InterruptedException {
AsyncRegionServerAdmin admin = connection.getRegionServerAdmin(server);
try {
FutureUtils.get(admin.closeRegion(ProtobufUtil.buildCloseRegionRequest(server, region.getRegionName())));
} catch (IOException e) {
LOG.warn("Exception when closing region: " + region.getRegionNameAsString(), e);
}
if (timeout < 0) {
return;
}
    long expiration = timeout + EnvironmentEdgeManager.currentTime();
while (EnvironmentEdgeManager.currentTime() < expiration) {
try {
RegionInfo rsRegion = ProtobufUtil.toRegionInfo(FutureUtils.get(admin.getRegionInfo(RequestConverter.buildGetRegionInfoRequest(region.getRegionName()))).getRegionInfo());
        if (rsRegion == null) {
          return;
}
} catch (IOException ioe) {
if ((ioe instanceof NotServingRegionException) || ((ioe instanceof RemoteWithExtrasException) && (((RemoteWithExtrasException) (ioe)).unwrapRemoteException() instanceof NotServingRegionException))) {
// no need to retry again
return;
}
LOG.warn("Exception when retrieving regioninfo from: " + region.getRegionNameAsString(), ioe);
}
Thread.sleep(1000);
}
throw new IOException(((("Region " + region) + " failed to close within") + " timeout ") + timeout);
} | 3.26 |
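A minimal sketch of calling this helper from code that already holds an AsyncClusterConnection (it is a master-side, internal connection type, so the connection, server, and region here are assumed to be obtained elsewhere and are purely illustrative).

import java.io.IOException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.master.ServerManager;

public class CloseRegionExample {
  // Asks the region server directly to close the region, then polls for up to 30 seconds.
  static void closeStuckRegion(AsyncClusterConnection conn, ServerName server, RegionInfo region)
      throws IOException, InterruptedException {
    ServerManager.closeRegionSilentlyAndWait(conn, server, region, 30_000L);
    // Passing -1 instead of 30_000L would send the close request without waiting on the result.
  }
}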
hbase_ServerManager_isServerKnownAndOnline_rdh | /**
* Returns whether the server is online, dead, or unknown.
*/
public synchronized ServerLiveState isServerKnownAndOnline(ServerName serverName) {
return onlineServers.containsKey(serverName) ? ServerLiveState.LIVE : f1.isDeadServer(serverName) ? ServerLiveState.DEAD : ServerLiveState.f3;
} | 3.26 |
hbase_ServerManager_stop_rdh | /**
* Stop the ServerManager.
*/
public void stop() {
if (flushedSeqIdFlusher != null) {
flushedSeqIdFlusher.shutdown();
}
    if (persistFlushedSequenceId) {
      try {
        m3();
      } catch (IOException e) {
LOG.warn("Failed to persist last flushed sequence id of regions" + " to file system", e);
}
}
} | 3.26 |
hbase_ServerManager_findServerWithSameHostnamePortWithLock_rdh | /**
* Assumes onlineServers is locked.
*
* @return ServerName with matching hostname and port.
*/
public ServerName findServerWithSameHostnamePortWithLock(final ServerName serverName) {
ServerName end = ServerName.valueOf(serverName.getHostname(), serverName.getPort(), Long.MAX_VALUE);
ServerName r = onlineServers.lowerKey(end);
if (r != null) {
      if (ServerName.isSameAddress(r, serverName)) {
return r;
}
}
return null;
} | 3.26 |
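The lookup above relies on ServerName's natural ordering, which sorts by hostname, then port, then start code: probing with Long.MAX_VALUE as the start code and taking lowerKey yields the highest-start-code entry for that host/port, if one exists. A small standalone sketch of the same idea using a plain TreeMap; the host, port, and map values are hypothetical.

import java.util.TreeMap;
import org.apache.hadoop.hbase.ServerName;

public class LowerKeyLookupExample {
  public static void main(String[] args) {
    TreeMap<ServerName, String> online = new TreeMap<>();
    online.put(ServerName.valueOf("rs1.example.com", 16020, 1234L), "metrics-1");

    // Probe with the maximum possible start code for the same host/port...
    ServerName probe = ServerName.valueOf("rs1.example.com", 16020, Long.MAX_VALUE);
    ServerName candidate = online.lowerKey(probe);

    // ...then verify the candidate really is the same host/port before using it.
    if (candidate != null && ServerName.isSameAddress(candidate, probe)) {
      System.out.println("found existing instance: " + candidate);
    }
  }
}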
hbase_ServerManager_createDestinationServersList_rdh | /**
* Calls {@link #createDestinationServersList} without server to exclude.
*/
public List<ServerName> createDestinationServersList() {
    return createDestinationServersList(null);
} | 3.26 |
hbase_ServerManager_startChore_rdh | /**
* start chore in ServerManager
*/
public void startChore() {
Configuration c = master.getConfiguration();
if (persistFlushedSequenceId) {
new Thread(() -> {
// after AM#loadMeta, RegionStates should be loaded, and some regions are
// deleted by drop/split/merge during removeDeletedRegionFromLoadedFlushedSequenceIds,
// but these deleted regions are not added back to RegionStates,
// so we can safely remove deleted regions.
removeDeletedRegionFromLoadedFlushedSequenceIds();
}, "RemoveDeletedRegionSyncThread").start();
int flushPeriod = c.getInt(FLUSHEDSEQUENCEID_FLUSHER_INTERVAL, f0);
      flushedSeqIdFlusher = new FlushedSequenceIdFlusher("FlushedSequenceIdFlusher", flushPeriod);
master.getChoreService().scheduleChore(flushedSeqIdFlusher);
}
} | 3.26 |
hbase_ServerManager_updateLastFlushedSequenceIds_rdh | /**
* Updates last flushed sequence Ids for the regions on server sn
*/
  private void updateLastFlushedSequenceIds(ServerName sn, ServerMetrics hsl) {
    for (Entry<byte[], RegionMetrics> entry : hsl.getRegionMetrics().entrySet()) {
byte[] encodedRegionName = Bytes.toBytes(RegionInfo.encodeRegionName(entry.getKey()));
Long existingValue = flushedSequenceIdByRegion.get(encodedRegionName);
long l = entry.getValue().getCompletedSequenceId();
// Don't let smaller sequence ids override greater sequence ids.
if (LOG.isTraceEnabled()) {
LOG.trace((((Bytes.toString(encodedRegionName) + ", existingValue=") + existingValue) +
", completeSequenceId=") + l);
}
if ((existingValue == null) || ((l != HConstants.NO_SEQNUM) && (l > existingValue))) {
flushedSequenceIdByRegion.put(encodedRegionName, l);
} else if ((l != HConstants.NO_SEQNUM) && (l < existingValue)) {
LOG.warn(((((((("RegionServer " + sn) + " indicates a last flushed sequence id (") + l) + ") that is less than the previous last flushed sequence id (") + existingValue) + ") for region ") + Bytes.toString(entry.getKey())) + " Ignoring.");
}
ConcurrentNavigableMap<byte[], Long> storeFlushedSequenceId = computeIfAbsent(storeFlushedSequenceIdsByRegion, encodedRegionName, () -> new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR));
for (Entry<byte[], Long> storeSeqId : entry.getValue().getStoreSequenceId().entrySet()) {
        byte[] family = storeSeqId.getKey();
        existingValue = storeFlushedSequenceId.get(family);
l = storeSeqId.getValue();
if (LOG.isTraceEnabled()) {
LOG.trace((((((Bytes.toString(encodedRegionName) + ", family=") + Bytes.toString(family)) + ", existingValue=") + existingValue) +
", completeSequenceId=") + l);
}
// Don't let smaller sequence ids override greater sequence ids.
if ((existingValue == null) || ((l != HConstants.NO_SEQNUM) && (l > existingValue.longValue()))) {
storeFlushedSequenceId.put(family, l);
        }
      }
}
} | 3.26 |
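The update above is a max-wins merge: a reported sequence id only replaces the stored one when it is strictly larger, so late or reordered reports cannot roll a region's flushed id backwards. A small standalone sketch of the same merge rule; the map, method, and sentinel names are illustrative, not from the source (the sentinel mirrors HConstants.NO_SEQNUM).

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class MaxWinsMergeExample {
  private static final long NO_SEQNUM = -1L; // sentinel for "no sequence id"

  static void record(ConcurrentMap<String, Long> flushedByRegion, String region, long seqId) {
    if (seqId == NO_SEQNUM) {
      return; // nothing useful to record
    }
    // Only a strictly larger sequence id replaces the stored value.
    flushedByRegion.merge(region, seqId, Math::max);
  }

  public static void main(String[] args) {
    ConcurrentMap<String, Long> flushedByRegion = new ConcurrentHashMap<>();
    record(flushedByRegion, "region-a", 100L);
    record(flushedByRegion, "region-a", 90L);  // ignored: smaller than 100
    record(flushedByRegion, "region-a", 120L); // accepted
    System.out.println(flushedByRegion.get("region-a")); // 120
  }
}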
hbase_ServerManager_checkIsDead_rdh | /**
* Called when RegionServer first reports in for duty and thereafter each time it heartbeats to
 * make sure it has not been marked as dead. If this server is on the dead list, reject it
* with a YouAreDeadException. If it was dead but came back with a new start code, remove the old
* entry from the dead list.
*
* @param what
* START or REPORT
*/
private void checkIsDead(final ServerName serverName, final String what) throws YouAreDeadException {
    if (this.f1.isDeadServer(serverName)) {
      // Exact match: host name, port and start code all match with existing one of the
// dead servers. So, this server must be dead. Tell it to kill itself.
String message = ((("Server " + what) + " rejected; currently processing ") + serverName) + " as dead server";
LOG.debug(message);
throw new YouAreDeadException(message);
}
// Remove dead server with same hostname and port of newly checking in rs after master
// initialization. See HBASE-5916 for more information.
if (((this.master == null) || this.master.isInitialized()) && this.f1.cleanPreviousInstance(serverName)) {
// This server has now become alive after we marked it as dead.
      // We removed its previous entry from the dead list to reflect it.
LOG.debug("{} {} came back up, removed it from the dead servers list", what, serverName);
}
} | 3.26 |
hbase_ServerManager_getOnlineServersListWithPredicator_rdh | /**
*
* @param keys
 * The target server names
* @param idleServerPredicator
* Evaluates the server on the given load
* @return A copy of the internal list of online servers matched by the predicator
*/
public List<ServerName> getOnlineServersListWithPredicator(List<ServerName> keys, Predicate<ServerMetrics> idleServerPredicator) {
List<ServerName> names = new ArrayList<>();
    if ((keys != null) && (idleServerPredicator != null)) {
keys.forEach(name -> {
ServerMetrics load = onlineServers.get(name);
if (load != null) {
if (idleServerPredicator.test(load)) {
names.add(name);
}
}
});
    }
    return names;
} | 3.26 |
hbase_ServerManager_m0_rdh | /**
 * Adds a server to the onlineServers list. onlineServers should be locked.
*
* @param serverName
* The remote servers name.
*/
void m0(final ServerName serverName, final ServerMetrics sl) {
LOG.info("Registering regionserver=" + serverName);
    this.onlineServers.put(serverName, sl);
} | 3.26 |
hbase_ServerManager_getMinToStart_rdh | /**
 * Calculate the minimum number of region servers necessary to start. This is not an absolute; it
 * is just a friction that will cause us to hang around a bit longer waiting on RegionServers to
 * check in.
*/
private int getMinToStart() {
if (master.isInMaintenanceMode()) {
      // If in maintenance mode, the in-process region server hosting meta will be the only server
      // available.
      return 1;
    }
    int minimumRequired = 1;
int minToStart = this.master.getConfiguration().getInt(WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
// Ensure we are never less than minimumRequired else stuff won't work.
return Math.max(minToStart, minimumRequired);
} | 3.26 |
hbase_ServerManager_getDrainingServersList_rdh | /**
* Returns A copy of the internal list of draining servers.
*/
public List<ServerName> getDrainingServersList() {
return new ArrayList<>(this.drainingServers);
} | 3.26 |
hbase_ServerManager_countOfRegionServers_rdh | /**
* Returns the count of active regionservers
*/
public int countOfRegionServers() {
// Presumes onlineServers is a concurrent map
return this.onlineServers.size();
} | 3.26 |
hbase_ServerManager_registerListener_rdh | /**
* Add the listener to the notification list.
*
* @param listener
* The ServerListener to register
*/
  public void registerListener(final ServerListener listener) {
this.f2.add(listener);
} | 3.26 |
hbase_ServerManager_getVersionNumber_rdh | /**
* May return 0 when server is not online.
*/
  public int getVersionNumber(ServerName serverName) {
    ServerMetrics serverMetrics = onlineServers.get(serverName);
return serverMetrics != null ? serverMetrics.getVersionNumber() : 0;
} | 3.26 |
hbase_ServerManager_areDeadServersInProgress_rdh | /**
* Checks if any dead servers are currently in progress.
*
* @return true if any RS are being processed as dead, false if not
*/
public boolean areDeadServersInProgress() throws IOException {
return master.getProcedures().stream().anyMatch(p -> (!p.isFinished()) && (p instanceof ServerCrashProcedure));
} | 3.26 |
hbase_ServerManager_checkClockSkew_rdh | /**
 * Checks the clock skew between the server and the master. If the clock skew exceeds the
* configured max, it will throw an exception; if it exceeds the configured warning threshold, it
* will log a warning but start normally.
*
* @param serverName
* Incoming servers's name
* @throws ClockOutOfSyncException
* if the skew exceeds the configured max value
*/
  private void checkClockSkew(final ServerName serverName, final long serverCurrentTime)
    throws ClockOutOfSyncException {
long skew = Math.abs(EnvironmentEdgeManager.currentTime() - serverCurrentTime);
if (skew > maxSkew) {
String message = ((((((("Server " + serverName) + " has been ") + "rejected; Reported time is too far out of sync with master. ") + "Time difference of ") + skew) + "ms > max allowed of ") + maxSkew) + "ms";
LOG.warn(message);
throw new ClockOutOfSyncException(message);
} else if (skew > warningSkew) {
String message = ((((((((("Reported time for server " + serverName)
+ " is out of sync with master ") + "by ") + skew) + "ms. (Warning threshold is ") + warningSkew) + "ms; ") + "error threshold is ") + maxSkew) + "ms)";
LOG.warn(message);
}
} | 3.26 |
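A brief standalone illustration of the same skew check. The threshold and timestamp values below are hypothetical; in the real code maxSkew and warningSkew come from configuration and the master clock comes from EnvironmentEdgeManager.

public class ClockSkewExample {
  public static void main(String[] args) {
    long maxSkew = 30_000L;       // hypothetical max allowed skew, ms
    long warningSkew = 10_000L;   // hypothetical warning threshold, ms

    long masterNow = 1_700_000_050_000L;          // hypothetical master clock
    long serverCurrentTime = 1_700_000_035_000L;  // hypothetical RS-reported clock

    long skew = Math.abs(masterNow - serverCurrentTime); // 15000 ms
    if (skew > maxSkew) {
      System.out.println("reject: skew " + skew + "ms exceeds max " + maxSkew + "ms");
    } else if (skew > warningSkew) {
      System.out.println("warn: skew " + skew + "ms exceeds warning threshold " + warningSkew + "ms");
    } else {
      System.out.println("ok: skew " + skew + "ms");
    }
  }
}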
hbase_ServerManager_loadLastFlushedSequenceIds_rdh | /**
* Load last flushed sequence id of each region from HDFS, if persisted
*/
public void loadLastFlushedSequenceIds() throws IOException {
if (!persistFlushedSequenceId) {
return;
}
Configuration conf = master.getConfiguration();
Path rootDir = CommonFSUtils.getRootDir(conf);
    Path lastFlushedSeqIdPath = new Path(rootDir, LAST_FLUSHED_SEQ_ID_FILE);
FileSystem fs = FileSystem.get(conf);
    if (!fs.exists(lastFlushedSeqIdPath)) {
      LOG.info((("No .lastflushedseqids found at " + lastFlushedSeqIdPath)
        + " will record last flushed sequence id") + " for regions by regionserver report all over again");
      return;
    } else {
LOG.info("begin to load .lastflushedseqids at " + lastFlushedSeqIdPath);
}
FSDataInputStream in = fs.open(lastFlushedSeqIdPath);
try {
      FlushedSequenceId flushedSequenceId = FlushedSequenceId.parseDelimitedFrom(in);
if (flushedSequenceId == null) {
LOG.info(".lastflushedseqids found at {} is empty", lastFlushedSeqIdPath);
return;
      }
      for (FlushedRegionSequenceId flushedRegionSequenceId : flushedSequenceId.getRegionSequenceIdList()) {
byte[] encodedRegionName = flushedRegionSequenceId.getRegionEncodedName().toByteArray();
flushedSequenceIdByRegion.putIfAbsent(encodedRegionName, flushedRegionSequenceId.getSeqId());
if ((flushedRegionSequenceId.getStoresList() != null) && (flushedRegionSequenceId.getStoresList().size() != 0)) {
ConcurrentNavigableMap<byte[], Long> storeFlushedSequenceId = computeIfAbsent(storeFlushedSequenceIdsByRegion, encodedRegionName, () -> new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR));
          for (FlushedStoreSequenceId flushedStoreSequenceId : flushedRegionSequenceId.getStoresList()) {
            storeFlushedSequenceId.put(flushedStoreSequenceId.getFamily().toByteArray(),
              flushedStoreSequenceId.getSeqId());
}
}
}
} finally {
in.close();
}
} | 3.26 |
hbase_ServerManager_m2_rdh | /**
* Called by delete table and similar to notify the ServerManager that a region was removed.
*/
public void m2(final RegionInfo regionInfo) {
final byte[] encodedName = regionInfo.getEncodedNameAsBytes();
storeFlushedSequenceIdsByRegion.remove(encodedName);
flushedSequenceIdByRegion.remove(encodedName);
} | 3.26 |