name (string, length 12–178) | code_snippet (string, length 8–36.5k) | score (float64, 3.26–3.68)
---|---|---|
hbase_LogRollRegionServerProcedureManager_stop_rdh | /**
* Close <tt>this</tt> and all running backup procedure tasks
*
* @param force
* forcefully stop all running tasks
* @throws IOException
* if an error occurs while stopping
*/
@Override
public void stop(boolean force) throws IOException {
if (!started) {
return;
}
String mode = (force) ? "abruptly" : "gracefully";
LOG.info(("Stopping RegionServerBackupManager " + mode) + ".");
try {
this.member.close();
} finally {
this.memberRpcs.close();
}
} | 3.26 |
hbase_TableSnapshotInputFormatImpl_write_rdh | // TODO: We should have ProtobufSerialization in Hadoop, and directly use PB objects instead of
// doing this wrapping with Writables.
@Override
public void write(DataOutput out) throws IOException {
TableSnapshotRegionSplit.Builder builder = TableSnapshotRegionSplit.newBuilder().setTable(ProtobufUtil.toTableSchema(htd)).setRegion(ProtobufUtil.toRegionInfo(regionInfo));
for (String location : locations) {
builder.addLocations(location);
}
TableSnapshotRegionSplit split = builder.build();
ByteArrayOutputStream baos = new ByteArrayOutputStream();
split.writeTo(baos);
baos.close();
byte[] buf = baos.toByteArray();
out.writeInt(buf.length);
out.write(buf);
Bytes.writeByteArray(out, Bytes.toBytes(scan));
Bytes.writeByteArray(out, Bytes.toBytes(restoreDir));
} | 3.26 |
hbase_TableSnapshotInputFormatImpl_cleanRestoreDir_rdh | /**
* Clean the restore directory after a snapshot scan job
*
* @param job
* the snapshot scan job
* @param snapshotName
* the name of the snapshot to read from
* @throws IOException
* if an error occurs
*/
public static void cleanRestoreDir(Job job, String snapshotName) throws IOException {
Configuration conf = job.getConfiguration();
Path restoreDir = new Path(conf.get(RESTORE_DIR_KEY));
FileSystem fs = restoreDir.getFileSystem(conf);
if (!fs.exists(restoreDir)) {
LOG.warn("{} doesn't exist on file system, maybe it's already been cleaned", restoreDir);
return;
}
if (!fs.delete(restoreDir, true)) {
LOG.warn("Failed clean restore dir {} for snapshot {}", restoreDir, snapshotName);
}
LOG.debug("Clean restore directory {} for {}", restoreDir, snapshotName);
} | 3.26 |
hbase_TableSnapshotInputFormatImpl_calculateLocationsForInputSplit_rdh | /**
* Compute block locations for snapshot files (which will get the locations for referred hfiles)
* only when localityEnabled is true.
*/
private static List<String> calculateLocationsForInputSplit(Configuration conf, TableDescriptor htd, RegionInfo hri, Path tableDir) throws IOException {
return getBestLocations(conf, HRegion.computeHDFSBlocksDistribution(conf, htd, hri, tableDir));
} | 3.26 |
hbase_TableSnapshotInputFormatImpl_getBestLocations_rdh | /**
* This computes the locations to be passed from the InputSplit. MR/Yarn schedulers do not take
* weights into account, and thus treat every location passed from the input split as equal. We
* do not want to blindly pass all the locations, since we are creating one split per region, and
* the region's blocks are distributed throughout the cluster unless favored node assignment
* is used. In the expected stable case, only one location will contain most of the blocks as
* local. On the other hand, with favored node assignment, 3 nodes will contain highly local blocks.
* Here we are doing a simple heuristic, where we will pass all hosts which have at least 80%
* (hbase.tablesnapshotinputformat.locality.cutoff.multiplier) as much block locality as the top
* host with the best locality. Return at most numTopsAtMost locations if there are more than
* that.
*/
private static List<String> getBestLocations(Configuration conf, HDFSBlocksDistribution blockDistribution, int numTopsAtMost) {
HostAndWeight[] hostAndWeights = blockDistribution.getTopHostsWithWeights();
if (hostAndWeights.length == 0) {
// no matter what numTopsAtMost is
return null;
}
if (numTopsAtMost < 1) {
// invalid if numTopsAtMost < 1, correct it to be 1
numTopsAtMost = 1;
}
int top = Math.min(numTopsAtMost, hostAndWeights.length);
List<String> locations = new ArrayList<>(top);
HostAndWeight topHost = hostAndWeights[0];
locations.add(topHost.getHost());
if (top == 1) {
// only care about the top host
return locations;
}
// When top >= 2,
// do the heuristic: filter all hosts which have at least cutoffMultiplier % of block locality
double cutoffMultiplier = conf.getFloat(LOCALITY_CUTOFF_MULTIPLIER, DEFAULT_LOCALITY_CUTOFF_MULTIPLIER);
double filterWeight = topHost.getWeight() * cutoffMultiplier;
for (int i = 1; i <= (top - 1); i++) {
if (hostAndWeights[i].getWeight() >= filterWeight) {
locations.add(hostAndWeights[i].getHost());
} else {
// As hostAndWeights is in descending order,
// we can break the loop as soon as we meet a weight that is less than filterWeight.
break;
}
}
return locations;
} | 3.26 |
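The cutoff heuristic in getBestLocations above is easiest to see on concrete weights. Below is a hypothetical, self-contained sketch (plain Java, not the HBase API; the host names and weights are made up) that applies the same rule: always keep the top host, then keep each following host whose weight is at least cutoffMultiplier times the top weight, stopping at the first host that falls below the cutoff.

import java.util.ArrayList;
import java.util.List;

public class LocalityCutoffSketch {
  // Hosts are assumed to be sorted by weight in descending order, as getTopHostsWithWeights() returns them.
  static List<String> bestLocations(String[] hosts, long[] weights, double cutoffMultiplier, int numTopsAtMost) {
    List<String> locations = new ArrayList<>();
    if (hosts.length == 0) {
      return locations;
    }
    int top = Math.min(Math.max(numTopsAtMost, 1), hosts.length);
    locations.add(hosts[0]);
    double filterWeight = weights[0] * cutoffMultiplier;
    for (int i = 1; i < top; i++) {
      if (weights[i] >= filterWeight) {
        locations.add(hosts[i]);
      } else {
        break; // weights are descending, so nothing later can pass the cutoff
      }
    }
    return locations;
  }

  public static void main(String[] args) {
    String[] hosts = {"rs1", "rs2", "rs3"};
    long[] weights = {1000L, 850L, 200L};
    // With a 0.8 cutoff, rs1 and rs2 pass (850 >= 800) while rs3 does not.
    System.out.println(bestLocations(hosts, weights, 0.8, 3)); // [rs1, rs2]
  }
}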
hbase_TableSnapshotInputFormatImpl_setInput_rdh | /**
* Configures the job to use TableSnapshotInputFormat to read from a snapshot.
*
* @param conf
* the job to configure
* @param snapshotName
* the name of the snapshot to read from
* @param restoreDir
* a temporary directory to restore the snapshot into. Current user
* should have write permissions to this directory, and this should not
* be a subdirectory of rootdir. After the job is finished, restoreDir
* can be deleted.
* @param numSplitsPerRegion
* how many input splits to generate per one region
* @param splitAlgo
* SplitAlgorithm to be used when generating InputSplits
* @throws IOException
* if an error occurs
*/
public static void setInput(Configuration conf, String snapshotName, Path restoreDir, RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) throws IOException {
conf.set(SNAPSHOT_NAME_KEY, snapshotName);
if (numSplitsPerRegion < 1) {
throw new IllegalArgumentException(("numSplits must be >= 1, " + "illegal numSplits : ") + numSplitsPerRegion);
}
if ((splitAlgo == null) && (numSplitsPerRegion > 1)) {
throw new IllegalArgumentException("Split algo can't be null when numSplits > 1");
}
if (splitAlgo != null) {
conf.set(SPLIT_ALGO, splitAlgo.getClass().getName());
}
conf.setInt(NUM_SPLITS_PER_REGION, numSplitsPerRegion);
Path rootDir = CommonFSUtils.getRootDir(conf);
FileSystem fs = rootDir.getFileSystem(conf);
restoreDir = new Path(restoreDir, UUID.randomUUID().toString());
RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName);
conf.set(RESTORE_DIR_KEY, restoreDir.toString());
} | 3.26 |
hbase_FilterBase_toString_rdh | /**
* Return the filter's info for debugging and logging purposes.
*/
@Override
public String toString() {
return this.getClass().getSimpleName();
} | 3.26 |
hbase_FilterBase_hasFilterRow_rdh | /**
* Filters that never filter by modifying the returned List of Cells can inherit this
* implementation that does nothing. {@inheritDoc }
*/
@Override
public boolean hasFilterRow() {
return false;
} | 3.26 |
hbase_FilterBase_filterRow_rdh | /**
* Filters that never filter by rows based on previously gathered state from
* {@link #filterCell(Cell)} can inherit this implementation that never filters a row.
* {@inheritDoc }
*/
@Override
public boolean filterRow() throws IOException {
return false;
} | 3.26 |
hbase_FilterBase_transformCell_rdh | /**
* By default no transformation takes place {@inheritDoc }
*/
@Override
public Cell transformCell(Cell v) throws IOException {
return v;
} | 3.26 |
hbase_FilterBase_createFilterFromArguments_rdh | /**
* Given the filter's arguments it constructs the filter
* <p>
*
* @param filterArguments
* the filter's arguments
* @return constructed filter object
*/
public static Filter createFilterFromArguments(ArrayList<byte[]> filterArguments) {
throw new IllegalArgumentException("This method has not been implemented");
} | 3.26 |
hbase_FilterBase_getNextCellHint_rdh | /**
* Filters that are not sure which key must be next seeked to, can inherit this implementation
* that, by default, returns a null Cell. {@inheritDoc }
*/
@Override
public Cell getNextCellHint(Cell currentCell) throws IOException {
return null;
} | 3.26 |
hbase_FilterBase_isFamilyEssential_rdh | /**
* By default, we require all scan's column families to be present. Our subclasses may be more
* precise. {@inheritDoc }
*/
@Override
public boolean isFamilyEssential(byte[] name) throws IOException {
return true;
} | 3.26 |
hbase_FilterBase_reset_rdh | /**
* Filters that are purely stateless and do nothing in their reset() methods can inherit this
* null/empty implementation. {@inheritDoc }
*/
@Override
public void reset() throws IOException {
} | 3.26 |
hbase_FilterBase_filterRowCells_rdh | /**
* Filters that never filter by modifying the returned List of Cells can inherit this
* implementation that does nothing. {@inheritDoc }
*/
@Override
public void filterRowCells(List<Cell> ignored) throws IOException {
} | 3.26 |
hbase_FilterBase_filterAllRemaining_rdh | /**
* Filters that never filter all remaining can inherit this implementation that never stops the
* filter early. {@inheritDoc }
*/
@Override
public boolean filterAllRemaining() throws IOException {
return false;
} | 3.26 |
hbase_ZKSecretWatcher_getKeysParentZNode_rdh | /**
* get token keys parent node
*
* @return token keys parent node
*/
String getKeysParentZNode() {
return keysParentZNode;
} | 3.26 |
hbase_ZKSecretWatcher_refreshKeys_rdh | /**
* refresh keys
*/
synchronized void refreshKeys() {
try {
List<ZKUtil.NodeAndData> nodes = ZKUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode);
refreshNodes(nodes);
} catch (KeeperException ke) {
LOG.error(HBaseMarkers.FATAL, "Error reading data from zookeeper", ke);
watcher.abort("Error reading changed keys from zookeeper", ke);
}
} | 3.26 |
hbase_SyncReplicationReplayWALRemoteProcedure_truncateWALs_rdh | /**
* Only truncate WALs one by one when the task succeeds. The parent procedure will check the first WAL
* length to know whether this task succeeded.
*/
private void truncateWALs(MasterProcedureEnv env) {
String firstWal = wals.get(0);
try {
env.getMasterServices().getSyncReplicationReplayWALManager().finishReplayWAL(firstWal);
} catch (IOException e) {
// As it is idempotent to rerun this task, just ignore this exception and return.
LOG.warn("Failed to truncate wal {} for peer id={}", firstWal, peerId, e);
return;
}
for (int i = 1; i < wals.size(); i++) {
String wal = wals.get(i);
try {
env.getMasterServices().getSyncReplicationReplayWALManager().finishReplayWAL(wal);
} catch (IOException e1) {
try {
// retry
env.getMasterServices().getSyncReplicationReplayWALManager().finishReplayWAL(wal);
} catch (IOException e2) {
// As the parent procedure only checks the first WAL length, just ignore this exception.
LOG.warn("Failed to truncate wal {} for peer id={}", wal, peerId, e2);
}
}
}
} | 3.26 |
hbase_SchemaLocking_getLocks_rdh | /**
* List lock queues.
*
* @return the locks
*/
List<LockedResource> getLocks() {
List<LockedResource> lockedResources = new ArrayList<>();
addToLockedResources(lockedResources, serverLocks, sn -> sn.getServerName(), LockedResourceType.SERVER);
addToLockedResources(lockedResources, namespaceLocks, Function.identity(), LockedResourceType.NAMESPACE);
addToLockedResources(lockedResources, tableLocks, tn -> tn.getNameAsString(), LockedResourceType.TABLE);
addToLockedResources(lockedResources, regionLocks, Function.identity(), LockedResourceType.REGION);
addToLockedResources(lockedResources, peerLocks, Function.identity(), LockedResourceType.PEER);
addToLockedResources(lockedResources, ImmutableMap.of(TableName.META_TABLE_NAME, metaLock), tn -> tn.getNameAsString(), LockedResourceType.META);
addToLockedResources(lockedResources, globalLocks, Function.identity(), LockedResourceType.GLOBAL);
return lockedResources;
}
/**
*
* @return {@link LockedResource} | 3.26 |
hbase_SchemaLocking_clear_rdh | /**
* Removes all locks by clearing the maps. Used when procedure executor is stopped for failure and
* recovery testing.
*/
void clear() {
serverLocks.clear();
namespaceLocks.clear();
tableLocks.clear();
regionLocks.clear();
peerLocks.clear();
} | 3.26 |
hbase_BlockCacheKey_getHfileName_rdh | // can't avoid this unfortunately
/**
* Returns The hfileName portion of this cache key
*/
public String getHfileName() {
return hfileName;
} | 3.26 |
hbase_RandomQueueBalancer_getQueues_rdh | /**
* Exposed for use in tests
*/
List<BlockingQueue<CallRunner>> getQueues() {
return queues;
} | 3.26 |
hbase_AtomicUtils_updateMin_rdh | /**
* Updates an AtomicLong which is supposed to maintain the minimum value. This method is not
* synchronized but is thread-safe.
*/
public static void updateMin(AtomicLong min, long value) {
while (true) {
long cur = min.get();
if (value >= cur) {
break;
}
if (min.compareAndSet(cur, value)) {
break;
}
}
} | 3.26 |
hbase_AtomicUtils_updateMax_rdh | /**
* Updates an AtomicLong which is supposed to maintain the maximum value. This method is not
* synchronized but is thread-safe.
*/
public static void updateMax(AtomicLong max, long value) {
while (true) {
long cur = max.get();
if (value <= cur) {
break;
}
if (max.compareAndSet(cur, value)) {
break;
}
}
} | 3.26 |
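A quick usage sketch of the same compare-and-set retry pattern used by updateMin and updateMax above: several threads record observed values into shared AtomicLongs. This is illustrative code only, not taken from HBase; the update methods are re-implemented inline so the snippet is self-contained.

import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.LongStream;

public class MinMaxTrackerSketch {
  static final AtomicLong MIN = new AtomicLong(Long.MAX_VALUE);
  static final AtomicLong MAX = new AtomicLong(Long.MIN_VALUE);

  // Retry until either the current value is already at least as small, or our CAS wins.
  static void updateMin(AtomicLong min, long value) {
    long cur;
    while (value < (cur = min.get()) && !min.compareAndSet(cur, value)) {
      // lost a race with another writer; re-read and retry
    }
  }

  // Symmetric retry loop for the maximum.
  static void updateMax(AtomicLong max, long value) {
    long cur;
    while (value > (cur = max.get()) && !max.compareAndSet(cur, value)) {
      // lost a race with another writer; re-read and retry
    }
  }

  public static void main(String[] args) {
    LongStream.of(42, 7, 99, 13).parallel().forEach(v -> {
      updateMin(MIN, v);
      updateMax(MAX, v);
    });
    System.out.println("min=" + MIN.get() + " max=" + MAX.get()); // min=7 max=99
  }
}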
hbase_Segment_tailSet_rdh | /**
* Returns a subset of the segment cell set, which starts with the given cell
*
* @param firstCell
* a cell in the segment
* @return a subset of the segment cell set, which starts with the given cell
*/
protected SortedSet<Cell> tailSet(Cell firstCell) {
return getCellSet().tailSet(firstCell);
} | 3.26 |
hbase_Segment_getScanner_rdh | /**
* Creates the scanner for the given read point
*
* @return a scanner for the given read point
*/
protected KeyValueScanner getScanner(long readPoint) {
return new SegmentScanner(this, readPoint);
} | 3.26 |
hbase_Segment_getCellSet_rdh | /**
* Returns a set of all cells in the segment
*/
protected CellSet getCellSet() {
return cellSet.get();
} | 3.26 |
hbase_Segment_maybeCloneWithAllocator_rdh | /**
* If the segment has a memory allocator the cell is being cloned to this space, and returned;
* otherwise the given cell is returned. When a cell's size is too big (bigger than maxAlloc), it
* is not allocated on MSLAB. Since the process of flattening to CellChunkMap assumes that all
* cells are allocated on MSLAB, during this process, the input parameter forceCloneOfBigCell is
* set to 'true' and the cell is copied into MSLAB.
*
* @return either the given cell or its clone
*/
public Cell maybeCloneWithAllocator(Cell cell, boolean forceCloneOfBigCell) {
if (this.memStoreLAB == null) {
return cell;
}
Cell cellFromMslab;
if (forceCloneOfBigCell) {
cellFromMslab = this.memStoreLAB.forceCopyOfBigCellInto(cell);
} else {
cellFromMslab = this.memStoreLAB.copyCellInto(cell);
}
return cellFromMslab != null ? cellFromMslab : cell;
} | 3.26 |
hbase_Segment_isEmpty_rdh | /**
* Returns whether the segment has any cells
*/
public boolean isEmpty() {
return getCellSet().isEmpty();
} | 3.26 |
hbase_Segment_dump_rdh | // Debug methods
/**
* Dumps all cells of the segment into the given log
*/
void dump(Logger log) {
for (Cell cell : getCellSet()) {
log.debug(Objects.toString(cell));
}
} | 3.26 |
hbase_Segment_getCellLength_rdh | /**
* Get cell length after serialized in {@link KeyValue}
*/
static int getCellLength(Cell cell) {
return cell.getSerializedSize();
} | 3.26 |
hbase_Segment_setCellSet_rdh | /**
* Setting the CellSet of the segment - used only for flat immutable segment for setting immutable
* CellSet after its creation in immutable segment constructor
*
* @return this object
*/
protected Segment setCellSet(CellSet cellSetOld, CellSet cellSetNew) {
this.cellSet.compareAndSet(cellSetOld, cellSetNew);
return this;
} | 3.26 |
hbase_Segment_last_rdh | // *** Methods for SegmentsScanner
public Cell last() {
return getCellSet().last();
} | 3.26 |
hbase_Segment_getComparator_rdh | /**
* Returns the Cell comparator used by this segment
*
* @return the Cell comparator used by this segment
*/
protected CellComparator getComparator() {
return comparator;
} | 3.26 |
hbase_Segment_close_rdh | /**
* Closing a segment before it is being discarded
*/
public void close() {
if (this.memStoreLAB != null) {
this.memStoreLAB.close();
}
// do not set MSLab to null as scanners may still be reading the data here and need to decrease
// the counter when they finish
} | 3.26 |
hbase_ClassSize_sizeOf_rdh | /**
* Calculate the memory consumption (in byte) of a byte array, including the array header and the
* whole backing byte array. If the whole byte array is occupied (not shared with other objects),
* please use this function. If not, please use {@link #sizeOfByteArray(int)} instead.
*
* @param b
* the byte array
* @return the memory consumption (in byte) of the whole byte array
*/
public static long sizeOf(byte[] b) {
return memoryLayout.sizeOfByteArray(b.length);
} | 3.26 |
hbase_ClassSize_align_rdh | /**
* Aligns a number to 8.
*
* @param num
* number to align to 8
* @return smallest number >= input that is a multiple of 8
*/
public static long align(long num) {
return memoryLayout.align(num);
} | 3.26 |
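ClassSize.align above delegates to the detected memory layout; on HotSpot the object alignment is 8 bytes, so the rounding amounts to the usual power-of-two trick. The sketch below shows that arithmetic directly; it is an assumption about the layout for illustration, not a call into ClassSize.

public class AlignSketch {
  // Round num up to the next multiple of 8 (a no-op if it is already aligned).
  static long align8(long num) {
    return (num + 7L) & ~7L;
  }

  public static void main(String[] args) {
    System.out.println(align8(0));  // 0
    System.out.println(align8(1));  // 8
    System.out.println(align8(13)); // 16
    System.out.println(align8(16)); // 16
  }
}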
hbase_ClassSize_estimateBaseFromCoefficients_rdh | /**
* Estimate the static space taken up by a class instance given the coefficients returned by
* getSizeCoefficients.
*
* @param coeff
* the coefficients
* @param debug
* debug flag
* @return the size estimate, in bytes
*/
private static long estimateBaseFromCoefficients(int[] coeff, boolean debug) {
long prealign_size = (((long) (OBJECT)) + coeff[0]) + (coeff[2] * REFERENCE);
// Round up to a multiple of 8
long size = align(prealign_size) + align(coeff[1] * ARRAY);
if (debug) {
if (LOG.isDebugEnabled()) {
LOG.debug((((((((((("Primitives=" + coeff[0]) + ", arrays=") + coeff[1]) + ", references=") + coeff[2]) + ", refSize ") + REFERENCE) + ", size=") + size) + ", prealign_size=") + prealign_size);
}
}
return size;
} | 3.26 |
hbase_ClassSize_oopSize_rdh | /**
* Return the size of an "ordinary object pointer". Either 4 or 8, depending on 32/64 bit, and
* CompressedOops
*/
int oopSize() {
return is32BitJVM() ? 4 : 8;
} | 3.26 |
hbase_ClassSize_sizeOfByteArray_rdh | /**
* Calculate the memory consumption (in byte) of a part of a byte array, including the array
* header and the part of the backing byte array. This function is used when the byte array backs
* multiple objects. For example, in {@link org.apache.hadoop.hbase.KeyValue}, multiple KeyValue
* objects share a same backing byte array ({@link org.apache.hadoop.hbase.KeyValue#bytes}). Also
* see {@link org.apache.hadoop.hbase.KeyValue#heapSize()}.
*
* @param len
* the length (in byte) used partially in the backing byte array
* @return the memory consumption (in byte) of the part of the byte array
*/
public static long sizeOfByteArray(int len) {
return memoryLayout.sizeOfByteArray(len);
} | 3.26 |
hbase_ClassSize_estimateBase_rdh | /**
* Estimate the static space taken up by the fields of a class. This includes the space taken up
* by references (the pointer) but not by the referenced object. So the estimated size of an
* array field does not depend on the size of the array. Similarly the size of an object
* (reference) field does not depend on the object.
*
* @param cl
* class
* @param debug
* debug flag
* @return the size estimate in bytes.
*/
@SuppressWarnings("unchecked")
public static long estimateBase(Class cl, boolean debug) {
return estimateBaseFromCoefficients(m0(cl, debug), debug);
} | 3.26 |
hbase_ByteBuff_toBytes_rdh | /**
* Copy the content from this ByteBuff to a byte[].
*/
public byte[] toBytes() {
return toBytes(0, this.limit());
} | 3.26 |
hbase_ByteBuff_checkRefCount_rdh | /**
* ************************* Methods for reference count *********************************
*/
/**
* Checks that there are still references to the buffer. This protects against the case where a
* ByteBuff method (i.e. slice, get, etc) could be called against a buffer whose backing data may
* have been released. We only need to do this check if the refCnt has a recycler. If there's no
* recycler, the backing data will be handled by normal java GC and won't get incorrectly
* released. So we can avoid the overhead of checking the refCnt on every call. See HBASE-27710.
*/
protected void checkRefCount() {
if (f0.hasRecycler()) {
ObjectUtil.checkPositive(refCnt(), REFERENCE_COUNT_NAME);
}
} | 3.26 |
hbase_ByteBuff_readLong_rdh | /**
* Read long which was written to fitInBytes bytes and increment position.
*
* @param fitInBytes
* In how many bytes given long is stored.
* @return The value of parsed long.
*/
public static long readLong(ByteBuff in, final int fitInBytes) {
long tmpLength = 0;
for (int i = 0; i < fitInBytes; ++i) {
tmpLength |= (in.get() & 0xffL) << (8L * i);
}
return tmpLength;
} | 3.26 |
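The loop in readLong above reassembles a little-endian, variable-width long. The hypothetical round trip below works on a plain byte[] rather than the ByteBuff API, just to make the byte layout explicit; the helper names are made up.

public class FitInBytesSketch {
  // Write the low fitInBytes bytes of value, least-significant byte first.
  static byte[] writeLong(long value, int fitInBytes) {
    byte[] out = new byte[fitInBytes];
    for (int i = 0; i < fitInBytes; i++) {
      out[i] = (byte) (value >>> (8 * i));
    }
    return out;
  }

  // Mirror of the read loop above: OR each byte back into its position.
  static long readLong(byte[] in, int fitInBytes) {
    long result = 0;
    for (int i = 0; i < fitInBytes; i++) {
      result |= (in[i] & 0xffL) << (8L * i);
    }
    return result;
  }

  public static void main(String[] args) {
    long value = 0x12345678L;
    byte[] encoded = writeLong(value, 5);              // 5 bytes comfortably hold this value
    System.out.println(readLong(encoded, 5) == value); // true
  }
}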
hbase_ByteBuff_readCompressedInt_rdh | /**
* Read integer from ByteBuff coded in 7 bits and increment position.
*/
public static int readCompressedInt(ByteBuff buf) {
byte b = buf.get();
if ((b & ByteBufferUtils.NEXT_BIT_MASK) != 0) {
return (b & ByteBufferUtils.VALUE_MASK) + (readCompressedInt(buf) << ByteBufferUtils.NEXT_BIT_SHIFT);
}
return b & ByteBufferUtils.VALUE_MASK;
} | 3.26 |
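readCompressedInt above decodes the common 7-bit varint scheme: each byte carries 7 value bits (VALUE_MASK) plus a continuation flag in the high bit (NEXT_BIT_MASK), least-significant group first. The standalone sketch below encodes and decodes the same format over a byte[]; the mask values are assumed to match ByteBufferUtils, and the class itself is hypothetical.

import java.io.ByteArrayOutputStream;

public class CompressedIntSketch {
  static final int VALUE_MASK = 0x7f;    // low 7 bits carry data
  static final int NEXT_BIT_MASK = 0x80; // high bit set means another byte follows

  static byte[] encode(int value) {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    while ((value & ~VALUE_MASK) != 0) {
      out.write((value & VALUE_MASK) | NEXT_BIT_MASK);
      value >>>= 7;
    }
    out.write(value);
    return out.toByteArray();
  }

  // Same recursion as readCompressedInt, but over a byte[] with an explicit cursor.
  static int decode(byte[] buf, int pos) {
    byte b = buf[pos];
    if ((b & NEXT_BIT_MASK) != 0) {
      return (b & VALUE_MASK) + (decode(buf, pos + 1) << 7);
    }
    return b & VALUE_MASK;
  }

  public static void main(String[] args) {
    int value = 300;                        // needs two 7-bit groups
    byte[] encoded = encode(value);         // [0xAC, 0x02]
    System.out.println(decode(encoded, 0)); // 300
  }
}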
hbase_ByteBuff_wrap_rdh | // Make this private because we don't want to expose the refCnt related wrap method to upstream.
private static ByteBuff wrap(ByteBuffer buffer, RefCnt refCnt) {
return new SingleByteBuff(refCnt, buffer);
} | 3.26 |
hbase_ByteBuff_read_rdh | // static helper methods
public static int read(ReadableByteChannel channel, ByteBuffer buf, long offset, ChannelReader reader) throws IOException {
if (buf.remaining() <= NIO_BUFFER_LIMIT) {
return reader.read(channel, buf, offset);
}
int originalLimit = buf.limit();
int initialRemaining = buf.remaining();
int v2 = 0;
while (buf.remaining() > 0) {
try {
int ioSize = Math.min(buf.remaining(), NIO_BUFFER_LIMIT);
buf.limit(buf.position() + ioSize);
offset += v2;
v2 = reader.read(channel, buf, offset);
if (v2 < ioSize) {
break;
}
} finally {
buf.limit(originalLimit);
}
}
int v4 = initialRemaining - buf.remaining();
return v4 > 0 ? v4 : v2;
} | 3.26 |
hbase_ByteBuff_touch_rdh | /**
* Calling this method in strategic locations where ByteBuffs are referenced may help diagnose
* potential buffer leaks. We pass the buffer itself as a default hint, but one can use
* {@link #touch(Object)} to pass their own hint as well.
*/
@Override
public ByteBuff touch() {
return touch(this);
} | 3.26 |
hbase_CreateTableProcedure_addRegionsToMeta_rdh | /**
* Add the specified set of regions to the hbase:meta table.
*/
private static void addRegionsToMeta(final MasterProcedureEnv env, final TableDescriptor tableDescriptor, final List<RegionInfo> regionInfos) throws IOException {
MetaTableAccessor.addRegionsToMeta(env.getMasterServices().getConnection(), regionInfos, tableDescriptor.getRegionReplication());
} | 3.26 |
hbase_WALCoprocessorHost_preWALRoll_rdh | /**
* Called before rolling the current WAL
*
* @param oldPath
* the path of the current wal that we are replacing
* @param newPath
* the path of the wal we are going to create
*/
public void preWALRoll(Path oldPath, Path newPath) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new WALObserverOperation() {
@Override
protected void call(WALObserver observer) throws IOException {
observer.preWALRoll(this, oldPath, newPath);
}
});
} | 3.26 |
hbase_WALCoprocessorHost_postWALRoll_rdh | /**
* Called after rolling the current WAL
*
* @param oldPath
* the path of the wal that we replaced
* @param newPath
* the path of the wal we have created and now is the current
*/
public void postWALRoll(Path oldPath, Path newPath) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new WALObserverOperation() {
@Override
protected void call(WALObserver observer) throws IOException {
observer.postWALRoll(this, oldPath, newPath);
}
});
} | 3.26 |
hbase_ReplicationPeers_getAllPeerIds_rdh | /**
* Returns the set of peerIds of the clusters that have been connected and have an underlying
* ReplicationPeer.
*
* @return a Set of Strings for peerIds
*/
public Set<String> getAllPeerIds() {
return Collections.unmodifiableSet(peerCache.keySet());
} | 3.26 |
hbase_ReplicationPeers_getPeer_rdh | /**
* Returns the ReplicationPeerImpl for the specified cached peer. This ReplicationPeer will
* continue to track changes to the Peer's state and config. This method returns null if no peer
* has been cached with the given peerId.
*
* @param peerId
* id for the peer
* @return ReplicationPeer object
*/
public ReplicationPeerImpl getPeer(String peerId) {
return peerCache.get(peerId);
} | 3.26 |
hbase_ReplicationPeers_createPeer_rdh | /**
* Helper method to connect to a peer
*
* @param peerId
* peer's identifier
* @return object representing the peer
*/
private ReplicationPeerImpl createPeer(String peerId) throws ReplicationException {
ReplicationPeerConfig peerConfig = peerStorage.getPeerConfig(peerId);
boolean enabled = peerStorage.isPeerEnabled(peerId);
SyncReplicationState syncReplicationState = peerStorage.getPeerSyncReplicationState(peerId);
SyncReplicationState newSyncReplicationState = peerStorage.getPeerNewSyncReplicationState(peerId);
return new ReplicationPeerImpl(ReplicationUtils.getPeerClusterConfiguration(peerConfig, conf), peerId, peerConfig, enabled, syncReplicationState, newSyncReplicationState);
} | 3.26 |
hbase_ReplicationPeers_addPeer_rdh | /**
* Method called after a peer has been connected. It will create a ReplicationPeer to track the
* newly connected cluster.
*
* @param peerId
* a short that identifies the cluster
* @return whether a ReplicationPeer was successfully created
* @throws ReplicationException
* if connecting to the peer fails
*/
public boolean addPeer(String peerId) throws ReplicationException {
if (this.peerCache.containsKey(peerId)) {
return false;
}
peerCache.put(peerId, createPeer(peerId));
return true;
} | 3.26 |
hbase_ProcedureManagerHost_loadUserProcedures_rdh | /**
* Load user procedures. Read the class names from configuration. Called by constructor.
*/
protected void loadUserProcedures(Configuration conf, String confKey) {
Class<?> implClass = null;
// load default procedures from configure file
String[] defaultProcClasses = conf.getStrings(confKey);
if ((defaultProcClasses == null) || (defaultProcClasses.length == 0))
return;
List<E> configured = new ArrayList<>();
for (String className : defaultProcClasses) {
className = className.trim();
ClassLoader cl = this.getClass().getClassLoader();
Thread.currentThread().setContextClassLoader(cl);
try {
implClass = cl.loadClass(className);
configured.add(loadInstance(implClass));
LOG.info(("User procedure " + className) + " was loaded successfully.");
} catch (ClassNotFoundException e) {
LOG.warn((("Class " + className) + " cannot be found. ") + e.getMessage());
} catch (IOException e) {
LOG.warn((("Load procedure " + className) + " failed. ") + e.getMessage());
}
}
// add entire set to the collection
procedures.addAll(configured);
} | 3.26 |
hbase_ProcedureManagerHost_register_rdh | // Register a procedure manager object
public void register(E obj) {
procedures.add(obj);
} | 3.26 |
hbase_FastDiffDeltaEncoder_decompressFirstKV_rdh | /**
* Copies the first key/value from the given stream, and initializes decompression state based
* on it. Assumes that we have already read key and value lengths. Does not set
* {@link #qualifierLength} (not used by decompression) or {@link #prevOffset} (set by the caller
* afterwards).
*/
private void decompressFirstKV(ByteBuffer out, DataInputStream in) throws IOException {
int kvPos = out.position();
out.putInt(keyLength);
out.putInt(valueLength);
prevTimestampOffset = (out.position() + keyLength) - KeyValue.TIMESTAMP_TYPE_SIZE;
ByteBufferUtils.copyFromStreamToBuffer(out, in, keyLength + valueLength);
rowLength = out.getShort(kvPos + KeyValue.ROW_OFFSET);
familyLength = out.get(((kvPos + KeyValue.ROW_OFFSET) + KeyValue.ROW_LENGTH_SIZE) + rowLength);
type = out.get(prevTimestampOffset + KeyValue.TIMESTAMP_SIZE);
} | 3.26 |
hbase_OrderedFloat64_encodeDouble_rdh | /**
* Write instance {@code val} into buffer {@code dst}.
*
* @param dst
* the {@link PositionedByteRange} to write to
* @param val
* the value to write to {@code dst}
* @return the number of bytes written
*/
public int encodeDouble(PositionedByteRange dst, double val) {
return OrderedBytes.encodeFloat64(dst, val, order);
} | 3.26 |
hbase_OrderedFloat64_decodeDouble_rdh | /**
* Read a {@code double} value from the buffer {@code src}.
*
* @param src
* the {@link PositionedByteRange} to read the {@code double} from
* @return the {@code double} floating-point value with the same bit pattern
*/
public double decodeDouble(PositionedByteRange src) {
return OrderedBytes.decodeFloat64(src);
} | 3.26 |
hbase_BitSetNode_getBitmapIndex_rdh | // ========================================================================
// Bitmap Helpers
// ========================================================================
private int getBitmapIndex(final long procId) {
return ((int) (procId - start));
} | 3.26 |
hbase_BitSetNode_getActiveMinProcId_rdh | // ========================================================================
// Min/Max Helpers
// ========================================================================
public long getActiveMinProcId() {
long minProcId = start;
for (int i = 0; i < deleted.length; ++i) {
if (deleted[i] == 0) {
return minProcId;
}
if (deleted[i] != WORD_MASK) {
for (int j = 0; j < BITS_PER_WORD; ++j) {
if ((deleted[i] & (1L << j)) == 0) {
return minProcId + j;
}
}
}
minProcId += BITS_PER_WORD;
}
return Procedure.NO_PROC_ID;
} | 3.26 |
hbase_BitSetNode_m2_rdh | // ========================================================================
// Helpers
// ========================================================================
/**
* Returns upper boundary (aligned to multiple of BITS_PER_WORD) of bitmap range x belongs to.
*/
private static long m2(final long x) {
return (x + (BITS_PER_WORD - 1)) & (-BITS_PER_WORD);
} | 3.26 |
hbase_BitSetNode_getEnd_rdh | /**
* Inclusive.
*/
public long getEnd() {
return (start + (modified.length << ADDRESS_BITS_PER_WORD)) - 1;
} | 3.26 |
hbase_BitSetNode_convert_rdh | /**
* Convert to
* org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode
* protobuf.
*/
public TrackerNode convert() {
ProcedureProtos.ProcedureStoreTracker.TrackerNode.Builder builder = ProcedureStoreTracker.TrackerNode.newBuilder();
builder.setStartId(start);
for (int i = 0; i < modified.length; ++i) {
builder.addUpdated(modified[i]);
builder.addDeleted(deleted[i]);
}
return builder.build();
} | 3.26 |
hbase_BitSetNode_canGrow_rdh | // ========================================================================
// Grow/Merge Helpers
// ========================================================================
public boolean canGrow(long procId) {
if (procId <= start) {
return (getEnd() - procId) < MAX_NODE_SIZE;
} else {
return (procId - start) < MAX_NODE_SIZE;
}
} | 3.26 |
hbase_BitSetNode_isAllModified_rdh | /**
* Returns true if all the procedures have been modified.
*/
public boolean isAllModified() {
// TODO: cache the value
for (int i = 0; i < modified.length; ++i) {
if ((modified[i] | deleted[i]) != WORD_MASK) {
return false;
}
}
return true;
} | 3.26 |
hbase_BitSetNode_isEmpty_rdh | /**
* Returns true, if there are no active procedures in this BitSetNode, else false.
*/
public boolean isEmpty() {
// TODO: cache the value
for (int i = 0; i < deleted.length; ++i) {
if (deleted[i] != WORD_MASK) {
return false;
}
}
return true;
} | 3.26 |
hbase_BitSetNode_getActiveProcIds_rdh | /**
* Returns all the active procedure ids in this bit set.
*/
public long[] getActiveProcIds() {
List<Long> procIds = new ArrayList<>();
for (int wordIndex = 0; wordIndex < modified.length; wordIndex++) {
if ((deleted[wordIndex] == WORD_MASK) || (modified[wordIndex] == 0)) {
// This should be the common case, where most procedures has been deleted.
continue;
}
long baseProcId = getStart() + (wordIndex << ADDRESS_BITS_PER_WORD);
for (int i = 0; i < (1 << ADDRESS_BITS_PER_WORD); i++) {
long mask = 1L << i;
if (((deleted[wordIndex] & mask) == 0) && ((modified[wordIndex] & mask) != 0)) {
procIds.add(baseProcId + i);
}
}
}
return procIds.stream().mapToLong(Long::longValue).toArray();
} | 3.26 |
hbase_BitSetNode_alignDown_rdh | /**
* Returns lower boundary (aligned to multiple of BITS_PER_WORD) of bitmap range x belongs to.
*/
private static long alignDown(final long x) {
return x & (-BITS_PER_WORD);
} | 3.26 |
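The alignment helpers above (m2 and alignDown) snap a procedure id to the word boundaries of the bitmap; assuming BITS_PER_WORD is 64 (one long word per 64 procedure ids), both are standard power-of-two roundings. A tiny standalone sketch of the two directions:

public class WordAlignSketch {
  static final long BITS_PER_WORD = 64; // assumed word width, matching a long[] bitmap

  // Round x up to the next multiple of BITS_PER_WORD (the exclusive upper boundary).
  static long alignUp(long x) {
    return (x + (BITS_PER_WORD - 1)) & (-BITS_PER_WORD);
  }

  // Round x down to the previous multiple of BITS_PER_WORD.
  static long alignDown(long x) {
    return x & (-BITS_PER_WORD);
  }

  public static void main(String[] args) {
    System.out.println(alignDown(70) + ".." + alignUp(70)); // 64..128
    System.out.println(alignDown(64) + ".." + alignUp(64)); // 64..64
  }
}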
hbase_CompositeImmutableSegment_incMemStoreSize_rdh | /**
* Updates the heap size counter of the segment by the given delta
*/
@Override
public long incMemStoreSize(long delta, long heapOverhead, long offHeapOverhead, int cellsCount) {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
} | 3.26 |
hbase_CompositeImmutableSegment_getCellsCount_rdh | /**
* Returns number of cells in segment
*/
@Override
public int getCellsCount() {
int result = 0;
for (ImmutableSegment s : segments) {
result += s.getCellsCount();
}
return result;
} | 3.26 |
hbase_CompositeImmutableSegment_tailSet_rdh | /**
* Returns a subset of the segment cell set, which starts with the given cell
*
* @param firstCell
* a cell in the segment
* @return a subset of the segment cell set, which starts with the given cell
*/
@Override
protected SortedSet<Cell> tailSet(Cell firstCell) {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
} | 3.26 |
hbase_CompositeImmutableSegment_close_rdh | /**
* Closing a segment before it is being discarded
*/
@Override
public void close() {
for (ImmutableSegment s : segments) {
s.close();
}
} | 3.26 |
hbase_CompositeImmutableSegment_getHeapSize_rdh | /**
* Returns The heap size of this segment.
*/
@Override
public long getHeapSize() {
long result = 0;
for (ImmutableSegment s : segments) {
result += s.getHeapSize();
}
return result;
} | 3.26 |
hbase_CompositeImmutableSegment_getScanner_rdh | /**
* Creates the scanner for the given read point
*
* @return a scanner for the given read point
*/
@Override
public KeyValueScanner getScanner(long readPoint) {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
} | 3.26 |
hbase_CompositeImmutableSegment_maybeCloneWithAllocator_rdh | /**
* If the segment has a memory allocator the cell is being cloned to this space, and returned;
* otherwise the given cell is returned
*
* @return either the given cell or its clone
*/
@Override
public Cell maybeCloneWithAllocator(Cell cell, boolean forceCloneOfBigCell) {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
} | 3.26 |
hbase_CompositeImmutableSegment_getCellSet_rdh | /**
* Returns a set of all cells in the segment
*/
@Override
protected CellSet getCellSet() {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
} | 3.26 |
hbase_CompositeImmutableSegment_getDataSize_rdh | /**
* Returns Sum of all cell sizes.
*/
@Override
public long getDataSize() {
return this.keySize;
} | 3.26 |
hbase_CompositeImmutableSegment_last_rdh | // *** Methods for SegmentsScanner
@Override
public Cell last() {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
} | 3.26 |
hbase_CompositeImmutableSegment_dump_rdh | // Debug methods
/**
* Dumps all cells of the segment into the given log
*/
@Override
void dump(Logger log) {
for (ImmutableSegment v9 : segments) {
v9.dump(log);
}
} | 3.26 |
hbase_CompositeImmutableSegment_m0_rdh | /**
* Returns whether the segment has any cells
*/
@Override
public boolean m0() {
for (ImmutableSegment s : segments) {
if (!s.isEmpty())
return false;
}
return true;
} | 3.26 |
hbase_CompositeImmutableSegment_setCellSet_rdh | /**
* Setting the CellSet of the segment - used only for flat immutable segment for setting immutable
* CellSet after its creation in immutable segment constructor
*
* @return this object
*/
@Override
protected CompositeImmutableSegment setCellSet(CellSet cellSetOld, CellSet cellSetNew) {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
} | 3.26 |
hbase_ObjectPool_get_rdh | /**
* Returns a shared object associated with the given {@code key}, which is identified by the
* {@code equals} method.
*
* @throws NullPointerException
* if {@code key} is {@code null}
*/
public V get(K key) {
Reference<V> ref = referenceCache.get(Objects.requireNonNull(key));
if (ref != null) {
V obj = ref.get();
if (obj != null) {
return obj;
}
referenceCache.remove(key, ref);
}
V newObj = objectFactory.createObject(key);
Reference<V> newRef = createReference(key, newObj);
while (true) {
Reference<V> existingRef = referenceCache.putIfAbsent(key, newRef);
if (existingRef == null) {
return newObj;
}
V existingObject = existingRef.get();
if (existingObject != null) {
return existingObject;
}
referenceCache.remove(key, existingRef);
}
} | 3.26 |
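The get() loop above is the classic weak-interning pattern: reuse a live shared instance when one exists, otherwise race to publish a freshly created one. The self-contained sketch below expresses the same idea with a plain WeakReference and ConcurrentHashMap; it is not the HBase ObjectPool API, and the class and method names are hypothetical.

import java.lang.ref.WeakReference;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;

public class WeakInternerSketch<K, V> {
  private final ConcurrentMap<K, WeakReference<V>> cache = new ConcurrentHashMap<>();
  private final Function<K, V> factory;

  public WeakInternerSketch(Function<K, V> factory) {
    this.factory = factory;
  }

  public V get(K key) {
    WeakReference<V> ref = cache.get(key);
    if (ref != null) {
      V obj = ref.get();
      if (obj != null) {
        return obj;             // reuse the live shared instance
      }
      cache.remove(key, ref);   // the referent was collected; drop the stale entry
    }
    V newObj = factory.apply(key);
    WeakReference<V> newRef = new WeakReference<>(newObj);
    while (true) {
      WeakReference<V> existing = cache.putIfAbsent(key, newRef);
      if (existing == null) {
        return newObj;          // we won the publication race
      }
      V existingObj = existing.get();
      if (existingObj != null) {
        return existingObj;     // someone else won with a still-live object
      }
      cache.remove(key, existing); // their object died; retry publishing ours
    }
  }

  public static void main(String[] args) {
    WeakInternerSketch<String, byte[]> interner = new WeakInternerSketch<>(String::getBytes);
    // Both calls return the same instance while it stays strongly reachable.
    System.out.println(interner.get("row-1") == interner.get("row-1")); // true
  }
}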
hbase_ObjectPool_purge_rdh | /**
* Removes stale references of shared objects from the pool. References newly becoming stale may
* still remain.
* <p/>
* The implementation of this method is expected to be lightweight when there is no stale
* reference with the Oracle (Sun) implementation of {@code ReferenceQueue}, because
* {@code ReferenceQueue.poll} just checks a volatile instance variable in {@code ReferenceQueue}.
*/
public void purge() {
if (purgeLock.tryLock()) {
// no parallel purge
try {
while (true) {
@SuppressWarnings("unchecked")
Reference<V> ref = ((Reference<V>) (staleRefQueue.poll()));
if (ref == null) {
break;
}
referenceCache.remove(getReferenceKey(ref), ref);
}
} finally {
purgeLock.unlock();
}
}
}
hbase_ObjectPool_size_rdh | /**
* Returns an estimated count of objects kept in the pool. This also counts stale references, and
* you might want to call {@link #purge()} beforehand.
*/
public int size() {
return referenceCache.size();
} | 3.26 |
hbase_IdentityTableReduce_reduce_rdh | /**
* No aggregation, output pairs of (key, record)
*/
public void reduce(ImmutableBytesWritable key, Iterator<Put> values, OutputCollector<ImmutableBytesWritable, Put> output, Reporter reporter)
throws IOException {
while (values.hasNext()) {
output.collect(key, values.next());
}
} | 3.26 |
hbase_ZKProcedureMemberRpcs_sendMemberAcquired_rdh | /**
* This attempts to create an acquired state znode for the procedure (snapshot name). It then
* looks for the reached znode to trigger in-barrier execution. If not present we have a watcher,
* if present then trigger the in-barrier action.
*/
@Override
public void sendMemberAcquired(Subprocedure sub) throws IOException {
String procName = sub.getName();
try {
LOG.debug(((("Member: '" + memberName) + "' joining acquired barrier for procedure (") + procName) + ") in zk");
String acquiredZNode = ZNodePaths.joinZNode(ZKProcedureUtil.getAcquireBarrierNode(zkController, procName), memberName);
ZKUtil.createAndFailSilent(zkController.getWatcher(), acquiredZNode);
// watch for the complete node for this snapshot
String reachedBarrier = zkController.getReachedBarrierNode(procName);
LOG.debug("Watch for global barrier reached:" + reachedBarrier);
if (ZKUtil.watchAndCheckExists(zkController.getWatcher(), reachedBarrier)) {
receivedReachedGlobalBarrier(reachedBarrier);
}
} catch (KeeperException e) {
member.controllerConnectionFailure((("Failed to acquire barrier for procedure: " + procName) + " and member: ") + memberName, e, procName);
}
} | 3.26 |
hbase_ZKProcedureMemberRpcs_sendMemberAborted_rdh | /**
* This should be called by the member and should write a serialized root cause exception as to
* the abort znode.
*/
@Override
public void sendMemberAborted(Subprocedure sub, ForeignException ee) {
if (sub == null) {
LOG.error("Failed due to null subprocedure", ee);
return;
}
String procName = sub.getName();
LOG.debug(("Aborting procedure (" + procName) + ") in zk");
String procAbortZNode = zkController.getAbortZNode(procName);
try {
String source = (ee.getSource() == null) ? memberName : ee.getSource();
byte[] errorInfo = ProtobufUtil.prependPBMagic(ForeignException.serialize(source, ee));
ZKUtil.createAndFailSilent(zkController.getWatcher(), procAbortZNode, errorInfo);
LOG.debug("Finished creating abort znode:" + procAbortZNode);
} catch (KeeperException e) {
// possible that we get this error for the procedure if we already reset the zk state, but in
// that case we should still get an error for that procedure anyways
zkController.logZKTree(zkController.getBaseZnode());
member.controllerConnectionFailure(("Failed to post zk node:" + procAbortZNode) + " to abort procedure", e, procName);
}
} | 3.26 |
hbase_ZKProcedureMemberRpcs_m0_rdh | /**
* Kick off a new sub-procedure on the listener with the data stored in the passed znode.
* <p>
* Will attempt to create the same procedure multiple times if an procedure znode with the same
* name is created. It is left up the coordinator to ensure this doesn't occur.
*
* @param path
* full path to the znode for the procedure to start
*/
private synchronized void m0(String path) {
LOG.debug("Found procedure znode: " + path);
String opName = ZKUtil.getNodeName(path);
// start watching for an abort notification for the procedure
String abortZNode = zkController.getAbortZNode(opName);
try {
if (ZKUtil.watchAndCheckExists(zkController.getWatcher(), abortZNode)) {
LOG.debug(("Not starting:" + opName) + " because we already have an abort notification.");
return;
}
} catch (KeeperException e) {
member.controllerConnectionFailure((("Failed to get the abort znode (" + abortZNode) + ") for procedure :") + opName, e, opName);
return;
}
// get the data for the procedure
Subprocedure subproc = null;
try {
byte[] data = ZKUtil.getData(zkController.getWatcher(), path);
if (!ProtobufUtil.isPBMagicPrefix(data)) {
String msg = ((("Data in for starting procedure " + opName) + " is illegally formatted (no pb magic). ") + "Killing the procedure: ") + Bytes.toString(data);
LOG.error(msg);
throw new IllegalArgumentException(msg);
}
LOG.debug("start proc data length is " + data.length);
data = Arrays.copyOfRange(data, ProtobufUtil.lengthOfPBMagic(), data.length);
LOG.debug("Found data for znode:" + path);
subproc = member.createSubprocedure(opName, data);
member.submitSubprocedure(subproc);
} catch (IllegalArgumentException iae) {
LOG.error("Illegal argument exception", iae);
sendMemberAborted(subproc, new ForeignException(getMemberName(), iae));
} catch (IllegalStateException ise) {
LOG.error("Illegal state exception ", ise);sendMemberAborted(subproc, new ForeignException(getMemberName(), ise));
} catch (KeeperException e) {
member.controllerConnectionFailure("Failed to get data for new procedure:" + opName, e, opName);
} catch (InterruptedException e) {
member.controllerConnectionFailure("Failed to get data for new procedure:" + opName, e, opName);
Thread.currentThread().interrupt();
}
} | 3.26 |
hbase_ZKProcedureMemberRpcs_receivedReachedGlobalBarrier_rdh | /**
* Pass along the procedure global barrier notification to any listeners
*
* @param path
* full znode path that cause the notification
*/
private void receivedReachedGlobalBarrier(String path) {
LOG.debug("Received reached global barrier:" + path);
String procName = ZKUtil.getNodeName(path);
this.member.receivedReachedGlobalBarrier(procName);
} | 3.26 |
hbase_ZKProcedureMemberRpcs_sendMemberCompleted_rdh | /**
* This acts as the ack for a completed procedure
*/
@Override
public void sendMemberCompleted(Subprocedure sub, byte[] data) throws IOException {
String procName = sub.getName();
LOG.debug(((("Marking procedure '" + procName) + "' completed for member '") + memberName) + "' in zk");
String joinPath = ZNodePaths.joinZNode(zkController.getReachedBarrierNode(procName), memberName);
// ProtobufUtil.prependPBMagic does not take care of null
if (data == null) {
data = new byte[0];
}
try {
ZKUtil.createAndFailSilent(zkController.getWatcher(), joinPath, ProtobufUtil.prependPBMagic(data));
} catch (KeeperException e) {
member.controllerConnectionFailure(("Failed to post zk node:" + joinPath) + " to join procedure barrier.", e, procName);
}
} | 3.26 |
hbase_SlowLogPersistentService_addAllLogsToSysTable_rdh | /**
* Poll from queueForSysTable and insert 100 records in hbase:slowlog table in single batch
*/
public void addAllLogsToSysTable(Connection connection) {
if (queueForSysTable == null) {
f0.trace("hbase.regionserver.slowlog.systable.enabled is turned off. Exiting.");
return;
}
if (LOCK.isLocked()) {
return;
}
LOCK.lock();
try {
List<TooSlowLog.SlowLogPayload> slowLogPayloads = new ArrayList<>();
int i = 0;
while (!queueForSysTable.isEmpty()) {
slowLogPayloads.add(queueForSysTable.poll());
i++;
if (i == f1) {
SlowLogTableAccessor.addSlowLogRecords(slowLogPayloads, connection);
slowLogPayloads.clear();
i = 0;
}
}
if (slowLogPayloads.size() > 0) {
SlowLogTableAccessor.addSlowLogRecords(slowLogPayloads, connection);
}
} finally {
LOCK.unlock();
}
} | 3.26 |
hbase_AuthenticationFilterInitializer_initFilter_rdh | /**
* Initializes hadoop-auth AuthenticationFilter.
* <p>
* Propagates to hadoop-auth AuthenticationFilter configuration all Hadoop configuration
* properties prefixed with "hadoop.http.authentication."
*
* @param container
* The filter container
* @param conf
* Configuration for run-time parameters
*/
@Override
public void initFilter(FilterContainer container, Configuration conf) {
Map<String, String> filterConfig = getFilterConfigMap(conf, PREFIX);
container.addFilter("authentication", AuthenticationFilter.class.getName(), filterConfig);
} | 3.26 |
hbase_JVMClusterUtil_getRegionServer_rdh | /**
* Returns the region server
*/
public HRegionServer getRegionServer() {
return this.regionServer;
} | 3.26 |
hbase_JVMClusterUtil_shutdown_rdh | /**
*/
public static void shutdown(final List<MasterThread> masters, final List<RegionServerThread> regionservers) {
LOG.debug("Shutting down HBase Cluster");
if (masters != null) {
// Do backups first.
JVMClusterUtil.MasterThread activeMaster = null;
for (JVMClusterUtil.MasterThread t : masters) {
// Master was killed but could be still considered as active. Check first if it is stopped.
if (!t.master.isStopped()) {
if (!t.master.isActiveMaster()) {
try {
t.master.stopMaster();
} catch (IOException e) {
LOG.error("Exception occurred while stopping master", e);
}
LOG.info("Stopped backup Master {} is stopped: {}", t.master.hashCode(), t.master.isStopped());
} else {
if (activeMaster != null) {
LOG.warn("Found more than 1 active master, hash {}", activeMaster.master.hashCode());
}
activeMaster = t;
LOG.debug("Found active master hash={}, stopped={}", t.master.hashCode(), t.master.isStopped());
}
}
}
// Do active after.
if (activeMaster != null) {
try {
activeMaster.master.shutdown();
} catch (IOException e) {
LOG.error("Exception occurred in HMaster.shutdown()",
e);
}
}
}
boolean wasInterrupted = false;
final long maxTime = EnvironmentEdgeManager.currentTime() + (30 * 1000);
if (regionservers != null) {
// first try nicely.
for (RegionServerThread t : regionservers) {
t.getRegionServer().stop("Shutdown requested");
}
for (RegionServerThread t : regionservers) {
long now = EnvironmentEdgeManager.currentTime();
if ((t.isAlive() && (!wasInterrupted)) && (now < maxTime)) {
try {
t.join(maxTime - now);
} catch (InterruptedException e) {
LOG.info("Got InterruptedException on shutdown - " +
"not waiting anymore on region server ends", e);
wasInterrupted = true;// someone wants us to speed up.
}
}
}
// Let's try to interrupt the remaining threads if any.
for (int i = 0; i < 100; ++i) {
boolean atLeastOneLiveServer = false;
for (RegionServerThread t : regionservers) {
if (t.isAlive()) {
atLeastOneLiveServer = true;
try {
LOG.warn("RegionServerThreads remaining, give one more chance before interrupting");
t.join(1000);
} catch (InterruptedException e) {
wasInterrupted = true;
}
}
}
if (!atLeastOneLiveServer)
break;
for (RegionServerThread t : regionservers) {
if (t.isAlive()) {
LOG.warn(("RegionServerThreads taking too long to stop, interrupting; thread dump " + "if > 3 attempts: i=") + i);
if (i > 3) {
Threads.printThreadInfo(System.out, "Thread dump " + t.getName());
}
t.interrupt();
}
}
}
}
if (masters != null) {
for (JVMClusterUtil.MasterThread t : masters) {
while (t.master.isAlive() && (!wasInterrupted)) {
try {
// The below has been replaced to debug sometime hangs on end of
// tests.
// this.master.join():
Threads.threadDumpingIsAlive(t.master);
} catch (InterruptedException e) {
LOG.info("Got InterruptedException on shutdown - " + "not waiting anymore on master ends", e);
wasInterrupted = true;
}
}
}
}
LOG.info((((("Shutdown of " + (masters != null ? masters.size() : "0")) + " master(s) and ") + (regionservers != null ? regionservers.size() : "0")) + " regionserver(s) ") + (wasInterrupted ? "interrupted" : "complete"));
if (wasInterrupted) {
Thread.currentThread().interrupt();
}
} | 3.26 |
hbase_JVMClusterUtil_createRegionServerThread_rdh | /**
* Creates a {@link RegionServerThread}. Call 'start' on the returned thread to make it run.
*
* @param c
* Configuration to use.
* @param hrsc
* Class to create.
* @param index
* Used distinguishing the object returned.
* @return Region server added.
*/
public static JVMClusterUtil.RegionServerThread createRegionServerThread(final Configuration c, final Class<? extends HRegionServer> hrsc, final int index) throws IOException {
HRegionServer server;
try {
Constructor<? extends HRegionServer> ctor = hrsc.getConstructor(Configuration.class);
ctor.setAccessible(true);
server = ctor.newInstance(c);
} catch (InvocationTargetException ite) {
Throwable target = ite.getTargetException();
throw new RuntimeException(("Failed construction of RegionServer: " + hrsc.toString()) + (target.getCause() != null ? target.getCause().getMessage() : ""), target);
} catch (Exception e) {
throw new IOException(e);
}
return new JVMClusterUtil.RegionServerThread(server, index);
} | 3.26 |
hbase_JVMClusterUtil_waitForServerOnline_rdh | /**
* Block until the region server has come online, indicating it is ready to be used.
*/
public void waitForServerOnline() {
// The server is marked online after the init method completes inside of
// the HRS#run method. HRS#init can fail for whatever reason. In those
// cases, we'll jump out of the run without setting online flag. Check
// stopRequested so we don't wait here a flag that will never be flipped.
regionServer.waitForServerOnline();
} | 3.26 |
hbase_JVMClusterUtil_createMasterThread_rdh | /**
* Creates a {@link MasterThread}. Call 'start' on the returned thread to make it run.
*
* @param c
* Configuration to use.
* @param hmc
* Class to create.
* @param index
* Used distinguishing the object returned.
* @return Master added.
*/
public static JVMClusterUtil.MasterThread createMasterThread(final Configuration c, final Class<? extends HMaster> hmc, final int index) throws IOException {
HMaster server;
try {
server = hmc.getConstructor(Configuration.class).newInstance(c);
} catch (InvocationTargetException ite) {
Throwable target = ite.getTargetException();
throw new RuntimeException(("Failed construction of Master: " + hmc.toString()) + (target.getCause() != null ? target.getCause().getMessage() : ""), target);
} catch (Exception e) {
throw new IOException(e);
}
// Needed if a master based registry is configured for internal cluster connections. Here, we
// just add the current master host port since we do not know other master addresses up front
// in mini cluster tests.
c.set(HConstants.MASTER_ADDRS_KEY, Preconditions.checkNotNull(server.getServerName().getAddress()).toString());
return new JVMClusterUtil.MasterThread(server, index);
} | 3.26 |