name | code_snippet | score |
---|---|---|
hbase_PrivateCellUtil_writeCellToBuffer_rdh | /**
* Writes a cell to the buffer at the given offset
*
* @param cell
* the cell to be written
* @param buf
* the buffer to which the cell has to be written
* @param offset
* the offset at which the cell should be written
*/
public static void writeCellToBuffer(Cell cell, ByteBuffer buf, int offset) {
if (cell instanceof ExtendedCell) {
((ExtendedCell) (cell)).write(buf, offset);
} else {
// Using the KVUtil
byte[] bytes = KeyValueUtil.copyToNewByteArray(cell);
ByteBufferUtils.copyFromArrayToBuffer(buf, offset, bytes, 0, bytes.length);
}
} | 3.26 |
hbase_PrivateCellUtil_getValueAsBigDecimal_rdh | /**
* Converts the value bytes of the given cell into a BigDecimal
*
* @return value as BigDecimal
*/
public static BigDecimal getValueAsBigDecimal(Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
return ByteBufferUtils.toBigDecimal(((ByteBufferExtendedCell) (cell)).getValueByteBuffer(), ((ByteBufferExtendedCell) (cell)).getValuePosition(), cell.getValueLength());
}
return Bytes.toBigDecimal(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
} | 3.26 |
hbase_PrivateCellUtil_compareQualifier_rdh | /**
* Compare cell's qualifier against given comparator
*
* @param cell
* the cell to use for comparison
* @param comparator
* the {@link ByteArrayComparable} to use for comparison
* @return result comparing cell's qualifier
*/
public static int compareQualifier(Cell cell, ByteArrayComparable comparator) {
if (cell instanceof ByteBufferExtendedCell) {
return comparator.compareTo(((ByteBufferExtendedCell) (cell)).getQualifierByteBuffer(), ((ByteBufferExtendedCell) (cell)).getQualifierPosition(), cell.getQualifierLength());
}
return comparator.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength());
} | 3.26 |
hbase_PrivateCellUtil_writeFlatKey_rdh | /**
* Writes the Cell's key part as it would have serialized in a KeyValue. The format is <2 bytes
* rk len><rk><1 byte cf len><cf><qualifier><8 bytes
* timestamp><1 byte type>
*/
public static void writeFlatKey(Cell cell, DataOutput out) throws IOException {
short rowLen = cell.getRowLength();
byte fLen = cell.getFamilyLength();
int qLen = cell.getQualifierLength();
// Branch once on the cell implementation instead of checking it before writing each
// component of the cell
if (cell instanceof ByteBufferExtendedCell) {
out.writeShort(rowLen);
ByteBufferUtils.copyBufferToStream(out, ((ByteBufferExtendedCell) (cell)).getRowByteBuffer(), ((ByteBufferExtendedCell) (cell)).getRowPosition(), rowLen);
out.writeByte(fLen);
ByteBufferUtils.copyBufferToStream(out, ((ByteBufferExtendedCell) (cell)).getFamilyByteBuffer(), ((ByteBufferExtendedCell) (cell)).getFamilyPosition(), fLen);
ByteBufferUtils.copyBufferToStream(out, ((ByteBufferExtendedCell) (cell)).getQualifierByteBuffer(), ((ByteBufferExtendedCell) (cell)).getQualifierPosition(), qLen);
} else {
out.writeShort(rowLen);
out.write(cell.getRowArray(), cell.getRowOffset(), rowLen);
out.writeByte(fLen);
out.write(cell.getFamilyArray(), cell.getFamilyOffset(), fLen);
out.write(cell.getQualifierArray(), cell.getQualifierOffset(), qLen);
}
out.writeLong(cell.getTimestamp());
out.writeByte(cell.getTypeByte());
} | 3.26 |
hbase_PrivateCellUtil_isDelete_rdh | /**
* Return true if a delete type: a {@link KeyValue.Type#Delete}, a {@link KeyValue.Type#DeleteFamily}
* or a {@link KeyValue.Type#DeleteColumn} KeyValue type.
*/
public static boolean isDelete(final byte type) {
return (Type.Delete.getCode() <= type) && (type <= Type.DeleteFamily.getCode());
} | 3.26 |
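The single range check works because of how `KeyValue.Type` orders its byte codes: Delete, DeleteFamilyVersion, DeleteColumn and DeleteFamily all fall between `Delete` and `DeleteFamily`. A small sketch of my own, exercising the method above through the enum rather than raw byte literals:

```java
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.PrivateCellUtil;

public class IsDeleteSketch {
  public static void main(String[] args) {
    // Every delete variant's code lies in [Delete, DeleteFamily], so one range test covers them all.
    System.out.println(PrivateCellUtil.isDelete(KeyValue.Type.Delete.getCode()));              // true
    System.out.println(PrivateCellUtil.isDelete(KeyValue.Type.DeleteFamilyVersion.getCode())); // true
    System.out.println(PrivateCellUtil.isDelete(KeyValue.Type.DeleteColumn.getCode()));        // true
    System.out.println(PrivateCellUtil.isDelete(KeyValue.Type.DeleteFamily.getCode()));        // true
    System.out.println(PrivateCellUtil.isDelete(KeyValue.Type.Put.getCode()));                 // false
  }
}
```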
hbase_PrivateCellUtil_writeTags_rdh | /**
* Writes the tag from the given cell to the output stream
*
* @param out
* The output stream to which the data has to be written
* @param cell
* The cell whose contents have to be written
* @param tagsLength
* the tag length
*/
public static void writeTags(OutputStream out, Cell cell, int tagsLength) throws IOException {
if (cell instanceof ByteBufferExtendedCell) {
ByteBufferUtils.copyBufferToStream(out, ((ByteBufferExtendedCell) (cell)).getTagsByteBuffer(), ((ByteBufferExtendedCell) (cell)).getTagsPosition(), tagsLength);
} else {
out.write(cell.getTagsArray(), cell.getTagsOffset(), tagsLength);
}
} | 3.26 |
hbase_PrivateCellUtil_overlappingKeys_rdh | /**
* Returns true if the first range start1...end1 overlaps with the second range start2...end2,
* assuming the byte arrays represent row keys
*/
public static boolean overlappingKeys(final byte[] start1, final byte[] end1, final byte[] start2, final byte[] end2) {
return (((end2.length == 0) || (start1.length == 0)) || (Bytes.compareTo(start1, end2) < 0)) && (((end1.length == 0) || (start2.length == 0)) || (Bytes.compareTo(start2, end1) < 0));
} | 3.26 |
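A usage sketch of mine showing the half-open range semantics and the empty-array convention ("unbounded on that side"); the key literals are arbitrary:

```java
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class OverlappingKeysSketch {
  public static void main(String[] args) {
    byte[] unbounded = new byte[0]; // zero-length array means no bound on that side
    // [b, f) and [d, h) share [d, f) -> true
    System.out.println(PrivateCellUtil.overlappingKeys(
      Bytes.toBytes("b"), Bytes.toBytes("f"), Bytes.toBytes("d"), Bytes.toBytes("h")));
    // [b, d) and [d, h) only touch at d, which does not count as overlap -> false
    System.out.println(PrivateCellUtil.overlappingKeys(
      Bytes.toBytes("b"), Bytes.toBytes("d"), Bytes.toBytes("d"), Bytes.toBytes("h")));
    // An unbounded end on the first range overlaps any later range -> true
    System.out.println(PrivateCellUtil.overlappingKeys(
      Bytes.toBytes("b"), unbounded, Bytes.toBytes("d"), Bytes.toBytes("h")));
  }
}
```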
hbase_PrivateCellUtil_compareFamily_rdh | /**
* Compare cell's column family against given comparator
*
* @param cell
* the cell to use for comparison
* @param comparator
* the {@link ByteArrayComparable} to use for comparison
* @return result comparing cell's column family
*/
public static int compareFamily(Cell cell, ByteArrayComparable comparator) {
if (cell instanceof ByteBufferExtendedCell) {
return comparator.compareTo(((ByteBufferExtendedCell) (cell)).getFamilyByteBuffer(), ((ByteBufferExtendedCell) (cell)).getFamilyPosition(), cell.getFamilyLength());
}
return comparator.compareTo(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength());
} | 3.26 |
hbase_PrivateCellUtil_writeQualifierSkippingBytes_rdh | /**
* Writes the qualifier from the given cell to the output stream excluding the common prefix
*
* @param out
* The DataOutputStream to which the data has to be written
* @param cell
* The cell whose contents have to be written
* @param qlength
* the qualifier length
* @param commonPrefix
* the number of common prefix bytes to skip
*/
public static void writeQualifierSkippingBytes(DataOutputStream out, Cell cell, int qlength, int commonPrefix) throws IOException {
if (cell instanceof ByteBufferExtendedCell) {
ByteBufferUtils.copyBufferToStream(((DataOutput) (out)), ((ByteBufferExtendedCell) (cell)).getQualifierByteBuffer(), ((ByteBufferExtendedCell) (cell)).getQualifierPosition() + commonPrefix, qlength - commonPrefix);
} else {
out.write(cell.getQualifierArray(), cell.getQualifierOffset() + commonPrefix, qlength - commonPrefix);
}
} | 3.26 |
hbase_PrivateCellUtil_writeRowSkippingBytes_rdh | /**
* Writes the row from the given cell to the output stream excluding the common prefix
*
* @param out
* The DataOutputStream to which the data has to be written
* @param cell
* The cell whose contents have to be written
* @param rlength
* the row length
* @param commonPrefix
* the number of common prefix bytes to skip
*/
public static void writeRowSkippingBytes(DataOutputStream out, Cell cell, short rlength, int commonPrefix) throws IOException {
if (cell instanceof ByteBufferExtendedCell) {
ByteBufferUtils.copyBufferToStream(((DataOutput) (out)), ((ByteBufferExtendedCell) (cell)).getRowByteBuffer(), ((ByteBufferExtendedCell) (cell)).getRowPosition() + commonPrefix, rlength - commonPrefix);
} else {
out.write(cell.getRowArray(), cell.getRowOffset() + commonPrefix, rlength - commonPrefix);
}
}
/**
* Find length of common prefix in keys of the cells, considering key as byte[] if serialized in
* {@link KeyValue} | 3.26 |
hbase_PrivateCellUtil_writeRow_rdh | /**
* Writes the row from the given cell to the output stream
*
* @param out
* The output stream to which the data has to be written
* @param cell
* The cell whose contents have to be written
* @param rlength
* the row length
*/
public static void writeRow(OutputStream out, Cell cell, short rlength) throws IOException {
if (cell instanceof ByteBufferExtendedCell) {
ByteBufferUtils.copyBufferToStream(out, ((ByteBufferExtendedCell) (cell)).getRowByteBuffer(), ((ByteBufferExtendedCell) (cell)).getRowPosition(), rlength);
} else {
out.write(cell.getRowArray(), cell.getRowOffset(), rlength);
}
} | 3.26 |
hbase_PrivateCellUtil_isDeleteType_rdh | /**
* Returns True if this cell is a {@link KeyValue.Type#Delete} type.
*/
public static boolean isDeleteType(Cell cell) {
return cell.getTypeByte() == Type.Delete.getCode();
} | 3.26 |
hbase_PrivateCellUtil_getValueAsInt_rdh | /**
* Converts the value bytes of the given cell into an int value
*
* @return value as int
*/
public static int getValueAsInt(Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
return ByteBufferUtils.toInt(((ByteBufferExtendedCell) (cell)).getValueByteBuffer(), ((ByteBufferExtendedCell) (cell)).getValuePosition());
}
return Bytes.toInt(cell.getValueArray(), cell.getValueOffset());
} | 3.26 |
hbase_PrivateCellUtil_qualifierStartsWith_rdh | /**
* Finds if the start of the qualifier part of the Cell matches <code>startsWith</code>
*
* @param left
* the cell with which we need to match the qualifier
* @param startsWith
* the qualifier prefix to match, as a byte[]
* @return true if the qualifier has the same starting characters, false otherwise
*/
public static boolean qualifierStartsWith(final Cell left, final byte[] startsWith) {
if ((startsWith == null) || (startsWith.length == 0)) {
throw new IllegalArgumentException("Cannot pass an empty startsWith");
}
if (left.getQualifierLength() < startsWith.length) {
return false;
}
if (left instanceof ByteBufferExtendedCell) {
return ByteBufferUtils.equals(((ByteBufferExtendedCell) (left)).getQualifierByteBuffer(), ((ByteBufferExtendedCell) (left)).getQualifierPosition(), startsWith.length, startsWith, 0, startsWith.length);
}
return Bytes.equals(left.getQualifierArray(), left.getQualifierOffset(), startsWith.length, startsWith, 0, startsWith.length);
} | 3.26 |
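A small prefix-matching sketch (mine, with made-up qualifier names) against the method above:

```java
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class QualifierStartsWithSketch {
  public static void main(String[] args) {
    Cell cell = new KeyValue(Bytes.toBytes("r1"), Bytes.toBytes("cf"),
      Bytes.toBytes("attr:height"), Bytes.toBytes("180"));
    System.out.println(PrivateCellUtil.qualifierStartsWith(cell, Bytes.toBytes("attr:"))); // true
    System.out.println(PrivateCellUtil.qualifierStartsWith(cell, Bytes.toBytes("meta:"))); // false
    // An empty prefix is rejected with an IllegalArgumentException rather than returning true.
  }
}
```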
hbase_PrivateCellUtil_isDeleteColumnOrFamily_rdh | /**
* Returns True if this cell is a delete family or column type.
*/
public static boolean isDeleteColumnOrFamily(Cell cell) {
int t = cell.getTypeByte();
return (t == Type.DeleteColumn.getCode()) || (t == Type.DeleteFamily.getCode());
} | 3.26 |
hbase_CellCounter_createSubmittableJob_rdh | /**
* Sets up the actual job.
*
* @param conf
* The current configuration.
* @param args
* The command line parameters.
* @return The newly created job.
* @throws IOException
* When setting up the job fails.
*/
public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException {
String tableName = args[0];
Path outputDir = new Path(args[1]);
String reportSeparatorString = (args.length > 2) ? args[2] : ":";
conf.set("ReportSeparator", reportSeparatorString);
Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, (NAME + "_") + tableName));
job.setJarByClass(CellCounter.class);
Scan scan = getConfiguredScanForJob(conf, args);
TableMapReduceUtil.initTableMapperJob(tableName, scan, CellCounter.CellCounterMapper.class, ImmutableBytesWritable.class, Result.class, job);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(LongWritable.class);
job.setOutputFormatClass(TextOutputFormat.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(LongWritable.class);
FileOutputFormat.setOutputPath(job, outputDir);
job.setReducerClass(CellCounter.LongSumReducer.class);
job.setCombinerClass(CellCounter.LongSumReducer.class);
return job;
} | 3.26 |
hbase_AsyncRpcRetryingCaller_preProcessError_rdh | // Subclasses can override this method to change the error type, to control the retry logic.
// For example, during a rolling upgrade, calling a newly added method may produce an
// UnsupportedOperationException (wrapped in a DNRIOE). Sometimes we want to fall back to the old
// method first, so a subclass can change the exception type to something that is not a DNRIOE;
// we will then schedule a retry, and on the next attempt the subclass can use the old method to
// make the rpc call.
protected Throwable preProcessError(Throwable error) {
return error;
} | 3.26 |
hbase_DefaultMemStore_snapshot_rdh | /**
* Creates a snapshot of the current memstore. Snapshot must be cleared by call to
* {@link #clearSnapshot(long)}
*/
@Override
public MemStoreSnapshot snapshot() {
// If snapshot currently has entries, then flusher failed or didn't call
// cleanup. Log a warning.
if (!this.snapshot.isEmpty()) {
LOG.warn("Snapshot called again without clearing previous. " + "Doing nothing. Another ongoing flush or did we fail last attempt?");
} else {
this.snapshotId = EnvironmentEdgeManager.currentTime();
if (!getActive().isEmpty()) {
// Record the ImmutableSegment's heap overhead when initializing
MemStoreSizing memstoreAccounting = new NonThreadSafeMemStoreSizing();
ImmutableSegment immutableSegment = SegmentFactory.instance().createImmutableSegment(getActive(), memstoreAccounting);
// regionServices can be null when testing
if (regionServices != null) {
regionServices.addMemStoreSize(memstoreAccounting.getDataSize(), memstoreAccounting.getHeapSize(), memstoreAccounting.getOffHeapSize(), memstoreAccounting.getCellsCount());
}
this.snapshot = immutableSegment;
resetActive();
resetTimeOfOldestEdit();
}
}
return new MemStoreSnapshot(this.snapshotId, this.snapshot);
} | 3.26 |
hbase_DefaultMemStore_getScanners_rdh | /**
* This method is protected under {@link HStore#lock} read lock. <br/>
* Scanners are ordered from 0 (oldest) to newest in increasing order.
*/
@Override
public List<KeyValueScanner> getScanners(long readPt) throws IOException {
List<KeyValueScanner> list = new ArrayList<>();
addToScanners(getActive(), readPt, list);
addToScanners(getSnapshotSegments(), readPt, list);
return list;
} | 3.26 |
hbase_DefaultMemStore_main_rdh | /**
* Code to help figure if our approximation of object heap sizes is close enough. See hbase-900.
* Fills memstores then waits so the user can take a heap dump and bring up the resultant hprof in
* something like jprofiler, which allows you to get 'deep size' on objects.
*
* @param args
* main args
*/
public static void main(String[] args) {
RuntimeMXBean runtimeBean = ManagementFactory.getRuntimeMXBean();
LOG.info("vmName=" + runtimeBean.getVmName() + ", vmVendor=" + runtimeBean.getVmVendor() + ", vmVersion=" + runtimeBean.getVmVersion());
LOG.info("vmInputArguments=" + runtimeBean.getInputArguments());
DefaultMemStore memstore1 = new DefaultMemStore();
// TODO: x32 vs x64
final int count = 10000;
byte[] fam = Bytes.toBytes("col");
byte[] qf = Bytes.toBytes("umn");
byte[] empty = new byte[0];
MemStoreSizing memStoreSizing = new NonThreadSafeMemStoreSizing();
for (int i = 0; i < count; i++) {
// Give each its own ts
memstore1.add(new KeyValue(Bytes.toBytes(i), fam, qf, i, empty), memStoreSizing);
}
LOG.info("memstore1 estimated size={}", memStoreSizing.getMemStoreSize().getDataSize() + memStoreSizing.getMemStoreSize().getHeapSize());
for (int i = 0; i < count; i++) {
memstore1.add(new KeyValue(Bytes.toBytes(i), fam, qf, i, empty), memStoreSizing);
}
LOG.info("memstore1 estimated size (2nd loading of same data)={}", memStoreSizing.getMemStoreSize().getDataSize() + memStoreSizing.getMemStoreSize().getHeapSize());
// Make a variably sized memstore.
DefaultMemStore memstore2 = new DefaultMemStore();
memStoreSizing = new NonThreadSafeMemStoreSizing();
for (int i = 0; i < count; i++) {
memstore2.add(new KeyValue(Bytes.toBytes(i), fam, qf, i, new byte[i]), memStoreSizing);
}
LOG.info("memstore2 estimated size={}", memStoreSizing.getMemStoreSize().getDataSize() + memStoreSizing.getMemStoreSize().getHeapSize());
final int seconds = 30;
LOG.info("Waiting " + seconds + " seconds while heap dump is taken");
LOG.info("Exiting.");
} | 3.26 |
hbase_DefaultMemStore_getNextRow_rdh | /**
*
* @param cell
* Find the row that comes after this one. If null, we return the first.
* @return Next row or null if none found.
*/
Cell getNextRow(final Cell cell) {
return getLowest(getNextRow(cell, this.getActive().getCellSet()), getNextRow(cell, this.snapshot.getCellSet()));
} | 3.26 |
hbase_SyncReplicationReplayWALManager_addUsedPeerWorker_rdh | /**
* Will only be called when loading procedures, where we need to construct the used worker set for
* each peer.
*/
public void addUsedPeerWorker(String peerId, ServerName worker) {
usedWorkersByPeer.get(peerId).used(worker);
} | 3.26 |
hbase_SyncReplicationReplayWALManager_acquirePeerWorker_rdh | /**
* Get a worker for replaying remote wal for a given peer. If no worker is available, i.e., all the
* region servers have been used by others, a {@link ProcedureSuspendedException} will be thrown
* to suspend the procedure. It will be woken up later when workers become available, either
* because another procedure releases a worker or because a new region server joins the cluster.
*/
public ServerName acquirePeerWorker(String peerId, Procedure<?> proc) throws ProcedureSuspendedException {
UsedReplayWorkersForPeer usedWorkers = usedWorkersByPeer.get(peerId);
synchronized(usedWorkers) {
Optional<ServerName> worker = usedWorkers.acquire(serverManager);
if (worker.isPresent()) {
return worker.get();
}
// no worker available right now, suspend the procedure
usedWorkers.suspend(proc);
}
throw new ProcedureSuspendedException();
} | 3.26 |
hbase_MetaRegionLocationCache_loadMetaLocationsFromZk_rdh | /**
* Populates the current snapshot of meta locations from ZK. If no meta znodes exist, it registers
* a watcher on base znode to check for any CREATE/DELETE events on the children.
*
* @param retryCounter
* controls the number of retries and sleep between retries.
*/
private void loadMetaLocationsFromZk(RetryCounter retryCounter, ZNodeOpType opType) {
TraceUtil.trace(() -> {
List<String> znodes = null;
while (retryCounter.shouldRetry()) {
try {
znodes = watcher.getMetaReplicaNodesAndWatchChildren();
break;
} catch (KeeperException ke) {
LOG.debug("Error populating initial meta locations", ke);
if (!retryCounter.shouldRetry()) {
// Retries exhausted and watchers not set. This is not a desirable state since the cache
// could remain stale forever. Propagate the exception.
watcher.abort("Error populating meta locations", ke);
return;
}
try {
retryCounter.sleepUntilNextRetry();
} catch (InterruptedException ie) {
LOG.error("Interrupted while loading meta locations from ZK", ie);
Thread.currentThread().interrupt();
return;
}
}
}
if ((znodes == null) || znodes.isEmpty()) {
// No meta znodes exist at this point but we registered a watcher on the base znode to
// listen for updates. They will be handled via nodeChildrenChanged().
return;
}
if (znodes.size() == cachedMetaLocations.size()) {
// No new meta znodes got added.
return;
}
for (String znode : znodes) {
String path = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, znode);
updateMetaLocation(path, opType);
}
}, "MetaRegionLocationCache.loadMetaLocationsFromZk");
} | 3.26 |
hbase_MetaRegionLocationCache_isValidMetaPath_rdh | /**
* Helper to check if the given 'path' corresponds to a meta znode. This listener is only
* interested in changes to meta znodes.
*/
private boolean isValidMetaPath(String path) {
return watcher.getZNodePaths().isMetaZNodePath(path);
} | 3.26 |
hbase_MetaRegionLocationCache_getMetaRegionLocations_rdh | /**
* Returns the list of cached HRegionLocations for meta replica(s), or an empty list if the cache is empty.
*/
public List<HRegionLocation> getMetaRegionLocations() {
ConcurrentNavigableMap<Integer, HRegionLocation> snapshot = cachedMetaLocations.tailMap(cachedMetaLocations.firstKey());
if (snapshot.isEmpty()) {
// This could be possible if the master has not successfully initialized yet or meta region
// is stuck in some weird state.
return Collections.emptyList();
}
List<HRegionLocation> result = new ArrayList<>();
// Explicitly iterate instead of new ArrayList<>(snapshot.values()) because the underlying
// ArrayValueCollection does not implement toArray().
snapshot.values().forEach(location -> result.add(location));
return result;
} | 3.26 |
hbase_MetaRegionLocationCache_getMetaRegionLocation_rdh | /**
* Gets the HRegionLocation for a given meta replica ID. Renews the watch on the znode for future
* updates.
*
* @param replicaId
* ReplicaID of the region.
* @return HRegionLocation for the meta replica.
* @throws KeeperException
* if there is any issue fetching/parsing the serialized data.
*/
private HRegionLocation getMetaRegionLocation(int replicaId) throws KeeperException {
RegionState metaRegionState;
try {
byte[] data = ZKUtil.getDataAndWatch(watcher, watcher.getZNodePaths().getZNodeForReplica(replicaId));
metaRegionState = ProtobufUtil.parseMetaRegionStateFrom(data, replicaId);
} catch (DeserializationException e) {
throw ZKUtil.convert(e);
}
return new HRegionLocation(metaRegionState.getRegion(), metaRegionState.getServerName());
} | 3.26 |
hbase_KeyValueUtil_previousKey_rdh | /**
* ************* next/previous *********************************
*/
/**
* Decrement the timestamp. For tests (currently wasteful). Remember timestamps are sorted reverse
* chronologically.
*
* @return previous key
*/
public static KeyValue previousKey(final KeyValue in) {
return createFirstOnRow(CellUtil.cloneRow(in), CellUtil.cloneFamily(in), CellUtil.cloneQualifier(in), in.getTimestamp() - 1);
} | 3.26 |
hbase_KeyValueUtil_keyLength_rdh | /**
* Returns the number of bytes this cell's key part would occupy if serialized as in
* {@link KeyValue}. The key includes rowkey, family, qualifier, timestamp and type.
*
* @return the key length
*/
public static int keyLength(final Cell cell) {
return keyLength(cell.getRowLength(), cell.getFamilyLength(), cell.getQualifierLength());
} | 3.26 |
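Spelled out, the fixed overhead around the variable-length parts is 2 + 1 + 8 + 1 = 12 bytes. A helper-method sketch of my own; the breakdown mirrors the writeFlatKey layout shown earlier rather than code copied from KeyValue:

```java
// Key size for the <2-byte row len><row><1-byte cf len><cf><qualifier><8-byte ts><1-byte type>
// layout used by writeFlatKey above.
static int flatKeyLength(short rowLength, byte familyLength, int qualifierLength) {
  return Short.BYTES + rowLength   // row length prefix + row key
       + Byte.BYTES + familyLength // family length prefix + family
       + qualifierLength           // qualifier (no length prefix inside the key)
       + Long.BYTES                // timestamp
       + Byte.BYTES;               // type byte
}
```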
hbase_KeyValueUtil_appendTo_rdh | /**
* Copy the Cell content into the passed buf in KeyValue serialization format.
*/
public static int appendTo(Cell cell, ByteBuffer buf, int offset, boolean withTags) {
offset = ByteBufferUtils.putInt(buf, offset, keyLength(cell));// Key length
offset = ByteBufferUtils.putInt(buf, offset, cell.getValueLength());// Value length
offset = appendKeyTo(cell, buf, offset);
offset = CellUtil.copyValueTo(cell, buf, offset);// Value bytes
int tagsLength = cell.getTagsLength();
if (withTags && (tagsLength > 0)) {
offset = ByteBufferUtils.putAsShort(buf, offset, tagsLength);// Tags length
offset = PrivateCellUtil.copyTagsTo(cell, buf, offset);// Tags bytes
}
return offset;
} | 3.26 |
hbase_KeyValueUtil_m2_rdh | /**
* Write out a KeyValue in the manner in which we used to when KeyValue was a Writable.
*
* @return Length written on stream
* @see #create(DataInput) for the inverse function
*/
public static long m2(final KeyValue kv, final DataOutput out) throws IOException {
// This is how the old Writable-based write used to serialize KVs. Need to figure out a
// way to make it work for all implementations.
int length = kv.getLength();
out.writeInt(length);
out.write(kv.getBuffer(), kv.getOffset(), length);
return ((long) (length)) + Bytes.SIZEOF_INT;
} | 3.26 |
hbase_KeyValueUtil_appendToByteArray_rdh | /**
* ************** copy key and value ********************
*/
public static int appendToByteArray(Cell cell, byte[] output, int offset, boolean withTags) {
int pos = offset;
pos = Bytes.putInt(output, pos, keyLength(cell));
pos = Bytes.putInt(output, pos, cell.getValueLength());
pos = appendKeyTo(cell, output, pos);
pos = CellUtil.copyValueTo(cell, output, pos);
if (withTags && (cell.getTagsLength() > 0)) {
pos = Bytes.putAsShort(output, pos, cell.getTagsLength());
pos = PrivateCellUtil.copyTagsTo(cell, output, pos);
}
return pos;
} | 3.26 |
hbase_KeyValueUtil_createFirstOnRow_rdh | /**
* Create a KeyValue for the specified row, family and qualifier that would be smaller than all
* other possible KeyValues that have the same row, family, qualifier. Used for seeking.
*
* @param buffer
* the buffer to use for the new <code>KeyValue</code> object
* @param boffset
* buffer offset
* @param row
* the row key
* @param roffset
* row offset
* @param rlength
* row length
* @param family
* family name
* @param foffset
* family offset
* @param flength
* family length
* @param qualifier
* column qualifier
* @param qoffset
* qualifier offset
* @param qlength
* qualifier length
* @return First possible key on passed Row, Family, Qualifier.
* @throws IllegalArgumentException
* The resulting <code>KeyValue</code> object would be larger
* than the provided buffer or than
* <code>Integer.MAX_VALUE</code>
*/
public static KeyValue createFirstOnRow(byte[] buffer, final int boffset, final byte[] row, final int roffset, final int rlength, final byte[] family, final int foffset, final int flength, final byte[] qualifier, final int qoffset, final int qlength) throws IllegalArgumentException {
long lLength = KeyValue.getKeyValueDataStructureSize(rlength, flength, qlength, 0);
if (lLength > Integer.MAX_VALUE) {
throw new IllegalArgumentException("KeyValue length " + lLength + " > " + Integer.MAX_VALUE);
}
int iLength = ((int) (lLength));
if ((buffer.length - boffset) < iLength) {
throw new IllegalArgumentException("Buffer size " + (buffer.length - boffset) + " < " + iLength);
}
int len = KeyValue.writeByteArray(buffer, boffset, row, roffset, rlength, family, foffset, flength, qualifier, qoffset, qlength, HConstants.LATEST_TIMESTAMP, Type.Maximum, null, 0, 0, null);
return new KeyValue(buffer, boffset, len);
}
/**
* ************* misc *********************************
*/
/**
*
* @return <code>cell</code> if it is an object of class {@link KeyValue} else we will return a
new {@link KeyValue} instance made from <code>cell</code> Note: Even if the cell is an
object of any of the subclass of {@link KeyValue}, we will create a new
{@link KeyValue} | 3.26 |
hbase_KeyValueUtil_copyToNewKeyValue_rdh | /**
* ************** copy the cell to create a new keyvalue ********************
*/
public static KeyValue copyToNewKeyValue(final Cell cell) {
byte[] bytes = copyToNewByteArray(cell);
KeyValue kvCell = new KeyValue(bytes, 0, bytes.length);
kvCell.setSequenceId(cell.getSequenceId());
return kvCell;
} | 3.26 |
hbase_KeyValueUtil_createLastOnRow_rdh | /**
* Creates a KeyValue that is last on the specified row id. That is, every other possible KeyValue
* for the given row would compareTo() less than the result of this call.
*
* @param row
* row key
* @return Last possible KeyValue on passed <code>row</code>
*/
public static KeyValue createLastOnRow(final byte[] row) {
return new KeyValue(row, null, null, HConstants.LATEST_TIMESTAMP, Type.Minimum);
} | 3.26 |
hbase_KeyValueUtil_m0_rdh | /**
* ************** length ********************
*/
public static int m0(short rlen, byte flen, int qlen, int vlen, int tlen, boolean withTags) {
if (withTags) {
return ((int) (KeyValue.getKeyValueDataStructureSize(rlen, flen, qlen, vlen, tlen)));
}
return ((int) (KeyValue.getKeyValueDataStructureSize(rlen, flen, qlen, vlen)));
} | 3.26 |
hbase_KeyValueUtil_m1_rdh | /**
* Create a KeyValue for the specified row, family and qualifier that would be smaller than all
* other possible KeyValues that have the same row,family,qualifier. Used for seeking.
*
* @param row
* - row key (arbitrary byte array)
* @param family
* - family name
* @param qualifier
* - column qualifier
* @return First possible key on passed <code>row</code>, and column.
*/
public static KeyValue m1(final byte[] row, final byte[] family, final byte[] qualifier) {
return new KeyValue(row, family, qualifier, HConstants.LATEST_TIMESTAMP, Type.Maximum);
} | 3.26 |
hbase_KeyValueUtil_copyKeyToNewByteBuffer_rdh | /**
* The position will be set to the beginning of the new ByteBuffer
*
* @return the Bytebuffer containing the key part of the cell
*/
public static ByteBuffer copyKeyToNewByteBuffer(final Cell cell) {
byte[] bytes = new byte[keyLength(cell)];
appendKeyTo(cell, bytes, 0);
ByteBuffer buffer = ByteBuffer.wrap(bytes);
return buffer;
} | 3.26 |
hbase_KeyValueUtil_createKeyValueFromKey_rdh | /**
* Return a KeyValue made of a byte array that holds the key-only part. Needed to convert hfile
* index members to KeyValues.
*/
public static KeyValue createKeyValueFromKey(final byte[] b, final int o, final int l) {
byte[] newb = new byte[l + KeyValue.ROW_OFFSET];
System.arraycopy(b, o, newb, KeyValue.ROW_OFFSET, l);
Bytes.putInt(newb, 0, l);
Bytes.putInt(newb, Bytes.SIZEOF_INT, 0);
return new KeyValue(newb);
} | 3.26 |
hbase_KeyValueUtil_nextShallowCopy_rdh | /**
* ************** iterating ******************************
*/
/**
* Creates a new KeyValue object positioned in the supplied ByteBuffer and sets the ByteBuffer's
* position to the start of the next KeyValue. Does not allocate a new array or copy data.
*/
public static KeyValue nextShallowCopy(final ByteBuffer bb, final boolean includesMvccVersion, boolean includesTags) {
if (bb.isDirect()) {
throw new IllegalArgumentException("only supports heap buffers");
}
if (bb.remaining() < 1) {
return null;
}
int underlyingArrayOffset = bb.arrayOffset() + bb.position();
int keyLength = bb.getInt();
int valueLength = bb.getInt();
ByteBufferUtils.skip(bb, keyLength + valueLength);
int tagsLength = 0;
if (includesTags) {
// Read short as unsigned, high byte first
tagsLength = ((bb.get() & 0xff) << 8) ^ (bb.get() & 0xff);
ByteBufferUtils.skip(bb, tagsLength);
}
int kvLength = ((int) (KeyValue.getKeyValueDataStructureSize(keyLength, valueLength, tagsLength)));
KeyValue keyValue = new KeyValue(bb.array(), underlyingArrayOffset, kvLength);
if (includesMvccVersion) {
long mvccVersion = ByteBufferUtils.readVLong(bb);
keyValue.setSequenceId(mvccVersion);
}
return keyValue;
} | 3.26 |
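A short iteration sketch (mine) over a heap buffer of back-to-back serialized KeyValues, assuming no mvcc suffix and no tags in the encoding:

```java
import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;

public class NextShallowCopySketch {
  // Walks every KeyValue in the buffer; each returned KeyValue shares the backing array (no copy).
  static void dump(ByteBuffer bb) {
    KeyValue kv;
    while ((kv = KeyValueUtil.nextShallowCopy(bb, /* includesMvccVersion */ false,
        /* includesTags */ false)) != null) {
      System.out.println(kv);
    }
  }
}
```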
hbase_CachedMobFile_getReferenceCount_rdh | /**
* Gets the reference of the current mob file. Internal usage, currently it's for testing.
*
* @return The reference of the current mob file.
*/
public long getReferenceCount() {
return this.referenceCount.longValue();
} | 3.26 |
hbase_CachedMobFile_close_rdh | /**
* Decreases the reference of the underlying reader for the mob file. It's not thread-safe. Use
* MobFileCache.closeFile() instead. This underlying reader isn't closed until the reference is 0.
*/
@Override
public void close() throws IOException {
long refs = referenceCount.decrementAndGet();
if (refs == 0) {
super.close();
}
} | 3.26 |
hbase_CachedMobFile_open_rdh | /**
* Opens the mob file if it's not opened yet and increases the reference. It's not thread-safe.
* Use MobFileCache.openFile() instead. The reader of the mob file is just opened when it's not
* opened no matter how many times this open() method is invoked. The reference is a counter of
* how many times this reader is referenced. When the reference is 0, this reader is closed.
*/
@Override
public void open() throws IOException {
super.open();
referenceCount.incrementAndGet();
} | 3.26 |
hbase_IncrementalBackupManager_getLogFilesForNewBackup_rdh | /**
* For each region server: get all log files newer than the last timestamps but not newer than the
* newest timestamps.
*
* @param olderTimestamps
* the timestamp for each region server of the last backup.
* @param newestTimestamps
* the timestamp for each region server that the backup should lead to.
* @param conf
* the Hadoop and Hbase configuration
* @param savedStartCode
* the startcode (timestamp) of last successful backup.
* @return a list of log files to be backed up
* @throws IOException
* exception
*/
private List<String> getLogFilesForNewBackup(Map<String, Long> olderTimestamps, Map<String, Long> newestTimestamps, Configuration conf, String savedStartCode) throws IOException {
LOG.debug(((("In getLogFilesForNewBackup()\n" + "olderTimestamps: ") + olderTimestamps) + "\n newestTimestamps: ") + newestTimestamps);
Path walRootDir = CommonFSUtils.getWALRootDir(conf);
Path logDir = new Path(walRootDir, HConstants.HREGION_LOGDIR_NAME);
Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
FileSystem fs = walRootDir.getFileSystem(conf);
NewestLogFilter pathFilter = new NewestLogFilter();
List<String> resultLogFiles = new ArrayList<>();
List<String> newestLogs = new ArrayList<>();
/* The old region servers and timestamps info we kept in backup system table may be out of sync
if new region server is added or existing one lost. We'll deal with it here when processing
the logs. If data in backup system table has more hosts, just ignore it. If the .logs
directory includes more hosts, the additional hosts will not have old timestamps to compare
with. We'll just use all the logs in that directory. We always write up-to-date region server
and timestamp info to backup system table at the end of successful backup.
*/
FileStatus[] rss;
Path p;
String host;
Long oldTimeStamp;
String currentLogFile;
long currentLogTS;
// Get the files in .logs.
rss = fs.listStatus(logDir);
for (FileStatus rs : rss) {
p = rs.getPath();
host = BackupUtils.parseHostNameFromLogFile(p);
if (host == null) {
continue;
}
FileStatus[] logs;
oldTimeStamp = olderTimestamps.get(host);
// It is possible that there is no old timestamp in backup system table for this host if
// this region server is newly added after our last backup.
if (oldTimeStamp == null) {
logs = fs.listStatus(p);
} else {
pathFilter.setLastBackupTS(oldTimeStamp);
logs = fs.listStatus(p, pathFilter);
}
for (FileStatus v26 : logs) {
LOG.debug("currentLogFile: " + v26.getPath().toString());
if (AbstractFSWALProvider.isMetaFile(v26.getPath())) {
if (LOG.isDebugEnabled()) {
LOG.debug("Skip hbase:meta log file: " + v26.getPath().getName());
}
continue;
}
currentLogFile = v26.getPath().toString();
resultLogFiles.add(currentLogFile);
currentLogTS = BackupUtils.getCreationTime(v26.getPath());
// If newestTimestamps.get(host) is null, means that
// either RS (host) has been restarted recently with different port number
// or RS is down (was decommissioned). In any case, we treat this
// log file as eligible for inclusion into incremental backup log list
Long ts = newestTimestamps.get(host);
if (ts == null) {
LOG.warn((("ORPHAN log found: " + v26) + " host=") + host);
LOG.debug("Known hosts (from newestTimestamps):");
for (String s : newestTimestamps.keySet()) {
LOG.debug(s);
}
}
if ((ts == null) || (currentLogTS > ts)) {
newestLogs.add(currentLogFile);
}
}
}
// Include the .oldlogs files too.
FileStatus[] oldlogs = fs.listStatus(oldLogDir);
for (FileStatus oldlog : oldlogs) {
p = oldlog.getPath();
currentLogFile = p.toString();
if (AbstractFSWALProvider.isMetaFile(p)) {
if (LOG.isDebugEnabled()) {
LOG.debug("Skip .meta log file: " + currentLogFile);
}
continue;
}
host = BackupUtils.parseHostFromOldLog(p);
if (host == null) {
continue;
}
currentLogTS = BackupUtils.getCreationTime(p);
oldTimeStamp = olderTimestamps.get(host);
/* It is possible that there is no old timestamp in backup system table for this host. At the
time of our last backup operation, this rs did not exist. The reason can be one of the two:
1. The rs already left/crashed. Its logs were moved to .oldlogs. 2. The rs was added after
our last backup.
*/
if (oldTimeStamp == null) {
if (currentLogTS < Long.parseLong(savedStartCode)) {
// This log file is really old, its region server was before our last backup.
continue;
} else {
resultLogFiles.add(currentLogFile);
}
} else if (currentLogTS > oldTimeStamp) {
resultLogFiles.add(currentLogFile);
}
// It is possible that a host in .oldlogs is an obsolete region server,
// so newestTimestamps.get(host) here can be null.
// Even if these logs belong to an obsolete region server, we still need
// to include them to avoid loss of edits for the backup.
Long newTimestamp = newestTimestamps.get(host);
if ((newTimestamp == null) || (currentLogTS > newTimestamp)) {
newestLogs.add(currentLogFile);
}
}
// remove newest log per host because they are still in use
resultLogFiles.removeAll(newestLogs);
return resultLogFiles;
} | 3.26 |
hbase_IncrementalBackupManager_getIncrBackupLogFileMap_rdh | /**
* Obtain the list of logs that need to be copied out for this incremental backup. The list is set
* in BackupInfo.
*
* @return The new HashMap of RS log time stamps after the log roll for this incremental backup.
* @throws IOException
* exception
*/
public Map<String, Long> getIncrBackupLogFileMap() throws IOException {
List<String> logList;
Map<String, Long> newTimestamps;
Map<String, Long> previousTimestampMins;
String savedStartCode = readBackupStartCode();
// key: tableName
// value: <RegionServer,PreviousTimeStamp>
Map<TableName, Map<String, Long>> previousTimestampMap = readLogTimestampMap();
previousTimestampMins = BackupUtils.getRSLogTimestampMins(previousTimestampMap);
if (LOG.isDebugEnabled()) {
LOG.debug("StartCode " + savedStartCode + " for backupID " + backupInfo.getBackupId());
}
// get all new log files from .logs and .oldlogs after last TS and before new timestamp
if (((savedStartCode == null) || (previousTimestampMins == null)) || previousTimestampMins.isEmpty()) {
throw new IOException("Cannot read any previous back up timestamps from backup system table. " + "In order to create an incremental backup, at least one full backup is needed.");
}
LOG.info("Execute roll log procedure for incremental backup ...");
HashMap<String, String> props = new HashMap<>();
props.put("backupRoot", backupInfo.getBackupRootDir());
try (Admin admin = conn.getAdmin()) {
admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE, LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props);
}
newTimestamps = readRegionServerLastLogRollResult();
logList = getLogFilesForNewBackup(previousTimestampMins, newTimestamps, conf, savedStartCode);
logList = excludeProcV2WALs(logList);
backupInfo.setIncrBackupFileList(logList);
return newTimestamps;
} | 3.26 |
hbase_AsyncRegionLocationCache_getAll_rdh | /**
* Returns all cached RegionLocations
*/
public Collection<RegionLocations> getAll() {
return Collections.unmodifiableCollection(cache.values());
} | 3.26 |
hbase_AsyncRegionLocationCache_cleanProblematicOverlappedRegions_rdh | /**
* When caching a location, the region may have been the result of a merge. Check to see if the
* region's boundaries overlap any other cached locations in a problematic way. Those would have
* been merge parents which no longer exist. We need to proactively clear them out to avoid a case
* where a merged region which receives no requests never gets cleared. This causes requests to
* other merged regions after it to see the wrong cached location.
* <p>
* For example, if we have Start_New < Start_Old < End_Old < End_New, then if we only access
* within range [End_Old, End_New], then it will always return the old region but it will then
* find out the row is not in the range, and try to get the new region, and then we get
* [Start_New, End_New), still fall into the same situation.
* <p>
* If Start_Old is less than Start_New, even if we have overlap, it is not a problem, as when the
* row is greater than Start_New, we will locate to the new region, and if the row is less than
* Start_New, it will fall into the old region's range and we will try to access the region and
* get a NotServing exception, and then we will clean the cache.
* <p>
* See HBASE-27650
*
* @param locations
* the new location that was just cached
*/
private void cleanProblematicOverlappedRegions(RegionLocations locations) {
RegionInfo region = locations.getRegionLocation().getRegion();
boolean isLast = isEmptyStopRow(region.getEndKey());
while (true) {
Map.Entry<byte[], RegionLocations> overlapCandidate = isLast ? cache.lastEntry() : cache.lowerEntry(region.getEndKey());
if (((overlapCandidate == null) || (overlapCandidate.getValue() == locations)) || Bytes.equals(overlapCandidate.getKey(), region.getStartKey())) {
break;
}
if (LOG.isDebugEnabled()) {
LOG.debug("Removing cached location {} (endKey={}) because it overlaps with " + "new location {} (endKey={})", overlapCandidate.getValue(), Bytes.toStringBinary(overlapCandidate.getValue().getRegionLocation().getRegion().getEndKey()), locations, Bytes.toStringBinary(locations.getRegionLocation().getRegion().getEndKey()));
}
cache.remove(overlapCandidate.getKey());
}
} | 3.26 |
hbase_AsyncRegionLocationCache_findForRow_rdh | /**
* Finds the RegionLocations for the region with the greatest startKey less than or equal to the
* given row
*
* @param row
* row to find locations
*/
public RegionLocations findForRow(byte[] row, int replicaId) {
Map.Entry<byte[], RegionLocations> entry = cache.floorEntry(row);
if (entry == null) {
return null;
}
RegionLocations locs = entry.getValue();
if (locs == null) {
return null;
}
HRegionLocation loc = locs.getRegionLocation(replicaId);
if (loc == null) {
return null;
}
byte[] endKey = loc.getRegion().getEndKey();
if (isEmptyStopRow(endKey) || (Bytes.compareTo(row, endKey) < 0)) {
if (LOG.isTraceEnabled()) {
LOG.trace("Found {} in cache for {}, row='{}', locateType={}, replicaId={}", loc, tableName, Bytes.toStringBinary(row), RegionLocateType.CURRENT, replicaId);
}
return locs;
} else {
if (LOG.isTraceEnabled()) {
LOG.trace("Requested row {} comes after region end key of {} for cached location {}", Bytes.toStringBinary(row), Bytes.toStringBinary(endKey), locs);
}
return null;
}
} | 3.26 |
hbase_AsyncRegionLocationCache_size_rdh | /**
* Returns the size of the region locations cache
*/
public int size() {
return cache.size();
} | 3.26 |
hbase_AsyncRegionLocationCache_add_rdh | /**
* Add the given locations to the cache, merging with existing if necessary. Also cleans out any
* previously cached locations which may have been superseded by this one (i.e. in case of merged
* regions). See {@link #cleanProblematicOverlappedRegions(RegionLocations)}
*
* @param locs
* the locations to cache
* @return the final location (possibly merged) that was added to the cache
*/
public synchronized RegionLocations add(RegionLocations locs) {
byte[] startKey = locs.getRegionLocation().getRegion().getStartKey();
RegionLocations oldLocs = cache.putIfAbsent(startKey, locs);
if (oldLocs == null) {
cleanProblematicOverlappedRegions(locs);
return locs;
}
// check whether the regions are the same, this usually happens when table is split/merged,
// or deleted and recreated again.
RegionInfo region = locs.getRegionLocation().getRegion();
RegionInfo oldRegion = oldLocs.getRegionLocation().getRegion();
if (region.getEncodedName().equals(oldRegion.getEncodedName())) {
RegionLocations mergedLocs = oldLocs.mergeLocations(locs);
if (isEqual(mergedLocs, oldLocs)) {
// the merged one is the same with the old one, give up
LOG.trace(("Will not add {} to cache because the old value {} " + " is newer than us or has the same server name.") + " Maybe it is updated before we replace it", locs, oldLocs);
return oldLocs;
}
locs = mergedLocs;
} else {
// the region is different, here we trust the one we fetched. This may be wrong but eventually
// the upper layer can detect this and trigger removal of the wrong locations
if (LOG.isDebugEnabled()) {
LOG.debug("The newly fetched region {} is different from the old one {} for row '{}'," + " try replacing the old one...", region, oldRegion, Bytes.toStringBinary(startKey));
}
}
cache.put(startKey, locs);
cleanProblematicOverlappedRegions(locs);
return locs;
} | 3.26 |
hbase_AsyncRegionLocationCache_removeForServer_rdh | /**
* Removes serverName from all locations in the cache, fully removing any RegionLocations which
* are empty after removing the server from it.
*
* @param serverName
* server to remove from locations
*/
public synchronized void removeForServer(ServerName serverName) {
for (Map.Entry<byte[], RegionLocations> entry : cache.entrySet()) {
byte[] regionName = entry.getKey();
RegionLocations locs = entry.getValue();
RegionLocations newLocs = locs.removeByServer(serverName);
if (locs == newLocs) {
continue;
}
if (newLocs.isEmpty()) {
cache.remove(regionName, locs);
} else {
cache.put(regionName, newLocs);
}
}
} | 3.26 |
hbase_AsyncRegionLocationCache_get_rdh | /**
* Gets the RegionLocations for a given region's startKey. This is a direct lookup, if the key
* does not exist in the cache it will return null.
*
* @param startKey
* region start key to directly look up
*/
public RegionLocations get(byte[] startKey) {
return cache.get(startKey);
} | 3.26 |
hbase_AsyncRegionLocationCache_findForBeforeRow_rdh | /**
* Finds the RegionLocations for the region with the greatest startKey strictly less than the
* given row
*
* @param row
* row to find locations
*/
public RegionLocations findForBeforeRow(byte[] row, int replicaId) {
boolean isEmptyStopRow = isEmptyStopRow(row);
Map.Entry<byte[], RegionLocations> entry = (isEmptyStopRow) ? cache.lastEntry() : cache.lowerEntry(row);
if (entry == null) {
return null;
}
RegionLocations locs = entry.getValue();
if (locs == null) {
return null;
}
HRegionLocation loc = locs.getRegionLocation(replicaId);
if (loc == null) {
return null;
}
if (isEmptyStopRow(loc.getRegion().getEndKey()) || ((!isEmptyStopRow) && (Bytes.compareTo(loc.getRegion().getEndKey(), row) >= 0))) {
if (LOG.isTraceEnabled()) {
LOG.trace("Found {} in cache for {}, row='{}', locateType={}, replicaId={}", loc, tableName, Bytes.toStringBinary(row), RegionLocateType.BEFORE, replicaId);
}
return locs;
} else {
return null;
}
} | 3.26 |
hbase_AsyncRegionLocationCache_remove_rdh | /**
* Removes the location from the cache if it exists and can be removed.
*
* @return true if entry was removed
*/
public synchronized boolean remove(HRegionLocation loc) {
byte[] startKey = loc.getRegion().getStartKey();
RegionLocations oldLocs = cache.get(startKey);
if (oldLocs == null) {
return false;
}
HRegionLocation oldLoc = oldLocs.getRegionLocation(loc.getRegion().getReplicaId());
if (!canUpdateOnError(loc, oldLoc)) {
return false;
}
RegionLocations newLocs = removeRegionLocation(oldLocs, loc.getRegion().getReplicaId());
if (newLocs == null) {
if (cache.remove(startKey, oldLocs)) {
return true;
}
} else {
cache.put(startKey, newLocs);
return true;
}
return false;
} | 3.26 |
hbase_Superusers_isSuperUser_rdh | /**
* Check if the current user is a super user
*
* @return true if current user is a super user, false otherwise.
* @param user
* to check
*/
public static boolean isSuperUser(String user) {
return superUsers.contains(user) || superGroups.contains(user);
} | 3.26 |
hbase_Superusers_initialize_rdh | /**
* Should be called only once to pre-load list of super users and super groups from Configuration.
* This operation is idempotent.
*
* @param conf
* configuration to load users from
* @throws IOException
* if unable to initialize lists of superusers or super groups
* @throws IllegalStateException
* if current user is null
*/
public static void initialize(Configuration conf) throws IOException {
ImmutableSet.Builder<String> superUsersBuilder = ImmutableSet.builder();
ImmutableSet.Builder<String> superGroupsBuilder = ImmutableSet.builder();
systemUser = User.getCurrent();
if (systemUser == null) {
throw new IllegalStateException("Unable to obtain the current user, " + "authorization checks for internal operations will not work correctly!");
}
String currentUser = systemUser.getShortName();
LOG.trace("Current user name is {}", currentUser);
superUsersBuilder.add(currentUser);
String[] superUserList = conf.getStrings(SUPERUSER_CONF_KEY, new String[0]);
for (String name : superUserList) {
if (AuthUtil.isGroupPrincipal(name)) {
// Let's keep the '@' for distinguishing from user.
superGroupsBuilder.add(name);
} else {
superUsersBuilder.add(name);
}
}
superUsers = superUsersBuilder.build();
superGroups = superGroupsBuilder.build();
} | 3.26 |
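A usage sketch of mine; it assumes the superuser list is read from the `hbase.superuser` property (the value behind `SUPERUSER_CONF_KEY`), with group names keeping their '@' prefix as the loop above shows:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.security.Superusers;

public class SuperusersSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.superuser", "admin,@hbase_admins"); // assumed property key
    Superusers.initialize(conf);
    System.out.println(Superusers.isSuperUser("admin"));         // true: listed user
    System.out.println(Superusers.isSuperUser("@hbase_admins")); // true: listed group
    System.out.println(Superusers.isSuperUser("nobody"));        // false, unless it is the process user
  }
}
```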
hbase_CombinedBlockCache_getFullyCachedFiles_rdh | /**
* Returns the list of fully cached files
*/
@Override
public Optional<Map<String, Pair<String, Long>>> getFullyCachedFiles() {
return this.l2Cache.getFullyCachedFiles();
} | 3.26 |
hbase_ThriftMetrics_exception_rdh | /**
* Increment the count for a specific exception type. This is called for each exception type that
* is returned to the thrift handler.
*
* @param rawThrowable
* type of exception
*/
public void exception(Throwable rawThrowable) {
source.exception();
Throwable throwable = unwrap(rawThrowable);
/**
* Keep some metrics for commonly seen exceptions. Try and put the most common types first. Place
* child types before the parent type that they extend. If this gets much larger we might have
* to go to a hashmap.
*/
if (throwable != null) {
if (throwable instanceof OutOfOrderScannerNextException) {
source.outOfOrderException();
} else if (throwable instanceof RegionTooBusyException) {
source.tooBusyException();
} else if (throwable instanceof UnknownScannerException) {
source.unknownScannerException();
} else if (throwable instanceof ScannerResetException) {
source.scannerResetException();
} else if (throwable instanceof RegionMovedException) {
source.movedRegionException();
} else if (throwable instanceof NotServingRegionException) {
source.notServingRegionException();
} else if (throwable instanceof FailedSanityCheckException) {
source.failedSanityException();
} else if (throwable instanceof MultiActionResultTooLarge) {
source.multiActionTooLargeException();
} else if (throwable instanceof CallQueueTooBigException) {
source.callQueueTooBigException();
} else if (throwable instanceof QuotaExceededException) {
source.quotaExceededException();
} else if (throwable instanceof RpcThrottlingException) {
source.rpcThrottlingException();
} else if (throwable instanceof CallDroppedException) {
source.callDroppedException();
} else if (throwable instanceof RequestTooBigException) {
source.requestTooBigException();
} else {
source.otherExceptions();
if (LOG.isDebugEnabled()) {
LOG.debug("Unknown exception type", throwable);
}
}
}
} | 3.26 |
hbase_Operation_toJSON_rdh | /**
* Produces a JSON object sufficient for description of a query in a debugging or logging context.
*
* @return the produced JSON object, as a string
*/
public String toJSON() throws IOException {
return toJSON(DEFAULT_MAX_COLS);
} | 3.26 |
hbase_Operation_toMap_rdh | /**
* Produces a Map containing a full summary of a query.
*
* @return a map containing parameters of a query (i.e. rows, columns...)
*/
public Map<String, Object> toMap() {
return toMap(DEFAULT_MAX_COLS);
} | 3.26 |
hbase_Operation_toString_rdh | /**
* Produces a string representation of this Operation. It defaults to a JSON representation, but
* falls back to a string representation of the fingerprint and details in the case of a JSON
* encoding failure.
*/
@Override
public String toString() {
return toString(DEFAULT_MAX_COLS);
} | 3.26 |
hbase_MetricsREST_incrementSucessfulGetRequests_rdh | /**
*
* @param inc
* How much to add to sucessfulGetCount.
*/
public void incrementSucessfulGetRequests(final int inc) {
source.incrementSucessfulGetRequests(inc);
} | 3.26 |
hbase_MetricsREST_incrementSucessfulAppendRequests_rdh | /**
*
* @param inc
* How much to add to sucessfulAppendCount.
*/
public synchronized void incrementSucessfulAppendRequests(final int inc) {
source.incrementSucessfulAppendRequests(inc);
} | 3.26 |
hbase_MetricsREST_incrementSucessfulIncrementRequests_rdh | /**
*
* @param inc
* How much to add to sucessfulIncrementCount.
*/
public synchronized void incrementSucessfulIncrementRequests(final int inc) {
source.incrementSucessfulIncrementRequests(inc);
} | 3.26 |
hbase_MetricsREST_incrementFailedDeleteRequests_rdh | /**
*
* @param inc
* How much to add to failedDeleteCount.
*/
public void incrementFailedDeleteRequests(final int inc) {
source.incrementFailedDeleteRequests(inc);
} | 3.26 |
hbase_MetricsREST_incrementFailedPutRequests_rdh | /**
*
* @param inc
* How much to add to failedPutCount.
*/
public void incrementFailedPutRequests(final int inc) {
source.incrementFailedPutRequests(inc);
} | 3.26 |
hbase_MetricsREST_incrementFailedIncrementRequests_rdh | /**
*
* @param inc
* How much to add to failedIncrementCount.
*/
public void incrementFailedIncrementRequests(final int inc) {
source.incrementFailedIncrementRequests(inc);
} | 3.26 |
hbase_MetricsREST_incrementFailedAppendRequests_rdh | /**
*
* @param inc
* How much to add to failedAppendCount.
*/
public void incrementFailedAppendRequests(final int inc) {
source.incrementFailedAppendRequests(inc);
} | 3.26 |
hbase_MetricsREST_incrementSucessfulPutRequests_rdh | /**
*
* @param inc
* How much to add to sucessfulPutCount.
*/
public void incrementSucessfulPutRequests(final int inc) {
source.incrementSucessfulPutRequests(inc);
} | 3.26 |
hbase_MetricsREST_incrementSucessfulScanRequests_rdh | /**
*
* @param inc
* How much to add to sucessfulScanCount.
*/
public synchronized void incrementSucessfulScanRequests(final int inc) {
source.incrementSucessfulScanRequests(inc);
} | 3.26 |
hbase_MetricsREST_incrementFailedGetRequests_rdh | /**
*
* @param inc
* How much to add to failedGetCount.
*/
public void incrementFailedGetRequests(final int inc) {
source.incrementFailedGetRequests(inc);
} | 3.26 |
hbase_MetricsREST_incrementFailedScanRequests_rdh | /**
*
* @param inc
* How much to add to failedScanCount.
*/
public void incrementFailedScanRequests(final int inc) {
source.incrementFailedScanRequests(inc);
} | 3.26 |
hbase_MetricsREST_incrementRequests_rdh | /**
*
* @param inc
* How much to add to requests.
*/
public void incrementRequests(final int inc) {
source.incrementRequests(inc);
} | 3.26 |
hbase_MetricsREST_incrementSucessfulDeleteRequests_rdh | /**
*
* @param inc
* How much to add to sucessfulDeleteCount.
*/
public void incrementSucessfulDeleteRequests(final int inc) {
source.incrementSucessfulDeleteRequests(inc);
} | 3.26 |
hbase_MemStoreSizing_decMemStoreSize_rdh | /**
* Returns The new dataSize ONLY as a convenience
*/
default long decMemStoreSize(long dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta, int cellsCountDelta) {
return incMemStoreSize(-dataSizeDelta, -heapSizeDelta, -offHeapSizeDelta, -cellsCountDelta);
} | 3.26 |
hbase_QuotaUtil_enableTableIfNotEnabled_rdh | /**
* Method to enable a table, if not already enabled. This method suppresses
* {@link TableNotDisabledException} and {@link TableNotFoundException}, if thrown while enabling
* the table.
*
* @param conn
* connection to re-use
* @param tableName
* name of the table to be enabled
*/
public static void enableTableIfNotEnabled(Connection conn, TableName tableName) throws IOException {
try {
conn.getAdmin().enableTable(tableName);
} catch (TableNotDisabledException | TableNotFoundException e) {
// ignore
}
} | 3.26 |
hbase_QuotaUtil_isQuotaEnabled_rdh | /**
* Returns true if the support for quota is enabled
*/
public static boolean isQuotaEnabled(final Configuration conf) {
return conf.getBoolean(QUOTA_CONF_KEY, QUOTA_ENABLED_DEFAULT);
} | 3.26 |
hbase_QuotaUtil_disableTableIfNotDisabled_rdh | /**
* Method to disable a table, if not already disabled. This method suppresses
* {@link TableNotEnabledException} and {@link TableNotFoundException}, if thrown while disabling the table.
*
* @param conn
* connection to re-use
* @param tableName
* table name which has moved into space quota violation
*/
public static void disableTableIfNotDisabled(Connection conn, TableName tableName) throws IOException {
try {
conn.getAdmin().disableTable(tableName);
} catch (TableNotEnabledException | TableNotFoundException e) {
// ignore
}
} | 3.26 |
hbase_QuotaUtil_doPut_rdh | /* ========================================================================= HTable helpers */
private static void doPut(final Connection connection, final Put put) throws IOException {
try (Table table = connection.getTable(QuotaUtil.QUOTA_TABLE_NAME)) {
table.put(put);
}
} | 3.26 |
hbase_QuotaUtil_calculateMutationSize_rdh | /* ========================================================================= Data Size Helpers */
public static long calculateMutationSize(final Mutation mutation) {
long size = 0;
for (Map.Entry<byte[], List<Cell>> entry : mutation.getFamilyCellMap().entrySet()) {
for (Cell cell : entry.getValue()) {
size += cell.getSerializedSize();
}
}
return size;
} | 3.26 |
hbase_QuotaUtil_updateClusterQuotaToMachineQuota_rdh | /**
* Convert cluster scope quota to machine scope quota
*
* @param quotas
* the original quota
* @param factor
* factor used to divide cluster limiter to machine limiter
* @return the converted quota whose quota limiters all in machine scope
*/
private static Quotas updateClusterQuotaToMachineQuota(Quotas quotas, double factor) {
Quotas.Builder newQuotas = Quotas.newBuilder(quotas);
if (newQuotas.hasThrottle()) {
Throttle.Builder throttle = Throttle.newBuilder(newQuotas.getThrottle());
if (throttle.hasReqNum()) {
throttle.setReqNum(updateTimedQuota(throttle.getReqNum(), factor));
}
if (throttle.hasReqSize()) {
throttle.setReqSize(updateTimedQuota(throttle.getReqSize(), factor));
}
if (throttle.hasReadNum()) {
throttle.setReadNum(updateTimedQuota(throttle.getReadNum(), factor));
}
if (throttle.hasReadSize()) {
throttle.setReadSize(updateTimedQuota(throttle.getReadSize(), factor));
}
if (throttle.hasWriteNum()) {
throttle.setWriteNum(updateTimedQuota(throttle.getWriteNum(), factor));
}
if (throttle.hasWriteSize()) {
throttle.setWriteSize(updateTimedQuota(throttle.getWriteSize(), factor));
}
if (throttle.hasReqCapacityUnit()) {
throttle.setReqCapacityUnit(updateTimedQuota(throttle.getReqCapacityUnit(), factor));
}
if (throttle.hasReadCapacityUnit()) {
throttle.setReadCapacityUnit(updateTimedQuota(throttle.getReadCapacityUnit(), factor));
}
if (throttle.hasWriteCapacityUnit()) {
throttle.setWriteCapacityUnit(updateTimedQuota(throttle.getWriteCapacityUnit(), factor));
}
newQuotas.setThrottle(throttle.build());
}
return newQuotas.build();
} | 3.26 |
hbase_ConfigurationManager_notifyAllObservers_rdh | /**
   * The conf object has been repopulated from disk, and we have to notify all the observers that
   * have expressed interest in being notified of such changes.
*/
public void notifyAllObservers(Configuration conf) {
LOG.info("Starting to notify all observers that config changed.");
synchronized(configurationObservers) {
for (ConfigurationObserver observer : configurationObservers) {
try {
if (observer != null) {
observer.onConfigurationChange(conf);
}
} catch (Throwable t) {
LOG.error("Encountered a throwable while notifying observers: of type : {}({})", observer.getClass().getCanonicalName(), observer, t);
}
      }
    }
} | 3.26 |
hbase_ConfigurationManager_containsObserver_rdh | /**
 * Returns true if this manager contains the given observer; for unit tests only.
*/
public boolean containsObserver(ConfigurationObserver observer) {
synchronized(configurationObservers) {
return configurationObservers.contains(observer);
    }
  } | 3.26 |
hbase_ConfigurationManager_deregisterObserver_rdh | /**
* Deregister an observer class
*
* @param observer
* to be deregistered.
*/
  public void deregisterObserver(ConfigurationObserver observer) {
    synchronized(configurationObservers) {
configurationObservers.remove(observer);
if (observer instanceof PropagatingConfigurationObserver) {
        ((PropagatingConfigurationObserver) (observer)).deregisterChildren(this);
      }
}
} | 3.26 |
hbase_ConfigurationManager_registerObserver_rdh | /**
* Register an observer class
*
* @param observer
* observer to be registered.
   */
  public void registerObserver(ConfigurationObserver observer) {
synchronized(configurationObservers) {
configurationObservers.add(observer);
if (observer instanceof PropagatingConfigurationObserver) {
((PropagatingConfigurationObserver) (observer)).registerChildren(this);
}
}
} | 3.26 |
hbase_ConfigurationManager_getNumObservers_rdh | /**
* Returns the number of observers.
*/
public int getNumObservers() {
    synchronized(configurationObservers) {
      return configurationObservers.size();
    }
  } | 3.26 |
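A usage sketch for the observer registry above, assuming ConfigurationManager has the usual no-arg constructor and ConfigurationObserver is the single-method interface invoked by notifyAllObservers; the configuration key is only an example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.conf.ConfigurationManager;
import org.apache.hadoop.hbase.conf.ConfigurationObserver;

public class ObserverDemo {
  public static void main(String[] args) {
    ConfigurationManager manager = new ConfigurationManager();
    ConfigurationObserver observer =
      conf -> System.out.println("flush size is now " + conf.get("hbase.hregion.memstore.flush.size"));
    manager.registerObserver(observer);
    // ... later, after the Configuration has been repopulated from disk:
    manager.notifyAllObservers(new Configuration());
    manager.deregisterObserver(observer);
  }
}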
hbase_CloseChecker_isSizeLimit_rdh | /**
   * Check whether a system stop is requested once the bytes written since the last check exceed
   * the configured size limit.
*
* @return if true, system stop.
*/
public boolean isSizeLimit(Store store, long bytesWritten) {
if (closeCheckSizeLimit <= 0) {
return false;
}
bytesWrittenProgressForCloseCheck += bytesWritten;
if (bytesWrittenProgressForCloseCheck <= closeCheckSizeLimit) {
return false;
}
bytesWrittenProgressForCloseCheck = 0;
return !store.areWritesEnabled();
} | 3.26 |
hbase_CloseChecker_isTimeLimit_rdh | /**
   * Check whether a system stop is requested once the time elapsed since the last check exceeds
   * the configured time limit.
*
* @return if true, system stop.
*/
public boolean isTimeLimit(Store store, long now) {
if (closeCheckTimeLimit <= 0) {
return false;
}
    final long elapsedMillis = now - lastCloseCheckMillis;
if (elapsedMillis <= closeCheckTimeLimit) {
return false;
}
lastCloseCheckMillis = now;
return !store.areWritesEnabled();
} | 3.26 |
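A hedged fragment showing how a compaction write loop might consult both checks; only isSizeLimit and isTimeLimit come from the snippets above, while the CloseChecker constructor signature, scanner, writer, store and the writeCells helper are assumptions for illustration.

CloseChecker closeChecker = new CloseChecker(conf, EnvironmentEdgeManager.currentTime());
List<Cell> cells = new ArrayList<>();
while (scanner.next(cells)) {
  long bytesWritten = writeCells(writer, cells); // hypothetical helper returning bytes written
  if (closeChecker.isSizeLimit(store, bytesWritten)) {
    return false; // the store is closing; abandon this compaction
  }
  if (closeChecker.isTimeLimit(store, EnvironmentEdgeManager.currentTime())) {
    return false;
  }
  cells.clear();
}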
hbase_MultiTableSnapshotInputFormatImpl_setInput_rdh | /**
* Configure conf to read from snapshotScans, with snapshots restored to a subdirectory of
* restoreDir.
* <p/>
* Sets: {@link #RESTORE_DIRS_KEY}, {@link #SNAPSHOT_TO_SCANS_KEY}
*/
public void setInput(Configuration conf, Map<String, Collection<Scan>> snapshotScans, Path restoreDir) throws IOException {
Path rootDir = CommonFSUtils.getRootDir(conf);
FileSystem fs = rootDir.getFileSystem(conf);
setSnapshotToScans(conf, snapshotScans);
Map<String, Path> restoreDirs = generateSnapshotToRestoreDirMapping(snapshotScans.keySet(), restoreDir);
setSnapshotDirs(conf, restoreDirs);
restoreSnapshots(conf, restoreDirs, fs);
} | 3.26 |
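A usage sketch for setInput, assuming it is invoked directly on an instance of this impl class (in practice the public MapReduce input format typically wraps this call); snapshot names, column family and restore path are illustrative only.

import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mapreduce.MultiTableSnapshotInputFormatImpl;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiSnapshotSetup {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Map<String, Collection<Scan>> snapshotScans = new HashMap<>();
    snapshotScans.put("snapshot_a", Arrays.asList(new Scan().addFamily(Bytes.toBytes("cf"))));
    snapshotScans.put("snapshot_b", Arrays.asList(new Scan()));
    new MultiTableSnapshotInputFormatImpl()
      .setInput(conf, snapshotScans, new Path("/tmp/snapshot-restore"));
  }
}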
hbase_MultiTableSnapshotInputFormatImpl_getSnapshotDirs_rdh | /**
* Retrieve the directories into which snapshots have been restored from
* ({@link #RESTORE_DIRS_KEY})
*
* @param conf
* Configuration to extract restore directories from
* @return the directories into which snapshots have been restored from
*/
public Map<String, Path> getSnapshotDirs(Configuration conf) throws IOException {
    List<Map.Entry<String, String>> kvps = ConfigurationUtil.getKeyValues(conf, RESTORE_DIRS_KEY);
Map<String, Path> rtn = Maps.newHashMapWithExpectedSize(kvps.size());
for (Map.Entry<String, String> kvp : kvps) {
rtn.put(kvp.getKey(), new Path(kvp.getValue()));
}
return rtn;
} | 3.26 |
hbase_MultiTableSnapshotInputFormatImpl_setSnapshotToScans_rdh | /**
* Push snapshotScans to conf (under the key {@link #SNAPSHOT_TO_SCANS_KEY})
*/
public void setSnapshotToScans(Configuration conf, Map<String, Collection<Scan>> snapshotScans) throws IOException {
// flatten out snapshotScans for serialization to the job conf
List<Map.Entry<String, String>> snapshotToSerializedScans = Lists.newArrayList();
for (Map.Entry<String, Collection<Scan>> entry : snapshotScans.entrySet()) {
String snapshotName = entry.getKey();
Collection<Scan> scans = entry.getValue();
// serialize all scans and map them to the appropriate snapshot
for (Scan scan : scans) {
snapshotToSerializedScans.add(new AbstractMap.SimpleImmutableEntry<>(snapshotName, TableMapReduceUtil.convertScanToString(scan)));
}
}
ConfigurationUtil.setKeyValues(conf, SNAPSHOT_TO_SCANS_KEY, snapshotToSerializedScans);
} | 3.26 |
hbase_MultiTableSnapshotInputFormatImpl_getSplits_rdh | /**
* Return the list of splits extracted from the scans/snapshots pushed to conf by
* {@link #setInput(Configuration, Map, Path)}
*
* @param conf
* Configuration to determine splits from
* @return Return the list of splits extracted from the scans/snapshots pushed to conf
*/
public List<TableSnapshotInputFormatImpl.InputSplit> getSplits(Configuration conf) throws IOException {
    Path rootDir = CommonFSUtils.getRootDir(conf);
    FileSystem fs = rootDir.getFileSystem(conf);
List<TableSnapshotInputFormatImpl.InputSplit> rtn = Lists.newArrayList();
Map<String, Collection<Scan>> snapshotsToScans = getSnapshotsToScans(conf);
Map<String, Path> snapshotsToRestoreDirs = getSnapshotDirs(conf);
for (Map.Entry<String, Collection<Scan>> entry : snapshotsToScans.entrySet()) {
String snapshotName = entry.getKey();
Path restoreDir = snapshotsToRestoreDirs.get(snapshotName);
SnapshotManifest manifest = TableSnapshotInputFormatImpl.getSnapshotManifest(conf, snapshotName, rootDir, fs);
      List<RegionInfo> regionInfos = TableSnapshotInputFormatImpl.getRegionInfosFromManifest(manifest);
      for (Scan scan : entry.getValue()) {
List<TableSnapshotInputFormatImpl.InputSplit> splits = TableSnapshotInputFormatImpl.getSplits(scan, manifest, regionInfos, restoreDir, conf);
rtn.addAll(splits);
}
}
return rtn;
} | 3.26 |
hbase_MultiTableSnapshotInputFormatImpl_restoreSnapshots_rdh | /**
* Restore each (snapshot name, restore directory) pair in snapshotToDir
*
* @param conf
* configuration to restore with
* @param snapshotToDir
* mapping from snapshot names to restore directories
* @param fs
* filesystem to do snapshot restoration on
*/
public void restoreSnapshots(Configuration conf, Map<String, Path> snapshotToDir, FileSystem fs) throws IOException {
// TODO: restore from record readers to parallelize.
Path rootDir = CommonFSUtils.getRootDir(conf);
for (Map.Entry<String, Path> entry : snapshotToDir.entrySet()) {
String snapshotName = entry.getKey();
Path restoreDir = entry.getValue();
LOG.info(((("Restoring snapshot " + snapshotName) + " into ") + restoreDir) + " for MultiTableSnapshotInputFormat");
restoreSnapshot(conf, snapshotName, rootDir, restoreDir, fs);
}
} | 3.26 |
hbase_MultiTableSnapshotInputFormatImpl_getSnapshotsToScans_rdh | /**
* Retrieve the snapshot name -> list<scan> mapping pushed to configuration by
* {@link #setSnapshotToScans(Configuration, Map)}
*
* @param conf
* Configuration to extract name -> list<scan> mappings from.
* @return the snapshot name -> list<scan> mapping pushed to configuration
*/
public Map<String, Collection<Scan>> getSnapshotsToScans(Configuration conf) throws IOException {
Map<String, Collection<Scan>> rtn = Maps.newHashMap();
for (Map.Entry<String, String> entry : ConfigurationUtil.getKeyValues(conf, SNAPSHOT_TO_SCANS_KEY)) {
String snapshotName = entry.getKey();
String scan = entry.getValue();
Collection<Scan> snapshotScans = rtn.get(snapshotName);
if (snapshotScans == null) {
        snapshotScans = Lists.newArrayList();
rtn.put(snapshotName, snapshotScans);
}
snapshotScans.add(TableMapReduceUtil.convertStringToScan(scan));
}
return rtn;
} | 3.26 |
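A short round-trip sketch for the serialization pair above: scans written under SNAPSHOT_TO_SCANS_KEY by setSnapshotToScans come back out of getSnapshotsToScans grouped by snapshot name (imports and exception handling omitted).

Configuration conf = new Configuration();
MultiTableSnapshotInputFormatImpl impl = new MultiTableSnapshotInputFormatImpl();
impl.setSnapshotToScans(conf, Collections.singletonMap("snap", Arrays.asList(new Scan())));
Map<String, Collection<Scan>> roundTripped = impl.getSnapshotsToScans(conf); // {"snap" -> [scan]}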
hbase_MultiTableSnapshotInputFormatImpl_generateSnapshotToRestoreDirMapping_rdh | /**
* Generate a random path underneath baseRestoreDir for each snapshot in snapshots and return a
* map from the snapshot to the restore directory.
*
* @param snapshots
* collection of snapshot names to restore
* @param baseRestoreDir
* base directory under which all snapshots in snapshots will be restored
* @return a mapping from snapshot name to the directory in which that snapshot has been restored
*/
private Map<String, Path> generateSnapshotToRestoreDirMapping(Collection<String> snapshots, Path baseRestoreDir) {
Map<String, Path> rtn = Maps.newHashMap();
for (String snapshotName : snapshots) {
      Path restoreSnapshotDir =
        new Path(baseRestoreDir, (snapshotName + "__") + UUID.randomUUID().toString());
      rtn.put(snapshotName, restoreSnapshotDir);
    }
return rtn;
} | 3.26 |
hbase_MemStoreCompactor_start_rdh | /**
* ---------------------------------------------------------------------- The request to dispatch
* the compaction asynchronous task. The method returns true if compaction was successfully
* dispatched, or false if there is already an ongoing compaction or no segments to compact.
*/
public boolean start() throws IOException {
if (!compactingMemStore.hasImmutableSegments()) {
// no compaction on empty pipeline
return false;
}
// get a snapshot of the list of the segments from the pipeline,
// this local copy of the list is marked with specific version
versionedList = compactingMemStore.getImmutableSegments();
LOG.trace("Speculative compaction starting on {}/{}", compactingMemStore.getStore().getHRegion().getRegionInfo().getEncodedName(), compactingMemStore.getStore().getColumnFamilyName());
HStore store = compactingMemStore.getStore();
RegionCoprocessorHost cpHost = store.getCoprocessorHost();
    if (cpHost != null) {
cpHost.preMemStoreCompaction(store);
}
try {
doCompaction();
} finally {
if (cpHost != null) {
cpHost.postMemStoreCompaction(store);
}
}
return true;
} | 3.26 |
hbase_MemStoreCompactor_stop_rdh | /**
   * ---------------------------------------------------------------------- The request to cancel
   * the compaction asynchronous task. The compaction may still happen if the request was sent too
   * late. Non-blocking request.
*/
public void stop() {
isInterrupted.compareAndSet(false, true);
} | 3.26 |
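A generic, self-contained sketch of the cooperative-cancellation pattern used by stop() and doCompaction(): stopping only flips an atomic flag, and the worker polls that flag between phases instead of being interrupted forcibly.

import java.util.concurrent.atomic.AtomicBoolean;

class CancellableWorker {
  private final AtomicBoolean isInterrupted = new AtomicBoolean(false);

  void stop() {
    isInterrupted.compareAndSet(false, true); // non-blocking; may arrive too late to prevent work
  }

  void doWork() {
    if (isInterrupted.get()) {
      return; // cancelled before phase 1
    }
    // ... phase 1 ...
    if (isInterrupted.get()) {
      return; // cancelled before phase 2; phase 1 output is discarded by the caller
    }
    // ... phase 2 ...
  }
}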
hbase_MemStoreCompactor_releaseResources_rdh | /**
* ---------------------------------------------------------------------- Reset the interruption
* indicator and clear the pointers in order to allow good garbage collection
*/
private void releaseResources() {
isInterrupted.set(false);
versionedList = null;
} | 3.26 |
hbase_MemStoreCompactor_doCompaction_rdh | /**
* ---------------------------------------------------------------------- The worker thread
* performs the compaction asynchronously. The solo (per compactor) thread only reads the
* compaction pipeline. There is at most one thread per memstore instance.
*/
private void doCompaction() {
ImmutableSegment result = null;
boolean resultSwapped = false;
MemStoreCompactionStrategy.Action nextStep = strategy.getAction(versionedList);
    boolean merge =
      (nextStep == Action.MERGE) || (nextStep == Action.MERGE_COUNT_UNIQUE_KEYS);
try {
if (isInterrupted.get()) {
// if the entire process is interrupted cancel flattening
        return; // the compaction also doesn't start when interrupted
}
      if (nextStep == Action.NOOP) {
        return;
      }
if ((nextStep == Action.FLATTEN) || (nextStep == Action.FLATTEN_COUNT_UNIQUE_KEYS)) {
// some Segment in the pipeline is with SkipList index, make it flat
compactingMemStore.flattenOneSegment(versionedList.getVersion(), nextStep);
return;
}
// Create one segment representing all segments in the compaction pipeline,
// either by compaction or by merge
if (!isInterrupted.get()) {
result = createSubstitution(nextStep);
}
// Substitute the pipeline with one segment
if (!isInterrupted.get()) {
        resultSwapped = compactingMemStore.swapCompactedSegments(versionedList, result, merge);
        if (resultSwapped) { // update compaction strategy
strategy.updateStats(result);
// update the wal so it can be truncated and not get too long
          compactingMemStore.updateLowestUnflushedSequenceIdInWAL(true); // only if greater
}
}
} catch (IOException e) {
LOG.trace("Interrupting in-memory compaction for store={}", compactingMemStore.getFamilyName());
Thread.currentThread().interrupt();
    } finally {
      // For the MERGE case, if the result was created, but swap didn't happen,
// we DON'T need to close the result segment (meaning its MSLAB)!
// Because closing the result segment means closing the chunks of all segments
// in the compaction pipeline, which still have ongoing scans.
if (((!merge) && (result != null)) && (!resultSwapped)) {
result.close();
}
releaseResources();
compactingMemStore.setInMemoryCompactionCompleted();
}
} | 3.26 |
hbase_MemStoreCompactor_createSubstitution_rdh | /**
* ---------------------------------------------------------------------- Creation of the
* ImmutableSegment either by merge or copy-compact of the segments of the pipeline, based on the
* Compactor Iterator. The new ImmutableSegment is returned.
*/
private ImmutableSegment createSubstitution(MemStoreCompactionStrategy.Action action) throws IOException {
ImmutableSegment result = null;
MemStoreSegmentsIterator iterator = null;
List<ImmutableSegment> segments = versionedList.getStoreSegments();
for (ImmutableSegment s : segments) {
      s.waitForUpdates(); // ensure all updates preceding the in-memory flush of s have completed
      // empty segments are skipped when the MemStoreSegmentsIterator is created below
}
switch (action) {
      case COMPACT :
        iterator = new MemStoreCompactorSegmentsIterator(segments,
          compactingMemStore.getComparator(), compactionKVMax, compactingMemStore.getStore());
result = SegmentFactory.instance().createImmutableSegmentByCompaction(compactingMemStore.getConfiguration(), compactingMemStore.getComparator(), iterator, versionedList.getNumOfCells(), compactingMemStore.getIndexType(), action);
iterator.close();
        break;
      case MERGE :
case MERGE_COUNT_UNIQUE_KEYS :
iterator = new MemStoreMergerSegmentsIterator(segments, compactingMemStore.getComparator(), compactionKVMax);
result = SegmentFactory.instance().createImmutableSegmentByMerge(compactingMemStore.getConfiguration(), compactingMemStore.getComparator(), iterator, versionedList.getNumOfCells(), segments, compactingMemStore.getIndexType(), action);
iterator.close();
break;
default :
throw new RuntimeException("Unknown action " + action);// sanity check
}
return result;
} | 3.26 |