name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, 3.26–3.68)
---|---|---
hbase_MultiByteBuff_getItemIndex_rdh | /* Returns the index of the sub ByteBuffer in which the given element index is available. */
private int getItemIndex(int elemIndex) {
if (elemIndex < 0) {
throw new IndexOutOfBoundsException();
}
int index = 1;
while (elemIndex >= this.itemBeginPos[index]) {
index++;
if (index == this.itemBeginPos.length) {
throw new IndexOutOfBoundsException();
}
}
return index - 1;
} | 3.26 |
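The scan above walks a cumulative begin-position array until it passes the element index. A minimal standalone sketch of the same lookup, with a hypothetical `ITEM_BEGIN_POS` layout (the trailing sentinel slot mirrors the extra entry the loop's bounds check relies on):

```java
public class ItemIndexDemo {
  // Item 0 covers [0,4), item 1 covers [4,10), item 2 covers [10,15); 15 is a sentinel.
  private static final int[] ITEM_BEGIN_POS = {0, 4, 10, 15};

  static int getItemIndex(int elemIndex) {
    if (elemIndex < 0) {
      throw new IndexOutOfBoundsException();
    }
    int index = 1;
    while (elemIndex >= ITEM_BEGIN_POS[index]) {
      index++;
      if (index == ITEM_BEGIN_POS.length) {
        throw new IndexOutOfBoundsException();
      }
    }
    return index - 1; // the item whose range contains elemIndex
  }

  public static void main(String[] args) {
    System.out.println(getItemIndex(0));  // 0
    System.out.println(getItemIndex(4));  // 1
    System.out.println(getItemIndex(14)); // 2
  }
}
```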
hbase_MultiByteBuff_hasRemaining_rdh | /**
* Returns true if there are elements between the current position and the limit
*
* @return true if there are elements, false otherwise
*/
@Override
public final boolean hasRemaining() {
checkRefCount();
return this.curItem.hasRemaining() || ((this.curItemIndex < this.limitedItemIndex) && this.items[this.curItemIndex + 1].hasRemaining());
} | 3.26 |
hbase_MultiByteBuff_asSubByteBuffer_rdh | /**
* Returns bytes from the given offset up to the specified length, as a single ByteBuffer. When all
* these bytes happen to be in a single ByteBuffer which this object wraps, that ByteBuffer item as
* such will be returned (along with the offset in this ByteBuffer where the bytes start). So users
* are warned not to change the position or limit of the returned ByteBuffer. When the required
* bytes span multiple ByteBuffers, this API will copy the bytes into a newly created ByteBuffer of
* the required size and return that.
*
* @param offset
* the offset in this MBB from where the subBuffer should be created
* @param length
* the length of the subBuffer
* @param pair
a pair that will hold the bytes from the given offset up to the specified length, as a
single ByteBuffer, together with the offset in that buffer where the bytes start. The
method sets these values on the pair passed in by the caller
*/
@Override
public void asSubByteBuffer(int offset, int length, ObjectIntPair<ByteBuffer> pair) {
checkRefCount();
if (this.itemBeginPos[this.curItemIndex] <= offset) {
int relOffsetInCurItem = offset - this.itemBeginPos[this.curItemIndex];
if ((this.curItem.limit() - relOffsetInCurItem) >= length) {
pair.setFirst(this.curItem);
pair.setSecond(relOffsetInCurItem);
return;
}
}
int itemIndex = getItemIndex(offset);
ByteBuffer v95 = this.items[itemIndex];
offset = offset - this.itemBeginPos[itemIndex];
if ((v95.limit() - offset) >= length) {
pair.setFirst(v95);
pair.setSecond(offset);
return;
}
byte[] dst = new byte[length];
int destOffset = 0;
while (length > 0) {
int toRead = Math.min(length, v95.limit() - offset);
ByteBufferUtils.copyFromBufferToArray(dst, v95, offset, destOffset, toRead);
length -= toRead;
if (length == 0)
break;
itemIndex++;
v95 = this.items[itemIndex];
destOffset += toRead;
offset = 0;
}
pair.setFirst(ByteBuffer.wrap(dst));
pair.setSecond(0);
} | 3.26 |
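When the requested bytes span multiple backing buffers, the copy branch above gathers them into a fresh array. A self-contained java.nio sketch of that gather loop (hypothetical buffers; the `ByteBufferUtils` helper is replaced here by absolute per-byte gets, an assumption about its behavior):

```java
import java.nio.ByteBuffer;

public class GatherDemo {
  /** Copies `length` bytes starting at `offset` within items[itemIndex], spilling into later items. */
  static ByteBuffer gather(ByteBuffer[] items, int itemIndex, int offset, int length) {
    byte[] dst = new byte[length];
    int destOffset = 0;
    ByteBuffer cur = items[itemIndex];
    while (length > 0) {
      int toRead = Math.min(length, cur.limit() - offset);
      // Absolute per-byte copy, so cur's position is left undisturbed.
      for (int i = 0; i < toRead; i++) {
        dst[destOffset + i] = cur.get(offset + i);
      }
      length -= toRead;
      if (length == 0) break;
      cur = items[++itemIndex]; // move to the next backing buffer
      destOffset += toRead;
      offset = 0;               // subsequent items are read from their start
    }
    return ByteBuffer.wrap(dst);
  }

  public static void main(String[] args) {
    ByteBuffer[] items = { ByteBuffer.wrap(new byte[] {1, 2, 3}), ByteBuffer.wrap(new byte[] {4, 5, 6}) };
    ByteBuffer out = gather(items, 0, 2, 3); // bytes 3, 4, 5 spanning both buffers
    System.out.println(out.get(0) + " " + out.get(1) + " " + out.get(2)); // 3 4 5
  }
}
```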
hbase_MultiByteBuff_rewind_rdh | /**
* Rewinds this MBB, setting the position to 0
*
* @return this object
*/
@Override
public MultiByteBuff rewind() {
checkRefCount();
for (int i = 0; i < this.items.length; i++) {
this.items[i].rewind();
}
this.curItemIndex = 0;
this.curItem = this.items[this.curItemIndex];
this.markedItemIndex = -1;
return this;
} | 3.26 |
hbase_MultiByteBuff_limit_rdh | /**
* Returns the limit of this MBB
*
* @return limit of the MBB
*/
@Override
public int limit() {
return this.limit;
} | 3.26 |
hbase_MultiByteBuff_put_rdh | /**
* Copies from the given byte[] to this MBB.
*/
@Override
public MultiByteBuff put(byte[] src, int offset, int length) {
checkRefCount();
if (this.curItem.remaining() >= length) {
ByteBufferUtils.copyFromArrayToBuffer(this.curItem, src, offset, length);
return this;
}
int end = offset + length;
for (int i = offset; i < end; i++) {
this.put(src[i]);
}
return this;
} | 3.26 |
hbase_MultiByteBuff_toBytes_rdh | /**
* Copy the content from this MBB to a byte[] based on the given offset and length.
*
* @param offset
* the position from where the copy should start
* @param length
* the length up to which the copy has to be done
*
* @return byte[] with the copied contents from this MBB.
*/
@Override
public byte[] toBytes(int offset, int length) {
checkRefCount();
byte[] output = new byte[length];
this.get(offset, output, 0, length);
return output;
} | 3.26 |
hbase_MultiByteBuff_getShort_rdh | /**
* Returns the short value at the current position. Also advances the position by the size of
* short
*
* @return the short value at the current position
*/
@Override
public short getShort() {
checkRefCount();
int remaining = this.curItem.remaining();
if (remaining >= Bytes.SIZEOF_SHORT) {
return this.curItem.getShort();
}
short n = 0;
n = ((short) (n ^ (get() & 0xff)));
n = ((short) (n << 8));
n = ((short) (n ^ (get() & 0xff)));
return n;
} | 3.26 |
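The fallback branch assembles the short byte by byte in big-endian order, which matters when the two bytes straddle a buffer boundary. A worked standalone sketch of that shift-and-xor assembly (the two local bytes stand in for consecutive cross-buffer `get()` calls):

```java
public class ShortAssemblyDemo {
  public static void main(String[] args) {
    byte hi = (byte) 0x12; // first byte read across the buffer boundary
    byte lo = (byte) 0xAB; // second byte
    short n = 0;
    n = (short) (n ^ (hi & 0xff)); // n = 0x0012
    n = (short) (n << 8);          // n = 0x1200
    n = (short) (n ^ (lo & 0xff)); // n = 0x12AB
    System.out.printf("0x%04X%n", n); // prints 0x12AB
  }
}
```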
hbase_MultiByteBuff_remaining_rdh | /**
* Returns the number of elements between the current position and the limit.
*
* @return the remaining elements in this MBB
*/
@Override
public int remaining() {
checkRefCount();
int remain = 0;
for (int i = curItemIndex; i < items.length; i++) {
remain += items[i].remaining();
}
return remain;
} | 3.26 |
hbase_MultiByteBuff_reset_rdh | /**
* Similar to {@link ByteBuffer}.reset(), ensures that this MBB is reset back to last marked
* position.
*
* @return This MBB
*/
@Override
public MultiByteBuff reset() {
checkRefCount();
// when the buffer is moved to the next one.. the reset should happen on the previous marked
// item and the new one should be taken as the base
if (this.markedItemIndex < 0)
throw new InvalidMarkException();
ByteBuffer markedItem = this.items[this.markedItemIndex];
markedItem.reset();
this.curItem = markedItem;
// All items after the marked position up to the current item should be reset to 0
for (int i = this.curItemIndex; i > this.markedItemIndex; i--) {
this.items[i].position(0);
}
this.curItemIndex = this.markedItemIndex;
return this;
} | 3.26 |
hbase_MultiByteBuff_putInt_rdh | /**
* Writes an int to this MBB at its current position. Also advances the position by size of int
*
* @param val
* Int value to write
* @return this object
*/
@Override
public MultiByteBuff putInt(int val) {
checkRefCount();
if (this.curItem.remaining() >= Bytes.SIZEOF_INT) {
this.curItem.putInt(val);
return this;
}
if (this.curItemIndex == (this.items.length - 1)) {
throw new BufferOverflowException();
}
// During read, we will read as byte by byte for this case. So just write in Big endian
put(int3(val));
put(int2(val));
put(int1(val));
put(int0(val));
return this;
} | 3.26 |
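The overflow branch decomposes the int into four big-endian bytes via `int3`..`int0`. Those helpers are not shown in the snippet; a hedged sketch of what such byte-extraction helpers typically look like (the bodies below are assumptions):

```java
public class IntBytesDemo {
  static byte int3(int v) { return (byte) (v >>> 24); } // most significant byte
  static byte int2(int v) { return (byte) (v >>> 16); }
  static byte int1(int v) { return (byte) (v >>> 8); }
  static byte int0(int v) { return (byte) v; }          // least significant byte

  public static void main(String[] args) {
    int val = 0x11223344;
    System.out.printf("%02X %02X %02X %02X%n",
        int3(val), int2(val), int1(val), int0(val)); // 11 22 33 44
  }
}
```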
hbase_MultiByteBuff_array_rdh | /**
*
* @throws UnsupportedOperationException
* MBB does not support array based operations
*/
@Override
public byte[] array() {
throw new UnsupportedOperationException();
} | 3.26 |
hbase_MultiByteBuff_skip_rdh | /**
* Advances the current position of this MBB by the specified length.
*/
@Override
public MultiByteBuff skip(int length) {
checkRefCount();
// Get available bytes from this item and remaining from next
int jump = 0;
while (true) {
jump = this.curItem.remaining();
if (jump >= length) {
this.curItem.position(this.curItem.position() + length);
break;
}
this.curItem.position(this.curItem.position() + jump);
length -= jump;
this.curItemIndex++;
this.curItem = this.items[this.curItemIndex];
}
return this;
} | 3.26 |
hbase_MultiByteBuff_getInt_rdh | /**
* Returns the int value at the current position. Also advances the position by the size of int
*
* @return the int value at the current position
*/
@Override
public int getInt() {
checkRefCount();
int remaining = this.curItem.remaining();
if (remaining >= Bytes.SIZEOF_INT) {
return this.curItem.getInt();
}
int n = 0;
for (int i = 0; i < Bytes.SIZEOF_INT; i++) {
n <<= 8;
n ^= get() & 0xff;
}
return n;
} | 3.26 |
hbase_MultiByteBuff_putLong_rdh | /**
* Writes a long to this MBB at its current position. Also advances the position by size of long
*
* @param val
* Long value to write
* @return this object
*/
@Override
public MultiByteBuff putLong(long val) {
checkRefCount();
if (this.curItem.remaining() >= Bytes.SIZEOF_LONG) {
this.curItem.putLong(val);
return this;
}
if (this.curItemIndex == (this.items.length - 1)) {
throw new BufferOverflowException();
}
// During read, we will read as byte by byte for this case. So just write in Big endian
put(long7(val));
put(long6(val));
put(long5(val));
put(long4(val));
put(long3(val));
put(long2(val));
put(long1(val));
put(long0(val));
return this;
} | 3.26 |
hbase_MultiByteBuff_arrayOffset_rdh | /**
*
* @throws UnsupportedOperationException
* MBB does not support array based operations
*/
@Override
public int arrayOffset() {
throw new UnsupportedOperationException();
} | 3.26 |
hbase_MultiByteBuff_mark_rdh | /**
* Marks the current position of the MBB
*
* @return this object
*/
@Override
public MultiByteBuff mark() {
checkRefCount();
this.markedItemIndex = this.curItemIndex;
this.curItem.mark();
return this;
} | 3.26 |
hbase_MultiByteBuff_slice_rdh | /**
* Returns an MBB which is a sliced version of this MBB. The position, limit and mark of the new
* MBB will be independent of those of the original MBB. The content of the new MBB will start at
* this MBB's current position
*
* @return a sliced MBB
*/
@Override
public MultiByteBuff slice() {
checkRefCount();
ByteBuffer[] copy = new ByteBuffer[(this.limitedItemIndex - this.curItemIndex) + 1];
for (int i = curItemIndex, j = 0; i <= this.limitedItemIndex; i++, j++) {
copy[j] = this.items[i].slice();
}
return new MultiByteBuff(refCnt, copy);
} | 3.26 |
hbase_MultiByteBuff_capacity_rdh | /**
* Returns the total capacity of this MultiByteBuffer.
*/
@Override
public int capacity() {
checkRefCount();
int c = 0;
for (ByteBuffer item : this.items) {
c += item.capacity();
}
return c;
} | 3.26 |
hbase_MultiByteBuff_get_rdh | /**
* Copies the content from this MBB to a ByteBuffer
*
* @param out
* the ByteBuffer to which the copy has to happen, its position will be
* advanced.
* @param sourceOffset
* the offset in the MBB from which the elements have to be copied
* @param length
* the length in the MBB up to which the elements have to be copied
*/
@Override
public void get(ByteBuffer out, int sourceOffset, int length) {
checkRefCount();
int itemIndex = getItemIndex(sourceOffset);
ByteBuffer in = this.items[itemIndex];
sourceOffset = sourceOffset - this.itemBeginPos[itemIndex];
while (length > 0) {
int toRead = Math.min(in.limit() - sourceOffset, length);
ByteBufferUtils.copyFromBufferToBuffer(in, out, sourceOffset, toRead);
length -= toRead;
if (length == 0) {
break;
}
itemIndex++;
in = this.items[itemIndex];
sourceOffset = 0;
}
} | 3.26 |
hbase_MultiByteBuff_hasArray_rdh | /**
* Returns false. MBB does not support array based operations
*/
@Override
public boolean hasArray() {
return false;
} | 3.26 |
hbase_MultiByteBuff_getLong_rdh | /**
* Returns the long value at the current position. Also advances the position by the size of long
*
* @return the long value at the current position
*/
@Override
public long getLong() {
checkRefCount();
int remaining = this.curItem.remaining();
if (remaining >= Bytes.SIZEOF_LONG) {
return this.curItem.getLong();
}
long l = 0;
for (int i = 0; i < Bytes.SIZEOF_LONG; i++) {
l <<= 8;
l ^= get() & 0xff;
}
return l;
} | 3.26 |
hbase_MultiByteBuff_moveBack_rdh | /**
* Moves the current position of this MBB back by the specified length.
*/
@Override
public MultiByteBuff moveBack(int length) {
checkRefCount();
while (length != 0) {
if (length > curItem.position()) {
length -= curItem.position();
this.curItem.position(0);
this.curItemIndex--;
this.curItem = this.items[curItemIndex];
} else {
this.curItem.position(curItem.position() - length);
break;
}
}
return this;
} | 3.26 |
hbase_MultiByteBuff_position_rdh | /**
* Sets this MBB's position to the given value.
*
* @return this object
*/
@Override
public MultiByteBuff position(int position) {
checkRefCount();
// Short circuit for positioning within the cur item. Mostly that is the case.
if ((this.itemBeginPos[this.curItemIndex] <= position) && (this.itemBeginPos[this.curItemIndex + 1] > position)) {
this.curItem.position(position - this.itemBeginPos[this.curItemIndex]);
return this;
}
int itemIndex = getItemIndex(position);
// All items from 0 - curItem-1 set position at end.
for (int i = 0; i < itemIndex; i++) {
this.items[i].position(this.items[i].limit());
}
// All items after curItem set position at begin
for (int i = itemIndex + 1; i < this.items.length; i++) {
this.items[i].position(0);
}
this.curItem = this.items[itemIndex];
this.curItem.position(position - this.itemBeginPos[itemIndex]);
this.curItemIndex = itemIndex;
return this;
} | 3.26 |
hbase_ExecutorType_getExecutorName_rdh | /**
* Returns Conflation of the executor type and the passed {@code serverName}.
*/
String getExecutorName(String serverName) {
return (this.toString() + "-") + serverName.replace("%", "%%");
} | 3.26 |
hbase_PairOfSameType_getFirst_rdh | /**
* Return the first element stored in the pair.
*/
public T getFirst() {
return first;
} | 3.26 |
hbase_PairOfSameType_getSecond_rdh | /**
* Return the second element stored in the pair.
*/
public T getSecond() {
return second;
} | 3.26 |
hbase_RawCellBuilderFactory_create_rdh | /**
* Returns a builder for creating cells
*/
public static RawCellBuilder create() {
return new KeyValueBuilder();
} | 3.26 |
hbase_ProcedurePrepareLatch_getNoopLatch_rdh | /**
* Returns the singleton latch which does nothing.
*/
public static ProcedurePrepareLatch getNoopLatch() {
return noopLatch;
} | 3.26 |
hbase_ProcedurePrepareLatch_createBlockingLatch_rdh | /**
* Creates a latch which blocks.
*/
public static ProcedurePrepareLatch createBlockingLatch() {
return new CompatibilityLatch();
} | 3.26 |
hbase_ProcedurePrepareLatch_m0_rdh | /**
* Create a latch if the client does not have async proc support
*
* @param major
* major version with async proc support
* @param minor
* minor version with async proc support
* @return a CompatibilityLatch or a NoopLatch if the client has async proc support
*/
public static ProcedurePrepareLatch m0(int major, int minor) {
// don't use the latch if we have procedure support
return hasProcedureSupport(major, minor) ? noopLatch : new CompatibilityLatch();
} | 3.26 |
hbase_ProcedurePrepareLatch_createLatch_rdh | /**
* Create a latch if the client does not have async proc support. This uses the default 1.1
* version.
*
* @return a CompatibilityLatch or a NoopLatch if the client has async proc support
*/
public static ProcedurePrepareLatch createLatch() {
// don't use the latch if we have procedure support (default 1.1)
return m0(1, 1);
} | 3.26 |
hbase_LockProcedure_setTimeoutFailure_rdh | /**
* Re-run the procedure after every timeout to write new WAL entries so we don't hold back old
* WALs.
*
* @return false, so procedure framework doesn't mark this procedure as failure.
*/
@Override
protected synchronized boolean setTimeoutFailure(final MasterProcedureEnv env) {
synchronized(event) {
if (LOG.isDebugEnabled())
LOG.debug("Timeout failure "
+ this.event);
if (!event.isReady()) {
// Maybe unlock() awakened the event.
setState(ProcedureState.RUNNABLE);
if (LOG.isDebugEnabled())
LOG.debug("Calling wake on " + this.event);
event.wake(env.getProcedureScheduler());
}
}
return false; // false: do not mark the procedure as failed.
} | 3.26 |
hbase_LockProcedure_updateHeartBeat_rdh | /**
* Updates timeout deadline for the lock.
*/
public void updateHeartBeat() {
f1.set(EnvironmentEdgeManager.currentTime());
if (LOG.isDebugEnabled()) {
LOG.debug("Heartbeat " + toString());
}
} | 3.26 |
hbase_LockProcedure_unlock_rdh | // Can be called before procedure gets scheduled, in which case, the execute() will finish
// immediately and release the underlying locks.
public void unlock(final MasterProcedureEnv env) {
unlock.set(true);
locked.set(false);
// Maybe timeout already awakened the event and the procedure has finished.
synchronized(event) {
if ((!event.isReady()) && suspended) {
setState(ProcedureState.RUNNABLE);
event.wake(env.getProcedureScheduler());
suspended = false;
}
}
} | 3.26 |
hbase_LockProcedure_beforeReplay_rdh | /**
* On recovery, re-execute from start to acquire the locks. Need to explicitly set it to RUNNABLE
* because the procedure might have been in WAITING_TIMEOUT state when the crash happened, in
* which case it'll be sent back to the timeout queue on recovery, which we don't want since we
* want to reacquire the locks.
*/
@Override
protected void beforeReplay(MasterProcedureEnv env) {
setState(ProcedureState.RUNNABLE);
} | 3.26 |
hbase_Writables_copyWritable_rdh | /**
* Populate a Writable from its serialized bytes. Copies bytes using data streams.
*
* @param bytes
* Source bytes holding a serialized Writable
* @param tgt
* Target Writable
* @return The target Writable.
* @throws IOException
* e
*/
public static Writable copyWritable(final byte[] bytes, final Writable tgt) throws IOException {
DataInputStream dis = new DataInputStream(new ByteArrayInputStream(bytes));
try {
tgt.readFields(dis);
}
finally {
dis.close();
}
return tgt;
} | 3.26 |
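A possible usage sketch, round-tripping a Hadoop `IntWritable` through bytes and back via the method above (assumes `org.apache.hadoop.hbase.util.Writables` is the class containing this snippet and that the Hadoop types are on the classpath):

```java
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.IntWritable;

public class CopyWritableDemo {
  public static void main(String[] args) throws IOException {
    // Serialize an IntWritable to a byte[] by hand.
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    try (DataOutputStream dos = new DataOutputStream(baos)) {
      new IntWritable(42).write(dos);
    }
    // Rehydrate into a fresh instance via copyWritable(byte[], Writable).
    IntWritable copy = new IntWritable();
    Writables.copyWritable(baos.toByteArray(), copy);
    System.out.println(copy.get()); // 42
  }
}
```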
hbase_OrderedInt64_encodeLong_rdh | /**
* Write instance {@code val} into buffer {@code dst}.
*
* @param dst
* the {@link PositionedByteRange} to write to
* @param val
* the value to write to {@code dst}
* @return the number of bytes written
*/
public int encodeLong(PositionedByteRange dst, long val) {
return OrderedBytes.encodeInt64(dst, val, order);
} | 3.26 |
hbase_OrderedInt64_decodeLong_rdh | /**
* Read a {@code long} value from the buffer {@code src}.
*
* @param src
* the {@link PositionedByteRange} to read the {@code long} from
* @return the {@code long} read from the buffer
*/
public long decodeLong(PositionedByteRange src) {
return OrderedBytes.decodeInt64(src);
} | 3.26 |
hbase_StorageClusterVersionModel_getVersion_rdh | /**
* Returns the storage cluster version
*/
@XmlAttribute(name = "Version")
public String getVersion() {
return version;
} | 3.26 |
hbase_StorageClusterVersionModel_setVersion_rdh | /**
*
* @param version
* the storage cluster version
*/
public void setVersion(String version) {
this.version = version;
} | 3.26 |
hbase_StorageClusterVersionModel_valueOf_rdh | // needed for jackson deserialization
private static StorageClusterVersionModel valueOf(String value) {
StorageClusterVersionModel versionModel = new StorageClusterVersionModel();
versionModel.setVersion(value);
return versionModel;
} | 3.26 |
hbase_StorageClusterVersionModel_toString_rdh | /* (non-Javadoc)
@see java.lang.Object#toString()
*/
@Override
public String toString() {
return version;
} | 3.26 |
hbase_AbstractFSWAL_trySetReadyForRolling_rdh | // return whether we have successfully set readyForRolling to true.
private boolean trySetReadyForRolling() {
// Check without holding lock first. Usually we will just return here.
// waitingRoll is volatile and unackedAppends is only accessed inside the event loop so it is safe to
// check them outside the consumeLock.
if ((!waitingRoll(epochAndState)) || (!unackedAppends.isEmpty())) {
return false;
}
consumeLock.lock();
try {
// 1. a roll is requested
// 2. all out-going entries have been acked (we have confirmed above).
if (waitingRoll(epochAndState)) {
readyForRolling = true;
readyForRollingCond.signalAll();
return true;
} else {
return false;
}
} finally {
consumeLock.unlock();
}
} | 3.26 |
hbase_AbstractFSWAL_findRegionsToForceFlush_rdh | /**
* If the number of un-archived WAL files ('live' WALs) is greater than maximum allowed, check the
* first (oldest) WAL, and return those regions which should be flushed so that it can be
* let-go/'archived'.
*
* @return stores of regions (encodedRegionNames) to flush in order to archive oldest WAL file.
*/
Map<byte[], List<byte[]>> findRegionsToForceFlush() throws IOException {
Map<byte[], List<byte[]>> regions = null;
int v19 = getNumRolledLogFiles();
if ((v19 > this.maxLogs) && (v19 > 0)) {
Map.Entry<Path, WALProps> firstWALEntry = this.walFile2Props.firstEntry();
regions = this.sequenceIdAccounting.findLower(firstWALEntry.getValue().encodedName2HighestSequenceId);
}
if (regions != null) {
List<String> listForPrint = new ArrayList<>();
for (Map.Entry<byte[], List<byte[]>> r : regions.entrySet()) {
StringBuilder families = new StringBuilder();
for (int i = 0; i < r.getValue().size(); i++) {
if (i > 0) {
families.append(",");
}
families.append(Bytes.toString(r.getValue().get(i)));
}
listForPrint.add(((Bytes.toStringBinary(r.getKey()) + "[") + families.toString()) + "]");
}
f0.info((((((("Too many WALs; count=" + v19) + ", max=") + this.maxLogs) + "; forcing (partial) flush of ") + regions.size()) + " region(s): ") + StringUtils.join(",", listForPrint));
}
return regions;
} | 3.26 |
hbase_AbstractFSWAL_atHeadOfRingBufferEventHandlerAppend_rdh | /**
* Exposed for testing only. Used for tricks like halting the ring buffer appending.
*/
protected void atHeadOfRingBufferEventHandlerAppend() {
// Noop
} | 3.26 |
hbase_AbstractFSWAL_markClosedAndClean_rdh | /**
* Mark this WAL file as closed and call cleanOldLogs to see if we can archive this file.
*/
private void markClosedAndClean(Path path) {
WALProps props = walFile2Props.get(path);
// typically this should not be null, but there is no big issue if it is already null, so
// let's make the code more robust
if (props != null) {
props.closed = true;
cleanOldLogs();
}
} | 3.26 |
hbase_AbstractFSWAL_updateStore_rdh | /**
* Updates the sequence number of a specific store. Depending on the flag, it replaces the
* current sequence number only if the given one is bigger, or unconditionally even if it is
* lower than the existing one.
*/
@Override
public void updateStore(byte[] encodedRegionName, byte[] familyName, Long sequenceid, boolean onlyIfGreater) {
sequenceIdAccounting.updateStore(encodedRegionName, familyName, sequenceid, onlyIfGreater);
} | 3.26 |
hbase_AbstractFSWAL_requestLogRoll_rdh | // public only until class moves to o.a.h.h.wal
public void requestLogRoll() {
requestLogRoll(ERROR);
} | 3.26 |
hbase_AbstractFSWAL_getInflightWALCloseCount_rdh | /**
* Returns number of WALs currently in the process of closing.
*/
public int getInflightWALCloseCount() {
return inflightWALClosures.size();
} | 3.26 |
hbase_AbstractFSWAL_getSyncedTxid_rdh | /**
* This method is to adapt {@link FSHLog} and {@link AsyncFSWAL}. For {@link AsyncFSWAL}, we use
* {@link AbstractFSWAL#highestProcessedAppendTxid} at the point we call the
* {@link AsyncFSWAL#doWriterSync} method as the successful syncedTxid. For {@link FSHLog},
* because we use multi-thread {@code SyncRunner}s, we use the result of the
* {@link CompletableFuture} as the successful syncedTxid.
*/
protected long getSyncedTxid(long processedTxid, long completableFutureResult) {
return processedTxid;
} | 3.26 |
hbase_AbstractFSWAL_main_rdh | /**
* Pass one or more log file names and it will either dump out a text version on
* <code>stdout</code> or split the specified log files.
*/
public static void main(String[] args) throws IOException {
if (args.length < 2) {
usage();
System.exit(-1);
}
// either dump using the WALPrettyPrinter or split, depending on args
if (args[0].compareTo("--dump") == 0) {
WALPrettyPrinter.run(Arrays.copyOfRange(args, 1, args.length));
} else if (args[0].compareTo("--perf") == 0) {
f0.error(HBaseMarkers.FATAL, "Please use the WALPerformanceEvaluation tool instead. i.e.:");
f0.error(HBaseMarkers.FATAL, "\thbase org.apache.hadoop.hbase.wal.WALPerformanceEvaluation --iterations " + args[1]);
System.exit(-1);
} else if (args[0].compareTo("--split") == 0) {
Configuration conf = HBaseConfiguration.create();
for (int i = 1; i < args.length; i++) {
try {
Path logPath = new Path(args[i]);
CommonFSUtils.setFsDefault(conf, logPath);
split(conf, logPath);
} catch (IOException t) {
t.printStackTrace(System.err);
System.exit(-1);
}
}
} else {
usage();
System.exit(-1);
}
} | 3.26 |
hbase_AbstractFSWAL_getFileNumFromFileName_rdh | /**
* A log file has a creation timestamp (in ms) in its file name ({@link #filenum}). This helper
* method returns the creation timestamp from a given log file. It extracts the timestamp assuming
* the filename is created with the {@link #computeFilename(long filenum)} method.
*
* @return timestamp, as in the log file name.
*/
protected long getFileNumFromFileName(Path fileName) {
checkNotNull(fileName, "file name can't be null");
if (!ourFiles.accept(fileName)) {
throw new IllegalArgumentException(((("The log file " + fileName) + " doesn't belong to this WAL. (") + toString()) + ")");
}
final String fileNameString = fileName.toString();
String chompedPath = fileNameString.substring(prefixPathStr.length(), fileNameString.length() - walFileSuffix.length());
return Long.parseLong(chompedPath);
} | 3.26 |
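The parse simply chops the known prefix and suffix off the file name, the inverse of the `computeFilename` method referenced in the javadoc. A standalone sketch with hypothetical prefix/suffix values (the real ones come from the WAL's configuration):

```java
public class WalFileNumDemo {
  // Hypothetical values for illustration only.
  static final String PREFIX_PATH = "/hbase/WALs/server1/server1.";
  static final String SUFFIX = ".meta";

  static long fileNumFromName(String fileName) {
    // Chop the prefix and suffix; what remains is the creation timestamp.
    String chomped = fileName.substring(PREFIX_PATH.length(),
        fileName.length() - SUFFIX.length());
    return Long.parseLong(chomped);
  }

  public static void main(String[] args) {
    String name = PREFIX_PATH + 1693526400000L + SUFFIX;
    System.out.println(fileNumFromName(name)); // 1693526400000
  }
}
```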
hbase_AbstractFSWAL_doReplaceWriter_rdh | /**
* Notice that you need to clear the {@link #rollRequested} flag in this method, as the new writer
* will begin to work before returning from this method. If we clear the flag after returning from
* this call, we may miss a roll request. The implementation class should choose a proper place to
* clear the {@link #rollRequested} flag so we do not miss a roll request, typically before you
* start writing to the new writer.
*/
protected void doReplaceWriter(Path oldPath, Path newPath, W nextWriter) throws IOException {
Preconditions.checkNotNull(nextWriter);
waitForSafePoint();
/**
* For {@link FSHLog}, this would shut down the {@link FSHLog.SyncRunner}.
*/
doCleanUpResources();
// we will call rollWriter in init method, where we want to create the first writer and
// obviously the previous writer is null, so here we need this null check. And why we must call
// logRollAndSetupWalProps before closeWriter is that, we will call markClosedAndClean after
// closing the writer asynchronously, we need to make sure the WALProps is put into
// walFile2Props before we call markClosedAndClean
if (writer != null) {
long oldFileLen = writer.getLength();
logRollAndSetupWalProps(oldPath, newPath, oldFileLen);
closeWriter(writer, oldPath);
} else {
logRollAndSetupWalProps(oldPath, newPath, 0);
}
this.writer = nextWriter;
/**
* This is used for {@link AsyncFSWAL} and {@link FSHLog} to set the underlying filesystem
* output after the writer is replaced.
*/
onWriterReplaced(nextWriter);
this.fileLengthAtLastSync = nextWriter.getLength();
this.highestProcessedAppendTxidAtLastSync = 0L;
consumeLock.lock();
try {
consumerScheduled.set(true);
int currentEpoch = epochAndState >>> 2;
// set a new epoch and also clear waitingRoll and writerBroken
int nextEpoch = (currentEpoch == MAX_EPOCH) ? 0 : currentEpoch + 1;
this.epochAndState = nextEpoch << 2;
// Reset rollRequested status
f3.set(false);
consumeExecutor.execute(consumer);
} finally {
consumeLock.unlock();
}
} | 3.26 |
hbase_AbstractFSWAL_skipRemoteWAL_rdh | // close marker.
// Setting markerEdit only to true is for transiting from A to S, where we need to give up writing
// any pending wal entries as they will be discarded. The remote cluster will replicate the
// correct data back later. We still need to allow writing marker edits such as close region event
// to allow closing a region.
@Override
public void skipRemoteWAL(boolean markerEditOnly) {
if (markerEditOnly) {
this.markerEditOnly = true;
}
this.skipRemoteWAL = true;
} | 3.26 |
hbase_AbstractFSWAL_getNumLogFiles_rdh | // public only until class moves to o.a.h.h.wal
/**
* Returns the number of log files in use
*/
public int getNumLogFiles() {
// +1 for current use log
return getNumRolledLogFiles() + 1;
} | 3.26 |
hbase_AbstractFSWAL_computeFilename_rdh | /**
* This is a convenience method that computes a new filename with a given file-number.
*
* @param filenum
* to use
*/
protected Path computeFilename(final long filenum) {
if (filenum < 0) {
throw new RuntimeException("WAL file number can't be < 0");
}
String child = ((walFilePrefix + WAL_FILE_NAME_DELIMITER) + filenum) + walFileSuffix;
return new Path(walDir, child);
} | 3.26 |
hbase_AbstractFSWAL_getWALArchivePath_rdh | /* only public so WALSplitter can use.
@return archived location of a WAL file with the given path p
*/
public static Path getWALArchivePath(Path archiveDir, Path p) {
return new Path(archiveDir, p.getName());
} | 3.26 |
hbase_AbstractFSWAL_replaceWriter_rdh | /**
* Cleans up current writer closing it and then puts in place the passed in {@code nextWriter}.
* <p/>
* <ul>
* <li>In the case of creating a new WAL, oldPath will be null.</li>
* <li>In the case of rolling over from one file to the next, none of the parameters will be null.
* </li>
* <li>In the case of closing out this FSHLog with no further use newPath and nextWriter will be
* null.</li>
* </ul>
*
* @param oldPath
* may be null
* @param newPath
* may be null
* @param nextWriter
* may be null
* @return the passed in <code>newPath</code>
* @throws IOException
* if there is a problem flushing or closing the underlying FS
*/
Path replaceWriter(Path oldPath, Path newPath, W nextWriter) throws IOException {
return TraceUtil.trace(() -> {
doReplaceWriter(oldPath, newPath, nextWriter);
return newPath;
}, () -> createSpan("WAL.replaceWriter"));
} | 3.26 |
hbase_AbstractFSWAL_getLastTxid_rdh | // confirm non-empty before calling
private static long getLastTxid(Deque<FSWALEntry> queue) {
return queue.peekLast().getTxid();
} | 3.26 |
hbase_AbstractFSWAL_doCheckSlowSync_rdh | /**
* Returns true if we exceeded the slow sync roll threshold over the last check interval
*/
protected boolean doCheckSlowSync() {
boolean result = false;
long now = EnvironmentEdgeManager.currentTime();
long v142 = now - lastTimeCheckSlowSync;
if (v142 >= slowSyncCheckInterval) {
if (slowSyncCount.get() >= slowSyncRollThreshold) {
if (v142 >= (2 * slowSyncCheckInterval)) {
// If two or more slowSyncCheckInterval have elapsed this is a corner case
// where a train of slow syncs almost triggered us but then there was a long
// interval from then until the one more that pushed us over. If so, we
// should do nothing and let the count reset.
if (f0.isDebugEnabled()) {
f0.debug((((((((("checkSlowSync triggered but we decided to ignore it; " + "count=") + slowSyncCount.get()) + ", threshold=") + slowSyncRollThreshold) + ", elapsedTime=") + v142) + " ms, slowSyncCheckInterval=") + slowSyncCheckInterval) + " ms");
}
// Fall through to count reset below
} else {
f0.warn((((("Requesting log roll because we exceeded slow sync threshold; count=" + slowSyncCount.get()) + ", threshold=") + slowSyncRollThreshold) + ", current pipeline: ") + Arrays.toString(getPipeline()));
result = true;
}
}
lastTimeCheckSlowSync = now;
slowSyncCount.set(0);
}
return result;
} | 3.26 |
hbase_AbstractFSWAL_getPreallocatedEventCount_rdh | // must be power of 2
protected final int getPreallocatedEventCount() {
// Preallocate objects to use on the ring buffer. The way that appends and syncs work, we will
// be stuck and make no progress if the buffer is filled with appends only and there is no
// sync. If no sync, then the handlers will be outstanding just waiting on sync completion
// before they return.
int preallocatedEventCount = this.conf.getInt(RING_BUFFER_SLOT_COUNT, 1024 * 16);
checkArgument(preallocatedEventCount >= 0, RING_BUFFER_SLOT_COUNT + " must be >= 0");
int floor = Integer.highestOneBit(preallocatedEventCount);
if (floor == preallocatedEventCount) {
return floor;
}
// max capacity is 1 << 30
if (floor >= (1 << 29)) {
return 1 << 30;
}
return floor << 1;
} | 3.26 |
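The rounding above relies on `Integer.highestOneBit`. A quick standalone check of the same round-up-to-power-of-two behavior (extracted for illustration; not the class itself):

```java
public class RingSizeDemo {
  static int roundToPowerOfTwo(int requested) {
    int floor = Integer.highestOneBit(requested);
    if (floor == requested) {
      return floor;       // already a power of two
    }
    if (floor >= (1 << 29)) {
      return 1 << 30;     // cap at the maximum ring buffer capacity
    }
    return floor << 1;    // round up to the next power of two
  }

  public static void main(String[] args) {
    System.out.println(roundToPowerOfTwo(16384));   // 16384
    System.out.println(roundToPowerOfTwo(16385));   // 32768
    System.out.println(roundToPowerOfTwo(1 << 30)); // 1073741824
  }
}
```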
hbase_AbstractFSWAL_getLogFileSize_rdh | // public only until class moves to o.a.h.h.wal
/**
* Returns the size of log files in use
*/
public long getLogFileSize() {
return this.totalLogSize.get();
} | 3.26 |
hbase_AbstractFSWAL_getFiles_rdh | /**
* Get the backing files associated with this WAL.
*
* @return may be null if there are no files.
*/
FileStatus[] getFiles() throws IOException {
return CommonFSUtils.listStatus(fs, walDir, ourFiles);
} | 3.26 |
hbase_AbstractFSWAL_finishSync_rdh | // try advancing the highestSyncedTxid as much as possible
private int finishSync() {
if (unackedAppends.isEmpty()) {
// All outstanding appends have been acked.
if (toWriteAppends.isEmpty()) {
// Also no appends that wait to be written out, then just finished all pending syncs.
long v98 = highestSyncedTxid.get();
for (SyncFuture sync : syncFutures) {
v98 = Math.max(v98, sync.getTxid());
markFutureDoneAndOffer(sync, v98, null);
}
highestSyncedTxid.set(v98);
int finished = syncFutures.size();
syncFutures.clear();
return finished;
} else {
// There is no append between highestProcessedAppendTxid and lowestUnprocessedAppendTxid, so
// if highestSyncedTxid >= highestProcessedAppendTxid, then all syncs whose txid are between
// highestProcessedAppendTxid and lowestUnprocessedAppendTxid can be finished.
long lowestUnprocessedAppendTxid = toWriteAppends.peek().getTxid();
assert lowestUnprocessedAppendTxid > highestProcessedAppendTxid;
long doneTxid = lowestUnprocessedAppendTxid - 1;
highestSyncedTxid.set(doneTxid);
return finishSyncLowerThanTxid(doneTxid);
}
} else {
// There are still unacked appends. So let's move the highestSyncedTxid to the txid of the
// first unacked append minus 1.
long lowestUnackedAppendTxid = unackedAppends.peek().getTxid();
long doneTxid = Math.max(lowestUnackedAppendTxid - 1, highestSyncedTxid.get());
highestSyncedTxid.set(doneTxid);
return finishSyncLowerThanTxid(doneTxid);
}
} | 3.26 |
hbase_AbstractFSWAL_isHsync_rdh | // find all the sync futures between these two txids to see if we need to issue a hsync, if no
// sync futures then just use the default one.
private boolean isHsync(long beginTxid, long endTxid) {
SortedSet<SyncFuture> futures = syncFutures.subSet(new SyncFuture().reset(beginTxid, false), new SyncFuture().reset(endTxid + 1, false));
if (futures.isEmpty()) {
return useHsync;
}
for (SyncFuture future : futures) {
if (future.isForceSync()) {
return true;
}
}
return false;
} | 3.26 |
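The range query above relies on `NavigableSet.subSet` with an inclusive lower bound and an exclusive `endTxid + 1` upper bound. A minimal sketch of the same semantics over a plain sorted set of txids:

```java
import java.util.TreeSet;

public class SubSetDemo {
  public static void main(String[] args) {
    TreeSet<Long> txids = new TreeSet<>();
    for (long t : new long[] {3, 7, 10, 15, 22}) {
      txids.add(t);
    }
    // Inclusive range [7, 15]: fromElement inclusive, toElement + 1 exclusive,
    // mirroring the reset(beginTxid) / reset(endTxid + 1) bounds in isHsync.
    System.out.println(txids.subSet(7L, 16L)); // [7, 10, 15]
  }
}
```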
hbase_AbstractFSWAL_getCurrentFileName_rdh | /**
* This is a convenience method that computes a new filename using the current WAL file-number
*/
public Path getCurrentFileName() {
return computeFilename(this.filenum.get());
} | 3.26 |
hbase_AbstractFSWAL_tellListenersAboutPreLogRoll_rdh | /**
* Tell listeners about pre log roll.
*/
private void tellListenersAboutPreLogRoll(final Path oldPath, final Path newPath) throws IOException {
coprocessorHost.preWALRoll(oldPath, newPath);
if (!this.listeners.isEmpty()) {
for (WALActionsListener i : this.listeners) {
i.preLogRoll(oldPath, newPath);
}
}
} | 3.26 |
hbase_AbstractFSWAL_getNewPath_rdh | /**
* retrieve the next path to use for writing. Increments the internal filenum.
*/
private Path getNewPath() throws IOException {
this.filenum.set(Math.max(m0() + 1, EnvironmentEdgeManager.currentTime()));
Path newPath = getCurrentFileName();
return newPath;
} | 3.26 |
hbase_AbstractFSWAL_getNumRolledLogFiles_rdh | // public only until class moves to o.a.h.h.wal
/**
* Returns the number of rolled log files
*/
public int getNumRolledLogFiles() {
return walFile2Props.size();
} | 3.26 |
hbase_AbstractFSWAL_getLogFileSizeIfBeingWritten_rdh | /**
* if the given {@code path} is being written currently, then return its length.
* <p>
* This is used by replication to prevent replicating unacked log entries. See
* https://issues.apache.org/jira/browse/HBASE-14004 for more details.
*/
@Override
public OptionalLong getLogFileSizeIfBeingWritten(Path path) {
rollWriterLock.lock();
try {
Path currentPath = getOldPath();
if (path.equals(currentPath)) {
// Currently active path.
W v80 = this.writer;
return v80 != null ? OptionalLong.of(v80.getSyncedLength()) : OptionalLong.empty();
} else {
W temp = inflightWALClosures.get(path.getName());
if (temp != null) {
// In the process of being closed, trailer bytes may or may not be flushed.
// Ensuring that we read all the bytes in a file is critical for correctness of tailing
// use cases like replication, see HBASE-25924/HBASE-25932.
return OptionalLong.of(temp.getSyncedLength());
}
// Log rolled successfully.
return OptionalLong.empty();
}
} finally {
rollWriterLock.unlock();
}
} | 3.26 |
hbase_AbstractFSWAL_markFutureDoneAndOffer_rdh | /**
* Helper that marks the future as DONE and offers it back to the cache.
*/
protected void markFutureDoneAndOffer(SyncFuture future, long txid, Throwable t) {
future.done(txid, t);
syncFutureCache.offer(future);
} | 3.26 |
hbase_StoreFileInfo_computeRefFileHDFSBlockDistribution_rdh | /**
* helper function to compute HDFS blocks distribution of a given reference file. For a reference
* file, we don't compute the exact value; we use an estimate instead, given it might be good
* enough. We assume the bottom part takes the first half of the reference file and the top part
* takes the second half of the reference file. This is just an estimate, given midkey of region
* != midkey of HFile, and the number and size of keys vary. If this estimate isn't good enough,
* we can improve it later.
*
* @param fs
* The FileSystem
* @param reference
* The reference
* @param status
* The reference FileStatus
* @return HDFS blocks distribution
*/
private static HDFSBlocksDistribution computeRefFileHDFSBlockDistribution(final FileSystem fs, final Reference reference, final FileStatus status) throws IOException {
if (status == null) {
return null;
}
long start = 0;
long length = 0;
if (Reference.isTopFileRegion(reference.getFileRegion())) {
start = status.getLen() / 2;
length = status.getLen() - (status.getLen() / 2);
} else {
start = 0;
length = status.getLen() / 2;
}
return FSUtils.computeHDFSBlocksDistribution(fs, status, start, length);
} | 3.26 |
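The estimate simply assigns the first half of the file to the bottom reference and the second half to the top. A tiny standalone sketch of that split, assuming only a file length is known:

```java
public class RefSplitDemo {
  public static void main(String[] args) {
    long fileLen = 101;  // odd length, to show how the halves divide
    boolean top = true;  // top reference gets the second half
    long start = top ? fileLen / 2 : 0;
    long length = top ? fileLen - (fileLen / 2) : fileLen / 2;
    System.out.println("start=" + start + " length=" + length); // start=50 length=51
  }
}
```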
hbase_StoreFileInfo_getActiveFileName_rdh | /**
* Return the active file name that contains the real data.
* <p>
* For referenced hfile, we will return the name of the reference file as it will be used to
* construct the StoreFileReader. And for linked hfile, we will return the name of the file being
* linked.
*/
public String getActiveFileName() {
if ((reference != null) || (link == null)) {
return initialPath.getName();
} else {
return HFileLink.getReferencedHFileName(initialPath.getName());
}
} | 3.26 |
hbase_StoreFileInfo_isMobFile_rdh | /**
* Checks if the file is a MOB file
*
* @param path
* path to a file
* @return true if it is, false otherwise
*/
public static boolean isMobFile(final Path path) {
String fileName = path.getName();
String[] parts = fileName.split(MobUtils.SEP);
if (parts.length != 2) {
return false;
}
Matcher m = HFILE_NAME_PATTERN.matcher(parts[0]);
Matcher mm = HFILE_NAME_PATTERN.matcher(parts[1]);
return m.matches() && mm.matches();
} | 3.26 |
hbase_StoreFileInfo_isValid_rdh | /**
* Return if the specified file is a valid store file or not.
*
* @param fileStatus
* The {@link FileStatus} of the file
* @return <tt>true</tt> if the file is valid
*/
public static boolean isValid(final FileStatus fileStatus) throws IOException {
final Path p = fileStatus.getPath();
if (fileStatus.isDirectory()) {
return false;
}
// Check for empty hfile. Should never be the case but can happen
// after data loss in hdfs for whatever reason (upgrade, etc.): HBASE-646
// NOTE: that the HFileLink is just a name, so it's an empty file.
if ((!HFileLink.isHFileLink(p)) && (fileStatus.getLen() <= 0)) {
LOG.warn("Skipping {} because it is empty. HBASE-646 DATA LOSS?", p);
return false;
}
return validateStoreFileName(p.getName());
} | 3.26 |
hbase_StoreFileInfo_getReferredToRegionAndFile_rdh | /* Return region and file name referred to by a Reference.
@param referenceFile HFile name which is a Reference.
@return Calculated referenced region and file name.
@throws IllegalArgumentException when referenceFile regex fails to match.
*/
public static Pair<String, String> getReferredToRegionAndFile(final String referenceFile) {
Matcher m = REF_NAME_PATTERN.matcher(referenceFile);
if ((m == null) || (!m.matches())) {
LOG.warn("Failed match of store file name {}", referenceFile);
throw new IllegalArgumentException("Failed match of store file name " + referenceFile);
}
String referencedRegion = m.group(2);
String referencedFile = m.group(1);
LOG.trace("reference {} to region={} file={}", referenceFile, referencedRegion, referencedFile);
return new Pair<>(referencedRegion, referencedFile);
} | 3.26 |
hbase_StoreFileInfo_isTopReference_rdh | /**
* Returns True if the store file is a top Reference
*/
public boolean isTopReference() {
return (this.reference != null) && Reference.isTopFileRegion(this.reference.getFileRegion());
} | 3.26 |
hbase_StoreFileInfo_getFileStatus_rdh | /**
* Returns The {@link FileStatus} of the file
*/
public FileStatus getFileStatus() throws IOException {
return getReferencedFileStatus(fs);
} | 3.26 |
hbase_StoreFileInfo_getSize_rdh | /**
* Size of the HFile
*/
public long getSize() {
return size;
} | 3.26 |
hbase_StoreFileInfo_isReference_rdh | /**
*
* @param name
* file name to check.
* @return True if the path has format of a HStoreFile reference.
*/
public static boolean isReference(final String name) {
Matcher m = REF_NAME_PATTERN.matcher(name);
return m.matches() && (m.groupCount() > 1);
} | 3.26 |
hbase_StoreFileInfo_getPath_rdh | /**
* Returns The {@link Path} of the file
*/
public Path getPath() {
return initialPath;
} | 3.26 |
hbase_StoreFileInfo_isLink_rdh | /**
* Returns True if the store file is a link
*/
public boolean isLink() {
return (this.link != null) && (this.reference == null);
} | 3.26 |
hbase_StoreFileInfo_getHDFSBlockDistribution_rdh | /**
* Returns the HDFS block distribution
*/
public HDFSBlocksDistribution getHDFSBlockDistribution() {
return this.hdfsBlocksDistribution;
} | 3.26 |
hbase_StoreFileInfo_computeHDFSBlocksDistribution_rdh | /**
* Compute the HDFS Block Distribution for this StoreFile
*/
public HDFSBlocksDistribution computeHDFSBlocksDistribution(final FileSystem fs) throws IOException {
// guard against the case where we get the FileStatus from link, but by the time we
// call compute the file is moved again
if (this.link != null) {
FileNotFoundException exToThrow = null;
for (int i = 0; i < this.link.getLocations().length; i++) {
try {
return computeHDFSBlocksDistributionInternal(fs);
} catch (FileNotFoundException ex) {
// try the other location
exToThrow = ex;
}
}
throw exToThrow;
} else {
return computeHDFSBlocksDistributionInternal(fs);
}
} | 3.26 |
hbase_StoreFileInfo_setRegionCoprocessorHost_rdh | /**
* Sets the region coprocessor host.
*/
public void setRegionCoprocessorHost(RegionCoprocessorHost coprocessorHost) {
this.coprocessorHost = coprocessorHost;
} | 3.26 |
hbase_StoreFileInfo_isMobRefFile_rdh | /**
* Checks if the file is a MOB reference file, created by snapshot
*
* @param path
* path to a file
* @return true if it is, false otherwise
*/
public static boolean isMobRefFile(final Path path) {
String fileName = path.getName();
int lastIndex = fileName.lastIndexOf(MobUtils.SEP);
if (lastIndex < 0) {
return false;
}
String[] parts = new String[2];
parts[0] = fileName.substring(0, lastIndex);
parts[1] = fileName.substring(lastIndex + 1);
String name = (parts[0] + ".") + parts[1];
Matcher m = REF_NAME_PATTERN.matcher(name);
return m.matches() && (m.groupCount() > 1);
} | 3.26 |
hbase_StoreFileInfo_getReferencedFileStatus_rdh | /**
* Get the {@link FileStatus} of the file referenced by this StoreFileInfo
*
* @param fs
* The current file system to use.
* @return The {@link FileStatus} of the file referenced by this StoreFileInfo
*/
public FileStatus getReferencedFileStatus(final FileSystem fs) throws IOException {
FileStatus status;
if (this.reference != null) {
if (this.link != null) {
FileNotFoundException exToThrow = null;
for (int i = 0; i < this.link.getLocations().length; i++) {
// HFileLink Reference
try {
return link.getFileStatus(fs);
} catch (FileNotFoundException ex) {
// try the other location
exToThrow = ex;
}
}
throw exToThrow;
} else {
// HFile Reference
Path referencePath = getReferredToFile(this.getPath());
status = fs.getFileStatus(referencePath);
}
} else if (this.link != null) {
FileNotFoundException exToThrow = null;
for (int i = 0; i < this.link.getLocations().length; i++) {
// HFileLink
try {
return link.getFileStatus(fs);
} catch (FileNotFoundException ex) {
// try the other location
exToThrow = ex;
}
}
throw exToThrow;
} else {
status = fs.getFileStatus(initialPath);
}
return status;
} | 3.26 |
hbase_StoreFileInfo_validateStoreFileName_rdh | /**
* Validate the store file name.
*
* @param fileName
* name of the file to validate
* @return <tt>true</tt> if the file could be a valid store file, <tt>false</tt> otherwise
*/
public static boolean validateStoreFileName(final String fileName) {
if (HFileLink.isHFileLink(fileName) || isReference(fileName)) {
return true;
}
return !fileName.contains("-");
} | 3.26 |
hbase_StoreFileInfo_m0_rdh | /**
*
* @param path
* Path to check.
* @return True if the path has format of a HFile.
*/
public static boolean m0(final Path path) {
return m0(path.getName());
} | 3.26 |
hbase_StoreFileInfo_getModificationTime_rdh | /**
* Returns the modification time of the file.
*/
public long getModificationTime() throws IOException {
return getFileStatus().getModificationTime();
} | 3.26 |
hbase_StoreFileInfo_getCreatedTimestamp_rdh | /**
* Returns timestamp when this file was created (as returned by filesystem)
*/
public long getCreatedTimestamp() {
return createdTimestamp;
}
/* Return path to the file referred to by a Reference. Presumes a directory hierarchy of
<code>${hbase.rootdir}/data/${namespace} | 3.26 |
hbase_RecoveredEditsOutputSink_closeWriters_rdh | /**
* Close all of the output streams.
*
* @return true when there is no error.
*/
private boolean closeWriters() throws IOException {
List<IOException> thrown = Lists.newArrayList();
for (RecoveredEditsWriter writer : writers.values()) {
closeCompletionService.submit(() -> {
Path dst = closeRecoveredEditsWriter(writer, thrown);
LOG.trace("Closed {}", dst);
splits.add(dst);
return null;
});
}
boolean progressFailed = false;
try {
for (int i = 0, n = this.writers.size(); i < n; i++) {
Future<Void> future = closeCompletionService.take();
future.get();
if (((!progressFailed) && (reporter != null)) && (!reporter.progress())) {
progressFailed = true;
}
}
} catch (InterruptedException e) {
IOException iie = new InterruptedIOException();
iie.initCause(e);
throw iie;
} catch (ExecutionException e) {
throw new IOException(e.getCause());
} finally {
closeThreadPool.shutdownNow();
}
if (!thrown.isEmpty()) {
throw MultipleIOException.createIOException(thrown);
}
return !progressFailed;
} | 3.26 |
hbase_RecoveredEditsOutputSink_getRecoveredEditsWriter_rdh | /**
* Get a writer and path for a log starting at the given entry. This function is threadsafe so
* long as multiple threads are always acting on different regions.
*
* @return null if this region shouldn't output any logs
*/
private RecoveredEditsWriter getRecoveredEditsWriter(TableName tableName, byte[] region, long seqId) throws IOException {
RecoveredEditsWriter ret = writers.get(Bytes.toString(region));
if (ret != null) {
return ret;
}
ret = createRecoveredEditsWriter(tableName, region, seqId);
if (ret == null) {
return null;
}
LOG.trace("Created {}", ret.path);
writers.put(Bytes.toString(region), ret);
return ret;
} | 3.26 |
hbase_ClientUtil_calculateTheClosestNextRowKeyForPrefix_rdh | /**
* <p>
* When scanning for a prefix the scan should stop immediately after the last row that has the
* specified prefix. This method calculates the closest next rowKey immediately following the
* given rowKeyPrefix.
* </p>
* <p>
* <b>IMPORTANT: This converts a rowKey<u>Prefix</u> into a rowKey</b>.
* </p>
* <p>
* If the prefix is an 'ASCII' string put into a byte[] then this is easy because you can simply
* increment the last byte of the array. But if your application uses real binary rowids you may
* run into the scenario that your prefix is something like:
* </p>
* <b>{ 0x12, 0x23, 0xFF, 0xFF }</b><br/>
* Then this stopRow needs to be fed into the actual scan<br/>
* <b>{ 0x12, 0x24 }</b> (Notice that it is shorter now)<br/>
* This method calculates the correct stop row value for this usecase.
*
* @param rowKeyPrefix
* the rowKey<u>Prefix</u>.
* @return the closest next rowKey immediately following the given rowKeyPrefix.
*/
public static byte[] calculateTheClosestNextRowKeyForPrefix(byte[] rowKeyPrefix) {
// Essentially we are treating it like an 'unsigned very very long' and doing +1 manually.
// Search for the place where the trailing 0xFFs start
int offset = rowKeyPrefix.length;
while (offset > 0) {
if (rowKeyPrefix[offset - 1] != ((byte) (0xff))) {
break;
}
offset--;
}
if (offset == 0) {
// We got an 0xFFFF... (only FFs) stopRow value which is
// the last possible prefix before the end of the table.
// So set it to stop at the 'end of the table'
return HConstants.EMPTY_END_ROW;
}
// Copy the right length of the original
byte[] newStopRow = Arrays.copyOfRange(rowKeyPrefix, 0, offset);
// And increment the last one
newStopRow[newStopRow.length - 1]++;
return newStopRow;
} | 3.26 |
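A quick standalone check of the prefix-to-stop-row logic, using the javadoc's own `{ 0x12, 0x23, 0xFF, 0xFF }` example (an empty array stands in for `HConstants.EMPTY_END_ROW`):

```java
import java.util.Arrays;

public class StopRowDemo {
  static byte[] nextRowForPrefix(byte[] prefix) {
    int offset = prefix.length;
    while (offset > 0 && prefix[offset - 1] == (byte) 0xFF) {
      offset--; // skip trailing 0xFF bytes
    }
    if (offset == 0) {
      return new byte[0]; // all 0xFF: scan to the end of the table
    }
    byte[] stopRow = Arrays.copyOfRange(prefix, 0, offset);
    stopRow[stopRow.length - 1]++; // increment the last non-0xFF byte
    return stopRow;
  }

  public static void main(String[] args) {
    byte[] prefix = {0x12, 0x23, (byte) 0xFF, (byte) 0xFF};
    System.out.println(Arrays.toString(nextRowForPrefix(prefix))); // [18, 36] == {0x12, 0x24}
  }
}
```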
hbase_CompactSplit_getRegionSplitLimit_rdh | /**
* Returns the regionSplitLimit
*/
public int getRegionSplitLimit() {
return this.regionSplitLimit;
} | 3.26 |
hbase_CompactSplit_deregisterChildren_rdh | /**
* {@inheritDoc }
*/
@Override
public void deregisterChildren(ConfigurationManager manager) {
// No children to register
} | 3.26 |
hbase_CompactSplit_getCompactionQueueSize_rdh | /**
* Returns the current size of the queue containing regions that are processed.
*
* @return The current size of the regions queue.
*/
public int getCompactionQueueSize() {
return longCompactions.getQueue().size() + shortCompactions.getQueue().size();
} | 3.26 |
hbase_CompactSplit_m0_rdh | /**
* Returns the shortCompactions thread pool executor
*/
ThreadPoolExecutor m0() {
return shortCompactions;
} | 3.26 |
hbase_CompactSplit_registerChildren_rdh | /**
* {@inheritDoc }
*/
@Override
public void registerChildren(ConfigurationManager manager) {
// No children to register.
} | 3.26 |