name (stringlengths 12–178) | code_snippet (stringlengths 8–36.5k) | score (float64 3.26–3.68)
---|---|---|
hbase_KeyValue_getFamilyLength_rdh | /**
* Returns Family length
*/
@Override
public byte getFamilyLength() {
return m2(getFamilyLengthPosition(getRowLength()));
} | 3.26 |
hbase_KeyValue_matchingRows_rdh | /**
* Compare rows. Just calls Bytes.equals, but it's good to have this encapsulated.
*
* @param left
* Left row array.
* @param loffset
* Left row offset.
* @param llength
* Left row length.
* @param right
* Right row array.
* @param roffset
* Right row offset.
* @param rlength
* Right row length.
* @return Whether rows are the same row.
*/
public boolean matchingRows(final byte[] left, final int loffset, final int llength, final byte[] right, final int roffset, final int rlength) {
return Bytes.equals(left, loffset, llength, right, roffset, rlength);
} | 3.26 |
hbase_KeyValue_compareRowKey_rdh | /**
* Compares only the user-specified portion of a Key. This is overridden by MetaComparator.
*
* @param left
* left cell to compare row key
* @param right
* right cell to compare row key
* @return 0 if equal, <0 if left smaller, >0 if right smaller
*/
protected int compareRowKey(final Cell left, final Cell right) {
return CellComparatorImpl.COMPARATOR.compareRows(left, right);
} | 3.26 |
hbase_KeyValue_getKey_rdh | // ---------------------------------------------------------------------------
//
// Methods that return copies of fields
//
// ---------------------------------------------------------------------------
/**
* Do not use unless you have to. Used internally for compacting and testing. Use
* {@link #getRowArray()}, {@link #getFamilyArray()}, {@link #getQualifierArray()}, and
* {@link #getValueArray()} if accessing a KeyValue client-side.
*
* @return Copy of the key portion only.
*/
public byte[] getKey() {
int keylength = getKeyLength();
byte[] key = new byte[keylength];
System.arraycopy(getBuffer(), getKeyOffset(), key, 0, keylength);
return key;
} | 3.26 |
hbase_KeyValue_getTagsLength_rdh | /**
* Return the total length of the tag bytes
*/
@Override
public int getTagsLength() {
int tagsLen = this.f2 - ((getKeyLength() + getValueLength()) + KEYVALUE_INFRASTRUCTURE_SIZE);
if (tagsLen > 0) {
// There are some tag bytes in the byte[], so subtract the 2 bytes used to store the
// tags length.
tagsLen -= TAGS_LENGTH_SIZE;
}
return tagsLen;
} | 3.26 |
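A hedged worked example of the arithmetic above (assuming 8 bytes of key/value length prefixes for KEYVALUE_INFRASTRUCTURE_SIZE and a 2-byte TAGS_LENGTH_SIZE):

```java
int total = 138, keyLen = 20, valueLen = 100; // sizes of one serialized cell
int tagsLen = total - (keyLen + valueLen + 8); // 10 bytes left after key, value, prefixes
if (tagsLen > 0) {
  tagsLen -= 2; // 2 of them store the tags length itself
}
// tagsLen == 8 bytes of actual tag data
```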
hbase_KeyValue_getFamilyOffset_rdh | /**
* Returns Family offset
*/
int getFamilyOffset(int familyLenPosition) {
return familyLenPosition + Bytes.SIZEOF_BYTE;
} | 3.26 |
hbase_KeyValue_heapSize_rdh | /**
* HeapSize implementation
* <p/>
* We do not count the bytes in the rowCache because it should be empty for a KeyValue in the
* MemStore.
*/
@Override
public long heapSize() {
// Deep object overhead for this KV consists of two parts. The first part is the KV object
// itself, while the second part is the backing byte[]. We only count the array overhead
// from the byte[] if this is the first KV in it.
int fixed = ClassSize.align(FIXED_OVERHEAD);
if (offset == 0) {
// count both length and object overhead
return fixed + ClassSize.sizeOfByteArray(f2);
} else {
// only count the number of bytes
return ((long) (fixed)) + f2;
}
} | 3.26 |
hbase_KeyValue_getShortMidpointKey_rdh | /**
* This is an HFile block index key optimization.
*
* @param leftKey
* byte array for left Key
* @param rightKey
* byte array for right Key
* @return 0 if equal, <0 if left smaller, >0 if right smaller
* @deprecated Since 0.99.2;
*/
@Deprecated
public byte[] getShortMidpointKey(final byte[] leftKey, final byte[] rightKey) {
if (rightKey == null) {
throw new IllegalArgumentException("rightKey can not be null");
}
if (leftKey == null) {
return Arrays.copyOf(rightKey, rightKey.length);
}
if (compareFlatKey(leftKey, rightKey) >= 0) {
throw new IllegalArgumentException((("Unexpected input, leftKey:" + Bytes.toString(leftKey)) + ", rightKey:") + Bytes.toString(rightKey));
}
short leftRowLength = Bytes.toShort(leftKey, 0);
short rightRowLength = Bytes.toShort(rightKey, 0);
int leftCommonLength = (ROW_LENGTH_SIZE + FAMILY_LENGTH_SIZE) + leftRowLength;
int rightCommonLength = (ROW_LENGTH_SIZE + FAMILY_LENGTH_SIZE) + rightRowLength;
int leftCommonLengthWithTSAndType = TIMESTAMP_TYPE_SIZE + leftCommonLength;
int rightCommonLengthWithTSAndType = TIMESTAMP_TYPE_SIZE + rightCommonLength;
int leftColumnLength = leftKey.length - leftCommonLengthWithTSAndType;
int rightColumnLength = rightKey.length - rightCommonLengthWithTSAndType;
// rows are equal
if ((leftRowLength == rightRowLength) && (compareRows(leftKey, ROW_LENGTH_SIZE, leftRowLength, rightKey, ROW_LENGTH_SIZE, rightRowLength) == 0)) {
// Compare family & qualifier together.
int comparison = Bytes.compareTo(leftKey, leftCommonLength, leftColumnLength, rightKey, rightCommonLength, rightColumnLength);
// same with "row + family + qualifier", return rightKey directly
if (comparison == 0) {
return Arrays.copyOf(rightKey, rightKey.length);
}
// "family + qualifier" are different, generate a faked key per rightKey
byte[] newKey = Arrays.copyOf(rightKey, rightKey.length);
Bytes.putLong(newKey, rightKey.length - TIMESTAMP_TYPE_SIZE, HConstants.LATEST_TIMESTAMP);
Bytes.putByte(newKey, rightKey.length - TYPE_SIZE, Type.Maximum.getCode());
return newKey;
}
// rows are different
short minLength = (leftRowLength < rightRowLength) ? leftRowLength : rightRowLength;
short v123 = 0;
while ((v123 < minLength) && (leftKey[ROW_LENGTH_SIZE + v123] == rightKey[ROW_LENGTH_SIZE + v123])) {
v123++;
}
byte[] newRowKey = null;
if (v123 >= minLength) {
// leftKey's row is prefix of rightKey's.
newRowKey = new byte[v123 + 1];
System.arraycopy(rightKey, ROW_LENGTH_SIZE, newRowKey, 0, v123 + 1);
} else {
int diffByte = leftKey[ROW_LENGTH_SIZE + v123];
if (((0xff & diffByte) < 0xff) && ((diffByte + 1) < (rightKey[ROW_LENGTH_SIZE + v123] & 0xff))) {
newRowKey = new byte[v123 + 1];
System.arraycopy(leftKey, ROW_LENGTH_SIZE, newRowKey, 0, v123);
newRowKey[v123] = ((byte) (diffByte + 1));
} else {
newRowKey = new byte[v123 + 1];
System.arraycopy(rightKey, ROW_LENGTH_SIZE, newRowKey, 0, v123 + 1);
}
}
return new KeyValue(newRowKey, null, null, HConstants.LATEST_TIMESTAMP, Type.Maximum).getKey();
} | 3.26 |
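The rows-are-different branch above is a classic "shortest separator" computation. A minimal standalone sketch of the same idea on raw row byte arrays (name and shape hypothetical; assumes leftRow sorts strictly before rightRow, as the method enforces via compareFlatKey):

```java
static byte[] shortMidpointRow(byte[] leftRow, byte[] rightRow) {
  int min = Math.min(leftRow.length, rightRow.length);
  int i = 0;
  while (i < min && leftRow[i] == rightRow[i]) {
    i++; // length of the common prefix
  }
  byte[] mid = new byte[i + 1];
  if (i >= min) {
    // leftRow is a strict prefix of rightRow: keep rightRow's prefix plus one byte.
    System.arraycopy(rightRow, 0, mid, 0, i + 1);
  } else {
    int diff = leftRow[i] & 0xff;
    if (diff < 0xff && diff + 1 < (rightRow[i] & 0xff)) {
      // Room to bump the first differing byte: copy leftRow's prefix and add one.
      System.arraycopy(leftRow, 0, mid, 0, i);
      mid[i] = (byte) (diff + 1);
    } else {
      // No room between the two bytes; fall back to rightRow's prefix.
      System.arraycopy(rightRow, 0, mid, 0, i + 1);
    }
  }
  return mid;
}
```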
hbase_KeyValue_getTagsArray_rdh | /**
* Returns the backing array of the entire KeyValue (all KeyValue fields are in a single array)
*/
@Override
public byte[] getTagsArray() {
return bytes;
} | 3.26 |
hbase_KeyValue_keyToString_rdh | /**
* Use for logging.
*
* @param b
* Key portion of a KeyValue.
* @param o
* Offset to start of key
* @param l
* Length of key.
* @return Key as a String.
*/
public static String keyToString(final byte[] b, final int o, final int l) {
if (b == null) {
return "";
}
int rowlength = Bytes.toShort(b, o);
String row = Bytes.toStringBinary(b, o + Bytes.SIZEOF_SHORT, rowlength);
int columnoffset = ((o + Bytes.SIZEOF_SHORT) + 1) + rowlength;
int familylength = b[columnoffset - 1];
int columnlength = l - ((columnoffset - o) + TIMESTAMP_TYPE_SIZE);
String family = (familylength == 0) ? "" : Bytes.toStringBinary(b, columnoffset, familylength);
String qualifier = (columnlength == 0) ? "" : Bytes.toStringBinary(b, columnoffset + familylength, columnlength - familylength);
long timestamp = Bytes.toLong(b, o + (l - TIMESTAMP_TYPE_SIZE));
String timestampStr = humanReadableTimestamp(timestamp);
byte type = b[(o + l) - 1];
return (((((((row + "/") + family) + ((family != null) && (family.length() > 0) ? ":" : "")) + qualifier) + "/") + timestampStr) + "/") + Type.codeToType(type);
} | 3.26 |
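keyToString above decodes the flat key layout: &lt;rowlen:2&gt;&lt;row&gt;&lt;famlen:1&gt;&lt;family&gt;&lt;qualifier&gt;&lt;timestamp:8&gt;&lt;type:1&gt;. A minimal sketch that builds such a key with java.nio (helper name hypothetical; ByteBuffer's default big-endian order matches what Bytes parses):

```java
import java.nio.ByteBuffer;

static byte[] buildFlatKey(byte[] row, byte[] family, byte[] qualifier,
    long timestamp, byte typeCode) {
  int len = 2 + row.length + 1 + family.length + qualifier.length + 8 + 1;
  ByteBuffer buf = ByteBuffer.allocate(len); // big-endian by default
  buf.putShort((short) row.length);
  buf.put(row);
  buf.put((byte) family.length);
  buf.put(family);
  buf.put(qualifier);
  buf.putLong(timestamp);
  buf.put(typeCode);
  return buf.array();
}
```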
hbase_KeyValue_createKeyOnly_rdh | /**
* Creates a new KeyValue that only contains the key portion (the value is set to be null). TODO
* only used by KeyOnlyFilter -- move there.
*
* @param lenAsVal
* replace value with the actual value length (false=empty)
*/
public KeyValue createKeyOnly(boolean lenAsVal) {
// KV format: <keylen:4><valuelen:4><key:keylen><value:valuelen>
// Rebuild as: <keylen:4><0:4><key:keylen>
int dataLen = (lenAsVal) ? Bytes.SIZEOF_INT : 0;
byte[] newBuffer = new byte[(getKeyLength() + ROW_OFFSET) + dataLen];
System.arraycopy(this.bytes, this.offset, newBuffer, 0, Math.min(newBuffer.length, this.f2));
Bytes.putInt(newBuffer, Bytes.SIZEOF_INT, dataLen);
if (lenAsVal) {
Bytes.putInt(newBuffer, newBuffer.length - dataLen, this.getValueLength());
}
return new KeyValue(newBuffer);
} | 3.26 |
hbase_KeyValue_getValueLength_rdh | /**
* Returns Value length
*/
@Override
public int getValueLength() {
int vlength = Bytes.toInt(this.bytes, this.offset + Bytes.SIZEOF_INT);
return vlength;
} | 3.26 |
hbase_KeyValue_getOffset_rdh | /**
* Returns Offset into {@link #getBuffer()} at which this KeyValue starts.
*/
public int getOffset() {
return this.offset;
} | 3.26 |
hbase_KeyValue_getFamilyArray_rdh | /**
* Returns the backing array of the entire KeyValue (all KeyValue fields are in a single array)
*/
@Override
public byte[] getFamilyArray() {
return bytes;
} | 3.26 |
hbase_KeyValue_getRowLength_rdh | /**
* Returns Row length
*/
@Override
public short getRowLength() {
return Bytes.toShort(this.bytes, getKeyOffset());
} | 3.26 |
hbase_KeyValue_m3_rdh | /**
* Returns the backing array of the entire KeyValue (all KeyValue fields are in a single array)
*/
@Override
public byte[] m3() {
return bytes;
} | 3.26 |
hbase_KeyValue_checkParameters_rdh | /**
* Checks the parameters passed to a constructor.
*
* @param row
* row key
* @param rlength
* row length
* @param family
* family name
* @param flength
* family length
* @param qlength
* qualifier length
* @param vlength
* value length
* @throws IllegalArgumentException
* an illegal value was passed
*/
static void checkParameters(final byte[] row, final int rlength, final byte[] family, int flength, int qlength, int vlength) throws IllegalArgumentException {
if (rlength > Short.MAX_VALUE) {
throw new IllegalArgumentException("Row > " + Short.MAX_VALUE);
}
if (row == null) {
throw new IllegalArgumentException("Row is null");
}
// Family length
flength = (family == null) ? 0 : flength;
if (flength > Byte.MAX_VALUE) {
throw new IllegalArgumentException("Family > " + Byte.MAX_VALUE);
}
// Qualifier length
if (qlength > ((Integer.MAX_VALUE - rlength) - flength)) {
throw new IllegalArgumentException("Qualifier > " + Integer.MAX_VALUE);
}
// Key length
long longKeyLength = getKeyDataStructureSize(rlength, flength, qlength);
if (longKeyLength > Integer.MAX_VALUE) {
throw new IllegalArgumentException((("keylength " + longKeyLength) + " > ") + Integer.MAX_VALUE);
}
// Value length
if (vlength > HConstants.MAXIMUM_VALUE_LENGTH) {
// FindBugs INT_VACUOUS_COMPARISON
throw new IllegalArgumentException((("Value length " + vlength) + " > ") + HConstants.MAXIMUM_VALUE_LENGTH);
}
} | 3.26 |
hbase_KeyValue_getTagsOffset_rdh | /**
* Return the offset where the tag data starts.
*/
@Override
public int getTagsOffset() {
int tagsLen = getTagsLength();
if (tagsLen == 0) {
return this.offset + this.f2;
}
return (this.offset + this.f2) - tagsLen;
} | 3.26 |
hbase_KeyValue_getKeyValueDataStructureSize_rdh | /**
* Computes the number of bytes that a <code>KeyValue</code> instance with the provided
* characteristics would take up for its underlying data structure.
*
* @param klength
* key length
* @param vlength
* value length
* @param tagsLength
* total length of the tags
* @return the <code>KeyValue</code> data structure length
*/
public static long getKeyValueDataStructureSize(int klength, int vlength, int tagsLength) {
if (tagsLength == 0) {
return (((long) (KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE)) + klength) + vlength;
}
return ((((long) (KeyValue.KEYVALUE_WITH_TAGS_INFRASTRUCTURE_SIZE)) + klength) + vlength) + tagsLength;
} | 3.26 |
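A hedged worked example of the two branches (the infrastructure constants are assumptions here: 8 bytes for the key/value length prefixes, 10 when the 2-byte tags length is included):

```java
long noTags = KeyValue.getKeyValueDataStructureSize(20, 100, 0);   // 8 + 20 + 100 = 128
long withTags = KeyValue.getKeyValueDataStructureSize(20, 100, 6); // 10 + 20 + 100 + 6 = 136
```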
hbase_KeyValue_createEmptyByteArray_rdh | /**
* Create an empty byte[] representing a KeyValue All lengths are preset and can be filled in
* later.
*
* @param rlength
* row length
* @param flength
* family length
* @param qlength
* qualifier length
* @param timestamp
* version timestamp
* @param type
* key type
* @param vlength
* value length
* @return The newly created byte array.
*/
private static byte[] createEmptyByteArray(final int rlength, int flength, int qlength, final long timestamp, final Type type, int vlength,
int tagsLength) {
if (rlength > Short.MAX_VALUE) {
throw new IllegalArgumentException("Row > " + Short.MAX_VALUE);
}
if (flength > Byte.MAX_VALUE) {
throw new IllegalArgumentException("Family > " + Byte.MAX_VALUE);
}
// Qualifier length
if (qlength > ((Integer.MAX_VALUE - rlength) - flength)) {
throw new IllegalArgumentException("Qualifier > " + Integer.MAX_VALUE);
}
RawCell.checkForTagsLength(tagsLength);
// Key length
long longkeylength = getKeyDataStructureSize(rlength, flength, qlength);
if (longkeylength > Integer.MAX_VALUE) {
throw new IllegalArgumentException((("keylength " + longkeylength) + " > ") + Integer.MAX_VALUE);
}
int keylength = ((int) (longkeylength));
// Value length
if (vlength > HConstants.MAXIMUM_VALUE_LENGTH) {
// FindBugs INT_VACUOUS_COMPARISON
throw new IllegalArgumentException("Valuer > " + HConstants.MAXIMUM_VALUE_LENGTH);
}
// Allocate right-sized byte array.
byte[] bytes = new byte[((int) (getKeyValueDataStructureSize(rlength, flength, qlength, vlength, tagsLength)))];
// Write the correct size markers
int pos = 0;
pos = Bytes.putInt(bytes, pos, keylength);
pos = Bytes.putInt(bytes, pos, vlength);
pos = Bytes.putShort(bytes, pos, ((short) (rlength & 0xffff)));
pos += rlength;
pos = Bytes.putByte(bytes, pos, ((byte) (flength & 0xff)));
pos += flength + qlength;
pos = Bytes.putLong(bytes, pos, timestamp);
pos = Bytes.putByte(bytes, pos, type.getCode());
pos += vlength;
if (tagsLength > 0) {
pos = Bytes.putAsShort(bytes, pos, tagsLength);
}
return bytes;
} | 3.26 |
hbase_KeyValue_getKeyDataStructureSize_rdh | /**
* Computes the number of bytes that a <code>KeyValue</code> instance with the provided
* characteristics would take up in its underlying data structure for the key.
*
* @param rlength
* row length
* @param flength
* family length
* @param qlength
* qualifier length
* @return the key data structure length
*/
public static long getKeyDataStructureSize(int rlength, int flength, int qlength) {
return ((((long) (KeyValue.KEY_INFRASTRUCTURE_SIZE)) + rlength) + flength) + qlength;
} | 3.26 |
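Assuming the flat-key layout shown in keyToString above (2-byte row length + 1-byte family length + 8-byte timestamp + 1-byte type = 12 bytes of key infrastructure), a hedged worked example:

```java
// 12 (assumed KEY_INFRASTRUCTURE_SIZE) + 3-byte row + 1-byte family + 2-byte qualifier
long keySize = KeyValue.getKeyDataStructureSize(3, 1, 2); // 12 + 3 + 1 + 2 = 18
```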
hbase_KeyValue_equals_rdh | /**
* Needed for doing 'contains' on a List. Only compares the key portion, not the value.
*/
@Override
public boolean equals(Object other) {
if (!(other instanceof Cell)) {
return false;
}
return CellUtil.equals(this, ((Cell) (other)));
} | 3.26 |
hbase_KeyValue_getBuffer_rdh | // ---------------------------------------------------------------------------
//
// Public Member Accessors
//
// ---------------------------------------------------------------------------
/**
* To be used only in tests where the Cells are clearly assumed to be of type KeyValue and we
* need access to the backing array to do some test case related assertions.
*
* @return The byte array backing this KeyValue.
*/
public byte[] getBuffer() {
return this.bytes;
} | 3.26 |
hbase_KeyValue_shallowCopy_rdh | /**
* Creates a shallow copy of this KeyValue, reusing the data byte buffer.
* http://en.wikipedia.org/wiki/Object_copy
*
* @return Shallow copy of this KeyValue
*/
public KeyValue shallowCopy() {
KeyValue v29 = new KeyValue(this.bytes, this.offset, this.f2);
v29.m0(this.seqId);
return v29;
} | 3.26 |
hbase_KeyValue_toStringMap_rdh | /**
* Produces a string map for this key/value pair. Useful for programmatic use and manipulation of
* the data stored in a WALKey, for example, printing as JSON. Values are left out due to their
* tendency to be large. If needed, they can be added manually.
*
* @return the Map<String,?> containing data from this key
*/
public Map<String, Object> toStringMap() {
Map<String, Object> stringMap = new HashMap<>();
stringMap.put("row", Bytes.toStringBinary(getRowArray(), getRowOffset(), getRowLength()));
stringMap.put("family", Bytes.toStringBinary(getFamilyArray(), getFamilyOffset(), getFamilyLength()));
stringMap.put("qualifier", Bytes.toStringBinary(m3(), getQualifierOffset(), getQualifierLength()));
stringMap.put("timestamp", getTimestamp());
stringMap.put("vlen", getValueLength());
Iterator<Tag> tags = getTags();
if (tags != null) {
List<String> tagsString = new ArrayList<String>();
while (tags.hasNext()) {
tagsString.add(tags.next().toString());
}
stringMap.put("tag", tagsString);
}
return stringMap;
} | 3.26 |
hbase_KeyValue_getQualifierOffset_rdh | /**
* Returns Qualifier offset
*/
int getQualifierOffset(int foffset, int flength) {
return foffset + flength;
} | 3.26 |
hbase_KeyValue_setKey_rdh | /**
* A setter that helps avoid object creation every time there is a need to create a new
* KeyOnlyKeyValue.
*
* @param key
* Key to set
* @param offset
* Offset of the Key
* @param length
* length of the Key
*/
public void setKey(byte[] key, int offset, int length) {
this.bytes = key;
this.offset = offset;
this.f2 = length;
this.rowLen = Bytes.toShort(this.bytes, this.offset);
} | 3.26 |
hbase_KeyValue_getKeyLength_rdh | /**
* Returns Length of key portion.
*/
public int getKeyLength() {
return Bytes.toInt(this.bytes, this.offset);
} | 3.26 |
hbase_KeyValue_compare_rdh | // RawComparator
@Override
public int compare(byte[] l, int loff, int llen, byte[] r, int roff, int rlen) {
return compareFlatKey(l, loff, llen, r, roff, rlen);
} | 3.26 |
hbase_KeyValue_write_rdh | /**
* Write out a KeyValue in the manner in which we used to when KeyValue was a Writable.
*
* @param kv
* the KeyValue on which write is being requested
* @param out
* OutputStream to write keyValue to
* @return Length written on stream
* @throws IOException
* if any IO error happens
* @see #create(DataInput) for the inverse function
*/
public static long write(final KeyValue kv, final DataOutput out) throws IOException {
// This is how the old Writables write used to serialize KVs. Need to figure way to make it
// work for all implementations.
int length = kv.getLength();
out.writeInt(length);
out.write(kv.getBuffer(), kv.getOffset(), length);
return ((long) (length)) + Bytes.SIZEOF_INT;
}
/**
* Write out a KeyValue in the manner in which we used to when KeyValue was a Writable but do not
* require a {@link DataOutput}, just a plain {@link OutputStream}. Named <code>oswrite</code>
* so it does not clash with {@link #write(KeyValue, DataOutput)}.
*
* @param kv
* the KeyValue on which write is being requested
* @param out
* OutputStream to write keyValue to
* @param withTags
* boolean value indicating write is with Tags or not
* @return Length written on stream
* @throws IOException
* if any IO error happens
* @see #create(DataInput) for the inverse function
* @see #write(KeyValue, DataOutput)
* @see KeyValueUtil#oswrite(Cell, OutputStream, boolean)
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Instead use
{@link #write(OutputStream, boolean)} | 3.26 |
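A hedged round-trip sketch for the write path above; KeyValue.create(DataInput) is the inverse named in the javadoc, and kv stands for any existing KeyValue:

```java
ByteArrayOutputStream bos = new ByteArrayOutputStream();
long written = KeyValue.write(kv, new DataOutputStream(bos)); // 4-byte length prefix + raw bytes
DataInput in = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
KeyValue copy = KeyValue.create(in); // reads the length, then the backing bytes
```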
hbase_KeyValue_writeByteArray_rdh | /**
* Write KeyValue format into the provided byte array.
*
* @param buffer
* the bytes buffer to use
* @param boffset
* buffer offset
* @param row
* row key
* @param roffset
* row offset
* @param rlength
* row length
* @param family
* family name
* @param foffset
* family offset
* @param flength
* family length
* @param qualifier
* column qualifier
* @param qoffset
* qualifier offset
* @param qlength
* qualifier length
* @param timestamp
* version timestamp
* @param type
* key type
* @param value
* column value
* @param voffset
* value offset
* @param vlength
* value length
* @return The number of useful bytes in the buffer.
* @throws IllegalArgumentException
* an illegal value was passed or there is insufficient space
* remaining in the buffer
*/
public static int writeByteArray(byte[] buffer, final int boffset, final byte[] row, final int roffset, final int rlength, final byte[] family, final int foffset, int flength, final byte[] qualifier, final int qoffset, int qlength, final long timestamp, final Type type, final byte[] value, final int voffset, int vlength, Tag[] tags) {
checkParameters(row, rlength, family, flength, qlength, vlength);
// Calculate length of tags area
int tagsLength = 0;
if ((tags != null) && (tags.length > 0)) {
for (Tag t : tags) {
tagsLength += t.getValueLength() + Tag.INFRASTRUCTURE_SIZE;
}
}
RawCell.checkForTagsLength(tagsLength);
int keyLength = ((int) (getKeyDataStructureSize(rlength, flength, qlength)));
int keyValueLength = ((int) (getKeyValueDataStructureSize(rlength, flength, qlength, vlength, tagsLength)));
if (keyValueLength > (buffer.length - boffset)) {
throw new IllegalArgumentException((("Buffer size " + (buffer.length - boffset)) + " < ") + keyValueLength);
}
// Write key, value and key row length.
int pos = boffset;
pos = Bytes.putInt(buffer, pos, keyLength);
pos = Bytes.putInt(buffer, pos, vlength);
pos = Bytes.putShort(buffer, pos, ((short) (rlength & 0xffff)));
pos = Bytes.putBytes(buffer, pos, row, roffset, rlength);
pos = Bytes.putByte(buffer, pos, ((byte) (flength & 0xff)));
if (flength != 0) {
pos = Bytes.putBytes(buffer, pos, family, foffset, flength);
}
if (qlength != 0) {
pos = Bytes.putBytes(buffer, pos, qualifier, qoffset, qlength);
}
pos = Bytes.putLong(buffer, pos, timestamp);
pos = Bytes.putByte(buffer, pos, type.getCode());
if ((value != null) && (value.length > 0)) {
pos = Bytes.putBytes(buffer, pos, value, voffset, vlength);
}
// Write the number of tags. If it is 0 then it means there are no tags.
if (tagsLength > 0) {
pos = Bytes.putAsShort(buffer, pos, tagsLength);
for (Tag t : tags) {
int tlen = t.getValueLength();
pos = Bytes.putAsShort(buffer, pos, tlen + Tag.TYPE_LENGTH_SIZE);
pos = Bytes.putByte(buffer, pos, t.getType());
Tag.copyValueTo(t, buffer, pos);
pos += tlen;
}
}
return keyValueLength;
} | 3.26 |
hbase_KeyValue_codeToType_rdh | /**
* Cannot rely on enum ordinals. They change if an item is removed or moved. Do our own codes.
*
* @param b
* the kv serialized byte[] to process
* @return Type associated with passed code.
*/
public static Type codeToType(final byte b) {
Type t = codeArray[b & 0xff];
if (t != null) {
return t;
}
throw new RuntimeException("Unknown code " + b);
} | 3.26 |
hbase_KeyValue_compareWithoutRow_rdh | /**
* Compare columnFamily, qualifier, timestamp, and key type (everything except the row). This
* method is used both in the normal comparator and the "same-prefix" comparator. Note that we
* are assuming that row portions of both KVs have already been parsed and found identical, and
* we don't validate that assumption here. <code>commonPrefix</code> is the length of the common
* prefix of the two key-values being compared, including the row length and the row.
*/
private int compareWithoutRow(int commonPrefix, byte[] left, int loffset, int llength, byte[] right, int roffset, int rlength, short rowlength) {
/**
* *
* KeyValue Format and commonLength:
* |_keyLen_|_valLen_|_rowLen_|_rowKey_|_famiLen_|_fami_|_Quali_|....
* ------------------|-------commonLength--------|--------------
*/
int commonLength = (ROW_LENGTH_SIZE + FAMILY_LENGTH_SIZE) + rowlength;
// commonLength + TIMESTAMP_TYPE_SIZE
int commonLengthWithTSAndType = TIMESTAMP_TYPE_SIZE + commonLength;
// ColumnFamily + Qualifier length.
int lcolumnlength = llength - commonLengthWithTSAndType;
int rcolumnlength = rlength - commonLengthWithTSAndType;
byte ltype = left[loffset + (llength - 1)];
byte rtype = right[roffset + (rlength - 1)];
// If the column is not specified, the "minimum" key type appears the
// latest in the sorted order, regardless of the timestamp. This is used
// for specifying the last key/value in a given row, because there is no
// "lexicographically last column" (it would be infinitely long). The
// "maximum" key type does not need this behavior.
if ((lcolumnlength == 0) && (ltype == Type.Minimum.getCode())) {
// left is "bigger", i.e. it appears later in the sorted order
return 1;
}
if ((rcolumnlength == 0) && (rtype == Type.Minimum.getCode())) {
return -1;
}
int lfamilyoffset = commonLength + loffset;
int v89 = commonLength + roffset;
// Column family length.
int lfamilylength = left[lfamilyoffset - 1];
int rfamilylength = right[v89 - 1];
// If left family size is not equal to right family size, we need not
// compare the qualifiers.
boolean sameFamilySize = lfamilylength == rfamilylength;
int common = 0;
if (commonPrefix > 0) {
common = Math.max(0, commonPrefix - commonLength);
if (!sameFamilySize) {
// Common should not be larger than Math.min(lfamilylength,
// rfamilylength).
common = Math.min(common, Math.min(lfamilylength, rfamilylength));
} else {
common = Math.min(common, Math.min(lcolumnlength, rcolumnlength));
}
}
if (!sameFamilySize) {
// comparing column family is enough.
return Bytes.compareTo(left, lfamilyoffset + common, lfamilylength - common, right, v89 + common, rfamilylength - common);
}
// Compare family & qualifier together.
final int comparison = Bytes.compareTo(left, lfamilyoffset + common, lcolumnlength - common, right, v89 + common, rcolumnlength - common);
if (comparison != 0) {
return comparison;
}
// //
// Next compare timestamps.
long ltimestamp = Bytes.toLong(left, loffset + (llength - TIMESTAMP_TYPE_SIZE));
long rtimestamp = Bytes.toLong(right, roffset + (rlength - TIMESTAMP_TYPE_SIZE));
int compare = compareTimestamps(ltimestamp, rtimestamp);
if (compare != 0) {
return compare;
}
// Compare types. Let the delete types sort ahead of puts; i.e. types
// of higher numbers sort before those of lesser numbers. Maximum (255)
// appears ahead of everything, and minimum (0) appears after
// everything.
return (0xff & rtype) - (0xff & ltype);
} | 3.26 |
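The inverted subtraction on the last line is what makes higher type codes sort first. A hedged illustration (the Put and Delete codes, 4 and 8, are taken as assumptions here):

```java
byte ltype = 8; // assumed Type.Delete code
byte rtype = 4; // assumed Type.Put code
int cmp = (0xff & rtype) - (0xff & ltype); // 4 - 8 = -4 < 0: left (Delete) sorts first
```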
hbase_KeyValue_getLegacyKeyComparatorName_rdh | /**
* Compare KeyValues. When we compare KeyValues, we only compare the Key portion. This means two
* KeyValues with same Key but different Values are considered the same as far as this Comparator
* is concerned.
*
* @deprecated : Use {@link CellComparatorImpl}. Deprecated for hbase 2.0, remove for hbase 3.0.
*/
@Deprecated
public static class KVComparator implements RawComparator<Cell>, SamePrefixComparator<byte[]> {
/**
* The HFileV2 file format's trailer contains this class name. We reinterpret this and
* instantiate the appropriate comparator. TODO: With V3 consider removing this.
*
* @return legacy class name for FixedFileTrailer#comparatorClassName
*/
public String getLegacyKeyComparatorName() {
return "org.apache.hadoop.hbase.KeyValue$KeyComparator";
} | 3.26 |
hbase_KeyValue_getTimestamp_rdh | /**
* Return the timestamp.
*/
long getTimestamp(final int keylength) {
int tsOffset = getTimestampOffset(keylength);
return Bytes.toLong(this.bytes, tsOffset);
} | 3.26 |
hbase_KeyValue_m2_rdh | /**
* Returns Family length
*/
public byte m2(int famLenPos) {
return this.bytes[famLenPos];
} | 3.26 |
hbase_CatalogReplicaLoadBalanceSimpleSelector_stop_rdh | // This class implements the Stoppable interface because chores need a Stoppable object;
// currently stop is a no-op on this Stoppable object.
@Override
public void stop(String why) {
isStopped = true;
} | 3.26 |
hbase_CatalogReplicaLoadBalanceSimpleSelector_onError_rdh | /**
* When a client runs into NotServingRegionException, it will call this method to update the
* Selector's internal state.
*
* @param loc
* the location which causes exception.
*/
@Override
public void onError(HRegionLocation loc) {
ConcurrentNavigableMap<byte[], StaleLocationCacheEntry> tableCache = computeIfAbsent(staleCache, loc.getRegion().getTable(), () -> new ConcurrentSkipListMap<>(BYTES_COMPARATOR));
byte[] startKey = loc.getRegion().getStartKey();
tableCache.putIfAbsent(startKey, new StaleLocationCacheEntry(loc.getRegion().getEndKey()));
LOG.debug("Add entry to stale cache for table {} with startKey {}, {}", loc.getRegion().getTable(), startKey, loc.getRegion().getEndKey());
} | 3.26 |
hbase_CatalogReplicaLoadBalanceSimpleSelector_getRandomReplicaId_rdh | /**
* Select a random replica id (including the primary replica id). In case there is no replica
* region configured, return the primary replica id.
*
* @return Replica id
*/
private int getRandomReplicaId() {
int cachedNumOfReplicas = this.numOfReplicas;
if (cachedNumOfReplicas == CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS) {
cachedNumOfReplicas = refreshCatalogReplicaCount();
this.numOfReplicas = cachedNumOfReplicas;
}
// In case of no replica configured, return the primary region id.
if (cachedNumOfReplicas <= 1) {
return RegionInfo.DEFAULT_REPLICA_ID;
}
return ThreadLocalRandom.current().nextInt(cachedNumOfReplicas);
} | 3.26 |
hbase_CatalogReplicaLoadBalanceSimpleSelector_select_rdh | /**
* When it looks up a location, it will call this method to find a replica region to go to. For a
* normal case, > 99% of region locations from catalog/meta replica will be up to date. In extreme
* cases such as region server crashes, it depends on how fast replication catches up.
*
* @param tableName
* table name it looks up
* @param row
* key it looks up.
* @param locateType
* locateType, Only BEFORE and CURRENT will be passed in.
* @return catalog replica id
*/
@Override
public int select(final TableName tableName, final byte[] row, final RegionLocateType locateType) {
Preconditions.checkArgument((locateType == RegionLocateType.BEFORE) || (locateType == RegionLocateType.CURRENT), "Expected type BEFORE or CURRENT but got: %s", locateType);
ConcurrentNavigableMap<byte[], StaleLocationCacheEntry> tableCache =
staleCache.get(tableName);
// If there is no entry in StaleCache, select a random replica id.
if (tableCache == null) {
return getRandomReplicaId();
}
Map.Entry<byte[], StaleLocationCacheEntry> entry;
boolean isEmptyStopRow = isEmptyStopRow(row);
// Only BEFORE and CURRENT are passed in.
if (locateType == RegionLocateType.BEFORE) {
entry = (isEmptyStopRow) ? tableCache.lastEntry() : tableCache.lowerEntry(row);
} else {
entry = tableCache.floorEntry(row);
}
// It is not in the stale cache, return a random replica id.
if (entry == null) {
return getRandomReplicaId();
}
// The entry here is a possible match for the location. Check if the entry times out first as
// long comparing is faster than comparing byte arrays(in most cases). It could remove
// stale entries faster. If the possible match entry does not time out, it will check if
// the entry is a match for the row passed in and select the replica id accordingly.
if ((EnvironmentEdgeManager.currentTime() - entry.getValue().getTimestamp()) >= STALE_CACHE_TIMEOUT_IN_MILLISECONDS) {
LOG.debug("Entry for table {} with startKey {}, {} times out", tableName, entry.getKey(), entry);
tableCache.remove(entry.getKey());
return getRandomReplicaId();
}
byte[] endKey = entry.getValue().getEndKey();
// The following logic is borrowed from AsyncNonMetaRegionLocator.
if (isEmptyStopRow(endKey)) {
LOG.debug("Lookup {} goes to primary region", row);
return RegionInfo.DEFAULT_REPLICA_ID;
}
if (locateType == RegionLocateType.BEFORE) {
if ((!isEmptyStopRow) && (Bytes.compareTo(endKey, row) >= 0)) {
LOG.debug("Lookup {} goes to primary meta", row);
return RegionInfo.DEFAULT_REPLICA_ID;
}
} else if (Bytes.compareTo(row, endKey) < 0) {
LOG.debug("Lookup {} goes to primary meta", row);
return RegionInfo.DEFAULT_REPLICA_ID;
}
// Not in stale cache, return a random replica id.
return getRandomReplicaId();
} | 3.26 |
hbase_ReversedKeyValueHeap_compareRows_rdh | /**
* Compares rows of two KeyValue
*
* @return less than 0 if left is smaller, 0 if equal, greater than 0 if left is larger
*/
public int compareRows(Cell left, Cell right) {
return super.kvComparator.compareRows(left, right);
} | 3.26 |
hbase_AbstractStateMachineRegionProcedure_getRegion_rdh | /**
* Returns the RegionInfo of the region we are operating on.
*/
public RegionInfo getRegion() {
return this.hri;
} | 3.26 |
hbase_AbstractStateMachineRegionProcedure_setRegion_rdh | /**
* Used when deserializing. Otherwise, DON'T TOUCH IT!
*/
protected void setRegion(final RegionInfo hri) {
this.hri = hri;
} | 3.26 |
hbase_HQuorumPeer_main_rdh | /**
* Parse ZooKeeper configuration from HBase XML config and run a QuorumPeer.
*
* @param args
* String[] of command line arguments. Not used.
*/
public static void main(String[] args) {
Configuration conf = HBaseConfiguration.create();
try {
Properties zkProperties = ZKConfig.makeZKProps(conf);
writeMyID(zkProperties);
QuorumPeerConfig zkConfig =
new QuorumPeerConfig();
zkConfig.parseProperties(zkProperties);
// login the zookeeper server principal (if using security)
ZKAuthentication.loginServer(conf, HConstants.ZK_SERVER_KEYTAB_FILE, HConstants.ZK_SERVER_KERBEROS_PRINCIPAL, zkConfig.getClientPortAddress().getHostName());
runZKServer(zkConfig);
} catch (Exception e) {
LOG.error("Failed to start ZKServer", e);
System.exit(-1);
}
} | 3.26 |
hbase_VisibilityController_postOpen_rdh | /**
* **************************** Region related hooks *****************************
*/
@Override
public void postOpen(ObserverContext<RegionCoprocessorEnvironment> e) {
// Read the entire labels table and populate the zk
if (e.getEnvironment().getRegion().getRegionInfo().getTable().equals(LABELS_TABLE_NAME)) {
this.labelsRegion = true;
synchronized(this) {
this.accessControllerAvailable = CoprocessorHost.getLoadedCoprocessors().contains(AccessController.class.getName());
}
initVisibilityLabelService(e.getEnvironment());
} else {
checkAuths = e.getEnvironment().getConfiguration().getBoolean(VisibilityConstants.CHECK_AUTHS_FOR_MUTATION, false);
initVisibilityLabelService(e.getEnvironment());
}
} | 3.26 |
hbase_VisibilityController_getRegionObserver_rdh | /**
* ************************** Observer/Service Getters ***********************************
*/
@Override
public Optional<RegionObserver> getRegionObserver() {
return Optional.of(this);
} | 3.26 |
hbase_VisibilityController_postStartMaster_rdh | /**
* ******************************* Master related hooks *********************************
*/
@Override
public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
// Need to create the new system table for labels here
try (Admin v0 = ctx.getEnvironment().getConnection().getAdmin()) {
if (!v0.tableExists(LABELS_TABLE_NAME)) {
// We will cache all the labels. No need of normal table block cache.
// Let the "labels" table having only one region always. We are not expecting too many
// labels in the system.
TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(LABELS_TABLE_NAME).setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(LABELS_TABLE_FAMILY).setBloomFilterType(BloomType.NONE).setBlockCacheEnabled(false).build()).setValue(TableDescriptorBuilder.SPLIT_POLICY, DisabledRegionSplitPolicy.class.getName()).build();
v0.createTable(tableDescriptor);
}
}
} | 3.26 |
hbase_VisibilityController_addLabels_rdh | /**
* ****************************
* VisibilityEndpoint service related methods
* ****************************
*/
@Override
public synchronized void addLabels(RpcController controller, VisibilityLabelsRequest request, RpcCallback<VisibilityLabelsResponse> done) {
VisibilityLabelsResponse.Builder response = VisibilityLabelsResponse.newBuilder();
List<VisibilityLabel> visLabels = request.getVisLabelList();
if (!initialized) {
setExceptionResults(visLabels.size(), new VisibilityControllerNotReadyException("VisibilityController not yet initialized!"), response);
} else {
List<byte[]> v60 = new ArrayList<>(visLabels.size());
try {
if (authorizationEnabled) {
checkCallingUserAuth();
}
RegionActionResult successResult = RegionActionResult.newBuilder().build();
for (VisibilityLabel visLabel : visLabels) {
byte[] label = visLabel.getLabel().toByteArray();
v60.add(label);
response.addResult(successResult); // Just mark as success. Later it will get reset
// based on the result from
// visibilityLabelService.addLabels ()
}
if (!v60.isEmpty()) {
OperationStatus[] opStatus = this.visibilityLabelService.addLabels(v60);
logResult(true, "addLabels", "Adding labels allowed", null, v60, null); int i = 0;
for (OperationStatus status : opStatus) {
while (!Objects.equals(response.getResult(i), successResult)) { i++;
}
if (status.getOperationStatusCode() != SUCCESS) {
RegionActionResult.Builder failureResultBuilder = RegionActionResult.newBuilder();
failureResultBuilder.setException(buildException(new DoNotRetryIOException(status.getExceptionMsg())));
response.setResult(i, failureResultBuilder.build());
}
i++;
}
}
} catch (AccessDeniedException e) {
logResult(false, "addLabels", e.getMessage(), null, v60, null);LOG.error("User is not having required permissions to add labels", e);
setExceptionResults(visLabels.size(), e, response);
} catch (IOException e) {
LOG.error(e.toString(), e);
setExceptionResults(visLabels.size(), e, response);
}
}
done.run(response.build());
} | 3.26 |
hbase_CellCreator_m0_rdh | /**
* Returns Visibility expression resolver
*/
public VisibilityExpressionResolver m0() {
return this.visExpResolver;
} | 3.26 |
hbase_CellCreator_create_rdh | /**
*
* @param row
* row key
* @param roffset
* row offset
* @param rlength
* row length
* @param family
* family name
* @param foffset
* family offset
* @param flength
* family length
* @param qualifier
* column qualifier
* @param qoffset
* qualifier offset
* @param qlength
* qualifier length
* @param timestamp
* version timestamp
* @param value
* column value
* @param voffset
* value offset
* @param vlength
* value length
* @return created Cell
*/
public Cell create(byte[] row, int roffset, int rlength, byte[] family, int foffset, int flength, byte[] qualifier, int qoffset, int qlength, long timestamp, byte[] value, int voffset, int vlength, List<Tag> tags) throws IOException {
return new KeyValue(row, roffset, rlength, family, foffset, flength, qualifier, qoffset, qlength, timestamp, Type.Put, value, voffset, vlength, tags);
} | 3.26 |
hbase_CompatibilitySingletonFactory_getInstance_rdh | /**
* Get the singleton instance of any class defined by the compatibility jars
*
* @return the singleton
*/
@SuppressWarnings("unchecked")
public static <T> T getInstance(Class<T> klass) {
synchronized(SingletonStorage.INSTANCE.lock) {
T instance = ((T) (SingletonStorage.INSTANCE.instances.get(klass)));
if (instance == null) {
try {
ServiceLoader<T> loader = ServiceLoader.load(klass);
Iterator<T> it = loader.iterator();
instance = it.next();
if (it.hasNext()) {
StringBuilder msg = new StringBuilder();
msg.append("ServiceLoader provided more than one implementation for class: ").append(klass).append(", using implementation: ").append(instance.getClass()).append(", other implementations: {");
while (it.hasNext()) {
msg.append(it.next()).append(" ");
}
msg.append("}");
LOG.warn(msg.toString());
}
} catch (Exception e) {
throw new RuntimeException(createExceptionString(klass), e);
} catch (Error e) {
throw new RuntimeException(createExceptionString(klass), e);
}
// If there was nothing returned and no exception then throw an exception.
if (instance == null) {
throw new RuntimeException(createExceptionString(klass));
}
SingletonStorage.INSTANCE.instances.put(klass, instance);
}
return instance;
}
} | 3.26 |
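A hedged usage sketch of the factory; MetricsRegionServerSourceFactory is given as an example of an interface resolved this way and should be treated as an assumption:

```java
// The ServiceLoader scan runs once; subsequent calls return the cached instance.
MetricsRegionServerSourceFactory factory =
    CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class);
```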
hbase_CompactedHFilesDischarger_setUseExecutor_rdh | /**
* CompactedHFilesDischarger runs asynchronously by default using the hosting RegionServer's
* Executor. In tests it can be useful to force a synchronous cleanup. Use this method to set
* no-executor before you call run.
*
* @return The old setting for <code>useExecutor</code>
*/
boolean setUseExecutor(final boolean useExecutor) {
boolean oldSetting = this.useExecutor;
this.useExecutor = useExecutor;
return oldSetting;
} | 3.26 |
hbase_FastPathRpcHandler_loadCallRunner_rdh | /**
*
* @param cr
* Task gotten via fastpath.
* @return True if we successfully loaded our task
*/
boolean loadCallRunner(final CallRunner cr) {
this.loadedCallRunner = cr;
this.semaphore.release();
return true;
} | 3.26 |
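A minimal standalone sketch of the handoff pattern loadCallRunner participates in (names hypothetical): a handler parks on a semaphore until a dispatcher loads a task and releases it.

```java
import java.util.concurrent.Semaphore;

class FastPathSlot<T> {
  private final Semaphore semaphore = new Semaphore(0);
  private volatile T loaded;

  boolean load(T task) { // dispatcher side, mirrors loadCallRunner above
    this.loaded = task;
    this.semaphore.release();
    return true;
  }

  T take() throws InterruptedException { // handler side
    semaphore.acquire(); // park until a task is loaded
    T task = loaded;
    loaded = null;
    return task;
  }
}
```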
hbase_RequestControllerFactory_create_rdh | /**
* Constructs a {@link org.apache.hadoop.hbase.client.RequestController}.
*
* @param conf
* The {@link Configuration} to use.
* @return A RequestController which is built according to the configuration.
*/
public static RequestController create(Configuration conf) {
Class<? extends RequestController> clazz = conf.getClass(REQUEST_CONTROLLER_IMPL_CONF_KEY, SimpleRequestController.class, RequestController.class);
return ReflectionUtils.newInstance(clazz, conf);
} | 3.26 |
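A hedged usage sketch; MyRequestController is hypothetical, and the config key constant (referenced in the method above) is assumed to be public:

```java
Configuration conf = HBaseConfiguration.create();
conf.setClass(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY,
    MyRequestController.class, RequestController.class);
RequestController controller = RequestControllerFactory.create(conf); // builds MyRequestController
```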
hbase_StoreFileTrackerValidationUtils_checkForNewFamily_rdh | // should not use MigrationStoreFileTracker for new family
private static void checkForNewFamily(Configuration conf, TableDescriptor table, ColumnFamilyDescriptor family) throws IOException {
Configuration mergedConf = StoreUtils.createStoreConfiguration(conf, table, family);
Class<? extends StoreFileTracker> tracker = StoreFileTrackerFactory.getTrackerClass(mergedConf);
if (MigrationStoreFileTracker.class.isAssignableFrom(tracker)) {
throw new DoNotRetryIOException((((("Should not use " + Trackers.MIGRATION) + " as store file tracker for new family ") + family.getNameAsString()) + " of table ") + table.getTableName());
}
} | 3.26 |
hbase_StoreFileTrackerValidationUtils_validatePreRestoreSnapshot_rdh | /**
* Makes sure restoring a snapshot does not break the current SFT setup; follows
* StoreUtils.createStoreConfiguration.
*
* @param currentTableDesc
* Existing Table's TableDescriptor
* @param snapshotTableDesc
* Snapshot's TableDescriptor
* @param baseConf
* Current global configuration
* @throws RestoreSnapshotException
* if restore would break the current SFT setup
*/
public static void validatePreRestoreSnapshot(TableDescriptor currentTableDesc, TableDescriptor snapshotTableDesc, Configuration baseConf) throws RestoreSnapshotException {
for (ColumnFamilyDescriptor cfDesc : currentTableDesc.getColumnFamilies()) {
ColumnFamilyDescriptor snapCFDesc = snapshotTableDesc.getColumnFamily(cfDesc.getName());
// if there is no counterpart in the snapshot it will be just deleted so the config does
// not matter
if (snapCFDesc != null) {
Configuration currentCompositeConf = StoreUtils.createStoreConfiguration(baseConf, currentTableDesc, cfDesc);
Configuration snapCompositeConf = StoreUtils.createStoreConfiguration(baseConf, snapshotTableDesc, snapCFDesc);
Class<? extends StoreFileTracker> currentSFT = StoreFileTrackerFactory.getTrackerClass(currentCompositeConf);
Class<? extends StoreFileTracker> snapSFT = StoreFileTrackerFactory.getTrackerClass(snapCompositeConf);
// restoration is not possible if there is an SFT mismatch
if (currentSFT != snapSFT) {
throw new RestoreSnapshotException(((((("Restoring Snapshot is not possible because " + " the config for column family ") + cfDesc.getNameAsString()) + " has incompatible configuration. Current SFT: ") + currentSFT) + " SFT from snapshot: ") + snapSFT);
}
}
}
} | 3.26 |
hbase_StoreFileTrackerValidationUtils_checkForCreateTable_rdh | /**
* Pre check when creating a new table.
* <p/>
* For now, only make sure that we do not use {@link Trackers#MIGRATION} for newly created tables.
*
* @throws IOException
* when there are check errors, the upper layer should fail the
* {@code CreateTableProcedure}.
*/
public static void checkForCreateTable(Configuration conf, TableDescriptor table) throws IOException {
for (ColumnFamilyDescriptor family : table.getColumnFamilies()) {
checkForNewFamily(conf, table, family);
}
}
/**
* Pre check when modifying a table.
* <p/>
* The basic idea is when you want to change the store file tracker implementation, you should use
* {@link Trackers#MIGRATION} first and then change to the destination store file tracker
* implementation.
* <p/>
* There are several rules:
* <ul>
* <li>For newly added family, you should not use {@link Trackers#MIGRATION}.</li>
* <li>For modifying a family:
* <ul>
* <li>If old tracker is {@link Trackers#MIGRATION}, then:
* <ul>
* <li>The new tracker is also {@link Trackers#MIGRATION}, then they must have the same src and
* dst tracker.</li>
* <li>The new tracker is not {@link Trackers#MIGRATION}, then the new tracker must be the dst
* tracker of the old tracker.</li>
* </ul>
* </li>
* <li>If the old tracker is not {@link Trackers#MIGRATION}, then:
* <ul>
* <li>If the new tracker is {@link Trackers#MIGRATION}, then the old tracker must be the src
* tracker of the new tracker.</li>
* <li>If the new tracker is not {@link Trackers#MIGRATION}, then the new tracker must be the same
* with old tracker.</li>
* </ul>
* </li>
* </ul>
* </li>
* </ul>
*
* @throws IOException
* when there are check errors, the upper layer should fail the
* {@code ModifyTableProcedure} | 3.26 |
hbase_ChaosAgent_createIfZNodeNotExists_rdh | /**
* Checks if the given ZNode exists; if not, creates a PERSISTENT ZNode for it.
*
* @param path
* Path to check for ZNode
*/
private void createIfZNodeNotExists(String path) {
try {
if (zk.exists(path, false) == null) {
createZNode(path, new byte[0]);
}
} catch (KeeperException | InterruptedException e) {
LOG.error((("Error checking given node : " + path) + " ") + e);}
} | 3.26 |
hbase_ChaosAgent_setStatusOfTaskZNode_rdh | /**
* sets given Status for Task Znode
*
* @param taskZNode
* ZNode to set status
* @param status
* Status value
*/
public void setStatusOfTaskZNode(String taskZNode, String status) {
LOG.info((("Setting status of Task ZNode: " + taskZNode) + " status : ") + status); zk.setData(taskZNode, status.getBytes(StandardCharsets.UTF_8), -1, setStatusOfTaskZNodeCallback, null);} | 3.26 |
hbase_ChaosAgent_register_rdh | /**
* registration of ChaosAgent by checking and creating necessary ZNodes.
*/
private void register() {
createIfZNodeNotExists(ChaosConstants.CHAOS_TEST_ROOT_ZNODE);
createIfZNodeNotExists(ChaosConstants.CHAOS_AGENT_REGISTRATION_EPIMERAL_ZNODE);
createIfZNodeNotExists(ChaosConstants.CHAOS_AGENT_STATUS_PERSISTENT_ZNODE);
createIfZNodeNotExists((ChaosConstants.CHAOS_AGENT_STATUS_PERSISTENT_ZNODE + ChaosConstants.ZNODE_PATH_SEPARATOR) + f0);
createEphemeralZNode((ChaosConstants.CHAOS_AGENT_REGISTRATION_EPIMERAL_ZNODE + ChaosConstants.ZNODE_PATH_SEPARATOR) + f0, new byte[0]);
} | 3.26 |
hbase_ChaosAgent_createEphemeralZNode_rdh | /**
* *
* Function to create EPHEMERAL ZNODE with given path and data as params.
*
* @param path
* Path at which Ephemeral ZNode to create
* @param data
* Data to put under ZNode
*/
public void createEphemeralZNode(String path, byte[] data) {
zk.create(path, data, Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL, createEphemeralZNodeCallback, data);
} | 3.26 |
hbase_ChaosAgent_createZKConnection_rdh | /**
* *
* Creates Connection with ZooKeeper.
*
* @throws IOException
* if something goes wrong
*/
private void createZKConnection(Watcher watcher) throws IOException {
if (watcher == null) {
zk = new ZooKeeper(quorum, ChaosConstants.SESSION_TIMEOUT_ZK, this);
} else {
zk = new ZooKeeper(quorum, ChaosConstants.SESSION_TIMEOUT_ZK, watcher);
}
LOG.info("ZooKeeper Connection created for ChaosAgent: " + f0);
} | 3.26 |
hbase_ChaosAgent_execWithRetries_rdh | /**
* Below function executes command with retries with given user. Uses LocalShell to execute a
* command.
*
* @param user
* user name, default none
* @param cmd
* Command to execute
* @return A pair of Exit Code and Shell output
* @throws IOException
* Exception while executing shell command
*/
private Pair<Integer, String> execWithRetries(String user, String cmd) throws IOException {
RetryCounter v9 = retryCounterFactory.create();
while (true) {
try {
return exec(user, cmd);
} catch (IOException e) {
retryOrThrow(v9, e, user, cmd);
}
try {
v9.sleepUntilNextRetry();
} catch (InterruptedException e) {
LOG.warn("Sleep Interrupted: " + e);
}
}
} | 3.26 |
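A minimal standalone sketch of the retry shape used above (the helper name is hypothetical; RetryCounter's shouldRetry/sleepUntilNextRetry are assumed to manage the attempt budget and backoff):

```java
import java.io.IOException;
import java.util.concurrent.Callable;

static <T> T withRetries(RetryCounter counter, Callable<T> op) throws Exception {
  while (true) {
    try {
      return op.call();
    } catch (IOException e) {
      if (!counter.shouldRetry()) {
        throw e; // attempt budget exhausted: surface the last failure
      }
      counter.sleepUntilNextRetry(); // backoff between attempts
    }
  }
}
```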
hbase_ChaosAgent_createZNode_rdh | /**
* *
* Function to create PERSISTENT ZNODE with given path and data given as params
*
* @param path
* Path at which ZNode to create
* @param data
* Data to put under ZNode
*/
public void createZNode(String path, byte[] data) {
zk.create(path, data, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT, createZNodeCallback, data);
} | 3.26 |
hbase_ChaosAgent_initChaosAgent_rdh | /**
* *
* sets global params and initiates connection with ZooKeeper then does registration.
*
* @param conf
* initial configuration to use
* @param quorum
* ZK Quorum
* @param agentName
* AgentName to use
*/
private void initChaosAgent(Configuration conf, String quorum, String agentName) {
this.conf = conf;
this.quorum = quorum;
this.f0 = agentName;
this.retryCounterFactory = new RetryCounterFactory(new RetryCounter.RetryConfig().setMaxAttempts(conf.getInt(ChaosConstants.RETRY_ATTEMPTS_KEY, ChaosConstants.DEFAULT_RETRY_ATTEMPTS)).setSleepInterval(conf.getLong(ChaosConstants.RETRY_SLEEP_INTERVAL_KEY, ChaosConstants.DEFAULT_RETRY_SLEEP_INTERVAL)));
try {
this.createZKConnection(null);
this.register();
} catch (IOException e) {
LOG.error("Error Creating Connection: " + e);
}
} | 3.26 |
hbase_ChaosAgent_getTasks_rdh | /**
* *
* Gets tasks for execution, basically sets a watch on its respective host's ZNode and waits for
* tasks to be assigned; also has a getTasksForAgentCallback which handles execution of the task.
*/
private void getTasks() {
LOG.info(("Getting Tasks for Agent: " + f0) + "and setting watch for new Tasks");
zk.getChildren((ChaosConstants.CHAOS_AGENT_STATUS_PERSISTENT_ZNODE + ChaosConstants.ZNODE_PATH_SEPARATOR) +
f0, newTaskCreatedWatcher, getTasksForAgentCallback, null);
} | 3.26 |
hbase_ZKPermissionWatcher_deleteNamespaceACLNode_rdh | /**
* *
* Delete the acl notify node of namespace
*/
public void deleteNamespaceACLNode(final String namespace) {
String v15 = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, ACL_NODE);
v15 = ZNodePaths.joinZNode(v15, PermissionStorage.NAMESPACE_PREFIX + namespace);
try {
ZKUtil.deleteNode(watcher, v15);
} catch (KeeperException.NoNodeException e) {
LOG.warn(("No acl notify node of namespace '" + namespace) + "'");
} catch (KeeperException e) {
LOG.error(("Failed deleting acl node of namespace '" + namespace) + "'", e);
watcher.abort("Failed deleting node " + v15, e);
}
} | 3.26 |
hbase_ZKPermissionWatcher_deleteTableACLNode_rdh | /**
* *
* Delete the acl notify node of table
*/
public void deleteTableACLNode(final TableName tableName) {
String zkNode = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, ACL_NODE);
zkNode = ZNodePaths.joinZNode(zkNode, tableName.getNameAsString());
try {
ZKUtil.deleteNode(watcher, zkNode);
} catch (KeeperException.NoNodeException e) {
LOG.warn(("No acl notify node of table '" + tableName) + "'");
} catch (KeeperException e) {
LOG.error(("Failed deleting acl node of table '" + tableName) + "'", e);
watcher.abort("Failed deleting node " + zkNode, e);
}
} | 3.26 |
hbase_HRegionServer_getFavoredNodesForRegion_rdh | /**
* Return the favored nodes for a region given its encoded name. Look at the comment around
* {@link #regionFavoredNodesMap} on why we convert to InetSocketAddress[] here.
*
* @param encodedRegionName
* the encoded region name.
* @return array of favored locations
*/
@Override
public InetSocketAddress[] getFavoredNodesForRegion(String encodedRegionName) {
return Address.toSocketAddress(regionFavoredNodesMap.get(encodedRegionName));
} | 3.26 |
hbase_HRegionServer_dumpRowLocks_rdh | /**
* Used by {@link RSDumpServlet} to generate debugging information.
*/
public void dumpRowLocks(final PrintWriter out) {
StringBuilder v5 = new StringBuilder();
for (HRegion region : getRegions()) {
if (region.getLockedRows().size() > 0) {
for (HRegion.RowLockContext rowLockContext : region.getLockedRows().values()) {
v5.setLength(0);
v5.append(region.getTableDescriptor().getTableName()).append(",").append(region.getRegionInfo().getEncodedName()).append(",");
v5.append(rowLockContext.toString());
out.println(v5);
}
}
}
} | 3.26 |
hbase_HRegionServer_getOnlineRegionsLocalContext_rdh | /**
* For tests, web ui and metrics. This method will only work if HRegionServer is in the same JVM
* as client; HRegion cannot be serialized to cross an rpc.
*/
public Collection<HRegion> getOnlineRegionsLocalContext() {
Collection<HRegion> regions = this.onlineRegions.values();
return Collections.unmodifiableCollection(regions);
} | 3.26 |
hbase_HRegionServer_closeUserRegions_rdh | /**
* Schedule closes on all user regions. Should be safe to call multiple times because it won't
* close regions that are already closed or that are closing.
*
* @param abort
* Whether we're running an abort.
*/
private void closeUserRegions(final boolean abort) {
this.onlineRegionsLock.writeLock().lock();
try {
for (Map.Entry<String, HRegion> e : this.onlineRegions.entrySet()) {
HRegion r = e.getValue();
if ((!r.getRegionInfo().isMetaRegion()) && r.isAvailable()) {
// Don't update zk with this close transition; pass false.
closeRegionIgnoreErrors(r.getRegionInfo(), abort);
}
}
} finally {
this.onlineRegionsLock.writeLock().unlock();
}
} | 3.26 |
hbase_HRegionServer_handleReportForDutyResponse_rdh | /**
* Run init. Sets up wal and starts up all server threads.
*
* @param c
* Extra configuration.
*/
protected void handleReportForDutyResponse(final RegionServerStartupResponse c) throws IOException {
try {
boolean v77 = false;
for (NameStringPair e : c.getMapEntriesList()) {
String key = e.getName();
// The hostname the master sees us as.
if (key.equals(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER)) {
String hostnameFromMasterPOV = e.getValue();
this.serverName =
ServerName.valueOf(hostnameFromMasterPOV, rpcServices.getSocketAddress().getPort(), this.startcode);
String expectedHostName = rpcServices.getSocketAddress().getHostName();
// if Master use-ip is enabled, RegionServer use-ip will be enabled by default even if it
// is set to disable. so we will use the ip of the RegionServer to compare with the
// hostname passed by the Master, see HBASE-27304 for details.
if ((StringUtils.isBlank(useThisHostnameInstead) && getActiveMaster().isPresent()) && InetAddresses.isInetAddress(getActiveMaster().get().getHostname())) {
expectedHostName = rpcServices.getSocketAddress().getAddress().getHostAddress();
}
boolean isHostnameConsist = (StringUtils.isBlank(useThisHostnameInstead)) ? hostnameFromMasterPOV.equals(expectedHostName) : hostnameFromMasterPOV.equals(useThisHostnameInstead);
if (!isHostnameConsist) {
String msg = (("Master passed us a different hostname to use; was=" + (StringUtils.isBlank(useThisHostnameInstead) ? rpcServices.getSocketAddress().getHostName() : this.useThisHostnameInstead)) + ", but now=") + hostnameFromMasterPOV;
LOG.error(msg);
throw new IOException(msg);
}
continue;
}
String value = e.getValue();
if (key.equals(HConstants.HBASE_DIR)) {
if ((value != null) && (!value.equals(conf.get(HConstants.HBASE_DIR)))) {
v77 = true;
}
}
if (LOG.isDebugEnabled()) {
LOG.debug((("Config from master: " + key) + "=") + value);
}
this.conf.set(key, value);
}
// Set our ephemeral znode up in zookeeper now we have a name.
createMyEphemeralNode();
if (v77) {
// initialize file system by the config fs.defaultFS and hbase.rootdir from master
initializeFileSystem();
}
// hack! Maps DFSClient => RegionServer for logs. HDFS made this
// config param for task trackers, but we can piggyback off of it.
if (this.conf.get("mapreduce.task.attempt.id") == null) {
this.conf.set("mapreduce.task.attempt.id", "hb_rs_" + this.serverName.toString());
}
// Save it in a file; this will allow us to see if we crash.
ZNodeClearer.writeMyEphemeralNodeOnDisk(getMyEphemeralNodePath());
// This call sets up an initialized replication and WAL. Later we start it up.
setupWALAndReplication();
// Init in here rather than in constructor after thread name has been set
final MetricsTable metricsTable = new MetricsTable(new MetricsTableWrapperAggregateImpl(this));
this.metricsRegionServerImpl = new MetricsRegionServerWrapperImpl(this);
this.metricsRegionServer = new MetricsRegionServer(metricsRegionServerImpl, conf, metricsTable);
// Now that we have a metrics source, start the pause monitor
this.pauseMonitor = new JvmPauseMonitor(conf, m3().getMetricsSource());
pauseMonitor.start();
// There is a rare case where we do NOT want services to start. Check config.
if (getConfiguration().getBoolean("hbase.regionserver.workers", true)) {
startServices();
}
// In here we start up the replication Service. Above we initialized it. TODO: reconcile
// or make sense of it.
startReplicationService();
// Set up ZK
LOG.info((((("Serving as " + this.serverName) + ", RpcServer on ") + rpcServices.getSocketAddress()) + ", sessionid=0x") + Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()));
// Wake up anyone waiting for this server to online
synchronized(online) {
online.set(true);
online.notifyAll();
}
} catch (Throwable e) {
stop("Failed initialization");
throw convertThrowableToIOE(cleanup(e, "Failed init"), "Region server startup failed");
} finally {
sleeper.skipSleepCycle();
}
} | 3.26 |
hbase_HRegionServer_cleanup_rdh | /**
* Cleanup after Throwable caught invoking method. Converts <code>t</code> to IOE if it isn't
* already.
*
* @param t
* Throwable
* @param msg
* Message to log in error. Can be null.
* @return Throwable converted to an IOE; methods can only let out IOEs.
*/
private Throwable cleanup(final Throwable t, final String msg) {
// Don't log as error if NSRE; NSRE is 'normal' operation.
if (t instanceof NotServingRegionException) {
LOG.debug("NotServingRegionException; " + t.getMessage());
return t;
}
Throwable e = (t instanceof RemoteException) ? ((RemoteException) t).unwrapRemoteException() : t;
if (msg == null) {
LOG.error("", e);
} else {
LOG.error(msg, e);
}
if (!rpcServices.checkOOME(t)) {
checkFileSystem();
}
return t;
} | 3.26 |
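The unwrap step above is what makes server-side exception types visible again: Hadoop RPC delivers them wrapped in RemoteException. A minimal standalone sketch of the same pattern (only the Hadoop IPC class is assumed; the checkOOME/checkFileSystem follow-ups are omitted):

import org.apache.hadoop.ipc.RemoteException;

final class UnwrapDemo {
  // Mirrors the unwrap step in cleanup(): if the Throwable is a RemoteException,
  // recover the original exception class declared by the server; otherwise use it as-is.
  static Throwable unwrapForLogging(Throwable t) {
    return (t instanceof RemoteException) ? ((RemoteException) t).unwrapRemoteException() : t;
  }
}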
hbase_HRegionServer_stopServiceThreads_rdh | /**
* Wait on all threads to finish. Presumption is that all closes and stops have already been
* called.
*/
protected void stopServiceThreads() {
// clean up the scheduled chores
stopChoreService();
if (bootstrapNodeManager != null) {
  bootstrapNodeManager.stop();
}
if (this.cacheFlusher != null) {
this.cacheFlusher.shutdown();
}
if (this.walRoller != null) {
this.walRoller.close();
}
if (this.compactSplitThread != null) {
this.compactSplitThread.join();
}
stopExecutorService();
if (sameReplicationSourceAndSink && (this.replicationSourceHandler != null)) {
this.replicationSourceHandler.stopReplicationService();
} else {
if (this.replicationSourceHandler != null) {
this.replicationSourceHandler.stopReplicationService();
}
if (this.replicationSinkHandler != null) {
this.replicationSinkHandler.stopReplicationService();
}
}
} | 3.26 |
hbase_HRegionServer_getReplicationSinkService_rdh | /**
 * Returns the object that implements the replication sink service.
*/
public ReplicationSinkService getReplicationSinkService() {
return replicationSinkHandler;
} | 3.26
hbase_HRegionServer_triggerFlushInPrimaryRegion_rdh | /**
* Trigger a flush in the primary region replica if this region is a secondary replica. Does not
* block this thread. See RegionReplicaFlushHandler for details.
*/
private void triggerFlushInPrimaryRegion(final HRegion region) {
if (ServerRegionReplicaUtil.isDefaultReplica(region.getRegionInfo())) {
  return;
}
TableName tn = region.getTableDescriptor().getTableName();
// If memstore replication is not set up, we do not have to wait for observing a flush event
// from the primary before starting to serve reads, because gaps from replication are not
// applicable; this logic is from
// TableDescriptorBuilder.ModifyableTableDescriptor.setRegionMemStoreReplication by HBASE-13063.
if (!ServerRegionReplicaUtil.isRegionReplicaReplicationEnabled(region.conf, tn)
  || !ServerRegionReplicaUtil.isRegionReplicaWaitForPrimaryFlushEnabled(region.conf)
  || !region.getTableDescriptor().hasRegionMemStoreReplication()) {
  region.setReadsEnabled(true);
  return;
}
region.setReadsEnabled(false); // Disable reads before marking the region as opened.
// RegionReplicaFlushHandler might reset this.
// Submit it to be handled by one of the handlers so that we do not block OpenRegionHandler.
if (this.executorService != null) {
  this.executorService.submit(new RegionReplicaFlushHandler(this, region));
} else {
  LOG.info("Executor is null; not running flush of primary region replica for {}",
    region.getRegionInfo());
}
} | 3.26
hbase_HRegionServer_getRetryPauseTime_rdh | /**
 * Return pause time configured in {@link HConstants#HBASE_RPC_SHORTOPERATION_RETRY_PAUSE_TIME}
*
* @return pause time
*/
@InterfaceAudience.Private
public long getRetryPauseTime() {
return this.retryPauseTime;
} | 3.26 |
hbase_HRegionServer_checkCodecs_rdh | /**
* Run test on configured codecs to make sure supporting libs are in place.
*/
private static void checkCodecs(final Configuration c) throws IOException {
// check to see if the codec list is available:
String[] codecs = c.getStrings(REGIONSERVER_CODEC, (String[]) null);
if (codecs == null) {
return;
}
for (String codec : codecs) {
if (!CompressionTest.testCompression(codec)) {
throw new IOException(("Compression codec " + codec) + " not supported, aborting RS construction");
}
}
} | 3.26 |
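As a hedged illustration of how this check is driven: a deployment can pin required codecs so a regionserver aborts construction when supporting libraries are missing. The key literal below is an assumption mirroring the REGIONSERVER_CODEC constant ("hbase.regionserver.codecs" in recent versions); verify it against your HBase release.

import org.apache.hadoop.conf.Configuration;

final class CodecCheckExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Assumed key for REGIONSERVER_CODEC; verify against your HBase version.
    conf.setStrings("hbase.regionserver.codecs", "snappy", "lz4");
    // checkCodecs(conf) would now run CompressionTest on each entry and throw
    // IOException if, for example, native snappy support is absent.
  }
}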
hbase_HRegionServer_setupWALAndReplication_rdh | /**
* Setup WAL log and replication if enabled. Replication setup is done in here because it wants to
* be hooked up to WAL.
*/
private void setupWALAndReplication() throws IOException {
WALFactory factory = new WALFactory(conf, serverName, this);
// TODO Replication make assumptions here based on the default filesystem impl
Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
String logName = AbstractFSWALProvider.getWALDirectoryName(this.serverName.toString());
Path logDir = new Path(walRootDir, logName);
LOG.debug("logDir={}", logDir);
if (this.walFs.exists(logDir)) {
throw new RegionServerRunningException("Region server has already created directory at " + this.serverName.toString());
}
// Always create wal directory as now we need this when master restarts to find out the live
// region servers.
if (!this.walFs.mkdirs(logDir)) {
throw new IOException("Can not create wal directory " + logDir);
}
// Instantiate replication if replication enabled. Pass it the log directories.
createNewReplicationInstance(conf, this, this.walFs, logDir, oldLogDir, factory);
WALActionsListener walEventListener = getWALEventTrackerListener(conf);
if ((walEventListener != null) && (factory.getWALProvider() != null)) {
factory.getWALProvider().addWALActionsListener(walEventListener);
}
this.walFactory = factory;
} | 3.26 |
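For orientation, a sketch of the directory layout this sets up, with illustrative names (the oldWALs/WALs literals are assumptions based on HConstants defaults, and the server-name format is an example):

// Under the WAL root (hbase.wal.dir, often the same filesystem as hbase.rootdir):
//   <walRootDir>/oldWALs                                      <- HConstants.HREGION_OLDLOGDIR_NAME
//   <walRootDir>/WALs/host1.example.com,16020,1700000000000   <- per-server logDir created above
// A pre-existing per-server directory means another process already registered under
// this server name, hence the RegionServerRunningException.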
hbase_HRegionServer_getRegions_rdh | /**
* Gets the online regions of the specified table. This method looks at the in-memory
* onlineRegions. It does not go to <code>hbase:meta</code>. Only returns <em>online</em> regions.
* If a region on this table has been closed during a disable, etc., it will not be included in
 * the returned list. So, the returned list may not necessarily be ALL regions in this table; it is
 * all the ONLINE regions in the table.
*
* @param tableName
* table to limit the scope of the query
* @return Online regions from <code>tableName</code>
*/
@Override
public List<HRegion> getRegions(TableName tableName) {
List<HRegion> tableRegions = new ArrayList<>();
synchronized(this.onlineRegions) {
for (HRegion region : this.onlineRegions.values()) {
RegionInfo regionInfo = region.getRegionInfo();
if (regionInfo.getTable().equals(tableName)) {
tableRegions.add(region);
}
}
}
return tableRegions;
} | 3.26 |
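A small usage sketch from, say, a test that holds an HRegionServer reference; the accounting is snapshot-only, because regions may open or close right after the synchronized copy is taken:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;

final class OnlineRegionStats {
  // Sum store file counts across the ONLINE regions of one table.
  static int countStoreFiles(HRegionServer rs, TableName table) {
    int storeFiles = 0;
    for (HRegion region : rs.getRegions(table)) {
      for (HStore store : region.getStores()) {
        storeFiles += store.getStorefilesCount();
      }
    }
    return storeFiles;
  }
}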
hbase_HRegionServer_reportRegionSizesForQuotas_rdh | /**
* Reports the given map of Regions and their size on the filesystem to the active Master.
*
* @param regionSizeStore
* The store containing region sizes
* @return false if FileSystemUtilizationChore should pause reporting to master. true otherwise
*/
public boolean reportRegionSizesForQuotas(RegionSizeStore regionSizeStore) {
RegionServerStatusService.BlockingInterface rss = f0;
if (rss == null) {
// the current server could be stopping.
LOG.trace("Skipping Region size report to HMaster as stub is null");
return true;
}
try {
buildReportAndSend(rss, regionSizeStore);
} catch (ServiceException se) {
IOException ioe = ProtobufUtil.getRemoteException(se);
if (ioe instanceof PleaseHoldException) {
LOG.trace("Failed to report region sizes to Master because it is initializing." + " This will be retried.", ioe);
// The Master is coming up. Will retry the report later. Avoid re-creating the stub.
return true;
}
if (f0 == rss) {
  f0 = null;
}
createRegionServerStatusStub(true);
if (ioe instanceof DoNotRetryIOException) {
DoNotRetryIOException doNotRetryEx = ((DoNotRetryIOException) (ioe));
if (doNotRetryEx.getCause() != null) {
  Throwable t = doNotRetryEx.getCause();
if (t instanceof UnsupportedOperationException) {
LOG.debug("master doesn't support ReportRegionSpaceUse, pause before retrying");
return false;
}
}
}
LOG.debug("Failed to report region sizes to Master. This will be retried.", ioe);
}
return true;
} | 3.26 |
hbase_HRegionServer_getUseThisHostnameInstead_rdh | // HMaster should override this method to load the specific config for master
@Override
protected String getUseThisHostnameInstead(Configuration conf) throws IOException {
String hostname = conf.get(UNSAFE_RS_HOSTNAME_KEY);
if (conf.getBoolean(UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY, false)) {
  if (!StringUtils.isBlank(hostname)) {
    String msg = UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY + " and " + UNSAFE_RS_HOSTNAME_KEY
      + " are mutually exclusive. Do not set " + UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY
      + " to true while " + UNSAFE_RS_HOSTNAME_KEY + " is used";
    throw new IOException(msg);
  } else {
    return rpcServices.getSocketAddress().getHostName();
  }
} else {
  return hostname;
}
} | 3.26
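A hedged configuration sketch of the two knobs involved; the literal key names ("hbase.unsafe.regionserver.hostname" and "hbase.unsafe.regionserver.hostname.disable.master.reversedns") are assumptions that should be checked against the constants in your version:

import org.apache.hadoop.conf.Configuration;

final class HostnameConfigExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Either pin the hostname this regionserver reports...
    conf.set("hbase.unsafe.regionserver.hostname", "rs1.example.com");
    // ...or have it use its own resolved name instead of the master's reverse DNS.
    // Never both: getUseThisHostnameInstead() above rejects the combination.
    // conf.setBoolean("hbase.unsafe.regionserver.hostname.disable.master.reversedns", true);
  }
}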
hbase_HRegionServer_main_rdh | /**
*
* @see org.apache.hadoop.hbase.regionserver.HRegionServerCommandLine
*/
public static void main(String[] args) {
LOG.info("STARTING executorService " + HRegionServer.class.getSimpleName());
VersionInfo.logVersion();
Configuration conf = HBaseConfiguration.create();
@SuppressWarnings("unchecked")
Class<? extends HRegionServer> regionServerClass = ((Class<? extends HRegionServer>) (conf.getClass(HConstants.REGION_SERVER_IMPL, HRegionServer.class)));
new HRegionServerCommandLine(regionServerClass).doMain(args);
} | 3.26 |
hbase_HRegionServer_buildReportAndSend_rdh | /**
* Builds the region size report and sends it to the master. Upon successful sending of the
* report, the region sizes that were sent are marked as sent.
*
* @param rss
* The stub to send to the Master
* @param regionSizeStore
* The store containing region sizes
*/
private void buildReportAndSend(RegionServerStatusService.BlockingInterface rss, RegionSizeStore regionSizeStore) throws ServiceException {
RegionSpaceUseReportRequest request =
  buildRegionSpaceUseReportRequest(Objects.requireNonNull(regionSizeStore));
rss.reportRegionSpaceUse(null, request);
// Record the number of size reports sent
if (metricsRegionServer != null) {
metricsRegionServer.incrementNumRegionSizeReportsSent(regionSizeStore.size());
}
} | 3.26 |
hbase_HRegionServer_walRollRequestFinished_rdh | /**
* For testing
*
 * @return whether all WAL roll requests have finished for this regionserver
*/
@InterfaceAudience.Private
public boolean walRollRequestFinished() {
return this.walRoller.walRollFinished();
} | 3.26 |
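A test-side sketch of how this hook tends to be consumed; the roller accessor is an assumption (versions expose it differently), so treat this as illustrative rather than a fixed API:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.regionserver.HRegionServer;

final class WalRollTestHelper {
  // Request rolls, then block the test until the regionserver reports them finished.
  static void rollAndWait(HBaseTestingUtility testUtil, HRegionServer rs) throws Exception {
    rs.getWalRoller().requestRollAll(); // assumed accessor; check your version
    testUtil.waitFor(30_000, () -> rs.walRollRequestFinished());
  }
}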
hbase_HRegionServer_getFlushRequester_rdh | /**
* Returns reference to FlushRequester
*/
@Override
public FlushRequester getFlushRequester() {
return this.cacheFlusher;
} | 3.26 |
hbase_HRegionServer_submitRegionProcedure_rdh | /**
 * Will ignore the open/close region procedures which were already submitted or executed. When the
 * master had an unfinished open/close region procedure and restarted, the new active master may
 * send duplicate open/close region requests to the regionserver. The open/close request is
 * submitted to a thread pool and executed, so we first need a cache for submitted open/close
 * region procedures. After the open/close region request has executed and the region transition
 * has been reported successfully, we cache it in the executed region procedures cache; see
 * {@link #finishRegionProcedure(long)}. After the region transition is reported successfully, the
 * master will not send the open/close region request to the regionserver again. We assume that an
 * ongoing duplicate open/close region request should not be delayed by more than 600 seconds, so
 * the executed region procedures cache expires after 600 seconds. See HBASE-22404 for more
 * details.
*
* @param procId
* the id of the open/close region procedure
* @return true if the procedure can be submitted.
*/
boolean submitRegionProcedure(long procId) {
if (procId == (-1)) {
return true;
}
// Ignore the region procedures which already submitted.
Long previous = submittedRegionProcedures.putIfAbsent(procId, procId);
if (previous != null) {
LOG.warn("Received procedure pid={}, which already submitted, just ignore it", procId);
return false;
}
// Ignore the region procedures which already executed.
if (executedRegionProcedures.getIfPresent(procId) != null) {
LOG.warn("Received procedure pid={}, which already executed, just ignore it", procId);
return false;
}
return true;
} | 3.26 |
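The two-cache idea generalizes: a ConcurrentMap guards against submitting the same work twice concurrently, while an expiring cache remembers recently finished work. A standalone sketch of that pattern using Guava, with the 600-second TTL from the description above (an illustration, not the HBase code itself):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

final class ProcedureDedup {
  private final ConcurrentMap<Long, Long> submitted = new ConcurrentHashMap<>();
  private final Cache<Long, Long> executed =
    CacheBuilder.newBuilder().expireAfterWrite(600, TimeUnit.SECONDS).build();

  boolean trySubmit(long procId) {
    if (executed.getIfPresent(procId) != null) {
      return false; // Finished within the TTL window; the retry is stale.
    }
    // putIfAbsent is the atomic gate: only the first submitter sees null here.
    return submitted.putIfAbsent(procId, procId) == null;
  }

  void finish(long procId) {
    executed.put(procId, procId);
    submitted.remove(procId);
  }
}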
hbase_HRegionServer_getReplicationSourceService_rdh | /**
 * Returns the object that implements the replication source service.
*/
@Override
public ReplicationSourceService getReplicationSourceService() {
return replicationSourceHandler;
} | 3.26 |
hbase_HRegionServer_createRegionLoad_rdh | /**
*
* @param r
* Region to get RegionLoad for.
* @param regionLoadBldr
* the RegionLoad.Builder, can be null
* @param regionSpecifier
* the RegionSpecifier.Builder, can be null
* @return RegionLoad instance.
*/
RegionLoad createRegionLoad(final HRegion r, RegionLoad.Builder regionLoadBldr,
    RegionSpecifier.Builder regionSpecifier) throws IOException {
  byte[] name = r.getRegionInfo().getRegionName();
  String regionEncodedName = r.getRegionInfo().getEncodedName();
  int stores = 0;
  int storefiles = 0;
  int storeRefCount = 0;
  int maxCompactedStoreFileRefCount = 0;
  long storeUncompressedSize = 0L;
  long storefileSize = 0L;
  long storefileIndexSize = 0L;
  long rootLevelIndexSize = 0L;
  long totalStaticIndexSize = 0L;
  long totalStaticBloomSize = 0L;
  long totalCompactingKVs = 0L;
  long currentCompactedKVs = 0L;
  long totalRegionSize = 0L;
  List<HStore> storeList = r.getStores();
  stores += storeList.size();
  for (HStore store : storeList) {
    storefiles += store.getStorefilesCount();
    int currentStoreRefCount = store.getStoreRefCount();
    storeRefCount += currentStoreRefCount;
    int currentMaxCompactedStoreFileRefCount = store.getMaxCompactedStoreFileRefCount();
    maxCompactedStoreFileRefCount =
      Math.max(maxCompactedStoreFileRefCount, currentMaxCompactedStoreFileRefCount);
    storeUncompressedSize += store.getStoreSizeUncompressed();
    storefileSize += store.getStorefilesSize();
    totalRegionSize += store.getHFilesSize();
    // TODO: storefileIndexSizeKB is same with rootLevelIndexSizeKB?
    storefileIndexSize += store.getStorefilesRootLevelIndexSize();
    CompactionProgress progress = store.getCompactionProgress();
    if (progress != null) {
      totalCompactingKVs += progress.getTotalCompactingKVs();
      currentCompactedKVs += progress.currentCompactedKVs;
    }
    rootLevelIndexSize += store.getStorefilesRootLevelIndexSize();
    totalStaticIndexSize += store.getTotalStaticIndexSize();
    totalStaticBloomSize += store.getTotalStaticBloomSize();
  }
  int memstoreSizeMB = roundSize(r.getMemStoreDataSize(), unitMB);
  int storeUncompressedSizeMB = roundSize(storeUncompressedSize, unitMB);
  int storefileSizeMB = roundSize(storefileSize, unitMB);
  int storefileIndexSizeKB = roundSize(storefileIndexSize, unitKB);
  int rootLevelIndexSizeKB = roundSize(rootLevelIndexSize, unitKB);
  int totalStaticIndexSizeKB = roundSize(totalStaticIndexSize, unitKB);
  int totalStaticBloomSizeKB = roundSize(totalStaticBloomSize, unitKB);
  int regionSizeMB = roundSize(totalRegionSize, unitMB);
  final MutableFloat currentRegionCachedRatio = new MutableFloat(0.0F);
  computeIfPersistentBucketCache(bc -> {
    if (bc.getRegionCachedInfo().containsKey(regionEncodedName)) {
      currentRegionCachedRatio.setValue(regionSizeMB == 0 ? 0.0F
        : (float) roundSize(bc.getRegionCachedInfo().get(regionEncodedName), unitMB)
          / regionSizeMB);
    }
  });
  HDFSBlocksDistribution hdfsBd = r.getHDFSBlocksDistribution();
  float dataLocality = hdfsBd.getBlockLocalityIndex(serverName.getHostname());
  float dataLocalityForSsd = hdfsBd.getBlockLocalityIndexForSsd(serverName.getHostname());
  long blocksTotalWeight = hdfsBd.getUniqueBlocksTotalWeight();
  long blocksLocalWeight = hdfsBd.getBlocksLocalWeight(serverName.getHostname());
  long blocksLocalWithSsdWeight = hdfsBd.getBlocksLocalWithSsdWeight(serverName.getHostname());
  if (regionLoadBldr == null) {
    regionLoadBldr = RegionLoad.newBuilder();
  }
  if (regionSpecifier == null) {
    regionSpecifier = RegionSpecifier.newBuilder();
  }
  regionSpecifier.setType(RegionSpecifierType.REGION_NAME);
  regionSpecifier.setValue(UnsafeByteOperations.unsafeWrap(name));
  regionLoadBldr.setRegionSpecifier(regionSpecifier.build())
    .setStores(stores)
    .setStorefiles(storefiles)
    .setStoreRefCount(storeRefCount)
    .setMaxCompactedStoreFileRefCount(maxCompactedStoreFileRefCount)
    .setStoreUncompressedSizeMB(storeUncompressedSizeMB)
    .setStorefileSizeMB(storefileSizeMB)
    .setMemStoreSizeMB(memstoreSizeMB)
    .setStorefileIndexSizeKB(storefileIndexSizeKB)
    .setRootIndexSizeKB(rootLevelIndexSizeKB)
    .setTotalStaticIndexSizeKB(totalStaticIndexSizeKB)
    .setTotalStaticBloomSizeKB(totalStaticBloomSizeKB)
    .setReadRequestsCount(r.getReadRequestsCount())
    .setCpRequestsCount(r.getCpRequestsCount())
    .setFilteredReadRequestsCount(r.getFilteredReadRequestsCount())
    .setWriteRequestsCount(r.getWriteRequestsCount())
    .setTotalCompactingKVs(totalCompactingKVs)
    .setCurrentCompactedKVs(currentCompactedKVs)
    .setDataLocality(dataLocality)
    .setDataLocalityForSsd(dataLocalityForSsd)
    .setBlocksLocalWeight(blocksLocalWeight)
    .setBlocksLocalWithSsdWeight(blocksLocalWithSsdWeight)
    .setBlocksTotalWeight(blocksTotalWeight)
    .setCompactionState(ProtobufUtil.createCompactionStateForRegionLoad(r.getCompactionState()))
    .setLastMajorCompactionTs(r.getOldestHfileTs(true))
    .setRegionSizeMB(regionSizeMB)
    .setCurrentRegionCachedRatio(currentRegionCachedRatio.floatValue());
  r.setCompleteSequenceId(regionLoadBldr);
  return regionLoadBldr.build();
} | 3.26
hbase_HRegionServer_closeRegion_rdh | /**
* Close asynchronously a region, can be called from the master or internally by the regionserver
* when stopping. If called from the master, the region will update the status.
* <p>
* If an opening was in progress, this method will cancel it, but will not start a new close. The
 * coprocessors are not called in this case. A NotServingRegionException is thrown.
* </p>
* <p>
* If a close was in progress, this new request will be ignored, and an exception thrown.
* </p>
* <p>
 * Provides an additional flag to indicate if this region's blocks should be evicted from the cache.
* </p>
*
* @param encodedName
* Region to close
* @param abort
* True if we are aborting
* @param destination
 * Where the Region is being moved to... may be null if unknown.
* @return True if closed a region.
* @throws NotServingRegionException
* if the region is not online
*/
protected boolean closeRegion(String encodedName, final boolean abort,
final ServerName destination) throws NotServingRegionException {
// Check for permissions to close.
HRegion actualRegion = this.getRegion(encodedName);
// Can be null if we're calling close on a region that's not online
if ((actualRegion != null) && (actualRegion.getCoprocessorHost() != null)) {
try {
actualRegion.getCoprocessorHost().preClose(false);
} catch (IOException exp) {
LOG.warn("Unable to close region: the coprocessor launched an error ", exp);
return false;
}
}
// previous can come back 'null' if not in map.
final Boolean previous = this.regionsInTransitionInRS.putIfAbsent(Bytes.toBytes(encodedName), Boolean.FALSE);
if (Boolean.TRUE.equals(previous)) {
LOG.info((("Received CLOSE for the region:" + encodedName) + " , which we are already ") + "trying to OPEN. Cancelling OPENING.");
if (!regionsInTransitionInRS.replace(Bytes.toBytes(encodedName), previous, Boolean.FALSE)) {
// The replace failed. That should be an exceptional case, but theoretically it can happen.
// We're going to try to do a standard close then.
LOG.warn((("The opening for region " + encodedName) + " was done before we could cancel it.") + " Doing a standard close now");return closeRegion(encodedName, abort, destination);
}
// Let's get the region from the online region list again
actualRegion = this.getRegion(encodedName);
if (actualRegion == null) {
// If already online, we still need to close it.
LOG.info("The opening previously in progress has been cancelled by a CLOSE request.");
// The master deletes the znode when it receives this exception.
throw new NotServingRegionException(("The region " + encodedName) + " was opening but not yet served. Opening is cancelled.");
}
} else if (previous == null) {
LOG.info("Received CLOSE for {}", encodedName);
} else if (Boolean.FALSE.equals(previous)) {
LOG.info(("Received CLOSE for the region: " + encodedName) + ", which we are already trying to CLOSE, but not completed yet");
return true;
}
if (actualRegion == null) {
LOG.debug("Received CLOSE for a region which is not online, and we're not opening.");
this.regionsInTransitionInRS.remove(Bytes.toBytes(encodedName));
// The master deletes the znode when it receives this exception.
throw new NotServingRegionException(("The region " + encodedName) + " is not online, and is not opening.");
}
CloseRegionHandler crh;
final RegionInfo hri = actualRegion.getRegionInfo();
if (hri.isMetaRegion()) {
crh = new CloseMetaHandler(this, this, hri, abort);
} else {
crh = new CloseRegionHandler(this, this, hri, abort, destination);
}
this.executorService.submit(crh);
return true;
} | 3.26 |
hbase_HRegionServer_getBlockCache_rdh | /**
 * May be empty if this is a master which does not carry tables.
*
* @return The block cache instance used by the regionserver.
*/
@Override
public Optional<BlockCache> getBlockCache() {
return Optional.ofNullable(this.blockCache);
} | 3.26 |
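Callers are expected to handle the empty case rather than null-check. A small sketch, assuming getCurrentSize() is the occupied-bytes accessor on the BlockCache interface:

import java.util.Optional;
import org.apache.hadoop.hbase.io.hfile.BlockCache;

final class CacheStats {
  // Masters that carry no tables return Optional.empty(), so default to 0 instead of NPEing.
  static long cachedBytes(Optional<BlockCache> cache) {
    return cache.map(BlockCache::getCurrentSize).orElse(0L);
  }
}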
hbase_HRegionServer_roundSize_rdh | // Round the size with KB or MB.
// A trick here is that if the sizeInBytes is less than sizeUnit, we will round the size to 1
// instead of 0 if it is not 0, to avoid some schedulers think the region has no data. See
// HBASE-26340 for more details on why this is important.
private static int roundSize(long sizeInByte, int sizeUnit) {
if (sizeInByte == 0) {
return 0;
} else if (sizeInByte < sizeUnit) {
return 1;
} else {
return ((int) (Math.min(sizeInByte / sizeUnit, Integer.MAX_VALUE)));
}
} | 3.26 |
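A self-contained rendering of the rounding rule with the values that motivate the HBASE-26340 special case; the 1024-based unit constants are assumptions mirroring unitKB/unitMB:

public final class RoundSizeDemo {
  static final int UNIT_KB = 1024;
  static final int UNIT_MB = 1024 * 1024;

  static int roundSize(long sizeInByte, int sizeUnit) {
    if (sizeInByte == 0) {
      return 0;
    } else if (sizeInByte < sizeUnit) {
      return 1; // Non-empty but tiny: report 1, never 0, so schedulers still see data.
    } else {
      return (int) Math.min(sizeInByte / sizeUnit, Integer.MAX_VALUE);
    }
  }

  public static void main(String[] args) {
    System.out.println(roundSize(0, UNIT_MB));            // 0: truly empty region
    System.out.println(roundSize(37_000, UNIT_MB));       // 1: small but not empty
    System.out.println(roundSize(5L * UNIT_MB, UNIT_MB)); // 5: ordinary rounding
  }
}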
hbase_HRegionServer_convertThrowableToIOE_rdh | /**
*
* @param msg
* Message to put in new IOE if passed <code>t</code> is not an IOE
* @return Make <code>t</code> an IOE if it isn't already.
*/
private IOException convertThrowableToIOE(final Throwable t, final String msg) {
  return t instanceof IOException ? (IOException) t
    : (msg == null || msg.length() == 0) ? new IOException(t) : new IOException(msg, t);
} | 3.26
hbase_HRegionServer_run_rdh | /**
* The HRegionServer sticks in this loop until closed.
*/
@Override
public void run() {
if (isStopped()) {
LOG.info("Skipping run; stopped");
return;
}
try {
// Do pre-registration initializations; zookeeper, lease threads, etc.
preRegistrationInitialization();
} catch (Throwable e) {
abort("Fatal exception during initialization", e);
}
try {
if ((!isStopped()) && (!isAborted())) {
installShutdownHook();
// Initialize the RegionServerCoprocessorHost now that our ephemeral
// node was created, in case any coprocessors want to use ZooKeeper
this.rsHost = new RegionServerCoprocessorHost(this, this.conf);
// Try and register with the Master; tell it we are here. Break if server is stopped or
// the clusterup flag is down or hdfs went wacky. Once registered successfully, go ahead and
// start up all Services. Use RetryCounter to get backoff in case Master is struggling to
// come up.
LOG.debug("About to register with Master.");
TraceUtil.trace(() -> {
RetryCounterFactory rcf = new RetryCounterFactory(Integer.MAX_VALUE, this.sleeper.getPeriod(), (1000 * 60) * 5);
RetryCounter rc = rcf.create();
while (keepLooping()) {
RegionServerStartupResponse w = reportForDuty();
if (w == null) {
long sleepTime = rc.getBackoffTimeAndIncrementAttempts();
LOG.warn("reportForDuty failed; sleeping {} ms and then retrying.", sleepTime);
this.sleeper.sleep(sleepTime);
} else {
handleReportForDutyResponse(w);
break;
}
}
}, "HRegionServer.registerWithMaster");
}
if ((!isStopped()) && isHealthy()) {
  TraceUtil.trace(() -> {
// start the snapshot handler and other procedure handlers,
// since the server is ready to run
if (this.rspmHost != null) {
this.rspmHost.start();
}
// Start the Quota Manager
if (this.rsQuotaManager != null) {
rsQuotaManager.start(getRpcServer().getScheduler());
}
if (this.rsSpaceQuotaManager != null) {
this.rsSpaceQuotaManager.start();
}
}, "HRegionServer.startup");
}
// We registered with the Master. Go into run mode.
long lastMsg = EnvironmentEdgeManager.currentTime();
long oldRequestCount = -1;
// The main run loop.
while ((!isStopped()) && isHealthy()) {
if (!isClusterUp()) {
if (onlineRegions.isEmpty()) {
stop("Exiting; cluster shutdown set and not carrying any regions");
} else if (!this.stopping) {
this.stopping = true;
LOG.info("Closing user regions");
closeUserRegions(isAborted());
} else {
boolean allUserRegionsOffline = areAllUserRegionsOffline();
if (allUserRegionsOffline) {
// Set stopped if no more write requests to meta tables
// since last time we went around the loop. Any open
// meta regions will be closed on our way out.
if (oldRequestCount == getWriteRequestCount()) {
stop("Stopped; only catalog regions remaining online");
break;
}
oldRequestCount = getWriteRequestCount();
} else {
// Make sure all regions have been closed -- some regions may
// have not got it because we were splitting at the time of
// the call to closeUserRegions.
closeUserRegions(this.abortRequested.get());
}
LOG.debug("Waiting on " + getOnlineRegionsAsPrintableString());
}
}
long now = EnvironmentEdgeManager.currentTime();
if ((now - lastMsg) >= msgInterval) {
tryRegionServerReport(lastMsg, now);
lastMsg = EnvironmentEdgeManager.currentTime();
}
if ((!isStopped()) && (!isAborted())) {
this.sleeper.sleep();
}
} // while
} catch (Throwable t) {
if (!rpcServices.checkOOME(t)) {
String prefix = (t instanceof YouAreDeadException) ? "" : "Unhandled: ";
abort(prefix + t.getMessage(), t);
}
}
final Span span = TraceUtil.createSpan("HRegionServer exiting main loop");
try (Scope ignored = span.makeCurrent()) {
if (this.leaseManager != null) {
this.leaseManager.closeAfterLeasesExpire();
}
if (this.splitLogWorker != null) {
splitLogWorker.stop();
}
stopInfoServer();
// Send cache a shutdown.
if (blockCache != null) {
  blockCache.shutdown();
}
if (mobFileCache != null) {
mobFileCache.shutdown();
}
// Send interrupts to wake up threads if sleeping so they notice shutdown.
// TODO: Should we check they are alive? If OOME could have exited already
if (this.hMemManager != null) {
this.hMemManager.stop();
}
if (this.cacheFlusher != null) {
this.cacheFlusher.interruptIfNecessary();
}
if (this.compactSplitThread != null) {
this.compactSplitThread.interruptIfNecessary();
}
// Stop the snapshot and other procedure handlers, forcefully killing all running tasks
if (rspmHost != null) {
rspmHost.stop(this.abortRequested.get() || this.killed);
}
if (this.killed) {
// Just skip out w/o closing regions. Used when testing.
} else if (abortRequested.get()) {
if (this.dataFsOk) {
closeUserRegions(abortRequested.get()); // Don't leave any open file handles
}
LOG.info("aborting server " + this.serverName);
} else {
closeUserRegions(abortRequested.get());
LOG.info("stopping server " + this.serverName);
}
regionReplicationBufferManager.stop();
closeClusterConnection();
// Closing the compactSplit thread before closing meta regions
if ((!this.killed) && containsMetaTableRegions()) {
if ((!abortRequested.get()) || this.dataFsOk) {
if (this.compactSplitThread != null) {
this.compactSplitThread.join();
this.compactSplitThread = null;
}
closeMetaTableRegions(abortRequested.get());
}
}
if ((!this.killed) && this.dataFsOk) {
waitOnAllRegionsToClose(abortRequested.get());
LOG.info(("stopping server " + this.serverName) +
"; all regions closed.");
}
// Stop the quota manager
if (rsQuotaManager != null) {
rsQuotaManager.stop();
}
if (rsSpaceQuotaManager != null) {
rsSpaceQuotaManager.stop();
rsSpaceQuotaManager = null;
}
// flag may be changed when closing regions throws exception.
if (this.dataFsOk) {
shutdownWAL(!abortRequested.get());
}
// Make sure the proxy is down.
if (this.f0 != null) {
this.f0 = null;
}
if (this.lockStub != null) {
this.lockStub = null;
}
if (this.rpcClient != null) {
this.rpcClient.close();
}
if (this.leaseManager != null) {
  this.leaseManager.close();
}
if (this.pauseMonitor != null) {
this.pauseMonitor.stop();
}
if (!killed) {
stopServiceThreads();
}
if (this.rpcServices != null) {
this.rpcServices.stop();
}
try {
deleteMyEphemeralNode();
} catch (KeeperException.NoNodeException nn) {
// pass
} catch (KeeperException e) {
LOG.warn("Failed deleting my ephemeral node", e);
}
// We may have failed to delete the znode at the previous step, but
// we delete the file anyway: a second attempt to delete the znode is likely to fail again.
ZNodeClearer.deleteMyEphemeralNodeOnDisk();
closeZooKeeper();
closeTableDescriptors();
LOG.info(("Exiting; stopping=" + this.serverName) + "; zookeeper connection closed.");
span.setStatus(StatusCode.OK);
} finally {
span.end();
}
} | 3.26 |
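The registration phase above bounds its retries with RetryCounterFactory(maxAttempts, sleepInterval, maxSleepTime). A standalone sketch of the same capped-backoff shape without the HBase utility classes (plain doubling is an assumption; the real RetryCounter applies its own policy):

import java.util.function.BooleanSupplier;

final class BackoffLoop {
  // Sleep starts at periodMs, doubles after each failed attempt, and is capped at
  // maxSleepMs -- the shape of the reportForDuty retry loop in run().
  static void retryUntil(BooleanSupplier attempt, long periodMs, long maxSleepMs)
      throws InterruptedException {
    long sleep = periodMs;
    while (!attempt.getAsBoolean()) {
      Thread.sleep(sleep);
      sleep = Math.min(sleep * 2, maxSleepMs);
    }
  }
}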
hbase_HRegionServer_getOnlineTables_rdh | /**
* Gets the online tables in this RS. This method looks at the in-memory onlineRegions.
*
* @return all the online tables in this RS
*/
public Set<TableName> getOnlineTables() {
Set<TableName> tables = new HashSet<>();
synchronized(this.onlineRegions) {
for (Region region : this.onlineRegions.values()) {
tables.add(region.getTableDescriptor().getTableName());
}
}
return tables;
} | 3.26 |
hbase_HRegionServer_createRegionServerStatusStub_rdh | /**
* Get the current master from ZooKeeper and open the RPC connection to it. To get a fresh
* connection, the current rssStub must be null. Method will block until a master is available.
* You can break from this block by requesting the server stop.
*
* @param refresh
* If true then master address will be read from ZK, otherwise use cached data
* @return master + port, or null if server has been stopped
*/
@InterfaceAudience.Private
protected synchronized ServerName createRegionServerStatusStub(boolean refresh) {
  if (f0 != null) {
return masterAddressTracker.getMasterAddress();
}
ServerName masterServerName = null;
long previousLogTime = 0;
RegionServerStatusService.BlockingInterface intRssStub = null;
LockService.BlockingInterface intLockStub = null;
boolean interrupted = false;
try {
while (keepLooping()) {
masterServerName = this.masterAddressTracker.getMasterAddress(refresh);
if (masterServerName == null) {
  if (!keepLooping()) { // Give up with no connection.
LOG.debug("No master found and cluster is stopped; bailing out");
return null;
}
if (EnvironmentEdgeManager.currentTime() > (previousLogTime + 1000)) {
LOG.debug("No master found; retry");
previousLogTime = EnvironmentEdgeManager.currentTime();
}
refresh = true; // Let's try to pull it from ZK directly.
if (sleepInterrupted(200)) {
interrupted = true;
}
continue;
}
try {
BlockingRpcChannel channel = this.rpcClient.createBlockingRpcChannel(masterServerName,
  userProvider.getCurrent(), shortOperationTimeout);
intRssStub = RegionServerStatusService.newBlockingStub(channel);
intLockStub = LockService.newBlockingStub(channel);
break;
} catch (IOException e) {
if (EnvironmentEdgeManager.currentTime() > (previousLogTime + 1000)) {
e = (e instanceof RemoteException) ? ((RemoteException) (e)).unwrapRemoteException() : e;
if (e instanceof ServerNotRunningYetException) {
  LOG.info("Master isn't available yet, retrying");
} else {
LOG.warn("Unable to connect to master. Retrying. Error was:", e);}
previousLogTime = EnvironmentEdgeManager.currentTime();
}
if (sleepInterrupted(200)) {
interrupted = true;
}
}
}
} finally {
if (interrupted) {
Thread.currentThread().interrupt();
}
}
this.f0 = intRssStub;
this.lockStub = intLockStub;
return masterServerName;
} | 3.26 |
hbase_HRegionServer_closeMetaTableRegions_rdh | /**
* Close meta region if we carry it
*
* @param abort
* Whether we're running an abort.
*/
private void closeMetaTableRegions(final boolean abort) {
HRegion meta = null;
this.onlineRegionsLock.writeLock().lock();
try {
for (Map.Entry<String, HRegion> e : onlineRegions.entrySet()) {
  RegionInfo hri = e.getValue().getRegionInfo();
if (hri.isMetaRegion()) {
meta = e.getValue();
}
if (meta != null) {
break;
}
}
} finally {
this.onlineRegionsLock.writeLock().unlock();
}
if (meta != null) {
closeRegionIgnoreErrors(meta.getRegionInfo(), abort);
}
} | 3.26 |
hbase_HRegionServer_m0_rdh | /**
 * Bring up the connection to the zk ensemble, then wait until there is a master for this cluster,
 * and after that wait until the cluster 'up' flag has been set. This is the order in which master
 * does things.
* <p>
* Finally open long-living server short-circuit connection.
*/
@SuppressWarnings(value = "RV_RETURN_VALUE_IGNORED_BAD_PRACTICE", justification = "cluster Id znode read would give us correct response")
private void m0() throws IOException, InterruptedException {
// Nothing to do in here if no Master in the mix.
if (this.masterless) {
return;
}
// Create the master address tracker, register with zk, and start it. Then
// block until a master is available. No point in starting up if no master
// running.
blockAndCheckIfStopped(this.masterAddressTracker);
// Wait on cluster being up. Master will set this flag up in zookeeper
// when ready.
blockAndCheckIfStopped(this.clusterStatusTracker);
// If we are HMaster then the cluster id should have already been set.
if (clusterId == null) {
// Retrieve clusterId
// Since cluster status is now up
// ID should have already been set by HMaster
try {
clusterId = ZKClusterId.readClusterIdZNode(this.zooKeeper);
if (clusterId == null) {
this.abort("Cluster ID has not been set");
}
LOG.info("ClusterId : " + clusterId);
} catch (KeeperException e) {
this.abort("Failed to retrieve Cluster ID", e);
}
}
if (isStopped() || isAborted()) {
return;// No need for further initialization
}
// watch for snapshots and other procedures
try {
rspmHost = new RegionServerProcedureManagerHost();
rspmHost.loadProcedures(conf);
rspmHost.initialize(this);
} catch (KeeperException e) {
this.abort("Failed to reach coordination cluster when creating procedure handler.", e);
}
} | 3.26 |
hbase_HRegionServer_constructRegionServer_rdh | /**
* Utility for constructing an instance of the passed HRegionServer class.
*/
static HRegionServer constructRegionServer(final Class<? extends HRegionServer> regionServerClass,
    final Configuration conf) {
  try {
    Constructor<? extends HRegionServer> c = regionServerClass.getConstructor(Configuration.class);
    return c.newInstance(conf);
  } catch (Exception e) {
    throw new RuntimeException("Failed construction of Regionserver: "
      + regionServerClass.toString(), e);
  }
} | 3.26
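Because construction goes through reflection on a Configuration-only constructor, plugging in a subclass needs nothing beyond that constructor plus the hbase.regionserver.impl setting (per HConstants.REGION_SERVER_IMPL). A hypothetical sketch:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical subclass; with hbase.regionserver.impl pointing at its FQCN,
// the stock main() above instantiates it via constructRegionServer().
public class AuditingRegionServer extends HRegionServer {
  private static final Logger AUDIT_LOG = LoggerFactory.getLogger(AuditingRegionServer.class);

  public AuditingRegionServer(Configuration conf) throws IOException {
    super(conf);
    AUDIT_LOG.info("AuditingRegionServer constructed");
  }
}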