name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, 3.26–3.68)
---|---|---|
hbase_Get_getFamilyMap_rdh | /**
 * Method for retrieving the get's familyMap.
 *
 * @return the live mapping from column family to its requested qualifier set
 *         (the internal map is returned, not a copy)
 */
public Map<byte[], NavigableSet<byte[]>> getFamilyMap() {
  return familyMap;
} | 3.26 |
hbase_Get_addFamily_rdh | /**
 * Get all columns from the specified family.
 * <p>
 * Overrides previous calls to addColumn for this family.
 *
 * @param family
 * family name
 * @return the Get object
 */
public Get addFamily(byte[] family) {
  // A null qualifier set means "all columns". put() alone suffices: it replaces any
  // existing entry, so the prior remove() call was redundant and has been dropped.
  familyMap.put(family, null);
  return this;
} | 3.26 |
hbase_Get_getRowOffsetPerColumnFamily_rdh | /**
 * Method for retrieving the get's offset per row per column family (#kvs to be skipped).
 *
 * @return the row offset
 */
public int getRowOffsetPerColumnFamily() {
  return storeOffset;
} | 3.26 |
hbase_Get_getTimeRange_rdh | /**
 * Method for retrieving the get's TimeRange.
 *
 * @return the time range this get is restricted to
 */
public TimeRange getTimeRange() {
  return tr;
} | 3.26 |
hbase_Get_setRowOffsetPerColumnFamily_rdh | /**
 * Set offset for the row per Column Family. This offset is only within a particular row/CF
 * combination. It gets reset back to zero when we move to the next row or CF.
 *
 * @param offset
 * is the number of kvs that will be skipped.
 * @return this for invocation chaining
 */
public Get setRowOffsetPerColumnFamily(int offset) {
  storeOffset = offset;
  return this;
} | 3.26 |
hbase_Get_getFingerprint_rdh | /**
 * Compile the table and column family (i.e. schema) information into a String. Useful for parsing
 * and aggregation by debugging, logging, and administration tools.
 */
@Override
public Map<String, Object> getFingerprint() {
  Map<String, Object> map = new HashMap<>();
  // Only the keys are needed here, so iterate keySet() directly instead of entrySet(),
  // and size the list via size() rather than entrySet().size().
  List<String> families = new ArrayList<>(this.familyMap.size());
  map.put("families", families);
  for (byte[] family : this.familyMap.keySet()) {
    families.add(Bytes.toStringBinary(family));
  }
  return map;
} | 3.26 |
hbase_Get_readAllVersions_rdh | /**
 * Request every available version of each column.
 *
 * @return this for invocation chaining
 */
public Get readAllVersions() {
  maxVersions = Integer.MAX_VALUE;
  return this;
} | 3.26 |
hbase_Get_addColumn_rdh | /**
 * Get the column from the specific family with the specified qualifier.
 * <p>
 * Overrides previous calls to addFamily for this family.
 *
 * @param family
 * family name
 * @param qualifier
 * column qualifier
 * @return the Get object
 */
public Get addColumn(byte[] family, byte[] qualifier) {
  // computeIfAbsent also replaces an explicit null mapping left by addFamily,
  // matching the original get()-null-check-put() behavior.
  NavigableSet<byte[]> set =
    familyMap.computeIfAbsent(family, k -> new TreeSet<>(Bytes.BYTES_COMPARATOR));
  // a null qualifier is normalized to the empty qualifier
  set.add(qualifier == null ? HConstants.EMPTY_BYTE_ARRAY : qualifier);
  return this;
} | 3.26 |
hbase_Get_setTimeRange_rdh | /**
 * Get versions of columns only within the specified timestamp range, [minStamp, maxStamp).
 *
 * @param minStamp
 * minimum timestamp value, inclusive
 * @param maxStamp
 * maximum timestamp value, exclusive
 * @return this for invocation chaining
 */
public Get setTimeRange(long minStamp, long maxStamp) throws IOException {
  this.tr = TimeRange.between(minStamp, maxStamp);
  return this;
} | 3.26 |
hbase_Get_familySet_rdh | /**
 * Method for retrieving the keys in the familyMap.
 *
 * @return keys in the current familyMap
 */
public Set<byte[]> familySet() {
  return familyMap.keySet();
} | 3.26 |
hbase_Get_m2_rdh | /**
 * Get whether blocks should be cached for this Get.
 *
 * @return true if default caching should be used, false if blocks should not be cached
 */
public boolean m2() {
  return this.cacheBlocks;
} | 3.26 |
hbase_Get_setTimestamp_rdh | /**
 * Get versions of columns with the specified timestamp.
 *
 * @param timestamp
 * version timestamp
 * @return this for invocation chaining
 */
public Get setTimestamp(long timestamp) {
  try {
    this.tr = TimeRange.at(timestamp);
  } catch (Exception e) {
    // This should never happen, unless integer overflow or something extremely wrong...
    LOG.error("TimeRange failed, likely caused by integer overflow. ", e);
    throw e;
  }
  return this;
} | 3.26 |
hbase_Get_toMap_rdh | /**
 * Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a
 * Map along with the fingerprinted information. Useful for debugging, logging, and administration
 * tools.
 *
 * @param maxCols
 * a limit on the number of columns output prior to truncation
 */
@Override
public Map<String, Object> toMap(int maxCols) {
  // we start with the fingerprint map and build on top of it.
  Map<String, Object> map = getFingerprint();
  // replace the fingerprint's simple list of families with a
  // map from column families to lists of qualifiers and kv details
  Map<String, List<String>> v13 = new HashMap<>();
  map.put("families", v13);
  // add scalar information first
  map.put("row", Bytes.toStringBinary(this.row));
  map.put("maxVersions", this.maxVersions);
  map.put("cacheBlocks", this.cacheBlocks);
  List<Long> v14 = new ArrayList<>(2);
  v14.add(this.tr.getMin());
  v14.add(this.tr.getMax());
  map.put("timeRange", v14);
  int colCount = 0;
  // iterate through affected families and add details
  for (Map.Entry<byte[], NavigableSet<byte[]>> entry : this.familyMap.entrySet()) {
    List<String> familyList = new ArrayList<>();
    v13.put(Bytes.toStringBinary(entry.getKey()), familyList);
    if (entry.getValue() == null) {
      // a null qualifier set means "all columns" for this family
      colCount++;
      --maxCols;
      familyList.add("ALL");
    } else {
      // totalColumns counts every column even when rendering is truncated
      colCount += entry.getValue().size();
      if (maxCols <= 0) {
        continue;
      }
      // NOTE(review): once the budget hits zero, `continue` keeps looping (and
      // decrementing) instead of `break`, and the pre-decrement drops the last
      // in-budget column rather than rendering it -- confirm this truncation
      // behavior is intended.
      for (byte[] column : entry.getValue()) { if ((--maxCols) <= 0) {
        continue;
      }
        familyList.add(Bytes.toStringBinary(column));
      }
    }
  }
  map.put("totalColumns", colCount);
  if (this.filter != null) {
    map.put("filter", this.filter.toString());
  }
  // add the id if set
  if (getId() != null) {
    map.put("id", getId());
  }
  return map;
} | 3.26 |
hbase_OrderedBytes_decodeInt16_rdh | /**
 * Decode an {@code int16} value.
 *
 * @see #encodeInt16(PositionedByteRange, short, Order)
 */
public static short decodeInt16(PositionedByteRange src) {
  final byte header = src.get();
  assert (header == FIXED_INT16) || (header == DESCENDING.apply(FIXED_INT16));
  final Order ord = (header == FIXED_INT16) ? ASCENDING : DESCENDING;
  // un-flip the sign bit of the high byte, then append the low byte
  final int hi = (ord.apply(src.get()) ^ 0x80) & 0xff;
  final int lo = ord.apply(src.get()) & 0xff;
  return (short) ((hi << 8) + lo);
} | 3.26 |
hbase_OrderedBytes_skipVaruint64_rdh | /**
 * Skip {@code src} over the encoded varuint64.
 *
 * @param src
 * source buffer
 * @param cmp
 * if true, parse the compliment of the value.
 * @return the number of bytes skipped.
 */
static int skipVaruint64(PositionedByteRange src, boolean cmp) {
  final int skipped = lengthVaruint64(src, cmp);
  src.setPosition(src.getPosition() + skipped);
  return skipped;
}
/**
* Decode a sequence of bytes in {@code src} as a varuint64. Compliment the encoded value when
* {@code comp} | 3.26 |
hbase_OrderedBytes_putUint32_rdh | /**
 * Write a 32-bit unsigned integer to {@code dst} as 4 big-endian bytes.
 *
 * @return number of bytes written.
 */
private static int putUint32(PositionedByteRange dst, int val) {
  dst.put((byte) (val >>> 24));
  dst.put((byte) (val >>> 16));
  dst.put((byte) (val >>> 8));
  dst.put((byte) val);
  return 4;
} | 3.26 |
hbase_OrderedBytes_isNumericZero_rdh | /**
 * Return true when the next encoded value in {@code src} uses Numeric encoding and is {@code 0},
 * false otherwise.
 */
public static boolean isNumericZero(PositionedByteRange src) {
  final byte peeked = src.peek();
  // a negative raw byte indicates a DESCENDING-encoded value
  final Order ord = Integer.signum(peeked) == -1 ? DESCENDING : ASCENDING;
  return ZERO == ord.apply(peeked);
} | 3.26 |
hbase_OrderedBytes_encodeNumeric_rdh | /**
 * Encode a numerical value using the variable-length encoding. If the number of significant
 * digits of the value exceeds the {@link OrderedBytes#MAX_PRECISION}, the exceeding part will be
 * lost.
 *
 * @param dst
 * The destination to which encoded digits are written.
 * @param val
 * The value to encode.
 * @param ord
 * The {@link Order} to respect while encoding {@code val}.
 * @return the number of bytes written.
 */
public static int encodeNumeric(PositionedByteRange dst, BigDecimal val, Order ord) {
  if (null == val) {
    return encodeNull(dst, ord);
  }
  if (BigDecimal.ZERO.compareTo(val) == 0) {
    dst.put(ord.apply(ZERO));
    return 1;
  }
  final int offset = dst.getOffset();
  final int start = dst.getPosition();
  final BigDecimal abs = val.abs();
  final int len;
  if (BigDecimal.ONE.compareTo(abs) <= 0) {
    // abs(val) >= 1.0
    len = encodeNumericLarge(dst, normalize(val));
  } else {
    // 1.0 > abs(val) >= 0.0
    len = encodeNumericSmall(dst, normalize(val));
  }
  ord.apply(dst.getBytes(), offset + start, len);
  return len;
} | 3.26 |
hbase_OrderedBytes_isEncodedValue_rdh | /**
 * Returns true when {@code src} appears to be positioned an encoded value, false otherwise.
 */
public static boolean isEncodedValue(PositionedByteRange src) {
  // check every known header family; short-circuit order matches the original chain
  return isNull(src) || m3(src) || isFixedInt8(src) || isFixedInt16(src) || isFixedInt32(src)
    || m5(src) || isFixedFloat32(src) || isFixedFloat64(src) || isText(src) || isBlobCopy(src)
    || isBlobVar(src);
} | 3.26 |
hbase_OrderedBytes_encodeInt64_rdh | /**
 * Encode an {@code int64} value using the fixed-length encoding.
 * <p>
 * All longs are serialized to an 8-byte, fixed-width, sortable format by flipping the sign bit
 * and writing the result in big-endian order, prefixed by the {@link #FIXED_INT64} header byte.
 * This preserves the natural signed ordering of longs under byte-wise comparison. Null values
 * are NOT supported, as this encoding targets java language primitives.
 * </p>
 * <p>
 * For example: {@code 5 -> 0x288000000000000005}, {@code -4 -> 0x280000000000000004},
 * {@code Long.MAX_VALUE -> 0x28ffffffffffffffff}, {@code Long.MIN_VALUE -> 0x287fffffffffffffff}.
 * </p>
 * <p>
 * This encoding format, and much of this documentation string, is based on Orderly's
 * {@code FixedIntWritableRowKey}.
 * </p>
 *
 * @return the number of bytes written.
 * @see #decodeInt64(PositionedByteRange)
 */
public static int encodeInt64(PositionedByteRange dst, long val, Order ord) {
  final int offset = dst.getOffset();
  final int start = dst.getPosition();
  dst.put(f1);
  dst.put((byte) ((val >> 56) ^ 0x80)); // flip sign bit in the most significant byte
  dst.put((byte) (val >> 48));
  dst.put((byte) (val >> 40));
  dst.put((byte) (val >> 32));
  dst.put((byte) (val >> 24));
  dst.put((byte) (val >> 16));
  dst.put((byte) (val >> 8));
  dst.put((byte) val);
  ord.apply(dst.getBytes(), offset + start, 9);
  return 9;
} | 3.26 |
hbase_OrderedBytes_encodeBlobCopy_rdh | /**
 * Encode a Blob value as a byte-for-byte copy. BlobCopy encoding in DESCENDING order is NULL
 * terminated so as to preserve proper sorting of {@code []} and so it does not support
 * {@code 0x00} in the value.
 *
 * @return the number of bytes written.
 * @throws IllegalArgumentException
 * when {@code ord} is DESCENDING and {@code val} contains a
 * {@code 0x00} byte.
 */
public static int encodeBlobCopy(PositionedByteRange dst, byte[] val, int voff, int vlen,
  Order ord) {
  if (null == val) {
    encodeNull(dst, ord);
    if (ASCENDING == ord) {
      return 1;
    }
    // DESCENDING ordered BlobCopy requires a termination bit to preserve
    // sort-order semantics of null values.
    dst.put(ord.apply(TERM));
    return 2;
  }
  // Blobs as final entry in a compound key are written unencoded.
  assert dst.getRemaining() >= (vlen + (ASCENDING == ord ? 1 : 2));
  if (DESCENDING == ord) {
    // 0x00 would collide with the terminator, so reject it up front
    for (int i = 0; i < vlen; i++) {
      if (TERM == val[voff + i]) {
        throw new IllegalArgumentException("0x00 bytes not permitted in value.");
      }
    }
  }
  final int offset = dst.getOffset();
  final int start = dst.getPosition();
  dst.put(BLOB_COPY);
  dst.put(val, voff, vlen);
  // DESCENDING ordered BlobCopy requires a termination bit to preserve
  // sort-order semantics of null values.
  if (DESCENDING == ord) {
    dst.put(TERM);
  }
  ord.apply(dst.getBytes(), offset + start, dst.getPosition() - start);
  return dst.getPosition() - start;
}
/**
* Encode a Blob value as a byte-for-byte copy. BlobCopy encoding in DESCENDING order is NULL
* terminated so as to preserve proper sorting of {@code []} and so it does not support
* {@code 0x00} in the value.
*
* @return the number of bytes written.
* @throws IllegalArgumentException
* when {@code ord} is DESCENDING and {@code val} contains a
* {@code 0x00} | 3.26 |
hbase_OrderedBytes_isBlobCopy_rdh | /**
 * Return true when the next encoded value in {@code src} uses BlobCopy encoding, false otherwise.
 */
public static boolean isBlobCopy(PositionedByteRange src) {
  final byte peeked = src.peek();
  final Order ord = Integer.signum(peeked) == -1 ? DESCENDING : ASCENDING;
  return BLOB_COPY == ord.apply(peeked);
} | 3.26 |
hbase_OrderedBytes_encodeToCentimal_rdh | /**
 * Encode a value val in [0.01, 1.0) into Centimals. Util function for
 * {@link OrderedBytes#encodeNumericLarge(PositionedByteRange, BigDecimal)} and
 * {@link OrderedBytes#encodeNumericSmall(PositionedByteRange, BigDecimal)}
 *
 * @param dst
 * The destination to which encoded digits are written.
 * @param val
 * A BigDecimal after the normalization. The value must be in [0.01, 1.0).
 */
private static void encodeToCentimal(PositionedByteRange dst, BigDecimal val) {
  // The input value must lie in [0.01, 1.0); take its plain decimal expansion
  // and work on the fractional digits only.
  final String plain = val.stripTrailingZeros().toPlainString();
  final String fraction = plain.substring(plain.indexOf('.') + 1);
  // If the first fractional digit is 0 we encode one digit beyond MAX_PRECISION.
  // At most MAX_PRECISION significant digits are emitted because the input has
  // already been normalized.
  int limit = (fraction.charAt(0) == '0') ? MAX_PRECISION + 1 : MAX_PRECISION;
  limit = Math.min(limit, fraction.length());
  for (int i = 0; i < limit; i += 2) {
    // pack two decimal digits into one base-100 "centimal" digit d, stored as 2d+1
    int d = (fraction.charAt(i) - '0') * 10;
    if ((i + 1) < limit) {
      d += fraction.charAt(i + 1) - '0';
    }
    dst.put((byte) ((2 * d) + 1));
  }
} | 3.26 |
hbase_OrderedBytes_m3_rdh | /**
 * Return true when the next encoded value in {@code src} uses Numeric encoding, false otherwise.
 * {@code NaN}, {@code +/-Inf} are valid Numeric values.
 */
public static boolean m3(PositionedByteRange src) {
  final byte peeked = src.peek();
  final Order ord = Integer.signum(peeked) == -1 ? DESCENDING : ASCENDING;
  final byte x = ord.apply(peeked);
  return (x >= f0) && (x <= NAN);
} | 3.26 |
hbase_OrderedBytes_decodeFloat64_rdh | /**
 * Decode a 64-bit floating point value using the fixed-length encoding.
 *
 * @see #encodeFloat64(PositionedByteRange, double, Order)
 */
public static double decodeFloat64(PositionedByteRange src) {
  final byte header = src.get();
  assert (header == FIXED_FLOAT64) || (header == DESCENDING.apply(FIXED_FLOAT64));
  final Order ord = (header == FIXED_FLOAT64) ? ASCENDING : DESCENDING;
  // reassemble the 8 big-endian payload bytes
  long bits = 0L;
  for (int i = 0; i < 8; i++) {
    bits = (bits << 8) | (ord.apply(src.get()) & 0xff);
  }
  // invert the sortable transform applied at encode time
  bits ^= ((~bits) >> (Long.SIZE - 1)) | Long.MIN_VALUE;
  return Double.longBitsToDouble(bits);
} | 3.26 |
hbase_OrderedBytes_putVaruint64_rdh | /**
 * Encode an unsigned 64-bit unsigned integer {@code val} into {@code dst}.
 * <p>
 * The encoding is length-prefixed by magnitude: values below 241 take one byte, and each
 * threshold (2288, 67824, 2^24, 2^32, ...) adds a byte, up to 9 bytes for the full 64-bit
 * range. Comparisons use {@code m0} for unsigned semantics.
 * </p>
 *
 * @param dst
 * The destination to which encoded bytes are written.
 * @param val
 * The value to write.
 * @param comp
 * Compliment the encoded value when {@code comp} is true.
 * @return number of bytes written.
 */
static int putVaruint64(PositionedByteRange dst, long val, boolean comp) {
  final int offset = dst.getOffset();
  final int start = dst.getPosition();
  final byte[] a = dst.getBytes();
  final Order ord = comp ? DESCENDING : ASCENDING;
  // Emit the payload for the appropriate magnitude band. The common epilogue
  // (compute len, apply order, return) was duplicated in every branch; it is
  // hoisted below the dispatch.
  if ((-1) == m0(val, 241L)) {
    dst.put((byte) val);
  } else if ((-1) == m0(val, 2288L)) {
    final int y = (int) (val - 240);
    dst.put((byte) ((y / 256) + 241)).put((byte) (y % 256));
  } else if ((-1) == m0(val, 67824L)) {
    final int y = (int) (val - 2288);
    dst.put((byte) 249).put((byte) (y / 256)).put((byte) (y % 256));
  } else {
    final int y = (int) val;          // low 32 bits
    final int w = (int) (val >>> 32); // high 32 bits
    if (w == 0) {
      if ((-1) == m0(y, 16777216L)) {
        dst.put((byte) 250).put((byte) (y >>> 16)).put((byte) (y >>> 8)).put((byte) y);
      } else {
        dst.put((byte) 251);
        putUint32(dst, y);
      }
    } else if ((-1) == m0(w, 256L)) {
      dst.put((byte) 252).put((byte) w);
      putUint32(dst, y);
    } else if ((-1) == m0(w, 65536L)) {
      dst.put((byte) 253).put((byte) (w >>> 8)).put((byte) w);
      putUint32(dst, y);
    } else if ((-1) == m0(w, 16777216L)) {
      dst.put((byte) 254).put((byte) (w >>> 16)).put((byte) (w >>> 8)).put((byte) w);
      putUint32(dst, y);
    } else {
      dst.put((byte) 255);
      putUint32(dst, w);
      putUint32(dst, y);
    }
  }
  final int len = dst.getPosition() - start;
  ord.apply(a, offset + start, len);
  return len;
} | 3.26 |
hbase_OrderedBytes_decodeInt64_rdh | /**
 * Decode an {@code int64} value.
 *
 * @see #encodeInt64(PositionedByteRange, long, Order)
 */
public static long decodeInt64(PositionedByteRange src) {
  final byte header = src.get();
  assert (header == f1) || (header == DESCENDING.apply(f1));
  final Order ord = (header == f1) ? ASCENDING : DESCENDING;
  // un-flip the sign bit of the most significant byte, then append the rest
  long val = (ord.apply(src.get()) ^ 0x80) & 0xff;
  for (int i = 1; i < 8; i++) {
    val = (val << 8) | (ord.apply(src.get()) & 0xff);
  }
  return val;
} | 3.26 |
hbase_OrderedBytes_isFixedFloat64_rdh | /**
 * Return true when the next encoded value in {@code src} uses fixed-width Float64 encoding, false
 * otherwise.
 */
public static boolean isFixedFloat64(PositionedByteRange src) {
  final byte peeked = src.peek();
  final Order ord = Integer.signum(peeked) == -1 ? DESCENDING : ASCENDING;
  return FIXED_FLOAT64 == ord.apply(peeked);
} | 3.26 |
hbase_OrderedBytes_encodeInt16_rdh | /**
 * Encode an {@code int16} value using the fixed-length encoding.
 *
 * @return the number of bytes written.
 * @see #encodeInt64(PositionedByteRange, long, Order)
 * @see #decodeInt16(PositionedByteRange)
 */
public static int encodeInt16(PositionedByteRange dst, short val, Order ord) {
  final int offset = dst.getOffset();
  final int start = dst.getPosition();
  dst.put(FIXED_INT16);
  dst.put((byte) ((val >> 8) ^ 0x80)); // flip sign bit for byte-wise sortability
  dst.put((byte) val);
  ord.apply(dst.getBytes(), offset + start, 3);
  return 3;
} | 3.26 |
hbase_OrderedBytes_isFixedInt8_rdh | /**
 * Return true when the next encoded value in {@code src} uses fixed-width Int8 encoding, false
 * otherwise.
 */
public static boolean isFixedInt8(PositionedByteRange src) {
  final byte peeked = src.peek();
  final Order ord = Integer.signum(peeked) == -1 ? DESCENDING : ASCENDING;
  return FIXED_INT8 == ord.apply(peeked);
} | 3.26 |
hbase_OrderedBytes_encodeNumericSmall_rdh | /**
 * <p>
 * Encode the small magnitude floating point number {@code val} using the key encoding. The caller
 * guarantees that 1.0 > abs(val) > 0.0.
 * </p>
 * <p>
 * A floating point value is encoded as an integer exponent {@code E} and a mantissa {@code M}.
 * The original value is equal to {@code (M * 100^E)}. {@code E} is set to the smallest value
 * possible without making {@code M} greater than or equal to 1.0.
 * </p>
 * <p>
 * For this routine, {@code E} will always be zero or negative, since the original value is less
 * than one. The encoding written by this routine is the ones-complement of the varint of the
 * negative of {@code E} followed by the mantissa:
 *
 * <pre>
 * Encoding: ~-E M
 * </pre>
 * </p>
 *
 * @param dst
 * The destination to which encoded digits are written.
 * @param val
 * The value to encode.
 * @return the number of bytes written.
 */
private static int encodeNumericSmall(PositionedByteRange dst, BigDecimal val) {
  // TODO: this can be done faster?
  // assert 1.0 > abs(val) > 0.0
  BigDecimal v36 = val.abs();
  assert (BigDecimal.ZERO.compareTo(v36) < 0) && (BigDecimal.ONE.compareTo(v36) > 0);
  byte[] a = dst.getBytes();
  boolean isNeg = val.signum() == (-1);
  final int offset = dst.getOffset();
  final int start = dst.getPosition();
  if (isNeg) {
    /* Small negative number: 0x14, -E, ~M */
    dst.put(NEG_SMALL);
  } else {
    /* Small positive number: 0x16, ~-E, M */
    dst.put(POS_SMALL);
  }
  // normalize abs(val) to determine E
  // count the zeros between the decimal point and the first significant digit
  int zerosBeforeFirstNonZero = v36.scale() - v36.precision();
  // shift by an even number of decimal places so E counts base-100 digits
  int lengthToMoveRight = ((zerosBeforeFirstNonZero % 2) == 0) ? zerosBeforeFirstNonZero : zerosBeforeFirstNonZero - 1;
  int e = lengthToMoveRight / 2;
  v36 = v36.movePointRight(lengthToMoveRight);
  putVaruint64(dst, e, !isNeg);// encode appropriate E value.
  // encode M by peeling off centimal digits, encoding x as 2x+1
  int startM = dst.getPosition();
  encodeToCentimal(dst, v36);
  // terminal digit should be 2x
  a[(offset + dst.getPosition()) - 1] = ((byte) (a[(offset + dst.getPosition()) - 1] & 0xfe));
  if (isNeg) {
    // negative values encoded as ~M
    DESCENDING.apply(a, offset + startM, dst.getPosition() - startM);
  }
  return dst.getPosition() - start;
}
/**
* Encode the large magnitude floating point number {@code val} using the key encoding. The caller
* guarantees that {@code val} will be finite and abs(val) >= 1.0.
* <p>
* A floating point value is encoded as an integer exponent {@code E} and a mantissa {@code M}.
* The original value is equal to {@code (M * 100^E)}. {@code E} is set to the smallest value
* possible without making {@code M} greater than or equal to 1.0.
* </p>
* <p>
* Each centimal digit of the mantissa is stored in a byte. If the value of the centimal digit is
* {@code X} (hence {@code X>=0} and {@code X<=99}) then the byte value will be {@code 2*X+1} for
* every byte of the mantissa, except for the last byte which will be {@code 2*X+0}. The mantissa
* must be the minimum number of bytes necessary to represent the value; trailing {@code X==0}
* digits are omitted. This means that the mantissa will never contain a byte with the value
* {@code 0x00}.
* </p>
* <p>
* If {@code E > 10}, then this routine writes of {@code E} as a varint followed by the mantissa
* as described above. Otherwise, if {@code E <= 10}, this routine only writes the mantissa and
* leaves the {@code E} | 3.26 |
hbase_OrderedBytes_unexpectedHeader_rdh | /**
 * Creates the standard exception when the encoded header byte is unexpected for the decoding
 * context.
 *
 * @param header
 * value used in error message.
 */
private static IllegalArgumentException unexpectedHeader(byte header) {
  // Return rather than throw: callers use the `throw unexpectedHeader(...)` idiom, which the
  // declared return type exists to support. Mask to 0xff so a negative byte renders as e.g.
  // "0xa9" instead of a sign-extended 16-digit value.
  return new IllegalArgumentException(
    "unexpected value in first byte: 0x" + Long.toHexString(header & 0xff));
} | 3.26 |
hbase_OrderedBytes_isFixedFloat32_rdh | /**
 * Return true when the next encoded value in {@code src} uses fixed-width Float32 encoding, false
 * otherwise.
 */
public static boolean isFixedFloat32(PositionedByteRange src) {
  final byte peeked = src.peek();
  final Order ord = Integer.signum(peeked) == -1 ? DESCENDING : ASCENDING;
  return FIXED_FLOAT32 == ord.apply(peeked);
} | 3.26 |
hbase_OrderedBytes_decodeInt32_rdh | /**
 * Decode an {@code int32} value.
 *
 * @see #encodeInt32(PositionedByteRange, int, Order)
 */
public static int decodeInt32(PositionedByteRange src) {
  final byte header = src.get();
  assert (header == FIXED_INT32) || (header == DESCENDING.apply(FIXED_INT32));
  final Order ord = (header == FIXED_INT32) ? ASCENDING : DESCENDING;
  // un-flip the sign bit of the most significant byte, then append the rest
  int val = (ord.apply(src.get()) ^ 0x80) & 0xff;
  for (int i = 1; i < 4; i++) {
    val = (val << 8) | (ord.apply(src.get()) & 0xff);
  }
  return val;
} | 3.26 |
hbase_OrderedBytes_blobVarEncodedLength_rdh | /**
 * Calculate the expected BlobVar encoded length based on unencoded length.
 */
public static int blobVarEncodedLength(int len) {
  if (0 == len) {
    // 1-byte header + 1-byte terminator
    return 2;
  }
  // 1-byte header, then 7 bits of input data per encoded byte, rounded up
  return ((int) Math.ceil((len * 8) / 7.0)) + 1;
} | 3.26 |
hbase_OrderedBytes_skipSignificand_rdh | /**
 * Skip {@code src} over the significand bytes.
 *
 * @param src
 * The source from which to read encoded digits.
 * @param comp
 * Treat encoded bytes as compliments when {@code comp} is true.
 * @return the number of bytes skipped.
 */
private static int skipSignificand(PositionedByteRange src, boolean comp) {
  final byte[] a = src.getBytes();
  final int offset = src.getOffset();
  final int start = src.getPosition();
  final Order ord = comp ? DESCENDING : ASCENDING;
  int i = start;
  // significand digits are encoded with the LSB set; the terminal digit has it clear
  while (true) {
    final byte digit = ord.apply(a[offset + i]);
    i++;
    if ((digit & 1) == 0) {
      break;
    }
  }
  src.setPosition(i);
  return i - start;
} | 3.26 |
hbase_OrderedBytes_decodeFloat32_rdh | /**
 * Decode a 32-bit floating point value using the fixed-length encoding.
 *
 * @see #encodeFloat32(PositionedByteRange, float, Order)
 */
public static float decodeFloat32(PositionedByteRange src) {
  final byte header = src.get();
  assert (header == FIXED_FLOAT32) || (header == DESCENDING.apply(FIXED_FLOAT32));
  final Order ord = (header == FIXED_FLOAT32) ? ASCENDING : DESCENDING;
  // reassemble the 4 big-endian payload bytes
  int bits = 0;
  for (int i = 0; i < 4; i++) {
    bits = (bits << 8) | (ord.apply(src.get()) & 0xff);
  }
  // invert the sortable transform applied at encode time
  bits ^= ((~bits) >> (Integer.SIZE - 1)) | Integer.MIN_VALUE;
  return Float.intBitsToFloat(bits);
} | 3.26 |
hbase_OrderedBytes_lengthVaruint64_rdh | /**
* Inspect {@code src} for an encoded varuint64 for its length in bytes. Preserves the state of
* {@code src}.
*
* @param src
* source buffer
* @param comp
* if true, parse the compliment of the value.
* @return the number of bytes consumed by this value.
*/
static int lengthVaruint64(PositionedByteRange src, boolean comp) {
int a0 = (comp ? DESCENDING : ASCENDING).apply(src.peek()) & 0xff;
if (a0 <= 240)
return 1;
if (a0 <= 248)
return 2;
if (a0 == 249)
return 3;
if (a0 == 250)
return 4;
if (a0 == 251)return 5;
if (a0 ==
252)
return 6;
if (a0 == 253)
return 7;
if (a0 == 254)
return 8;
if (a0 == 255)
return 9;
throw unexpectedHeader(src.peek());
} | 3.26 |
hbase_OrderedBytes_isText_rdh | /**
 * Return true when the next encoded value in {@code src} uses Text encoding, false otherwise.
 */
public static boolean isText(PositionedByteRange src) {
  final byte peeked = src.peek();
  final Order ord = Integer.signum(peeked) == -1 ? DESCENDING : ASCENDING;
  return TEXT == ord.apply(peeked);
} | 3.26 |
hbase_OrderedBytes_isNull_rdh | /**
 * Return true when the next encoded value in {@code src} is null, false otherwise.
 */
public static boolean isNull(PositionedByteRange src) {
  final byte peeked = src.peek();
  final Order ord = Integer.signum(peeked) == -1 ? DESCENDING : ASCENDING;
  return NULL == ord.apply(peeked);
} | 3.26 |
hbase_OrderedBytes_encodeInt32_rdh | /**
 * Encode an {@code int32} value using the fixed-length encoding.
 *
 * @return the number of bytes written.
 * @see #encodeInt64(PositionedByteRange, long, Order)
 * @see #decodeInt32(PositionedByteRange)
 */
public static int encodeInt32(PositionedByteRange dst, int val, Order ord) {
  final int offset = dst.getOffset();
  final int start = dst.getPosition();
  dst.put(FIXED_INT32);
  dst.put((byte) ((val >> 24) ^ 0x80)); // flip sign bit for byte-wise sortability
  dst.put((byte) (val >> 16));
  dst.put((byte) (val >> 8));
  dst.put((byte) val);
  ord.apply(dst.getBytes(), offset + start, 5);
  return 5;
} | 3.26 |
hbase_OrderedBytes_decodeSignificand_rdh | /**
 * Read significand digits from {@code src} according to the magnitude of {@code e}.
 *
 * @param src
 * The source from which to read encoded digits.
 * @param e
 * The magnitude of the first digit read.
 * @param comp
 * Treat encoded bytes as compliments when {@code comp} is true.
 * @return The decoded value.
 * @throws IllegalArgumentException
 * when read exceeds the remaining length of {@code src}.
 */
private static BigDecimal decodeSignificand(PositionedByteRange src, int e, boolean comp) {
  // TODO: can this be made faster?
  byte[] a = src.getBytes();
  final int start = src.getPosition();
  final int offset = src.getOffset();
  final int remaining = src.getRemaining();
  Order ord = (comp) ? DESCENDING : ASCENDING;
  BigDecimal m;
  StringBuilder sb = new StringBuilder();
  for (int i = 0; ; i++) {
    if (i > remaining) {
      // we've exceeded this range's window; restore position before failing
      src.setPosition(start);
      throw new IllegalArgumentException((("Read exceeds range before termination byte found. offset: " + offset) + " position: ") + (start + i));
    }
    // one byte -> 2 digits
    // base-100 digits are encoded as val * 2 + 1 except for the termination digit.
    int twoDigits = (ord.apply(a[(offset + start) + i]) & 0xff) / 2;
    sb.append(String.format("%02d", twoDigits));
    // detect termination digit
    // Besides, as we will normalise the return value at last,
    // we only need to decode at most MAX_PRECISION + 2 digits here.
    if (((ord.apply(a[(offset + start) + i]) & 1) == 0) || (sb.length() > (MAX_PRECISION + 1))) {
      src.setPosition((start + i) + 1);
      break;
    }
  }
  m = new BigDecimal(sb.toString());
  // account for a leading zero in the first digit pair when repositioning the point
  int stepsMoveLeft = (sb.charAt(0) != '0') ? m.precision() : m.precision() + 1;
  stepsMoveLeft -= e * 2;
  return normalize(m.movePointLeft(stepsMoveLeft));
} | 3.26 |
hbase_OrderedBytes_normalize_rdh | /**
 * Strip all trailing zeros to ensure that no digit will be zero and round using our default
 * context to ensure precision doesn't exceed max allowed. From Phoenix's {@code NumberUtil}.
 *
 * @return new {@link BigDecimal} instance
 */
static BigDecimal normalize(BigDecimal val) {
  if (null == val) {
    return null;
  }
  return val.stripTrailingZeros().round(DEFAULT_MATH_CONTEXT);
} | 3.26 |
hbase_OrderedBytes_m2_rdh | /**
 * Decode a blob value that was encoded using BlobVar encoding.
 */
public static byte[] m2(PositionedByteRange src) {
  final byte header = src.get();
  if ((header == NULL) || (header == DESCENDING.apply(NULL))) {
    return null;
  }
  assert (header == BLOB_VAR) || (header == DESCENDING.apply(BLOB_VAR));
  Order ord = (BLOB_VAR == header) ? ASCENDING : DESCENDING;
  if (src.peek() == ord.apply(TERM)) {
    // skip empty input buffer.
    src.get();
    return new byte[0];
  }
  final int offset = src.getOffset();
  final int start = src.getPosition();
  int end;
  byte[] a = src.getBytes();
  // scan forward until the masked high bit matches TERM -- the terminator byte
  for (end = start; ((byte) (ord.apply(a[offset + end]) & 0x80)) != TERM; end++);
  end++;// increment end to 1-past last byte
  // create ret buffer using length of encoded data + 1 (header byte)
  PositionedByteRange ret = new SimplePositionedMutableByteRange(blobVarDecodedLength((end - start) + 1));
  // Reassemble 8-bit output bytes from the 7-bit payload of each encoded byte.
  // `s` tracks how many payload bits of the current encoded byte remain; `t`
  // accumulates the partially-built output byte.
  int s = 6;
  byte t = ((byte) ((ord.apply(a[offset + start]) << 1) & 0xff));
  for (int i = start + 1; i < end; i++) {
    if (s == 7) {
      ret.put(((byte) (t | (ord.apply(a[offset + i]) & 0x7f))));
      i++;
      // explicitly reset t -- clean up overflow buffer after decoding
      // a full cycle and retain assertion condition below. This happens
      t = 0;// when the LSB in the last encoded byte is 1. (HBASE-9893)
    } else {
      ret.put(((byte) (t | ((ord.apply(a[offset + i]) & 0x7f) >>> s))));
    }
    if (i == end)
      break;
    t = ((byte) ((ord.apply(a[offset + i]) << (8 - s)) & 0xff));
    s = (s == 1) ? 7 : s - 1;
  }
  src.setPosition(end);
  assert t == 0 : "Unexpected bits remaining after decoding blob.";
  assert ret.getPosition() == ret.getLength() : "Allocated unnecessarily large return buffer.";
  return ret.getBytes();
} | 3.26 |
hbase_OrderedBytes_isFixedInt16_rdh | /**
 * Return true when the next encoded value in {@code src} uses fixed-width Int16 encoding, false
 * otherwise.
 */
public static boolean isFixedInt16(PositionedByteRange src) {
  final byte peeked = src.peek();
  final Order ord = Integer.signum(peeked) == -1 ? DESCENDING : ASCENDING;
  return FIXED_INT16 == ord.apply(peeked);
} | 3.26 |
hbase_OrderedBytes_isBlobVar_rdh | /**
 * Return true when the next encoded value in {@code src} uses BlobVar encoding, false otherwise.
 */
public static boolean isBlobVar(PositionedByteRange src) {
  final byte peeked = src.peek();
  final Order ord = Integer.signum(peeked) == -1 ? DESCENDING : ASCENDING;
  return BLOB_VAR == ord.apply(peeked);
} | 3.26 |
hbase_OrderedBytes_encodeString_rdh | /**
 * Encode a String value. String encoding is 0x00-terminated and so it does not support
 * {@code \u0000} codepoints in the value.
 *
 * @param dst
 * The destination to which the encoded value is written.
 * @param val
 * The value to encode.
 * @param ord
 * The {@link Order} to respect while encoding {@code val}.
 * @return the number of bytes written.
 * @throws IllegalArgumentException
 * when {@code val} contains a {@code \u0000}.
 */
public static int encodeString(PositionedByteRange dst, String val, Order ord) {
  if (null == val) {
    return encodeNull(dst, ord);
  }
  if (val.contains("\u0000")) {
    throw new IllegalArgumentException("Cannot encode String values containing '\\u0000'");
  }
  final int offset = dst.getOffset();
  final int start = dst.getPosition();
  dst.put(TEXT);
  // TODO: is there no way to decode into dst directly?
  dst.put(val.getBytes(UTF8));
  dst.put(TERM);
  ord.apply(dst.getBytes(), offset + start, dst.getPosition() - start);
  return dst.getPosition() - start;
} | 3.26 |
hbase_OrderedBytes_decodeInt8_rdh | /**
* Decode an {@code int8} value.
*
* @see #encodeInt8(PositionedByteRange, byte, Order)
*/
public static byte decodeInt8(PositionedByteRange src) {
  final byte header = src.get();
  assert header == FIXED_INT8 || header == DESCENDING.apply(FIXED_INT8);
  final Order ord = (header == FIXED_INT8) ? ASCENDING : DESCENDING;
  // Un-mask the payload byte and flip the sign bit back (the encoder XORs it so that raw
  // byte comparison yields signed ordering).
  return (byte) ((ord.apply(src.get()) ^ 0x80) & 0xff);
}
hbase_OrderedBytes_m4_rdh | /**
* Return true when the next encoded value in {@code src} uses Numeric encoding and is
* {@code Infinite}, false otherwise.
*/
public static boolean m4(PositionedByteRange src) {
byte v136 = ((-1) == Integer.signum(src.peek())
? DESCENDING : ASCENDING).apply(src.peek());
return (f0 == v136) || (POS_INF == v136);
} | 3.26 |
hbase_OrderedBytes_m0_rdh | /**
* Perform unsigned comparison between two long values. Conforms to the same interface as
* {@link org.apache.hadoop.hbase.CellComparator}.
*/
private static int m0(long x1, long x2) {
int cmp;
if ((cmp = (x1 < x2) ? -1 : x1 == x2 ? 0 : 1) == 0)
return 0;
// invert the result when either value is negative
if ((x1 < 0) != (x2 < 0))
return -cmp;
return cmp;
} | 3.26 |
hbase_OrderedBytes_encodeFloat64_rdh | /**
* Encode a 64-bit floating point value using the fixed-length encoding.
* <p>
* This format ensures the following total ordering of floating point values:
* Double.NEGATIVE_INFINITY < -Double.MAX_VALUE < ... < -Double.MIN_VALUE < -0.0 <
* +0.0; < Double.MIN_VALUE < ... < Double.MAX_VALUE < Double.POSITIVE_INFINITY <
* Double.NaN
* </p>
* <p>
* Floating point numbers are encoded as specified in IEEE 754. A 64-bit double precision float
* consists of a sign bit, 11-bit unsigned exponent encoded in offset-1023 notation, and a 52-bit
* significand. The format is described further in the
* <a href="http://en.wikipedia.org/wiki/Double_precision"> Double Precision Floating Point
* Wikipedia page</a>
* </p>
* <p>
* The value of a normal float is -1 <sup>sign bit</sup> × 2<sup>exponent - 1023</sup>
* × 1.significand
* </p>
* <p>
* The IEE754 floating point format already preserves sort ordering for positive floating point
* numbers when the raw bytes are compared in most significant byte order. This is discussed
* further at
* <a href= "http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm" >
* http://www.cygnus-software.com/papers/comparingfloats/comparingfloats. htm</a>
* </p>
* <p>
 * Thus, we need only ensure that negative numbers sort in the exact opposite order as
* positive numbers (so that say, negative infinity is less than negative 1), and that all
* negative numbers compare less than any positive number. To accomplish this, we invert the sign
* bit of all floating point numbers, and we also invert the exponent and significand bits if the
* floating point number was negative.
* </p>
* <p>
* More specifically, we first store the floating point bits into a 64-bit long {@code l} using
* {@link Double#doubleToLongBits}. This method collapses all NaNs into a single, canonical NaN
* value but otherwise leaves the bits unchanged. We then compute
* </p>
*
* <pre>
 * l ^= (l >> (Long.SIZE - 1)) | Long.MIN_VALUE
* </pre>
* <p>
* which inverts the sign bit and XOR's all other bits with the sign bit itself. Comparing the raw
* bytes of {@code l} in most significant byte order is equivalent to performing a double
* precision floating point comparison on the underlying bits (ignoring NaN comparisons, as NaNs
* don't compare equal to anything when performing floating point comparisons).
* </p>
* <p>
* The resulting long integer is then converted into a byte array by serializing the long one byte
* at a time in most significant byte order. The serialized integer is prefixed by a single header
* byte. All serialized values are 9 bytes in length.
* </p>
* <p>
* This encoding format, and much of this highly detailed documentation string, is based on
* Orderly's {@code DoubleWritableRowKey}.
* </p>
*
* @return the number of bytes written.
* @see #decodeFloat64(PositionedByteRange)
*/
public static int encodeFloat64(PositionedByteRange dst, double val, Order
ord) {
final int offset = dst.getOffset();
final int start = dst.getPosition();
long lng
= Double.doubleToLongBits(val);
lng ^= (lng >> (Long.SIZE - 1)) | Long.MIN_VALUE;
dst.put(FIXED_FLOAT64).put(((byte) (lng >> 56))).put(((byte) (lng >> 48))).put(((byte) (lng >> 40))).put(((byte) (lng >> 32))).put(((byte) (lng >> 24))).put(((byte) (lng >> 16))).put(((byte) (lng >> 8))).put(((byte) (lng)));
ord.apply(dst.getBytes(), offset + start, 9);
return 9;
} | 3.26 |
hbase_OrderedBytes_encodeBlobVar_rdh | /**
* Encode a blob value using a modified varint encoding scheme.
*
* @return the number of bytes written.
* @see #encodeBlobVar(PositionedByteRange, byte[], int, int, Order)
*/
public static int encodeBlobVar(PositionedByteRange dst, byte[] val, Order ord) {
return encodeBlobVar(dst, val, 0, null != val ? val.length :
0, ord);
} | 3.26 |
hbase_OrderedBytes_encodeFloat32_rdh | /**
* Encode a 32-bit floating point value using the fixed-length encoding. Encoding format is
* described at length in {@link #encodeFloat64(PositionedByteRange, double, Order)}.
*
* @return the number of bytes written.
* @see #decodeFloat32(PositionedByteRange)
* @see #encodeFloat64(PositionedByteRange, double, Order)
*/
public static int encodeFloat32(PositionedByteRange dst, float val, Order ord) {
final int offset = dst.getOffset();
final int start = dst.getPosition();
int i = Float.floatToIntBits(val);
i ^= (i >> (Integer.SIZE
- 1)) | Integer.MIN_VALUE;
dst.put(FIXED_FLOAT32).put(((byte) (i >> 24))).put(((byte) (i >> 16))).put(((byte) (i >> 8))).put(((byte) (i)));
ord.apply(dst.getBytes(), offset + start, 5);
return 5;
} | 3.26 |
hbase_OrderedBytes_encodeNull_rdh | /**
* Encode a null value.
*
* @param dst
* The destination to which encoded digits are written.
* @param ord
* The {@link Order} to respect while encoding {@code val}.
* @return the number of bytes written.
*/
public static int encodeNull(PositionedByteRange dst, Order ord) {
// A null value is a single header byte with the sort-order mask applied; no payload follows.
dst.put(ord.apply(NULL));
return 1;
}
/**
* Encode an {@code int8} | 3.26 |
hbase_OrderedBytes_decodeBlobCopy_rdh | /**
* Decode a Blob value, byte-for-byte copy.
*
* @see #encodeBlobCopy(PositionedByteRange, byte[], int, int, Order)
*/
public static byte[] decodeBlobCopy(PositionedByteRange src) {
byte header
= src.get();
if ((header == NULL) || (header == DESCENDING.apply(NULL))) {
return null;
}
assert (header == BLOB_COPY) || (header == DESCENDING.apply(BLOB_COPY));
Order ord = (header == BLOB_COPY) ? ASCENDING : DESCENDING;
final int length = src.getRemaining() - (ASCENDING == ord ? 0 :
1);byte[] ret = new byte[length];
src.get(ret);
ord.apply(ret, 0, ret.length);// DESCENDING ordered BlobCopy requires a termination bit to preserve
// sort-order semantics of null values.
if (DESCENDING == ord)
src.get();
return ret;} | 3.26 |
hbase_OrderedBytes_blobVarDecodedLength_rdh | /**
* Calculate the expected BlobVar decoded length based on encoded length.
*/ static int blobVarDecodedLength(int len) {
return ((len - 1)// 1-byte header
* 7)// 7-bits of payload per encoded byte
/ 8;// 8-bits per byte
} | 3.26 |
hbase_OrderedBytes_decodeNumericAsLong_rdh | /**
* Decode a primitive {@code long} value from the Numeric encoding. Numeric encoding is based on
* {@link BigDecimal}; in the event the encoded value is larger than can be represented in a
* {@code long}, this method performs an implicit narrowing conversion as described in
 * {@link BigDecimal#longValue()}.
*
* @throws NullPointerException
* when the encoded value is {@code NULL}.
* @throws IllegalArgumentException
* when the encoded value is not a Numeric.
* @see #encodeNumeric(PositionedByteRange, long, Order)
* @see BigDecimal#longValue()
*/
public static long decodeNumericAsLong(PositionedByteRange src) {
// TODO: should an encoded NULL value throw unexpectedHeader() instead?
if (isNull(src))
throw new NullPointerException();
if (!m3(src))throw unexpectedHeader(src.peek());
if (isNumericNaN(src)) throw unexpectedHeader(src.peek());
if (m4(src))
throw unexpectedHeader(src.peek());
if (isNumericZero(src)) {
src.get();
return Long.valueOf(0);
}
return decodeNumericValue(src).longValue();
} | 3.26 |
hbase_OrderedBytes_isFixedInt32_rdh | /**
* Return true when the next encoded value in {@code src} uses fixed-width Int32 encoding, false
* otherwise.
*/
public static boolean isFixedInt32(PositionedByteRange src) {
  // Un-mask the peeked header (negative raw value => DESCENDING) and compare.
  final byte peeked = src.peek();
  final Order ord = Integer.signum(peeked) == -1 ? DESCENDING : ASCENDING;
  return ord.apply(peeked) == FIXED_INT32;
}
/**
* Return true when the next encoded value in {@code src} | 3.26 |
hbase_OrderedBytes_skip_rdh | /**
* Skip {@code buff}'s position forward over one encoded value.
*
* @return number of bytes skipped.
*/
public static int skip(PositionedByteRange src) {final int start = src.getPosition();byte header = src.get();
Order ord = ((-1) == Integer.signum(header)) ? DESCENDING : ASCENDING;
header = ord.apply(header);
switch (header) {
case NULL
:
case f0 :
return 1;
case NEG_LARGE :
/* Large negative number: 0x08, ~E, ~M */
skipVaruint64(src, DESCENDING != ord);
skipSignificand(src, DESCENDING != ord);
return src.getPosition() -
start;
case NEG_MED_MIN :
/* Medium negative number: 0x13-E, ~M */
case NEG_MED_MIN + 0x1 :
case NEG_MED_MIN + 0x2 :case NEG_MED_MIN + 0x3 :
case NEG_MED_MIN + 0x4 :
case NEG_MED_MIN + 0x5 :
case NEG_MED_MIN + 0x6 :
case NEG_MED_MIN + 0x7 :
case NEG_MED_MIN +
0x8 :case NEG_MED_MIN + 0x9 :case NEG_MED_MAX :
skipSignificand(src, DESCENDING != ord);
return src.getPosition() - start;
case NEG_SMALL :
/* Small negative number: 0x14, -E, ~M */
skipVaruint64(src,
DESCENDING == ord);
skipSignificand(src, DESCENDING != ord);
return src.getPosition() - start;
case ZERO :
return 1;
case POS_SMALL :
/* Small positive number: 0x16, ~-E, M */
skipVaruint64(src, DESCENDING != ord);
skipSignificand(src, DESCENDING == ord);
return src.getPosition()
- start;
case POS_MED_MIN :
/* Medium positive number: 0x17+E, M */
case POS_MED_MIN + 0x1 :
case POS_MED_MIN + 0x2 :
case POS_MED_MIN + 0x3 :
case POS_MED_MIN + 0x4 :
case POS_MED_MIN + 0x5 :
case POS_MED_MIN + 0x6 :
case POS_MED_MIN + 0x7 :
case POS_MED_MIN + 0x8 :
case POS_MED_MIN + 0x9 :
case POS_MED_MAX :
skipSignificand(src, DESCENDING == ord);
return src.getPosition() - start;
case POS_LARGE :
/* Large positive number: 0x22, E, M */
skipVaruint64(src, DESCENDING
== ord);
skipSignificand(src, DESCENDING ==
ord);
return src.getPosition() - start;
case POS_INF :
return 1;
case NAN :
return 1;
case FIXED_INT8 :
src.setPosition(src.getPosition() + 1);
return src.getPosition() - start;
case FIXED_INT16 :
src.setPosition(src.getPosition() +
2);
return src.getPosition() - start;
case FIXED_INT32 :
src.setPosition(src.getPosition() + 4);
return src.getPosition() - start;
case f1 :
src.setPosition(src.getPosition() + 8);
return src.getPosition() - start;
case FIXED_FLOAT32 :
src.setPosition(src.getPosition() + 4);
return src.getPosition() - start;
case FIXED_FLOAT64 :src.setPosition(src.getPosition() + 8);
return src.getPosition() -
start;
case TEXT :
// for null-terminated values, skip to the end.
do {
header = ord.apply(src.get());
} while (header != TERM );
return src.getPosition() - start;
case BLOB_VAR :
// read until we find a 0 in the MSB
do {
header = ord.apply(src.get());
} while (((byte) (header & 0x80)) != TERM );
return src.getPosition() - start;
case
BLOB_COPY :
if (Order.DESCENDING == ord) {
// if descending, read to termination byte.
do {
header = ord.apply(src.get());
} while (header != TERM );
return src.getPosition() - start;
} else {
// otherwise, just skip to the end.
src.setPosition(src.getLength());
return src.getPosition() - start;
}
default :
throw unexpectedHeader(header);
}} | 3.26 |
hbase_OrderedBytes_decodeString_rdh | /**
* Decode a String value.
*/
public static String decodeString(PositionedByteRange
src) {
final byte header = src.get();
if ((header == NULL) || (header == DESCENDING.apply(NULL)))
return null;
assert (header == TEXT) || (header == DESCENDING.apply(TEXT));
Order ord = (header == TEXT) ? ASCENDING : DESCENDING;
byte[] a = src.getBytes();
final int offset = src.getOffset();
final int start
= src.getPosition();
final byte v74 = ord.apply(TERM);
int rawStartPos = offset + start;
int rawTermPos = rawStartPos;
for (; a[rawTermPos] != v74; rawTermPos++);
src.setPosition((rawTermPos - offset) + 1);// advance position to TERM + 1
if (DESCENDING == ord) {
// make a copy so that we don't disturb encoded value with ord.
byte[] copy
= new byte[rawTermPos - rawStartPos];
System.arraycopy(a, rawStartPos, copy, 0, copy.length);
ord.apply(copy);
return new String(copy, UTF8);
} else {
return new String(a, rawStartPos, rawTermPos - rawStartPos, UTF8);
}} | 3.26 |
hbase_OrderedBytes_length_rdh | /**
* Return the number of encoded entries remaining in {@code buff}. The state of {@code buff} is
* not modified through use of this method.
*/
public static int length(PositionedByteRange buff) {
PositionedByteRange b = new SimplePositionedMutableByteRange(buff.getBytes(), buff.getOffset(), buff.getLength());
b.setPosition(buff.getPosition());
int cnt = 0;
for (;
isEncodedValue(b); skip(b) ,
cnt++);
return cnt;
} | 3.26 |
hbase_OrderedBytes_decodeNumericValue_rdh | /**
* Decode a {@link BigDecimal} from {@code src}. Assumes {@code src} encodes a value in Numeric
* encoding and is within the valid range of {@link BigDecimal} values. {@link BigDecimal} does
 * not support {@code NaN} or {@code Infinite} values.
*
* @see #decodeNumericAsDouble(PositionedByteRange)
*/
private static BigDecimal decodeNumericValue(PositionedByteRange src) {
  byte header = src.get();
  // Negative raw header => value was encoded DESCENDING; un-mask before classifying.
  final boolean dsc = Integer.signum(header) == -1;
  if (dsc) {
    header = DESCENDING.apply(header);
  }
  if (header == NULL) {
    return null;
  }
  final int e;
  if (header == NEG_LARGE) {
    /* Large negative number: 0x08, ~E, ~M */
    e = (int) getVaruint64(src, !dsc);
    return decodeSignificand(src, e, !dsc).negate();
  }
  if (header >= NEG_MED_MIN && header <= NEG_MED_MAX) {
    /* Medium negative number: 0x13-E, ~M -- exponent folded into the header */
    e = NEG_MED_MAX - header;
    return decodeSignificand(src, e, !dsc).negate();
  }
  if (header == NEG_SMALL) {
    /* Small negative number: 0x14, -E, ~M */
    e = (int) -getVaruint64(src, dsc);
    return decodeSignificand(src, e, !dsc).negate();
  }
  if (header == ZERO) {
    return BigDecimal.ZERO;
  }
  if (header == POS_SMALL) {
    /* Small positive number: 0x16, ~-E, M */
    e = (int) -getVaruint64(src, !dsc);
    return decodeSignificand(src, e, dsc);
  }
  if (header >= POS_MED_MIN && header <= POS_MED_MAX) {
    /* Medium positive number: 0x17+E, M */
    e = header - POS_MED_MIN;
    return decodeSignificand(src, e, dsc);
  }
  if (header == POS_LARGE) {
    /* Large positive number: 0x22, E, M */
    e = (int) getVaruint64(src, dsc);
    return decodeSignificand(src, e, dsc);
  }
  throw unexpectedHeader(header);
}
/**
* Decode a primitive {@code double} value from the Numeric encoding. Numeric encoding is based on
* {@link BigDecimal}; in the event the encoded value is larger than can be represented in a
* {@code double}, this method performs an implicit narrowing conversion as described in
* {@link BigDecimal#doubleValue()}.
*
* @throws NullPointerException
* when the encoded value is {@code NULL} | 3.26 |
hbase_OrderedBytes_isNumericNaN_rdh | /**
* Return true when the next encoded value in {@code src} uses Numeric encoding and is
* {@code NaN}, false otherwise.
*/
public static boolean isNumericNaN(PositionedByteRange src) {
return NAN == ((-1) == Integer.signum(src.peek()) ? DESCENDING : ASCENDING).apply(src.peek());
} | 3.26 |
hbase_AbstractMultiFileWriter_commitWriters_rdh | /**
* Commit all writers.
* <p>
* Notice that here we use the same <code>maxSeqId</code> for all output files since we haven't
 * found an easy way to find enough sequence ids for different output files in some corner cases. See
* comments in HBASE-15400 for more details.
*/
public List<Path> commitWriters(long
maxSeqId, boolean majorCompaction) throws IOException {
return commitWriters(maxSeqId,
majorCompaction, Collections.emptyList());
} | 3.26 |
hbase_AbstractMultiFileWriter_preCommitWriters_rdh | /**
* Subclasses override this method to be called at the end of a successful sequence of append; all
* appends are processed before this method is called.
*/
protected void preCommitWriters()
throws IOException {
// Default implementation is a no-op; subclasses hook post-append, pre-commit work here.
}
hbase_AbstractMultiFileWriter_abortWriters_rdh | /**
* Close all writers without throwing any exceptions. This is used when compaction failed usually.
*/
public List<Path> abortWriters() {
List<Path> paths = new ArrayList<>();
for (StoreFileWriter writer : writers()) {
try {
if (writer != null) {
paths.add(writer.getPath());
writer.close();
}
} catch (Exception ex) {
LOG.error("Failed to close the writer after an unfinished compaction.", ex);
}
}
return paths;
} | 3.26 |
hbase_AbstractMultiFileWriter_init_rdh | /**
* Initializes multi-writer before usage.
*
* @param sourceScanner
* Optional store scanner to obtain the information about read progress.
* @param factory
* Factory used to produce individual file writers.
*/
public void init(StoreScanner sourceScanner, WriterFactory factory) {
this.writerFactory = factory;
this.sourceScanner = sourceScanner;
} | 3.26 |
hbase_ZNodeClearer_writeMyEphemeralNodeOnDisk_rdh | /**
* Logs the errors without failing on exception.
*/
public static void writeMyEphemeralNodeOnDisk(String fileContent) {
String fileName = ZNodeClearer.getMyEphemeralNodeFileName();
if (fileName == null) {
LOG.warn("Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared " + "on crash by start scripts (Longer MTTR!)");
return;
}
FileWriter fstream;
try {
fstream = new FileWriter(fileName);
} catch (IOException e) {
LOG.warn("Can't write znode file " + fileName, e);
return;
}
BufferedWriter out = new BufferedWriter(fstream);
try {
try {out.write(fileContent + "\n");
} finally {
try {
out.close();
} finally
{
fstream.close();
}
}
} catch (IOException e) {
LOG.warn("Can't write znode file " + fileName, e);
}
} | 3.26 |
hbase_ZNodeClearer_clear_rdh | /**
* Delete the master znode if its content (ServerName string) is the same as the one in the znode
 * file. (env: HBASE_ZNODE_FILE). In case of master-rs collocation we extract ServerName string
* from rsZnode path.(HBASE-14861)
*
* @return true on successful deletion, false otherwise.
*/
public static boolean clear(Configuration conf) {
Configuration tempConf = new Configuration(conf);
tempConf.setInt("zookeeper.recovery.retry", 0);
ZKWatcher zkw;
try {
zkw = new ZKWatcher(tempConf, "clean znode for master", new Abortable() {
@Override
public void abort(String why, Throwable e) {
}
@Override
public boolean isAborted() {
return false;
}
});
} catch (IOException e) {
LOG.warn("Can't connect to zookeeper to read the master znode", e);
return false;
}
String v12;
try {
v12
= ZNodeClearer.readMyEphemeralNodeOnDisk();
return MasterAddressTracker.deleteIfEquals(zkw, v12);
} catch (FileNotFoundException
fnfe) {
// If no file, just keep going -- return success.
LOG.warn("Can't find the znode file; presume non-fatal", fnfe);
return true;
} catch (IOException e) {
LOG.warn("Can't read the content of the znode file", e);
return false;
} finally {
zkw.close();
}
} | 3.26 |
hbase_ZNodeClearer_getMyEphemeralNodeFileName_rdh | /**
* Get the name of the file used to store the znode contents
*/
public static String getMyEphemeralNodeFileName() {
return System.getenv().get("HBASE_ZNODE_FILE");
} | 3.26 |
hbase_ZNodeClearer_deleteMyEphemeralNodeOnDisk_rdh | /**
* delete the znode file
*/
public static void deleteMyEphemeralNodeOnDisk() {
String
fileName = getMyEphemeralNodeFileName();
if (fileName != null) {
new File(fileName).delete();
}} | 3.26 |
hbase_ZNodeClearer_parseMasterServerName_rdh | /**
* See HBASE-14861. We are extracting master ServerName from rsZnodePath example:
* "/hbase/rs/server.example.com,16020,1448266496481"
*
* @param rsZnodePath
* from HBASE_ZNODE_FILE
* @return String representation of ServerName or null if fails
*/
public static String parseMasterServerName(String rsZnodePath)
{
String masterServerName = null;
try {
String[] rsZnodeParts = rsZnodePath.split("/");
masterServerName = rsZnodeParts[rsZnodeParts.length - 1];
} catch (IndexOutOfBoundsException e) {
LOG.warn(("String " + rsZnodePath) + " has wrong format", e);
}
return masterServerName;
} | 3.26 |
hbase_BloomFilterChunk_createAnother_rdh | /**
* Creates another similar Bloom filter. Does not copy the actual bits, and sets the new filter's
* key count to zero.
*
* @return a Bloom filter with the same configuration as this
*/
public BloomFilterChunk createAnother() {
BloomFilterChunk bbf = new BloomFilterChunk(hashType, this.bloomType);
bbf.byteSize = byteSize;bbf.hashCount = hashCount;
bbf.maxKeys = maxKeys;
return bbf;
} | 3.26 |
hbase_BloomFilterChunk_add_rdh | // Used only by tests
void add(byte[] buf, int offset, int len) {
/* For faster hashing, use combinatorial generation
http://www.eecs.harvard.edu/~kirsch/pubs/bbbf/esa06.pdf
*/
HashKey<byte[]> hashKey = new ByteArrayHashKey(buf, offset, len);
int hash1 = this.hash.hash(hashKey, 0);
int hash2 = this.hash.hash(hashKey, hash1);
setHashLoc(hash1, hash2);
} | 3.26 |
hbase_BloomFilterChunk_get_rdh | /**
* Check if bit at specified index is 1.
*
* @param pos
* index of bit
* @return true if bit at specified index is 1, false if 0.
*/
static boolean get(int pos, ByteBuffer bloomBuf, int bloomOffset) {
  int bytePos = pos >> 3; // pos / 8
  int bitPos = pos & 0x7; // pos % 8
  // TODO access this via Util API which can do Unsafe access if possible(?)
  byte b = bloomBuf.get(bloomOffset + bytePos);
  return (b & BloomFilterUtil.bitvals[bitPos]) != 0;
}
hbase_BloomFilterChunk_writeBloom_rdh | /**
* Writes just the bloom filter to the output array
*
* @param out
* OutputStream to place bloom
* @throws IOException
* Error writing bloom array
*/
public void writeBloom(final DataOutput out) throws IOException {
if (!this.bloom.hasArray()) {
throw new IOException("Only writes ByteBuffer with underlying array.");
} out.write(this.bloom.array(), this.bloom.arrayOffset(),
this.bloom.limit());
} | 3.26 |
hbase_BloomFilterChunk_set_rdh | // ---------------------------------------------------------------------------
/**
* Private helpers
*/
/**
* Set the bit at the specified index to 1.
*
* @param pos
* index of bit
*/
void set(long pos) {
int v10 = ((int) (pos / 8));
int bitPos = ((int) (pos % 8));
byte curByte = bloom.get(v10);
curByte |= BloomFilterUtil.bitvals[bitPos];
bloom.put(v10, curByte);} | 3.26 |
hbase_BloomFilterChunk_actualErrorRate_rdh | /**
* Computes the error rate for this Bloom filter, taking into account the actual number of hash
* functions and keys inserted. The return value of this function changes as a Bloom filter is
* being populated. Used for reporting the actual error rate of compound Bloom filters when
* writing them out.
*
* @return error rate for this particular Bloom filter
*/
public double actualErrorRate() {
// byteSize is in bytes; the utility expects the Bloom bit count, hence * 8.
return BloomFilterUtil.actualErrorRate(keyCount, byteSize * 8, hashCount);
}
hbase_BatchScanResultCache_m0_rdh | // Add new result to the partial list and return a batched Result if caching size exceed batching
// limit. As the RS will also respect the scan.getBatch, we can make sure that we will get only
// one Result back at most(or null, which means we do not have enough cells).
private Result m0(Result result) {
// Queue the new partial result and track the total number of cells held.
partialResults.addLast(result);
numCellsOfPartialResults += result.size();
// Not enough cells for a full batch yet: signal the caller to keep fetching.
if (numCellsOfPartialResults < batch) {
return
null;
}
// Assemble exactly `batch` cells from the queued partial results.
Cell[] v1 = new Cell[batch];
int cellCount = 0;
boolean stale = false;
for (; ;) {
Result v4 = partialResults.pollFirst();
stale = stale || v4.isStale();
int newCellCount = cellCount + v4.size();
if (newCellCount > batch) {
// We have more cells than expected, so split the current result
int len = batch - cellCount;
System.arraycopy(v4.rawCells(), 0, v1, cellCount, len);
Cell[] remainingCells = new Cell[v4.size() - len];
System.arraycopy(v4.rawCells(), len, remainingCells, 0, v4.size() - len);
// Push the leftover cells back to the front so the next batch starts with them.
partialResults.addFirst(Result.create(remainingCells, v4.getExists(), v4.isStale(), v4.mayHaveMoreCellsInRow()));
break;
}
System.arraycopy(v4.rawCells(), 0, v1, cellCount, v4.size());
if (newCellCount == batch) {
break;
}
cellCount = newCellCount;
}
numCellsOfPartialResults -= batch;
// More cells may remain in the row if the source says so OR we still hold queued partials.
return Result.create(v1, null, stale, result.mayHaveMoreCellsInRow() || (!partialResults.isEmpty()));
}
hbase_DefaultMobStoreCompactor_calculateMobLengthMap_rdh | /**
*
* @param mobRefs
* multimap of original table name -> mob hfile
*/
private void calculateMobLengthMap(SetMultimap<TableName, String> mobRefs) throws IOException {
FileSystem fs = store.getFileSystem();
// Reuse the thread-local length map; clear any leftovers from a previous compaction.
HashMap<String, Long> map = mobLengthMap.get();
map.clear();
for (Entry<TableName, String>
reference : mobRefs.entries()) {
final TableName table = reference.getKey();
final String mobfile = reference.getValue();
// Old-style MOB file names disable I/O-optimized mode (see the disableIO check in
// performCompaction).
if (MobFileName.isOldMobFileName(mobfile)) {
disableIO.set(Boolean.TRUE);
}
// Probe each candidate location for the referenced file; first hit wins.
List<Path> locations = mobStore.getLocations(table);
for (Path p : locations) {
try {
FileStatus st = fs.getFileStatus(new Path(p, mobfile));
long size = st.getLen();
LOG.debug("Referenced MOB file={} size={}", mobfile, size);
map.put(mobfile, size);
break;
} catch (FileNotFoundException exception) {
// Not in this location; fall through and try the next one.
LOG.debug("Mob file {} was not in location {}. May have other locations to try.", mobfile, p);
}
}
// A referenced MOB file missing from every location is a hard error.
if (!map.containsKey(mobfile)) {
throw new FileNotFoundException(((("Could not find mob file " + mobfile) + " in the list of ") + "expected locations: ") + locations);}
}
}
hbase_DefaultMobStoreCompactor_performCompaction_rdh | /**
* Performs compaction on a column family with the mob flag enabled. This works only when MOB
* compaction is explicitly requested (by User), or by Master There are two modes of a MOB
* compaction:<br>
* <p>
* <ul>
* <li>1. Full mode - when all MOB data for a region is compacted into a single MOB file.
* <li>2. I/O optimized mode - for use cases with no or infrequent updates/deletes of a <br>
* MOB data. The main idea behind i/o optimized compaction is to limit maximum size of a MOB file
* produced during compaction and to limit I/O write/read amplification.
* </ul>
* The basic algorithm of compaction is the following: <br>
* 1. If the Put cell has a mob reference tag, the cell's value is the path of the mob file.
* <ol>
* <li>If the value size of a cell is larger than the threshold, this cell is regarded as a mob,
* directly copy the (with mob tag) cell into the new store file.</li>
* <li>Otherwise, retrieve the mob cell from the mob file, and writes a copy of the cell into the
* new store file.</li>
* </ol>
* 2. If the Put cell doesn't have a reference tag.
* <ol>
* <li>If the value size of a cell is larger than the threshold, this cell is regarded as a mob,
* write this cell to a mob file, and write the path of this mob file to the store file.</li>
* <li>Otherwise, directly write this cell into the store file.</li>
* </ol>
*
* @param fd
* File details
* @param scanner
* Where to read from.
* @param writer
* Where to write to.
* @param smallestReadPoint
* Smallest read point.
* @param cleanSeqId
* When true, remove seqId(used to be mvcc) value which is <=
* smallestReadPoint
* @param throughputController
* The compaction throughput controller.
* @param request
* compaction request.
* @param progress
* Progress reporter.
* @return Whether compaction ended; false if it was interrupted for any reason.
*/
@Override
protected boolean performCompaction(FileDetails fd, InternalScanner scanner, CellSink writer, long smallestReadPoint, boolean cleanSeqId, ThroughputController throughputController, CompactionRequestImpl request, CompactionProgress progress) throws IOException {long bytesWrittenProgressForLog = 0;
long bytesWrittenProgressForShippedCall = 0;
// Clear old mob references
mobRefSet.get().clear();
boolean isUserRequest = userRequest.get();
boolean major = request.isAllFiles();
boolean compactMOBs = major && isUserRequest;boolean discardMobMiss = conf.getBoolean(MobConstants.MOB_UNSAFE_DISCARD_MISS_KEY, MobConstants.DEFAULT_MOB_DISCARD_MISS);
if (discardMobMiss)
{
LOG.warn(("{}=true. This is unsafe setting recommended only when first upgrading to a version" + " with the distributed mob compaction feature on a cluster that has experienced MOB data ") + "corruption.", MobConstants.MOB_UNSAFE_DISCARD_MISS_KEY);
}
long maxMobFileSize = conf.getLong(MobConstants.MOB_COMPACTION_MAX_FILE_SIZE_KEY, MobConstants.DEFAULT_MOB_COMPACTION_MAX_FILE_SIZE);
boolean
ioOptimizedMode = this.ioOptimizedMode && (!disableIO.get());
LOG.info("Compact MOB={} optimized configured={} optimized enabled={} maximum MOB file size={}" + " major={} store={}", compactMOBs, this.ioOptimizedMode, ioOptimizedMode, maxMobFileSize, major, getStoreInfo());
// Since scanner.next() can return 'false' but still be delivering data,
// we have to use a do/while loop.
List<Cell> cells = new ArrayList<>();
// Limit to "hbase.hstore.compaction.kv.max" (default 10) to avoid OOME
long currentTime
= EnvironmentEdgeManager.currentTime();
long lastMillis = 0;
if (LOG.isDebugEnabled()) {
lastMillis = currentTime;
}
CloseChecker closeChecker = new CloseChecker(conf, currentTime);
String compactionName = ThroughputControlUtil.getNameForThrottling(store, "compaction");
long
now = 0;
boolean hasMore;
byte[] fileName = null;
StoreFileWriter mobFileWriter = null;
/* mobCells are used only to decide if we need to commit or abort current MOB output file. */
long mobCells = 0;
long cellsCountCompactedToMob = 0;
long cellsCountCompactedFromMob = 0;
long cellsSizeCompactedToMob = 0;
long cellsSizeCompactedFromMob = 0;
boolean finished = false;
ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax).setSizeLimit(LimitScope.BETWEEN_CELLS, Long.MAX_VALUE, Long.MAX_VALUE, compactScannerSizeLimit).build();
throughputController.start(compactionName);
KeyValueScanner kvs =
(scanner instanceof KeyValueScanner) ? ((KeyValueScanner) (scanner)) : null;
long shippedCallSizeLimit = ((long) (request.getFiles().size())) * this.store.getColumnFamilyDescriptor().getBlocksize();
Cell mobCell = null;
List<String> committedMobWriterFileNames = new ArrayList<>();
try {
mobFileWriter
= newMobWriter(fd, major, request.getWriterCreationTracker());
fileName = Bytes.toBytes(mobFileWriter.getPath().getName());
do {
hasMore = scanner.next(cells, scannerContext);
currentTime = EnvironmentEdgeManager.currentTime();
if (LOG.isDebugEnabled()) {
now = currentTime;
}
if (closeChecker.isTimeLimit(store, currentTime)) {
progress.cancel();
return false;
}
for (Cell c : cells) {
if (compactMOBs) {
if (MobUtils.isMobReferenceCell(c)) {
String fName = MobUtils.getMobFileName(c);
// Added to support migration
try {
mobCell = mobStore.resolve(c, true, false).getCell(); } catch (DoNotRetryIOException e) {
if ((discardMobMiss && (e.getCause() != null)) && (e.getCause() instanceof FileNotFoundException)) {
LOG.error("Missing MOB cell: file={} not found cell={}", fName, c);
continue;
} else {
throw e;
}
}
if (discardMobMiss && (mobCell.getValueLength() == 0)) {
LOG.error("Missing MOB cell value: file={} mob cell={} cell={}", fName, mobCell, c);continue;
} else if (mobCell.getValueLength() == 0) {
String errMsg = String.format("Found 0 length MOB cell in a file=%s mob cell=%s " + " cell=%s", fName, mobCell, c);
throw new IOException(errMsg);}
if (mobCell.getValueLength() > mobSizeThreshold) {
// put the mob data back to the MOB store file
PrivateCellUtil.setSequenceId(mobCell, c.getSequenceId());if
(!ioOptimizedMode) {
mobFileWriter.append(mobCell);
mobCells++;
writer.append(MobUtils.createMobRefCell(mobCell, fileName, this.mobStore.getRefCellTags()));
} else {
// I/O optimized mode
// Check if MOB cell origin file size is
// greater than threshold
Long size = mobLengthMap.get().get(fName);
if (size == null) {
// FATAL error (we should never get here though), abort compaction
// This error means that meta section of store file does not contain
// MOB file, which has references in at least one cell from this store file
String msg = String.format("Found an unexpected MOB file during compaction %s, aborting compaction %s", fName, getStoreInfo());
throw new IOException(msg);
}
// Can not be null
if (size < maxMobFileSize) {
// If MOB cell origin file is below threshold
// it is get compacted
mobFileWriter.append(mobCell);
// Update number of mobCells in a current mob writer
mobCells++;
writer.append(MobUtils.createMobRefCell(mobCell, fileName, this.mobStore.getRefCellTags()));
// Update total size of the output (we do not take into account
// file compression yet)
long len = mobFileWriter.getPos();
if (len > maxMobFileSize) {LOG.debug("Closing output MOB File, length={} file={}, store={}", len, mobFileWriter.getPath().getName(), getStoreInfo());
mobFileWriter = switchToNewMobWriter(mobFileWriter, fd, mobCells, major, request, committedMobWriterFileNames);
fileName = Bytes.toBytes(mobFileWriter.getPath().getName());
mobCells = 0;
}
} else {
// We leave large MOB file as is (is not compacted),
// then we update set of MOB file references
// and append mob cell directly to the store's writer
Optional<TableName> refTable = MobUtils.getTableName(c);
if (refTable.isPresent()) {
mobRefSet.get().put(refTable.get(), fName);
writer.append(c);
} else {
throw new IOException(String.format(("MOB cell did not contain a tablename " + "tag. should not be possible. see ref guide on mob troubleshooting. ") + "store=%s cell=%s", getStoreInfo(), c));
}
}
}
} else {
// If MOB value is less than threshold, append it directly to a store file
PrivateCellUtil.setSequenceId(mobCell, c.getSequenceId());
writer.append(mobCell);
cellsCountCompactedFromMob++;
cellsSizeCompactedFromMob += mobCell.getValueLength();}
} else {// Not a MOB reference cell
int size = c.getValueLength();
if (size > mobSizeThreshold) {
// This MOB cell comes from a regular store file
// therefore we store it into original mob output
mobFileWriter.append(c);
writer.append(MobUtils.createMobRefCell(c, fileName, this.mobStore.getRefCellTags()));
mobCells++;
cellsCountCompactedToMob++;
cellsSizeCompactedToMob += c.getValueLength();
if (ioOptimizedMode) {
// Update total size of the output (we do not take into account
// file compression yet)
long v51 = mobFileWriter.getPos();
if
(v51 > maxMobFileSize) {
mobFileWriter = switchToNewMobWriter(mobFileWriter, fd, mobCells, major, request, committedMobWriterFileNames);
fileName = Bytes.toBytes(mobFileWriter.getPath().getName());
mobCells = 0;
}
}
} else {
// Not a MOB cell, write it directly to a store file
writer.append(c);
}
}
} else if (c.getTypeByte() != Type.Put.getCode()) {
// Not a major compaction or major with MOB disabled
// If the kv type is not put, directly write the cell
// to the store file.
writer.append(c);
} else if (MobUtils.isMobReferenceCell(c)) {
// Not a major MOB compaction, Put MOB reference
if (MobUtils.hasValidMobRefCellValue(c)) {
// We do not check mobSizeThreshold during normal compaction,
// leaving it to a MOB compaction run
Optional<TableName> refTable = MobUtils.getTableName(c);
if (refTable.isPresent()) {
mobRefSet.get().put(refTable.get(), MobUtils.getMobFileName(c));
writer.append(c);
} else {
throw new IOException(String.format(("MOB cell did not contain a tablename " + "tag. should not be possible. see ref guide on mob troubleshooting. ") + "store=%s cell=%s", getStoreInfo(), c));
}
} else {
String errMsg = String.format("Corrupted MOB reference: %s", c.toString());
throw new IOException(errMsg);
}
} else if (c.getValueLength() <= mobSizeThreshold) {
// If the value size of a cell is not larger than the threshold, directly write it to
// the store file.
writer.append(c);
} else {
// If the value size of a cell is larger than the threshold, it's regarded as a mob,
// write this cell to a mob file, and write the path to the store file.
mobCells++;
// append the original keyValue in the mob file.
mobFileWriter.append(c);
Cell reference = MobUtils.createMobRefCell(c, fileName, this.mobStore.getRefCellTags());
// write the cell whose value is the path of a mob file to the store file.
writer.append(reference);
cellsCountCompactedToMob++;
cellsSizeCompactedToMob += c.getValueLength();
if (ioOptimizedMode)
{
long len = mobFileWriter.getPos();
if (len > maxMobFileSize) {
mobFileWriter = switchToNewMobWriter(mobFileWriter, fd, mobCells, major, request, committedMobWriterFileNames);
fileName = Bytes.toBytes(mobFileWriter.getPath().getName());
mobCells = 0;
}
}
}
int len = c.getSerializedSize();
++progress.currentCompactedKVs;
progress.totalCompactedSize += len;
bytesWrittenProgressForShippedCall += len;
if (LOG.isDebugEnabled()) {
bytesWrittenProgressForLog += len;
}
throughputController.control(compactionName, len);
if (closeChecker.isSizeLimit(store, len)) {
progress.cancel();return false; }
if ((kvs != null) && (bytesWrittenProgressForShippedCall > shippedCallSizeLimit)) {
((ShipperListener) (writer)).beforeShipped();
kvs.shipped();
bytesWrittenProgressForShippedCall = 0;
}
}
// Log the progress of long running compactions every minute if
// logging at DEBUG level
if (LOG.isDebugEnabled()) {
if ((now - lastMillis) >= COMPACTION_PROGRESS_LOG_INTERVAL) {
String rate = String.format("%.2f", (bytesWrittenProgressForLog / 1024.0) / ((now - lastMillis) / 1000.0));
LOG.debug("Compaction progress: {} {}, rate={} KB/sec, throughputController is {}", compactionName, progress, rate, throughputController);
lastMillis = now;
bytesWrittenProgressForLog = 0;
}
}
cells.clear();
} while (hasMore );
// Commit last MOB writer
commitOrAbortMobWriter(mobFileWriter, fd.maxSeqId, mobCells, major);
finished = true;
} catch
(InterruptedException e) {
progress.cancel();
throw new InterruptedIOException("Interrupted while control throughput of compacting " + compactionName);
} catch (IOException t) {
String msg = "Mob compaction failed for region: " + store.getRegionInfo().getEncodedName();
throw new IOException(msg, t);
} finally {
// Clone last cell in the final because writer will append last cell when committing. If
// don't clone here and once the scanner get closed, then the memory of last cell will be
// released. (HBASE-22582)
((ShipperListener) (writer)).beforeShipped();
throughputController.finish(compactionName);
if ((!finished) && (mobFileWriter != null)) {
// Remove all MOB references because compaction failed
clearThreadLocals();
// Abort writer
LOG.debug("Aborting writer for {} because of a compaction failure, Store {}", mobFileWriter.getPath(), getStoreInfo());
abortWriter(mobFileWriter);
deleteCommittedMobFiles(committedMobWriterFileNames);
}
}
mobStore.updateCellsCountCompactedFromMob(cellsCountCompactedFromMob);
mobStore.updateCellsCountCompactedToMob(cellsCountCompactedToMob);
mobStore.updateCellsSizeCompactedFromMob(cellsSizeCompactedFromMob);
mobStore.updateCellsSizeCompactedToMob(cellsSizeCompactedToMob);
progress.complete();
return true;
} | 3.26 |
/**
* Update the statistics for the specified region.
*
* @param tracker
* tracker to update
* @param server
* server from which the result was obtained
* @param regionName
* full region name for the statistics
* @param stats
* statistics to update for the specified region
*/
public static void updateStats(StatisticTrackable tracker, ServerName server, byte[] regionName, RegionLoadStats stats) {if (((regionName != null) && (stats != null)) && (tracker != null)) {
tracker.updateRegionStats(server, regionName, stats);
}
} | 3.26 |
/* ------------------------------------------------------------------------ */
// for a given cell, write the cell representation on the index chunk
private int createCellReference(ByteBufferKeyValue cell, ByteBuffer idxBuffer, int idxOffset) {
int offset = idxOffset;
int dataChunkID = cell.getChunkId();
offset = ByteBufferUtils.putInt(idxBuffer, offset, dataChunkID);// write data chunk id
offset = ByteBufferUtils.putInt(idxBuffer, offset, cell.getOffset());// offset
offset = ByteBufferUtils.putInt(idxBuffer, offset, cell.getSerializedSize());// length
offset = ByteBufferUtils.putLong(idxBuffer, offset, cell.getSequenceId());// seqId
return
offset;
} | 3.26 |
/* ------------------------------------------------------------------------ */
// Create CellSet based on CellChunkMap from current ConcurrentSkipListMap based CellSet
// (without compacting iterator)
// This is a service for not-flat immutable segments
/**
 * Rebuilds this segment's cell set as a flat, CellChunkMap-backed CellSet by scanning the
 * current cells through the given scanner (used when flattening a not-flat immutable segment).
 *
 * @param numOfCells expected number of cells; used to size the index chunks (via m0) and the
 *          resulting CellChunkMap
 * @param segmentScanner scanner over the existing cell set; consumed fully and closed here
 * @param oldCellSet the currently installed cell set, passed to setCellSet as the set to replace
 * @param memstoreSizing forwarded to copyCellIntoMSLAB for size accounting of copied cells
 * @param action when FLATTEN_COUNT_UNIQUE_KEYS, unique row/column keys are counted during the
 *          scan; otherwise the count is recorded as CellSet.UNKNOWN_NUM_UNIQUES
 */
private void reinitializeCellSet(int numOfCells, KeyValueScanner segmentScanner, CellSet oldCellSet, MemStoreSizing memstoreSizing, MemStoreCompactionStrategy.Action action) {
Cell curCell;
// Allocate the index chunks that will hold the per-cell references.
Chunk[] chunks = m0(numOfCells);
int currentChunkIdx = 0;int offsetInCurentChunk = ChunkCreator.SIZEOF_CHUNK_HEADER;
int numUniqueKeys = 0;
Cell prev = null;
try {
while ((curCell = segmentScanner.next()) != null) {
assert curCell instanceof ExtendedCell;
if (((ExtendedCell) (curCell)).getChunkId() == ExtendedCell.CELL_NOT_BASED_ON_CHUNK) {
// CellChunkMap assumes all cells are allocated on MSLAB.
// Therefore, cells which are not allocated on MSLAB initially,
// are copied into MSLAB here.
curCell = copyCellIntoMSLAB(curCell, memstoreSizing);
}if ((offsetInCurentChunk + ClassSize.CELL_CHUNK_MAP_ENTRY) > chunks[currentChunkIdx].size) {
// continue to the next metadata chunk
currentChunkIdx++;
offsetInCurentChunk = ChunkCreator.SIZEOF_CHUNK_HEADER;
}
// Write this cell's reference (chunk id / offset / length / seqId) into the index chunk.
offsetInCurentChunk = createCellReference(((ByteBufferKeyValue) (curCell)), chunks[currentChunkIdx].getData(), offsetInCurentChunk);
if (action == Action.FLATTEN_COUNT_UNIQUE_KEYS) {
// counting number of unique keys
if (prev != null) {
if (!CellUtil.matchingRowColumn(prev, curCell)) {
numUniqueKeys++;
}
} else {
numUniqueKeys++;
}
}
prev =
curCell;
}
if (action != Action.FLATTEN_COUNT_UNIQUE_KEYS) {
numUniqueKeys = CellSet.UNKNOWN_NUM_UNIQUES;
}
} catch (IOException ie) {
// Scanner failures are not expected here; surface them as an unchecked error.
throw
new IllegalStateException(ie);
} finally {
segmentScanner.close();
}
CellChunkMap ccm = new CellChunkMap(getComparator(), chunks, 0, numOfCells, false);
// update the CellSet of this Segment
this.setCellSet(oldCellSet, new CellSet(ccm, numUniqueKeys));
} | 3.26 |
// /////////////////// PRIVATE METHODS /////////////////////
/* ------------------------------------------------------------------------ */
// Create CellSet based on CellChunkMap from compacting iterator
/**
 * Builds this segment's cell set as a flat, CellChunkMap-backed CellSet from a compacting
 * iterator; the iterator itself hides any cell-elimination logic.
 *
 * @param numOfCells upper bound on the number of cells; used to size the index chunks (via m0)
 * @param iterator source of the (possibly compacted/merged) cells
 * @param action COMPACT additionally clones each cell into this segment's MSLAB and sets the
 *          unique-key count to the final cell count; MERGE_COUNT_UNIQUE_KEYS counts unique keys
 *          while iterating; any other action records CellSet.UNKNOWN_NUM_UNIQUES
 */
private void initializeCellSet(int numOfCells, MemStoreSegmentsIterator iterator, MemStoreCompactionStrategy.Action action) {
int numOfCellsAfterCompaction = 0;int currentChunkIdx = 0;
int offsetInCurentChunk = ChunkCreator.SIZEOF_CHUNK_HEADER;
int numUniqueKeys = 0;
Cell prev = null;
// Allocate the index chunks that will hold the per-cell references.
Chunk[] chunks = m0(numOfCells);
while (iterator.hasNext()) {
// the iterator hides the elimination logic for compaction
boolean alreadyCopied = false;
Cell c = iterator.next();
numOfCellsAfterCompaction++;
assert c instanceof ExtendedCell;
if (((ExtendedCell) (c)).getChunkId() == ExtendedCell.CELL_NOT_BASED_ON_CHUNK) {
// CellChunkMap assumes all cells are allocated on MSLAB.
// Therefore, cells which are not allocated on MSLAB initially,
// are copied into MSLAB here.
c = copyCellIntoMSLAB(c, null);// no memstore sizing object to update
alreadyCopied = true;
}
if ((offsetInCurentChunk +
ClassSize.CELL_CHUNK_MAP_ENTRY) > chunks[currentChunkIdx].size) {
currentChunkIdx++;// continue to the next index chunk
offsetInCurentChunk = ChunkCreator.SIZEOF_CHUNK_HEADER;
}
if ((action == Action.COMPACT) && (!alreadyCopied)) { // For compaction copy cell to the new segment (MSLAB copy),here we set forceCloneOfBigCell
// to true, because the chunk which the cell is allocated may be freed after the compaction
// is completed, see HBASE-27464.
c = maybeCloneWithAllocator(c, true);
}
// add the Cell reference to the index chunk
offsetInCurentChunk = createCellReference(((ByteBufferKeyValue) (c)), chunks[currentChunkIdx].getData(), offsetInCurentChunk);
// the sizes still need to be updated in the new segment
// second parameter true, because in compaction/merge the addition of the cell to new segment
// is always successful
updateMetaInfo(c, true, null);// updates the size per cell
if (action == Action.MERGE_COUNT_UNIQUE_KEYS) {
// counting number of unique keys
if (prev != null) {
if (!CellUtil.matchingRowColumnBytes(prev, c)) {
numUniqueKeys++;
}
} else {
numUniqueKeys++;
}
}
prev = c;
}
if (action == Action.COMPACT) {
numUniqueKeys = numOfCells;
} else if (action != Action.MERGE_COUNT_UNIQUE_KEYS) {
numUniqueKeys = CellSet.UNKNOWN_NUM_UNIQUES;
}
// build the immutable CellSet
CellChunkMap ccm = new CellChunkMap(getComparator(), chunks, 0, numOfCellsAfterCompaction, false);
this.setCellSet(null, new CellSet(ccm, numUniqueKeys));// update the CellSet of this Segment
} | 3.26 |
// ProtobufUtil is in hbase-client, and this doesn't need to be public.
public static SpaceQuotaSnapshot toSpaceQuotaSnapshot(QuotaProtos.SpaceQuotaSnapshot proto) {
return new SpaceQuotaSnapshot(SpaceQuotaStatus.toStatus(proto.getQuotaStatus()), proto.getQuotaUsage(), proto.getQuotaLimit());} | 3.26 |
/**
* Returns the current usage, in bytes, of the target (e.g. table, namespace).
*/
@Override
public long getUsage() {
return usage; } | 3.26 |
/**
* Returns a singleton that corresponds to no snapshot information.
*/
public static SpaceQuotaSnapshot getNoSuchSnapshot() {
return NO_SUCH_SNAPSHOT;
} | 3.26 |
/**
* Returns a singleton referring to a quota which is not in violation.
*/
public static SpaceQuotaStatus notInViolation() {
return f0;} | 3.26 |
/**
* Returns the limit, in bytes, of the target (e.g. table, namespace).
*/
@Override
public long getLimit() {
return limit;
} | 3.26 |
/**
* Returns the violation policy, which may be null. It is guaranteed to be non-null if
* {@link #isInViolation()} is {@code true}, but may be null otherwise.
*/
@Override
public Optional<SpaceViolationPolicy> getPolicy() {return policy;
} | 3.26 |
/**
* Returns {@code true} if the quota is being violated, {@code false} otherwise.
*/
@Override
public boolean isInViolation() {
return inViolation;
} | 3.26 |
/**
* Returns the status of the quota.
*/
@Override
public SpaceQuotaStatus getQuotaStatus() {
return quotaStatus;
} | 3.26 |
/**
* Process new files, and add them either to the structure of existing stripes, or to the list
* of new candidate stripes.
*
* @return New candidate stripes.
*/
private TreeMap<byte[], HStoreFile> processResults() {
TreeMap<byte[], HStoreFile> v82 = null;
for (HStoreFile sf :
this.results) {
byte[]
startRow = startOf(sf);
byte[] endRow = endOf(sf);
if (isInvalid(endRow) || isInvalid(startRow)) {
if (!isFlush) {
LOG.warn("The newly compacted file doesn't have stripes set: " + sf.getPath());
}
insertFileIntoStripe(getLevel0Copy(), sf);
this.l0Results.add(sf);
continue;
}
if (!this.stripeFiles.isEmpty()) {
int stripeIndex = findStripeIndexByEndRow(endRow);
if ((stripeIndex >= 0) && rowEquals(getStartRow(stripeIndex), startRow)) {
// Simple/common case - add file to an existing stripe.
insertFileIntoStripe(getStripeCopy(stripeIndex), sf);
continue;
}
}
// Make a new candidate stripe.
if (v82 == null) {
v82 = new TreeMap<>(MAP_COMPARATOR);
}
HStoreFile oldSf = v82.put(endRow, sf);
if (oldSf != null) {
throw new IllegalStateException((((("Compactor has produced multiple files for the stripe ending in [" + Bytes.toString(endRow)) + "], found ") + sf.getPath()) + " and ") + oldSf.getPath());
}
}
return v82;
} | 3.26 |
/**
* Finds the stripe index for the stripe containing a row provided externally for get/scan.
*/
private final int findStripeForRow(byte[] row, boolean isStart) {
if (isStart && Arrays.equals(row, HConstants.EMPTY_START_ROW))
return 0;
if ((!isStart) && Arrays.equals(row, HConstants.EMPTY_END_ROW)) {
return state.stripeFiles.size() - 1;
}
// If there's an exact match below, a stripe ends at "row". Stripe right boundary is
// exclusive, so that means the row is in the next stripe; thus, we need to add one to index.
// If there's no match, the return value of binarySearch is (-(insertion point) - 1), where
// insertion point is the index of the next greater element, or list size if none. The
// insertion point happens to be exactly what we need, so we need to add one to the result.
return Math.abs(Arrays.binarySearch(state.f0, row, Bytes.BYTES_COMPARATOR) + 1);
} | 3.26 |
/**
* Returns A lazy L0 copy from current state.
*/
private final ArrayList<HStoreFile> getLevel0Copy() {
if (this.level0Files == null) {
this.level0Files = new ArrayList<>(StripeStoreFileManager.this.state.level0Files);
}
return this.level0Files;
} | 3.26 |
/**
* Inserts a file in the correct place (by seqnum) in a stripe copy.
*
* @param stripe
* Stripe copy to insert into.
* @param sf
* File to insert.
*/
private static void insertFileIntoStripe(ArrayList<HStoreFile> stripe, HStoreFile sf) {
// The only operation for which sorting of the files matters is KeyBefore. Therefore,
// we will store the file in reverse order by seqNum from the outset.
for (int insertBefore = 0; ; ++insertBefore) {
if ((insertBefore == stripe.size()) || (StoreFileComparators.SEQ_ID.compare(sf, stripe.get(insertBefore)) >= 0)) {
stripe.add(insertBefore, sf);
break;
}
}
} | 3.26 |
// Mark the files as compactedAway once the storefiles and compactedfiles list is finalised
// Let a background thread close the actual reader on these compacted files and also
// ensure to evict the blocks from block cache so that they are no longer in
// cache
private void m1(Collection<HStoreFile> compactedFiles) {
for (HStoreFile file : compactedFiles) {
file.markCompactedAway();
}
} | 3.26 |
/**
* Remove compacted files.
*/
private void removeCompactedFiles() {
for (HStoreFile oldFile : this.compactedFiles) { byte[] oldEndRow = endOf(oldFile);
List<HStoreFile> source = null;
if (isInvalid(oldEndRow)) {
source = getLevel0Copy();
} else {
int stripeIndex = findStripeIndexByEndRow(oldEndRow);
if (stripeIndex < 0) {
throw new IllegalStateException((((("An allegedly compacted file [" + oldFile) + "] does not belong") + " to a known stripe (end row - [") + Bytes.toString(oldEndRow)) + "])");
}
source
= getStripeCopy(stripeIndex);
}
if (!source.remove(oldFile))
{
LOG.warn("An allegedly compacted file [{}] was not found", oldFile);
}
}
} | 3.26 |
/**
* Override of getSplitPoint that determines the split point as the boundary between two stripes,
* unless it causes significant imbalance between split sides' sizes. In that case, the split
* boundary will be chosen from the middle of one of the stripes to minimize imbalance.
*
* @return The split point, or null if no split is possible.
*/
@Override
public Optional<byte[]> getSplitPoint()
throws IOException {
if (this.getStorefileCount() == 0) {
return Optional.empty();
}
// With at most one stripe there is no boundary to split on; fall back to the
// regular midpoint-of-all-files computation.
if (state.stripeFiles.size() <= 1) {return getSplitPointFromAllFiles();
}
// Two-pointer sweep from both ends: on each step grow the currently-smaller side by one
// stripe, until the boundary between leftIndex and rightIndex is fixed. lastLeftSize /
// lastRightSize remember the size of the last stripe added to each side.
int leftIndex = -1;
int rightIndex = state.stripeFiles.size();
long leftSize = 0;long rightSize = 0;
long lastLeftSize = 0;
long lastRightSize = 0;
while ((rightIndex - 1) != leftIndex) {
if (leftSize >= rightSize) {
--rightIndex;
lastRightSize = getStripeFilesSize(rightIndex);
rightSize += lastRightSize;
} else {
++leftIndex;
lastLeftSize = getStripeFilesSize(leftIndex);
leftSize
+= lastLeftSize;
}
}
// An empty side means every byte landed on one side of the boundary - no usable boundary.
if ((leftSize == 0) || (rightSize == 0)) {
String errMsg = String.format("Cannot split on a boundary - left index %d size %d, " + "right index %d size %d", leftIndex, leftSize, rightIndex, rightSize);
debugDumpState(errMsg);
LOG.warn(errMsg);
return getSplitPointFromAllFiles();
}
// Normalize the imbalance ratio to be >= 1 regardless of which side is larger.
double ratio = ((double) (rightSize)) / leftSize;
if (ratio < 1) {
ratio = 1 / ratio;
}
// Imbalance within the configured tolerance: split on the stripe boundary itself.
if (config.getMaxSplitImbalance() > ratio) {
return Optional.of(state.f0[leftIndex]);
}
// If the difference between the sides is too large, we could get the proportional key on
// the a stripe to equalize the difference, but there's no proportional key method at the
// moment, and it's not extremely important.
// See if we can achieve better ratio if we split the bigger side in half.
boolean isRightLarger = rightSize >= leftSize;double newRatio = (isRightLarger) ? getMidStripeSplitRatio(leftSize, rightSize, lastRightSize) : getMidStripeSplitRatio(rightSize, leftSize, lastLeftSize);
if (newRatio < 1) {
newRatio = 1 / newRatio;
}
// Splitting mid-stripe would not improve the balance; keep the boundary split.
if (newRatio >= ratio) {
return Optional.of(state.f0[leftIndex]);
}
LOG.debug((((("Splitting the stripe - ratio w/o split " + ratio) + ", ratio with split ") + newRatio) + " configured ratio ") + config.getMaxSplitImbalance());
// OK, we may get better ratio, get it.
return StoreUtils.getSplitPoint(state.stripeFiles.get(isRightLarger ? rightIndex : leftIndex), cellComparator);
} | 3.26 |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.