name (string) | code_snippet (string) | score (float64)
---|---|---|
hbase_AggregateImplementation_getStd_rdh | /**
* Gives a Pair with first object a List containing Sum and sum of squares, and the second object
* as row count. It is computed for a given combination of column qualifier and column family in
* the given row range as defined in the Scan object. In its current implementation, it takes one
* column family and one column qualifier (if provided). The idea is to get the value of the
* variance first: the average of the squares less the square of the average; the standard
* deviation is the square root of the variance.
*/
@Override
public void getStd(RpcController controller, AggregateRequest request, RpcCallback<AggregateResponse> done) {
InternalScanner scanner = null;
AggregateResponse response = null;
try {
ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
S v71 = null;
S sumSqVal = null;
S tempVal = null;
long rowCountVal = 0L;
Scan v75 = ProtobufUtil.toScan(request.getScan());
scanner = env.getRegion().getScanner(v75);
byte[] colFamily = v75.getFamilies()[0];
NavigableSet<byte[]> qualifiers = v75.getFamilyMap().get(colFamily);
byte[] qualifier = null;
if ((qualifiers != null) && (!qualifiers.isEmpty())) {
qualifier = qualifiers.pollFirst();
}
List<Cell> results = new ArrayList<>();
boolean hasMoreRows = false;
do {
tempVal = null;
hasMoreRows = scanner.next(results);
int listSize = results.size();
for (int i = 0; i < listSize; i++) {
tempVal = ci.add(tempVal, ci.castToReturnType(ci.getValue(colFamily, qualifier, results.get(i))));
}
results.clear();
v71 = ci.add(v71, tempVal);
sumSqVal = ci.add(sumSqVal, ci.multiply(tempVal, tempVal));
rowCountVal++;
} while (hasMoreRows );
if (v71 != null) {
ByteString first_sumVal = ci.getProtoForPromotedType(v71).toByteString();
ByteString first_sumSqVal = ci.getProtoForPromotedType(sumSqVal).toByteString();
AggregateResponse.Builder pair =
AggregateResponse.newBuilder();
pair.addFirstPart(first_sumVal);
pair.addFirstPart(first_sumSqVal);
ByteBuffer v86 = ByteBuffer.allocate(8).putLong(rowCountVal);
v86.rewind();
pair.setSecondPart(ByteString.copyFrom(v86));
response = pair.build();
}
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
} finally {
if (scanner != null) {
IOUtils.closeQuietly(scanner);
}
}
done.run(response);
} | 3.26 |
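The response above only carries the raw sum, sum of squares and row count; the standard deviation itself is derived on the client. A minimal plain-Java sketch of that derivation (hypothetical helper, assuming the three values have already been decoded from the AggregateResponse parts):
// Hypothetical client-side helper, not part of the endpoint above: derives the standard
// deviation from the partial aggregates returned by getStd().
static double stdFromPartials(double sum, double sumSq, long rowCount) {
  double avg = sum / rowCount;                 // E[X]
  double avgOfSquares = sumSq / rowCount;      // E[X^2]
  return Math.sqrt(avgOfSquares - avg * avg);  // sqrt(E[X^2] - E[X]^2)
}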
hbase_AggregateImplementation_start_rdh | /**
* Stores a reference to the coprocessor environment provided by the
* {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost} from the region where this
* coprocessor is loaded. Since this is a coprocessor endpoint, it always expects to be loaded on
* a table region, so always expects this to be an instance of
* {@link RegionCoprocessorEnvironment}.
*
* @param env
* the environment provided by the coprocessor host
* @throws IOException
* if the provided environment is not an instance of
* {@code RegionCoprocessorEnvironment}
*/
@Override
public void start(CoprocessorEnvironment env) throws IOException {
if (env instanceof RegionCoprocessorEnvironment) {
this.env = ((RegionCoprocessorEnvironment) (env));
} else {
throw new CoprocessorException("Must be loaded on a table region!");
}
} | 3.26 |
hbase_AggregateImplementation_getAvg_rdh | /**
* Gives a Pair with first object as Sum and second object as row count, computed for a given
* combination of column qualifier and column family in the given row range as defined in the Scan
* object. In its current implementation, it takes one column family and one column qualifier (if
* provided). In case of a null column qualifier, an aggregate sum over the entire column family
* will be returned.
* <p>
* The average is computed in AggregationClient#avg(byte[], ColumnInterpreter, Scan) by processing
* results from all regions, so it's fine to pass the sum and a Long type.
*/
@Override
public void getAvg(RpcController controller, AggregateRequest request, RpcCallback<AggregateResponse> done) {
AggregateResponse response = null;
InternalScanner scanner = null;
try {
ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
S sumVal = null;
Long rowCountVal = 0L;
Scan scan = ProtobufUtil.toScan(request.getScan());
scanner = env.getRegion().getScanner(scan);
byte[] colFamily = scan.getFamilies()[0];
NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
byte[] qualifier = null;
if ((qualifiers != null) && (!qualifiers.isEmpty())) {
qualifier = qualifiers.pollFirst();
}
List<Cell> v61 = new ArrayList<>();
boolean hasMoreRows = false;
do {
v61.clear();
hasMoreRows = scanner.next(v61);
int listSize = v61.size();
for (int i = 0; i < listSize; i++) {
sumVal = ci.add(sumVal, ci.castToReturnType(ci.getValue(colFamily, qualifier, v61.get(i))));
}
rowCountVal++;
} while (hasMoreRows );
if (sumVal != null) {
ByteString first = ci.getProtoForPromotedType(sumVal).toByteString();
AggregateResponse.Builder pair = AggregateResponse.newBuilder();
pair.addFirstPart(first);
ByteBuffer bb = ByteBuffer.allocate(8).putLong(rowCountVal);
bb.rewind();
pair.setSecondPart(ByteString.copyFrom(bb));
response = pair.build();
}
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
} finally {
if (scanner != null) {
IOUtils.closeQuietly(scanner);
}
}
done.run(response);
} | 3.26 |
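As the Javadoc notes, the division is done in AggregationClient#avg after gathering a (sum, rowCount) pair from every region. A hedged sketch of that final reduction with a hypothetical helper (each element holds a region's decoded sum at index 0 and row count at index 1):
// Hypothetical client-side reduction over per-region partials; partials[i][0] is a
// region's decoded sum, partials[i][1] its row count.
static double avgFromRegionPartials(double[][] partials) {
  double totalSum = 0;
  double totalRows = 0;
  for (double[] p : partials) {
    totalSum += p[0];
    totalRows += p[1];
  }
  return totalSum / totalRows; // one division over the global totals
}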
hbase_AggregateImplementation_getSum_rdh | /**
* Gives the sum for a given combination of column qualifier and column family, in the given row
* range as defined in the Scan object. In its current implementation, it takes one column family
* and one column qualifier (if provided). In case of null column qualifier, sum for the entire
* column family will be returned.
*/
@Override
public void getSum(RpcController controller, AggregateRequest request, RpcCallback<AggregateResponse> done) {
AggregateResponse response = null;
InternalScanner scanner = null;
long sum = 0L;
try {
ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
S sumVal = null;
T temp;
Scan scan = ProtobufUtil.toScan(request.getScan());
scanner = env.getRegion().getScanner(scan);
byte[] colFamily = scan.getFamilies()[0];
NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
byte[] qualifier = null;
if ((qualifiers != null) && (!qualifiers.isEmpty())) {
qualifier = qualifiers.pollFirst();
}
List<Cell> results = new ArrayList<>();
boolean hasMoreRows = false;
do {
hasMoreRows = scanner.next(results);
int listSize = results.size();
for (int i = 0; i < listSize; i++) {
temp = ci.getValue(colFamily, qualifier, results.get(i));
if (temp != null) {
sumVal = ci.add(sumVal, ci.castToReturnType(temp));
}
}
results.clear();
} while (hasMoreRows);
if (sumVal != null) {
response = AggregateResponse.newBuilder().addFirstPart(ci.getProtoForPromotedType(sumVal).toByteString()).build();
}
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
} finally {
if (scanner != null) {
IOUtils.closeQuietly(scanner);
}
}
log.debug((("Sum from this region is " + env.getRegion().getRegionInfo().getRegionNameAsString()) + ": ") + sum);
done.run(response);
} | 3.26 |
hbase_AggregateImplementation_constructColumnInterpreterFromRequest_rdh | // Used server-side too by Aggregation Coprocessor Endpoint. Undo this interdependence. TODO.
@SuppressWarnings("unchecked")
ColumnInterpreter<T, S, P, Q, R> constructColumnInterpreterFromRequest(AggregateRequest request) throws IOException {
String className = request.getInterpreterClassName();
try {
ColumnInterpreter<T, S, P, Q, R> ci;
Class<?> cls = Class.forName(className);
ci = ((ColumnInterpreter<T, S, P, Q, R>) (cls.getDeclaredConstructor().newInstance()));
if (request.hasInterpreterSpecificBytes()) {
ByteString b = request.getInterpreterSpecificBytes();
P initMsg = getParsedGenericInstance(ci.getClass(), 2, b);
ci.initialize(initMsg);
}
return ci;
} catch (ClassNotFoundException | InstantiationException | IllegalAccessException | NoSuchMethodException | InvocationTargetException e) {
throw new IOException(e);
}
} | 3.26 |
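The pattern above (resolve the class by name, call the no-arg constructor, optionally initialize from a protobuf payload) can be shown in isolation. The following is a generic sketch of that reflection idiom with a hypothetical method name, not HBase API, assuming java.io.IOException is imported:
// Generic sketch of the instantiation pattern used above.
static <T> T instantiateByName(String className, Class<T> expectedType) throws IOException {
  try {
    Class<?> cls = Class.forName(className);                       // resolve the class by name
    Object instance = cls.getDeclaredConstructor().newInstance();  // no-arg constructor
    return expectedType.cast(instance);                            // checked cast instead of an unchecked one
  } catch (ReflectiveOperationException e) {
    throw new IOException(e);                                      // mirror the wrapping in the snippet
  }
}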
hbase_AggregateImplementation_getMedian_rdh | /**
* Gives a List containing sum of values and sum of weights. It is computed for the combination of
* column family and column qualifier(s) in the given row range as defined in the Scan object. In
* its current implementation, it takes one column family and two column qualifiers. The first
* qualifier is for values column and the second qualifier (optional) is for weight column.
*/
@Override
public void getMedian(RpcController controller, AggregateRequest request, RpcCallback<AggregateResponse> done) {
AggregateResponse response = null;
InternalScanner scanner = null;
try {
ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
S sumVal = null;
S sumWeights = null;
S tempVal = null;
S tempWeight = null;
Scan scan = ProtobufUtil.toScan(request.getScan());
scanner = env.getRegion().getScanner(scan);
byte[] v95 = scan.getFamilies()[0];
NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(v95);
byte[] valQualifier = null;
byte[] weightQualifier = null;
if ((qualifiers != null) && (!qualifiers.isEmpty())) {
valQualifier = qualifiers.pollFirst();
// if weighted median is requested, get qualifier for the weight column
weightQualifier = qualifiers.pollLast();
}
List<Cell> results = new ArrayList<>();
boolean hasMoreRows = false;
do {
tempVal = null;
tempWeight = null;
hasMoreRows = scanner.next(results);
int listSize = results.size();
for (int i = 0; i < listSize; i++) {
Cell kv = results.get(i);
tempVal = ci.add(tempVal, ci.castToReturnType(ci.getValue(v95, valQualifier, kv)));
if (weightQualifier != null) {
tempWeight = ci.add(tempWeight, ci.castToReturnType(ci.getValue(v95, weightQualifier, kv)));
}
}
results.clear();
sumVal = ci.add(sumVal, tempVal);
sumWeights = ci.add(sumWeights, tempWeight);
} while (hasMoreRows );
ByteString v104 = ci.getProtoForPromotedType(sumVal).toByteString();
S s = (sumWeights == null) ? ci.castToReturnType(ci.getMinValue()) : sumWeights;
ByteString first_sumWeights = ci.getProtoForPromotedType(s).toByteString();
AggregateResponse.Builder pair = AggregateResponse.newBuilder();
pair.addFirstPart(v104);
pair.addFirstPart(first_sumWeights);
response = pair.build();
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
} finally {
if (scanner != null) {
IOUtils.closeQuietly(scanner);
}
}
done.run(response);
} | 3.26 |
hbase_AggregateImplementation_getRowNum_rdh | /**
* Gives the row count for the given column family and column qualifier, in the given row range as
* defined in the Scan object.
*/
@Override
public void getRowNum(RpcController controller, AggregateRequest request, RpcCallback<AggregateResponse> done) {
AggregateResponse response = null;
long v42 = 0L;
List<Cell> results = new ArrayList<>();
InternalScanner scanner = null;
try {
Scan scan = ProtobufUtil.toScan(request.getScan());
byte[][] colFamilies = scan.getFamilies();
byte[] colFamily = (colFamilies != null) ? colFamilies[0] : null;
NavigableSet<byte[]> qualifiers = (colFamilies != null) ? scan.getFamilyMap().get(colFamily) : null;
byte[] qualifier = null;
if ((qualifiers != null) && (!qualifiers.isEmpty())) {
qualifier = qualifiers.pollFirst();
}
if ((scan.getFilter() == null) && (qualifier == null)) {
scan.setFilter(new FirstKeyOnlyFilter());
}
scanner = env.getRegion().getScanner(scan);
boolean hasMoreRows = false;
do {
hasMoreRows = scanner.next(results);
if (results.size() > 0) {
v42++;
}
results.clear();
} while (hasMoreRows );
ByteBuffer bb = ByteBuffer.allocate(8).putLong(v42);
bb.rewind();
response = AggregateResponse.newBuilder().addFirstPart(ByteString.copyFrom(bb)).build();
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
} finally {
if (scanner != null) {
IOUtils.closeQuietly(scanner);
}
}
log.info((("Row counter from this region is " + env.getRegion().getRegionInfo().getRegionNameAsString()) + ": ") + v42);
done.run(response);
} | 3.26 |
hbase_AggregateImplementation_m0_rdh | /**
* Gives the maximum for a given combination of column qualifier and column family, in the given
* row range as defined in the Scan object. In its current implementation, it takes one column
* family and one column qualifier (if provided). In case of null column qualifier, maximum value
* for the entire column family will be returned.
*/
@Override
public void m0(RpcController controller, AggregateRequest request, RpcCallback<AggregateResponse> done) {
InternalScanner scanner = null;
AggregateResponse response = null;
T max = null;
try {
ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
T temp;
Scan scan = ProtobufUtil.toScan(request.getScan());
scanner = env.getRegion().getScanner(scan);
List<Cell> results = new ArrayList<>();
byte[] colFamily = scan.getFamilies()[0];
NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
byte[] qualifier = null;
if ((qualifiers != null) && (!qualifiers.isEmpty())) {
qualifier = qualifiers.pollFirst();
}
// qualifier can be null.
boolean hasMoreRows = false;
do {
hasMoreRows = scanner.next(results);
int listSize = results.size();
for (int i = 0; i < listSize; i++) {
temp = ci.getValue(colFamily, qualifier, results.get(i));
max = ((max == null) || ((temp != null) && (ci.compare(temp, max) > 0))) ? temp : max;
}
results.clear();
} while (hasMoreRows );
if (max != null) {
AggregateResponse.Builder builder = AggregateResponse.newBuilder();
builder.addFirstPart(ci.getProtoForCellType(max).toByteString());
response = builder.build();
}
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
} finally {
if (scanner != null) {
IOUtils.closeQuietly(scanner);
}
}
log.info((("Maximum from this region is " + env.getRegion().getRegionInfo().getRegionNameAsString()) + ": ") + max);
done.run(response);
} | 3.26 |
hbase_UserPermission_getAccessScope_rdh | /**
* Get this permission access scope.
*
* @return access scope
*/
public Scope getAccessScope() {
return permission.getAccessScope();
} | 3.26 |
hbase_MetricsSnapshot_addSnapshot_rdh | /**
* Record a single instance of a snapshot
*
* @param time
* time that the snapshot took
*/
public void addSnapshot(long time) {
source.updateSnapshotTime(time);
} | 3.26 |
hbase_MetricsSnapshot_addSnapshotRestore_rdh | /**
* Record a single instance of a snapshot restore
*
* @param time
* time that the snapshot restore took
*/
public void addSnapshotRestore(long time) {
source.updateSnapshotRestoreTime(time);
} | 3.26 |
hbase_MetricsSnapshot_addSnapshotClone_rdh | /**
* Record a single instance of a snapshot cloned table
*
* @param time
* time that the snapshot clone took
*/
public void addSnapshotClone(long time) {
source.updateSnapshotCloneTime(time);
} | 3.26 |
hbase_ChecksumType_nameToType_rdh | /**
* Map a checksum name to a specific type. Do our own names.
*
* @return Type associated with passed code.
*/
public static ChecksumType nameToType(final String name) {
for (ChecksumType t : ChecksumType.values()) {
if (t.getName().equals(name)) {
return t;
}
}
throw new RuntimeException("Unknown checksum type name " + name);
} | 3.26 |
hbase_ChecksumType_codeToType_rdh | /**
* Cannot rely on enum ordinals. They change if an item is removed or moved. Do our own codes.
*
* @return Type associated with passed code.
*/
public static ChecksumType codeToType(final byte b) {
for (ChecksumType t : ChecksumType.values()) {
if (t.getCode() == b) {
return t;
}
}
throw new RuntimeException("Unknown checksum type code " + b);} | 3.26 |
hbase_EncryptionUtil_createEncryptionContext_rdh | /**
* Helper to create an encryption context.
*
* @param conf
* The current configuration.
* @param family
* The current column descriptor.
* @return The created encryption context.
* @throws IOException
* if an encryption key for the column cannot be unwrapped
* @throws IllegalStateException
* in case of encryption related configuration errors
*/
public static Context createEncryptionContext(Configuration conf, ColumnFamilyDescriptor family) throws IOException {
Encryption.Context cryptoContext = Context.NONE;
String cipherName = family.getEncryptionType();
if (cipherName != null) {
if (!Encryption.isEncryptionEnabled(conf)) {
throw new IllegalStateException(((("Encryption for family '" + family.getNameAsString()) + "' configured with type '") + cipherName) + "' but the encryption feature is disabled");
}
Cipher v20;
Key key;
byte[] v22 = family.getEncryptionKey();
if (v22 != null) {
// Family provides specific key material
key = unwrapKey(conf, v22);
// Use the algorithm the key wants
v20 = Encryption.getCipher(conf, key.getAlgorithm());
if (v20 == null) {
throw new IllegalStateException(("Cipher '" + key.getAlgorithm()) + "' is not available");
}
// Fail if misconfigured
// We use the encryption type specified in the column schema as a sanity check on
// what the wrapped key is telling us
if (!v20.getName().equalsIgnoreCase(cipherName)) {
throw new IllegalStateException(((((("Encryption for family '" + family.getNameAsString()) + "' configured with type '") + cipherName) + "' but key specifies algorithm '") + v20.getName()) + "'");
}
} else {
// Family does not provide key material, create a random key
v20 = Encryption.getCipher(conf, cipherName);
if (v20 == null) {
throw new IllegalStateException(("Cipher '" + cipherName) + "' is not available");
}
key = v20.getRandomKey();
}
cryptoContext = Encryption.newContext(conf);
cryptoContext.setCipher(v20);
cryptoContext.setKey(key);
}
return cryptoContext;
} | 3.26 |
hbase_EncryptionUtil_wrapKey_rdh | /**
* Protect a key by encrypting it with the secret key of the given subject. The configuration must
* be set up correctly for key alias resolution.
*
* @param conf
* configuration
* @param subject
* subject key alias
* @param key
* the key
* @return the encrypted key bytes
*/
public static byte[] wrapKey(Configuration conf, String subject, Key key) throws IOException {
// Wrap the key with the configured encryption algorithm.
String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
Cipher cipher = Encryption.getCipher(conf, algorithm);
if (cipher == null) {
throw new RuntimeException(("Cipher '" + algorithm) + "' not available");
}
EncryptionProtos.WrappedKey.Builder builder = EncryptionProtos.WrappedKey.newBuilder();
builder.setAlgorithm(key.getAlgorithm());
byte[] iv = null;
if (cipher.getIvLength() > 0) {
iv = new byte[cipher.getIvLength()];
Bytes.secureRandom(iv);
builder.setIv(UnsafeByteOperations.unsafeWrap(iv));
}
byte[] keyBytes = key.getEncoded();
builder.setLength(keyBytes.length);
builder.setHashAlgorithm(Encryption.getConfiguredHashAlgorithm(conf));
builder.setHash(UnsafeByteOperations.unsafeWrap(Encryption.computeCryptoKeyHash(conf, keyBytes)));
ByteArrayOutputStream out = new ByteArrayOutputStream();
Encryption.encryptWithSubjectKey(out, new ByteArrayInputStream(keyBytes), subject, conf, cipher, iv);
builder.setData(UnsafeByteOperations.unsafeWrap(out.toByteArray()));
// Build and return the protobuf message
out.reset();
builder.build().writeDelimitedTo(out);
return out.toByteArray();
} | 3.26 |
hbase_EncryptionUtil_unwrapKey_rdh | /**
* Helper for {@link #unwrapKey(Configuration, String, byte[])} which automatically uses the
* configured master and alternative keys, rather than having to specify a key type to unwrap
* with. The configuration must be set up correctly for key alias resolution.
*
* @param conf
* the current configuration
* @param keyBytes
* the key encrypted by master (or alternative) to unwrap
* @return the key bytes, decrypted
* @throws IOException
* if the key cannot be unwrapped
*/
public static Key unwrapKey(Configuration conf, byte[] keyBytes) throws IOException {
Key key;
String v24 = conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName());
try {
// First try the master key
key = unwrapKey(conf, v24, keyBytes);
} catch (KeyException e) {
// If the current master key fails to unwrap, try the alternate, if
// one is configured
if (LOG.isDebugEnabled()) {
LOG.debug(("Unable to unwrap key with current master key '" + v24) + "'");
}
String alternateKeyName = conf.get(HConstants.CRYPTO_MASTERKEY_ALTERNATE_NAME_CONF_KEY);
if (alternateKeyName != null) {
try {
key = unwrapKey(conf, alternateKeyName, keyBytes);
} catch (KeyException ex) {
throw new IOException(ex);
}
} else {
throw new IOException(e);
}
}
return key;
} | 3.26 |
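A hedged round-trip sketch using only the wrapKey and unwrapKey methods shown above; the subject alias and the throwaway AES key are made up, imports (Configuration, HBaseConfiguration, Key, SecretKeySpec, EncryptionUtil) are omitted, and the Configuration must already be set up with a working key provider or the calls will fail:
// All names here are illustrative; a correctly configured key provider is assumed.
Configuration conf = HBaseConfiguration.create();
Key dataKey = new SecretKeySpec(new byte[16], "AES");            // throwaway 128-bit AES key
byte[] wrapped = EncryptionUtil.wrapKey(conf, "hbase", dataKey); // wrap with the subject's secret key
Key unwrapped = EncryptionUtil.unwrapKey(conf, wrapped);         // unwrap with the master (or alternate) key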
hbase_EncryptionUtil_unwrapWALKey_rdh | /**
* Unwrap a wal key by decrypting it with the secret key of the given subject. The configuration
* must be set up correctly for key alias resolution.
*
* @param conf
* configuration
* @param subject
* subject key alias
* @param value
* the encrypted key bytes
* @return the raw key bytes
* @throws IOException
* if key is not found for the subject, or if some I/O error occurs
* @throws KeyException
* if fail to unwrap the key
*/
public static Key unwrapWALKey(Configuration conf, String subject, byte[] value) throws IOException, KeyException {
EncryptionProtos.WrappedKey wrappedKey = WrappedKey.PARSER.parseDelimitedFrom(new ByteArrayInputStream(value));
String algorithm = conf.get(HConstants.CRYPTO_WAL_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
Cipher cipher = Encryption.getCipher(conf, algorithm);
if (cipher == null) {
throw new RuntimeException(("Cipher '" + algorithm) + "' not available");
}
return getUnwrapKey(conf, subject, wrappedKey, cipher);
} | 3.26 |
hbase_ImmutableMemStoreLAB_forceCopyOfBigCellInto_rdh | /**
* The process of merging assumes all cells are allocated on mslab. There is a rare case in which
* the first immutable segment, participating in a merge, is a CSLM. Since the CSLM hasn't been
* flattened yet, and there is no point in flattening it (since it is going to be merged), its big
* cells (for whom size > maxAlloc) must be copied into mslab. This method copies the passed cell
* into the first mslab in the mslabs list, returning either a new cell instance over the copied
* data, or null when this cell cannot be copied.
*/
@Override
public Cell forceCopyOfBigCellInto(Cell cell) {
MemStoreLAB mslab = this.mslabs.get(0);
return mslab.forceCopyOfBigCellInto(cell);
} | 3.26 |
hbase_Delete_addFamilyVersion_rdh | /**
* Delete all columns of the specified family with a timestamp equal to the specified timestamp.
*
* @param family
* family name
* @param timestamp
* version timestamp
* @return this for invocation chaining
*/
public Delete addFamilyVersion(final byte[] family, final long timestamp) {
if (timestamp < 0) {
throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp);
}
List<Cell> list = getCellList(family);
list.add(new KeyValue(row, family, null, timestamp, Type.DeleteFamilyVersion));
return this;
} | 3.26 |
hbase_Delete_addFamily_rdh | /**
* Delete all columns of the specified family with a timestamp less than or equal to the specified
* timestamp.
* <p>
* Overrides previous calls to deleteColumn and deleteColumns for the specified family.
*
* @param family
* family name
* @param timestamp
* maximum version timestamp
* @return this for invocation chaining
*/
public Delete addFamily(final byte[] family, final long timestamp) {
if (timestamp < 0) {
throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp);
}
List<Cell> list = getCellList(family);
if (!list.isEmpty()) {
list.clear();
}
KeyValue kv = new KeyValue(row, family, null, timestamp, Type.DeleteFamily);
list.add(kv);
return this;
} | 3.26 |
hbase_Delete_add_rdh | /**
* Add an existing delete marker to this Delete object.
*
* @param cell
* An existing cell of type "delete".
* @return this for invocation chaining
*/
@Override
public Delete add(Cell cell) throws IOException {
super.add(cell);
return this;
} | 3.26 |
hbase_Delete_m0_rdh | /**
* Delete the latest version of the specified column. This is an expensive call in that on the
* server-side, it first does a get to find the latest versions timestamp. Then it adds a delete
* using the fetched cells timestamp.
*
* @param family
* family name
* @param qualifier
* column qualifier
* @return this for invocation chaining
*/
public Delete m0(final byte[] family, final byte[] qualifier) {
this.addColumn(family, qualifier, this.ts);
return this;
} | 3.26 |
hbase_Delete_addColumns_rdh | /**
* Delete all versions of the specified column with a timestamp less than or equal to the
* specified timestamp.
*
* @param family
* family name
* @param qualifier
* column qualifier
* @param timestamp
* maximum version timestamp
* @return this for invocation chaining
*/
public Delete addColumns(final byte[] family, final byte[] qualifier, final long timestamp) {
if (timestamp < 0) {
throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp);
}
List<Cell> list = getCellList(family);
list.add(new KeyValue(this.row, family, qualifier, timestamp, Type.DeleteColumn));
return this;
} | 3.26 |
hbase_Delete_addColumn_rdh | /**
* Delete the specified version of the specified column.
*
* @param family
* family name
* @param qualifier
* column qualifier
* @param timestamp
* version timestamp
* @return this for invocation chaining
*/
public Delete addColumn(byte[] family, byte[] qualifier, long timestamp) {
if (timestamp < 0) {
throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp);
}
List<Cell> list = getCellList(family);
KeyValue kv = new KeyValue(this.row, family, qualifier, timestamp, Type.Delete);
list.add(kv);
return this;
} | 3.26 |
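A hedged usage sketch tying the Delete methods above together; the row, family and qualifier names are hypothetical, imports are omitted, and `table` is assumed to be a Table obtained from a Connection:
Delete d = new Delete(Bytes.toBytes("row-1"));
d.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), 42L); // one specific version of cf:q1
d.addColumns(Bytes.toBytes("cf"), Bytes.toBytes("q2"));     // all versions of cf:q2
d.addFamily(Bytes.toBytes("cf2"));                          // everything under cf2
table.delete(d);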
hbase_SaslServerAuthenticationProvider_init_rdh | /**
* Encapsulates the server-side logic to authenticate a client over SASL. Tied one-to-one to a
* single client authentication implementation.
*/
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.AUTHENTICATION)
@InterfaceStability.Evolving
public interface SaslServerAuthenticationProvider extends SaslAuthenticationProvider {
/**
* Allows implementations to initialize themselves, prior to creating a server.
*/
default void init(Configuration conf) throws IOException {
} | 3.26 |
hbase_NettyFutureUtils_safeClose_rdh | /**
* Close the channel and eat the returned future by logging the error when the future is completed
* with error.
*/
public static void safeClose(ChannelOutboundInvoker channel) {
consume(channel.close());
} | 3.26 |
hbase_NettyFutureUtils_addListener_rdh | /**
* This method is used when you just want to add a listener to the given netty future. Ignoring
* the return value of a Future is considered a bad practice as it may suppress exceptions
* thrown from the code that completes the future, and this method will catch all the exceptions
* thrown from the {@code listener} to catch possible code bugs.
* <p/>
* And the error prone check will always report FutureReturnValueIgnored because every method in
* the {@link Future} class will return a new {@link Future}, so you always have one future that
* has not been checked. So we introduce this method and add a suppress warnings annotation here.
*/
@SuppressWarnings({ "FutureReturnValueIgnored", "rawtypes", "unchecked" })
public static <V> void addListener(Future<V> future, GenericFutureListener<? extends Future<? super V>> listener) {
future.addListener(f -> {
try {
// the ? operator in template makes it really hard to pass compile, so here we just cast the
// listener to raw type.
((GenericFutureListener) (listener)).operationComplete(f);
} catch (Throwable t) {
LOG.error("Unexpected error caught when processing netty", t);
}
});
} | 3.26 |
hbase_NettyFutureUtils_safeWriteAndFlush_rdh | /**
* Call writeAndFlush on the channel and eat the returned future by logging the error when the
* future is completed with error.
*/
public static void safeWriteAndFlush(ChannelOutboundInvoker channel, Object msg) {
consume(channel.writeAndFlush(msg));
} | 3.26 |
hbase_NettyFutureUtils_consume_rdh | /**
* Log the error if the future indicates any failure.
*/
@SuppressWarnings("FutureReturnValueIgnored")
public static void consume(Future<?> future) {
future.addListener(NettyFutureUtils::loggingWhenError);
} | 3.26 |
hbase_NettyFutureUtils_safeWrite_rdh | /**
* Call write on the channel and eat the returned future by logging the error when the future is
* completed with error.
*/
public static void safeWrite(ChannelOutboundInvoker channel, Object msg) {
consume(channel.write(msg));
} | 3.26 |
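A hedged usage sketch of the fire-and-forget helpers above inside a netty handler; `ctx` (a ChannelHandlerContext, which implements ChannelOutboundInvoker) and `response` are hypothetical:
NettyFutureUtils.safeWriteAndFlush(ctx, response); // failure is only logged, no future leaks out
NettyFutureUtils.safeClose(ctx);                   // close and log any error from the close future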
hbase_LruAdaptiveBlockCache_updateSizeMetrics_rdh | /**
* Helper function that updates the local size counter and also updates any per-cf or
* per-blocktype metrics it can discern from given {@link LruCachedBlock}
*/
private long updateSizeMetrics(LruCachedBlock cb, boolean evict) {
long heapsize = cb.heapSize();
BlockType bt = cb.getBuffer().getBlockType();
if (evict) {
heapsize *= -1;
}
if ((bt != null) && bt.isData()) {
dataBlockSize.add(heapsize);
}
return size.addAndGet(heapsize);
} | 3.26 |
hbase_LruAdaptiveBlockCache_cacheBlock_rdh | /**
* Cache the block with the specified name and buffer.
* <p>
* TODO after HBASE-22005, we may cache a block which is allocated off-heap, but our LRU cache
* sizing is based on heap size, so we should handle this in HBASE-22127. It will introduce a
* switch for whether to keep the LRU on-heap or not; if so we may need to copy the memory to
* on-heap, otherwise the caching size is based on off-heap.
*
* @param cacheKey
* block's cache key
* @param buf
* block buffer
*/
@Override
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
cacheBlock(cacheKey, buf, false);
} | 3.26 |
hbase_LruAdaptiveBlockCache_getBlock_rdh | /**
* Get the buffer of the block with the specified name.
*
* @param cacheKey
* block's cache key
* @param caching
* true if the caller caches blocks on cache misses
* @param repeat
* Whether this is a repeat lookup for the same block (used to avoid
* double counting cache misses when doing double-check locking)
* @param updateCacheMetrics
* Whether to update cache metrics or not
* @return buffer of specified cache key, or null if not in cache
*/
@Override
public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, boolean updateCacheMetrics) {
LruCachedBlock cb = map.computeIfPresent(cacheKey, (key, val) -> {
// It will be referenced by RPC path, so increase here. NOTICE: Must do the retain inside
// this block. because if retain outside the map#computeIfPresent, the evictBlock may remove
// the block and release, then we're retaining a block with refCnt=0 which is disallowed.
// see HBASE-22422.
val.getBuffer().retain();
return val;
});
if (cb == null) {
if ((!repeat) && updateCacheMetrics) {
stats.miss(caching, cacheKey.isPrimary(), cacheKey.getBlockType());
}
// If there is another block cache then try and read there.
// However if this is a retry ( second time in double checked locking )
// And it's already a miss then the l2 will also be a miss.
if ((victimHandler != null) && (!repeat)) {
// The handler will increase result's refCnt for RPC, so need no extra retain.
Cacheable result = victimHandler.getBlock(cacheKey, caching, repeat, updateCacheMetrics);
// Promote this to L1.
if (result != null) {
if (caching) {
cacheBlock(cacheKey, result, /* inMemory = */ false);
}
}
return result;
}
return null;
}
if (updateCacheMetrics) {
stats.hit(caching, cacheKey.isPrimary(), cacheKey.getBlockType());
}
cb.access(count.incrementAndGet());
return cb.getBuffer();
} | 3.26 |
hbase_LruAdaptiveBlockCache_clearCache_rdh | /**
* Clears the cache. Used in tests.
*/
public void clearCache() {
this.map.clear();
this.elements.set(0);
} | 3.26 |
hbase_LruAdaptiveBlockCache_evictBlocksByHfileName_rdh | /**
* Evicts all blocks for a specific HFile. This is an expensive operation implemented as a
* linear-time search through all blocks in the cache. Ideally this should be a search in a
* log-access-time map.
* <p>
* This is used for evict-on-close to remove all blocks of a specific HFile.
*
* @return the number of blocks evicted
*/
@Override
public int evictBlocksByHfileName(String hfileName) {
int numEvicted = ((int) (map.keySet().stream().filter(key -> key.getHfileName().equals(hfileName)).filter(this::evictBlock).count()));
if (victimHandler != null) {
numEvicted += victimHandler.evictBlocksByHfileName(hfileName);
}
return numEvicted;
} | 3.26 |
hbase_LruAdaptiveBlockCache_containsBlock_rdh | /**
* Whether the cache contains block with specified cacheKey
*
* @return true if contains the block
*/
@Override
public boolean containsBlock(BlockCacheKey cacheKey) {
return map.containsKey(cacheKey);
} | 3.26 |
hbase_LruAdaptiveBlockCache_acceptableSize_rdh | // Simple calculators of sizes given factors and maxSize
long acceptableSize() {
return ((long) (Math.floor(this.maxSize * this.acceptableFactor)));
} | 3.26 |
hbase_LruAdaptiveBlockCache_assertCounterSanity_rdh | /**
* Sanity-checking for parity between actual block cache content and metrics. Intended only for
* use with TRACE level logging and -ea JVM.
*/
private static void assertCounterSanity(long mapSize, long counterVal) {
if (counterVal < 0) {
LOG.trace((("counterVal overflow. Assertions unreliable. counterVal=" + counterVal) + ", mapSize=") + mapSize);
return;
}
if (mapSize < Integer.MAX_VALUE) {
double pct_diff = Math.abs((((double) (counterVal)) / ((double) (mapSize))) - 1.0);
if (pct_diff > 0.05) {
LOG.trace((("delta between reported and actual size > 5%. counterVal=" + counterVal) + ", mapSize=") + mapSize);
}
}
} | 3.26 |
hbase_LruAdaptiveBlockCache_evictBlock_rdh | /**
* Evict the block, and it will be cached by the victim handler if one exists and the block may be
* read again later
*
* @param evictedByEvictionProcess
* true if the given block is evicted by EvictionThread
* @return the heap size of evicted block
*/
protected long evictBlock(LruCachedBlock block, boolean evictedByEvictionProcess) {
LruCachedBlock previous = map.remove(block.getCacheKey());
if (previous == null) {
return 0;
}
updateSizeMetrics(block, true);
long val = elements.decrementAndGet();
if (LOG.isTraceEnabled()) {
long size = map.size();
assertCounterSanity(size, val);
}
if (block.getBuffer().getBlockType().isData()) {
dataBlockElements.decrement();
}
if (evictedByEvictionProcess) {
// When the eviction of the block happened because of invalidation of HFiles, no need to
// update the stats counter.
stats.evicted(block.getCachedTime(), block.getCacheKey().isPrimary());
if (victimHandler != null) {
victimHandler.cacheBlock(block.getCacheKey(), block.getBuffer());
}
}
// Decrease the block's reference count, and if refCount is 0, then it'll auto-deallocate. DO
// NOT move this up because if do that then the victimHandler may access the buffer with
// refCnt = 0 which is disallowed.
previous.getBuffer().release();
return block.heapSize();
} | 3.26 |
hbase_LruAdaptiveBlockCache_isEnteringRun_rdh | /**
* Used for the test.
*/
boolean isEnteringRun() {
return this.enteringRun;
} | 3.26 |
hbase_LruAdaptiveBlockCache_evict_rdh | /**
* Eviction method. Evict items in order of use, allowing delete items which haven't been used for
* the longest amount of time.
*
* @return how many bytes were freed
*/
long evict() {
// Ensure only one eviction at a time
if (!evictionLock.tryLock()) {
return 0;
}
long bytesToFree = 0L;
try {
evictionInProgress = true;
long currentSize = this.size.get();
bytesToFree = currentSize - minSize();
if (LOG.isTraceEnabled()) {
LOG.trace((("Block cache LRU eviction started; Attempting to free " + StringUtils.byteDesc(bytesToFree)) + " of total=") + StringUtils.byteDesc(currentSize));
}
if (bytesToFree <= 0) {
return 0;
}
// Instantiate priority buckets
BlockBucket bucketSingle = new BlockBucket("single", bytesToFree, blockSize, singleSize());
BlockBucket bucketMulti = new BlockBucket("multi", bytesToFree, blockSize, multiSize());
BlockBucket bucketMemory = new BlockBucket("memory", bytesToFree, blockSize, memorySize());
// Scan entire map putting into appropriate buckets
for (LruCachedBlock cachedBlock : map.values()) {
switch (cachedBlock.getPriority()) {
case SINGLE: {
bucketSingle.add(cachedBlock);
break;
}
case MULTI: {
bucketMulti.add(cachedBlock);
break;
}
case MEMORY: {
bucketMemory.add(cachedBlock);
break;
}
}
}
long bytesFreed = 0;
if (forceInMemory || (memoryFactor > 0.999F)) {
long s = bucketSingle.totalSize();
long m = bucketMulti.totalSize();
if (bytesToFree > (s + m)) {
// this means we need to evict blocks in memory bucket to make room,
// so the single and multi buckets will be emptied
bytesFreed = bucketSingle.free(s);
bytesFreed += bucketMulti.free(m);
if (LOG.isTraceEnabled()) {
LOG.trace(("freed " + StringUtils.byteDesc(bytesFreed)) + " from single and multi buckets");
}
bytesFreed += bucketMemory.free(bytesToFree - bytesFreed);
if (LOG.isTraceEnabled()) {
LOG.trace(("freed " + StringUtils.byteDesc(bytesFreed)) + " total from all three buckets ");
}
} else {
// this means no need to evict block in memory bucket,
// and we try best to make the ratio between single-bucket and
// multi-bucket is 1:2
long bytesRemain = (s + m) - bytesToFree;
if ((3 * s) <= bytesRemain) {
// single-bucket is small enough that no eviction happens for it
// hence all eviction goes from multi-bucket
bytesFreed = bucketMulti.free(bytesToFree);
} else if ((3 * m) <= (2 * bytesRemain)) {
// multi-bucket is small enough that no eviction happens for it
// hence all eviction goes from single-bucket
bytesFreed = bucketSingle.free(bytesToFree);
} else {
// both buckets need to evict some blocks
bytesFreed = bucketSingle.free(s - (bytesRemain / 3));
if (bytesFreed < bytesToFree) {
bytesFreed += bucketMulti.free(bytesToFree - bytesFreed);
}
}
}
} else {
PriorityQueue<BlockBucket> bucketQueue = new PriorityQueue<>(3);
bucketQueue.add(bucketSingle);
bucketQueue.add(bucketMulti);
bucketQueue.add(bucketMemory);
int remainingBuckets = bucketQueue.size();
BlockBucket bucket;
while ((bucket = bucketQueue.poll()) != null) {
long overflow = bucket.overflow();
if (overflow > 0) {
long v32 = Math.min(overflow, (bytesToFree - bytesFreed) / remainingBuckets);
bytesFreed += bucket.free(v32);
}
remainingBuckets--;
}
}
if (LOG.isTraceEnabled()) {
long single = bucketSingle.totalSize();
long v34 = bucketMulti.totalSize();
long memory = bucketMemory.totalSize();
LOG.trace("Block cache LRU eviction completed; " + "freed=" + StringUtils.byteDesc(bytesFreed) + ", " + "total=" + StringUtils.byteDesc(this.size.get()) + ", " + "single=" + StringUtils.byteDesc(single) + ", " + "multi=" + StringUtils.byteDesc(v34) + ", " + "memory=" + StringUtils.byteDesc(memory));
}
} finally {
stats.evict();
evictionInProgress = false;
evictionLock.unlock();
}
return bytesToFree;
} | 3.26 |
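A worked example of the single/multi balancing above (illustrative numbers only): with single = 300 MB, multi = 600 MB and bytesToFree = 300 MB, bytesRemain = 600 MB; neither bucket is small enough to be spared (3*s = 900 > 600 and 3*m = 1800 > 1200), so the single bucket frees s - bytesRemain/3 = 100 MB and the multi bucket frees the remaining 200 MB, leaving 200 MB and 400 MB, i.e. the targeted 1:2 ratio.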
hbase_LruAdaptiveBlockCache_asReferencedHeapBlock_rdh | /**
* The block cached in LruAdaptiveBlockCache will always be a heap block: on the one hand, heap
* access is faster than off-heap, so the small index or meta blocks cached in CombinedBlockCache
* benefit a lot; on the other hand, the LruAdaptiveBlockCache size is always calculated based on
* the total heap size, so caching an off-heap block in LruAdaptiveBlockCache would mess up the
* heap accounting. Here we will clone the block into a heap block if it's an off-heap block,
* otherwise just use the original block. The key point is to maintain the refCnt of
* the block (HBASE-22127): <br>
* 1. if cache the cloned heap block, its refCnt is an totally new one, it's easy to handle; <br>
* 2. if cache the original heap block, we're sure that it won't be tracked in ByteBuffAllocator's
* reservoir, if both RPC and LruAdaptiveBlockCache release the block, then it can be garbage
* collected by JVM, so need a retain here.
*
* @param buf
* the original block
* @return an block with an heap memory backend.
*/
private Cacheable asReferencedHeapBlock(Cacheable buf) {
if (buf instanceof HFileBlock) {
HFileBlock blk = ((HFileBlock) (buf));
if (blk.isSharedMem()) {
return HFileBlock.deepCloneOnHeap(blk);
}
}
// The block will be referenced by this LruAdaptiveBlockCache,
// so should increase its refCnt here.
return buf.retain();
} | 3.26 |
hbase_LruAdaptiveBlockCache_getStats_rdh | /**
* Get counter statistics for this cache.
* <p>
* Includes: total accesses, hits, misses, evicted blocks, and runs of the eviction processes.
*/
@Override
public CacheStats getStats() {
return this.stats;
} | 3.26 |
hbase_LruAdaptiveBlockCache_runEviction_rdh | /**
* Multi-threaded call to run the eviction process.
*/
private void runEviction() {
if (evictionThread == null) {
evict();
} else {
evictionThread.m0();
}
} | 3.26 |
hbase_VersionResource_getVersionResource_rdh | /**
* Dispatch <tt>/version/rest</tt> to self.
*/
@Path("rest")
public VersionResource getVersionResource() {
return this;
} | 3.26 |
hbase_VersionResource_getClusterVersionResource_rdh | /**
* Dispatch to StorageClusterVersionResource
*/
@Path("cluster")
public StorageClusterVersionResource getClusterVersionResource() throws IOException {
return new StorageClusterVersionResource();
} | 3.26 |
hbase_VersionResource_m0_rdh | /**
* Build a response for a version request.
*
* @param context
* servlet context
* @param uriInfo
* (JAX-RS context variable) request URL
* @return a response for a version request
*/
@GET
@Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF })
public Response m0(@Context final ServletContext context, @Context final UriInfo uriInfo) {
if (LOG.isTraceEnabled()) {
LOG.trace("GET " + uriInfo.getAbsolutePath());
}
servlet.getMetrics().incrementRequests(1);
ResponseBuilder response = Response.ok(new VersionModel(context));
response.cacheControl(f0);
servlet.getMetrics().incrementSucessfulGetRequests(1);
return response.build();
} | 3.26 |
hbase_MetricsIO_getInstance_rdh | /**
* Get a static instance for the MetricsIO so that accessors access the same instance. We want to
* lazy initialize so that correct process name is in place. See HBASE-27966 for more details.
*/
public static MetricsIO getInstance() {
if (instance == null) {
synchronized(MetricsIO.class) {
if (instance == null) {
instance = new MetricsIO(new MetricsIOWrapperImpl());
}
}
}
return instance;
} | 3.26 |
hbase_ServerName_m0_rdh | /**
* Return {@link #getServerName()} as bytes with a short-sized prefix with the {@link #VERSION} of
* this class.
*/
public synchronized byte[] m0() {
if (this.bytes == null) {
this.bytes = Bytes.add(VERSION_BYTES, Bytes.toBytes(getServerName()));
}
return this.bytes;
} | 3.26 |
hbase_ServerName_getHostNameMinusDomain_rdh | /**
*
* @param hostname
* the hostname string to get the actual hostname from
* @return hostname minus the domain, if there is one (will do pass-through on ip addresses)
*/
private static String getHostNameMinusDomain(final String hostname) {
if (InetAddresses.isInetAddress(hostname)) {
return hostname;
}
List<String> parts = Splitter.on('.').splitToList(hostname);
if (parts.size() == 0) {
return hostname;
}
Iterator<String> i = parts.iterator();
return i.next();
} | 3.26 |
hbase_ServerName_isFullServerName_rdh | /**
* Returns true if the String follows the pattern of {@link #toString()}, false otherwise.
*/
public static boolean isFullServerName(final String str) {
if ((str == null) || str.isEmpty()) {
return false;
}
return SERVERNAME_PATTERN.matcher(str).matches();
} | 3.26 |
hbase_ServerName_parseServerName_rdh | /**
* Parse a ServerName from a string
*
* @param str
* Either an instance of {@link #toString()} or a "'<hostname>' ':'
* '<port>'".
* @return A ServerName instance.
*/
public static ServerName parseServerName(final String str) {
return SERVERNAME_PATTERN.matcher(str).matches() ? valueOf(str) : valueOf(str, NON_STARTCODE);
} | 3.26 |
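A hedged usage sketch of the two accepted input shapes described in the Javadoc above; the hostnames and start code are made up and imports are omitted:
ServerName full = ServerName.parseServerName("rs1.example.org,16020,1700000000000"); // toString() form
ServerName hostPort = ServerName.parseServerName("rs1.example.org:16020");           // host:port form, gets NON_STARTCODE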
hbase_ServerName_isSameAddress_rdh | /**
* Compare two addresses
*
* @param left
* the first server address to compare
* @param right
* the second server address to compare
* @return {@code true} if {@code left} and {@code right} have the same hostname and port.
*/
public static boolean isSameAddress(final ServerName left, final ServerName right) {
return left.getAddress().equals(right.getAddress());
} | 3.26 |
hbase_ServerName_getStartcode_rdh | /**
* Return the start code.
*
* @deprecated Since 2.5.0, will be removed in 4.0.0. Use {@link #getStartCode()} instead.
*/
@Deprecated
public long getStartcode() {
return startCode;
} | 3.26 |
hbase_ServerName_parseVersionedServerName_rdh | /**
* Use this method to instantiate a {@link ServerName} from bytes obtained from a call to
* {@link #getVersionedBytes()}. Will take care of the case where bytes were written by an earlier
* version of hbase.
*
* @param versionedBytes
* Pass bytes gotten from a call to {@link #getVersionedBytes()}
* @return A ServerName instance.
* @see #getVersionedBytes()
*/
public static ServerName parseVersionedServerName(final byte[] versionedBytes) {
// Version is a short.
short version = Bytes.toShort(versionedBytes);
if (version == VERSION) {
int length = versionedBytes.length - Bytes.SIZEOF_SHORT;
return valueOf(Bytes.toString(versionedBytes, Bytes.SIZEOF_SHORT, length));
}
// Presume the bytes were written with an old version of hbase and that the
// bytes are actually a String of the form "'<hostname>' ':' '<port>'".
return valueOf(Bytes.toString(versionedBytes), NON_STARTCODE);
} | 3.26 |
hbase_ServerName_getStartCode_rdh | /**
* Return the start code.
*/
public long getStartCode() {
return startCode;
} | 3.26 |
hbase_ServerName_valueOf_rdh | /**
* Retrieve an instance of {@link ServerName}. Callers should use the {@link #equals(Object)}
* method to compare returned instances, though we may return a shared immutable object as an
* internal optimization.
*
* @param address
* the {@link Address} to use for getting the {@link ServerName}
* @param startCode
* the startcode to use for getting the {@link ServerName}
* @return the constructed {@link ServerName}
* @see #valueOf(String, int, long)
*/
public static ServerName valueOf(final Address address, final long startCode) {
return valueOf(address.getHostname(), address.getPort(), startCode);
} | 3.26 |
hbase_ServerName_toShortString_rdh | /**
* Return a SHORT version of {@link #toString()}, one that has the host only, minus the domain,
* and the port only -- no start code; the String is for us internally mostly tying threads to
* their server. Not for external use. It is lossy and will not work in compares, etc.
*/
public String toShortString() {
return Addressing.createHostAndPortStr(getHostNameMinusDomain(this.address.getHostname()), this.address.getPort());
} | 3.26 |
hbase_MultiTableInputFormatBase_createRecordReader_rdh | /**
* Builds a TableRecordReader. If no TableRecordReader was provided, uses the default.
*
* @param split
* The split to work with.
* @param context
* The current context.
* @return The newly created record reader.
* @throws IOException
* When creating the reader fails.
* @throws InterruptedException
* when record reader initialization fails
* @see InputFormat#createRecordReader(InputSplit, TaskAttemptContext)
*/
@Override
public RecordReader<ImmutableBytesWritable, Result> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
TableSplit tSplit = ((TableSplit) (split));
LOG.info(MessageFormat.format("Input split length: {0} bytes.", tSplit.getLength()));
if (tSplit.getTable() == null) {
throw new IOException(("Cannot create a record reader because of a" + " previous error. Please look at the previous logs lines from") + " the task's full log for more details.");
}
final Connection connection = ConnectionFactory.createConnection(context.getConfiguration());
Table table = connection.getTable(tSplit.getTable());
if (this.tableRecordReader == null) {
this.tableRecordReader = new TableRecordReader();
}
final TableRecordReader trr = this.tableRecordReader;
try {
Scan sc = tSplit.getScan();
sc.withStartRow(tSplit.getStartRow());
sc.withStopRow(tSplit.getEndRow());
trr.setScan(sc);
trr.setTable(table);
return new RecordReader<ImmutableBytesWritable, Result>() {
@Override
public void m0() throws IOException {
trr.close();
connection.close();
}
@Override
public ImmutableBytesWritable getCurrentKey() throws IOException, InterruptedException {
return trr.getCurrentKey();
}
@Override
public Result getCurrentValue() throws IOException, InterruptedException {
return trr.getCurrentValue();
}
@Override
public float getProgress() throws IOException, InterruptedException {
return trr.getProgress();
}
@Override
public void initialize(InputSplit inputsplit, TaskAttemptContext context) throws IOException, InterruptedException {
trr.initialize(inputsplit, context);
}
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
return trr.nextKeyValue();
}
};
} catch (IOException ioe) {
// If there is an exception make sure that all
// resources are closed and released.
trr.close();
connection.close();
throw ioe;
}
} | 3.26 |
hbase_MultiTableInputFormatBase_getScans_rdh | /**
* Allows subclasses to get the list of {@link Scan} objects.
*/
protected List<Scan> getScans() {
return this.f0;
} | 3.26 |
hbase_MultiTableInputFormatBase_setTableRecordReader_rdh | /**
* Allows subclasses to set the {@link TableRecordReader}.
*
* @param tableRecordReader
* A different {@link TableRecordReader} implementation.
*/
protected void setTableRecordReader(TableRecordReader tableRecordReader) {
this.tableRecordReader = tableRecordReader;
} | 3.26 |
hbase_MultiTableInputFormatBase_includeRegionInSplit_rdh | /**
* Test if the given region is to be included in the InputSplit while splitting the regions of a
* table.
* <p>
* This optimization is effective when there is a specific reasoning to exclude an entire region
* from the M-R job, (and hence, not contributing to the InputSplit), given the start and end keys
* of the same. <br>
* Useful when we need to remember the last-processed top record and revisit the [last, current)
* interval for M-R processing, continuously. In addition to reducing InputSplits, reduces the
* load on the region server as well, due to the ordering of the keys. <br>
* <br>
* Note: It is possible that <code>endKey.length() == 0 </code> , for the last (recent) region.
* <br>
* Override this method, if you want to bulk exclude regions altogether from M-R. By default, no
* region is excluded( i.e. all regions are included).
*
* @param startKey
* Start key of the region
* @param endKey
* End key of the region
* @return true, if this region needs to be included as part of the input (default).
*/
protected boolean includeRegionInSplit(final byte[] startKey, final byte[] endKey) {
return true;
} | 3.26 |
hbase_MultiTableInputFormatBase_getSplits_rdh | /**
* Calculates the splits that will serve as input for the map tasks. The number of splits matches
* the number of regions in a table.
*
* @param context
* The current job context.
* @return The list of input splits.
* @throws IOException
* When creating the list of splits fails.
* @see InputFormat#getSplits(org.apache.hadoop.mapreduce.JobContext)
*/
@Override
public List<InputSplit> getSplits(JobContext context) throws IOException {
if (f0.isEmpty()) {
throw new IOException("No scans were provided.");
}
Map<TableName, List<Scan>> tableMaps = new HashMap<>();
for (Scan scan : f0) {
byte[] tableNameBytes = scan.getAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME);
if (tableNameBytes == null)
throw new IOException("A scan object did not have a table name");
TableName tableName = TableName.valueOf(tableNameBytes);
List<Scan> scanList = tableMaps.get(tableName);
if (scanList == null) {
scanList = new ArrayList<>();
tableMaps.put(tableName, scanList);
}
scanList.add(scan);
}
List<InputSplit> splits = new ArrayList<>();
Iterator iter = tableMaps.entrySet().iterator();
// Make a single Connection to the Cluster and use it across all tables.
try (Connection conn = ConnectionFactory.createConnection(context.getConfiguration())) {
while (iter.hasNext()) {
Map.Entry<TableName, List<Scan>> entry = ((Map.Entry<TableName, List<Scan>>) (iter.next()));
TableName tableName = entry.getKey();
List<Scan> scanList = entry.getValue();
try (Table table = conn.getTable(tableName);RegionLocator regionLocator = conn.getRegionLocator(tableName)) {
RegionSizeCalculator v18 = new RegionSizeCalculator(regionLocator, conn.getAdmin());
Pair<byte[][], byte[][]> keys = regionLocator.getStartEndKeys();
for (Scan scan : scanList) {
if (((keys == null) || (keys.getFirst() == null)) || (keys.getFirst().length == 0)) {
throw new IOException("Expecting at least one region for table : " + tableName.getNameAsString());
}
int count = 0;
byte[] startRow = scan.getStartRow();
byte[] stopRow = scan.getStopRow();
for (int i = 0; i < keys.getFirst().length; i++) {
if (!includeRegionInSplit(keys.getFirst()[i], keys.getSecond()[i])) {
continue;
}
if ((((startRow.length == 0) || (keys.getSecond()[i].length == 0)) || (Bytes.compareTo(startRow, keys.getSecond()[i]) < 0)) && ((stopRow.length == 0) || (Bytes.compareTo(stopRow, keys.getFirst()[i]) > 0))) {
byte[] splitStart = ((startRow.length == 0) || (Bytes.compareTo(keys.getFirst()[i], startRow) >= 0)) ? keys.getFirst()[i] : startRow;
byte[] splitStop = (((stopRow.length == 0) || (Bytes.compareTo(keys.getSecond()[i], stopRow) <= 0)) && (keys.getSecond()[i].length > 0)) ? keys.getSecond()[i] : stopRow;
HRegionLocation hregionLocation = regionLocator.getRegionLocation(keys.getFirst()[i], false);
String regionHostname = hregionLocation.getHostname();
RegionInfo regionInfo = hregionLocation.getRegion();
String encodedRegionName = regionInfo.getEncodedName();
long regionSize = v18.getRegionSize(regionInfo.getRegionName());
TableSplit split = new TableSplit(table.getName(), scan, splitStart, splitStop, regionHostname, encodedRegionName, regionSize);
splits.add(split);
if (LOG.isDebugEnabled()) {
LOG.debug((("getSplits: split -> " + (count++)) + " -> ") + split);
}
}
}
}
}
}
}
return splits;
} | 3.26 |
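getSplits relies on each Scan carrying its table name in the SCAN_ATTRIBUTES_TABLE_NAME attribute (see the lookup at the top of the method). A hedged sketch of how a job might prepare such a scan list; the table names are hypothetical and imports are omitted:
List<Scan> scans = new ArrayList<>();
for (String name : new String[] { "table-a", "table-b" }) {
  Scan scan = new Scan();
  scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(name)); // required by getSplits
  scans.add(scan);
}
// the list would then be handed to the job, e.g. via TableMapReduceUtil.initTableMapperJob(scans, ...)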
hbase_MultiTableInputFormatBase_setScans_rdh | /**
* Allows subclasses to set the list of {@link Scan} objects.
*
* @param scans
* The list of {@link Scan} used to define the input
*/
protected void setScans(List<Scan> scans) {
this.f0 = scans;
} | 3.26 |
hbase_ModifyPeerProcedure_nextStateAfterRefresh_rdh | /**
* Implementation class can override this method. By default we will jump to
* POST_PEER_MODIFICATION and finish the procedure.
*/
protected PeerModificationState nextStateAfterRefresh() {
return PeerModificationState.POST_PEER_MODIFICATION;
} | 3.26 |
hbase_ModifyPeerProcedure_enablePeerBeforeFinish_rdh | /**
* The implementation class should override this method if the procedure may enter the serial
* related states.
*/
protected boolean enablePeerBeforeFinish() {
throw new UnsupportedOperationException();
} | 3.26 |
hbase_ModifyPeerProcedure_needReopen_rdh | // If the table is in enabling state, we need to wait until it is enabled and then reopen all its
// regions.
private boolean needReopen(TableStateManager tsm, TableName tn) throws IOException {
for (;;) {
try {
TableState state = tsm.getTableState(tn);
if (state.isEnabled()) {
return true;
}
if (!state.isEnabling()) {
return false;
}
Thread.sleep(SLEEP_INTERVAL_MS);
} catch (TableNotFoundException e) {
return false;
} catch (InterruptedException e) {
throw ((IOException) (new InterruptedIOException(e.getMessage()).initCause(e)));
}
}
} | 3.26 |
hbase_ModifyPeerProcedure_reopenRegions_rdh | // will be override in test to simulate error
protected void reopenRegions(MasterProcedureEnv env) throws IOException {
ReplicationPeerConfig peerConfig = getNewPeerConfig();
ReplicationPeerConfig oldPeerConfig = getOldPeerConfig();
TableStateManager tsm = env.getMasterServices().getTableStateManager();
for (TableDescriptor td : env.getMasterServices().getTableDescriptors().getAll().values()) {
if (!td.hasGlobalReplicationScope()) {
continue;
}
TableName tn = td.getTableName();
if (!peerConfig.needToReplicate(tn)) {
continue;
}
if (((oldPeerConfig != null) && oldPeerConfig.isSerial()) && oldPeerConfig.needToReplicate(tn)) {
continue;
}
if (needReopen(tsm, tn)) {
addChildProcedure(new ReopenTableRegionsProcedure(tn));
}
}
} | 3.26 |
hbase_SimpleServerRpcConnection_initByteBuffToReadInto_rdh | // It creates the ByteBuff and CallCleanup and assign to Connection instance.
private void initByteBuffToReadInto(int length) {
this.data = rpcServer.bbAllocator.allocate(length);
this.callCleanup = data::release;
} | 3.26 |
hbase_SimpleServerRpcConnection_readAndProcess_rdh | /**
* Read off the wire. If there is not enough data to read, update the connection state with what
* we have and returns.
*
* @return Returns -1 if failure (and caller will close connection), else zero or more.
*/
public int readAndProcess() throws IOException, InterruptedException {
// If we have not read the connection setup preamble, look to see if that is on the wire.
if (!connectionPreambleRead) {
int count = readPreamble();
if (!connectionPreambleRead) {
return count;
}
}
    // Try to read an int. It will be the length of the data to read (or -1 if a ping). We read
    // the integer length into the 4-byte this.dataLengthBuffer.
int count = read4Bytes();
if ((count < 0) || (dataLengthBuffer.remaining() > 0)) {
return count;
}
// We have read a length and we have read the preamble. It is either the connection header
// or it is a request.
if (data == null) {
dataLengthBuffer.flip();
int dataLength = dataLengthBuffer.getInt();
      if (dataLength == RpcClient.PING_CALL_ID) {
        if (!useWrap) {
          // covers the !useSasl too
          dataLengthBuffer.clear();
          return 0; // ping message
        }
      }
      if (dataLength < 0) {
// A data length of zero is legal.
throw new DoNotRetryIOException((("Unexpected data length " + dataLength) + "!! from ") + getHostAddress());
}
if (dataLength > this.rpcServer.maxRequestSize) {
String msg = ((((((("RPC data length of " + dataLength) + " received from ") + getHostAddress()) + " is greater than max allowed ") + this.rpcServer.maxRequestSize) + ". Set \"") + SimpleRpcServer.MAX_REQUEST_SIZE) + "\" on server to override this limit (not recommended)";
SimpleRpcServer.LOG.warn(msg);
if (connectionHeaderRead && connectionPreambleRead) {
incRpcCount();
// Construct InputStream for the non-blocking SocketChannel
// We need the InputStream because we want to read only the request header
// instead of the whole rpc.
ByteBuffer buf = ByteBuffer.allocate(1);
InputStream is = new InputStream() {
@Override
public int read() throws IOException {
SimpleServerRpcConnection.this.rpcServer.channelRead(channel, buf);
buf.flip();
int x = buf.get();
buf.flip();
return x;
}
};
CodedInputStream cis = CodedInputStream.newInstance(is);
int headerSize = cis.readRawVarint32();
Message.Builder builder = RequestHeader.newBuilder();
ProtobufUtil.mergeFrom(builder, cis, headerSize);
RequestHeader header = ((RequestHeader) (builder.build()));
// Notify the client about the offending request
        SimpleServerCall reqTooBigCall = new SimpleServerCall(header.getCallId(), this.service, null,
          null, null, null, this, 0, this.addr, EnvironmentEdgeManager.currentTime(), 0,
          this.rpcServer.bbAllocator, this.rpcServer.cellBlockBuilder, null, responder);
        RequestTooBigException reqTooBigEx = new RequestTooBigException(msg);
        this.rpcServer.metrics.exception(reqTooBigEx);
        // Make sure the client recognizes the underlying exception.
        // Otherwise, throw a DoNotRetryIOException.
        if (VersionInfoUtil.hasMinimumVersion(connectionHeader.getVersionInfo(),
          RequestTooBigException.MAJOR_VERSION, RequestTooBigException.MINOR_VERSION)) {
          reqTooBigCall.setResponse(null, null, reqTooBigEx, msg);
        } else {
          reqTooBigCall.setResponse(null, null, new DoNotRetryIOException(msg), msg);
        }
        // In most cases we will write out the response directly. If not, it is still OK to just
        // close the connection without writing out the reqTooBig response. Do not try to write
        // it out directly here, as that can cause a deserialization error if the connection is
        // slow and we have a half-written response in the queue.
        reqTooBigCall.sendResponseIfReady();
}
// Close the connection
return -1;
}
// Initialize this.data with a ByteBuff.
// This call will allocate a ByteBuff to read request into and assign to this.data
      // When we use buffer(s) from the pool, it will also create a CallCleanup instance and
      // assign it to this.callCleanup
initByteBuffToReadInto(dataLength);
// Increment the rpc count. This counter will be decreased when we write
// the response. If we want the connection to be detected as idle properly, we
// need to keep the inc / dec correct.
incRpcCount();
}
count = channelDataRead(channel, data);
if ((count >= 0) && (data.remaining() == 0)) {
// count==0 if dataLength == 0
process();
}
return count;
} | 3.26 |
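readAndProcess above implements length-prefixed framing on a non-blocking channel: a 4-byte length is accumulated first, then a buffer of that size is filled across possibly many reads. A simplified, stand-alone sketch of that framing follows, using plain NIO and no HBase types; the size limit is an assumed placeholder, not the HBase default.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.SocketChannel;

// Reads one length-prefixed frame: a 4-byte big-endian length followed by that many payload bytes.
final class LengthPrefixedFrameReader {
  private static final int MAX_FRAME_SIZE = 256 * 1024 * 1024; // assumed limit
  private final ByteBuffer lengthBuffer = ByteBuffer.allocate(4);
  private ByteBuffer payload;

  // Returns the complete payload once fully read, or null if more bytes are still needed.
  ByteBuffer readFrame(SocketChannel channel) throws IOException {
    if (payload == null) {
      if (channel.read(lengthBuffer) < 0) {
        throw new IOException("Connection closed before length was read");
      }
      if (lengthBuffer.hasRemaining()) {
        return null; // partial length; wait for the next readiness event
      }
      lengthBuffer.flip();
      int length = lengthBuffer.getInt();
      if (length < 0 || length > MAX_FRAME_SIZE) {
        throw new IOException("Unexpected data length " + length);
      }
      payload = ByteBuffer.allocate(length);
    }
    if (channel.read(payload) < 0) {
      throw new IOException("Connection closed mid-frame");
    }
    if (payload.hasRemaining()) {
      return null; // wait for the rest of the payload
    }
    ByteBuffer complete = payload;
    payload = null;
    lengthBuffer.clear();
    complete.flip();
    return complete;
  }
}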
hbase_SimpleServerRpcConnection_decRpcCount_rdh | /* Decrement the outstanding RPC count */
protected void decRpcCount() {
rpcCount.decrement();
} | 3.26 |
hbase_SimpleServerRpcConnection_process_rdh | /**
* Process the data buffer and clean the connection state for the next call.
*/
private void process() throws IOException, InterruptedException {
    data.rewind();
    try {
if (skipInitialSaslHandshake) {
skipInitialSaslHandshake = false;
return;
}
      if (useSasl) {
        saslReadAndProcess(data);
} else {
processOneRpc(data);
}
} catch (Exception e) {
callCleanupIfNeeded();
throw e;
} finally {
dataLengthBuffer.clear();// Clean for the next call
data = null;// For the GC
this.callCleanup = null;
}
} | 3.26 |
hbase_SimpleServerRpcConnection_incRpcCount_rdh | /* Increment the outstanding RPC count */
protected void incRpcCount() {
rpcCount.increment();
} | 3.26 |
hbase_SimpleServerRpcConnection_isIdle_rdh | /* Return true if the connection has no outstanding rpc */
boolean isIdle() {
return rpcCount.sum() == 0;
} | 3.26 |
hbase_SimpleByteRange_shallowCopy_rdh | //
// methods for duplicating the current instance
//
@Override
public ByteRange shallowCopy() {
    SimpleByteRange clone = new SimpleByteRange(bytes, offset, length);
    if (isHashCached()) {
      clone.hash = hash;
    }
    return clone;
} | 3.26 |
hbase_SimpleByteRange_unset_rdh | //
@Override
public ByteRange unset() {
throw new ReadOnlyByteRangeException();
} | 3.26 |
hbase_SimpleByteRange_put_rdh | //
// methods for retrieving data
//
@Override
public ByteRange put(int index, byte val) {
throw new ReadOnlyByteRangeException();
} | 3.26 |
hbase_StoreFileWriter_build_rdh | /**
* Create a store file writer. Client is responsible for closing file when done. If metadata,
* add BEFORE closing using {@link StoreFileWriter#appendMetadata}.
*/
public StoreFileWriter build() throws IOException {
if (((dir == null ? 0 : 1) + (filePath == null ? 0 : 1)) != 1) {
throw new IllegalArgumentException("Either specify parent directory " + "or file path");
}
if (dir == null) {
dir = filePath.getParent();
}
if (!fs.exists(dir)) {
// Handle permission for non-HDFS filesystem properly
// See HBASE-17710
HRegionFileSystem.mkdirs(fs, conf, dir);
      }
      // set block storage policy for temp path
String policyName = this.conf.get(ColumnFamilyDescriptorBuilder.STORAGE_POLICY);
if (null == policyName) {
policyName = this.conf.get(HStore.BLOCK_STORAGE_POLICY_KEY);
}
CommonFSUtils.setStoragePolicy(this.fs, dir, policyName);
      if (filePath == null) {
        // The stored file and related blocks will use the directory-based StoragePolicy.
        // HDFS DistributedFileSystem does not support creating files with a storage policy
        // before version 3.3.0 (see HDFS-13209), so we use a child dir here to make the stored
        // files satisfy the specific storage policy when writing and avoid later data movement.
        // We don't want to change the whole temp dir to 'fileStoragePolicy'.
        if (!Strings.isNullOrEmpty(fileStoragePolicy)) {
          dir = new Path(dir, HConstants.STORAGE_POLICY_PREFIX + fileStoragePolicy);
          if (!fs.exists(dir)) {
            HRegionFileSystem.mkdirs(fs, conf, dir);
            LOG.info("Create tmp dir " + dir.toString() + " with storage policy: " + fileStoragePolicy);
          }
          CommonFSUtils.setStoragePolicy(this.fs, dir, fileStoragePolicy);
        }
filePath = getUniqueFile(fs, dir);
if (!BloomFilterFactory.isGeneralBloomEnabled(conf)) {
bloomType = BloomType.NONE;
}
}
// make sure we call this before actually create the writer
      // in fact, it is not a big deal to even add a nonexistent file to the tracker, as we will
      // never try to delete it, and we will clean the tracker up after compaction anyway. But if
      // the file cleaner finds the file before we have recorded it, it may accidentally delete
      // the file and cause problems.
if (writerCreationTracker != null) {
writerCreationTracker.accept(filePath);
      }
      return new StoreFileWriter(fs, filePath, conf, cacheConf, bloomType, maxKeyCount,
favoredNodes, fileContext, shouldDropCacheBehind, compactedFilesSupplier);
} | 3.26 |
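build() above enforces that exactly one of withOutputDir and withFilePath was called before constructing the writer. The following is a small generic sketch of that mutually exclusive builder contract, using java.nio.file.Path instead of the Hadoop Path; the class and method names are hypothetical.

import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Objects;

// Illustrative builder with two mutually exclusive targets, mirroring the
// withOutputDir / withFilePath contract above.
final class OutputTargetBuilder {
  private Path dir;
  private Path filePath;

  OutputTargetBuilder withOutputDir(Path dir) {
    this.dir = Objects.requireNonNull(dir);
    return this; // return this so calls can be chained
  }

  OutputTargetBuilder withFilePath(Path filePath) {
    this.filePath = Objects.requireNonNull(filePath);
    return this;
  }

  Path build() {
    // Exactly one of the two options must be set, the same check as in the snippet above.
    if ((dir == null ? 0 : 1) + (filePath == null ? 0 : 1) != 1) {
      throw new IllegalArgumentException("Either specify parent directory or file path");
    }
    return filePath != null ? filePath : dir.resolve("generated-file-name");
  }

  public static void main(String[] args) {
    Path out = new OutputTargetBuilder().withOutputDir(Paths.get("/tmp/cf")).build();
    System.out.println(out); // /tmp/cf/generated-file-name
  }
}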
hbase_StoreFileWriter_m0_rdh | /**
 * Used when writing {@link HStoreFile#COMPACTION_EVENT_KEY} to the new file's file info. The
 * names of the compacted store files are needed. If a compacted store file is itself the result
 * of a compaction, its own compacted files that are still not archived are needed too, but
 * compacted files do not need to be added recursively. If files A, B, C are compacted to new
 * file D, and file D is compacted to new file E, we write A, B, C, D to file E's compacted
 * files. So if file E is compacted to new file F, we first add E to F's compacted files, then
 * add E's compacted files A, B, C, D. There is no need to add D's compacted files, as they are
 * already in E's compacted files. See HBASE-20724 for more details.
*
* @param storeFiles
* The compacted store files to generate this new file
* @return bytes of CompactionEventTracker
*/
private byte[] m0(Collection<HStoreFile> storeFiles) {
    Set<String> notArchivedCompactedStoreFiles = this.compactedFilesSupplier.get().stream()
      .map(sf -> sf.getPath().getName()).collect(Collectors.toSet());
    Set<String> compactedStoreFiles = new HashSet<>();
    for (HStoreFile storeFile : storeFiles) {
      compactedStoreFiles.add(storeFile.getFileInfo().getPath().getName());
      for (String csf : storeFile.getCompactedStoreFiles()) {
        if (notArchivedCompactedStoreFiles.contains(csf)) {
compactedStoreFiles.add(csf);
}
}
}
return ProtobufUtil.toCompactionEventTrackerBytes(compactedStoreFiles);
} | 3.26 |
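The bookkeeping described in the javadoc above boils down to a set union: the new file's compacted-files list is its direct inputs plus any of their own compacted files that are not yet archived. A stand-alone sketch with plain collections (no HBase types; names here are illustrative):

import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// The new file records its direct inputs plus any of their compacted files still not archived.
final class CompactionEventDemo {
  static Set<String> compactedFilesFor(Collection<String> inputs,
      Map<String, Set<String>> compactedFilesOfInput, Set<String> notYetArchived) {
    Set<String> result = new HashSet<>(inputs);
    for (String input : inputs) {
      for (String older : compactedFilesOfInput.getOrDefault(input, Collections.emptySet())) {
        if (notYetArchived.contains(older)) {
          result.add(older);
        }
      }
    }
    return result;
  }

  public static void main(String[] args) {
    Map<String, Set<String>> history = new HashMap<>();
    history.put("D", new HashSet<>(Arrays.asList("A", "B", "C"))); // D was compacted from A, B, C
    Set<String> notArchived = new HashSet<>(Arrays.asList("A", "B", "C"));
    // Compacting D into E records A, B, C, D; no recursion beyond one level is needed.
    System.out.println(compactedFilesFor(Arrays.asList("D"), history, notArchived));
  }
}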
hbase_StoreFileWriter_withOutputDir_rdh | /**
* Use either this method or {@link #withFilePath}, but not both.
*
* @param dir
 * Path to column family directory. The directory is created if it does not exist. The
 * file is given a unique name within this directory.
 * @return this (for chained invocation)
 */
public Builder withOutputDir(Path dir) {
Preconditions.checkNotNull(dir);
this.dir = dir;
return this;
} | 3.26 |
hbase_StoreFileWriter_getGeneralBloomWriter_rdh | /**
* For unit testing only.
*
* @return the Bloom filter used by this writer.
*/
BloomFilterWriter getGeneralBloomWriter() {
return generalBloomFilterWriter;
} | 3.26 |
hbase_StoreFileWriter_appendMetadata_rdh | /**
 * Writes meta data. Call before {@link #close()} since it is written as meta data to this file.
*
* @param maxSequenceId
* Maximum sequence id.
* @param majorCompaction
* True if this file is product of a major compaction
* @param mobCellsCount
* The number of mob cells.
* @throws IOException
* problem writing to FS
*/
public void appendMetadata(final long maxSequenceId, final boolean majorCompaction, final long mobCellsCount) throws IOException {
writer.appendFileInfo(MAX_SEQ_ID_KEY, Bytes.toBytes(maxSequenceId));
writer.appendFileInfo(MAJOR_COMPACTION_KEY, Bytes.toBytes(majorCompaction));
writer.appendFileInfo(MOB_CELLS_COUNT, Bytes.toBytes(mobCellsCount));
appendTrackedTimestampsToMetadata();
} | 3.26 |
hbase_StoreFileWriter_appendTrackedTimestampsToMetadata_rdh | /**
* Add TimestampRange and earliest put timestamp to Metadata
*/
public void appendTrackedTimestampsToMetadata() throws IOException {
// TODO: The StoreFileReader always converts the byte[] to TimeRange
// via TimeRangeTracker, so we should write the serialization data of TimeRange directly.
appendFileInfo(TIMERANGE_KEY, TimeRangeTracker.toByteArray(timeRangeTracker));
    appendFileInfo(EARLIEST_PUT_TS, Bytes.toBytes(earliestPutTs));
  } | 3.26 |
hbase_StoreFileWriter_appendMobMetadata_rdh | /**
 * Appends MOB-specific metadata (even if it is empty).
*
* @param mobRefSet
* - original table -> set of MOB file names
* @throws IOException
* problem writing to FS
*/
public void appendMobMetadata(SetMultimap<TableName, String> mobRefSet) throws IOException {
writer.appendFileInfo(MOB_FILE_REFS, MobUtils.serializeMobFileRefs(mobRefSet));
} | 3.26 |
hbase_StoreFileWriter_withMaxKeyCount_rdh | /**
*
* @param maxKeyCount
* estimated maximum number of keys we expect to add
* @return this (for chained invocation)
*/
public Builder withMaxKeyCount(long maxKeyCount) {
this.maxKeyCount = maxKeyCount;
return this;
} | 3.26 |
hbase_StoreFileWriter_trackTimestamps_rdh | /**
 * Record the earliest Put timestamp. If the timeRangeTracker is not set, update TimeRangeTracker
* to include the timestamp of this key
*/
public void trackTimestamps(final Cell cell) {
if (Type.Put.getCode() == cell.getTypeByte()) {
earliestPutTs = Math.min(earliestPutTs, cell.getTimestamp());
}
timeRangeTracker.includeTimestamp(cell);
} | 3.26 |
hbase_StoreFileWriter_withFilePath_rdh | /**
* Use either this method or {@link #withOutputDir}, but not both.
*
* @param filePath
* the StoreFile path to write
* @return this (for chained invocation)
*/
public Builder withFilePath(Path filePath) {
Preconditions.checkNotNull(filePath);
this.filePath = filePath;
return this;
} | 3.26 |
hbase_StoreFileWriter_withFavoredNodes_rdh | /**
*
* @param favoredNodes
* an array of favored nodes or possibly null
* @return this (for chained invocation)
*/
public Builder withFavoredNodes(InetSocketAddress[] favoredNodes) {
this.favoredNodes = favoredNodes;
return this;
} | 3.26 |
hbase_StoreFileWriter_getUniqueFile_rdh | /**
*
* @param dir
* Directory to create file in.
* @return random filename inside passed <code>dir</code>
*/
public static Path getUniqueFile(final FileSystem fs, final Path dir) throws IOException {
if (!fs.getFileStatus(dir).isDirectory()) {
throw new IOException(("Expecting " + dir.toString()) + " to be a directory");
}
return new Path(dir,
dash.matcher(UUID.randomUUID().toString()).replaceAll(""));
} | 3.26 |
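The unique name above is just a random UUID with the dashes stripped via a precompiled pattern; a tiny stand-alone version of the same trick:

import java.util.UUID;
import java.util.regex.Pattern;

// Generates a random file name by removing the dashes from a UUID, as getUniqueFile does above.
final class UniqueNames {
  private static final Pattern DASH = Pattern.compile("-");

  static String uniqueName() {
    return DASH.matcher(UUID.randomUUID().toString()).replaceAll("");
  }

  public static void main(String[] args) {
    System.out.println(uniqueName()); // e.g. "3f2504e04f8941d39a0c0305e82c3301"
  }
}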
hbase_StoreFileWriter_getHFileWriter_rdh | /**
* For use in testing.
*/
Writer getHFileWriter() {
return writer;
} | 3.26 |
hbase_ReplicationSink_decorateConf_rdh | /**
* decorate the Configuration object to make replication more receptive to delays: lessen the
* timeout and numTries.
*/
private void decorateConf() {
this.conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, this.conf.getInt("replication.sink.client.retries.number", 4));
this.conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, this.conf.getInt("replication.sink.client.ops.timeout", 10000));
String replicationCodec = this.conf.get(HConstants.REPLICATION_CODEC_CONF_KEY);
if (StringUtils.isNotEmpty(replicationCodec)) {
this.conf.set(HConstants.RPC_CODEC_CONF_KEY, replicationCodec);
}
// use server ZK cluster for replication, so we unset the client ZK related properties if any
if (this.conf.get(HConstants.CLIENT_ZOOKEEPER_QUORUM) != null) {
this.conf.unset(HConstants.CLIENT_ZOOKEEPER_QUORUM);
}
} | 3.26 |
hbase_ReplicationSink_stopReplicationSinkServices_rdh | /**
* stop the thread pool executor. It is called when the regionserver is stopped.
*/
public void stopReplicationSinkServices() {
try {
if (this.sharedConn != null) {
      synchronized (sharedConnLock) {
        if (this.sharedConn != null) {
this.sharedConn.close();
this.sharedConn = null;
}
}
}
} catch (IOException e) {
LOG.warn("IOException while closing the connection", e);// ignoring as we are closing.
}
} | 3.26 |
hbase_ReplicationSink_getSinkMetrics_rdh | /**
* Get replication Sink Metrics
*/
public MetricsSink getSinkMetrics() {
return this.metrics;
} | 3.26 |
hbase_ReplicationSink_addToHashMultiMap_rdh | /**
 * Simple helper to add a value to a map from key to (a list of) values. TODO: Make a general
 * utility method.
*
* @return the list of values corresponding to key1 and key2
*/
private <K1, K2, V> List<V> addToHashMultiMap(Map<K1, Map<K2, List<V>>> map, K1 key1, K2 key2, V value) {
Map<K2, List<V>> innerMap = map.computeIfAbsent(key1, k -> new HashMap<>());
List<V> values = innerMap.computeIfAbsent(key2, k -> new ArrayList<>());
values.add(value);
return values;
} | 3.26 |
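A stand-alone copy of the two-level multi-map helper above with a small usage example; in the sink it is keyed by table and then by cluster-id list, while the string and integer keys here are just placeholders.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Two-level "hash multi map": outer key -> inner key -> list of values.
final class MultiMapDemo {
  static <K1, K2, V> List<V> addToHashMultiMap(Map<K1, Map<K2, List<V>>> map, K1 key1, K2 key2, V value) {
    Map<K2, List<V>> inner = map.computeIfAbsent(key1, k -> new HashMap<>());
    List<V> values = inner.computeIfAbsent(key2, k -> new ArrayList<>());
    values.add(value);
    return values;
  }

  public static void main(String[] args) {
    Map<String, Map<Integer, List<String>>> map = new HashMap<>();
    addToHashMultiMap(map, "tableA", 1, "row-1");
    addToHashMultiMap(map, "tableA", 1, "row-2");
    addToHashMultiMap(map, "tableA", 2, "row-3");
    System.out.println(map); // {tableA={1=[row-1, row-2], 2=[row-3]}}
  }
}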
hbase_ReplicationSink_batch_rdh | /**
* Do the changes and handle the pool
*
* @param tableName
* table to insert into
* @param allRows
* list of actions
* @param batchRowSizeThreshold
* rowSize threshold for batch mutation
*/
private void batch(TableName tableName, Collection<List<Row>> allRows, int batchRowSizeThreshold) throws IOException {
    if (allRows.isEmpty()) {
      return;
}
AsyncTable<?> table = getConnection().getTable(tableName);
List<Future<?>> futures = new ArrayList<>();
for (List<Row> rows : allRows) {
List<List<Row>> batchRows;
if (rows.size() > batchRowSizeThreshold) {
batchRows = Lists.partition(rows, batchRowSizeThreshold);
} else {
batchRows = Collections.singletonList(rows);
}
futures.addAll(batchRows.stream().map(table::batchAll).collect(Collectors.toList()));
}
for (Future<?> future : futures) {
try {
FutureUtils.get(future);
} catch (RetriesExhaustedException e) {
        if (e.getCause() instanceof TableNotFoundException) {
throw new TableNotFoundException(("'" + tableName) + "'");
}
throw e;
}
}
} | 3.26 |
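The batching decision above only splits a row list when it exceeds batchRowSizeThreshold, otherwise it submits the whole list as one batch. A minimal sketch of that partitioning without the Guava dependency; the demo values are arbitrary.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

// Split a list into fixed-size chunks only when it exceeds the threshold.
final class BatchPartitionDemo {
  static <T> List<List<T>> partitionIfNeeded(List<T> rows, int threshold) {
    if (rows.size() <= threshold) {
      return Collections.singletonList(rows);
    }
    List<List<T>> chunks = new ArrayList<>();
    for (int from = 0; from < rows.size(); from += threshold) {
      chunks.add(rows.subList(from, Math.min(from + threshold, rows.size())));
    }
    return chunks;
  }

  public static void main(String[] args) {
    System.out.println(partitionIfNeeded(Arrays.asList(1, 2, 3, 4, 5), 2)); // [[1, 2], [3, 4], [5]]
  }
}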
hbase_ReplicationSink_isNewRowOrType_rdh | /**
* Returns True if we have crossed over onto a new row or type
*/
private boolean isNewRowOrType(final Cell previousCell, final Cell cell) {
return ((previousCell == null) || (previousCell.getTypeByte() != cell.getTypeByte())) || (!CellUtil.matchingRows(previousCell, cell));
} | 3.26 |
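In replicateEntries below this predicate decides when to start a new mutation while scanning cells. A small stand-alone illustration of that grouping; CellStub is a hypothetical stand-in for an HBase Cell, not the real interface.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Consecutive cells with the same row and type go into the same mutation; a change in either starts a new one.
final class GroupByRowAndTypeDemo {
  static final class CellStub {
    final String row;
    final byte type;

    CellStub(String row, byte type) {
      this.row = row;
      this.type = type;
    }
  }

  static boolean isNewRowOrType(CellStub previous, CellStub cell) {
    return previous == null || previous.type != cell.type || !previous.row.equals(cell.row);
  }

  static List<List<CellStub>> group(List<CellStub> cells) {
    List<List<CellStub>> groups = new ArrayList<>();
    CellStub previous = null;
    for (CellStub cell : cells) {
      if (isNewRowOrType(previous, cell)) {
        groups.add(new ArrayList<>()); // start a new group, i.e. a new mutation
      }
      groups.get(groups.size() - 1).add(cell);
      previous = cell;
    }
    return groups;
  }

  public static void main(String[] args) {
    List<CellStub> cells = Arrays.asList(new CellStub("r1", (byte) 4), new CellStub("r1", (byte) 4),
      new CellStub("r1", (byte) 8), new CellStub("r2", (byte) 4));
    System.out.println(group(cells).size()); // 3 groups: (r1,4) x2, (r1,8), (r2,4)
  }
}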
hbase_ReplicationSink_replicateEntries_rdh | /**
* Replicate this array of entries directly into the local cluster using the native client. Only
* operates against raw protobuf type saving on a conversion from pb to pojo.
*
* @param entries
* WAL entries to be replicated.
* @param cells
* cell scanner for iteration.
* @param replicationClusterId
* Id which will uniquely identify source cluster FS client
* configurations in the replication configuration directory
* @param sourceBaseNamespaceDirPath
* Path that point to the source cluster base namespace
* directory
* @param sourceHFileArchiveDirPath
* Path that point to the source cluster hfile archive directory
* @throws IOException
* If failed to replicate the data
*/
public void replicateEntries(List<WALEntry> entries, final CellScanner cells, String replicationClusterId, String sourceBaseNamespaceDirPath, String sourceHFileArchiveDirPath) throws IOException {
if (entries.isEmpty()) {
return;
}
// Very simple optimization where we batch sequences of rows going
// to the same table.
    try {
long totalReplicated = 0;
// Map of table => list of Rows, grouped by cluster id, we only want to flushCommits once per
// invocation of this method per table and cluster id.
Map<TableName, Map<List<UUID>, List<Row>>> rowMap = new TreeMap<>();
Map<List<String>, Map<String, List<Pair<byte[], List<String>>>>> bulkLoadsPerClusters = null;
Pair<List<Mutation>, List<WALEntry>> mutationsToWalEntriesPairs = new Pair<>(new ArrayList<>(), new ArrayList<>());
for (WALEntry entry : entries) {
TableName table = TableName.valueOf(entry.getKey().getTableName().toByteArray());
if (this.walEntrySinkFilter != null) {
if (this.walEntrySinkFilter.filter(table, entry.getKey().getWriteTime())) {
// Skip Cells in CellScanner associated with this entry.
int count = entry.getAssociatedCellCount();
for (int i = 0; i < count; i++) {
// Throw index out of bounds if our cell count is off
if (!cells.advance()) {
this.metrics.incrementFailedBatches();
throw new ArrayIndexOutOfBoundsException((("Expected=" + count) + ", index=") + i);
}
}
            continue;
          }
        }
        Cell previousCell = null;
        Mutation mutation = null;
        int count = entry.getAssociatedCellCount();
        for (int i = 0; i < count; i++) {
// Throw index out of bounds if our cell count is off
if (!cells.advance()) {
this.metrics.incrementFailedBatches();
throw new ArrayIndexOutOfBoundsException((("Expected=" + count) + ", index=") + i);
}
Cell cell = cells.current();
// Handle bulk load hfiles replication
if (CellUtil.matchingQualifier(cell, WALEdit.BULK_LOAD)) {
BulkLoadDescriptor bld = WALEdit.getBulkLoadDescriptor(cell);
if (bld.getReplicate()) {
            if (bulkLoadsPerClusters == null) {
              bulkLoadsPerClusters = new HashMap<>();
            }
            // Map of table name Vs list of pair of family and list of
            // hfile paths from its namespace
            Map<String, List<Pair<byte[], List<String>>>> bulkLoadHFileMap =
              bulkLoadsPerClusters.computeIfAbsent(bld.getClusterIdsList(), k -> new HashMap<>());
            buildBulkLoadHFileMap(bulkLoadHFileMap, table, bld);
          }
        } else if (CellUtil.matchingQualifier(cell, WALEdit.REPLICATION_MARKER)) {
Mutation put = processReplicationMarkerEntry(cell);
if (put == null) {
continue;
}
table = REPLICATION_SINK_TRACKER_TABLE_NAME;
List<UUID> clusterIds = new ArrayList<>();
          for (HBaseProtos.UUID clusterId : entry.getKey().getClusterIdsList()) {
            clusterIds.add(toUUID(clusterId));
          }
          put.setClusterIds(clusterIds);
          addToHashMultiMap(rowMap, table, clusterIds, put);
        } else {
// Handle wal replication
if (isNewRowOrType(previousCell, cell)) {
// Create new mutation
mutation = (CellUtil.isDelete(cell)) ? new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()) : new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
List<UUID> clusterIds = new ArrayList<>(entry.getKey().getClusterIdsList().size());
for (HBaseProtos.UUID clusterId : entry.getKey().getClusterIdsList()) {
clusterIds.add(toUUID(clusterId));
}
mutation.setClusterIds(clusterIds);
mutation.setAttribute(ReplicationUtils.REPLICATION_ATTR_NAME, HConstants.EMPTY_BYTE_ARRAY);
if (rsServerHost != null) {
rsServerHost.preReplicationSinkBatchMutate(entry,
mutation);
mutationsToWalEntriesPairs.getFirst().add(mutation);
mutationsToWalEntriesPairs.getSecond().add(entry);
}
            addToHashMultiMap(rowMap, table, clusterIds, mutation);
          }
          if (CellUtil.isDelete(cell)) {
((Delete) (mutation)).add(cell);
} else {
((Put) (mutation)).add(cell);
}
previousCell = cell;
}
}
totalReplicated++;
}
// TODO Replicating mutations and bulk loaded data can be made parallel
if (!rowMap.isEmpty()) {
LOG.debug("Started replicating mutations.");
for (Entry<TableName, Map<List<UUID>, List<Row>>> entry : rowMap.entrySet()) {
batch(entry.getKey(), entry.getValue().values(), rowSizeWarnThreshold);
}
LOG.debug("Finished replicating mutations.");
}
if (rsServerHost != null) {
List<Mutation> mutations = mutationsToWalEntriesPairs.getFirst();
List<WALEntry> walEntries = mutationsToWalEntriesPairs.getSecond();
for (int i = 0; i < mutations.size(); i++) {
rsServerHost.postReplicationSinkBatchMutate(walEntries.get(i), mutations.get(i));
}
}
if (bulkLoadsPerClusters != null) {
for (Entry<List<String>, Map<String, List<Pair<byte[], List<String>>>>> entry : bulkLoadsPerClusters.entrySet()) {
Map<String, List<Pair<byte[], List<String>>>> bulkLoadHFileMap = entry.getValue();
          if ((bulkLoadHFileMap != null) && (!bulkLoadHFileMap.isEmpty())) {
            LOG.debug("Replicating {} bulk loaded data", entry.getKey().toString());
            Configuration providerConf = this.provider.getConf(this.conf, replicationClusterId);
            try (HFileReplicator hFileReplicator = new HFileReplicator(providerConf,
              sourceBaseNamespaceDirPath, sourceHFileArchiveDirPath, bulkLoadHFileMap, conf,
              getConnection(), entry.getKey())) {
hFileReplicator.replicate();
LOG.debug("Finished replicating {} bulk loaded data", entry.getKey().toString());
}
}
          }
        }
      }
      int size = entries.size();
      this.metrics.setAgeOfLastAppliedOp(entries.get(size - 1).getKey().getWriteTime());
      this.metrics.applyBatch(size + hfilesReplicated, hfilesReplicated);
      this.totalReplicatedEdits.addAndGet(totalReplicated);
} catch (IOException ex) {
LOG.error("Unable to accept edit because:", ex);
this.metrics.incrementFailedBatches();
throw ex;
}
} | 3.26 |
hbase_Hash_parseHashType_rdh | /**
* This utility method converts String representation of hash function name to a symbolic
* constant. Currently three function types are supported, "jenkins", "murmur" and "murmur3".
*
* @param name
* hash function name
* @return one of the predefined constants
*/
public static int parseHashType(String name) {
if ("jenkins".equalsIgnoreCase(name)) {
      return JENKINS_HASH;
} else if ("murmur".equalsIgnoreCase(name)) {
return MURMUR_HASH;
} else if ("murmur3".equalsIgnoreCase(name)) {
return MURMUR_HASH3;
} else {
return INVALID_HASH;
}
} | 3.26 |
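A minimal stand-alone equivalent of the name-to-constant parsing above, using an enum instead of the int constants; this is an illustrative simplification, not the HBase API.

import java.util.Locale;

// Case-insensitive mapping from a hash function name to a symbolic constant.
final class HashTypeDemo {
  enum HashType { JENKINS, MURMUR, MURMUR3, INVALID }

  static HashType parseHashType(String name) {
    if (name == null) {
      return HashType.INVALID;
    }
    switch (name.toLowerCase(Locale.ROOT)) {
      case "jenkins": return HashType.JENKINS;
      case "murmur":  return HashType.MURMUR;
      case "murmur3": return HashType.MURMUR3;
      default:        return HashType.INVALID;
    }
  }

  public static void main(String[] args) {
    System.out.println(parseHashType("Murmur3")); // MURMUR3 (comparison is case-insensitive)
    System.out.println(parseHashType("sha-256")); // INVALID
  }
}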
hbase_Hash_getInstance_rdh | /**
* Get a singleton instance of hash function of a type defined in the configuration.
*
* @param conf
* current configuration
* @return defined hash type, or null if type is invalid
*/
public static Hash getInstance(Configuration conf) {
int type = getHashType(conf);
return getInstance(type);
} | 3.26 |