name | code_snippet | score |
---|---|---|
hbase_AsyncTable_fromRow_rdh | /**
* Specify a start row
*
* @param startKey
* start region selection with region containing this row, inclusive.
*/
default CoprocessorServiceBuilder<S, R> fromRow(byte[] startKey) {
return fromRow(startKey, true);
} | 3.26 |
hbase_AsyncTable_getScanner_rdh | /**
* Gets a scanner on the current table for the given family and qualifier.
*
* @param family
* The column family to scan.
* @param qualifier
* The column qualifier to scan.
* @return A scanner.
*/
default ResultScanner getScanner(byte[] family, byte[] qualifier) {
return getScanner(new Scan().addColumn(family, qualifier));
} | 3.26 |
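The `getScanner(family, qualifier)` default above just wraps a `Scan` with `addColumn`; the synchronous `Table` interface exposes the same overload. A minimal usage sketch, assuming a reachable cluster and an existing table `t1` with family `cf` and qualifier `q` (all names invented here):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanColumnExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("t1"));
         ResultScanner scanner = table.getScanner(Bytes.toBytes("cf"), Bytes.toBytes("q"))) {
      for (Result result : scanner) { // iterates over rows that have cf:q
        System.out.println(Bytes.toString(result.getRow()));
      }
    }
  }
}
```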
hbase_AsyncTable_getRequestAttributes_rdh | /**
* Get the map of request attributes
*
* @return a map of request attributes supplied by the client
*/
default Map<String, byte[]> getRequestAttributes() {
throw new NotImplementedException("Add an implementation!");
}
/**
* Test for the existence of columns in the table, as specified by the Get.
* <p>
* This will return true if the Get matches one or more keys, false if not.
* <p>
* This is a server-side call so it prevents any data from being transferred to the client.
*
* @return true if the specified Get matches one or more keys, false if not. The return value will
be wrapped by a {@link CompletableFuture} | 3.26 |
hbase_AsyncTable_getAll_rdh | /**
* A simple version of batch get. It fails if there are any failures, and you get the whole
* result list at once if the operation succeeds.
*
* @param gets
* The objects that specify what data to fetch and from which rows.
* @return A {@link CompletableFuture} that wraps the result list.
*/
default CompletableFuture<List<Result>> getAll(List<Get> gets) {
return allOf(get(gets));
} | 3.26 |
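Because `getAll` collapses the per-`Get` futures into a single future over the whole list, a caller can attach one callback for the batch. A sketch under the assumption that `asyncTable` was obtained from an `AsyncConnection` elsewhere; the row keys are invented:

```java
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.util.Bytes;

class BatchGetSketch {
  // Prints every fetched row key once all gets have completed, or fails as a whole.
  static void printRows(AsyncTable<?> asyncTable) {
    List<Get> gets = Arrays.asList(new Get(Bytes.toBytes("row1")), new Get(Bytes.toBytes("row2")));
    asyncTable.getAll(gets)
      .thenAccept(results -> results.forEach(r -> System.out.println(Bytes.toString(r.getRow()))));
  }
}
```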
hbase_AsyncTable_exists_rdh | /**
* Test for the existence of columns in the table, as specified by the Gets.
* <p>
* This will return a list of booleans. Each value will be true if the related Get matches one or
* more keys, false if not.
* <p>
* This is a server-side call so it prevents any data from being transferred to the client.
*
* @param gets
* the Gets
* @return A list of {@link CompletableFuture}s that represent the existence for each get.
*/
default List<CompletableFuture<Boolean>> exists(List<Get> gets) {
return get(toCheckExistenceOnly(gets)).stream()
.<CompletableFuture<Boolean>>map(f -> f.thenApply(r -> r.getExists())).collect(toList());
}
/**
* A simple version of batch exists. It fails if there are any failures, and you get the
* whole boolean result list at once if the operation succeeds.
*
* @param gets
* the Gets
* @return A {@link CompletableFuture} | 3.26 |
hbase_AsyncTable_batchAll_rdh | /**
* A simple version of batch. It fails if there are any failures, and you get the whole
* result list at once if the operation succeeds.
*
* @param actions
* list of Get, Put, Delete, Increment, Append and RowMutations objects
* @return A list of the result for the actions. Wrapped by a {@link CompletableFuture}.
*/
default <T> CompletableFuture<List<T>> batchAll(List<? extends Row> actions) {
return allOf(batch(actions));
} | 3.26 |
hbase_AsyncTable_deleteAll_rdh | /**
* A simple version of batch delete. It will fail if there are any failures.
*
* @param deletes
* list of things to delete.
* @return A {@link CompletableFuture} that always returns null when complete normally.
*/
default CompletableFuture<Void> deleteAll(List<Delete> deletes) {
return allOf(delete(deletes)).thenApply(r -> null);
} | 3.26 |
hbase_AsyncTable_checkAndMutateAll_rdh | /**
* A simple version of batch checkAndMutate. It will fail if there are any failures.
*
* @param checkAndMutates
* The list of rows to apply.
* @return A {@link CompletableFuture} that wraps the result list.
*/
default CompletableFuture<List<CheckAndMutateResult>> checkAndMutateAll(List<CheckAndMutate> checkAndMutates) {
return allOf(checkAndMutate(checkAndMutates));
} | 3.26 |
hbase_AsyncTable_ifEquals_rdh | /**
* Check for equality.
*
* @param value
* the expected value
*/
default CheckAndMutateBuilder ifEquals(byte[] value) {
return ifMatches(CompareOperator.EQUAL, value);
} | 3.26 |
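Because `ifEquals(value)` is just `ifMatches(CompareOperator.EQUAL, value)`, an equality guard reads naturally in the builder chain. A hypothetical sketch (row, family, qualifier, and values are invented, and `asyncTable` is assumed to come from an `AsyncConnection`):

```java
import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

class CheckAndPutSketch {
  // Updates cf:q to "new" only if it currently equals "expected".
  static CompletableFuture<Boolean> guardedPut(AsyncTable<?> asyncTable) {
    return asyncTable.checkAndMutate(Bytes.toBytes("row1"), Bytes.toBytes("cf"))
      .qualifier(Bytes.toBytes("q"))
      .ifEquals(Bytes.toBytes("expected"))
      .thenPut(new Put(Bytes.toBytes("row1"))
        .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("new")));
  }
}
```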
hbase_AsyncTable_putAll_rdh | /**
* A simple version of batch put. It will fail if there are any failures.
*
* @param puts
* The list of mutations to apply.
* @return A {@link CompletableFuture} that always returns null when complete normally.
*/
default CompletableFuture<Void> putAll(List<Put> puts) {
return allOf(put(puts)).thenApply(r -> null);
}
/**
* Deletes the specified cells/rows in bulk.
*
* @param deletes
* list of things to delete.
* @return A list of {@link CompletableFuture} | 3.26 |
hbase_ZKSplitLog_isRescanNode_rdh | /**
* Checks if the given path represents a rescan node.
*
* @param zkw
* reference to the {@link ZKWatcher} which also contains configuration and constants
* @param path
* the absolute path, starts with '/'
* @return whether the path represents a rescan node
*/
public static boolean isRescanNode(ZKWatcher zkw, String path) {
String prefix = getRescanNode(zkw);
if (path.length() <= prefix.length()) {
return false;
}
for (int i = 0; i < prefix.length(); i++) {
if (prefix.charAt(i) != path.charAt(i)) {
return false;
}
}
return true;
} | 3.26 |
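The character-by-character loop above is a manual prefix test. A behaviorally equivalent sketch using `String.startsWith`; the extra length comparison preserves the "strictly longer than the prefix" requirement:

```java
// Equivalent to isRescanNode's loop: the path must be strictly longer than the
// rescan prefix and begin with it.
static boolean isRescanNodeSketch(String prefix, String path) {
  return path.length() > prefix.length() && path.startsWith(prefix);
}
```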
hbase_ZKSplitLog_getEncodedNodeName_rdh | /**
* Gets the full path node name for the log file being split. This method will url encode the
* filename.
*
* @param zkw
* zk reference
* @param filename
* log file name (only the basename)
*/
public static String getEncodedNodeName(ZKWatcher zkw, String filename) {
return ZNodePaths.joinZNode(zkw.getZNodePaths().splitLogZNode, encode(filename));
} | 3.26 |
hbase_NamespaceTableAndRegionInfo_getTables_rdh | /**
* Gets the set of table names belonging to namespace.
*
* @return A set of table names.
*/
synchronized Set<TableName> getTables() {
return this.tableAndRegionInfo.keySet();
} | 3.26 |
hbase_NamespaceTableAndRegionInfo_getName_rdh | /**
* Gets the name of the namespace.
*
* @return name of the namespace.
*/
String getName() {
return f0;
} | 3.26 |
hbase_NamespaceTableAndRegionInfo_getRegionCount_rdh | /**
* Gets the total number of regions in namespace.
*
* @return the region count
*/
synchronized int getRegionCount() {
int regionCount = 0;
for (Entry<TableName, AtomicInteger> entry : this.tableAndRegionInfo.entrySet()) {
regionCount = regionCount + entry.getValue().get();
}
return regionCount;
} | 3.26 |
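The same sum can be written as a stream over the map's values; a sketch for comparison (the explicit loop above avoids the intermediate stream machinery, which may matter on hot paths):

```java
// Stream-based equivalent: sum the per-table AtomicInteger region counters.
synchronized int getRegionCountSketch() {
  return this.tableAndRegionInfo.values().stream()
      .mapToInt(java.util.concurrent.atomic.AtomicInteger::get).sum();
}
```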
hbase_RowFilter_toByteArray_rdh | /**
* Returns The filter serialized using pb
*/
@Override
public byte[] toByteArray() {
FilterProtos.RowFilter.Builder builder = FilterProtos.RowFilter.newBuilder();
builder.setCompareFilter(super.convert());
return builder.build().toByteArray();
} | 3.26 |
hbase_RowFilter_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof RowFilter)) {
return false;
}
return super.areSerializedFieldsEqual(o);
} | 3.26 |
hbase_RowFilter_parseFrom_rdh | /**
* Parse a serialized representation of {@link RowFilter}
*
* @param pbBytes
* A pb serialized {@link RowFilter} instance
* @return An instance of {@link RowFilter} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static RowFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.RowFilter proto;
try {
proto = FilterProtos.RowFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
final CompareOperator valueCompareOp = CompareOperator.valueOf(proto.getCompareFilter().getCompareOp().name());
ByteArrayComparable valueComparator = null;
try {
if (proto.getCompareFilter().hasComparator()) {
valueComparator = ProtobufUtil.toComparator(proto.getCompareFilter().getComparator());
}
} catch (IOException ioe) {
throw new DeserializationException(ioe);
}
return new RowFilter(valueCompareOp, valueComparator);
} | 3.26 |
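A round-trip sketch tying `toByteArray()` and `parseFrom()` together; the comparator and row value are invented for illustration:

```java
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.util.Bytes;

class RowFilterRoundTrip {
  static RowFilter roundTrip() throws DeserializationException {
    RowFilter original = new RowFilter(CompareOperator.EQUAL,
        new BinaryComparator(Bytes.toBytes("row-42")));
    byte[] pb = original.toByteArray();          // serialize via protobuf
    return RowFilter.parseFrom(pb);              // fields should match per areSerializedFieldsEqual
  }
}
```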
hbase_HBaseSaslRpcClient_readNextRpcPacket_rdh | // unwrap messages with Crypto AES
private void readNextRpcPacket() throws IOException {
LOG.debug("reading next wrapped RPC packet");
DataInputStream dis = new DataInputStream(in);
int rpcLen = dis.readInt();
byte[] rpcBuf = new byte[rpcLen];
dis.readFully(rpcBuf);
// unwrap with Crypto AES
rpcBuf = cryptoAES.unwrap(rpcBuf, 0, rpcBuf.length);
if (LOG.isDebugEnabled()) {
LOG.debug("unwrapping token of length:" + rpcBuf.length);
}
unwrappedRpcBuffer = ByteBuffer.wrap(rpcBuf);
} | 3.26 |
hbase_HBaseSaslRpcClient_m1_rdh | /**
* Get a SASL wrapped OutputStream. Can be called only after saslConnect() has been called.
*
* @return a SASL wrapped OutputStream
*/
public OutputStream m1() throws IOException {
if (!saslClient.isComplete()) {
throw new IOException("Sasl authentication exchange hasn't completed yet");
}
// If Crypto AES is enabled, return cryptoOutputStream which wrap the data with Crypto AES.
if (cryptoAesEnable && (cryptoOutputStream != null)) {
return cryptoOutputStream;
}
return saslOutputStream;
} | 3.26 |
hbase_HBaseSaslRpcClient_saslConnect_rdh | /**
* Do client side SASL authentication with server via the given InputStream and OutputStream
*
* @param inS
* InputStream to use
* @param outS
* OutputStream to use
* @return true if connection is set up, or false if needs to switch to simple Auth.
*/
public boolean saslConnect(InputStream inS, OutputStream outS) throws IOException {
DataInputStream inStream = new DataInputStream(new BufferedInputStream(inS));
DataOutputStream outStream = new DataOutputStream(new BufferedOutputStream(outS));
try {
byte[] saslToken = getInitialResponse();
if (saslToken != null) {
outStream.writeInt(saslToken.length);
outStream.write(saslToken, 0, saslToken.length);
outStream.flush();
if (LOG.isDebugEnabled()) {
LOG.debug(("Have sent token of size " + saslToken.length) + " from initSASLContext.");
}
}
if (!isComplete()) {
readStatus(inStream);
int len = inStream.readInt();
if (len == SaslUtil.SWITCH_TO_SIMPLE_AUTH) {
if (!fallbackAllowed) {
throw new IOException("Server asks us to fall back to SIMPLE auth, " + "but this client is configured to only allow secure connections.");
}
if (LOG.isDebugEnabled()) {
LOG.debug("Server asks us to fall back to simple auth.");
}
dispose();
return false;
}
saslToken = new byte[len];
if (LOG.isDebugEnabled()) {
LOG.debug(("Will read input token of size " + saslToken.length) + " for processing by initSASLContext");
}
inStream.readFully(saslToken);
}
while (!isComplete()) {
saslToken = evaluateChallenge(saslToken);
if (saslToken != null) {
if (LOG.isDebugEnabled()) {
LOG.debug(("Will send token of size " + saslToken.length) + " from initSASLContext.");
}
outStream.writeInt(saslToken.length);
outStream.write(saslToken, 0, saslToken.length);
outStream.flush();
}
if (!isComplete()) {
readStatus(inStream);
saslToken = new byte[inStream.readInt()];
if (LOG.isDebugEnabled()) {
LOG.debug(("Will read input token of size " + saslToken.length) + " for processing by initSASLContext");
}
inStream.readFully(saslToken);
}
}
if (LOG.isDebugEnabled()) {
LOG.debug("SASL client context established. Negotiated QoP: " + saslClient.getNegotiatedProperty(Sasl.QOP));
}
// initial the inputStream, outputStream for both Sasl encryption
// and Crypto AES encryption if necessary
// if Crypto AES encryption enabled, the saslInputStream/saslOutputStream is
// only responsible for connection header negotiation,
// cryptoInputStream/cryptoOutputStream is responsible for rpc encryption with Crypto AES
f0 = new SaslInputStream(inS, saslClient);
saslOutputStream = new SaslOutputStream(outS, saslClient);
if (initStreamForCrypto) {
cryptoInputStream = new WrappedInputStream(inS);
cryptoOutputStream = new WrappedOutputStream(outS);
}
return true;
} catch (IOException e) {
try {
saslClient.dispose();
} catch (SaslException ignored) {
// ignore further exceptions during cleanup
}
throw e;
}
} | 3.26 |
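The method above follows the standard SASL client pattern: send an optional initial response, then alternate challenge/response until the mechanism completes. A minimal sketch of that pattern with plain `javax.security.sasl`, where the length-prefixed stream I/O of `saslConnect` is abstracted behind an assumed `Transport` interface (not part of the class above):

```java
import javax.security.sasl.SaslClient;

class SaslHandshakeSketch {
  // Hypothetical stand-in for the DataInputStream/DataOutputStream framing above.
  interface Transport {
    void send(byte[] token) throws java.io.IOException;   // like writeInt(len) + write + flush
    byte[] receive() throws java.io.IOException;          // like readStatus + readInt + readFully
  }

  static void handshake(SaslClient client, Transport transport) throws Exception {
    // Some mechanisms start with a client token, like getInitialResponse() above.
    byte[] token = client.hasInitialResponse() ? client.evaluateChallenge(new byte[0]) : null;
    if (token != null) {
      transport.send(token);
    }
    // Alternate server challenge / client response until the mechanism completes.
    while (!client.isComplete()) {
      token = client.evaluateChallenge(transport.receive());
      if (token != null) {
        transport.send(token);
      }
    }
  }
}
```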
hbase_HBaseSaslRpcClient_getInputStream_rdh | /**
* Get a SASL wrapped InputStream. Can be called only after saslConnect() has been called.
*
* @return a SASL wrapped InputStream
*/
public InputStream getInputStream() throws IOException {
if (!saslClient.isComplete()) {
throw new IOException("Sasl authentication exchange hasn't completed yet");
}
// If Crypto AES is enabled, return cryptoInputStream which unwrap the data with Crypto AES.
if (cryptoAesEnable && (cryptoInputStream != null)) {
return cryptoInputStream;
}
return f0;
} | 3.26 |
hbase_DirectMemoryUtils_getDirectMemorySize_rdh | /**
* Returns the direct memory limit of the current process
*/
public static long getDirectMemorySize() {
return f1;
} | 3.26 |
hbase_DirectMemoryUtils_getDirectMemoryUsage_rdh | /**
* Returns the current amount of direct memory used.
*/
public static long getDirectMemoryUsage() {
if (((f0 == null) || (NIO_DIRECT_POOL == null)) || (!HAS_MEMORY_USED_ATTRIBUTE)) {
return 0;
}
try {
Long value = ((Long) (f0.getAttribute(NIO_DIRECT_POOL, MEMORY_USED)));
return value == null ? 0 : value;
} catch (JMException e) {
// should print further diagnostic information?
return 0;
}
} | 3.26 |
hbase_DirectMemoryUtils_getNettyDirectMemoryUsage_rdh | /**
* Returns the current amount of direct memory used by netty module.
*/
public static long getNettyDirectMemoryUsage() {
ByteBufAllocatorMetric metric = ((ByteBufAllocatorMetricProvider) (PooledByteBufAllocator.DEFAULT)).metric();
return metric.usedDirectMemory();
} | 3.26 |
hbase_RequestConverter_buildGetOnlineRegionRequest_rdh | /**
* Create a protocol buffer GetOnlineRegionRequest
*
* @return a protocol buffer GetOnlineRegionRequest
*/
public static GetOnlineRegionRequest buildGetOnlineRegionRequest() {
return GetOnlineRegionRequest.newBuilder().build();
} | 3.26 |
hbase_RequestConverter_buildBulkLoadHFileRequest_rdh | /**
* Create a protocol buffer bulk load request
*
* @return a bulk load request
*/
public static BulkLoadHFileRequest buildBulkLoadHFileRequest(final List<Pair<byte[], String>> familyPaths, final byte[] regionName, boolean assignSeqNum, final Token<?> userToken, final String bulkToken, boolean copyFiles, List<String> clusterIds, boolean replicate) {
RegionSpecifier region = RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName);
ClientProtos.DelegationToken protoDT = null;
if (userToken != null) {
protoDT = ClientProtos.DelegationToken.newBuilder().setIdentifier(UnsafeByteOperations.unsafeWrap(userToken.getIdentifier())).setPassword(UnsafeByteOperations.unsafeWrap(userToken.getPassword())).setKind(userToken.getKind().toString()).setService(userToken.getService().toString()).build();
}
List<ClientProtos.BulkLoadHFileRequest.FamilyPath> protoFamilyPaths = new ArrayList<>(familyPaths.size());
if (!familyPaths.isEmpty()) {
ClientProtos.BulkLoadHFileRequest.FamilyPath.Builder pathBuilder = BulkLoadHFileRequest.FamilyPath.newBuilder();
for (Pair<byte[], String> el : familyPaths) {
protoFamilyPaths.add(pathBuilder.setFamily(UnsafeByteOperations.unsafeWrap(el.getFirst())).setPath(el.getSecond()).build());
}
pathBuilder.clear();
}
BulkLoadHFileRequest.Builder request = ClientProtos.BulkLoadHFileRequest.newBuilder().setRegion(region).setAssignSeqNum(assignSeqNum).addAllFamilyPath(protoFamilyPaths);
if (userToken != null) {
request.setFsToken(protoDT);
}
if (bulkToken != null) {
request.setBulkToken(bulkToken);
}
request.setCopyFile(copyFiles);
if (clusterIds != null) {
request.addAllClusterIds(clusterIds);
}
request.setReplicate(replicate);
return request.build();
} | 3.26 |
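A hypothetical call showing the argument shape of `buildBulkLoadHFileRequest`; the family/path pair is invented, `regionName` is assumed to be in scope, and the generated `BulkLoadHFileRequest` and `Pair` types are assumed importable from the shaded protobuf and hbase-common modules:

```java
// Sketch only: one column family "cf" loading a single staged HFile.
List<Pair<byte[], String>> familyPaths =
    Collections.singletonList(new Pair<>(Bytes.toBytes("cf"), "/staging/cf/hfile-0001"));
BulkLoadHFileRequest request = RequestConverter.buildBulkLoadHFileRequest(
    familyPaths, regionName, true /* assignSeqNum */, null /* userToken */,
    null /* bulkToken */, false /* copyFiles */, null /* clusterIds */, false /* replicate */);
```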
hbase_RequestConverter_buildModifyNamespaceRequest_rdh | /**
* Creates a protocol buffer ModifyNamespaceRequest
*
* @return a ModifyNamespaceRequest
*/
public static ModifyNamespaceRequest buildModifyNamespaceRequest(final NamespaceDescriptor descriptor) {
ModifyNamespaceRequest.Builder builder = ModifyNamespaceRequest.newBuilder();
builder.setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(descriptor));
return builder.build();
} | 3.26 |
hbase_RequestConverter_buildSetSnapshotCleanupRequest_rdh | /**
* Creates SetSnapshotCleanupRequest for turning on/off auto snapshot cleanup
*
* @param enabled
* Set to <code>true</code> to enable, <code>false</code> to disable.
* @param synchronous
* If <code>true</code>, it waits until current snapshot cleanup is completed,
* if outstanding.
* @return a SetSnapshotCleanupRequest
*/
public static SetSnapshotCleanupRequest buildSetSnapshotCleanupRequest(final boolean enabled, final boolean synchronous) {
return SetSnapshotCleanupRequest.newBuilder().setEnabled(enabled).setSynchronous(synchronous).build();
} | 3.26 |
hbase_RequestConverter_buildNoDataRegionActions_rdh | /**
* Create a protocol buffer multirequest with NO data for a list of actions (data is carried
* otherwise than via protobuf). This means it just notes attributes, whether to write the WAL,
* etc., and the presence in protobuf serves as place holder for the data which is coming along
* otherwise. Note that Get is different. It does not contain 'data' and is always carried by
* protobuf. We return references to the data by adding them to the passed in <code>data</code>
* param.
* <p>
* Propagates Actions original index.
* <p>
* The passed in multiRequestBuilder will be populated with region actions.
*
* @param regionName
* The region name of the actions.
* @param actions
* The actions that are grouped by the same region name.
* @param cells
* Place to stuff references to actual data.
* @param multiRequestBuilder
* The multiRequestBuilder to be populated with region actions.
* @param regionActionBuilder
* regionActionBuilder to be used to build region action.
* @param actionBuilder
* actionBuilder to be used to build action.
* @param mutationBuilder
* mutationBuilder to be used to build mutation.
* @param nonceGroup
* nonceGroup to be applied.
* @param indexMap
* Map of created RegionAction to the original index for a
* RowMutations/CheckAndMutate within the original list of actions
*/
public static void buildNoDataRegionActions(final byte[] regionName, final Iterable<Action> actions, final List<CellScannable> cells, final MultiRequest.Builder multiRequestBuilder, final RegionAction.Builder regionActionBuilder, final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder, long nonceGroup, final Map<Integer, Integer> indexMap) throws IOException {
regionActionBuilder.clear();
RegionAction.Builder builder = getRegionActionBuilderWithRegion(regionActionBuilder, regionName);
ClientProtos.CoprocessorServiceCall.Builder cpBuilder = null;
boolean hasNonce = false;
List<Action> rowMutationsList = new ArrayList<>();
List<Action> checkAndMutates = new ArrayList<>();
for (Action action : actions) {
Row row = action.getAction();
actionBuilder.clear();
actionBuilder.setIndex(action.getOriginalIndex());
mutationBuilder.clear();
if (row instanceof Get) {
Get g = ((Get) (row));
builder.addAction(actionBuilder.setGet(ProtobufUtil.toGet(g)));
} else if (row instanceof Put) {
buildNoDataRegionAction(((Put) (row)), cells, builder, actionBuilder, mutationBuilder);
} else if (row instanceof Delete) {
buildNoDataRegionAction(((Delete) (row)), cells, builder, actionBuilder, mutationBuilder);
} else if (row instanceof Append) {
buildNoDataRegionAction(((Append) (row)), cells, action.getNonce(), builder, actionBuilder, mutationBuilder);
hasNonce = true;
} else if (row instanceof Increment) {
buildNoDataRegionAction(((Increment) (row)), cells, action.getNonce(), builder, actionBuilder, mutationBuilder);
hasNonce = true;
} else if (row instanceof RegionCoprocessorServiceExec) {
RegionCoprocessorServiceExec exec = ((RegionCoprocessorServiceExec) (row));
// DUMB COPY!!! FIX!!! Done to copy from c.g.p.ByteString to shaded ByteString.
ByteString value = UnsafeByteOperations.unsafeWrap(exec.getRequest().toByteArray());
if (cpBuilder == null) {
cpBuilder = ClientProtos.CoprocessorServiceCall.newBuilder();
} else {
cpBuilder.clear();
}
builder.addAction(actionBuilder.setServiceCall(cpBuilder.setRow(UnsafeByteOperations.unsafeWrap(exec.getRow())).setServiceName(exec.getMethod().getService().getFullName()).setMethodName(exec.getMethod().getName()).setRequest(value)));
} else if (row instanceof RowMutations) {
rowMutationsList.add(action);
} else if (row instanceof CheckAndMutate) {
checkAndMutates.add(action);
} else {
throw new DoNotRetryIOException("Multi doesn't support " + row.getClass().getName());
}
}
if (builder.getActionCount() > 0) {
multiRequestBuilder.addRegionAction(builder.build());
}
// Process RowMutations here. We can not process it in the big loop above because
// it will corrupt the sequence order maintained in cells.
// RowMutations is a set of Puts and/or Deletes all to be applied atomically
// on the one row. We do separate RegionAction for each RowMutations.
// We maintain a map to keep track of this RegionAction and the original Action index.
for (Action action : rowMutationsList) {
builder.clear();
getRegionActionBuilderWithRegion(builder, regionName);
boolean hasIncrementOrAppend = buildNoDataRegionAction(((RowMutations) (action.getAction())), cells, action.getNonce(), builder, actionBuilder, mutationBuilder);
if (hasIncrementOrAppend) {
hasNonce = true;
}
builder.setAtomic(true);
multiRequestBuilder.addRegionAction(builder.build());
// This rowMutations region action is at (multiRequestBuilder.getRegionActionCount() - 1)
// in the overall multiRequest.
indexMap.put(multiRequestBuilder.getRegionActionCount() - 1, action.getOriginalIndex());
}
// Process CheckAndMutate here. Similar to RowMutations, we do separate RegionAction for each
// CheckAndMutate and maintain a map to keep track of this RegionAction and the original
// Action index.
for (Action action : checkAndMutates) {
builder.clear();
getRegionActionBuilderWithRegion(builder, regionName);
CheckAndMutate cam = ((CheckAndMutate) (action.getAction()));
builder.setCondition(ProtobufUtil.toCondition(cam.getRow(), cam.getFamily(), cam.getQualifier(),
cam.getCompareOp(), cam.getValue(), cam.getFilter(), cam.getTimeRange()));
if (cam.getAction() instanceof Put) {
actionBuilder.clear();
mutationBuilder.clear();
buildNoDataRegionAction(((Put) (cam.getAction())), cells, builder, actionBuilder, mutationBuilder);
} else if (cam.getAction() instanceof Delete) {
actionBuilder.clear();
mutationBuilder.clear();
buildNoDataRegionAction(((Delete) (cam.getAction())), cells, builder, actionBuilder, mutationBuilder);
} else if (cam.getAction() instanceof Increment) {
actionBuilder.clear();
mutationBuilder.clear();
buildNoDataRegionAction(((Increment) (cam.getAction())), cells, action.getNonce(), builder, actionBuilder, mutationBuilder);
hasNonce = true;
} else if (cam.getAction() instanceof Append) {
actionBuilder.clear();
mutationBuilder.clear();
buildNoDataRegionAction(((Append) (cam.getAction())), cells, action.getNonce(), builder, actionBuilder, mutationBuilder);
hasNonce = true;
} else if (cam.getAction() instanceof RowMutations) {
boolean hasIncrementOrAppend = buildNoDataRegionAction(((RowMutations) (cam.getAction())), cells, action.getNonce(), builder, actionBuilder, mutationBuilder);
if (hasIncrementOrAppend) {
hasNonce = true;
}
builder.setAtomic(true);
} else {
throw new DoNotRetryIOException("CheckAndMutate doesn't support " + cam.getAction().getClass().getName());
}
multiRequestBuilder.addRegionAction(builder.build());
// This CheckAndMutate region action is at (multiRequestBuilder.getRegionActionCount() - 1)
// in the overall multiRequest.
indexMap.put(multiRequestBuilder.getRegionActionCount() - 1, action.getOriginalIndex());
}
if ((!multiRequestBuilder.hasNonceGroup()) && hasNonce) {
multiRequestBuilder.setNonceGroup(nonceGroup);
}
} | 3.26 |
hbase_RequestConverter_buildSetBalancerRunningRequest_rdh | /**
* Creates a protocol buffer SetBalancerRunningRequest
*
* @return a SetBalancerRunningRequest
*/
public static SetBalancerRunningRequest buildSetBalancerRunningRequest(boolean on, boolean synchronous) {
return SetBalancerRunningRequest.newBuilder().setOn(on).setSynchronous(synchronous).build();
} | 3.26 |
hbase_RequestConverter_buildStopServerRequest_rdh | /**
* Create a new StopServerRequest
*
* @param reason
* the reason to stop the server
* @return a StopServerRequest
*/
public static StopServerRequest buildStopServerRequest(final String reason) {
StopServerRequest.Builder builder = StopServerRequest.newBuilder();
builder.setReason(reason);
return builder.build();
} | 3.26 |
hbase_RequestConverter_buildAssignRegionRequest_rdh | /**
* Create a protocol buffer AssignRegionRequest
*
* @return an AssignRegionRequest
*/
public static AssignRegionRequest buildAssignRegionRequest(final byte[] regionName) {
AssignRegionRequest.Builder builder = AssignRegionRequest.newBuilder();
builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName));
return builder.build();
} | 3.26 |
hbase_RequestConverter_buildDeleteColumnRequest_rdh | /**
* Create a protocol buffer DeleteColumnRequest
*
* @return a DeleteColumnRequest
*/
public static DeleteColumnRequest buildDeleteColumnRequest(final TableName tableName, final byte[] columnName, final long nonceGroup, final long nonce) {
DeleteColumnRequest.Builder builder = DeleteColumnRequest.newBuilder();
builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
builder.setColumnName(UnsafeByteOperations.unsafeWrap(columnName));
builder.setNonceGroup(nonceGroup);
builder.setNonce(nonce);
return builder.build();
} | 3.26 |
hbase_RequestConverter_buildGetQuotaStatesRequest_rdh | /**
* Returns a {@link GetQuotaStatesRequest} object.
*/
public static GetQuotaStatesRequest buildGetQuotaStatesRequest() {
return GetQuotaStatesRequest.getDefaultInstance();
} | 3.26 |
hbase_RequestConverter_buildWarmupRegionRequest_rdh | /**
* Create a WarmupRegionRequest for a given region name
*
* @param regionInfo
* Region we are warming up
*/
public static WarmupRegionRequest buildWarmupRegionRequest(final RegionInfo regionInfo) {
WarmupRegionRequest.Builder builder = WarmupRegionRequest.newBuilder();
builder.setRegionInfo(ProtobufUtil.toRegionInfo(regionInfo));
return builder.build();
} | 3.26 |
hbase_RequestConverter_buildSetTableStateInMetaRequest_rdh | /**
* Creates a protocol buffer SetTableStateInMetaRequest
*
* @param state
* table state to update in Meta
* @return a SetTableStateInMetaRequest
*/
public static SetTableStateInMetaRequest buildSetTableStateInMetaRequest(final TableState state) {
return SetTableStateInMetaRequest.newBuilder().setTableState(state.convert()).setTableName(ProtobufUtil.toProtoTableName(state.getTableName())).build();
} | 3.26 |
hbase_RequestConverter_buildIsSplitOrMergeEnabledRequest_rdh | /**
* Creates a protocol buffer IsSplitOrMergeEnabledRequest
*
* @param switchType
* see {@link org.apache.hadoop.hbase.client.MasterSwitchType}
* @return an IsSplitOrMergeEnabledRequest
*/
public static IsSplitOrMergeEnabledRequest buildIsSplitOrMergeEnabledRequest(MasterSwitchType switchType) {
IsSplitOrMergeEnabledRequest.Builder builder = IsSplitOrMergeEnabledRequest.newBuilder();
builder.setSwitchType(convert(switchType));
return builder.build();
} | 3.26 |
hbase_RequestConverter_buildTruncateTableRequest_rdh | /**
* Creates a protocol buffer TruncateTableRequest
*
* @param tableName
* name of table to truncate
* @param preserveSplits
* True if the splits should be preserved
* @return a TruncateTableRequest
*/
public static TruncateTableRequest buildTruncateTableRequest(final TableName tableName, final boolean preserveSplits, final long nonceGroup, final long nonce) {
TruncateTableRequest.Builder builder = TruncateTableRequest.newBuilder();
builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
builder.setPreserveSplits(preserveSplits);
builder.setNonceGroup(nonceGroup);
builder.setNonce(nonce);
return builder.build();
} | 3.26 |
hbase_RequestConverter_buildCatalogScanRequest_rdh | /**
* Creates a request for running a catalog scan
*
* @return A {@link RunCatalogScanRequest}
*/
public static RunCatalogScanRequest buildCatalogScanRequest() {
return RunCatalogScanRequest.getDefaultInstance();
} | 3.26 |
hbase_RequestConverter_buildCompactRegionRequest_rdh | /**
* Create a CompactRegionRequest for a given region name
*
* @param regionName
* the name of the region to get info
* @param major
* indicator if it is a major compaction
* @return a CompactRegionRequest
*/
public static CompactRegionRequest buildCompactRegionRequest(byte[] regionName, boolean major, byte[] columnFamily) {
CompactRegionRequest.Builder builder = CompactRegionRequest.newBuilder();
RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName);
builder.setRegion(region);
builder.setMajor(major);
if (columnFamily != null) {
builder.setFamily(UnsafeByteOperations.unsafeWrap(columnFamily));
}
return builder.build();
} | 3.26 |
hbase_RequestConverter_buildMultiRequest_rdh | /**
* Create a protocol buffer MultiRequest for row mutations
*
* @return a multi request
*/
public static MultiRequest buildMultiRequest(final byte[] regionName, final RowMutations rowMutations, long nonceGroup, long nonce) throws IOException {
return buildMultiRequest(regionName, rowMutations, null, nonceGroup, nonce);
} | 3.26 |
hbase_RequestConverter_m2_rdh | /**
* Creates a request for querying the master whether the cleaner chore is enabled
*
* @return A {@link IsCleanerChoreEnabledRequest}
*/
public static IsCleanerChoreEnabledRequest m2() {
return IsCleanerChoreEnabledRequest.getDefaultInstance();
} | 3.26 |
hbase_RequestConverter_toAssignRegionsRequest_rdh | // HBCK2
public static AssignsRequest toAssignRegionsRequest(List<String> encodedRegionNames, boolean override) {
MasterProtos.AssignsRequest.Builder b = MasterProtos.AssignsRequest.newBuilder();
return b.addAllRegion(toEncodedRegionNameRegionSpecifiers(encodedRegionNames)).setOverride(override).build();
} | 3.26 |
hbase_RequestConverter_buildIsCatalogJanitorEnabledRequest_rdh | /**
* Creates a request for querying the master whether the catalog janitor is enabled
*
* @return A {@link IsCatalogJanitorEnabledRequest}
*/
public static IsCatalogJanitorEnabledRequest buildIsCatalogJanitorEnabledRequest() {
return IsCatalogJanitorEnabledRequest.getDefaultInstance();
} | 3.26 |
hbase_RequestConverter_buildClearSlowLogResponseRequest_rdh | /**
* Create a protocol buffer {@link ClearSlowLogResponseRequest}
*
* @return a protocol buffer ClearSlowLogResponseRequest
*/
public static ClearSlowLogResponseRequest buildClearSlowLogResponseRequest() {
return ClearSlowLogResponseRequest.newBuilder().build();
} | 3.26 |
hbase_RequestConverter_buildDisableTableRequest_rdh | /**
* Creates a protocol buffer DisableTableRequest
*
* @return a DisableTableRequest
*/
public static DisableTableRequest buildDisableTableRequest(final TableName tableName, final long nonceGroup, final long nonce) {
DisableTableRequest.Builder builder = DisableTableRequest.newBuilder();
builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
builder.setNonceGroup(nonceGroup);
builder.setNonce(nonce);
return builder.build();
} | 3.26 |
hbase_RequestConverter_buildAddColumnRequest_rdh | /**
* Create a protocol buffer AddColumnRequest
*
* @return an AddColumnRequest
*/
public static AddColumnRequest buildAddColumnRequest(final TableName tableName, final ColumnFamilyDescriptor column, final long nonceGroup, final long nonce) {
AddColumnRequest.Builder builder = AddColumnRequest.newBuilder();
builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
builder.setColumnFamilies(ProtobufUtil.toColumnFamilySchema(column));
builder.setNonceGroup(nonceGroup);
builder.setNonce(nonce);
return builder.build();
} | 3.26 |
hbase_RequestConverter_buildSetNormalizerRunningRequest_rdh | /**
* Creates a protocol buffer SetNormalizerRunningRequest
*
* @return a SetNormalizerRunningRequest
*/
public static SetNormalizerRunningRequest buildSetNormalizerRunningRequest(boolean on) {
return SetNormalizerRunningRequest.newBuilder().setOn(on).build();
} | 3.26 |
hbase_RequestConverter_buildIsSnapshotCleanupEnabledRequest_rdh | /**
* Creates IsSnapshotCleanupEnabledRequest to determine if auto snapshot cleanup based on TTL
* expiration is turned on
*/
public static IsSnapshotCleanupEnabledRequest buildIsSnapshotCleanupEnabledRequest() {
return IsSnapshotCleanupEnabledRequest.newBuilder().build();
} | 3.26 |
hbase_RequestConverter_buildDeleteTableRequest_rdh | /**
* Creates a protocol buffer DeleteTableRequest
*
* @return a DeleteTableRequest
*/
public static DeleteTableRequest buildDeleteTableRequest(final TableName tableName, final long nonceGroup, final long nonce) {
DeleteTableRequest.Builder builder = DeleteTableRequest.newBuilder();
builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
builder.setNonceGroup(nonceGroup);
builder.setNonce(nonce);
return builder.build();
} | 3.26 |
hbase_RequestConverter_buildIsBalancerEnabledRequest_rdh | /**
* Creates a protocol buffer IsBalancerEnabledRequest
*
* @return an IsBalancerEnabledRequest
*/
public static IsBalancerEnabledRequest buildIsBalancerEnabledRequest() {
return IsBalancerEnabledRequest.newBuilder().build();
} | 3.26 |
hbase_RequestConverter_buildUnassignRegionRequest_rdh | /**
* Creates a protocol buffer UnassignRegionRequest
*
* @return an UnassignRegionRequest
*/
public static UnassignRegionRequest buildUnassignRegionRequest(final byte[] regionName) {
UnassignRegionRequest.Builder builder = UnassignRegionRequest.newBuilder();
builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName));
return builder.build();
} | 3.26 |
hbase_RequestConverter_buildSetRegionStateInMetaRequest_rdh | /**
* Creates a protocol buffer SetRegionStateInMetaRequest
*
* @param nameOrEncodedName2State
* list of regions states to update in Meta
* @return a SetRegionStateInMetaRequest
*/
public static SetRegionStateInMetaRequest buildSetRegionStateInMetaRequest(Map<String, RegionState.State> nameOrEncodedName2State) {
SetRegionStateInMetaRequest.Builder builder = SetRegionStateInMetaRequest.newBuilder();
nameOrEncodedName2State.forEach((name, state) -> {
byte[] bytes = Bytes.toBytes(name);
RegionSpecifier spec;
if (RegionInfo.isEncodedRegionName(bytes)) {
spec = buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME, bytes);
} else {
spec = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, bytes);
}
builder.addStates(RegionSpecifierAndState.newBuilder().setRegionSpecifier(spec).setState(state.convert()).build());
});
return builder.build();
} | 3.26 |
hbase_RequestConverter_buildGetRegionInfoRequest_rdh | /**
* Create a protocol buffer GetRegionInfoRequest.
*
* @param regionName
* the name of the region to get info
* @param includeCompactionState
* indicate if the compaction state is requested
* @param includeBestSplitRow
* indicate if the bestSplitRow is requested
* @return protocol buffer GetRegionInfoRequest
*/
public static GetRegionInfoRequest buildGetRegionInfoRequest(final byte[] regionName, final boolean includeCompactionState, boolean includeBestSplitRow) {
GetRegionInfoRequest.Builder builder = GetRegionInfoRequest.newBuilder();
RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName);
builder.setRegion(region);
if (includeCompactionState) {
builder.setCompactionState(includeCompactionState);
}
if (includeBestSplitRow) {
builder.setBestSplitRow(includeBestSplitRow);
}
return builder.build();
} | 3.26 |
hbase_RequestConverter_buildMoveRegionRequest_rdh | /**
* Create a protocol buffer MoveRegionRequest
*
* @return A MoveRegionRequest
*/
public static MoveRegionRequest buildMoveRegionRequest(byte[] encodedRegionName, ServerName destServerName) {
MoveRegionRequest.Builder builder = MoveRegionRequest.newBuilder();
builder.setRegion(buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME, encodedRegionName));
if (destServerName != null) {
builder.setDestServerName(ProtobufUtil.toServerName(destServerName));
}
return builder.build();
} | 3.26 |
hbase_RequestConverter_buildClearRegionBlockCacheRequest_rdh | /**
* Creates a protocol buffer ClearRegionBlockCacheRequest
*
* @return a ClearRegionBlockCacheRequest
*/
public static ClearRegionBlockCacheRequest buildClearRegionBlockCacheRequest(List<RegionInfo> hris) {
ClearRegionBlockCacheRequest.Builder builder = ClearRegionBlockCacheRequest.newBuilder();
hris.forEach(hri -> builder.addRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME, hri.getRegionName())));
return builder.build();
} | 3.26 |
hbase_RequestConverter_buildEnableTableRequest_rdh | /**
* Creates a protocol buffer EnableTableRequest
*
* @return an EnableTableRequest
*/
public static EnableTableRequest buildEnableTableRequest(final TableName tableName, final long nonceGroup, final long nonce) {
EnableTableRequest.Builder v77 = EnableTableRequest.newBuilder();
v77.setTableName(ProtobufUtil.toProtoTableName(tableName));
v77.setNonceGroup(nonceGroup);
v77.setNonce(nonce);
return v77.build();
} | 3.26 |
hbase_RequestConverter_buildGetSpaceQuotaSnapshotsRequest_rdh | /**
* Returns a {@link GetSpaceQuotaSnapshotsRequest} object.
*/
public static GetSpaceQuotaSnapshotsRequest buildGetSpaceQuotaSnapshotsRequest() {
return GetSpaceQuotaSnapshotsRequest.getDefaultInstance();
} | 3.26 |
hbase_RequestConverter_buildOpenRegionRequest_rdh | /**
* Create a protocol buffer OpenRegionRequest for a given region
*
* @param server
* the serverName for the RPC
* @param region
* the region to open
* @param favoredNodes
* a list of favored nodes
* @return a protocol buffer OpenRegionRequest
*/
public static OpenRegionRequest buildOpenRegionRequest(ServerName server, final RegionInfo region, List<ServerName> favoredNodes) {
OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder();
builder.addOpenInfo(buildRegionOpenInfo(region, favoredNodes, -1L));
if (server != null) {
builder.setServerStartCode(server.getStartcode());
}
builder.setMasterSystemTime(EnvironmentEdgeManager.currentTime());
return builder.build();
} | 3.26 |
hbase_RequestConverter_buildRunCleanerChoreRequest_rdh | /**
* Creates a request for running cleaner chore
*
* @return A {@link RunCleanerChoreRequest}
*/
public static RunCleanerChoreRequest buildRunCleanerChoreRequest() {
return RunCleanerChoreRequest.getDefaultInstance();
} | 3.26 |
hbase_RequestConverter_m1_rdh | /**
* Create a new GetServerInfoRequest
*
* @return a GetServerInfoRequest
*/
public static GetServerInfoRequest m1() {
return GetServerInfoRequest.getDefaultInstance();
} | 3.26 |
hbase_RequestConverter_buildIsNormalizerEnabledRequest_rdh | /**
* Creates a protocol buffer IsNormalizerEnabledRequest
*
* @return an IsNormalizerEnabledRequest
*/
public static IsNormalizerEnabledRequest buildIsNormalizerEnabledRequest() {
return IsNormalizerEnabledRequest.newBuilder().build();
} | 3.26 |
hbase_RequestConverter_buildNormalizeRequest_rdh | /**
* Creates a protocol buffer NormalizeRequest
*
* @return a NormalizeRequest
*/
public static NormalizeRequest buildNormalizeRequest(NormalizeTableFilterParams ntfp) {
final NormalizeRequest.Builder builder = NormalizeRequest.newBuilder();
if (ntfp.getTableNames() != null) {
builder.addAllTableNames(ProtobufUtil.toProtoTableNameList(ntfp.getTableNames()));
}
if (ntfp.getRegex() != null) {
builder.setRegex(ntfp.getRegex());
}
if (ntfp.getNamespace() != null) {
builder.setNamespace(ntfp.getNamespace());
}
return builder.build();
} | 3.26 |
hbase_RequestConverter_buildMutateRequest_rdh | /**
* Create a protocol buffer MutateRequest for a delete
*
* @return a mutate request
*/
public static MutateRequest buildMutateRequest(final byte[] regionName, final Delete delete) throws IOException {
MutateRequest.Builder builder = MutateRequest.newBuilder();
RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName);
builder.setRegion(region);
builder.setMutation(ProtobufUtil.toMutation(MutationType.DELETE, delete, MutationProto.newBuilder()));
return builder.build();
} | 3.26 |
hbase_RequestConverter_buildIsMasterRunningRequest_rdh | /**
* Creates a protocol buffer IsMasterRunningRequest
*
* @return an IsMasterRunningRequest
*/
public static IsMasterRunningRequest buildIsMasterRunningRequest() {
return IsMasterRunningRequest.newBuilder().build();
} | 3.26 |
hbase_RequestConverter_buildGetRegionLoadRequest_rdh | /**
* Create a protocol buffer GetRegionLoadRequest for all regions/regions of a table.
*
* @param tableName
* the table for which regionLoad should be obtained from RS
* @return a protocol buffer GetRegionLoadRequest
*/
public static GetRegionLoadRequest buildGetRegionLoadRequest(final TableName tableName) {
GetRegionLoadRequest.Builder builder = GetRegionLoadRequest.newBuilder();
if (tableName != null) {
builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
}
return builder.build();
} | 3.26 |
hbase_RequestConverter_buildRegionSpecifier_rdh | // End utilities for Admin
/**
* Convert a byte array to a protocol buffer RegionSpecifier
*
* @param type
* the region specifier type
* @param value
* the region specifier byte array value
* @return a protocol buffer RegionSpecifier
*/
public static RegionSpecifier buildRegionSpecifier(final RegionSpecifierType type, final byte[] value) {
RegionSpecifier.Builder regionBuilder = RegionSpecifier.newBuilder();
regionBuilder.setValue(UnsafeByteOperations.unsafeWrap(value));
regionBuilder.setType(type);
return regionBuilder.build();
} | 3.26 |
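A fragment showing both specifier flavors this helper produces, as used elsewhere in this class (full region name for e.g. compact and flush requests, encoded name for e.g. move requests); `regionName` and `encodedRegionName` are assumed byte arrays in scope:

```java
RegionSpecifier byName =
    buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName);
RegionSpecifier byEncodedName =
    buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME, encodedRegionName);
```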
hbase_RequestConverter_buildSlowLogResponseRequest_rdh | /**
* Build RPC request payload for getLogEntries
*
* @param filterParams
* map of filter params
* @param limit
* limit for no of records that server returns
* @param logType
* type of the log records
* @return request payload {@link HBaseProtos.LogRequest}
*/
public static LogRequest buildSlowLogResponseRequest(final Map<String, Object> filterParams, final int limit, final String logType) {
SlowLogResponseRequest.Builder builder = SlowLogResponseRequest.newBuilder();
builder.setLimit(limit);
if (logType.equals("SLOW_LOG")) {
builder.setLogType(LogType.SLOW_LOG);
} else if (logType.equals("LARGE_LOG")) {
builder.setLogType(LogType.LARGE_LOG);
}
boolean filterByAnd = false;
if (MapUtils.isNotEmpty(filterParams)) {
if (filterParams.containsKey("clientAddress")) {
final String clientAddress = ((String) (filterParams.get("clientAddress")));
if (StringUtils.isNotEmpty(clientAddress)) {
builder.setClientAddress(clientAddress);
}
}
if (filterParams.containsKey("regionName")) {
final String regionName = ((String) (filterParams.get("regionName")));
if (StringUtils.isNotEmpty(regionName)) {
builder.setRegionName(regionName);
}
}
if (filterParams.containsKey("tableName")) {
final String tableName = ((String) (filterParams.get("tableName")));
if (StringUtils.isNotEmpty(tableName)) {
builder.setTableName(tableName);
}
}
if (filterParams.containsKey("userName")) {
final String userName = ((String) (filterParams.get("userName")));
if (StringUtils.isNotEmpty(userName)) {
builder.setUserName(userName);
}
}
if (filterParams.containsKey("filterByOperator")) {
final String filterByOperator = ((String) (filterParams.get("filterByOperator")));
if (StringUtils.isNotEmpty(filterByOperator)) {
if (filterByOperator.toUpperCase().equals("AND")) {
filterByAnd = true;
}
}
}
}
if (filterByAnd) {
builder.setFilterByOperator(FilterByOperator.AND);
} else {
builder.setFilterByOperator(FilterByOperator.OR);
}
SlowLogResponseRequest slowLogResponseRequest = builder.build();
return HBaseProtos.LogRequest.newBuilder().setLogClassName(slowLogResponseRequest.getClass().getName()).setLogMessage(slowLogResponseRequest.toByteString()).build();
} | 3.26 |
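A sketch of the `filterParams` map this method consumes; the keys mirror the ones checked above ("clientAddress", "regionName", "tableName", "userName", "filterByOperator") and the values are invented. `Map`/`HashMap` imports from `java.util` are assumed:

```java
Map<String, Object> filterParams = new HashMap<>();
filterParams.put("tableName", "t1");
filterParams.put("userName", "alice");
filterParams.put("filterByOperator", "AND"); // AND the filters together; anything else means OR
// Ask for at most 100 slow-log records matching all filters.
LogRequest request = RequestConverter.buildSlowLogResponseRequest(filterParams, 100, "SLOW_LOG");
```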
hbase_RequestConverter_buildGetNamespaceDescriptorRequest_rdh | /**
* Creates a protocol buffer GetNamespaceDescriptorRequest
*
* @return a GetNamespaceDescriptorRequest
*/
public static GetNamespaceDescriptorRequest buildGetNamespaceDescriptorRequest(final String name) {
GetNamespaceDescriptorRequest.Builder builder = GetNamespaceDescriptorRequest.newBuilder();
builder.setNamespaceName(name);
return builder.build();
} | 3.26 |
hbase_RequestConverter_buildRegionOpenInfo_rdh | /**
* Create a RegionOpenInfo based on given region info and version of offline node
*/
public static RegionOpenInfo buildRegionOpenInfo(RegionInfo region, List<ServerName> favoredNodes, long openProcId) {
RegionOpenInfo.Builder builder = RegionOpenInfo.newBuilder();
builder.setRegion(ProtobufUtil.toRegionInfo(region));
if (favoredNodes != null) {
for (ServerName server : favoredNodes) {
builder.addFavoredNodes(ProtobufUtil.toServerName(server));
}
}
builder.setOpenProcId(openProcId);
return builder.build();
} | 3.26 |
hbase_RequestConverter_buildNoDataRegionAction_rdh | /**
* Returns whether or not the rowMutations has a Increment or Append
*/
private static boolean buildNoDataRegionAction(final RowMutations rowMutations, final List<CellScannable> cells,
long nonce, final RegionAction.Builder regionActionBuilder, final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) throws IOException {
boolean ret = false;
for (Mutation mutation : rowMutations.getMutations()) {
mutationBuilder.clear();
MutationProto mp;
if ((mutation instanceof Increment) || (mutation instanceof Append)) {
mp = ProtobufUtil.toMutationNoData(getMutationType(mutation), mutation, mutationBuilder, nonce);
ret = true;
} else {
mp = ProtobufUtil.toMutationNoData(getMutationType(mutation), mutation, mutationBuilder);
}
cells.add(mutation);
actionBuilder.clear();
regionActionBuilder.addAction(actionBuilder.setMutation(mp).build());
}
return ret;
} | 3.26 |
hbase_RequestConverter_buildSetSplitOrMergeEnabledRequest_rdh | /**
* Creates a protocol buffer SetSplitOrMergeEnabledRequest
*
* @param enabled
* switch is enabled or not
* @param synchronous
* set switch sync?
* @param switchTypes
* see {@link org.apache.hadoop.hbase.client.MasterSwitchType}, it is a list.
* @return a SetSplitOrMergeEnabledRequest
*/
public static SetSplitOrMergeEnabledRequest buildSetSplitOrMergeEnabledRequest(boolean enabled, boolean synchronous, MasterSwitchType... switchTypes) {
SetSplitOrMergeEnabledRequest.Builder builder = SetSplitOrMergeEnabledRequest.newBuilder();
builder.setEnabled(enabled);
builder.setSynchronous(synchronous);
for (MasterSwitchType switchType : switchTypes) {
builder.addSwitchTypes(convert(switchType));
}
return builder.build();
} | 3.26 |
hbase_RequestConverter_buildSetCleanerChoreRunningRequest_rdh | /**
* Creates a request for enabling/disabling the cleaner chore
*
* @return A {@link SetCleanerChoreRunningRequest}
*/
public static SetCleanerChoreRunningRequest buildSetCleanerChoreRunningRequest(boolean on) {
return SetCleanerChoreRunningRequest.newBuilder().setOn(on).build();
} | 3.26 |
hbase_RequestConverter_buildGetTableDescriptorsRequest_rdh | /**
* Creates a protocol buffer GetTableDescriptorsRequest for a single table
*
* @param tableName
* the table name
* @return a GetTableDescriptorsRequest
*/
public static GetTableDescriptorsRequest buildGetTableDescriptorsRequest(final TableName tableName) {
return GetTableDescriptorsRequest.newBuilder().addTableNames(ProtobufUtil.toProtoTableName(tableName)).build();
} | 3.26 |
hbase_RequestConverter_buildCreateNamespaceRequest_rdh | /**
* Creates a protocol buffer CreateNamespaceRequest
*
* @return a CreateNamespaceRequest
*/
public static CreateNamespaceRequest buildCreateNamespaceRequest(final NamespaceDescriptor descriptor) {
CreateNamespaceRequest.Builder builder = CreateNamespaceRequest.newBuilder();
builder.setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(descriptor));
return builder.build();
} | 3.26 |
hbase_RequestConverter_buildModifyColumnRequest_rdh | /**
* Create a protocol buffer ModifyColumnRequest
*
* @return an ModifyColumnRequest
*/
public static ModifyColumnRequest buildModifyColumnRequest(final TableName tableName, final ColumnFamilyDescriptor column, final long nonceGroup, final long nonce) {
ModifyColumnRequest.Builder builder = ModifyColumnRequest.newBuilder();
builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
builder.setColumnFamilies(ProtobufUtil.toColumnFamilySchema(column));
builder.setNonceGroup(nonceGroup);
builder.setNonce(nonce);
return builder.build();
} | 3.26 |
hbase_RequestConverter_buildCreateTableRequest_rdh | /**
* Creates a protocol buffer CreateTableRequest
*
* @return a CreateTableRequest
*/
public static CreateTableRequest buildCreateTableRequest(final TableDescriptor tableDescriptor, final byte[][] splitKeys, final long nonceGroup, final long nonce) {
CreateTableRequest.Builder builder = CreateTableRequest.newBuilder();
builder.setTableSchema(ProtobufUtil.toTableSchema(tableDescriptor));
if (splitKeys != null) {
for (byte[] key : splitKeys) {
builder.addSplitKeys(UnsafeByteOperations.unsafeWrap(key));
}
}
builder.setNonceGroup(nonceGroup);
builder.setNonce(nonce);
return builder.build();
} | 3.26 |
hbase_RequestConverter_buildDeleteNamespaceRequest_rdh | /**
* Creates a protocol buffer DeleteNamespaceRequest
*
* @return a DeleteNamespaceRequest
*/
public static DeleteNamespaceRequest buildDeleteNamespaceRequest(final String name) {
DeleteNamespaceRequest.Builder builder = DeleteNamespaceRequest.newBuilder();
builder.setNamespaceName(name);
return builder.build();
} | 3.26 |
hbase_RequestConverter_buildFlushRegionRequest_rdh | /**
* Create a protocol buffer FlushRegionRequest for a given region name
*
* @param regionName
* the name of the region to get info
* @param columnFamily
* column family within a region
* @return a protocol buffer FlushRegionRequest
*/
public static FlushRegionRequest buildFlushRegionRequest(final byte[] regionName, byte[] columnFamily, boolean writeFlushWALMarker) {
FlushRegionRequest.Builder v51 = FlushRegionRequest.newBuilder();
RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName);
v51.setRegion(region);
v51.setWriteFlushWalMarker(writeFlushWALMarker);
if (columnFamily != null) {
v51.setFamily(UnsafeByteOperations.unsafeWrap(columnFamily));
}
return v51.build();
} | 3.26 |
hbase_RequestConverter_buildGetLastFlushedSequenceIdRequest_rdh | /**
* Creates a request for querying the master the last flushed sequence Id for a region
*
* @return A {@link GetLastFlushedSequenceIdRequest}
*/
public static GetLastFlushedSequenceIdRequest buildGetLastFlushedSequenceIdRequest(byte[] regionName) {
return GetLastFlushedSequenceIdRequest.newBuilder().setRegionName(UnsafeByteOperations.unsafeWrap(regionName)).build();
} | 3.26 |
hbase_RequestConverter_buildUpdateFavoredNodesRequest_rdh | /**
* Create a protocol buffer UpdateFavoredNodesRequest to update a list of favored node mappings
*
* @param updateRegionInfos
* a list of favored node mappings
* @return a protocol buffer UpdateFavoredNodesRequest
*/
public static UpdateFavoredNodesRequest buildUpdateFavoredNodesRequest(final List<Pair<RegionInfo, List<ServerName>>> updateRegionInfos) {
UpdateFavoredNodesRequest.Builder ubuilder = UpdateFavoredNodesRequest.newBuilder();
if ((updateRegionInfos != null) && (!updateRegionInfos.isEmpty())) {
RegionUpdateInfo.Builder builder = RegionUpdateInfo.newBuilder();
for (Pair<RegionInfo, List<ServerName>> pair : updateRegionInfos) {
builder.setRegion(ProtobufUtil.toRegionInfo(pair.getFirst()));
for (ServerName server : pair.getSecond()) {
builder.addFavoredNodes(ProtobufUtil.toServerName(server));
}
ubuilder.addUpdateInfo(builder.build());
builder.clear();
}
}
return ubuilder.build();
} | 3.26 |
hbase_RequestConverter_buildEnableCatalogJanitorRequest_rdh | /**
* Creates a request for enabling/disabling the catalog janitor
*
* @return A {@link EnableCatalogJanitorRequest}
*/
public static EnableCatalogJanitorRequest buildEnableCatalogJanitorRequest(boolean enable) {
return EnableCatalogJanitorRequest.newBuilder().setEnable(enable).build();
} | 3.26 |
hbase_RequestConverter_buildGetTableNamesRequest_rdh | /**
* Creates a protocol buffer GetTableNamesRequest
*
* @param pattern
* The compiled regular expression to match against
* @param includeSysTables
* False to match only against userspace tables
* @return a GetTableNamesRequest
*/
public static GetTableNamesRequest buildGetTableNamesRequest(final Pattern pattern, boolean includeSysTables) {
GetTableNamesRequest.Builder builder = GetTableNamesRequest.newBuilder();
if (pattern != null) {
builder.setRegex(pattern.toString());
}
builder.setIncludeSysTables(includeSysTables);
return builder.build();
} | 3.26 |
hbase_RequestConverter_buildScanRequest_rdh | /**
* Create a protocol buffer ScanRequest for a scanner id
*
* @return a scan request
*/
public static ScanRequest buildScanRequest(long scannerId, int numberOfRows, boolean closeScanner, long nextCallSeq, boolean trackMetrics, boolean renew, int limitOfRows) {
ScanRequest.Builder builder = ScanRequest.newBuilder();
builder.setNumberOfRows(numberOfRows);
builder.setCloseScanner(closeScanner);
builder.setScannerId(scannerId);
builder.setNextCallSeq(nextCallSeq);
builder.setClientHandlesPartials(true);
builder.setClientHandlesHeartbeats(true);
builder.setTrackScanMetrics(trackMetrics);
builder.setRenew(renew);
if (limitOfRows > 0) {
builder.setLimitOfRows(limitOfRows);
}
return builder.build();
} | 3.26 |
hbase_RequestConverter_buildGetRequest_rdh | // Start utilities for Client
/**
* Create a protocol buffer GetRequest for a client Get
*
* @param regionName
* the name of the region to get
* @param get
* the client Get
* @return a protocol buffer GetRequest
*/
public static GetRequest buildGetRequest(final byte[] regionName, final Get get) throws IOException {
GetRequest.Builder builder = GetRequest.newBuilder();
RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName);
builder.setRegion(region);
builder.setGet(ProtobufUtil.toGet(get));
return builder.build();
} | 3.26 |
hbase_MemorySizeUtil_getGlobalMemStoreSize_rdh | /**
* Returns Pair of global memstore size and memory type(ie. on heap or off heap).
*/
public static Pair<Long, MemoryType> getGlobalMemStoreSize(Configuration conf) {
long offheapMSGlobal = conf.getLong(OFFHEAP_MEMSTORE_SIZE_KEY, 0);// Size in MBs
if (offheapMSGlobal > 0) {
// Off heap memstore size has no relevance when MSLAB is turned OFF. We will go with making
// this entire size split into chunks and pooling them in the MemStoreLAB pool. We don't want to
// create so many on demand off heap chunks. In fact when this off heap size is configured, we
// will go with 100% of this size as the pool size
if (MemStoreLAB.isEnabled(conf)) {
// We are in offheap Memstore use
long globalMemStoreLimit = ((long) ((offheapMSGlobal * 1024) * 1024));// Size in bytes
return new Pair<>(globalMemStoreLimit, MemoryType.NON_HEAP);
} else {
// Off heap max memstore size is configured with turning off MSLAB. It makes no sense. Do a
// warn log and go with on heap memstore percentage. By default it will be 40% of Xmx
LOG.warn((((((("There is no relevance of configuring '" + OFFHEAP_MEMSTORE_SIZE_KEY) + "' when '")
+ MemStoreLAB.USEMSLAB_KEY) + "' is turned off.") + " Going with on heap global memstore size ('") + MEMSTORE_SIZE_KEY) + "')");
}
}
return new Pair<>(getOnheapGlobalMemStoreSize(conf), MemoryType.HEAP);
} | 3.26 |
hbase_MemorySizeUtil_getOnheapGlobalMemStoreSize_rdh | /**
* Returns the onheap global memstore limit based on the config
* 'hbase.regionserver.global.memstore.size'.
*
* @return the onheap global memstore limit
*/
public static long getOnheapGlobalMemStoreSize(Configuration conf) {
long max = -1L;
final MemoryUsage usage = safeGetHeapMemoryUsage();
if (usage != null) {
max = usage.getMax();
}
float globalMemStorePercent = getGlobalMemStoreHeapPercent(conf, true);
return ((long) (max * globalMemStorePercent));
} | 3.26 |
hbase_MemorySizeUtil_getBlockCacheHeapPercent_rdh | /**
* Retrieve configured size for on heap block cache as percentage of total heap.
*/
public static float getBlockCacheHeapPercent(final Configuration conf) {
// L1 block cache is always on heap
float l1CachePercent = conf.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT);
return l1CachePercent;
} | 3.26 |
hbase_MemorySizeUtil_getGlobalMemStoreHeapLowerMark_rdh | /**
* Retrieve configured size for global memstore lower water mark as fraction of global memstore
* size.
*/
public static float getGlobalMemStoreHeapLowerMark(final Configuration conf, boolean honorOldConfig) {
String lowMarkPercentStr = conf.get(MEMSTORE_SIZE_LOWER_LIMIT_KEY);
if (lowMarkPercentStr != null) {
float lowMarkPercent = Float.parseFloat(lowMarkPercentStr);
if (lowMarkPercent > 1.0F) {
LOG.error(((("Bad configuration value for " + MEMSTORE_SIZE_LOWER_LIMIT_KEY) + ": ") + lowMarkPercent) + ". Using 1.0f instead.");
lowMarkPercent = 1.0F;
}
return lowMarkPercent;
}
if (!honorOldConfig) {
  return DEFAULT_MEMSTORE_SIZE_LOWER_LIMIT;
}
String lowerWaterMarkOldValStr = conf.get(MEMSTORE_SIZE_LOWER_LIMIT_OLD_KEY);
if (lowerWaterMarkOldValStr != null) {
LOG.warn((MEMSTORE_SIZE_LOWER_LIMIT_OLD_KEY + " is deprecated. Instead use ") + MEMSTORE_SIZE_LOWER_LIMIT_KEY);
float lowerWaterMarkOldVal = Float.parseFloat(lowerWaterMarkOldValStr);
float globalMemStorePercent = getGlobalMemStoreHeapPercent(conf, false);
if (lowerWaterMarkOldVal > globalMemStorePercent) {
  lowerWaterMarkOldVal = globalMemStorePercent;
  LOG.error("Value of " + MEMSTORE_SIZE_LOWER_LIMIT_OLD_KEY + " (" + lowerWaterMarkOldVal
    + ") is greater than global memstore limit (" + globalMemStorePercent + ") set by "
    + MEMSTORE_SIZE_KEY + "/" + MEMSTORE_SIZE_OLD_KEY + ". Setting memstore lower limit to "
    + globalMemStorePercent);
}
return lowerWaterMarkOldVal / globalMemStorePercent;
}
return DEFAULT_MEMSTORE_SIZE_LOWER_LIMIT;
} | 3.26 |
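A sketch of the old-key conversion path (the key names are assumptions matching the constants above): the deprecated key expressed the lower limit as a fraction of the whole heap, so it is divided by the global memstore fraction to yield a fraction of the memstore size.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.util.MemorySizeUtil;

public class LowerMarkSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    // Deprecated key: lower limit as a fraction of the heap, not of the memstore size.
    conf.setFloat("hbase.regionserver.global.memstore.lowerLimit", 0.36f);
    float lowerMark = MemorySizeUtil.getGlobalMemStoreHeapLowerMark(conf, true);
    System.out.println(lowerMark); // 0.36 / 0.4 = 0.9 of the global memstore size
  }
}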
hbase_MemorySizeUtil_checkForClusterFreeHeapMemoryLimit_rdh | /**
* Checks whether enough heap memory is left after the portions reserved for the memstore and
* block cache. We need at least 20% of the heap left for other RS functions.
*/
public static void checkForClusterFreeHeapMemoryLimit(Configuration conf) {
if (conf.get(MEMSTORE_SIZE_OLD_KEY) != null) {
LOG.warn((MEMSTORE_SIZE_OLD_KEY + " is deprecated by ") + MEMSTORE_SIZE_KEY);
}
float globalMemstoreSize = getGlobalMemStoreHeapPercent(conf, false);
int gml = ((int) (globalMemstoreSize * CONVERT_TO_PERCENTAGE));
float blockCacheUpperLimit = getBlockCacheHeapPercent(conf);
int bcul = ((int) (blockCacheUpperLimit * CONVERT_TO_PERCENTAGE));
if ((CONVERT_TO_PERCENTAGE - (gml + bcul))
  < (int) (CONVERT_TO_PERCENTAGE * HConstants.HBASE_CLUSTER_MINIMUM_MEMORY_THRESHOLD)) {
  throw new RuntimeException("Current heap configuration for MemStore and BlockCache exceeds "
    + "the threshold required for successful cluster operation. "
    + "The combined value cannot exceed 0.8. Please check "
    + "the settings for hbase.regionserver.global.memstore.size and "
    + "hfile.block.cache.size in your configuration. "
    + "hbase.regionserver.global.memstore.size is " + globalMemstoreSize
    + " hfile.block.cache.size is " + blockCacheUpperLimit);
}
}
} | 3.26 |
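A worked example of the check, assuming HBASE_CLUSTER_MINIMUM_MEMORY_THRESHOLD is 0.2 (at least 20% of the heap must stay free): 0.45 + 0.40 = 0.85 > 0.8, so the check throws.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.util.MemorySizeUtil;

public class FreeHeapCheckSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.45f);
    conf.setFloat("hfile.block.cache.size", 0.4f);
    // 0.45 + 0.40 leaves only 15% of the heap free, below the 20% minimum.
    MemorySizeUtil.checkForClusterFreeHeapMemoryLimit(conf); // throws RuntimeException
  }
}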
hbase_HbckTableInfo_getTableDescriptor_rdh | /**
* Returns the descriptor common to all regions, or null if there are none or multiple!
*/
TableDescriptor getTableDescriptor() {
if (htds.size() == 1) {
return (TableDescriptor) htds.toArray()[0];
} else {
LOG.error("None/Multiple table descriptors found for table '" + tableName + "' regions: " + htds);
}
return null;
} | 3.26 |
hbase_HbckTableInfo_sidelineBigOverlaps_rdh | /**
* Sideline some regions in a big overlap group so that it will have fewer regions, and it is
* easier to merge them later on.
*
* @param bigOverlap
* the overlapped group with regions more than maxMerge
*/
void sidelineBigOverlaps(Collection<HbckRegionInfo> bigOverlap) throws IOException {
int overlapsToSideline = bigOverlap.size() - hbck.getMaxMerge();
if (overlapsToSideline > hbck.getMaxOverlapsToSideline()) {
overlapsToSideline = hbck.getMaxOverlapsToSideline();
}
List<HbckRegionInfo> regionsToSideline =
  RegionSplitCalculator.findBigRanges(bigOverlap, overlapsToSideline);
FileSystem fs = FileSystem.get(conf);
for (HbckRegionInfo regionToSideline : regionsToSideline) {
try {
LOG.info("Closing region: " + regionToSideline);
hbck.closeRegion(regionToSideline);
} catch (IOException ioe) {
LOG.warn(("Was unable to close region " + regionToSideline) + ". Just continuing... ", ioe);
} catch (InterruptedException e) {
LOG.warn(("Was unable to close region " + regionToSideline) + ". Just continuing... ", e);}
try
{
LOG.info("Offlining region: " + regionToSideline);
hbck.offline(regionToSideline.getRegionName());
} catch (IOException ioe) {
LOG.warn(("Unable to offline region from master: " + regionToSideline) + ". Just continuing... ", ioe);
}
LOG.info("Before sideline big overlapped region: " + regionToSideline.toString());
Path sidelineRegionDir = hbck.sidelineRegionDir(fs, TO_BE_LOADED, regionToSideline);
if (sidelineRegionDir != null) {
sidelinedRegions.put(sidelineRegionDir, regionToSideline);
LOG.info((("After sidelined big overlapped region: " + regionToSideline.getRegionNameAsString()) + " to ") + sidelineRegionDir.toString());
hbck.fixes++;
}
}
} | 3.26 |
hbase_HbckTableInfo_handleOverlapGroup_rdh | /**
* This takes a set of overlapping regions and merges them into a single region. It covers cases
* like degenerate regions, shared start keys, general overlaps, duplicate ranges, and partially
* overlapping regions. Cases handled: clean regions that overlap, and regions with only .oldlogs
* (where the start/stop range can't be found or figured out). This is basically threadsafe,
* except for the fixer increment in mergeOverlaps.
*/
@Override
public void handleOverlapGroup(Collection<HbckRegionInfo> overlap) throws IOException {
Preconditions.checkNotNull(overlap);
Preconditions.checkArgument(overlap.size() > 0);
if (!this.fixOverlaps) {
LOG.warn("Not attempting to repair overlaps.");
return;
}
if (overlap.size() > hbck.getMaxMerge()) {
LOG.warn((((("Overlap group has " + overlap.size()) + " overlapping ") +
"regions which is greater than ") + hbck.getMaxMerge()) + ", the max number of regions to merge");
if (hbck.shouldSidelineBigOverlaps()) {
// we only sideline big overlapped groups that exceed the max number of regions to merge
sidelineBigOverlaps(overlap);
}
return;
}
if (hbck.shouldRemoveParents()) {
removeParentsAndFixSplits(overlap);
}
mergeOverlaps(overlap);
} | 3.26 |
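A minimal, self-contained sketch of the dispatch rule above; the limit of 5 is an assumption matching what I understand to be HBaseFsck's default for hbase.hbck.max.merge:

import java.util.List;

public class OverlapDispatchSketch {
  public static void main(String[] args) {
    int maxMerge = 5; // assumed default of hbase.hbck.max.merge
    List<String> overlapGroup = List.of("r1", "r2", "r3", "r4", "r5", "r6");
    if (overlapGroup.size() > maxMerge) {
      System.out.println("group too big to merge safely: sideline some regions instead");
    } else {
      System.out.println("merge the overlapping regions into one");
    }
  }
}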
hbase_HbckTableInfo_handleHoleInRegionChain_rdh | /**
* There is a hole in the hdfs regions that violates the table integrity rules. Create a new
* empty region that patches the hole.
*/
@Override
public void handleHoleInRegionChain(byte[] holeStartKey, byte[] holeStopKey) throws IOException {
errors.reportError(ERROR_CODE.HOLE_IN_REGION_CHAIN,
  "There is a hole in the region chain between " + Bytes.toStringBinary(holeStartKey) + " and "
    + Bytes.toStringBinary(holeStopKey) + ". Creating a new regioninfo and region "
    + "dir in hdfs to plug the hole.");
TableDescriptor htd = getTableInfo().getTableDescriptor();
RegionInfo newRegion = RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(holeStartKey).setEndKey(holeStopKey).build();
HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
LOG.info("Plugged hole by creating new empty region: " + newRegion + " " + region);
hbck.fixes++;
} | 3.26 |
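A sketch of the RegionInfo that plugs such a hole, using the public RegionInfoBuilder API; the table name and keys are hypothetical:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class HolePlugSketch {
  public static void main(String[] args) {
    byte[] holeStartKey = Bytes.toBytes("b");
    byte[] holeStopKey = Bytes.toBytes("d");
    RegionInfo newRegion = RegionInfoBuilder.newBuilder(TableName.valueOf("testtable"))
      .setStartKey(holeStartKey)
      .setEndKey(holeStopKey)
      .build();
    // HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd) would then create
    // an empty region directory for it in HDFS.
    System.out.println(newRegion);
  }
}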
hbase_HbckTableInfo_handleRegionStartKeyNotEmpty_rdh | /**
* This is a special case hole -- when the first region of a table is missing from META, HBase
* doesn't acknowledge the existence of the table.
*/
@Override
public void handleRegionStartKeyNotEmpty(HbckRegionInfo next) throws IOException {
errors.reportError(ERROR_CODE.FIRST_REGION_STARTKEY_NOT_EMPTY, "First region should start with an empty key. Creating a new " + "region and regioninfo in HDFS to plug the hole.", getTableInfo(), next);
TableDescriptor htd = getTableInfo().getTableDescriptor();
// from special EMPTY_START_ROW to next region's startKey
RegionInfo newRegion = RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(HConstants.EMPTY_START_ROW).setEndKey(next.getStartKey()).build();
// TODO test
HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
LOG.info((("Table region start key was not empty. Created new empty region: " + newRegion) + " ") + region);
hbck.fixes++;
} | 3.26 |
hbase_HbckTableInfo_dump_rdh | /**
* Dumps the split keys and their regions in a human-readable layout for visual debugging
*/
private void dump(SortedSet<byte[]> splits, Multimap<byte[], HbckRegionInfo> regions) {
// we display this way because the last end key should be displayed as well.
StringBuilder sb = new StringBuilder();
for (byte[] k : splits) {
sb.setLength(0);// clear out existing buffer, if any.
sb.append(Bytes.toStringBinary(k) + ":\t");
for (HbckRegionInfo r : regions.get(k)) {
sb.append(((("[ " + r.toString()) + ", ") + Bytes.toStringBinary(r.getEndKey())) + "]\t");
}
hbck.getErrors().print(sb.toString());
}
} | 3.26 |
hbase_HbckTableInfo_checkRegionChain_rdh | /**
* Check the region chain (from META) of this table. We are looking for holes, overlaps, and
* cycles.
*
* @return false if there are errors
*/
public boolean checkRegionChain(TableIntegrityErrorHandler handler) throws IOException {
// When a table is disabled there is no need to check its region chain. If some of its
// regions are accidentally deployed, the code below might report issues such as a missing
// start or end region or a hole in the chain, and may try to fix them, which is unwanted.
if (hbck.isTableDisabled(this.tableName)) {
return true;
}
int originalErrorsCount = hbck.getErrors().getErrorList().size();
Multimap<byte[], HbckRegionInfo> regions = sc.calcCoverage();
SortedSet<byte[]> splits = sc.getSplits();
byte[] prevKey = null;
byte[] problemKey = null;
if (splits.isEmpty()) {
// no region for this table
handler.handleHoleInRegionChain(HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
}
for (byte[] key : splits) {
Collection<HbckRegionInfo> ranges = regions.get(key);
if ((prevKey == null) && (!Bytes.equals(key, HConstants.EMPTY_BYTE_ARRAY))) {
for (HbckRegionInfo rng : ranges) {
handler.handleRegionStartKeyNotEmpty(rng);
}
}
// check for degenerate ranges
for (HbckRegionInfo rng : ranges) {
// special endkey case converts '' to null
byte[] endKey = rng.getEndKey();
endKey = (endKey.length == 0) ? null : endKey;
if (Bytes.equals(rng.getStartKey(), endKey)) {
handler.handleDegenerateRegion(rng);
}
}
if (ranges.size() == 1) {// this split key is ok -- no overlap, not a hole.
if (problemKey != null) {
LOG.warn("reached end of problem group: " + Bytes.toStringBinary(key));
}
problemKey = null;// fell through, no more problem.
} else if (ranges.size() > 1) {
// set the new problem key group name, if already have problem key, just
// keep using it.
if (problemKey == null) {
// only for overlap regions.
LOG.warn("Naming new problem group: " + Bytes.toStringBinary(key));
problemKey = key;
}
overlapGroups.putAll(problemKey, ranges);
// record errors
ArrayList<HbckRegionInfo> subRange = new ArrayList<>(ranges);
// this is dumb and O(n^2), but it shouldn't happen often
for (HbckRegionInfo r1 : ranges) {
if (r1.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
continue;
}
subRange.remove(r1);
for (HbckRegionInfo r2 : subRange) {
if (r2.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
continue;
}
// general case of same start key
if (Bytes.compareTo(r1.getStartKey(), r2.getStartKey()) == 0) {
handler.handleDuplicateStartKeys(r1, r2);
} else if ((Bytes.compareTo(r1.getEndKey(), r2.getStartKey()) == 0) && (r1.getHdfsHRI().getRegionId() == r2.getHdfsHRI().getRegionId())) {
LOG.info("this is a split, log to splits");
handler.handleSplit(r1, r2);
} else {
// overlap
handler.handleOverlapInRegionChain(r1, r2);
}
}
}
} else if (ranges.isEmpty()) {
if (problemKey != null) {
LOG.warn("reached end of problem group: " + Bytes.toStringBinary(key));
}
problemKey = null;
byte[] holeStopKey = sc.getSplits().higher(key);// if higher key is null we reached the top.
if (holeStopKey != null) {
// hole
handler.handleHoleInRegionChain(key, holeStopKey);
}
}
prevKey = key;
}
// When the last region of a table is proper and has an empty end key, 'prevKey'
// will be null.
if (prevKey != null) {
handler.handleRegionEndKeyNotEmpty(prevKey);
}
// TODO fold this into the TableIntegrityHandler
if (hbck.getConf().getBoolean("hbasefsck.overlap.merge.parallel", true)) {
boolean ok = handleOverlapsParallel(handler, prevKey);
if (!ok) {
return false;
}
} else {
for (Collection<HbckRegionInfo> overlap : overlapGroups.asMap().values()) {
handler.handleOverlapGroup(overlap);
}
}
if (HBaseFsck.shouldDisplayFullReport()) {
// do full region split map dump
hbck.getErrors().print(("---- Table '" + this.tableName) + "': region split map");
dump(splits, regions);
hbck.getErrors().print(("---- Table '" + this.tableName) + "': overlap groups");
dumpOverlapProblems(overlapGroups);
hbck.getErrors().print(((("There are " + overlapGroups.keySet().size()) + " overlap groups with ") + overlapGroups.size()) + " overlapping regions");
}
if (!sidelinedRegions.isEmpty()) {
LOG.warn("Sidelined big overlapped regions, please bulk load them!");
hbck.getErrors().print(("---- Table '" + this.tableName) + "': sidelined big overlapped regions");
dumpSidelinedRegions(sidelinedRegions);
}
return hbck.getErrors().getErrorList().size() == originalErrorsCount;
} | 3.26 |
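A minimal, self-contained sketch of the coverage rule applied at each split key (region boundaries modeled as strings rather than byte[]): zero covering regions means a hole, one is healthy, more than one is an overlap group.

import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public class RegionChainSketch {
  public static void main(String[] args) {
    // split key -> regions covering the range starting at that key; "" is the empty key
    Map<String, List<String>> coverage = new TreeMap<>();
    coverage.put("", List.of("region-A"));              // ["", "b") is covered once: ok
    coverage.put("b", List.of("region-B", "region-C")); // two regions at "b": overlap group
    coverage.put("d", List.of());                       // nothing covers ["d", ...): hole
    for (Map.Entry<String, List<String>> e : coverage.entrySet()) {
      int n = e.getValue().size();
      String verdict = (n == 0) ? "hole" : ((n == 1) ? "ok" : "overlap group");
      System.out.println("split key '" + e.getKey() + "': " + verdict);
    }
  }
}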
hbase_PrettyPrinter_humanReadableSizeToBytes_rdh | /**
* Convert a human readable size to bytes. Examples of human readable sizes: 50 GB, 20 MB,
* 1 KB, 25000 B, etc. The size units can be specified in uppercase as well as lowercase. Also,
* if a single number is specified without any size unit, it is assumed to be in bytes.
*
* @param humanReadableSize
* human readable size
* @return value in bytes
*/
private static long humanReadableSizeToBytes(final String humanReadableSize) throws HBaseException {
if (humanReadableSize == null) {
return -1;
}
try {
return Long.parseLong(humanReadableSize);
} catch (NumberFormatException ex) {
LOG.debug("Given size value is not a number, parsing for human readable format");
}
String tb = null;
String gb = null;
String mb = null;
String kb = null;
String b = null;
String expectedSize = null;
long size = 0;
Matcher matcher = PrettyPrinter.SIZE_PATTERN.matcher(humanReadableSize);
if (matcher.matches()) {
expectedSize = matcher.group(2);
tb = matcher.group(4);
gb = matcher.group(6);
mb = matcher.group(8);
kb = matcher.group(10);
b = matcher.group(12);
}
size += (tb != null) ? Long.parseLong(tb) * HConstants.TB_IN_BYTES : 0;
size += (gb != null) ? Long.parseLong(gb) * HConstants.GB_IN_BYTES : 0;
size += (mb != null) ? Long.parseLong(mb) * HConstants.MB_IN_BYTES : 0;
size += (kb != null) ? Long.parseLong(kb) * HConstants.KB_IN_BYTES : 0;
size += (b != null) ? Long.parseLong(b) : 0;
if ((expectedSize != null) && (Long.parseLong(expectedSize) != size)) {
throw new HBaseException("Malformed size string: values in byte and human readable "
  + "format do not match");
}
return size;
} | 3.26 |
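A worked example of the additive parse, with unit constants assumed to match HConstants (KB = 1024, MB = 1024^2, and so on): "2 GB 512 MB" resolves to 2 * 1024^3 + 512 * 1024^2 = 2684354560 bytes.

public class SizeParseSketch {
  public static void main(String[] args) {
    long KB = 1024L, MB = KB * 1024, GB = MB * 1024;
    // Mirrors the additive accumulation above for the input "2 GB 512 MB".
    long size = 2 * GB + 512 * MB;
    System.out.println(size); // 2684354560
  }
}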
hbase_PrettyPrinter_humanReadableByte_rdh | /**
* Convert a long size to a human readable string. Example: 10763632640 -> 10763632640 B (10GB
* 25MB)
*
* @param size
* the size in bytes
* @return human readable string
*/
private static String humanReadableByte(final long size) {
StringBuilder sb = new StringBuilder();
long tb;
long gb;
long mb;
long kb;
long b;
if (size < HConstants.KB_IN_BYTES) {
sb.append(size);
sb.append(" B");
return sb.toString();
}
tb = size / HConstants.TB_IN_BYTES;
gb = (size - (HConstants.TB_IN_BYTES * tb)) / HConstants.GB_IN_BYTES;
mb = ((size - (HConstants.TB_IN_BYTES * tb)) - (HConstants.GB_IN_BYTES * gb)) / HConstants.MB_IN_BYTES;
kb = (size - (HConstants.TB_IN_BYTES * tb) - (HConstants.GB_IN_BYTES * gb)
  - (HConstants.MB_IN_BYTES * mb)) / HConstants.KB_IN_BYTES;
b = size - (HConstants.TB_IN_BYTES * tb) - (HConstants.GB_IN_BYTES * gb)
  - (HConstants.MB_IN_BYTES * mb) - (HConstants.KB_IN_BYTES * kb);
sb.append(size).append(" B (");
if (tb > 0) {
sb.append(tb);
sb.append("TB");
}
if (gb > 0) {
sb.append(tb > 0 ? " " : "");
sb.append(gb);
sb.append("GB");
}
if (mb > 0) {
sb.append((tb + gb) > 0 ? " " : "");
sb.append(mb);
sb.append("MB");
}
if (kb > 0) {
sb.append(((tb + gb) + mb) > 0 ? " " : "");
sb.append(kb);
sb.append("KB");
}
if (b > 0) {
sb.append((((tb + gb) + mb) + kb) > 0 ? " " : "");
sb.append(b);
sb.append("B");
}
sb.append(")");
return sb.toString();
} | 3.26 |
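A worked decomposition mirroring the arithmetic above for the Javadoc example, 10763632640 B, which prints as "10763632640 B (10GB 25MB)":

public class HumanReadableByteSketch {
  public static void main(String[] args) {
    long KB = 1024L, MB = KB * 1024, GB = MB * 1024, TB = GB * 1024;
    long size = 10763632640L;
    long tb = size / TB;                                   // 0
    long gb = (size - tb * TB) / GB;                       // 10
    long mb = (size - tb * TB - gb * GB) / MB;             // 25
    long kb = (size - tb * TB - gb * GB - mb * MB) / KB;   // 0
    long b = size - tb * TB - gb * GB - mb * MB - kb * KB; // 0
    System.out.println(size + " B (" + gb + "GB " + mb + "MB)");
  }
}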