name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hbase_MetricsSource_incrSourceInitializing_rdh | /**
* Increment the count for initializing sources
*/
public void incrSourceInitializing() {
singleSourceSource.incrSourceInitializing();
globalSourceSource.incrSourceInitializing();
} | 3.26 |
hbase_MetricsSource_getPeerID_rdh | /**
* Get the slave peer ID
*/
public String getPeerID() {
return id;
} | 3.26 |
hbase_MetricsSource_getOpsShipped_rdh | /**
* Gets the number of OPs shipped by this source queue to target cluster.
*
 * @return opsShipped total number of OPs shipped by this source.
*/
public long getOpsShipped() {
return this.singleSourceSource.getShippedOps();
} | 3.26 |
hbase_MetricsSource_incrLogEditsRead_rdh | /**
* Increment the number of log edits read by one.
*/
public void incrLogEditsRead() {
incrLogEditsRead(1);
} | 3.26 |
hbase_MetricsSource_getTimestampOfLastShippedOp_rdh | /**
 * Get the timestampOfLastShippedOp; if there are multiple groups, return the latest one.
 */
public long getTimestampOfLastShippedOp() {
long lastTimestamp = 0L;
for (long ts : lastShippedTimeStamps.values()) {
if (ts > lastTimestamp) {
lastTimestamp = ts;
}
}
return lastTimestamp;
} | 3.26 |
hbase_MetricsSource_getSourceInitializing_rdh | /**
* Get the source initializing counts
*
* @return number of replication sources getting initialized
*/
public int getSourceInitializing() {
return singleSourceSource.getSourceInitializing();
} | 3.26 |
hbase_MetricsSource_incrLogEditsFiltered_rdh | /**
 * Increment the number of log edits filtered out by one.
*/
public void incrLogEditsFiltered() {
incrLogEditsFiltered(1);
} | 3.26 |
hbase_MetricsSource_getSizeOfLogQueue_rdh | /**
* Get the sizeOfLogQueue
*/
public int getSizeOfLogQueue() {
return singleSourceSource.getSizeOfLogQueue();
} | 3.26 |
hbase_MetricsSource_setAgeOfLastShippedOp_rdh | /**
* Set the age of the last edit that was shipped
*
* @param timestamp
* target write time of the edit
* @param walGroup
* which group we are setting
*/
public void setAgeOfLastShippedOp(long timestamp, String walGroup) {
long v0 = EnvironmentEdgeManager.currentTime() - timestamp;
singleSourceSource.setLastShippedAge(v0);
globalSourceSource.setLastShippedAge(v0);
this.f0.put(walGroup, v0);
this.lastShippedTimeStamps.put(walGroup, timestamp);
} | 3.26 |
hbase_MetricsSource_getEditsFiltered_rdh | /**
 * Gets the number of edits not eligible for replication read from this source queue's logs so far.
 *
 * @return logEditsFiltered non-replicable edits filtered from this queue's logs.
*/
public long getEditsFiltered() {
return this.singleSourceSource.getEditsFiltered();
} | 3.26 |
hbase_MetricsSource_setTimeStampNextToReplicate_rdh | /**
* TimeStamp of next edit targeted for replication. Used for calculating lag, as if this timestamp
* is greater than timestamp of last shipped, it means there's at least one edit pending
* replication.
*
* @param timeStampNextToReplicate
* timestamp of next edit in the queue that should be replicated.
*/
public void setTimeStampNextToReplicate(long timeStampNextToReplicate) {
this.timeStampNextToReplicate = timeStampNextToReplicate;
} | 3.26 |
hbase_MetricsSource_clear_rdh | /**
* Removes all metrics about this Source.
*/
public void clear() {
m0();
singleSourceSource.clear();
} | 3.26 |
hbase_MetricsSource_decrSourceInitializing_rdh | /**
* Decrement the count for initializing sources
*/
public void decrSourceInitializing() {
singleSourceSource.decrSourceInitializing();
globalSourceSource.decrSourceInitializing();
} | 3.26 |
hbase_MetricsSource_incrLogReadInBytes_rdh | /**
 * Increment the number of bytes read by the source from the log file.
*/
public void incrLogReadInBytes(long readInBytes) {
singleSourceSource.incrLogReadInBytes(readInBytes);
globalSourceSource.incrLogReadInBytes(readInBytes);
} | 3.26 |
hbase_MetricsSource_getReplicableEdits_rdh | /**
* Gets the number of edits eligible for replication read from this source queue logs so far.
*
* @return replicableEdits total number of replicable edits read from this queue logs.
*/
public long getReplicableEdits() {
return this.singleSourceSource.getWALEditsRead() - this.singleSourceSource.getEditsFiltered();
} | 3.26 |
hbase_MetricsSource_getTimeStampNextToReplicate_rdh | /**
* TimeStamp of next edit to be replicated.
*
* @return timeStampNextToReplicate - TimeStamp of next edit to be replicated.
*/
public long getTimeStampNextToReplicate() {
return timeStampNextToReplicate;
} | 3.26 |
hbase_ServerRpcConnection_sendConnectionHeaderResponseIfNeeded_rdh | /**
* Send the response for connection header
*/
private void sendConnectionHeaderResponseIfNeeded() throws FatalConnectionException {
Pair<RPCProtos.ConnectionHeaderResponse, CryptoAES> pair = setupCryptoCipher(); // Respond with the connection header if Crypto AES is enabled
if (pair == null) {
return;
}
try {
int size = pair.getFirst().getSerializedSize();
BufferChain bc;
try (ByteBufferOutputStream bbOut = new ByteBufferOutputStream(4 + size);DataOutputStream out = new DataOutputStream(bbOut)) {
out.writeInt(size);
pair.getFirst().writeTo(out);
bc = new BufferChain(bbOut.getByteBuffer());
}
doRespond(new RpcResponse() {
@Override
public BufferChain getResponse() {
return bc;
}
@Override
public void done() {
// must switch after sending the connection header response, as the client still uses the
// original SaslClient to unwrap the data we send back
saslServer.switchToCryptoAES(pair.getSecond());
}
});
} catch (IOException ex) {
throw new UnsupportedCryptoException(ex.getMessage(), ex);
}
} | 3.26 |
hbase_ServerRpcConnection_setupCryptoCipher_rdh | /**
* Set up cipher for rpc encryption with Apache Commons Crypto.
*/
private Pair<RPCProtos.ConnectionHeaderResponse, CryptoAES> setupCryptoCipher() throws FatalConnectionException {
// If simple auth, return
if (saslServer == null) {
return null;
}
// check if rpc encryption with Crypto AES
String qop = saslServer.getNegotiatedQop();
boolean isEncryption = QualityOfProtection.PRIVACY.getSaslQop().equalsIgnoreCase(qop);
boolean isCryptoAesEncryption = isEncryption && this.rpcServer.conf.getBoolean("hbase.rpc.crypto.encryption.aes.enabled", false);
if (!isCryptoAesEncryption) {
return null;
}
if (!connectionHeader.hasRpcCryptoCipherTransformation()) {
return null;
}
String transformation = connectionHeader.getRpcCryptoCipherTransformation();
if ((transformation == null) || (transformation.length() == 0)) {
return null;
}
// Negotiates AES based on complete saslServer.
// The Crypto metadata need to be encrypted and send to client.
Properties properties = new Properties();
// the property for SecureRandomFactory
properties.setProperty(CryptoRandomFactory.CLASSES_KEY, this.rpcServer.conf.get("hbase.crypto.sasl.encryption.aes.crypto.random", "org.apache.commons.crypto.random.JavaCryptoRandom"));
// the property for cipher class
properties.setProperty(CryptoCipherFactory.CLASSES_KEY,
this.rpcServer.conf.get("hbase.rpc.crypto.encryption.aes.cipher.class", "org.apache.commons.crypto.cipher.JceCipher"));
int cipherKeyBits = this.rpcServer.conf.getInt("hbase.rpc.crypto.encryption.aes.cipher.keySizeBits", 128);
// generate key and iv
if ((cipherKeyBits % 8) != 0) {
throw new IllegalArgumentException("The AES cipher key size in bits" + " should be a multiple of byte");
}
int len = cipherKeyBits / 8;
byte[] inKey = new byte[len];
byte[] outKey = new byte[len];
byte[] inIv = new byte[len];
byte[] outIv = new byte[len];
CryptoAES cryptoAES;
try {
// generate the cipher meta data with SecureRandom
CryptoRandom secureRandom = CryptoRandomFactory.getCryptoRandom(properties);
secureRandom.nextBytes(inKey);
secureRandom.nextBytes(outKey);
secureRandom.nextBytes(inIv);
secureRandom.nextBytes(outIv);
// create CryptoAES for server
cryptoAES = new CryptoAES(transformation, properties, inKey, outKey, inIv, outIv);
} catch (GeneralSecurityException | IOException ex) {
throw new UnsupportedCryptoException(ex.getMessage(), ex);
}
// create SaslCipherMeta and send to client,
// for client, the [inKey, outKey], [inIv, outIv] should be reversed
RPCProtos.CryptoCipherMeta.Builder ccmBuilder = RPCProtos.CryptoCipherMeta.newBuilder();
ccmBuilder.setTransformation(transformation);
ccmBuilder.setInIv(getByteString(outIv));
ccmBuilder.setInKey(getByteString(outKey));
ccmBuilder.setOutIv(getByteString(inIv));
ccmBuilder.setOutKey(getByteString(inKey));
RPCProtos.ConnectionHeaderResponse resp = RPCProtos.ConnectionHeaderResponse.newBuilder().setCryptoCipherMeta(ccmBuilder).build();
return Pair.newPair(resp, cryptoAES);
} | 3.26 |
hbase_ServerRpcConnection_doRawSaslReply_rdh | /**
* No protobuf encoding of raw sasl messages
*/
protected final void doRawSaslReply(SaslStatus status, Writable rv, String errorClass, String error) throws IOException {
BufferChain bc;
// In my testing, have noticed that sasl messages are usually
// in the ballpark of 100-200. That's why the initial capacity is 256.
try (ByteBufferOutputStream saslResponse = new ByteBufferOutputStream(256);DataOutputStream out = new DataOutputStream(saslResponse)) {
out.writeInt(status.state); // write status
if (status == SaslStatus.SUCCESS) {
rv.write(out);
} else {
WritableUtils.writeString(out, errorClass);
WritableUtils.writeString(out, error);
}
bc = new BufferChain(saslResponse.getByteBuffer());
}
doRespond(() -> bc);
} | 3.26 |
hbase_ServerRpcConnection_processRequest_rdh | /**
* Has the request header and the request param and optionally encoded data buffer all in this one
* array.
* <p/>
* Will be overridden in tests.
*/
protected void processRequest(ByteBuff buf) throws IOException, InterruptedException {
long totalRequestSize = buf.limit();
int offset = 0;
// Here we read in the header. We avoid having pb
// do its default 4k allocation for CodedInputStream. We force it to use
// backing array.
CodedInputStream cis = createCis(buf);
int headerSize = cis.readRawVarint32();
offset = cis.getTotalBytesRead();
Message.Builder builder = RequestHeader.newBuilder();
ProtobufUtil.mergeFrom(builder, cis, headerSize);
RequestHeader header = ((RequestHeader) (builder.build()));
offset += headerSize;
Context traceCtx = GlobalOpenTelemetry.getPropagators().getTextMapPropagator().extract(Context.current(), header.getTraceInfo(), getter);
// n.b. Management of this Span instance is a little odd. Most exit paths from this try scope
// are early-exits due to error cases. There's only one success path, the asynchronous call to
// RpcScheduler#dispatch. The success path assumes ownership of the span, which is represented
// by null-ing out the reference in this scope. All other paths end the span. Thus, and in
// order to avoid accidentally orphaning the span, the call to Span#end happens in a finally
// block iff the span is non-null.
Span v43 = TraceUtil.createRemoteSpan("RpcServer.process", traceCtx);
try (Scope ignored = v43.makeCurrent()) {
int id = header.getCallId();
// HBASE-28128 - if server is aborting, don't bother trying to process. It will
// fail at the handler layer, but worse might result in CallQueueTooBigException if the
// queue is full but server is not properly processing requests. Better to throw an aborted
// exception here so that the client can properly react.
if ((rpcServer.server != null) && rpcServer.server.isAborted()) {
RegionServerAbortedException serverIsAborted = new RegionServerAbortedException(("Server " + rpcServer.server.getServerName()) + " aborting");
this.rpcServer.metrics.exception(serverIsAborted);
sendErrorResponseForCall(id, totalRequestSize, v43, serverIsAborted.getMessage(), serverIsAborted);
return;
}
if (RpcServer.LOG.isTraceEnabled()) {
RpcServer.LOG.trace(((("RequestHeader " + TextFormat.shortDebugString(header))
+ " totalRequestSize: ") + totalRequestSize) + " bytes");
}
// Enforcing the call queue size, this triggers a retry in the client
// This is a bit late to be doing this check - we have already read in the
// total request.
if ((totalRequestSize + this.rpcServer.callQueueSizeInBytes.sum()) > this.rpcServer.maxQueueSizeInBytes) {
this.rpcServer.metrics.exception(RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION);
sendErrorResponseForCall(id, totalRequestSize, v43, ("Call queue is full on " +
this.rpcServer.server.getServerName()) + ", is hbase.ipc.server.max.callqueue.size too small?", RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION);
return;
}
MethodDescriptor md = null;
Message param = null;
CellScanner cellScanner = null;
try {
if (header.hasRequestParam() && header.getRequestParam()) {
md = this.service.getDescriptorForType().findMethodByName(header.getMethodName());
if (md == null) {
throw new UnsupportedOperationException(header.getMethodName());
}
builder = this.service.getRequestPrototype(md).newBuilderForType();
cis.resetSizeCounter();
int paramSize = cis.readRawVarint32();
offset += cis.getTotalBytesRead();
if (builder != null) {
ProtobufUtil.mergeFrom(builder, cis, paramSize);
param = builder.build();
}
offset += paramSize;
} else {
// currently header must have request param, so we directly throw
// exception here
String msg = ("Invalid request header: " + TextFormat.shortDebugString(header)) + ", should have param set in it";
RpcServer.LOG.warn(msg);
throw new DoNotRetryIOException(msg);
}
if (header.hasCellBlockMeta()) {
buf.position(offset);
ByteBuff dup = buf.duplicate();
dup.limit(offset + header.getCellBlockMeta().getLength());
cellScanner = this.rpcServer.cellBlockBuilder.createCellScannerReusingBuffers(this.codec, this.compressionCodec, dup);
}
} catch (Throwable thrown) {
InetSocketAddress address = this.rpcServer.getListenerAddress();
String msg = ((address != null ? address : "(channel closed)") + " is unable to read call parameter from client ") + getHostAddress();
RpcServer.LOG.warn(msg, thrown);
this.rpcServer.metrics.exception(thrown);
final Throwable responseThrowable;
if (thrown instanceof LinkageError) {
// probably the hbase hadoop version does not match the running hadoop version
responseThrowable = new DoNotRetryIOException(thrown);
} else if (thrown instanceof UnsupportedOperationException) {
// If the method is not present on the server, do not retry.
responseThrowable = new DoNotRetryIOException(thrown);
} else {
responseThrowable = thrown;
}
sendErrorResponseForCall(id, totalRequestSize, v43, (msg + "; ") + responseThrowable.getMessage(), responseThrowable);
return;
}
int timeout = 0;
if (header.hasTimeout() && (header.getTimeout() > 0)) {
timeout = Math.max(this.rpcServer.minClientRequestTimeout, header.getTimeout());
}
ServerCall<?> call = createCall(id, this.service, md, header, param, cellScanner, totalRequestSize, this.addr, timeout, this.callCleanup);
if (this.rpcServer.scheduler.dispatch(new CallRunner(this.rpcServer, call))) {
// unset span so that it's not closed in the finally block
v43 = null;
} else {
this.rpcServer.callQueueSizeInBytes.add((-1) * call.getSize());
this.rpcServer.metrics.exception(RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION);
call.setResponse(null, null, RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION, ("Call queue is full on " +
this.rpcServer.server.getServerName()) + ", too many items queued ?");
TraceUtil.setError(v43, RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION);
call.sendResponseIfReady();
}
} finally {
if (v43 != null) {
v43.end();
}
}
} | 3.26 |
hbase_ServerRpcConnection_setupCellBlockCodecs_rdh | /**
* Set up cell block codecs
*/
private void setupCellBlockCodecs() throws FatalConnectionException {
// TODO: Plug in other supported decoders.
if (!connectionHeader.hasCellBlockCodecClass()) {
return;
}
String className = connectionHeader.getCellBlockCodecClass();
if ((className == null) || (className.length() == 0)) {
return;
}
try {
this.codec = ((Codec) (Class.forName(className).getDeclaredConstructor().newInstance()));
} catch (Exception e) {
throw new UnsupportedCellCodecException(className, e);
}
if (!connectionHeader.hasCellBlockCompressorClass()) {
return;
}
className = connectionHeader.getCellBlockCompressorClass();
try {
this.compressionCodec = ((CompressionCodec) (Class.forName(className).getDeclaredConstructor().newInstance()));
} catch (Exception e) {
throw new UnsupportedCompressionCodecException(className, e);
}
} | 3.26 |
hbase_TableStateManager_setTableState_rdh | /**
* Set table state to provided. Caller should lock table on write.
*
* @param tableName
* table to change state for
* @param newState
* new state
*/
public void setTableState(TableName tableName, TableState.State newState) throws IOException {
ReadWriteLock lock = tnLock.getLock(tableName);
lock.writeLock().lock();
try {
updateMetaState(tableName, newState);
} finally {
lock.writeLock().unlock();
}
} | 3.26 |
hbase_EventHandler_getPriority_rdh | /**
* Get the priority level for this handler instance. This uses natural ordering so lower numbers
* are higher priority.
* <p>
* Lowest priority is Integer.MAX_VALUE. Highest priority is 0.
* <p>
* Subclasses should override this method to allow prioritizing handlers.
* <p>
* Handlers with the same priority are handled in FIFO order.
* <p>
*
* @return Integer.MAX_VALUE by default, override to set higher priorities
*/
public int getPriority() {
return Integer.MAX_VALUE;
} | 3.26 |
hbase_EventHandler_handleException_rdh | /**
* Event exception handler, may be overridden
*
* @param t
* Throwable object
*/
protected void handleException(Throwable t) {
String msg = "Caught throwable while processing event " + eventType;
LOG.error(msg, t);
if ((server != null) && ((t instanceof Error) || (t instanceof RuntimeException))) {
server.abort(msg, t);
}
} | 3.26 |
hbase_EventHandler_getSeqid_rdh | /**
 * Returns this event's sequence id.
*/
public long getSeqid() {
return this.seqid;
} | 3.26 |
hbase_EventHandler_compareTo_rdh | /**
* Default prioritized runnable comparator which implements a FIFO ordering.
* <p>
* Subclasses should not override this. Instead, if they want to implement priority beyond FIFO,
* they should override {@link #getPriority()}.
*/
@Override
public int compareTo(EventHandler o) {
if (o == null) {
return 1;
}
if (getPriority() != o.getPriority()) {
return getPriority() < o.getPriority() ? -1 : 1;
}
return this.seqid < o.seqid ? -1 : 1;
} | 3.26 |
hbase_EventHandler_prepare_rdh | /**
* Event handlers should do all the necessary checks in this method (rather than in the
* constructor, or in process()) so that the caller, which is mostly executed in the ipc context
* can fail fast. Process is executed async from the client ipc, so this method gives a quick
* chance to do some basic checks. Should be called after constructing the EventHandler, and
* before process().
*
* @return the instance of this class
* @throws Exception
* when something goes wrong
*/
public EventHandler prepare() throws Exception {
return this;
} | 3.26 |
hbase_EventHandler_getInformativeName_rdh | /**
 * Event implementations should override this method to provide an informative name about what
* event they are handling. For example, event-specific information such as which region or server
* is being processed should be included if possible.
*/
public String getInformativeName() {
return this.getClass().toString();
} | 3.26 |
hbase_TableNamespaceManager_doesNamespaceExist_rdh | /**
 * Check whether a namespace already exists.
*/
public boolean doesNamespaceExist(String namespaceName) throws IOException {
return cache.containsKey(namespaceName);
} | 3.26 |
hbase_CellCodecWithTags_readByteArray_rdh | /**
 * Returns the byte array read from the stream.
 */
private byte[] readByteArray(final InputStream in) throws IOException {
byte[] intArray = new byte[Bytes.SIZEOF_INT];
IOUtils.readFully(in, intArray);
int length = Bytes.toInt(intArray);
byte[] bytes = new byte[length];
IOUtils.readFully(in, bytes);
return bytes;
} | 3.26 |
hbase_CellCodecWithTags_write_rdh | /**
* Write int length followed by array bytes.
*/
private void write(final byte[] bytes, final int offset, final int length) throws IOException {
this.out.write(Bytes.toBytes(length));
this.out.write(bytes, offset, length);
} | 3.26 |
hbase_MetricsSink_refreshAgeOfLastAppliedOp_rdh | /**
 * Refreshing the age makes sure the value returned is the actual one and not the one set at
 * replication time.
*
* @return refreshed age
*/
public long refreshAgeOfLastAppliedOp() {
return setAgeOfLastAppliedOp(lastTimestampForAge);
} | 3.26 |
hbase_MetricsSink_incrementFailedBatches_rdh | /**
* Convenience method to update metrics when batch of operations has failed.
*/
public void incrementFailedBatches() {
mss.incrFailedBatches();
} | 3.26 |
hbase_MetricsSink_getAppliedOps_rdh | /**
* Gets the total number of OPs delivered to this sink.
*/
public long getAppliedOps() {
return this.mss.getSinkAppliedOps();
} | 3.26 |
hbase_MetricsSink_m0_rdh | /**
 * Get the count of the failed batches
*/
protected long m0() {
return mss.getFailedBatches();
} | 3.26 |
hbase_MetricsSink_m1_rdh | /**
* Get the TimestampOfLastAppliedOp. If no replication Op applied yet, the value is the timestamp
* at which hbase instance starts
*
 * @return timestampOfLastAppliedOp
*/
public long m1() {
return this.lastTimestampForAge;
} | 3.26 |
hbase_MetricsSink_getAgeOfLastAppliedOp_rdh | /**
* Get the Age of Last Applied Op
*/
public long getAgeOfLastAppliedOp() {
return mss.getLastAppliedOpAge();
} | 3.26 |
hbase_MetricsSink_setAgeOfLastAppliedOp_rdh | /**
* Set the age of the last applied operation
*
* @param timestamp
* The timestamp of the last operation applied.
* @return the age that was set
*/
public long setAgeOfLastAppliedOp(long timestamp) {
long age = 0;
if (lastTimestampForAge != timestamp) {
lastTimestampForAge = timestamp;
age = EnvironmentEdgeManager.currentTime() - lastTimestampForAge;
}
mss.setLastAppliedOpAge(age);
return age;
} | 3.26 |
hbase_MetricsSink_getStartTimestamp_rdh | /**
* Gets the time stamp from when the Sink was initialized.
*/
public long getStartTimestamp() {
return this.startTimestamp;
} | 3.26 |
hbase_MetricsSink_applyBatch_rdh | /**
 * Convenience method to change metrics when a batch of operations is applied.
 *
 * @param batchSize
 * total number of mutations that are applied/replicated
 * @param hfileSize
 * total number of hfiles that are applied/replicated
 */
public void applyBatch(long batchSize, long hfileSize) {
applyBatch(batchSize);
mss.incrAppliedHFiles(hfileSize);
} | 3.26 |
hbase_FileCleanerDelegate_postClean_rdh | /**
* Will be called after cleaner run.
*/
default void postClean() {
} | 3.26 |
hbase_FileCleanerDelegate_isEmptyDirDeletable_rdh | /**
 * Check if an empty directory with no subdirs or subfiles can be deleted
*
* @param dir
* Path of the directory
* @return True if the directory can be deleted, otherwise false
*/
default boolean isEmptyDirDeletable(Path dir) {
return true;
} | 3.26 |
hbase_KeyValueCodec_getDecoder_rdh | /**
* Implementation depends on {@link InputStream#available()}
*/
@Override
public Decoder getDecoder(final InputStream is) {
return new KeyValueDecoder(is);
} | 3.26 |
hbase_RemoteProcedureDispatcher_removeNode_rdh | /**
* Remove a remote node
*
* @param key
* the node identifier
*/
public boolean removeNode(final TRemote key) {
final BufferNode node = nodeMap.remove(key);
if (node == null) {
return false;
}
node.abortOperationsInQueue();
return true;
} | 3.26 |
hbase_RemoteProcedureDispatcher_submitTask_rdh | // ============================================================================================
// Task Helpers
// ============================================================================================
protected final void submitTask(Runnable task) {
threadPool.execute(task);
} | 3.26 |
hbase_RemoteProcedureDispatcher_addOperationToNode_rdh | /**
* Add a remote rpc.
*
* @param key
* the node identifier
*/
public void addOperationToNode(final TRemote key, RemoteProcedure rp) throws NullTargetServerDispatchException, NoServerDispatchException, NoNodeDispatchException {
if (key == null) {
throw new NullTargetServerDispatchException(rp.toString());
}
BufferNode node = nodeMap.get(key);
if (node == null) {
// If null here, it means node has been removed because it crashed. This happens when server
// is expired in ServerManager. ServerCrashProcedure may or may not have run.
throw new NoServerDispatchException((key.toString() + "; ") + rp.toString());
}
node.add(rp);
// Check our node still in the map; could have been removed by #removeNode.
if (!nodeMap.containsValue(node)) {
throw new NoNodeDispatchException((key.toString() + "; ") + rp.toString());
}
} | 3.26 |
hbase_RemoteProcedureDispatcher_storeInDispatchedQueue_rdh | /**
 * Whether to store this remote procedure in the dispatched queue. Only OpenRegionProcedure and
 * CloseRegionProcedure return false, since they are not fully controlled by the dispatcher.
*/
default boolean storeInDispatchedQueue() {
return true;
} | 3.26 |
hbase_RemoteProcedureDispatcher_addNode_rdh | // ============================================================================================
// Node Helpers
// ============================================================================================
/**
* Add a node that will be able to execute remote procedures
*
* @param key
* the node identifier
*/
public void addNode(final TRemote key) {
assert key != null : "Tried to add a node with a null key";
nodeMap.computeIfAbsent(key, k -> new BufferNode(k));
} | 3.26 |
hbase_FileKeyStoreLoaderBuilderProvider_getBuilderForKeyStoreFileType_rdh | /**
* Returns a {@link FileKeyStoreLoader.Builder} that can build a loader which loads keys and certs
* from files of the given {@link KeyStoreFileType}.
*
* @param type
* the file type to load keys/certs from.
* @return a new Builder.
*/
static Builder<? extends FileKeyStoreLoader> getBuilderForKeyStoreFileType(KeyStoreFileType type) {
switch (Objects.requireNonNull(type)) {
case JKS :
return new JKSFileLoader.Builder();
case PEM :
return new PEMFileLoader.Builder();
case PKCS12 :
return new PKCS12FileLoader.Builder();
case BCFKS :
return new BCFKSFileLoader.Builder();
default :
throw new AssertionError("Unexpected StoreFileType: " + type.name());
}
} | 3.26 |
hbase_UnassignProcedure_remoteCallFailed_rdh | /**
* Returns If true, we will re-wake up this procedure; if false, the procedure stays suspended.
*/
@Override
protected boolean remoteCallFailed(final MasterProcedureEnv env, final RegionStateNode regionNode, final IOException exception) {
return true;
} | 3.26 |
hbase_Table_checkAndMutate_rdh | /**
* checkAndMutate that atomically checks if a row matches the specified condition. If it does, it
* performs the specified action.
*
* @param checkAndMutate
* The CheckAndMutate object.
* @return A CheckAndMutateResult object that represents the result for the CheckAndMutate.
* @throws IOException
* if a remote or network exception occurs.
*/
default CheckAndMutateResult checkAndMutate(CheckAndMutate checkAndMutate) throws IOException {
return checkAndMutate(Collections.singletonList(checkAndMutate)).get(0);
} | 3.26 |
hbase_Table_getOperationTimeout_rdh | /**
* Get timeout of each operation in Table instance.
*
* @param unit
* the unit of time the timeout to be represented in
* @return operation rpc timeout in the specified time unit
*/
default long getOperationTimeout(TimeUnit unit) {
throw new NotImplementedException("Add an implementation!");
} | 3.26 |
hbase_Table_batch_rdh | /**
* Method that does a batch call on Deletes, Gets, Puts, Increments, Appends, RowMutations. The
* ordering of execution of the actions is not defined. Meaning if you do a Put and a Get in the
* same {@link #batch} call, you will not necessarily be guaranteed that the Get returns what the
* Put had put.
*
* @param actions
* list of Get, Put, Delete, Increment, Append, RowMutations.
* @param results
* Empty Object[], same size as actions. Provides access to partial results, in
* case an exception is thrown. A null in the result array means that the call for
* that action failed, even after retries. The order of the objects in the results
* array corresponds to the order of actions in the request list.
* @since 0.90.0
*/
default void batch(final List<? extends Row> actions, final Object[]
results) throws IOException, InterruptedException {
throw new NotImplementedException("Add an implementation!");
}
/**
* Same as {@link #batch(List, Object[])}, but with a callback.
*
* @since 0.96.0
* @deprecated since 3.0.0, will removed in 4.0.0. Please use the batch related methods in
{@link AsyncTable} directly if you want to use callback. We reuse the callback for
coprocessor here, and the problem is that for batch operation, the
{@link AsyncTable} | 3.26 |
hbase_Table_close_rdh | /**
* Releases any resources held or pending changes in internal buffers.
*
* @throws IOException
* if a remote or network exception occurs.
*/
@Override
default void close()
throws IOException {
throw new NotImplementedException("Add an implementation!");
}
/**
* Creates and returns a {@link org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel}
* instance connected to the table region containing the specified row. The row given does not
* actually have to exist. Whichever region would contain the row based on start and end keys will
* be used. Note that the {@code row} parameter is also not passed to the coprocessor handler
* registered for this protocol, unless the {@code row} is separately passed as an argument in the
* service request. The parameter here is only used to locate the region used to handle the call.
* <p/>
* The obtained {@link org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel} instance can be
* used to access a published coprocessor {@link Service} using standard protobuf service
* invocations:
* <p/>
* <div style="background-color: #cccccc; padding: 2px"> <blockquote>
*
* <pre>
* CoprocessorRpcChannel channel = myTable.coprocessorService(rowkey);
* MyService.BlockingInterface service = MyService.newBlockingStub(channel);
* MyCallRequest request = MyCallRequest.newBuilder()
* ...
* .build();
* MyCallResponse response = service.myCall(null, request);
* </pre>
*
* </blockquote> </div>
*
* @param row
* The row key used to identify the remote region location
* @return A CoprocessorRpcChannel instance
* @deprecated since 3.0.0, will removed in 4.0.0. This is too low level, please stop using it any
more. Use the coprocessorService methods in {@link AsyncTable} | 3.26 |
hbase_Table_mutateRow_rdh | /**
* Performs multiple mutations atomically on a single row. Currently {@link Put} and
* {@link Delete} are supported.
*
* @param rm
* object that specifies the set of mutations to perform atomically
* @return results of Increment/Append operations
* @throws IOException
* if a remote or network exception occurs.
*/
default Result mutateRow(final RowMutations rm) throws IOException {
throw new NotImplementedException("Add an implementation!");
} | 3.26 |
hbase_Table_exists_rdh | /**
* Test for the existence of columns in the table, as specified by the Get.
* <p>
* This will return true if the Get matches one or more keys, false if not.
* <p>
 * This is a server-side call so it prevents any data from being transferred to the client.
*
* @param get
* the Get
* @return true if the specified Get matches one or more keys, false if not
* @throws IOException
* e
*/
default boolean exists(Get get) throws IOException {
return exists(Collections.singletonList(get))[0];
} | 3.26 |
hbase_Table_ifEquals_rdh | /**
* Check for equality.
*
* @param value
* the expected value
*/
default CheckAndMutateBuilder ifEquals(byte[] value) {
return ifMatches(CompareOperator.EQUAL, value);
} | 3.26 |
hbase_Table_append_rdh | /**
* Appends values to one or more columns within a single row.
* <p>
 * This operation guarantees atomicity to readers. Appends are done under a single row lock, so
* write operations to a row are synchronized, and readers are guaranteed to see this operation
* fully completed.
*
* @param append
* object that specifies the columns and values to be appended
* @throws IOException
* e
* @return values of columns after the append operation (maybe null)
*/
default Result append(final Append append) throws IOException {
throw new NotImplementedException("Add an implementation!");
} | 3.26 |
hbase_Table_put_rdh | /**
* Batch puts the specified data into the table.
* <p>
* This can be used for group commit, or for submitting user defined batches. Before sending a
* batch of mutations to the server, the client runs a few validations on the input list. If an
 * error is found, for example, a mutation was supplied but was missing its column, an
* {@link IllegalArgumentException} will be thrown and no mutations will be applied. If there are
* any failures even after retries, a {@link RetriesExhaustedWithDetailsException} will be thrown.
* RetriesExhaustedWithDetailsException contains lists of failed mutations and corresponding
* remote exceptions. The ordering of mutations and exceptions in the encapsulating exception
* corresponds to the order of the input list of Put requests.
*
* @param puts
* The list of mutations to apply.
* @throws IOException
* if a remote or network exception occurs.
* @since 0.20.0
*/
default void put(List<Put> puts) throws IOException {
throw new NotImplementedException("Add an implementation!");
} | 3.26 |
hbase_Table_getRequestAttributes_rdh | /**
* Get the attributes to be submitted with requests
*
* @return map of request attributes
*/
default Map<String, byte[]> getRequestAttributes() {
throw new NotImplementedException("Add an implementation!");
} | 3.26 |
hbase_Table_getRpcTimeout_rdh | /**
* Get timeout of each rpc request in this Table instance. It will be overridden by a more
* specific rpc timeout config such as readRpcTimeout or writeRpcTimeout.
*
* @see #getReadRpcTimeout(TimeUnit)
* @see #getWriteRpcTimeout(TimeUnit)
* @param unit
* the unit of time the timeout to be represented in
* @return rpc timeout in the specified time unit
*/
default long getRpcTimeout(TimeUnit unit) {
throw new NotImplementedException("Add an implementation!");
} | 3.26 |
hbase_Table_getWriteRpcTimeout_rdh | /**
* Get timeout of each rpc write request in this Table instance.
*
* @param unit
* the unit of time the timeout to be represented in
* @return write rpc timeout in the specified time unit
*/
default long getWriteRpcTimeout(TimeUnit unit) {
throw new NotImplementedException("Add an implementation!");
} | 3.26 |
hbase_Table_increment_rdh | /**
* Increments one or more columns within a single row.
* <p>
* This operation ensures atomicity to readers. Increments are done under a single row lock, so
* write operations to a row are synchronized, and readers are guaranteed to see this operation
* fully completed.
*
* @param increment
* object that specifies the columns and amounts to be used for the increment
* operations
* @throws IOException
* e
* @return values of columns after the increment
*/
default Result increment(final Increment increment) throws IOException {
throw new NotImplementedException("Add an implementation!");
} | 3.26 |
hbase_Table_delete_rdh | /**
* Deletes the specified cells/row.
*
* @param delete
* The object that specifies what to delete.
* @throws IOException
* if a remote or network exception occurs.
* @since 0.20.0
*/
default void delete(Delete delete) throws IOException {
throw new NotImplementedException("Add an implementation!");
}
/**
* Batch Deletes the specified cells/rows from the table.
* <p>
 * If a specified row does not exist, the {@link Delete} will report as though it was a successful delete; no
* exception will be thrown. If there are any failures even after retries, a
* {@link RetriesExhaustedWithDetailsException} will be thrown.
* RetriesExhaustedWithDetailsException contains lists of failed {@link Delete}s and corresponding
* remote exceptions.
*
* @param deletes
* List of things to delete. The input list gets modified by this method. All
* successfully applied {@link Delete}s in the list are removed (in particular it
* gets re-ordered, so the order in which the elements are inserted in the list
* gives no guarantee as to the order in which the {@link Delete}s are executed).
* @throws IOException
* if a remote or network exception occurs. In that case the {@code deletes}
* argument will contain the {@link Delete} instances that have not be
* successfully applied.
* @since 0.20.1
* @apiNote In 3.0.0 version, the input list {@code deletes} will no longer be modified. Also,
{@link #put(List)} runs pre-flight validations on the input list on client. Currently
{@link #delete(List)} doesn't run validations on the client, there is no need
currently, but this may change in the future. An {@link IllegalArgumentException} | 3.26 |
hbase_Table_incrementColumnValue_rdh | /**
* Atomically increments a column value. If the column value already exists and is not a
* big-endian long, this could throw an exception. If the column value does not yet exist it is
* initialized to <code>amount</code> and written to the specified column.
* <p>
* Setting durability to {@link Durability#SKIP_WAL} means that in a fail scenario you will lose
* any increments that have not been flushed.
*
* @param row
* The row that contains the cell to increment.
* @param family
* The column family of the cell to increment.
* @param qualifier
* The column qualifier of the cell to increment.
* @param amount
* The amount to increment the cell with (or decrement, if the amount is
* negative).
* @param durability
* The persistence guarantee for this increment.
* @return The new value, post increment.
* @throws IOException
* if a remote or network exception occurs.
*/
default long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount, Durability durability) throws IOException {
Increment v2 = new Increment(row).addColumn(family, qualifier, amount).setDurability(durability);
Cell cell = increment(v2).getColumnLatestCell(family, qualifier);
return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
} | 3.26 |
hbase_Table_getScanner_rdh | /**
* Gets a scanner on the current table for the given family and qualifier.
*
* @param family
* The column family to scan.
* @param qualifier
* The column qualifier to scan.
* @return A scanner.
* @throws IOException
* if a remote or network exception occurs.
* @since 0.20.0
*/
default ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException {
throw new NotImplementedException("Add an implementation!");
} | 3.26 |
hbase_Table_getReadRpcTimeout_rdh | /**
* Get timeout of each rpc read request in this Table instance.
*
* @param unit
* the unit of time the timeout to be represented in
* @return read rpc timeout in the specified time unit
*/
default long getReadRpcTimeout(TimeUnit unit) {
throw new NotImplementedException("Add an implementation!");
} | 3.26 |
hbase_ReflectedFunctionCache_notFound_rdh | /**
* In order to use computeIfAbsent, we can't store nulls in our cache. So we store a lambda which
* resolves to null. The contract is that getAndCallByName returns null in this case.
*/
private R notFound(I argument) {
return null;
} | 3.26 |
hbase_ReflectedFunctionCache_getAndCallByName_rdh | /**
* Get and execute the Function for the given className, passing the argument to the function and
* returning the result.
*
* @param className
* the full name of the class to lookup
* @param argument
* the argument to pass to the function, if found.
* @return null if a function is not found for classname, otherwise the result of the function.
*/
@Nullable
public R getAndCallByName(String className, I argument) {
// todo: if we ever make java9+ our lowest supported jdk version, we can
// handle generating these for newly loaded classes from our DynamicClassLoader using
// MethodHandles.privateLookupIn(). For now this is not possible, because we can't easily
// create a privileged lookup in a non-default ClassLoader. So while this cache loads
// over time, it will never load a custom filter from "hbase.dynamic.jars.dir".
Function<I, ? extends R> lambda = ConcurrentMapUtils.computeIfAbsent(lambdasByClass, className, () -> loadFunction(className));
return lambda.apply(argument);
} | 3.26 |
hbase_OrderedBlob_encode_rdh | /**
* Write a subset of {@code val} to {@code dst}.
*
* @param dst
* the {@link PositionedByteRange} to write to
* @param val
* the value to write to {@code dst}
* @param voff
* the offset in {@code dst} where to write {@code val} to
* @param vlen
 * the length of {@code val}
* @return the number of bytes written
*/
public int encode(PositionedByteRange dst, byte[] val, int voff, int vlen) {
return OrderedBytes.encodeBlobCopy(dst, val, voff, vlen, order);
} | 3.26 |
hbase_PrivateCellUtil_writeRowKeyExcludingCommon_rdh | /**
* Write rowkey excluding the common part.
*/
public static void writeRowKeyExcludingCommon(Cell cell, short rLen, int commonPrefix, DataOutputStream out) throws IOException {
if (commonPrefix == 0) {
out.writeShort(rLen);
} else if (commonPrefix == 1) {
out.writeByte(((byte) (rLen)));
commonPrefix--;
} else {
commonPrefix -= KeyValue.ROW_LENGTH_SIZE;
}
if (rLen > commonPrefix) {
writeRowSkippingBytes(out, cell, rLen, commonPrefix);
}
} | 3.26 |
hbase_PrivateCellUtil_createNextOnRowCol_rdh | /**
 * Return a new cell that is located following the input cell. If both of type and timestamp are minimum,
* the input cell will be returned directly.
*/
public static Cell createNextOnRowCol(Cell cell) {
long ts = cell.getTimestamp();
byte type = cell.getTypeByte();
if (type != Type.Minimum.getCode()) {
type = KeyValue.Type.values()[KeyValue.Type.codeToType(type).ordinal() - 1].getCode();
} else if (ts != PrivateConstants.OLDEST_TIMESTAMP) {
ts = ts - 1;
type = Type.Maximum.getCode();
} else {
return cell;
}
return createNextOnRowCol(cell, ts, type);
} | 3.26 |
hbase_PrivateCellUtil_tagsIterator_rdh | /**
* Utility method to iterate through the tags in the given cell.
*
* @param cell
* The Cell over which tags iterator is needed.
* @return iterator for the tags
*/
public static Iterator<Tag> tagsIterator(final Cell cell) {
final int tagsLength = cell.getTagsLength();
// Save an object allocation where we can
if (tagsLength == 0) {
return TagUtil.EMPTY_TAGS_ITR;
}
if (cell instanceof ByteBufferExtendedCell) {
return tagsIterator(((ByteBufferExtendedCell) (cell)).getTagsByteBuffer(), ((ByteBufferExtendedCell) (cell)).getTagsPosition(), tagsLength);
}
return new Iterator<Tag>() {
private int offset = cell.getTagsOffset();
private int pos = offset;
private int endOffset = (offset + cell.getTagsLength()) - 1;
@Override
public boolean hasNext() {
return this.pos < endOffset;
}
@Override
public Tag next() {
if (hasNext()) {
byte[] tags = cell.getTagsArray();
int curTagLen = Bytes.readAsInt(tags, this.pos, Tag.TAG_LENGTH_SIZE);
Tag v37 = new ArrayBackedTag(tags, pos, curTagLen + TAG_LENGTH_SIZE);
this.pos += Bytes.SIZEOF_SHORT + curTagLen;
return v37;
}
return null;
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
} | 3.26 |
hbase_PrivateCellUtil_createFirstOnRowColTS_rdh | /**
* Creates the first cell with the row/family/qualifier of this cell and the given timestamp. Uses
* the "maximum" type that guarantees that the new cell is the lowest possible for this
* combination of row, family, qualifier, and timestamp. This cell's own timestamp is ignored.
*
* @param cell
* - cell
*/
public static Cell createFirstOnRowColTS(Cell cell, long ts) {
if (cell instanceof ByteBufferExtendedCell) {
return new FirstOnRowColTSByteBufferExtendedCell(((ByteBufferExtendedCell) (cell)).getRowByteBuffer(), ((ByteBufferExtendedCell) (cell)).getRowPosition(), cell.getRowLength(), ((ByteBufferExtendedCell) (cell)).getFamilyByteBuffer(), ((ByteBufferExtendedCell) (cell)).getFamilyPosition(), cell.getFamilyLength(), ((ByteBufferExtendedCell) (cell)).getQualifierByteBuffer(),
((ByteBufferExtendedCell) (cell)).getQualifierPosition(), cell.getQualifierLength(), ts);
}
return new FirstOnRowColTSCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), ts);
} | 3.26 |
hbase_PrivateCellUtil_createLastOnRow_rdh | /**
* Create a Cell that is larger than all other possible Cells for the given Cell's row.
*
* @return Last possible Cell on passed Cell's row.
*/
public static Cell createLastOnRow(final Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
return new LastOnRowByteBufferExtendedCell(((ByteBufferExtendedCell) (cell)).getRowByteBuffer(), ((ByteBufferExtendedCell) (cell)).getRowPosition(), cell.getRowLength());
}
return new LastOnRowCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
} | 3.26 |
hbase_PrivateCellUtil_getRowByte_rdh | /**
* ******************* misc ************************************
*/
public static byte getRowByte(Cell cell, int index) {
if (cell instanceof ByteBufferExtendedCell) {
return ((ByteBufferExtendedCell) (cell)).getRowByteBuffer().get(((ByteBufferExtendedCell) (cell)).getRowPosition() + index);
}
return cell.getRowArray()[cell.getRowOffset() + index];
} | 3.26 |
hbase_PrivateCellUtil_deepClone_rdh | /**
* Deep clones the given cell if the cell supports deep cloning
*
* @param cell
* the cell to be cloned
* @return the cloned cell
*/
public static Cell deepClone(Cell cell) throws CloneNotSupportedException {
if (cell instanceof ExtendedCell) {
return ((ExtendedCell) (cell)).deepClone();
}
throw new CloneNotSupportedException();
} | 3.26 |
hbase_PrivateCellUtil_compareValue_rdh | /**
* Compare cell's value against given comparator
*
* @param cell
* the cell to use for comparison
* @param comparator
* the {@link CellComparator} to use for comparison
* @return result comparing cell's value
*/
public static int compareValue(Cell cell, ByteArrayComparable comparator) {
if (cell instanceof ByteBufferExtendedCell) {
return comparator.compareTo(((ByteBufferExtendedCell) (cell)).getValueByteBuffer(), ((ByteBufferExtendedCell) (cell)).getValuePosition(), cell.getValueLength());
}
return comparator.compareTo(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
} | 3.26 |
hbase_PrivateCellUtil_createLastOnRowCol_rdh | /**
* Create a Cell that is larger than all other possible Cells for the given Cell's rk:cf:q. Used
* in creating "fake keys" for the multi-column Bloom filter optimization to skip the row/column
* we already know is not in the file.
*
* @return Last possible Cell on passed Cell's rk:cf:q.
*/
public static Cell createLastOnRowCol(final Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
return new LastOnRowColByteBufferExtendedCell(((ByteBufferExtendedCell) (cell)).getRowByteBuffer(), ((ByteBufferExtendedCell) (cell)).getRowPosition(), cell.getRowLength(), ((ByteBufferExtendedCell) (cell)).getFamilyByteBuffer(), ((ByteBufferExtendedCell) (cell)).getFamilyPosition(),
cell.getFamilyLength(), ((ByteBufferExtendedCell) (cell)).getQualifierByteBuffer(), ((ByteBufferExtendedCell) (cell)).getQualifierPosition(),
cell.getQualifierLength());
}
return new LastOnRowColCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength());
} | 3.26 |
hbase_PrivateCellUtil_writeValue_rdh | /**
* Writes the value from the given cell to the output stream
*
* @param out
* The outputstream to which the data has to be written
* @param cell
* The cell whose contents has to be written
* @param vlength
* the value length
*/
public static void writeValue(OutputStream out, Cell cell, int vlength) throws IOException {
if (cell instanceof ByteBufferExtendedCell) {
ByteBufferUtils.copyBufferToStream(out, ((ByteBufferExtendedCell) (cell)).getValueByteBuffer(), ((ByteBufferExtendedCell) (cell)).getValuePosition(), vlength);
} else {
out.write(cell.getValueArray(), cell.getValueOffset(), vlength);
}
} | 3.26 |
hbase_PrivateCellUtil_compareRow_rdh | /**
* Compare cell's row against given comparator
*
* @param cell
* the cell to use for comparison
* @param comparator
* the {@link CellComparator} to use for comparison
* @return result comparing cell's row
 */
public static int compareRow(Cell cell, ByteArrayComparable comparator) {
if (cell instanceof ByteBufferExtendedCell) {
return comparator.compareTo(((ByteBufferExtendedCell) (cell)).getRowByteBuffer(), ((ByteBufferExtendedCell) (cell)).getRowPosition(), cell.getRowLength());
}
return comparator.compareTo(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
} | 3.26 |
hbase_PrivateCellUtil_getTag_rdh | /**
* Retrieve Cell's first tag, matching the passed in type
*
* @param cell
* The Cell
* @param type
* Type of the Tag to retrieve
* @return Optional, empty if there is no tag of the passed in tag type
*/
public static Optional<Tag> getTag(Cell cell, byte type) {
boolean v28 = cell instanceof ByteBufferExtendedCell;
int length = cell.getTagsLength();
int offset = (v28) ? ((ByteBufferExtendedCell) (cell)).getTagsPosition() : cell.getTagsOffset();
int pos = offset;
while (pos < (offset + length)) {
int tagLen;
if (v28) {
ByteBuffer tagsBuffer = ((ByteBufferExtendedCell) (cell)).getTagsByteBuffer();
tagLen = ByteBufferUtils.readAsInt(tagsBuffer, pos, TAG_LENGTH_SIZE);
if (ByteBufferUtils.toByte(tagsBuffer, pos + TAG_LENGTH_SIZE) == type) {
return Optional.of(new ByteBufferTag(tagsBuffer, pos, tagLen + TAG_LENGTH_SIZE));
}
} else {
tagLen = Bytes.readAsInt(cell.getTagsArray(), pos, TAG_LENGTH_SIZE);
if (cell.getTagsArray()[pos + TAG_LENGTH_SIZE] == type) {
return Optional.of(new ArrayBackedTag(cell.getTagsArray(), pos, tagLen + TAG_LENGTH_SIZE));
}
}
pos += TAG_LENGTH_SIZE + tagLen;
}
return Optional.empty();
} | 3.26 |
hbase_PrivateCellUtil_writeQualifier_rdh | /**
* Writes the qualifier from the given cell to the output stream
*
* @param out
* The outputstream to which the data has to be written
* @param cell
* The cell whose contents has to be written
* @param qlength
* the qualifier length
*/
public static void writeQualifier(OutputStream out, Cell cell, int qlength) throws IOException {
if (cell instanceof ByteBufferExtendedCell) {
ByteBufferUtils.copyBufferToStream(out, ((ByteBufferExtendedCell) (cell)).getQualifierByteBuffer(), ((ByteBufferExtendedCell) (cell)).getQualifierPosition(), qlength);
} else {
out.write(cell.getQualifierArray(), cell.getQualifierOffset(), qlength);
}
} | 3.26 |
hbase_PrivateCellUtil_setTimestamp_rdh | /**
* Sets the given timestamp to the cell.
*
* @throws IOException
* when the passed cell is not of type {@link ExtendedCell}
*/
public static void setTimestamp(Cell cell, byte[] ts) throws IOException {
if (cell instanceof ExtendedCell) {
((ExtendedCell) (cell)).setTimestamp(ts);
} else {
throw new IOException(new UnsupportedOperationException("Cell is not of type " + ExtendedCell.class.getName()));
}
} | 3.26 |
hbase_PrivateCellUtil_compressTags_rdh | /**
 * Compresses the tags to the given outputstream using the TagCompressionContext
*
* @param out
* the outputstream to which the compression should happen
* @param cell
* the cell which has tags
* @param tagCompressionContext
* the TagCompressionContext
* @throws IOException
* can throw IOException if the compression encounters issue
*/
public static void compressTags(OutputStream out, Cell cell, TagCompressionContext tagCompressionContext) throws IOException {
if (cell instanceof ByteBufferExtendedCell) {
tagCompressionContext.compressTags(out, ((ByteBufferExtendedCell) (cell)).getTagsByteBuffer(), ((ByteBufferExtendedCell) (cell)).getTagsPosition(), cell.getTagsLength());
} else {
tagCompressionContext.compressTags(out, cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength());
}
} | 3.26 |
hbase_PrivateCellUtil_estimatedSerializedSizeOfKey_rdh | /**
* Calculates the serialized key size. We always serialize in the KeyValue's serialization format.
*
* @param cell
* the cell for which the key size has to be calculated.
* @return the key size
*/
public static int estimatedSerializedSizeOfKey(final Cell cell) {
if (cell instanceof KeyValue) return ((KeyValue) (cell)).getKeyLength();
return ((cell.getRowLength() + cell.getFamilyLength()) + cell.getQualifierLength()) + KeyValue.KEY_INFRASTRUCTURE_SIZE;
} | 3.26 |
hbase_PrivateCellUtil_createFirstOnRowCol_rdh | /**
* Create a Cell that is smaller than all other possible Cells for the given Cell's rk:cf and
* passed qualifier.
*
 * @return First possible Cell on passed Cell's rk:cf and passed qualifier.
*/
public static Cell createFirstOnRowCol(final Cell cell, byte[] qArray, int qoffest, int qlength) {
if (cell instanceof ByteBufferExtendedCell) {
return new FirstOnRowColByteBufferExtendedCell(((ByteBufferExtendedCell) (cell)).getRowByteBuffer(), ((ByteBufferExtendedCell) (cell)).getRowPosition(), cell.getRowLength(), ((ByteBufferExtendedCell) (cell)).getFamilyByteBuffer(), ((ByteBufferExtendedCell) (cell)).getFamilyPosition(), cell.getFamilyLength(), ByteBuffer.wrap(qArray), qoffest, qlength);
}
return new FirstOnRowColCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), qArray, qoffest, qlength);
} | 3.26 |
hbase_PrivateCellUtil_copyTagsTo_rdh | /**
* Copies the tags info into the tag portion of the cell
*/
public static int copyTagsTo(Cell cell, ByteBuffer destination, int destinationOffset) {
int v25 = cell.getTagsLength();
if (cell instanceof ByteBufferExtendedCell) {
ByteBufferUtils.copyFromBufferToBuffer(((ByteBufferExtendedCell) (cell)).getTagsByteBuffer(), destination, ((ByteBufferExtendedCell) (cell)).getTagsPosition(), destinationOffset, v25);
} else {
ByteBufferUtils.copyFromArrayToBuffer(destination, destinationOffset, cell.getTagsArray(), cell.getTagsOffset(), v25);
}
return destinationOffset + v25;
} | 3.26 |
hbase_PrivateCellUtil_write_rdh | /**
* Made into a static method so as to reuse the logic within
* ValueAndTagRewriteByteBufferExtendedCell
*/
static void write(ByteBuffer buf, int offset, Cell cell, byte[] value, byte[] tags) {
offset = ByteBufferUtils.putInt(buf, offset, KeyValueUtil.keyLength(cell));// Key length
offset = ByteBufferUtils.putInt(buf, offset, value.length);// Value length
offset = KeyValueUtil.appendKeyTo(cell, buf, offset);
ByteBufferUtils.copyFromArrayToBuffer(buf, offset, value, 0, value.length);
offset += value.length;
int v14 = (tags == null) ? 0 : tags.length;
if (v14 > 0) {
offset = ByteBufferUtils.putAsShort(buf, offset, v14);
ByteBufferUtils.copyFromArrayToBuffer(buf, offset, tags, 0, v14);
}
} | 3.26 |
hbase_PrivateCellUtil_createFirstOnRow_rdh | /**
* Create a Cell that is smaller than all other possible Cells for the given Cell's row.
*
* @return First possible Cell on passed Cell's row.
*/
public static Cell createFirstOnRow(final Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
return new FirstOnRowByteBufferExtendedCell(((ByteBufferExtendedCell) (cell)).getRowByteBuffer(), ((ByteBufferExtendedCell) (cell)).getRowPosition(), cell.getRowLength());
}
return new FirstOnRowCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
} | 3.26 |
hbase_PrivateCellUtil_writeFamily_rdh | /**
* Writes the family from the given cell to the output stream
*
* @param out
* The outputstream to which the data has to be written
* @param cell
* The cell whose contents has to be written
* @param flength
* the family length
*/
public static void writeFamily(OutputStream out, Cell cell, byte flength) throws IOException {
if (cell instanceof ByteBufferExtendedCell) {
ByteBufferUtils.copyBufferToStream(out, ((ByteBufferExtendedCell) (cell)).getFamilyByteBuffer(), ((ByteBufferExtendedCell) (cell)).getFamilyPosition(), flength);
} else {
out.write(cell.getFamilyArray(), cell.getFamilyOffset(), flength);
}
} | 3.26 |
hbase_PrivateCellUtil_getValueAsLong_rdh | /**
* Converts the value bytes of the given cell into a long value
*
* @return value as long
*/
public static long getValueAsLong(Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
return ByteBufferUtils.toLong(((ByteBufferExtendedCell) (cell)).getValueByteBuffer(), ((ByteBufferExtendedCell) (cell)).getValuePosition());
}
return Bytes.toLong(cell.getValueArray(), cell.getValueOffset());
} | 3.26 |
hbase_PrivateCellUtil_createCell_rdh | /**
* Returns A new cell which is having the extra tags also added to it.
*/
public static Cell createCell(Cell cell, byte[] tags) {
if (cell instanceof ByteBufferExtendedCell) {
return new TagRewriteByteBufferExtendedCell(((ByteBufferExtendedCell) (cell)), tags);
}
return new TagRewriteCell(cell, tags);
} | 3.26 |
hbase_PrivateCellUtil_equalsIgnoreMvccVersion_rdh | /**
* special case for Cell.equals
*/
public static boolean equalsIgnoreMvccVersion(Cell a, Cell b) {
// row
boolean res = CellUtil.matchingRows(a, b);
if (!res) return res;
// family
res = CellUtil.matchingColumn(a, b);
if (!res) return res;
// timestamp: later sorts first
if (!CellUtil.matchingTimestamp(a, b)) return false;
// type
int c = (0xff & b.getTypeByte()) - (0xff & a.getTypeByte());
if (c != 0) return false;
else return true;
} | 3.26 |
hbase_PrivateCellUtil_writeCell_rdh | /**
* Writes the cell to the given OutputStream
*
* @param cell
* the cell to be written
* @param out
* the outputstream
* @param withTags
* if tags are to be written or not
* @return the total bytes written
*/
public static int writeCell(Cell cell, OutputStream out, boolean withTags) throws IOException {
if (cell instanceof ExtendedCell) {
return ((ExtendedCell) (cell)).write(out, withTags);
} else {
ByteBufferUtils.putInt(out, estimatedSerializedSizeOfKey(cell));
ByteBufferUtils.putInt(out, cell.getValueLength());
writeFlatKey(cell, out);
writeValue(out, cell, cell.getValueLength());
int tagsLength = cell.getTagsLength();
if (withTags) {
byte[] len = new byte[Bytes.SIZEOF_SHORT];
Bytes.putAsShort(len, 0, tagsLength);
out.write(len);
if (tagsLength > 0) {
writeTags(out, cell, tagsLength);
}
}
int lenWritten = ((2 * Bytes.SIZEOF_INT) + estimatedSerializedSizeOfKey(cell)) + cell.getValueLength();
if (withTags) {
lenWritten += Bytes.SIZEOF_SHORT + tagsLength;
}
return lenWritten;
}
} | 3.26 |
hbase_PrivateCellUtil_updateLatestStamp_rdh | /**
* Sets the given timestamp to the cell iff current timestamp is
* {@link HConstants#LATEST_TIMESTAMP}.
*
* @return True if cell timestamp is modified.
* @throws IOException
* when the passed cell is not of type {@link ExtendedCell}
*/
public static boolean updateLatestStamp(Cell cell, byte[] ts) throws IOException {
if (cell.getTimestamp() == HConstants.LATEST_TIMESTAMP) {
setTimestamp(cell, ts);
return true;
}
return false;
} | 3.26 |
hbase_PrivateCellUtil_getValueAsDouble_rdh | /**
* Converts the value bytes of the given cell into a double value
*
* @return value as double
*/
public static double getValueAsDouble(Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
return ByteBufferUtils.toDouble(((ByteBufferExtendedCell) (cell)).getValueByteBuffer(), ((ByteBufferExtendedCell) (cell)).getValuePosition());
}
return Bytes.toDouble(cell.getValueArray(), cell.getValueOffset());
} | 3.26 |
hbase_PrivateCellUtil_getTags_rdh | /**
* Return tags in the given Cell as a List
*
* @param cell
* The Cell
* @return Tags in the given Cell as a List
*/
public static List<Tag> getTags(Cell cell) {
List<Tag> tags = new ArrayList<>();
Iterator<Tag> tagsItr = tagsIterator(cell);
while (tagsItr.hasNext()) {
tags.add(tagsItr.next());
}
return tags;
} | 3.26 |
hbase_PrivateCellUtil_fillRowRange_rdh | /**
* ***************** ByteRange ******************************
*/
public static ByteRange fillRowRange(Cell cell, ByteRange range) {
return range.set(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
} | 3.26 |
hbase_PrivateCellUtil_createFirstDeleteFamilyCellOnRow_rdh | /**
* Create a Delete Family Cell for the specified row and family that would be smaller than all
* other possible Delete Family KeyValues that have the same row and family. Used for seeking.
*
* @param row
* - row key (arbitrary byte array)
* @param fam
* - family name
* @return First Delete Family possible key on passed <code>row</code>.
*/
public static Cell createFirstDeleteFamilyCellOnRow(final byte[] row, final byte[] fam) {
return new FirstOnRowDeleteFamilyCell(row, fam);
} | 3.26 |