name (string, length 12–178) | code_snippet (string, length 8–36.5k) | score (float64, 3.26–3.68)
---|---|---|
hbase_DependentColumnFilter_parseFrom_rdh | /**
* Parse a serialized representation of {@link DependentColumnFilter}
*
* @param pbBytes
* A pb serialized {@link DependentColumnFilter} instance
* @return An instance of {@link DependentColumnFilter} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static DependentColumnFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.DependentColumnFilter proto;
try {
proto = FilterProtos.DependentColumnFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
final CompareOperator valueCompareOp = CompareOperator.valueOf(proto.getCompareFilter().getCompareOp().name());
ByteArrayComparable valueComparator = null;
try {
if (proto.getCompareFilter().hasComparator()) {
valueComparator = ProtobufUtil.toComparator(proto.getCompareFilter().getComparator());
}
} catch (IOException ioe) {
throw new DeserializationException(ioe);
}
return new DependentColumnFilter(proto.hasColumnFamily() ? proto.getColumnFamily().toByteArray() : null, proto.hasColumnQualifier() ?
proto.getColumnQualifier().toByteArray() : null, proto.getDropDependentColumn(), valueCompareOp, valueComparator);
} | 3.26 |
hbase_DependentColumnFilter_toByteArray_rdh | /**
* Returns the filter serialized using pb
*/
@Override
public byte[] toByteArray() {
FilterProtos.DependentColumnFilter.Builder builder = FilterProtos.DependentColumnFilter.newBuilder();
builder.setCompareFilter(super.convert());
if (this.columnFamily != null) {
builder.setColumnFamily(UnsafeByteOperations.unsafeWrap(this.columnFamily));
}
if (this.columnQualifier != null) {
builder.setColumnQualifier(UnsafeByteOperations.unsafeWrap(this.columnQualifier));
}
builder.setDropDependentColumn(this.dropDependentColumn);
return builder.build().toByteArray();
} | 3.26 |
hbase_DependentColumnFilter_dropDependentColumn_rdh | /**
* Returns true if we should drop the dependent column, false otherwise
*/
public boolean dropDependentColumn() {
return this.dropDependentColumn;
} | 3.26 |
hbase_DependentColumnFilter_getFamily_rdh | /**
* Returns the column family
*/
public byte[] getFamily() {
return this.columnFamily;
} | 3.26 |
hbase_Client_executePathOnly_rdh | /**
* Execute a transaction method given only the path. Will select at random one of the members of
* the supplied cluster definition and iterate through the list until a transaction can be
* successfully completed. The definition of success here is a complete HTTP transaction,
* irrespective of result code.
*
* @param cluster
* the cluster definition
* @param method
* the transaction method
* @param headers
* HTTP header values to send
* @param path
* the properly urlencoded path
* @return the HTTP response code
*/
public HttpResponse executePathOnly(Cluster cluster, HttpUriRequest method, Header[] headers, String path) throws IOException {
IOException lastException;
if (cluster.nodes.size() < 1) {
throw new IOException("Cluster is empty");
}
int start = ((int) (Math.round((cluster.nodes.size() - 1) * Math.random())));
int i = start;
do {
cluster.lastHost = cluster.nodes.get(i);
try {
StringBuilder sb = new StringBuilder();
if (sslEnabled) {
sb.append("https://");
} else {
sb.append("http://");
}
sb.append(cluster.lastHost);
sb.append(path);
URI uri = new URI(sb.toString());
if (method instanceof HttpPut) {
HttpPut put = new HttpPut(uri);
put.setEntity(((HttpPut) (method)).getEntity());
put.setHeaders(method.getAllHeaders());
method = put;
} else if (method instanceof HttpGet) {
method = new HttpGet(uri);
} else if (method instanceof HttpHead) {
method = new HttpHead(uri);
} else if (method instanceof HttpDelete) {
method = new HttpDelete(uri);
} else if (method instanceof HttpPost) {
HttpPost post = new HttpPost(uri);
post.setEntity(((HttpPost) (method)).getEntity());
post.setHeaders(method.getAllHeaders());
method = post;
}
return executeURI(method, headers, uri.toString());
} catch (IOException e) {
lastException = e;
} catch (URISyntaxException use) {
lastException = new IOException(use);
}
} while (((++i) != start) && (i < cluster.nodes.size()) );
throw lastException;
} | 3.26 |
hbase_Client_getCluster_rdh | /**
* Returns the cluster definition
*/
public Cluster getCluster() {
return cluster;
} | 3.26 |
hbase_Client_put_rdh | /**
* Send a PUT request
*
* @param cluster
* the cluster definition
* @param path
* the path or URI
* @param headers
* the HTTP headers to include, <tt>Content-Type</tt> must be supplied
* @param content
* the content bytes
* @return a Response object with response detail
*/
public Response put(Cluster cluster, String path, Header[] headers, byte[] content) throws IOException {
HttpPut method = new HttpPut(path);
try {
method.setEntity(new InputStreamEntity(new ByteArrayInputStream(content), content.length));
HttpResponse resp = execute(cluster, method, headers, path);
headers = resp.getAllHeaders();
content = getResponseBody(resp);
return new Response(resp.getStatusLine().getStatusCode(), headers, content);
} finally {
method.releaseConnection();
}
} | 3.26 |
hbase_Client_post_rdh | /**
* Send a POST request
*
* @param cluster
* the cluster definition
* @param path
* the path or URI
* @param headers
* the HTTP headers to include, <tt>Content-Type</tt> must be supplied
* @param content
* the content bytes
* @return a Response object with response detail
*/
public Response post(Cluster cluster, String path, Header[] headers, byte[] content) throws IOException {
HttpPost method = new HttpPost(path);
try {
method.setEntity(new InputStreamEntity(new ByteArrayInputStream(content), content.length));
HttpResponse resp = execute(cluster, method, headers, path);
headers = resp.getAllHeaders();
content = getResponseBody(resp);
return new Response(resp.getStatusLine().getStatusCode(), headers, content);
} finally {
method.releaseConnection();
}
} | 3.26 |
hbase_Client_delete_rdh | /**
* Send a DELETE request
*
* @param cluster
* the cluster definition
* @param path
* the path or URI
* @return a Response object with response detail
* @throws IOException
* for error
*/
public Response delete(Cluster cluster, String path, Header extraHdr) throws IOException {
HttpDelete method = new HttpDelete(path);
try {
Header[] headers = new Header[]{ extraHdr };
HttpResponse resp = execute(cluster, method, headers, path);
headers = resp.getAllHeaders();
byte[] content = getResponseBody(resp);
return new Response(resp.getStatusLine().getStatusCode(), headers, content);
} finally {
method.releaseConnection();
}
} | 3.26 |
hbase_Client_head_rdh | /**
* Send a HEAD request
*
* @param path
* the path or URI
* @return a Response object with response detail
*/
public Response head(String path) throws IOException {
return m0(cluster, path, null);
} | 3.26 |
hbase_Client_executeURI_rdh | /**
* Execute a transaction method given a complete URI.
*
* @param method
* the transaction method
* @param headers
* HTTP header values to send
* @param uri
* a properly urlencoded URI
* @return the HTTP response code
*/
public HttpResponse executeURI(HttpUriRequest method, Header[] headers, String uri) throws IOException {
// method.setURI(new URI(uri, true));
for (Map.Entry<String, String> e : extraHeaders.entrySet()) {
method.addHeader(e.getKey(), e.getValue());
}
if (headers != null) {
for (Header header : headers) {
method.addHeader(header);
}
}
long startTime = EnvironmentEdgeManager.currentTime();
if (resp != null) EntityUtils.consumeQuietly(resp.getEntity());
resp = httpClient.execute(method);
if (resp.getStatusLine().getStatusCode() == HttpStatus.SC_UNAUTHORIZED) {
// Authentication error
LOG.debug("Performing negotiation with the server.");
negotiate(method, uri);
resp = httpClient.execute(method);
}
long endTime = EnvironmentEdgeManager.currentTime();
if (LOG.isTraceEnabled()) {
LOG.trace(method.getMethod() + " " + uri + " " + resp.getStatusLine().getStatusCode() + " "
+ resp.getStatusLine().getReasonPhrase() + " in " + (endTime - startTime) + " ms");
}
return resp;
} | 3.26 |
hbase_Client_setCluster_rdh | /**
*
* @param cluster
* the cluster definition
*/
public void setCluster(Cluster cluster) {
this.cluster = cluster;
} | 3.26 |
hbase_Client_m1_rdh | /**
* Send a POST request
*
* @param cluster
* the cluster definition
* @param path
* the path or URI
* @param contentType
* the content MIME type
* @param content
* the content bytes
* @param extraHdr
* additional Header to send
* @return a Response object with response detail
* @throws IOException
* for error
*/
public Response m1(Cluster cluster, String path, String contentType, byte[] content, Header extraHdr) throws IOException {
int cnt = (extraHdr == null) ? 1 : 2;
Header[] headers = new Header[cnt];
headers[0] = new BasicHeader("Content-Type", contentType);
if (extraHdr != null) {
headers[1] = extraHdr;
}
return post(cluster, path, headers, content);
} | 3.26 |
hbase_Client_shutdown_rdh | /**
* Shut down the client. Close any open persistent connections.
*/
public void shutdown() {
} | 3.26 |
hbase_Client_getHttpClient_rdh | /**
* Returns the wrapped HttpClient
*/
public HttpClient getHttpClient() {
return httpClient;
} | 3.26 |
hbase_Client_addExtraHeader_rdh | /**
* Add extra headers. These extra headers will be applied to all http methods before they are
* removed. If any header is not used any more, client needs to remove it explicitly.
*/
public void addExtraHeader(final String name, final String value) {
extraHeaders.put(name, value);
} | 3.26 |
hbase_Client_negotiate_rdh | /**
* Initiate client side Kerberos negotiation with the server.
*
* @param method
* method to inject the authentication token into.
* @param uri
* the String to parse as a URL.
* @throws IOException
* if unknown protocol is found.
*/
private void negotiate(HttpUriRequest method, String uri) throws IOException {
try {
AuthenticatedURL.Token token = new AuthenticatedURL.Token();
KerberosAuthenticator authenticator = new KerberosAuthenticator();
authenticator.authenticate(new URL(uri), token);
// Inject the obtained negotiated token in the method cookie
injectToken(method, token);
} catch (AuthenticationException e) {
LOG.error("Failed to negotiate with the server.", e);
throw new IOException(e);
}
} | 3.26 |
hbase_Client_get_rdh | /**
* Send a GET request
*
* @param c
* the cluster definition
* @param path
* the path or URI
* @param headers
* the HTTP headers to include in the request
* @return a Response object with response detail
*/
public Response get(Cluster c, String path, Header[] headers) throws IOException {
if (httpGet != null) {
httpGet.releaseConnection();
}
httpGet = new HttpGet(path);
HttpResponse resp = execute(c, httpGet, headers, path);
return new Response(resp.getStatusLine().getStatusCode(), resp.getAllHeaders(), resp, resp.getEntity() == null ? null : resp.getEntity().getContent());
} | 3.26 |
hbase_Client_getExtraHeaders_rdh | /**
* Get all extra headers (read-only).
*/
public Map<String, String> getExtraHeaders() {
return Collections.unmodifiableMap(extraHeaders);
} | 3.26 |
hbase_Client_injectToken_rdh | /**
* Helper method that injects an authentication token to send with the method.
*
* @param method
* method to inject the authentication token into.
* @param token
* authentication token to inject.
*/
private void injectToken(HttpUriRequest method, AuthenticatedURL.Token token) {
String t = token.toString();
if (t != null) {
if (!t.startsWith("\"")) {
t = ("\"" + t) + "\"";
}
method.addHeader(COOKIE, AUTH_COOKIE_EQ + t);
}
} | 3.26 |
hbase_Client_execute_rdh | /**
* Execute a transaction method. Will call either <tt>executePathOnly</tt> or <tt>executeURI</tt>
* depending on whether a path only is supplied in 'path', or if a complete URI is passed instead,
* respectively.
*
* @param cluster
* the cluster definition
* @param method
* the HTTP method
* @param headers
* HTTP header values to send
* @param path
* the properly urlencoded path or URI
* @return the HTTP response code
*/
public HttpResponse execute(Cluster cluster, HttpUriRequest method, Header[] headers, String path) throws IOException {
if (path.startsWith("/")) {
return executePathOnly(cluster, method, headers, path);
}
return executeURI(method, headers, path);
} | 3.26 |
hbase_Client_getExtraHeader_rdh | /**
* Get an extra header value.
*/
public String getExtraHeader(final String name) {
return extraHeaders.get(name);
} | 3.26 |
hbase_Client_m0_rdh | /**
* Send a HEAD request
*
* @param cluster
* the cluster definition
* @param path
* the path or URI
* @param headers
* the HTTP headers to include in the request
* @return a Response object with response detail
*/
public Response m0(Cluster cluster, String path, Header[] headers) throws IOException {
HttpHead method = new HttpHead(path);
try {
HttpResponse resp = execute(cluster, method, null, path);
return new Response(resp.getStatusLine().getStatusCode(), resp.getAllHeaders(), null);
} finally {
method.releaseConnection();
}
} | 3.26 |
hbase_Client_removeExtraHeader_rdh | /**
* Remove an extra header.
*/
public void removeExtraHeader(final String name) {
extraHeaders.remove(name);
} | 3.26 |
hbase_MonitoredRPCHandlerImpl_getClient_rdh | /**
* If an RPC call is currently running, produces a String representation of the connection from
* which it was received.
*
* @return A human-readable string representation of the address and port of the client.
*/
@Override
public String getClient() {
return (clientAddress + ":") + remotePort;
} | 3.26 |
hbase_MonitoredRPCHandlerImpl_isRPCRunning_rdh | /**
* Indicates to the client whether this task is monitoring a currently active RPC call.
*
* @return true if the monitored handler is currently servicing an RPC call.
*/
@Override
public boolean isRPCRunning() {
return getState() == State.RUNNING;
} | 3.26 |
hbase_MonitoredRPCHandlerImpl_setRPC_rdh | /**
* Tells this instance that it is monitoring a new RPC call.
*
* @param methodName
* The name of the method that will be called by the RPC.
* @param params
* The parameters that will be passed to the indicated method.
*/
@Override
public synchronized void setRPC(String methodName, Object[] params, long queueTime) {
this.methodName = methodName;
this.params = params;
long now = EnvironmentEdgeManager.currentTime();
this.rpcStartTime = now;
setWarnTime(now);
this.rpcQueueTime = queueTime;
this.state = State.RUNNING;
} | 3.26 |
hbase_MonitoredRPCHandlerImpl_getStatus_rdh | /**
* Gets the status of this handler; if it is currently servicing an RPC, this status will include
* the RPC information.
*
* @return a String describing the current status.
*/
@Override
public String getStatus() {
if (getState() != State.RUNNING) {
return super.getStatus();
}
return (((super.getStatus() + " from ") + getClient()) + ": ") + getRPC();
} | 3.26 |
hbase_MonitoredRPCHandlerImpl_isOperationRunning_rdh | /**
* Indicates to the client whether this task is monitoring a currently active RPC call to a
* database command. (as defined by o.a.h.h.client.Operation)
*
* @return true if the monitored handler is currently servicing an RPC call to a database command.
*/
@Override
public synchronized boolean isOperationRunning() {
if (!isRPCRunning()) {
return false;
}
for (Object param : params) {
if (param instanceof Operation) {
return true;
}
}
return false;
} | 3.26 |
hbase_MonitoredRPCHandlerImpl_m1_rdh | /**
* Registers current handler client details.
*
* @param clientAddress
* the address of the current client
* @param remotePort
* the port from which the client connected
*/
@Override
public void m1(String clientAddress, int remotePort) {
this.clientAddress = clientAddress;
this.remotePort = remotePort;
} | 3.26 |
hbase_MonitoredRPCHandlerImpl_getRPCPacketLength_rdh | /**
* Produces the serialized size of the protobuf packet for the RPC currently being serviced by this Handler.
*
* @return The serialized size of the RPC packet in bytes, or -1 if no RPC is running or packet info is unavailable.
*/
@Override
public long getRPCPacketLength() {
if ((getState() != State.RUNNING) || (packet == null)) {
// no RPC is currently running, or we don't have an RPC's packet info
return -1L;
}
return packet.getSerializedSize();
} | 3.26 |
hbase_MonitoredRPCHandlerImpl_getRPC_rdh | /**
* Produces a string representation of the method currently being serviced by this Handler.
*
* @param withParams
* toggle inclusion of parameters in the RPC String
* @return A human-readable string representation of the method call.
*/
@Override
public synchronized String getRPC(boolean withParams) {
if (getState() != State.RUNNING) {
// no RPC is currently running
return "";
}
StringBuilder buffer = new StringBuilder(256);
buffer.append(methodName);
if (withParams) {
buffer.append("(");
for (int i = 0; i < params.length; i++) {
if (i != 0)
buffer.append(", ");
buffer.append(params[i]);
}
buffer.append(")");
}
return buffer.toString();
} | 3.26 |
hbase_MonitoredRPCHandlerImpl_setRPCPacket_rdh | /**
* Gives this instance a reference to the protobuf received by the RPC, so that it can later
* compute its size if asked for it.
*
* @param param
* The protobuf received by the RPC for this call
*/
@Override
public void setRPCPacket(Message param) {
this.packet = param;
} | 3.26 |
hbase_MonitoredRPCHandlerImpl_getRPCStartTime_rdh | /**
* Accesses the start time for the currently running RPC on the monitored Handler.
*
* @return the start timestamp or -1 if there is no RPC currently running.
*/
@Override
public long getRPCStartTime() {
if (getState() != State.RUNNING) {
return -1;
}
return rpcStartTime;
} | 3.26 |
hbase_MonitoredRPCHandlerImpl_m0_rdh | /**
* Accesses the queue time for the currently running RPC on the monitored Handler.
*
* @return the queue timestamp or -1 if there is no RPC currently running.
*/
@Override
public long m0() {
if (getState() != State.RUNNING) {
return -1;
}return rpcQueueTime;
} | 3.26 |
hbase_IncrementCoalescer_dynamicallySetCoreSize_rdh | /**
* This method samples the incoming requests and, if selected, will check if the corePoolSize
* should be changed.
*
* @param countersMapSize
* the size of the counters map
*/
private void dynamicallySetCoreSize(int countersMapSize) {
// Here we are using countersMapSize as a random number, meaning this
// could be a Random object
if ((countersMapSize % 10) != 0) {
return;
}
double currentRatio = ((double) (countersMapSize)) / ((double) (maxQueueSize));
int newValue;
if (currentRatio < 0.1) {
newValue = 1;
} else if (currentRatio < 0.3) {
newValue = 2;
} else if (currentRatio < 0.5) {
newValue = 4;
} else if (currentRatio < 0.7) {
newValue = 8;
} else if (currentRatio < 0.9) {
newValue = 14;
} else {
newValue = 22;
}
if (pool.getCorePoolSize() != newValue) {
pool.setCorePoolSize(newValue);
}
} | 3.26 |
hbase_IncrementCoalescer_getQueueSize_rdh | // MBean get/set methods
@Override
public int getQueueSize() {
return pool.getQueue().size();
} | 3.26 |
hbase_FailedServers_addToFailedServers_rdh | /**
* Add an address to the list of the failed servers list.
*/
public synchronized void addToFailedServers(Address address, Throwable throwable) {
final long expiry = EnvironmentEdgeManager.currentTime() + recheckServersTimeout;
this.failedServers.put(address, expiry);
this.latestExpiry = expiry;
if (LOG.isDebugEnabled()) {
LOG.debug((("Added failed server with address " + address) + " to list caused by ") + throwable.toString());
}
} | 3.26 |
hbase_FailedServers_isFailedServer_rdh | /**
* Check if the server should be considered as bad. Clean the old entries of the list.
*
* @return true if the server is in the failed servers list
*/
public synchronized boolean isFailedServer(final Address address) {
if (failedServers.isEmpty()) {
return false;
}
final long now = EnvironmentEdgeManager.currentTime();
if (now > this.latestExpiry) {
failedServers.clear();
return false;
}
Long expiry = this.failedServers.get(address);
if (expiry == null) {
return false;
}
if (expiry >= now) {
return true;
} else {
this.failedServers.remove(address);
}
return false;
} | 3.26 |
hbase_ConnectionUtils_getPauseTime_rdh | /**
* Calculate pause time. Built on {@link HConstants#RETRY_BACKOFF}.
*
* @param pause
* time to pause
* @param tries
* amount of tries
* @return How long to wait after <code>tries</code> retries
*/
public static long getPauseTime(final long pause, final int tries) {
int ntries = tries;
if (ntries >= RETRY_BACKOFF.length) {
ntries = RETRY_BACKOFF.length - 1;
}
if (ntries < 0) {
ntries = 0;
}
long normalPause = pause * HConstants.RETRY_BACKOFF[ntries];
// 1% possible jitter
long jitter = ((long) ((normalPause * ThreadLocalRandom.current().nextFloat()) * 0.01F));
return normalPause + jitter;
} | 3.26 |
hbase_ConnectionUtils_setServerSideHConnectionRetriesConfig_rdh | /**
* Changes the configuration to set the number of retries needed when using Connection internally,
* e.g. for updating catalog tables, etc. Call this method before we create any Connections.
*
* @param c
* The Configuration instance to set the retries into.
* @param log
* Used to log what we set in here.
*/
public static void setServerSideHConnectionRetriesConfig(final Configuration c, final String sn, final Logger log) {
// TODO: Fix this. Not all connections from server side should have 10 times the retries.
int hcRetries = c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
// Go big. Multiply by 10. If we can't get to meta after this many retries
// then something seriously wrong.
int serversideMultiplier = c.getInt(HConstants.HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER, HConstants.DEFAULT_HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER);
int retries = hcRetries * serversideMultiplier;
c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);
log.info((sn + " server-side Connection retries=") + retries);
} | 3.26 |
hbase_ConnectionUtils_getStubKey_rdh | /**
* Get a unique key for the rpc stub to the given server.
*/
static String getStubKey(String serviceName, ServerName serverName) {
return String.format("%s@%s", serviceName, serverName);
} | 3.26 |
hbase_ConnectionUtils_createCloseRowBefore_rdh | /**
* Create a row before the specified row and very close to the specified row.
*/
static byte[] createCloseRowBefore(byte[] row) {
if (row.length == 0) {
return MAX_BYTE_ARRAY;
}
if (row[row.length - 1] == 0) {
return Arrays.copyOf(row, row.length - 1);
} else {
byte[] nextRow = new byte[row.length + MAX_BYTE_ARRAY.length];
System.arraycopy(row, 0, nextRow, 0, row.length - 1);
nextRow[row.length - 1] = ((byte) ((row[row.length - 1] & 0xff) - 1));
System.arraycopy(MAX_BYTE_ARRAY, 0, nextRow, row.length, MAX_BYTE_ARRAY.length);
return nextRow;
}
} | 3.26 |
hbase_ConnectionUtils_m0_rdh | /**
* Use the scan metrics returned by the server to add to the identically named counters in the
* client side metrics. If a counter does not exist with the same name as the server side metric,
* the attempt to increase the counter will fail.
*/
static void m0(ScanMetrics scanMetrics, ScanResponse response) {
if (((scanMetrics == null) || (response == null)) || (!response.hasScanMetrics())) {
return;
}
ResponseConverter.getScanMetrics(response).forEach(scanMetrics::addToCounter);
} | 3.26 |
hbase_ConnectionUtils_calcPriority_rdh | /**
* Select the priority for the rpc call.
* <p/>
* The rules are:
* <ol>
* <li>If user set a priority explicitly, then just use it.</li>
* <li>For system table, use {@link HConstants#SYSTEMTABLE_QOS}.</li>
* <li>For other tables, use {@link HConstants#NORMAL_QOS}.</li>
* </ol>
*
* @param priority
* the priority set by user, can be {@link HConstants#PRIORITY_UNSET}.
* @param tableName
* the table we operate on
*/
static int calcPriority(int priority, TableName tableName) {
if (priority != HConstants.PRIORITY_UNSET) {
return priority;
} else {
return getPriority(tableName);
}
} | 3.26 |
hbase_ConnectionUtils_createClosestRowAfter_rdh | /**
* Create the closest row after the specified row
*/
static byte[] createClosestRowAfter(byte[] row) {
return Arrays.copyOf(row, row.length + 1);
} | 3.26 |
hbase_ConnectionUtils_connect_rdh | /**
* Connect the two futures, if the src future is done, then mark the dst future as done. And if
* the dst future is done, then cancel the src future. This is used for timeline consistent read.
* <p/>
* Pass empty metrics if you want to link the primary future and the dst future so we will not
* increase the hedge read related metrics.
*/
private static <T> void connect(CompletableFuture<T> srcFuture, CompletableFuture<T> dstFuture, Optional<MetricsConnection> metrics) {
addListener(srcFuture, (r, e) -> {
if (e != null) {
dstFuture.completeExceptionally(e);
} else if (dstFuture.complete(r)) {
metrics.ifPresent(MetricsConnection::incrHedgedReadWin);
}
});
// The cancellation may be a dummy one as the dstFuture may be completed by this srcFuture.
// Notice that this is a bit tricky, as the execution chain maybe 'complete src -> complete dst
// -> cancel src', for now it seems to be fine, as we will use CAS to set the result first in
// CompletableFuture. If later this causes problems, we could use whenCompleteAsync to break the
// tie.
addListener(dstFuture, (r, e) -> srcFuture.cancel(false));
} | 3.26 |
hbase_ConnectionUtils_retries2Attempts_rdh | /**
* Return retries + 1. The returned value will be in range [1, Integer.MAX_VALUE].
*/
static int retries2Attempts(int retries) {
return Math.max(1, retries == Integer.MAX_VALUE ? Integer.MAX_VALUE : retries + 1);
} | 3.26 |
hbase_ConnectionUtils_validatePut_rdh | // validate for well-formedness
static void validatePut(Put put, int maxKeyValueSize) {
if (put.isEmpty()) {
throw new IllegalArgumentException("No columns to insert");
}
if (maxKeyValueSize > 0) {
for (List<Cell> list : put.getFamilyCellMap().values()) {
for (Cell cell : list) {
if (cell.getSerializedSize() > maxKeyValueSize) {
throw new IllegalArgumentException("KeyValue size too large");
}
}
}
}
} | 3.26 |
hbase_GroupingTableMapper_map_rdh | /**
* Extract the grouping columns from value to construct a new key. Pass the new key and value to
* reduce. If any of the grouping columns are not found in the value, the record is skipped.
*
* @param key
* The current key.
* @param value
* The current value.
* @param context
* The current context.
* @throws IOException
* When writing the record fails.
* @throws InterruptedException
* When the job is aborted.
*/
@Override
public void map(ImmutableBytesWritable key, Result value, Context context) throws IOException, InterruptedException {
byte[][] keyVals = extractKeyValues(value);
if (keyVals != null) {
ImmutableBytesWritable tKey = m0(keyVals);
context.write(tKey, value);
}
} | 3.26 |
hbase_GroupingTableMapper_extractKeyValues_rdh | /**
* Extract columns values from the current record. This method returns null if any of the columns
* are not found.
* <p>
* Override this method if you want to deal with nulls differently.
*
* @param r
* The current values.
* @return Array of byte values.
*/
protected byte[][] extractKeyValues(Result r) {
byte[][] keyVals = null;
ArrayList<byte[]> foundList = new ArrayList<>();
int numCols = f0.length;
if (numCols > 0) {
for (Cell value : r.listCells()) {
byte[] column = CellUtil.makeColumn(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value));
for (int i = 0; i < numCols; i++) {
if (Bytes.equals(column, f0[i])) {
foundList.add(CellUtil.cloneValue(value));
break;
}
}
}
if (foundList.size() == numCols) {
keyVals = foundList.toArray(new byte[numCols][]);
}
}
return keyVals;
} | 3.26 |
hbase_GroupingTableMapper_getConf_rdh | /**
* Returns the current configuration.
*
* @return The current configuration.
* @see org.apache.hadoop.conf.Configurable#getConf()
*/
@Override
public Configuration getConf() {
return conf;
} | 3.26 |
hbase_GroupingTableMapper_setConf_rdh | /**
* Sets the configuration. This is used to set up the grouping details.
*
* @param configuration
* The configuration to set.
* @see org.apache.hadoop.conf.Configurable#setConf( org.apache.hadoop.conf.Configuration)
*/
@Override
public void setConf(Configuration configuration) {
this.conf = configuration;
String[] cols = conf.get(GROUP_COLUMNS, "").split(" ");
f0 = new byte[cols.length][];
for (int i = 0; i < cols.length; i++) {
f0[i] = Bytes.toBytes(cols[i]);
}
} | 3.26 |
hbase_GroupingTableMapper_m0_rdh | /**
* Create a key by concatenating multiple column values.
* <p>
* Override this function in order to produce different types of keys.
*
* @param vals
* The current key/values.
* @return A key generated by concatenating multiple column values.
*/
protected ImmutableBytesWritable m0(byte[][] vals) {
if (vals == null) {
return null;
}
StringBuilder v8 = new StringBuilder();
for (int i = 0; i < vals.length; i++) {
if (i > 0) {
v8.append(" ");
}
v8.append(Bytes.toString(vals[i]));
}
return new ImmutableBytesWritable(Bytes.toBytesBinary(v8.toString()));
} | 3.26 |
hbase_NamespacePermission_implies_rdh | /**
* check if given action is granted in given namespace.
*
* @param namespace
* namespace's name
* @param action
* action to be checked
* @return true if granted, false otherwise
*/
public boolean implies(String namespace, Action action) {
return namespace.equals(this.namespace) && implies(action);
} | 3.26 |
hbase_FSVisitor_visitRegionStoreFiles_rdh | /**
* Iterate over the region store files
*
* @param fs
* {@link FileSystem}
* @param regionDir
* {@link Path} to the region directory
* @param visitor
* callback object to get the store files
* @throws IOException
* if an error occurred while scanning the directory
*/
public static void visitRegionStoreFiles(final FileSystem fs, final Path regionDir, final StoreFileVisitor visitor) throws IOException {
List<FileStatus> v2 = FSUtils.listStatusWithStatusFilter(fs, regionDir, new FSUtils.FamilyDirFilter(fs));
if (v2 == null) {
if (LOG.isTraceEnabled()) {
LOG.trace("No families under region directory:" + regionDir);
}
return;
}
PathFilter fileFilter = new FSUtils.FileFilter(fs);
for (FileStatus family : v2) {
Path familyDir = family.getPath();
String familyName = familyDir.getName();
// get all the storeFiles in the family
FileStatus[] storeFiles = CommonFSUtils.listStatus(fs, familyDir, fileFilter);
if (storeFiles == null) {
if (LOG.isTraceEnabled()) {
LOG.trace(("No hfiles found for family: " + familyDir) + ", skipping.");
}
continue;
}
for (FileStatus hfile : storeFiles) {
Path hfilePath = hfile.getPath();
visitor.storeFile(regionDir.getName(), familyName, hfilePath.getName());
}
}
} | 3.26 |
hbase_FSVisitor_visitTableStoreFiles_rdh | /**
* Iterate over the table store files
*
* @param fs
* {@link FileSystem}
* @param tableDir
* {@link Path} to the table directory
* @param visitor
* callback object to get the store files
* @throws IOException
* if an error occurred while scanning the directory
*/
public static void visitTableStoreFiles(final FileSystem fs, final Path tableDir, final StoreFileVisitor visitor) throws IOException {
List<FileStatus> regions = FSUtils.listStatusWithStatusFilter(fs, tableDir, new FSUtils.RegionDirFilter(fs));
if (regions == null) {
if (LOG.isTraceEnabled()) {
LOG.trace("No regions under directory:" + tableDir);
}
return;
}
for (FileStatus region : regions) {
visitRegionStoreFiles(fs, region.getPath(), visitor);
}
} | 3.26 |
hbase_LruCachedBlock_access_rdh | /**
* Block has been accessed.
*
* @param accessTime
* Last access; this is actually an incremented sequence number rather than an
* actual time.
*/
public void access(long accessTime) {
this.accessTime = accessTime;
if (this.priority == BlockPriority.SINGLE) {
this.priority = BlockPriority.MULTI;
}
} | 3.26 |
hbase_TestingHBaseClusterOption_convert_rdh | /**
* Convert to the internal option. Not for public use so package private.
*/
StartTestingClusterOption convert() {
return StartTestingClusterOption.builder().numMasters(numMasters).numAlwaysStandByMasters(numAlwaysStandByMasters).numRegionServers(numRegionServers).rsPorts(rsPorts).numDataNodes(numDataNodes).dataNodeHosts(dataNodeHosts).numZkServers(numZkServers).createRootDir(createRootDir).createWALDir(createWALDir).build();
} | 3.26 |
hbase_TestingHBaseClusterOption_builder_rdh | /**
* Returns a new builder.
*/
public static Builder builder() {
return new Builder();
} | 3.26 |
hbase_KeyValueCodecWithTags_getDecoder_rdh | /**
* Implementation depends on {@link InputStream#available()}
*/
@Override
public Decoder getDecoder(final InputStream is) {
return new KeyValueDecoder(is);
} | 3.26 |
hbase_SpaceQuotaSnapshotNotifierFactory_create_rdh | /**
* Instantiates the {@link SpaceQuotaSnapshotNotifier} implementation as defined in the
* configuration provided.
*
* @param conf
* Configuration object
* @return The SpaceQuotaSnapshotNotifier implementation
* @throws IllegalArgumentException
* if the class could not be instantiated
*/
public SpaceQuotaSnapshotNotifier create(Configuration conf) {
Class<? extends SpaceQuotaSnapshotNotifier> clz = Objects.requireNonNull(conf).getClass(SNAPSHOT_NOTIFIER_KEY, SNAPSHOT_NOTIFIER_DEFAULT, SpaceQuotaSnapshotNotifier.class);
try {
return clz.getDeclaredConstructor().newInstance();
} catch (Exception e) {
throw new IllegalArgumentException("Failed to instantiate the implementation", e);
}
} | 3.26 |
hbase_ReplicationSourceLogQueue_getQueueSize_rdh | /**
* Get the queue size for the given walGroupId.
*
* @param walGroupId
* walGroupId
*/
public int getQueueSize(String walGroupId) {
Queue<Path> queue = queues.get(walGroupId);
if (queue == null) {
return 0;
}
return queue.size();
} | 3.26 |
hbase_ReplicationSourceLogQueue_getNumQueues_rdh | /**
* Returns number of queues.
*/
public int getNumQueues() {
return queues.size();
} | 3.26 |
hbase_ReplicationSourceLogQueue_enqueueLog_rdh | /**
* Enqueue the wal
*
* @param wal
* wal to be enqueued
* @param walGroupId
* Key for the wal in @queues map
* @return boolean whether this is the first time we are seeing this walGroupId.
*/
public boolean enqueueLog(Path wal, String walGroupId) {
boolean exists = false;
PriorityBlockingQueue<Path> queue = queues.get(walGroupId);
if (queue == null) {
queue = new PriorityBlockingQueue<>(queueSizePerGroup, new AbstractFSWALProvider.WALStartTimeComparator());
// make sure that we do not use an empty queue when setting up a ReplicationSource, otherwise
// the shipper may quit immediately
queue.put(wal);
queues.put(walGroupId, queue);
} else {
exists = true;
queue.put(wal);
}
// Increment size of logQueue
this.metrics.incrSizeOfLogQueue();
// Compute oldest wal age
this.metrics.setOldestWalAge(getOldestWalAge());
// This will log a warning for each new wal that gets created above the warn threshold
int queueSize = queue.size();
if (queueSize > this.logQueueWarnThreshold) {
LOG.warn("{} WAL group {} queue size: {} exceeds value of " + "replication.source.log.queue.warn {}", source.logPeerId(), walGroupId, queueSize, logQueueWarnThreshold);
}
return exists;
} | 3.26 |
hbase_ReplicationSourceLogQueue_clear_rdh | /**
* Remove all the elements from the queue corresponding to walGroupId
*
* @param walGroupId
* walGroupId
*/
public void clear(String walGroupId) {
PriorityBlockingQueue<Path> queue = getQueue(walGroupId);
while (!queue.isEmpty()) {
// Need to iterate since metrics#decrSizeOfLogQueue decrements just by 1.
queue.remove();
metrics.decrSizeOfLogQueue();
}
this.metrics.setOldestWalAge(getOldestWalAge());
} | 3.26 |
hbase_ReplicationSourceLogQueue_getQueue_rdh | /**
* Return the queue for the given walGroupId. Please don't add or remove elements from the returned
* queue. Use {@link #enqueueLog(Path, String)} and {@link #remove(String)} methods respectively.
*
* @param walGroupId
* walGroupId
*/
public PriorityBlockingQueue<Path> getQueue(String walGroupId) {
return queues.get(walGroupId);
} | 3.26 |
hbase_ReplicationSourceLogQueue_getOldestWalAge_rdh | /* Returns the age of oldest wal. */
long getOldestWalAge() {
long now = EnvironmentEdgeManager.currentTime();
long timestamp = getOldestWalTimestamp();
if (timestamp == Long.MAX_VALUE) {
// If there are no wals in the queue then set the oldest wal timestamp to current time
// so that the oldest wal age will be 0.
timestamp = now;
}
long age = now - timestamp;
return age;
} | 3.26 |
hbase_ReplicationSourceLogQueue_getOldestWalTimestamp_rdh | /* Get the oldest wal timestamp from all the queues. */
private long getOldestWalTimestamp() {
long oldestWalTimestamp = Long.MAX_VALUE;
for (Map.Entry<String, PriorityBlockingQueue<Path>> entry : queues.entrySet()) {
PriorityBlockingQueue<Path> queue = entry.getValue();
Path path = queue.peek();
// Can path ever be null ?
if (path != null) {
oldestWalTimestamp = Math.min(oldestWalTimestamp, AbstractFSWALProvider.WALStartTimeComparator.getTS(path));
}
}
return oldestWalTimestamp;
} | 3.26 |
hbase_ReplicationSourceLogQueue_remove_rdh | /**
* Remove head from the queue corresponding to given walGroupId.
*
* @param walGroupId
* walGroupId
*/
public void remove(String walGroupId) {
PriorityBlockingQueue<Path> queue = getQueue(walGroupId);
if ((queue == null) || queue.isEmpty()) {
return;
}
queue.remove();
// Decrease size logQueue.
this.metrics.decrSizeOfLogQueue();
// Re-compute age of oldest wal metric.
this.metrics.setOldestWalAge(getOldestWalAge());
} | 3.26 |
hbase_YammerHistogramUtils_getHistogramReport_rdh | /**
* Returns a summary of {@code hist}.
*/
public static String getHistogramReport(final Histogram hist) {
Snapshot sn = hist.getSnapshot();
return (((((((((((((((((((("mean=" + DOUBLE_FORMAT.format(sn.getMean())) + ", min=") + DOUBLE_FORMAT.format(sn.getMin())) + ", max=") + DOUBLE_FORMAT.format(sn.getMax())) + ", stdDev=") + DOUBLE_FORMAT.format(sn.getStdDev())) + ", 50th=") + DOUBLE_FORMAT.format(sn.getMedian())) + ", 75th=") + DOUBLE_FORMAT.format(sn.get75thPercentile())) + ", 95th=") + DOUBLE_FORMAT.format(sn.get95thPercentile())) + ", 99th=") + DOUBLE_FORMAT.format(sn.get99thPercentile())) + ", 99.9th=") + DOUBLE_FORMAT.format(sn.get999thPercentile())) + ", 99.99th=") + DOUBLE_FORMAT.format(sn.getValue(0.9999))) + ", 99.999th=") + DOUBLE_FORMAT.format(sn.getValue(0.99999));
} | 3.26 |
hbase_YammerHistogramUtils_newHistogram_rdh | /**
* Create a new {@link com.codahale.metrics.Histogram} instance. These constructors are not public
* in 2.2.0, so we use reflection to find them.
*/
public static Histogram newHistogram(Reservoir sample) {
try {
Constructor<?> v0 = Histogram.class.getDeclaredConstructor(Reservoir.class);
v0.setAccessible(true);
return ((Histogram) (v0.newInstance(sample)));
} catch (Exception e) {
throw new RuntimeException(e);
}
} | 3.26 |
hbase_YammerHistogramUtils_getShortHistogramReport_rdh | /**
* Returns an abbreviated summary of {@code hist}.
*/
public static String getShortHistogramReport(final Histogram hist) {
Snapshot sn = hist.getSnapshot();
return (((((((((("mean=" + DOUBLE_FORMAT.format(sn.getMean())) + ", min=") + DOUBLE_FORMAT.format(sn.getMin())) + ", max=") + DOUBLE_FORMAT.format(sn.getMax())) + ", stdDev=") + DOUBLE_FORMAT.format(sn.getStdDev())) + ", 95th=") + DOUBLE_FORMAT.format(sn.get95thPercentile())) + ", 99th=") + DOUBLE_FORMAT.format(sn.get99thPercentile());
} | 3.26 |
hbase_YammerHistogramUtils_getPrettyHistogramReport_rdh | /**
* Returns pretty summary of {@code hist}.
*/
public static String getPrettyHistogramReport(final Histogram h) {
Snapshot sn = h.getSnapshot();
return (((((((((((((((((((((((((((((("Mean = " + DOUBLE_FORMAT.format(sn.getMean())) + "\n") + "Min = ") + DOUBLE_FORMAT.format(sn.getMin())) + "\n") + "Max = ") + DOUBLE_FORMAT.format(sn.getMax())) + "\n") +
"StdDev = ") + DOUBLE_FORMAT.format(sn.getStdDev())) + "\n") + "50th = ") + DOUBLE_FORMAT.format(sn.getMedian())) + "\n") + "75th = ") + DOUBLE_FORMAT.format(sn.get75thPercentile())) + "\n") + "95th = ") + DOUBLE_FORMAT.format(sn.get95thPercentile())) + "\n") + "99th = ") + DOUBLE_FORMAT.format(sn.get99thPercentile())) + "\n") + "99.9th = ") + DOUBLE_FORMAT.format(sn.get999thPercentile())) + "\n") + "99.99th = ") + DOUBLE_FORMAT.format(sn.getValue(0.9999))) + "\n") + "99.999th = ") + DOUBLE_FORMAT.format(sn.getValue(0.99999));
} | 3.26 |
hbase_CheckAndMutate_getCompareOp_rdh | /**
* Returns the comparison operator
*/
public CompareOperator getCompareOp() {
return op;
} | 3.26 |
hbase_CheckAndMutate_getQualifier_rdh | /**
* Returns the qualifier to check
*/
public byte[] getQualifier() {
return qualifier;
} | 3.26 |
hbase_CheckAndMutate_getAction_rdh | /**
* Returns the action done if check succeeds
*/
public Row getAction() {
return action;
} | 3.26 |
hbase_CheckAndMutate_ifEquals_rdh | /**
* Check for equality
*
* @param family
* family to check
* @param qualifier
* qualifier to check
* @param value
* the expected value
* @return the CheckAndMutate object
*/
public Builder ifEquals(byte[] family, byte[] qualifier, byte[] value) {
return ifMatches(family, qualifier, CompareOperator.EQUAL, value);
} | 3.26 |
hbase_CheckAndMutate_ifMatches_rdh | /**
* Check for match
*
* @param filter
* filter to check
* @return the CheckAndMutate object
*/
public Builder ifMatches(Filter filter) {
this.filter = Preconditions.checkNotNull(filter, "filter is null");
return this;
} | 3.26 |
hbase_CheckAndMutate_getValue_rdh | /**
* Returns the expected value
*/
public byte[] getValue() {
return value;
} | 3.26 |
hbase_CheckAndMutate_getFamily_rdh | /**
* Returns the family to check
*/
public byte[] getFamily() {
return family;
} | 3.26 |
hbase_CheckAndMutate_timeRange_rdh | /**
* Specify a timerange
*
* @param timeRange
* time range to check
* @return the CheckAndMutate object
*/
public Builder timeRange(TimeRange timeRange) {
this.timeRange = timeRange;
return this;
} | 3.26 |
hbase_CheckAndMutate_ifNotExists_rdh | /**
* Check for lack of column
*
* @param family
* family to check
* @param qualifier
* qualifier to check
* @return the CheckAndMutate object
*/
public Builder ifNotExists(byte[] family, byte[] qualifier) {
return ifEquals(family, qualifier, null);
} | 3.26 |
hbase_CheckAndMutate_hasFilter_rdh | /**
* Returns whether this has a filter or not
*/
public boolean hasFilter() {
return filter != null;
} | 3.26 |
hbase_CheckAndMutate_getFilter_rdh | /**
* Returns the filter to check
*/
public Filter getFilter() {
return filter;
} | 3.26 |
hbase_CheckAndMutate_newBuilder_rdh | /**
* returns a builder object to build a CheckAndMutate object
*
* @param row
* row
* @return a builder object
*/
public static Builder newBuilder(byte[] row) {
return new Builder(row);
} | 3.26 |
hbase_CheckAndMutate_build_rdh | /**
* Build the CheckAndMutate object with a RowMutations to commit if the check succeeds.
*
* @param mutations
* mutations to perform if check succeeds
* @return a CheckAndMutate object
*/
public CheckAndMutate build(RowMutations mutations) {
preCheck(mutations);
if (filter != null) {
return new CheckAndMutate(row, filter, timeRange, mutations);
} else {
return new CheckAndMutate(row, family, qualifier, f0, value, timeRange, mutations);
}
} | 3.26 |
hbase_CheckAndMutate_getRow_rdh | /**
* Returns the row
*/
@Override
public byte[] getRow() {
return row;
} | 3.26 |
hbase_CheckAndMutate_getTimeRange_rdh | /**
* Returns the time range to check
*/
public TimeRange getTimeRange() {
return f1;
} | 3.26 |
hbase_VerifyReplication_createSubmittableJob_rdh | /**
* Sets up the actual job.
*
* @param conf
* The current configuration.
* @param args
* The command line parameters.
* @return The newly created job.
* @throws java.io.IOException
* When setting up the job fails.
*/
public Job createSubmittableJob(Configuration conf, String[] args) throws IOException {
if (!doCommandLine(args)) {
return null;
}
conf.set(f0 + ".tableName", tableName); conf.setLong(f0 + ".startTime", startTime);
conf.setLong(f0 + ".endTime", endTime);
conf.setInt(f0 + ".sleepMsBeforeReCompare", sleepMsBeforeReCompare);
conf.set(f0 + ".delimiter", delimiter);
conf.setInt(f0 + ".batch", batch);
conf.setBoolean(f0 + ".verbose", verbose);
conf.setBoolean(f0 + ".includeDeletedCells", includeDeletedCells);
if (families != null) {
conf.set(f0 + ".families", families);
}
if (rowPrefixes != null) {
conf.set(f0 + ".rowPrefixes", rowPrefixes);
}
String peerQuorumAddress;
Pair<ReplicationPeerConfig, Configuration> peerConfigPair = null;
if (peerId != null) {
peerConfigPair = getPeerQuorumConfig(conf, peerId);
ReplicationPeerConfig peerConfig = peerConfigPair.getFirst();
peerQuorumAddress = peerConfig.getClusterKey();
LOG.info((("Peer Quorum Address: " + peerQuorumAddress) + ", Peer Configuration: ") + peerConfig.getConfiguration());
conf.set(f0 + ".peerQuorumAddress", peerQuorumAddress);
HBaseConfiguration.setWithPrefix(conf, PEER_CONFIG_PREFIX, peerConfig.getConfiguration().entrySet());
} else {
assert this.f1 != null;
peerQuorumAddress = this.f1;
LOG.info("Peer Quorum Address: " +
peerQuorumAddress);
conf.set(f0 + ".peerQuorumAddress", peerQuorumAddress);
}
if (f3 != null) {LOG.info("Peer Table Name: " + f3);conf.set(f0 + ".peerTableName", f3);
}
conf.setInt(f0 + ".versions", versions);
LOG.info("Number of version: " + versions);
conf.setInt(f0 + ".recompareTries", reCompareTries);
conf.setInt(f0 + ".recompareBackoffExponent",
reCompareBackoffExponent);
conf.setInt(f0
+ ".recompareThreads", reCompareThreads);
// Set Snapshot specific parameters
if (peerSnapshotName != null) {conf.set(f0 + ".peerSnapshotName", peerSnapshotName);
// for verifyRep by snapshot, choose a unique sub-directory under peerSnapshotTmpDir to
// restore snapshot.
Path restoreDir = new Path(peerSnapshotTmpDir, UUID.randomUUID().toString());
peerSnapshotTmpDir = restoreDir.toString();
conf.set(f0 + ".peerSnapshotTmpDir", peerSnapshotTmpDir);
conf.set(f0 + ".peerFSAddress", peerFSAddress);
conf.set(f0 + ".peerHBaseRootAddress", peerHBaseRootAddress);
// This is to create HDFS delegation token for peer cluster in case of secured
conf.setStrings(MRJobConfig.JOB_NAMENODES, peerFSAddress, conf.get(HConstants.HBASE_DIR));
}
Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, (f0 + "_") + tableName));
job.setJarByClass(VerifyReplication.class);
Scan scan = new Scan();
scan.setTimeRange(startTime, endTime);
scan.setRaw(includeDeletedCells);
scan.setCacheBlocks(false);
if (batch > 0) {
scan.setBatch(batch);
}
if (versions >= 0) {
scan.readVersions(versions);
LOG.info("Number of versions set to " + versions);
}
if (families != null) {
String[] fams = families.split(",");
for (String fam : fams) {
scan.addFamily(Bytes.toBytes(fam));
}
}
setRowPrefixFilter(scan, rowPrefixes);
if (sourceSnapshotName != null) {
Path snapshotTempPath = new Path(f2);
LOG.info((("Using source snapshot-" + sourceSnapshotName) + " with temp dir:") + f2);
TableMapReduceUtil.initTableSnapshotMapperJob(sourceSnapshotName, scan, VerifyReplication.Verifier.class, null, null, job, true, snapshotTempPath);
restoreSnapshotForPeerCluster(conf, peerQuorumAddress);
} else {
TableMapReduceUtil.initTableMapperJob(tableName, scan, VerifyReplication.Verifier.class, null, null, job);
}
Configuration peerClusterConf;
if (peerId != null) {
assert peerConfigPair != null;
peerClusterConf = peerConfigPair.getSecond();
} else {
peerClusterConf = HBaseConfiguration.createClusterConf(conf, peerQuorumAddress, PEER_CONFIG_PREFIX);
}
// Obtain the auth token from peer cluster
TableMapReduceUtil.initCredentialsForCluster(job, peerClusterConf);
job.setOutputFormatClass(NullOutputFormat.class);
job.setNumReduceTasks(0);
return job;
} | 3.26 |
hbase_VerifyReplication_map_rdh | /**
* Map method that compares every scanned row with the equivalent from a distant cluster.
*
* @param row
* The current table row key.
* @param value
* The columns.
* @param context
* The current context.
* @throws IOException
* When something is broken with the data.
*/
@Override
public void map(ImmutableBytesWritable row, final Result value, Context context) throws IOException {
if (replicatedScanner == null) {
Configuration conf = context.getConfiguration();
reCompareTries = conf.getInt(f0 + ".recompareTries", 0);
reCompareBackoffExponent = conf.getInt(f0 + ".recompareBackoffExponent", 1);
sleepMsBeforeReCompare = conf.getInt(f0 + ".sleepMsBeforeReCompare", 0);
if (sleepMsBeforeReCompare > 0) {
reCompareTries = Math.max(reCompareTries, 1);
}
delimiter = conf.get(f0 + ".delimiter", "");
verbose = conf.getBoolean(f0 + ".verbose", false);
batch = conf.getInt(f0 + ".batch", -1);
final Scan scan = new Scan();
if (batch > 0) {
scan.setBatch(batch);
}
scan.setCacheBlocks(false);
scan.setCaching(conf.getInt(TableInputFormat.SCAN_CACHEDROWS, 1));
long startTime = conf.getLong(f0 + ".startTime", 0);
long endTime = conf.getLong(f0 + ".endTime", Long.MAX_VALUE);
String families = conf.get(f0 + ".families", null);
if (families != null) {
String[] fams = families.split(",");
for (String fam : fams) {
scan.addFamily(Bytes.toBytes(fam));
}
}
boolean includeDeletedCells = conf.getBoolean(f0 + ".includeDeletedCells", false);
scan.setRaw(includeDeletedCells);
String rowPrefixes = conf.get(f0 + ".rowPrefixes", null);
setRowPrefixFilter(scan, rowPrefixes);
scan.setTimeRange(startTime, endTime);
int versions = conf.getInt(f0 + ".versions", -1);
LOG.info("Setting number of version inside map as: " + versions);
if (versions >= 0) {
scan.readVersions(versions);
}
int reCompareThreads = conf.getInt(f0 + ".recompareThreads", 0);
reCompareExecutor = buildReCompareExecutor(reCompareThreads, context);
TableName tableName = TableName.valueOf(conf.get(f0 + ".tableName"));
sourceConnection = ConnectionFactory.createConnection(conf);
sourceTable = sourceConnection.getTable(tableName);
tableScan = scan;
final InputSplit tableSplit = context.getInputSplit();
String zkClusterKey = conf.get(f0 + ".peerQuorumAddress"); Configuration peerConf = HBaseConfiguration.createClusterConf(conf, zkClusterKey, PEER_CONFIG_PREFIX);
String peerName = peerConf.get(f0 + ".peerTableName", tableName.getNameAsString());
TableName peerTableName = TableName.valueOf(peerName);
replicatedConnection = ConnectionFactory.createConnection(peerConf);
replicatedTable = replicatedConnection.getTable(peerTableName);
scan.withStartRow(value.getRow());
byte[] endRow = null;
if (tableSplit instanceof TableSnapshotInputFormat.TableSnapshotRegionSplit) {
endRow = ((TableSnapshotInputFormat.TableSnapshotRegionSplit) (tableSplit)).getRegion().getEndKey();
} else {
endRow = ((TableSplit) (tableSplit)).getEndRow();
}
scan.withStopRow(endRow);
String peerSnapshotName = conf.get(f0 + ".peerSnapshotName", null);
if (peerSnapshotName != null) {
String peerSnapshotTmpDir = conf.get(f0 + ".peerSnapshotTmpDir", null);
String peerFSAddress = conf.get(f0 + ".peerFSAddress", null);
String peerHBaseRootAddress = conf.get(f0 + ".peerHBaseRootAddress", null);
FileSystem.setDefaultUri(peerConf, peerFSAddress);
CommonFSUtils.setRootDir(peerConf, new Path(peerHBaseRootAddress));
LOG.info((((((("Using peer snapshot:" + peerSnapshotName) + " with temp dir:") + peerSnapshotTmpDir) + " peer root uri:") + CommonFSUtils.getRootDir(peerConf)) + " peerFSAddress:")
+ peerFSAddress);
replicatedScanner = new TableSnapshotScanner(peerConf, CommonFSUtils.getRootDir(peerConf), new Path(peerFSAddress, peerSnapshotTmpDir), peerSnapshotName, scan, true);
} else {
replicatedScanner = replicatedTable.getScanner(scan);
}
currentCompareRowInPeerTable = replicatedScanner.next();
}
while (true) {
if (currentCompareRowInPeerTable == null) {
// reach the region end of peer table, row only in source table
logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_SOURCE_TABLE_ROWS, value, null);
break;
}
int rowCmpRet = Bytes.compareTo(value.getRow(), currentCompareRowInPeerTable.getRow());
if (rowCmpRet == 0) {
// rowkey is same, need to compare the content of the row
try {
Result.compareResults(value, currentCompareRowInPeerTable, false);
context.getCounter(Counters.GOODROWS).increment(1);
if (verbose) {
LOG.info((("Good row key: " + delimiter) + Bytes.toStringBinary(value.getRow())) + delimiter);
}
} catch (Exception e) {
logFailRowAndIncreaseCounter(context,
Counters.CONTENT_DIFFERENT_ROWS, value, currentCompareRowInPeerTable);
}
currentCompareRowInPeerTable = replicatedScanner.next();
break;
} else if (rowCmpRet < 0) {
// row only exists in source table
logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_SOURCE_TABLE_ROWS, value, null);
break;
} else {
// row only exists in peer table
logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_PEER_TABLE_ROWS, null, currentCompareRowInPeerTable);
currentCompareRowInPeerTable = replicatedScanner.next();
}
}
} | 3.26 |
hbase_VerifyReplication_printUsage_rdh | /* @param errorMsg Error message. Can be null. */
private static void printUsage(final String errorMsg) {
if ((errorMsg != null) && (errorMsg.length() > 0)) {
System.err.println("ERROR: " + errorMsg);
}
System.err.println((((("Usage: verifyrep [--starttime=X]" + " [--endtime=Y] [--families=A] [--row-prefixes=B] [--delimiter=] [--recompareSleep=] ") + "[--recompareThreads=] [--recompareTries=] [--recompareBackoffExponent=]") + "[--batch=] [--verbose] [--peerTableName=] [--sourceSnapshotName=P] ") + "[--sourceSnapshotTmpDir=Q] [--peerSnapshotName=R] [--peerSnapshotTmpDir=S] ") + "[--peerFSAddress=T] [--peerHBaseRootAddress=U] <peerid|peerQuorumAddress> <tablename>");
System.err.println();
System.err.println("Options:");
System.err.println(" starttime beginning of the time range");
System.err.println(" without endtime means from starttime to forever");
System.err.println(" endtime end of the time range");
System.err.println(" versions number of cell versions to verify");
System.err.println(" batch batch count for scan, note that" + " result row counts will no longer be actual number of rows when you use this option");
System.err.println(" raw includes raw scan if given in options");
System.err.println(" families comma-separated list of families to copy");
System.err.println(" row-prefixes comma-separated list of row key prefixes to filter on ");
System.err.println(" delimiter the delimiter used in display around rowkey");
System.err.println(" recompareSleep milliseconds to sleep before recompare row, " + "default value is 0 which disables the recompare.");
System.err.println(" recompareThreads number of threads to run recompares in");
System.err.println(" recompareTries number of recompare attempts before incrementing " + "the BADROWS counter. Defaults to 1 recompare");
System.out.println((" recompareBackoffExponent exponential multiplier to increase " + "recompareSleep after each recompare attempt, ") + "default value is 0 which results in a constant sleep time");
System.err.println(" verbose logs row keys of good rows");
System.err.println(" peerTableName Peer Table Name");
System.err.println(" sourceSnapshotName Source Snapshot Name");
System.err.println(" sourceSnapshotTmpDir Tmp location to restore source table snapshot");
System.err.println(" peerSnapshotName Peer Snapshot Name");
System.err.println(" peerSnapshotTmpDir Tmp location to restore peer table snapshot");
System.err.println(" peerFSAddress Peer cluster Hadoop FS address");
System.err.println(" peerHBaseRootAddress Peer cluster HBase root location");
System.err.println();
System.err.println("Args:");
System.err.println(" peerid Id of the peer used for verification," + " must match the one given for replication");
System.err.println(" peerQuorumAddress quorumAdress of the peer used for verification. The " + "format is zk_quorum:zk_port:zk_hbase_path");
System.err.println(" tablename Name of the table to verify");
System.err.println();
System.err.println("Examples:");
System.err.println(" To verify the data replicated from TestTable for a 1 hour window with peer #5 ");
System.err.println((" $ hbase " + "org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication") + " --starttime=1265875194289 --endtime=1265878794289 5 TestTable ");
System.err.println();
System.err.println(" To verify the data in TestTable between the cluster runs VerifyReplication and cluster-b");
System.err.println(" Assume quorum address for cluster-b is" + " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:2181:/cluster-b");
System.err.println(((" $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:") + "2181:/cluster-b \\\n") + " TestTable");
System.err.println();
System.err.println(" To verify the data in TestTable between the secured cluster runs VerifyReplication" + " and insecure cluster-b");
System.err.println((((" $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + " -D verifyrep.peer.hbase.security.authentication=simple \\\n") + " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:") + "2181:/cluster-b \\\n") + " TestTable");
System.err.println();
System.err.println(" To verify the data in TestTable between" + " the secured cluster runs VerifyReplication and secured cluster-b");
System.err.println(" Assume cluster-b uses different kerberos principal, cluster-b/_HOST@E" + ", for master and regionserver kerberos principal from another cluster");
System.err.println((((((" $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + " -D verifyrep.peer.hbase.regionserver.kerberos.principal=") + "cluster-b/[email protected] \\\n") +
" -D verifyrep.peer.hbase.master.kerberos.principal=cluster-b/[email protected] \\\n") + " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:") + "2181:/cluster-b \\\n") + " TestTable");System.err.println();
System.err.println(" To verify the data in TestTable between the insecure cluster runs VerifyReplication" + " and secured cluster-b");
System.err.println(((((((" $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + " -D verifyrep.peer.hbase.security.authentication=kerberos \\\n") + " -D verifyrep.peer.hbase.regionserver.kerberos.principal=") + "cluster-b/[email protected] \\\n") + " -D verifyrep.peer.hbase.master.kerberos.principal=cluster-b/[email protected] \\\n") + " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:") + "2181:/cluster-b \\\n") + " TestTable");
} | 3.26 |
hbase_VerifyReplication_main_rdh | /**
* Main entry point.
*
* @param args
* The command line parameters.
* @throws Exception
* When running the job fails.
*/
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(HBaseConfiguration.create(), new VerifyReplication(), args);
System.exit(res);
} | 3.26 |
hbase_HtmlQuoting_needsQuoting_rdh | /**
* Does the given string need to be quoted?
*
* @param str
* the string to check
* @return does the string contain any of the active html characters?
*/
public static boolean needsQuoting(String str) {
if (str == null) {
return false;
}
byte[] bytes = Bytes.toBytes(str);
return needsQuoting(bytes, 0, bytes.length);
} | 3.26 |
hbase_HtmlQuoting_unquoteHtmlChars_rdh | /**
* Remove HTML quoting from a string.
*
* @param item
* the string to unquote
* @return the unquoted string
*/
public static String unquoteHtmlChars(String item) {
if (item == null) {
return null;
}
int next = item.indexOf('&');
// nothing was quoted
if (next == (-1)) {
return item;
}
int len = item.length();
int posn = 0;
StringBuilder buffer = new StringBuilder();
while (next != (-1)) {
buffer.append(item.substring(posn, next));
if (item.startsWith("&", next)) {
buffer.append('&');
next += 5;
} else if (item.startsWith("'", next)) {
buffer.append('\'');
next += 6;
} else if (item.startsWith(">", next)) {
buffer.append('>');
next += 4;
} else if (item.startsWith("<",
next)) {
buffer.append('<');
next += 4;
} else if (item.startsWith(""", next)) { buffer.append('"');
next += 6;} else {
int end = item.indexOf(';', next) + 1;
if (end == 0) {
end = len;
}
throw new IllegalArgumentException("Bad HTML quoting for " + item.substring(next, end));
}
posn = next;
next = item.indexOf('&', posn);
}
buffer.append(item.substring(posn, len));
return buffer.toString();
} | 3.26 |
hbase_HtmlQuoting_quoteOutputStream_rdh | /**
* Return an output stream that quotes all of the output.
*
* @param out
* the stream to write the quoted output to
* @return a new stream that the application should write to
*/
public static OutputStream quoteOutputStream(final OutputStream out) {
return new OutputStream() {
private byte[] data = new byte[1];
@Override
public void write(byte[] data, int off, int len) throws IOException {
quoteHtmlChars(out, data, off, len);
}
@Override
public void write(int b) throws IOException {
data[0] = ((byte) (b));
quoteHtmlChars(out, data, 0, 1);
}
@Override
public void flush() throws IOException {
out.flush();
}
@Override
public void close() throws IOException {
out.close();
}};
} | 3.26 |
hbase_HtmlQuoting_quoteHtmlChars_rdh | /**
* Quote the given item to make it html-safe.
*
* @param item
* the string to quote
* @return the quoted string
*/
public static String quoteHtmlChars(String item) {
if (item == null) {
return null;
}
byte[] bytes = Bytes.toBytes(item);
if (needsQuoting(bytes, 0, bytes.length)) {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
try {
quoteHtmlChars(buffer, bytes, 0, bytes.length);
} catch (IOException ioe) {
// Won't happen, since it is a bytearrayoutputstream
}
return buffer.toString();
} else {
return item;
}
} | 3.26 |
hbase_ZKWatcher_getMetaReplicaNodesAndWatchChildren_rdh | /**
* Same as {@link #getMetaReplicaNodes()} except that this also registers a watcher on base znode
* for subsequent CREATE/DELETE operations on child nodes.
*/
public List<String> getMetaReplicaNodesAndWatchChildren() throws KeeperException {
List<String> childrenOfBaseNode = ZKUtil.listChildrenAndWatchForNewChildren(this, znodePaths.baseZNode);
return filterMetaReplicaNodes(childrenOfBaseNode);
} | 3.26 |
hbase_ZKWatcher_setZnodeAclsRecursive_rdh | /**
* Set the znode perms recursively. This will do post-order recursion, so that baseZnode ACLs will
* be set last in case the master fails in between.
*
* @param znode
* the ZNode to set the permissions for
*/
private void setZnodeAclsRecursive(String znode) throws KeeperException, InterruptedException {
List<String> children = recoverableZooKeeper.getChildren(znode, false);
for (String child : children) {
setZnodeAclsRecursive(ZNodePaths.joinZNode(znode, child));
}
List<ACL> acls = m1(znode, true);
LOG.info("Setting ACLs for znode:{} , acl:{}", znode, acls);recoverableZooKeeper.setAcl(znode, acls, -1);
} | 3.26 |
hbase_ZKWatcher_interruptedExceptionNoThrow_rdh | /**
* Log the InterruptedException and interrupt current thread
*
* @param ie
* The InterruptedException to log
* @param throwLater
* Whether we will throw the exception latter
*/
public void interruptedExceptionNoThrow(InterruptedException ie, boolean throwLater) {
LOG.debug(prefix("Received InterruptedException, will interrupt current thread" + (throwLater ? " and rethrow a SystemErrorException" : "")), ie);
// At least preserve interrupt.
Thread.currentThread().interrupt();
} | 3.26 |