name (string, 12–178 chars) | code_snippet (string, 8–36.5k chars) | score (float64, 3.26–3.68) |
---|---|---|
hbase_JVMClusterUtil_startup_rdh | /**
* Start the cluster. Waits until there is a primary master initialized and returns its address.
*
* @return Address to use contacting primary master.
*/
public static String startup(final List<JVMClusterUtil.MasterThread> masters, final List<JVMClusterUtil.RegionServerThread> regionservers) throws IOException {
// Implementation note: This method relies on timed sleeps in a loop. It's not great, and
// should probably be re-written to use actual synchronization objects, but it's ok for now
Configuration configuration = null;
if ((masters == null) || masters.isEmpty()) {
return null;
}
for (JVMClusterUtil.MasterThread t : masters) {
configuration = t.getMaster().getConfiguration();
t.start();
}
// Wait for an active master
// having an active master before starting the region threads allows
// them to succeed on their connection to master
final int v8 = (configuration != null) ? Integer.parseInt(configuration.get("hbase.master.start.timeout.localHBaseCluster", "30000")) : 30000;
waitForEvent(v8, "active", () -> findActiveMaster(masters) != null);
if (regionservers != null) {
for (JVMClusterUtil.RegionServerThread t : regionservers) {
t.start();
}
}
// Wait for an active master to be initialized (implies being master)
// with this, when we return the cluster is complete
final int initTimeout = (configuration != null) ? Integer.parseInt(configuration.get("hbase.master.init.timeout.localHBaseCluster", "200000")) : 200000;
waitForEvent(initTimeout, "initialized", () -> {
JVMClusterUtil.MasterThread t = findActiveMaster(masters);
// master thread should never be null at this point, but let's keep the check anyway
return (t != null) && t.master.isInitialized();
});
return findActiveMaster(masters).master.getServerName().toString();
} | 3.26 |
hbase_JVMClusterUtil_waitForEvent_rdh | /**
* Utility method to wait some time for an event to occur, and then return control to the caller.
*
* @param millis
* How long to wait, in milliseconds.
* @param action
* The action that we are waiting for. Will be used in log message if the event does
* not occur.
* @param check
* A Supplier that will be checked periodically to produce an updated true/false
* result indicating if the expected event has happened or not.
* @throws InterruptedIOException
* If we are interrupted while waiting for the event.
* @throws RuntimeException
* If we reach the specified timeout while waiting for the event.
*/
private static void waitForEvent(long millis, String action, Supplier<Boolean> check) throws InterruptedIOException {
long v12 = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(millis);
while (true) {
if (check.get()) {
return;
}
if (System.nanoTime() > v12) {
String msg = ((("Master not " + action) + " after ") + millis) + "ms";
Threads.printThreadInfo(System.out, "Thread dump because: " + msg);
throw new RuntimeException(msg);
}
try {
Thread.sleep(100);
} catch (InterruptedException e) {
throw ((InterruptedIOException) (new InterruptedIOException().initCause(e)));
}
}
} | 3.26 |
hbase_JVMClusterUtil_getMaster_rdh | /**
* Returns the master
*/
public HMaster getMaster() {
return this.master;
} | 3.26 |
hbase_Compression_createPlainCompressionStream_rdh | /**
* Creates a compression stream without any additional wrapping into buffering streams.
*/
public CompressionOutputStream createPlainCompressionStream(OutputStream downStream, Compressor compressor) throws IOException {
CompressionCodec codec = getCodec(conf);
((Configurable) (codec)).getConf().setInt("io.file.buffer.size", 32 * 1024);
return codec.createOutputStream(downStream, compressor);
} | 3.26 |
hbase_Compression_getClassLoaderForCodec_rdh | /**
* Returns the classloader to load the Codec class from.
*/
private static ClassLoader getClassLoaderForCodec() {
ClassLoader cl = Thread.currentThread().getContextClassLoader();
if (cl == null) {
cl = Compression.class.getClassLoader();
}
if (cl == null) {
cl = ClassLoader.getSystemClassLoader();
}
if (cl == null) {
throw new RuntimeException("A ClassLoader to load the Codec could not be determined");
}
return cl;
} | 3.26 |
hbase_Compression_buildCodec_rdh | /**
* Load a codec implementation for an algorithm using the supplied configuration.
*
* @param conf
* the configuration to use
* @param algo
* the algorithm to implement
*/
private static CompressionCodec buildCodec(final Configuration conf, final Algorithm algo) {
try {
String codecClassName = conf.get(algo.confKey, algo.confDefault);
if (codecClassName == null) {
throw new RuntimeException("No codec configured for " + algo.confKey);
}
Class<?> codecClass = getClassLoaderForCodec().loadClass(codecClassName);
CompressionCodec codec = ((CompressionCodec) (ReflectionUtils.newInstance(codecClass, new Configuration(conf))));
LOG.info("Loaded codec {} for compression algorithm {}", codec.getClass().getCanonicalName(), algo.name());
return codec;
} catch (ClassNotFoundException e) {
throw new RuntimeException(e);
}
} | 3.26 |
hbase_AvlUtil_remove_rdh | /**
* Remove a node from the tree
*
* @param head
* the head of the linked list
* @param node
* the node to remove from the list
* @return the new head of the list
*/
public static <TNode extends AvlLinkedNode> TNode remove(TNode head, TNode node) {
assert isLinked(node) : node + " is not linked";
if (node != node.iterNext) {
node.iterPrev.iterNext = node.iterNext;
node.iterNext.iterPrev = node.iterPrev;
head = (head == node) ? ((TNode) (node.iterNext)) : head;
} else {
head = null;
}
node.iterNext = null;
node.iterPrev = null;
return head;
} | 3.26 |
hbase_AvlUtil_visit_rdh | /**
* Visit each node of the tree
*
* @param root
* the current root of the tree
* @param visitor
* the AvlNodeVisitor instance
*/
public static <TNode extends AvlNode> void visit(final TNode root, final AvlNodeVisitor<TNode> visitor) {
if (root == null) {
return;
}
final AvlTreeIterator<TNode> iterator = new AvlTreeIterator<>(root);
boolean visitNext = true;
while (visitNext && iterator.hasNext()) {
visitNext = visitor.visitNode(iterator.next());
}
} | 3.26 |
hbase_AvlUtil_isLinked_rdh | /**
* Return true if the node is linked to a list, false otherwise
*/
public static <TNode extends AvlLinkedNode> boolean isLinked(TNode node) {
return (node.iterPrev != null) && (node.iterNext != null);
} | 3.26 |
hbase_AvlUtil_readPrev_rdh | /**
* Return the predecessor of the current node
*
* @param node
* the current node
* @return the predecessor of the current node
*/
public static <TNode extends AvlLinkedNode> TNode readPrev(TNode node) {
return ((TNode) (node.iterPrev));
} | 3.26 |
hbase_AvlUtil_seekTo_rdh | /**
* Reset the iterator, and seeks to the specified key
*
* @param root
* the current root of the tree
* @param key
* the key for the node we are trying to find
* @param keyComparator
* the comparator to use to match node and key
*/
public void seekTo(final TNode root, final Object key, final AvlKeyComparator<TNode> keyComparator) {
current = null;
height = 0;
TNode node = root;
while (node != null) {
if (keyComparator.compareKey(node, key) >= 0) {
if (node.avlLeft != null) {
stack[height++] = node;
node = ((TNode) (node.avlLeft));
} else {
current = node;
return;
}
} else if (node.avlRight != null) {
stack[height++] = node;
node = ((TNode) (node.avlRight));
} else {
if (height > 0) {
TNode parent = ((TNode) (stack[--height]));
while (node == parent.avlRight) {
if (height == 0) {
current = null;
return;
}
node = parent;
parent = ((TNode) (stack[--height]));
}
current = parent;
return;
}
current = null;
return;
}
}
} | 3.26 |
hbase_AvlUtil_get_rdh | /**
* Return the node that matches the specified key or null in case of node not found.
*
* @param root
* the current root of the tree
* @param key
* the key for the node we are trying to find
* @param keyComparator
* the comparator to use to match node and key
* @return the node that matches the specified key or null in case of node not found.
*/
public static <TNode extends AvlNode> TNode get(TNode root, final Object key, final AvlKeyComparator<TNode> keyComparator) {
while (root != null) {
int cmp = keyComparator.compareKey(root, key);
if (cmp > 0) {
root = ((TNode) (root.avlLeft));
} else if (cmp < 0) {
root = ((TNode) (root.avlRight));
} else {
return ((TNode) (root));
}
}
return null;
} | 3.26 |
hbase_AvlUtil_prepend_rdh | /**
* Prepend a node to the tree before a specific node
*
* @param head
* the head of the linked list
* @param base
* the node which we want to add the {@code node} before it
* @param node
* the node which we want to add it before the {@code base} node
*/
public static <TNode extends AvlLinkedNode> TNode prepend(TNode head, TNode base, TNode node) {
assert !isLinked(node) : node + " is already linked";
node.iterNext = base;
node.iterPrev = base.iterPrev;
base.iterPrev.iterNext = node;
base.iterPrev = node;
return head == base ? node : head;
} | 3.26 |
hbase_AvlUtil_getLast_rdh | /**
* Return the last node of the tree.
*
* @param root
* the current root of the tree
* @return the last (max) node of the tree
*/
public static <TNode extends AvlNode> TNode getLast(TNode root) {
if (root != null) {
while (root.avlRight != null) {
root = ((TNode) (root.avlRight));
}
}
return root;
} | 3.26 |
hbase_AvlUtil_m0_rdh | /**
* Return the first node of the tree.
*
* @param root
* the current root of the tree
* @return the first (min) node of the tree
*/
public static <TNode extends AvlNode> TNode m0(TNode root) {
if (root != null) {
while (root.avlLeft != null) {
root = ((TNode) (root.avlLeft));
}
}
return root;
} | 3.26 |
hbase_AvlUtil_insert_rdh | /**
* Insert a node into the tree. This is useful when you want to create a new node or replace the
* content depending if the node already exists or not. Using AvlInsertOrReplace class you can
* return the node to add/replace.
*
* @param root
* the current root of the tree
* @param key
* the key for the node we are trying to insert
* @param keyComparator
* the comparator to use to match node and key
* @param insertOrReplace
* the class to use to insert or replace the node
* @return the new root of the tree
*/
public static <TNode extends AvlNode> TNode insert(TNode root, Object key, final AvlKeyComparator<TNode> keyComparator, final AvlInsertOrReplace<TNode> insertOrReplace) {
if (root == null) {
return insertOrReplace.insert(key);
}
int cmp = keyComparator.compareKey(root, key);
if (cmp < 0) {
root.avlLeft = insert(((TNode) (root.avlLeft)), key, keyComparator, insertOrReplace);
} else if (cmp > 0) {
root.avlRight = insert(((TNode) (root.avlRight)), key, keyComparator, insertOrReplace);
} else {
TNode left = ((TNode) (root.avlLeft));
TNode right = ((TNode) (root.avlRight));
root = insertOrReplace.replace(key, root);
root.avlLeft = left;
root.avlRight = right;
return root;
}
return balance(root);
} | 3.26 |
hbase_AvlUtil_m1_rdh | /**
* Reset the iterator, and seeks to the first (min) node of the tree
*
* @param root
* the current root of the tree
*/
public void m1(final TNode root) {
current = root;
height = 0;
if (root != null) {
while (current.avlLeft != null) {
stack[height++] = current;
current = ((TNode) (current.avlLeft));
}
}
} | 3.26 |
hbase_AvlUtil_appendList_rdh | /**
* Append a list of nodes to the tree
*
* @param head
* the head of the current linked list
* @param otherHead
* the head of the list to append to the current list
* @return the new head of the current list
*/
public static <TNode extends AvlLinkedNode> TNode appendList(TNode head, TNode otherHead) {
if (head == null) {
return otherHead;
}
if (otherHead == null) {
return head;
}
TNode tail = ((TNode) (head.iterPrev));
TNode otherTail = ((TNode) (otherHead.iterPrev));
tail.iterNext = otherHead;
otherHead.iterPrev = tail;
otherTail.iterNext = head;
head.iterPrev = otherTail;
return head;
} | 3.26 |
hbase_AvlUtil_readNext_rdh | /**
* Return the successor of the current node
*
* @param node
* the current node
* @return the successor of the current node
*/
public static <TNode extends AvlLinkedNode> TNode readNext(TNode node) {
return ((TNode) (node.iterNext));
} | 3.26 |
hbase_AvlUtil_append_rdh | /**
* Append a node to the tree
*
* @param head
* the head of the linked list
* @param node
* the node to add to the tail of the list
* @return the new head of the list
*/
public static <TNode extends AvlLinkedNode> TNode append(TNode head, TNode node) {
assert !isLinked(node) : node + " is already linked";
if (head != null) {
TNode tail = ((TNode) (head.iterPrev));
tail.iterNext = node;
node.iterNext = head;
node.iterPrev = tail;
head.iterPrev = node;
return head;
}
node.iterNext = node;
node.iterPrev = node;
return node;
} | 3.26 |
hbase_CompactionRequestImpl_getPriority_rdh | /**
* Gets the priority for the request
*/
@Override
public int getPriority() {
return priority;
} | 3.26 |
hbase_CompactionRequestImpl_getSize_rdh | /**
* Gets the total size of all StoreFiles in compaction
*/
@Override
public long getSize() {
return totalSize;
} | 3.26 |
hbase_CompactionRequestImpl_setPriority_rdh | /**
* Sets the priority for the request
*/
public void setPriority(int p) {
this.priority = p;
} | 3.26 |
hbase_CompactionRequestImpl_m1_rdh | /**
* Sets the region/store name, for logging.
*/
public void m1(String regionName, String storeName) {
this.regionName = regionName;
this.storeName = storeName;
} | 3.26 |
hbase_CompactionRequestImpl_recalculateSize_rdh | /**
* Recalculate the size of the compaction based on current files.
*/
private void recalculateSize() {
this.totalSize = filesToCompact.stream().map(HStoreFile::getReader).mapToLong(r -> r != null ? r.length() : 0L).sum();
} | 3.26 |
hbase_ZKMainServer_hasServer_rdh | /**
*
* @param args
* the arguments to check
* @return True if argument strings have a '-server' in them.
*/
private static boolean hasServer(final String[] args) {
return (args.length > 0) && args[0].equals(SERVER_ARG);
} | 3.26 |
hbase_ZKMainServer_runCmdLine_rdh | /**
* Run the command-line args passed. Calls System.exit when done.
*
* @throws IOException
* in case of a network failure
* @throws InterruptedException
* if the ZooKeeper client closes
* @throws CliException
* if the ZooKeeper exception happens in cli command
*/
void runCmdLine() throws IOException, InterruptedException, CliException {
processCmd(this.cl);
System.exit(0);
} | 3.26 |
hbase_ZKMainServer_hasCommandLineArguments_rdh | /**
*
* @param args
* the arguments to check for command-line arguments
* @return True if command-line arguments were passed.
*/
private static boolean hasCommandLineArguments(final String[] args) {
if (hasServer(args)) {
if (args.length < 2) {
throw new IllegalStateException("-server param but no value");
}
return args.length > 2;
}
return args.length > 0;
} | 3.26 |
hbase_ZKMainServer_main_rdh | /**
* Run the tool.
*
* @param args
* Command line arguments. First arg is path to zookeepers file.
*/
public static void main(String[] args) throws Exception {
String[] newArgs = args;
if (!hasServer(args)) {
// Add the zk ensemble from configuration if none passed on command-line.
Configuration conf = HBaseConfiguration.create();
String hostport = new ZKMainServer().parse(conf);
if ((hostport != null) && (hostport.length() > 0)) {
newArgs = new String[args.length + 2];
System.arraycopy(args, 0, newArgs, 2, args.length);
newArgs[0] = "-server";
newArgs[1] = hostport;
}
}
// If command-line arguments, run our hack so they are executed.
// ZOOKEEPER-1897 was committed to zookeeper-3.4.6 but elsewhere in this class we say
// 3.4.6 breaks command-processing; TODO.
if (hasCommandLineArguments(args)) {
HACK_UNTIL_ZOOKEEPER_1897_ZooKeeperMain zkm = new HACK_UNTIL_ZOOKEEPER_1897_ZooKeeperMain(newArgs);
zkm.runCmdLine();
} else {
ZooKeeperMain.main(newArgs);
}
} | 3.26 |
hbase_Permission_getVersion_rdh | /**
* Returns the object version number
*/
@Override
public byte getVersion() {
return VERSION;
} | 3.26 |
hbase_Permission_newBuilder_rdh | /**
* Build a table permission
*
* @param tableName
* the specific table name
* @return table permission builder
*/
public static Builder newBuilder(TableName tableName) {
return new Builder(tableName);
} | 3.26 |
hbase_Permission_implies_rdh | /**
* check if given action is granted
*
* @param action
* action to be checked
* @return true if granted, false otherwise
*/
public boolean implies(Action action) {
return actions.contains(action);
} | 3.26 |
hbase_Permission_equalsExceptActions_rdh | /**
* Check if two permissions are equal regardless of actions. It is useful when merging a new
* permission with an existing permission, which requires comparing the two permissions' fields.
*
* @param obj
* instance
* @return true if equals, false otherwise
*/
public boolean equalsExceptActions(Object obj) {
return obj instanceof Permission;
} | 3.26 |
hbase_ReplicationSourceShipper_shipEdits_rdh | /**
* Do the shipping logic
*/
private void shipEdits(WALEntryBatch entryBatch) {
List<Entry> entries = entryBatch.getWalEntries();
int sleepMultiplier = 0;
if (entries.isEmpty()) {
updateLogPosition(entryBatch);
return;
}
int currentSize = ((int) (entryBatch.getHeapSize()));
source.getSourceMetrics().setTimeStampNextToReplicate(entries.get(entries.size() - 1).getKey().getWriteTime());
while (isActive()) {
try {
try {
source.tryThrottle(currentSize);
} catch (InterruptedException e) {
LOG.debug("Interrupted while sleeping for throttling control");
Thread.currentThread().interrupt();
// the current thread might have been interrupted to signal termination;
// go directly back to the while() check to confirm this
continue;
}
// create replicateContext here, so the entries can be GC'd upon return from this call
// stack
ReplicationEndpoint.ReplicateContext replicateContext = new ReplicationEndpoint.ReplicateContext();
replicateContext.setEntries(entries).setSize(currentSize);
replicateContext.setWalGroupId(walGroupId);
replicateContext.setTimeout(getAdaptiveTimeout(this.shipEditsTimeout, sleepMultiplier));
long startTimeNs = System.nanoTime();
// send the edits to the endpoint. Will block until the edits are shipped and acknowledged
boolean replicated = source.getReplicationEndpoint().replicate(replicateContext);
long endTimeNs = System.nanoTime();
if (!replicated) {
continue;
} else {
sleepMultiplier = Math.max(sleepMultiplier - 1, 0);
}
// Clean up hfile references
for (Entry entry : entries) {
cleanUpHFileRefs(entry.getEdit());
LOG.trace("shipped entry {}: ", entry);
}
// Log and clean up WAL logs
updateLogPosition(entryBatch);
// offsets totalBufferUsed by deducting shipped batchSize (excludes bulk load size)
// this sizeExcludeBulkLoad has to use same calculation that when calling
// acquireBufferQuota() in ReplicationSourceWALReader because they maintain
// same variable: totalBufferUsed
source.postShipEdits(entries, entryBatch.getUsedBufferSize());
// FIXME check relationship between wal group and overall
source.getSourceMetrics().shipBatch(entryBatch.getNbOperations(), currentSize, entryBatch.getNbHFiles());
source.getSourceMetrics().setAgeOfLastShippedOp(entries.get(entries.size() - 1).getKey().getWriteTime(), walGroupId);
source.getSourceMetrics().updateTableLevelMetrics(entryBatch.getWalEntriesWithSize());
if (LOG.isTraceEnabled()) {
LOG.debug("Replicated {} entries or {} operations in {} ms", entries.size(), entryBatch.getNbOperations(), (endTimeNs - startTimeNs) / 1000000);
}
break;
} catch (Exception ex) {
source.getSourceMetrics().incrementFailedBatches();
LOG.warn("{} threw unknown exception:", source.getReplicationEndpoint().getClass().getName(), ex);
if (sleepForRetries("ReplicationEndpoint threw exception", sleepForRetries, sleepMultiplier, maxRetriesMultiplier)) {
sleepMultiplier++;
}
}
}
} | 3.26 |
hbase_ReplicationSourceShipper_clearWALEntryBatch_rdh | /**
* Attempts to properly update <code>ReplicationSourceManager.totalBufferUser</code>, in case
* there were unprocessed entries batched by the reader to the shipper, but the shipper didn't
* manage to ship those because the replication source is being terminated. In that case, it
* iterates through the batched entries and decrease the pending entries size from
* <code>ReplicationSourceManager.totalBufferUser</code>
* <p/>
* <b>NOTES</b> 1) This method should only be called upon replication source termination. It
* blocks waiting for both shipper and reader threads termination, to make sure no race conditions
* when updating <code>ReplicationSourceManager.totalBufferUser</code>. 2) It <b>does not</b>
* attempt to terminate reader and shipper threads. Those <b>must</b> have been triggered
* interruption/termination prior to calling this method.
*/
void clearWALEntryBatch() {
long timeout = EnvironmentEdgeManager.currentTime() + this.shipEditsTimeout;
while (this.isAlive() || this.entryReader.isAlive()) {
try {
if (EnvironmentEdgeManager.currentTime() >= timeout) {
LOG.warn("Shipper clearWALEntryBatch method timed out whilst waiting reader/shipper " + "thread to stop. Not cleaning buffer usage. Shipper alive: {}; Reader alive: {}", this.source.getPeerId(), this.isAlive(), this.entryReader.isAlive());
return;
} else {
// Wait both shipper and reader threads to stop
Thread.sleep(this.sleepForRetries);
}
} catch (InterruptedException e) {
LOG.warn("{} Interrupted while waiting {} to stop on clearWALEntryBatch. " + "Not cleaning buffer usage: {}", this.source.getPeerId(), this.getName(), e);
return;
}
}
long totalReleasedBytes = 0;
while (true) {
WALEntryBatch batch = entryReader.entryBatchQueue.poll();
if (batch == null) {
break;
}
totalReleasedBytes += source.getSourceManager().releaseWALEntryBatchBufferQuota(batch);
}
if (LOG.isTraceEnabled()) {
LOG.trace("Decrementing totalBufferUsed by {}B while stopping Replication WAL Readers.", totalReleasedBytes);
}
} | 3.26 |
hbase_RollingStatCalculator_fillWithZeros_rdh | /**
* Returns an array of given size initialized with zeros
*/
private long[] fillWithZeros(int size) {
long[] zeros = new long[size];
for (int i = 0; i < size; i++) {
zeros[i] = 0L;
}
return zeros;
} | 3.26 |
hbase_RollingStatCalculator_removeData_rdh | /**
* Update the statistics after removing the given data value
*/
private void removeData(long data) {
currentSum = currentSum - ((double) (data));
currentSqrSum = currentSqrSum - (((double) (data)) * data);
numberOfDataValues--;
} | 3.26 |
hbase_RollingStatCalculator_insertDataValue_rdh | /**
* Inserts given data value to array of data values to be considered for statistics calculation
*/
public void insertDataValue(long data) {
// if current number of data points already equals rolling period and rolling period is
// non-zero then remove one data and update the statistics
if ((numberOfDataValues >= rollingPeriod) && (rollingPeriod > 0)) {
this.removeData(dataValues[currentIndexPosition]);
}
numberOfDataValues++;
currentSum = currentSum + ((double) (data));
currentSqrSum = currentSqrSum + (((double) (data)) * data);
if (rollingPeriod > 0) {
dataValues[currentIndexPosition] = data;
currentIndexPosition = (currentIndexPosition + 1) % rollingPeriod;
}
} | 3.26 |
hbase_RollingStatCalculator_getDeviation_rdh | /**
* Returns deviation of the data values that are in the current list of data values
*/
public double getDeviation() {
double variance = (currentSqrSum - ((currentSum * currentSum) / ((double) (numberOfDataValues)))) / numberOfDataValues;
return Math.sqrt(variance);
} | 3.26 |
hbase_RotateFile_delete_rdh | /**
* Deletes the two files used for rotating data. If any of the files cannot be deleted, an
* IOException is thrown.
*
* @throws IOException
* if there is an error deleting either file
*/
public void delete() throws IOException {
Path next = files[nextFile];
// delete next file first, and then the current file, so when failing to delete, we can still
// read the correct data
if (fs.exists(next) && (!fs.delete(next, false))) {
throw new IOException("Can not delete " + next);
}
Path current = files[1 - nextFile];
if (fs.exists(current) && (!fs.delete(current, false))) {
throw new IOException("Can not delete " + current);
}
} | 3.26 |
hbase_RotateFile_write_rdh | /**
* Writes the given data to the next file in the rotation, with a timestamp calculated based on
* the previous timestamp and the current time to make sure it is greater than the previous
* timestamp. The method also deletes the previous file, which is no longer needed.
* <p/>
* Notice that, for a newly created {@link RotateFile} instance, you need to call {@link #read()}
* first to initialize the nextFile index, before calling this method.
*
* @param data
* the data to be written to the file
* @throws IOException
* if an I/O error occurs while writing the data to the file
*/
public void write(byte[] data) throws IOException {
if (data.length > maxFileSize) {
throw new IOException((("Data size " + data.length) + " is greater than max allowed size ") + maxFileSize);
}
long timestamp = Math.max(prevTimestamp + 1, EnvironmentEdgeManager.currentTime());
m0(fs, files[nextFile], timestamp, data);
prevTimestamp = timestamp;
nextFile = 1 - nextFile;
try {
fs.delete(files[nextFile], false);
} catch (IOException e) {
// we will create new file with overwrite = true, so not a big deal here, only for speed up
// loading as we do not need to read this file when loading
LOG.debug("Failed to delete old file {}, ignoring the exception", files[nextFile], e);
}
} | 3.26 |
hbase_Tag_getValueAsByte_rdh | /**
* Converts the value bytes of the given tag into a byte value
*
* @param tag
* The Tag
* @return value as byte
*/
public static byte getValueAsByte(Tag tag) {
if (tag.hasArray()) {
return tag.getValueArray()[tag.getValueOffset()];
}
return ByteBufferUtils.toByte(tag.getValueByteBuffer(), tag.getValueOffset());
} | 3.26 |
hbase_Tag_cloneValue_rdh | /**
* Returns tag value in a new byte array. Primarily for use client-side. If server-side, use
* {@link Tag#getValueArray()} with appropriate {@link Tag#getValueOffset()} and
* {@link Tag#getValueLength()} instead to save on allocations.
*
* @param tag
* The Tag whose value to be returned
* @return tag value in a new byte array.
*/
public static byte[] cloneValue(Tag tag) {
int tagLength = tag.getValueLength();
byte[] tagArr = new byte[tagLength];
if (tag.hasArray()) {
Bytes.putBytes(tagArr, 0, tag.getValueArray(), tag.getValueOffset(), tagLength);
} else {
ByteBufferUtils.copyFromBufferToArray(tagArr, tag.getValueByteBuffer(), tag.getValueOffset(), 0, tagLength);
}
return tagArr;
} | 3.26 |
hbase_Tag_getValueAsLong_rdh | /**
* Converts the value bytes of the given tag into a long value
*
* @param tag
* The Tag
* @return value as long
*/
public static long getValueAsLong(Tag tag) {
if (tag.hasArray()) {
return Bytes.toLong(tag.getValueArray(), tag.getValueOffset(), tag.getValueLength());
}
return ByteBufferUtils.toLong(tag.getValueByteBuffer(), tag.getValueOffset());
} | 3.26 |
hbase_Tag_getValueAsString_rdh | /**
* Converts the value bytes of the given tag into a String value
*
* @param tag
* The Tag
* @return value as String
*/
public static String getValueAsString(Tag tag) {
if (tag.hasArray()) {
return Bytes.toString(tag.getValueArray(), tag.getValueOffset(), tag.getValueLength());
}
return Bytes.toString(cloneValue(tag));
} | 3.26 |
hbase_MapReduceHFileSplitterJob_usage_rdh | /**
* Print usage
*
* @param errorMsg
* Error message. Can be null.
*/
private void usage(final String errorMsg) {
if ((errorMsg != null) && (errorMsg.length() > 0)) {
System.err.println("ERROR: " + errorMsg);
}
System.err.println(("Usage: " + NAME) + " [options] <HFile inputdir(s)> <table>");
System.err.println("Read all HFile's for <table> and split them to <table> region boundaries.");
System.err.println("<table> table to load.\n");
System.err.println("To generate HFiles for a bulk data load, pass the option:");
System.err.println((" -D" + BULK_OUTPUT_CONF_KEY) + "=/path/for/output");
System.err.println("Other options:");
System.err.println((" -D " + JOB_NAME_CONF_KEY) + "=jobName - use the specified mapreduce job name for the HFile splitter");
System.err.println(("For performance also consider the following options:\n" + " -Dmapreduce.map.speculative=false\n") + " -Dmapreduce.reduce.speculative=false");
} | 3.26 |
hbase_MapReduceHFileSplitterJob_main_rdh | /**
* Main entry point.
*
* @param args
* The command line parameters.
* @throws Exception
* When running the job fails.
*/
public static void main(String[] args) throws Exception {
int ret = ToolRunner.run(new MapReduceHFileSplitterJob(HBaseConfiguration.create()), args);
System.exit(ret);
} | 3.26 |
hbase_MapReduceHFileSplitterJob_createSubmittableJob_rdh | /**
* Sets up the actual job.
*
* @param args
* The command line parameters.
* @return The newly created job.
* @throws IOException
* When setting up the job fails.
*/
public Job createSubmittableJob(String[] args) throws IOException {
Configuration conf = getConf();
String inputDirs = args[0];
String tabName = args[1];
conf.setStrings(TABLES_KEY, tabName);
conf.set(FileInputFormat.INPUT_DIR, inputDirs);
Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, (NAME + "_") + EnvironmentEdgeManager.currentTime()));
job.setJarByClass(MapReduceHFileSplitterJob.class);
job.setInputFormatClass(HFileInputFormat.class);
job.setMapOutputKeyClass(ImmutableBytesWritable.class);
String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY);
if (hfileOutPath != null) {
LOG.debug((("add incremental job :" + hfileOutPath) + " from ") + inputDirs);
TableName tableName = TableName.valueOf(tabName);
job.setMapperClass(MapReduceHFileSplitterJob.HFileCellMapper.class);
job.setReducerClass(CellSortReducer.class);
Path outputDir = new Path(hfileOutPath);
FileOutputFormat.setOutputPath(job, outputDir);
job.setMapOutputValueClass(MapReduceExtendedCell.class);
try (Connection conn = ConnectionFactory.createConnection(conf);Table table = conn.getTable(tableName);RegionLocator regionLocator = conn.getRegionLocator(tableName)) {
HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator);
}
LOG.debug("success configuring load incremental job");
TableMapReduceUtil.addDependencyJars(job.getConfiguration(), Preconditions.class);
} else {
throw new IOException("No bulk output directory specified");
}
return job;
} | 3.26 |
hbase_MapReduceHFileSplitterJob_map_rdh | /**
* A mapper that just writes out cells. This one can be used together with {@link CellSortReducer}
*/
static class HFileCellMapper extends Mapper<NullWritable, Cell, ImmutableBytesWritable, Cell> {
@Override
public void map(NullWritable key, Cell value, Context context) throws IOException, InterruptedException {
context.write(new ImmutableBytesWritable(CellUtil.cloneRow(value)), new MapReduceExtendedCell(value));
} | 3.26 |
hbase_MultiTableOutputFormat_write_rdh | /**
* Writes an action (Put or Delete) to the specified table.
*
* @param tableName
* the table being updated
* @param action
* the update, either a put or a delete
* @throws IllegalArgumentException
* if the action is not a put or a delete
*/
@Override
public void write(ImmutableBytesWritable tableName, Mutation action) throws IOException {
BufferedMutator mutator = getBufferedMutator(tableName);
// The actions are not immutable, so we defensively copy them
if (action instanceof Put) {
Put put = new Put(((Put) (action)));
put.setDurability(useWriteAheadLogging ? Durability.SYNC_WAL : Durability.SKIP_WAL);
mutator.mutate(put);
} else if (action instanceof Delete) {
Delete delete = new Delete(((Delete) (action)));
mutator.mutate(delete);
} else {
throw new IllegalArgumentException("action must be either Delete or Put");
}
} | 3.26 |
hbase_MultiTableOutputFormat_getBufferedMutator_rdh | /**
* @param tableName
* the name of the table, as a string
* @return the named mutator
* @throws IOException
* if there is a problem opening a table
*/
BufferedMutator getBufferedMutator(ImmutableBytesWritable tableName) throws IOException {
if (this.connection == null) {
this.connection = ConnectionFactory.createConnection(conf);
}
if (!mutatorMap.containsKey(tableName)) {
LOG.debug(("Opening HTable \"" + Bytes.toString(tableName.get())) + "\" for writing");
BufferedMutator mutator = connection.getBufferedMutator(TableName.valueOf(tableName.get()));
mutatorMap.put(tableName, mutator);
}
return mutatorMap.get(tableName);
} | 3.26 |
hbase_MasterServices_m1_rdh | /**
* Returns the SplitWALManager, or null if the current WAL splitting is ZooKeeper-based
*/
default SplitWALManager m1() {
return null;
} | 3.26 |
hbase_MasterServices_modifyTable_rdh | /**
* Modify the descriptor of an existing table
*
* @param tableName
* The table name
* @param descriptor
* The updated table descriptor
*/
default long modifyTable(final TableName tableName, final TableDescriptor descriptor, final long nonceGroup, final long nonce) throws IOException {
return modifyTable(tableName, descriptor, nonceGroup, nonce, true);
} | 3.26 |
hbase_StaticUserWebFilter_m1_rdh | /**
* Retrieve the static username from the configuration.
*/
static String m1(Configuration conf) {
String oldStyleUgi = conf.get(DEPRECATED_UGI_KEY);
if (oldStyleUgi != null) {
// We can't use the normal configuration deprecation mechanism here
// since we need to split out the username from the configured UGI.
LOG.warn(((DEPRECATED_UGI_KEY + " should not be used. Instead, use ") + HBASE_HTTP_STATIC_USER) + ".");
return Iterables.get(Splitter.on(',').split(oldStyleUgi), 0);
} else {
return conf.get(HBASE_HTTP_STATIC_USER, DEFAULT_HBASE_HTTP_STATIC_USER);
}
} | 3.26 |
hbase_RegexStringComparator_toByteArray_rdh | /**
* Returns The comparator serialized using pb
*/
@Override
public byte[] toByteArray() {
return engine.toByteArray();
} | 3.26 |
hbase_RegexStringComparator_m0_rdh | /**
* Specifies the {@link Charset} to use to convert the row key to a String.
* <p>
* The row key needs to be converted to a String in order to be matched against the regular
* expression. This method controls which charset is used to do this conversion.
* <p>
* If the row key is made of arbitrary bytes, the charset {@code ISO-8859-1} is recommended.
*
* @param charset
* The charset to use.
*/
public void m0(final Charset charset) {
engine.setCharset(charset.name());
} | 3.26 |
hbase_RegexStringComparator_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the comparator that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(ByteArrayComparable other) {
if (other == this) {
return true;
}
if (!(other instanceof RegexStringComparator)) {
return false;
}
RegexStringComparator comparator = ((RegexStringComparator) (other));
return (((super.areSerializedFieldsEqual(comparator) && engine.getClass().isInstance(comparator.getEngine())) && engine.getPattern().equals(comparator.getEngine().getPattern())) && (engine.getFlags() == comparator.getEngine().getFlags())) && engine.getCharset().equals(comparator.getEngine().getCharset());
} | 3.26 |
hbase_RegexStringComparator_m1_rdh | /**
* Parse a serialized representation of {@link RegexStringComparator}
*
* @param pbBytes
* A pb serialized {@link RegexStringComparator} instance
* @return An instance of {@link RegexStringComparator} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static RegexStringComparator m1(final byte[] pbBytes) throws DeserializationException {
ComparatorProtos.RegexStringComparator proto;
try {
proto = ComparatorProtos.RegexStringComparator.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
RegexStringComparator comparator;
if (proto.hasEngine()) {
EngineType engine = EngineType.valueOf(proto.getEngine());
comparator = new RegexStringComparator(proto.getPattern(), proto.getPatternFlags(), engine);
} else {
comparator = new RegexStringComparator(proto.getPattern(), proto.getPatternFlags());
}
String charset = proto.getCharset();
if (charset.length() > 0) {
try {
comparator.getEngine().setCharset(charset);
} catch (IllegalCharsetNameException e) {
LOG.error("invalid charset", e);
}
}
return comparator;
} | 3.26 |
hbase_RestoreTablesClient_restore_rdh | /**
* Restore operation. Stage 2: resolved Backup Image dependency
*
* @param backupManifestMap
* : tableName, Manifest
* @param sTableArray
* The array of tables to be restored
* @param tTableArray
* The array of mapping tables to restore to
* @throws IOException
* exception
*/
private void restore(HashMap<TableName, BackupManifest> backupManifestMap, TableName[] sTableArray, TableName[] tTableArray, boolean isOverwrite) throws IOException {
TreeSet<BackupImage> restoreImageSet = new TreeSet<>();
for (int i = 0; i < sTableArray.length; i++) {
TableName table = sTableArray[i];
BackupManifest manifest = backupManifestMap.get(table);
// Get the image list of this backup for restore in time order from old
// to new.
List<BackupImage> list = new ArrayList<>();
list.add(manifest.getBackupImage());
TreeSet<BackupImage> set = new TreeSet<>(list);
List<BackupImage> depList = manifest.getDependentListByTable(table);
set.addAll(depList);
BackupImage[] arr = new BackupImage[set.size()];
set.toArray(arr);
restoreImages(arr, table, tTableArray[i], isOverwrite);
restoreImageSet.addAll(list);
if ((restoreImageSet != null) && (!restoreImageSet.isEmpty())) {
LOG.info("Restore includes the following image(s):");
for (BackupImage image : restoreImageSet) {
LOG.info((("Backup: " + image.getBackupId()) + " ") + HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), table));
}
}
}
LOG.debug("restoreStage finished");
} | 3.26 |
hbase_RestoreTablesClient_checkTargetTables_rdh | /**
* Validate target tables.
*
* @param tTableArray
* target tables
* @param isOverwrite
* overwrite existing table
* @throws IOException
* exception
*/
private void checkTargetTables(TableName[] tTableArray, boolean isOverwrite) throws IOException {
ArrayList<TableName> existTableList = new ArrayList<>();
ArrayList<TableName> disabledTableList = new ArrayList<>();
// check if the tables already exist
try (Admin admin = conn.getAdmin()) {
for (TableName tableName : tTableArray) {
if (admin.tableExists(tableName)) {
existTableList.add(tableName);
if (admin.isTableDisabled(tableName)) {
disabledTableList.add(tableName);
}
} else {
LOG.info(("HBase table " + tableName) + " does not exist. It will be created during restore process");
}
}
}
if (existTableList.size() > 0) {
if (!isOverwrite) {
LOG.error(((("Existing table (" + existTableList) + ") found in the restore target, please add ") + "\"-o\" as overwrite option in the command if you mean") + " to restore to these existing tables");
throw new IOException("Existing table found in target while no \"-o\" " + "as overwrite option found");
} else if (disabledTableList.size() > 0) {
LOG.error("Found offline table in the restore target, " + "please enable them before restore with \"-overwrite\" option");
LOG.info("Offline table list in restore target: " + disabledTableList);
throw new IOException("Found offline table in the target when restore with \"-overwrite\" option");
}
}
} | 3.26 |
hbase_RestoreTablesClient_restoreImages_rdh | /**
* Restore operation handle each backupImage in array.
*
* @param images
* array BackupImage
* @param sTable
* table to be restored
* @param tTable
* table to be restored to
* @param truncateIfExists
* truncate table
* @throws IOException
* exception
*/
private void restoreImages(BackupImage[] images, TableName sTable, TableName tTable, boolean truncateIfExists) throws IOException {
// First image MUST be image of a FULL backup
BackupImage image = images[0];
String rootDir = image.getRootDir();
String backupId = image.getBackupId();
Path backupRoot = new Path(rootDir);
RestoreTool restoreTool = new RestoreTool(conf, backupRoot, restoreRootDir, backupId);
Path tableBackupPath = HBackupFileSystem.getTableBackupPath(sTable, backupRoot, backupId);
String lastIncrBackupId = (images.length == 1) ? null : images[images.length - 1].getBackupId();
// We need hFS only for full restore (see the code)
BackupManifest manifest = HBackupFileSystem.getManifest(conf, backupRoot, backupId);
if (manifest.getType() == BackupType.FULL) {
LOG.info(((((("Restoring '" + sTable) + "' to '") + tTable) + "' from full") + " backup image ") + tableBackupPath.toString());
conf.set(JOB_NAME_CONF_KEY, (("Full_Restore-" + backupId) + "-") + tTable);
restoreTool.fullRestoreTable(conn, tableBackupPath, sTable, tTable, truncateIfExists, lastIncrBackupId);
conf.unset(JOB_NAME_CONF_KEY);
} else {
// incremental Backup
throw new IOException("Unexpected backup type " + image.getType());
}
if (images.length == 1) {
// full backup restore done
return;
}
List<Path> dirList = new ArrayList<>();
// add full backup path
// full backup path comes first
for (int i = 1; i < images.length; i++) {
BackupImage im = images[i];
String fileBackupDir = HBackupFileSystem.getTableBackupDir(im.getRootDir(), im.getBackupId(), sTable);
List<Path> list = getFilesRecursively(fileBackupDir);
dirList.addAll(list);
}
if (dirList.isEmpty()) {
LOG.warn(("Nothing has changed, so there is no need to restore '" + sTable) + "'");
return;
}
String dirs = StringUtils.join(dirList, ",");
LOG.info((((("Restoring '" + sTable) + "' to '") + tTable) + "' from log dirs: ") + dirs);
Path[] paths = new Path[dirList.size()];
dirList.toArray(paths);
conf.set(JOB_NAME_CONF_KEY, (("Incremental_Restore-" + backupId) + "-") + tTable);
restoreTool.incrementalRestoreTable(conn, tableBackupPath, paths, new TableName[]{ sTable }, new TableName[]{ tTable }, lastIncrBackupId);
LOG.info((sTable + " has been successfully restored to ") + tTable);
} | 3.26 |
hbase_MasterProcedureManager_execProcedure_rdh | /**
* Execute a distributed procedure on cluster
*
* @param desc
* Procedure description
*/
public void execProcedure(ProcedureDescription desc) throws IOException {
} | 3.26 |
hbase_MasterProcedureManager_execProcedureWithRet_rdh | /**
* Execute a distributed procedure on cluster with return data.
*
* @param desc
* Procedure description
* @return data returned from the procedure execution, null if no data
*/
public byte[] execProcedureWithRet(ProcedureDescription desc) throws IOException {
return null;
} | 3.26 |
hbase_UncompressedBlockSizePredicator_updateLatestBlockSizes_rdh | /**
* Empty implementation. Does nothing.
*
* @param uncompressed
* the uncompressed size of last block written.
* @param compressed
* the compressed size of last block written.
*/
@Override
public void updateLatestBlockSizes(HFileContext context, int uncompressed, int compressed) {
} | 3.26 |
hbase_UncompressedBlockSizePredicator_shouldFinishBlock_rdh | /**
* Dummy implementation that always returns true. This means, we will be only considering the
* block uncompressed size for deciding when to finish a block.
*
* @param uncompressed
* true if the block should be finished.
*/
@Override
public boolean shouldFinishBlock(int uncompressed) {
return true;
} | 3.26 |
hbase_KeyValueScanner_getScannerOrder_rdh | /**
* Get the order of this KeyValueScanner. This is only relevant for StoreFileScanners. This is
* required for comparing multiple files to find out which one has the latest data.
* StoreFileScanners are ordered from 0 (oldest) to newest in increasing order.
*/
default long getScannerOrder() {
return 0;
} | 3.26 |
hbase_BloomFilterFactory_getMaxFold_rdh | /**
* Returns the value for Bloom filter max fold in the given configuration
*/
public static int getMaxFold(Configuration conf) {
return conf.getInt(IO_STOREFILE_BLOOM_MAX_FOLD, MAX_ALLOWED_FOLD_FACTOR);
} | 3.26 |
hbase_BloomFilterFactory_getMaxKeys_rdh | /**
* Returns max key for the Bloom filter from the configuration
*/
public static int getMaxKeys(Configuration conf) {
return conf.getInt(IO_STOREFILE_BLOOM_MAX_KEYS, (128 * 1000) * 1000);
}
/**
* Creates a new general (Row or RowCol) Bloom filter at the time of
* {@link org.apache.hadoop.hbase.regionserver.HStoreFile} | 3.26 |
hbase_BloomFilterFactory_createFromMeta_rdh | /**
* Instantiates the correct Bloom filter class based on the version provided in the meta block
* data.
*
* @param meta
* the byte array holding the Bloom filter's metadata, including version information
* @param reader
* the {@link HFile} reader to use to lazily load Bloom filter blocks
* @return an instance of the correct type of Bloom filter
*/
public static BloomFilter createFromMeta(DataInput meta, HFile.Reader reader) throws IllegalArgumentException, IOException {
return createFromMeta(meta, reader, null);
} | 3.26 |
hbase_BloomFilterFactory_isGeneralBloomEnabled_rdh | /**
* Returns true if general Bloom (Row or RowCol) filters are enabled in the given configuration
*/
public static boolean isGeneralBloomEnabled(Configuration conf) {
return conf.getBoolean(IO_STOREFILE_BLOOM_ENABLED, true);
} | 3.26 |
hbase_BloomFilterFactory_getErrorRate_rdh | /**
* Returns the Bloom filter error rate in the given configuration
*/
public static float getErrorRate(Configuration conf) {
return conf.getFloat(IO_STOREFILE_BLOOM_ERROR_RATE, ((float) (0.01)));
} | 3.26 |
hbase_BloomFilterFactory_getBloomBlockSize_rdh | /**
* Returns the compound Bloom filter block size from the configuration
*/
public static int getBloomBlockSize(Configuration conf) {
return conf.getInt(IO_STOREFILE_BLOOM_BLOCK_SIZE, 128 * 1024);
} | 3.26 |
hbase_BloomFilterFactory_isDeleteFamilyBloomEnabled_rdh | /**
* Returns true if Delete Family Bloom filters are enabled in the given configuration
*/
public static boolean isDeleteFamilyBloomEnabled(Configuration conf) {
return conf.getBoolean(IO_STOREFILE_DELETEFAMILY_BLOOM_ENABLED, true);
} | 3.26 |
hbase_ClusterSchemaServiceImpl_checkIsRunning_rdh | // All below are synchronized so consistent view on whether running or not.
private synchronized void checkIsRunning() throws ServiceNotRunningException {
if (!isRunning()) {
throw new ServiceNotRunningException();
}
} | 3.26 |
hbase_SlowLogTableAccessor_getRowKey_rdh | /**
* Create rowKey: currentTime APPEND slowLogPayload.hashcode. A scan on the slowlog table should
* keep records sorted by time; however, records added at the very same time could be in random
* order.
*
* @param slowLogPayload
* SlowLogPayload to process
* @return rowKey byte[]
*/
private static byte[] getRowKey(final TooSlowLog.SlowLogPayload slowLogPayload) {
String v5 = String.valueOf(slowLogPayload.hashCode());
String lastFiveDig = v5.substring(v5.length() > 5 ? v5.length() - 5 : 0);
if (lastFiveDig.startsWith("-")) {
lastFiveDig = String.valueOf(ThreadLocalRandom.current().nextInt(99999));
}
final long currentTime = EnvironmentEdgeManager.currentTime();
final String timeAndHashcode = currentTime + lastFiveDig;
final long rowKeyLong = Long.parseLong(timeAndHashcode);
return Bytes.toBytes(rowKeyLong);
} | 3.26 |
hbase_SlowLogTableAccessor_addSlowLogRecords_rdh | /**
* Add slow/large log records to hbase:slowlog table
*
* @param slowLogPayloads
* List of SlowLogPayload to process
* @param connection
* connection
*/
public static void addSlowLogRecords(final List<TooSlowLog.SlowLogPayload> slowLogPayloads, Connection connection) {
List<Put> puts = new ArrayList<>(slowLogPayloads.size());
for (TooSlowLog.SlowLogPayload slowLogPayload : slowLogPayloads) {
final byte[] rowKey = getRowKey(slowLogPayload);
final Put put = new Put(rowKey).setDurability(Durability.SKIP_WAL).setPriority(HConstants.NORMAL_QOS).addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("call_details"), Bytes.toBytes(slowLogPayload.getCallDetails())).addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("client_address"), Bytes.toBytes(slowLogPayload.getClientAddress())).addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("method_name"), Bytes.toBytes(slowLogPayload.getMethodName())).addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("param"), Bytes.toBytes(slowLogPayload.getParam())).addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("processing_time"), Bytes.toBytes(Integer.toString(slowLogPayload.getProcessingTime()))).addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("queue_time"), Bytes.toBytes(Integer.toString(slowLogPayload.getQueueTime()))).addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("region_name"), Bytes.toBytes(slowLogPayload.getRegionName())).addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("response_size"), Bytes.toBytes(Long.toString(slowLogPayload.getResponseSize()))).addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("block_bytes_scanned"), Bytes.toBytes(Long.toString(slowLogPayload.getBlockBytesScanned()))).addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("server_class"), Bytes.toBytes(slowLogPayload.getServerClass())).addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("start_time"), Bytes.toBytes(Long.toString(slowLogPayload.getStartTime()))).addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("type"), Bytes.toBytes(slowLogPayload.getType().name())).addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("username"), Bytes.toBytes(slowLogPayload.getUserName()));
puts.add(put);
}
try {
doPut(connection, puts);
} catch (Exception e) {
LOG.warn("Failed to add slow/large log records to hbase:slowlog table.", e);
}
} | 3.26 |
hbase_HBaseReplicationEndpoint_getReplicationSink_rdh | /**
* Get a randomly-chosen replication sink to replicate to.
*
* @return a replication sink to replicate to
*/
protected synchronized SinkPeer getReplicationSink() throws IOException {
if (sinkServers.isEmpty()) {
LOG.info("Current list of sinks is out of date or empty, updating");
chooseSinks();
}
if (sinkServers.isEmpty()) {
throw new IOException("No replication sinks are available");
}
ServerName serverName = sinkServers.get(ThreadLocalRandom.current().nextInt(sinkServers.size()));
return new SinkPeer(serverName, conn.getRegionServerAdmin(serverName));
} | 3.26 |
hbase_HBaseReplicationEndpoint_reconnect_rdh | /**
* A private method used to re-establish a zookeeper session with a peer cluster.
*/
private void reconnect(KeeperException ke) {
if (((ke instanceof ConnectionLossException) || (ke instanceof SessionExpiredException)) || (ke instanceof AuthFailedException)) {
String clusterKey = ctx.getPeerConfig().getClusterKey();
LOG.warn("Lost the ZooKeeper connection for peer {}", clusterKey, ke);
try {
reloadZkWatcher();
} catch (IOException io) {
LOG.warn("Creation of ZookeeperWatcher failed for peer {}", clusterKey, io);
}
}
} | 3.26 |
hbase_HBaseReplicationEndpoint_reportSinkSuccess_rdh | /**
* Report that a {@code SinkPeer} successfully replicated a chunk of data.
*
* @param sinkPeer
* The SinkPeer that successfully replicated the chunk of data
*/
protected synchronized void reportSinkSuccess(SinkPeer sinkPeer) {
badReportCounts.remove(sinkPeer.getServerName());
} | 3.26 |
hbase_HBaseReplicationEndpoint_fetchSlavesAddresses_rdh | /**
* Get the list of all the region servers from the specified peer
*
* @return list of region server addresses or an empty list if the slave is unavailable
*/
protected List<ServerName> fetchSlavesAddresses() {
List<String> children = null;
try {
synchronized(zkwLock) {
children = ZKUtil.listChildrenAndWatchForNewChildren(zkw, zkw.getZNodePaths().rsZNode);
}
} catch (KeeperException ke) {
if (LOG.isDebugEnabled()) {
LOG.debug("Fetch slaves addresses failed", ke);
}
reconnect(ke);
}
if (children == null) {
return Collections.emptyList();
}
List<ServerName> addresses = new ArrayList<>(children.size());
for (String child : children) {
addresses.add(ServerName.parseServerName(child));
}
return addresses;
} | 3.26 |
hbase_HBaseReplicationEndpoint_getPeerUUID_rdh | // Synchronize peer cluster connection attempts to avoid races and rate
// limit connections when multiple replication sources try to connect to
// the peer cluster. If the peer cluster is down we can get out of control
// over time.
@Override
public UUID getPeerUUID() {
UUID peerUUID = null;
try {
synchronized(zkwLock) {
peerUUID = ZKClusterId.getUUIDForCluster(zkw);
}
} catch (KeeperException ke) {
reconnect(ke);
}
return peerUUID;
} | 3.26 |
hbase_HBaseReplicationEndpoint_reportBadSink_rdh | /**
* Report a {@code SinkPeer} as being bad (i.e. an attempt to replicate to it failed). If a single
* SinkPeer is reported as bad more than replication.bad.sink.threshold times, it will be removed
* from the pool of potential replication targets.
*
* @param sinkPeer
* The SinkPeer that had a failed replication attempt on it
*/
protected synchronized void reportBadSink(SinkPeer sinkPeer) {
ServerName serverName = sinkPeer.getServerName();
int badReportCount = badReportCounts.compute(serverName, (k, v) -> v == null ? 1 : v + 1);
if (badReportCount > badSinkThreshold) {
this.sinkServers.remove(serverName);
if (sinkServers.isEmpty()) {
chooseSinks();
}
}
} | 3.26 |
hbase_HBaseReplicationEndpoint_reloadZkWatcher_rdh | /**
* Closes the current ZKW (if not null) and creates a new one
*
* @throws IOException
* If anything goes wrong connecting
*/
private void reloadZkWatcher() throws IOException {
synchronized(zkwLock) {
if (zkw != null) {
zkw.close();
}
zkw = new ZKWatcher(ctx.getConfiguration(), "connection to cluster: " + ctx.getPeerId(), this);
zkw.registerListener(new PeerRegionServerListener(this));
}
} | 3.26 |
hbase_RegionPlan_getSource_rdh | /**
* Get the source server for the plan for this region.
*
* @return server info for source
*/
public ServerName getSource() {
return source;
} | 3.26 |
hbase_RegionPlan_compareTo_rdh | /**
* Compare the region info.
*
* @param other
* region plan you are comparing against
*/
@Override
public int compareTo(RegionPlan other) {
return compareTo(this, other);
} | 3.26 |
hbase_RegionPlan_m0_rdh | /**
* Get the destination server for the plan for this region.
*
* @return server info for destination
*/
public ServerName m0() {
return dest;
} | 3.26 |
hbase_RegionPlan_getRegionName_rdh | /**
* Get the encoded region name for the region this plan is for.
*
* @return Encoded region name
*/
public String getRegionName() {
return this.hri.getEncodedName();
} | 3.26 |
hbase_RegionPlan_setDestination_rdh | /**
* Set the destination server for the plan for this region.
*/
public void setDestination(ServerName dest) {
this.dest = dest;
} | 3.26 |
hbase_SnapshotManifest_getRegionManifestsMap_rdh | /**
* Get all the Region Manifests from the snapshot. This is a helper to get a map keyed by the
* region encoded name
*/
public Map<String, SnapshotRegionManifest> getRegionManifestsMap() {
if ((regionManifests == null) || regionManifests.isEmpty()) {
return null;
}
HashMap<String, SnapshotRegionManifest> regionsMap = new HashMap<>(regionManifests.size());
for (SnapshotRegionManifest manifest : regionManifests) {
String regionName = getRegionNameFromManifest(manifest);
regionsMap.put(regionName, manifest);
}
return regionsMap;
} | 3.26 |
hbase_SnapshotManifest_writeDataManifest_rdh | /* Write the SnapshotDataManifest file */
private void writeDataManifest(final SnapshotDataManifest manifest) throws IOException {
try (FSDataOutputStream stream = workingDirFs.create(new Path(workingDir, f0))) {
manifest.writeTo(stream);
}
} | 3.26 |
hbase_SnapshotManifest_getTableDescriptor_rdh | /**
* Get the table descriptor from the Snapshot
*/
public TableDescriptor getTableDescriptor() {
return this.htd;
} | 3.26 |
hbase_SnapshotManifest_getRegionNameFromManifest_rdh | /**
* Extract the region encoded name from the region manifest
*/
static String getRegionNameFromManifest(final SnapshotRegionManifest manifest) {
byte[] regionName = RegionInfo.createRegionName(ProtobufUtil.toTableName(manifest.getRegionInfo().getTableName()), manifest.getRegionInfo().getStartKey().toByteArray(), manifest.getRegionInfo().getRegionId(), true);
return RegionInfo.encodeRegionName(regionName);
} | 3.26 |
hbase_SnapshotManifest_readDataManifest_rdh | /* Read the SnapshotDataManifest file */
private SnapshotDataManifest readDataManifest() throws IOException {
try (FSDataInputStream in = workingDirFs.open(new Path(workingDir, f0))) {
CodedInputStream cin = CodedInputStream.newInstance(in);
cin.setSizeLimit(manifestSizeLimit);
return SnapshotDataManifest.parseFrom(cin);
} catch (FileNotFoundException e) {
return null;
} catch (InvalidProtocolBufferException e) {
throw new CorruptedSnapshotException("unable to parse data manifest " + e.getMessage(), e);
}
} | 3.26 |
hbase_SnapshotManifest_getSnapshotFormat_rdh | /* Return the snapshot format */
private static int getSnapshotFormat(final SnapshotDescription desc) {
return desc.hasVersion() ? desc.getVersion() : SnapshotManifestV1.DESCRIPTOR_VERSION;
} | 3.26 |
hbase_SnapshotManifest_m1_rdh | /**
* Load the information in the SnapshotManifest. Called by SnapshotManifest.open() If the format
* is v2 and there is no data-manifest, it means that we are loading an in-progress snapshot. Since
* we support rolling-upgrades, we look for both v1 and v2 region formats.
*/
private void m1() throws IOException {
switch (getSnapshotFormat(desc)) {
case SnapshotManifestV1.DESCRIPTOR_VERSION :
{
this.htd = FSTableDescriptors.getTableDescriptorFromFs(workingDirFs, workingDir);
ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader");
try {
this.regionManifests = SnapshotManifestV1.loadRegionManifests(conf, tpool, rootFs, workingDir, desc);
} finally {
tpool.shutdown();
}
break;
}
case SnapshotManifestV2.DESCRIPTOR_VERSION :
{
SnapshotDataManifest dataManifest = readDataManifest();
if (dataManifest != null) {
htd = ProtobufUtil.toTableDescriptor(dataManifest.getTableSchema());
regionManifests = dataManifest.getRegionManifestsList();
} else {
// Compatibility, load the v1 regions
// This happens only when the snapshot is in-progress and the cache wants to refresh.
List<SnapshotRegionManifest> v1Regions;
List<SnapshotRegionManifest> v2Regions;
ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader");
try {
v1Regions = SnapshotManifestV1.loadRegionManifests(conf, tpool, rootFs, workingDir, desc);
v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, rootFs, workingDir, desc, manifestSizeLimit);
} catch (InvalidProtocolBufferException e) {
throw new CorruptedSnapshotException("unable to parse region manifest " + e.getMessage(), e);
} finally {
tpool.shutdown();
}
if ((v1Regions != null) && (v2Regions != null)) {
regionManifests = new ArrayList<>(v1Regions.size() + v2Regions.size());
regionManifests.addAll(v1Regions);
regionManifests.addAll(v2Regions);
} else if (v1Regions != null) {
regionManifests = v1Regions;
} else {
regionManifests = v2Regions;
}
}
break;
}
default :
throw new CorruptedSnapshotException("Invalid Snapshot version: " + desc.getVersion(), ProtobufUtil.createSnapshotDesc(desc));
}
} | 3.26 |
hbase_SnapshotManifest_getSnapshotDescription_rdh | /**
* Get the SnapshotDescription
*/
public SnapshotDescription getSnapshotDescription() {
return this.desc;
} | 3.26 |
hbase_SnapshotManifest_m0_rdh | /**
* Creates a 'manifest' for the specified region, by reading directly from the HRegion object.
* This is used by the "online snapshot" when the table is enabled.
*/
public void m0(final HRegion region) throws IOException {
// Get the ManifestBuilder/RegionVisitor
RegionVisitor visitor = createRegionVisitor(desc);
// Visit the region and add it to the manifest
addRegion(region, visitor);
} | 3.26 |
hbase_SnapshotManifest_addRegion_rdh | /**
* Creates a 'manifest' for the specified region, by reading directly from the disk. This is used
* by the "offline snapshot" when the table is disabled.
*/
public void addRegion(final Path tableDir, final RegionInfo regionInfo) throws IOException {
// Get the ManifestBuilder/RegionVisitor
RegionVisitor visitor = createRegionVisitor(desc);
// Visit the region and add it to the manifest
addRegion(tableDir, regionInfo, visitor);
} | 3.26 |
hbase_SnapshotManifest_create_rdh | /**
* Return a SnapshotManifest instance, used for writing a snapshot. There are two usage patterns:
* - The Master will create a manifest, add the descriptor, offline regions and consolidate the
* snapshot by writing all the pending stuff on-disk:
* manifest = SnapshotManifest.create(...); manifest.addRegion(tableDir, hri); manifest.consolidate()
* - The RegionServer will create a single region manifest:
* manifest = SnapshotManifest.create(...); manifest.addRegion(region)
*/
public static SnapshotManifest create(final Configuration conf, final FileSystem fs, final Path workingDir, final SnapshotDescription desc, final ForeignExceptionSnare monitor) throws IOException {
return create(conf, fs, workingDir, desc, monitor,
null);
} | 3.26 |
hbase_SnapshotManifest_open_rdh | /**
* Return a SnapshotManifest instance with the information already loaded in-memory.
* SnapshotManifest manifest = SnapshotManifest.open(...)
* TableDescriptor htd = manifest.getDescriptor()
* for (SnapshotRegionManifest regionManifest : manifest.getRegionManifests())
* hri = regionManifest.getRegionInfo()
* for (regionManifest.getFamilyFiles()) ...
*/
public static SnapshotManifest open(final Configuration conf, final FileSystem fs, final Path workingDir, final SnapshotDescription desc) throws IOException {
SnapshotManifest manifest = new SnapshotManifest(conf, fs, workingDir, desc, null, null);
manifest.m1();
return manifest;
} | 3.26 |