name (stringlengths 12–178) | code_snippet (stringlengths 8–36.5k) | score (float64 3.26–3.68)
---|---|---|
hbase_Result_setStatistics_rdh | /**
* Set load information about the region to the information about the result
*
* @param loadStats
* statistics about the current region from which this was returned
*/
@InterfaceAudience.Private
public void setStatistics(RegionLoadStats loadStats) {
this.stats = loadStats;
} | 3.26 |
hbase_Result_isStale_rdh | /**
* Whether or not the results are coming from possibly stale data. Stale results might be returned
* if {@link Consistency} is not STRONG for the query.
*
* @return Whether or not the results are coming from possibly stale data.
*/
public boolean isStale() {
return stale;
} | 3.26 |
hbase_Result_value_rdh | /**
* Returns the value of the first column in the Result.
*
* @return value of the first column
*/
public byte[] value() {
if (isEmpty()) {
return null;
}
return CellUtil.cloneValue(cells[0]);
} | 3.26 |
hbase_Result_toString_rdh | /**
 * Returns a string representation of the cells in this Result.
 */
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("keyvalues=");
if (isEmpty()) {
sb.append("NONE");
return sb.toString();
}
sb.append("{");
boolean moreThanOne = false;
for (Cell kv : this.cells) {
if (moreThanOne) {
sb.append(", ");
} else {
moreThanOne = true;
}
sb.append(kv.toString());
}
sb.append("}");
return sb.toString();
} | 3.26 |
hbase_Result_size_rdh | /**
* Returns the size of the underlying Cell []
*/
public int size() {
return this.cells == null ? 0 : this.cells.length;
} | 3.26 |
hbase_Result_containsColumn_rdh | /**
* Checks for existence of a value for the specified column (empty or not).
*
* @param family
* family name
* @param foffset
* family offset
* @param flength
* family length
* @param qualifier
* column qualifier
* @param qoffset
* qualifier offset
* @param qlength
* qualifier length
* @return true if at least one value exists in the result, false if not
*/
public boolean containsColumn(byte[] family, int foffset, int flength, byte[] qualifier, int qoffset, int qlength) {
return getColumnLatestCell(family, foffset, flength, qualifier, qoffset, qlength) != null;
} | 3.26 |
hbase_Result_create_rdh | /**
* Instantiate a Result with the specified List of KeyValues. <br>
* <strong>Note:</strong> You must ensure that the keyvalues are already sorted.
*
* @param cells
* List of cells
*/
public static Result create(List<Cell> cells) {
return create(cells, null);
} | 3.26 |
hbase_Result_compareResults_rdh | /**
* Does a deep comparison of two Results, down to the byte arrays.
*
* @param res1
* first result to compare
* @param res2
* second result to compare
* @param verbose
* includes string representation for all cells in the exception if true; otherwise
* include rowkey only
 * @throws Exception
 * if any difference is found between the two results
*/
public static void compareResults(Result res1, Result res2, boolean verbose) throws Exception {
if (res2 == null) {
throw new Exception("There wasn't enough rows, we stopped at " + Bytes.toStringBinary(res1.getRow()));
}
if (res1.size() != res2.size()) {
if (verbose) {
throw new Exception((("This row doesn't have the same number of KVs: " + res1) + " compared to ") + res2);
} else {
throw new Exception(((((("This row doesn't have the same number of KVs: row=" + Bytes.toStringBinary(res1.getRow()))
+ ", ") + res1.size()) + " cells are compared to ") + res2.size()) + " cells");
}
}
Cell[] ourKVs = res1.rawCells();
Cell[] replicatedKVs = res2.rawCells();
for (int i = 0; i < res1.size(); i++) {
if (((!ourKVs[i].equals(replicatedKVs[i])) || (!CellUtil.matchingValue(ourKVs[i], replicatedKVs[i])))
|| (!CellUtil.matchingTags(ourKVs[i], replicatedKVs[i]))) {
if (verbose) {
throw new Exception((("This result was different: " + res1) + " compared to ") + res2);
} else {
throw new Exception("This result was different: row=" + Bytes.toStringBinary(res1.getRow()));
}
}
}
} | 3.26 |
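A minimal usage sketch for `compareResults`, assuming two open scanners (`primary` and `replica` are hypothetical names) whose rows should be byte-identical:

```java
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;

public class ReplicaVerifier {
  // Walks two scanners in lockstep; compareResults throws on the first difference.
  static void verify(ResultScanner primary, ResultScanner replica) throws Exception {
    Result r1;
    while ((r1 = primary.next()) != null) {
      // verbose=false keeps the exception message to the row key only.
      Result.compareResults(r1, replica.next(), false);
    }
  }
}
```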
hbase_Result_m0_rdh | /**
* Map of families to all versions of its qualifiers and values.
* <p>
* Returns a three level Map of the form:
 * <code>Map&lt;family, Map&lt;qualifier, Map&lt;timestamp, value&gt;&gt;&gt;</code>
* <p>
* Note: All other map returning methods make use of this map internally.
*
* @return map from families to qualifiers to versions
*/
public NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> m0() {
if (this.familyMap != null) {
return this.familyMap;
}
if (isEmpty()) {
return null;
}
this.familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
for (Cell kv : this.cells) {
byte[] family = CellUtil.cloneFamily(kv);
NavigableMap<byte[], NavigableMap<Long, byte[]>> columnMap = familyMap.get(family);
if (columnMap == null) {
columnMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
familyMap.put(family, columnMap);
}
byte[] qualifier = CellUtil.cloneQualifier(kv);
NavigableMap<Long, byte[]> versionMap = columnMap.get(qualifier);
if (versionMap == null) {
versionMap = new TreeMap<>(new Comparator<Long>() {
@Override
public int compare(Long l1, Long l2) {
return l2.compareTo(l1);
}
});
columnMap.put(qualifier, versionMap);
}
Long timestamp = kv.getTimestamp();
byte[] value = CellUtil.cloneValue(kv);
versionMap.put(timestamp, value);
}
return this.familyMap;
} | 3.26 |
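A minimal sketch of consuming the three-level map built above. The method is named `m0` in this snippet; stock HBase is assumed to expose it as `Result#getMap()`, which is the name used here, and the `Result` argument is an assumption:

```java
import java.util.NavigableMap;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class VersionWalker {
  static void dump(Result result) {
    // family -> qualifier -> (timestamp -> value); versions are newest-first
    // because of the descending Long comparator installed above.
    NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> map = result.getMap();
    if (map == null) {
      return; // empty Result
    }
    map.forEach((family, columns) ->
      columns.forEach((qualifier, versions) ->
        versions.forEach((ts, value) ->
          System.out.println(Bytes.toString(family) + ":" + Bytes.toString(qualifier)
            + " @" + ts + " = " + Bytes.toString(value)))));
  }
}
```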
hbase_Result_getValue_rdh | /**
* Get the latest version of the specified column. Note: this call clones the value content of the
* hosting Cell. See {@link #getValueAsByteBuffer(byte[], byte[])}, etc., or {@link #listCells()}
 * if you want to avoid the cloning.
*
* @param family
* family name
* @param qualifier
* column qualifier
* @return value of latest version of column, null if none found
*/
public byte[] getValue(byte[] family, byte[] qualifier) {
Cell kv = getColumnLatestCell(family, qualifier);
if (kv == null) {
return null;
}
return CellUtil.cloneValue(kv);
} | 3.26 |
hbase_Result_containsEmptyColumn_rdh | /**
* Checks if the specified column contains an empty value (a zero-length byte array).
*
* @param family
* family name
* @param foffset
* family offset
* @param flength
* family length
* @param qualifier
* column qualifier
* @param qoffset
* qualifier offset
* @param qlength
* qualifier length
* @return whether or not a latest value exists and is empty
*/
public boolean containsEmptyColumn(byte[] family, int foffset, int flength, byte[] qualifier, int qoffset, int qlength) {
Cell kv = getColumnLatestCell(family, foffset, flength, qualifier, qoffset, qlength);
return (kv != null) && (kv.getValueLength() == 0);
} | 3.26 |
hbase_Result_binarySearch_rdh | /**
* Searches for the latest value for the specified column.
*
* @param kvs
* the array to search
* @param family
* family name
* @param foffset
* family offset
* @param flength
* family length
* @param qualifier
* column qualifier
* @param qoffset
* qualifier offset
* @param qlength
* qualifier length
* @return the index where the value was found, or -1 otherwise
*/
protected int binarySearch(final Cell[] kvs, final byte[] family, final int foffset, final int flength, final byte[] qualifier, final int qoffset, final int qlength) {
double keyValueSize = ((double) (KeyValue.getKeyValueDataStructureSize(kvs[0].getRowLength(), flength, qlength, 0)));
byte[] buffer = localBuffer.get();
if ((buffer == null) || (keyValueSize > buffer.length)) {
// pad to the smallest multiple of the pad width
buffer = new byte[((int) (Math.ceil(keyValueSize / PAD_WIDTH))) * PAD_WIDTH];
localBuffer.set(buffer);
}
Cell searchTerm = KeyValueUtil.createFirstOnRow(buffer, 0, kvs[0].getRowArray(), kvs[0].getRowOffset(), kvs[0].getRowLength(), family, foffset, flength, qualifier, qoffset, qlength);
// pos === ( -(insertion point) - 1)
int pos = Arrays.binarySearch(kvs, searchTerm, CellComparator.getInstance());
// there will never be an exact match
if (pos < 0) {
pos = (pos + 1) * (-1);
// pos is now insertion point
}
if (pos == kvs.length) {
return -1;// doesn't exist
}
return pos;
} | 3.26 |
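The `(pos + 1) * (-1)` step relies on `Arrays.binarySearch` returning `(-(insertionPoint) - 1)` on a miss. A stand-alone illustration of that arithmetic, independent of the HBase types:

```java
import java.util.Arrays;

public class BinarySearchDemo {
  public static void main(String[] args) {
    int[] sorted = {10, 20, 30};
    int pos = Arrays.binarySearch(sorted, 25); // miss: returns -(2) - 1 = -3
    if (pos < 0) {
      pos = (pos + 1) * (-1); // recover the insertion point
    }
    System.out.println(pos); // prints 2, the index where 25 would be inserted
  }
}
```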
hbase_Result_getTotalSizeOfCells_rdh | /**
* Get total size of raw cells
*
* @return Total size.
*/
public static long getTotalSizeOfCells(Result result) {
long size = 0;
if (result.isEmpty()) {
return size;
}
for (Cell cell : result.rawCells()) {
size += cell.heapSize();
}
return size;
} | 3.26 |
hbase_Result_isCursor_rdh | /**
 * Return true if this Result is a cursor to tell users where the server has scanned. In this
 * Result the only meaningful method is {@link #getCursor()}.
 *
 * <pre>
 * while ((r = scanner.next()) != null) {
 *   if (r.isCursor()) {
 *     // scanning has not ended; it is a cursor. Save its row key and close the scanner if you
 *     // want, or just continue the loop to call next().
 *   } else {
 *     // just like before
 *   }
 * }
 * // scanning has ended
 * </pre>
 *
 * {@link Scan#setNeedCursorResult(boolean)} {@link Cursor} {@link #getCursor()}
*/
public boolean isCursor() {
return cursor != null;
} | 3.26 |
hbase_Result_getValueAsByteBuffer_rdh | /**
* Returns the value wrapped in a new <code>ByteBuffer</code>.
*
* @param family
* family name
* @param foffset
* family offset
* @param flength
* family length
* @param qualifier
* column qualifier
* @param qoffset
* qualifier offset
* @param qlength
* qualifier length
* @return the latest version of the column, or <code>null</code> if none found
*/
public ByteBuffer getValueAsByteBuffer(byte[] family, int foffset, int flength, byte[] qualifier, int qoffset, int qlength) {
Cell kv = getColumnLatestCell(family, foffset, flength, qualifier, qoffset, qlength);
if (kv == null) {
return null;
}
return ByteBuffer.wrap(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()).asReadOnlyBuffer();
} | 3.26 |
hbase_Result_getStats_rdh | /**
* Returns the associated statistics about the region from which this was returned. Can be
* <tt>null</tt> if stats are disabled.
*/
public RegionLoadStats getStats() {
return stats;
} | 3.26 |
hbase_Result_getCursor_rdh | /**
* Return the cursor if this Result is a cursor result. {@link Scan#setNeedCursorResult(boolean)}
* {@link Cursor} {@link #isCursor()}
*/
public Cursor getCursor() {
return cursor;
} | 3.26 |
hbase_Result_listCells_rdh | /**
* Create a sorted list of the Cell's in this result. Since HBase 0.20.5 this is equivalent to
* raw().
*
* @return sorted List of Cells; can be null if no cells in the result
*/
public List<Cell> listCells() {
return isEmpty() ? null : Arrays.asList(rawCells());
}
| 3.26 |
hbase_Result_getFamilyMap_rdh | /**
* Map of qualifiers to values.
* <p>
 * Returns a Map of the form: <code>Map&lt;qualifier, value&gt;</code>
*
* @param family
* column family to get
* @return map of qualifiers to values
*/
public NavigableMap<byte[], byte[]> getFamilyMap(byte[] family) {
if (this.familyMap == null) {
m0();
}
if (isEmpty()) {
return null;
}
NavigableMap<byte[], byte[]> returnMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
NavigableMap<byte[], NavigableMap<Long, byte[]>> qualifierMap = familyMap.get(family);
if (qualifierMap == null) {
return returnMap;
}
for (Map.Entry<byte[], NavigableMap<Long, byte[]>> entry : qualifierMap.entrySet()) {
byte[] value = entry.getValue().get(entry.getValue().firstKey());
returnMap.put(entry.getKey(), value);
}
return returnMap;
} | 3.26 |
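A hedged usage sketch for `getFamilyMap`, printing the latest value of every qualifier in one family; the `Result` and family arguments are assumptions:

```java
import java.util.NavigableMap;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class FamilyMapExample {
  static void printLatest(Result result, byte[] family) {
    NavigableMap<byte[], byte[]> latest = result.getFamilyMap(family);
    if (latest == null) {
      return; // empty Result
    }
    latest.forEach((qualifier, value) ->
      System.out.println(Bytes.toString(qualifier) + " = " + Bytes.toString(value)));
  }
}
```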
hbase_Result_m1_rdh | /**
* All methods modifying state of Result object must call this method to ensure that special
* purpose immutable Results can't be accidentally modified.
*/
private void m1() {
if (readonly) {
throw new UnsupportedOperationException("Attempting to modify readonly EMPTY_RESULT!");
}
} | 3.26 |
hbase_Result_mayHaveMoreCellsInRow_rdh | /**
* For scanning large rows, the RS may choose to return the cells chunk by chunk to prevent OOM or
* timeout. This flag is used to tell you if the current Result is the last one of the current
* row. False means this Result is the last one. True means there MAY be more cells belonging to
* the current row. If you don't use {@link Scan#setAllowPartialResults(boolean)} or
 * {@link Scan#setBatch(int)}, this method will always return false because the Result must
 * contain all the cells in one row.
*/
public boolean mayHaveMoreCellsInRow() {
return mayHaveMoreCellsInRow;
} | 3.26 |
hbase_Result_containsNonEmptyColumn_rdh | /**
* Checks if the specified column contains a non-empty value (not a zero-length byte array).
*
* @param family
* family name
* @param foffset
* family offset
* @param flength
* family length
* @param qualifier
* column qualifier
* @param qoffset
* qualifier offset
* @param qlength
* qualifier length
* @return whether or not a latest value exists and is not empty
*/
public boolean containsNonEmptyColumn(byte[] family, int foffset, int flength, byte[] qualifier, int qoffset, int qlength) {
Cell kv = getColumnLatestCell(family, foffset, flength, qualifier, qoffset, qlength);
return (kv != null) && (kv.getValueLength() > 0);
} | 3.26 |
hbase_Result_createCompleteResult_rdh | /**
* Forms a single result from the partial results in the partialResults list. This method is
* useful for reconstructing partial results on the client side.
*
* @param partialResults
* list of partial results
* @return The complete result that is formed by combining all of the partial results together
* @throws IOException
* A complete result cannot be formed because the results in the partial list
* come from different rows
*/
public static Result createCompleteResult(Iterable<Result> partialResults) throws IOException {
if (partialResults == null) {
return Result.create(Collections.emptyList(), null, false);
}
List<Cell> cells = new ArrayList<>();
boolean stale = false;
byte[] prevRow = null;
byte[] currentRow = null;
for (Iterator<Result> iter = partialResults.iterator(); iter.hasNext();) {
Result r = iter.next();
currentRow = r.getRow();
if ((prevRow != null) && (!Bytes.equals(prevRow, currentRow))) {
throw new IOException(("Cannot form complete result. Rows of partial results do not match." + " Partial Results: ") + partialResults);
}
// Ensure that all Results except the last one are marked as partials. The last result
// may not be marked as a partial because Results are only marked as partials when
// the scan on the server side must be stopped due to reaching the maxResultSize.
// Visualizing it makes it easier to understand:
// maxResultSize: 2 cells
// (-x-) represents cell number x in a row
// Example: row1: -1- -2- -3- -4- -5- (5 cells total)
// How row1 will be returned by the server as partial Results:
// Result1: -1- -2- (2 cells, size limit reached, mark as partial)
// Result2: -3- -4- (2 cells, size limit reached, mark as partial)
// Result3: -5- (1 cell, size limit NOT reached, NOT marked as partial)
if (iter.hasNext() && (!r.mayHaveMoreCellsInRow())) {
throw new IOException(("Cannot form complete result. Result is missing partial flag. " + "Partial Results: ") + partialResults);
}
prevRow = currentRow;
stale = stale || r.isStale();
for (Cell c : r.rawCells()) {
cells.add(c);
}
}
return Result.create(cells, null, stale);
} | 3.26 |
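A minimal client-side sketch of how partial results might be stitched back together, assuming a scanner opened with `Scan#setAllowPartialResults(true)`:

```java
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;

public class PartialStitcher {
  // Collects chunks until the last one of the row, then merges them.
  static Result readWholeRow(ResultScanner scanner) throws IOException {
    List<Result> partials = new ArrayList<>();
    Result r;
    while ((r = scanner.next()) != null) {
      partials.add(r);
      if (!r.mayHaveMoreCellsInRow()) {
        break; // this chunk completes the current row
      }
    }
    return partials.isEmpty() ? null : Result.createCompleteResult(partials);
  }
}
```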
hbase_Result_loadValue_rdh | /**
* Loads the latest version of the specified column into the provided <code>ByteBuffer</code>.
* <p>
* Does not clear or flip the buffer.
*
* @param family
* family name
* @param foffset
* family offset
* @param flength
* family length
* @param qualifier
* column qualifier
* @param qoffset
* qualifier offset
* @param qlength
* qualifier length
* @param dst
* the buffer where to write the value
* @return <code>true</code> if a value was found, <code>false</code> otherwise
* @throws BufferOverflowException
 * if there is insufficient space remaining in the buffer
*/
public boolean loadValue(byte[] family, int foffset, int flength, byte[] qualifier, int qoffset, int qlength, ByteBuffer dst) throws BufferOverflowException {
Cell kv = getColumnLatestCell(family, foffset, flength, qualifier, qoffset, qlength);
if (kv == null) {
return false;
}
dst.put(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength());
return true;
} | 3.26 |
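A sketch of calling `loadValue` with a reusable buffer instead of cloning values per lookup; the buffer size and column coordinates are assumptions, and an undersized buffer would throw `BufferOverflowException`:

```java
import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.client.Result;

public class LoadValueExample {
  static String readValue(Result result, byte[] family, byte[] qualifier) {
    ByteBuffer buf = ByteBuffer.allocate(4096); // assumed upper bound on value size
    if (!result.loadValue(family, 0, family.length, qualifier, 0, qualifier.length, buf)) {
      return null; // column not present
    }
    buf.flip(); // loadValue neither clears nor flips the buffer
    byte[] value = new byte[buf.remaining()];
    buf.get(value);
    return new String(value);
  }
}
```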
hbase_ServerStatistics_update_rdh | /**
* Good enough attempt. Last writer wins. It doesn't really matter which one gets to update, as
* something gets set
*/
public void update(byte[] region, RegionLoadStats currentStats) {
RegionStatistics regionStat = this.stats.get(region);
if (regionStat == null) {
regionStat = new RegionStatistics();
this.stats.put(region, regionStat);
}
regionStat.update(currentStats);
}
hbase_HelloHBase_createNamespaceAndTable_rdh | /**
* Invokes Admin#createNamespace and Admin#createTable to create a namespace with a table that has
* one column-family.
*
* @param admin
* Standard Admin object
* @throws IOException
* If IO problem encountered
*/
static void createNamespaceAndTable(final Admin admin) throws IOException {
if (!namespaceExists(admin, MY_NAMESPACE_NAME)) {
System.out.println(("Creating Namespace [" + MY_NAMESPACE_NAME) + "].");
admin.createNamespace(NamespaceDescriptor.create(MY_NAMESPACE_NAME).build());
}
if (!admin.tableExists(MY_TABLE_NAME)) {
System.out.println(((("Creating Table [" + MY_TABLE_NAME.getNameAsString()) + "], with one Column Family [") + Bytes.toString(MY_COLUMN_FAMILY_NAME)) + "].");
TableDescriptor desc = TableDescriptorBuilder.newBuilder(MY_TABLE_NAME).setColumnFamily(ColumnFamilyDescriptorBuilder.of(MY_COLUMN_FAMILY_NAME)).build();
admin.createTable(desc);
}
} | 3.26 |
hbase_HelloHBase_deleteNamespaceAndTable_rdh | /**
* Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to disable/delete
* Table and delete Namespace.
*
* @param admin
* Standard Admin object
* @throws IOException
* If IO problem is encountered
*/
static void deleteNamespaceAndTable(final Admin admin) throws IOException {
if (admin.tableExists(MY_TABLE_NAME)) {
System.out.println(("Disabling/deleting Table [" + MY_TABLE_NAME.getNameAsString()) + "].");
admin.disableTable(MY_TABLE_NAME);// Disable a table before deleting it.
admin.deleteTable(MY_TABLE_NAME);
}
if (namespaceExists(admin, MY_NAMESPACE_NAME)) {
System.out.println(("Deleting Namespace [" + MY_NAMESPACE_NAME) + "].");
admin.deleteNamespace(MY_NAMESPACE_NAME);
}
} | 3.26 |
hbase_HelloHBase_namespaceExists_rdh | /**
* Checks to see whether a namespace exists.
*
* @param admin
* Standard Admin object
* @param namespaceName
* Name of namespace
* @return true If namespace exists
* @throws IOException
* If IO problem encountered
*/
static boolean namespaceExists(final Admin admin, final String namespaceName) throws IOException {
try {
admin.getNamespaceDescriptor(namespaceName);
} catch (NamespaceNotFoundException e) {
return false;
}
return true;
} | 3.26 |
hbase_HelloHBase_putRowToTable_rdh | /**
* Invokes Table#put to store a row (with two new columns created 'on the fly') into the table.
*
* @param table
* Standard Table object (used for CRUD operations).
* @throws IOException
* If IO problem encountered
*/
static void putRowToTable(final Table table) throws IOException {
table.put(new Put(MY_ROW_ID).addColumn(MY_COLUMN_FAMILY_NAME, MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("Hello")).addColumn(MY_COLUMN_FAMILY_NAME, MY_SECOND_COLUMN_QUALIFIER, Bytes.toBytes("World!")));
System.out.println((((((((((((("Row [" + Bytes.toString(MY_ROW_ID)) + "] was put into Table [") + table.getName().getNameAsString()) + "] in HBase;\n") + " the row's two columns (created 'on the fly') are: [") + Bytes.toString(MY_COLUMN_FAMILY_NAME)) + ":") + Bytes.toString(MY_FIRST_COLUMN_QUALIFIER)) + "] and [") + Bytes.toString(MY_COLUMN_FAMILY_NAME)) + ":") + Bytes.toString(MY_SECOND_COLUMN_QUALIFIER)) + "]");
} | 3.26 |
hbase_HelloHBase_deleteRow_rdh | /**
* Invokes Table#delete to delete test data (i.e. the row)
*
* @param table
* Standard Table object
* @throws IOException
* If IO problem is encountered
*/
static void deleteRow(final Table table) throws IOException {
System.out.println(((("Deleting row [" + Bytes.toString(MY_ROW_ID)) + "] from Table [") + table.getName().getNameAsString()) + "].");
table.delete(new Delete(MY_ROW_ID));
} | 3.26 |
hbase_HelloHBase_getAndPrintRowContents_rdh | /**
* Invokes Table#get and prints out the contents of the retrieved row.
*
* @param table
* Standard Table object
* @throws IOException
* If IO problem encountered
*/
static void getAndPrintRowContents(final Table table) throws IOException {
Result row = table.get(new Get(MY_ROW_ID));
System.out.println(((("Row [" + Bytes.toString(row.getRow())) + "] was retrieved from Table [") + table.getName().getNameAsString()) + "] in HBase, with the following content:");
for (Entry<byte[], NavigableMap<byte[], byte[]>> colFamilyEntry : row.getNoVersionMap().entrySet()) {
String columnFamilyName = Bytes.toString(colFamilyEntry.getKey());
System.out.println((" Columns in Column Family [" + columnFamilyName) + "]:");
for (Entry<byte[], byte[]> columnNameAndValueMap : colFamilyEntry.getValue().entrySet()) {
System.out.println(((((" Value of Column [" + columnFamilyName) + ":")
+ Bytes.toString(columnNameAndValueMap.getKey())) + "] == ") + Bytes.toString(columnNameAndValueMap.getValue()));
}
}
} | 3.26 |
hbase_CatalogJanitorReport_isEmpty_rdh | /**
* Returns True if an 'empty' lastReport -- no problems found.
*/
public boolean isEmpty() {
return ((this.holes.isEmpty() && this.overlaps.isEmpty()) && this.unknownServers.isEmpty()) && this.emptyRegionInfo.isEmpty();
} | 3.26 |
hbase_HBaseConfiguration_createClusterConf_rdh | /**
* Generates a {@link Configuration} instance by applying property overrides prefixed by a cluster
* profile key to the base Configuration. Override properties are extracted by the
 * {@link #subset(Configuration, String)} method, then merged on top of the base Configuration
* and returned.
*
* @param baseConf
* the base configuration to use, containing prefixed override properties
* @param clusterKey
* the ZooKeeper quorum cluster key to apply, or {@code null} if none
* @param overridePrefix
* the property key prefix to match for override properties, or {@code null}
* if none
* @return the merged configuration with override properties and cluster key applied
*/
public static Configuration createClusterConf(Configuration baseConf, String clusterKey, String overridePrefix) throws IOException {
Configuration clusterConf = HBaseConfiguration.create(baseConf);
if ((clusterKey != null) && (!clusterKey.isEmpty())) {
applyClusterKeyToConf(clusterConf, clusterKey);
}
if ((overridePrefix != null) && (!overridePrefix.isEmpty())) {
Configuration clusterSubset = HBaseConfiguration.subset(clusterConf, overridePrefix);
HBaseConfiguration.merge(clusterConf, clusterSubset);
}
return clusterConf;
} | 3.26 |
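A hedged example of `createClusterConf`; the quorum string and the `peer.` prefix are made-up illustration values:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ClusterConfExample {
  public static void main(String[] args) throws Exception {
    Configuration base = HBaseConfiguration.create();
    base.set("peer.hbase.client.retries.number", "3"); // override carried under a prefix
    // Prefixed keys are stripped of "peer." and merged; the cluster key sets the quorum.
    Configuration remote = HBaseConfiguration.createClusterConf(base, "zk1,zk2,zk3:2181:/hbase", "peer.");
    System.out.println(remote.get("hbase.zookeeper.quorum"));      // zk1,zk2,zk3
    System.out.println(remote.get("hbase.client.retries.number")); // 3
  }
}
```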
hbase_HBaseConfiguration_main_rdh | /**
* For debugging. Dump configurations to system output as xml format. Master and RS configurations
* can also be dumped using http services. e.g. "curl http://master:16010/dump"
*/
public static void main(String[] args) throws Exception {
HBaseConfiguration.create().writeXml(System.out);
} | 3.26 |
hbase_HBaseConfiguration_addDeprecatedKeys_rdh | /**
 * The hbase.ipc.server.reservoir.initial.max and hbase.ipc.server.reservoir.initial.buffer.size
 * were introduced in HBase 2.0.0, while in HBase 3.0.0 the two config keys will be replaced by
* hbase.server.allocator.max.buffer.count and hbase.server.allocator.buffer.size. Also the
* hbase.ipc.server.reservoir.enabled will be replaced by hbase.server.allocator.pool.enabled.
* Keep the three old config keys here for HBase2.x compatibility. <br>
* HBASE-24667: This config hbase.regionserver.hostname.disable.master.reversedns will be replaced
* by hbase.unsafe.regionserver.hostname.disable.master.reversedns. Keep the old config keys here
* for backward compatibility. <br>
* Note: Before Hadoop-3.3, we must call the addDeprecations method before creating the
* Configuration object to work correctly. After this bug is fixed in hadoop-3.3, there will be no
* order problem.
*
* @see <a href="https://issues.apache.org/jira/browse/HADOOP-15708">HADOOP-15708</a>
*/
private static void addDeprecatedKeys() {
Configuration.addDeprecations(new DeprecationDelta[]{
new DeprecationDelta("hbase.regionserver.hostname", "hbase.unsafe.regionserver.hostname"),
new DeprecationDelta("hbase.regionserver.hostname.disable.master.reversedns", "hbase.unsafe.regionserver.hostname.disable.master.reversedns"),
new DeprecationDelta("hbase.offheapcache.minblocksize", "hbase.blockcache.minblocksize"),
new DeprecationDelta("hbase.ipc.server.reservoir.enabled", "hbase.server.allocator.pool.enabled"),
new DeprecationDelta("hbase.ipc.server.reservoir.initial.max", "hbase.server.allocator.max.buffer.count"),
new DeprecationDelta("hbase.ipc.server.reservoir.initial.buffer.size", "hbase.server.allocator.buffer.size"),
new DeprecationDelta("hlog.bulk.output", "wal.bulk.output"),
new DeprecationDelta("hlog.input.tables", "wal.input.tables"),
new DeprecationDelta("hlog.input.tablesmap", "wal.input.tablesmap"),
new DeprecationDelta("hbase.master.mob.ttl.cleaner.period", "hbase.master.mob.cleaner.period"),
new DeprecationDelta("hbase.normalizer.min.region.count", "hbase.normalizer.merge.min.region.count") });
} | 3.26 |
hbase_HBaseConfiguration_getPassword_rdh | /**
* Get the password from the Configuration instance using the getPassword method if it exists. If
* not, then fall back to the general get method for configuration elements.
*
* @param conf
* configuration instance for accessing the passwords
* @param alias
* the name of the password element
* @param defPass
* the default password
* @return String password or default password
*/
public static String getPassword(Configuration conf, String alias, String defPass) throws IOException {
String password;
char[] p = conf.getPassword(alias);
if (p != null) {
LOG.debug("Config option {} was found through the Configuration getPassword method.", alias);
password = new String(p);
} else {
LOG.debug("Config option {} was not found. Using provided default value", alias);
password = defPass;
}
return password;
} | 3.26 |
hbase_HBaseConfiguration_applyClusterKeyToConf_rdh | /**
 * Apply the settings in the given key to the given configuration; this is used to communicate
 * with distant clusters.
*
* @param conf
* configuration object to configure
* @param key
 * string that contains the 3 required configurations
*/
private static void applyClusterKeyToConf(Configuration conf, String key) throws IOException {
ZKConfig.ZKClusterKey zkClusterKey = ZKConfig.transformClusterKey(key);
conf.set(HConstants.ZOOKEEPER_QUORUM, zkClusterKey.getQuorumString());
conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkClusterKey.getClientPort());
conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, zkClusterKey.getZnodeParent());
// Without the right registry, the above configs are useless. Also, we don't use setClass()
// here because the ConnectionRegistry* classes are not resolvable from this module.
// This will be broken if ZkConnectionRegistry class gets renamed or moved. Is there a better
// way?
LOG.info("Overriding client registry implementation to {}", HConstants.ZK_CONNECTION_REGISTRY_CLASS);
conf.set(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY,
HConstants.ZK_CONNECTION_REGISTRY_CLASS);
} | 3.26 |
hbase_HBaseConfiguration_create_rdh | /**
* Creates a Configuration with HBase resources
*
* @param that
* Configuration to clone.
* @return a Configuration created with the hbase-*.xml files plus the given configuration.
*/
public static Configuration create(final Configuration that) {
Configuration conf = create();
merge(conf, that);
return conf;
} | 3.26 |
hbase_HBaseConfiguration_m1_rdh | /**
* Returns whether to show HBase Configuration in servlet
*/
public static boolean m1() {
boolean isShowConf = false;
try {
if (Class.forName("org.apache.hadoop.conf.ConfServlet") != null) {
isShowConf = true;
}
} catch (LinkageError e) {
// should we handle it more aggressively in addition to logging the error?
LOG.warn("Error thrown: ", e);
} catch (ClassNotFoundException ce) {
LOG.debug("ClassNotFound: ConfServlet");
// ignore
}
return isShowConf;
} | 3.26 |
hbase_HBaseConfiguration_subset_rdh | /**
* Returns a subset of the configuration properties, matching the given key prefix. The prefix is
* stripped from the return keys, ie. when calling with a prefix of "myprefix", the entry
* "myprefix.key1 = value1" would be returned as "key1 = value1". If an entry's key matches the
* prefix exactly ("myprefix = value2"), it will <strong>not</strong> be included in the results,
* since it would show up as an entry with an empty key.
*/
public static Configuration subset(Configuration srcConf, String prefix) {
Configuration newConf = new Configuration(false);
for (Map.Entry<String, String> entry : srcConf) {
if (entry.getKey().startsWith(prefix)) {
String newKey = entry.getKey().substring(prefix.length());
// avoid entries that would produce an empty key
if (!newKey.isEmpty()) {
newConf.set(newKey, entry.getValue());
}
}
}
return newConf;
}
| 3.26 |
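A small sketch of the prefix stripping described above; the property names are arbitrary examples (note the trailing dot in the prefix, so the stripped key is `key1` rather than `.key1`):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SubsetExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("myprefix.key1", "value1");
    conf.set("other.key2", "value2"); // no prefix match: excluded from the subset
    Configuration sub = HBaseConfiguration.subset(conf, "myprefix.");
    System.out.println(sub.get("key1")); // value1
    System.out.println(sub.get("key2")); // null
  }
}
```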
hbase_HBaseConfiguration_merge_rdh | /**
* Merge two configurations.
*
* @param destConf
* the configuration that will be overwritten with items from the srcConf
* @param srcConf
* the source configuration
*/
public static void merge(Configuration destConf, Configuration srcConf) {
for (Map.Entry<String, String> e : srcConf) {
destConf.set(e.getKey(), e.getValue());
}
} | 3.26 |
hbase_ByteBufferKeyValue_equals_rdh | /**
 * Needed for doing 'contains' on List. Only compares the key portion, not the value.
*/
@Override
public boolean equals(Object other) {
if (!(other instanceof Cell)) {
return false;
}
return CellUtil.equals(this, ((Cell) (other)));
} | 3.26 |
hbase_ByteBufferKeyValue_hashCode_rdh | /**
* In line with {@link #equals(Object)}, only uses the key portion, not the value.
*/
@Override
public int hashCode() {
return calculateHashForKey(this);
} | 3.26 |
hbase_SimpleMutableByteRange_putVLong_rdh | // Copied from com.google.protobuf.CodedOutputStream v2.5.0 writeRawVarint64
@Override
public int putVLong(int index, long val) {
int rPos = 0;
while (true) {
if ((val & (~0x7f)) == 0) {
bytes[(offset + index) + rPos] = ((byte) (val));
break;
} else {
bytes[(offset + index) + rPos] = ((byte) ((val & 0x7f) | 0x80));
val >>>= 7;
}
rPos++;
}
clearHashCache();
return rPos + 1;
} | 3.26 |
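A stand-alone round trip of the varint format written above (7 data bits per byte, high bit set on every byte except the last); the reader here is an illustrative counterpart, not the library's API:

```java
public class VarintDemo {
  static int writeVLong(byte[] buf, int index, long val) {
    int pos = 0;
    while (true) {
      if ((val & ~0x7fL) == 0) {
        buf[index + pos] = (byte) val; // last byte: high bit clear
        break;
      }
      buf[index + pos] = (byte) ((val & 0x7f) | 0x80); // continuation byte
      val >>>= 7;
      pos++;
    }
    return pos + 1; // bytes written
  }

  static long readVLong(byte[] buf, int index) {
    long result = 0;
    for (int shift = 0; ; shift += 7) {
      byte b = buf[index++];
      result |= (long) (b & 0x7f) << shift;
      if ((b & 0x80) == 0) {
        return result;
      }
    }
  }

  public static void main(String[] args) {
    byte[] buf = new byte[10];
    int len = writeVLong(buf, 0, 300L); // 300 needs two bytes
    System.out.println(len + " -> " + readVLong(buf, 0)); // 2 -> 300
  }
}
```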
hbase_SimpleMutableByteRange_deepCopy_rdh | // end copied from protobuf
@Override
public ByteRange deepCopy() {
SimpleMutableByteRange clone = new SimpleMutableByteRange(deepCopyToNewArray());
if (isHashCached()) {
clone.hash = hash;
}
return clone;
} | 3.26 |
hbase_TsvImporterTextMapper_setup_rdh | /**
* Handles initializing this class with objects specific to it (i.e., the parser). Common
* initialization that might be leveraged by a subclass is done in <code>doSetup</code>. Hence a
* subclass may choose to override this method and call <code>doSetup</code> as well before
 * handling its own custom params.
 */
@Override
protected void setup(Context context) {
doSetup(context);
Configuration conf = context.getConfiguration();
parser = new ImportTsv.TsvParser(conf.get(ImportTsv.COLUMNS_CONF_KEY), separator);
if (parser.getRowKeyColumnIndex() == (-1)) {
throw new RuntimeException("No row key column specified");
}
} | 3.26 |
hbase_TsvImporterTextMapper_map_rdh | /**
* Convert a line of TSV text into an HBase table row.
*/
@Override
public void map(LongWritable offset, Text value, Context context) throws IOException {
try {
Pair<Integer, Integer> rowKeyOffsets = parser.parseRowKey(value.getBytes(), value.getLength());
ImmutableBytesWritable rowKey = new ImmutableBytesWritable(value.getBytes(), rowKeyOffsets.getFirst(), rowKeyOffsets.getSecond());
context.write(rowKey, value);
} catch (ImportTsv.TsvParser.BadTsvLineException | IllegalArgumentException badLine) {
if (logBadLines) {
System.err.println(value);
}
System.err.println((("Bad line at offset: " + offset.get()) + ":\n") + badLine.getMessage());
if (skipBadLines) {
incrementBadLineCount(1);
return;
}
throw new IOException(badLine);
} catch (InterruptedException e) {
LOG.error("Interrupted while emitting TSV text", e);
Thread.currentThread().interrupt();
}
} | 3.26 |
hbase_TsvImporterTextMapper_doSetup_rdh | /**
* Handles common parameter initialization that a subclass might want to leverage.
*/
protected void doSetup(Context context) {
Configuration conf = context.getConfiguration();
// If a custom separator has been used,
// decode it back from Base64 encoding.
separator = conf.get(ImportTsv.SEPARATOR_CONF_KEY);
if (separator == null) {
separator = ImportTsv.DEFAULT_SEPARATOR;
} else {
separator = new String(Base64.getDecoder().decode(separator));
}
skipBadLines = context.getConfiguration().getBoolean(ImportTsv.SKIP_LINES_CONF_KEY, true);
logBadLines = context.getConfiguration().getBoolean(ImportTsv.LOG_BAD_LINES_CONF_KEY, false);
badLineCount = context.getCounter("ImportTsv", "Bad Lines");
} | 3.26 |
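The producer side of the Base64 handshake in `doSetup` would look roughly like this; the conf key literal mirrors what `ImportTsv.SEPARATOR_CONF_KEY` is assumed to hold:

```java
import java.util.Base64;
import org.apache.hadoop.conf.Configuration;

public class SeparatorConfig {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    String separator = "|"; // pipe-separated input instead of the default tab
    // Assumed key, mirroring ImportTsv.SEPARATOR_CONF_KEY.
    conf.set("importtsv.separator", Base64.getEncoder().encodeToString(separator.getBytes()));
    // The mapper's doSetup() decodes this back with Base64.getDecoder().
    System.out.println(conf.get("importtsv.separator"));
  }
}
```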
hbase_ThriftServer_main_rdh | /**
* Start up the Thrift2 server.
*/
public static void main(String[] args) throws Exception {
final Configuration conf = HBaseConfiguration.create();
// For now, the only time we return is on an argument error.
final int status = ToolRunner.run(conf, new ThriftServer(conf), args);
System.exit(status);
} | 3.26 |
hbase_BackupRestoreFactory_getRestoreJob_rdh | /**
* Gets backup restore job
*
* @param conf
* configuration
* @return backup restore job instance
*/
public static RestoreJob getRestoreJob(Configuration conf) {
Class<? extends RestoreJob> cls = conf.getClass(HBASE_INCR_RESTORE_IMPL_CLASS, MapReduceRestoreJob.class, RestoreJob.class);
RestoreJob service = ReflectionUtils.newInstance(cls, conf);
service.setConf(conf);
return service;
} | 3.26 |
hbase_BackupRestoreFactory_getBackupCopyJob_rdh | /**
* Gets backup copy job
*
* @param conf
* configuration
* @return backup copy job instance
*/
public static BackupCopyJob getBackupCopyJob(Configuration conf) {
Class<? extends BackupCopyJob> cls = conf.getClass(HBASE_BACKUP_COPY_IMPL_CLASS, MapReduceBackupCopyJob.class, BackupCopyJob.class);
BackupCopyJob service = ReflectionUtils.newInstance(cls, conf);
service.setConf(conf);
return service;
} | 3.26 |
hbase_BackupRestoreFactory_getBackupMergeJob_rdh | /**
* Gets backup merge job
*
* @param conf
* configuration
* @return backup merge job instance
*/
public static BackupMergeJob getBackupMergeJob(Configuration conf) {
Class<? extends BackupMergeJob> cls = conf.getClass(HBASE_BACKUP_MERGE_IMPL_CLASS, MapReduceBackupMergeJob.class, BackupMergeJob.class);
BackupMergeJob service = ReflectionUtils.newInstance(cls, conf);
service.setConf(conf);
return service;
} | 3.26 |
hbase_PreviousBlockCompressionRatePredicator_shouldFinishBlock_rdh | /**
* Returns <b>true</b> if the passed uncompressed size is larger than the limit calculated by
* <code>updateLatestBlockSizes</code>.
*
 * @param uncompressed
 * the uncompressed size of the current block
 * @return true if the block should be finished
*/
@Override
public boolean shouldFinishBlock(int uncompressed) {
if (uncompressed >= configuredMaxBlockSize) {
return uncompressed >= adjustedBlockSize;
}
return false;
} | 3.26 |
hbase_ExcludeDatanodeManager_tryAddExcludeDN_rdh | /**
* Try to add a datanode to the regionserver excluding cache
*
* @param datanodeInfo
* the datanode to be added to the excluded cache
 * @param cause
 * the reason the datanode should be excluded
* @return True if the datanode is added to the regionserver excluding cache, false otherwise
*/
public boolean tryAddExcludeDN(DatanodeInfo datanodeInfo, String cause) {
boolean alreadyMarkedSlow = getExcludeDNs().containsKey(datanodeInfo);
if (!alreadyMarkedSlow) {
excludeDNsCache.put(datanodeInfo, EnvironmentEdgeManager.currentTime());
f0.info("Added datanode: {} to exclude cache by [{}] success, current excludeDNsCache size={}", datanodeInfo, cause, excludeDNsCache.size());
return true;
}
f0.debug("Try add datanode {} to exclude cache by [{}] failed, " + "current exclude DNs are {}", datanodeInfo, cause, getExcludeDNs().keySet());
return false;
} | 3.26 |
hbase_SortedCompactionPolicy_getNextMajorCompactTime_rdh | /**
* Returns When to run next major compaction
*/
public long getNextMajorCompactTime(Collection<HStoreFile> filesToCompact) {
/**
* Default to {@link org.apache.hadoop.hbase.HConstants#DEFAULT_MAJOR_COMPACTION_PERIOD}.
*/
long period = comConf.getMajorCompactionPeriod();
if (period <= 0) {
return period;
}
/**
* Default to {@link org.apache.hadoop.hbase.HConstants#DEFAULT_MAJOR_COMPACTION_JITTER}, that
* is, +/- 3.5 days (7 days * 0.5).
*/
double jitterPct = comConf.getMajorCompactionJitter();
if (jitterPct <= 0) {
return period;
}
// deterministic jitter avoids a major compaction storm on restart
OptionalInt seed = StoreUtils.getDeterministicRandomSeed(filesToCompact);
if (seed.isPresent()) {
long jitter = Math.round(period * jitterPct);
// Synchronized to ensure one user of random instance at a time.
synchronized(RNG) {
RNG.setSeed(seed.getAsInt());
return (period + jitter) - Math.round((2L * jitter) * RNG.nextDouble());
}
} else {
return 0L;
}
} | 3.26 |
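A plain-number walk-through of the jitter arithmetic above, with a stand-in for the seeded `RNG.nextDouble()` call; all values are illustrative:

```java
public class JitterDemo {
  public static void main(String[] args) {
    long period = 7L * 24 * 60 * 60 * 1000;       // 7-day major compaction period, in ms
    double jitterPct = 0.5;                       // +/- 3.5 days
    long jitter = Math.round(period * jitterPct);
    double r = 0.25;                              // stand-in for RNG.nextDouble()
    // (period + jitter) - 2*jitter*r spans [period - jitter, period + jitter]
    long next = (period + jitter) - Math.round(2L * jitter * r);
    System.out.println(next); // 8.75 days in ms for r = 0.25
  }
}
```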
hbase_SortedCompactionPolicy_filterBulk_rdh | /**
*
* @param candidates
* pre-filtrate
*/
protected void filterBulk(ArrayList<HStoreFile> candidates) {
candidates.removeIf(HStoreFile::excludeFromMinorCompaction);
} | 3.26 |
hbase_SortedCompactionPolicy_throttleCompaction_rdh | /**
*
* @param compactionSize
* Total size of some compaction
* @return whether this should be a large or small compaction
*/
@Override
public boolean throttleCompaction(long compactionSize) {
return compactionSize > comConf.getThrottlePoint();
} | 3.26 |
hbase_SortedCompactionPolicy_selectCompaction_rdh | /**
*
* @param candidateFiles
* candidate files, ordered from oldest to newest by seqId. We rely on
* DefaultStoreFileManager to sort the files by seqId to guarantee
* contiguous compaction based on seqId for data consistency.
* @return subset copy of candidate list that meets compaction criteria
*/
public CompactionRequestImpl selectCompaction(Collection<HStoreFile> candidateFiles, List<HStoreFile> filesCompacting, boolean isUserCompaction, boolean mayUseOffPeak, boolean forceMajor) throws IOException {
// Preliminary compaction subject to filters
ArrayList<HStoreFile> candidateSelection = new ArrayList<>(candidateFiles);
// Stuck and not compacting enough (estimate). It is not guaranteed that we will be
// able to compact more if stuck and compacting, because ratio policy excludes some
// non-compacting files from consideration during compaction (see getCurrentEligibleFiles).
int futureFiles = (filesCompacting.isEmpty()) ? 0 : 1;
boolean mayBeStuck = ((candidateFiles.size() - filesCompacting.size()) +
futureFiles) >= storeConfigInfo.getBlockingFileCount();
candidateSelection = getCurrentEligibleFiles(candidateSelection, filesCompacting);
LOG.debug(((((((("Selecting compaction from " + candidateFiles.size()) + " store files, ") + filesCompacting.size()) + " compacting, ") + candidateSelection.size()) + " eligible, ") + storeConfigInfo.getBlockingFileCount()) + " blocking");
// If we can't have all files, we cannot do major anyway
boolean isAllFiles = candidateFiles.size() == candidateSelection.size();
if (!(forceMajor && isAllFiles)) {
candidateSelection = skipLargeFiles(candidateSelection, mayUseOffPeak);
isAllFiles = candidateFiles.size() == candidateSelection.size();
}
// Try a major compaction if this is a user-requested major compaction,
// or if we do not have too many files to compact and this was requested as a major compaction
boolean isTryingMajor = ((forceMajor && isAllFiles) && isUserCompaction) || (((forceMajor && isAllFiles) || shouldPerformMajorCompaction(candidateSelection)) && (candidateSelection.size() < comConf.getMaxFilesToCompact()));
// Or, if there are any references among the candidates.
boolean isAfterSplit = StoreUtils.hasReferences(candidateSelection);
CompactionRequestImpl result = createCompactionRequest(candidateSelection, isTryingMajor || isAfterSplit, mayUseOffPeak, mayBeStuck);
result.setAfterSplit(isAfterSplit);
ArrayList<HStoreFile> filesToCompact = Lists.newArrayList(result.getFiles());
removeExcessFiles(filesToCompact, isUserCompaction, isTryingMajor);
result.updateFiles(filesToCompact);
isAllFiles = candidateFiles.size() == filesToCompact.size();
result.setOffPeak(((!filesToCompact.isEmpty()) && (!isAllFiles)) && mayUseOffPeak);
result.setIsMajor(isTryingMajor && isAllFiles, isAllFiles);
return result;
} | 3.26 |
hbase_SortedCompactionPolicy_removeExcessFiles_rdh | /**
*
* @param candidates
* pre-filtrate
*/
protected void removeExcessFiles(ArrayList<HStoreFile> candidates, boolean isUserCompaction, boolean isMajorCompaction) {
int excess = candidates.size() - comConf.getMaxFilesToCompact();
if (excess > 0) {
if (isMajorCompaction && isUserCompaction) {
LOG.debug(("Warning, compacting more than " + comConf.getMaxFilesToCompact()) + " files because of a user-requested major compaction");
} else {
LOG.debug(("Too many admissible files. Excluding "
+ excess) + " files from compaction candidates");
candidates.subList(comConf.getMaxFilesToCompact(), candidates.size()).clear();
}
}
} | 3.26 |
hbase_SortedCompactionPolicy_checkMinFilesCriteria_rdh | /**
*
* @param candidates
* pre-filtrate
 * @return filtered subset; the selection is cleared if we don't have enough files
*/
protected ArrayList<HStoreFile> checkMinFilesCriteria(ArrayList<HStoreFile> candidates, int minFiles) {
if (candidates.size() < minFiles) {
if (LOG.isDebugEnabled()) {
LOG.debug(((("Not compacting files because we only have " + candidates.size()) + " files ready for compaction. Need ") + minFiles) + " to initiate.");
}
candidates.clear();
}
return candidates;
} | 3.26 |
hbase_DataBlockEncoding_isCorrectEncoder_rdh | /**
* Check if given encoder has this id.
*
* @param encoder
* encoder which id will be checked
* @param encoderId
 * id which we expect
* @return true if id is right for given encoder, false otherwise
* @exception IllegalArgumentException
* thrown when there is no matching data block encoder
*/
public static boolean isCorrectEncoder(DataBlockEncoder encoder, short encoderId) {
DataBlockEncoding algorithm = getEncodingById(encoderId);
String encoderCls = encoder.getClass().getName();
return encoderCls.equals(algorithm.encoderCls);
} | 3.26 |
hbase_DataBlockEncoding_getDataBlockEncoderById_rdh | /**
* Find and create data block encoder for given id;
*
* @param encoderId
* id of data block encoder.
* @return Newly created data block encoder.
*/
public static DataBlockEncoder getDataBlockEncoderById(short encoderId) {
return getEncodingById(encoderId).getEncoder();
} | 3.26 |
hbase_DataBlockEncoding_getNameInBytes_rdh | /**
* Returns name converted to bytes.
*/
public byte[] getNameInBytes() {
return Bytes.toBytes(toString());
} | 3.26 |
hbase_DataBlockEncoding_writeIdInBytes_rdh | /**
* Writes id bytes to the given array starting from offset.
*
* @param dest
* output array
* @param offset
* starting offset of the output array
*/
// System.arraycopy is static native. Nothing we can do about this until our minimum JDK is 9.
@SuppressWarnings("UnsafeFinalization")
public void writeIdInBytes(byte[] dest, int offset) throws IOException {
System.arraycopy(idInBytes, 0, dest, offset, ID_SIZE);
} | 3.26 |
hbase_DataBlockEncoding_getNameFromId_rdh | /**
* Find and return the name of data block encoder for the given id.
*
* @param encoderId
* id of data block encoder
* @return name, same as used in options in column family
*/
public static String getNameFromId(short encoderId) {
return getEncodingById(encoderId).toString();
} | 3.26 |
hbase_DataBlockEncoding_getEncoder_rdh | /**
* Return new data block encoder for given algorithm type.
*
* @return data block encoder if algorithm is specified, null if none is selected.
*/
public DataBlockEncoder getEncoder() {
if ((encoder == null) && (id != 0)) {
// lazily create the encoder
encoder = createEncoder(encoderCls);
}
return encoder;
} | 3.26 |
hbase_MemStore_startReplayingFromWAL_rdh | /**
 * This message intends to inform the MemStore that the next coming updates are going to be part of
* the replaying edits from WAL
*/
default void startReplayingFromWAL() {
return;
} | 3.26 |
hbase_MemStore_stopReplayingFromWAL_rdh | /**
* This message intends to inform the MemStore that the replaying edits from WAL are done
*/
default void stopReplayingFromWAL() {
return;
} | 3.26 |
hbase_MutableRegionInfo_isMetaRegion_rdh | /**
* Returns true if this region is a meta region
*/
@Override
public boolean isMetaRegion() {
return tableName.equals(TableName.META_TABLE_NAME);
} | 3.26 |
hbase_MutableRegionInfo_containsRow_rdh | /**
* Return true if the given row falls in this region.
*/
@Override
public boolean containsRow(byte[] row) {
CellComparator cellComparator = CellComparatorImpl.getCellComparator(tableName);
return (cellComparator.compareRows(row, startKey) >= 0) && ((cellComparator.compareRows(row, endKey) < 0) || Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY));
} | 3.26 |
hbase_MutableRegionInfo_getTable_rdh | /**
* Get current table name of the region
*/
@Override
public TableName getTable() {
return this.tableName;
} | 3.26 |
hbase_MutableRegionInfo_getEncodedName_rdh | /**
* Returns the encoded region name
*/
@Override
public String getEncodedName() {
return this.encodedName;
}
hbase_MutableRegionInfo_getReplicaId_rdh | /**
* Returns the region replica id
*
* @return returns region replica id
*/
@Override
public int getReplicaId() {
return replicaId;
} | 3.26 |
hbase_MutableRegionInfo_getEndKey_rdh | /**
* Returns the endKey
*/
@Override
public byte[] getEndKey() {
return endKey;
} | 3.26 |
hbase_MutableRegionInfo_isSplit_rdh | /**
* Returns True if has been split and has daughters.
*/
@Override
public boolean isSplit() {
return this.split;
}
hbase_MutableRegionInfo_isSplitParent_rdh | /**
*
* @return True if this is a split parent region.
* @deprecated since 3.0.0 and will be removed in 4.0.0, Use {@link #isSplit()} instead.
* @see <a href="https://issues.apache.org/jira/browse/HBASE-25210">HBASE-25210</a>
 */
@Override
@Deprecated
public boolean isSplitParent() {
if (!isSplit()) {
return false;
}
if (!isOffline()) {
LOG.warn("Region is split but NOT offline: " + getRegionNameAsString());
}
return true;
} | 3.26 |
hbase_MutableRegionInfo_hashCode_rdh | /**
*
* @see Object#hashCode()
 */
@Override
public int hashCode() {
return this.hashCode;
} | 3.26 |
hbase_MutableRegionInfo_containsRange_rdh | /**
* Returns true if the given inclusive range of rows is fully contained by this region. For
* example, if the region is foo,a,g and this is passed ["b","c"] or ["a","c"] it will return
* true, but if this is passed ["b","z"] it will return false.
*
* @throws IllegalArgumentException
* if the range passed is invalid (ie. end < start)
*/
@Override
public boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey) {
CellComparator cellComparator = CellComparatorImpl.getCellComparator(tableName);
if (cellComparator.compareRows(rangeStartKey, rangeEndKey) > 0) {
throw new IllegalArgumentException((("Invalid range: " + Bytes.toStringBinary(rangeStartKey)) + " > ") + Bytes.toStringBinary(rangeEndKey));
}
boolean firstKeyInRange = cellComparator.compareRows(rangeStartKey, startKey) >= 0;
boolean lastKeyInRange = (cellComparator.compareRows(rangeEndKey, endKey) < 0) || Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY);
return firstKeyInRange && lastKeyInRange;
} | 3.26 |
hbase_MutableRegionInfo_getRegionNameAsString_rdh | /**
* Returns Region name as a String for use in logging, etc.
*/
@Override
public String getRegionNameAsString() {
return RegionInfo.getRegionNameAsString(this, this.regionName);
} | 3.26 |
hbase_MutableRegionInfo_getRegionId_rdh | /**
* Returns the regionId
*/
@Override
public long getRegionId() {
return regionId;
} | 3.26 |
hbase_MutableRegionInfo_isOffline_rdh | /**
*
* @return True if this region is offline.
* @deprecated since 3.0.0 and will be removed in 4.0.0
* @see <a href="https://issues.apache.org/jira/browse/HBASE-25210">HBASE-25210</a>
*/
@Override
@Deprecated
public boolean isOffline() {
return this.offLine;
} | 3.26 |
hbase_MutableRegionInfo_setOffline_rdh | /**
* The parent of a region split is offline while split daughters hold references to the parent.
* Offlined regions are closed.
*
* @param offLine
* Set online/offline status.
*/
public MutableRegionInfo setOffline(boolean offLine) {
this.offLine = offLine;
return this;
} | 3.26 |
hbase_MutableRegionInfo_getShortNameToLog_rdh | /**
 * Returns a short, printable name for this region (usually the encoded name) for use in logging.
*/
@Override
public String getShortNameToLog() {
return RegionInfo.prettyPrint(this.getEncodedName());
} | 3.26 |
hbase_MutableRegionInfo_toString_rdh | /**
*
* @see Object#toString()
*/
@Override
public String toString() {
return ((((((((((((("{ENCODED => " + getEncodedName()) + ", ") + HConstants.NAME) + " => '") + Bytes.toStringBinary(this.regionName)) + "', STARTKEY => '") + Bytes.toStringBinary(this.startKey)) + "', ENDKEY => '") + Bytes.toStringBinary(this.endKey)) + "'") + (isOffline() ? ", OFFLINE => true" : "")) + (isSplit() ? ", SPLIT => true" : "")) + (replicaId > 0 ? ", REPLICA_ID => " + replicaId : "")) + "}";
} | 3.26 |
hbase_MutableRegionInfo_setSplit_rdh | /**
* Change the split status flag.
*
* @param split
* set split status
*/
public MutableRegionInfo setSplit(boolean split) {
this.split = split;
return this;
} | 3.26 |
hbase_MutableRegionInfo_getStartKey_rdh | /**
* Returns the startKey
*/
@Override
public byte[] getStartKey() {
return startKey;
} | 3.26 |
hbase_MutableRegionInfo_getRegionName_rdh | /**
*
* @return the regionName as an array of bytes.
* @see #getRegionNameAsString()
*/
@Override
public byte[] getRegionName() {
return regionName;
} | 3.26 |
hbase_Coprocessor_stop_rdh | /**
* Called by the {@link CoprocessorEnvironment} during it's own shutdown to stop the coprocessor.
*/
default void stop(CoprocessorEnvironment env) throws IOException {
}
| 3.26 |
hbase_IndividualBytesFieldCell_getQualifierArray_rdh | // 3) Qualifier
@Override
public byte[] getQualifierArray() {
// Qualifier could be null
return qualifier == null ? HConstants.EMPTY_BYTE_ARRAY : qualifier;
} | 3.26 |
hbase_IndividualBytesFieldCell_getTypeByte_rdh | // 5) Type
@Override
public byte getTypeByte() {
return type;
} | 3.26 |
hbase_IndividualBytesFieldCell_clone_rdh | /**
* Implement Cloneable interface
*/
@Override
public Object clone() throws CloneNotSupportedException {
return super.clone();// only a shallow copy
} | 3.26 |
hbase_IndividualBytesFieldCell_getTimestamp_rdh | // 4) Timestamp
@Override
public long getTimestamp() {
return timestamp;
} | 3.26 |
hbase_IndividualBytesFieldCell_getRowArray_rdh | /**
* Implement Cell interface
*/
// 1) Row
@Override
public byte[] getRowArray() {
// If row is null, the constructor will reject it, by {@link KeyValue#checkParameters()},
// so it is safe to return row without checking.
return row;
} | 3.26 |
hbase_IndividualBytesFieldCell_getFamilyArray_rdh | // 2) Family
@Override
public byte[] getFamilyArray() {
// Family could be null
return family == null ? HConstants.EMPTY_BYTE_ARRAY : family;
} | 3.26 |
hbase_IndividualBytesFieldCell_heapSize_rdh | /**
* Implement HeapSize interface
*/
@Override
public long heapSize() {
// The size of the array headers is already included in the overhead, so we do not need to
// include it again for
return ((((heapOverhead()// overhead, with array headers included
+ ClassSize.align(getRowLength()))// row
+ ClassSize.align(getFamilyLength()))// family
+ ClassSize.align(getQualifierLength()))// qualifier
+ ClassSize.align(getValueLength()))// value
+ ClassSize.align(getTagsLength());// tags
} | 3.26 |
hbase_IndividualBytesFieldCell_getValueArray_rdh | // 7) Value
@Override
public byte[] getValueArray() {
// Value could be null
return value == null ? HConstants.EMPTY_BYTE_ARRAY : value;
} | 3.26 |
hbase_IndividualBytesFieldCell_getSequenceId_rdh | // 6) Sequence id
@Override
public long getSequenceId() {
return seqId;
} | 3.26 |
hbase_HealthReport_getHealthReport_rdh | /**
* Gets the health report of the region server.
*/
String getHealthReport() {
return f0;
} | 3.26 |
hbase_FileArchiverNotifierFactoryImpl_get_rdh | /**
* Returns the {@link FileArchiverNotifier} instance for the given {@link TableName}.
*
* @param tn
* The table to obtain a notifier for
* @return The notifier for the given {@code tablename}.
*/
public FileArchiverNotifier get(Connection conn, Configuration conf, FileSystem fs, TableName tn) {
// Ensure that only one instance is exposed to callers
return CACHE.computeIfAbsent(tn, key -> new FileArchiverNotifierImpl(conn, conf, fs, key));
} | 3.26 |
hbase_DemoClient_bytes_rdh | // Helper to translate strings to UTF8 bytes
private byte[] bytes(String s) {
return Bytes.toBytes(s);
} | 3.26 |