name | code_snippet | score
---|---|---|
hbase_ScanWildcardColumnTracker_checkVersions_rdh | /**
* {@inheritDoc } This receives puts *and* deletes. Deletes do not count as a version, but rather
* take the version of the previous put (so eventually all but the last can be reclaimed).
*/
@Override
public MatchCode checkVersions(Cell cell, long timestamp, byte type,
boolean ignoreCount) throws IOException {
if (columnCell == null) {
// first iteration.
resetCell(cell);
if (ignoreCount) {
return MatchCode.INCLUDE;
}
// do not count a delete marker as another version
return checkVersion(type, timestamp);
}
int cmp = comparator.compareQualifiers(cell, this.columnCell);
if (cmp == 0) {
if (ignoreCount) {
return MatchCode.INCLUDE;
}
// If column matches, check if it is a duplicate timestamp
if (sameAsPreviousTSAndType(timestamp, type)) {
return MatchCode.SKIP;
}
return checkVersion(type, timestamp);
}
resetTSAndType();
// new col > old col
if (cmp > 0) {
// switched columns, let's do something
resetCell(cell);
if (ignoreCount) {
return MatchCode.INCLUDE;
}
return checkVersion(type, timestamp);
}
// new col < old col
// WARNING: This means that very likely an edit for some other family
// was incorrectly stored into the store for this one. Throw an exception,
// because this might lead to data corruption.
throw new IOException("ScanWildcardColumnTracker.checkColumn ran into a column actually "
+ "smaller than the previous column: " + Bytes.toStringBinary(CellUtil.cloneQualifier(cell)));
} | 3.26 |
hbase_ScanWildcardColumnTracker_getColumnHint_rdh | /**
* Used by matcher and scan/get to get a hint of the next column to seek to after checkColumn()
 * returns SKIP. Returns the next interesting column we want, or NULL if there is none (wildcard
 * scanner).
 *
 * @return the next column to seek to, or NULL for a wildcard scanner
*/
@Override
public ColumnCount getColumnHint() {
return null;
} | 3.26 |
hbase_InstancePending_m0_rdh | /**
 * Returns the instance given by the method {@link #prepare}. This is an uninterruptible blocking
 * method; if the thread is interrupted while waiting, the interrupt flag is restored just before
 * returning.
*/
T m0() {
InstanceHolder<T> instanceHolder;
boolean interrupted = false;
while ((instanceHolder = this.instanceHolder) == null) {
try {
pendingLatch.await();
} catch (InterruptedException e) {
interrupted = true;
}
}
if (interrupted) {
Thread.currentThread().interrupt();
}
return instanceHolder.instance;
} | 3.26 |
hbase_InstancePending_prepare_rdh | /**
 * Associates the given instance for the method {@link #get}. This method should be called once,
 * and {@code instance} should be non-null. It should be called as soon as possible because the
 * method {@code get} blocks uninterruptibly until this method is called.
*/
void prepare(T instance) {
assert instance != null;
instanceHolder = new InstanceHolder<>(instance);
pendingLatch.countDown();
} | 3.26 |
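The two snippets above (`m0` and `prepare`) implement a latch-based single-assignment handoff: `prepare` publishes the instance exactly once, and the getter blocks uninterruptibly, restoring the interrupt flag on exit. A minimal self-contained sketch of the same pattern, with hypothetical names (`PendingValue`, `set`, `get`), using only `java.util.concurrent`:

```java
import java.util.concurrent.CountDownLatch;

// Hypothetical re-implementation of the InstancePending handoff pattern.
final class PendingValue<T> {
  // Wrapper so a volatile read can distinguish "not yet set" from the value itself.
  private static final class Holder<T> {
    final T value;
    Holder(T value) { this.value = value; }
  }

  private final CountDownLatch latch = new CountDownLatch(1);
  private volatile Holder<T> holder;

  /** Publish the value exactly once and release all waiting getters. */
  void set(T value) {
    assert value != null;
    holder = new Holder<>(value);
    latch.countDown();
  }

  /** Block uninterruptibly until set() is called; re-assert the interrupt flag on exit. */
  T get() {
    Holder<T> h;
    boolean interrupted = false;
    while ((h = holder) == null) {
      try {
        latch.await();
      } catch (InterruptedException e) {
        interrupted = true; // swallow for now, restore below
      }
    }
    if (interrupted) {
      Thread.currentThread().interrupt();
    }
    return h.value;
  }
}
```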
hbase_HRegionLocation_getRegion_rdh | /**
 * Returns the regionInfo
*/
public RegionInfo getRegion() {
return regionInfo;
} | 3.26 |
hbase_HRegionLocation_toString_rdh | /**
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return (((("region=" + (this.regionInfo == null ? "null" : this.regionInfo.getRegionNameAsString())) + ", hostname=") + this.serverName) + ", seqNum=") + seqNum;
} | 3.26 |
hbase_HRegionLocation_getHostnamePort_rdh | /**
* Returns String made of hostname and port formatted as per
* {@link Addressing#createHostAndPortStr(String, int)}
*/
public String getHostnamePort() {
return Addressing.createHostAndPortStr(this.getHostname(), this.getPort());
} | 3.26 |
hbase_HRegionLocation_hashCode_rdh | /**
*
* @see java.lang.Object#hashCode()
*/
@Override
public int hashCode() {
return this.serverName.hashCode();
} | 3.26 |
hbase_HRegionLocation_equals_rdh | /**
*
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null) {
return false;
}
if (!(o instanceof HRegionLocation)) {
return false;
}
return this.compareTo(((HRegionLocation) (o))) == 0;
} | 3.26 |
hbase_EndpointObserver_preEndpointInvocation_rdh | /**
* Called before an Endpoint service method is invoked. The request message can be altered by
* returning a new instance. Throwing an exception will abort the invocation. Calling
* {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no effect in this
* hook.
*
* @param ctx
* the environment provided by the region server
* @param service
* the endpoint service
* @param request
* Request message expected by given {@code Service}'s method (by the name
* {@code methodName}).
* @param methodName
* the invoked service method
* @return the possibly modified message
 */
default Message preEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request) throws IOException {
return request;
}
/**
* Called after an Endpoint service method is invoked. The response message can be altered using
* the builder.
*
* @param ctx
* the environment provided by the region server
* @param service
* the endpoint service
* @param methodName
* the invoked service method
* @param request
* Request message expected by given {@code Service}'s method (by the name
* {@code methodName} | 3.26 |
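As a hedged illustration of the pre-hook above, an observer could log every endpoint call and pass the request through unchanged. The class name here is made up, and the protobuf import paths may differ between HBase versions (recent versions use a shaded protobuf):

```java
import java.io.IOException;
import com.google.protobuf.Message;
import com.google.protobuf.Service;
import org.apache.hadoop.hbase.coprocessor.EndpointObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

// Hypothetical observer: logs each endpoint invocation, leaves the request untouched.
public class LoggingEndpointObserver implements EndpointObserver {
  @Override
  public Message preEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
      Service service, String methodName, Message request) throws IOException {
    System.out.println("Endpoint call: "
        + service.getDescriptorForType().getFullName() + "." + methodName);
    return request; // return a different Message instance to replace the request
  }
}
```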
hbase_CacheStats_getDataMissCount_rdh | // All of the counts of misses and hits.
public long getDataMissCount() {
return dataMissCount.sum();
} | 3.26 |
hbase_Scan_getStartRow_rdh | /**
 * Returns the start row
*/
public byte[] getStartRow() {
return this.startRow;
} | 3.26 |
hbase_Scan_getRowOffsetPerColumnFamily_rdh | /**
* Method for retrieving the scan's offset per row per column family (#kvs to be skipped)
*
* @return row offset
*/
public int getRowOffsetPerColumnFamily() {
return this.storeOffset;
} | 3.26 |
hbase_Scan_getFilter_rdh | /**
 * Returns the filter, or null if none is set
*/
@Override
public Filter getFilter() {
return filter;
} | 3.26 |
hbase_Scan_getCaching_rdh | /**
 * Returns caching, the number of rows fetched when calling next on a scanner
*/
public int getCaching() {
return this.caching;
} | 3.26 |
hbase_Scan_withStopRow_rdh | /**
* Set the stop row of the scan.
* <p>
* The scan will include rows that are lexicographically less than (or equal to if
* {@code inclusive} is {@code true}) the provided stopRow.
* <p>
* <b>Note:</b> <strong>Do NOT use this in combination with {@link #setRowPrefixFilter(byte[])} or
* {@link #setStartStopRowForPrefixScan(byte[])}.</strong> Doing so will make the scan result
* unexpected or even undefined.
* </p>
*
* @param stopRow
* row to end at
* @param inclusive
* whether we should include the stop row when scan
* @throws IllegalArgumentException
* if stopRow does not meet criteria for a row key (when length
* exceeds {@link HConstants#MAX_ROW_LENGTH})
*/
public Scan withStopRow(byte[] stopRow, boolean inclusive) {
if (Bytes.len(stopRow) > HConstants.MAX_ROW_LENGTH) {
throw new IllegalArgumentException("stopRow's length must be less than or equal to "
+ HConstants.MAX_ROW_LENGTH + " to meet the criteria for a row key.");
}
this.f0 = stopRow;
this.includeStopRow = inclusive;
return this;
}
/**
* <p>
* Set a filter (using stopRow and startRow) so the result set only contains rows where the rowKey
* starts with the specified prefix.
* </p>
* <p>
* This is a utility method that converts the desired rowPrefix into the appropriate values for
* the startRow and stopRow to achieve the desired result.
* </p>
* <p>
* This can safely be used in combination with setFilter.
* </p>
* <p>
* <strong>This CANNOT be used in combination with withStartRow and/or withStopRow.</strong> Such
* a combination will yield unexpected and even undefined results.
* </p>
*
* @param rowPrefix
* the prefix all rows must start with. (Set <i>null</i> to remove the filter.)
* @deprecated since 2.5.0, will be removed in 4.0.0. The name of this method is considered to be
confusing as it does not use a {@link Filter} but uses setting the startRow and
stopRow instead. Use {@link #setStartStopRowForPrefixScan(byte[])} | 3.26 |
hbase_Scan_setTimeRange_rdh | /**
* Get versions of columns only within the specified timestamp range, [minStamp, maxStamp). Note,
* default maximum versions to return is 1. If your time range spans more than one version and you
* want all versions returned, up the number of versions beyond the default.
*
* @param minStamp
* minimum timestamp value, inclusive
* @param maxStamp
* maximum timestamp value, exclusive
* @see #readAllVersions()
* @see #readVersions(int)
*/
public Scan setTimeRange(long minStamp, long maxStamp) throws IOException {
f1 = TimeRange.between(minStamp, maxStamp);
return this;
} | 3.26 |
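A short usage sketch of the semantics described above: since the default is one version per column, a range spanning several versions usually needs `readAllVersions()` (or `readVersions(n)`) as well. Family name and timestamps are illustrative; `addFamily` is the public name of the method obfuscated as `m0` further down in this table:

```java
// Fetch every version written within [minStamp, maxStamp) of family "cf".
Scan scan = new Scan().addFamily(Bytes.toBytes("cf")).readAllVersions();
scan.setTimeRange(1_700_000_000_000L, 1_700_003_600_000L); // throws IOException if minStamp > maxStamp
```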
hbase_Scan_setStartStopRowForPrefixScan_rdh | /**
* <p>
* Set a filter (using stopRow and startRow) so the result set only contains rows where the rowKey
* starts with the specified prefix.
* </p>
* <p>
* This is a utility method that converts the desired rowPrefix into the appropriate values for
* the startRow and stopRow to achieve the desired result.
* </p>
* <p>
* This can safely be used in combination with setFilter.
* </p>
* <p>
* <strong>This CANNOT be used in combination with withStartRow and/or withStopRow.</strong> Such
* a combination will yield unexpected and even undefined results.
* </p>
*
* @param rowPrefix
* the prefix all rows must start with. (Set <i>null</i> to remove the filter.)
*/
public Scan setStartStopRowForPrefixScan(byte[] rowPrefix) {
if (rowPrefix == null) {
withStartRow(HConstants.EMPTY_START_ROW);
withStopRow(HConstants.EMPTY_END_ROW);
} else {
this.withStartRow(rowPrefix);
this.withStopRow(ClientUtil.calculateTheClosestNextRowKeyForPrefix(rowPrefix));
}
return this;
} | 3.26 |
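A usage sketch with an illustrative prefix; this replaces manual start/stop bookkeeping and, per the warning above, must not be mixed with explicit `withStartRow`/`withStopRow` calls:

```java
// Scan only rows whose key starts with "user_42|" (prefix is illustrative).
Scan scan = new Scan().setStartStopRowForPrefixScan(Bytes.toBytes("user_42|"));
// Internally this sets startRow to the prefix and stopRow to the closest
// row key that sorts after every key carrying the prefix.
```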
hbase_Scan_getMaxResultSize_rdh | /**
* Returns the maximum result size in bytes. See {@link #setMaxResultSize(long)}
*/
public long getMaxResultSize() {
return maxResultSize;
} | 3.26 |
hbase_Scan_getReadType_rdh | /**
* Returns the read type for this scan
*/
public ReadType getReadType() {
return readType;
} | 3.26 |
hbase_Scan_hasFilter_rdh | /**
 * Returns true if a filter has been specified, false if not
*/
public boolean hasFilter() {
return filter != null;
} | 3.26 |
hbase_Scan_setCaching_rdh | /**
* Set the number of rows for caching that will be passed to scanners. If not set, the
* Configuration setting {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} will apply. Higher
* caching values will enable faster scanners but will use more memory.
*
* @param caching
* the number of rows for caching
*/
public Scan setCaching(int caching) {
this.caching = caching;
return this;
} | 3.26 |
hbase_Scan_getAllowPartialResults_rdh | /**
 * Returns true when the caller of this scan understands that the results it sees may only
 * represent a partial portion of a row. The entire row would be retrieved by subsequent calls to
 * {@link ResultScanner#next()}
*/
public boolean getAllowPartialResults() {
return allowPartialResults;
} | 3.26 |
hbase_Scan_setRowOffsetPerColumnFamily_rdh | /**
* Set offset for the row per Column Family.
*
* @param offset
* is the number of kvs that will be skipped.
*/
public Scan setRowOffsetPerColumnFamily(int offset) {
this.storeOffset = offset;
return this;
} | 3.26 |
hbase_Scan_includeStartRow_rdh | /**
 * Returns whether we should include the start row when scanning
 */
public boolean includeStartRow() {
return includeStartRow;
} | 3.26 |
hbase_Scan_setFamilyMap_rdh | /**
 * Set the familyMap
*
* @param familyMap
* map of family to qualifier
*/
public Scan setFamilyMap(Map<byte[], NavigableSet<byte[]>> familyMap) {
this.familyMap = familyMap;
return this;
} | 3.26 |
hbase_Scan_setCacheBlocks_rdh | /**
* Set whether blocks should be cached for this Scan.
* <p>
* This is true by default. When true, default settings of the table and family are used (this
* will never override caching blocks if the block cache is disabled for that family or entirely).
*
* @param cacheBlocks
* if false, default settings are overridden and blocks will not be cached
*/
public Scan setCacheBlocks(boolean cacheBlocks) {
this.cacheBlocks = cacheBlocks;
return this;
} | 3.26 |
hbase_Scan_setReversed_rdh | /**
* Set whether this scan is a reversed one
* <p>
* This is false by default which means forward(normal) scan.
*
* @param reversed
* if true, scan will be backward order
*/
public Scan setReversed(boolean reversed) {
this.reversed = reversed;
return this;
} | 3.26 |
hbase_Scan_toMap_rdh | /**
* Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a
* Map along with the fingerprinted information. Useful for debugging, logging, and administration
* tools.
*
* @param maxCols
* a limit on the number of columns output prior to truncation
*/
@Override
public Map<String, Object> toMap(int maxCols) {
// start with the fingerprint map and build on top of it
Map<String, Object> map = getFingerprint();
// map from families to column list replaces fingerprint's list of families
Map<String, List<String>> familyColumns = new HashMap<>();
map.put("families", familyColumns);
// add scalar information first
map.put("startRow", Bytes.toStringBinary(this.startRow));
map.put("stopRow", Bytes.toStringBinary(this.f0));
map.put("maxVersions", this.maxVersions);
map.put("batch", this.batch);
map.put("caching", this.caching);
map.put("maxResultSize", this.maxResultSize);
map.put("cacheBlocks", this.cacheBlocks);
map.put("loadColumnFamiliesOnDemand", this.loadColumnFamiliesOnDemand);
List<Long> timeRange = new ArrayList<>(2);
timeRange.add(this.f1.getMin());
timeRange.add(this.f1.getMax());
map.put("timeRange", timeRange);
int colCount = 0;
// iterate through affected families and list out up to maxCols columns
for (Map.Entry<byte[], NavigableSet<byte[]>> entry : this.familyMap.entrySet()) {
List<String> columns = new ArrayList<>();
familyColumns.put(Bytes.toStringBinary(entry.getKey()), columns);
if (entry.getValue() == null) {
colCount++;
--maxCols;
columns.add("ALL");
} else {
colCount += entry.getValue().size();
if (maxCols <= 0) {
continue;
}
for (byte[] column : entry.getValue()) {
if ((--maxCols) <= 0) {
continue;
}
columns.add(Bytes.toStringBinary(column));
}
}
}
map.put("totalColumns", colCount);
if (this.filter != null) {
map.put("filter", this.filter.toString());
}
// add the id if set
if (getId() != null) {
map.put("id", getId());
}
return map;
} | 3.26 |
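Since `toMap` layers row/column/timestamp details on top of `getFingerprint`, a quick debugging dump might look like this (assuming a configured `Scan` named `scan`):

```java
// Dump scan details, truncating per-family column lists after 5 columns total.
Map<String, Object> details = scan.toMap(5);
System.out.println("scan details: " + details);
```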
hbase_Scan_addColumn_rdh | /**
* Get the column from the specified family with the specified qualifier.
* <p>
* Overrides previous calls to addFamily for this family.
*
* @param family
* family name
* @param qualifier
* column qualifier
*/
public Scan addColumn(byte[] family, byte[] qualifier) {
NavigableSet<byte[]> set = familyMap.get(family);
if (set == null) {
set = new TreeSet<>(Bytes.BYTES_COMPARATOR);
familyMap.put(family, set);
}
if (qualifier == null) {
qualifier = HConstants.EMPTY_BYTE_ARRAY;
}
set.add(qualifier);
return this;
} | 3.26 |
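A brief sketch of building up the family map with `addColumn`; family and qualifier names are illustrative:

```java
Scan scan = new Scan();
scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"));
scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q2")); // q1 and q2 accumulate in the same set
```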
hbase_Scan_getBatch_rdh | /**
* Returns maximum number of values to return for a single call to next()
*/
public int getBatch() {
return this.batch;
} | 3.26 |
hbase_Scan_setTimestamp_rdh | /**
* Get versions of columns with the specified timestamp. Note, default maximum versions to return
* is 1. If your time range spans more than one version and you want all versions returned, up the
 * number of versions beyond the default.
*
* @param timestamp
* version timestamp
* @see #readAllVersions()
* @see #readVersions(int)
*/
public Scan setTimestamp(long timestamp) {
try {
f1 = TimeRange.at(timestamp);
} catch (Exception e) {
// This should never happen, unless integer overflow or something extremely wrong...
LOG.error("TimeRange failed, likely caused by integer overflow. ", e);
throw e;
}
return this;
} | 3.26 |
hbase_Scan_setNeedCursorResult_rdh | /**
 * When the server is slow, or we scan a table with much deleted data, or we use a sparse filter,
 * the server will send heartbeats to prevent timeouts. However, the scanner will only return a
 * Result when it has real data for the client, so if there are many heartbeats the blocking time
 * on ResultScanner#next() may be very long, which is not friendly to online services. Set this to
 * true and you can get a special Result whose #isCursor() returns true and which does not contain
 * any real data. It only tells you where the server has scanned. You can call next to continue
 * scanning, or open a new scanner with this row key as the start row whenever you want. Users get
 * a cursor when and only when there is a response from the server but we cannot return a Result
 * to users, for example, when the response is a heartbeat or there are partial cells but users do
 * not allow partial results. Currently the cursor is at row level, which means the special Result
 * will only contain a row key. {@link Result#isCursor()} {@link Result#getCursor()} {@link Cursor}
*/
public Scan setNeedCursorResult(boolean needCursorResult) {
this.f2 = needCursorResult;
return this;
} | 3.26 |
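A hedged sketch of consuming cursor results as described above, resuming via `createScanFromCursor` (shown further down in this table); it assumes an open `Table` named `table`:

```java
Scan scan = new Scan().setNeedCursorResult(true);
try (ResultScanner scanner = table.getScanner(scan)) {
  for (Result result : scanner) {
    if (result.isCursor()) {
      // No real data yet; the cursor only reports how far the server has scanned.
      Scan resumePoint = Scan.createScanFromCursor(result.getCursor());
      continue; // or stop here and reopen later from resumePoint
    }
    // process a real row...
  }
}
```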
hbase_Scan_getMvccReadPoint_rdh | /**
* Get the mvcc read point used to open a scanner.
*/
long getMvccReadPoint() {
return mvccReadPoint;
} | 3.26 |
hbase_Scan_getLimit_rdh | /**
* Returns the limit of rows for this scan
*/
public int getLimit() {
return limit;
} | 3.26 |
hbase_Scan_m2_rdh | /**
* Set the mvcc read point to -1 which means do not use it.
*/
Scan m2() {
return setMvccReadPoint(-1L);
} | 3.26 |
hbase_Scan_readAllVersions_rdh | /**
* Get all available versions.
*/
public Scan readAllVersions() {
this.maxVersions = Integer.MAX_VALUE;
return this;
} | 3.26 |
hbase_Scan_getFamilyMap_rdh | /**
 * Get the familyMap
*/
public Map<byte[], NavigableSet<byte[]>> getFamilyMap() {
return this.familyMap;
} | 3.26 |
hbase_Scan_setMaxResultsPerColumnFamily_rdh | /**
* Set the maximum number of values to return per row per Column Family
*
* @param limit
* the maximum number of values returned / row / CF
*/
public Scan setMaxResultsPerColumnFamily(int limit) {
this.storeLimit = limit;
return this;
} | 3.26 |
hbase_Scan_getMaxResultsPerColumnFamily_rdh | /**
* Returns maximum number of values to return per row per CF
*/
public int getMaxResultsPerColumnFamily() {
return this.storeLimit;
} | 3.26 |
hbase_Scan_isRaw_rdh | /**
* Returns True if this Scan is in "raw" mode.
*/
public boolean isRaw() {
byte[] attr = getAttribute(RAW_ATTR);
return attr == null ? false : Bytes.toBoolean(attr);
} | 3.26 |
hbase_Scan_setAllowPartialResults_rdh | /**
* Setting whether the caller wants to see the partial results when server returns
* less-than-expected cells. It is helpful while scanning a huge row to prevent OOM at client. By
* default this value is false and the complete results will be assembled client side before being
* delivered to the caller.
*
* @see Result#mayHaveMoreCellsInRow()
* @see #setBatch(int)
*/
public Scan setAllowPartialResults(final boolean allowPartialResults) {
this.allowPartialResults = allowPartialResults;
return this;
} | 3.26 |
hbase_Scan_setMaxResultSize_rdh | /**
* Set the maximum result size. The default is -1; this means that no specific maximum result size
* will be set for this scan, and the global configured value will be used instead. (Defaults to
* unlimited).
*
* @param maxResultSize
* The maximum result size in bytes.
*/
public Scan setMaxResultSize(long maxResultSize) {
this.maxResultSize = maxResultSize;
return this;
}
hbase_Scan_getMaxVersions_rdh | /**
* Returns the max number of versions to fetch
*/
public int getMaxVersions() {
return this.maxVersions;
} | 3.26 |
hbase_Scan_createScanFromCursor_rdh | /**
 * Create a new Scan with a cursor. It only sets the position information, such as the start row
 * key. The rest (like column families, stop row, limit) should still be filled in by the user.
* {@link Result#isCursor()} {@link Result#getCursor()} {@link Cursor}
*/
public static Scan createScanFromCursor(Cursor cursor) {
return new Scan().withStartRow(cursor.getRow());
} | 3.26 |
hbase_Scan_setReadType_rdh | /**
* Set the read type for this scan.
* <p>
 * Notice that we may choose to use pread even if you specify {@link ReadType#STREAM} here. For
* example, we will always use pread if this is a get scan.
*/
public Scan setReadType(ReadType readType) {
this.readType = readType;
return this;
} | 3.26 |
hbase_Scan_isReversed_rdh | /**
* Get whether this scan is a reversed one.
*
* @return true if backward scan, false if forward(default) scan
*/
public boolean isReversed() {
return reversed;
} | 3.26 |
hbase_Scan_readVersions_rdh | /**
* Get up to the specified number of versions of each column.
*
* @param versions
* specified number of versions for each column
*/
public Scan readVersions(int versions) {
this.maxVersions = versions;
return this;
} | 3.26 |
hbase_Scan_m0_rdh | /**
* Get all columns from the specified family.
* <p>
* Overrides previous calls to addColumn for this family.
*
* @param family
* family name
*/
public Scan m0(byte[] family) {
familyMap.remove(family);
familyMap.put(family, null);
return this;
} | 3.26 |
hbase_Scan_numFamilies_rdh | /**
* Returns the number of families in familyMap
*/
public int numFamilies() {
if (hasFamilies()) {
return this.familyMap.size();
}
return 0;
} | 3.26 |
hbase_Scan_includeStopRow_rdh | /**
 * Returns whether we should include the stop row when scanning
*/
public boolean includeStopRow() {
return includeStopRow;
}
hbase_Scan_setRaw_rdh | /**
* Enable/disable "raw" mode for this scan. If "raw" is enabled the scan will return all delete
 * markers and deleted rows that have not been collected yet. This is mostly useful for Scan on
* column families that have KEEP_DELETED_ROWS enabled. It is an error to specify any column when
* "raw" is set.
*
* @param raw
* True/False to enable/disable "raw" mode.
*/
public Scan setRaw(boolean raw) {
setAttribute(RAW_ATTR, Bytes.toBytes(raw));
return this;
} | 3.26 |
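This connects back to the delete-marker handling in `checkVersions` at the top of this table: a raw scan is how a client actually observes those markers. A small sketch, using the public `addFamily` name for the method obfuscated as `m0` above (family name illustrative; remember not to add specific columns to a raw scan):

```java
// See delete markers and deleted-but-uncollected cells in family "cf".
Scan raw = new Scan().setRaw(true).readAllVersions().addFamily(Bytes.toBytes("cf"));
```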
hbase_Scan_setOneRowLimit_rdh | /**
* Call this when you only want to get one row. It will set {@code limit} to {@code 1}, and also
* set {@code readType} to {@link ReadType#PREAD}.
*/
public Scan setOneRowLimit() {
return setLimit(1).setReadType(ReadType.PREAD);
} | 3.26 |
hbase_Scan_getStopRow_rdh | /**
* Returns the stoprow
*/
public byte[] getStopRow() {
return this.f0;
} | 3.26 |
hbase_Scan_withStartRow_rdh | /**
* Set the start row of the scan.
* <p>
* If the specified row does not exist, or the {@code inclusive} is {@code false}, the Scanner
* will start from the next closest row after the specified row.
* <p>
* <b>Note:</b> <strong>Do NOT use this in combination with {@link #setRowPrefixFilter(byte[])} or
* {@link #setStartStopRowForPrefixScan(byte[])}.</strong> Doing so will make the scan result
* unexpected or even undefined.
* </p>
*
* @param startRow
* row to start scanner at or after
* @param inclusive
* whether we should include the start row when scan
* @throws IllegalArgumentException
* if startRow does not meet criteria for a row key (when length
* exceeds {@link HConstants#MAX_ROW_LENGTH})
*/
public Scan withStartRow(byte[] startRow, boolean inclusive) {
if (Bytes.len(startRow) > HConstants.MAX_ROW_LENGTH) {
throw new IllegalArgumentException("startRow's length must be less than or equal to "
+ HConstants.MAX_ROW_LENGTH + " to meet the criteria for a row key.");
}
this.startRow = startRow;
this.includeStartRow = inclusive;
return this;
} | 3.26 |
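A usage sketch of the inclusive/exclusive flags on the row-range methods above; row keys are illustrative:

```java
// Half-open range ["row-0010", "row-0020"): start inclusive, stop exclusive.
Scan range = new Scan()
    .withStartRow(Bytes.toBytes("row-0010"), true)
    .withStopRow(Bytes.toBytes("row-0020"), false);
```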
hbase_Scan_getFingerprint_rdh | /**
* Compile the table and column family (i.e. schema) information into a String. Useful for parsing
* and aggregation by debugging, logging, and administration tools.
*/
@Override
public Map<String, Object> getFingerprint() {
Map<String, Object> map = new HashMap<>();
List<String> v13 = new ArrayList<>();
if (this.familyMap.isEmpty()) {
map.put("families", "ALL");return map;
} else {
map.put("families", v13);
}
for (Map.Entry<byte[], NavigableSet<byte[]>> entry : this.familyMap.entrySet()) {
v13.add(Bytes.toStringBinary(entry.getKey()));
}
return map;
} | 3.26 |
hbase_Scan_isScanMetricsEnabled_rdh | /**
* Returns True if collection of scan metrics is enabled. For advanced users.
*/
public boolean isScanMetricsEnabled() {
byte[] attr = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE);
return attr == null ? false : Bytes.toBoolean(attr);
} | 3.26 |
hbase_Scan_setBatch_rdh | /**
* Set the maximum number of cells to return for each call to next(). Callers should be aware that
* this is not equivalent to calling {@link #setAllowPartialResults(boolean)}. If you don't allow
 * partial results, the number of cells in each Result must be equal to your batch setting unless
 * it is the last Result for the current row. So this method is helpful in paging queries. If you
 * just want to prevent OOM at the client, it is better to use setAllowPartialResults(true).
*
* @param batch
* the maximum number of values
* @see Result#mayHaveMoreCellsInRow()
*/
public Scan setBatch(int batch) {
if (this.hasFilter() && this.filter.hasFilterRow()) {
throw new IncompatibleFilterException("Cannot set batch on a scan using a filter" + " that returns true for filter.hasFilterRow");
}
this.batch = batch;
return this;
} | 3.26 |
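To make the distinction above concrete: batching fixes the number of cells per `Result` (useful for paging), while allowing partial results simply returns whatever the server has ready (useful against client-side OOM). Illustrative sketch:

```java
Scan paged = new Scan().setBatch(100);                 // each Result holds 100 cells, except a row's last
Scan lowMem = new Scan().setAllowPartialResults(true); // Results may split a row at arbitrary points
```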
hbase_Scan_setScanMetricsEnabled_rdh | /**
* Enable collection of {@link ScanMetrics}. For advanced users.
*
* @param enabled
* Set to true to enable accumulating scan metrics
*/
public Scan setScanMetricsEnabled(final boolean enabled) {
setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.valueOf(enabled)));
return this;
} | 3.26 |
hbase_Scan_setLimit_rdh | /**
* Set the limit of rows for this scan. We will terminate the scan if the number of returned rows
* reaches this value.
* <p>
 * This condition will be tested last, after all other conditions such as stopRow, filter, etc.
*
* @param limit
* the limit of rows for this scan
 */
public Scan setLimit(int limit) {
this.limit = limit;
return this;
} | 3.26 |
hbase_Scan_getFamilies_rdh | /**
* Returns the keys of the familyMap
*/
public byte[][] getFamilies() {
if (hasFamilies()) {
return this.familyMap.keySet().toArray(new byte[0][0]);
}
return null;
} | 3.26 |
hbase_Scan_hasFamilies_rdh | /**
 * Returns true if familyMap is non-empty, false otherwise
*/
public boolean hasFamilies() {
return !this.familyMap.isEmpty();
} | 3.26 |
hbase_Scan_setMvccReadPoint_rdh | /**
* Set the mvcc read point used to open a scanner.
*/
Scan setMvccReadPoint(long mvccReadPoint) {
this.mvccReadPoint = mvccReadPoint;
return this;
} | 3.26 |
hbase_Address_equals_rdh | // Don't use HostAndPort equals... It is wonky including
// ipv6 brackets
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other instanceof Address) {
Address that = ((Address) (other));
return this.getHostName().equals(that.getHostName()) && (this.getPort() == that.getPort());
}
return false;
} | 3.26 |
hbase_Address_m0_rdh | /**
*
* @deprecated Use {@link #getHostName()} instead
*/
@Deprecated
public String m0() {
return this.hostAndPort.getHost();
} | 3.26 |
hbase_DeleteNamespaceProcedure_m1_rdh | /**
* Delete the namespace directories from the file system
*
* @param env
* MasterProcedureEnv
* @param namespaceName
* name of the namespace in string format
*/
private static void m1(MasterProcedureEnv env, String namespaceName) throws IOException {
MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
FileSystem fs = mfs.getFileSystem();
Path p = CommonFSUtils.getNamespaceDir(mfs.getRootDir(), namespaceName);
try {
for (FileStatus status : fs.listStatus(p)) {
if (!HConstants.HBASE_NON_TABLE_DIRS.contains(status.getPath().getName())) {
throw new IOException("Namespace directory contains table dir: " + status.getPath());
}
}
if (!fs.delete(CommonFSUtils.getNamespaceDir(mfs.getRootDir(), namespaceName), true)) {
throw new IOException("Failed to remove namespace: " + namespaceName);
}
} catch (FileNotFoundException e) {
// File already deleted, continue
LOG.debug("deleteDirectory throws exception: " + e);
}
} | 3.26 |
hbase_DeleteNamespaceProcedure_removeNamespaceQuota_rdh | /**
 * Remove the quota for the namespace
*
* @param env
* MasterProcedureEnv
* @param namespaceName
* name of the namespace in string format
*/
private static void removeNamespaceQuota(final MasterProcedureEnv env, final String namespaceName) throws IOException {
env.getMasterServices().getMasterQuotaManager().removeNamespaceQuota(namespaceName);
} | 3.26 |
hbase_DeleteNamespaceProcedure_deleteNamespace_rdh | /**
 * Delete the row from the ns family in the meta table.
*
* @param env
* MasterProcedureEnv
* @param namespaceName
* name of the namespace in string format
*/
private static void deleteNamespace(MasterProcedureEnv env, String namespaceName) throws IOException {
getTableNamespaceManager(env).deleteNamespace(namespaceName);
} | 3.26 |
hbase_DeleteNamespaceProcedure_prepareDelete_rdh | /**
* Action before any real action of deleting namespace.
*
* @param env
* MasterProcedureEnv
*/
private boolean prepareDelete(final MasterProcedureEnv env) throws IOException {
if (getTableNamespaceManager(env).doesNamespaceExist(namespaceName) == false) {
setFailure("master-delete-namespace", new NamespaceNotFoundException(namespaceName));
return false;
}
if (NamespaceDescriptor.RESERVED_NAMESPACES.contains(namespaceName)) {
setFailure("master-delete-namespace", new ConstraintException(("Reserved namespace " + namespaceName) +
" cannot be removed."));
return false;
}
int tableCount = 0;
try {
tableCount = env.getMasterServices().listTableDescriptorsByNamespace(namespaceName).size();
} catch (FileNotFoundException fnfe) {
setFailure("master-delete-namespace", new NamespaceNotFoundException(namespaceName));
return false;
}
if (tableCount > 0) {
setFailure("master-delete-namespace", new ConstraintException(((("Only empty namespaces can be removed. Namespace " + namespaceName) + " has ") + tableCount)
+ " tables"));return
false;
}
// This is used for rollback
nsDescriptor = getTableNamespaceManager(env).get(namespaceName);
return true;
} | 3.26 |
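From the client side, the preconditions enforced by `prepareDelete` surface through the Admin API: deleting a reserved or non-empty namespace fails. A hedged sketch, assuming an open `Connection` named `conn` and an illustrative namespace name:

```java
try (Admin admin = conn.getAdmin()) {
  admin.deleteNamespace("archive"); // fails if reserved or if it still contains tables
}
```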
hbase_StorageClusterStatusModel_setRegions_rdh | /**
*
* @param regions
* the total number of regions served by the cluster
*/
public void setRegions(int regions) {
this.regions = regions;
} | 3.26 |
hbase_StorageClusterStatusModel_getTotalStaticBloomSizeKB_rdh | /**
* Returns The total size of static bloom, in KB
*/
@XmlAttribute
public int getTotalStaticBloomSizeKB() {
return totalStaticBloomSizeKB;
} | 3.26 |
hbase_StorageClusterStatusModel_m1_rdh | /**
*
* @param storefileIndexSizeKB
* total size of store file indexes, in KB
*/
public void m1(long storefileIndexSizeKB) {
this.storefileIndexSizeKB = storefileIndexSizeKB;
} | 3.26 |
hbase_StorageClusterStatusModel_getDeadNodes_rdh | // workaround https://github.com/FasterXML/jackson-dataformat-xml/issues/192
@XmlElement(name = "Node")
@XmlElementWrapper(name = "DeadNodes")
@JsonProperty("DeadNodes")
public List<String> getDeadNodes() {
return deadNodes;
} | 3.26 |
hbase_StorageClusterStatusModel_setTotalCompactingKVs_rdh | /**
*
* @param totalCompactingKVs
* The total compacting key values in currently running compaction
 */
public void setTotalCompactingKVs(long totalCompactingKVs) {
this.totalCompactingKVs = totalCompactingKVs;
} | 3.26 |
hbase_StorageClusterStatusModel_getLiveNode_rdh | /**
*
* @param index
* the index
* @return the region server model
*/
public Node getLiveNode(int index) {
return liveNodes.get(index);
} | 3.26 |
hbase_StorageClusterStatusModel_setAverageLoad_rdh | /**
*
* @param averageLoad
* the average load of region servers in the cluster
*/
public void setAverageLoad(double averageLoad) {
this.averageLoad = averageLoad;
} | 3.26 |
hbase_StorageClusterStatusModel_getHeapSizeMB_rdh | /**
* Returns the current heap size, in MB
*/
@XmlAttribute
public int getHeapSizeMB() {
return heapSizeMB;
} | 3.26 |
hbase_StorageClusterStatusModel_setRequests_rdh | /**
*
* @param requests
* the total number of requests per second handled by the cluster
*/
public void setRequests(long requests) {
this.requests = requests;
} | 3.26 |
hbase_StorageClusterStatusModel_getMaxHeapSizeMB_rdh | /**
* Returns the maximum heap size, in MB
*/
@XmlAttribute
public int getMaxHeapSizeMB() {
return maxHeapSizeMB;
} | 3.26 |
hbase_StorageClusterStatusModel_getWriteRequestsCount_rdh | /**
* Returns the current total write requests made to region
*/
@XmlAttribute
public long getWriteRequestsCount() {
return writeRequestsCount;
} | 3.26 |
hbase_StorageClusterStatusModel_getStorefiles_rdh | /**
* Returns the number of store files
*/
@XmlAttribute
public int getStorefiles() {
return storefiles;
} | 3.26 |
hbase_StorageClusterStatusModel_getDeadNode_rdh | /**
*
* @param index
* the index
* @return the dead region server's name
*/
public String getDeadNode(int index) {
return deadNodes.get(index);
} | 3.26 |
hbase_StorageClusterStatusModel_getRegions_rdh | /**
* Returns the total number of regions served by the cluster
*/
@XmlAttribute
public int getRegions() {
return regions;
} | 3.26 |
hbase_StorageClusterStatusModel_setCurrentCompactedKVs_rdh | /**
*
* @param currentCompactedKVs
* The completed count of key values in currently running
* compaction
*/
public void setCurrentCompactedKVs(long currentCompactedKVs) {
this.currentCompactedKVs = currentCompactedKVs;
} | 3.26 |
hbase_StorageClusterStatusModel_setRootIndexSizeKB_rdh | /**
*
* @param rootIndexSizeKB
* The current total size of root-level indexes for the region, in KB
 */
public void setRootIndexSizeKB(int rootIndexSizeKB) {
this.rootIndexSizeKB = rootIndexSizeKB;
} | 3.26 |
hbase_StorageClusterStatusModel_getStores_rdh | /**
* Returns the number of stores
*/
@XmlAttribute
public int getStores() {
return stores;
} | 3.26 |
hbase_StorageClusterStatusModel_getRegion_rdh | /**
*
* @param index
* the index
* @return the region name
*/
public Region getRegion(int index) {
return regions.get(index);
} | 3.26 |
hbase_StorageClusterStatusModel_m0_rdh | /**
*
* @param storefileSizeMB
* total size of store files, in MB
*/
public void m0(int storefileSizeMB) {
this.storefileSizeMB = storefileSizeMB;
} | 3.26 |
hbase_StorageClusterStatusModel_setMemStoreSizeMB_rdh | /**
*
* @param memstoreSizeMB
* memstore size, in MB
*/
public void setMemStoreSizeMB(int memstoreSizeMB) {
this.memstoreSizeMB = memstoreSizeMB;
} | 3.26 |
hbase_StorageClusterStatusModel_getTotalStaticIndexSizeKB_rdh | /**
* Returns The total size of static index, in KB
 */
@XmlAttribute
public int getTotalStaticIndexSizeKB() {
return totalStaticIndexSizeKB;
} | 3.26 |
hbase_StorageClusterStatusModel_getRootIndexSizeKB_rdh | /**
* Returns The current total size of root-level indexes for the region, in KB.
*/
@XmlAttribute
public int getRootIndexSizeKB() {
return rootIndexSizeKB;
} | 3.26 |
hbase_StorageClusterStatusModel_addRegion_rdh | /**
* Add a region name to the list
*
* @param name
* the region name
*/
public void addRegion(byte[] name, int stores, int storefiles, int storefileSizeMB,
int memstoreSizeMB, long storefileIndexSizeKB, long readRequestsCount, long cpRequestsCount,
long writeRequestsCount, int rootIndexSizeKB, int totalStaticIndexSizeKB,
int totalStaticBloomSizeKB, long totalCompactingKVs, long currentCompactedKVs) {
regions.add(new Region(name, stores, storefiles, storefileSizeMB, memstoreSizeMB, storefileIndexSizeKB, readRequestsCount, cpRequestsCount, writeRequestsCount, rootIndexSizeKB, totalStaticIndexSizeKB, totalStaticBloomSizeKB, totalCompactingKVs, currentCompactedKVs));
} | 3.26 |
hbase_StorageClusterStatusModel_setStores_rdh | /**
*
* @param stores
* the number of stores
*/
public void setStores(int stores) {
this.stores = stores;
} | 3.26 |
hbase_StorageClusterStatusModel_getName_rdh | /**
* Returns the region name
*/
@XmlAttribute
public byte[] getName() {
return name;
} | 3.26 |
hbase_StorageClusterStatusModel_setLiveNodes_rdh | /**
*
* @param nodes
* the list of live node models
*/
public void setLiveNodes(List<Node> nodes) {
this.liveNodes = nodes;
} | 3.26 |
hbase_StorageClusterStatusModel_getCurrentCompactedKVs_rdh | /**
* Returns The number of current compacted key-values
*/
@XmlAttribute
public long getCurrentCompactedKVs() {
return currentCompactedKVs;
} | 3.26 |
hbase_StorageClusterStatusModel_setHeapSizeMB_rdh | /**
*
* @param heapSizeMB
* the current heap size, in MB
*/
public void setHeapSizeMB(int heapSizeMB) {
this.heapSizeMB = heapSizeMB;
} | 3.26 |
hbase_StorageClusterStatusModel_setReadRequestsCount_rdh | /**
*
* @param readRequestsCount
* The current total read requests made to region
*/
public void setReadRequestsCount(long readRequestsCount) {
this.readRequestsCount = readRequestsCount;
} | 3.26 |
hbase_StorageClusterStatusModel_setTotalStaticIndexSizeKB_rdh | /**
*
* @param totalStaticIndexSizeKB
* The total size of all index blocks, not just the root level,
* in KB.
*/
public void setTotalStaticIndexSizeKB(int totalStaticIndexSizeKB) {
this.totalStaticIndexSizeKB = totalStaticIndexSizeKB;
} | 3.26 |
hbase_StorageClusterStatusModel_m2_rdh | /**
*
* @param startCode
* the region server's start code
*/
public void m2(long startCode) {
this.startCode = startCode;
} | 3.26 |