name stringlengths 12–178 | code_snippet stringlengths 8–36.5k | score float64 3.26–3.68 |
---|---|---|
hbase_ScannerModel_setBatch_rdh | /**
*
* @param batch
* the number of cells to return in batch
*/
public void setBatch(int batch) {
this.batch = batch;
} | 3.26 |
hbase_ScannerModel_setEndTime_rdh | /**
*
* @param endTime
* the upper bound on timestamps of values of interest
*/
public void setEndTime(long endTime) {
this.endTime = endTime;
} | 3.26 |
hbase_ScannerModel_setLimit_rdh | /**
*
* @param limit
the maximum number of rows each scanner can fetch during its lifetime
*/
public void setLimit(int limit) {
this.limit = limit;
} | 3.26 |
hbase_ScannerModel_getMaxVersions_rdh | /**
* Returns maximum number of versions to return
*/
@XmlAttribute
public int getMaxVersions() {
return maxVersions;
} | 3.26 |
hbase_ScannerModel_getEndRow_rdh | /**
* Returns end row
*/
@XmlAttribute
public byte[] getEndRow() {
return endRow;
} | 3.26 |
hbase_ScannerModel_m0_rdh | /**
* Returns the lower bound on timestamps of items of interest
*/
@XmlAttribute
public long m0() {
return startTime;
} | 3.26 |
hbase_ScannerModel_setCacheBlocks_rdh | /**
*
* @param value
* true if HFile blocks should be cached on the servers for this scan, false
* otherwise
*/
public void setCacheBlocks(boolean value) {
this.cacheBlocks = value;
} | 3.26 |
hbase_ScannerModel_hasStartRow_rdh | /**
* Returns true if a start row was specified
*/
public boolean hasStartRow() {
return !Bytes.equals(startRow, HConstants.EMPTY_START_ROW);
} | 3.26 |
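A quick sketch of the convention this check relies on: `HConstants.EMPTY_START_ROW` is the zero-length byte array, so an unset start row compares equal to it (the class name below is illustrative):

```java
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.Bytes;

public class StartRowCheckDemo {
  public static void main(String[] args) {
    // EMPTY_START_ROW is new byte[0]; an unset start row equals it.
    System.out.println(Bytes.equals(HConstants.EMPTY_START_ROW, new byte[0])); // true
    // A real start row does not, so hasStartRow() would return true for it.
    System.out.println(!Bytes.equals(Bytes.toBytes("row-1"), HConstants.EMPTY_START_ROW)); // true
  }
}
```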
hbase_ScannerModel_setEndRow_rdh | /**
*
* @param endRow
* end row
*/
public void setEndRow(byte[] endRow) {
this.endRow = endRow;
} | 3.26 |
hbase_ScannerModel_getStartRow_rdh | /**
* Returns start row
*/
@XmlAttribute
public byte[] getStartRow() {
return startRow;
} | 3.26 |
hbase_ScannerModel_addLabel_rdh | /**
* Add a visibility label to the scan
*/
public void addLabel(String label) {
labels.add(label);
} | 3.26 |
hbase_ScannerModel_setFilter_rdh | /**
*
* @param filter
* the filter specification
*/
public void setFilter(String filter) {
this.filter = filter;
} | 3.26 |
hbase_ScannerModel_stringifyFilter_rdh | /**
*
* @param filter
* the filter
* @return the JSON representation of the filter
*/
public static String stringifyFilter(final Filter filter) throws Exception {
return getJasonProvider().locateMapper(ScannerModel.FilterModel.class, MediaType.APPLICATION_JSON_TYPE).writeValueAsString(new FilterModel(filter));
} | 3.26 |
hbase_ScannerModel_fromScan_rdh | /**
*
* @param scan
* the scan specification
*/
public static ScannerModel fromScan(Scan scan) throws Exception {
ScannerModel model = new ScannerModel();
model.setStartRow(scan.getStartRow());
model.setEndRow(scan.getStopRow());
Map<byte[], NavigableSet<byte[]>> families = scan.getFamilyMap();
if (families != null) {
for (Map.Entry<byte[], NavigableSet<byte[]>> entry : families.entrySet()) {
if (entry.getValue() != null) {
for (byte[] qualifier : entry.getValue()) {
model.addColumn(Bytes.add(entry.getKey(), COLUMN_DIVIDER, qualifier));
}
} else {
model.addColumn(entry.getKey());
}
}
}
model.setStartTime(scan.getTimeRange().getMin());
model.setEndTime(scan.getTimeRange().getMax());
int caching = scan.getCaching();
if (caching > 0) {
model.setCaching(caching);
}
int batch = scan.getBatch();
if (batch > 0) {
model.setBatch(batch);
}
int maxVersions = scan.getMaxVersions();
if (maxVersions > 0) {
model.setMaxVersions(maxVersions);
}
if (scan.getLimit() > 0) {
model.setLimit(scan.getLimit());
}
Filter filter = scan.getFilter();
if (filter != null) {
model.setFilter(stringifyFilter(filter));
}
// Add the visibility labels if found in the attributes
Authorizations authorizations = scan.getAuthorizations();
if (authorizations != null) {
List<String> labels = authorizations.getLabels();
for (String label : labels) {
model.addLabel(label);
}
}
return model;
} | 3.26 |
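A minimal usage sketch for `fromScan` (row, family, and qualifier names are illustrative; assumes an HBase 2.x+ client where the `Scan` setters are chainable):

```java
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.rest.model.ScannerModel;
import org.apache.hadoop.hbase.util.Bytes;

public class FromScanDemo {
  public static void main(String[] args) throws Exception {
    Scan scan = new Scan()
        .withStartRow(Bytes.toBytes("row-000"))
        .withStopRow(Bytes.toBytes("row-999"))
        .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q")) // becomes "cf:q" in the model
        .setCaching(100)
        .readVersions(2);
    ScannerModel model = ScannerModel.fromScan(scan);
    System.out.println(Bytes.toString(model.getStartRow())); // row-000
    System.out.println(model.getMaxVersions());              // 2
  }
}
```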
hbase_ScannerModel_getBatch_rdh | /**
* Returns the number of cells to return in batch
*/
@XmlAttribute
public int getBatch() {
return batch;
} | 3.26 |
hbase_ScannerModel_getLimit_rdh | /**
* Returns the limit specification
*/
@XmlAttribute
public int getLimit() {
return limit;
} | 3.26 |
hbase_ScannerModel_setMaxVersions_rdh | /**
*
* @param maxVersions
* maximum number of versions to return
*/
public void setMaxVersions(int maxVersions) {
this.maxVersions = maxVersions;
} | 3.26 |
hbase_ScannerModel_buildFilter_rdh | /**
*
* @param s
* the JSON representation of the filter
* @return the filter
*/
public static Filter buildFilter(String s) throws Exception {
FilterModel model = getJasonProvider().locateMapper(ScannerModel.FilterModel.class, MediaType.APPLICATION_JSON_TYPE).readValue(s, ScannerModel.FilterModel.class);
return model.build();
} | 3.26 |
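Taken together with `stringifyFilter` above, a round-trip sketch (assumes the HBase REST model classes are on the classpath; the prefix value is illustrative):

```java
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.rest.model.ScannerModel;
import org.apache.hadoop.hbase.util.Bytes;

public class FilterRoundTripDemo {
  public static void main(String[] args) throws Exception {
    Filter original = new PrefixFilter(Bytes.toBytes("row-"));
    String json = ScannerModel.stringifyFilter(original); // filter -> JSON
    Filter rebuilt = ScannerModel.buildFilter(json);      // JSON -> filter
    System.out.println(json);
    System.out.println(rebuilt instanceof PrefixFilter);  // true
  }
}
```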
hbase_HFileArchiveManager_enableHFileBackup_rdh | /**
* Turn on auto-backups of HFiles on the specified table.
* <p>
* When HFiles would be deleted from the hfile archive, they are instead preserved.
*
* @param table
* name of the table for which to preserve hfiles.
* @return <tt>this</tt> for chaining.
* @throws KeeperException
* if we can't reach zookeeper to update the hfile cleaner.
*/
public HFileArchiveManager enableHFileBackup(byte[] table) throws KeeperException {
enable(this.zooKeeper, table);
return this;
} | 3.26 |
hbase_HFileArchiveManager_isArchivingEnabled_rdh | /**
* Check to see if the table is currently marked for archiving
*
* @param table
* name of the table to check
* @return <tt>true</tt> if the archive znode for that table exists, <tt>false</tt> if not
* @throws KeeperException
* if an unexpected zookeeper error occurs
*/
public boolean isArchivingEnabled(byte[] table) throws KeeperException {
String tableNode = this.getTableNode(table);
return ZKUtil.checkExists(zooKeeper, tableNode) >= 0;
} | 3.26 |
hbase_HFileArchiveManager_disableHFileBackup_rdh | /**
* Disable long-term archival of all hfiles for all tables in the cluster.
*
* @return <tt>this</tt> for chaining.
* @throws IOException
* if the number of attempts is exceeded
*/
public HFileArchiveManager disableHFileBackup() throws IOException {
LOG.debug("Disabling backups on all tables.");
try {
ZKUtil.deleteNodeRecursively(this.zooKeeper, archiveZnode);
return this;
} catch (KeeperException e) {
throw new IOException("Unexpected ZK exception!", e);
}
} | 3.26 |
hbase_HFileArchiveManager_disable_rdh | /**
* Disable all archiving of files for a given table
* <p>
* Inherently an <b>asynchronous operation</b>.
*
* @param zooKeeper
* watcher for the ZK cluster
* @param table
* name of the table to disable
* @throws KeeperException
if an unexpected ZK connection issue occurs
*/
private void disable(ZKWatcher zooKeeper, byte[] table) throws KeeperException {
// ensure the latest state of the archive node is found
zooKeeper.syncOrTimeout(archiveZnode);
// if the top-level archive node is gone, then we are done
if (ZKUtil.checkExists(zooKeeper, archiveZnode) < 0) {
return;
}
// delete the table node, from the archive
String tableNode = this.getTableNode(table);
// make sure the table is the latest version so the delete takes
zooKeeper.syncOrTimeout(tableNode);
LOG.debug("Attempting to delete table node:"
+ tableNode);
ZKUtil.deleteNodeRecursively(zooKeeper, tableNode);
} | 3.26 |
hbase_HFileArchiveManager_getTableNode_rdh | /**
* Get the zookeeper node associated with archiving the given table
*
* @param table
* name of the table to check
* @return znode for the table's archive status
*/
private String getTableNode(byte[] table) {
return ZNodePaths.joinZNode(archiveZnode, Bytes.toString(table));
} | 3.26 |
hbase_HadoopCompressor_maxCompressedLength_rdh | // Package private
int maxCompressedLength(int len) {
return compressor.maxCompressedLength(len);
} | 3.26 |
hbase_StreamUtils_readByte_rdh | /**
* Read a byte from the given stream using the read method, and throw EOFException if it returns
* -1, like the implementation in {@code DataInputStream}.
* <p/>
 * This is useful because casting the return value of the read method directly to byte loses the
 * ability to distinguish a real byte whose value is -1 (0xFF) from EOF, since casting the int -1
 * to byte also yields -1.
*/
public static byte readByte(InputStream in) throws IOException {
int r = in.read();
if (r < 0) {
throw new EOFException();
}
return ((byte) (r));
} | 3.26 |
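A self-contained sketch of the distinction the javadoc describes: a stored 0xFF byte reads back as the int 255, which a naive cast would conflate with the -1 EOF sentinel (class name is illustrative):

```java
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class ReadByteDemo {
  public static void main(String[] args) throws IOException {
    InputStream in = new ByteArrayInputStream(new byte[] { (byte) 0xFF });
    int r = in.read();
    System.out.println(r);        // 255: a real byte, still distinguishable from EOF
    System.out.println((byte) r); // -1: after the cast it looks like EOF
    if (in.read() < 0) {          // the check StreamUtils.readByte performs first
      System.out.println("now actually at EOF");
    }
  }
}
```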
hbase_RegionNormalizerManager_planSkipped_rdh | /**
* Call-back for the case where plan couldn't be executed due to constraint violation, such as
* namespace quota.
*
* @param type
* type of plan that was skipped.
*/
public void planSkipped(NormalizationPlan.PlanType type) {
// TODO: this appears to be used only for testing.
if (worker != null) {
worker.planSkipped(type);
}
} | 3.26 |
hbase_RegionNormalizerManager_setNormalizerOn_rdh | /**
* Set region normalizer on/off
*
* @param normalizerOn
* whether normalizer should be on or off
*/
public void setNormalizerOn(boolean normalizerOn) throws IOException {
regionNormalizerStateStore.set(normalizerOn);
} | 3.26 |
hbase_RegionNormalizerManager_getSkippedCount_rdh | /**
* Retrieve a count of the number of times plans of type {@code type} were submitted but skipped.
*
* @param type
* type of plan for which skipped count is to be returned
*/
public long getSkippedCount(NormalizationPlan.PlanType type) {
// TODO: this appears to be used only for testing.
return worker == null ? 0 : worker.getSkippedCount(type);
} | 3.26 |
hbase_RegionNormalizerManager_getSplitPlanCount_rdh | /**
* Return the number of times a {@link SplitNormalizationPlan} has been submitted.
*/
public long getSplitPlanCount() {
return worker == null ? 0 : worker.getSplitPlanCount();
}
/**
* Return the number of times a {@link MergeNormalizationPlan} | 3.26 |
hbase_RegionNormalizerManager_normalizeRegions_rdh | /**
* Submit tables for normalization.
*
* @param tables
* a list of tables to submit.
* @param isHighPriority
* {@code true} when these requested tables should skip to the front of the
* queue.
* @return {@code true} when work was queued, {@code false} otherwise.
*/
public boolean normalizeRegions(List<TableName> tables, boolean isHighPriority) {
if (workQueue == null) {
return false;
}
if (isHighPriority) {
workQueue.putAllFirst(tables);
} else {
workQueue.putAll(tables);
}
return true;
} | 3.26 |
hbase_RowCountEndpoint_start_rdh | /**
* Stores a reference to the coprocessor environment provided by the
* {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost} from the region where this
* coprocessor is loaded. Since this is a coprocessor endpoint, it always expects to be loaded on
* a table region, so always expects this to be an instance of
* {@link RegionCoprocessorEnvironment}.
*
* @param env
* the environment provided by the coprocessor host
* @throws IOException
* if the provided environment is not an instance of
* {@code RegionCoprocessorEnvironment}
*/
@Override
public void start(CoprocessorEnvironment env) throws IOException {
if (env instanceof RegionCoprocessorEnvironment) {
this.env = ((RegionCoprocessorEnvironment) (env));
} else {
throw new CoprocessorException("Must be loaded on a table region!");
}
} | 3.26 |
hbase_RowCountEndpoint_getRowCount_rdh | /**
* Returns a count of the rows in the region where this coprocessor is loaded.
*/
@Override
public void getRowCount(RpcController controller, CountRequest request, RpcCallback<CountResponse> done) {
Scan scan = new Scan();
scan.setFilter(new FirstKeyOnlyFilter());
CountResponse response = null;
InternalScanner v2 = null;
try {
v2 = env.getRegion().getScanner(scan);
List<Cell> results = new ArrayList<>();
boolean hasMore = false;
byte[] lastRow = null;
long count = 0;
do {
hasMore = v2.next(results);
for (Cell kv : results) {
byte[] currentRow = CellUtil.cloneRow(kv);
if ((lastRow == null) || (!Bytes.equals(lastRow, currentRow))) {
lastRow = currentRow;
count++;
}
}
results.clear();
} while (hasMore);
response = CountResponse.newBuilder().setCount(count).build();
} catch (IOException ioe) {
CoprocessorRpcUtils.setControllerException(controller, ioe);
} finally {
if (v2 != null) {
IOUtils.closeQuietly(v2);
}
}
done.run(response);
} | 3.26 |
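A client-side sketch of invoking this endpoint, following the standard coprocessor-service call pattern (a hedged sketch: it assumes the hbase-examples `ExampleProtos` generated classes are on the classpath and that a `Table` handle already exists):

```java
import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;

public class RowCountClient {
  // Sums the per-region counts returned by RowCountEndpoint.getRowCount.
  static long countRows(Table table) throws Throwable {
    ExampleProtos.CountRequest request = ExampleProtos.CountRequest.getDefaultInstance();
    Map<byte[], Long> perRegion = table.coprocessorService(
        ExampleProtos.RowCountService.class, null, null, // null keys = all regions
        new Batch.Call<ExampleProtos.RowCountService, Long>() {
          @Override
          public Long call(ExampleProtos.RowCountService counter) throws IOException {
            CoprocessorRpcUtils.BlockingRpcCallback<ExampleProtos.CountResponse> rpc =
                new CoprocessorRpcUtils.BlockingRpcCallback<>();
            counter.getRowCount(null, request, rpc);
            ExampleProtos.CountResponse response = rpc.get();
            return response != null && response.hasCount() ? response.getCount() : 0L;
          }
        });
    long total = 0;
    for (Long regionCount : perRegion.values()) {
      total += regionCount; // one entry per region; sum for the table-wide count
    }
    return total;
  }
}
```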
hbase_RowCountEndpoint_getKeyValueCount_rdh | /**
* Returns a count of all KeyValues in the region where this coprocessor is loaded.
*/
@Override
public void getKeyValueCount(RpcController controller, CountRequest request, RpcCallback<CountResponse> done) {
CountResponse response = null;
InternalScanner scanner = null;
try {
scanner = env.getRegion().getScanner(new Scan());
List<Cell> results = new ArrayList<>();
boolean hasMore = false;
long count = 0;
do {
hasMore = scanner.next(results);
count += Iterables.size(results);
results.clear();
} while (hasMore);
response = CountResponse.newBuilder().setCount(count).build();
} catch (IOException ioe) {
CoprocessorRpcUtils.setControllerException(controller, ioe);
} finally {
if (scanner != null) {
IOUtils.closeQuietly(scanner);
}
}
done.run(response);
} | 3.26 |
hbase_RowCountEndpoint_getServices_rdh | /**
* Just returns a reference to this object, which implements the RowCounterService interface.
*/
@Override
public Iterable<Service> getServices() {
return Collections.singleton(this);
} | 3.26 |
hbase_RegionCoprocessorHost_postPut_rdh | /**
*
* @param put
* The Put object
* @param edit
* The WALEdit object.
* @exception IOException
* Exception
*/
public void postPut(final Put put, final WALEdit edit) throws IOException {
if (coprocEnvironments.isEmpty()) {
return;
}
execOperation(new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.postPut(this, put, edit);
}
});
} | 3.26 |
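For context, the observer side of this hook might look like the following minimal sketch, matching the three-argument `postPut` signature invoked above (older HBase 2.x releases add a `Durability` parameter; the class name and output are illustrative):

```java
import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WALEdit;

public class LoggingPutObserver implements RegionCoprocessor, RegionObserver {
  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this); // register this class as the RegionObserver
  }

  @Override
  public void postPut(ObserverContext<RegionCoprocessorEnvironment> c, Put put,
      WALEdit edit) throws IOException {
    // Runs after each successful Put on the region; keep the work cheap.
    System.out.println("postPut on row " + Bytes.toString(put.getRow()));
  }
}
```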
hbase_RegionCoprocessorHost_postScannerNext_rdh | /**
*
* @param s
* the scanner
* @param results
* the result set returned by the region server
* @param limit
* the maximum number of results to return
* @return 'has more' indication to give to client
* @exception IOException
* Exception
*/
public boolean postScannerNext(final InternalScanner s, final List<Result> results, final int limit, boolean hasMore) throws IOException {
if (this.coprocEnvironments.isEmpty()) {
return hasMore;
}
return execOperationWithResult(new ObserverOperationWithResult<RegionObserver, Boolean>(regionObserverGetter, hasMore) {
@Override
public Boolean m5(RegionObserver observer) throws IOException {
return observer.postScannerNext(this, s, results, limit, getResult());
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_postFlush_rdh | /**
* Invoked after a memstore flush
*/
public void postFlush(HStore store, HStoreFile storeFile, FlushLifeCycleTracker tracker) throws IOException {
if (coprocEnvironments.isEmpty()) {
return;
}
execOperation(new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.postFlush(this, store, storeFile, tracker);
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_preCompactSelection_rdh | /**
* Called prior to selecting the {@link HStoreFile}s for compaction from the list of currently
* available candidates.
* <p>
* Supports Coprocessor 'bypass' -- 'bypass' is how this method indicates that it changed the
* passed in <code>candidates</code>.
*
* @param store
* The store where compaction is being requested
* @param candidates
* The currently available store files
* @param tracker
* used to track the life cycle of a compaction
* @param user
* the user
*/
public boolean preCompactSelection(final HStore store, final List<HStoreFile> candidates, final CompactionLifeCycleTracker tracker, final User user) throws IOException {
if (coprocEnvironments.isEmpty()) {
return false;
}
boolean bypassable = true;
return execOperation(new RegionObserverOperationWithoutResult(user, bypassable) {
@Override
public void call(RegionObserver observer) throws IOException {
observer.preCompactSelection(this, store, candidates, tracker);
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_preMemStoreCompactionCompactScannerOpen_rdh | /**
 * Invoked before creating the StoreScanner for in-memory compaction.
*/
public ScanInfo preMemStoreCompactionCompactScannerOpen(HStore store) throws IOException {
CustomizedScanInfoBuilder builder = new CustomizedScanInfoBuilder(store.getScanInfo());
execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.preMemStoreCompactionCompactScannerOpen(this, store, builder);
}
});
return builder.build();
} | 3.26 |
hbase_RegionCoprocessorHost_getRegion_rdh | /**
* Returns the region
*/
@Override
public Region getRegion() {
return region;
} | 3.26 |
hbase_RegionCoprocessorHost_preCompact_rdh | /**
* Called prior to rewriting the store files selected for compaction
*
* @param store
* the store being compacted
* @param scanner
* the scanner used to read store data during compaction
* @param scanType
* type of Scan
* @param tracker
* used to track the life cycle of a compaction
* @param request
* the compaction request
* @param user
* the user
* @return Scanner to use (cannot be null!)
*/
public InternalScanner preCompact(final HStore store, final InternalScanner scanner, final ScanType scanType, final CompactionLifeCycleTracker tracker, final CompactionRequest request, final User user) throws IOException {
InternalScanner defaultResult = scanner;
if (coprocEnvironments.isEmpty()) {
return defaultResult;
}
return execOperationWithResult(new ObserverOperationWithResult<RegionObserver, InternalScanner>(regionObserverGetter, defaultResult, user) {
@Override
public InternalScanner call(RegionObserver observer) throws IOException {
InternalScanner v21 = observer.preCompact(this, store, getResult(), scanType, tracker, request);
if (v21 == null) {
throw new CoprocessorException("Null Scanner return disallowed!");
}
return v21;
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_preFlush_rdh | /**
* Invoked before a memstore flush
*/
public void preFlush(FlushLifeCycleTracker tracker) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.preFlush(this, tracker);
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_postScannerFilterRow_rdh | /**
* This will be called by the scan flow when the current scanned row is being filtered out by the
* filter.
*
* @param s
* the scanner
* @param curRowCell
* The cell in the current row which got filtered out
* @return whether more rows are available for the scanner or not
*/
public boolean postScannerFilterRow(final InternalScanner s, final Cell curRowCell) throws IOException {
// short circuit for performance
boolean defaultResult = true;
if (!hasCustomPostScannerFilterRow) {
return defaultResult;
}
if (this.coprocEnvironments.isEmpty()) {
return defaultResult;
}
return execOperationWithResult(new ObserverOperationWithResult<RegionObserver, Boolean>(regionObserverGetter, defaultResult) {
@Override
public Boolean call(RegionObserver observer) throws IOException {
return observer.postScannerFilterRow(this, s, curRowCell, getResult());
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_preIncrement_rdh | /**
* Supports Coprocessor 'bypass'.
*
* @param increment
* increment object
* @param edit
* The WALEdit object.
* @return result to return to client if default operation should be bypassed, null otherwise
* @throws IOException
* if an error occurred on the coprocessor
*/
public Result preIncrement(final Increment increment, final WALEdit edit) throws IOException {
boolean bypassable = true;
Result v40 = null;
if (coprocEnvironments.isEmpty()) {
return v40;
}
return execOperationWithResult(new ObserverOperationWithResult<RegionObserver, Result>(regionObserverGetter, v40, bypassable) {
@Override
public Result call(RegionObserver observer) throws IOException {
return observer.preIncrement(this, increment, edit);
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_m2_rdh | /**
* Called prior to opening store scanner for compaction.
*/
public ScanInfo m2(HStore store, ScanType scanType, CompactionLifeCycleTracker tracker, CompactionRequest request, User user) throws IOException {
if (coprocEnvironments.isEmpty()) {
return store.getScanInfo();
}
CustomizedScanInfoBuilder builder = new CustomizedScanInfoBuilder(store.getScanInfo());
execOperation(new RegionObserverOperationWithoutResult(user) {
@Override
public void call(RegionObserver observer) throws IOException {
observer.preCompactScannerOpen(this, store, scanType, builder, tracker, request);
}
});
return builder.build();
} | 3.26 |
hbase_RegionCoprocessorHost_postBulkLoadHFile_rdh | /**
*
* @param familyPaths
* pairs of { CF, file path } submitted for bulk load
* @param map
* Map of CF to List of file paths for the final loaded files
*/
public void postBulkLoadHFile(final List<Pair<byte[], String>> familyPaths, Map<byte[], List<Path>> map) throws IOException {
if (this.coprocEnvironments.isEmpty()) {
return;
}
execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.postBulkLoadHFile(this, familyPaths, map);
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_preScannerClose_rdh | /**
* Supports Coprocessor 'bypass'.
*
* @param s
* the scanner
* @return true if default behavior should be bypassed, false otherwise
* @exception IOException
* Exception
*/
// Should this be bypassable?
public boolean preScannerClose(final InternalScanner s) throws IOException {
return execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperationWithoutResult(true) {
@Override
public void call(RegionObserver observer) throws IOException {
observer.preScannerClose(this, s);
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_postCompact_rdh | /**
* Called after the store compaction has completed.
*
* @param store
* the store being compacted
* @param resultFile
* the new store file written during compaction
* @param tracker
* used to track the life cycle of a compaction
* @param request
* the compaction request
* @param user
* the user
*/
public void postCompact(final HStore store, final HStoreFile resultFile, final CompactionLifeCycleTracker tracker, final CompactionRequest request, final User user) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperationWithoutResult(user) {
@Override
public void call(RegionObserver observer) throws IOException {
observer.postCompact(this, store, resultFile, tracker, request);
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_postScannerOpen_rdh | /**
*
* @param scan
* the Scan specification
* @param s
* the scanner
* @return the scanner instance to use
* @exception IOException
* Exception
*/
public RegionScanner postScannerOpen(final Scan scan, RegionScanner s) throws IOException {
if (this.coprocEnvironments.isEmpty()) {
return s;
}
return execOperationWithResult(new ObserverOperationWithResult<RegionObserver, RegionScanner>(regionObserverGetter, s) {
@Override
public RegionScanner m4(RegionObserver observer) throws IOException {
return observer.postScannerOpen(this, scan, getResult());
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_postInstantiateDeleteTracker_rdh | /**
*
 * @deprecated Since 2.0 without any replacement; will be removed in 3.0
*/
@Deprecated
public DeleteTracker postInstantiateDeleteTracker(DeleteTracker result) throws IOException {
if (this.coprocEnvironments.isEmpty()) {
return result;
}
return execOperationWithResult(new ObserverOperationWithResult<RegionObserver, DeleteTracker>(regionObserverGetter, result) {
@Override
public DeleteTracker call(RegionObserver observer) throws IOException {
return observer.postInstantiateDeleteTracker(this, getResult());
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_postOpen_rdh | /**
* Invoked after a region open
*/
public void postOpen() {
if (coprocEnvironments.isEmpty()) {
return;
}
try {
execOperation(new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.postOpen(this);
}
});
} catch (IOException e) {
LOG.warn(e.toString(), e);
}
} | 3.26 |
hbase_RegionCoprocessorHost_preGet_rdh | // RegionObserver support
/**
* Supports Coprocessor 'bypass'.
*
* @param get
* the Get request
* @param results
* What to return if return is true/'bypass'.
* @return true if default processing should be bypassed.
* @exception IOException
* Exception
*/
public boolean preGet(final Get get, final List<Cell> results) throws IOException {
if (coprocEnvironments.isEmpty()) {
return false;
}
boolean bypassable = true;
return execOperation(new RegionObserverOperationWithoutResult(bypassable) {
@Override
public void call(RegionObserver observer) throws IOException {
observer.preGetOp(this, get, results);
}
});
} | 3.26 |
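A sketch of what 'bypass' means from the observer's side: the hook fills `results` itself and calls `bypass()`, so the region skips its normal read path (family, qualifier, and value below are illustrative):

```java
import java.io.IOException;
import java.util.List;
import java.util.Optional;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.util.Bytes;

public class CannedGetObserver implements RegionCoprocessor, RegionObserver {
  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> c, Get get,
      List<Cell> results) throws IOException {
    // Answer the Get from the observer and skip the region's own read.
    results.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
        .setRow(get.getRow())
        .setFamily(Bytes.toBytes("cf"))
        .setQualifier(Bytes.toBytes("q"))
        .setTimestamp(System.currentTimeMillis())
        .setType(Cell.Type.Put)
        .setValue(Bytes.toBytes("canned"))
        .build());
    c.bypass(); // default processing is skipped; results above go to the client
  }
}
```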
hbase_RegionCoprocessorHost_postMemStoreCompaction_rdh | /**
* Invoked after in memory compaction.
*/
public void postMemStoreCompaction(HStore store) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.postMemStoreCompaction(this, store);
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_preExists_rdh | /**
* Supports Coprocessor 'bypass'.
*
* @param get
* the Get request
* @return true or false to return to client if bypassing normal operation, or null otherwise
* @exception IOException
* Exception
*/
public Boolean preExists(final Get get) throws IOException {
boolean bypassable = true;
boolean defaultResult = false;
if (coprocEnvironments.isEmpty()) {
return null;
}
return execOperationWithResult(new ObserverOperationWithResult<RegionObserver, Boolean>(regionObserverGetter, defaultResult, bypassable) {
@Override
public Boolean call(RegionObserver observer) throws IOException {
return observer.preExists(this, get, getResult());
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_postDelete_rdh | /**
*
* @param delete
* The Delete object
* @param edit
* The WALEdit object.
* @exception IOException
* Exception
*/
public void postDelete(final Delete delete, final WALEdit edit) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.postDelete(this, delete, edit);
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_preAppend_rdh | /**
* Supports Coprocessor 'bypass'.
*
* @param append
* append object
* @param edit
* The WALEdit object.
* @return result to return to client if default operation should be bypassed, null otherwise
* @throws IOException
* if an error occurred on the coprocessor
*/
public Result preAppend(final Append append, final WALEdit edit) throws IOException {
boolean bypassable = true;
Result defaultResult = null;
if (this.coprocEnvironments.isEmpty()) {
return defaultResult;
}
return execOperationWithResult(new ObserverOperationWithResult<RegionObserver, Result>(regionObserverGetter, defaultResult, bypassable) {
@Override
public Result call(RegionObserver observer) throws IOException {
return observer.preAppend(this, append, edit);
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_postGet_rdh | /**
*
* @param get
* the Get request
* @param results
* the result set
* @exception IOException
* Exception
*/
public void postGet(final Get get, final List<Cell> results) throws IOException {
if (coprocEnvironments.isEmpty()) {
return;
}
execOperation(new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.postGetOp(this, get, results);
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_hasCustomPostScannerFilterRow_rdh | /* Whether any configured CPs override postScannerFilterRow hook */
public boolean hasCustomPostScannerFilterRow() {
return hasCustomPostScannerFilterRow;
} | 3.26 |
hbase_RegionCoprocessorHost_preMemStoreCompactionCompact_rdh | /**
* Invoked before compacting memstore.
*/
public InternalScanner preMemStoreCompactionCompact(HStore store, InternalScanner scanner) throws IOException {
if (coprocEnvironments.isEmpty()) {
return scanner;
}
return execOperationWithResult(new ObserverOperationWithResult<RegionObserver, InternalScanner>(regionObserverGetter, scanner) {
@Override
public InternalScanner call(RegionObserver observer) throws IOException {
return observer.preMemStoreCompactionCompact(this, store, getResult());
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_preOpen_rdh | // ////////////////////////////////////////////////////////////////////////////////////////////////
// Observer operations
// ////////////////////////////////////////////////////////////////////////////////////////////////
/**
* Invoked before a region open.
*
* @throws IOException
* Signals that an I/O exception has occurred.
*/
public void preOpen() throws IOException {
if (coprocEnvironments.isEmpty()) {
return;
}
execOperation(new RegionObserverOperationWithoutResult() {
@Override
public void m0(RegionObserver observer) throws IOException {
observer.preOpen(this);
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_preIncrementAfterRowLock_rdh | /**
* Supports Coprocessor 'bypass'.
*
* @param increment
* increment object
* @return result to return to client if default operation should be bypassed, null otherwise
* @throws IOException
* if an error occurred on the coprocessor
*/
public Result preIncrementAfterRowLock(final Increment increment) throws IOException {
boolean bypassable = true;
Result defaultResult = null;
if (coprocEnvironments.isEmpty()) {
return defaultResult;
}
return execOperationWithResult(new ObserverOperationWithResult<RegionObserver, Result>(regionObserverGetter, defaultResult, bypassable) {
@Override
public Result call(RegionObserver observer) throws IOException {
return observer.preIncrementAfterRowLock(this, increment);
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_preStoreScannerOpen_rdh | /**
* Called before open store scanner for user scan.
*/
public ScanInfo preStoreScannerOpen(HStore store, Scan scan) throws IOException {
if (coprocEnvironments.isEmpty()) {
return store.getScanInfo();
}
CustomizedScanInfoBuilder builder = new CustomizedScanInfoBuilder(store.getScanInfo(), scan);
execOperation(new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.preStoreScannerOpen(this, store, builder);
}
});
return builder.build();
} | 3.26 |
hbase_RegionCoprocessorHost_postStoreFileReaderOpen_rdh | /**
*
* @param fs
filesystem to read from
* @param p
* path to the file
* @param in
* {@link FSDataInputStreamWrapper}
* @param size
* Full size of the file
* @param r
* original reference file. This will be not null only when reading a split file.
* @param reader
* the base reader instance
* @return The reader to use
*/
public StoreFileReader postStoreFileReaderOpen(final FileSystem fs, final Path p, final FSDataInputStreamWrapper in, final long size, final CacheConfig cacheConf, final Reference r, final StoreFileReader reader) throws IOException {
if (this.coprocEnvironments.isEmpty()) {
return reader;
}
return execOperationWithResult(new ObserverOperationWithResult<RegionObserver, StoreFileReader>(regionObserverGetter, reader) {
@Override
public StoreFileReader call(RegionObserver observer) throws IOException {
return observer.postStoreFileReaderOpen(this, fs, p, in, size, cacheConf, r, getResult());
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_preFlushScannerOpen_rdh | /**
 * Invoked before creating the StoreScanner for flush.
*/
public ScanInfo preFlushScannerOpen(HStore store, FlushLifeCycleTracker tracker) throws IOException {
if (coprocEnvironments.isEmpty()) {
return store.getScanInfo();
}
CustomizedScanInfoBuilder builder = new CustomizedScanInfoBuilder(store.getScanInfo());
execOperation(new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.preFlushScannerOpen(this, store, builder, tracker);
}
});
return builder.build();
} | 3.26 |
hbase_RegionCoprocessorHost_testTableCoprocessorAttrs_rdh | /**
* Sanity check the table coprocessor attributes of the supplied schema. Will throw an exception
* if there is a problem.
*/
public static void testTableCoprocessorAttrs(final Configuration conf, final TableDescriptor htd) throws IOException {
String pathPrefix = UUID.randomUUID().toString();
for (TableCoprocessorAttribute attr : getTableCoprocessorAttrsFromSchema(conf, htd)) {
if (attr.getPriority() < 0) {
throw new IOException(("Priority for coprocessor " + attr.getClassName()) + " cannot be less than 0");
}
ClassLoader old = Thread.currentThread().getContextClassLoader();
try {
ClassLoader cl;
if (attr.getPath() != null) {
cl = CoprocessorClassLoader.getClassLoader(attr.getPath(), CoprocessorHost.class.getClassLoader(), pathPrefix, conf);
} else {
cl = CoprocessorHost.class.getClassLoader();
}
Thread.currentThread().setContextClassLoader(cl);
if (cl instanceof CoprocessorClassLoader) {
String[] includedClassPrefixes = null;
if (conf.get(HConstants.CP_HTD_ATTR_INCLUSION_KEY) != null) {
String prefixes = attr.conf.get(HConstants.CP_HTD_ATTR_INCLUSION_KEY);
includedClassPrefixes = prefixes.split(";");
}
((CoprocessorClassLoader) (cl)).loadClass(attr.getClassName(), includedClassPrefixes);
} else {
cl.loadClass(attr.getClassName());
}
} catch (ClassNotFoundException e) {
throw new IOException(("Class " + attr.getClassName()) + " cannot be loaded", e);
} finally {
Thread.currentThread().setContextClassLoader(old);
}
}
} | 3.26 |
hbase_RegionCoprocessorHost_postIncrement_rdh | /**
*
* @param increment
* increment object
* @param result
the result returned by the increment operation
* @param edit
* The WALEdit object.
* @throws IOException
* if an error occurred on the coprocessor
*/
public Result postIncrement(final Increment increment, Result result, final WALEdit edit) throws IOException {
if (this.coprocEnvironments.isEmpty()) {
return result;
}
return execOperationWithResult(new ObserverOperationWithResult<RegionObserver, Result>(regionObserverGetter, result) {
@Override
public Result call(RegionObserver observer) throws IOException {
return observer.postIncrement(this, increment, getResult(), edit);
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_postAppend_rdh | /**
*
* @param append
* Append object
* @param result
* the result returned by the append
* @param edit
* The WALEdit object.
* @throws IOException
* if an error occurred on the coprocessor
*/
public Result postAppend(final Append append, final Result result, final WALEdit edit) throws IOException {
if (this.coprocEnvironments.isEmpty()) {
return result;
}
return execOperationWithResult(new ObserverOperationWithResult<RegionObserver, Result>(regionObserverGetter, result) {
@Override
public Result call(RegionObserver observer) throws IOException {
return observer.postAppend(this, append, result, edit);
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_preDelete_rdh | /**
* Supports Coprocessor 'bypass'.
*
* @param delete
* The Delete object
* @param edit
* The WALEdit object.
* @return true if default processing should be bypassed
* @exception IOException
* Exception
*/
public boolean preDelete(final Delete delete, final WALEdit edit) throws IOException {
if (this.coprocEnvironments.isEmpty()) {
return false;
}
boolean bypassable = true;
return execOperation(new RegionObserverOperationWithoutResult(bypassable) {
@Override
public void call(RegionObserver observer) throws IOException {
observer.preDelete(this, delete, edit);
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_postExists_rdh | /**
*
* @param get
* the Get request
* @param result
* the result returned by the region server
* @return the result to return to the client
* @exception IOException
* Exception
*/
public boolean postExists(final Get get, boolean result) throws IOException {
if (this.coprocEnvironments.isEmpty()) {
return result;
}
return execOperationWithResult(new ObserverOperationWithResult<RegionObserver, Boolean>(regionObserverGetter, result) {
@Override
public Boolean call(RegionObserver observer) throws IOException {
return observer.postExists(this, get, getResult());
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_preScannerOpen_rdh | /**
*
* @param scan
* the Scan specification
* @exception IOException
* Exception
*/
public void preScannerOpen(final Scan scan) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.preScannerOpen(this, scan);
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_postScannerClose_rdh | /**
*
* @exception IOException
* Exception
*/
public void postScannerClose(final InternalScanner s) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.postScannerClose(this, s);
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_prePrepareBulkLoad_rdh | // ///////////////////////////////////////////////////////////////////////////////////////////////
// BulkLoadObserver hooks
// ///////////////////////////////////////////////////////////////////////////////////////////////
public void prePrepareBulkLoad(User user) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new BulkLoadObserverOperation(user) {
@Override
protected void call(BulkLoadObserver observer) throws IOException {
observer.prePrepareBulkLoad(this);
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_postClose_rdh | /**
* Invoked after a region is closed
*
* @param abortRequested
* true if the server is aborting
*/
public void postClose(final boolean abortRequested) {
try {
execOperation(new RegionObserverOperationWithoutResult() {
@Override
public void m1(RegionObserver observer) throws IOException {
observer.postClose(this, abortRequested);
}
@Override
public void postEnvCall() {
shutdown(this.getEnvironment());
}
});
} catch (IOException e) {
LOG.warn(e.toString(), e);
}
} | 3.26 |
hbase_RegionCoprocessorHost_postReplayWALs_rdh | /**
*
* @param info
* the RegionInfo for this region
* @param edits
* the file of recovered edits
* @throws IOException
* Exception
*/
public void postReplayWALs(final RegionInfo info, final Path edits) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.postReplayWALs(this, info, edits);
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_preBulkLoadHFile_rdh | /**
*
* @param familyPaths
* pairs of { CF, file path } submitted for bulk load
*/
public void preBulkLoadHFile(final List<Pair<byte[], String>> familyPaths) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.preBulkLoadHFile(this, familyPaths);
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_prePut_rdh | /**
* Supports Coprocessor 'bypass'.
*
* @param put
* The Put object
* @param edit
* The WALEdit object.
* @return true if default processing should be bypassed
* @exception IOException
* Exception
*/
public boolean prePut(final Put put, final WALEdit edit) throws IOException {
if (coprocEnvironments.isEmpty()) {
return false;
}
boolean v28 = true;
return execOperation(new RegionObserverOperationWithoutResult(v28) {
@Override
public void call(RegionObserver observer) throws IOException {
observer.prePut(this, put, edit);
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_preScannerNext_rdh | /**
*
* @param s
* the scanner
* @param results
* the result set returned by the region server
* @param limit
* the maximum number of results to return
* @return 'has next' indication to client if bypassing default behavior, or null otherwise
* @exception IOException
* Exception
*/
public Boolean preScannerNext(final InternalScanner s, final List<Result> results, final int limit) throws IOException {
boolean bypassable = true;
boolean defaultResult = false;
if (coprocEnvironments.isEmpty()) {
return null;
}
return execOperationWithResult(new ObserverOperationWithResult<RegionObserver, Boolean>(regionObserverGetter, defaultResult, bypassable) {
@Override
public Boolean call(RegionObserver observer) throws IOException {
return observer.preScannerNext(this, s, results, limit, getResult());
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_preClose_rdh | /**
* Invoked before a region is closed
*
* @param abortRequested
* true if the server is aborting
*/
public void preClose(final boolean abortRequested) throws IOException {
execOperation(new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.preClose(this, abortRequested);
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_preMemStoreCompaction_rdh | /**
* Invoked before in memory compaction.
*/
public void preMemStoreCompaction(HStore store) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.preMemStoreCompaction(this, store);
}
});
} | 3.26 |
hbase_RegionCoprocessorHost_postCompactSelection_rdh | /**
* Called after the {@link HStoreFile}s to be compacted have been selected from the available
* candidates.
*
* @param store
* The store where compaction is being requested
* @param selected
* The store files selected to compact
* @param tracker
* used to track the life cycle of a compaction
* @param request
* the compaction request
* @param user
* the user
*/
public void postCompactSelection(final HStore store, final List<HStoreFile> selected, final CompactionLifeCycleTracker tracker, final CompactionRequest request, final User user) throws IOException {
if (coprocEnvironments.isEmpty()) {
return;
}
execOperation(new RegionObserverOperationWithoutResult(user) {
@Override
public void call(RegionObserver observer) throws IOException {
observer.postCompactSelection(this, store, selected, tracker, request);
}
});
} | 3.26 |
hbase_WALKeyImpl_addClusterId_rdh | /**
* Marks that the cluster with the given clusterId has consumed the change
*/
public void addClusterId(UUID clusterId) {
if (!clusterIds.contains(clusterId)) {
clusterIds.add(clusterId);
}
} | 3.26 |
hbase_WALKeyImpl_getWriteTime_rdh | /**
* Returns the write time
*/
@Override
public long getWriteTime() {
return this.writeTime;
} | 3.26 |
hbase_WALKeyImpl_setOrigLogSeqNum_rdh | /**
* Used to set original sequenceId for WALKeyImpl during WAL replay
*/
public void setOrigLogSeqNum(final long sequenceId) {
this.origLogSeqNum = sequenceId;
} | 3.26 |
hbase_WALKeyImpl_m0_rdh | /**
* Drop this instance's tablename byte array and instead hold a reference to the provided
* tablename. This is not meant to be a general purpose setter - it's only used to collapse
* references to conserve memory.
*/
void m0(TableName tablename) {
// We should not use this as a setter - only to swap
// in a new reference to the same table name.
assert tablename.equals(this.tablename);
this.tablename = tablename;
} | 3.26 |
hbase_WALKeyImpl_setSequenceId_rdh | // For deserialization. DO NOT USE. See setWriteEntry below.
@InterfaceAudience.Private
protected void setSequenceId(long sequenceId) {
this.sequenceId = sequenceId;
} | 3.26 |
hbase_WALKeyImpl_internEncodedRegionName_rdh | /**
* Drop this instance's region name byte array and instead hold a reference to the provided region
* name. This is not meant to be a general purpose setter - it's only used to collapse references
* to conserve memory.
*/
void internEncodedRegionName(byte[] encodedRegionName) {
// We should not use this as a setter - only to swap
// in a new reference to the same region name.
assert Bytes.equals(this.encodedRegionName, encodedRegionName);
this.encodedRegionName = encodedRegionName;
} | 3.26 |
hbase_WALKeyImpl_getNonce_rdh | /**
* Returns The nonce
*/
@Override
public long getNonce() {
return nonce;
} | 3.26 |
hbase_WALKeyImpl_getEncodedRegionName_rdh | /**
* Returns encoded region name
*/
@Override
public byte[] getEncodedRegionName() {
return encodedRegionName;
} | 3.26 |
hbase_WALKeyImpl_getWriteEntry_rdh | /**
* Use it to complete mvcc transaction. This WALKeyImpl was part of (the transaction is started
* when you call append; see the comment on FSHLog#append). To complete call
 * {@link MultiVersionConcurrencyControl#complete(MultiVersionConcurrencyControl.WriteEntry)} or
 * {@link MultiVersionConcurrencyControl#completeAndWait(MultiVersionConcurrencyControl.WriteEntry)}
*
* @return A WriteEntry gotten from local WAL subsystem.
* @see #setWriteEntry(MultiVersionConcurrencyControl.WriteEntry)
*/
public WriteEntry getWriteEntry() {
return this.writeEntry;
} | 3.26 |
hbase_WALKeyImpl_getOrigLogSeqNum_rdh | /**
* Return a positive long if current WALKeyImpl is created from a replay edit; a replay edit is an
* edit that came in when replaying WALs of a crashed server.
*
* @return original sequence number of the WALEdit
*/
@Override
public long getOrigLogSeqNum() {
return this.origLogSeqNum;
} | 3.26 |
hbase_WALKeyImpl_getNonceGroup_rdh | /**
* Returns The nonce group
*/
@Override
public long getNonceGroup() {
return nonceGroup;
} | 3.26 |
hbase_WALKeyImpl_getTableName_rdh | /**
* Returns table name
*/
@Override
public TableName getTableName() {
return tablename;
} | 3.26 |
hbase_WALKeyImpl_getSequenceId_rdh | /**
* SequenceId is only available post WAL-assign. Calls before this will get you a
* {@link SequenceId#NO_SEQUENCE_ID}. See the comment on FSHLog#append and #getWriteNumber in this
* method for more on when this sequenceId comes available.
*
* @return long the new assigned sequence number
*/
@Override
public long getSequenceId() {
return this.sequenceId;
} | 3.26 |
hbase_WALKeyImpl_getClusterIds_rdh | /**
* Returns the set of cluster Ids that have consumed the change
*/
public List<UUID> getClusterIds() {
return clusterIds;
} | 3.26 |
hbase_ReplicationSourceManager_m1_rdh | /**
* Get the ReplicationPeers used by this ReplicationSourceManager
*
* @return the ReplicationPeers used by this ReplicationSourceManager
*/
public ReplicationPeers m1() {
return this.replicationPeers;
} | 3.26 |
hbase_ReplicationSourceManager_claimQueue_rdh | /**
* Claim a replication queue.
* <p/>
* We add a flag to indicate whether we are called by ReplicationSyncUp. For normal claiming queue
* operation, we are the last step of a SCP, so we can assume that all the WAL files are under
* oldWALs directory. But for ReplicationSyncUp, we may want to claim the replication queue for a
* region server which has not been processed by SCP yet, so we still need to look at its WALs
* directory.
*
* @param queueId
* the replication queue id we want to claim
* @param syncUp
* whether we are called by ReplicationSyncUp
*/
void claimQueue(ReplicationQueueId queueId, boolean syncUp) {
// Wait a bit before transferring the queues, we may be shutting down.
// This sleep may not be enough in some cases.
try {
Thread.sleep(f0 + ((long) (ThreadLocalRandom.current().nextFloat() * f0)));
} catch (InterruptedException e) {
LOG.warn("Interrupted while waiting before transferring a queue.");
Thread.currentThread().interrupt();
}
// We try to lock that rs' queue directory
if (server.isStopped()) {
LOG.info("Not transferring queue since we are shutting down");
return;
}
// After claim the queues from dead region server, we will skip to start the
// RecoveredReplicationSource if the peer has been removed. but there's possible that remove a
// peer with peerId = 2 and add a peer with peerId = 2 again during failover. So we need to get
// a copy of the replication peer first to decide whether we should start the
// RecoveredReplicationSource. If the latest peer is not the old peer, we should also skip to
// start the RecoveredReplicationSource, Otherwise the rs will abort (See HBASE-20475).
String peerId = queueId.getPeerId();
ReplicationPeerImpl oldPeer = replicationPeers.getPeer(peerId);
if (oldPeer == null) {
LOG.info("Not transferring queue since the replication peer {} for queue {} does not exist", peerId, queueId);return;
}
Map<String, ReplicationGroupOffset> offsets;
try {
offsets = queueStorage.claimQueue(queueId, server.getServerName());
} catch (ReplicationException e) {
LOG.error("ReplicationException: cannot claim dead region ({})'s replication queue", queueId.getServerName(), e);
server.abort("Failed to claim queue from dead regionserver.", e);
return;
}
if (offsets.isEmpty()) {
// someone else claimed the queue
return;
}
ServerName sourceRS = queueId.getServerWALsBelongTo();
ReplicationQueueId claimedQueueId = queueId.claim(server.getServerName());
ReplicationPeerImpl peer = replicationPeers.getPeer(peerId);
if ((peer == null) || (peer != oldPeer)) {
LOG.warn("Skipping failover for peer {} of node {}, peer is null", peerId, sourceRS);
deleteQueue(claimedQueueId);
return;
}
ReplicationSourceInterface src;
try {
src = createSource(new ReplicationQueueData(claimedQueueId, ImmutableMap.copyOf(offsets)), peer);
} catch (IOException e) {
LOG.error("Can not create replication source for peer {} and queue {}", peerId, claimedQueueId, e);
server.abort("Failed to create replication source after claiming queue.", e); return;
}
PriorityQueue<Path> v95;
try {
v95 = getWALFilesToReplicate(sourceRS, syncUp, offsets);
} catch (IOException e) {
LOG.error("Can not list wal files for peer {} and queue {}", peerId, queueId, e);
server.abort("Can not list wal files after claiming queue.", e);
return;
}
// synchronized on oldsources to avoid adding recovered source for the to-be-removed peer
synchronized(oldsources) {
addRecoveredSource(src, oldPeer, claimedQueueId, v95);
}
} | 3.26 |
hbase_ReplicationSourceManager_getSources_rdh | /**
* Get a list of all the normal sources of this rs
*
* @return list of all normal sources
*/
public List<ReplicationSourceInterface> getSources() {
return new ArrayList<>(this.sources.values());
} | 3.26 |
hbase_ReplicationSourceManager_releaseWALEntryBatchBufferQuota_rdh | /**
* To release the buffer quota of {@link WALEntryBatch} which acquired by
* {@link ReplicationSourceManager#acquireWALEntryBufferQuota}.
*
* @return the released buffer quota size.
*/
long releaseWALEntryBatchBufferQuota(WALEntryBatch walEntryBatch) {
long usedBufferSize = walEntryBatch.getUsedBufferSize();
if (usedBufferSize > 0) {
this.releaseBufferQuota(usedBufferSize);
}
return usedBufferSize;
}
/**
* Add the size to {@link ReplicationSourceManager#totalBufferUsed} and check if it exceeds
* {@link ReplicationSourceManager#totalBufferLimit}.
*
* @return true if {@link ReplicationSourceManager#totalBufferUsed} exceeds
{@link ReplicationSourceManager#totalBufferLimit} | 3.26 |
hbase_ReplicationSourceManager_join_rdh | /**
* Terminate the replication on this region server
*/
public void join() {
this.executor.shutdown();
for (ReplicationSourceInterface source : this.sources.values()) {source.terminate("Region server is closing");
}
synchronized(oldsources) {
for (ReplicationSourceInterface source : this.oldsources) {
source.terminate("Region server is closing");
}
}
} | 3.26 |
hbase_ReplicationSourceManager_logPositionAndCleanOldLogs_rdh | /**
* This method will log the current position to storage. And also clean old logs from the
* replication queue.
*
* @param source
* the replication source
* @param entryBatch
* the wal entry batch we just shipped
*/
public void logPositionAndCleanOldLogs(ReplicationSourceInterface source, WALEntryBatch entryBatch) {
String walName = entryBatch.getLastWalPath().getName();
String walPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(walName);
// if end of file, we just set the offset to -1 so we know that this file has already been fully
// replicated, otherwise we need to compare the file length
ReplicationGroupOffset offset = new ReplicationGroupOffset(walName, entryBatch.isEndOfFile() ? -1 : entryBatch.getLastWalPosition());
interruptOrAbortWhenFail(() -> this.queueStorage.setOffset(source.getQueueId(), walPrefix, offset, entryBatch.getLastSeqIds()));
cleanOldLogs(walName, entryBatch.isEndOfFile(), source);
} | 3.26 |