name (stringlengths 12-178) | code_snippet (stringlengths 8-36.5k) | score (float64 3.26-3.68) |
---|---|---|
hbase_ReplicationSourceManager_postLogRoll_rdh | // public because we call it in TestReplicationEmptyWALRecovery
public void postLogRoll(Path newLog) throws IOException {
String logName = newLog.getName();
String logPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(logName);
// synchronized on latestPaths to avoid a newly opened source missing the new log
synchronized(this.latestPaths) {
// synchronized on walsById to avoid race with cleanOldLogs
synchronized(this.walsById) {
// Update walsById map
for (Map.Entry<ReplicationQueueId, Map<String, NavigableSet<String>>> v67 : this.walsById.entrySet()) {
ReplicationQueueId v68 = v67.getKey();
String peerId = v68.getPeerId();
Map<String, NavigableSet<String>> v70 = v67.getValue();
boolean existingPrefix = false;
for (Map.Entry<String, NavigableSet<String>> walsEntry : v70.entrySet()) {
SortedSet<String> wals = walsEntry.getValue();
if (this.sources.isEmpty()) {
// If there are no slaves, we don't need to keep the old wals since
// we only consider the last one when a new slave comes in
wals.clear();
}
if (logPrefix.equals(walsEntry.getKey())) {
wals.add(logName);
existingPrefix = true;
}
}
if (!existingPrefix) {
// The new log belongs to a new group, add it into this peer
LOG.debug("Start tracking logs for wal group {} for peer {}", logPrefix, peerId);
NavigableSet<String> wals = new TreeSet<>();
wals.add(logName);
v70.put(logPrefix, wals);
}
}
}
// Add to latestPaths
latestPaths.put(logPrefix, newLog);
}
// This only updates the sources we own, not the recovered ones
for (ReplicationSourceInterface v75 : this.sources.values()) {
v75.enqueueLog(newLog);
LOG.trace("Enqueued {} to source {} while performing postLogRoll operation.", newLog, v75.getQueueId());
}
} | 3.26 |
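A compact, hypothetical sketch of the walsById bookkeeping the postLogRoll snippet above performs: wal names are grouped by their wal-group prefix, and a freshly rolled wal is appended to its group, creating the group on first sight. The prefix split below is a simplification of AbstractFSWALProvider.getWALPrefixFromWALName, and all names are illustrative.

```java
import java.util.HashMap;
import java.util.Map;
import java.util.NavigableSet;
import java.util.TreeSet;

public class WalGroupTracker {
  // mirrors the per-peer Map<String, NavigableSet<String>> stored in walsById
  private final Map<String, NavigableSet<String>> walsByGroup = new HashMap<>();

  void onLogRoll(String walName) {
    // crude stand-in for AbstractFSWALProvider.getWALPrefixFromWALName: strip the trailing timestamp
    int lastDot = walName.lastIndexOf('.');
    String prefix = lastDot > 0 ? walName.substring(0, lastDot) : walName;
    // create the group on first sight, then track the new wal under it
    walsByGroup.computeIfAbsent(prefix, p -> new TreeSet<>()).add(walName);
  }
}
```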
hbase_ReplicationSourceManager_cleanOldLogs_rdh | /**
* Cleans a log file and all older logs from replication queue. Called when we are sure that a log
* file is closed and has no more entries.
*
* @param log
* Path to the log
* @param inclusive
* whether we should also remove the given log file
* @param source
* the replication source
*/
void cleanOldLogs(String log, boolean inclusive, ReplicationSourceInterface source) {
String logPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(log);
if (source.isRecovered()) {
NavigableSet<Path> wals = walsByIdRecoveredQueues.get(source.getQueueId()).get(logPrefix);
if (wals != null) {
// here we just want to compare the timestamp, so it is OK to just create a fake WAL path
NavigableSet<String> walsToRemove = wals.headSet(new Path(oldLogDir, log), inclusive).stream().map(Path::getName).collect(Collectors.toCollection(TreeSet::new));
if (walsToRemove.isEmpty()) {
return;
}
cleanOldLogs(walsToRemove, source);
walsToRemove.clear();
}
} else {
NavigableSet<String> wals;
NavigableSet<String> walsToRemove;
// synchronized on walsById to avoid race with postLogRoll
synchronized(this.walsById) {
wals = walsById.get(source.getQueueId()).get(logPrefix);
if (wals == null) {
return;
}
walsToRemove = wals.headSet(log, inclusive);
if (walsToRemove.isEmpty()) {
return;
}
walsToRemove = new TreeSet<>(walsToRemove);
}
// cleanOldLogs may spend some time, especially for sync replication where we may want to
// remove remote wals while the remote cluster may already be down, so we do it outside
// the lock to avoid blocking preLogRoll
cleanOldLogs(walsToRemove, source);
// now let's remove the files in the set
synchronized(this.walsById) {
wals.removeAll(walsToRemove);
}
}
} | 3.26 |
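A minimal, self-contained illustration of the NavigableSet.headSet call that cleanOldLogs relies on, under the assumption that wal names sort lexicographically by their trailing timestamp; the wal names below are made up. Copying the head set and calling removeAll afterwards mirrors the "copy under lock, clean outside the lock, then remove" flow described in the comments above.

```java
import java.util.NavigableSet;
import java.util.TreeSet;

public class HeadSetExample {
  public static void main(String[] args) {
    NavigableSet<String> wals = new TreeSet<>();
    wals.add("rs1%2C16020.1001");
    wals.add("rs1%2C16020.1002");
    wals.add("rs1%2C16020.1003");

    // everything up to and including ".1002" is eligible for cleanup
    NavigableSet<String> walsToRemove = new TreeSet<>(wals.headSet("rs1%2C16020.1002", true));
    System.out.println(walsToRemove); // [rs1%2C16020.1001, rs1%2C16020.1002]

    // after the (possibly slow) cleanup work, drop the cleaned wals from the tracking set
    wals.removeAll(walsToRemove);
    System.out.println(wals); // [rs1%2C16020.1003]
  }
}
```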
hbase_ReplicationSourceManager_getWALs_rdh | /**
* Get an unmodifiable view of the wals of the normal sources on this rs
*
* @return a map from replication queue id to wal group to its sorted set of wal names
*/
@RestrictedApi(explanation = "Should only be called in tests", link = "", allowedOnPath = ".*/src/test/.*")
public Map<ReplicationQueueId, Map<String, NavigableSet<String>>> getWALs() {
return Collections.unmodifiableMap(walsById);
} | 3.26 |
hbase_ReplicationSourceManager_getOldSources_rdh | /**
* Get a list of all the recovered sources of this rs
*
* @return list of all recovered sources
*/
public List<ReplicationSourceInterface> getOldSources() {
return this.oldsources;
} | 3.26 |
hbase_ReplicationSourceManager_getFs_rdh | /**
* Get the handle on the local file system
*
* @return Handle on the local file system
*/
public FileSystem getFs() {
return this.fs;
} | 3.26 |
hbase_ReplicationSourceManager_getStats_rdh | /**
* Get a string representation of all the sources' metrics
*/
public String getStats() {
StringBuilder stats = new StringBuilder();
// Print stats that apply across all Replication Sources
stats.append("Global stats: ");
stats.append("WAL Edits Buffer Used=").append(m0()).append("B, Limit=").append(getTotalBufferLimit()).append("B\n");
for (ReplicationSourceInterface source : this.sources.values()) {
stats.append(("Normal source for cluster " + source.getPeerId()) + ": ");
stats.append(source.getStats() + "\n");
}
for (ReplicationSourceInterface oldSource : oldsources) {
stats.append(("Recovered source for cluster/machine(s) " + oldSource.getPeerId()) + ": ");
stats.append(oldSource.getStats() + "\n");
}return stats.toString();
} | 3.26 |
hbase_ReplicationSourceManager_refreshSources_rdh | /**
* Close the previous replication sources of this peer id and open new sources to apply the new
* replication state changes or new replication config changes. Here we don't need to change the
* replication queue storage; we only need to enqueue all logs to the new replication source
*
* @param peerId
* the id of the replication peer
*/
public void refreshSources(String peerId) throws ReplicationException, IOException {
String terminateMessage = ("Peer " + peerId) + " state or config changed. Will close the previous replication source and open a new one";
ReplicationPeer peer = replicationPeers.getPeer(peerId);
ReplicationQueueId queueId = new ReplicationQueueId(server.getServerName(), peerId);
ReplicationSourceInterface src;
// synchronized on latestPaths to avoid missing the new log
synchronized(this.latestPaths) {
ReplicationSourceInterface toRemove = this.sources.remove(peerId);
if (toRemove != null) {
LOG.info("Terminate replication source for " + toRemove.getPeerId());
// Do not clear metrics
toRemove.terminate(terminateMessage, null, false);
}
src = createRefreshedSource(queueId, peer);
this.sources.put(peerId, src);
for (NavigableSet<String> walsByGroup : walsById.get(queueId).values()) {
walsByGroup.forEach(wal -> src.enqueueLog(new Path(this.logDir, wal)));
}
}
LOG.info("Startup replication source for " + src.getPeerId());
src.startup();
List<ReplicationSourceInterface> toStartup = new ArrayList<>();
// synchronized on oldsources to avoid race with NodeFailoverWorker
synchronized(this.oldsources) {
List<ReplicationQueueId> oldSourceQueueIds = new ArrayList<>();
for (Iterator<ReplicationSourceInterface> iter = this.oldsources.iterator(); iter.hasNext();) {
ReplicationSourceInterface oldSource = iter.next();
if (oldSource.getPeerId().equals(peerId)) {
oldSourceQueueIds.add(oldSource.getQueueId());
oldSource.terminate(terminateMessage);
iter.remove();
}
}
for (ReplicationQueueId oldSourceQueueId : oldSourceQueueIds) {
ReplicationSourceInterface recoveredReplicationSource = createRefreshedSource(oldSourceQueueId, peer);
this.oldsources.add(recoveredReplicationSource);
for (NavigableSet<Path> walsByGroup : walsByIdRecoveredQueues.get(oldSourceQueueId).values()) {
walsByGroup.forEach(wal -> recoveredReplicationSource.enqueueLog(wal));
}
toStartup.add(recoveredReplicationSource);
}
}
for (ReplicationSourceInterface replicationSource : toStartup) {
replicationSource.startup();
}
} | 3.26 |
hbase_ReplicationSourceManager_getTotalBufferLimit_rdh | /**
* Returns the maximum size in bytes of edits held in memory which are pending replication across
* all sources inside this RegionServer.
*/
public long getTotalBufferLimit() {
return totalBufferLimit;
} | 3.26 |
hbase_ReplicationSourceManager_addPeer_rdh | /**
* <ol>
* <li>Add peer to replicationPeers</li>
* <li>Add the normal source and related replication queue</li>
* <li>Add HFile Refs</li>
* </ol>
*
* @param peerId
* the id of replication peer
*/
public void addPeer(String peerId) throws IOException {
boolean added = false;
try {
added = this.replicationPeers.addPeer(peerId);
} catch (ReplicationException e) {
throw new IOException(e);
}
if (added) {
addSource(peerId, false);
}
} | 3.26 |
hbase_ReplicationSourceManager_drainSources_rdh | /**
* <p>
* This is used when we transit a sync replication peer to {@link SyncReplicationState#STANDBY}.
* </p>
* <p>
* When transiting to {@link SyncReplicationState#STANDBY}, we can remove all the pending wal
* files for a replication peer as we do not need to replicate them any more. And this is
* necessary, otherwise when we transit back to {@link SyncReplicationState#DOWNGRADE_ACTIVE}
* later, the stale data will be replicated again and cause inconsistency.
* </p>
* <p>
* See HBASE-20426 for more details.
* </p>
*
* @param peerId
* the id of the sync replication peer
*/
public void drainSources(String peerId) throws IOException, ReplicationException {
String terminateMessage = ("Sync replication peer " + peerId) + " is transiting to STANDBY. Will close the previous replication source and open a new one";ReplicationPeer peer = replicationPeers.getPeer(peerId);
assert peer.getPeerConfig().isSyncReplication();
ReplicationQueueId queueId = new ReplicationQueueId(server.getServerName(), peerId);
// TODO: use empty initial offsets for now, revisit when adding support for sync replication
ReplicationSourceInterface src = createSource(new ReplicationQueueData(queueId, ImmutableMap.of()), peer);
// synchronized here to avoid race with postLogRoll where we add new log to source and also
// walsById.
ReplicationSourceInterface toRemove;
ReplicationQueueData queueData;
synchronized(latestPaths) {
// Here we make a copy of all the remaining wal files and then delete them from the
// replication queue storage after releasing the lock. It is not safe to just remove the old
// map from walsById since later we may fail to update the replication queue storage, and when
// we retry next time, we cannot know the wal files that need to be set to the replication
// queue storage
ImmutableMap.Builder<String, ReplicationGroupOffset> builder = ImmutableMap.builder();
synchronized(walsById) {
walsById.get(queueId).forEach((group, wals) -> {
if (!wals.isEmpty()) {
builder.put(group, new ReplicationGroupOffset(wals.last(), -1));
}
});
}
queueData = new ReplicationQueueData(queueId, builder.build());
src = createSource(queueData, peer);
toRemove = sources.put(peerId, src);
if (toRemove != null) {
LOG.info("Terminate replication source for " + toRemove.getPeerId());
toRemove.terminate(terminateMessage);
toRemove.getSourceMetrics().clear();
}
}
for (Map.Entry<String, ReplicationGroupOffset> entry : queueData.getOffsets().entrySet()) {
queueStorage.setOffset(queueId, entry.getKey(), entry.getValue(), Collections.emptyMap());
}
LOG.info("Startup replication source for " + src.getPeerId());src.startup();
synchronized(walsById) {
Map<String, NavigableSet<String>> wals = walsById.get(queueId);
queueData.getOffsets().forEach((group, offset) -> {
NavigableSet<String> v30 = wals.get(group);
if (v30 != null) {
v30.headSet(offset.getWal(), true).clear();
}
});
}
// synchronized on oldsources to avoid race with NodeFailoverWorker. Since NodeFailoverWorker is
// a background task, we will delete the file from replication queue storage under the lock to
// simplify the logic.
synchronized(this.oldsources) {
for (Iterator<ReplicationSourceInterface> iter = oldsources.iterator(); iter.hasNext();) {
ReplicationSourceInterface oldSource = iter.next();
if (oldSource.getPeerId().equals(peerId)) {
ReplicationQueueId oldSourceQueueId = oldSource.getQueueId();
oldSource.terminate(terminateMessage);
oldSource.getSourceMetrics().clear();
queueStorage.removeQueue(oldSourceQueueId);
walsByIdRecoveredQueues.remove(oldSourceQueueId);
iter.remove();
}
}
}
} | 3.26 |
hbase_ReplicationSourceManager_getLogDir_rdh | /**
* Get the directory where wals are stored by their RSs
*
* @return the directory where wals are stored by their RSs
*/
public Path getLogDir() {
return this.logDir;
} | 3.26 |
hbase_ReplicationSourceManager_getSource_rdh | /**
* Get the normal source for a given peer
*
* @return the normal source for the given peer if it exists, otherwise null.
*/
public ReplicationSourceInterface getSource(String peerId) {
return this.sources.get(peerId);
} | 3.26 |
hbase_ReplicationSourceManager_getWALFilesToReplicate_rdh | // sorted from oldest to newest
private PriorityQueue<Path> getWALFilesToReplicate(ServerName sourceRS, boolean syncUp, Map<String, ReplicationGroupOffset> offsets) throws IOException {
List<Path> walFiles = AbstractFSWALProvider.getArchivedWALFiles(conf, sourceRS, URLEncoder.encode(sourceRS.toString(), StandardCharsets.UTF_8.name()));
if (syncUp) {
// we also need to list WALs directory for ReplicationSyncUp
walFiles.addAll(AbstractFSWALProvider.getWALFiles(conf, sourceRS));
}
PriorityQueue<Path> walFilesPQ = new PriorityQueue<>(AbstractFSWALProvider.TIMESTAMP_COMPARATOR);
// sort the wal files and also filter out replicated files
for (Path file : walFiles) {
String walGroupId = AbstractFSWALProvider.getWALPrefixFromWALName(file.getName());
ReplicationGroupOffset groupOffset = offsets.get(walGroupId);
if (shouldReplicate(groupOffset, file.getName())) {
walFilesPQ.add(file);
} else {
LOG.debug("Skip enqueuing log {} because it is before the start offset {}", file.getName(), groupOffset);
}}
return walFilesPQ;
} | 3.26 |
hbase_ReplicationSourceManager_addSource_rdh | /**
* Add a normal source for the given peer on this region server. Meanwhile, add new replication
* queue to storage. For the newly added peer, we only need to enqueue the latest log of each wal
* group and do replication.
* <p/>
* We add an {@code init} parameter to indicate whether this is part of the initialization process.
* If so, we should skip adding the replication queues as this may introduce a deadlock between
* region server start up and the hbase:replication table coming online.
*
* @param peerId
* the id of the replication peer
* @param init
* whether this call is part of the initialization process
* @return the source that was created
*/
void addSource(String peerId, boolean init) throws IOException {
ReplicationPeer peer = replicationPeers.getPeer(peerId);
if (ReplicationUtils.LEGACY_REGION_REPLICATION_ENDPOINT_NAME.equals(peer.getPeerConfig().getReplicationEndpointImpl())) {
// we do not use this endpoint for region replication any more, see HBASE-26233
LOG.info("Legacy region replication peer found, skip adding: {}", peer.getPeerConfig());
return;
}
ReplicationQueueId queueId = new ReplicationQueueId(server.getServerName(), peerId);
ReplicationSourceInterface src = createSource(new ReplicationQueueData(queueId, ImmutableMap.of()), peer);
// synchronized on latestPaths to avoid missing the new log
synchronized(this.latestPaths) {
this.sources.put(peerId, src);
Map<String, NavigableSet<String>> walsByGroup = new HashMap<>();
this.walsById.put(queueId, walsByGroup);
// Add the latest wal to that source's queue
if (!latestPaths.isEmpty()) {
for (Map.Entry<String, Path> walPrefixAndPath : latestPaths.entrySet()) {
Path walPath = walPrefixAndPath.getValue();
NavigableSet<String> wals = new TreeSet<>();
wals.add(walPath.getName());
walsByGroup.put(walPrefixAndPath.getKey(), wals);
if (!init) {
// Abort the RS and throw an exception to make the add peer request fail
// Ideally we'd better use the current file size as offset so we can skip replicating
// the data before adding replication peer, but the problem is that the file may not end
// at a valid entry's ending, and the current WAL Reader implementation can not deal
// with reading from the middle of a WAL entry. Can improve later.
abortAndThrowIOExceptionWhenFail(() -> this.queueStorage.setOffset(queueId, walPrefixAndPath.getKey(), new ReplicationGroupOffset(walPath.getName(), 0), Collections.emptyMap()));
}
src.enqueueLog(walPath);
LOG.trace("Enqueued {} to source {} during source creation.", walPath, src.getQueueId());
}
}
}
ReplicationPeerConfig peerConfig = peer.getPeerConfig();
if (peerConfig.isSyncReplication()) {
syncReplicationPeerMappingManager.add(peer.getId(), peerConfig);
}
src.startup();
} | 3.26 |
hbase_ReplicationSourceManager_removeSource_rdh | /**
* Clear the metrics and related replication queue of the specified old source
*
* @param src
* source to clear
*/
void removeSource(ReplicationSourceInterface src) {
LOG.info("Done with the queue " + src.getQueueId());
this.sources.remove(src.getPeerId());
// Delete queue from storage and memory
deleteQueue(src.getQueueId());
this.walsById.remove(src.getQueueId());
} | 3.26 |
hbase_ReplicationSourceManager_interruptOrAbortWhenFail_rdh | /**
* Refresh replication source will terminate the old source first, then the source thread will be
* interrupted. Need to handle it instead of abort the region server.
*/
private void interruptOrAbortWhenFail(ReplicationQueueOperation op) {
try {
op.exec();
} catch (ReplicationException e) {
if ((((e.getCause() != null) && (e.getCause() instanceof KeeperException.SystemErrorException)) && (e.getCause().getCause() != null)) && (e.getCause().getCause() instanceof InterruptedException)) {
// ReplicationRuntimeException(a RuntimeException) is thrown out here. The reason is
// that thread is interrupted deep down in the stack, it should pass the following
// processing logic and propagate to the most top layer which can handle this exception
// properly. In this specific case, the top layer is ReplicationSourceShipper#run().
throw new ReplicationRuntimeException("Thread is interrupted, the replication source may be terminated", e.getCause().getCause());
}
server.abort("Failed to operate on replication queue", e);
}
} | 3.26 |
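The interruptOrAbortWhenFail snippet above inspects a two-level cause chain to detect interruption. A small, generic sketch of that idea, walking an arbitrary cause chain with plain JDK exception types rather than the ZooKeeper/replication classes used above:

```java
public final class CauseInspector {
  private CauseInspector() {
  }

  /** Returns true if any cause in the chain is an InterruptedException. */
  public static boolean causedByInterruption(Throwable t) {
    for (Throwable cause = t.getCause(); cause != null; cause = cause.getCause()) {
      if (cause instanceof InterruptedException) {
        return true; // interruption found somewhere down the chain
      }
    }
    return false;
  }
}
```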
hbase_ReplicationSourceManager_getOldLogDir_rdh | /**
* Get the directory where wals are archived
*
* @return the directory where wals are archived
*/
public Path getOldLogDir() {
return this.oldLogDir;
} | 3.26 |
hbase_ReplicationSourceManager_removeRecoveredSource_rdh | /**
* Clear the metrics and related replication queue of the specified old source
*
* @param src
* source to clear
*/
private boolean removeRecoveredSource(ReplicationSourceInterface src) {
if (!this.oldsources.remove(src)) {
return false;
}
LOG.info("Done with the recovered queue {}", src.getQueueId());
// Delete queue from storage and memory
deleteQueue(src.getQueueId());
this.walsByIdRecoveredQueues.remove(src.getQueueId());
return true;
} | 3.26 |
hbase_ReplicationSourceManager_shouldReplicate_rdh | /**
* Check whether we should replicate the given {@code wal}.
*
* @param wal
* the file name of the wal
* @return {@code true} means we should replicate the given {@code wal}, otherwise {@code false}.
*/
private boolean shouldReplicate(ReplicationGroupOffset offset, String wal) {
// skip replicating meta wals
if (AbstractFSWALProvider.isMetaFile(wal)) {
return false;
}
return ReplicationOffsetUtil.shouldReplicate(offset, wal);
} | 3.26 |
hbase_ReplicationSourceManager_deleteQueue_rdh | /**
* Delete a complete queue of wals associated with a replication source
*
* @param queueId
* the id of replication queue to delete
*/
private void deleteQueue(ReplicationQueueId queueId) {
abortWhenFail(() -> this.queueStorage.removeQueue(queueId));
} | 3.26 |
hbase_ReplicationSourceManager_removePeer_rdh | /**
* <ol>
* <li>Remove peer for replicationPeers</li>
* <li>Remove all the recovered sources for the specified id and related replication queues</li>
* <li>Remove the normal source and related replication queue</li>
* <li>Remove HFile Refs</li>
* </ol>
*
* @param peerId
* the id of the replication peer
*/
public void removePeer(String peerId) {
ReplicationPeer peer = replicationPeers.removePeer(peerId);
String terminateMessage = "Replication stream was removed by a user";
List<ReplicationSourceInterface> oldSourcesToDelete = new ArrayList<>();
// synchronized on oldsources to avoid adding recovered source for the to-be-removed peer
// see NodeFailoverWorker.run
synchronized(this.oldsources) {
// First close all the recovered sources for this peer
for (ReplicationSourceInterface src : oldsources) {
if (peerId.equals(src.getPeerId())) {
oldSourcesToDelete.add(src);
}
}
for (ReplicationSourceInterface src : oldSourcesToDelete) {
src.terminate(terminateMessage);
removeRecoveredSource(src);
}
}
LOG.info("Number of deleted recovered sources for {}: {}", peerId, oldSourcesToDelete.size());
// Now close the normal source for this peer
ReplicationSourceInterface srcToRemove = this.sources.get(peerId);
if (srcToRemove != null) {
srcToRemove.terminate(terminateMessage);
removeSource(srcToRemove);
}
ReplicationPeerConfig peerConfig = peer.getPeerConfig();
if (peerConfig.isSyncReplication()) {
syncReplicationPeerMappingManager.remove(peerId, peerConfig);
}
} | 3.26 |
hbase_ReplicationSourceManager_acquireWALEntryBufferQuota_rdh | /**
* Acquire the buffer quota for {@link Entry} which is added to {@link WALEntryBatch}.
*
* @param entry
* the wal entry which is added to {@link WALEntryBatch} and should acquire buffer
* quota.
* @return true if we should clear buffer and push all
*/
boolean acquireWALEntryBufferQuota(WALEntryBatch walEntryBatch, Entry entry) {
long entrySize = walEntryBatch.incrementUsedBufferSize(entry);
return this.acquireBufferQuota(entrySize);
} | 3.26 |
hbase_ReplicationSourceManager_init_rdh | /**
* Adds a normal source per registered peer cluster.
*/
void init() throws IOException {
for (String id : this.replicationPeers.getAllPeerIds()) {
addSource(id, true);
}
} | 3.26 |
hbase_ReplicationSourceManager_createSource_rdh | /**
*
* @return a new 'classic' user-space replication source.
* @param queueId
* the id of the replication queue to associate the ReplicationSource with.
* @see #createCatalogReplicationSource(RegionInfo) for creating a ReplicationSource for meta.
*/
private ReplicationSourceInterface createSource(ReplicationQueueData queueData, ReplicationPeer replicationPeer) throws IOException {
ReplicationSourceInterface src = ReplicationSourceFactory.create(conf, queueData.getId());
// Init the just created replication source. Pass the default walProvider's wal file length
// provider. Presumption is we replicate user-space Tables only. For hbase:meta region replica
// replication, see #createCatalogReplicationSource().
WALFileLengthProvider walFileLengthProvider = (this.walFactory.getWALProvider() != null) ? this.walFactory.getWALProvider().getWALFileLengthProvider() : p -> OptionalLong.empty();
src.init(conf, fs, this, queueStorage, replicationPeer, server, queueData, clusterId, walFileLengthProvider, new MetricsSource(queueData.getId().toString()));
return src;
} | 3.26 |
hbase_ReplicationSourceManager_releaseBufferQuota_rdh | /**
* To release the buffer quota which acquired by
* {@link ReplicationSourceManager#acquireBufferQuota}.
*/
void releaseBufferQuota(long size) {
if (size < 0) {
throw new IllegalArgumentException("size should not less than 0");
}
addTotalBufferUsed(-size);
} | 3.26 |
hbase_ActivePolicyEnforcement_getPolicies_rdh | /**
* Returns an unmodifiable version of the active {@link SpaceViolationPolicyEnforcement}s.
*/
public Map<TableName, SpaceViolationPolicyEnforcement> getPolicies() {
return Collections.unmodifiableMap(activePolicies);
} | 3.26 |
hbase_ActivePolicyEnforcement_getLocallyCachedPolicies_rdh | /**
* Returns an unmodifiable version of the policy enforcements that were cached because they are
* not in violation of their quota.
*/
Map<TableName, SpaceViolationPolicyEnforcement> getLocallyCachedPolicies() {
return Collections.unmodifiableMap(locallyCachedPolicies);
} | 3.26 |
hbase_ActivePolicyEnforcement_getPolicyEnforcement_rdh | /**
* Returns the proper {@link SpaceViolationPolicyEnforcement} implementation for the given table.
* If the given table does not have a violation policy enforced, a "no-op" policy will be returned
* which always allows an action.
*
* @param tableName
* The table to fetch the policy for.
* @return A non-null {@link SpaceViolationPolicyEnforcement} instance.
*/
public SpaceViolationPolicyEnforcement getPolicyEnforcement(TableName tableName) {
SpaceViolationPolicyEnforcement policy = activePolicies.get(Objects.requireNonNull(tableName));
if (policy == null) {
synchronized(locallyCachedPolicies) {
// When we don't have a policy enforcement for the table, there could be one of two cases:
// 1) The table has no quota defined
// 2) The table is not in violation of its quota
// In both of these cases, we want to make sure that access remains fast and we minimize
// object creation. We can accomplish this by locally caching policies instead of creating
// a new instance of the policy each time.
policy = locallyCachedPolicies.get(tableName);
// We have already created/cached the enforcement, use it again. `activePolicies` and
// `snapshots` are immutable, thus this policy is valid for the lifetime of `this`.
if (policy != null) {
return policy;
}
// Create a PolicyEnforcement for this table and snapshot. The snapshot may be null
// which is OK.
policy = factory.createWithoutViolation(rss, tableName, snapshots.get(tableName));
// Cache the policy we created
locallyCachedPolicies.put(tableName, policy);
}
}
return policy;
} | 3.26 |
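A minimal sketch of the "check cache, create under lock, cache for reuse" pattern the getPolicyEnforcement comments above describe. The generic cache and factory types below are hypothetical stand-ins, not HBase classes; only the locking and caching shape is the point.

```java
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

public class LocalPolicyCache<K, V> {
  private final Map<K, V> cache = new HashMap<>();

  public V getOrCreate(K key, Function<K, V> factory) {
    synchronized (cache) {
      V cached = cache.get(key);
      if (cached != null) {
        return cached; // reuse the instance created by an earlier caller
      }
      V created = factory.apply(key); // analogous to the cheap non-violation policy above
      cache.put(key, created); // cache it so later lookups stay allocation-free
      return created;
    }
  }
}
```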
hbase_HBaseFsckRepair_forceOfflineInZK_rdh | /**
* In 0.90, this forces an HRI offline by setting the RegionTransitionData in ZK to have
* HBCK_CODE_NAME as the server. This is a special case in the AssignmentManager that attempts an
* assign call by the master. This doesn't seem to work properly in the updated version of 0.92+'s
* hbck so we use assign to force the region into transition. This has the side-effect of
* requiring a RegionInfo that considers regionId (timestamp) in comparators that is addressed by
* HBASE-5563.
*/
private static void forceOfflineInZK(Admin admin, final RegionInfo region) throws ZooKeeperConnectionException, KeeperException, IOException, InterruptedException {
admin.assign(region.getRegionName());
} | 3.26 |
hbase_HBaseFsckRepair_createHDFSRegionDir_rdh | /**
* Creates, flushes, and closes a new region.
*/
public static HRegion createHDFSRegionDir(Configuration conf, RegionInfo hri, TableDescriptor htd) throws IOException {
// Create HRegion
Path root = CommonFSUtils.getRootDir(conf);
HRegion region = HRegion.createHRegion(hri, root, conf, htd, null);
// Close the new region to flush to disk. Close log file too.
region.close();
return region;
} | 3.26 |
hbase_HBaseFsckRepair_removeParentInMeta_rdh | /* Remove parent */
public static void removeParentInMeta(Configuration conf, RegionInfo hri) throws IOException {
throw new UnsupportedOperationException("HBCK1 is read-only now, use HBCK2 instead");
} | 3.26 |
hbase_HBaseFsckRepair_fixUnassigned_rdh | /**
* Fix unassigned by creating/transition the unassigned ZK node for this region to OFFLINE state
* with a special flag to tell the master that this is a forced operation by HBCK. This assumes
* that info is in META.
*/
public static void fixUnassigned(Admin admin, RegionInfo region) throws IOException, KeeperException, InterruptedException {
// Force ZK node to OFFLINE so master assigns
forceOfflineInZK(admin, region);
} | 3.26 |
hbase_HBaseFsckRepair_waitUntilAssigned_rdh | /* Should we check all assignments or just not in RIT? */
public static void waitUntilAssigned(Admin admin, RegionInfo region) throws IOException, InterruptedException {
long timeout = admin.getConfiguration().getLong("hbase.hbck.assign.timeout", 120000);
long expiration = timeout + EnvironmentEdgeManager.currentTime();
while (EnvironmentEdgeManager.currentTime() < expiration) {
try {
boolean inTransition = false;
for (RegionState rs : admin.getClusterMetrics(EnumSet.of(Option.REGIONS_IN_TRANSITION)).getRegionStatesInTransition()) {
if (RegionInfo.COMPARATOR.compare(rs.getRegion(), region) == 0) {
inTransition = true;
break;
}
}
if (!inTransition) {
// yay! no longer RIT
return;
}
// still in rit
LOG.info(("Region still in transition, waiting for " + "it to become assigned: ") + region);
} catch (IOException e) {
LOG.warn("Exception when waiting for region to become assigned," + " retrying", e);
}
Thread.sleep(1000);
}
throw new IOException((((("Region " + region) + " failed to move out of ") + "transition within timeout ")
+ timeout) + "ms");
} | 3.26 |
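waitUntilAssigned above is a poll-until-deadline loop: compute an absolute expiration once, retry the check with a fixed sleep, and fail with a timeout error afterwards. A self-contained sketch of that shape, with the condition reduced to a placeholder BooleanSupplier rather than an Admin call:

```java
import java.io.IOException;
import java.util.function.BooleanSupplier;

public class PollUntil {
  public static void waitFor(BooleanSupplier condition, long timeoutMs, long sleepMs)
    throws IOException, InterruptedException {
    long expiration = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < expiration) {
      if (condition.getAsBoolean()) {
        return; // condition met, stop waiting
      }
      Thread.sleep(sleepMs); // back off before the next check
    }
    throw new IOException("Condition not met within " + timeoutMs + "ms");
  }

  public static void main(String[] args) throws Exception {
    long start = System.currentTimeMillis();
    waitFor(() -> System.currentTimeMillis() - start > 200, 1000, 50);
  }
}
```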
hbase_HBaseFsckRepair_fixMetaHoleOnlineAndAddReplicas_rdh | /**
* Puts the specified RegionInfo into META with replica related columns
*/
public static void fixMetaHoleOnlineAndAddReplicas(Configuration conf, RegionInfo hri, Collection<ServerName> servers, int numReplicas) throws IOException {
Connection conn = ConnectionFactory.createConnection(conf);
Table meta = conn.getTable(TableName.META_TABLE_NAME);
Put put = MetaTableAccessor.makePutFromRegionInfo(hri);
if (numReplicas > 1) {
Random rand = ThreadLocalRandom.current();
ServerName[] serversArr = servers.toArray(new ServerName[servers.size()]);
for (int i = 1; i < numReplicas; i++) {
ServerName sn = serversArr[rand.nextInt(serversArr.length)];
// the column added here is just to make sure the master is able to
// see the additional replicas when it is asked to assign. The
// final value of these columns will be different and will be updated
// by the actual regionservers that start hosting the respective replicas
MetaTableAccessor.addLocation(put, sn, sn.getStartcode(), i);
}
}
meta.put(put);
meta.close();
conn.close();
} | 3.26 |
hbase_WALUtil_writeFlushMarker_rdh | /**
* Write a flush marker indicating a start / abort or a complete of a region flush
* <p/>
* This write is for internal use only. Not for external client consumption.
*/
public static WALKeyImpl writeFlushMarker(WAL wal, NavigableMap<byte[], Integer> replicationScope, RegionInfo hri, final FlushDescriptor f, boolean sync, MultiVersionConcurrencyControl mvcc, RegionReplicationSink sink) throws IOException {
WALKeyImpl walKey = doFullMarkerAppendTransaction(wal, replicationScope, hri, WALEdit.createFlushWALEdit(hri, f), mvcc, null, sync, sink);
if (LOG.isTraceEnabled()) {
LOG.trace("Appended flush marker " + TextFormat.shortDebugString(f));
}
return walKey;
} | 3.26 |
hbase_WALUtil_writeBulkLoadMarkerAndSync_rdh | /**
* Write a log marker that a bulk load has succeeded and is about to be committed. This write is
* for internal use only. Not for external client consumption.
*
* @param wal
* The log to write into.
* @param replicationScope
* The replication scope of the families in the HRegion
* @param hri
* A description of the region in the table that we are bulk loading into.
* @param desc
* A protocol buffers based description of the client's bulk loading
* request
* @return walKey with sequenceid filled out for this bulk load marker
* @throws IOException
* We will throw an IOException if we can not append to the HLog.
*/
public static WALKeyImpl writeBulkLoadMarkerAndSync(final WAL wal, final NavigableMap<byte[], Integer> replicationScope,
final RegionInfo hri, final WALProtos.BulkLoadDescriptor desc, final MultiVersionConcurrencyControl mvcc, final RegionReplicationSink sink) throws IOException {
WALKeyImpl walKey = writeMarker(wal, replicationScope, hri, WALEdit.createBulkLoadEvent(hri, desc), mvcc, null, sink);
if (LOG.isTraceEnabled()) {
LOG.trace("Appended Bulk Load marker " + TextFormat.shortDebugString(desc));
}
return walKey;
} | 3.26 |
hbase_WALUtil_getWALBlockSize_rdh | /**
* Public because of FSHLog. Should be package-private
*
* @param isRecoverEdits
* the created writer is for recovered edits or WAL. For recovered edits, it
* is true and for WAL it is false.
*/
public static long getWALBlockSize(Configuration conf, FileSystem fs, Path dir, boolean isRecoverEdits) throws IOException {
long defaultBlockSize = CommonFSUtils.getDefaultBlockSize(fs, dir) * 2;
if (isRecoverEdits) {
return conf.getLong("hbase.regionserver.recoverededits.blocksize", defaultBlockSize);
}
return conf.getLong(WAL_BLOCK_SIZE, defaultBlockSize);
} | 3.26 |
hbase_WALUtil_writeRegionEventMarker_rdh | /**
* Write a region open marker indicating that the region is opened. This write is for internal use
* only. Not for external client consumption.
*/
public static WALKeyImpl writeRegionEventMarker(WAL wal, NavigableMap<byte[], Integer> replicationScope, RegionInfo hri, RegionEventDescriptor r, MultiVersionConcurrencyControl mvcc, RegionReplicationSink sink) throws IOException {
WALKeyImpl walKey = writeMarker(wal, replicationScope, hri, WALEdit.createRegionEventWALEdit(hri, r), mvcc, null, sink);
if (LOG.isTraceEnabled()) {
LOG.trace("Appended region event marker " + TextFormat.shortDebugString(r));
}
return walKey;
} | 3.26 |
hbase_WALUtil_doFullMarkerAppendTransaction_rdh | /**
* A 'full' WAL transaction involves starting an mvcc transaction followed by an append, an
* optional sync, and then a call to complete the mvcc transaction. This method does it all. Good
* for case of adding a single edit or marker to the WAL.
* <p/>
* This write is for internal use only. Not for external client consumption.
*
* @return WALKeyImpl that was added to the WAL.
*/
private static WALKeyImpl doFullMarkerAppendTransaction(final WAL wal, final NavigableMap<byte[], Integer> replicationScope, final RegionInfo hri, final WALEdit edit, final MultiVersionConcurrencyControl mvcc, final Map<String, byte[]> extendedAttributes, final boolean sync, final RegionReplicationSink sink) throws IOException {
// TODO: Pass in current time to use?
WALKeyImpl walKey = createWALKey(hri, mvcc, replicationScope, extendedAttributes);
long trx = MultiVersionConcurrencyControl.NONE;
try {
trx = wal.appendMarker(hri, walKey, edit);
WriteEntry writeEntry = walKey.getWriteEntry();
if (sink != null) {
writeEntry.attachCompletionAction(() -> sink.add(walKey, edit, RpcServer.getCurrentServerCallWithCellScanner().orElse(null)));
}
if (sync) {
wal.sync(trx);
}
// Call complete only here because these are markers only. They are not for clients to read.
mvcc.complete(writeEntry);
} catch (IOException ioe) {
if (walKey.getWriteEntry() != null) {
mvcc.complete(walKey.getWriteEntry());
}
/**
* Here we do not abort the RegionServer for {@link WALSyncTimeoutIOException} as
* {@link HRegion#doWALAppend} does, because a WAL marker just records internal state and
* there seems to be no need to always abort the RegionServer when {@link WAL#sync} times out; it is
* the internal state transition that determines whether the RegionServer is aborted or not.
*/
throw ioe;
}
return walKey;
} | 3.26 |
hbase_WALUtil_m0_rdh | /**
* Write the marker that a compaction has succeeded and is about to be committed. This provides
* info to the HMaster to allow it to recover the compaction if this regionserver dies in the
* middle. It also prevents the compaction from finishing if this regionserver has already lost
* its lease on the log.
* <p/>
* This write is for internal use only. Not for external client consumption.
*
* @param mvcc
* Used by WAL to get sequence Id for the waledit.
*/
public static WALKeyImpl m0(WAL wal, NavigableMap<byte[], Integer> replicationScope, RegionInfo hri, final CompactionDescriptor c, MultiVersionConcurrencyControl mvcc, RegionReplicationSink sink) throws IOException {
WALKeyImpl walKey = writeMarker(wal, replicationScope, hri, WALEdit.createCompaction(hri, c), mvcc, null, sink);
if (LOG.isTraceEnabled()) {
LOG.trace("Appended compaction marker " + TextFormat.shortDebugString(c));
}
return walKey;
} | 3.26 |
hbase_ScanResultConsumerBase_onScanMetricsCreated_rdh | /**
* If {@code scan.isScanMetricsEnabled()} returns true, then this method will be called prior to
* all other methods in this interface to give you the {@link ScanMetrics} instance for this scan
* operation. The {@link ScanMetrics} instance will be updated on-the-fly during the scan, you can
* store it somewhere to get the metrics at any time if you want.
*/
default void onScanMetricsCreated(ScanMetrics scanMetrics) {
} | 3.26 |
hbase_HbckReport_getRegionInfoMap_rdh | /**
* This map contains the state of all hbck items. It maps from encoded region name to
* HbckRegionInfo structure. The information contained in HbckRegionInfo is used to detect and
* correct consistency (hdfs/meta/deployment) problems.
*/
public Map<String, HbckRegionInfo> getRegionInfoMap() {
return regionInfoMap;
} | 3.26 |
hbase_HbckReport_getCheckingEndTimestamp_rdh | /**
* Used for web ui to show when the HBCK checking report generated.
*/
public Instant getCheckingEndTimestamp() {
return checkingEndTimestamp;
} | 3.26 |
hbase_HbckReport_getOrphanRegionsOnRS_rdh | /**
* The regions only opened on RegionServers, but no region info in meta.
*/
public Map<String, ServerName> getOrphanRegionsOnRS() {
return orphanRegionsOnRS;
} | 3.26 |
hbase_HbckReport_getCheckingStartTimestamp_rdh | /**
* Used for web ui to show when the HBCK checking started.
*/
public Instant getCheckingStartTimestamp() {
return checkingStartTimestamp;
} | 3.26 |
hbase_HbckReport_getInconsistentRegions_rdh | /**
* The inconsistent regions. There are three cases: case 1. Master thought this region opened, but
* no regionserver reported it. case 2. Master thought this region opened on Server1, but a
* regionserver reported Server2. case 3. More than one regionserver reported this region as opened.
*/
public Map<String, Pair<ServerName, List<ServerName>>> getInconsistentRegions() {
return f0;
} | 3.26 |
hbase_PermissionStorage_getUserPermissions_rdh | /**
* Returns the currently granted permissions for a given table/namespace with associated
* permissions based on the specified column family, column qualifier and user name.
*
* @param conf
* the configuration
* @param entryName
* Table name or the namespace
* @param cf
* Column family
* @param cq
* Column qualifier
* @param user
* User name to be filtered from permission as requested
* @param hasFilterUser
* true if filter user is provided, otherwise false.
* @return List of UserPermissions
* @throws IOException
* on failure
*/
public static List<UserPermission> getUserPermissions(Configuration conf, byte[] entryName, byte[] cf, byte[] cq, String user, boolean hasFilterUser) throws IOException {
ListMultimap<String, UserPermission> allPerms = getPermissions(conf, entryName, null, cf, cq, user, hasFilterUser);
List<UserPermission> perms = new ArrayList<>();
for (Map.Entry<String, UserPermission> entry : allPerms.entries()) {
perms.add(entry.getValue());
}
return perms;
} | 3.26 |
hbase_PermissionStorage_removeUserPermission_rdh | /**
* Removes a previously granted permission from the stored access control lists. The
* {@link TablePermission} being removed must exactly match what is stored -- no wildcard matching
* is attempted. Ie, if user "bob" has been granted "READ" access to the "data" table, but only to
* column family plus qualifier "info:colA", then trying to call this method with only user "bob"
* and the table name "data" (but without specifying the column qualifier "info:colA") will have
* no effect.
*
* @param conf
* the configuration
* @param userPerm
* the details of the permission to be revoked
* @param t
* acl table
* @throws IOException
* if there is an error accessing the metadata table
*/
public static void removeUserPermission(Configuration conf, UserPermission userPerm, Table t) throws IOException {
if ((null == userPerm.getPermission().getActions()) || (userPerm.getPermission().getActions().length == 0)) {
removePermissionRecord(conf, userPerm, t);
} else {
// Get all the global user permissions from the acl table
List<UserPermission> permsList = getUserPermissions(conf, m0(userPerm.getPermission()), null, null, null, false);
List<Permission.Action> remainingActions = new ArrayList<>();
List<Permission.Action> dropActions = Arrays.asList(userPerm.getPermission().getActions());
for (UserPermission perm : permsList) {
// Find the user and remove only the requested permissions
if (perm.getUser().equals(userPerm.getUser())) {
for (Permission.Action oldAction : perm.getPermission().getActions()) {
if (!dropActions.contains(oldAction)) {
remainingActions.add(oldAction);
}
}
if (!remainingActions.isEmpty()) {
perm.getPermission().setActions(remainingActions.toArray(new Permission.Action[remainingActions.size()]));
addUserPermission(conf, perm, t);
} else {
removePermissionRecord(conf, userPerm, t);
}
break;
}
}
}
if (f0.isDebugEnabled()) {
f0.debug("Removed permission " + userPerm.toString());
}
} | 3.26 |
hbase_PermissionStorage_addUserPermission_rdh | /**
* Stores a new user permission grant in the access control lists table.
*
* @param conf
* the configuration
* @param userPerm
* the details of the permission to be granted
* @param t
* acl table instance. It is closed upon method return.
* @throws IOException
* in the case of an error accessing the metadata table
*/
public static void addUserPermission(Configuration conf, UserPermission userPerm, Table t, boolean mergeExistingPermissions) throws IOException {
Permission permission = userPerm.getPermission();
Permission.Action[] v1 = permission.getActions();
byte[] rowKey = m0(permission);
Put p = new Put(rowKey);
byte[] key = userPermissionKey(userPerm);
if ((v1 == null) || (v1.length == 0)) {
String msg = ("No actions associated with user '" + userPerm.getUser()) + "'";
f0.warn(msg);
throw new IOException(msg);
}
Set<Permission.Action> actionSet = new TreeSet<Permission.Action>();
if (mergeExistingPermissions) {
List<UserPermission> perms = getUserPermissions(conf, rowKey, null, null, null, false);
UserPermission currentPerm = null;
for (UserPermission perm : perms) {
if (userPerm.equalsExceptActions(perm)) {
currentPerm = perm;
break;
}
}
if ((currentPerm != null) && (currentPerm.getPermission().getActions() != null)) {
actionSet.addAll(Arrays.asList(currentPerm.getPermission().getActions()));
}
}
// merge current action with new action.
actionSet.addAll(Arrays.asList(v1));
// serialize to byte array.
byte[] value = new byte[actionSet.size()];
int index = 0;
for (Permission.Action action : actionSet) {
value[index++] = action.code();
}
p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(p.getRow()).setFamily(ACL_LIST_FAMILY).setQualifier(key).setTimestamp(p.getTimestamp()).setType(Type.Put).setValue(value).build());
if (f0.isDebugEnabled()) {
f0.debug((((("Writing permission with rowKey " + Bytes.toString(rowKey)) + " ") + Bytes.toString(key)) + ": ") + Bytes.toStringBinary(value));
}
try {
t.put(p);
} finally {
t.close();
}
} | 3.26 |
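A simplified sketch of the merge-then-serialize step in addUserPermission above: union the existing actions with the newly granted ones in a sorted set, then emit one code byte per action. The Action enum below is a hypothetical stand-in for Permission.Action, with illustrative code letters.

```java
import java.util.Arrays;
import java.util.Set;
import java.util.TreeSet;

public class ActionMergeSketch {
  // hypothetical stand-in for Permission.Action
  enum Action {
    READ('R'), WRITE('W'), EXEC('X');

    private final char code;

    Action(char code) {
      this.code = code;
    }

    byte code() {
      return (byte) code;
    }
  }

  static byte[] mergeAndSerialize(Action[] existing, Action[] granted) {
    Set<Action> merged = new TreeSet<>(Arrays.asList(existing)); // sorted, de-duplicated
    merged.addAll(Arrays.asList(granted));
    byte[] value = new byte[merged.size()];
    int index = 0;
    for (Action action : merged) {
      value[index++] = action.code(); // one code byte per action, as in the Put value above
    }
    return value;
  }
}
```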
hbase_PermissionStorage_parsePermissions_rdh | /**
* Parse and filter permission based on the specified column family, column qualifier and user
* name.
*/
private static ListMultimap<String, UserPermission> parsePermissions(byte[] entryName, Result result, byte[] cf, byte[] cq, String user, boolean hasFilterUser) {
ListMultimap<String, UserPermission> perms = ArrayListMultimap.create();
if ((result != null) && (result.size() > 0)) {
for (Cell kv : result.rawCells()) {
Pair<String, Permission> permissionsOfUserOnTable = parsePermissionRecord(entryName, kv, cf, cq, hasFilterUser, user);
if (permissionsOfUserOnTable != null) {
String username = permissionsOfUserOnTable.getFirst();
Permission permission = permissionsOfUserOnTable.getSecond();
perms.put(username, new UserPermission(username, permission));
}
}
}
return perms;
} | 3.26 |
hbase_PermissionStorage_removeTablePermissions_rdh | /**
* Remove specified table column from the acl table.
*/
static void removeTablePermissions(Configuration conf, TableName tableName, byte[] column, Table t) throws IOException {
if (f0.isDebugEnabled()) {
f0.debug((("Removing permissions of removed column " + Bytes.toString(column)) + " from table ") + tableName);
}
removeTablePermissions(tableName, column, t, true);
} | 3.26 |
hbase_PermissionStorage_userPermissionKey_rdh | /**
* Build qualifier key from user permission: "username", "username,family" or "username,family,qualifier"
*/
static byte[] userPermissionKey(UserPermission permission) {
byte[] key = Bytes.toBytes(permission.getUser());
byte[] qualifier = null;
byte[] family = null;
if (permission.getPermission().getAccessScope() == Scope.TABLE) {
TablePermission tablePermission = ((TablePermission) (permission.getPermission()));
family = tablePermission.getFamily();
qualifier = tablePermission.getQualifier();
}
if ((family != null) && (family.length > 0)) {
key = Bytes.add(key, Bytes.add(new byte[]{ ACL_KEY_DELIMITER }, family));
if ((qualifier != null) && (qualifier.length > 0)) {
key = Bytes.add(key, Bytes.add(new byte[]{ ACL_KEY_DELIMITER }, qualifier));
}
}
return key;
} | 3.26 |
hbase_PermissionStorage_getUserTablePermissions_rdh | /**
* Returns the currently granted permissions for a given table as the specified user plus
* associated permissions.
*/
public static List<UserPermission> getUserTablePermissions(Configuration conf, TableName tableName, byte[] cf, byte[] cq, String userName, boolean hasFilterUser) throws IOException {
return getUserPermissions(conf, tableName == null ? null : tableName.getName(), cf, cq, userName, hasFilterUser);
} | 3.26 |
hbase_PermissionStorage_getPermissions_rdh | /**
* Reads user permission assignments stored in the <code>l:</code> column family of the first
* table row in <code>_acl_</code>.
* <p>
* See {@link PermissionStorage class documentation} for the key structure used for storage.
* </p>
*/
static ListMultimap<String, UserPermission> getPermissions(Configuration conf, byte[] entryName, Table t, byte[] cf, byte[] cq, String user, boolean hasFilterUser) throws IOException {
if (entryName == null) {
entryName = ACL_GLOBAL_NAME;
}
// for normal user tables, we just read the table row from _acl_
ListMultimap<String, UserPermission> perms = ArrayListMultimap.create();
Get get = new Get(entryName);
get.addFamily(ACL_LIST_FAMILY);
Result row = null;
if (t == null) {
try (Connection connection = ConnectionFactory.createConnection(conf)) {
try (Table table = connection.getTable(ACL_TABLE_NAME)) {
row = table.get(get);
}
}
} else {
row = t.get(get);
}
if (!row.isEmpty()) {
perms = parsePermissions(entryName, row, cf, cq, user, hasFilterUser);
} else {f0.info((("No permissions found in " + ACL_TABLE_NAME)
+ " for acl entry ") + Bytes.toString(entryName));
}
return perms;
} | 3.26 |
hbase_PermissionStorage_removeNamespacePermissions_rdh | /**
* Remove specified namespace from the acl table.
*/
static void removeNamespacePermissions(Configuration conf, String namespace, Table t) throws IOException {
Delete d = new Delete(Bytes.toBytes(toNamespaceEntry(namespace)));
d.addFamily(ACL_LIST_FAMILY);
if (f0.isDebugEnabled()) {
f0.debug("Removing permissions of removed namespace " + namespace);
}
try {
t.delete(d);
} finally {
t.close();
}
} | 3.26 |
hbase_PermissionStorage_writePermissionsAsBytes_rdh | /**
* Writes a set of permissions as {@link org.apache.hadoop.io.Writable} instances and returns the
* resulting byte array. Writes a set of permission [user: table permission]
*/
public static byte[] writePermissionsAsBytes(ListMultimap<String, UserPermission> perms, Configuration conf) {
return ProtobufUtil.prependPBMagic(AccessControlUtil.toUserTablePermissions(perms).toByteArray());
} | 3.26 |
hbase_PermissionStorage_isAclTable_rdh | /**
* Returns {@code true} if the given table is {@code _acl_} metadata table.
*/
static boolean isAclTable(TableDescriptor desc) {
return ACL_TABLE_NAME.equals(desc.getTableName());
} | 3.26 |
hbase_PermissionStorage_loadAll_rdh | /**
* Load all permissions from the region server holding {@code _acl_}, primarily intended for
* testing purposes.
*/
static Map<byte[], ListMultimap<String, UserPermission>> loadAll(Configuration conf) throws IOException {
Map<byte[], ListMultimap<String, UserPermission>> allPerms = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
// do a full scan of _acl_, filtering on only first table region rows
Scan scan = new Scan();
scan.addFamily(ACL_LIST_FAMILY);
ResultScanner scanner = null;
// TODO: Pass in a Connection rather than create one each time.
try (Connection connection = ConnectionFactory.createConnection(conf)) {
try (Table table = connection.getTable(ACL_TABLE_NAME)) {
scanner = table.getScanner(scan);
try {
for (Result row : scanner) {
ListMultimap<String, UserPermission> resultPerms = parsePermissions(row.getRow(), row, null, null, null, false);
allPerms.put(row.getRow(), resultPerms);
}
} finally {
if (scanner != null) {
scanner.close();
}
}
}
}
return allPerms;
} | 3.26 |
hbase_PermissionStorage_isAclRegion_rdh | /**
* Returns {@code true} if the given region is part of the {@code _acl_} metadata table.
*/
static boolean isAclRegion(Region region) {
return ACL_TABLE_NAME.equals(region.getTableDescriptor().getTableName());
} | 3.26 |
hbase_InclusiveCombinedBlockCache_cacheBlock_rdh | /**
*
* @param cacheKey
* The block's cache key.
* @param buf
* The block contents wrapped in a ByteBuffer.
* @param inMemory
* Whether block should be treated as in-memory. This parameter is only useful for
* the L1 lru cache.
*/
@Override
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) {
// This is the inclusive part of the combined block cache.
// Every block is placed into both block caches.
l1Cache.cacheBlock(cacheKey, buf, inMemory);
// This assumes that insertion into the L2 block cache is either async or very fast.
l2Cache.cacheBlock(cacheKey, buf, inMemory);
} | 3.26 |
hbase_DynamicMetricsRegistry_info_rdh | /**
* Returns the info object of the metrics registry
*/
public MetricsInfo info() {
return metricsInfo;
} | 3.26 |
hbase_DynamicMetricsRegistry_getGaugeInt_rdh | /**
* Get a MetricMutableGaugeInt from the storage. If it is not there atomically put it.
*
* @param gaugeName
* name of the gauge to create or get.
* @param potentialStartingValue
* value of the new gauge if we have to create it.
*/
public MutableGaugeInt getGaugeInt(String gaugeName, int potentialStartingValue) {
// Try and get the gauge.
MutableMetric metric = metricsMap.get(gaugeName);
// If it's not there then try and put a new one in the storage.
if (metric == null) {
// Create the potential new gauge.
MutableGaugeInt newGauge = new MutableGaugeInt(new MetricsInfoImpl(gaugeName, ""), potentialStartingValue);
// Try and put the gauge in. This is atomic.
metric = metricsMap.putIfAbsent(gaugeName, newGauge);
// If the value we get back is null then the put was successful and we will return that.
// otherwise gaugeInt should contain the thing that was in before the put could be completed.
if (metric == null) {
return newGauge;
}
}
if (!(metric instanceof MutableGaugeInt)) {
throw new MetricsException(("Metric already exists in registry for metric name: " + gaugeName) + " and not of type MetricMutableGaugeInr");
}
return ((MutableGaugeInt) (metric));
} | 3.26 |
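getGaugeInt above (like the similar getGauge and getCounter helpers later in this listing) follows a lock-free get-or-create idiom on a ConcurrentMap: optimistically build a candidate, attempt an atomic putIfAbsent, and keep whichever instance won the race. A minimal sketch with AtomicLong standing in for the mutable metric type:

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

public class MetricGetOrCreate {
  private final ConcurrentMap<String, AtomicLong> metrics = new ConcurrentHashMap<>();

  public AtomicLong getGauge(String name, long initialValue) {
    AtomicLong existing = metrics.get(name);
    if (existing == null) {
      AtomicLong candidate = new AtomicLong(initialValue);
      existing = metrics.putIfAbsent(name, candidate); // atomic: only one candidate is installed
      if (existing == null) {
        return candidate; // our candidate won the race
      }
    }
    return existing; // another thread installed a gauge first; reuse it
  }
}
```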
hbase_DynamicMetricsRegistry_newGauge_rdh | /**
* Create a mutable long integer gauge
*
* @param info
* metadata of the metric
* @param iVal
* initial value
* @return a new gauge object
*/
public MutableGaugeLong newGauge(MetricsInfo info, long iVal) {
MutableGaugeLong ret = new MutableGaugeLong(info, iVal);
return addNewMetricIfAbsent(info.name(), ret, MutableGaugeLong.class);
} | 3.26 |
hbase_DynamicMetricsRegistry_newRate_rdh | /**
* Create a mutable rate metric (for throughput measurement)
*
* @param name
* of the metric
* @param desc
* description
* @param extended
* produce extended stat (stdev/min/max etc.) if true
* @return a new mutable rate metric object
*/
public MutableRate newRate(String name, String desc, boolean extended) {
return newRate(name, desc, extended, true);
} | 3.26 |
hbase_DynamicMetricsRegistry_newSizeHistogram_rdh | /**
* Create a new histogram with size range counts.
*
* @param name
* The name of the histogram
* @param desc
* The description of the data in the histogram.
* @return A new MutableSizeHistogram
*/
public MutableSizeHistogram newSizeHistogram(String name, String desc) {
MutableSizeHistogram histo = new MutableSizeHistogram(name, desc);
return addNewMetricIfAbsent(name, histo, MutableSizeHistogram.class);
} | 3.26 |
hbase_DynamicMetricsRegistry_getTag_rdh | /**
* Get a tag by name
*
* @param name
* of the tag
* @return the tag object
*/
public MetricsTag getTag(String name) {
return tagsMap.get(name);
} | 3.26 |
hbase_DynamicMetricsRegistry_setContext_rdh | /**
* Set the metrics context tag
*
* @param name
* of the context
* @return the registry itself as a convenience
*/
public DynamicMetricsRegistry setContext(String name) {
return tag(MsInfo.Context, name, true);
} | 3.26 |
hbase_DynamicMetricsRegistry_getCounter_rdh | /**
* Get a MetricMutableCounterLong from the storage. If it is not there atomically put it.
*
* @param counterName
* Name of the counter to get
* @param potentialStartingValue
* starting value if we have to create a new counter
*/
public MutableFastCounter getCounter(String counterName, long potentialStartingValue) {
// See getGauge for description on how this works.
MutableMetric counter = metricsMap.get(counterName);
if (counter == null) {
MutableFastCounter newCounter = new MutableFastCounter(new MetricsInfoImpl(counterName, ""), potentialStartingValue);
counter = metricsMap.putIfAbsent(counterName, newCounter);
if (counter == null) {
return newCounter;
}
}
if (!(counter instanceof MutableCounter)) {
throw new MetricsException(("Metric already exists in registry for metric name: " + counterName) + " and not of type MutableCounter");
}
return ((MutableFastCounter) (counter));
} | 3.26 |
hbase_DynamicMetricsRegistry_newHistogram_rdh | /**
* Create a new histogram.
*
* @param name
* The name of the histogram
* @param desc
* The description of the data in the histogram.
* @return A new MutableHistogram
*/
public MutableHistogram newHistogram(String name, String desc) {
MutableHistogram histo = new MutableHistogram(name, desc);
return addNewMetricIfAbsent(name, histo, MutableHistogram.class);
} | 3.26 |
hbase_DynamicMetricsRegistry_get_rdh | /**
* Get a metric by name
*
* @param name
* of the metric
* @return the metric object
*/
public MutableMetric get(String name) {
return metricsMap.get(name);
} | 3.26 |
hbase_DynamicMetricsRegistry_snapshot_rdh | /**
* Sample all the mutable metrics and put the snapshot in the builder
*
* @param builder
* to contain the metrics snapshot
* @param all
* get all the metrics even if the values are not changed.
*/
public void snapshot(MetricsRecordBuilder builder, boolean all) {
for (MetricsTag tag : tags()) {
builder.add(tag);
}
for (MutableMetric metric : metrics()) {
metric.snapshot(builder, all);
}
} | 3.26 |
hbase_DynamicMetricsRegistry_newTimeHistogram_rdh | /**
* Create a new histogram with time range counts.
*
* @param name
* The name of the histogram
* @param desc
* The description of the data in the histogram.
* @return A new MutableTimeHistogram
*/
public MutableTimeHistogram newTimeHistogram(String name, String desc) {
MutableTimeHistogram histo = new MutableTimeHistogram(name, desc);
return addNewMetricIfAbsent(name, histo, MutableTimeHistogram.class);
} | 3.26 |
hbase_DynamicMetricsRegistry_newStat_rdh | /**
* Create a mutable metric with stats
*
* @param name
* of the metric
* @param desc
* metric description
* @param sampleName
* of the metric (e.g., "Ops")
* @param valueName
* of the metric (e.g., "Time" or "Latency")
* @return a new mutable metric object
*/
public MutableStat newStat(String name, String desc, String sampleName, String valueName) {
return newStat(name, desc, sampleName, valueName, false);
} | 3.26 |
hbase_DynamicMetricsRegistry_add_rdh | /**
* Add sample to a stat metric by name.
*
* @param name
* of the metric
* @param value
* of the snapshot to add
*/
public void add(String name, long value) {
MutableMetric m = metricsMap.get(name);
if (m != null) {
if (m instanceof MutableStat) {
((MutableStat) (m)).add(value);
} else {
throw new MetricsException("Unsupported add(value) for metric " + name);
}
} else {
metricsMap.put(name, newRate(name)); // default is a rate metric
add(name, value);
}
} | 3.26 |
hbase_DynamicMetricsRegistry_getGauge_rdh | /**
* Get a MetricMutableGaugeLong from the storage. If it is not there atomically put it.
*
* @param gaugeName
* name of the gauge to create or get.
* @param potentialStartingValue
* value of the new gauge if we have to create it.
*/
public MutableGaugeLong getGauge(String gaugeName, long potentialStartingValue) {
// Try and get the gauge.
MutableMetric metric = metricsMap.get(gaugeName);
// If it's not there then try and put a new one in the storage.
if (metric == null) {
// Create the potential new gauge.
MutableGaugeLong newGauge = new MutableGaugeLong(new MetricsInfoImpl(gaugeName, ""), potentialStartingValue);
// Try and put the gauge in. This is atomic.
metric = metricsMap.putIfAbsent(gaugeName, newGauge);
// If the value we get back is null then the put was successful and we will return that.
// otherwise gaugeLong should contain the thing that was in before the put could be completed.
if (metric == null) {
return newGauge;
}
}
if (!(metric instanceof MutableGaugeLong)) {
throw new MetricsException(("Metric already exists in registry for metric name: " + gaugeName) + " and not of type MetricMutableGaugeLong");
}
return ((MutableGaugeLong) (metric));
} | 3.26 |
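
A usage sketch for the get-or-create pattern above, still assuming the registry from the earlier sketches: concurrent callers racing on the same gauge name all end up updating the single registered MutableGaugeLong.

    import org.apache.hadoop.metrics2.lib.MutableGaugeLong;

    MutableGaugeLong openRegions = registry.getGauge("openRegionCount", 0L);
    openRegions.incr();   // safe even if another thread won the putIfAbsent race
    openRegions.set(42L); // MutableGaugeLong also supports absolute updates
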
hbase_DynamicMetricsRegistry_newCounter_rdh | /**
* Create a mutable long integer counter
*
* @param info
* metadata of the metric
* @param iVal
* initial value
* @return a new counter object
*/
public MutableFastCounter newCounter(MetricsInfo info, long iVal) {
MutableFastCounter ret = new MutableFastCounter(info, iVal);
return addNewMetricIfAbsent(info.name(), ret, MutableFastCounter.class);
} | 3.26 |
hbase_DynamicMetricsRegistry_tag_rdh | /**
* Add a tag to the metrics
*
* @param info
* metadata of the tag
* @param value
* of the tag
* @param override
* existing tag if true
* @return the registry (for keep adding tags etc.)
*/
public DynamicMetricsRegistry tag(MetricsInfo info, String value, boolean override) {
MetricsTag tag = Interns.tag(info, value);
if (!override) {
MetricsTag existing = tagsMap.putIfAbsent(info.name(), tag);
if (existing != null) {
throw new MetricsException(("Tag " + info.name()) + " already exists!");
}
return this;
}
tagsMap.put(info.name(), tag);
return this;
} | 3.26 |
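
A short sketch of the override flag, with invented tag names and values: the first call registers the tag, a call with override == true replaces it, and a repeated call with override == false throws.

    import org.apache.hadoop.metrics2.lib.Interns;

    registry.tag(Interns.info("Context", "Metrics context"), "regionserver", false); // registers the tag
    registry.tag(Interns.info("Context", "Metrics context"), "master", true);        // overrides the value
    // Another call with override == false would now throw MetricsException("Tag Context already exists!")
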
hbase_SingleColumnValueFilter_getQualifier_rdh | /**
* Returns the qualifier
*/
public byte[] getQualifier() {
return columnQualifier;
} | 3.26 |
hbase_SingleColumnValueFilter_setFilterIfMissing_rdh | /**
* Set whether entire row should be filtered if column is not found.
* <p>
* If true, the entire row will be skipped if the column is not found.
* <p>
* If false, the row will pass if the column is not found. This is default.
*
* @param filterIfMissing
* flag
*/
public void setFilterIfMissing(boolean filterIfMissing) {
  this.f0 = filterIfMissing;
} | 3.26 |
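
A client-side sketch of the flag in action, with hypothetical table, family, and qualifier names: with filterIfMissing == true, rows that lack the filtered column are dropped instead of passing through.

    import org.apache.hadoop.hbase.CompareOperator;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    SingleColumnValueFilter filter = new SingleColumnValueFilter(
        Bytes.toBytes("info"), Bytes.toBytes("status"), CompareOperator.EQUAL, Bytes.toBytes("active"));
    filter.setFilterIfMissing(true); // rows without info:status are skipped entirely
    Scan scan = new Scan().setFilter(filter);
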
hbase_SingleColumnValueFilter_getLatestVersionOnly_rdh | /**
* Get whether only the latest version of the column value should be compared. If true, the row
* will be returned if only the latest version of the column value matches. If false, the row will
* be returned if any version of the column value matches. The default is true.
*
* @return return value
*/
public boolean getLatestVersionOnly() {
return latestVersionOnly;
} | 3.26 |
hbase_SingleColumnValueFilter_getComparator_rdh | /**
* Returns the comparator
*/
public ByteArrayComparable getComparator() {
return comparator;
} | 3.26 |
hbase_SingleColumnValueFilter_setLatestVersionOnly_rdh | /**
* Set whether only the latest version of the column value should be compared. If true, the row
* will be returned if only the latest version of the column value matches. If false, the row will
* be returned if any version of the column value matches. The default is true.
*
* @param latestVersionOnly
* flag
*/
public void setLatestVersionOnly(boolean latestVersionOnly) {
this.latestVersionOnly = latestVersionOnly;
} | 3.26 |
hbase_SingleColumnValueFilter_parseFrom_rdh | /**
* Parse a serialized representation of {@link SingleColumnValueFilter}
*
* @param pbBytes
* A pb serialized {@link SingleColumnValueFilter} instance
* @return An instance of {@link SingleColumnValueFilter} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static SingleColumnValueFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.SingleColumnValueFilter proto;
try {
  proto = FilterProtos.SingleColumnValueFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
final CompareOperator compareOp = CompareOperator.valueOf(proto.getCompareOp().name());
final ByteArrayComparable comparator;
try {
comparator = ProtobufUtil.toComparator(proto.getComparator());
} catch (IOException ioe) {
throw new DeserializationException(ioe);
}
return new SingleColumnValueFilter(proto.hasColumnFamily() ? proto.getColumnFamily().toByteArray() : null, proto.hasColumnQualifier() ? proto.getColumnQualifier().toByteArray() : null,
compareOp, comparator, proto.getFilterIfMissing(), proto.getLatestVersionOnly());
} | 3.26 |
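
A round-trip sketch, assuming the filter built in the earlier SingleColumnValueFilter example: serialize it with toByteArray() (shown in a later entry) and rebuild it with parseFrom(); note that parseFrom declares a checked DeserializationException.

    import org.apache.hadoop.hbase.exceptions.DeserializationException;

    byte[] serialized = filter.toByteArray();
    try {
      SingleColumnValueFilter restored = SingleColumnValueFilter.parseFrom(serialized);
      // restored carries the same family, qualifier, comparator, and flags as the original
    } catch (DeserializationException e) {
      // corrupt or incompatible bytes
    }
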
hbase_SingleColumnValueFilter_isFamilyEssential_rdh | /**
* The only CF this filter needs is given column family. So, it's the only essential column in
* whole scan. If filterIfMissing == false, all families are essential, because of possibility of
* skipping the rows without any data in filtered CF.
*/
@Override
public boolean isFamilyEssential(byte[] name) {
return (!this.f0) || Bytes.equals(name, this.columnFamily);
} | 3.26 |
hbase_SingleColumnValueFilter_toByteArray_rdh | /**
* Returns The filter serialized using pb
*/
@Override
public byte[] toByteArray() {
return convert().toByteArray();
} | 3.26 |
hbase_SingleColumnValueFilter_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this)
return true;
if (!(o instanceof SingleColumnValueFilter))
return false;
SingleColumnValueFilter other = ((SingleColumnValueFilter) (o));
return ((((Bytes.equals(this.m0(), other.m0()) && Bytes.equals(this.getQualifier(), other.getQualifier())) && this.op.equals(other.op)) && this.getComparator().areSerializedFieldsEqual(other.getComparator())) && (this.getFilterIfMissing() == other.getFilterIfMissing())) && (this.getLatestVersionOnly() == other.getLatestVersionOnly());
} | 3.26 |
hbase_AdvancedScanResultConsumer_onHeartbeat_rdh | /**
* Indicate that there is a heartbeat message but we have not cumulated enough cells to call
* {@link #onNext(Result[], ScanController)}.
* <p>
* Note that this method will always be called when RS returns something to us but we do not have
* enough cells to call {@link #onNext(Result[], ScanController)}. Sometimes it may not be a
* 'heartbeat' message for RS, for example, we have a large row with many cells and size limit is
* exceeded before sending all the cells for this row. For RS it does send some data to us and the
* time limit has not been reached, but we can not return the data to client so here we call this
* method to tell client we have already received something.
* <p>
 * This method gives you a chance to terminate a slow scan operation.
*
* @param controller
* used to suspend or terminate the scan. Notice that the {@code controller}
* instance is only valid within the scope of onHeartbeat method. You can only
* call its method in onHeartbeat, do NOT store it and call it later outside
* onHeartbeat.
*/
default void onHeartbeat(ScanController controller) {
} | 3.26 |
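
A hedged sketch of using onHeartbeat to abandon a scan that has run past a deadline. The asyncConnection variable, table name, and 30-second budget are assumptions; the controller is only touched inside the callback, as the contract above requires.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AdvancedScanResultConsumer;
    import org.apache.hadoop.hbase.client.AsyncTable;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Scan;

    long deadline = System.currentTimeMillis() + 30_000L;
    AsyncTable<AdvancedScanResultConsumer> table = asyncConnection.getTable(TableName.valueOf("big_table"));
    table.scan(new Scan(), new AdvancedScanResultConsumer() {
      @Override
      public void onNext(Result[] results, ScanController controller) {
        // consume results here
      }

      @Override
      public void onHeartbeat(ScanController controller) {
        if (System.currentTimeMillis() > deadline) {
          controller.terminate(); // give up on the slow scan instead of waiting for more cells
        }
      }

      @Override
      public void onError(Throwable error) {
        // surface or log the failure
      }

      @Override
      public void onComplete() {
        // scan finished normally
      }
    });
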
hbase_Query_getACL_rdh | /**
* Returns The serialized ACL for this operation, or null if none
*/
public byte[] getACL() {
return getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL);
} | 3.26 |
hbase_Query_setAuthorizations_rdh | /**
* Sets the authorizations to be used by this Query
*/
public Query setAuthorizations(Authorizations authorizations) {
this.setAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY, ProtobufUtil.toAuthorizations(authorizations).toByteArray());
return this;
} | 3.26 |
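
A hedged sketch of passing visibility labels with a read; the labels are invented, and the calling user must actually be authorized for them to take effect.

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.security.visibility.Authorizations;

    Scan scan = new Scan();
    scan.setAuthorizations(new Authorizations("finance", "audit"));
    // only cells whose visibility expressions are satisfied by these labels come back
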
hbase_Query_getReplicaId_rdh | /**
* Returns region replica id where Query will fetch data from.
*
* @return region replica id or -1 if not set.
*/
public int getReplicaId() {
return this.targetReplicaId;
} | 3.26 |
hbase_Query_setConsistency_rdh | /**
* Sets the consistency level for this operation
*
* @param consistency
* the consistency level
*/
public Query setConsistency(Consistency consistency) {
this.consistency = consistency;
return this;
} | 3.26 |
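
A hedged sketch of a timeline-consistent read, assuming an open org.apache.hadoop.hbase.client.Table named table (IOException handling omitted); the stale check is how callers typically detect that a secondary replica answered.

    import org.apache.hadoop.hbase.client.Consistency;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.Bytes;

    Get get = new Get(Bytes.toBytes("row-42"));
    get.setConsistency(Consistency.TIMELINE); // allow any region replica, not just the primary
    Result result = table.get(get);
    if (result.isStale()) {
      // served by a secondary replica; the data may lag the primary
    }
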
hbase_Query_getIsolationLevel_rdh | /**
* Returns The isolation level of this query. If no isolation level was set for this query object,
* then it returns READ_COMMITTED.
*/
public IsolationLevel getIsolationLevel() {
byte[] attr = getAttribute(ISOLATION_LEVEL);
return attr == null ? IsolationLevel.READ_COMMITTED : IsolationLevel.fromBytes(attr);
} | 3.26 |
hbase_Query_setLoadColumnFamiliesOnDemand_rdh | /**
* Set the value indicating whether loading CFs on demand should be allowed (cluster default is
* false). On-demand CF loading doesn't load column families until necessary, e.g. if you filter
* on one column, the other column family data will be loaded only for the rows that are included
* in result, not all rows like in normal case. With column-specific filters, like
* SingleColumnValueFilter w/filterIfMissing == true, this can deliver huge perf gains when
* there's a cf with lots of data; however, it can also lead to some inconsistent results, as
 * follows:
 * - if someone does a concurrent update to both column families in question you may get a row
 * that never existed, e.g. for { rowKey = 5, { cat_videos => 1 }, { video => "my cat" } },
 * someone puts rowKey 5 with { cat_videos => 0 }, { video => "my dog" }, and a concurrent
 * scan filtering on "cat_videos == 1" can get { rowKey = 5, { cat_videos => 1 }, { video =>
 * "my dog" } }.
 * - if there's a concurrent split and you have more than 2 column families, some rows may be
 * missing some column families.
*/
public Query setLoadColumnFamiliesOnDemand(boolean value) {
this.loadColumnFamiliesOnDemand = value;
return this;
} | 3.26 |
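
A hedged sketch pairing on-demand loading with an essential-family filter, which is the combination the comment above describes; the family and qualifier names are invented.

    import org.apache.hadoop.hbase.CompareOperator;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    SingleColumnValueFilter filter = new SingleColumnValueFilter(
        Bytes.toBytes("meta"), Bytes.toBytes("flag"), CompareOperator.EQUAL, Bytes.toBytes("1"));
    filter.setFilterIfMissing(true); // makes "meta" the only essential family
    Scan scan = new Scan()
        .setFilter(filter)
        .setLoadColumnFamiliesOnDemand(true); // other, bulkier families load only for rows that match
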
hbase_Query_getAuthorizations_rdh | /**
 * Returns The authorizations this Query is associated with.
*/
public Authorizations getAuthorizations() throws DeserializationException {
  byte[] authorizationsBytes = this.getAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY);
if (authorizationsBytes == null)
return null;
return ProtobufUtil.toAuthorizations(authorizationsBytes);
} | 3.26 |
hbase_Query_getConsistency_rdh | /**
* Returns the consistency level for this operation
*
* @return the consistency level
*/
public Consistency getConsistency() {
return consistency;
} | 3.26 |
hbase_Query_setACL_rdh | /**
* Set the ACL for the operation.
*
* @param perms
* A map of permissions for a user or users
*/
public Query setACL(Map<String, Permission> perms) {
ListMultimap<String, Permission> permMap = ArrayListMultimap.create();
for (Map.Entry<String, Permission> entry : perms.entrySet()) {
permMap.put(entry.getKey(), entry.getValue());
}
setAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL, AccessControlUtil.toUsersAndPermissions(permMap).toByteArray());
return this;
} | 3.26 |
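
A hedged sketch of attaching per-operation permissions; the user names are invented, and the Permission(Action...) constructor is assumed here (newer releases steer toward a builder instead).

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.security.access.Permission;

    Map<String, Permission> perms = new HashMap<>();
    perms.put("alice", new Permission(Permission.Action.READ));
    perms.put("bob", new Permission(Permission.Action.READ, Permission.Action.WRITE));
    Scan scan = new Scan();
    scan.setACL(perms); // serialized into the operation's ACL attribute, as in the method above
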
hbase_Query_getColumnFamilyTimeRange_rdh | /**
* Returns A map of column families to time ranges
 */
public Map<byte[], TimeRange> getColumnFamilyTimeRange() {
return this.colFamTimeRangeMap;
} | 3.26 |
hbase_Query_setIsolationLevel_rdh | /**
* Set the isolation level for this query. If the isolation level is set to READ_UNCOMMITTED, then
* this query will return data from committed and uncommitted transactions. If the isolation level
* is set to READ_COMMITTED, then this query will return data from committed transactions only. If
 * an isolation level is not explicitly set on a Query, then it is assumed to be READ_COMMITTED.
*
* @param level
* IsolationLevel for this query
*/
public Query setIsolationLevel(IsolationLevel level) {
setAttribute(ISOLATION_LEVEL, level.toBytes());
return this;
} | 3.26 |
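
A hedged sketch contrasting the two isolation levels on a scan; READ_COMMITTED is what you get without calling this at all.

    import org.apache.hadoop.hbase.client.IsolationLevel;
    import org.apache.hadoop.hbase.client.Scan;

    Scan dirtyScan = new Scan();
    dirtyScan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED); // may see data from in-flight mutations
    Scan defaultScan = new Scan(); // no call needed: behaves as READ_COMMITTED
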
hbase_Query_doLoadColumnFamiliesOnDemand_rdh | /**
* Get the logical value indicating whether on-demand CF loading should be allowed.
*/
public boolean doLoadColumnFamiliesOnDemand() {
return (this.loadColumnFamiliesOnDemand != null) && this.loadColumnFamiliesOnDemand;
} | 3.26 |
hbase_Query_setColumnFamilyTimeRange_rdh | /**
* Get versions of columns only within the specified timestamp range, [minStamp, maxStamp) on a
 * per CF basis. Note, default maximum versions to return is 1. If your time range spans more than
* one version and you want all versions returned, up the number of versions beyond the default.
* Column Family time ranges take precedence over the global time range.
*
* @param cf
* the column family for which you want to restrict
* @param minStamp
* minimum timestamp value, inclusive
* @param maxStamp
* maximum timestamp value, exclusive
*/
public Query setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) {
colFamTimeRangeMap.put(cf, TimeRange.between(minStamp, maxStamp));
return this;
} | 3.26 |
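
A hedged sketch of a per-family time window layered on top of the global range; the family name and the one-hour window are illustrative.

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    long now = System.currentTimeMillis();
    Scan scan = new Scan();
    // keep only the last hour of the "events" family; other families still follow the global time range
    scan.setColumnFamilyTimeRange(Bytes.toBytes("events"), now - 3_600_000L, now);
    scan.readVersions(5); // the window may cover several versions, so raise the default of 1
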