name | code_snippet | score
---|---|---|
hbase_ProcedureStore_cleanup_rdh | /**
 * Will be called by the framework to give the store a chance to do some periodic clean-up work.
 * <p/>
 * Notice that this is for periodic clean-up work, not for the clean-up after close; if you want
 * to close the store, just call the {@link #stop(boolean)} method above.
 */
default void cleanup() {
} | 3.26 |
hbase_ProcedureStore_postSync_rdh | /**
 * Triggered when the store sync is completed.
 */
default void postSync() {
} | 3.26 |
hbase_Compactor_postCompactScannerOpen_rdh | /**
* Calls coprocessor, if any, to create scanners - after normal scanner creation.
*
* @param request
* Compaction request.
* @param scanType
* Scan type.
* @param scanner
* The default scanner created for compaction.
* @return Scanner scanner to use (usually the default); null if compaction should not proceed.
*/
private InternalScanner postCompactScannerOpen(CompactionRequestImpl request, ScanType scanType, InternalScanner scanner, User user) throws IOException {
if (store.getCoprocessorHost() == null) {
return scanner;
}
return store.getCoprocessorHost().preCompact(store, scanner, scanType, request.getTracker(), request, user);
} | 3.26 |
hbase_Compactor_getFileDetails_rdh | /**
* Extracts some details about the files to compact that are commonly needed by compactors.
*
* @param filesToCompact
* Files.
* @param allFiles
* Whether all files are included for compaction
 * @param major
 * Whether this is a major compaction
* @return The result.
*/
private FileDetails getFileDetails(Collection<HStoreFile> filesToCompact, boolean allFiles, boolean major) throws IOException {
FileDetails fd = new FileDetails();
long oldestHFileTimestampToKeepMVCC = EnvironmentEdgeManager.currentTime() - ((((1000L * 60) * 60) * 24) * this.keepSeqIdPeriod);
for (HStoreFile file : filesToCompact) {
if (allFiles && (file.getModificationTimestamp() < oldestHFileTimestampToKeepMVCC)) {
// when isAllFiles is true, all files are compacted so we can calculate the smallest
// MVCC value to keep
if (fd.minSeqIdToKeep < file.getMaxMemStoreTS()) {
fd.minSeqIdToKeep = file.getMaxMemStoreTS();
}
}
long seqNum = file.getMaxSequenceId();
fd.maxSeqId = Math.max(fd.maxSeqId, seqNum);
StoreFileReader r = file.getReader();
if (r == null) {
LOG.warn("Null reader for " + file.getPath());
continue;
}
// NOTE: use getEntries when compacting instead of getFilterEntries, otherwise under-sized
// blooms can cause progress to be miscalculated or if the user switches bloom
// type (e.g. from ROW to ROWCOL)
long keyCount = r.getEntries();
fd.maxKeyCount += keyCount;
// calculate the latest MVCC readpoint in any of the involved store files
Map<byte[], byte[]> fileInfo = r.loadFileInfo();
// calculate the total size of the compacted files
fd.totalCompactedFilesSize += r.length();
byte[] tmp = null;
// Get and set the real MVCCReadpoint for bulk loaded files, which is the
// SeqId number.
if (r.isBulkLoaded()) {
fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, r.getSequenceID());
} else {
tmp = fileInfo.get(Writer.MAX_MEMSTORE_TS_KEY);
if (tmp != null) {
fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, Bytes.toLong(tmp));
}
}
tmp = fileInfo.get(HFileInfo.MAX_TAGS_LEN);
if (tmp != null) {
fd.maxTagsLength = Math.max(fd.maxTagsLength, Bytes.toInt(tmp));
}
// If required, calculate the earliest put timestamp of all involved storefiles.
// This is used to remove family delete marker during compaction.
long earliestPutTs = 0;
if (allFiles) {
tmp = fileInfo.get(EARLIEST_PUT_TS);
if (tmp == null) {
// There's a file with no information, must be an old one
// assume we have very old puts
fd.earliestPutTs = earliestPutTs = PrivateConstants.OLDEST_TIMESTAMP;
} else {
earliestPutTs = Bytes.toLong(tmp);
fd.earliestPutTs = Math.min(fd.earliestPutTs, earliestPutTs);
}
}
tmp = fileInfo.get(TIMERANGE_KEY);
fd.f0 = (tmp == null) ? HConstants.LATEST_TIMESTAMP : TimeRangeTracker.parseFrom(tmp).getMax();
LOG.debug("Compacting {}, keycount={}, bloomtype={}, size={}, " + "encoding={}, compression={}, seqNum={}{}", file.getPath() == null ? null : file.getPath().getName(), keyCount, r.getBloomFilterType().toString(), TraditionalBinaryPrefix.long2String(r.length(), "", 1), r.getHFileReader().getDataBlockEncoding(), major ? majorCompactionCompression : minorCompactionCompression, seqNum, allFiles ? ", earliestPutTs=" +
earliestPutTs : "");
}
return fd;
} | 3.26 |
hbase_Compactor_getProgress_rdh | /**
* Return the aggregate progress for all currently active compactions.
 */
public CompactionProgress getProgress() {
synchronized (progressSet) {
long totalCompactingKVs = 0;
long currentCompactedKVs = 0;
long totalCompactedSize = 0;
for (CompactionProgress progress : progressSet) {
totalCompactingKVs += progress.totalCompactingKVs;
currentCompactedKVs += progress.currentCompactedKVs;
totalCompactedSize += progress.totalCompactedSize;
}
CompactionProgress result = new CompactionProgress(totalCompactingKVs);
result.currentCompactedKVs = currentCompactedKVs;
result.totalCompactedSize = totalCompactedSize;
return result;
}
} | 3.26 |
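The `getProgress()` row above sums per-compaction counters under the lock that guards the backing set. Below is a minimal, self-contained sketch of that aggregation pattern, using a hypothetical `Progress` class rather than HBase's `CompactionProgress`.

```java
import java.util.HashSet;
import java.util.Set;

public class ProgressAggregator {
  static class Progress {
    long totalCompactingKVs;
    long currentCompactedKVs;
    long totalCompactedSize;

    Progress(long totalCompactingKVs) {
      this.totalCompactingKVs = totalCompactingKVs;
    }
  }

  private final Set<Progress> progressSet = new HashSet<>();

  public void register(Progress p) {
    synchronized (progressSet) {
      progressSet.add(p);
    }
  }

  /** Sum all active progress objects into one snapshot, mirroring getProgress() above. */
  public Progress aggregate() {
    synchronized (progressSet) {
      Progress result = new Progress(0);
      for (Progress p : progressSet) {
        result.totalCompactingKVs += p.totalCompactingKVs;
        result.currentCompactedKVs += p.currentCompactedKVs;
        result.totalCompactedSize += p.totalCompactedSize;
      }
      return result;
    }
  }
}
```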
hbase_Compactor_createScanner_rdh | /**
*
 * @param store
 * The store.
 * @param scanInfo
 * The scan info.
 * @param scanners
 * Store file scanners.
* @param smallestReadPoint
* Smallest MVCC read point.
* @param earliestPutTs
* Earliest put across all files.
* @param dropDeletesFromRow
* Drop deletes starting with this row, inclusive. Can be null.
* @param dropDeletesToRow
* Drop deletes ending with this row, exclusive. Can be null.
* @return A compaction scanner.
*/
protected InternalScanner createScanner(HStore store, ScanInfo scanInfo, List<StoreFileScanner> scanners, long smallestReadPoint, long earliestPutTs, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException {
return new StoreScanner(store, scanInfo, scanners, smallestReadPoint, earliestPutTs, dropDeletesFromRow, dropDeletesToRow);
} | 3.26 |
hbase_Compactor_createWriter_rdh | /**
* Creates a writer for a new file.
*
* @param fd
* The file details.
* @return Writer for a new StoreFile
* @throws IOException
* if creation failed
*/
protected final StoreFileWriter createWriter(FileDetails fd, boolean shouldDropBehind, boolean major, Consumer<Path> writerCreationTracker) throws IOException {
// When all MVCC readpoints are 0, don't write them.
// See HBASE-8166, HBASE-12600, and HBASE-13389.
return store.getStoreEngine().createWriter(createParams(fd, shouldDropBehind, major, writerCreationTracker));
} | 3.26 |
hbase_Compactor_createFileScanners_rdh | /**
* Creates file scanners for compaction.
*
* @param filesToCompact
* Files.
* @return Scanners.
*/
private List<StoreFileScanner> createFileScanners(Collection<HStoreFile> filesToCompact, long smallestReadPoint, boolean useDropBehind) throws IOException {
return StoreFileScanner.getScannersForCompaction(filesToCompact, useDropBehind, smallestReadPoint);
} | 3.26 |
hbase_Compactor_performCompaction_rdh | /**
* Performs the compaction.
*
* @param fd
* FileDetails of cell sink writer
* @param scanner
* Where to read from.
* @param writer
* Where to write to.
* @param smallestReadPoint
* Smallest read point.
* @param cleanSeqId
 * When true, remove the seqId (formerly mvcc) value which is <= smallestReadPoint
* @param request
* compaction request.
* @param progress
* Progress reporter.
* @return Whether compaction ended; false if it was interrupted for some reason.
*/
protected boolean performCompaction(FileDetails fd, InternalScanner scanner, CellSink writer, long smallestReadPoint, boolean cleanSeqId, ThroughputController throughputController, CompactionRequestImpl request, CompactionProgress progress) throws IOException {
assert writer instanceof ShipperListener;
long bytesWrittenProgressForLog = 0;
long bytesWrittenProgressForShippedCall = 0;
// Since scanner.next() can return 'false' but still be delivering data,
// we have to use a do/while loop.
List<Cell> cells = new ArrayList<>();
long currentTime = EnvironmentEdgeManager.currentTime();
long lastMillis = 0;
if (LOG.isDebugEnabled()) {
lastMillis = currentTime;
}
CloseChecker closeChecker = new CloseChecker(conf, currentTime);
String compactionName = ThroughputControlUtil.getNameForThrottling(store, "compaction");
long now = 0;
boolean hasMore;
ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax).setSizeLimit(LimitScope.BETWEEN_CELLS, Long.MAX_VALUE, Long.MAX_VALUE, compactScannerSizeLimit).build();
throughputController.start(compactionName);
Shipper v31 = (scanner instanceof Shipper) ? ((Shipper) (scanner)) : null;
long shippedCallSizeLimit = ((long) (request.getFiles().size())) * this.store.getColumnFamilyDescriptor().getBlocksize();
try {
do {
hasMore = scanner.next(cells, scannerContext);
currentTime = EnvironmentEdgeManager.currentTime();
if (LOG.isDebugEnabled()) {
now = currentTime;
}
if (closeChecker.isTimeLimit(store, currentTime)) {
progress.cancel();
return false;
}
// output to writer:
Cell lastCleanCell = null;
long lastCleanCellSeqId = 0;
for (Cell c : cells) {
if (cleanSeqId && (c.getSequenceId() <= smallestReadPoint)) {
lastCleanCell = c;
lastCleanCellSeqId = c.getSequenceId();
PrivateCellUtil.setSequenceId(c, 0);
} else {
lastCleanCell = null;
lastCleanCellSeqId = 0;
}
writer.append(c);
int len = c.getSerializedSize();
++progress.currentCompactedKVs;
progress.totalCompactedSize += len;
bytesWrittenProgressForShippedCall += len;
if (LOG.isDebugEnabled()) {
bytesWrittenProgressForLog += len;
}
throughputController.control(compactionName, len);
if (closeChecker.isSizeLimit(store, len)) {
progress.cancel();
return false;
}
}
if ((v31 != null) && (bytesWrittenProgressForShippedCall > shippedCallSizeLimit)) {
if (lastCleanCell != null) {
// HBASE-16931, set back sequence id to avoid affecting scan order unexpectedly.
// ShipperListener will do a clone of the last cells it refers to, so we need to set back the
// sequence id before ShipperListener.beforeShipped
PrivateCellUtil.setSequenceId(lastCleanCell, lastCleanCellSeqId);
}
// Clone the cells that are in the writer so that they are freed of references,
// if they are holding any.
((ShipperListener) (writer)).beforeShipped();
// The SHARED block references, being read for compaction, will be kept in prevBlocks
// list(See HFileScannerImpl#prevBlocks). In case of scan flow, after each set of cells
// being returned to client, we will call shipped() which can clear this list. Here by
// we are doing the similar thing. In between the compaction (after every N cells
// written with collective size of 'shippedCallSizeLimit') we will call shipped which
// may clear prevBlocks list.
v31.shipped();
bytesWrittenProgressForShippedCall = 0;
}
if (lastCleanCell != null) {
// HBASE-16931, set back sequence id to avoid affecting scan order unexpectedly
PrivateCellUtil.setSequenceId(lastCleanCell, lastCleanCellSeqId);
}
// Log the progress of long running compactions every minute if
// logging at DEBUG level
if (LOG.isDebugEnabled()) {
if ((now - lastMillis) >= COMPACTION_PROGRESS_LOG_INTERVAL) {
String rate = String.format("%.2f", (bytesWrittenProgressForLog / 1024.0) / ((now - lastMillis) / 1000.0));
LOG.debug("Compaction progress: {} {}, rate={} KB/sec, throughputController is {}", compactionName, progress, rate, throughputController);
lastMillis = now;
bytesWrittenProgressForLog = 0;
}
}
cells.clear();
} while (hasMore);
} catch (InterruptedException e) {
progress.cancel();
throw new InterruptedIOException("Interrupted while control throughput of compacting " + compactionName);
} finally {
// Clone the last cell in the finally block because the writer will append the last cell when
// committing. If we don't clone here, then once the scanner gets closed the memory of the last
// cell will be released. (HBASE-22582)
((ShipperListener) (writer)).beforeShipped();
throughputController.finish(compactionName);
}
progress.complete();
return true;
} | 3.26 |
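The core of `performCompaction` above is a batched do/while read loop: `scanner.next()` can return false while still delivering a final batch, so the batch must be drained before the loop exits. Below is a simplified, self-contained sketch of that loop shape; the `Scanner` and `Sink` interfaces are hypothetical stand-ins for `InternalScanner` and `CellSink`.

```java
import java.util.ArrayList;
import java.util.List;

public class BatchCopyLoop {
  interface Scanner {
    /** Fills 'batch' with up to 'limit' items; returns false when no more data will follow. */
    boolean next(List<String> batch, int limit);
  }

  interface Sink {
    void append(String item);
  }

  public static long copy(Scanner scanner, Sink sink, int batchLimit) {
    List<String> batch = new ArrayList<>();
    long written = 0;
    boolean hasMore;
    do {
      hasMore = scanner.next(batch, batchLimit);
      for (String item : batch) {   // drain the batch even when hasMore is false
        sink.append(item);
        written++;
      }
      batch.clear();
    } while (hasMore);
    return written;
  }
}
```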
hbase_MetricsRegionServer_incrementRegionSizeReportingChoreTime_rdh | /**
*
* @see MetricsRegionServerQuotaSource#incrementRegionSizeReportingChoreTime(long)
*/
public void incrementRegionSizeReportingChoreTime(long time) {
quotaSource.incrementRegionSizeReportingChoreTime(time);
} | 3.26 |
hbase_MetricsRegionServer_incrementNumRegionSizeReportsSent_rdh | /**
*
* @see MetricsRegionServerQuotaSource#incrementNumRegionSizeReportsSent(long)
*/
public void incrementNumRegionSizeReportsSent(long numReportsSent) {
quotaSource.incrementNumRegionSizeReportsSent(numReportsSent);
} | 3.26 |
hbase_CellModel_setColumn_rdh | /**
*
* @param column
* the column to set
*/
public void setColumn(byte[] column)
{
this.column = column;
} | 3.26 |
hbase_CellModel_getTimestamp_rdh | /**
* Returns the timestamp
*/
public long getTimestamp() {
return timestamp;
} | 3.26 |
hbase_CellModel_hasUserTimestamp_rdh | /**
* Returns true if the timestamp property has been specified by the user
*/
public boolean hasUserTimestamp() {
return timestamp != HConstants.LATEST_TIMESTAMP;
} | 3.26 |
hbase_CellModel_getValue_rdh | /**
* Returns the value
*/
public byte[] getValue() {
return value;
} | 3.26 |
hbase_CellModel_setValue_rdh | /**
*
* @param value
* the value to set
*/
public void setValue(byte[] value) {
this.value = value;
} | 3.26 |
hbase_CellModel_getColumn_rdh | /**
* Returns the column
*/
public byte[] getColumn() {
return column;
} | 3.26 |
hbase_CellModel_setTimestamp_rdh | /**
*
* @param timestamp
* the timestamp to set
*/
public void setTimestamp(long timestamp) {
this.timestamp = timestamp;
} | 3.26 |
hbase_MetricRegistriesLoader_load_rdh | /**
* Creates a {@link MetricRegistries} instance using the corresponding {@link MetricRegistries}
* available to {@link ServiceLoader} on the classpath. If no instance is found, then default
* implementation will be loaded.
*
* @return A {@link MetricRegistries} implementation.
*/
static MetricRegistries load(List<MetricRegistries> availableImplementations) {
if (availableImplementations.size() == 1) {
// One and only one instance -- what we want/expect
MetricRegistries impl = availableImplementations.get(0);
LOG.info("Loaded MetricRegistries " + impl.getClass());
return impl;
} else if (availableImplementations.isEmpty()) {
try {
return ReflectionUtils.newInstance(((Class<MetricRegistries>) (Class.forName(defaultClass))));
} catch (ClassNotFoundException e) {
throw new RuntimeException(e);
}
} else {
// Tell the user they're doing something wrong, and choose the first impl.
StringBuilder sb = new StringBuilder();
for (MetricRegistries factory : availableImplementations) {
if (sb.length() > 0) {
sb.append(", ");
}
sb.append(factory.getClass());
}
LOG.warn((("Found multiple MetricRegistries implementations: " + sb) + ". Using first found implementation: ") + availableImplementations.get(0));
return availableImplementations.get(0);
}
} | 3.26 |
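The `load` method above follows the standard `ServiceLoader` discovery pattern: use the single registered implementation, fall back to a default when none is found, and pick the first when several are found. Here is a hedged, generic sketch of that pattern; the `Spi` interface and the `defaultImpl` parameter are illustrative assumptions, not HBase API.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.ServiceLoader;

public class SpiLoader {
  public interface Spi {
  }

  public static Spi load(Class<? extends Spi> defaultImpl) {
    List<Spi> found = new ArrayList<>();
    ServiceLoader.load(Spi.class).forEach(found::add);
    if (found.size() == 1) {
      return found.get(0);                                  // exactly one registered implementation
    } else if (found.isEmpty()) {
      try {
        return defaultImpl.getConstructor().newInstance();  // fall back to the default
      } catch (ReflectiveOperationException e) {
        throw new RuntimeException(e);
      }
    } else {
      // multiple implementations: warn (omitted here) and choose the first one found
      return found.get(0);
    }
  }
}
```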
hbase_FavoredNodesManager_filterNonFNApplicableRegions_rdh | /**
 * Filter and return regions for which favored nodes are not applicable.
 *
 * @return set of regions for which favored nodes are not applicable
*/
public static Set<RegionInfo> filterNonFNApplicableRegions(Collection<RegionInfo> regions) {
return regions.stream().filter(r -> !isFavoredNodeApplicable(r)).collect(Collectors.toSet());
} | 3.26 |
hbase_FavoredNodesManager_isFavoredNodeApplicable_rdh | /**
* Favored nodes are not applicable for system tables. We will use this to check before we apply
* any favored nodes logic on a region.
*/
public static boolean isFavoredNodeApplicable(RegionInfo regionInfo) {
return !regionInfo.getTable().isSystemTable();
} | 3.26 |
hbase_FavoredNodesManager_getReplicaLoad_rdh | /**
* Get the replica count for the servers provided.
* <p/>
* For each server, replica count includes three counts for primary, secondary and tertiary. If a
 * server is the primary favored node for 10 regions, secondary for 5 and tertiary for 1, then the
 * list would be [10, 5, 1]. If a server newly added to the cluster is not a favored node for any
 * region, the replica count would be [0, 0, 0].
*/
public synchronized Map<ServerName, List<Integer>> getReplicaLoad(List<ServerName> servers) {
Map<ServerName, List<Integer>> result = Maps.newHashMap();
for (ServerName sn : servers) {
ServerName v16 = ServerName.valueOf(sn.getAddress(), NON_STARTCODE);
List<Integer> countList = Lists.newArrayList();
if (primaryRSToRegionMap.containsKey(v16)) {
countList.add(primaryRSToRegionMap.get(v16).size());
} else {
countList.add(0);
}
if (secondaryRSToRegionMap.containsKey(v16)) {
countList.add(secondaryRSToRegionMap.get(v16).size());
} else {
countList.add(0);
}
if (teritiaryRSToRegionMap.containsKey(v16)) {
countList.add(teritiaryRSToRegionMap.get(v16).size());
} else {
countList.add(0);
}
result.put(sn, countList);
}
return result;
} | 3.26 |
hbase_FavoredNodesManager_getFavoredNodesWithDNPort_rdh | /**
* This should only be used when sending FN information to the region servers. Instead of sending
* the region server port, we use the datanode port. This helps in centralizing the DN port logic
* in Master. The RS uses the port from the favored node list as hints.
*/
public synchronized List<ServerName> getFavoredNodesWithDNPort(RegionInfo regionInfo) {
if (getFavoredNodes(regionInfo) == null) {
return null;
}
List<ServerName> fnWithDNPort = Lists.newArrayList();
for (ServerName sn : getFavoredNodes(regionInfo)) {
fnWithDNPort.add(ServerName.valueOf(sn.getHostname(), f0, NON_STARTCODE));
}
return fnWithDNPort;
} | 3.26 |
hbase_QuotaSettings_buildSetQuotaRequestProto_rdh | /**
* Convert a QuotaSettings to a protocol buffer SetQuotaRequest. This is used internally by the
* Admin client to serialize the quota settings and send them to the master.
*/
@InterfaceAudience.Private
public static SetQuotaRequest buildSetQuotaRequestProto(final QuotaSettings settings) {
SetQuotaRequest.Builder builder = SetQuotaRequest.newBuilder();
if (settings.getUserName() != null) {
builder.setUserName(settings.getUserName());
}
if (settings.getTableName() != null) {
builder.setTableName(ProtobufUtil.toProtoTableName(settings.getTableName()));
}
if (settings.m0() != null) {
builder.setNamespace(settings.m0());
}
if (settings.getRegionServer() != null) {
builder.setRegionServer(settings.getRegionServer());
}
settings.setupSetQuotaRequest(builder);
return builder.build();
} | 3.26 |
hbase_QuotaSettings_buildFromProto_rdh | /**
 * Converts the protocol buffer request into a QuotaSettings POJO. Arbitrarily enforces that the
* request only contain one "limit", despite the message allowing multiple. The public API does
* not allow such use of the message.
*
* @param request
* The protocol buffer request.
* @return A {@link QuotaSettings} POJO.
*/
@InterfaceAudience.Private
public static QuotaSettings buildFromProto(SetQuotaRequest request) {
String username = null;
if (request.hasUserName()) {
username = request.getUserName();
}
TableName tableName = null;
if (request.hasTableName()) {
tableName = ProtobufUtil.toTableName(request.getTableName());
}
String namespace = null;
if (request.hasNamespace()) {
namespace = request.getNamespace();
}
String regionServer = null;
if (request.hasRegionServer()) {
regionServer = request.getRegionServer();
}
if (request.hasBypassGlobals()) {
// Make sure we don't have either of the two below limits also included
if (request.hasSpaceLimit() || request.hasThrottle()) {
throw new IllegalStateException("SetQuotaRequest has multiple limits: " + TextFormat.shortDebugString(request));
}
return new QuotaGlobalsSettingsBypass(username, tableName, namespace, regionServer, request.getBypassGlobals());
} else if (request.hasSpaceLimit()) {
// Make sure we don't have the below limit as well
if (request.hasThrottle()) {
throw new IllegalStateException("SetQuotaRequests has multiple limits: " + TextFormat.shortDebugString(request));
}
// Sanity check on the pb received.
if (!request.getSpaceLimit().hasQuota()) {
throw new IllegalArgumentException("SpaceLimitRequest is missing the expected SpaceQuota.");
}
return QuotaSettingsFactory.fromSpace(tableName, namespace, request.getSpaceLimit().getQuota());
} else if (request.hasThrottle()) {
return new ThrottleSettings(username, tableName, namespace, regionServer, request.getThrottle());
} else {
throw new IllegalStateException("Unhandled SetRequestRequest state");
}
} | 3.26 |
hbase_QuotaSettings_validateQuotaTarget_rdh | /**
* Validates that settings being merged into {@code this} is targeting the same "subject", e.g.
* user, table, namespace.
*
* @param mergee
* The quota settings to be merged into {@code this}.
* @throws IllegalArgumentException
* if the subjects are not equal.
*/
void validateQuotaTarget(QuotaSettings mergee) {
if (!Objects.equals(getUserName(), mergee.getUserName())) {
throw new IllegalArgumentException("Mismatched user names on settings to merge");
}
if (!Objects.equals(getTableName(), mergee.getTableName())) {
throw new IllegalArgumentException("Mismatched table names on settings to merge");}
if (!Objects.equals(m0(), mergee.m0())) {
throw new IllegalArgumentException("Mismatched namespace on settings to merge");
}
if (!Objects.equals(getRegionServer(), mergee.getRegionServer())) {
throw new IllegalArgumentException("Mismatched region server on settings to merge");
}
} | 3.26 |
hbase_SaslClientAuthenticationProviders_m0_rdh | /**
* Returns the number of providers that have been registered.
*/
public int m0() {
return providers.size();
} | 3.26 |
hbase_SaslClientAuthenticationProviders_selectProvider_rdh | /**
* Chooses the best authentication provider and corresponding token given the HBase cluster
* identifier and the user.
*/
public Pair<SaslClientAuthenticationProvider, Token<? extends TokenIdentifier>> selectProvider(String clusterId, User clientUser) {
return selector.selectProvider(clusterId, clientUser);
} | 3.26 |
hbase_SaslClientAuthenticationProviders_addProviderIfNotExists_rdh | /**
* Adds the given {@code provider} to the set, only if an equivalent provider does not already
* exist in the set.
*/
static void addProviderIfNotExists(SaslClientAuthenticationProvider provider, HashMap<Byte, SaslClientAuthenticationProvider> providers) {
Byte code = provider.getSaslAuthMethod().getCode();
SaslClientAuthenticationProvider existingProvider = providers.get(code);
if (existingProvider != null) {
throw new RuntimeException((("Already registered authentication provider with " + code) + " ") + existingProvider.getClass());
}
providers.put(code, provider);
} | 3.26 |
hbase_SaslClientAuthenticationProviders_reset_rdh | /**
* Removes the cached singleton instance of {@link SaslClientAuthenticationProviders}.
*/
public static synchronized void reset() {
providersRef.set(null);
} | 3.26 |
hbase_SaslClientAuthenticationProviders_instantiate_rdh | /**
* Instantiates all client authentication providers and returns an instance of
* {@link SaslClientAuthenticationProviders}.
*/
static SaslClientAuthenticationProviders instantiate(Configuration conf) {
ServiceLoader<SaslClientAuthenticationProvider> loader = ServiceLoader.load(SaslClientAuthenticationProvider.class);
HashMap<Byte, SaslClientAuthenticationProvider> providerMap = new HashMap<>();
for (SaslClientAuthenticationProvider provider : loader) {
addProviderIfNotExists(provider, providerMap);
}
addExplicitProviders(conf, providerMap);
Collection<SaslClientAuthenticationProvider> providers = Collections.unmodifiableCollection(providerMap.values());
if (f0.isTraceEnabled()) {
String loadedProviders = providers.stream().map(provider -> provider.getClass().getName()).collect(Collectors.joining(", "));
f0.trace("Found SaslClientAuthenticationProviders {}", loadedProviders);
}
AuthenticationProviderSelector selector = instantiateSelector(conf, providers);
return new SaslClientAuthenticationProviders(providers, selector);
} | 3.26 |
hbase_SaslClientAuthenticationProviders_instantiateSelector_rdh | /**
* Instantiates the ProviderSelector implementation from the provided configuration.
*/
static AuthenticationProviderSelector instantiateSelector(Configuration conf, Collection<SaslClientAuthenticationProvider> providers) {
Class<? extends AuthenticationProviderSelector> clz = conf.getClass(SELECTOR_KEY, BuiltInProviderSelector.class, AuthenticationProviderSelector.class);
try {
AuthenticationProviderSelector selector = clz.getConstructor().newInstance();
selector.configure(conf, providers);
if (f0.isTraceEnabled()) {
f0.trace("Loaded ProviderSelector {}", selector.getClass());
}
return selector;
} catch (InstantiationException | IllegalAccessException | NoSuchMethodException | InvocationTargetException e) {
throw new RuntimeException((("Failed to instantiate " + clz) + " as the ProviderSelector defined by ") + SELECTOR_KEY, e);
}
} | 3.26 |
hbase_SaslClientAuthenticationProviders_getSimpleProvider_rdh | /**
* Returns the provider and token pair for SIMPLE authentication. This method is a "hack" while
* SIMPLE authentication for HBase does not flow through the SASL codepath.
*/
public Pair<SaslClientAuthenticationProvider, Token<? extends TokenIdentifier>> getSimpleProvider() {
Optional<SaslClientAuthenticationProvider> optional = providers.stream().filter(p -> p instanceof SimpleSaslClientAuthenticationProvider).findFirst();
return new Pair<>(optional.get(), null);
} | 3.26 |
hbase_SaslClientAuthenticationProviders_getInstance_rdh | /**
* Returns a singleton instance of {@link SaslClientAuthenticationProviders}.
*/
public static synchronized SaslClientAuthenticationProviders getInstance(Configuration conf) {
SaslClientAuthenticationProviders providers = providersRef.get();
if (providers == null) {
providers = instantiate(conf);
providersRef.set(providers);
}
return providers;
} | 3.26 |
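`getInstance()` and `reset()` above form a cache-and-reset singleton: an `AtomicReference` holds the instance, a synchronized factory fills it lazily, and `reset()` clears it so the next call re-instantiates. A minimal generic sketch of that pattern follows; the `Registry` type is a hypothetical placeholder.

```java
import java.util.concurrent.atomic.AtomicReference;

public class CachedSingleton {
  public static class Registry {
  }

  private static final AtomicReference<Registry> REF = new AtomicReference<>();

  public static synchronized Registry getInstance() {
    Registry instance = REF.get();
    if (instance == null) {
      instance = new Registry();   // the expensive instantiation would happen here
      REF.set(instance);
    }
    return instance;
  }

  /** Drops the cached instance; the next getInstance() call rebuilds it. */
  public static synchronized void reset() {
    REF.set(null);
  }
}
```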
hbase_SaslClientAuthenticationProviders_addExplicitProviders_rdh | /**
* Extracts and instantiates authentication providers from the configuration.
*/
static void addExplicitProviders(Configuration conf, HashMap<Byte, SaslClientAuthenticationProvider> providers) {
for (String implName : conf.getStringCollection(EXTRA_PROVIDERS_KEY)) {
Class<?> clz;
// Load the class from the config
try {
clz = Class.forName(implName);
} catch (ClassNotFoundException e) {
f0.warn("Failed to load SaslClientAuthenticationProvider {}", implName, e);
continue;
}
// Make sure it's the right type
if (!SaslClientAuthenticationProvider.class.isAssignableFrom(clz)) {
f0.warn("Ignoring SaslClientAuthenticationProvider {} because it is not an instance of" + " SaslClientAuthenticationProvider", clz);
continue;
}
// Instantiate it
SaslClientAuthenticationProvider provider;
try {
provider = ((SaslClientAuthenticationProvider) (clz.getConstructor().newInstance()));
} catch (InstantiationException | IllegalAccessException | NoSuchMethodException | InvocationTargetException e) {
f0.warn("Failed to instantiate SaslClientAuthenticationProvider {}", clz, e);
continue;
}
// Add it to our set, only if it doesn't conflict with something else we've
// already registered.
addProviderIfNotExists(provider, providers);
}
} | 3.26 |
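`addExplicitProviders` above shows a defensive reflective-loading pattern: resolve each configured class name, verify it implements the expected interface, instantiate it via the no-arg constructor, and skip (rather than fail) on any problem. A standalone sketch of just that pattern is below; the `Provider` interface is a hypothetical stand-in.

```java
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

public class ReflectiveProviderLoader {
  public interface Provider {
  }

  public static List<Provider> loadAll(Collection<String> classNames) {
    List<Provider> providers = new ArrayList<>();
    for (String name : classNames) {
      Class<?> clz;
      try {
        clz = Class.forName(name);                 // resolve the configured class name
      } catch (ClassNotFoundException e) {
        continue;                                  // the real code logs a warning and skips
      }
      if (!Provider.class.isAssignableFrom(clz)) {
        continue;                                  // wrong type: skip it
      }
      try {
        providers.add((Provider) clz.getConstructor().newInstance());
      } catch (ReflectiveOperationException e) {
        continue;                                  // could not instantiate: skip it
      }
    }
    return providers;
  }
}
```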
hbase_ExtendedCellBuilderFactory_create_rdh | /**
* Allows creating a cell with the given CellBuilderType.
*
* @param type
* the type of CellBuilder(DEEP_COPY or SHALLOW_COPY).
* @return the cell that is created
*/
public static ExtendedCellBuilder create(CellBuilderType type) {
switch (type) {
case SHALLOW_COPY :
return new IndividualBytesFieldCellBuilder();
case DEEP_COPY :
return new KeyValueBuilder();
default :
throw new UnsupportedOperationException(("The type:" + type) + " is unsupported");
}
} | 3.26 |
hbase_RegionRemoteProcedureBase_reportTransition_rdh | // should be called with RegionStateNode locked, to avoid race with the execute method below
void reportTransition(MasterProcedureEnv env, RegionStateNode regionNode, ServerName serverName, TransitionCode transitionCode, long seqId) throws IOException {
if (state != RegionRemoteProcedureBaseState.REGION_REMOTE_PROCEDURE_DISPATCH) {
// should be a retry
return;
}
if (!targetServer.equals(serverName)) {
throw new UnexpectedStateException((((((("Received report from " + serverName) + ", expected ") + targetServer) + ", ") + regionNode) + ", proc=") + this);
}
checkTransition(regionNode, transitionCode, seqId);
// this state means we have received the report from the RS; it does not mean the result is
// fine, as we may have received a FAILED_OPEN.
this.state = RegionRemoteProcedureBaseState.REGION_REMOTE_PROCEDURE_REPORT_SUCCEED;
this.transitionCode = transitionCode;
this.seqId = seqId;
// Persist the transition code and openSeqNum(if provided).
// We should not update the hbase:meta directly as this may cause races when master restarts,
// as the old active master may incorrectly report back to RS and cause the new master to hang
// on a OpenRegionProcedure forever. See HBASE-22060 and HBASE-22074 for more details.
boolean succ = false;
try {
persistAndWake(env, regionNode);
succ = true;
} finally {
if (!succ) {
this.state = RegionRemoteProcedureBaseState.REGION_REMOTE_PROCEDURE_DISPATCH;
this.transitionCode = null;
this.seqId = HConstants.NO_SEQNUM;
}
}
try {
updateTransitionWithoutPersistingToMeta(env, regionNode, transitionCode, seqId);
} catch (IOException e) {
throw new AssertionError("should not happen", e);
}
} | 3.26 |
hbase_RegionRemoteProcedureBase_persistAndWake_rdh | // A bit strange but the procedure store will throw RuntimeException if we can not persist the
// state, so upper layer should take care of this...
private void persistAndWake(MasterProcedureEnv env, RegionStateNode regionNode) {
env.getMasterServices().getMasterProcedureExecutor().getStore().update(this);
regionNode.getProcedureEvent().wake(env.getProcedureScheduler());
} | 3.26 |
hbase_UnsafeAccess_getAsLong_rdh | /**
* Reads bytes at the given offset as a long value.
*
* @return long value at offset
*/
private static long getAsLong(ByteBuffer buf, int offset) {
if (buf.isDirect()) {
return HBasePlatformDependent.getLong(directBufferAddress(buf) + offset);
}
return HBasePlatformDependent.getLong(buf.array(), (BYTE_ARRAY_BASE_OFFSET + buf.arrayOffset()) + offset);
} | 3.26 |
hbase_UnsafeAccess_toLong_rdh | /**
* Reads a long value at the given Object's offset considering it was written in big-endian
* format.
*
* @return long value at offset
*/
public static long toLong(Object ref, long offset) {
if (LITTLE_ENDIAN) {
return Long.reverseBytes(HBasePlatformDependent.getLong(ref, offset));
}
return HBasePlatformDependent.getLong(ref, offset);
} | 3.26 |
hbase_UnsafeAccess_m0_rdh | /**
* Converts a byte array to a long value considering it was written in big-endian format.
*
* @param bytes
* byte array
* @param offset
* offset into array
* @return the long value
*/
public static long m0(byte[] bytes, int offset) {
if (LITTLE_ENDIAN) {
return Long.reverseBytes(HBasePlatformDependent.getLong(bytes, offset + BYTE_ARRAY_BASE_OFFSET));
} else {
return HBasePlatformDependent.getLong(bytes, offset + BYTE_ARRAY_BASE_OFFSET);
}
} | 3.26 |
hbase_UnsafeAccess_toInt_rdh | /**
 * Reads an int value at the given Object's offset considering it was written in big-endian format.
*
* @return int value at offset
*/
public static int toInt(Object ref, long offset) {
if (LITTLE_ENDIAN) {
return Integer.reverseBytes(HBasePlatformDependent.getInt(ref, offset));
}
return HBasePlatformDependent.getInt(ref, offset);
} | 3.26 |
hbase_UnsafeAccess_putLong_rdh | /**
* Put a long value out to the specified BB position in big-endian format.
*
* @param buf
* the byte buffer
* @param offset
* position in the buffer
* @param val
* long to write out
* @return incremented offset
*/
public static int putLong(ByteBuffer buf, int offset, long val) {
if (LITTLE_ENDIAN) {
val = Long.reverseBytes(val);
}
if (buf.isDirect()) {
HBasePlatformDependent.putLong(directBufferAddress(buf) + offset, val);
} else {
HBasePlatformDependent.putLong(buf.array(), (BYTE_ARRAY_BASE_OFFSET + buf.arrayOffset()) + offset, val);
}
return offset + Bytes.SIZEOF_LONG;
} | 3.26 |
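The `putLong` row above illustrates the endian-normalization trick used throughout this class: the raw write primitive follows native byte order, so on little-endian platforms the value is byte-reversed first to produce big-endian output. Below is a self-contained sketch of the same idea, with `ByteBuffer` standing in for the `HBasePlatformDependent`/Unsafe primitives.

```java
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class BigEndianPut {
  private static final boolean LITTLE_ENDIAN = ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN;

  /** Writes 'val' at 'offset' in big-endian order and returns the incremented offset. */
  public static int putLong(ByteBuffer buf, int offset, long val) {
    if (LITTLE_ENDIAN) {
      val = Long.reverseBytes(val);        // pre-swap so a native-order write lands big-endian
    }
    buf.order(ByteOrder.nativeOrder());    // emulate the native-order raw write
    buf.putLong(offset, val);
    return offset + Long.BYTES;
  }

  public static void main(String[] args) {
    ByteBuffer buf = ByteBuffer.allocate(8);
    putLong(buf, 0, 0x0102030405060708L);
    // first byte is the most significant one, regardless of platform byte order
    System.out.println(buf.get(0) == 0x01);
  }
}
```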
hbase_UnsafeAccess_getAsInt_rdh | /**
* Reads bytes at the given offset as an int value.
*
* @return int value at offset
*/
private static int getAsInt(ByteBuffer buf, int offset) {
if (buf.isDirect()) {
return HBasePlatformDependent.getInt(directBufferAddress(buf) + offset);
}
return HBasePlatformDependent.getInt(buf.array(), (BYTE_ARRAY_BASE_OFFSET + buf.arrayOffset()) + offset);
} | 3.26 |
hbase_UnsafeAccess_copy_rdh | /**
* Copies specified number of bytes from given offset of {@code src} buffer into the {@code dest}
* buffer.
*
* @param src
* source buffer
* @param srcOffset
* offset into source buffer
* @param dest
* destination buffer
* @param destOffset
* offset into destination buffer
* @param length
* length of data to copy
*/
public static void copy(ByteBuffer src, int srcOffset, ByteBuffer dest, int destOffset, int length) {
long srcAddress;
long destAddress;
Object srcBase = null;
Object destBase = null;
if (src.isDirect()) {
srcAddress = srcOffset + directBufferAddress(src);
} else {
srcAddress = (((long) (srcOffset)) + src.arrayOffset()) + BYTE_ARRAY_BASE_OFFSET;
srcBase = src.array();
}
if (dest.isDirect()) {
destAddress = destOffset +
directBufferAddress(dest);
} else {
destAddress = (destOffset + BYTE_ARRAY_BASE_OFFSET) + dest.arrayOffset();
destBase = dest.array();
}
m1(srcBase, srcAddress, destBase, destAddress, length);
} | 3.26 |
hbase_UnsafeAccess_putInt_rdh | /**
* Put an int value out to the specified ByteBuffer offset in big-endian format.
*
* @param buf
* the ByteBuffer to write to
* @param offset
* offset in the ByteBuffer
* @param val
* int to write out
* @return incremented offset
*/
public static int putInt(ByteBuffer buf, int offset, int val) {
if (LITTLE_ENDIAN) {
val = Integer.reverseBytes(val);
}
if (buf.isDirect()) {
HBasePlatformDependent.putInt(directBufferAddress(buf) + offset, val);
} else {
HBasePlatformDependent.putInt(buf.array(), (offset + buf.arrayOffset()) + BYTE_ARRAY_BASE_OFFSET, val);
}
return offset + Bytes.SIZEOF_INT;
} | 3.26 |
hbase_UnsafeAccess_putShort_rdh | // APIs to add primitives to BBs
/**
* Put a short value out to the specified BB position in big-endian format.
*
* @param buf
* the byte buffer
* @param offset
* position in the buffer
* @param val
* short to write out
* @return incremented offset
*/
public static int putShort(ByteBuffer buf, int offset, short val) {
if (LITTLE_ENDIAN) {
val = Short.reverseBytes(val);
}
if (buf.isDirect()) {
HBasePlatformDependent.putShort(directBufferAddress(buf) + offset, val);
} else {
HBasePlatformDependent.putShort(buf.array(), (BYTE_ARRAY_BASE_OFFSET + buf.arrayOffset()) + offset, val);
}
return offset + Bytes.SIZEOF_SHORT;
} | 3.26 |
hbase_UnsafeAccess_toByte_rdh | /**
* Returns the byte at the given offset of the object
*
* @return the byte at the given offset
 */
public static byte toByte(Object ref, long offset) {
return HBasePlatformDependent.getByte(ref, offset);
} | 3.26 |
hbase_UnsafeAccess_toShort_rdh | /**
* Reads a short value at the given Object's offset considering it was written in big-endian
* format.
*
* @return short value at offset
 */
public static short toShort(Object ref, long offset) {
if (LITTLE_ENDIAN) {
return Short.reverseBytes(HBasePlatformDependent.getShort(ref, offset));
}
return HBasePlatformDependent.getShort(ref, offset);
} | 3.26 |
hbase_UnsafeAccess_putByte_rdh | /**
* Put a byte value out to the specified BB position in big-endian format.
*
* @param buf
* the byte buffer
* @param offset
* position in the buffer
* @param b
* byte to write out
* @return incremented offset
*/
public static int putByte(ByteBuffer buf, int offset, byte b) {
if (buf.isDirect()) {
HBasePlatformDependent.putByte(directBufferAddress(buf) + offset, b);
} else {
HBasePlatformDependent.putByte(buf.array(), (BYTE_ARRAY_BASE_OFFSET + buf.arrayOffset()) + offset, b);
}
return offset + 1;
} | 3.26 |
hbase_UnsafeAccess_getAsShort_rdh | /**
* Reads bytes at the given offset as a short value.
*
* @return short value at offset
*/
private static short getAsShort(ByteBuffer buf, int offset) {
if (buf.isDirect()) {
return HBasePlatformDependent.getShort(directBufferAddress(buf) + offset);
}
return HBasePlatformDependent.getShort(buf.array(), (BYTE_ARRAY_BASE_OFFSET + buf.arrayOffset()) + offset);
} | 3.26 |
hbase_ScanInfo_customize_rdh | /**
* Used by CP users for customizing max versions, ttl, keepDeletedCells, min versions, and time to
* purge deletes.
*/
ScanInfo customize(int maxVersions, long ttl, KeepDeletedCells keepDeletedCells, int minVersions, long timeToPurgeDeletes) {
return new ScanInfo(family, minVersions, maxVersions, ttl, keepDeletedCells, timeToPurgeDeletes, comparator, f0, usePread, cellsPerTimeoutCheck, parallelSeekEnabled, f1, newVersionBehavior);
} | 3.26 |
hbase_BlockingRpcCallback_run_rdh | /**
* Called on completion of the RPC call with the response object, or {@code null} in the case of
* an error.
*
* @param parameter
* the response object or {@code null} if an error occurred
*/
@Override
public void run(R parameter) {
synchronized(this) {
result = parameter;
resultSet = true;
this.notifyAll();
}
} | 3.26 |
hbase_BlockingRpcCallback_get_rdh | /**
* Returns the parameter passed to {@link #run(Object)} or {@code null} if a null value was
* passed. When used asynchronously, this method will block until the {@link #run(Object)} method
* has been called.
*
* @return the response object or {@code null} if no response was passed
*/
public synchronized R get() throws IOException {
while (!resultSet) {
try {
this.wait();
} catch (InterruptedException ie) {
InterruptedIOException exception = new InterruptedIOException(ie.getMessage());
exception.initCause(ie);
throw exception;
}
}
return result;
} | 3.26 |
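The `run`/`get` pair above is a classic wait/notify blocking callback: `run()` records the result and wakes waiters, `get()` blocks until `run()` has been called. A minimal generic sketch of that pattern follows; it is an illustration, not the HBase class itself.

```java
import java.io.InterruptedIOException;

public class BlockingCallback<R> {
  private R result;
  private boolean resultSet = false;

  /** Called by the asynchronous completion path with the result (possibly null). */
  public synchronized void run(R value) {
    result = value;
    resultSet = true;
    notifyAll();                               // wake any thread blocked in get()
  }

  /** Blocks until run() has been invoked, then returns whatever it delivered. */
  public synchronized R get() throws InterruptedIOException {
    while (!resultSet) {                       // loop guards against spurious wakeups
      try {
        wait();
      } catch (InterruptedException ie) {
        InterruptedIOException e = new InterruptedIOException(ie.getMessage());
        e.initCause(ie);
        throw e;
      }
    }
    return result;
  }
}
```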
hbase_HashTable_getCurrentKey_rdh | /**
* Get the current key
*
* @return the current key or null if there is no current key
*/
public ImmutableBytesWritable getCurrentKey() {
return key;
} | 3.26 |
hbase_HashTable_next_rdh | /**
* Read the next key/hash pair. Returns true if such a pair exists and false when at the end
* of the data.
*/
public boolean next() throws IOException {
if (cachedNext) {
cachedNext = false;
return true;
}
key = new ImmutableBytesWritable();
hash = new ImmutableBytesWritable();
while (true) {
boolean hasNext = mapFileReader.next(key, hash);
if (hasNext) {
return true;
}
hashFileIndex++;
if (hashFileIndex < TableHash.this.numHashFiles) {
mapFileReader.close();
openHashFile();
} else {
key = null;
hash = null;
return false;
}
}
} | 3.26 |
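The `next()` row above chains across multiple hash files: keep reading the current reader, and when it runs dry, close it and open the next one until none remain. Below is a simplified sketch of that chaining logic using in-memory lists in place of MapFile readers.

```java
import java.util.Iterator;
import java.util.List;

public class ChainedReader<T> {
  private final List<List<T>> sources;   // stands in for the numbered hash files
  private int sourceIndex = 0;
  private Iterator<T> current;
  private T value;

  public ChainedReader(List<List<T>> sources) {
    this.sources = sources;
    this.current = sources.isEmpty() ? null : sources.get(0).iterator();
  }

  /** Reads the next value; returns false once every source is exhausted. */
  public boolean next() {
    while (current != null) {
      if (current.hasNext()) {
        value = current.next();
        return true;
      }
      sourceIndex++;                     // current source exhausted, advance to the next one
      current = sourceIndex < sources.size() ? sources.get(sourceIndex).iterator() : null;
    }
    value = null;
    return false;
  }

  public T getCurrent() {
    return value;
  }
}
```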
hbase_HashTable_getCurrentHash_rdh | /**
* Get the current hash
*
* @return the current hash or null if there is no current hash
*/
public ImmutableBytesWritable getCurrentHash() {
return hash;
} | 3.26 |
hbase_HashTable_selectPartitions_rdh | /**
 * Choose partitions between row ranges to hash to a single output file. Selects region
 * boundaries that fall within the scan range, and groups them into the desired number of
 * partitions.
 */
void selectPartitions(Pair<byte[][], byte[][]> regionStartEndKeys) {
List<byte[]> startKeys = new ArrayList<>();
for (int i = 0; i < regionStartEndKeys.getFirst().length; i++) {
byte[] regionStartKey = regionStartEndKeys.getFirst()[i];
byte[] regionEndKey = regionStartEndKeys.getSecond()[i];
// if scan begins after this region, or starts before this region, then drop this region
// in other words:
// IF (scan begins before the end of this region
// AND scan ends before the start of this region)
// THEN include this region
if (((isTableStartRow(startRow) || isTableEndRow(regionEndKey)) || (Bytes.compareTo(startRow, regionEndKey) < 0)) && ((isTableEndRow(stopRow) || isTableStartRow(regionStartKey)) ||
(Bytes.compareTo(stopRow, regionStartKey) > 0))) {
startKeys.add(regionStartKey);
}
}
int numRegions = startKeys.size();
if (numHashFiles == 0) {
numHashFiles = numRegions / 100;
}
if (numHashFiles == 0) {
numHashFiles = 1;
}
if (numHashFiles > numRegions) {
// can't partition within regions
numHashFiles = numRegions;
}
// choose a subset of start keys to group regions into ranges
partitions = new ArrayList<>(numHashFiles - 1);
// skip the first start key as it is not a partition between ranges.
for (long i = 1; i < numHashFiles; i++) {
int splitIndex = ((int) ((numRegions * i) / numHashFiles));
partitions.add(new ImmutableBytesWritable(startKeys.get(splitIndex)));
}
} | 3.26 |
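The boundary-selection step in `selectPartitions` above reduces to picking evenly spaced start keys (skipping the first) as the boundaries between hash files. Here is a small self-contained sketch of that arithmetic, with strings standing in for the `byte[]` start keys; the default sizing rule is a simplification of the one above.

```java
import java.util.ArrayList;
import java.util.List;

public class PartitionSelector {
  public static List<String> selectPartitions(List<String> startKeys, int numHashFiles) {
    int numRegions = startKeys.size();
    if (numHashFiles == 0) {
      numHashFiles = Math.max(1, numRegions / 100);    // roughly 100 regions per hash file
    }
    numHashFiles = Math.min(numHashFiles, numRegions); // cannot partition within a region
    List<String> partitions = new ArrayList<>(numHashFiles - 1);
    // the first start key is not a boundary between ranges, so start i at 1
    for (long i = 1; i < numHashFiles; i++) {
      int splitIndex = (int) ((numRegions * i) / numHashFiles);
      partitions.add(startKeys.get(splitIndex));
    }
    return partitions;
  }

  public static void main(String[] args) {
    List<String> keys = List.of("", "b", "d", "f", "h", "j");
    System.out.println(selectPartitions(keys, 3));     // two boundaries splitting 6 regions into 3 ranges
  }
}
```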
hbase_HashTable_main_rdh | /**
* Main entry point.
*/
public static void main(String[] args) throws Exception {
int ret = ToolRunner.run(new HashTable(HBaseConfiguration.create()), args);
System.exit(ret);
} | 3.26 |
hbase_HashTable_newReader_rdh | /**
* Open a TableHash.Reader starting at the first hash at or after the given key.
*/
public Reader newReader(Configuration conf, ImmutableBytesWritable startKey) throws IOException {
return new Reader(conf, startKey);
} | 3.26 |
hbase_WALEntryStream_getPosition_rdh | /**
* Returns the position of the last Entry returned by next()
*/
public long getPosition() {
return currentPositionOfEntry;
} | 3.26 |
hbase_WALEntryStream_readNextEntryAndRecordReaderPosition_rdh | /**
* Returns whether the file is opened for writing.
*/
private Pair<WALTailingReader.State, Boolean> readNextEntryAndRecordReaderPosition() {
OptionalLong v13;
if (logQueue.getQueueSize(walGroupId) > 1) {
// if there are more than one files in queue, although it is possible that we are
// still trying to write the trailer of the file and it is not closed yet, we can
// make sure that we will not write any WAL entries to it any more, so it is safe
// to just let the upper layer try to read the whole file without limit
v13 = OptionalLong.empty();
} else {
// if there is only one file in queue, check whether it is still being written to
// we must call this before actually reading from the reader, as this method will acquire the
// rollWriteLock. This is very important, as we will enqueue the new WAL file in postLogRoll,
// and before this happens, we could have already finished closing the previous WAL file. If
// we do not acquire the rollWriteLock and return whether the current file is being written
// to, we may finish reading the previous WAL file and start to read the next one, before it
// is enqueued into the logQueue, thus lead to an empty logQueue and make the shipper think
// the queue is already ended and quit. See HBASE-28114 and related issues for more details.
// in the future, if we want to optimize the logic here, for example, do not call this method
// every time, or do not acquire rollWriteLock in the implementation of this method, we need
// to carefully review the optimized implementation
v13 = walFileLengthProvider.getLogFileSizeIfBeingWritten(currentPath);
}
WALTailingReader.Result readResult = reader.next(v13.orElse(-1));
long readerPos = readResult.getEntryEndPos();
Entry readEntry = readResult.getEntry();
if (readResult.getState() == State.NORMAL) {
f0.trace("reading entry: {} ", readEntry);
metrics.incrLogEditsRead();
metrics.incrLogReadInBytes(readerPos - currentPositionOfEntry);
// record current entry and reader position
currentEntry = readResult.getEntry();
this.currentPositionOfReader = readerPos;
} else {
f0.trace("reading entry failed with: {}", readResult.getState());
// set current entry to null
currentEntry = null;
try {
this.currentPositionOfReader = reader.getPosition();
} catch (IOException e) {
f0.warn("failed to get current position of reader", e);
if (readResult.getState().resetCompression()) {
return Pair.newPair(State.ERROR_AND_RESET_COMPRESSION, v13.isPresent());
}
}
}
return Pair.newPair(readResult.getState(), v13.isPresent());
} | 3.26 |
hbase_WALEntryStream_close_rdh | /**
* {@inheritDoc }
*/
@Override
public void close() {
closeReader();
} | 3.26 |
hbase_WALEntryStream_hasNext_rdh | /**
* Try advance the stream if there is no entry yet. See the javadoc for {@link HasNext} for more
* details about the meanings of the return values.
* <p/>
* You can call {@link #peek()} or {@link #next()} to get the actual {@link Entry} if this method
* returns {@link HasNext#YES}.
*/
public HasNext hasNext() {
if (currentEntry == null) {
return tryAdvanceEntry();
} else {
return HasNext.YES;
}
} | 3.26 |
hbase_WALEntryStream_getCurrentPath_rdh | /**
* Returns the {@link Path} of the current WAL
*/
public Path getCurrentPath() {
return currentPath;
} | 3.26 |
hbase_WALEntryStream_peek_rdh | /**
* Returns the next WAL entry in this stream but does not advance.
* <p/>
* Must call {@link #hasNext()} first before calling this method, and if you have already called
* {@link #next()} to consume the current entry, you need to call {@link #hasNext()} again to
* advance the stream before calling this method again, otherwise it will always return
* {@code null}
* <p/>
* The reason here is that, we need to use the return value of {@link #hasNext()} to tell upper
* layer to retry or not, so we can not wrap the {@link #hasNext()} call inside {@link #peek()} or
* {@link #next()} as they have their own return value.
*
* @see #hasNext()
* @see #next()
 */
public Entry peek() {
return currentEntry;
}
/**
* Returns the next WAL entry in this stream and advance the stream. Will throw
* {@link IllegalStateException} if you do not call {@link #hasNext()} before calling this method.
* Please see the javadoc of {@link #peek()} method to see why we need this.
*
* @throws IllegalStateException
* Every time you want to call this method, please call
* {@link #hasNext()} first, otherwise a
* {@link IllegalStateException} | 3.26 |
hbase_EncryptionTest_testCipherProvider_rdh | /**
* Check that the configured cipher provider can be loaded and initialized, or throw an exception.
*/
public static void testCipherProvider(final Configuration conf) throws IOException {
String providerClassName = conf.get(HConstants.CRYPTO_CIPHERPROVIDER_CONF_KEY, DefaultCipherProvider.class.getName());
Boolean result = cipherProviderResults.get(providerClassName);
if (result == null) {
try {
Encryption.getCipherProvider(conf);
cipherProviderResults.put(providerClassName, true);
} catch (Exception e) {
// most likely a RuntimeException
cipherProviderResults.put(providerClassName, false);
throw new IOException((("Cipher provider " + providerClassName) + " failed test: ") + e.getMessage(), e);
}
} else if (!result) {
throw new IOException(("Cipher provider " + providerClassName) + " previously failed test");
}
} | 3.26 |
hbase_EncryptionTest_testKeyProvider_rdh | /**
* Check that the configured key provider can be loaded and initialized, or throw an exception.
*/
public static void testKeyProvider(final Configuration conf) throws IOException {
String providerClassName = conf.get(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyStoreKeyProvider.class.getName());
Boolean result = keyProviderResults.get(providerClassName);
if (result == null) {
try {
Encryption.getKeyProvider(conf);
keyProviderResults.put(providerClassName, true);
} catch (Exception e) {
// most likely a RuntimeException
keyProviderResults.put(providerClassName, false);
throw new IOException((("Key provider " + providerClassName) + " failed test: ") + e.getMessage(), e);
}
} else if (!result) {
throw new IOException(("Key provider " + providerClassName) + " previously failed test");
}
} | 3.26 |
hbase_EncryptionTest_testEncryption_rdh | /**
* Check that the specified cipher can be loaded and initialized, or throw an exception. Verifies
* key and cipher provider configuration as a prerequisite for cipher verification. Also verifies
* if encryption is enabled globally.
*
* @param conf
* HBase configuration
* @param cipher
 * cipher algorithm to use for the column family
* @param key
* encryption key
* @throws IOException
* in case of encryption configuration error
*/
public static void testEncryption(final Configuration conf, final String cipher, byte[] key) throws IOException {
if (cipher == null) {
return;
}
if (!Encryption.isEncryptionEnabled(conf)) {
String message = String.format("Cipher %s failed test: encryption is disabled on the cluster", cipher);
throw new IOException(message);
}
testKeyProvider(conf);
testCipherProvider(conf);
Boolean result = cipherResults.get(cipher);
if (result == null) {
try {
Encryption.Context v6 = Encryption.newContext(conf);
v6.setCipher(Encryption.getCipher(conf, cipher));
if (key == null) {
// Make a random key since one was not provided
v6.setKey(v6.getCipher().getRandomKey());
} else {
// This will be a wrapped key from schema
v6.setKey(EncryptionUtil.unwrapKey(conf, conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase"), key));
}
byte[] iv = null;
if (v6.getCipher().getIvLength() > 0) {
iv = new byte[v6.getCipher().getIvLength()];
Bytes.secureRandom(iv);
}
byte[] plaintext = new byte[1024];
Bytes.random(plaintext);
ByteArrayOutputStream out = new ByteArrayOutputStream();
Encryption.encrypt(out, new ByteArrayInputStream(plaintext), v6, iv);
byte[] ciphertext = out.toByteArray();
out.reset();
Encryption.decrypt(out, new ByteArrayInputStream(ciphertext), plaintext.length, v6, iv);
byte[] test = out.toByteArray();
if (!Bytes.equals(plaintext, test)) {
throw new IOException("Did not pass encrypt/decrypt test");
}
cipherResults.put(cipher, true);
} catch (Exception e) {
cipherResults.put(cipher, false);
throw new IOException((("Cipher " + cipher) + " failed test: ") + e.getMessage(), e);
}
} else if (!result) {
throw new IOException(("Cipher " + cipher) + " previously failed test");
}
} | 3.26 |
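The heart of `testEncryption` above is an encrypt/decrypt round trip: generate a key and IV, encrypt random plaintext, decrypt it, and fail if the result does not match. Below is a hedged, standalone sketch of that self-test idea written against plain JCE (AES/CTR) rather than HBase's `Encryption`/`Cipher` abstractions.

```java
import java.security.SecureRandom;
import java.util.Arrays;
import javax.crypto.Cipher;
import javax.crypto.KeyGenerator;
import javax.crypto.SecretKey;
import javax.crypto.spec.IvParameterSpec;

public class EncryptionSelfTest {
  public static void roundTripTest() throws Exception {
    KeyGenerator keyGen = KeyGenerator.getInstance("AES");
    keyGen.init(128);
    SecretKey key = keyGen.generateKey();        // random key when none is supplied

    byte[] iv = new byte[16];
    new SecureRandom().nextBytes(iv);            // random IV, as in the test above

    byte[] plaintext = new byte[1024];
    new SecureRandom().nextBytes(plaintext);

    Cipher enc = Cipher.getInstance("AES/CTR/NoPadding");
    enc.init(Cipher.ENCRYPT_MODE, key, new IvParameterSpec(iv));
    byte[] ciphertext = enc.doFinal(plaintext);

    Cipher dec = Cipher.getInstance("AES/CTR/NoPadding");
    dec.init(Cipher.DECRYPT_MODE, key, new IvParameterSpec(iv));
    byte[] test = dec.doFinal(ciphertext);

    if (!Arrays.equals(plaintext, test)) {
      throw new IllegalStateException("Did not pass encrypt/decrypt test");
    }
  }

  public static void main(String[] args) throws Exception {
    roundTripTest();
    System.out.println("round trip ok");
  }
}
```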
hbase_MutableFastCounter_incr_rdh | /**
* Increment the value by a delta
*
* @param delta
* of the increment
*/
public void incr(long delta) {
counter.add(delta);
setChanged();
} | 3.26 |
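A minimal sketch of the counter pattern above: a `LongAdder` backs cheap concurrent increments, plus a changed flag, which is one plausible reading of the `counter.add(delta)` and `setChanged()` calls in the snippet. The flag handling below is illustrative, not the Hadoop metrics implementation.

```java
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.LongAdder;

public class FastCounter {
  private final LongAdder counter = new LongAdder();
  private final AtomicBoolean changed = new AtomicBoolean(false);

  /** Increment the value by a delta and mark the metric as changed. */
  public void incr(long delta) {
    counter.add(delta);
    changed.set(true);
  }

  public long value() {
    return counter.sum();
  }

  /** Returns true once if the counter changed since the last call (snapshot-style). */
  public boolean pollChanged() {
    return changed.getAndSet(false);
  }
}
```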
hbase_RawShort_decodeShort_rdh | /**
* Read a {@code short} value from the buffer {@code buff}.
*/
public short decodeShort(byte[] buff, int offset) {
return Bytes.toShort(buff, offset);
} | 3.26 |
hbase_RawShort_encodeShort_rdh | /**
* Write instance {@code val} into buffer {@code buff}.
*/
public int encodeShort(byte[] buff, int offset, short val) {
return Bytes.putShort(buff, offset, val);
} | 3.26 |
hbase_BulkLoadObserver_preCleanupBulkLoad_rdh | /**
* Called as part of SecureBulkLoadEndpoint.cleanupBulkLoad() RPC call. It can't bypass the
* default action, e.g., ctx.bypass() won't have effect. If you need to get the region or table
* name, get it from the <code>ctx</code> as follows:
* <code>code>ctx.getEnvironment().getRegion()</code>. Use getRegionInfo to fetch the encodedName
* and use getDescriptor() to get the tableName.
*
* @param ctx
* the environment to interact with the framework and master
*/
default void preCleanupBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx) throws IOException {
} | 3.26 |
hbase_BulkLoadObserver_prePrepareBulkLoad_rdh | /**
* Coprocessors implement this interface to observe and mediate bulk load operations. <br>
* <br>
* <h3>Exception Handling</h3> For all functions, exception handling is done as follows:
* <ul>
* <li>Exceptions of type {@link IOException} are reported back to client.</li>
* <li>For any other kind of exception:
* <ul>
* <li>If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then the
* server aborts.</li>
* <li>Otherwise, coprocessor is removed from the server and
* {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.</li>
* </ul>
* </li>
* </ul>
*/
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
public interface BulkLoadObserver {
/**
* Called as part of SecureBulkLoadEndpoint.prepareBulkLoad() RPC call. It can't bypass the
* default action, e.g., ctx.bypass() won't have effect. If you need to get the region or table
* name, get it from the <code>ctx</code> as follows:
 * <code>ctx.getEnvironment().getRegion()</code>. Use getRegionInfo to fetch the encodedName
* and use getDescriptor() to get the tableName.
*
* @param ctx
* the environment to interact with the framework and master
*/
default void prePrepareBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx) throws IOException {
} | 3.26 |
hbase_MultithreadedTableMapper_getNumberOfThreads_rdh | /**
* The number of threads in the thread pool that will run the map function.
*
* @param job
* the job
* @return the number of threads
*/
public static int getNumberOfThreads(JobContext job) {
return job.getConfiguration().getInt(NUMBER_OF_THREADS, 10);
} | 3.26 |
hbase_MultithreadedTableMapper_getMapperClass_rdh | /**
* Get the application's mapper class.
*
* @param <K2>
* the map's output key type
* @param <V2>
* the map's output value type
* @param job
* the job
* @return the mapper class to run
*/
@SuppressWarnings("unchecked")
public static <K2, V2> Class<Mapper<ImmutableBytesWritable, Result, K2, V2>> getMapperClass(JobContext job) {
return ((Class<Mapper<ImmutableBytesWritable, Result, K2, V2>>) (job.getConfiguration().getClass(MAPPER_CLASS, Mapper.class)));
} | 3.26 |
hbase_MultithreadedTableMapper_setNumberOfThreads_rdh | /**
* Set the number of threads in the pool for running maps.
*
* @param job
* the job to modify
* @param threads
* the new number of threads
*/
public static void setNumberOfThreads(Job job, int threads) {
job.getConfiguration().setInt(NUMBER_OF_THREADS, threads);
} | 3.26 |
hbase_MultithreadedTableMapper_run_rdh | /**
* Run the application's maps using a thread pool.
*/
@Override
public void run(Context context) throws IOException, InterruptedException {
f0 = context;
int numberOfThreads = getNumberOfThreads(context);
mapClass = getMapperClass(context);
if (LOG.isDebugEnabled()) {
LOG.debug(("Configuring multithread runner to use " + numberOfThreads) + " threads");
}
executor = Executors.newFixedThreadPool(numberOfThreads);
for (int i = 0; i < numberOfThreads; ++i) {
MapRunner thread = new MapRunner(context);
executor.execute(thread);
}
executor.shutdown();
while (!executor.isTerminated()) {
// wait till all the threads are done
Thread.sleep(1000);
}
} | 3.26 |
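The `run` method above uses a fixed-size thread-pool runner: submit one worker per thread, shut the pool down, and wait until all workers finish. Below is a standalone sketch of that pattern, with plain `Runnable` workers standing in for the `MapRunner` instances; it waits via `awaitTermination` rather than a sleep loop, which is a minor design variation.

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class PooledRunner {
  public static void runAll(Runnable workerTemplate, int numberOfThreads) throws InterruptedException {
    ExecutorService executor = Executors.newFixedThreadPool(numberOfThreads);
    for (int i = 0; i < numberOfThreads; i++) {
      executor.execute(workerTemplate);                 // one long-running worker per pool thread
    }
    executor.shutdown();                                // no new tasks; existing ones keep running
    while (!executor.isTerminated()) {
      executor.awaitTermination(1, TimeUnit.SECONDS);   // wait till all the workers are done
    }
  }

  public static void main(String[] args) throws InterruptedException {
    runAll(() -> System.out.println(Thread.currentThread().getName() + " done"), 4);
  }
}
```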
hbase_AbstractFSWALProvider_findArchivedLog_rdh | /**
* Find the archived WAL file path if it is not able to locate in WALs dir.
*
* @param path
* - active WAL file path
* @param conf
* - configuration
* @return archived path if exists, null - otherwise
* @throws IOException
* exception
*/
public static Path findArchivedLog(Path path, Configuration conf) throws IOException {
// If the path contains oldWALs keyword then exit early.
if (path.toString().contains(HConstants.HREGION_OLDLOGDIR_NAME)) {
return null;
}
Path walRootDir = CommonFSUtils.getWALRootDir(conf);
FileSystem fs = path.getFileSystem(conf);
// Try finding the log in old dir
Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
Path archivedLogLocation = new Path(oldLogDir, path.getName());
if (fs.exists(archivedLogLocation)) {
LOG.info((("Log " + path) + " was moved to ") + archivedLogLocation);
return archivedLogLocation;
}
ServerName serverName = getServerNameFromWALDirectoryName(path);
if (serverName == null) {
LOG.warn("Can not extract server name from path {}, " + "give up searching the separated old log dir", path);
return null;
}
// Try finding the log in separate old log dir
oldLogDir = new Path(walRootDir, new StringBuilder(HConstants.HREGION_OLDLOGDIR_NAME).append(Path.SEPARATOR).append(serverName.getServerName()).toString());
archivedLogLocation = new Path(oldLogDir, path.getName());
if (fs.exists(archivedLogLocation)) {
LOG.info((("Log " + path) + " was moved to ") + archivedLogLocation);
return archivedLogLocation;
}
LOG.error("Couldn't locate log: " +
path);
return null;
} | 3.26 |
hbase_AbstractFSWALProvider_isMetaFile_rdh | /**
* Returns True if String ends in {@link #META_WAL_PROVIDER_ID}
*/
public static boolean isMetaFile(String p) {
return (p != null) && p.endsWith(META_WAL_PROVIDER_ID);
} | 3.26 |
hbase_AbstractFSWALProvider_getLogFileSize_rdh | /**
* returns the size of rolled WAL files.
*/
public static long getLogFileSize(WAL wal) {
return ((AbstractFSWAL<?>) (wal)).getLogFileSize();
} | 3.26 |
hbase_AbstractFSWALProvider_getArchivedWALFiles_rdh | /**
* List all the old wal files for a dead region server.
* <p/>
* Initially added for supporting replication, where we need to get the wal files to replicate for
* a dead region server.
*/
public static List<Path> getArchivedWALFiles(Configuration conf, ServerName serverName, String logPrefix) throws IOException {
Path walRootDir = CommonFSUtils.getWALRootDir(conf);
FileSystem v13 = walRootDir.getFileSystem(conf);
List<Path> archivedWalFiles = new ArrayList<>();
// list both the root old wal dir and the separate old wal dir, so we will not miss any files if
// the SEPARATE_OLDLOGDIR config is changed
Path oldWalDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
try {
for (FileStatus status : v13.listStatus(oldWalDir, p -> p.getName().startsWith(logPrefix))) {
if (status.isFile()) {
archivedWalFiles.add(status.getPath());
}
}
} catch (FileNotFoundException e) {
LOG.info("Old WAL dir {} not exists", oldWalDir);
return Collections.emptyList();
}
Path separatedOldWalDir = new Path(oldWalDir, serverName.toString());
try {
for (FileStatus status : v13.listStatus(separatedOldWalDir, p -> p.getName().startsWith(logPrefix))) {
if (status.isFile()) {
archivedWalFiles.add(status.getPath());
}
}
} catch (FileNotFoundException e) {
      LOG.info("Separated old WAL dir {} does not exist", separatedOldWalDir);
}
return archivedWalFiles;
} | 3.26 |
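A hedged sketch of how a caller might gather a dead region server's archived WALs, e.g. when queueing them for replication. The server name and prefix below are made up, and the prefix is illustrative only (real WAL prefixes use the URL-encoded server name).

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;

public class ArchivedWalListingExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical dead region server whose archived WALs we want to enumerate.
    ServerName dead = ServerName.valueOf("host1.example.com", 16020, 1600000000000L);
    String logPrefix = dead.toString(); // illustrative prefix
    List<Path> archived = AbstractFSWALProvider.getArchivedWALFiles(conf, dead, logPrefix);
    archived.forEach(p -> System.out.println("archived WAL: " + p));
  }
}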
hbase_AbstractFSWALProvider_doInit_rdh | /**
*
* @param factory
* factory that made us, identity used for FS layout. may not be null
* @param conf
* may not be null
* @param providerId
* differentiate between providers from one factory, used for FS layout. may be
* null
*/
@Override
  protected void doInit(WALFactory factory, Configuration conf, String providerId) throws IOException {
this.providerId = providerId;
// get log prefix
StringBuilder sb = new StringBuilder().append(factory.factoryId);
if (providerId != null) {
if (providerId.startsWith(WAL_FILE_NAME_DELIMITER)) {
sb.append(providerId);
} else {
sb.append(WAL_FILE_NAME_DELIMITER).append(providerId);
}
}
    logPrefix = sb.toString();
doInit(conf);
} | 3.26 |
hbase_AbstractFSWALProvider_getWALFiles_rdh | /**
 * List all the WAL files under the WAL directory of the given region server.
*/
  public static List<Path> getWALFiles(Configuration c, ServerName serverName) throws IOException {
Path walRoot = new Path(CommonFSUtils.getWALRootDir(c), HConstants.HREGION_LOGDIR_NAME);
    FileSystem fs = walRoot.getFileSystem(c);
List<Path> walFiles = new ArrayList<>();
Path walDir = new Path(walRoot, serverName.toString());
try {
      for (FileStatus status : fs.listStatus(walDir)) {
if (status.isFile()) {
walFiles.add(status.getPath());
}
}
} catch (FileNotFoundException e) {
      LOG.info("WAL dir {} does not exist", walDir);
}
return walFiles;
} | 3.26 |
hbase_AbstractFSWALProvider_getWALArchiveDirectoryName_rdh | /**
 * Construct the directory name for all old WALs on a given server. The default old WALs dir looks
 * like <code>hbase/oldWALs</code>. If hbase.separate.oldlogdir.by.regionserver is set to true, it
 * looks like <code>hbase/oldWALs/kalashnikov.att.net,61634,1486865297088</code>.
*
* @param serverName
* Server name formatted as described in {@link ServerName}
* @return the relative WAL directory name
*/
public static String getWALArchiveDirectoryName(Configuration conf, final String serverName) {
StringBuilder dirName = new StringBuilder(HConstants.HREGION_OLDLOGDIR_NAME);
if (conf.getBoolean(SEPARATE_OLDLOGDIR, DEFAULT_SEPARATE_OLDLOGDIR)) {
dirName.append(Path.SEPARATOR);
dirName.append(serverName);
}
return dirName.toString();
} | 3.26 |
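For illustration, a small sketch (server name fabricated) showing how the separate-old-log-dir flag from the Javadoc changes the returned name:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;

public class ArchiveDirNameExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    String server = "host1.example.com,16020,1600000000000";
    // With the default setting this prints just the shared old WALs dir name ("oldWALs").
    System.out.println(AbstractFSWALProvider.getWALArchiveDirectoryName(conf, server));
    // With the per-regionserver flag it prints "oldWALs/<server name>".
    conf.setBoolean("hbase.separate.oldlogdir.by.regionserver", true);
    System.out.println(AbstractFSWALProvider.getWALArchiveDirectoryName(conf, server));
  }
}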
hbase_AbstractFSWALProvider_getCurrentFileName_rdh | /**
* return the current filename from the current wal.
*/
public static Path getCurrentFileName(final WAL wal) {
return ((AbstractFSWAL<?>) (wal)).getCurrentFileName();
} | 3.26 |
hbase_AbstractFSWALProvider_getNumRolledLogFiles_rdh | /**
* returns the number of rolled WAL files.
*/
public static int getNumRolledLogFiles(WAL wal) {
return ((AbstractFSWAL<?>) (wal)).getNumRolledLogFiles();
} | 3.26 |
hbase_AbstractFSWALProvider_getTimestamp_rdh | /**
* Split a WAL filename to get a start time. WALs usually have the time we start writing to them
* with as part of their name, usually the suffix. Sometimes there will be an extra suffix as when
* it is a WAL for the meta table. For example, WALs might look like this
* <code>10.20.20.171%3A60020.1277499063250</code> where <code>1277499063250</code> is the
* timestamp. Could also be a meta WAL which adds a '.meta' suffix or a synchronous replication
* WAL which adds a '.syncrep' suffix. Check for these. File also may have no timestamp on it. For
* example the recovered.edits files are WALs but are named in ascending order. Here is an
* example: 0000000000000016310. Allow for this.
*
* @param name
* Name of the WAL file.
* @return Timestamp or {@link #NO_TIMESTAMP}.
*/
  public static long getTimestamp(String name) {
Matcher matcher = WAL_FILE_NAME_PATTERN.matcher(name);
return matcher.matches() ? Long.parseLong(matcher.group(2)) : NO_TIMESTAMP;
} | 3.26 |
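A quick sketch of the parsing described above, reusing the sample name from the Javadoc; the '.meta' suffix in the second call is an assumption about the meta provider id, included only to show that an extra suffix should not change the extracted timestamp.

import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;

public class WalTimestampExample {
  public static void main(String[] args) {
    // Sample from the Javadoc: the timestamp is the trailing numeric component (1277499063250).
    System.out.println(AbstractFSWALProvider.getTimestamp("10.20.20.171%3A60020.1277499063250"));
    // Assumed meta-suffixed form; per the Javadoc this should still yield the same timestamp.
    System.out.println(AbstractFSWALProvider.getTimestamp("10.20.20.171%3A60020.1277499063250.meta"));
  }
}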
hbase_AbstractFSWALProvider_recoverLease_rdh | // For HBASE-15019
public static void recoverLease(Configuration conf, Path path) {
try {
      final FileSystem fs = CommonFSUtils.getCurrentFileSystem(conf);
      RecoverLeaseFSUtils.recoverFileLease(fs, path, conf, new CancelableProgressable() {
@Override
public boolean progress() {
LOG.debug("Still trying to recover WAL lease: " + path);
return true;
}
});
} catch (IOException e) {
LOG.warn("unable to recover lease for WAL: " + path, e);
}
} | 3.26 |
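A hedged sketch of the usual call site: recover the HDFS lease of a WAL left behind by a crashed writer before replaying it. The path below is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;

public class RecoverLeaseExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical WAL of a crashed region server, about to be split/replayed.
    Path wal = new Path("/hbase/WALs/host1.example.com,16020,1600000000000-splitting/"
      + "host1.example.com%2C16020%2C1600000000000.1600000002000");
    // Retries (logging progress) until the lease is recovered, or logs a warning on IO errors.
    AbstractFSWALProvider.recoverLease(conf, wal);
  }
}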
hbase_AbstractFSWALProvider_getTS_rdh | /**
 * Split a path to get the start time. For example: 10.20.20.171%3A60020.1277499063250. Could also
* be a meta WAL which adds a '.meta' suffix or a synchronous replication WAL which adds a
* '.syncrep' suffix. Check.
*
* @param p
* path to split
* @return start time
*/
public static long getTS(Path p) {
return getTimestamp(p.getName());
} | 3.26 |
hbase_AbstractFSWALProvider_getNumLogFiles0_rdh | /**
 * Iff the given WALFactory is using the DefaultWALProvider for meta and/or non-meta, count the
 * number of files (rolled and active). If either of them isn't, count 0 for that provider.
*/
@Override
protected long getNumLogFiles0() {
T log = this.wal;
return log == null ? 0 : log.getNumLogFiles();
} | 3.26 |
hbase_AbstractFSWALProvider_requestLogRoll_rdh | /**
* request a log roll, but don't actually do it.
*/
static void requestLogRoll(final WAL wal) {
((AbstractFSWAL<?>) (wal)).requestLogRoll();
} | 3.26 |
hbase_AbstractFSWALProvider_m1_rdh | /**
* It returns the file create timestamp (the 'FileNum') from the file name. For name format see
 * {@link #validateWALFilename(String)}. Public until remaining tests move to o.a.h.h.wal.
*
* @param wal
* must not be null
* @return the file number that is part of the WAL file name
*/
public static long m1(final WAL wal) {
final Path walPath = ((AbstractFSWAL<?>) (wal)).getCurrentFileName();
if (walPath == null) {
throw new IllegalArgumentException("The WAL path couldn't be null");
}
String name = walPath.getName();
long timestamp = getTimestamp(name);
if (timestamp == NO_TIMESTAMP) {
throw new IllegalArgumentException(name + " is not a valid wal file name");
}
return timestamp;
}
/**
* A WAL file name is of the format: <wal-name>{@link #WAL_FILE_NAME_DELIMITER} | 3.26 |
hbase_AbstractFSWALProvider_parseServerNameFromWALName_rdh | /**
 * Parse the server name from the WAL prefix. A WAL's name always starts with a server name in
 * non-test code.
*
* @throws IllegalArgumentException
 * if the name passed in does not start with a server name
* @return the server name
*/
public static ServerName parseServerNameFromWALName(String name) {
String decoded;
try {
decoded = URLDecoder.decode(name, StandardCharsets.UTF_8.name());
} catch (UnsupportedEncodingException e) {
throw new AssertionError("should never happen", e);
}
Matcher matcher = SERVER_NAME_PATTERN.matcher(decoded);
    if (matcher.find()) {
return ServerName.valueOf(matcher.group());
} else {
throw new IllegalArgumentException(name + " is not started with a server name");
}
} | 3.26 |
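A small sketch of the decode-then-match behaviour described above; both input names are fabricated.

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;

public class ParseServerNameExample {
  public static void main(String[] args) {
    // URL-encoded commas in the prefix are decoded before the server-name pattern is applied.
    ServerName sn = AbstractFSWALProvider.parseServerNameFromWALName(
      "host1.example.com%2C16020%2C1600000000000.1600000001000");
    System.out.println(sn);
    try {
      // A name with no server-name prefix is rejected.
      AbstractFSWALProvider.parseServerNameFromWALName("recovered.edits.0000000000000016310");
    } catch (IllegalArgumentException e) {
      System.out.println("not a server-prefixed name: " + e.getMessage());
    }
  }
}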
hbase_AbstractFSWALProvider_getWALPrefixFromWALName_rdh | /**
* Get prefix of the log from its name, assuming WAL name in format of
* log_prefix.filenumber.log_suffix
*
* @param name
* Name of the WAL to parse
* @return prefix of the log
* @throws IllegalArgumentException
* if the name passed in is not a valid wal file name
* @see AbstractFSWAL#getCurrentFileName()
*/
public static String getWALPrefixFromWALName(String name) {
return getWALNameGroupFromWALName(name, 1);
} | 3.26 |
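A one-call sketch of prefix extraction under the log_prefix.filenumber.log_suffix convention mentioned in the Javadoc; the name is made up and mirrors the server-name-prefixed form used elsewhere in this class.

import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;

public class WalPrefixExample {
  public static void main(String[] args) {
    // Expected to print the part before the file number, i.e. the encoded server name.
    System.out.println(AbstractFSWALProvider.getWALPrefixFromWALName(
      "host1.example.com%2C16020%2C1600000000000.1600000001000"));
  }
}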
hbase_AccessController_hasFamilyQualifierPermission_rdh | /**
* Returns <code>true</code> if the current user is allowed the given action over at least one of
* the column qualifiers in the given column families.
*/
private boolean hasFamilyQualifierPermission(User user, Action perm, RegionCoprocessorEnvironment env, Map<byte[], ? extends Collection<byte[]>> familyMap) throws IOException {
RegionInfo hri = env.getRegion().getRegionInfo();
TableName tableName = hri.getTable();
if (user == null) {
return false;
}
if ((familyMap != null) && (familyMap.size() > 0)) {
// at least one family must be allowed
for (Map.Entry<byte[], ? extends Collection<byte[]>> family : familyMap.entrySet()) {
if ((family.getValue() != null) && (!family.getValue().isEmpty())) {
          for (byte[] qualifier : family.getValue()) {
            if (getAuthManager().authorizeUserTable(user, tableName, family.getKey(), qualifier, perm)) {
return true;
}
}
} else if (getAuthManager().authorizeUserFamily(user, tableName, family.getKey(), perm)) {
return true;
}
}
    } else if (LOG.isDebugEnabled()) {
LOG.debug("Empty family map passed for permission check");
}
return false;
} | 3.26 |
hbase_AccessController_checkForReservedTagPresence_rdh | // Checks whether incoming cells contain any tag with type as ACL_TAG_TYPE. This tag
// type is reserved and should not be explicitly set by user.
private void checkForReservedTagPresence(User user, Mutation m) throws IOException {
// No need to check if we're not going to throw
if (!authorizationEnabled) {
m.setAttribute(TAG_CHECK_PASSED, TRUE);
return;
}
// Superusers are allowed to store cells unconditionally.
if (Superusers.isSuperUser(user)) {
m.setAttribute(TAG_CHECK_PASSED, TRUE);
      return;
    }
// We already checked (prePut vs preBatchMutation)
if (m.getAttribute(TAG_CHECK_PASSED) != null) {
return;
}
for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance();) {
Iterator<Tag> tagsItr = PrivateCellUtil.tagsIterator(cellScanner.current());
while (tagsItr.hasNext()) {
if (tagsItr.next().getType() == PermissionStorage.ACL_TAG_TYPE) {
throw new AccessDeniedException("Mutation contains cell with reserved type tag");
}
}
}
m.setAttribute(TAG_CHECK_PASSED, TRUE);
} | 3.26 |
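Because the ACL tag type is reserved, clients attach cell-level permissions through the mutation API instead of writing tags directly. A hedged sketch follows (table, family and user names invented; assumes the standard client Put/Permission/Table classes):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.util.Bytes;

public class CellAclPutExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("demo_table"))) {
      Put put = new Put(Bytes.toBytes("row1"));
      put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      // Cell ACLs go through setACL; the server-side check above rejects mutations
      // that carry raw ACL_TAG_TYPE tags.
      put.setACL("some_user", new Permission(Permission.Action.READ));
      table.put(put);
    }
  }
}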
hbase_AccessController_start_rdh | /* ---- MasterObserver implementation ---- */
@Override
public void start(CoprocessorEnvironment env) throws IOException {
CompoundConfiguration conf = new CompoundConfiguration();
conf.add(env.getConfiguration());
authorizationEnabled = AccessChecker.isAuthorizationSupported(conf);
if (!authorizationEnabled) {
LOG.warn("AccessController has been loaded with authorization checks DISABLED!");
}
shouldCheckExecPermission = conf.getBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, AccessControlConstants.DEFAULT_EXEC_PERMISSION_CHECKS);
cellFeaturesEnabled = HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS;
if (!cellFeaturesEnabled) {
LOG.info(((("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS) + " is required to persist cell ACLs. Consider setting ") + HFile.FORMAT_VERSION_KEY) + " accordingly.");
}
if (env instanceof MasterCoprocessorEnvironment) {
// if running on HMaster
MasterCoprocessorEnvironment mEnv = ((MasterCoprocessorEnvironment) (env));
if (mEnv instanceof HasMasterServices) {
MasterServices masterServices = ((HasMasterServices) (mEnv)).getMasterServices();
zkPermissionWatcher = masterServices.getZKPermissionWatcher();
accessChecker = masterServices.getAccessChecker();
}
} else if (env instanceof RegionServerCoprocessorEnvironment) {
RegionServerCoprocessorEnvironment rsEnv = ((RegionServerCoprocessorEnvironment) (env));
if (rsEnv instanceof HasRegionServerServices) {
RegionServerServices rsServices = ((HasRegionServerServices) (rsEnv)).getRegionServerServices();
zkPermissionWatcher = rsServices.getZKPermissionWatcher();
accessChecker = rsServices.getAccessChecker();
}
} else if (env instanceof RegionCoprocessorEnvironment) {
// if running at region
regionEnv = ((RegionCoprocessorEnvironment) (env));
conf.addBytesMap(regionEnv.getRegion().getTableDescriptor().getValues());
compatibleEarlyTermination = conf.getBoolean(AccessControlConstants.CF_ATTRIBUTE_EARLY_OUT, AccessControlConstants.DEFAULT_ATTRIBUTE_EARLY_OUT);
if (regionEnv instanceof HasRegionServerServices) {
RegionServerServices rsServices = ((HasRegionServerServices) (regionEnv)).getRegionServerServices();
zkPermissionWatcher = rsServices.getZKPermissionWatcher();
accessChecker = rsServices.getAccessChecker();
}
}
Preconditions.checkState(zkPermissionWatcher != null, "ZKPermissionWatcher is null");
Preconditions.checkState(accessChecker != null, "AccessChecker is null");
// set the user-provider.
this.userProvider = UserProvider.instantiate(env.getConfiguration());
tableAcls = new MapMaker().weakValues().makeMap();
} | 3.26 |
hbase_AccessController_checkCoveringPermission_rdh | /**
* Determine if cell ACLs covered by the operation grant access. This is expensive.
*
* @return false if cell ACLs failed to grant access, true otherwise
*/
private boolean checkCoveringPermission(User user, OpType request, RegionCoprocessorEnvironment e, byte[] row, Map<byte[], ? extends Collection<?>> familyMap, long opTs, Action... actions) throws IOException {
if (!cellFeaturesEnabled) {
return false;
}
long cellGrants = 0;
long latestCellTs = 0;
    Get get = new Get(row);
// Only in case of Put/Delete op, consider TS within cell (if set for individual cells).
// When every cell, within a Mutation, can be linked with diff TS we can not rely on only one
// version. We have to get every cell version and check its TS against the TS asked for in
    // Mutation and skip those Cells which are outside this Mutation TS. In case of Put, we have to
// consider only one such passing cell. In case of Delete we have to consider all the cell
// versions under this passing version. When Delete Mutation contains columns which are a
// version delete just consider only one version for those column cells.
boolean considerCellTs = (request == OpType.PUT) || (request == OpType.DELETE);
if (considerCellTs) {
get.readAllVersions();
} else {
get.readVersions(1);
    }
    boolean diffCellTsFromOpTs = false;
for (Map.Entry<byte[], ? extends Collection<?>> entry : familyMap.entrySet()) {
byte[] col = entry.getKey();
// TODO: HBASE-7114 could possibly unify the collection type in family
// maps so we would not need to do this
if (entry.getValue() instanceof Set) {
Set<byte[]> set = ((Set<byte[]>) (entry.getValue()));
if ((set == null) || set.isEmpty()) {
get.addFamily(col);
} else {
for (byte[] qual : set) {
get.addColumn(col, qual);
}
        }
      } else if (entry.getValue() instanceof List) {
        List<Cell> cellList = ((List<Cell>) (entry.getValue()));
        if ((cellList == null) || cellList.isEmpty()) {
          get.addFamily(col);
        } else {
          // In case of family delete, a Cell will be added into the list with Qualifier as null.
          for (Cell cell : cellList) {
if ((cell.getQualifierLength() == 0) && ((cell.getTypeByte() == Type.DeleteFamily.getCode()) || (cell.getTypeByte() == Type.DeleteFamilyVersion.getCode()))) {
get.addFamily(col);
} else {
get.addColumn(col, CellUtil.cloneQualifier(cell));
}
            if (considerCellTs) {
long cellTs = cell.getTimestamp();
latestCellTs = Math.max(latestCellTs, cellTs);
diffCellTsFromOpTs = diffCellTsFromOpTs || (opTs != cellTs);
}
          }
        }
      } else if (entry.getValue() == null) {
get.addFamily(col);
      } else {
throw new RuntimeException("Unhandled collection type " + entry.getValue().getClass().getName());
}
}
// We want to avoid looking into the future. So, if the cells of the
// operation specify a timestamp, or the operation itself specifies a
// timestamp, then we use the maximum ts found. Otherwise, we bound
// the Get to the current server time. We add 1 to the timerange since
// the upper bound of a timerange is exclusive yet we need to examine
// any cells found there inclusively.
long latestTs = Math.max(opTs, latestCellTs);
    if ((latestTs == 0) || (latestTs == HConstants.LATEST_TIMESTAMP)) {
latestTs = EnvironmentEdgeManager.currentTime();
}
get.setTimeRange(0, latestTs + 1);
// In case of Put operation we set to read all versions. This was done to consider the case
// where columns are added with TS other than the Mutation TS. But normally this wont be the
// case with Put. There no need to get all versions but get latest version only.
if ((!diffCellTsFromOpTs) && (request == OpType.PUT)) {
get.readVersions(1);
}
if (LOG.isTraceEnabled()) {
LOG.trace("Scanning for cells with " + get);
}
// This Map is identical to familyMap. The key is a BR rather than byte[].
// It will be easy to do gets over this new Map as we can create get keys over the Cell cf by
// new SimpleByteRange(cell.familyArray, cell.familyOffset, cell.familyLen)
Map<ByteRange, List<Cell>> familyMap1 = new HashMap<>();
for (Entry<byte[], ? extends Collection<?>> entry : familyMap.entrySet()) {
if (entry.getValue() instanceof List) {
familyMap1.put(new SimpleMutableByteRange(entry.getKey()), ((List<Cell>) (entry.getValue())));
}
}
RegionScanner scanner = getRegion(e).getScanner(new Scan(get));
List<Cell> cells = Lists.newArrayList();
Cell prevCell = null;
ByteRange curFam = new SimpleMutableByteRange();
boolean curColAllVersions = request == OpType.DELETE;
long curColCheckTs = opTs;
boolean foundColumn = false;
try {
boolean more = false;
ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(1).build();
do {
cells.clear();
// scan with limit as 1 to hold down memory use on wide rows
more = scanner.next(cells, scannerContext);
for (Cell cell : cells) {
if (LOG.isTraceEnabled()) {
LOG.trace("Found cell " + cell);
}
          boolean colChange = (prevCell == null) || (!CellUtil.matchingColumn(prevCell, cell));
          if (colChange) {
            foundColumn = false;
          }
prevCell = cell;
if ((!curColAllVersions) && foundColumn) {
continue;
}
if (colChange && considerCellTs) {
curFam.set(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength());
List<Cell> cols = familyMap1.get(curFam);
            for (Cell col : cols) {
// null/empty qualifier is used to denote a Family delete. The TS and delete type
// associated with this is applicable for all columns within the family. That is
// why the below (col.getQualifierLength() == 0) check.
if (((col.getQualifierLength() == 0) && (request == OpType.DELETE)) || CellUtil.matchingQualifier(cell, col)) {
byte type = col.getTypeByte();
if (considerCellTs) {
curColCheckTs = col.getTimestamp();
}
// For a Delete op we pass allVersions as true. When a Delete Mutation contains
// a version delete for a column no need to check all the covering cells within
// that column. Check all versions when Type is DeleteColumn or DeleteFamily
// One version delete types are Delete/DeleteFamilyVersion
curColAllVersions = (Type.DeleteColumn.getCode() == type) ||
(Type.DeleteFamily.getCode() == type);
break;
}
}
}
if (cell.getTimestamp() > curColCheckTs) {
// Just ignore this cell. This is not a covering cell.
continue;
}
foundColumn = true;
for (Action action : actions) {
// Are there permissions for this user for the cell?
if (!getAuthManager().authorizeCell(user, getTableName(e), cell, action)) {
// We can stop if the cell ACL denies access
return false;
}
          }
          cellGrants++;
}
      } while (more);
} catch (AccessDeniedException ex) {
throw ex;
} catch (IOException ex) {
LOG.error("Exception while getting cells to calculate covering permission", ex);
} finally {
scanner.close();
}
// We should not authorize unless we have found one or more cell ACLs that
// grant access. This code is used to check for additional permissions
// after no table or CF grants are found.
return cellGrants > 0;
} | 3.26 |
hbase_AccessController_requireScannerOwner_rdh | /**
* Verify, when servicing an RPC, that the caller is the scanner owner. If so, we assume that
* access control is correctly enforced based on the checks performed in preScannerOpen()
*/
private void requireScannerOwner(InternalScanner s) throws AccessDeniedException {
if (!RpcServer.isInRpcCallContext()) {
return;
}
    String requestUserName = RpcServer.getRequestUserName().orElse(null);
    String owner = scannerOwners.get(s);
    if ((authorizationEnabled && (owner != null)) && (!owner.equals(requestUserName))) {
      throw new AccessDeniedException(("User '" + requestUserName) + "' is not the scanner owner!");
    }
  }