name (string, 12 to 178 chars) | code_snippet (string, 8 to 36.5k chars) | score (float64, 3.26 to 3.68) |
---|---|---|
hbase_AccessController_preCreateTable_rdh | /**
* ********************************* Observer implementations **********************************
*/
@Override
public void preCreateTable(ObserverContext<MasterCoprocessorEnvironment> c, TableDescriptor desc, RegionInfo[] regions)
throws IOException {
Set<byte[]> families = desc.getColumnFamilyNames();
Map<byte[], Set<byte[]>> familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
for (byte[] family : families) {
familyMap.put(family, null);
}
requireNamespacePermission(c, "createTable", desc.getTableName().getNamespaceAsString(), desc.getTableName(), familyMap, Action.ADMIN, Action.CREATE);
} | 3.26 |
hbase_AccessController_permissionGranted_rdh | /**
* Check the current user for authorization to perform a specific action against the given set of
* row data.
*
* @param opType
* the operation type
* @param user
* the user
* @param e
* the coprocessor environment
* @param families
* the map of column families to qualifiers present in the request
* @param actions
* the desired actions
* @return an authorization result
*/
private AuthResult permissionGranted(OpType opType, User user, RegionCoprocessorEnvironment e, Map<byte[], ? extends Collection<?>> families, Action... actions) {
AuthResult result = null;
for (Action action : actions) {
result = accessChecker.permissionGranted(opType.m1(), user, action, e.getRegion().getRegionInfo().getTable(), families);
if (!result.isAllowed()) {
return result;
}
}
return result;
} | 3.26 |
hbase_AccessController_getActiveUser_rdh | /**
* Returns the active user to which authorization checks should be applied. If we are in the
* context of an RPC call, the remote user is used, otherwise the currently logged in user is
* used.
*/
private User getActiveUser(ObserverContext<?> ctx) throws IOException {
// for non-rpc handling, fallback to system user
Optional<User> optionalUser = ctx.getCaller();
if (optionalUser.isPresent()) {
return optionalUser.get();
}
return userProvider.getCurrent();
}
/**
*
* @deprecated since 2.2.0 and will be removed in 4.0.0. Use
{@link Admin#hasUserPermissions(String, List)} | 3.26 |
hbase_AccessController_createACLTable_rdh | /**
* Create the ACL table
*/
private static void createACLTable(Admin admin) throws IOException {
/**
* Table descriptor for ACL table
*/
ColumnFamilyDescriptor v86 = ColumnFamilyDescriptorBuilder.newBuilder(PermissionStorage.ACL_LIST_FAMILY).setMaxVersions(1).setInMemory(true).setBlockCacheEnabled(true).setBlocksize(8 * 1024).setBloomFilterType(BloomType.NONE).setScope(HConstants.REPLICATION_SCOPE_LOCAL).build();
TableDescriptor td = TableDescriptorBuilder.newBuilder(PermissionStorage.ACL_TABLE_NAME).setColumnFamily(v86).build();
admin.createTable(td);
} | 3.26 |
hbase_AccessController_preCleanupBulkLoad_rdh | /**
* Authorization security check for SecureBulkLoadProtocol.cleanupBulkLoad()
*
* @param ctx
* the context
*/
@Override
public void preCleanupBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx) throws IOException {
requireAccess(ctx, "preCleanupBulkLoad", ctx.getEnvironment().getRegion().getTableDescriptor().getTableName(), Action.ADMIN, Action.CREATE);
} | 3.26 |
hbase_AccessController_preBulkLoadHFile_rdh | /**
* Verifies user has CREATE or ADMIN privileges on the Column Families involved in the
* bulkLoadHFile request. Specific Column Write privileges are presently ignored.
*/
@Override
public void preBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx, List<Pair<byte[], String>> familyPaths) throws IOException {
User user = getActiveUser(ctx);
for (Pair<byte[], String> el : familyPaths) {
accessChecker.requirePermission(user, "preBulkLoadHFile", ctx.getEnvironment().getRegion().getTableDescriptor().getTableName(), el.getFirst(), null, null, Action.ADMIN, Action.CREATE);
}
} | 3.26 |
hbase_AccessController_getRegionObserver_rdh | /**
* ********************************* Observer/Service Getters **********************************
*/
@Override
public Optional<RegionObserver> getRegionObserver() {
return Optional.of(this);
} | 3.26 |
hbase_AccessController_prePrepareBulkLoad_rdh | /**
* Authorization check for SecureBulkLoadProtocol.prepareBulkLoad()
*
* @param ctx
* the context
*/
@Override
public void prePrepareBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx) throws IOException {
requireAccess(ctx, "prePrepareBulkLoad", ctx.getEnvironment().getRegion().getTableDescriptor().getTableName(), Action.ADMIN, Action.CREATE);
} | 3.26 |
hbase_AccessController_preEndpointInvocation_rdh | /* ---- EndpointObserver implementation ---- */
@Override
public Message preEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx, Service service, String methodName, Message request) throws IOException {
// Don't intercept calls to our own AccessControlService, we check for
// appropriate permissions in the service handlers
if (shouldCheckExecPermission && (!(service instanceof AccessControlService))) {
m0(ctx, ((("invoke(" + service.getDescriptorForType().getName()) + ".") + methodName) + ")", getTableName(ctx.getEnvironment()), null, null, Action.EXEC);
}
return request;
} | 3.26 |
hbase_AccessController_updateACL_rdh | /**
* Writes all table ACLs for the tables in the given Map up into ZooKeeper znodes. This is called
* to synchronize ACL changes following {@code _acl_} table updates.
*/
private void updateACL(RegionCoprocessorEnvironment e, final Map<byte[], List<Cell>> familyMap) {
Set<byte[]> entries = new TreeSet<>(Bytes.BYTES_RAWCOMPARATOR);
for (Map.Entry<byte[], List<Cell>> f : familyMap.entrySet()) {
List<Cell> cells = f.getValue();
for (Cell cell : cells) {
if (CellUtil.matchingFamily(cell, PermissionStorage.ACL_LIST_FAMILY)) {
entries.add(CellUtil.cloneRow(cell));
}
}
}
Configuration conf = regionEnv.getConfiguration();
byte[] currentEntry = null;
// TODO: Here we are already on the ACL region. (And it is single
// region) We can even just get the region from the env and do get
// directly. The short circuit connection would avoid the RPC overhead
// so no socket communication, req write/read .. But we have the PB
// to and fro conversion overhead. get req is converted to PB req
// and results are converted to PB results 1st and then to POJOs
// again. We could have avoided such at least in ACL table context..
try (Table t = e.getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
for (byte[] entry : entries) {
currentEntry = entry;
ListMultimap<String, UserPermission> perms = PermissionStorage.getPermissions(conf, entry, t, null, null, null, false);
byte[] serialized = PermissionStorage.writePermissionsAsBytes(perms, conf);
zkPermissionWatcher.writeToZookeeper(entry, serialized);
}
} catch (IOException ex) {
LOG.error(("Failed updating permissions mirror for '" + (currentEntry == null ? "null" : Bytes.toString(currentEntry))) + "'", ex);}
} | 3.26 |
hbase_AccessController_preOpen_rdh | /* ---- RegionObserver implementation ---- */
@Override
public void preOpen(ObserverContext<RegionCoprocessorEnvironment> c) throws IOException {
RegionCoprocessorEnvironment env = c.getEnvironment();
final Region region = env.getRegion();
if (region == null) {
LOG.error("NULL region from RegionCoprocessorEnvironment in preOpen()");
} else {
RegionInfo regionInfo = region.getRegionInfo();
if (regionInfo.getTable().isSystemTable()) {
checkSystemOrSuperUser(getActiveUser(c));
} else {
requirePermission(c, "preOpen", Action.ADMIN);
}
}
} | 3.26 |
hbase_AuthMethod_m1_rdh | /**
* Return the SASL mechanism name
*/
public String m1() {
return mechanismName;
} | 3.26 |
hbase_AuthMethod_read_rdh | /**
* Read from in
*/
public static AuthMethod read(DataInput in) throws IOException {
return m0(in.readByte());
} | 3.26 |
hbase_AuthMethod_write_rdh | /**
* Write to out
*/
public void write(DataOutput out) throws IOException {
out.write(code);
} | 3.26 |
hbase_AuthMethod_m0_rdh | /**
* Return the object represented by the code.
*/
public static AuthMethod m0(byte code) {
final int i = (code & 0xff) - FIRST_CODE;
return (i < 0) || (i >= values().length) ? null : values()[i];
} | 3.26 |
hbase_BloomFilterMetrics_getNegativeResultsCount_rdh | /**
* Returns Current value for bloom negative results count
*/
public long getNegativeResultsCount() {
return negativeResults.sum();
} | 3.26 |
hbase_BloomFilterMetrics_incrementRequests_rdh | /**
* Increment bloom request count, and negative result count if !passed
*/
public void incrementRequests(boolean passed) {
requests.increment();
if (!passed) {
negativeResults.increment();
}
} | 3.26 |
hbase_BloomFilterMetrics_getRequestsCount_rdh | /**
* Returns Current value for bloom requests count
*/
public long getRequestsCount() {
return requests.sum();
} | 3.26 |
hbase_BloomFilterMetrics_m0_rdh | /**
* Returns Current value for requests which could have used bloom filters but the bloom filter
* wasn't defined or loaded.
*/
public long m0() {
return eligibleRequests.sum();
} | 3.26 |
hbase_BloomFilterMetrics_incrementEligible_rdh | /**
* Increment for cases where bloom filter could have been used but wasn't defined or loaded.
*/
public void incrementEligible() {
eligibleRequests.increment();
} | 3.26 |
hbase_Filter_parseFrom_rdh | /**
* Concrete implementers can signal a failure condition in their code by throwing an
* {@link IOException}.
*
* @param pbBytes
* A pb serialized {@link Filter} instance
* @return An instance of {@link Filter} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static Filter parseFrom(final byte[] pbBytes) throws DeserializationException {
throw new DeserializationException("parseFrom called on base Filter, but should be called on derived type");
}
/**
* Concrete implementers can signal a failure condition in their code by throwing an
* {@link IOException} | 3.26 |
hbase_Filter_setReversed_rdh | /**
* alter the reversed scan flag
*
* @param reversed
* flag
*/
public void setReversed(boolean reversed) {
this.reversed = reversed;
} | 3.26 |
hbase_Filter_filterCell_rdh | /**
* A way to filter based on the column family, column qualifier and/or the column value. Return
* code is described below. This allows filters to filter only a certain number of columns, then
* terminate without matching every column. If filterRowKey returns true, filterCell needs to be
* consistent with it. filterCell can assume that filterRowKey has already been called for the
* row. If your filter returns <code>ReturnCode.NEXT_ROW</code>, it should return
* <code>ReturnCode.NEXT_ROW</code> until {@link #reset()} is called just in case the caller calls
* for the next row. Concrete implementers can signal a failure condition in their code by
* throwing an {@link IOException}.
*
* @param c
* the Cell in question
* @return code as described below
* @throws IOException
* in case an I/O or a filter-specific failure needs to be signaled.
* @see Filter.ReturnCode
*/
public ReturnCode filterCell(final Cell c) throws IOException {
return ReturnCode.INCLUDE;
}
/**
* Give the filter a chance to transform the passed KeyValue. If the Cell is changed a new Cell
* object must be returned.
*
* @see org.apache.hadoop.hbase.KeyValue#shallowCopy() The transformed KeyValue is what is
eventually returned to the client. Most filters will return the passed KeyValue unchanged.
* @see org.apache.hadoop.hbase.filter.KeyOnlyFilter#transformCell(Cell) for an example of a
transformation. Concrete implementers can signal a failure condition in their code by
throwing an {@link IOException} | 3.26 |
hbase_MetricsStochasticBalancer_initSource_rdh | /**
* This function overrides the initSource in the MetricsBalancer, use
* MetricsStochasticBalancerSource instead of the MetricsBalancerSource.
*/
@Override
protected void initSource() {
stochasticSource = CompatibilitySingletonFactory.getInstance(MetricsStochasticBalancerSource.class);
} | 3.26 |
hbase_MetricsStochasticBalancer_updateMetricsSize_rdh | /**
* Updates the number of metrics reported to JMX
*/
public void updateMetricsSize(int size) {
stochasticSource.updateMetricsSize(size);
} | 3.26 |
hbase_MetricsStochasticBalancer_balancerStatus_rdh | /**
* Updates the balancer status tag reported to JMX
*/
@Override
public void balancerStatus(boolean status) {
stochasticSource.updateBalancerStatus(status);
} | 3.26 |
hbase_MetricsStochasticBalancer_updateStochasticCost_rdh | /**
* Reports stochastic load balancer costs to JMX
*/
public void updateStochasticCost(String tableName, String costFunctionName, String costFunctionDesc, Double value) {
stochasticSource.updateStochasticCost(tableName, costFunctionName, costFunctionDesc, value);
} | 3.26 |
hbase_JarFinder_getJar_rdh | /**
* Returns the full path to the Jar containing the class. It always returns a JAR.
*
* @param klass
* class.
* @return path to the Jar containing the class.
*/
public static String getJar(Class klass) {
Preconditions.checkNotNull(klass, "klass");
ClassLoader loader = klass.getClassLoader();
if (loader != null) {
String class_file = klass.getName().replaceAll("\\.", "/") + ".class";
try {
for (Enumeration itr = loader.getResources(class_file); itr.hasMoreElements();) {
URL url = ((URL) (itr.nextElement()));
String path = url.getPath();
if (path.startsWith("file:")) {
path = path.substring("file:".length());
}
path = URLDecoder.decode(path, "UTF-8");
if ("jar".equals(url.getProtocol())) {
path = URLDecoder.decode(path, "UTF-8");
return path.replaceAll("!.*$", "");
} else if ("file".equals(url.getProtocol())) {
String klassName = klass.getName();
klassName = klassName.replace(".", "/") + ".class";
path = path.substring(0, path.length() - klassName.length());
File baseDir = new File(path);
File testDir = new File(System.getProperty("test.build.dir", "target/test-dir"));
testDir = testDir.getAbsoluteFile();
if (!testDir.exists()) {
testDir.mkdirs();
}
File tempJar = File.createTempFile("hadoop-", "", testDir);
tempJar = new File(tempJar.getAbsolutePath() + ".jar");
tempJar.deleteOnExit();
createJar(baseDir, tempJar);
return tempJar.getAbsolutePath();
}}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
return null;
} | 3.26 |
hbase_LongComparator_toByteArray_rdh | /**
* Returns The comparator serialized using pb
*/
@Override
public byte[] toByteArray() {
ComparatorProtos.LongComparator.Builder builder = ComparatorProtos.LongComparator.newBuilder();
builder.setComparable(ProtobufUtil.toByteArrayComparable(this.value));
return builder.build().toByteArray();
} | 3.26 |
hbase_LongComparator_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the comparator that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
boolean areSerializedFieldsEqual(LongComparator other) {
if (other == this) {
return true;
}
if (other == null) {
return false;
}
return super.areSerializedFieldsEqual(other);
} | 3.26 |
hbase_LongComparator_parseFrom_rdh | /**
* Parses a serialized representation of {@link LongComparator}
*
* @param pbBytes
* A pb serialized {@link LongComparator} instance
* @return An instance of {@link LongComparator} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static LongComparator parseFrom(final byte[] pbBytes) throws DeserializationException {
ComparatorProtos.LongComparator proto;
try {
proto = ComparatorProtos.LongComparator.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
return new LongComparator(Bytes.toLong(proto.getComparable().getValue().toByteArray()));
} | 3.26 |
hbase_QuotaObserverChore_setTableQuotaSnapshot_rdh | /**
* Stores the quota state for the given table.
*/
void setTableQuotaSnapshot(TableName table, SpaceQuotaSnapshot snapshot) {
this.tableQuotaSnapshots.put(table, snapshot);
} | 3.26 |
hbase_QuotaObserverChore_getInitialDelay_rdh | /**
* Extracts the initial delay for the chore from the configuration.
*
* @param conf
* The configuration object.
* @return The configured chore initial delay or the default value in the given timeunit.
* @see #getTimeUnit(Configuration)
*/
static long getInitialDelay(Configuration conf) {
return conf.getLong(QUOTA_OBSERVER_CHORE_DELAY_KEY, QUOTA_OBSERVER_CHORE_DELAY_DEFAULT);
} | 3.26 |
hbase_QuotaObserverChore_getTableQuotaSnapshots_rdh | /**
* Returns an unmodifiable view over the current {@link SpaceQuotaSnapshot} objects for each HBase
* table with a quota defined.
*/
public Map<TableName, SpaceQuotaSnapshot> getTableQuotaSnapshots() {
return readOnlyTableQuotaSnapshots;
} | 3.26 |
hbase_QuotaObserverChore_processNamespacesWithQuotas_rdh | /**
* Processes each namespace which has a quota defined and moves all of the tables contained in
* that namespace into or out of violation of the quota. Tables which are already in violation of
* a quota at the table level which <em>also</em> reside in a namespace with a violated
* quota will not have the namespace quota enacted. The table quota takes priority over the
* namespace quota.
*
* @param namespacesWithQuotas
* The set of namespaces that have quotas defined
* @param tablesByNamespace
* A mapping of namespaces and the tables contained in those
* namespaces
*/
void processNamespacesWithQuotas(final Set<String> namespacesWithQuotas, final Multimap<String, TableName> tablesByNamespace) throws IOException {
long numNamespacesInViolation = 0L;
for (String namespace : namespacesWithQuotas) {
// Get the quota definition for the namespace
final SpaceQuota spaceQuota = f3.getSpaceQuota(namespace);
if (spaceQuota == null) {
if (LOG.isDebugEnabled()) {
LOG.debug(("Could not get Namespace space quota for " + namespace) + ", maybe it was recently deleted.");
}
continue;
}
final SpaceQuotaSnapshot currentSnapshot = f3.getCurrentState(namespace);
final SpaceQuotaSnapshot v21 = f3.getTargetState(namespace, spaceQuota);
if (LOG.isTraceEnabled()) {
LOG.trace((((("Processing " + namespace) + " with current=") + currentSnapshot) + ", target=") + v21);
}
updateNamespaceQuota(namespace, currentSnapshot, v21, tablesByNamespace);
if (v21.getQuotaStatus().isInViolation()) {
numNamespacesInViolation++;
}
}
// Report the number of namespaces in violation
if (metrics != null) {
metrics.setNumNamespacesInSpaceQuotaViolation(numNamespacesInViolation);
}
} | 3.26 |
hbase_QuotaObserverChore_fetchAllTablesWithQuotasDefined_rdh | /**
* Computes the set of all tables that have quotas defined. This includes tables with quotas
* explicitly set on them, in addition to tables that exist in namespaces which have a quota defined.
*/
TablesWithQuotas fetchAllTablesWithQuotasDefined() throws IOException {
final Scan scan = QuotaTableUtil.makeScan(null);
final TablesWithQuotas tablesWithQuotas = new TablesWithQuotas(conn, conf);
try (final QuotaRetriever scanner = new QuotaRetriever()) {
scanner.init(conn, scan);
for (QuotaSettings quotaSettings : scanner) {
// Only one of namespace and tablename should be 'null'
final String namespace = quotaSettings.getNamespace();
final TableName tableName = quotaSettings.getTableName();
if (QuotaType.SPACE != quotaSettings.getQuotaType()) {
continue;
}
if (namespace != null) {
assert tableName == null;
// Collect all of the tables in the namespace
TableName[] tablesInNS = conn.getAdmin().listTableNamesByNamespace(namespace);
for (TableName tableUnderNs : tablesInNS) {
if (LOG.isTraceEnabled()) {
LOG.trace(((("Adding " + tableUnderNs) + " under ") + namespace) + " as having a namespace quota");
}
tablesWithQuotas.addNamespaceQuotaTable(tableUnderNs);
}
} else {
assert tableName != null;
if (LOG.isTraceEnabled()) {
LOG.trace(("Adding " + tableName) + " as having table quota.");}
// namespace is already null, must be a non-null tableName
tablesWithQuotas.addTableQuotaTable(tableName);
}
}
return tablesWithQuotas;
}
} | 3.26 |
hbase_QuotaObserverChore_updateTableQuota_rdh | /**
* Updates the hbase:quota table with the new quota policy for this <code>table</code> if
* necessary.
*
* @param table
* The table being checked
* @param currentSnapshot
* The state of the quota on this table from the previous invocation.
* @param targetSnapshot
* The state the quota should be in for this table.
*/
void updateTableQuota(TableName table, SpaceQuotaSnapshot currentSnapshot, SpaceQuotaSnapshot targetSnapshot) throws IOException {
final SpaceQuotaStatus currentStatus = currentSnapshot.getQuotaStatus();
final SpaceQuotaStatus targetStatus = targetSnapshot.getQuotaStatus();
// If we're changing something, log it.
if (!currentSnapshot.equals(targetSnapshot)) {
this.snapshotNotifier.transitionTable(table, targetSnapshot);
// Update it in memory
tableSnapshotStore.setCurrentState(table, targetSnapshot);
// If the target is none, we're moving out of violation. Update the hbase:quota table
SpaceViolationPolicy currPolicy = currentStatus.getPolicy().orElse(null);
SpaceViolationPolicy targetPolicy = targetStatus.getPolicy().orElse(null);
if (!targetStatus.isInViolation()) {
// In case of Disable SVP, we need to enable the table as it moves out of violation
if (isDisableSpaceViolationPolicy(currPolicy, targetPolicy)) {
QuotaUtil.enableTableIfNotEnabled(conn, table);
}
if (LOG.isDebugEnabled()) {
LOG.debug(table + " moved into observance of table space quota.");
}
} else {
// We're either moving into violation or changing violation policies
if ((currPolicy != targetPolicy) && (SpaceViolationPolicy.DISABLE == currPolicy)) {
// In case of policy switch, we need to enable the table if current policy is Disable SVP
QuotaUtil.enableTableIfNotEnabled(conn, table);
} else if (SpaceViolationPolicy.DISABLE == targetPolicy) {
// In case of Disable SVP, we need to disable the table as it moves into violation
QuotaUtil.disableTableIfNotDisabled(conn, table);
}
if (LOG.isDebugEnabled()) {
LOG.debug((table + " moved into violation of table space quota with policy of ") + targetPolicy);
}
}
} else if (LOG.isTraceEnabled()) {
// Policies are the same, so we have nothing to do except log this. Don't need to re-update
// the quota table
if (!currentStatus.isInViolation()) {
LOG.trace(table + " remains in observance of quota.");
} else {
LOG.trace(table + " remains in violation of quota.");
}
}
} | 3.26 |
hbase_QuotaObserverChore_updateNamespaceQuota_rdh | /**
* Updates the hbase:quota table with the target quota policy for this <code>namespace</code> if
* necessary.
*
* @param namespace
* The namespace being checked
* @param currentSnapshot
* The state of the quota on this namespace from the previous invocation
* @param targetSnapshot
* The state the quota should be in for this namespace
* @param tablesByNamespace
* A mapping of tables in namespaces.
*/
void updateNamespaceQuota(String namespace, SpaceQuotaSnapshot currentSnapshot, SpaceQuotaSnapshot targetSnapshot, final Multimap<String, TableName> tablesByNamespace) throws IOException {
final SpaceQuotaStatus targetStatus = targetSnapshot.getQuotaStatus();
// When the policies differ, we need to move into or out of violation
if (!currentSnapshot.equals(targetSnapshot)) {
// We want to have a policy of "NONE", moving out of violation
if (!targetStatus.isInViolation()) {
for (TableName tableInNS : tablesByNamespace.get(namespace)) {
// If there is a quota on this table in violation
if (tableSnapshotStore.getCurrentState(tableInNS).getQuotaStatus().isInViolation()) {
// Table-level quota violation policy is being applied here.
if (LOG.isTraceEnabled()) {
LOG.trace(("Not activating Namespace violation policy because a Table violation" + " policy is already in effect for ") +
tableInNS);
}
} else {
LOG.info(tableInNS + " moving into observance of namespace space quota");
this.snapshotNotifier.transitionTable(tableInNS, targetSnapshot);
}
}
// We want to move into violation at the NS level
} else {
// Moving tables in the namespace into violation or to a different violation policy
for (TableName tableInNS : tablesByNamespace.get(namespace)) {
final SpaceQuotaSnapshot tableQuotaSnapshot = tableSnapshotStore.getCurrentState(tableInNS);
final boolean hasTableQuota = !Objects.equals(QuotaSnapshotStore.NO_QUOTA, tableQuotaSnapshot);
if (hasTableQuota && tableQuotaSnapshot.getQuotaStatus().isInViolation()) {
// Table-level quota violation policy is being applied here.
if (LOG.isTraceEnabled()) {
LOG.trace(("Not activating Namespace violation policy because a Table violation" + " policy is already in effect for ") + tableInNS);
}
} else {
// No table quota present or a table quota present that is not in violation
LOG.info((tableInNS + " moving into violation of namespace space quota with policy ") + targetStatus.getPolicy());this.snapshotNotifier.transitionTable(tableInNS, targetSnapshot);
}
}
}
// Update the new state in memory for this namespace
f3.setCurrentState(namespace, targetSnapshot);
} else // Policies are the same
if (!targetStatus.isInViolation()) {
// Both are NONE, so we remain in observance
if (LOG.isTraceEnabled()) {
LOG.trace(namespace + " remains in observance of quota.");
}
} else {
// Namespace quota is still in violation, need to enact if the table quota is not
// taking priority.
for (TableName tableInNS : tablesByNamespace.get(namespace)) {
// Does a table policy exist
if (tableSnapshotStore.getCurrentState(tableInNS).getQuotaStatus().isInViolation()) {
// Table-level quota violation policy is being applied here.
if (LOG.isTraceEnabled()) {
LOG.trace(("Not activating Namespace violation policy because Table violation" + " policy is already in effect for ") + tableInNS);}
} else {
// No table policy, so enact namespace policy
LOG.info(tableInNS + " moving into violation of namespace space quota");
this.snapshotNotifier.transitionTable(tableInNS, targetSnapshot);
}
}
}
} | 3.26 |
hbase_QuotaObserverChore_getTablesByNamespace_rdh | /**
* Returns a view of all tables that reside in a namespace with a namespace quota, grouped by
* the namespace itself.
*/
public Multimap<String, TableName> getTablesByNamespace() {
Multimap<String, TableName> tablesByNS = HashMultimap.create();
for (TableName tn : tablesWithNamespaceQuotas) {
tablesByNS.put(tn.getNamespaceAsString(), tn);
}
return tablesByNS;
} | 3.26 |
hbase_QuotaObserverChore_filterInsufficientlyReportedTables_rdh | /**
* Filters out all tables for which the Master currently doesn't have enough region space
* reports received from RegionServers yet.
*/
public Set<TableName> filterInsufficientlyReportedTables(QuotaSnapshotStore<TableName> tableStore) throws IOException {
final double percentRegionsReportedThreshold = m1(getConfiguration());
Set<TableName> tablesToRemove = new HashSet<>();
for (TableName table : Iterables.concat(tablesWithTableQuotas, tablesWithNamespaceQuotas)) {
// Don't recompute a table we've already computed
if (tablesToRemove.contains(table)) {
continue;
}
final int v52 = getNumRegions(table);
// If the table doesn't exist (no regions), bail out.
if (v52 == 0) {
if (LOG.isTraceEnabled()) {
LOG.trace(("Filtering " + table) + " because no regions were reported");
}
tablesToRemove.add(table);
continue;
}
final int reportedRegionsInQuota = getNumReportedRegions(table, tableStore);
final double ratioReported = ((double) (reportedRegionsInQuota)) / v52;
if (ratioReported < percentRegionsReportedThreshold) {
if (LOG.isTraceEnabled()) {
LOG.trace(((((("Filtering " + table) + " because ") + reportedRegionsInQuota) + " of ") + v52) + " regions were reported.");
}
tablesToRemove.add(table);
} else if (LOG.isTraceEnabled()) {
LOG.trace(((((("Retaining " + table) + " because ") + reportedRegionsInQuota) + " of ") + v52) + " regions were reported.");
}
}
for (TableName tableToRemove : tablesToRemove) {
tablesWithTableQuotas.remove(tableToRemove);
tablesWithNamespaceQuotas.remove(tableToRemove);
}
return tablesToRemove;
} | 3.26 |
hbase_QuotaObserverChore_addTableQuotaTable_rdh | /**
* Adds a table with a table quota.
*/
public void addTableQuotaTable(TableName tn) {
tablesWithTableQuotas.add(tn);
} | 3.26 |
hbase_QuotaObserverChore_pruneOldRegionReports_rdh | /**
* Removes region reports over a certain age.
*/
void pruneOldRegionReports() {
final long now = EnvironmentEdgeManager.currentTime();
final long pruneTime = now - regionReportLifetimeMillis;
final int numRemoved = quotaManager.pruneEntriesOlderThan(pruneTime, this);
if (LOG.isTraceEnabled()) {
LOG.trace(((("Removed " + numRemoved) + " old region size reports that were older than ") + pruneTime) + ".");
}
} | 3.26 |
hbase_QuotaObserverChore_getNumRegions_rdh | /**
* Computes the total number of regions in a table.
*/
int getNumRegions(TableName table) throws IOException {
List<RegionInfo> regions = this.conn.getAdmin().getRegions(table);
if (regions == null) {
return 0;
}
// Filter the region replicas if any and return the original number of regions for a table.
RegionReplicaUtil.removeNonDefaultRegions(regions);
return regions.size();
} | 3.26 |
hbase_QuotaObserverChore_getPeriod_rdh | /**
* Extracts the period for the chore from the configuration.
*
* @param conf
* The configuration object.
* @return The configured chore period or the default value in the given timeunit.
* @see #getTimeUnit(Configuration)
*/
static int getPeriod(Configuration conf) {
return conf.getInt(QUOTA_OBSERVER_CHORE_PERIOD_KEY, QUOTA_OBSERVER_CHORE_PERIOD_DEFAULT);
} | 3.26 |
hbase_QuotaObserverChore_getNumReportedRegions_rdh | /**
* Computes the number of regions reported for a table.
*/
int getNumReportedRegions(TableName table, QuotaSnapshotStore<TableName> tableStore) throws IOException {
return Iterables.size(tableStore.filterBySubject(table));
} | 3.26 |
hbase_QuotaObserverChore_getNamespaceQuotaTables_rdh | /**
* Returns an unmodifiable view of all tables in namespaces that have namespace quotas.
*/
public Set<TableName> getNamespaceQuotaTables() {
return Collections.unmodifiableSet(tablesWithNamespaceQuotas);
} | 3.26 |
hbase_QuotaObserverChore_setNamespaceQuotaSnapshot_rdh | /**
* Stores the given {@code snapshot} for the given {@code namespace} in this chore.
*/
void setNamespaceQuotaSnapshot(String namespace, SpaceQuotaSnapshot snapshot) {
this.f2.put(namespace, snapshot);
} | 3.26 |
hbase_QuotaObserverChore_isDisableSpaceViolationPolicy_rdh | /**
* Method to check whether we are dealing with DISABLE {@link SpaceViolationPolicy}. In such a
* case, currPolicy and/or targetPolicy will have the DISABLE policy.
*
* @param currPolicy
* currently set space violation policy
* @param targetPolicy
* new space violation policy
* @return true if is DISABLE space violation policy; otherwise false
*/
private boolean isDisableSpaceViolationPolicy(final SpaceViolationPolicy currPolicy, final SpaceViolationPolicy targetPolicy) {
return (SpaceViolationPolicy.DISABLE == currPolicy) || (SpaceViolationPolicy.DISABLE == targetPolicy);
} | 3.26 |
hbase_QuotaObserverChore_m0_rdh | /**
* Extracts the time unit for the chore period and initial delay from the configuration. The
* configuration value for {@link #QUOTA_OBSERVER_CHORE_TIMEUNIT_KEY} must correspond to a
* {@link TimeUnit} value.
*
* @param conf
* The configuration object.
* @return The configured time unit for the chore period and initial delay or the default value.
*/
static TimeUnit m0(Configuration conf) {
return TimeUnit.valueOf(conf.get(f0, QUOTA_OBSERVER_CHORE_TIMEUNIT_DEFAULT));
} | 3.26 |
hbase_QuotaObserverChore_getNamespaceQuotaSnapshot_rdh | /**
* Fetches the {@link SpaceQuotaSnapshot} for the given namespace from this chore.
*/
SpaceQuotaSnapshot getNamespaceQuotaSnapshot(String namespace) {
SpaceQuotaSnapshot state = this.f2.get(namespace);
if (state == null) {
// No tracked state implies observance.
return QuotaSnapshotStore.NO_QUOTA;
}
return state;
} | 3.26 |
hbase_QuotaObserverChore_m1_rdh | /**
* Extracts the percent of Regions for a table to have been reported to enable quota violation
* state change.
*
* @param conf
* The configuration object.
* @return The percent of regions reported to use.
*/
static Double m1(Configuration conf) {
return conf.getDouble(f1, QUOTA_OBSERVER_CHORE_REPORT_PERCENT_DEFAULT);
} | 3.26 |
hbase_QuotaObserverChore_hasNamespaceQuota_rdh | /**
* Returns true if the table exists in a namespace with a namespace quota.
*/
public boolean hasNamespaceQuota(TableName tn) {
return tablesWithNamespaceQuotas.contains(tn);
} | 3.26 |
hbase_QuotaObserverChore_getNamespaceQuotaSnapshots_rdh | /**
* Returns an unmodifiable view over the current {@link SpaceQuotaSnapshot} objects for each HBase
* namespace with a quota defined.
*/
public Map<String, SpaceQuotaSnapshot> getNamespaceQuotaSnapshots() {
return readOnlyNamespaceSnapshots;
} | 3.26 |
hbase_QuotaObserverChore_processTablesWithQuotas_rdh | /**
* Processes each {@code TableName} which has a quota defined and moves it in or out of violation
* based on the space use.
*
* @param tablesWithTableQuotas
* The HBase tables which have quotas defined
*/
void processTablesWithQuotas(final Set<TableName> tablesWithTableQuotas) throws IOException {
long numTablesInViolation = 0L;
for (TableName table : tablesWithTableQuotas) {
final SpaceQuota spaceQuota = tableSnapshotStore.getSpaceQuota(table);
if (spaceQuota == null) {
if (LOG.isDebugEnabled()) {
LOG.debug(("Unexpectedly did not find a space quota for " + table) + ", maybe it was recently deleted.");
}
continue;
}
final SpaceQuotaSnapshot v15 = tableSnapshotStore.getCurrentState(table);
final SpaceQuotaSnapshot targetSnapshot = tableSnapshotStore.getTargetState(table, spaceQuota);
if (LOG.isTraceEnabled()) {
LOG.trace((((("Processing " + table) + " with current=") + v15) + ", target=") + targetSnapshot);
}
updateTableQuota(table, v15, targetSnapshot);
if (targetSnapshot.getQuotaStatus().isInViolation()) {
numTablesInViolation++;
}
}
// Report the number of tables in violation
if (metrics != null) {
metrics.setNumTableInSpaceQuotaViolation(numTablesInViolation);
}
} | 3.26 |
hbase_QuotaObserverChore_hasTableQuota_rdh | /**
* Returns true if the given table has a table quota.
*/
public boolean hasTableQuota(TableName tn) {
return tablesWithTableQuotas.contains(tn);
} | 3.26 |
hbase_QuotaObserverChore_addNamespaceQuotaTable_rdh | /**
* Adds a table with a namespace quota.
*/
public void addNamespaceQuotaTable(TableName tn) {
tablesWithNamespaceQuotas.add(tn);
} | 3.26 |
hbase_QuotaObserverChore_getTableQuotaTables_rdh | /**
* Returns an unmodifiable view of all tables with table quotas.
*/
public Set<TableName> getTableQuotaTables() {
return Collections.unmodifiableSet(tablesWithTableQuotas);
} | 3.26 |
hbase_QuotaObserverChore_getTableQuotaSnapshot_rdh | /**
* Fetches the {@link SpaceQuotaSnapshot} for the given table.
*/
SpaceQuotaSnapshot getTableQuotaSnapshot(TableName table) {
SpaceQuotaSnapshot state = this.tableQuotaSnapshots.get(table);
if (state == null) {
// No tracked state implies observance.
return QuotaSnapshotStore.NO_QUOTA;
}
return state;
} | 3.26 |
hbase_ClaimReplicationQueueRemoteProcedure_shouldSkip_rdh | // check whether ReplicationSyncUp has already done the work for us, if so, we should skip
// claiming the replication queues and deleting them instead.
private boolean shouldSkip(MasterProcedureEnv env) throws IOException {
MasterFileSystem mfs = env.getMasterFileSystem();
Path syncUpDir = new Path(mfs.getRootDir(), ReplicationSyncUp.INFO_DIR);
return mfs.getFileSystem().exists(new Path(syncUpDir, getServerName().getServerName()));
} | 3.26 |
hbase_ReplicationPeerConfig_isSyncReplication_rdh | /**
* Use remote wal dir to decide whether a peer is sync replication peer
*/
public boolean isSyncReplication() {
return !StringUtils.isBlank(this.remoteWALDir);
} | 3.26 |
hbase_ReplicationPeerConfig_needToReplicate_rdh | /**
* Decide whether the table need replicate to the peer cluster
*
* @param table
* name of the table
* @return true if the table need replicate to the peer cluster
*/
public boolean needToReplicate(TableName table) {
return needToReplicate(table, null);
} | 3.26 |
hbase_CreateStoreFileWriterParams_includeMVCCReadpoint_rdh | /**
* Whether to include MVCC or not
*/
public CreateStoreFileWriterParams includeMVCCReadpoint(boolean includeMVCCReadpoint) {
this.includeMVCCReadpoint = includeMVCCReadpoint;
return this;
} | 3.26 |
hbase_CreateStoreFileWriterParams_isCompaction_rdh | /**
* Whether we are creating a new file in a compaction
*/
public CreateStoreFileWriterParams isCompaction(boolean isCompaction) {
this.isCompaction = isCompaction;
return this;
} | 3.26 |
hbase_CreateStoreFileWriterParams_compression_rdh | /**
* Set the compression algorithm to use
*/
public CreateStoreFileWriterParams compression(Compression.Algorithm compression) {
this.compression = compression;
return this;
} | 3.26 |
hbase_CreateStoreFileWriterParams_includesTag_rdh | /**
* Whether to include tags or not
*/
public CreateStoreFileWriterParams includesTag(boolean includesTag) {
this.includesTag = includesTag;
return this;
} | 3.26 |
hbase_CompactionLifeCycleTracker_afterExecution_rdh | /**
* Called after compaction is executed by CompactSplitThread.
* <p>
* Requesting compaction on a region can lead to multiple compactions on different stores, so we
* will pass the {@link Store} in to tell you the store we operate on.
*/
default void afterExecution(Store store) {
} | 3.26 |
hbase_CompactionLifeCycleTracker_notExecuted_rdh | /**
* Called if the compaction request is failed for some reason.
*/
default void notExecuted(Store store, String reason) {
} | 3.26 |
hbase_TerminatedWrapper_encode_rdh | /**
* Write instance {@code val} into buffer {@code dst}.
*
* @throws IllegalArgumentException
* when the encoded representation of {@code val} contains the
* {@code term} sequence.
*/
@Override
public int encode(PositionedByteRange dst, T val) {
final int start = dst.getPosition();
int written = wrapped.encode(dst, val);
PositionedByteRange b = dst.shallowCopy();
b.setLength(dst.getPosition());
b.setPosition(start);
if ((-1) != terminatorPosition(b)) {
dst.setPosition(start);
throw new IllegalArgumentException("Encoded value contains terminator sequence.");
}
dst.put(term);
return written + term.length;
} | 3.26 |
hbase_TerminatedWrapper_terminatorPosition_rdh | /**
* Return the position at which {@code term} begins within {@code src}, or {@code -1} if
* {@code term} is not found.
*/
protected int terminatorPosition(PositionedByteRange src) {
byte[] a = src.getBytes();
final int offset = src.getOffset();
int v2;
SKIP : for (v2 = src.getPosition(); v2 < src.getLength(); v2++) {
if (a[offset + v2] != term[0]) {
continue;
}
int j;
for (j = 1; (j < term.length) && ((offset + j) < src.getLength()); j++) {
if (a[(offset + v2) + j] != term[j]) {
continue SKIP;
}
}
if (j == term.length) {
return v2; // success
}
}
return -1;
}
/**
* Skip {@code src} | 3.26 |
hbase_ModifyRegionUtils_createRegions_rdh | /**
* Create new set of regions on the specified file-system. NOTE: that you should add the regions
* to hbase:meta after this operation.
*
* @param exec
* Thread Pool Executor
* @param conf
* {@link Configuration}
* @param rootDir
* Root directory for HBase instance
* @param tableDescriptor
* description of the table
* @param newRegions
* {@link RegionInfo} that describes the regions to create
* @param task
* {@link RegionFillTask} custom code to populate region after creation
*/
public static List<RegionInfo> createRegions(final ThreadPoolExecutor exec, final Configuration conf, final Path rootDir, final TableDescriptor tableDescriptor, final RegionInfo[] newRegions, final RegionFillTask task) throws IOException {
if (newRegions == null)
return null;
int regionNumber = newRegions.length;
CompletionService<RegionInfo> completionService = new ExecutorCompletionService<>(exec);
List<RegionInfo> regionInfos = new ArrayList<>();
for (final RegionInfo newRegion : newRegions) {
completionService.submit(new Callable<RegionInfo>() {
@Override
public RegionInfo call() throws IOException {
return createRegion(conf, rootDir, tableDescriptor, newRegion, task);
}
});
}
try {
// wait for all regions to finish creation
for (int i = 0; i < regionNumber; i++) {
regionInfos.add(completionService.take().get());
}
} catch (InterruptedException e) {
LOG.error(("Caught " + e) + " during region creation");
throw new InterruptedIOException(e.getMessage());
} catch (ExecutionException e) {
throw new IOException(e);
}
return regionInfos;
}
/**
* Create new set of regions on the specified file-system.
*
* @param conf
* {@link Configuration}
* @param rootDir
* Root directory for HBase instance
* @param tableDescriptor
* description of the table
* @param newRegion
* {@link RegionInfo} that describes the region to create
* @param task
* {@link RegionFillTask} | 3.26 |
hbase_ModifyRegionUtils_editRegions_rdh | /**
* Execute the task on the specified set of regions.
*
* @param exec
* Thread Pool Executor
* @param regions
* {@link RegionInfo} that describes the regions to edit
* @param task
* {@link RegionFillTask} custom code to edit the region
*/
public static void editRegions(final ThreadPoolExecutor exec, final Collection<RegionInfo> regions, final RegionEditTask task) throws IOException {
final ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<>(exec);
for (final RegionInfo hri : regions) {
completionService.submit(new Callable<Void>() {
@Override
public Void call() throws IOException {
task.editRegion(hri);
return null;
}
});
}
try {
for (RegionInfo hri : regions) {
completionService.take().get();
}
} catch (InterruptedException e) {
throw new InterruptedIOException(e.getMessage());
} catch (ExecutionException e) {
throw new IOException(e.getCause());
}
} | 3.26 |
hbase_ClientTokenUtil_m0_rdh | /**
* Obtain and return an authentication token for the current user.
*
* @param conn
* The HBase cluster connection
* @throws IOException
* if a remote error or serialization problem occurs.
* @return the authentication token instance
*/
@InterfaceAudience.Private
static Token<AuthenticationTokenIdentifier> m0(Connection conn) throws IOException {
Table meta = null;
try {
injectFault();
meta = conn.getTable(TableName.META_TABLE_NAME);
CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW);
AuthenticationProtos.AuthenticationService.BlockingInterface service = AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel);
AuthenticationProtos.GetAuthenticationTokenResponse response = service.getAuthenticationToken(null, AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance());
return toToken(response.getToken());
} catch (ServiceException se) {
throw ProtobufUtil.handleRemoteException(se);
} finally {
if (meta != null) {
meta.close();
}
}
} | 3.26 |
hbase_ClientTokenUtil_obtainToken_rdh | /**
* Obtain and return an authentication token for the given user.
*
* @param conn
* The HBase cluster connection
* @param user
* The user to obtain a token for
* @return the authentication token instance
*/
@InterfaceAudience.Private
static Token<AuthenticationTokenIdentifier> obtainToken(final Connection conn, User user) throws IOException, InterruptedException {
return user.runAs(new PrivilegedExceptionAction<Token<AuthenticationTokenIdentifier>>() {
@Override
public Token<AuthenticationTokenIdentifier> run() throws Exception {
return obtainToken(conn);
}
});
} | 3.26 |
hbase_ClientTokenUtil_obtainAndCacheToken_rdh | /**
* Obtain an authentication token for the given user and add it to the user's credentials.
*
* @param conn
* The HBase cluster connection
* @param user
* The user for whom to obtain the token
* @throws IOException
* If making a remote call to the authentication service fails
* @throws InterruptedException
* If executing as the given user is interrupted
*/
public static void obtainAndCacheToken(final Connection conn, User user) throws IOException, InterruptedException {
try {
Token<AuthenticationTokenIdentifier> token = obtainToken(conn, user);
if (token == null) {
throw new IOException("No token returned for user " + user.getName());
}
if (LOG.isDebugEnabled()) {
LOG.debug((("Obtained token " + token.getKind().toString()) + " for user ") + user.getName());
}
user.addToken(token);
} catch (IOException | InterruptedException | RuntimeException e) {
throw e;
} catch (Exception e) {
throw new UndeclaredThrowableException(e, "Unexpected exception obtaining token for user " + user.getName());
}
} | 3.26 |
hbase_ClientTokenUtil_toToken_rdh | /**
* Converts a protobuf Token message back into a Token instance.
*
* @param proto
* the protobuf Token message
* @return the Token instance
*/
@InterfaceAudience.Private
static Token<AuthenticationTokenIdentifier> toToken(AuthenticationProtos.Token proto) {
return new Token<>(proto.hasIdentifier() ? proto.getIdentifier().toByteArray() : null, proto.hasPassword() ? proto.getPassword().toByteArray() : null, AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE, proto.hasService() ? new Text(proto.getService().toStringUtf8()) : null);
} | 3.26 |
hbase_ProcedureEvent_suspendIfNotReady_rdh | /**
* Returns true if event is not ready and adds procedure to suspended queue, else returns false.
*/
public synchronized boolean suspendIfNotReady(Procedure proc) {
if (!ready) {
suspendedProcedures.addLast(proc);
}
return !ready;
} | 3.26 |
hbase_ProcedureEvent_wakeInternal_rdh | /**
* Only to be used by ProcedureScheduler implementations. Reason: To wake up multiple events,
* locking sequence is schedLock --> synchronized (event) To wake up an event, both schedLock()
* and synchronized(event) are required. The order is schedLock() --> synchronized(event) because
* when waking up multiple events simultaneously, we keep the scheduler locked until all
* procedures suspended on these events have been added back to the queue (Maybe it's not
* required? Evaluate!) To avoid deadlocks, we want to keep the locking order same even when
* waking up single event. That's why, {@link #wake(AbstractProcedureScheduler)} above uses the
* same code path as used when waking up multiple events. Access should remain package-private.
*/
public synchronized void wakeInternal(AbstractProcedureScheduler procedureScheduler) {
if (ready && (!suspendedProcedures.isEmpty())) {
LOG.warn("Found procedures suspended in a ready event! Size=" + suspendedProcedures.size());
}
ready = true;
if (LOG.isTraceEnabled()) {
LOG.trace("Unsuspend " + toString());
}
// wakeProcedure adds to the front of queue, so we start from last in the
// waitQueue' queue, so that the procedure which was added first goes in the front for
// the scheduler queue.
procedureScheduler.addFront(suspendedProcedures.descendingIterator());
suspendedProcedures.clear();
} | 3.26 |
hbase_ProcedureEvent_wakeIfSuspended_rdh | /**
* Wakes up the suspended procedures only if the given {@code proc} is waiting on this event.
* <p/>
* Mainly used by region assignment to reject stale OpenRegionProcedure/CloseRegionProcedure. Use
* with caution as it will cause performance issue if there are lots of procedures waiting on the
* event.
*/
public synchronized boolean wakeIfSuspended(AbstractProcedureScheduler procedureScheduler, Procedure<?> proc) {
if (suspendedProcedures.stream().anyMatch(p -> p.getProcId() == proc.getProcId())) {
wake(procedureScheduler);
return true;
}
return false;
} | 3.26 |
hbase_ProcedureEvent_getSuspendedProcedures_rdh | /**
* Access to suspendedProcedures is 'synchronized' on this object, but it's fine to return it here
* for tests.
*/
public ProcedureDeque getSuspendedProcedures() {
return suspendedProcedures;
} | 3.26 |
hbase_ProcedureEvent_wakeEvents_rdh | /**
* Wakes up all the given events and puts the procedures waiting on them back into
* ProcedureScheduler queues.
*/
public static void wakeEvents(AbstractProcedureScheduler scheduler, ProcedureEvent... events) {
scheduler.wakeEvents(events);
} | 3.26 |
hbase_ProcedureEvent_wake_rdh | /**
* Wakes up the suspended procedures by pushing them back into scheduler queues and sets the event
* as ready. See {@link #wakeInternal(AbstractProcedureScheduler)} for why this is not
* synchronized.
*/
public void wake(AbstractProcedureScheduler procedureScheduler) {
procedureScheduler.wakeEvents(new ProcedureEvent[]{ this });
} | 3.26 |
hbase_RawLong_decodeLong_rdh | /**
* Read a {@code long} value from the buffer {@code buff}.
*/
public long decodeLong(byte[] buff, int offset) {
return Bytes.toLong(buff, offset);
} | 3.26 |
hbase_RawLong_encodeLong_rdh | /**
* Write instance {@code val} into buffer {@code buff}.
*/
public int encodeLong(byte[] buff, int offset, long val) {
return Bytes.putLong(buff, offset, val);
} | 3.26 |
hbase_FuzzyRowFilter_trimTrailingZeroes_rdh | /**
* For a forward scanner, the next cell hint should not contain any trailing zeroes unless they
* are part of fuzzyKeyMeta; the hint '\x01\x01\x01\x00\x00' would skip the valid row '\x01\x01\x01'
*
* @param toInc
* - position of incremented byte
* @return trimmed version of result
*/
private static byte[] trimTrailingZeroes(byte[] result, byte[] fuzzyKeyMeta, int toInc) {
int off = (fuzzyKeyMeta.length >= result.length) ? result.length - 1 : fuzzyKeyMeta.length - 1;
for (; off >= 0; off--) {
if (fuzzyKeyMeta[off] != 0)
break;
}
if (off < toInc)
off = toInc;
byte[] retValue = new byte[off + 1];
System.arraycopy(result, 0, retValue, 0, retValue.length);
return retValue;
} | 3.26 |
hbase_FuzzyRowFilter_m0_rdh | /**
* Parse a serialized representation of {@link FuzzyRowFilter}
*
* @param pbBytes
* A pb serialized {@link FuzzyRowFilter} instance
* @return An instance of {@link FuzzyRowFilter} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static FuzzyRowFilter m0(final byte[] pbBytes) throws DeserializationException {
FilterProtos.FuzzyRowFilter proto;
try {
proto = FilterProtos.FuzzyRowFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
int count = proto.getFuzzyKeysDataCount();
ArrayList<Pair<byte[], byte[]>> fuzzyKeysData = new ArrayList<>(count);
for (int i = 0; i < count; ++i) {
BytesBytesPair current = proto.getFuzzyKeysData(i);
byte[] keyBytes = current.getFirst().toByteArray();
byte[] keyMeta = current.getSecond().toByteArray();
fuzzyKeysData.add(new Pair<>(keyBytes, keyMeta));
}
return new FuzzyRowFilter(fuzzyKeysData);
} | 3.26 |
hbase_FuzzyRowFilter_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof FuzzyRowFilter)) {
return false;
}
FuzzyRowFilter other = ((FuzzyRowFilter) (o));
if (this.fuzzyKeysData.size() != other.fuzzyKeysData.size())
return false;
for (int i = 0; i < fuzzyKeysData.size(); ++i) {
Pair<byte[], byte[]> thisData = this.fuzzyKeysData.get(i);
Pair<byte[], byte[]> otherData = other.fuzzyKeysData.get(i);
if (!(Bytes.equals(thisData.getFirst(), otherData.getFirst()) && Bytes.equals(thisData.getSecond(), otherData.getSecond()))) {
return false;
}
}
return true;
} | 3.26 |
hbase_FuzzyRowFilter_toByteArray_rdh | /**
* Returns The filter serialized using pb
*/
@Override
public byte[] toByteArray() {
FilterProtos.FuzzyRowFilter.Builder builder = FilterProtos.FuzzyRowFilter.newBuilder();
for (Pair<byte[], byte[]> v24 : fuzzyKeysData) {
BytesBytesPair.Builder bbpBuilder = BytesBytesPair.newBuilder();
bbpBuilder.setFirst(UnsafeByteOperations.unsafeWrap(v24.getFirst()));
bbpBuilder.setSecond(UnsafeByteOperations.unsafeWrap(v24.getSecond()));
builder.addFuzzyKeysData(bbpBuilder);
}
return builder.build().toByteArray();
} | 3.26 |
hbase_FuzzyRowFilter_preprocessMask_rdh | /**
* We need to preprocess the mask array, since we treat 2's as unfixed positions and -1 (0xff) as
* fixed positions
*
* @return mask array
*/
private byte[] preprocessMask(byte[] mask) {
if (!UNSAFE_UNALIGNED) {
// do nothing
return mask;
}
if (isPreprocessedMask(mask))
return mask;
for (int i = 0; i < mask.length; i++) {
if (mask[i] == 0) {
mask[i] = -1;// 0 -> -1
} else if (mask[i] == 1) {
mask[i] = 2;// 1 -> 2
} | 3.26 |
hbase_HFileInfo_isReservedFileInfoKey_rdh | /**
* Return true if the given file info key is reserved for internal use.
*/
public static boolean isReservedFileInfoKey(byte[] key) {
return Bytes.startsWith(key, HFileInfo.f0);
} | 3.26 |
hbase_HFileInfo_write_rdh | /**
* Write out this instance on the passed in <code>out</code> stream. We write it as a protobuf.
*
* @see #read(DataInputStream)
*/
void write(final DataOutputStream out) throws IOException {
HFileProtos.FileInfoProto.Builder builder = HFileProtos.FileInfoProto.newBuilder();
for (Map.Entry<byte[], byte[]> e : this.map.entrySet()) {
HBaseProtos.BytesBytesPair.Builder bbpBuilder = HBaseProtos.BytesBytesPair.newBuilder();
bbpBuilder.setFirst(UnsafeByteOperations.unsafeWrap(e.getKey()));
bbpBuilder.setSecond(UnsafeByteOperations.unsafeWrap(e.getValue()));
builder.addMapEntry(bbpBuilder.build());
}
out.write(ProtobufMagic.PB_MAGIC);
builder.build().writeDelimitedTo(out);
} | 3.26 |
hbase_HFileInfo_read_rdh | /**
* Populate this instance with what we find on the passed in <code>in</code> stream. Can
* deserialize protobuf of old Writables format.
*
* @see #write(DataOutputStream)
*/
void read(final DataInputStream in) throws IOException {
// This code is tested over in TestHFileReaderV1 where we read an old hfile w/ this new code.
int pblen = ProtobufUtil.lengthOfPBMagic();
byte[] pbuf = new byte[pblen];
if (in.markSupported()) {
in.mark(pblen);
}
int v5 = in.read(pbuf);
if (v5 != pblen) {
throw new IOException((("read=" + v5) + ", wanted=") + pblen);
}
if (ProtobufUtil.isPBMagicPrefix(pbuf)) {
parsePB(HFileProtos.FileInfoProto.parseDelimitedFrom(in));
} else if (in.markSupported()) {
in.reset();
parseWritable(in);
} else {
// We cannot use BufferedInputStream, it consumes more than we read from the underlying IS
ByteArrayInputStream bais = new ByteArrayInputStream(pbuf);
SequenceInputStream sis = new SequenceInputStream(bais, in); // Concatenate input streams
// TODO: Am I leaking anything here wrapping the passed in stream? We are not calling
// close on the wrapped streams but they should be let go after we leave this context?
// I see that we keep a reference to the passed in inputstream but since we no longer
// have a reference to this after we leave, we should be ok.
parseWritable(new DataInputStream(sis));
}
} | 3.26 |
hbase_HFileInfo_append_rdh | /**
* Append the given key/value pair to the file info, optionally checking the key prefix.
*
* @param k
* key to add
* @param v
* value to add
* @param checkPrefix
* whether to check that the provided key does not start with the reserved
* prefix
* @return this file info object
* @throws IOException
* if the key or value is invalid
* @throws NullPointerException
* if {@code key} or {@code value} is {@code null}
*/
public HFileInfo append(final byte[] k, final byte[] v, final boolean checkPrefix) throws IOException {
Objects.requireNonNull(k, "key cannot be null");
Objects.requireNonNull(v, "value cannot be null");
if (checkPrefix && isReservedFileInfoKey(k)) {
throw new IOException(("Keys with a " + HFileInfo.RESERVED_PREFIX) + " are reserved");
}
put(k, v);
return this;
} | 3.26 |
hbase_HFileInfo_parseWritable_rdh | /**
* Now parse the old Writable format. It was a list of Map entries. Each map entry was a key and a
* value of a byte []. The old map format had a byte before each entry that held a code which was
* short for the key or value type. We know it was a byte [] so in below we just read and dump it.
*/
void parseWritable(final DataInputStream in) throws IOException {
// First clear the map.
// Otherwise we will just accumulate entries every time this method is called.
this.map.clear();
// Read the number of entries in the map
int entries = in.readInt();
// Then read each key/value pair
for (int i = 0; i < entries; i++) {
byte[] key = Bytes.readByteArray(in);
// We used to read a byte that encoded the class type.
// Read and ignore it because it is always byte [] in hfile
in.readByte();
byte[] value = Bytes.readByteArray(in);
this.map.put(key, value);
}
} | 3.26 |
hbase_HFileInfo_initMetaAndIndex_rdh | /**
* should be called after initTrailerAndContext
*/
public void initMetaAndIndex(HFile.Reader reader) throws IOException {
ReaderContext context = reader.getContext();
try {
HFileBlock.FSReader blockReader = reader.getUncachedBlockReader();
// Initialize an block iterator, and parse load-on-open blocks in the following.
blockIter = blockReader.blockRange(trailer.getLoadOnOpenDataOffset(), context.getFileSize() - trailer.getTrailerSize());
// Data index. We also read statistics about the block index written after
// the root level.
HFileBlock dataBlockRootIndex = blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX);
HFileBlock metaBlockIndex = blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX);
loadMetaInfo(blockIter, hfileContext);
HFileIndexBlockEncoder indexBlockEncoder = HFileIndexBlockEncoderImpl.createFromFileInfo(this);
this.dataIndexReader = new HFileBlockIndex.CellBasedKeyBlockIndexReaderV2(trailer.createComparator(), trailer.getNumDataIndexLevels(), indexBlockEncoder);
dataIndexReader.readMultiLevelIndexRoot(dataBlockRootIndex, trailer.getDataIndexCount());
reader.setDataBlockIndexReader(dataIndexReader);
// Meta index.
this.metaIndexReader = new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1);
metaIndexReader.readRootIndex(metaBlockIndex, trailer.getMetaIndexCount());
reader.setMetaBlockIndexReader(metaIndexReader);
reader.setDataBlockEncoder(HFileDataBlockEncoderImpl.createFromFileInfo(this));
// Load-On-Open info
HFileBlock b;
while ((b = blockIter.nextBlock()) != null) {
loadOnOpenBlocks.add(b);
}
// close the block reader
context.getInputStreamWrapper().unbuffer();
} catch (Throwable t) {
IOUtils.closeQuietly(context.getInputStreamWrapper(), e -> LOG.warn("failed to close input stream wrapper", e));
throw new CorruptHFileException("Problem reading data index and meta index from file " + context.getFilePath(), t);
}
} | 3.26 |
hbase_HFileInfo_m0_rdh | /**
* Fill our map with content of the pb we read off disk
*
* @param fip
* protobuf message to read
*/
void m0(final HFileProtos.FileInfoProto fip) {
this.map.clear();
for (BytesBytesPair pair : fip.getMapEntryList()) {
this.map.put(pair.getFirst().toByteArray(), pair.getSecond().toByteArray());
}
} | 3.26 |
hbase_HFileInfo_checkFileVersion_rdh | /**
* File version check is a little sloppy. We read v3 files but can also read v2 files if their
* content has been pb'd; files written with 0.98.
*/
private void checkFileVersion(Path path) {
int majorVersion = trailer.getMajorVersion();
if (majorVersion == getMajorVersion()) {
return;
}
int minorVersion = trailer.getMinorVersion();
if ((majorVersion == 2) && (minorVersion >= MIN_V2_MINOR_VERSION_WITH_PB)) {
return;
}
// We can read v3 or v2 versions of hfile.
throw new IllegalArgumentException(((((((("Invalid HFile version: major=" + trailer.getMajorVersion()) + ", minor=") + trailer.getMinorVersion()) + ": expected at least ") + "major=2 and minor=") + MAX_MINOR_VERSION) + ", path=") + path);
} | 3.26 |
hbase_ByteArrayComparable_parseFrom_rdh | /**
* Parse a serialized representation of {@link ByteArrayComparable}
*
* @param pbBytes
* A pb serialized {@link ByteArrayComparable} instance
* @return An instance of {@link ByteArrayComparable} made from <code>bytes</code>
* @see #toByteArray
*/
@SuppressWarnings("DoNotCallSuggester")public static
ByteArrayComparable parseFrom(final byte[] pbBytes)
throws DeserializationException {
throw new DeserializationException("parseFrom called on base ByteArrayComparable, but should be called on derived type");
} | 3.26 |
hbase_ByteArrayComparable_m0_rdh | /**
* Return true if and only if the fields of the comparator that are serialized are equal to the
* corresponding fields in other.
*/
boolean m0(ByteArrayComparable other) {
if (other == this) {
return true;
}
return Bytes.equals(this.getValue(), other.getValue());
} | 3.26 |
hbase_ServerCommandLine_logProcessInfo_rdh | /**
* Logs information about the currently running JVM process including the environment variables.
* Logging of env vars can be disabled by setting {@code "hbase.envvars.logging.disabled"} to
* {@code "true"}.
* <p>
* If enabled, you can also exclude environment variables containing certain substrings by setting
* {@code "hbase.envvars.logging.skipwords"} to comma separated list of such substrings.
*/
public static void logProcessInfo(Configuration conf) {
logHBaseConfigs(conf);
// log environment variables unless asked not to
if ((conf == null) || (!conf.getBoolean("hbase.envvars.logging.disabled", false))) {
Set<String> skipWords = new HashSet<>(DEFAULT_SKIP_WORDS);
if (conf != null) {
String[] confSkipWords = conf.getStrings("hbase.envvars.logging.skipwords");
if (confSkipWords != null) {
skipWords.addAll(Arrays.asList(confSkipWords));
}
}
nextEnv : for (Entry<String, String> entry : System.getenv().entrySet()) {
String key = entry.getKey().toLowerCase(Locale.ROOT);
String value = entry.getValue().toLowerCase(Locale.ROOT);
// exclude variables which may contain skip words
for (String skipWord : skipWords) {
if (key.contains(skipWord) || value.contains(skipWord))
continue nextEnv;
}
LOG.info("env:" + entry);
}
}
// and JVM info
logJVMInfo();
} | 3.26 |
hbase_ServerCommandLine_usage_rdh | /**
* Print usage information for this command line.
*
* @param message
* if not null, print this message before the usage info.
*/
protected void usage(String message) {
if (message != null) {
System.err.println(message);
System.err.println("");
}
System.err.println(getUsage());
} | 3.26 |