name (string, length 12–178) | code_snippet (string, length 8–36.5k) | score (float64, 3.26–3.68)
---|---|---|
hbase_MasterProcedureUtil_submitProcedure_rdh | /**
* Helper used to deal with submitting procs with nonce. Internally the
* NonceProcedureRunnable.run() will be called only if no one else registered the nonce. Any
* exception thrown by the run() method will be collected/handled and rethrown. <code>
* long procId = MasterProcedureUtil.submitProcedure(
* new NonceProcedureRunnable(procExec, nonceGroup, nonce) {
* {@literal @}Override
* public void run() {
* cpHost.preOperation();
* submitProcedure(new MyProc());
* cpHost.postOperation();
* }
* });
* </code>
*/
public static long submitProcedure(final NonceProcedureRunnable runnable) throws IOException {
final ProcedureExecutor<MasterProcedureEnv> procExec = runnable.getProcedureExecutor();
final long procId = procExec.registerNonce(runnable.getNonceKey());
if (procId >= 0) {
return procId; // someone already registered the nonce
}
try {
runnable.run();
} catch (IOException e) {
procExec.setFailureResultForNonce(runnable.getNonceKey(), runnable.getDescription(), procExec.getEnvironment().getRequestUser(), e);
throw e;
} finally {
procExec.unregisterNonceIfProcedureWasNotSubmitted(runnable.getNonceKey());
}
return runnable.getProcId();
}
/**
* Pattern used to validate a Procedure WAL file name see
* {@link #validateProcedureWALFilename(String)} | 3.26 |
hbase_MasterProcedureUtil_getServerPriority_rdh | /**
* Return the priority for the given procedure. For now we only have two priorities, 100 for
* server carrying meta, and 1 for others.
*/
public static int getServerPriority(ServerProcedureInterface proc) {
return proc.hasMetaTableRegion() ? 100 : 1; } | 3.26 |
hbase_MasterProcedureUtil_unwrapRemoteIOException_rdh | /**
* This is a version of unwrapRemoteIOException that can produce a DoNotRetryIOException (DNRIOE). We
* need to throw a DNRIOE to clients when a Procedure has failed, else they will keep retrying. The default
* proc.getException().unwrapRemoteException doesn't have access to DNRIOE from the procedure2
* module.
*/
public static IOException unwrapRemoteIOException(Procedure<?> proc) {
Exception e = proc.getException().unwrapRemoteException();
// Do not retry ProcedureExceptions!
return e instanceof ProcedureException ? new DoNotRetryIOException(e) : proc.getException().unwrapRemoteIOException();
} | 3.26 |
hbase_MasterProcedureUtil_validateProcedureWALFilename_rdh | /**
* A Procedure WAL file name is of the format: pv-<wal-id>.log where wal-id is 20 digits.
*
* @param filename
* name of the file to validate
* @return <tt>true</tt> if the filename matches a Procedure WAL, <tt>false</tt> otherwise
*/
public static boolean validateProcedureWALFilename(String filename) {
return PATTERN.matcher(filename).matches();
} | 3.26 |
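A small usage sketch of the filename check above. The PATTERN constant is not included in this snippet, so the regex below is an assumption derived from the documented "pv-<wal-id>.log" format with a 20-digit wal-id:

```java
import java.util.regex.Pattern;

public class ProcedureWalNameCheckSketch {
  // Hypothetical pattern, assumed from the documented "pv-<wal-id>.log" format (20-digit wal-id).
  private static final Pattern PATTERN = Pattern.compile("pv-\\d{20}\\.log");

  static boolean validateProcedureWALFilename(String filename) {
    return PATTERN.matcher(filename).matches();
  }

  public static void main(String[] args) {
    System.out.println(validateProcedureWALFilename("pv-00000000000000000001.log")); // true
    System.out.println(validateProcedureWALFilename("pv-1.log")); // false: wal-id must be 20 digits
  }
}
```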
hbase_RegionServerCoprocessorHost_preStop_rdh | // ////////////////////////////////////////////////////////////////////////////////////////////////
// RegionServerObserver operations
// ////////////////////////////////////////////////////////////////////////////////////////////////
public void preStop(String message, User user) throws IOException {
// While stopping the region server, all coprocessor methods should be executed first, and then
// the coprocessors should be cleaned up.
if (coprocEnvironments.isEmpty()) {
return;
}
execShutdown(new RegionServerObserverOperation(user) {
@Override
public void call(RegionServerObserver observer) throws IOException {
observer.preStopRegionServer(this);
}
@Override
public void postEnvCall() {
// invoke coprocessor stop method
shutdown(this.getEnvironment());
}
});
} | 3.26 |
hbase_ClientMetaTableAccessor_getTableStopRowForMeta_rdh | /**
* Returns stop row for scanning META according to query type
*/
public static byte[] getTableStopRowForMeta(TableName tableName, QueryType type) {
if (tableName == null) {
return null;
}
final byte[] stopRow;
switch (type) {
case REGION :
case REPLICATION :
{
stopRow = new byte[tableName.getName().length + 3];
System.arraycopy(tableName.getName(), 0, stopRow, 0, tableName.getName().length);
stopRow[stopRow.length - 3] = ' ';
stopRow[stopRow.length - 2] = HConstants.DELIMITER;
stopRow[stopRow.length - 1] = HConstants.DELIMITER;
break;
}
case ALL :
case f0 :
default :
{
stopRow = new byte[tableName.getName().length + 1];
System.arraycopy(tableName.getName(), 0, stopRow, 0, tableName.getName().length);
stopRow[stopRow.length - 1] = ' ';
break;
}
}
return stopRow;
} | 3.26 |
hbase_ClientMetaTableAccessor_getRegionLocations_rdh | /**
* Returns an HRegionLocationList extracted from the result.
*/
private static Optional<RegionLocations> getRegionLocations(Result r) {
return Optional.ofNullable(CatalogFamilyFormat.getRegionLocations(r));
} | 3.26 |
hbase_ClientMetaTableAccessor_getRegionLocationWithEncodedName_rdh | /**
* Returns the HRegionLocation from meta for the given encoded region name
*/
public static CompletableFuture<Optional<HRegionLocation>> getRegionLocationWithEncodedName(AsyncTable<?> metaTable, byte[] encodedRegionName) {
CompletableFuture<Optional<HRegionLocation>> future = new CompletableFuture<>();
addListener(metaTable.scanAll(new Scan().setReadType(ReadType.PREAD).addFamily(HConstants.CATALOG_FAMILY)), (results, err) -> {
if (err != null) {
future.completeExceptionally(err);
return;
}
String encodedRegionNameStr = Bytes.toString(encodedRegionName);
results.stream().filter(result -> !result.isEmpty()).filter(result -> CatalogFamilyFormat.getRegionInfo(result) != null).forEach(result -> {
getRegionLocations(result).ifPresent(locations -> {
for (HRegionLocation location : locations.getRegionLocations()) {
if ((location != null) && encodedRegionNameStr.equals(location.getRegion().getEncodedName())) {
future.complete(Optional.of(location));
return;
}
}
});
});
future.complete(Optional.empty());
});
return future;
} | 3.26 |
hbase_ClientMetaTableAccessor_getResults_rdh | /**
* Returns Collected results; wait till visits complete to collect all possible results
*/
List<T> getResults() {
return this.results;
} | 3.26 |
hbase_ClientMetaTableAccessor_getRegionLocation_rdh | /**
* Returns the HRegionLocation from meta for the given region
*/
public static CompletableFuture<Optional<HRegionLocation>> getRegionLocation(AsyncTable<?> metaTable, byte[] regionName) {
CompletableFuture<Optional<HRegionLocation>> future = new CompletableFuture<>();
try {
RegionInfo parsedRegionInfo = CatalogFamilyFormat.parseRegionInfoFromRegionName(regionName);
addListener(metaTable.get(new Get(CatalogFamilyFormat.getMetaKeyForRegion(parsedRegionInfo)).addFamily(HConstants.CATALOG_FAMILY)), (r, err) -> {
if (err != null) {
future.completeExceptionally(err);
return;
}
future.complete(getRegionLocations(r).map(locations -> locations.getRegionLocation(parsedRegionInfo.getReplicaId())));
});
} catch (IOException parseEx) {
LOG.warn("Failed to parse the passed region name: " + Bytes.toStringBinary(regionName));
future.completeExceptionally(parseEx);
}
return future;
} | 3.26 |
hbase_ClientMetaTableAccessor_scanMeta_rdh | /**
* Performs a scan of META table for given table.
*
* @param metaTable
* scanner over meta table
* @param startRow
* Where to start the scan
* @param stopRow
* Where to stop the scan
* @param type
* scanned part of meta
* @param maxRows
* maximum rows to return
* @param visitor
* Visitor invoked against each row
*/
private static CompletableFuture<Void> scanMeta(AsyncTable<AdvancedScanResultConsumer> metaTable, byte[] startRow, byte[] stopRow, QueryType type, int maxRows, final Visitor visitor) {
int rowUpperLimit = (maxRows > 0) ? maxRows : Integer.MAX_VALUE;
Scan scan = getMetaScan(metaTable, rowUpperLimit);
for (byte[] family : type.getFamilies()) {
scan.addFamily(family);
}
if (startRow != null) {
scan.withStartRow(startRow);
}
if (stopRow != null) {
scan.withStopRow(stopRow);
}
if (LOG.isDebugEnabled()) {
LOG.debug(((((((("Scanning META" + " starting at row=") + Bytes.toStringBinary(scan.getStartRow())) + " stopping at row=") + Bytes.toStringBinary(scan.getStopRow())) + " for max=") + rowUpperLimit) + " with caching=") + scan.getCaching());
}
CompletableFuture<Void> future = new CompletableFuture<Void>();
// Get the region locator's meta replica mode.
CatalogReplicaMode metaReplicaMode = CatalogReplicaMode.fromString(metaTable.getConfiguration().get(LOCATOR_META_REPLICAS_MODE, CatalogReplicaMode.NONE.toString()));
if (metaReplicaMode == CatalogReplicaMode.LOAD_BALANCE) {
addListener(metaTable.getDescriptor(), (desc, error) -> {
if (error != null) {
LOG.error("Failed to get meta table descriptor, error: ", error);
future.completeExceptionally(error);
return;
}
int numOfReplicas = desc.getRegionReplication();
if (numOfReplicas > 1) {
int replicaId = ThreadLocalRandom.current().nextInt(numOfReplicas);
// When the replicaId is 0, do not set to Consistency.TIMELINE
if (replicaId > 0) {
scan.setReplicaId(replicaId);
scan.setConsistency(Consistency.TIMELINE);
}
}
metaTable.scan(scan, new MetaTableScanResultConsumer(rowUpperLimit, visitor, future));
});
} else {
if (metaReplicaMode == CatalogReplicaMode.HEDGED_READ) {
scan.setConsistency(Consistency.TIMELINE);
}
metaTable.scan(scan, new MetaTableScanResultConsumer(rowUpperLimit, visitor, future));
}
return future;
} | 3.26 |
hbase_ClientMetaTableAccessor_getTableStartRowForMeta_rdh | /**
* Returns start row for scanning META according to query type
*/
public static byte[] getTableStartRowForMeta(TableName tableName, QueryType type) {
if (tableName == null) {
return null;
}
switch (type) {
case REGION :
case REPLICATION :
{
byte[] startRow = new byte[tableName.getName().length + 2];
System.arraycopy(tableName.getName(), 0, startRow, 0, tableName.getName().length);
startRow[startRow.length - 2] = HConstants.DELIMITER;
startRow[startRow.length - 1] = HConstants.DELIMITER;
return startRow;
}
case ALL :
case f0 :
default :
{
return tableName.getName();
}
}
} | 3.26 |
hbase_CompactionPolicy_setConf_rdh | /**
* Inform the policy that some configuration has been changed, so cached values should be updated
* if any.
*/
public void setConf(Configuration conf) {
this.comConf = new CompactionConfiguration(conf, this.storeConfigInfo);
} | 3.26 |
hbase_CompactionPolicy_getConf_rdh | /**
* Returns The current compaction configuration settings.
*/
public CompactionConfiguration getConf() {
return this.comConf;
} | 3.26 |
hbase_KeyLocker_acquireLocks_rdh | /**
* Acquire locks for a set of keys. The keys will be sorted internally to avoid possible deadlock.
*
* @throws ClassCastException
* if the given {@code keys} contains elements that are not mutually
* comparable
*/
public Map<K, Lock> acquireLocks(Set<? extends K> keys) {
Object[] keyArray = keys.toArray();
Arrays.sort(keyArray);
lockPool.purge();
Map<K, Lock> locks = new LinkedHashMap<>(keyArray.length);
for (Object o : keyArray) {
@SuppressWarnings("unchecked")
K key = ((K) (o));
ReentrantLock lock = lockPool.get(key);
locks.put(key, lock);
}
for (Lock lock : locks.values()) {
lock.lock();
}
return locks;
} | 3.26 |
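A minimal usage sketch for the two acquire methods above, assuming org.apache.hadoop.hbase.util.KeyLocker is on the classpath; locks come back already held and must be released by the caller:

```java
import java.util.Map;
import java.util.Set;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.hadoop.hbase.util.KeyLocker;

public class KeyLockerUsageSketch {
  public static void updateRows(KeyLocker<String> locker, Set<String> rowKeys) {
    // Locks are acquired in sorted key order internally, so two callers locking
    // overlapping key sets cannot deadlock against each other.
    Map<String, Lock> locks = locker.acquireLocks(rowKeys);
    try {
      // ... mutate the rows guarded by these keys ...
    } finally {
      locks.values().forEach(Lock::unlock);
    }
  }

  public static void updateRow(KeyLocker<String> locker, String rowKey) {
    ReentrantLock lock = locker.acquireLock(rowKey); // returned already locked
    try {
      // ... mutate the single row ...
    } finally {
      lock.unlock();
    }
  }
}
```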
hbase_KeyLocker_acquireLock_rdh | /**
* Return a lock for the given key. The lock is already locked.
*/
public ReentrantLock acquireLock(K key) {
if (key == null)
throw new IllegalArgumentException("key must not be null");
lockPool.purge();
ReentrantLock lock = lockPool.get(key);
lock.lock();
return lock;
} | 3.26 |
hbase_BackupInfo_setIncrTimestampMap_rdh | /**
* Set the new region server log timestamps after distributed log roll
*
* @param prevTableSetTimestampMap
* table timestamp map
*/
public void setIncrTimestampMap(Map<TableName, Map<String, Long>> prevTableSetTimestampMap) {
this.incrTimestampMap = prevTableSetTimestampMap;
} | 3.26 |
hbase_BackupInfo_compareTo_rdh | /**
* We use only time stamps to compare objects during sort operation
*/
@Override
public int compareTo(BackupInfo o) {
Long thisTS = Long.valueOf(this.getBackupId().substring(this.getBackupId().lastIndexOf("_") + 1));
Long otherTS = Long.valueOf(o.getBackupId().substring(o.getBackupId().lastIndexOf("_") + 1));
return thisTS.compareTo(otherTS);
} | 3.26 |
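The comparison above orders backups purely by the numeric suffix after the last underscore of the backup id. A standalone sketch of that rule, with made-up backup ids:

```java
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class BackupIdOrderingSketch {
  // Same rule as compareTo above: order by the timestamp after the last '_' in the backup id.
  static long timestampOf(String backupId) {
    return Long.parseLong(backupId.substring(backupId.lastIndexOf('_') + 1));
  }

  public static void main(String[] args) {
    List<String> ids = Arrays.asList("backup_1700000200000", "backup_1700000100000");
    ids.sort(Comparator.comparingLong(BackupIdOrderingSketch::timestampOf));
    System.out.println(ids); // [backup_1700000100000, backup_1700000200000]
  }
}
```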
hbase_BackupInfo_getProgress_rdh | /**
* Get current progress
*/
public int getProgress() {
return progress;
} | 3.26 |
hbase_BackupInfo_getIncrTimestampMap_rdh | /**
* Get new region server log timestamps after distributed log roll
*
* @return new region server log timestamps
*/
public Map<TableName, Map<String, Long>> getIncrTimestampMap() {
return this.incrTimestampMap;
} | 3.26 |
hbase_BackupInfo_setProgress_rdh | /**
* Set progress (0-100%)
*
* @param p
* progress value
*/
public void setProgress(int p) {
this.progress = p;
} | 3.26 |
hbase_VisibilityLabelsCache_getUserAuthsAsOrdinals_rdh | /**
* Returns the list of ordinals of labels associated with the user
*
* @param user
* Not null value.
* @return the list of ordinals
*/
public Set<Integer> getUserAuthsAsOrdinals(String user) {
this.lock.readLock().lock();
try {
Set<Integer> auths = userAuths.get(user);
return auths == null ? EMPTY_SET : auths;
} finally {
this.lock.readLock().unlock();
}
} | 3.26 |
hbase_VisibilityLabelsCache_getLabelsCount_rdh | /**
* Returns The total number of visibility labels.
*/
public int getLabelsCount() {
this.lock.readLock().lock();
try {
return this.labels.size();
} finally {
this.lock.readLock().unlock();
}
} | 3.26 |
hbase_VisibilityLabelsCache_createAndGet_rdh | /**
* Creates the singleton instance, if not yet present, and returns the same.
*
* @return Singleton instance of VisibilityLabelsCache
*/
@SuppressWarnings(value = "MS_EXPOSE_REP", justification = "singleton pattern")
public static synchronized VisibilityLabelsCache createAndGet(ZKWatcher watcher, Configuration conf) throws IOException {
// VisibilityLabelService#init() for different regions (in same RS) passes same instance of
// watcher as all get the instance from RS.
// watcher != instance.zkVisibilityWatcher.getWatcher() - This check is needed only in UTs with
// RS restart. It will be same JVM in which RS restarts and instance will be not null. But the
// watcher associated with existing instance will be stale as the restarted RS will have new
// watcher with it.
if ((instance == null) || (watcher != instance.zkVisibilityWatcher.getWatcher())) {
instance = new VisibilityLabelsCache(watcher, conf);
}
return instance;
}
/**
*
* @return Singleton instance of VisibilityLabelsCache when this is called before calling
{@link #createAndGet(ZKWatcher, Configuration)} | 3.26 |
hbase_VisibilityLabelsCache_getGroupAuthsAsOrdinals_rdh | /**
* Returns the list of ordinals of labels associated with the groups
*
* @return the list of ordinals
*/
public Set<Integer> getGroupAuthsAsOrdinals(String[] groups) {
this.lock.readLock().lock();
try {
Set<Integer> authOrdinals = new HashSet<>();
if ((groups != null) && (groups.length > 0)) {
Set<Integer> groupAuthOrdinals = null;
for (String group : groups) {
groupAuthOrdinals = groupAuths.get(group);
if ((groupAuthOrdinals != null) && (!groupAuthOrdinals.isEmpty())) {
authOrdinals.addAll(groupAuthOrdinals);
}
}
}
return authOrdinals.isEmpty() ? EMPTY_SET : authOrdinals;
} finally {
this.lock.readLock().unlock();
}
} | 3.26 |
hbase_JmxCacheBuster_restart_rdh | /**
* Restarts the stopped service.
*
* @see #stop()
*/
public static void restart() {
stopped.set(false);
} | 3.26 |
hbase_JmxCacheBuster_m0_rdh | /**
* For JMX to forget about all previously exported metrics.
*/
public static void m0() {
if (LOG.isTraceEnabled()) {
LOG.trace("clearing JMX Cache" + StringUtils.stringifyException(new Exception()));
}
// If there are more than 100 ms before the executor will run then everything should be merged.
ScheduledFuture future = fut.get();
if ((future != null) && ((!future.isDone()) && (future.getDelay(TimeUnit.MILLISECONDS) > 100))) {// BAIL OUT
return;
}
if (stopped.get()) {
return;
}
future = executor.getExecutor().schedule(new JmxCacheBusterRunnable(), 5,
TimeUnit.SECONDS);
fut.set(future);
} | 3.26 |
hbase_JmxCacheBuster_m1_rdh | /**
* Stops the clearing of JMX metrics and restarting the Hadoop metrics system. This is needed for
* some test environments where we manually inject sources or sinks dynamically.
*/
public static void m1() {
stopped.set(true);
ScheduledFuture future = fut.get();
future.cancel(false);
} | 3.26 |
hbase_IdentityTableMapper_initJob_rdh | /**
* Use this before submitting a TableMap job. It will appropriately set up the job.
*
* @param table
* The table name.
* @param scan
* The scan with the columns to scan.
* @param mapper
* The mapper class.
* @param job
* The job configuration.
* @throws IOException
* When setting up the job fails.
*/
@SuppressWarnings("rawtypes")
public static void initJob(String table, Scan scan, Class<? extends TableMapper> mapper, Job job) throws IOException {
TableMapReduceUtil.initTableMapperJob(table, scan, mapper, ImmutableBytesWritable.class, Result.class, job);
} | 3.26 |
hbase_IdentityTableMapper_map_rdh | /**
* Pass the key, value to reduce.
*
* @param key
* The current key.
* @param value
* The current value.
* @param context
* The current context.
* @throws IOException
* When writing the record fails.
* @throws InterruptedException
* When the job is aborted.
*/
public void map(ImmutableBytesWritable key, Result value, Context context) throws IOException, InterruptedException {
context.write(key, value);
} | 3.26 |
hbase_BaseSourceImpl_setGauge_rdh | /**
* Set a single gauge to a value.
*
* @param gaugeName
* gauge name
* @param value
* the new value of the gauge.
*/
@Override
public void setGauge(String gaugeName, long value) {
MutableGaugeLong gaugeInt = metricsRegistry.getGauge(gaugeName, value);
gaugeInt.set(value);
} | 3.26 |
hbase_BaseSourceImpl_removeMetric_rdh | /**
* Remove a named gauge.
*
* @param key
* the key of the gauge to remove
*/
@Override
public void removeMetric(String key) {
metricsRegistry.removeMetric(key);
JmxCacheBuster.clearJmxCache();
} | 3.26 |
hbase_BaseSourceImpl_incCounters_rdh | /**
* Increment a named counter by some value.
*
* @param key
* the name of the counter
* @param delta
* the amount to increment
*/
@Override
public void incCounters(String key, long delta) {
MutableFastCounter v3 = metricsRegistry.getCounter(key, 0L);
v3.incr(delta);
} | 3.26 |
hbase_BaseSourceImpl_incGauge_rdh | /**
* Add some amount to a gauge.
*
* @param gaugeName
* The name of the gauge to increment.
* @param delta
* The amount to increment the gauge by.
*/
@Override
public void incGauge(String gaugeName, long delta) {
MutableGaugeLong gaugeInt = metricsRegistry.getGauge(gaugeName, 0L);
gaugeInt.incr(delta);
} | 3.26 |
hbase_BaseSourceImpl_decGauge_rdh | /**
* Decrease the value of a named gauge.
*
* @param gaugeName
* The name of the gauge.
* @param delta
* the amount to subtract from a gauge value.
*/
@Override
public void decGauge(String gaugeName, long delta) {
MutableGaugeLong gaugeInt = metricsRegistry.getGauge(gaugeName, 0L);
gaugeInt.decr(delta);
} | 3.26 |
hbase_MemStoreCompactorSegmentsIterator_createScanner_rdh | /**
* Creates the scanner for compacting the pipeline.
*
* @return the scanner
*/
private InternalScanner createScanner(HStore store, List<KeyValueScanner> scanners) throws IOException {
InternalScanner scanner = null;
boolean success = false;
try {
RegionCoprocessorHost cpHost = store.getCoprocessorHost();
ScanInfo scanInfo;
if (cpHost != null) {
scanInfo = cpHost.preMemStoreCompactionCompactScannerOpen(store);
} else {
scanInfo = store.getScanInfo();
}
scanner = new StoreScanner(store, scanInfo, scanners, ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(), PrivateConstants.OLDEST_TIMESTAMP);
if (cpHost != null) {
InternalScanner scannerFromCp = cpHost.preMemStoreCompactionCompact(store, scanner);
if (scannerFromCp == null) {
throw new CoprocessorException("Got a null InternalScanner when calling" + " preMemStoreCompactionCompact which is not acceptable");
}
success = true;
return scannerFromCp;
} else {
success = true;
return scanner;
}
} finally {
if (!success) {
Closeables.close(scanner, true);
scanners.forEach(KeyValueScanner::close);
}
}
} | 3.26 |
hbase_RawDouble_decodeDouble_rdh | /**
* Read a {@code double} value from the buffer {@code buff}.
*/
public double decodeDouble(byte[] buff, int offset) {
double val = Bytes.toDouble(buff, offset);
return val;
} | 3.26 |
hbase_RawDouble_encodeDouble_rdh | /**
* Write instance {@code val} into buffer {@code buff}.
*/
public int encodeDouble(byte[] buff, int offset, double val) {
return Bytes.putDouble(buff, offset, val);
} | 3.26 |
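A self-contained round-trip sketch of what the encode/decode pair above does. It uses java.nio instead of the HBase Bytes helpers, on the assumption that both store the double as 8 big-endian bytes:

```java
import java.nio.ByteBuffer;

public class RawDoubleRoundTripSketch {
  static int encodeDouble(byte[] buff, int offset, double val) {
    ByteBuffer.wrap(buff, offset, Double.BYTES).putDouble(val);
    return offset + Double.BYTES; // next free position, mirroring Bytes.putDouble
  }

  static double decodeDouble(byte[] buff, int offset) {
    return ByteBuffer.wrap(buff, offset, Double.BYTES).getDouble();
  }

  public static void main(String[] args) {
    byte[] buff = new byte[Double.BYTES];
    encodeDouble(buff, 0, 3.26);
    System.out.println(decodeDouble(buff, 0)); // 3.26
  }
}
```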
hbase_RegionReplicaUtil_addReplicas_rdh | /**
* Create any replicas for the regions (the default replicas that was already created is passed to
* the method)
*
* @param regions
* existing regions
* @param oldReplicaCount
* existing replica count
* @param newReplicaCount
* updated replica count due to modify table
* @return the combined list of default and non-default replicas
*/
public static List<RegionInfo> addReplicas(final List<RegionInfo> regions, int oldReplicaCount, int newReplicaCount) {
if ((newReplicaCount - 1) <= 0) {
return regions;
}
List<RegionInfo> hRegionInfos = new ArrayList<>(newReplicaCount * regions.size());
for (RegionInfo ri : regions) {
if (RegionReplicaUtil.isDefaultReplica(ri) && ((!ri.isOffline()) || ((!ri.isSplit()) && (!ri.isSplitParent())))) {
// region level replica index starts from 0. So if oldReplicaCount was 2 then the max
// replicaId for the existing regions would be 1
for (int j = oldReplicaCount; j < newReplicaCount; j++) {
hRegionInfos.add(RegionReplicaUtil.getRegionInfoForReplica(ri, j));
}
}
}
hRegionInfos.addAll(regions);
return hRegionInfos;
} | 3.26 |
hbase_RegionReplicaUtil_isDefaultReplica_rdh | /**
* Returns true if this region is a default replica for the region
*/
public static boolean isDefaultReplica(RegionInfo hri) {
return hri.getReplicaId() == DEFAULT_REPLICA_ID;
} | 3.26 |
hbase_RegionReplicaUtil_removeNonDefaultRegions_rdh | /**
* Removes the non-default replicas from the passed regions collection
*/
public static void removeNonDefaultRegions(Collection<RegionInfo> regions) {
Iterator<RegionInfo> iterator = regions.iterator();
while (iterator.hasNext()) {
RegionInfo hri = iterator.next();
if (!RegionReplicaUtil.isDefaultReplica(hri)) {
iterator.remove();
}
}
} | 3.26 |
hbase_RecoverableZooKeeper_m1_rdh | /**
* getChildren is an idempotent operation. Retry before throwing exception
*
* @return List of children znodes
*/
public List<String> m1(String path, boolean watch) throws KeeperException, InterruptedException {
return getChildren(path, null, watch);
} | 3.26 |
hbase_RecoverableZooKeeper_filterByPrefix_rdh | /**
* Filters the given node list by the given prefixes. This method is all-inclusive--if any element
* in the node list starts with any of the given prefixes, then it is included in the result.
*
* @param nodes
* the nodes to filter
* @param prefixes
* the prefixes to include in the result
* @return list of every element that starts with one of the prefixes
*/
private static List<String> filterByPrefix(List<String> nodes, String... prefixes) {
List<String> lockChildren = new ArrayList<>();
for (String child : nodes) {
for (String prefix : prefixes) {
if (child.startsWith(prefix)) {
lockChildren.add(child);
break;
}
}
}
return lockChildren;
} | 3.26 |
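A quick, self-contained illustration of the all-inclusive prefix filter described above, with hypothetical znode names; the logic mirrors the private helper:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class PrefixFilterSketch {
  static List<String> filterByPrefix(List<String> nodes, String... prefixes) {
    List<String> matched = new ArrayList<>();
    for (String child : nodes) {
      for (String prefix : prefixes) {
        if (child.startsWith(prefix)) {
          matched.add(child);
          break; // a node is added at most once, even if several prefixes match
        }
      }
    }
    return matched;
  }

  public static void main(String[] args) {
    List<String> znodes = Arrays.asList("write-0001", "read-0002", "lock-0003");
    System.out.println(filterByPrefix(znodes, "write-", "read-")); // [write-0001, read-0002]
  }
}
```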
hbase_RecoverableZooKeeper_exists_rdh | /**
* exists is an idempotent operation. Retry before throwing exception
*
* @return A Stat instance
*/
public Stat exists(String path, boolean watch) throws KeeperException, InterruptedException {
return exists(path, null, watch);
} | 3.26 |
hbase_RecoverableZooKeeper_getData_rdh | /**
* getData is an idempotent operation. Retry before throwing exception
*/
public byte[] getData(String path, boolean watch, Stat stat) throws KeeperException, InterruptedException {
return getData(path, null, watch, stat);
} | 3.26 |
hbase_RecoverableZooKeeper_setData_rdh | /**
* setData is NOT an idempotent operation. A retry may cause a BadVersion exception, so an
* identifier field is added to the data to check whether a BadVersion was caused by a previous
* setData that actually succeeded.
*
* @return Stat instance
*/
public Stat setData(String path, byte[] data, int version) throws KeeperException, InterruptedException {
final Span span = TraceUtil.createSpan("RecoverableZookeeper.setData");
try (Scope ignored = span.makeCurrent()) {
RetryCounter retryCounter = retryCounterFactory.create();
byte[] newData = ZKMetadata.appendMetaData(id, data);
boolean isRetry = false;
while (true) {
try {
span.setStatus(StatusCode.OK);
return checkZk().setData(path, newData, version);
} catch (KeeperException e) {
switch (e.code()) {
case CONNECTIONLOSS :
case OPERATIONTIMEOUT :
case REQUESTTIMEOUT :
TraceUtil.setError(span, e);
retryOrThrow(retryCounter, e, "setData");
break;
case BADVERSION :
if (isRetry) {
// try to verify whether the previous setData success or not
try {
Stat stat = new Stat();
byte[] revData = checkZk().getData(path, false, stat);
if (Bytes.compareTo(revData, newData) == 0) {// the bad version is caused by previous successful setData
return stat;
}
} catch (KeeperException keeperException) {
// the ZK is not reliable at this moment. just throwing exception
TraceUtil.setError(span, e);
throw keeperException;
}
}
// throw other exceptions and verified bad version exceptions
default :
TraceUtil.setError(span, e);
throw e;
}
}
retryCounter.sleepUntilNextRetry();
isRetry = true;
}
} finally {
span.end();
}
} | 3.26 |
hbase_RecoverableZooKeeper_multi_rdh | /**
* Run multiple operations in a transactional manner. Retry before throwing exception
*/
public List<OpResult> multi(Iterable<Op> ops) throws KeeperException, InterruptedException {
final Span span = TraceUtil.createSpan("RecoverableZookeeper.multi");
try (Scope ignored = span.makeCurrent()) {
RetryCounter retryCounter = retryCounterFactory.create();
Iterable<Op> multiOps = m2(ops);
while (true) {
try {
span.setStatus(StatusCode.OK);
return checkZk().multi(multiOps);
} catch (KeeperException e) {
switch (e.code()) {
case CONNECTIONLOSS :
case OPERATIONTIMEOUT :
case REQUESTTIMEOUT :
TraceUtil.setError(span, e);
retryOrThrow(retryCounter, e, "multi");
break;
default :
TraceUtil.setError(span, e);
throw e;
}
}
retryCounter.sleepUntilNextRetry();
}
} finally {
span.end();
}
} | 3.26 |
hbase_RecoverableZooKeeper_getAcl_rdh | /**
* getAcl is an idempotent operation. Retry before throwing exception
*
* @return list of ACLs
*/
public List<ACL> getAcl(String path, Stat stat) throws KeeperException, InterruptedException {
final Span span = TraceUtil.createSpan("RecoverableZookeeper.getAcl");
try (Scope ignored = span.makeCurrent()) {
RetryCounter retryCounter = retryCounterFactory.create();
while (true) {
try {
span.setStatus(StatusCode.OK);
return checkZk().getACL(path, stat);
} catch (KeeperException e) {
switch (e.code()) {
case CONNECTIONLOSS :
case OPERATIONTIMEOUT :
case REQUESTTIMEOUT :
TraceUtil.setError(span, e);
retryOrThrow(retryCounter, e, "getAcl");
break;
default :
TraceUtil.setError(span, e);
throw e;
}
}
retryCounter.sleepUntilNextRetry();
}
} finally {
span.end();
}
} | 3.26 |
hbase_RecoverableZooKeeper_getMaxMultiSizeLimit_rdh | /**
* Returns the maximum size (in bytes) that should be included in any single multi() call. NB:
* This is an approximation, so there may be variance in the msg actually sent over the wire.
* Please be sure to set this approximately, with respect to your ZK server configuration for
* jute.maxbuffer.
*/
public int getMaxMultiSizeLimit() {
return maxMultiSize;
} | 3.26 |
hbase_RecoverableZooKeeper_m0_rdh | /**
* See {@link #connect(Configuration, String, Watcher, String)}
*/
public static RecoverableZooKeeper m0(Configuration conf, Watcher watcher) throws IOException {
String ensemble = ZKConfig.getZKQuorumServersString(conf);
return connect(conf, ensemble, watcher);
} | 3.26 |
hbase_RecoverableZooKeeper_create_rdh | /**
* <p>
* NONSEQUENTIAL create is an idempotent operation. Retry before throwing exceptions. But this
* function will not throw the NodeExist exception back to the application.
* </p>
* <p>
* But SEQUENTIAL is NOT idempotent operation. It is necessary to add identifier to the path to
* verify, whether the previous one is successful or not.
* </p>
*/
public String create(String path, byte[] data, List<ACL> acl, CreateMode createMode) throws KeeperException, InterruptedException {
final Span span = TraceUtil.createSpan("RecoverableZookeeper.create");
try (Scope ignored = span.makeCurrent()) {
byte[] newData = ZKMetadata.appendMetaData(id, data);
switch (createMode) {
case EPHEMERAL :
case PERSISTENT :
span.setStatus(StatusCode.OK);
return createNonSequential(path, newData, acl, createMode);
case EPHEMERAL_SEQUENTIAL :
case PERSISTENT_SEQUENTIAL :
span.setStatus(StatusCode.OK);
return createSequential(path, newData, acl, createMode);
default :
final IllegalArgumentException e = new IllegalArgumentException("Unrecognized CreateMode: " + createMode);
TraceUtil.setError(span, e);
throw e;
}
}
finally {
span.end();
}
} | 3.26 |
hbase_RecoverableZooKeeper_m2_rdh | /**
* Convert Iterable of {@link org.apache.zookeeper.Op} we got into the ZooKeeper.Op instances to
* actually pass to multi (need to do this in order to appendMetaData).
*/
private Iterable<Op> m2(Iterable<Op> ops) throws UnsupportedOperationException {
if (ops == null) {
return null;
}
List<Op> preparedOps = new LinkedList<>();
for (Op op : ops) {
if (op.getType() == OpCode.create) {
CreateRequest create = ((CreateRequest) (op.toRequestRecord()));
preparedOps.add(Op.create(create.getPath(), ZKMetadata.appendMetaData(id, create.getData()), create.getAcl(), create.getFlags()));
} else if (op.getType() == OpCode.delete) {
// no need to appendMetaData for delete
preparedOps.add(op);
} else if (op.getType() == OpCode.setData) {
SetDataRequest setData = ((SetDataRequest) (op.toRequestRecord()));
preparedOps.add(Op.setData(setData.getPath(), ZKMetadata.appendMetaData(id, setData.getData()), setData.getVersion()));
} else {
throw new UnsupportedOperationException("Unexpected ZKOp type: " + op.getClass().getName());
}
}
return preparedOps;
} | 3.26 |
hbase_RecoverableZooKeeper_getChildren_rdh | /**
* getChildren is an idempotent operation. Retry before throwing exception
*
* @return List of children znodes
*/
public List<String> getChildren(String path, Watcher watcher) throws KeeperException, InterruptedException {
return getChildren(path, watcher, null);
} | 3.26 |
hbase_RecoverableZooKeeper_connect_rdh | /**
* Creates a new connection to ZooKeeper, pulling settings and ensemble config from the specified
* configuration object using methods from {@link ZKConfig}. Sets the connection status monitoring
* watcher to the specified watcher.
*
* @param conf
* configuration to pull ensemble and other settings from
* @param watcher
* watcher to monitor connection changes
* @param ensemble
* ZooKeeper servers quorum string
* @param identifier
* value used to identify this client instance.
* @return connection to zookeeper
* @throws IOException
* if unable to connect to zk or config problem
*/
public static RecoverableZooKeeper connect(Configuration conf, String ensemble, Watcher watcher, final String identifier) throws IOException {
if (ensemble == null) {
throw new IOException("Unable to determine ZooKeeper ensemble");
}
int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
if (LOG.isTraceEnabled()) {
LOG.trace("{} opening connection to ZooKeeper ensemble={}",
identifier, ensemble);
}
int retry = conf.getInt("zookeeper.recovery.retry", 3);
int retryIntervalMillis = conf.getInt("zookeeper.recovery.retry.intervalmill", 1000);
int maxSleepTime = conf.getInt("zookeeper.recovery.retry.maxsleeptime", 60000);
int multiMaxSize = conf.getInt("zookeeper.multi.max.size", 1024 * 1024);
return new RecoverableZooKeeper(ensemble, timeout, watcher, retry, retryIntervalMillis, maxSleepTime, identifier, multiMaxSize);
} | 3.26 |
hbase_RecoverableZooKeeper_setAcl_rdh | /**
* setAcl is an idempotent operation. Retry before throwing exception
*
* @return Stat of the znode after the ACLs have been set
*/
public Stat setAcl(String path, List<ACL> acls, int version) throws KeeperException, InterruptedException {
final Span span = TraceUtil.createSpan("RecoverableZookeeper.setAcl");
try (Scope ignored = span.makeCurrent()) {
RetryCounter retryCounter = retryCounterFactory.create();
while (true) {
try {
span.setStatus(StatusCode.OK);
return checkZk().setACL(path, acls, version);
} catch (KeeperException e) {
switch (e.code()) {
case CONNECTIONLOSS :
case OPERATIONTIMEOUT :
TraceUtil.setError(span, e);
retryOrThrow(retryCounter, e, "setAcl");
break;
default :
TraceUtil.setError(span, e);
throw e;
}
}
retryCounter.sleepUntilNextRetry();
}
} finally {
span.end();
}
} | 3.26 |
hbase_RecoverableZooKeeper_delete_rdh | /**
* delete is an idempotent operation. Retry before throwing exception. This function will not
* throw NoNodeException if the path does not exist.
*/
public void delete(String path, int version) throws InterruptedException, KeeperException {
final Span span = TraceUtil.createSpan("RecoverableZookeeper.delete");
try (Scope ignored = span.makeCurrent()) {
RetryCounter retryCounter = retryCounterFactory.create();
boolean isRetry = false;// False for first attempt, true for all retries.
while (true) {
try {
checkZk().delete(path, version);
span.setStatus(StatusCode.OK);
return;
} catch (KeeperException e) {
switch (e.code()) {
case NONODE :
if (isRetry) {
LOG.debug((("Node " + path) + " already deleted. Assuming a ") + "previous attempt succeeded.");
return;
}
LOG.debug("Node {} already deleted, retry={}", path, isRetry);
TraceUtil.setError(span, e);
throw e;
case CONNECTIONLOSS :
case OPERATIONTIMEOUT :
case REQUESTTIMEOUT :
TraceUtil.setError(span, e);
retryOrThrow(retryCounter, e, "delete");
break;
default :
TraceUtil.setError(span, e);
throw e;
}
}
retryCounter.sleepUntilNextRetry();
isRetry = true;
}
} finally {
span.end();
}
} | 3.26 |
hbase_DumpReplicationQueues_getTotalWALSize_rdh | /**
* return total size in bytes from a list of WALs
*/
private long getTotalWALSize(FileSystem fs, List<String> wals, ServerName server) {
long size = 0;
FileStatus fileStatus;
for (String wal : wals) {
try {
fileStatus = new WALLink(getConf(), server.getServerName(), wal).getFileStatus(fs);
} catch (IOException e) {
if (e instanceof FileNotFoundException) {
numWalsNotFound++;
LOG.warn(("WAL " + wal) + " couldn't be found, skipping", e);
} else {
LOG.warn(("Can't get file status of WAL " + wal) + ", skipping", e);
}
continue;
}
size += fileStatus.getLen();
}
totalSizeOfWALs += size;
return size;
} | 3.26 |
hbase_DumpReplicationQueues_main_rdh | /**
* Main
*/
public static void main(String[] args) throws Exception {
Configuration conf = HBaseConfiguration.create();
int ret = ToolRunner.run(conf, new DumpReplicationQueues(), args);
System.exit(ret);
} | 3.26 |
hbase_MobStoreEngine_createCompactor_rdh | /**
* Creates the DefaultMobCompactor.
*/
@Override
protected void createCompactor(Configuration conf, HStore store) throws IOException {
String className = conf.get(MOB_COMPACTOR_CLASS_KEY, DefaultMobStoreCompactor.class.getName());
try {
compactor = ReflectionUtils.instantiateWithCustomCtor(className, new Class[]{ Configuration.class, HStore.class }, new Object[]{ conf, store });
} catch (RuntimeException e) {
throw new IOException(("Unable to load configured compactor '" + className) + "'", e);
}
} | 3.26 |
hbase_ConstantSizeRegionSplitPolicy_isExceedSize_rdh | /**
* Returns true if the region size exceeds sizeToCheck
*/
protected final boolean isExceedSize(long sizeToCheck) {
if (overallHRegionFiles) {
long sumSize = 0;
for (HStore store : region.getStores()) {
sumSize += store.getSize();
}
if (sumSize > sizeToCheck) {
LOG.debug("Should split because region size is big enough " + "sumSize={}, sizeToCheck={}", StringUtils.humanSize(sumSize), StringUtils.humanSize(sizeToCheck));
return true;
}
} else {
for (HStore store : region.getStores()) {
long size = store.getSize();
if (size > sizeToCheck) {
LOG.debug("Should split because {} size={}, sizeToCheck={}", store.getColumnFamilyName(), StringUtils.humanSize(size), StringUtils.humanSize(sizeToCheck));
return true;
}
}
}
return false;
} | 3.26 |
hbase_FileSystemUtilizationChore_m0_rdh | /**
* Computes total FileSystem size for the given {@link Region}.
*
* @param r
* The region
* @return The size, in bytes, of the Region.
*/
long m0(Region r) {
long regionSize = 0L;
for (Store store : r.getStores()) {
regionSize += store.getHFilesSize();
}
if (LOG.isTraceEnabled()) {
LOG.trace((("Size of " + r) + " is ") + regionSize);
}
return regionSize;
} | 3.26 |
hbase_FileSystemUtilizationChore_getInitialDelay_rdh | /**
* Extracts the initial delay for the chore from the configuration.
*
* @param conf
* The configuration object.
* @return The configured chore initial delay or the default value.
*/
static long getInitialDelay(Configuration conf) {
return conf.getLong(FS_UTILIZATION_CHORE_DELAY_KEY, FS_UTILIZATION_CHORE_DELAY_DEFAULT);
}
/**
* Extracts the time unit for the chore period and initial delay from the configuration. The
* configuration value for {@link #FS_UTILIZATION_CHORE_TIMEUNIT_KEY} must correspond to a
* {@link TimeUnit} | 3.26 |
hbase_FileSystemUtilizationChore_getPeriod_rdh | /**
* Extracts the period for the chore from the configuration.
*
* @param conf
* The configuration object.
* @return The configured chore period or the default value.
*/
static int getPeriod(Configuration conf) {
return conf.getInt(FS_UTILIZATION_CHORE_PERIOD_KEY,
FS_UTILIZATION_CHORE_PERIOD_DEFAULT);
} | 3.26 |
hbase_FileSystemUtilizationChore_getRegionSizeStore_rdh | // visible for testing
RegionSizeStore getRegionSizeStore() {
return rs.getRegionServerSpaceQuotaManager().getRegionSizeStore();
} | 3.26 |
hbase_FileSystemUtilizationChore_getLeftoverRegions_rdh | /**
* Returns an {@link Iterator} over the Regions which were skipped last invocation of the chore.
*
* @return Regions from the previous invocation to process, or null.
*/
Iterator<Region> getLeftoverRegions() {
return leftoverRegions;
} | 3.26 |
hbase_FileSystemUtilizationChore_setLeftoverRegions_rdh | /**
* Sets a new collection of Regions as leftovers.
*/
void setLeftoverRegions(Iterator<Region> newLeftovers) {
this.leftoverRegions = newLeftovers;
} | 3.26 |
hbase_RegionStateNode_isSplit_rdh | /**
* Return whether the region has been split and not online.
* <p/>
* In this method we will test both region info and state, and will return true if either of the
* test returns true. Please see the comments in
* {@link AssignmentManager#markRegionAsSplit(RegionInfo, ServerName, RegionInfo, RegionInfo)} for
* more details on why we need to test two conditions.
*/
public boolean isSplit() {
return regionInfo.isSplit() || isInState(State.SPLIT);
} | 3.26 |
hbase_RegionStateNode_m0_rdh | /**
* Put region into OFFLINE mode (set state and clear location).
*
* @return Last recorded server deploy
*/
public ServerName m0() {
setState(State.OFFLINE);
return setRegionLocation(null);
} | 3.26 |
hbase_RegionStateNode_isInState_rdh | /**
* Notice that, we will return true if {@code expected} is empty.
* <p/>
* This is a bit strange but we need this logic, for example, we can change the state to OPENING
* from any state, as in SCP we will not change the state to CLOSED before opening the region.
*/
public boolean isInState(State... expected) {
if (expected.length == 0) {
return true;
}
return getState().matches(expected);
} | 3.26 |
hbase_RegionStateNode_transitionState_rdh | /**
* Set new {@link State} but only if currently in <code>expected</code> State (if not, throw
* {@link UnexpectedStateException}.
*/
public void transitionState(final State update, final State... expected) throws UnexpectedStateException {
if (!setState(update, expected)) {
throw new UnexpectedStateException((((("Expected " + Arrays.toString(expected)) + " so could move to ") + update) + " but current state=") + getState());
}
} | 3.26 |
hbase_RegionStateNode_setState_rdh | /**
*
* @param update
* new region state this node should be assigned.
* @param expected
* current state should be in this given list of expected states
* @return true, if current state is in expected list; otherwise false.
*/
public boolean setState(final State update, final State... expected) {
if (!isInState(expected)) {
return false;
}
this.state = update;
this.lastUpdate = EnvironmentEdgeManager.currentTime();
return true;
} | 3.26 |
hbase_CellBuilderFactory_create_rdh | /**
* Create a CellBuilder instance.
*
* @param type
* indicates which memory copy is used in building cell.
* @return An new CellBuilder
*/
public static CellBuilder create(CellBuilderType type) {
switch (type) {
case SHALLOW_COPY :
return new IndividualBytesFieldCellBuilder();
case DEEP_COPY :
return new KeyValueBuilder();
default :
throw new UnsupportedOperationException(("The type:" + type) + " is unsupported");
}
} | 3.26 |
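A usage sketch for the factory above, assuming the standard HBase 2.x CellBuilder fluent API (setRow/setFamily/setQualifier/setType/setValue); treat the exact setter names as assumptions rather than a verbatim quote of this codebase:

```java
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.util.Bytes;

public class CellBuilderUsageSketch {
  public static Cell buildPutCell() {
    // DEEP_COPY copies the passed arrays into the cell; SHALLOW_COPY keeps references to them.
    return CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
      .setRow(Bytes.toBytes("row1"))
      .setFamily(Bytes.toBytes("cf"))
      .setQualifier(Bytes.toBytes("q"))
      .setType(Cell.Type.Put)
      .setValue(Bytes.toBytes("value"))
      .build();
  }
}
```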
hbase_RawInteger_decodeInt_rdh | /**
* Read an {@code int} value from the buffer {@code buff}.
*/
public int decodeInt(byte[] buff, int offset) {
return Bytes.toInt(buff, offset);
} | 3.26 |
hbase_RawInteger_encodeInt_rdh | /**
* Write instance {@code val} into buffer {@code buff}.
*/
public int encodeInt(byte[] buff, int offset, int val) {
return Bytes.putInt(buff, offset, val);
} | 3.26 |
hbase_ThreadMonitoring_formatThreadInfo_rdh | /**
* Format the given ThreadInfo object as a String.
*
* @param indent
* a prefix for each line, used for nested indentation
*/
public static String formatThreadInfo(ThreadInfo threadInfo, String indent) {
StringBuilder sb = new StringBuilder();
appendThreadInfo(sb, threadInfo, indent);
return sb.toString();
} | 3.26 |
hbase_MurmurHash3_hash_rdh | /**
* Returns the MurmurHash3_x86_32 hash.
*/
@SuppressWarnings("SF")
@Override
public <T> int hash(HashKey<T> hashKey, int initval) {
final int c1 = 0xcc9e2d51;
final int c2 = 0x1b873593;
int length = hashKey.length();
int h1 = initval;
int roundedEnd = length & 0xfffffffc;// round down to 4 byte block
for (int i = 0; i < roundedEnd; i += 4) {
// little endian load order
int k1 = (((hashKey.get(i) & 0xff) | ((hashKey.get(i + 1) & 0xff) << 8)) | ((hashKey.get(i + 2) & 0xff) << 16)) | (hashKey.get(i + 3) << 24);
k1 *= c1;
k1 = (k1 << 15) | (k1 >>> 17);// ROTL32(k1,15);
k1 *= c2;
h1 ^= k1;
h1 = (h1 << 13) | (h1 >>> 19);// ROTL32(h1,13);
h1 = (h1 * 5) + 0xe6546b64;
}
// tail
int k1 = 0;
switch (length & 0x3) {
case 3 :
k1 = (hashKey.get(roundedEnd + 2) & 0xff) << 16;
// FindBugs SF_SWITCH_FALLTHROUGH
case 2 :
k1 |= (hashKey.get(roundedEnd + 1) & 0xff) << 8;
// FindBugs SF_SWITCH_FALLTHROUGH
case 1 :
k1 |= hashKey.get(roundedEnd) & 0xff;
k1 *= c1;
k1 = (k1 << 15) | (k1 >>> 17);// ROTL32(k1,15);
k1 *= c2;
h1 ^= k1;
default :
// fall out
}
// finalization
h1 ^= length;
// fmix(h1);
h1 ^= h1 >>> 16;
h1 *= 0x85ebca6b;
h1 ^= h1 >>> 13;
h1 *= 0xc2b2ae35;
h1 ^= h1 >>> 16;
return h1;
} | 3.26 |
hbase_RpcThrottleStorage_switchRpcThrottle_rdh | /**
* Store the rpc throttle value.
*
* @param enable
* Set to <code>true</code> to enable, <code>false</code> to disable.
* @throws IOException
* if an unexpected io exception occurs
*/
public void switchRpcThrottle(boolean enable) throws IOException {
try {
byte[] upData = Bytes.toBytes(enable);
ZKUtil.createSetData(zookeeper, rpcThrottleZNode, upData);
} catch (KeeperException e) {
throw new IOException("Failed to store rpc throttle", e);
}
} | 3.26 |
hbase_AuthUtil_isAuthRenewalChoreEnabled_rdh | /**
* Returns true if the chore to automatically renew Kerberos tickets (from keytabs) should be
* started. The default is true.
*/
static boolean isAuthRenewalChoreEnabled(Configuration conf) {
return conf.getBoolean(HBASE_CLIENT_AUTOMATIC_KEYTAB_RENEWAL_KEY, HBASE_CLIENT_AUTOMATIC_KEYTAB_RENEWAL_DEFAULT);
} | 3.26 |
hbase_AuthUtil_toGroupEntry_rdh | /**
* Returns the group entry with the group prefix for a group principal.
*/
@InterfaceAudience.Private
public static String toGroupEntry(String name) {
return GROUP_PREFIX + name;
} | 3.26 |
hbase_AuthUtil_loginClient_rdh | /**
* For kerberized cluster, return login user (from kinit or from keytab if specified). For
* non-kerberized cluster, return system user.
*
* @param conf
* configuration file
* @throws IOException
* login exception
*/
@InterfaceAudience.Private
public static User loginClient(Configuration conf) throws IOException {
UserProvider provider = UserProvider.instantiate(conf);
User user = provider.getCurrent();
boolean securityOn = provider.isHBaseSecurityEnabled() && provider.isHadoopSecurityEnabled();
if (securityOn) {
boolean fromKeytab = provider.shouldLoginFromKeytab();
if (user.getUGI().hasKerberosCredentials()) {
// There's already a login user.
// But we should avoid misuse credentials which is a dangerous security issue,
// so here check whether user specified a keytab and a principal:
// 1. Yes, check if user principal match.
// a. match, just return.
// b. mismatch, login using keytab.
// 2. No, user may login through kinit, this is the old way, also just return.
if (fromKeytab) {
return checkPrincipalMatch(conf, user.getUGI().getUserName()) ? user : loginFromKeytabAndReturnUser(provider);
}
return user;
} else if (fromKeytab) {
// Kerberos is on and client specify a keytab and principal, but client doesn't login yet.
return loginFromKeytabAndReturnUser(provider);
}
}
return user;
} | 3.26 |
hbase_AuthUtil_getAuthRenewalChore_rdh | /**
* Checks if security is enabled and if so, launches chore for refreshing kerberos ticket.
*
* @return a ScheduledChore for renewals.
*/
@InterfaceAudience.Private
public static ScheduledChore getAuthRenewalChore(final UserGroupInformation user, Configuration conf) {
if ((!user.hasKerberosCredentials()) || (!isAuthRenewalChoreEnabled(conf))) {
return null;
}
Stoppable stoppable = createDummyStoppable();
// if you're in debug mode this is useful to avoid getting spammed by the getTGT()
// you can increase this, keeping in mind that the default refresh window is 0.8
// e.g. 5min tgt * 0.8 = 4min refresh so interval is better be way less than 1min
final int CHECK_TGT_INTERVAL = 30 * 1000;// 30sec
return new ScheduledChore("RefreshCredentials", stoppable, CHECK_TGT_INTERVAL) {
@Override
protected void chore() {
try {
user.checkTGTAndReloginFromKeytab();
} catch (IOException e) {
LOG.error("Got exception while trying to refresh credentials: " + e.getMessage(), e);
}
}
};
}
/**
* Checks if security is enabled and if so, launches chore for refreshing kerberos ticket.
*
* @param conf
* the hbase service configuration
* @return a ScheduledChore for renewals, if needed, and null otherwise.
* @deprecated Deprecated since 2.2.0, this method will be
{@link org.apache.yetus.audience.InterfaceAudience.Private} | 3.26 |
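A brief sketch of how the returned renewal chore might be scheduled, assuming HBase's ChoreService API; note the method is annotated InterfaceAudience.Private, and the keytab login setup is outside this snippet:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.AuthUtil;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.security.UserGroupInformation;

public class AuthRenewalSketch {
  public static void scheduleRenewal() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    UserGroupInformation ugi = UserGroupInformation.getLoginUser();
    // Returns null when the user has no Kerberos credentials or the renewal chore is disabled.
    ScheduledChore chore = AuthUtil.getAuthRenewalChore(ugi, conf);
    if (chore != null) {
      new ChoreService("auth-renewal").scheduleChore(chore); // periodically re-logins from keytab
    }
  }
}
```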
hbase_AuthUtil_getGroupName_rdh | /**
* Returns the actual name for a group principal (stripped of the group prefix).
*/
@InterfaceAudience.Private
public static String getGroupName(String aclKey) {
if (!isGroupPrincipal(aclKey)) {
return aclKey;
}
return aclKey.substring(GROUP_PREFIX.length());
} | 3.26 |
hbase_AuthUtil_loginClientAsService_rdh | /**
* For kerberized cluster, return login user (from kinit or from keytab). Principal should be the
* following format: name/fully.qualified.domain.name@REALM. For non-kerberized cluster, return
* system user.
* <p>
* NOT recommended to use this method unless you're sure what you're doing, it is for canary only.
* Please use User#loginClient.
*
* @param conf
* configuration file
* @throws IOException
* login exception
*/
private static User loginClientAsService(Configuration conf) throws IOException {
UserProvider provider = UserProvider.instantiate(conf);
if (provider.isHBaseSecurityEnabled() &&
provider.isHadoopSecurityEnabled()) {
try {
if (provider.shouldLoginFromKeytab()) {
String host = Strings.domainNamePointerToHostName(DNS.getDefaultHost(conf.get("hbase.client.dns.interface", "default"), conf.get("hbase.client.dns.nameserver", "default")));
provider.login(HBASE_CLIENT_KEYTAB_FILE, f0, host);
}
} catch (UnknownHostException e) {
LOG.error("Error resolving host name: " + e.getMessage(), e);
throw e;
} catch (IOException e) {
LOG.error("Error while trying to perform the initial login: " + e.getMessage(), e);
throw e;
}
}
return provider.getCurrent();
} | 3.26 |
hbase_AuthUtil_isGroupPrincipal_rdh | /**
* Returns whether or not the given name should be interpreted as a group principal. Currently
* this simply checks if the name starts with the special group prefix character ("@").
*/
@InterfaceAudience.Private
public static boolean isGroupPrincipal(String name) {
return (name != null) && name.startsWith(GROUP_PREFIX);
} | 3.26 |
hbase_RawBytesTerminated_encode_rdh | /**
* Write {@code val} into {@code dst}, respecting {@code offset} and {@code length}.
*
* @return number of bytes written.
*/
public int encode(PositionedByteRange dst, byte[] val, int voff, int vlen) {
return ((RawBytes) (wrapped)).encode(dst, val, voff, vlen);
} | 3.26 |
hbase_RawBytesTerminated_decode_rdh | /**
* Read a {@code byte[]} from the buffer {@code src}.
*/
public byte[] decode(PositionedByteRange src, int length) {
return ((RawBytes) (wrapped)).decode(src, length);
} | 3.26 |
hbase_ReplicationPeerConfigUtil_m2_rdh | /**
* Helper method to add/remove base peer configs from Configuration to ReplicationPeerConfig. This
* merges the user supplied peer configuration
* {@link org.apache.hadoop.hbase.replication.ReplicationPeerConfig} with peer configs provided as
* property hbase.replication.peer.base.configs in hbase configuration. Expected format for this
* hbase configuration is "k1=v1;k2=v2,v2_1;k3=""". If value is empty, it will remove the existing
* key-value from peer config.
*
* @param conf
* Configuration
* @return ReplicationPeerConfig containing updated configs.
*/
public static ReplicationPeerConfig m2(Configuration conf, ReplicationPeerConfig receivedPeerConfig) {
ReplicationPeerConfigBuilder copiedPeerConfigBuilder = ReplicationPeerConfig.newBuilder(receivedPeerConfig);
Map<String, String> receivedPeerConfigMap = receivedPeerConfig.getConfiguration();
String basePeerConfigs = conf.get(HBASE_REPLICATION_PEER_BASE_CONFIG, "");
if (basePeerConfigs.length() != 0) {
Map<String, String> basePeerConfigMap = Splitter.on(';').trimResults().omitEmptyStrings().withKeyValueSeparator("=").split(basePeerConfigs);
for (Map.Entry<String, String> entry : basePeerConfigMap.entrySet()) {
String configName = entry.getKey();
String configValue = entry.getValue();
// If the config is provided with empty value, for eg. k1="",
// we remove it from peer config. Providing config with empty value
// is required so that it doesn't remove any other config unknowingly.
if (Strings.isNullOrEmpty(configValue)) {
copiedPeerConfigBuilder.removeConfiguration(configName);
} else if (!receivedPeerConfigMap.getOrDefault(configName, "").equals(configValue)) {
// update the configuration if exact config and value doesn't exists
copiedPeerConfigBuilder.putConfiguration(configName, configValue);
}
}
}
return copiedPeerConfigBuilder.build();
} | 3.26 |
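A standalone sketch of the base-config string handling documented above ("k1=v1;k2=v2,v2_1;k3="""): an empty value removes the key, any other value overrides it. The parsing below is simplified with plain String.split, treats the quoted-empty form as an empty value, and the helper name and map contents are made up:

```java
import java.util.HashMap;
import java.util.Map;

public class BasePeerConfigFormatSketch {
  // Applies a "k1=v1;k2=v2,v2_1;k3=" style base-config string to an existing peer config map,
  // following the rule documented above: an empty value removes the key, otherwise the key is set.
  static Map<String, String> apply(String basePeerConfigs, Map<String, String> peerConfig) {
    Map<String, String> result = new HashMap<>(peerConfig);
    for (String entry : basePeerConfigs.split(";")) {
      if (entry.trim().isEmpty()) {
        continue;
      }
      String[] kv = entry.trim().split("=", 2);
      String key = kv[0];
      String value = kv.length > 1 ? kv[1] : "";
      if (value.isEmpty()) {
        result.remove(key);
      } else {
        result.put(key, value);
      }
    }
    return result;
  }

  public static void main(String[] args) {
    Map<String, String> existing = new HashMap<>();
    existing.put("k3", "stale");
    System.out.println(apply("k1=v1;k2=v2,v2_1;k3=", existing)); // k1 and k2 are set; k3 is removed
  }
}
```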
hbase_ReplicationPeerConfigUtil_parsePeerFrom_rdh | /**
* Parse the serialized representation of a peer configuration.
*
* @param bytes
* Content of a peer znode.
* @return ClusterKey parsed from the passed bytes.
* @throws DeserializationException
* deserialization exception
*/
public static ReplicationPeerConfig parsePeerFrom(final byte[] bytes) throws DeserializationException {
if (ProtobufUtil.isPBMagicPrefix(bytes)) {
int pbLen = ProtobufUtil.lengthOfPBMagic();
ReplicationProtos.ReplicationPeer.Builder builder = ReplicationProtos.ReplicationPeer.newBuilder();
ReplicationProtos.ReplicationPeer peer;
try {
ProtobufUtil.mergeFrom(builder, bytes, pbLen, bytes.length - pbLen);
peer = builder.build();
} catch (IOException e) {
throw new DeserializationException(e);
}
return convert(peer);
} else {
if ((bytes == null) || (bytes.length <= 0)) {
throw new DeserializationException("Bytes to deserialize should not be empty.");
}
return ReplicationPeerConfig.newBuilder().setClusterKey(Bytes.toString(bytes)).build();
}
} | 3.26 |
hbase_ReplicationPeerConfigUtil_convert_rdh | /**
* Convert TableCFs Object to String. Output String Format:
* ns1.table1:cf1,cf2;ns2.table2:cfA,cfB;table3
*/
public static String convert(ReplicationProtos.TableCF[] tableCFs) {
StringBuilder sb = new StringBuilder();
for (int i = 0, n = tableCFs.length; i < n; i++) {
ReplicationProtos.TableCF tableCF = tableCFs[i];
String namespace = tableCF.getTableName().getNamespace().toStringUtf8();
if (StringUtils.isNotEmpty(namespace)) {
sb.append(namespace).append(".").append(tableCF.getTableName().getQualifier().toStringUtf8()).append(":");
} else {
sb.append(tableCF.getTableName().toString()).append(":");
}
for (int j = 0; j < tableCF.getFamiliesCount(); j++) {
sb.append(tableCF.getFamilies(j).toStringUtf8()).append(",");
}
sb.deleteCharAt(sb.length() - 1).append(";");
}
if (sb.length() > 0) {
sb.deleteCharAt(sb.length() - 1);
}
return sb.toString();
} | 3.26 |
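A standalone sketch of the string format documented above ("ns1.table1:cf1,cf2;ns2.table2:cfA,cfB;table3"), parsed back into a map the way convert2Map below represents it (no families maps to null); the parser here is illustrative, not the ReplicationPeerConfigUtil implementation:

```java
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class TableCFsFormatSketch {
  // Parses "ns1.table1:cf1,cf2;ns2.table2:cfA,cfB;table3" into table -> column families.
  // An entry without ':' means "all column families" and is stored as null.
  static Map<String, List<String>> parse(String tableCFsConfig) {
    Map<String, List<String>> tableCFs = new LinkedHashMap<>();
    for (String entry : tableCFsConfig.split(";")) {
      String[] parts = entry.trim().split(":", 2);
      if (parts[0].isEmpty()) {
        continue;
      }
      List<String> families = null;
      if (parts.length == 2 && !parts[1].isEmpty()) {
        families = new ArrayList<>();
        for (String cf : parts[1].split(",")) {
          families.add(cf.trim());
        }
      }
      tableCFs.put(parts[0], families);
    }
    return tableCFs;
  }

  public static void main(String[] args) {
    System.out.println(parse("ns1.table1:cf1,cf2;ns2.table2:cfA,cfB;table3"));
    // {ns1.table1=[cf1, cf2], ns2.table2=[cfA, cfB], table3=null}
  }
}
```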
hbase_ReplicationPeerConfigUtil_convert2Map_rdh | /**
* Convert tableCFs Object to Map.
*/
public static Map<TableName, List<String>> convert2Map(ReplicationProtos.TableCF[] tableCFs) {
if ((tableCFs == null) || (tableCFs.length == 0)) {
return null;
}
Map<TableName, List<String>> tableCFsMap = new HashMap<>();
for (int i = 0, n = tableCFs.length; i < n; i++) {
ReplicationProtos.TableCF tableCF = tableCFs[i];
List<String> families = new ArrayList<>();
for (int j = 0, m = tableCF.getFamiliesCount(); j < m; j++) {
families.add(tableCF.getFamilies(j).toStringUtf8());
}
if (families.size() > 0) {
tableCFsMap.put(ProtobufUtil.toTableName(tableCF.getTableName()), families);
} else {
tableCFsMap.put(ProtobufUtil.toTableName(tableCF.getTableName()), null);
}
}
return tableCFsMap;
} | 3.26 |
hbase_ReplicationPeerConfigUtil_parseTableCFs_rdh | /**
* Parse bytes into TableCFs. It is used for backward compatibility. Old format bytes have no
* PB_MAGIC Header
*/
public static TableCF[] parseTableCFs(byte[] bytes) throws IOException {
if (bytes == null) {
return null;
}
return ReplicationPeerConfigUtil.convert(Bytes.toString(bytes));
} | 3.26 |
hbase_ReplicationPeerConfigUtil_getPeerClusterConfiguration_rdh | /**
* Returns the configuration needed to talk to the remote slave cluster.
*
* @param conf
* the base configuration
* @param peer
* the description of replication peer
* @return the configuration for the peer cluster, null if it was unable to get the configuration
* @throws IOException
* when create peer cluster configuration failed
*/
public static Configuration getPeerClusterConfiguration(Configuration conf, ReplicationPeerDescription peer) throws IOException {
ReplicationPeerConfig peerConfig = peer.getPeerConfig();
Configuration otherConf;
try {
otherConf = HBaseConfiguration.createClusterConf(conf, peerConfig.getClusterKey());
} catch (IOException e) {
throw new IOException("Can't get peer configuration for peerId=" + peer.getPeerId(), e);
}
if (!peerConfig.getConfiguration().isEmpty()) {
CompoundConfiguration compound = new CompoundConfiguration();
compound.add(otherConf);
compound.addStringMap(peerConfig.getConfiguration());
return compound;
}
return otherConf;
} | 3.26 |
hbase_ReplicationPeerConfigUtil_m1_rdh | /**
* Convert tableCFs string into Map.
*/
public static Map<TableName, List<String>> m1(String tableCFsConfig) {
ReplicationProtos.TableCF[] tableCFs = convert(tableCFsConfig);
return convert2Map(tableCFs);
} | 3.26 |
hbase_ReplicationPeerConfigUtil_m0_rdh | /**
* Get TableCF in TableCFs, if not exist, return null.
*/
public static TableCF m0(ReplicationProtos.TableCF[] tableCFs, String table) {
for (int i = 0, n = tableCFs.length; i < n; i++) {
ReplicationProtos.TableCF tableCF = tableCFs[i];
if (tableCF.getTableName().getQualifier().toStringUtf8().equals(table)) {
return tableCF;
}
}
return null;
} | 3.26 |
hbase_ReplicationPeerConfigUtil_toByteArray_rdh | /**
* Returns Serialized protobuf of <code>peerConfig</code> with pb magic prefix prepended suitable
* for use as content of a this.peersZNode; i.e. the content of PEER_ID znode under
* /hbase/replication/peers/PEER_ID
*/
public static byte[] toByteArray(final ReplicationPeerConfig peerConfig) {
byte[] bytes = convert(peerConfig).toByteArray();
return ProtobufUtil.prependPBMagic(bytes);
} | 3.26 |
hbase_MiniHBaseCluster_stopRegionServer_rdh | /**
* Shut down the specified region server cleanly
*
* @param serverNumber
* Used as index into a list.
* @param shutdownFS
* True if we are to shut down the filesystem as part of this regionserver's
* shutdown. Usually we do but you do not want to do this if you are running
* multiple regionservers in a test and you shut down one before end of the
* test.
* @return the region server that was stopped
*/
public RegionServerThread stopRegionServer(int serverNumber, final boolean shutdownFS) {
JVMClusterUtil.RegionServerThread server = f0.getRegionServers().get(serverNumber);
LOG.info("Stopping " + server.toString());
server.getRegionServer().stop("Stopping rs " + serverNumber);
return server;
} | 3.26 |
hbase_MiniHBaseCluster_compact_rdh | /**
* Call compact on all regions of the specified table.
*/
public void compact(TableName tableName,
boolean major) throws IOException {
for (JVMClusterUtil.RegionServerThread t : this.f0.getRegionServers()) {
for (HRegion r : t.getRegionServer().getOnlineRegionsLocalContext()) {
if (r.getTableDescriptor().getTableName().equals(tableName)) {
if (RegionReplicaUtil.isDefaultReplica(r.getRegionInfo())) {
r.compact(major);
}
}
}
}
} | 3.26 |
hbase_MiniHBaseCluster_flushcache_rdh | /**
* Call flushCache on all regions of the specified table.
*/
public void flushcache(TableName tableName) throws IOException {
for (JVMClusterUtil.RegionServerThread t : this.f0.getRegionServers()) {
for (HRegion r : t.getRegionServer().getOnlineRegionsLocalContext()) {
if (r.getTableDescriptor().getTableName().equals(tableName)) {
executeFlush(r);
}
}
}
} | 3.26 |
hbase_MiniHBaseCluster_killAll_rdh | /**
* Do a simulated kill all masters and regionservers. Useful when it is impossible to bring the
* mini-cluster back for clean shutdown.
*/
public void killAll() {
// Do backups first.
MasterThread activeMaster = null;
for (MasterThread masterThread : getMasterThreads()) {
if (!masterThread.getMaster().isActiveMaster()) {
masterThread.getMaster().abort("killAll");
} else {
activeMaster = masterThread;
}
}
// Do active after.
if (activeMaster != null) {
activeMaster.getMaster().abort("killAll");
}
for (RegionServerThread rst : getRegionServerThreads()) {
rst.getRegionServer().abort("killAll");
}
} | 3.26 |
hbase_MiniHBaseCluster_waitForActiveAndReadyMaster_rdh | /**
* Blocks until there is an active master and that master has completed initialization.
*
* @return true if an active master becomes available. false if there are no masters left.
*/
@Override
public boolean waitForActiveAndReadyMaster(long timeout) throws IOException {
List<JVMClusterUtil.MasterThread> mts;
long start = EnvironmentEdgeManager.currentTime();
while ((!(mts = getMasterThreads()).isEmpty()) && ((EnvironmentEdgeManager.currentTime() - start) < timeout)) {
for (JVMClusterUtil.MasterThread mt : mts) {
if (mt.getMaster().isActiveMaster() && mt.getMaster().isInitialized()) {
return true;
}
}
Threads.sleep(100);
}
return false;
} | 3.26 |