name | code_snippet | score
---|---|---|
hbase_ByteBuffAllocator_createOnHeap_rdh | /**
 * Initialize a {@link ByteBuffAllocator} which only allocates ByteBuffers on-heap. It is
 * designed for testing purposes or for the case where the reservoir is disabled.
*
* @return allocator to allocate on-heap ByteBuffer.
*/
private static ByteBuffAllocator createOnHeap() {
return new ByteBuffAllocator(false, 0, DEFAULT_BUFFER_SIZE, Integer.MAX_VALUE);
} | 3.26 |
hbase_ByteBuffAllocator_allocate_rdh | /**
 * Allocate size bytes from the ByteBuffAllocator. Note to call {@link ByteBuff#release()} when
 * the buffer is no longer needed, otherwise memory will leak in the NIO ByteBuffer pool.
 *
 * @param size
 *        to allocate
 * @return a ByteBuff with the desired size.
*/
public ByteBuff allocate(int size) {
    if (size < 0) {
      throw new IllegalArgumentException("size to allocate should >=0");
    }
    // If the reservoir is disabled, just allocate it from on-heap.
    if ((!isReservoirEnabled()) || (size == 0)) {
      return ByteBuff.wrap(allocateOnHeap(size));
    }
    int reminder = size % bufSize;
int len = (size / bufSize) + (reminder > 0 ? 1 : 0);
List<ByteBuffer> bbs = new ArrayList<>(len);
// Allocate from ByteBufferPool until the remaining is less than minSizeForReservoirUse or
// reservoir is exhausted.
int v16 = size;
while (v16 >= minSizeForReservoirUse) {
ByteBuffer bb = this.getBuffer();
if (bb == null) {
break;
}
bbs.add(bb);
      v16 -= bufSize;
    }
int lenFromReservoir = bbs.size();
if (v16 > 0) {
// If the last ByteBuffer is too small or the reservoir can not provide more ByteBuffers, we
// just allocate the ByteBuffer from on-heap.
bbs.add(allocateOnHeap(v16));
}
ByteBuff bb;
// we only need a recycler if we successfully pulled from the pool
// this matters for determining whether to add leak detection in RefCnt
if (lenFromReservoir == 0) {
bb = ByteBuff.wrap(bbs);
} else {
bb = ByteBuff.wrap(bbs, () -> {
for (int i = 0; i < lenFromReservoir; i++) {
this.putbackBuffer(bbs.get(i));
}
      });
    }
bb.limit(size);
return bb;
} | 3.26 |
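The allocate/release contract documented above is easy to get wrong, so a short usage sketch may help. This is a minimal, hypothetical example (the class and method names are invented for illustration); only the `allocate`/`put`/`release` calls come from the API shown above:

```java
import org.apache.hadoop.hbase.io.ByteBuffAllocator;
import org.apache.hadoop.hbase.nio.ByteBuff;

public class AllocatorUsageSketch {
  // Copies a payload into a buffer obtained from the allocator and always
  // releases it, so pooled NIO ByteBuffers are returned to the reservoir.
  static void copyIntoPooledBuffer(ByteBuffAllocator alloc, byte[] payload) {
    ByteBuff buf = alloc.allocate(payload.length);
    try {
      buf.put(payload, 0, payload.length);
    } finally {
      buf.release(); // required, otherwise the pool leaks memory
    }
  }
}
```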
hbase_ByteBuffAllocator_allocateOneBuffer_rdh | /**
 * Allocate a buffer with the configured buffer size from the ByteBuffAllocator. Note to call
 * {@link ByteBuff#release()} when the buffer is no longer needed, otherwise memory will leak in
 * the NIO ByteBuffer pool.
 *
 * @return a ByteBuff with the buffer size.
 */
public SingleByteBuff allocateOneBuffer() {
  if (isReservoirEnabled()) {
ByteBuffer bb = getBuffer();
if (bb != null) {
return new SingleByteBuff(() -> putbackBuffer(bb), bb);
}
}
// Allocated from heap, let the JVM free its memory.
    return ((SingleByteBuff) (ByteBuff.wrap(allocateOnHeap(bufSize))));
  } | 3.26 |
hbase_ByteBuffAllocator_clean_rdh | /**
* Free all direct buffers if allocated, mainly used for testing.
*/
public void clean() {
while (!buffers.isEmpty()) {
ByteBuffer v21 = buffers.poll();
if (v21.isDirect()) {
UnsafeAccess.freeDirectBuffer(v21);
}
}
this.usedBufCount.set(0);
this.maxPoolSizeInfoLevelLogged = false;
    this.poolAllocationBytes.reset();
    this.heapAllocationBytes.reset();
this.lastPoolAllocationBytes = 0;
this.lastHeapAllocationBytes = 0;
} | 3.26 |
hbase_CoprocessorRpcUtils_setControllerException_rdh | /**
* Stores an exception encountered during RPC invocation so it can be passed back through to the
* client.
*
* @param controller
* the controller instance provided by the client when calling the service
* @param ioe
* the exception encountered
*/
public static void setControllerException(RpcController controller, IOException ioe) {
if (controller == null) {
return;
}
    if (controller instanceof ServerRpcController) {
((ServerRpcController) (controller)).setFailedOn(ioe);
} else {
controller.setFailed(StringUtils.stringifyException(ioe));
}
} | 3.26 |
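A brief sketch of how the stored exception flows back out on the other side. The class and method names below are hypothetical, while `setControllerException`, `failedOnException()` and `getFailedOn()` are the existing `CoprocessorRpcUtils`/`ServerRpcController` calls:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.ipc.ServerRpcController;

public class ControllerExceptionSketch {
  // Records a server-side failure on the controller, then shows how a caller
  // would detect and rethrow it after the service call returns.
  static void reportAndUnwrap(ServerRpcController controller) throws IOException {
    CoprocessorRpcUtils.setControllerException(controller, new IOException("region unavailable"));
    if (controller.failedOnException()) {
      throw controller.getFailedOn();
    }
  }
}
```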
hbase_CoprocessorRpcUtils_run_rdh | /**
* Called on completion of the RPC call with the response object, or {@code null} in the case of
* an error.
*
* @param parameter
* the response object or {@code null} if an error occurred
*/
@Override
public void run(R parameter) {
synchronized(this) {
result = parameter;
resultSet = true;
this.notifyAll();
}
} | 3.26 |
hbase_CoprocessorRpcUtils_get_rdh | /**
* Returns the parameter passed to {@link #run(Object)} or {@code null} if a null value was
* passed. When used asynchronously, this method will block until the {@link #run(Object)}
* method has been called.
*
* @return the response object or {@code null} if no response was passed
 */
public synchronized R get() throws IOException {
while (!resultSet) {
try {
this.wait();
} catch (InterruptedException ie) {
        InterruptedIOException exception = new InterruptedIOException(ie.getMessage());
exception.initCause(ie);
throw exception;
}
}
return result;
} | 3.26 |
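The run()/get() pair above implements a simple one-shot hand-off. A minimal sketch, assuming the public nested class `CoprocessorRpcUtils.BlockingRpcCallback`; the surrounding class, method and thread usage are illustrative only:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils.BlockingRpcCallback;

public class CallbackSketch {
  // get() blocks until run() has delivered a value (or null) from another thread.
  static String waitForResult() throws IOException, InterruptedException {
    final BlockingRpcCallback<String> callback = new BlockingRpcCallback<>();
    Thread worker = new Thread(() -> callback.run("done"));
    worker.start();
    String result = callback.get(); // blocks until run() is called
    worker.join();
    return result;
  }
}
```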
hbase_CoprocessorRpcUtils_getServiceName_rdh | /**
* Returns the name to use for coprocessor service calls. For core HBase services (in the hbase.pb
* protobuf package), this returns the unqualified name in order to provide backward compatibility
* across the package name change. For all other services, the fully-qualified service name is
* used.
*/
public static String getServiceName(Descriptors.ServiceDescriptor service) {
if (service.getFullName().startsWith(hbaseServicePackage)) {
return service.getName();
}
return service.getFullName();
} | 3.26 |
hbase_MasterMaintenanceModeTracker_start_rdh | /**
* Starts the tracking of whether master is in Maintenance Mode.
*/
public void start() {
  watcher.registerListener(this);
update();
} | 3.26 |
hbase_IPCUtil_wrapException_rdh | /**
* Takes an Exception, the address, and if pertinent, the RegionInfo for the Region we were trying
* to connect to and returns an IOException with the input exception as the cause. The new
* exception provides the stack trace of the place where the exception is thrown and some extra
* diagnostics information.
* <p/>
* Notice that we will try our best to keep the original exception type when creating a new
* exception, especially for the 'connection' exceptions, as it is used to determine whether this
 * is a network issue or whether the remote side is telling us clearly what is wrong, which is
 * important when deciding whether to retry. If it is not possible to create a new exception with the same type,
* for example, the {@code error} is not an {@link IOException}, an {@link IOException} will be
* created.
*
* @param addr
* target address
* @param error
* the relevant exception
* @return an exception to throw
* @see ClientExceptionsUtil#isConnectionException(Throwable)
*/
static IOException wrapException(Address addr, RegionInfo regionInfo, Throwable error) {
    if (error instanceof ConnectException) {
      // connection refused; include the host:port in the error
      return ((IOException) (new ConnectException(
        (("Call to " + getCallTarget(addr, regionInfo)) + " failed on connection exception: ") + error).initCause(error)));
    } else if (error instanceof SocketTimeoutException) {
      return ((IOException) (new SocketTimeoutException(
        (("Call to " + getCallTarget(addr, regionInfo)) + " failed because ") + error).initCause(error)));
    } else if (error instanceof ConnectionClosingException) {
return new ConnectionClosingException((("Call to " + getCallTarget(addr, regionInfo)) + " failed on local exception: ") + error, error);
} else if (error instanceof ServerTooBusyException) {
// we already have address in the exception message
return ((IOException) (error));
} else if (error instanceof DoNotRetryIOException) {
// try our best to keep the original exception type
try {
return ((IOException) (error.getClass().asSubclass(DoNotRetryIOException.class).getConstructor(String.class).newInstance((("Call to " + getCallTarget(addr, regionInfo)) + " failed on local exception: ") + error).initCause(error)));
} catch (InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException | NoSuchMethodException | SecurityException e) {
// just ignore, will just new a DoNotRetryIOException instead below
}
return new DoNotRetryIOException((("Call to " + getCallTarget(addr, regionInfo)) + " failed on local exception: ") + error, error);
    } else if (error instanceof ConnectionClosedException) {
      return new ConnectionClosedException((("Call to " + getCallTarget(addr, regionInfo)) + " failed on local exception: ") + error, error);
    } else if (error instanceof CallTimeoutException) {
return new CallTimeoutException((("Call to " + getCallTarget(addr, regionInfo)) + " failed on local exception: ") + error, error);
} else if (error instanceof ClosedChannelException) {
// ClosedChannelException does not have a constructor which takes a String but it is a
// connection exception so we keep its original type
return ((IOException) (error));
} else if (error instanceof TimeoutException) {
// TimeoutException is not an IOException, let's convert it to TimeoutIOException.
return new TimeoutIOException((("Call to " + getCallTarget(addr, regionInfo)) + " failed on local exception: ") + error, error);
} else {
// try our best to keep the original exception type
if (error instanceof IOException) {
try {
return ((IOException) (error.getClass().asSubclass(IOException.class).getConstructor(String.class).newInstance((("Call to " + getCallTarget(addr, regionInfo)) + " failed on local exception: ") + error).initCause(error)));
} catch (InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException
| NoSuchMethodException | SecurityException e) {
// just ignore, will just new an IOException instead below
}
}
return new HBaseIOException((("Call to " + getCallTarget(addr, regionInfo)) + " failed on local exception: ") + error, error);
}
} | 3.26 |
hbase_IPCUtil_getTotalSizeWhenWrittenDelimited_rdh | /**
* Returns Size on the wire when the two messages are written with writeDelimitedTo
*/
public static int getTotalSizeWhenWrittenDelimited(Message... messages) {
int totalSize = 0;
for (Message m : messages) {
if (m == null) {
continue;
}
totalSize += m.getSerializedSize();
totalSize += CodedOutputStream.computeUInt32SizeNoTag(m.getSerializedSize());
}
Preconditions.checkArgument(totalSize < Integer.MAX_VALUE);
return totalSize;
} | 3.26 |
hbase_IPCUtil_createRemoteException_rdh | /**
*
* @param e
* exception to be wrapped
* @return RemoteException made from passed <code>e</code>
*/
static RemoteException createRemoteException(final ExceptionResponse e) {
String innerExceptionClassName = e.getExceptionClassName();
boolean doNotRetry = e.getDoNotRetry();
boolean serverOverloaded = e.hasServerOverloaded() && e.getServerOverloaded();
return e.hasHostname() ? // If a hostname then add it to the RemoteWithExtrasException
new RemoteWithExtrasException(innerExceptionClassName, e.getStackTrace(), e.getHostname(), e.getPort(), doNotRetry, serverOverloaded) : new RemoteWithExtrasException(innerExceptionClassName, e.getStackTrace(), doNotRetry, serverOverloaded);
} | 3.26 |
hbase_IPCUtil_write_rdh | /**
* Write out header, param, and cell block if there is one.
*
* @param dos
* Stream to write into
* @param header
* to write
* @param param
* to write
* @param cellBlock
* to write
* @return Total number of bytes written.
* @throws IOException
* if write action fails
*/
public static int write(final OutputStream dos, final Message header, final Message param, final ByteBuf cellBlock) throws IOException {
    // Must calculate total size and write that first so other side can read it all in one
// swoop. This is dictated by how the server is currently written. Server needs to change
// if we are to be able to write without the length prefixing.
int totalSize = IPCUtil.getTotalSizeWhenWrittenDelimited(header, param);
if (cellBlock != null) {
totalSize += cellBlock.readableBytes();
}
return write(dos, header, param, cellBlock, totalSize);
} | 3.26 |
hbase_ExplicitColumnTracker_reset_rdh | // Called between every row.
@Override
public void reset() {
this.index = 0;
this.f0 = this.columns[this.index];
for (ColumnCount col : this.columns) {
col.setCount(0);
}
resetTS();
} | 3.26 |
hbase_ExplicitColumnTracker_checkColumn_rdh | /**
* {@inheritDoc }
*/
@Override
public MatchCode checkColumn(Cell cell, byte type) {
// delete markers should never be passed to an
// *Explicit*ColumnTracker
assert !PrivateCellUtil.isDelete(type);
do {
// No more columns left, we are done with this query
if (done()) {
return MatchCode.SEEK_NEXT_ROW;// done_row
}
// No more columns to match against, done with storefile
if (this.f0 == null) {
return MatchCode.SEEK_NEXT_ROW;// done_row
}
// Compare specific column to current column
int ret = CellUtil.compareQualifiers(cell, f0.getBuffer(), f0.getOffset(), f0.getLength());
// Column Matches. Return include code. The caller would call checkVersions
// to limit the number of versions.
if (ret == 0) {
return MatchCode.INCLUDE;
}
resetTS();
if (ret < 0) {
// The current KV is smaller than the column the ExplicitColumnTracker
// is interested in, so seek to that column of interest.
return MatchCode.SEEK_NEXT_COL;
}
// The current KV is bigger than the column the ExplicitColumnTracker
// is interested in. That means there is no more data for the column
// of interest. Advance the ExplicitColumnTracker state to next
// column of interest, and check again.
if (ret > 0) {
++this.index;
if (done()) {
// No more to match, do not include, done with this row.
return MatchCode.SEEK_NEXT_ROW;// done_row
}
// This is the recursive case.
this.f0 = this.columns[this.index];
}
} while (true );
} | 3.26 |
hbase_SnapshotSegmentScanner_getScannerOrder_rdh | /**
*
* @see KeyValueScanner#getScannerOrder()
*/
@Override
public long getScannerOrder() {
return 0;
} | 3.26 |
hbase_AsyncTableBuilder_setMaxRetries_rdh | /**
* Set the max retry times for an operation. Usually it is the max attempt times minus 1.
* <p>
 * Operation timeout and max attempt times (or max retry times) are both limitations for retrying;
 * we will stop retrying when we reach any of the limitations.
*
* @see #setMaxAttempts(int)
* @see #setOperationTimeout(long, TimeUnit)
*/
default AsyncTableBuilder<C> setMaxRetries(int maxRetries) {
return setMaxAttempts(retries2Attempts(maxRetries));
} | 3.26 |
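To illustrate how retries and the operation timeout interact, here is a hedged sketch of building a table with both limits set; the table name and the concrete values are arbitrary examples, not defaults:

```java
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.AsyncTable;

public class AsyncTableBuilderSketch {
  // Builds an AsyncTable that gives up after 4 attempts (3 retries) or after
  // 30 seconds overall, whichever limit is reached first.
  static AsyncTable<?> build(AsyncConnection conn) {
    return conn.getTableBuilder(TableName.valueOf("example_table"))
      .setMaxRetries(3)
      .setOperationTimeout(30, TimeUnit.SECONDS)
      .build();
  }
}
```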
hbase_BlockingRpcClient_createConnection_rdh | /**
* Creates a connection. Can be overridden by a subclass for testing.
*
* @param remoteId
* - the ConnectionId to use for the connection creation.
*/
@Override
protected BlockingRpcConnection createConnection(ConnectionId remoteId) throws IOException {
return new BlockingRpcConnection(this, remoteId);
} | 3.26 |
hbase_HbckRegionInfo_loadHdfsRegioninfo_rdh | /**
* Read the .regioninfo file from the file system. If there is no .regioninfo, add it to the
* orphan hdfs region list.
*/
public void loadHdfsRegioninfo(Configuration conf) throws IOException {
Path regionDir = getHdfsRegionDir();
if (regionDir == null) {
if (m0() == RegionInfo.DEFAULT_REPLICA_ID) {
// Log warning only for default/ primary replica with no region dir
LOG.warn((("No HDFS region dir found: " + this) + " meta=") + metaEntry);
}
return;
}
if (hdfsEntry.hri != null) {
// already loaded data
return;
}
FileSystem fs = FileSystem.get(conf);
RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
    LOG.debug("RegionInfo read: " + hri.toString());
    hdfsEntry.hri = hri;
} | 3.26 |
hbase_RecoverLeaseFSUtils_getLogMessageDetail_rdh | /**
* Returns Detail to append to any log message around lease recovering.
*/
private static String getLogMessageDetail(final int nbAttempt, final Path p, final long startWaiting) {
return ((((("attempt=" + nbAttempt) + " on file=") + p) + " after ") + (EnvironmentEdgeManager.currentTime() - startWaiting)) + "ms";
} | 3.26 |
hbase_RecoverLeaseFSUtils_recoverFileLease_rdh | /**
* Recover the lease from HDFS, retrying multiple times.
*/
public static void recoverFileLease(FileSystem fs, Path p, Configuration conf, CancelableProgressable reporter) throws IOException {
    if (fs instanceof FilterFileSystem) {
      fs = ((FilterFileSystem) (fs)).getRawFileSystem();
    }
// lease recovery not needed for local file system case.
if (!(fs instanceof DistributedFileSystem)) {
return;
}
recoverDFSFileLease(((DistributedFileSystem) (fs)), p, conf, reporter);
} | 3.26 |
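A typical call site for the lease-recovery helper, sketched under assumptions: the WAL path is a made-up example and the `null` reporter simply means no progress callbacks are wanted:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.RecoverLeaseFSUtils;

public class LeaseRecoverySketch {
  // Recovers the HDFS lease on a WAL file before reading it; a no-op for
  // local file systems, per the method above.
  static void recover(Configuration conf) throws Exception {
    Path wal = new Path("/hbase/WALs/example-wal"); // hypothetical path
    FileSystem fs = wal.getFileSystem(conf);
    RecoverLeaseFSUtils.recoverFileLease(fs, wal, conf, null);
  }
}
```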
hbase_RecoverLeaseFSUtils_isFileClosed_rdh | /**
* Call HDFS-4525 isFileClosed if it is available.
*
* @return True if file is closed.
*/
private static boolean isFileClosed(final DistributedFileSystem dfs, final Method m, final Path p) {
try {
return ((Boolean) (m.invoke(dfs, p)));
} catch (SecurityException e) {
LOG.warn("No access", e);
} catch (Exception e) {
LOG.warn("Failed invocation for " + p.toString(), e);
}
return false;
} | 3.26 |
hbase_RecoverLeaseFSUtils_recoverLease_rdh | /**
* Try to recover the lease.
*
 * @return True if dfs#recoverLease returned true.
*/
private static boolean recoverLease(final DistributedFileSystem dfs, final int nbAttempt, final Path p, final long startWaiting) throws FileNotFoundException {
boolean v10 = false;
try {
v10 = dfs.recoverLease(p);
LOG.info((v10 ? "Recovered lease, " : "Failed to recover lease, ") + getLogMessageDetail(nbAttempt, p, startWaiting));
} catch (IOException e) {
if ((e instanceof LeaseExpiredException) && e.getMessage().contains("File does not exist")) {
// This exception comes out instead of FNFE, fix it
throw new FileNotFoundException("The given WAL wasn't found at " + p);
    } else if (e instanceof FileNotFoundException) {
      throw ((FileNotFoundException) (e));
}
LOG.warn(getLogMessageDetail(nbAttempt, p, startWaiting), e);
}
return v10;
} | 3.26 |
hbase_IdentityTableMap_m1_rdh | /**
* Pass the key, value to reduce
*/
public void m1(ImmutableBytesWritable key,
Result value, OutputCollector<ImmutableBytesWritable, Result> output,
Reporter reporter) throws IOException {
// convert
output.collect(key, value);
} | 3.26 |
hbase_IdentityTableMap_m0_rdh | /**
* Use this before submitting a TableMap job. It will appropriately set up the JobConf.
*
* @param table
* table name
* @param columns
* columns to scan
* @param mapper
* mapper class
* @param job
* job configuration
*/
@SuppressWarnings("unchecked")
public static void m0(String table, String columns, Class<? extends TableMap> mapper, JobConf job) {
TableMapReduceUtil.initTableMapJob(table, columns, mapper, ImmutableBytesWritable.class, Result.class, job);
} | 3.26 |
hbase_Classes_extendedForName_rdh | /**
* Utilities for class manipulation.
*/
@InterfaceAudience.Private
public class Classes {
/**
 * Equivalent of {@link Class#forName(String)} which also returns classes for primitives like
 * <code>boolean</code>, etc.
 *
 * @param className
 *        The name of the class to retrieve. Can be either a normal class or a primitive class.
 * @return The class specified by <code>className</code>.
 * @throws ClassNotFoundException
 *        If the requested class can not be found.
 */
public static Class<?> extendedForName(String className) throws ClassNotFoundException {
Class<?> valueType;
if (className.equals("boolean")) {
valueType = boolean.class;
} else if (className.equals("byte")) {
valueType = byte.class;
} else if (className.equals("short")) {
valueType = short.class;
} else if (className.equals("int")) {
valueType = int.class;
} else if (className.equals("long")) {
valueType = long.class;
} else if (className.equals("float")) {
valueType = float.class;
} else if (className.equals("double")) {
valueType = double.class;
} else if (className.equals("char")) {
valueType = char.class;
} else {
valueType = Class.forName(className);
}
    return valueType;
} | 3.26 |
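The difference from plain `Class.forName` is the primitive handling, which the following small sketch demonstrates (the wrapper class and method names are invented for illustration):

```java
import org.apache.hadoop.hbase.util.Classes;

public class ExtendedForNameSketch {
  // Class.forName("int") would throw ClassNotFoundException, but the
  // primitive-aware lookup resolves it to int.class.
  static void resolve() throws ClassNotFoundException {
    Class<?> primitive = Classes.extendedForName("int");
    Class<?> ordinary = Classes.extendedForName("java.lang.String");
    assert primitive == int.class;
    assert ordinary == String.class;
  }
}
```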
hbase_NamespaceQuotaSnapshotStore_getQuotaForNamespace_rdh | /**
* Fetches the namespace quota. Visible for mocking/testing.
*/
Quotas getQuotaForNamespace(String namespace) throws IOException {
return QuotaTableUtil.getNamespaceQuota(conn, namespace);
} | 3.26 |
hbase_BackupManifest_addDependentImage_rdh | /**
* Add dependent backup image for this backup.
*
* @param image
* The direct dependent backup image
*/
public void addDependentImage(BackupImage image) {
this.backupImage.addAncestor(image);
} | 3.26 |
hbase_BackupManifest_getTableList_rdh | /**
* Get the table set of this image.
*
* @return The table set list
*/
public List<TableName> getTableList() {
    return backupImage.getTableNames();
} | 3.26 |
hbase_BackupManifest_setIncrTimestampMap_rdh | /**
* Set the incremental timestamp map directly.
*
* @param incrTimestampMap
* timestamp map
*/
public void setIncrTimestampMap(Map<TableName, Map<String, Long>> incrTimestampMap) {
this.backupImage.setIncrTimeRanges(incrTimestampMap);
} | 3.26 |
hbase_BackupManifest_getBackupImage_rdh | /**
* Get this backup image.
*
* @return the backup image.
*/
public BackupImage getBackupImage() {
return backupImage;
} | 3.26 |
hbase_BackupManifest_store_rdh | /**
* TODO: fix it. Persist the manifest file.
*
* @throws BackupException
* if an error occurred while storing the manifest file.
*/
public void store(Configuration conf) throws BackupException {
byte[] data = backupImage.toProto().toByteArray();
// write the file, overwrite if already exist
Path manifestFilePath = new Path(HBackupFileSystem.getBackupPath(backupImage.getRootDir(), backupImage.getBackupId()), MANIFEST_FILE_NAME);
try (FSDataOutputStream out = manifestFilePath.getFileSystem(conf).create(manifestFilePath, true)) {
out.write(data);
} catch (IOException e) {
throw new BackupException(e.getMessage());
}
LOG.info("Manifest file stored to " + manifestFilePath);
} | 3.26 |
hbase_BackupManifest_getDependentListByTable_rdh | /**
* Get the dependent image list for a specific table of this backup in time order from old to new
* if want to restore to this backup image level.
*
* @param table
* table
* @return the backup image list for a table in time order
*/
public ArrayList<BackupImage> getDependentListByTable(TableName table) {
ArrayList<BackupImage> tableImageList = new ArrayList<>();
ArrayList<BackupImage> imageList = getRestoreDependentList(true);
for (BackupImage image : imageList) {
if (image.hasTable(table)) {
tableImageList.add(image);
        if (image.getType() == BackupType.FULL) {
break;
}
}
}
Collections.reverse(tableImageList);
return tableImageList;
} | 3.26 |
hbase_BackupManifest_canCoverImage_rdh | /**
* Check whether backup image set could cover a backup image or not.
*
* @param fullImages
* The backup image set
* @param image
* The target backup image
* @return true if fullImages can cover image, otherwise false
*/
  public static boolean canCoverImage(ArrayList<BackupImage> fullImages, BackupImage image) {
    // fullImages can cover image only when the following conditions are satisfied:
// - each image of fullImages must not be an incremental image;
// - each image of fullImages must be taken after image has been taken;
// - sum table set of fullImages must cover the table set of image.
    for (BackupImage image1 : fullImages) {
      if (image1.getType() == BackupType.INCREMENTAL) {
        return false;
}
if (image1.getStartTs() < image.getStartTs()) {
return false;
}
    }
    ArrayList<String> image1TableList = new ArrayList<>();
for (BackupImage image1 : fullImages) {
List<TableName> tableList = image1.getTableNames();
for (TableName table : tableList) {
image1TableList.add(table.getNameAsString());
}
}
ArrayList<String> image2TableList = new ArrayList<>();
List<TableName> v77 = image.getTableNames();
for (TableName table : v77) {
image2TableList.add(table.getNameAsString());
}
for (int i = 0; i < image2TableList.size(); i++) {
if (image1TableList.contains(image2TableList.get(i)) == false) {
return false;
}
}
LOG.debug("Full image set can cover image " + image.getBackupId());
return true;
} | 3.26 |
hbase_RSGroupInfoManagerImpl_getOnlineServers_rdh | /**
* Returns Set of online Servers named for their hostname and port (not ServerName).
*/
private Set<Address> getOnlineServers() {
return masterServices.getServerManager().getOnlineServers().keySet().stream().map(ServerName::getAddress).collect(Collectors.toSet());
} | 3.26 |
hbase_RSGroupInfoManagerImpl_getRegions_rdh | /**
* Returns List of Regions associated with this <code>server</code>.
 */
private List<RegionInfo> getRegions(final Address server) {
LinkedList<RegionInfo> regions = new LinkedList<>();
for (Map.Entry<RegionInfo, ServerName> el : masterServices.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) {
if (el.getValue() == null) {
continue;
}
if (el.getValue().getAddress().equals(server)) {
addRegion(regions, el.getKey());
}
}
for (RegionStateNode state : masterServices.getAssignmentManager().getRegionsInTransition()) {
if ((state.getRegionLocation() != null) && state.getRegionLocation().getAddress().equals(server)) {
addRegion(regions, state.getRegionInfo());
}
}
return regions;
} | 3.26 |
hbase_RSGroupInfoManagerImpl_migrate_rdh | // Migrate the table rs group info from RSGroupInfo into the table descriptor
// Notice that we do not want to block the initialization, so this will be done in the background, and
// during the migration the rs group info may be incomplete and cause regions to be misplaced.
private void migrate() {
  Thread migrateThread = new Thread(MIGRATE_THREAD_NAME) {
@Override
public void run() {
f0.info("Start migrating table rs group config");
while (!masterServices.isStopped()) {
Collection<RSGroupInfo> groups = holder.groupName2Group.values();
boolean hasTables = groups.stream().anyMatch(r -> !r.getTables().isEmpty());
if (!hasTables) {
break;
}
migrate(groups);
}
f0.info("Done migrating table rs group info");
}
};
migrateThread.setDaemon(true);
migrateThread.start();
} | 3.26 |
hbase_RSGroupInfoManagerImpl_updateCacheOfRSGroups_rdh | /**
* Update cache of rsgroups. Caller must be synchronized on 'this'.
*
* @param currentGroups
* Current list of Groups.
*/
private void updateCacheOfRSGroups(final Set<String> currentGroups) {
this.prevRSGroups.clear();
this.prevRSGroups.addAll(currentGroups);
} | 3.26 |
hbase_RSGroupInfoManagerImpl_resetRSGroupMap_rdh | /**
* Make changes visible. Caller must be synchronized on 'this'.
 */
private void resetRSGroupMap(Map<String, RSGroupInfo> newRSGroupMap) {
this.holder = new RSGroupInfoHolder(newRSGroupMap);
} | 3.26 |
hbase_RSGroupInfoManagerImpl_getRSGroupAssignmentsByTable_rdh | /**
 * This is an EXPENSIVE clone. Cloning though is the safest thing to do. Can't let out the original
 * since it can change and at least the load balancer wants to iterate this exported list. The load
 * balancer should iterate over this list because the cloned list will ignore disabled tables and
 * split parent region cases. This method is invoked by {@link #balanceRSGroup}.
*
* @return A clone of current assignments for this group.
*/
  Map<TableName, Map<ServerName, List<RegionInfo>>> getRSGroupAssignmentsByTable(TableStateManager tableStateManager, String groupName) throws IOException {
    Map<TableName, Map<ServerName, List<RegionInfo>>> result = Maps.newHashMap();
    Set<TableName> tablesInGroupCache = new HashSet<>();
    for (Map.Entry<RegionInfo, ServerName> entry : masterServices.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) {
RegionInfo region = entry.getKey();
TableName tn = region.getTable();
ServerName server = entry.getValue();
if (isTableInGroup(tn, groupName, tablesInGroupCache)) {
if (tableStateManager.isTableState(tn, State.DISABLED, State.DISABLING)) {
continue;
}
if (region.isSplitParent()) {
continue;
}
result.computeIfAbsent(tn, k -> new HashMap<>()).computeIfAbsent(server, k -> new ArrayList<>()).add(region);
}
    }
    RSGroupInfo rsGroupInfo = getRSGroupInfo(groupName);
for (ServerName serverName : masterServices.getServerManager().getOnlineServers().keySet()) {
if (rsGroupInfo.containsServer(serverName.getAddress())) {
for (Map<ServerName, List<RegionInfo>> map : result.values()) {
map.computeIfAbsent(serverName, k -> Collections.emptyList());
}
}
}
return result;
} | 3.26 |
hbase_RSGroupInfoManagerImpl_moveServerRegionsFromGroup_rdh | /**
* Move every region from servers which are currently located on these servers, but should not be
* located there.
*
* @param movedServers
* the servers that are moved to new group
* @param srcGrpServers
* all servers in the source group, excluding the movedServers
* @param targetGroupName
* the target group
* @param sourceGroupName
* the source group
* @throws IOException
* if moving the server and tables fail
*/
  private void moveServerRegionsFromGroup(Set<Address> movedServers, Set<Address> srcGrpServers, String targetGroupName, String sourceGroupName) throws IOException {
moveRegionsBetweenGroups(movedServers, srcGrpServers, targetGroupName, sourceGroupName, rs -> getRegions(rs), info -> {
try {
String groupName = RSGroupUtil.getRSGroupInfo(masterServices, this, info.getTable()).map(RSGroupInfo::getName).orElse(RSGroupInfo.DEFAULT_GROUP);
return groupName.equals(targetGroupName);
} catch (IOException e) {
f0.warn("Failed to test group for region {} and target group {}", info, targetGroupName);
return false;
}
});
} | 3.26 |
hbase_RSGroupInfoManagerImpl_refresh_rdh | /**
* Read rsgroup info from the source of truth, the hbase:rsgroup table. Update zk cache. Called on
* startup of the manager.
*/
private synchronized void refresh(boolean forceOnline) throws IOException {
List<RSGroupInfo> groupList = new ArrayList<>();
// Overwrite anything read from zk, group table is source of truth
// if online read from GROUP table
if (forceOnline || isOnline()) {
f0.debug("Refreshing in Online mode.");
groupList.addAll(retrieveGroupListFromGroupTable());
} else {
f0.debug("Refreshing in Offline mode.");
groupList.addAll(retrieveGroupListFromZookeeper());
}
// This is added to the last of the list so it overwrites the 'default' rsgroup loaded
// from region group table or zk
    groupList.add(new RSGroupInfo(RSGroupInfo.DEFAULT_GROUP, getDefaultServers(groupList)));
// populate the data
HashMap<String, RSGroupInfo> newGroupMap = Maps.newHashMap();
for (RSGroupInfo group : groupList) {
newGroupMap.put(group.getName(), group);
}
resetRSGroupMap(newGroupMap);
updateCacheOfRSGroups(newGroupMap.keySet());
} | 3.26 |
hbase_RSGroupInfoManagerImpl_getDefaultServers_rdh | // Called by ServerEventsListenerThread. Presume it has lock on this manager when it runs.
private SortedSet<Address> getDefaultServers(List<RSGroupInfo> rsGroupInfoList) {
// Build a list of servers in other groups than default group, from rsGroupMap
Set<Address> serversInOtherGroup = new HashSet<>();
for (RSGroupInfo group : rsGroupInfoList) {
if (!RSGroupInfo.DEFAULT_GROUP.equals(group.getName())) {
// not default group
serversInOtherGroup.addAll(group.getServers());
}
}
// Get all online servers from Zookeeper and find out servers in default group
SortedSet<Address> defaultServers = Sets.newTreeSet();
for (ServerName serverName : masterServices.getServerManager().getOnlineServers().keySet()) {
Address server = Address.fromParts(serverName.getHostname(), serverName.getPort());
if (!serversInOtherGroup.contains(server)) {
// not in other groups
defaultServers.add(server);
}
}
return defaultServers;
} | 3.26 |
hbase_RSGroupInfoManagerImpl_checkForDeadOrOnlineServers_rdh | /**
 * Check if the given set of servers belongs to the dead servers list or the online servers list.
*
* @param servers
* servers to remove
*/
private void checkForDeadOrOnlineServers(Set<Address> servers) throws IOException {
// This ugliness is because we only have Address, not ServerName.
Set<Address> onlineServers = new HashSet<>();
List<ServerName> drainingServers = masterServices.getServerManager().getDrainingServersList();
    for (ServerName server : masterServices.getServerManager().getOnlineServers().keySet()) {
      // Only online but not decommissioned servers are really online
if (!drainingServers.contains(server)) {
onlineServers.add(server.getAddress());
}
}
Set<Address> deadServers = new HashSet<>();
for (ServerName server : masterServices.getServerManager().getDeadServers().copyServerNames()) {
deadServers.add(server.getAddress());
}
for (Address address : servers) {
if (onlineServers.contains(address)) {
throw new DoNotRetryIOException(("Server " + address) + " is an online server, not allowed to remove.");
}
if (deadServers.contains(address)) {
        throw new DoNotRetryIOException((("Server " + address) + " is on the dead servers list,") + " Maybe it will come back again, not allowed to remove.");
      }
    }
} | 3.26 |
hbase_DigestSaslServerAuthenticationProvider_handle_rdh | /**
* {@inheritDoc }
*/
@Override
  public void handle(Callback[] callbacks) throws InvalidToken, UnsupportedCallbackException {
NameCallback nc = null;
PasswordCallback pc = null;
AuthorizeCallback ac = null;
for (Callback callback : callbacks) {
if (callback instanceof AuthorizeCallback) {
ac = ((AuthorizeCallback) (callback));
} else if (callback instanceof NameCallback) {
nc = ((NameCallback) (callback));
      } else if (callback instanceof PasswordCallback) {
pc = ((PasswordCallback) (callback));
      } else if (callback instanceof RealmCallback) {
        continue; // realm is ignored
} else {
throw new UnsupportedCallbackException(callback, "Unrecognized SASL DIGEST-MD5 Callback");
}
}
if (pc != null) {
TokenIdentifier tokenIdentifier = HBaseSaslRpcServer.getIdentifier(nc.getDefaultName(), secretManager);
attemptingUser.set(tokenIdentifier.getUser());
char[] password = getPassword(tokenIdentifier);
if (LOG.isTraceEnabled()) {
        LOG.trace("SASL server DIGEST-MD5 callback: setting password for client: {}", tokenIdentifier.getUser());
      }
pc.setPassword(password);
}
if (ac != null) {
// The authentication ID is the identifier (username) of the user who authenticated via
// SASL (the one who provided credentials). The authorization ID is who the remote user
// "asked" to be once they authenticated. This is akin to the UGI/JAAS "doAs" notion, e.g.
// authentication ID is the "real" user and authorization ID is the "proxy" user.
//
// For DelegationTokens: we do not expect any remote user with a delegation token to execute
// any RPCs as a user other than themselves. We disallow all cases where the real user
// does not match who the remote user wants to execute a request as someone else.
String authenticatedUserId = ac.getAuthenticationID();
String userRequestedToExecuteAs = ac.getAuthorizationID();
if (authenticatedUserId.equals(userRequestedToExecuteAs)) {
ac.setAuthorized(true);
if (LOG.isTraceEnabled()) {
String username = HBaseSaslRpcServer.getIdentifier(userRequestedToExecuteAs, secretManager).getUser().getUserName();
LOG.trace(("SASL server DIGEST-MD5 callback: setting " + "canonicalized client ID: ") + username);
}
ac.setAuthorizedID(userRequestedToExecuteAs);
} else {
ac.setAuthorized(false);
}
}
} | 3.26 |
hbase_BlockingRpcConnection_handleConnectionFailure_rdh | /**
* Handle connection failures If the current number of retries is equal to the max number of
* retries, stop retrying and throw the exception; Otherwise backoff N seconds and try connecting
* again. This Method is only called from inside setupIOstreams(), which is synchronized. Hence
* the sleep is synchronized; the locks will be retained.
*
* @param curRetries
* current number of retries
* @param maxRetries
* max number of retries allowed
* @param ioe
* failure reason
* @throws IOException
* if max number of retries is reached
*/
private void handleConnectionFailure(int curRetries, int maxRetries, IOException ioe) throws IOException {
m1();
// throw the exception if the maximum number of retries is reached
if ((curRetries >= maxRetries) || ExceptionUtil.isInterrupt(ioe)) {
throw ioe;
}
// otherwise back off and retry
try {
Thread.sleep(this.rpcClient.failureSleep);
} catch (InterruptedException ie) {
ExceptionUtil.rethrowIfInterrupt(ie);
}
if (LOG.isInfoEnabled()) {
LOG.info(((((("Retrying connect to server: " + remoteId.getAddress()) + " after sleeping ") + this.rpcClient.failureSleep) + "ms. Already tried ") + curRetries) + " time(s).");
}
} | 3.26 |
hbase_BlockingRpcConnection_readResponse_rdh | /* Receive a response. Because only one receiver, so no synchronization on in. */
private void readResponse() {
Call call = null;
boolean v35 = false;
    try {
      // See HBaseServer.Call.setResponse for where we write out the response.
// Total size of the response. Unused. But have to read it in anyways.
int totalSize = in.readInt();
// Read the header
ResponseHeader responseHeader = ResponseHeader.parseDelimitedFrom(in);
int id = responseHeader.getCallId();
      call = calls.remove(id); // call.done has to be set before leaving this method
v35 = (call != null) && (!call.isDone());
if (!v35) {
// So we got a response for which we have no corresponding 'call' here on the client-side.
// We probably timed out waiting, cleaned up all references, and now the server decides
// to return a response. There is nothing we can do w/ the response at this stage. Clean
// out the wire of the response so its out of the way and we can get other responses on
// this connection.
int readSoFar = getTotalSizeWhenWrittenDelimited(responseHeader);
int whatIsLeftToRead = totalSize - readSoFar;
IOUtils.skipFully(in, whatIsLeftToRead);
if (call != null) {
call.callStats.setResponseSizeBytes(totalSize);
call.callStats.setCallTimeMs(EnvironmentEdgeManager.currentTime() - call.callStats.getStartTime());
}
return;
}
      if (responseHeader.hasException()) {
ExceptionResponse exceptionResponse = responseHeader.getException();
RemoteException re = createRemoteException(exceptionResponse);
call.setException(re);
call.callStats.setResponseSizeBytes(totalSize);
call.callStats.setCallTimeMs(EnvironmentEdgeManager.currentTime() - call.callStats.getStartTime());
if (isFatalConnectionException(exceptionResponse)) {
synchronized(this) {
m2(re);
}
}
} else {
        Message value = null;
if (call.responseDefaultType != null) {
Message.Builder builder = call.responseDefaultType.newBuilderForType();
ProtobufUtil.mergeDelimitedFrom(builder, in);
value = builder.build();
}
CellScanner cellBlockScanner = null;
if (responseHeader.hasCellBlockMeta()) {
int v46 = responseHeader.getCellBlockMeta().getLength();
byte[] cellBlock = new byte[v46];
IOUtils.readFully(this.in, cellBlock, 0, cellBlock.length);
cellBlockScanner = this.rpcClient.cellBlockBuilder.createCellScanner(this.codec, this.compressor, cellBlock);
}
call.setResponse(value, cellBlockScanner);
call.callStats.setResponseSizeBytes(totalSize);
          call.callStats.setCallTimeMs(EnvironmentEdgeManager.currentTime() - call.callStats.getStartTime());
        }
      } catch (IOException e) {
if (v35) {
call.setException(e);
}
if (e instanceof SocketTimeoutException) {
// Clean up open calls but don't treat this as a fatal condition,
// since we expect certain responses to not make it by the specified
// {@link ConnectionId#rpcTimeout}.
if (LOG.isTraceEnabled()) {
LOG.trace("ignored", e);
}
} else {
synchronized(this) {
m2(e);
}
}
}
} | 3.26 |
hbase_BlockingRpcConnection_m2_rdh | // close socket, reader, and clean up all pending calls.
  private void m2(IOException e) {
    if (thread == null) {
return;
}
thread.interrupt();
thread = null;
m1();
if (callSender != null) {
callSender.cleanup(e);
}
    for (Call call : calls.values()) {
      call.setException(e);
}
calls.clear();
} | 3.26 |
hbase_BlockingRpcConnection_shutdown_rdh | // release all resources, the connection will not be used any more.
@Override
public synchronized void shutdown() {
closed = true;
if (callSender != null) {
callSender.interrupt();
}
m2(new IOException(("connection to " + remoteId.getAddress()) + " closed"));
} | 3.26 |
hbase_BlockingRpcConnection_cleanup_rdh | /**
* Cleans the call not yet sent when we finish.
*/
public void cleanup(IOException e) {
IOException ie = new ConnectionClosingException(("Connection to " + remoteId.getAddress()) + " is closing.");
for (Call call : callsToWrite) {
call.setException(ie);
}
callsToWrite.clear();
} | 3.26 |
hbase_BlockingRpcConnection_m1_rdh | // just close socket input and output.
private void m1() {
IOUtils.closeStream(out);
IOUtils.closeStream(in);
IOUtils.closeSocket(socket);
out = null;
in = null;
socket = null;
} | 3.26 |
hbase_BlockingRpcConnection_setupConnection_rdh | // protected for write UT.
protected void setupConnection() throws IOException {
short ioFailures = 0;
short timeoutFailures = 0;
while (true) {
try {
this.socket = this.rpcClient.socketFactory.createSocket();
        this.socket.setTcpNoDelay(this.rpcClient.isTcpNoDelay());
        this.socket.setKeepAlive(this.rpcClient.tcpKeepAlive);
if (this.rpcClient.localAddr != null) {
this.socket.bind(this.rpcClient.localAddr);
}
InetSocketAddress remoteAddr = getRemoteInetAddress(rpcClient.metrics);
NetUtils.connect(this.socket, remoteAddr, this.rpcClient.connectTO);
this.socket.setSoTimeout(this.rpcClient.readTO);
return;
      } catch (SocketTimeoutException toe) {
/* The max number of retries is 45, which amounts to 20s*45 = 15 minutes retries. */
if (LOG.isDebugEnabled()) {
LOG.debug("Received exception in connection setup.\n" + StringUtils.stringifyException(toe));
}
handleConnectionFailure(timeoutFailures++, this.rpcClient.maxRetries, toe);
} catch (IOException ie) {
if (LOG.isDebugEnabled()) {
LOG.debug("Received exception in connection setup.\n" + StringUtils.stringifyException(ie));
}
handleConnectionFailure(ioFailures++, this.rpcClient.maxRetries, ie);
}
}
} | 3.26 |
hbase_BlockingRpcConnection_run_rdh | /**
* Reads the call from the queue, write them on the socket.
*/
@Override
public void run() {
synchronized(BlockingRpcConnection.this) {
while (!closed) {
if (callsToWrite.isEmpty()) {
// We should use another monitor object here for better performance since the read
// thread also uses ConnectionImpl.this. But this makes the locking schema more
// complicated, can do it later as an optimization.
try {
BlockingRpcConnection.this.wait();
} catch (InterruptedException e) {
// Restore interrupt status
Thread.currentThread().interrupt();
}
// check if we need to quit, so continue the main loop instead of fallback.
continue;
}
Call call = callsToWrite.poll();
if (call.isDone()) {
continue;
}
try (Scope scope = call.span.makeCurrent()) {
writeRequest(call);
} catch (IOException e) {
// exception here means the call has not been added to the pendingCalls yet, so we need
// to fail it by our own.
LOG.debug("call write error for {}", call.toShortString());
call.setException(e);
m2(e);
}
}
    }
  } | 3.26 |
hbase_BlockingRpcConnection_writeRequest_rdh | /**
* Initiates a call by sending the parameter to the remote server. Note: this is not called from
* the Connection thread, but by other threads.
*
* @see #readResponse()
*/
private void writeRequest(Call call) throws IOException {
ByteBuf cellBlock = null;
try {
cellBlock = this.rpcClient.cellBlockBuilder.buildCellBlock(this.codec, this.compressor, call.cells, PooledByteBufAllocator.DEFAULT);
CellBlockMeta cellBlockMeta;
if (cellBlock != null) {
cellBlockMeta = CellBlockMeta.newBuilder().setLength(cellBlock.readableBytes()).build();
} else {
        cellBlockMeta = null;
}
RequestHeader v32 = buildRequestHeader(call, cellBlockMeta);
setupIOstreams();
// Now we're going to write the call. We take the lock, then check that the connection
// is still valid, and, if so we do the write to the socket. If the write fails, we don't
// know where we stand, we have to close the connection.
if (Thread.interrupted()) {
throw new InterruptedIOException();
}
calls.put(call.id, call);// We put first as we don't want the connection to become idle.
// from here, we do not throw any exception to upper layer as the call has been tracked in
// the pending calls map.
try {
call.callStats.setRequestSizeBytes(write(this.out, v32, call.param, cellBlock));
} catch (Throwable t) {
if (LOG.isTraceEnabled()) {
LOG.trace("Error while writing {}", call.toShortString());
}
IOException e = IPCUtil.toIOE(t);
m2(e);
return;
      }
    } finally {
if (cellBlock != null) {
cellBlock.release();
}
}
notifyAll();
} | 3.26 |
hbase_BlockingRpcConnection_writeConnectionHeader_rdh | /**
* Write the connection header.
*/
private void writeConnectionHeader() throws IOException {
boolean isCryptoAesEnable = false;
// check if Crypto AES is enabled
if (saslRpcClient != null) {
boolean saslEncryptionEnabled = QualityOfProtection.PRIVACY.getSaslQop().equalsIgnoreCase(saslRpcClient.getSaslQOP());
isCryptoAesEnable = saslEncryptionEnabled && conf.getBoolean(CRYPTO_AES_ENABLED_KEY, CRYPTO_AES_ENABLED_DEFAULT);
}
// if Crypto AES is enabled, set transformation and negotiate with server
if (isCryptoAesEnable) {
      waitingConnectionHeaderResponse = true;
}
this.out.write(connectionHeaderWithLength);
this.out.flush();
} | 3.26 |
hbase_BlockingRpcConnection_handleSaslConnectionFailure_rdh | /**
* If multiple clients with the same principal try to connect to the same server at the same time,
* the server assumes a replay attack is in progress. This is a feature of kerberos. In order to
* work around this, what is done is that the client backs off randomly and tries to initiate the
* connection again. The other problem is to do with ticket expiry. To handle that, a relogin is
* attempted.
* <p>
* The retry logic is governed by the {@link SaslClientAuthenticationProvider#canRetry()} method.
* Some providers have the ability to obtain new credentials and then re-attempt to authenticate
* with HBase services. Other providers will continue to fail if they failed the first time -- for
* those, we want to fail-fast.
* </p>
*/
  private void handleSaslConnectionFailure(final int currRetries, final int maxRetries, final Exception ex, final UserGroupInformation user) throws IOException, InterruptedException {
m1();
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws IOException, InterruptedException {
// A provider which failed authentication, but doesn't have the ability to relogin with
// some external system (e.g. username/password, the password either works or it doesn't)
if (!provider.canRetry()) {
LOG.warn("Exception encountered while connecting to the server " + remoteId.getAddress(), ex);
if (ex instanceof RemoteException) {
throw ((RemoteException) (ex));
}
if (ex instanceof SaslException) {
String msg = "SASL authentication failed." + " The most likely cause is missing or invalid credentials.";
throw new RuntimeException(msg, ex);
}
throw new IOException(ex);
}
// Other providers, like kerberos, could request a new ticket from a keytab. Let
// them try again.
if (currRetries < maxRetries) {
LOG.debug("Exception encountered while connecting to the server " + remoteId.getAddress(), ex);
// Invoke the provider to perform the relogin
provider.relogin();
// Get rid of any old state on the SaslClient
disposeSasl();
// have granularity of milliseconds
// we are sleeping with the Connection lock held but since this
// connection instance is being used for connecting to the server
// in question, it is okay
Thread.sleep(ThreadLocalRandom.current().nextInt(reloginMaxBackoff) + 1);
return null;
} else {
String msg = (("Failed to initiate connection for " + UserGroupInformation.getLoginUser().getUserName()) + " to ") + securityInfo.getServerPrincipal();
throw new IOException(msg, ex);
}
}
});
} | 3.26 |
hbase_SaslClientAuthenticationProvider_relogin_rdh | /**
* Executes any necessary logic to re-login the client. Not all implementations will have any
* logic that needs to be executed.
*/
default void relogin() throws IOException {
} | 3.26 |
hbase_SaslClientAuthenticationProvider_getRealUser_rdh | /**
* Returns the "real" user, the user who has the credentials being authenticated by the remote
* service, in the form of an {@link UserGroupInformation} object. It is common in the Hadoop
* "world" to have distinct notions of a "real" user and a "proxy" user. A "real" user is the user
* which actually has the credentials (often, a Kerberos ticket), but some code may be running as
* some other user who has no credentials. This method gives the authentication provider a chance
* to acknowledge this is happening and ensure that any RPCs are executed with the real user's
* credentials, because executing them as the proxy user would result in failure because no
* credentials exist to authenticate the RPC. Not all implementations will need to implement this
* method. By default, the provided User's UGI is returned directly.
*/
default UserGroupInformation getRealUser(User ugi) {
return ugi.getUGI();
} | 3.26 |
hbase_SaslClientAuthenticationProvider_canRetry_rdh | /**
* Returns true if the implementation is capable of performing some action which may allow a
* failed authentication to become a successful authentication. Otherwise, returns false
*/
default boolean canRetry() {
return false;
} | 3.26 |
hbase_LzmaCodec_getLevel_rdh | // Package private
static int getLevel(Configuration conf) {
return conf.getInt(LZMA_LEVEL_KEY, LZMA_LEVEL_DEFAULT);
} | 3.26 |
hbase_ZKVisibilityLabelWatcher_writeToZookeeper_rdh | /**
* Write a labels mirror or user auths mirror into zookeeper
*
* @param labelsOrUserAuths
* true for writing labels and false for user auths.
*/
public void writeToZookeeper(byte[] data, boolean labelsOrUserAuths) {
String znode = this.labelZnode;
    if (!labelsOrUserAuths) {
      znode = this.userAuthsZnode;
}
try {
ZKUtil.updateExistingNodeData(watcher, znode, data, -1);
} catch (KeeperException e) {
LOG.error("Failed writing to " + znode, e);
watcher.abort(("Failed writing node " + znode) + " to zookeeper", e);
}
} | 3.26 |
hbase_TableDescriptor_matchReplicationScope_rdh | /**
* Check if the table's cfs' replication scope matched with the replication state
*
* @param enabled
* replication state
* @return true if matched, otherwise false
*/
default boolean matchReplicationScope(boolean enabled) {
boolean hasEnabled = false;
boolean hasDisabled = false;
    for (ColumnFamilyDescriptor cf : getColumnFamilies()) {
if (cf.getScope() != HConstants.REPLICATION_SCOPE_GLOBAL) {
hasDisabled = true;
} else {
hasEnabled = true;
}
}
    if (hasEnabled && hasDisabled) {
return false;
}
if (hasEnabled) {
return enabled;
}
return !enabled;
} | 3.26 |
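The scope-matching rule above can be shown with a small descriptor. A sketch under assumptions (the table and family names are arbitrary), using the standard builder APIs:

```java
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class ReplicationScopeSketch {
  // A descriptor whose only family has GLOBAL scope matches the "enabled"
  // replication state and does not match the "disabled" state.
  static void check() {
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
      .build();
    assert td.matchReplicationScope(true);
    assert !td.matchReplicationScope(false);
  }
}
```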
hbase_TableDescriptor_hasGlobalReplicationScope_rdh | /**
* Check if any of the table's cfs' replication scope are set to
* {@link HConstants#REPLICATION_SCOPE_GLOBAL}.
*
* @return {@code true} if we have, otherwise {@code false}.
*/
default boolean hasGlobalReplicationScope() {
    return Stream.of(getColumnFamilies()).anyMatch(cf -> cf.getScope() == HConstants.REPLICATION_SCOPE_GLOBAL);
} | 3.26 |
hbase_OperationWithAttributes_getId_rdh | /**
* This method allows you to retrieve the identifier for the operation if one was set.
*
* @return the id or null if not set
*/
public String getId() {
byte[] attr = getAttribute(ID_ATRIBUTE);
    return attr == null ? null : Bytes.toString(attr);
} | 3.26 |
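A quick sketch of setting and reading back the operation id via the inherited `setId`/`getId` pair; the wrapper class, row key and id value are arbitrary examples:

```java
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.util.Bytes;

public class OperationIdSketch {
  // Attaches a client-chosen identifier to an operation and reads it back.
  static void tagOperation() {
    Get get = new Get(Bytes.toBytes("row-1"));
    get.setId("audit-42");
    assert "audit-42".equals(get.getId());
  }
}
```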
hbase_TimeoutExceptionInjector_trigger_rdh | /**
* Trigger the timer immediately.
* <p>
* Exposed for testing.
*/
public void trigger() {
synchronized(timerTask) {
      if (this.complete) {
LOG.warn("Timer already completed, not triggering.");
return;
}
LOG.debug("Triggering timer immediately!");
this.timer.cancel();
this.timerTask.run();
}
} | 3.26 |
hbase_TimeoutExceptionInjector_start_rdh | /**
* Start a timer to fail a process if it takes longer than the expected time to complete.
* <p>
* Non-blocking.
*
* @throws IllegalStateException
* if the timer has already been marked done via {@link #complete()}
* or {@link #trigger()}
*/
public synchronized void start() throws IllegalStateException {
if (this.start >= 0) {
LOG.warn("Timer already started, can't be started again. Ignoring second request.");
return;
}
LOG.debug(("Scheduling process timer to run in: " + maxTime) + " ms");
timer.schedule(timerTask, maxTime);
this.start = EnvironmentEdgeManager.currentTime();
} | 3.26 |
hbase_TimeoutExceptionInjector_complete_rdh | /**
* For all time forward, do not throw an error because the process has completed.
*/
public void complete() {
synchronized(this.timerTask) {
if (this.complete) {
LOG.warn("Timer already marked completed, ignoring!");
        return;
      }
if (LOG.isDebugEnabled()) {
LOG.debug("Marking timer as complete - no error notifications will be received for " + "this timer.");
}
this.complete = true;
}
this.timer.cancel();
} | 3.26 |
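The start()/complete()/trigger() lifecycle is clearer with a sketch. Assumptions: the listener implementation, the 60-second limit, and the constructor taking a listener plus a max time in milliseconds are all illustrative; only the lifecycle methods come from the snippets above:

```java
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionListener;
import org.apache.hadoop.hbase.errorhandling.TimeoutExceptionInjector;

public class TimeoutInjectorSketch {
  // The listener only receives a timeout error if complete() is not called
  // before the configured maximum time elapses.
  static void guardedWork(Runnable work) {
    ForeignExceptionListener listener =
      e -> System.err.println("operation timed out: " + e.getMessage());
    TimeoutExceptionInjector timer = new TimeoutExceptionInjector(listener, 60_000);
    timer.start();
    try {
      work.run();
    } finally {
      timer.complete(); // suppress the timeout once the work is done
    }
  }
}
```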
hbase_WALProcedureMap_merge_rdh | /**
* Merge the given {@link WALProcedureMap} into this one. The {@link WALProcedureMap} passed in
* will be cleared after merging.
 */
public void merge(WALProcedureMap other) {
    other.procMap.forEach(procMap::putIfAbsent);
    maxModifiedProcId = Math.max(maxModifiedProcId, other.maxModifiedProcId);
    minModifiedProcId = Math.min(minModifiedProcId, other.minModifiedProcId);
other.procMap.clear();
other.maxModifiedProcId = Long.MIN_VALUE;
other.minModifiedProcId = Long.MAX_VALUE;
} | 3.26 |
hbase_HttpDoAsClient_bytes_rdh | // Helper to translate strings to UTF8 bytes
private byte[] bytes(String s) {
return Bytes.toBytes(s);
} | 3.26 |
hbase_AbstractRpcClient_configureRpcController_rdh | /**
* Configure an rpc controller
*
* @param controller
* to configure
* @return configured rpc controller
*/
protected HBaseRpcController configureRpcController(RpcController controller) {
HBaseRpcController hrc;
// TODO: Ideally we should not use an RpcController other than HBaseRpcController at client
// side. And now we may use ServerRpcController.
    if ((controller != null) && (controller instanceof HBaseRpcController)) {
hrc = ((HBaseRpcController) (controller));
if (!hrc.hasCallTimeout()) {
hrc.setCallTimeout(rpcTimeout);
}
} else {
hrc = new HBaseRpcControllerImpl();
hrc.setCallTimeout(rpcTimeout);
}
return hrc;
} | 3.26 |
hbase_AbstractRpcClient_isTcpNoDelay_rdh | // for writing tests that want to throw exception when connecting.
protected boolean isTcpNoDelay() {
return tcpNoDelay;
} | 3.26 |
hbase_AbstractRpcClient_getConnection_rdh | /**
* Get a connection from the pool, or create a new one and add it to the pool. Connections to a
* given host/port are reused.
 */
private T getConnection(ConnectionId remoteId) throws IOException {
if (failedServers.isFailedServer(remoteId.getAddress())) {
if (LOG.isDebugEnabled()) {
LOG.debug(("Not trying to connect to " + remoteId.getAddress()) + " this server is in the failed servers list");
}
throw new FailedServerException("This server is in the failed servers list: " + remoteId.getAddress());
}
T conn;
synchronized(connections) {
if (!running) {
throw new StoppedRpcClientException();
}
conn = connections.getOrCreate(remoteId, () -> createConnection(remoteId));
conn.setLastTouched(EnvironmentEdgeManager.currentTime());
}
return conn;
} | 3.26 |
hbase_AbstractRpcClient_getCompressor_rdh | /**
* Encapsulate the ugly casting and RuntimeException conversion in private method.
*
* @param conf
* configuration
* @return The compressor to use on this client.
*/
private static CompressionCodec getCompressor(final Configuration conf) {
String className = conf.get("hbase.client.rpc.compressor", null);
if ((className == null) || className.isEmpty()) {
return null;
}
try {
return Class.forName(className).asSubclass(CompressionCodec.class).getDeclaredConstructor().newInstance();
} catch (Exception e) {
throw new RuntimeException("Failed getting compressor " + className, e);
}
}
/**
* Return the pool type specified in the configuration, which must be set to either
* {@link org.apache.hadoop.hbase.util.PoolMap.PoolType#RoundRobin} or
* {@link org.apache.hadoop.hbase.util.PoolMap.PoolType#ThreadLocal}, otherwise default to the
* former. For applications with many user threads, use a small round-robin pool. For applications
* with few user threads, you may want to try using a thread-local pool. In any case, the number
* of {@link org.apache.hadoop.hbase.ipc.RpcClient} instances should not exceed the operating
* system's hard limit on the number of connections.
*
* @param config
* configuration
* @return either a {@link org.apache.hadoop.hbase.util.PoolMap.PoolType#RoundRobin} or
{@link org.apache.hadoop.hbase.util.PoolMap.PoolType#ThreadLocal} | 3.26 |
hbase_AbstractRpcClient_configureHBaseRpcController_rdh | /**
 * Configure an HBase rpc controller
*
* @param controller
* to configure
* @param channelOperationTimeout
* timeout for operation
* @return configured controller
*/
static HBaseRpcController configureHBaseRpcController(RpcController controller, int channelOperationTimeout) {
HBaseRpcController hrc;
if ((controller != null) && (controller instanceof HBaseRpcController)) {
hrc = ((HBaseRpcController) (controller));
if (!hrc.hasCallTimeout()) {
hrc.setCallTimeout(channelOperationTimeout);
}
} else {
hrc = new HBaseRpcControllerImpl();
hrc.setCallTimeout(channelOperationTimeout);
}
return hrc;
} | 3.26 |
hbase_AbstractRpcClient_getCodec_rdh | /**
* Encapsulate the ugly casting and RuntimeException conversion in private method.
*
* @return Codec to use on this client.
*/
  protected Codec getCodec() {
    // For NO CODEC, "hbase.client.rpc.codec" must be configured with empty string AND
    // "hbase.client.default.rpc.codec" also -- because default is to do cell block encoding.
    String className = conf.get(HConstants.RPC_CODEC_CONF_KEY, getDefaultCodec(this.conf));
    if ((className == null) || (className.length() == 0)) {
      return null;
    }
    try {
      return Class.forName(className).asSubclass(Codec.class).getDeclaredConstructor().newInstance();
} catch (Exception e) {
throw new RuntimeException("Failed getting codec " + className, e);
}
} | 3.26 |
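
A short sketch of the configuration implied by the comment inside getCodec above: to turn off cell-block encoding entirely, both the client codec key and the default codec key must be set to the empty string.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class RpcCodecConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // HConstants.RPC_CODEC_CONF_KEY is the "hbase.client.rpc.codec" key used in getCodec above.
    conf.set(HConstants.RPC_CODEC_CONF_KEY, "");
    // Per the comment in getCodec, the default codec key must also be blanked to fully disable
    // cell-block encoding.
    conf.set("hbase.client.default.rpc.codec", "");
    System.out.println(conf.get(HConstants.RPC_CODEC_CONF_KEY, "<unset>"));
  }
}
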
hbase_AbstractRpcClient_cancelConnections_rdh | /**
 * Interrupt the connections to the given ip:port server. This should be called if the server is
 * known to be actually dead. This will not prevent current operations from being retried, and,
 * depending on their own behavior, they may retry on the same server. This can be a feature, for
 * example at startup. In any case, they are likely to get connection refused (if the process
 * died) or no route to host: i.e. their next retries should be faster and with a safe exception.
*/
@Override
  public void cancelConnections(ServerName sn) {
    synchronized(connections) {
      for (T connection : connections.values()) {
        ConnectionId remoteId = connection.remoteId();
        if ((remoteId.getAddress().getPort() == sn.getPort())
          && remoteId.getAddress().getHostName().equals(sn.getHostname())) {
LOG.info((("The server on " + sn.toString()) + " is dead - stopping the connection ") + connection.remoteId);
connections.remove(remoteId, connection);
connection.shutdown();
connection.cleanupConnection();
}
}
}
} | 3.26 |
hbase_AbstractRpcClient_callBlockingMethod_rdh | /**
* Make a blocking call. Throws exceptions if there are network problems or if the remote code
* threw an exception.
*
* @param ticket
* Be careful which ticket you pass. A new user will mean a new Connection.
* {@link UserProvider#getCurrent()} makes a new instance of User each time so will
* be a new Connection each time.
* @return A pair with the Message response and the Cell data (if any).
*/
private Message callBlockingMethod(Descriptors.MethodDescriptor md, HBaseRpcController hrc, Message param, Message returnType, final User ticket, final Address isa) throws ServiceException {
    BlockingRpcCallback<Message> done = new BlockingRpcCallback<>();
callMethod(md, hrc, param, returnType, ticket, isa, done);
Message val;
try {
val = done.get();
} catch (IOException e) {
throw new ServiceException(e);
}
if (hrc.failed()) {
throw new ServiceException(hrc.getFailed());
} else {
return val;
}
} | 3.26 |
hbase_RegionReplicationSink_replicated_rdh | /**
 * Should be called regardless of the result of the replicating operation. Unless you still want
 * to reuse this entry, you must call this method to release the possible off-heap memory.
 */
  void replicated() {
if (rpcCall != null) {
rpcCall.releaseByWAL();
}
} | 3.26 |
hbase_RegionReplicationSink_waitUntilStopped_rdh | /**
* Make sure that we have finished all the replicating requests.
* <p/>
* After returning, we can make sure there will be no new replicating requests to secondary
* replicas.
* <p/>
* This is used to keep the replicating order the same with the WAL edit order when writing.
*/
public void waitUntilStopped() throws InterruptedException {
    synchronized(entries) {
      while (!stopped) {
entries.wait();
}
}
} | 3.26 |
hbase_RegionReplicationSink_stop_rdh | /**
* Stop the replication sink.
* <p/>
* Usually this should only be called when you want to close a region.
*/
public void stop() {
synchronized(entries) {
      stopping = true;
clearAllEntries();
if (!sending) {
stopped = true;
entries.notifyAll();
}
}
} | 3.26 |
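
A hedged sketch of the shutdown order implied by the two javadocs above (stop first, then wait for in-flight replication requests to drain); the helper class, its name, and the import path are assumptions for illustration, and only stop() and waitUntilStopped() come from the snippets.

import org.apache.hadoop.hbase.regionserver.regionreplication.RegionReplicationSink;

final class RegionReplicationSinkCloseSketch {
  // Hypothetical helper: called when closing a region that owns a replication sink.
  static void closeSink(RegionReplicationSink sink) throws InterruptedException {
    sink.stop();             // reject new edits and clear pending entries
    sink.waitUntilStopped(); // block until no replication request is in flight
  }
}
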
hbase_RegionReplicationSink_add_rdh | /**
* Add this edit to replication queue.
* <p/>
* The {@code rpcCall} is for retaining the cells if the edit is built within an rpc call and the
* rpc call has cell scanner, which is off heap.
*/
  public void add(WALKeyImpl key, WALEdit edit, ServerCall<?> rpcCall) {
    if ((!tableDesc.hasRegionMemStoreReplication()) && (!edit.isMetaEdit())) {
// only replicate meta edit if region memstore replication is not enabled
return;
}
synchronized(entries) {
if (stopping) {
return;
}
if (edit.isMetaEdit()) {
// check whether we flushed all stores, which means we could drop all the previous edits,
// and also, recover from the previous failure of some replicas
for (Cell metaCell : edit.getCells()) {
getStartFlushAllDescriptor(metaCell).ifPresent(flushDesc -> {
            long flushSequenceNumber = flushDesc.getFlushSequenceNumber();
lastFlushedSequenceId = flushSequenceNumber;
long clearedCount = entries.size();
long v27 = clearAllEntries();
            if (LOG.isDebugEnabled()) {
              LOG.debug("Got a flush all request with sequence id {}, clear {} pending"
                + " entries with size {}, clear failed replicas {}", flushSequenceNumber, clearedCount,
                StringUtils.TraditionalBinaryPrefix.long2String(v27, "", 1), failedReplicas);
}
failedReplicas.clear();
flushRequester.recordFlush(flushSequenceNumber);
});
}
}
      if (failedReplicas.size() == (regionReplication - 1)) {
// this means we have marked all the replicas as failed, so just give up here
return;
}
      SinkEntry entry = new SinkEntry(key, edit, rpcCall);
entries.add(entry);
pendingSize += entry.size;
if (manager.increase(entry.size)) {
if (!sending) {
send();
}
} else {
// we have run out of the max pending size, drop all the edits, and mark all replicas as
// failed
clearAllEntries();
        for (int replicaId = 1; replicaId < regionReplication; replicaId++) {
failedReplicas.add(replicaId);
}
flushRequester.requestFlush(entry.key.getSequenceId());
}
}
} | 3.26 |
hbase_OrderedInt8_decodeByte_rdh | /**
* Read a {@code byte} value from the buffer {@code src}.
*
* @param src
* the {@link PositionedByteRange} to read the {@code byte} from
* @return the {@code byte} read from the buffer
*/
public byte decodeByte(PositionedByteRange src) {
return OrderedBytes.decodeInt8(src);
} | 3.26 |
hbase_OrderedInt8_encodeByte_rdh | /**
* Write instance {@code val} into buffer {@code dst}.
*
* @param dst
* the {@link PositionedByteRange} to write to
* @param val
* the value to write to {@code dst}
* @return the number of bytes written
*/
public int encodeByte(PositionedByteRange dst, byte val) {
return OrderedBytes.encodeInt8(dst, val, order);
} | 3.26 |
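
A small round-trip sketch for the OrderedInt8 codec above, assuming the ASCENDING singleton and the SimplePositionedMutableByteRange buffer from hbase-common, and assuming the encoded form occupies two bytes (a header byte plus the value).

import org.apache.hadoop.hbase.types.OrderedInt8;
import org.apache.hadoop.hbase.util.PositionedByteRange;
import org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;

public class OrderedInt8RoundTripSketch {
  public static void main(String[] args) {
    PositionedByteRange buf = new SimplePositionedMutableByteRange(2);
    int written = OrderedInt8.ASCENDING.encodeByte(buf, (byte) 42);
    buf.setPosition(0); // rewind before decoding
    byte decoded = OrderedInt8.ASCENDING.decodeByte(buf);
    System.out.println(written + " bytes written, decoded = " + decoded); // expected: 2 bytes, 42
  }
}
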
hbase_Addressing_createInetSocketAddressFromHostAndPortStr_rdh | /**
* Create a socket address
*
* @param hostAndPort
* Formatted as <code><hostname> ':' <port></code>
* @return An InetSocketInstance
*/
public static InetSocketAddress createInetSocketAddressFromHostAndPortStr(final String hostAndPort) {
return new InetSocketAddress(parseHostname(hostAndPort), parsePort(hostAndPort));
} | 3.26 |
hbase_Addressing_parsePort_rdh | /**
* Parse the port portion of a host-and-port string
*
* @param hostAndPort
* Formatted as <code><hostname> ':' <port></code>
* @return The port portion of <code>hostAndPort</code>
*/
  public static int parsePort(final String hostAndPort) {
int v1 = hostAndPort.lastIndexOf(HOSTNAME_PORT_SEPARATOR);
if (v1 < 0) {
throw new IllegalArgumentException("Not a host:port pair: " + hostAndPort);
}
return Integer.parseInt(hostAndPort.substring(v1 + 1));
} | 3.26 |
hbase_Addressing_isLocalAddress_rdh | /**
* Given an InetAddress, checks to see if the address is a local address, by comparing the address
* with all the interfaces on the node.
*
* @param addr
* address to check if it is local node's address
* @return true if the address corresponds to the local node
*/
  public static boolean isLocalAddress(InetAddress addr) {
// Check if the address is any local or loop back
boolean local = addr.isAnyLocalAddress() || addr.isLoopbackAddress();
// Check if the address is defined on any interface
if (!local) {
try {
local = NetworkInterface.getByInetAddress(addr) != null;
      } catch (SocketException e) {
        local = false;
      }
}
return local;
} | 3.26 |
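
A quick sketch exercising isLocalAddress above: the loopback address is local by the first check, while the second address (a documentation-range IP chosen purely for illustration) should normally not be bound to any local interface.

import java.net.InetAddress;
import org.apache.hadoop.hbase.util.Addressing;

public class LocalAddressSketch {
  public static void main(String[] args) throws Exception {
    System.out.println(Addressing.isLocalAddress(InetAddress.getByName("127.0.0.1"))); // true
    System.out.println(Addressing.isLocalAddress(InetAddress.getByName("192.0.2.1"))); // typically false
  }
}
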
hbase_Addressing_inetSocketAddress2String_rdh | /**
 * Given an InetSocketAddress object, returns a String representation of it. This is a utility
 * method for Java 17. The toString() method of InetSocketAddress flags an unresolved address
 * with a marker substring, which can cause unexpected problems. Use this method to obtain the
 * string form when it is not certain whether the input address is resolved.
*
* @param address
* address to convert to a "host:port" String.
* @return the String represent of the given address, like "foo:1234".
*/
public static String inetSocketAddress2String(InetSocketAddress address) {
return address.isUnresolved() ? address.toString().replace("/<unresolved>", "") : address.toString();
} | 3.26 |
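
A brief sketch of the helper above with an unresolved address; the exact toString() form depends on the JDK, but on releases that append the "/<unresolved>" marker the helper strips it.

import java.net.InetSocketAddress;
import org.apache.hadoop.hbase.util.Addressing;

public class SocketAddressToStringSketch {
  public static void main(String[] args) {
    InetSocketAddress unresolved = InetSocketAddress.createUnresolved("foo", 1234);
    System.out.println(Addressing.inetSocketAddress2String(unresolved)); // expected: foo:1234
  }
}
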
hbase_Addressing_parseHostname_rdh | /**
* Parse the hostname portion of a host-and-port string
*
* @param hostAndPort
* Formatted as <code><hostname> ':' <port></code>
* @return The hostname portion of <code>hostAndPort</code>
*/
public static String parseHostname(final String hostAndPort) {
int colonIndex = hostAndPort.lastIndexOf(HOSTNAME_PORT_SEPARATOR);
    if (colonIndex < 0) {
throw new IllegalArgumentException("Not a host:port pair: " + hostAndPort);
}
return hostAndPort.substring(0, colonIndex);
} | 3.26 |
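
A short sketch combining the three Addressing helpers shown in the snippets above; the host and port values are made up for illustration.

import java.net.InetSocketAddress;
import org.apache.hadoop.hbase.util.Addressing;

public class HostAndPortParseSketch {
  public static void main(String[] args) {
    String hostAndPort = "rs1.example.org:16020";
    String host = Addressing.parseHostname(hostAndPort); // "rs1.example.org"
    int port = Addressing.parsePort(hostAndPort);        // 16020
    InetSocketAddress isa = Addressing.createInetSocketAddressFromHostAndPortStr(hostAndPort);
    System.out.println(host + " / " + port + " / " + isa);
  }
}
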
hbase_FlushPolicyFactory_create_rdh | /**
* Create the FlushPolicy configured for the given table.
*/
public static FlushPolicy create(HRegion region, Configuration conf) throws IOException {
Class<? extends FlushPolicy> clazz = m0(region.getTableDescriptor(), conf);
FlushPolicy policy = ReflectionUtils.newInstance(clazz, conf);
policy.configureForRegion(region);
return policy;
} | 3.26 |
hbase_FavoredStochasticBalancer_getRandomGenerator_rdh | /**
 * Returns a candidate generator chosen at random.
*/
@Override
  protected CandidateGenerator getRandomGenerator() {
    return candidateGenerators.get(ThreadLocalRandom.current().nextInt(candidateGenerators.size()));
} | 3.26 |
hbase_FavoredStochasticBalancer_assignRegionToAvailableFavoredNode_rdh | /**
* Assign the region to primary if its available. If both secondary and tertiary are available,
* assign to the host which has less load. Else assign to secondary or tertiary whichever is
* available (in that order).
*/
private void assignRegionToAvailableFavoredNode(Map<ServerName, List<RegionInfo>> assignmentMapForFavoredNodes, RegionInfo region, ServerName primaryHost, ServerName secondaryHost, ServerName tertiaryHost) {
if (primaryHost != null) {
      addRegionToMap(assignmentMapForFavoredNodes, region, primaryHost);
    } else if ((secondaryHost != null) && (tertiaryHost != null)) {
// Assign the region to the one with a lower load (both have the desired hdfs blocks)
ServerName s;
ServerMetrics tertiaryLoad = provider.getLoad(tertiaryHost);
ServerMetrics secondaryLoad = provider.getLoad(secondaryHost);
if ((secondaryLoad != null) && (tertiaryLoad != null)) {
if (secondaryLoad.getRegionMetrics().size() < tertiaryLoad.getRegionMetrics().size()) {
s = secondaryHost;
} else {
s = tertiaryHost;
}
} else {
// We don't have one/more load, lets just choose a random node
        s = ThreadLocalRandom.current().nextBoolean() ? secondaryHost : tertiaryHost;
      }
      addRegionToMap(assignmentMapForFavoredNodes, region, s);
} else if (secondaryHost != null) {
addRegionToMap(assignmentMapForFavoredNodes, region, secondaryHost);
} else if (tertiaryHost != null) {
addRegionToMap(assignmentMapForFavoredNodes, region, tertiaryHost);
} else {
// No favored nodes are online, lets assign to BOGUS server
addRegionToMap(assignmentMapForFavoredNodes, region, BOGUS_SERVER_NAME);
}
} | 3.26 |
hbase_FavoredStochasticBalancer_retainAssignment_rdh | /**
* Reuse BaseLoadBalancer's retainAssignment, but generate favored nodes when its missing.
*/
@Override
@NonNull
public Map<ServerName, List<RegionInfo>> retainAssignment(Map<RegionInfo, ServerName> regions, List<ServerName> servers) throws HBaseIOException {
Map<ServerName, List<RegionInfo>> assignmentMap = Maps.newHashMap();
    Map<ServerName, List<RegionInfo>> result = super.retainAssignment(regions, servers);
if (result.isEmpty()) {
LOG.warn("Nothing to assign to, probably no servers or no regions");
return result;
}
// Lets check if favored nodes info is in META, if not generate now.
FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, getConf());
helper.initialize();
LOG.debug("Generating favored nodes for regions missing them.");
Map<RegionInfo, List<ServerName>> regionFNMap = Maps.newHashMap();
try {
for (Map.Entry<ServerName, List<RegionInfo>> entry : result.entrySet()) {
ServerName sn = entry.getKey();
ServerName primary = ServerName.valueOf(sn.getHostname(), sn.getPort(), NON_STARTCODE);
for (RegionInfo hri : entry.getValue()) {
        if (FavoredNodesManager.isFavoredNodeApplicable(hri)) {
          List<ServerName> favoredNodes = fnm.getFavoredNodes(hri);
          if ((favoredNodes == null) || (favoredNodes.size() < FAVORED_NODES_NUM)) {
            LOG.debug("Generating favored nodes for: " + hri + " with primary: " + primary);
            ServerName[] secondaryAndTertiaryNodes = helper.getSecondaryAndTertiary(hri, primary);
            if ((secondaryAndTertiaryNodes != null) && (secondaryAndTertiaryNodes.length == 2)) {
              List<ServerName> newFavoredNodes = Lists.newArrayList();
              newFavoredNodes.add(primary);
              newFavoredNodes.add(ServerName.valueOf(secondaryAndTertiaryNodes[0].getHostname(),
                secondaryAndTertiaryNodes[0].getPort(), NON_STARTCODE));
              newFavoredNodes.add(ServerName.valueOf(secondaryAndTertiaryNodes[1].getHostname(),
                secondaryAndTertiaryNodes[1].getPort(), NON_STARTCODE));
              regionFNMap.put(hri, newFavoredNodes);
              addRegionToMap(assignmentMap, hri, sn);
            } else {
              throw new HBaseIOException("Cannot generate secondary/tertiary FN for " + hri
                + " generated " + (secondaryAndTertiaryNodes != null ? secondaryAndTertiaryNodes : " nothing"));
            }
          } else {
            List<ServerName> onlineFN = getOnlineFavoredNodes(servers, favoredNodes);
if (onlineFN.isEmpty()) {
// All favored nodes are dead, lets assign it to BOGUS
addRegionToMap(assignmentMap, hri, BOGUS_SERVER_NAME);
} else // Is primary not on FN? Less likely, but we can still take care of this.
if (FavoredNodesPlan.getFavoredServerPosition(favoredNodes, sn) != null) {
addRegionToMap(assignmentMap, hri, sn);
} else {
ServerName destination = onlineFN.get(ThreadLocalRandom.current().nextInt(onlineFN.size()));
              LOG.warn("Region: " + hri + " not hosted on favored nodes: " + favoredNodes
                + " current: " + sn + " moving to: " + destination);
addRegionToMap(assignmentMap, hri, destination);
}
}
} else {
addRegionToMap(assignmentMap, hri, sn);
}
}
}
if (!regionFNMap.isEmpty()) {
LOG.debug("Updating FN in meta for missing regions, count: " + regionFNMap.size());
fnm.updateFavoredNodes(regionFNMap);
}
} catch (IOException e) {
throw new HBaseIOException("Cannot generate/update FN for regions: " + regionFNMap.keySet());
}
return assignmentMap;
} | 3.26 |
hbase_FavoredStochasticBalancer_roundRobinAssignment_rdh | /**
* Round robin assignment: Segregate the regions into two types: 1. The regions that have favored
* node assignment where at least one of the favored node is still alive. In this case, try to
* adhere to the current favored nodes assignment as much as possible - i.e., if the current
* primary is gone, then make the secondary or tertiary as the new host for the region (based on
* their current load). Note that we don't change the favored node assignments here (even though
* one or more favored node is currently down). That will be done by the admin operations. 2. The
* regions that currently don't have favored node assignments. Generate favored nodes for them and
* then assign. Generate the primary fn in round robin fashion and generate secondary and tertiary
* as per favored nodes constraints.
*/
@Override
  @NonNull
  public Map<ServerName, List<RegionInfo>> roundRobinAssignment(List<RegionInfo> regions, List<ServerName> servers) throws HBaseIOException {
metricsBalancer.incrMiscInvocations();
Map<ServerName, List<RegionInfo>> assignmentMap = new HashMap<>();
if (regions.isEmpty()) {
return assignmentMap;
}
Set<RegionInfo> regionSet = new HashSet<>(regions);
try {
FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
helper.initialize();
Set<RegionInfo> systemRegions = FavoredNodesManager.filterNonFNApplicableRegions(regionSet);
regionSet.removeAll(systemRegions);
// Assign all system regions
Map<ServerName, List<RegionInfo>> systemAssignments = super.roundRobinAssignment(Lists.newArrayList(systemRegions), servers);
// Segregate favored and non-favored nodes regions and assign accordingly.
      Pair<Map<ServerName, List<RegionInfo>>, List<RegionInfo>> segregatedRegions = segregateRegionsAndAssignRegionsWithFavoredNodes(regionSet, servers);
Map<ServerName, List<RegionInfo>> regionsWithFavoredNodesMap = segregatedRegions.getFirst();
Map<ServerName, List<RegionInfo>> regionsWithoutFN = generateFNForRegionsWithoutFN(helper, segregatedRegions.getSecond());
// merge the assignment maps
mergeAssignmentMaps(assignmentMap, systemAssignments);
mergeAssignmentMaps(assignmentMap, regionsWithFavoredNodesMap);
mergeAssignmentMaps(assignmentMap, regionsWithoutFN);
} catch (Exception ex) {
throw new HBaseIOException(("Encountered exception while doing favored-nodes assignment " + ex) + " Falling back to regular assignment", ex);
}
return assignmentMap;
} | 3.26 |
hbase_FavoredStochasticBalancer_generateFavoredNodesForMergedRegion_rdh | /**
* Generate favored nodes for a region during merge. Choose the FN from one of the sources to keep
* it simple.
*/
@Override
public void generateFavoredNodesForMergedRegion(RegionInfo merged, RegionInfo[] mergeParents) throws IOException {
updateFavoredNodesForRegion(merged, fnm.getFavoredNodes(mergeParents[0]));
} | 3.26 |
hbase_FavoredStochasticBalancer_generateFavoredNodesForDaughter_rdh | /**
* Generate Favored Nodes for daughters during region split.
* <p/>
* If the parent does not have FN, regenerates them for the daughters.
* <p/>
* If the parent has FN, inherit two FN from parent for each daughter and generate the remaining.
* The primary FN for both the daughters should be the same as parent. Inherit the secondary FN
* from the parent but keep it different for each daughter. Choose the remaining FN randomly. This
* would give us better distribution over a period of time after enough splits.
*/
@Override
  public void generateFavoredNodesForDaughter(List<ServerName> servers, RegionInfo parent, RegionInfo regionA, RegionInfo regionB) throws IOException {
    Map<RegionInfo, List<ServerName>> result = new HashMap<>();
FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
helper.initialize();
List<ServerName> parentFavoredNodes = fnm.getFavoredNodes(parent);
if (parentFavoredNodes == null) {
LOG.debug(("Unable to find favored nodes for parent, " + parent) + " generating new favored nodes for daughter");
result.put(regionA, helper.generateFavoredNodes(regionA));
result.put(regionB, helper.generateFavoredNodes(regionB));
} else {
// Lets get the primary and secondary from parent for regionA
Set<ServerName> regionAFN = getInheritedFNForDaughter(helper, parentFavoredNodes, PRIMARY, SECONDARY);
result.put(regionA, Lists.newArrayList(regionAFN));
// Lets get the primary and tertiary from parent for regionB
Set<ServerName> regionBFN = getInheritedFNForDaughter(helper, parentFavoredNodes, PRIMARY, TERTIARY);
result.put(regionB, Lists.newArrayList(regionBFN));
}
fnm.updateFavoredNodes(result);
} | 3.26 |
hbase_FavoredStochasticBalancer_randomAssignment_rdh | /**
* If we have favored nodes for a region, we will return one of the FN as destination. If favored
* nodes are not present for a region, we will generate and return one of the FN as destination.
* If we can't generate anything, lets fallback.
*/
@Override
public ServerName randomAssignment(RegionInfo regionInfo, List<ServerName> servers) throws HBaseIOException {
ServerName destination = null;
if (!FavoredNodesManager.isFavoredNodeApplicable(regionInfo)) {
return super.randomAssignment(regionInfo, servers);
}
metricsBalancer.incrMiscInvocations();
Configuration conf = getConf();
List<ServerName> favoredNodes = fnm.getFavoredNodes(regionInfo);
if ((favoredNodes == null) || favoredNodes.isEmpty()) {
// Generate new favored nodes and return primary
FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, conf);
helper.initialize();
try {
favoredNodes = helper.generateFavoredNodes(regionInfo);
updateFavoredNodesForRegion(regionInfo, favoredNodes);
} catch (IOException e) {
LOG.warn("Encountered exception while doing favored-nodes (random)assignment " + e);
        throw new HBaseIOException(e);
}
}
List<ServerName> onlineServers = getOnlineFavoredNodes(servers, favoredNodes);
if (onlineServers.size() > 0) {
destination = onlineServers.get(ThreadLocalRandom.current().nextInt(onlineServers.size()));
}
boolean alwaysAssign = conf.getBoolean(FAVORED_ALWAYS_ASSIGN_REGIONS, true);
if ((destination == null) && alwaysAssign) {
LOG.warn(("Can't generate FN for region: " + regionInfo) + " falling back");
destination = super.randomAssignment(regionInfo, servers);
}
return destination;
} | 3.26 |
hbase_FavoredStochasticBalancer_getOnlineFavoredNodes_rdh | /**
* Return list of favored nodes that are online.
*/
  private List<ServerName> getOnlineFavoredNodes(List<ServerName> onlineServers, List<ServerName> serversWithoutStartCodes) {
if (serversWithoutStartCodes == null) {
return null;
} else {
List<ServerName> result = Lists.newArrayList();
      for (ServerName sn : serversWithoutStartCodes) {
for (ServerName online : onlineServers) {
if (ServerName.isSameAddress(sn, online)) {
result.add(online);
}
}
}
return result;
}
} | 3.26 |
hbase_FavoredStochasticBalancer_segregateRegionsAndAssignRegionsWithFavoredNodes_rdh | /**
* Return a pair - one with assignments when favored nodes are present and another with regions
* without favored nodes.
*/
private Pair<Map<ServerName, List<RegionInfo>>, List<RegionInfo>> segregateRegionsAndAssignRegionsWithFavoredNodes(Collection<RegionInfo> regions, List<ServerName> onlineServers) throws HBaseIOException {
// Since we expect FN to be present most of the time, lets create map with same size
Map<ServerName, List<RegionInfo>> assignmentMapForFavoredNodes = new HashMap<>(onlineServers.size());
    List<RegionInfo> regionsWithNoFavoredNodes = new ArrayList<>();
for (RegionInfo region : regions) {
List<ServerName> favoredNodes = fnm.getFavoredNodes(region);
ServerName primaryHost = null;
ServerName v19 = null;
ServerName tertiaryHost = null;
if ((favoredNodes != null) && (!favoredNodes.isEmpty())) {
for (ServerName s : favoredNodes) {
ServerName serverWithLegitStartCode = getServerFromFavoredNode(onlineServers, s);
        if (serverWithLegitStartCode != null) {
FavoredNodesPlan.Position position = FavoredNodesPlan.getFavoredServerPosition(favoredNodes, s);
if (Position.PRIMARY.equals(position)) {
primaryHost = serverWithLegitStartCode;
} else if (Position.SECONDARY.equals(position)) {
v19 = serverWithLegitStartCode;
} else if (Position.TERTIARY.equals(position)) {
tertiaryHost = serverWithLegitStartCode;
}
}
}
assignRegionToAvailableFavoredNode(assignmentMapForFavoredNodes, region, primaryHost, v19, tertiaryHost);
} else {
regionsWithNoFavoredNodes.add(region);
}
}
    return new Pair<>(assignmentMapForFavoredNodes, regionsWithNoFavoredNodes);
} | 3.26 |
hbase_FavoredStochasticBalancer_balanceTable_rdh | /**
* For all regions correctly assigned to favored nodes, we just use the stochastic balancer
* implementation. For the misplaced regions, we assign a bogus server to it and AM takes care.
*/
@Override
protected List<RegionPlan> balanceTable(TableName tableName, Map<ServerName, List<RegionInfo>> loadOfOneTable) {
List<RegionPlan> regionPlans = Lists.newArrayList();
Map<ServerName, List<RegionInfo>> correctAssignments = new HashMap<>();
int misplacedRegions = 0;
for (Map.Entry<ServerName, List<RegionInfo>> entry : loadOfOneTable.entrySet()) {
ServerName current = entry.getKey();
List<RegionInfo> regions = Lists.newArrayList();
correctAssignments.put(current, regions);
for (RegionInfo v91 : entry.getValue()) {
List<ServerName> favoredNodes = fnm.getFavoredNodes(v91);
if ((FavoredNodesPlan.getFavoredServerPosition(favoredNodes, current) != null) || (!FavoredNodesManager.isFavoredNodeApplicable(v91))) {
regions.add(v91);
} else {
// No favored nodes, lets unassign.
          LOG.warn("Region not on favored nodes, unassign. Region: " + v91 + " current: " + current
            + " favored nodes: " + favoredNodes);
try {
provider.unassign(v91);
} catch (IOException e) {
LOG.warn("Failed unassign", e);
continue;
}
          RegionPlan rp = new RegionPlan(v91, null, null);
regionPlans.add(rp);
misplacedRegions++;
}
}
}
LOG.debug(("Found misplaced regions: " + misplacedRegions) + ", not on favored nodes.");
List<RegionPlan> regionPlansFromBalance = super.balanceTable(tableName, correctAssignments);
if (regionPlansFromBalance != null) {
regionPlans.addAll(regionPlansFromBalance);
}
return regionPlans;
} | 3.26 |
hbase_MasterProcedureUtil_getTablePriority_rdh | /**
* Return the priority for the given table. Now meta table is 3, other system tables are 2, and
* user tables are 1.
*/
public static int getTablePriority(TableName tableName) {
if (TableName.isMetaTableName(tableName)) {
return 3;
} else if (tableName.isSystemTable()) {
return 2;
} else {
return 1;
}
} | 3.26 |
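
A tiny sketch of the priority mapping above; the user table name and the choice of "hbase:acl" as a non-meta system table are illustrative.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;

public class TablePrioritySketch {
  public static void main(String[] args) {
    System.out.println(MasterProcedureUtil.getTablePriority(TableName.META_TABLE_NAME));      // 3
    System.out.println(MasterProcedureUtil.getTablePriority(TableName.valueOf("hbase:acl"))); // 2
    System.out.println(MasterProcedureUtil.getTablePriority(TableName.valueOf("my_table")));  // 1
  }
}
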