name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, 3.26–3.68)
---|---|---|
hbase_MetaTableLocator_getMetaRegionLocation_rdh | /**
* Gets the meta region location, if available. Does not block.
*
* @param zkw
* reference to the {@link ZKWatcher} which also contains configuration and
* operation
* @param replicaId
* the ID of the replica
* @return server name
*/
@RestrictedApi(explanation = "Should only be called in self or ZKUtil", link = "", allowedOnPath = ".*(MetaTableLocator|ZKDump)\\.java")
public static ServerName getMetaRegionLocation(final ZKWatcher zkw, int replicaId) {
try {
RegionState state = getMetaRegionState(zkw, replicaId);
return state.isOpened() ? state.getServerName() : null;
  } catch (KeeperException ke) {
return null;
}
}
/**
* Gets the meta region location, if available, and waits for up to the specified timeout if not
* immediately available. Given the zookeeper notification could be delayed, we will try to get
* the latest data.
*
* @param zkw
* reference to the {@link ZKWatcher} which also contains configuration and
* operation
* @param timeout
* maximum time to wait, in millis
* @return server name for server hosting meta region formatted as per {@link ServerName} | 3.26 |
hbase_MetaTableLocator_getMetaRegionState_rdh | /**
* Load the meta region state from the meta region server ZNode.
*
* @param zkw
* reference to the {@link ZKWatcher} which also contains configuration and
* operation
* @param replicaId
 *          the ID of the replica
 * @return the meta region state, or {@code null} if it could not be loaded
 * @throws KeeperException
 *           if a ZooKeeper operation fails
 */
public static RegionState getMetaRegionState(ZKWatcher zkw, int replicaId) throws KeeperException {
RegionState regionState = null;
try {
byte[] data = ZKUtil.getData(zkw, zkw.getZNodePaths().getZNodeForReplica(replicaId));
regionState = ProtobufUtil.parseMetaRegionStateFrom(data, replicaId);
    } catch (DeserializationException e) {
      throw ZKUtil.convert(e);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
return regionState;
} | 3.26 |
hbase_MetaTableLocator_deleteMetaLocation_rdh | /**
* Deletes the location of <code>hbase:meta</code> in ZooKeeper.
*
* @param zookeeper
* zookeeper reference
* @throws KeeperException
* unexpected zookeeper exception
*/
@RestrictedApi(explanation = "Should only be called in tests", link = "", allowedOnPath = ".*/src/test/.*")
public static void deleteMetaLocation(ZKWatcher zookeeper) throws KeeperException {
deleteMetaLocation(zookeeper, RegionInfo.DEFAULT_REPLICA_ID);
} | 3.26 |
hbase_CompactionProgress_getTotalCompactingKVs_rdh | /**
* Returns the total compacting key values in currently running compaction
*/
public long getTotalCompactingKVs() {
if (totalCompactingKVs < currentCompactedKVs) {
LOG.debug("totalCompactingKVs={} less than currentCompactedKVs={}", totalCompactingKVs, currentCompactedKVs);
return currentCompactedKVs;
}
return totalCompactingKVs;
} | 3.26 |
hbase_CompactionProgress_getTotalCompactedSize_rdh | /**
* Returns the total data size processed by the currently running compaction, in bytes
*/
public long getTotalCompactedSize() {
return totalCompactedSize;
} | 3.26 |
hbase_CompactionProgress_getCurrentCompactedKvs_rdh | /**
* Returns the completed count of key values in currently running compaction
*/
public long getCurrentCompactedKvs() {
    return currentCompactedKVs;
  } | 3.26 |
hbase_CompactionProgress_complete_rdh | /**
* Marks the compaction as complete by setting total to current KV count; Total KV count is an
* estimate, so there might be a discrepancy otherwise.
*/
public void complete() {
this.totalCompactingKVs = this.currentCompactedKVs;
} | 3.26 |
hbase_CompactionProgress_cancel_rdh | /**
* Cancels the compaction progress, setting things to 0.
*/
public void cancel() {
this.currentCompactedKVs = this.totalCompactingKVs = 0;
} | 3.26 |
hbase_CompactionProgress_getProgressPct_rdh | /**
 * Getter for the calculated percent complete (returned as a fraction between 0 and 1)
*/
public float getProgressPct() {
return ((float) (currentCompactedKVs)) / getTotalCompactingKVs();
} | 3.26 |
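The CompactionProgress accessors above amount to simple arithmetic over two counters. As a quick illustration, here is a minimal, self-contained sketch of the same progress calculation; the class and field names are stand-ins, not the HBase API:

```java
// Minimal sketch of the compaction-progress arithmetic shown above.
// The class name and fields are illustrative stand-ins, not HBase classes.
public class ProgressSketch {
    private long totalCompactingKVs = 1_000_000L; // estimated total
    private long currentCompactedKVs = 250_000L;  // completed so far

    // Mirror of getTotalCompactingKVs(): never report a total below the completed count.
    long total() {
        return Math.max(totalCompactingKVs, currentCompactedKVs);
    }

    // Mirror of getProgressPct(): fraction complete in [0, 1].
    float progressPct() {
        return (float) currentCompactedKVs / total();
    }

    public static void main(String[] args) {
        ProgressSketch p = new ProgressSketch();
        System.out.printf("%.1f%% complete%n", p.progressPct() * 100); // prints 25.0% complete
    }
}
```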
hbase_AbstractStateMachineTableProcedure_checkTableModifiable_rdh | /**
* Check whether a table is modifiable - exists and either offline or online with config set
*
* @param env
* MasterProcedureEnv
*/
protected void checkTableModifiable(final MasterProcedureEnv env) throws IOException {
// Checks whether the table exists
if (!env.getMasterServices().getTableDescriptors().exists(getTableName())) {
throw new TableNotFoundException(getTableName());
}
} | 3.26 |
hbase_AbstractStateMachineTableProcedure_checkOnline_rdh | /**
* Check region is online.
*/
protected static void checkOnline(MasterProcedureEnv env, RegionInfo ri)
throws DoNotRetryRegionException {
RegionStateNode regionNode = env.getAssignmentManager().getRegionStates().getRegionStateNode(ri);
if (regionNode == null) {
throw new UnknownRegionException("No RegionState found for " + ri.getEncodedName());
}
regionNode.checkOnline();
} | 3.26 |
hbase_MetricsUserAggregateImpl_getActiveUser_rdh | /**
* Returns the active user to which authorization checks should be applied. If we are in the
* context of an RPC call, the remote user is used, otherwise the currently logged in user is
* used.
*/
  private String getActiveUser() {
Optional<User> user = RpcServer.getRequestUser();
if (!user.isPresent()) {
// for non-rpc handling, fallback to system user
try {
user = Optional.of(userProvider.getCurrent());
} catch (IOException ignore) {
}
}
return user.map(User::getShortName).orElse(null);
} | 3.26 |
hbase_BusyRegionSplitPolicy_updateRate_rdh | /**
* Update the blocked request rate based on number of blocked and total write requests in the last
* aggregation window, or since last call to this method, whichever is farthest in time. Uses
* weighted rate calculation based on the previous rate and new data.
*
* @return Updated blocked request rate.
*/
private synchronized float updateRate() {
float aggBlockedRate;
long curTime = EnvironmentEdgeManager.currentTime();
long newBlockedReqs = region.getBlockedRequestsCount();
long newWriteReqs = region.getWriteRequestsCount();
aggBlockedRate = (newBlockedReqs - blockedRequestCount) / ((newWriteReqs - writeRequestCount) + 1.0E-5F);
if ((curTime - prevTime) >= aggregationWindow) {
blockedRate = aggBlockedRate;
prevTime = curTime;
blockedRequestCount = newBlockedReqs;
writeRequestCount = newWriteReqs;
} else if ((curTime - startTime) >= aggregationWindow) {
// Calculate the aggregate blocked rate as the weighted sum of
// previous window's average blocked rate and blocked rate in this window so far.
float timeSlice = (curTime - prevTime) / (aggregationWindow + 0.0F);
aggBlockedRate = ((1 - timeSlice) * blockedRate) + (timeSlice * aggBlockedRate);
} else {
aggBlockedRate = 0.0F;
}
return aggBlockedRate;
} | 3.26 |
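The weighted-rate update above blends the rate observed since the last call with the previous window's rate, in proportion to how much of the aggregation window has elapsed. A standalone sketch of that blend, with made-up numbers and illustrative names rather than HBase configuration values:

```java
// Standalone illustration of the weighted blocked-request rate used above.
// All values are made up; only the formula mirrors the snippet.
public class BlockedRateSketch {
    public static void main(String[] args) {
        float previousWindowRate = 0.10f; // blockedRate from the last full window
        long blockedDelta = 5;            // newBlockedReqs - blockedRequestCount
        long writeDelta = 200;            // newWriteReqs - writeRequestCount
        long elapsedMs = 30_000;          // curTime - prevTime
        long aggregationWindowMs = 120_000;

        // Rate observed since the last call.
        float observedRate = blockedDelta / (writeDelta + 1e-5f);

        // Blend with the previous window's rate, weighted by the elapsed fraction of the window.
        float timeSlice = elapsedMs / (float) aggregationWindowMs;
        float blended = (1 - timeSlice) * previousWindowRate + timeSlice * observedRate;

        System.out.printf("observed=%.4f blended=%.4f%n", observedRate, blended);
    }
}
```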
hbase_CoprocessorBlockingRpcCallback_run_rdh | /**
* Called on completion of the RPC call with the response object, or {@code null} in the case of
* an error.
*
* @param parameter
* the response object or {@code null} if an error occurred
*/
@Override
public void run(R parameter) {
synchronized(this) {
result = parameter;
resultSet = true;
this.notifyAll();
}
} | 3.26 |
hbase_CoprocessorBlockingRpcCallback_get_rdh | /**
* Returns the parameter passed to {@link #run(Object)} or {@code null} if a null value was
* passed. When used asynchronously, this method will block until the {@link #run(Object)} method
* has been called.
*
* @return the response object or {@code null} if no response was passed
*/
public synchronized R get() throws IOException {
while (!resultSet) {
      try {
        this.wait();
} catch (InterruptedException ie) {
InterruptedIOException exception = new InterruptedIOException(ie.getMessage());
exception.initCause(ie);
throw exception;
}
}
return result;
} | 3.26 |
hbase_AsyncMetaRegionLocator_getRegionLocationInCache_rdh | // only used for testing whether we have cached the location for a region.
RegionLocations getRegionLocationInCache() {
return f0.get();
} | 3.26 |
hbase_AsyncMetaRegionLocator_m0_rdh | // only used for testing whether we have cached the location for a table.
int m0() {
RegionLocations locs = f0.get();
    return locs != null ? locs.numNonNullElements() : 0;
  } | 3.26 |
hbase_AsyncMetaRegionLocator_getRegionLocations_rdh | /**
* Get the region locations for meta region. If the location for the given replica is not
* available in the cached locations, then fetch from the HBase cluster.
* <p/>
* The <code>replicaId</code> parameter is important. If the region replication config for meta
* region is changed, then the cached region locations may not have the locations for new
* replicas. If we do not check the location for the given replica, we will always return the
* cached region locations and cause an infinite loop.
*/
CompletableFuture<RegionLocations> getRegionLocations(int replicaId, boolean reload) {
return ConnectionUtils.getOrFetch(f0, metaRelocateFuture, reload, registry::getMetaRegionLocations, locs -> isGood(locs, replicaId), "meta region location");
} | 3.26 |
hbase_FavoredNodesPlan_removeFavoredNodes_rdh | /**
* Remove a favored node assignment
*
 * @return the list of favored region servers for this region, based on the plan
*/
List<ServerName> removeFavoredNodes(RegionInfo region) {
return favoredNodesMap.remove(region.getRegionNameAsString());
} | 3.26 |
hbase_FavoredNodesPlan_getFavoredServerPosition_rdh | /**
* Return the position of the server in the favoredNodes list. Assumes the favoredNodes list is of
* size 3.
*/
public static Position getFavoredServerPosition(List<ServerName> favoredNodes, ServerName server) {
if (((favoredNodes == null) || (server == null)) || (favoredNodes.size() != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM)) {
return null;
}
for (Position p : Position.values()) {
if (ServerName.isSameAddress(favoredNodes.get(p.ordinal()), server)) {
return p;
}
}
return null;
} | 3.26 |
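getFavoredServerPosition() walks a fixed-size favored-nodes list and compares each Position's slot with the given server. A hedged, dependency-free sketch of the same lookup, using plain strings in place of HBase ServerName objects:

```java
// Dependency-free sketch of the "find my position in a fixed-size favored list" idea.
// Uses plain strings for server addresses instead of HBase ServerName objects.
import java.util.Arrays;
import java.util.List;

public class FavoredPositionSketch {
    enum Position { PRIMARY, SECONDARY, TERTIARY }

    static Position positionOf(List<String> favored, String server) {
        // Mirror of the guard in the snippet: expect exactly one entry per Position.
        if (favored == null || server == null || favored.size() != Position.values().length) {
            return null;
        }
        for (Position p : Position.values()) {
            if (favored.get(p.ordinal()).equals(server)) {
                return p;
            }
        }
        return null;
    }

    public static void main(String[] args) {
        List<String> favored = Arrays.asList("rs1:16020", "rs2:16020", "rs3:16020");
        System.out.println(positionOf(favored, "rs2:16020")); // SECONDARY
    }
}
```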
hbase_FavoredNodesPlan_updateFavoredNodesMap_rdh | /**
* Update an assignment to the plan
*/
public void updateFavoredNodesMap(RegionInfo region, List<ServerName> servers) {
if (((region == null) || (servers == null)) || servers.isEmpty()) {
return;
}
this.favoredNodesMap.put(region.getRegionNameAsString(), servers);
} | 3.26 |
hbase_FavoredNodesPlan_getAssignmentMap_rdh | /**
* Return the mapping between each region to its favored region server list.
*/
public Map<String, List<ServerName>> getAssignmentMap() {
// Make a deep copy so changes don't harm our copy of favoredNodesMap.
return this.favoredNodesMap.entrySet().stream().collect(Collectors.toMap(k -> k.getKey(), v -> new ArrayList<ServerName>(v.getValue())));
} | 3.26 |
hbase_FavoredNodesPlan_getFavoredNodes_rdh | /**
 * Returns the list of favored region servers for this region, based on the plan
*/
public List<ServerName> getFavoredNodes(RegionInfo region) {
return favoredNodesMap.get(region.getRegionNameAsString());
} | 3.26 |
hbase_RegionNormalizerFactory_getRegionNormalizer_rdh | /**
* Create a region normalizer from the given conf.
*
* @param conf
* configuration
* @return {@link RegionNormalizer} implementation
*/
private static RegionNormalizer getRegionNormalizer(Configuration conf) {
// Create instance of Region Normalizer
Class<? extends RegionNormalizer> balancerKlass = conf.getClass(HConstants.HBASE_MASTER_NORMALIZER_CLASS, SimpleRegionNormalizer.class, RegionNormalizer.class);
return ReflectionUtils.newInstance(balancerKlass, conf);
} | 3.26 |
hbase_RegionNormalizerFactory_createNormalizerManager_rdh | // TODO: consolidate this down to MasterServices
public static RegionNormalizerManager createNormalizerManager(final Configuration conf, final MasterRegion masterRegion, final ZKWatcher zkWatcher, final HMaster master) throws DeserializationException, IOException, KeeperException {
final RegionNormalizer regionNormalizer = getRegionNormalizer(conf);
regionNormalizer.setMasterServices(master);
final RegionNormalizerStateStore stateStore = new RegionNormalizerStateStore(masterRegion, zkWatcher);
    final RegionNormalizerChore chore = master.isInMaintenanceMode() ? null : new RegionNormalizerChore(master);
    final RegionNormalizerWorkQueue<TableName> workQueue = master.isInMaintenanceMode() ? null : new RegionNormalizerWorkQueue<>();
    final RegionNormalizerWorker worker = master.isInMaintenanceMode() ? null : new RegionNormalizerWorker(conf, master, regionNormalizer, workQueue);
    return new RegionNormalizerManager(stateStore, chore, workQueue, worker);
  } | 3.26 |
hbase_RegionServerSpaceQuotaManager_getRegionSizeStore_rdh | /**
* Returns the {@link RegionSizeStore} tracking filesystem utilization by each region.
*
* @return A {@link RegionSizeStore} implementation.
*/
public RegionSizeStore getRegionSizeStore() {
return regionSizeStore;
} | 3.26 |
hbase_RegionServerSpaceQuotaManager_copyActiveEnforcements_rdh | /**
* Returns the collection of tables which have quota violation policies enforced on this
* RegionServer.
*/
Map<TableName, SpaceViolationPolicyEnforcement> copyActiveEnforcements() {
// Allows reads to happen concurrently (or while the map is being updated)
return new HashMap<>(this.enforcedPolicies);
} | 3.26 |
hbase_RegionServerSpaceQuotaManager_updateQuotaSnapshot_rdh | /**
* Updates the current {@link SpaceQuotaSnapshot}s for the RegionServer.
*
* @param newSnapshots
* The space quota snapshots.
*/
public void updateQuotaSnapshot(Map<TableName, SpaceQuotaSnapshot> newSnapshots) {
currentQuotaSnapshots.set(Objects.requireNonNull(newSnapshots));
} | 3.26 |
hbase_RegionServerSpaceQuotaManager_enforceViolationPolicy_rdh | /**
* Enforces the given violationPolicy on the given table in this RegionServer.
*/
public void enforceViolationPolicy(TableName tableName, SpaceQuotaSnapshot snapshot) {
SpaceQuotaStatus status = snapshot.getQuotaStatus();
if (!status.isInViolation()) {
throw new IllegalStateException(tableName + " is not in violation. Violation policy should not be enabled.");
}
    if (LOG.isTraceEnabled()) {
      LOG.trace("Enabling violation policy enforcement on " + tableName + " with policy " + status.getPolicy());
    }
    // Construct this outside of the lock
final SpaceViolationPolicyEnforcement enforcement = getFactory().create(getRegionServerServices(), tableName, snapshot);
// "Enables" the policy
// HBASE-XXXX: Should this synchronize on the actual table name instead of the map? That would
// allow policy enable/disable on different tables to happen concurrently. As written now, only
// one table will be allowed to transition at a time. This is probably OK, but not sure if
// it would become a bottleneck at large clusters/number of tables.
synchronized(enforcedPolicies) {
try {
enforcement.enable();
} catch (IOException e) {
LOG.error(("Failed to enable space violation policy for " + tableName) + ". This table will not enter violation.", e);
return;
}
enforcedPolicies.put(tableName, enforcement);
}
} | 3.26 |
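enforceViolationPolicy() deliberately constructs the enforcement object before taking the lock and only enables and publishes it inside the synchronized block, keeping the critical section small. A generic sketch of that "build outside, publish under lock" pattern; the Policy type here is illustrative, not the HBase enforcement API:

```java
// Sketch of the "construct outside the lock, publish inside it" pattern used above.
// The Policy type and enable() behavior are illustrative, not the HBase enforcement API.
import java.util.HashMap;
import java.util.Map;

public class PolicyRegistrySketch {
    interface Policy { void enable() throws Exception; }

    private final Map<String, Policy> enforced = new HashMap<>();

    void enforce(String table, Policy candidate) {
        // The candidate is constructed by the caller, before we take the lock.
        synchronized (enforced) {
            try {
                candidate.enable();
            } catch (Exception e) {
                // On failure, leave the previous state untouched (mirrors the early return above).
                System.err.println("Failed to enable policy for " + table + ": " + e);
                return;
            }
            enforced.put(table, candidate);
        }
    }

    public static void main(String[] args) {
        PolicyRegistrySketch registry = new PolicyRegistrySketch();
        registry.enforce("t1", () -> System.out.println("policy enabled for t1"));
    }
}
```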
hbase_RegionServerSpaceQuotaManager_buildFileArchiveRequest_rdh | /**
* Builds the protobuf message to inform the Master of files being archived.
*
* @param tn
* The table the files previously belonged to.
* @param archivedFiles
* The files and their size in bytes that were archived.
* @return The protobuf representation
*/
public FileArchiveNotificationRequest buildFileArchiveRequest(TableName tn, Collection<Entry<String, Long>> archivedFiles) {
RegionServerStatusProtos.FileArchiveNotificationRequest.Builder builder = RegionServerStatusProtos.FileArchiveNotificationRequest.newBuilder();
HBaseProtos.TableName protoTn = ProtobufUtil.toProtoTableName(tn);
for (Entry<String, Long> archivedFile : archivedFiles) {
RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize fws = FileArchiveNotificationRequest.FileWithSize.newBuilder().setName(archivedFile.getKey()).setSize(archivedFile.getValue()).setTableName(protoTn).build();
builder.addArchivedFiles(fws);
}
final RegionServerStatusProtos.FileArchiveNotificationRequest request = builder.build();
if (LOG.isTraceEnabled()) {
LOG.trace("Reporting file archival to Master: " + TextFormat.shortDebugString(request));
}
return request;
} | 3.26 |
hbase_RegionServerSpaceQuotaManager_areCompactionsDisabled_rdh | /**
* Returns whether or not compactions should be disabled for the given <code>tableName</code> per
* a space quota violation policy. A convenience method.
*
* @param tableName
* The table to check
* @return True if compactions should be disabled for the table, false otherwise.
*/
public boolean areCompactionsDisabled(TableName tableName) {
SpaceViolationPolicyEnforcement enforcement = this.enforcedPolicies.get(Objects.requireNonNull(tableName));
if (enforcement != null) {
return enforcement.areCompactionsDisabled();
}
return false;
} | 3.26 |
hbase_RegionServerSpaceQuotaManager_disableViolationPolicyEnforcement_rdh | /**
* Disables enforcement on any violation policy on the given <code>tableName</code>.
*/
  public void disableViolationPolicyEnforcement(TableName tableName) {
    if (LOG.isTraceEnabled()) {
      LOG.trace("Disabling violation policy enforcement on " + tableName);
    }
// "Disables" the policy
synchronized(enforcedPolicies) {
SpaceViolationPolicyEnforcement enforcement = enforcedPolicies.remove(tableName);
if (enforcement != null) {
try {
enforcement.disable();
} catch (IOException e) {
LOG.error(("Failed to disable space violation policy for " + tableName) + ". This table will remain in violation.", e);
enforcedPolicies.put(tableName, enforcement);
}
}
}
} | 3.26 |
hbase_RegionServerSpaceQuotaManager_isStarted_rdh | /**
* Returns if the {@code Chore} has been started.
*/
public boolean isStarted() {
return started;
} | 3.26 |
hbase_RegionServerSpaceQuotaManager_copyQuotaSnapshots_rdh | /**
* Copies the last {@link SpaceQuotaSnapshot}s that were recorded. The current view of what the
* RegionServer thinks the table's utilization is.
*/
public Map<TableName, SpaceQuotaSnapshot> copyQuotaSnapshots() {
return new HashMap<>(currentQuotaSnapshots.get());
} | 3.26 |
hbase_RegionServerSpaceQuotaManager_getActiveEnforcements_rdh | /**
* Creates an object well-suited for the RegionServer to use in verifying active policies.
*/
public ActivePolicyEnforcement getActiveEnforcements() {
return new ActivePolicyEnforcement(copyActiveEnforcements(), copyQuotaSnapshots(), rsServices);
}
/**
* Converts a map of table to {@link SpaceViolationPolicyEnforcement}s into
* {@link SpaceViolationPolicy} | 3.26 |
hbase_ReplicationPeerConfigBuilder_putAllPeerData_rdh | /**
* Sets all of the provided serialized peer configuration data.
*
* @return {@code this}
*/
@InterfaceAudience.Private
default ReplicationPeerConfigBuilder putAllPeerData(Map<byte[], byte[]> peerData) {
peerData.forEach(this::putPeerData);
return this;
} | 3.26 |
hbase_ReplicationPeerConfigBuilder_putAllConfiguration_rdh | /**
* Adds all of the provided "raw" configuration entries to {@code this}.
*
* @param configuration
* A collection of raw configuration entries
* @return {@code this}
*/
@InterfaceAudience.Private
default ReplicationPeerConfigBuilder putAllConfiguration(Map<String, String> configuration) {
configuration.forEach(this::putConfiguration);
return this;
} | 3.26 |
hbase_TableQuotaSnapshotStore_getSnapshotSizesForTable_rdh | /**
* Fetches any serialized snapshot sizes from the quota table for the {@code tn} provided. Any
* malformed records are skipped with a warning printed out.
*/
long getSnapshotSizesForTable(TableName tn) throws IOException {
try (Table quotaTable = conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) {
Scan s = QuotaTableUtil.createScanForSpaceSnapshotSizes(tn);
ResultScanner rs = quotaTable.getScanner(s);
try {
long size = 0L;
// Should just be a single row (for our table)
      for (Result result : rs) {
        // May have multiple columns, one for each snapshot
CellScanner cs = result.cellScanner();
while (cs.advance()) {
Cell current = cs.current();
try {
long snapshotSize = QuotaTableUtil.parseSnapshotSize(current);
if (LOG.isTraceEnabled()) {
LOG.trace((("Saw snapshot size of " + snapshotSize) + " for ")
+ current);
}
size += snapshotSize;
} catch (InvalidProtocolBufferException e) {
LOG.warn("Failed to parse snapshot size from cell: " + current);
}
}
}
return size;
} finally {
if (null != rs) {
          rs.close();
        }
}
}
} | 3.26 |
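getSnapshotSizesForTable() sums one parsed size per cell and skips malformed records with a warning rather than failing the whole scan. The same "sum, skip on parse failure" loop, sketched without HBase dependencies; the string parsing below stands in for QuotaTableUtil.parseSnapshotSize:

```java
// Dependency-free sketch of the "sum values, skip malformed entries" loop above.
// The parse step stands in for QuotaTableUtil.parseSnapshotSize(cell).
import java.util.Arrays;
import java.util.List;

public class SnapshotSizeSumSketch {
    public static void main(String[] args) {
        List<String> rawSizes = Arrays.asList("1024", "2048", "not-a-number", "512");
        long total = 0L;
        for (String raw : rawSizes) {
            try {
                total += Long.parseLong(raw);
            } catch (NumberFormatException e) {
                // Mirrors the warning-and-skip behavior for malformed quota cells.
                System.err.println("Skipping malformed snapshot size: " + raw);
            }
        }
        System.out.println("Total snapshot size: " + total); // 3584
    }
}
```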
hbase_TableQuotaSnapshotStore_getQuotaForTable_rdh | /**
* Fetches the table quota. Visible for mocking/testing.
   */
  Quotas getQuotaForTable(TableName table) throws IOException {
return QuotaTableUtil.getTableQuota(conn, table);
} | 3.26 |
hbase_DataBlockEncodingValidator_validateDBE_rdh | /**
* Check DataBlockEncodings of column families are compatible.
*
* @return number of column families with incompatible DataBlockEncoding
* @throws IOException
* if a remote or network exception occurs
*/
private int validateDBE() throws IOException {
int incompatibilities = 0;
LOG.info("Validating Data Block Encodings");
    try (Connection connection = ConnectionFactory.createConnection(getConf());
      Admin admin = connection.getAdmin()) {
      List<TableDescriptor> tableDescriptors = admin.listTableDescriptors();
String encoding = "";
for (TableDescriptor td : tableDescriptors) {
ColumnFamilyDescriptor[] columnFamilies = td.getColumnFamilies();
for (ColumnFamilyDescriptor cfd : columnFamilies) {
try {
encoding = Bytes.toString(cfd.getValue(DATA_BLOCK_ENCODING));
// IllegalArgumentException will be thrown if encoding is incompatible with 2.0
DataBlockEncoding.valueOf(encoding);
} catch (IllegalArgumentException e) {
incompatibilities++;
LOG.warn("Incompatible DataBlockEncoding for table: {}, cf: {}, encoding: {}", td.getTableName().getNameAsString(), cfd.getNameAsString(), encoding);
}
}
}
}
if (incompatibilities > 0) {
LOG.warn(("There are {} column families with incompatible Data Block Encodings. Do not " + "upgrade until these encodings are converted to a supported one. ") + "Check https://s.apache.org/prefixtree for instructions.", incompatibilities);
    } else {
LOG.info("The used Data Block Encodings are compatible with HBase 2.0.");
}
return incompatibilities;
} | 3.26 |
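The compatibility check above boils down to "does Enum.valueOf accept the stored name". A generic sketch with a plain enum; the encoding names listed are examples, not an authoritative list of HBase encodings:

```java
// Generic sketch of the "is this stored name a valid enum constant?" check used above.
// The encoding names are examples, not an authoritative list of HBase encodings.
public class EncodingCheckSketch {
    enum Encoding { NONE, PREFIX, DIFF, FAST_DIFF }

    static boolean isCompatible(String name) {
        try {
            Encoding.valueOf(name);
            return true;
        } catch (IllegalArgumentException e) {
            return false; // unknown/unsupported encoding, mirrors the incompatibility counter
        }
    }

    public static void main(String[] args) {
        System.out.println(isCompatible("FAST_DIFF"));    // true
        System.out.println(isCompatible("PREFIX_TREE"));  // false -> would be counted as incompatible
    }
}
```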
hbase_FanOutOneBlockAsyncDFSOutputSaslHelper_wrapAndSetPayload_rdh | /**
* Create a ByteString from byte array without copying (wrap), and then set it as the payload
* for the builder.
*
* @param builder
* builder for HDFS DataTransferEncryptorMessage.
* @param payload
* byte array of payload.
*/
static void wrapAndSetPayload(DataTransferEncryptorMessageProto.Builder builder, byte[] payload) throws IOException {
Object byteStringObject;
try {
// byteStringObject = new LiteralByteString(payload);
byteStringObject = constructor.newInstance(payload);
// builder.setPayload(byteStringObject);
setPayloadMethod.invoke(builder, constructor.getDeclaringClass().cast(byteStringObject));
} catch (IllegalAccessException | InstantiationException e) {
throw new RuntimeException(e);
    } catch (InvocationTargetException e) {
Throwables.propagateIfPossible(e.getTargetException(), IOException.class);
throw new RuntimeException(e.getTargetException());
}
} | 3.26 |
hbase_SplitLogTask_parseFrom_rdh | /**
*
* @param data
 *          Serialized data to parse.
 * @return A SplitLogTask instance made of the passed <code>data</code>
* @see #toByteArray()
*/
public static SplitLogTask parseFrom(final byte[] data) throws DeserializationException {
    ProtobufUtil.expectPBMagicPrefix(data);
    try {
int prefixLen = ProtobufUtil.lengthOfPBMagic();
ZooKeeperProtos.SplitLogTask.Builder builder = ZooKeeperProtos.SplitLogTask.newBuilder();
ProtobufUtil.mergeFrom(builder, data, prefixLen, data.length - prefixLen);
return new SplitLogTask(builder.build());
} catch (IOException e) {
throw new DeserializationException(Bytes.toStringBinary(data, 0, 64), e);
}
} | 3.26 |
hbase_SplitLogTask_toByteArray_rdh | /**
*
* @return This instance serialized into a byte array
* @see #parseFrom(byte[])
*/
public byte[] toByteArray() {
// First create a pb ServerName. Then create a ByteString w/ the TaskState
// bytes in it. Finally create a SplitLogTaskState passing in the two
// pbs just created.
HBaseProtos.ServerName snpb = ProtobufUtil.toServerName(this.originServer);
ZooKeeperProtos.SplitLogTask slts = ZooKeeperProtos.SplitLogTask.newBuilder().setServerName(snpb).setState(this.state).build();
return ProtobufUtil.prependPBMagic(slts.toByteArray());
} | 3.26 |
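parseFrom() and toByteArray() frame a protobuf payload with a magic prefix so that readers can verify and strip it before parsing. A minimal sketch of that framing idea with plain byte arrays; the 4-byte prefix used here is made up rather than the actual HBase constant:

```java
// Minimal sketch of the magic-prefix framing idea behind toByteArray()/parseFrom().
// The 4-byte prefix here is made up; HBase uses its own PB magic constant.
import java.util.Arrays;

public class MagicPrefixSketch {
    private static final byte[] MAGIC = {'P', 'B', 'U', 'F'};

    static byte[] prepend(byte[] payload) {
        byte[] out = new byte[MAGIC.length + payload.length];
        System.arraycopy(MAGIC, 0, out, 0, MAGIC.length);
        System.arraycopy(payload, 0, out, MAGIC.length, payload.length);
        return out;
    }

    static byte[] strip(byte[] data) {
        // Verify the prefix before handing the remainder to the real parser.
        if (data.length < MAGIC.length
            || !Arrays.equals(Arrays.copyOf(data, MAGIC.length), MAGIC)) {
            throw new IllegalArgumentException("missing magic prefix");
        }
        return Arrays.copyOfRange(data, MAGIC.length, data.length);
    }

    public static void main(String[] args) {
        byte[] framed = prepend("task-state".getBytes());
        System.out.println(new String(strip(framed))); // task-state
    }
}
```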
hbase_AbstractRecoveredEditsOutputSink_deleteOneWithFewerEntries_rdh | // delete the one with fewer wal entries
private void deleteOneWithFewerEntries(RecoveredEditsWriter editsWriter, Path dst) throws IOException {
long dstMinLogSeqNum = -1L;
try (WALStreamReader reader = walSplitter.getWalFactory().createStreamReader(walSplitter.walFS, dst)) {
WAL.Entry entry = reader.next();
if (entry != null) {
dstMinLogSeqNum = entry.getKey().getSequenceId();
}
} catch (EOFException e) {
LOG.debug("Got EOF when reading first WAL entry from {}, an empty or broken WAL file?", dst, e);
}
if (editsWriter.minLogSeqNum < dstMinLogSeqNum) {
LOG.warn(((("Found existing old edits file. It could be the result of a previous failed" + " split attempt or we have duplicated wal entries. Deleting ") + dst) + ", length=") + walSplitter.walFS.getFileStatus(dst).getLen());
if (!walSplitter.walFS.delete(dst, false)) {
LOG.warn("Failed deleting of old {}", dst);
throw new IOException("Failed deleting of old " + dst);
}
} else {
LOG.warn((("Found existing old edits file and we have less entries. Deleting " + editsWriter.path) + ", length=") + walSplitter.walFS.getFileStatus(editsWriter.path).getLen());
if (!walSplitter.walFS.delete(editsWriter.path, false)) {
LOG.warn("Failed deleting of {}", editsWriter.path);
throw new IOException("Failed deleting of " + editsWriter.path);
}
}
} | 3.26 |
hbase_AbstractRecoveredEditsOutputSink_updateRegionMaximumEditLogSeqNum_rdh | /**
* Update region's maximum edit log SeqNum.
*/
void updateRegionMaximumEditLogSeqNum(WAL.Entry entry) {
synchronized(regionMaximumEditLogSeqNum) {
String regionName = Bytes.toString(entry.getKey().getEncodedRegionName());
Long currentMaxSeqNum = regionMaximumEditLogSeqNum.get(regionName);
if ((currentMaxSeqNum == null) || (entry.getKey().getSequenceId() > currentMaxSeqNum)) {
regionMaximumEditLogSeqNum.put(regionName, entry.getKey().getSequenceId());
}
}
} | 3.26 |
hbase_AbstractRecoveredEditsOutputSink_createRecoveredEditsWriter_rdh | /**
* Returns a writer that wraps a {@link WALProvider.Writer} and its Path. Caller should close.
*/
  protected RecoveredEditsWriter createRecoveredEditsWriter(TableName tableName, byte[] region, long seqId)
    throws IOException {
Path regionEditsPath = getRegionSplitEditsPath(tableName, region, seqId, walSplitter.getFileBeingSplit().getPath().getName(), walSplitter.getTmpDirName(), walSplitter.conf);
if (walSplitter.walFS.exists(regionEditsPath)) {
LOG.warn(((("Found old edits file. It could be the " + "result of a previous failed split attempt. Deleting ") + regionEditsPath) + ", length=") + walSplitter.walFS.getFileStatus(regionEditsPath).getLen());
if (!walSplitter.walFS.delete(regionEditsPath, false)) {
LOG.warn("Failed delete of old {}", regionEditsPath);
}
}
WALProvider.Writer w = walSplitter.createWriter(regionEditsPath);
final String msg = "Creating recovered edits writer path=" + regionEditsPath;
LOG.info(msg);
updateStatusWithMsg(msg);
    return new RecoveredEditsWriter(region, regionEditsPath, w, seqId);
} | 3.26 |
hbase_QuotaFilter_setUserFilter_rdh | /**
* Set the user filter regex
*
* @param regex
* the user filter
* @return the quota filter object
*/
  public QuotaFilter setUserFilter(final String regex) {
    this.userRegex = regex;
hasFilters |= StringUtils.isNotEmpty(regex);
return this;
} | 3.26 |
hbase_QuotaFilter_getTypeFilters_rdh | /**
 * Returns the QuotaType types that we want to filter on
*/
public Set<QuotaType> getTypeFilters() {
return types;
} | 3.26 |
hbase_QuotaFilter_setTableFilter_rdh | /**
* Set the table filter regex
*
* @param regex
* the table filter
* @return the quota filter object
*/
public QuotaFilter setTableFilter(final String regex) {
this.tableRegex = regex;
hasFilters |= StringUtils.isNotEmpty(regex);
return this;
} | 3.26 |
hbase_QuotaFilter_getRegionServerFilter_rdh | /**
* Returns the RegionServer filter regex
*/
public String getRegionServerFilter() {
return regionServerRegex;
} | 3.26 |
hbase_QuotaFilter_getUserFilter_rdh | /**
* Returns the User filter regex
*/
public String getUserFilter() {
return userRegex;
} | 3.26 |
hbase_QuotaFilter_setRegionServerFilter_rdh | /**
* Set the region server filter regex
*
* @param regex
* the region server filter
* @return the quota filter object
*/
public QuotaFilter setRegionServerFilter(final String regex) {
this.regionServerRegex = regex;
hasFilters |= StringUtils.isNotEmpty(regex);
return this;
} | 3.26 |
hbase_QuotaFilter_setNamespaceFilter_rdh | /**
* Set the namespace filter regex
*
* @param regex
* the namespace filter
* @return the quota filter object
*/
public QuotaFilter setNamespaceFilter(final String regex) {
this.namespaceRegex = regex;
hasFilters |= StringUtils.isNotEmpty(regex);
return this;
} | 3.26 |
hbase_QuotaFilter_isNull_rdh | /**
* Returns true if the filter is empty
*/
public boolean isNull() {
return !hasFilters;
} | 3.26 |
hbase_QuotaFilter_getTableFilter_rdh | /**
* Returns the Table filter regex
*/
public String getTableFilter() {
return tableRegex;
} | 3.26 |
hbase_QuotaFilter_getNamespaceFilter_rdh | /**
* Returns the Namespace filter regex
*/
public String getNamespaceFilter() {
return namespaceRegex;
} | 3.26 |
hbase_QuotaFilter_addTypeFilter_rdh | /**
* Add a type to the filter list
*
* @param type
* the type to filter on
* @return the quota filter object
*/
public QuotaFilter addTypeFilter(final QuotaType type) {
this.types.add(type);
hasFilters |= true;
return this;
} | 3.26 |
hbase_AbstractWALProvider_getRemoteWALPrefix_rdh | // Use a timestamp to make it identical. That means, after we transit the peer to DA/S and then
// back to A, the log prefix will be changed. This is used to simplify the implementation for
// replication source, where we do not need to consider that a terminated shipper could be added
// back.
private String getRemoteWALPrefix(String peerId) {
return (((factory.factoryId + "-") + EnvironmentEdgeManager.currentTime()) + "-") + peerId;
} | 3.26 |
hbase_LzmaCompressor_checkSizeAndGrow_rdh | // ByteBufferOutputStream will reallocate the output buffer if it is too small. We
// do not want that behavior here.
@Override
protected void checkSizeAndGrow(int extra) {
long capacityNeeded = curBuf.position() + ((long) (extra));
    if (capacityNeeded > curBuf.limit()) {
throw new BufferOverflowException();
}
} | 3.26 |
hbase_LzmaCompressor_maxCompressedLength_rdh | // Package private
int maxCompressedLength(int len) {
return len + CompressionUtil.compressionOverhead(len);
} | 3.26 |
hbase_NamespaceAuditor_getRegionCountOfTable_rdh | /**
* Get region count for table
*
* @param tName
* - table name
* @return cached region count, or -1 if table status not found
* @throws IOException
* Signals that the namespace auditor has not been initialized
*/
  public int getRegionCountOfTable(TableName tName) throws IOException {
if (stateManager.isInitialized()) {
NamespaceTableAndRegionInfo state = stateManager.getState(tName.getNamespaceAsString());
return state != null ? state.getRegionCountOfTable(tName) : -1;
}
checkTableTypeAndThrowException(tName);
return -1;
} | 3.26 |
hbase_NamespaceAuditor_isInitialized_rdh | /**
* Checks if namespace auditor is initialized. Used only for testing.
*
* @return true, if is initialized
*/
public boolean isInitialized() {
return stateManager.isInitialized();
} | 3.26 |
hbase_NamespaceAuditor_getState_rdh | /**
*
* @param namespace
* The name of the namespace
* @return An instance of NamespaceTableAndRegionInfo
 */
  public NamespaceTableAndRegionInfo getState(String namespace) {
if (stateManager.isInitialized()) {
return stateManager.getState(namespace);
}
return null;
} | 3.26 |
hbase_NamespaceAuditor_checkQuotaToCreateTable_rdh | /**
* Check quota to create table. We add the table information to namespace state cache, assuming
* the operation will pass. If the operation fails, then the next time namespace state chore runs
* namespace state cache will be corrected.
*
* @param tName
* - The table name to check quota.
* @param regions
* - Number of regions that will be added.
* @throws IOException
* Signals that an I/O exception has occurred.
*/
public void checkQuotaToCreateTable(TableName tName, int regions) throws IOException {
if (stateManager.isInitialized()) {
// We do this check to fail fast.
if (masterServices.getTableDescriptors().exists(tName)) {
throw new TableExistsException(tName);
}
stateManager.checkAndUpdateNamespaceTableCount(tName, regions);
    } else {
checkTableTypeAndThrowException(tName);
}
} | 3.26 |
hbase_NamespaceAuditor_checkQuotaToUpdateRegion_rdh | /**
* Check and update region count quota for an existing table.
*
* @param tName
* - table name for which region count to be updated.
* @param regions
* - Number of regions that will be added.
* @throws IOException
* Signals that an I/O exception has occurred.
*/
public void checkQuotaToUpdateRegion(TableName tName, int regions) throws IOException {
    if (stateManager.isInitialized()) {
      stateManager.checkAndUpdateNamespaceRegionCount(tName, regions);
    } else {
      checkTableTypeAndThrowException(tName);
    }
  } | 3.26 |
hbase_BitComparator_getOperator_rdh | /**
* Returns the bitwise operator
*/
public BitwiseOp getOperator() {
return bitOperator;
} | 3.26 |
hbase_BitComparator_parseFrom_rdh | /**
* Parse a serialized representation of {@link BitComparator}
*
* @param pbBytes
* A pb serialized {@link BitComparator} instance
* @return An instance of {@link BitComparator} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
  public static BitComparator parseFrom(final byte[] pbBytes) throws DeserializationException {
ComparatorProtos.BitComparator proto;
try {
proto = ComparatorProtos.BitComparator.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
BitwiseOp bitwiseOp = BitwiseOp.valueOf(proto.getBitwiseOp().name());
return new BitComparator(proto.getComparable().getValue().toByteArray(), bitwiseOp);
} | 3.26 |
hbase_BitComparator_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the comparator that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(ByteArrayComparable other) {
if (other == this) {
return true;
}
if (!(other instanceof BitComparator)) {
return false;
}
BitComparator comparator = ((BitComparator) (other));
return super.areSerializedFieldsEqual(other) && this.getOperator().equals(comparator.getOperator());
} | 3.26 |
hbase_BitComparator_toByteArray_rdh | /**
* Returns The comparator serialized using pb
 */
  @Override
  public byte[] toByteArray() {
    ComparatorProtos.BitComparator.Builder builder = ComparatorProtos.BitComparator.newBuilder();
    builder.setComparable(ProtobufUtil.toByteArrayComparable(this.value));
    ComparatorProtos.BitComparator.BitwiseOp bitwiseOpPb = ComparatorProtos.BitComparator.BitwiseOp.valueOf(bitOperator.name());
    builder.setBitwiseOp(bitwiseOpPb);
return builder.build().toByteArray();
} | 3.26 |
hbase_ClusterStatusPublisher_generateDeadServersListToSend_rdh | /**
* Create the dead server to send. A dead server is sent NB_SEND times. We send at max
* MAX_SERVER_PER_MESSAGE at a time. if there are too many dead servers, we send the newly dead
* first.
 */
  protected List<ServerName> generateDeadServersListToSend() {
    // We're getting the message sent since last time, and add them to the list
    long since = EnvironmentEdgeManager.currentTime() - (messagePeriod * 2);
    for (Pair<ServerName, Long> dead : getDeadServers(since)) {
      lastSent.putIfAbsent(dead.getFirst(), 0);
    }
// We're sending the new deads first.
List<Map.Entry<ServerName, Integer>> entries = new ArrayList<>(lastSent.entrySet());
Collections.sort(entries, new Comparator<Map.Entry<ServerName, Integer>>() {
@Override
public int compare(Map.Entry<ServerName, Integer> o1, Map.Entry<ServerName, Integer> o2) {
return o1.getValue().compareTo(o2.getValue());
}
});
// With a limit of MAX_SERVER_PER_MESSAGE
int max = (entries.size() > MAX_SERVER_PER_MESSAGE) ? MAX_SERVER_PER_MESSAGE : entries.size();
List<ServerName> res = new ArrayList<>(max);
for (int i = 0; i < max; i++) {
Map.Entry<ServerName, Integer> toSend = entries.get(i);
      if (toSend.getValue() >= (NB_SEND - 1)) {
        lastSent.remove(toSend.getKey());
      } else {
        lastSent.replace(toSend.getKey(), toSend.getValue(), toSend.getValue() + 1);
      }
res.add(toSend.getKey());
}
return res;
} | 3.26 |
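generateDeadServersListToSend() announces each dead server a bounded number of times, newest first, and caps the number of servers per message. A standalone sketch of that bookkeeping; the constants and server names are illustrative:

```java
// Standalone sketch of the "announce each dead server NB_SEND times, newest first" bookkeeping.
// Constants and server names are illustrative.
import java.util.*;

public class DeadServerResendSketch {
    static final int NB_SEND = 3;
    static final int MAX_PER_MESSAGE = 2;

    public static void main(String[] args) {
        Map<String, Integer> lastSent = new HashMap<>();
        lastSent.put("rs1", 0);
        lastSent.put("rs2", 2);
        lastSent.put("rs3", 1);

        // Least-announced (i.e. most recently dead) servers go first.
        List<Map.Entry<String, Integer>> entries = new ArrayList<>(lastSent.entrySet());
        entries.sort(Map.Entry.comparingByValue());

        List<String> toSend = new ArrayList<>();
        for (int i = 0; i < Math.min(MAX_PER_MESSAGE, entries.size()); i++) {
            Map.Entry<String, Integer> e = entries.get(i);
            if (e.getValue() >= NB_SEND - 1) {
                lastSent.remove(e.getKey());      // announced enough times, drop it
            } else {
                lastSent.put(e.getKey(), e.getValue() + 1);
            }
            toSend.add(e.getKey());
        }
        System.out.println(toSend); // [rs1, rs3]
    }
}
```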
hbase_ClusterStatusPublisher_getDeadServers_rdh | /**
* Get the servers which died since a given timestamp. protected because it can be subclassed by
* the tests.
*/
protected List<Pair<ServerName, Long>> getDeadServers(long since) {
    if (master.getServerManager() == null) {
return Collections.emptyList();
}
return master.getServerManager().getDeadServers().copyDeadServersSince(since);
} | 3.26 |
hbase_MultiRowRangeFilter_resetExclusive_rdh | /**
* Resets the exclusive flag.
*/
public void resetExclusive() {
exclusive = false;
} | 3.26 |
hbase_MultiRowRangeFilter_isStartRowInclusive_rdh | /**
* Returns if start row is inclusive.
*/
public boolean isStartRowInclusive() {
return startRowInclusive;
} | 3.26 |
hbase_MultiRowRangeFilter_get_rdh | /**
* Gets the RowRange at the given offset.
*/
@SuppressWarnings({ "unchecked", "TypeParameterUnusedInFormals" })
public <T extends BasicRowRange> T get(int i) {
return ((T) (ranges.get(i)));
} | 3.26 |
hbase_MultiRowRangeFilter_hasFoundFirstRange_rdh | /**
* Returns true if the first matching row range was found.
*/
    public boolean hasFoundFirstRange() {
return foundFirstRange;
} | 3.26 |
hbase_MultiRowRangeFilter_getNextRangeIndex_rdh | /**
* Calculates the position where the given rowkey fits in the ranges list.
*
* @param rowKey
* the row key to calculate
* @return index the position of the row key
*/
    public int getNextRangeIndex(byte[] rowKey) {
      BasicRowRange temp;
      if (reversed) {
        temp = new ReversedRowRange(null, true, rowKey, true);
      } else {
        temp = new RowRange(rowKey, true, null, true);
}
// Because we make sure that `ranges` has the correct natural ordering (given it containing
// RowRange or ReverseRowRange objects). This keeps us from having to have two different
// implementations below.
final int index = Collections.binarySearch(ranges, temp);
      if (index < 0) {
        int insertionPosition = (-index) - 1;
        // check if the row key is in the range before the insertion position
        if ((insertionPosition != 0) && ranges.get(insertionPosition - 1).contains(rowKey)) {
          return insertionPosition - 1;
        }
        // check if the row key is before the first range
        if ((insertionPosition == 0) && (!ranges.get(insertionPosition).contains(rowKey))) {
          return ROW_BEFORE_FIRST_RANGE;
        }
        if (!foundFirstRange) {
          foundFirstRange = true;
        }
        return insertionPosition;
      }
}
      // the row key equals one of the start keys, and the range excludes the start key
      if (ranges.get(index).isSearchRowInclusive() == false) {
        exclusive = true;
      }
      return index;
    } | 3.26 |
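getNextRangeIndex() leans on the Collections.binarySearch() contract: a negative return value encodes the insertion point as -(insertionPoint) - 1, which is then used to look at the range just before that position. A minimal demonstration of decoding that value:

```java
// Minimal demonstration of the Collections.binarySearch() contract the snippet relies on:
// a negative result encodes the insertion point as -(insertionPoint) - 1.
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class BinarySearchInsertionSketch {
    public static void main(String[] args) {
        List<Integer> sortedStartKeys = Arrays.asList(10, 20, 30, 40);

        int hit = Collections.binarySearch(sortedStartKeys, 30);
        System.out.println("exact match index = " + hit); // 2

        int miss = Collections.binarySearch(sortedStartKeys, 25);
        int insertionPosition = -miss - 1;
        System.out.println("insertion position for 25 = " + insertionPosition); // 2
        // The filter then checks whether the range just before the insertion
        // position (index 1 here) actually contains the key.
    }
}
```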
hbase_MultiRowRangeFilter_parseFrom_rdh | /**
* Parse a serialized representation of {@link MultiRowRangeFilter}
*
* @param pbBytes
* A pb serialized instance
* @return An instance of {@link MultiRowRangeFilter}
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static MultiRowRangeFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.MultiRowRangeFilter proto;
try {
proto = FilterProtos.MultiRowRangeFilter.parseFrom(pbBytes);
    } catch (InvalidProtocolBufferException e) {
      throw new DeserializationException(e);
    }
    int v12 = proto.getRowRangeListCount();
    List<FilterProtos.RowRange> rangeProtos = proto.getRowRangeListList();
    List<RowRange> rangeList = new ArrayList<>(v12);
for (FilterProtos.RowRange rangeProto : rangeProtos) {
RowRange range = new RowRange(rangeProto.hasStartRow() ? rangeProto.getStartRow().toByteArray() : null, rangeProto.getStartRowInclusive(), rangeProto.hasStopRow() ? rangeProto.getStopRow().toByteArray() : null, rangeProto.getStopRowInclusive());
rangeList.add(range);
}
return new MultiRowRangeFilter(rangeList);
} | 3.26 |
hbase_MultiRowRangeFilter_isInitialized_rdh | /**
* Returns true if this class has been initialized by calling {@link #initialize(boolean)}.
*/
  public boolean isInitialized() {
    return initialized;
  } | 3.26 |
hbase_MultiRowRangeFilter_flipAndReverseRanges_rdh | /**
* Rebuilds the sorted ranges (by startKey) into an equivalent sorted list of ranges, only by
* stopKey instead. Descending order and the ReversedRowRange compareTo implementation make sure
* that we can use Collections.binarySearch().
*/
static List<ReversedRowRange> flipAndReverseRanges(List<RowRange> ranges) {
List<ReversedRowRange> flippedRanges = new ArrayList<>(ranges.size());
    for (int i = ranges.size() - 1; i >= 0; i--) {
RowRange origRange = ranges.get(i);
ReversedRowRange newRowRange = new ReversedRowRange(origRange.startRow, origRange.startRowInclusive, origRange.stopRow, origRange.isStopRowInclusive());
flippedRanges.add(newRowRange);
}
return flippedRanges;
} | 3.26 |
hbase_MultiRowRangeFilter_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
  boolean areSerializedFieldsEqual(Filter o) {
    if (o == this) {
      return true;
    }
    if (!(o instanceof MultiRowRangeFilter)) {
      return false;
    }
    MultiRowRangeFilter other = ((MultiRowRangeFilter) (o));
    if (this.rangeList.size() != other.rangeList.size()) {
      return false;
    }
    for (int i = 0; i < rangeList.size(); ++i) {
RowRange thisRange = this.rangeList.get(i);
RowRange otherRange = other.rangeList.get(i);
if (!(((Bytes.equals(thisRange.startRow, otherRange.startRow) && Bytes.equals(thisRange.stopRow, otherRange.stopRow)) && (thisRange.startRowInclusive == otherRange.startRowInclusive)) && (thisRange.stopRowInclusive == otherRange.stopRowInclusive))) {
return false;
}
}
return true;
} | 3.26 |
hbase_MultiRowRangeFilter_setFoundFirstRange_rdh | /**
* Sets {@link #foundFirstRange} to {@code true}, indicating that we found a matching row range.
*/
public void setFoundFirstRange() {
this.foundFirstRange = true;
} | 3.26 |
hbase_MultiRowRangeFilter_toByteArray_rdh | /**
* Returns The filter serialized using pb
*/
@Override
public byte[] toByteArray() {
FilterProtos.MultiRowRangeFilter.Builder builder = FilterProtos.MultiRowRangeFilter.newBuilder();
for (RowRange range : rangeList) {
if (range != null) {
FilterProtos.RowRange.Builder rangebuilder = FilterProtos.RowRange.newBuilder();
        if (range.startRow != null) {
          rangebuilder.setStartRow(UnsafeByteOperations.unsafeWrap(range.startRow));
        }
        rangebuilder.setStartRowInclusive(range.startRowInclusive);
        if (range.stopRow != null) {
          rangebuilder.setStopRow(UnsafeByteOperations.unsafeWrap(range.stopRow));
        }
        rangebuilder.setStopRowInclusive(range.stopRowInclusive);
builder.addRowRangeList(rangebuilder.build());
}
}
return builder.build().toByteArray();
} | 3.26 |
hbase_MultiRowRangeFilter_isExclusive_rdh | /**
* Returns true if the current range's key is exclusive
*/
    public boolean isExclusive() {
      return exclusive;
    } | 3.26 |
hbase_MultiRowRangeFilter_isIterationComplete_rdh | /**
* Returns true if we exhausted searching all row ranges.
*/
public boolean isIterationComplete(int index) {
return index >= ranges.size();
} | 3.26 |
hbase_MultiRowRangeFilter_isStopRowInclusive_rdh | /**
* Returns if stop row is inclusive.
*/
public boolean isStopRowInclusive() {
return stopRowInclusive;
} | 3.26 |
hbase_RegionState_matches_rdh | // the region is CLOSED because of a RS crashes. Usually it is the same
// with CLOSED, but for some operations such as merge/split, we can not
// apply it to a region in this state, as it may lead to data loss as we
// may have some data in recovered edits.
public boolean matches(State... expected) {
for (State v0 : expected) {
if (this == v0) {
return true;
}
}
return false;
} | 3.26 |
hbase_RegionState_equals_rdh | /**
* Check if two states are the same, except timestamp
*/
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
    if (!(obj instanceof RegionState)) {
return false;
}
RegionState tmp = ((RegionState) (obj));
return ((RegionInfo.COMPARATOR.compare(tmp.hri, hri) == 0) && (tmp.state == state)) && (((serverName != null) && serverName.equals(tmp.serverName)) || ((tmp.serverName == null) && (serverName == null)));
} | 3.26 |
hbase_RegionState_isReadyToOffline_rdh | /**
* Check if a region state can transition to offline
*/
public boolean isReadyToOffline() {
return (((isMerged() || isSplit()) || isOffline()) || isSplittingNew()) || isMergingNew();
} | 3.26 |
hbase_RegionState_convert_rdh | /**
* Convert a protobuf HBaseProtos.RegionState to a RegionState
*
* @return the RegionState
*/
public static RegionState convert(ClusterStatusProtos.RegionState proto) {
return new RegionState(ProtobufUtil.toRegionInfo(proto.getRegionInfo()), State.convert(proto.getState()), proto.getStamp(), null);
} | 3.26 |
hbase_RegionState_hashCode_rdh | /**
* Don't count timestamp in hash code calculation
*/
@Override
public int hashCode() {
    return ((serverName != null ? serverName.hashCode() * 11 : 0) + hri.hashCode()) + (5 * state.ordinal());
} | 3.26 |
hbase_RegionState_updateRitDuration_rdh | /**
* Update the duration of region in transition
*
* @param previousStamp
* previous RegionState's timestamp
*/
@InterfaceAudience.Private
  void updateRitDuration(long previousStamp) {
this.ritDuration += this.stamp - previousStamp;
} | 3.26 |
hbase_RegionState_isReadyToOnline_rdh | /**
* Check if a region state can transition to online
*/
public boolean isReadyToOnline() {
return (isOpened() || isSplittingNew()) || isMergingNew();
} | 3.26 |
hbase_RegionState_isUnassignable_rdh | /**
* Check if a region state is one of offline states that can't transition to pending_close/closing
* (unassign/offline)
*/
public static boolean isUnassignable(State state) {
return ((((state == State.MERGED) || (state == State.SPLIT)) || (state == State.OFFLINE)) || (state == State.SPLITTING_NEW)) || (state == State.MERGING_NEW);
} | 3.26 |
hbase_RegionState_toDescriptiveString_rdh | /**
* A slower (but more easy-to-read) stringification
*/
public String toDescriptiveString() {
long relTime = EnvironmentEdgeManager.currentTime() - stamp;
return ((((((((hri.getRegionNameAsString() + " state=") + state) + ", ts=") + new Date(stamp)) + " (") + (relTime / 1000)) + "s ago)") + ", server=") + serverName;
} | 3.26 |
hbase_RegionState_m0_rdh | /**
* Convert to protobuf ClusterStatusProtos.RegionState.State
*/
public State m0() {
ClusterStatusProtos.RegionState.State v1;
switch (this) {
case OFFLINE :
v1 = State.OFFLINE;
break;
case OPENING :
v1 = State.OPENING;
break;
case OPEN :
v1 = State.OPEN;
break;
case CLOSING :
v1 = State.CLOSING;
break;
case CLOSED :
v1 = State.CLOSED;
break;
case SPLITTING :
v1 = State.SPLITTING;
break;
case SPLIT :
v1 = State.SPLIT;
        break;
      case FAILED_OPEN :
v1 = State.FAILED_OPEN;
break;
case FAILED_CLOSE :
v1 = State.FAILED_CLOSE;
break;
case MERGING :
v1 = State.MERGING;
break;
case MERGED :
v1 = State.MERGED;
break;
case SPLITTING_NEW :
v1 = State.SPLITTING_NEW;
break;
case MERGING_NEW :
v1 = State.MERGING_NEW;
break;
case ABNORMALLY_CLOSED :
v1 = State.ABNORMALLY_CLOSED;
break;
default :
throw new IllegalStateException("");
}
return v1;
} | 3.26 |
hbase_ZKLeaderManager_waitToBecomeLeader_rdh | /**
* Blocks until this instance has claimed the leader ZNode in ZooKeeper
*/
public void waitToBecomeLeader() {
while (!candidate.isStopped()) {
try {
if (ZKUtil.createEphemeralNodeAndWatch(watcher, leaderZNode, nodeId)) {
// claimed the leader znode
leaderExists.set(true);
if (LOG.isDebugEnabled()) {
LOG.debug(("Claimed the leader znode as '" + Bytes.toStringBinary(nodeId)) + "'");
}
return;
}
// if claiming the node failed, there should be another existing node
byte[] currentId = ZKUtil.getDataAndWatch(watcher, leaderZNode);
if ((currentId != null) && Bytes.equals(currentId, nodeId)) { // claimed with our ID, but we didn't grab it, possibly restarted?
LOG.info(("Found existing leader with our ID (" + Bytes.toStringBinary(nodeId)) + "), removing");
ZKUtil.deleteNode(watcher, leaderZNode);
leaderExists.set(false);
} else {
LOG.info("Found existing leader with ID: {}", Bytes.toStringBinary(currentId));
leaderExists.set(true);
}
} catch (KeeperException ke) {
watcher.abort("Unexpected error from ZK, stopping candidate", ke);
candidate.stop("Unexpected error from ZK: " + ke.getMessage());
return;
}
// wait for next chance
synchronized(f0) {
while (leaderExists.get() && (!candidate.isStopped())) {
try {
f0.wait();
} catch (InterruptedException ie) {
LOG.debug("Interrupted waiting on leader", ie);
}
}
}
}
} | 3.26 |
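waitToBecomeLeader() is a claim-or-wait loop: try to create the ephemeral leader znode, and if another leader exists, block until notified that it is gone, then retry. A dependency-free sketch of that loop with an AtomicReference standing in for the znode (not a real ZooKeeper client):

```java
// Dependency-free sketch of the claim-or-wait leader loop above, with an AtomicReference
// standing in for the ZooKeeper ephemeral znode. Not a real ZK client.
import java.util.concurrent.atomic.AtomicReference;

public class LeaderClaimSketch {
    private final AtomicReference<String> leaderNode = new AtomicReference<>(); // "znode" stand-in
    private final Object lock = new Object();

    void waitToBecomeLeader(String myId) throws InterruptedException {
        while (true) {
            // Try to "create the ephemeral node": succeeds only if no leader exists.
            if (leaderNode.compareAndSet(null, myId)) {
                System.out.println(myId + " claimed leadership");
                return;
            }
            // Someone else is leader; wait until we are notified the node went away.
            synchronized (lock) {
                while (leaderNode.get() != null) {
                    lock.wait();
                }
            }
        }
    }

    void stepDown(String myId) {
        if (leaderNode.compareAndSet(myId, null)) {
            synchronized (lock) {
                lock.notifyAll();
            }
        }
    }

    public static void main(String[] args) throws Exception {
        LeaderClaimSketch sketch = new LeaderClaimSketch();
        sketch.waitToBecomeLeader("node-1");
        new Thread(() -> sketch.stepDown("node-1")).start();
        // A second candidate would block in waitToBecomeLeader("node-2") until the step-down above.
    }
}
```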
hbase_ZKLeaderManager_stepDownAsLeader_rdh | /**
* Removes the leader znode, if it is currently claimed by this instance.
*/
public void stepDownAsLeader() {
try {
synchronized(f0) {
if (!leaderExists.get()) {
return;
}
byte[] leaderId = ZKUtil.getData(watcher, leaderZNode);
if ((leaderId != null) && Bytes.equals(nodeId, leaderId)) {
LOG.info("Stepping down as leader");
ZKUtil.deleteNodeFailSilent(watcher, leaderZNode);
leaderExists.set(false);
} else {
LOG.info("Not current leader, no need to step down");
}
}
} catch (KeeperException ke) {
watcher.abort("Unhandled zookeeper exception removing leader node", ke);
candidate.stop("Unhandled zookeeper exception removing leader node: " + ke.getMessage());
} catch (InterruptedException e) {
watcher.abort("Unhandled zookeeper exception removing leader node", e);
candidate.stop("Unhandled zookeeper exception removing leader node: " + e.getMessage());
}
} | 3.26 |
hbase_TableModel_setName_rdh | /**
*
* @param name
* the name to set
*/
public void setName(String name) {
this.name = name;
} | 3.26 |
hbase_TableModel_toString_rdh | /* (non-Javadoc)
@see java.lang.Object#toString()
*/
@Override
  public String toString() {
return this.name;
} | 3.26 |
hbase_TableModel_getName_rdh | /**
* Returns the name
*/
@XmlAttribute
public String getName() {
return name;
} | 3.26 |