name: string (lengths 12 to 178)
code_snippet: string (lengths 8 to 36.5k)
score: float64 (values 3.26 to 3.68)
hbase_MiniHBaseCluster_suspendRegionServer_rdh
/** * Suspend the specified region server * * @param serverNumber * Used as index into a list. */ public RegionServerThread suspendRegionServer(int serverNumber) { JVMClusterUtil.RegionServerThread server = f0.getRegionServers().get(serverNumber); LOG.info("Suspending {}", server.toString()); server.suspend(); return server; }
3.26
hbase_MiniHBaseCluster_startRegionServer_rdh
/** * Starts a region server thread running * * @return New RegionServerThread */ public RegionServerThread startRegionServer() throws IOException { final Configuration newConf = HBaseConfiguration.create(conf); return startRegionServer(newConf); }
3.26
hbase_MiniHBaseCluster_startRegionServerAndWait_rdh
/** * Starts a region server thread and waits until it is processed by the master. Throws an exception * when it can't start a region server or when the region server is not processed by the master * within the timeout. * * @return New RegionServerThread */ public RegionServerThread startRegionServerAndWait(long timeout) throws IOException { JVMClusterUtil.RegionServerThread t = startRegionServer(); ServerName rsServerName = t.getRegionServer().getServerName(); long start = EnvironmentEdgeManager.currentTime(); while ((EnvironmentEdgeManager.currentTime() - start) < timeout) { // Refresh the cluster view on every iteration; a single pre-loop snapshot would never observe the new server. ClusterMetrics clusterStatus = getClusterMetrics(); if ((clusterStatus != null) && clusterStatus.getLiveServerMetrics().containsKey(rsServerName)) { return t; } Threads.sleep(100); } if (t.getRegionServer().isOnline()) { throw new IOException(("RS: " + rsServerName) + " online, but not processed by master"); } else { throw new IOException(("RS: " + rsServerName) + " is offline"); } }
3.26
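A note on the wait loop above: the cluster view has to be re-fetched on every iteration, since a single snapshot taken before the loop would never observe the newly started server. A minimal, JDK-only sketch of the poll-until-timeout idiom (the helper name and signature are illustrative, not HBase API):

import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

public final class WaitUtil {
  /** Polls the condition every pollMillis until it returns true or timeoutMillis elapses. */
  public static boolean waitFor(long timeoutMillis, long pollMillis, BooleanSupplier condition)
    throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (System.currentTimeMillis() < deadline) {
      if (condition.getAsBoolean()) { // re-evaluated on every iteration, never cached
        return true;
      }
      TimeUnit.MILLISECONDS.sleep(pollMillis);
    }
    return condition.getAsBoolean(); // one final check at the deadline
  }
}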
hbase_MiniHBaseCluster_getMasterThread_rdh
/** * Returns the current active master thread, if available. * * @return the active MasterThread, null if none is active. */ public MasterThread getMasterThread() { for (MasterThread mt : f0.getLiveMasters()) { if (mt.getMaster().isActiveMaster()) { return mt; } } return null; }
3.26
hbase_MiniHBaseCluster_shutdown_rdh
/** * Shut down the mini HBase cluster */ @Override public void shutdown() throws IOException { if (this.f0 != null) { this.f0.shutdown(); }}
3.26
hbase_MiniHBaseCluster_handleReportForDutyResponse_rdh
/** * Handles the report-for-duty response from the master, then registers a shutdown thread that * closes this region server's filesystem on the way out. */ @Override protected void handleReportForDutyResponse(final RegionServerStartupResponse c) throws IOException { super.handleReportForDutyResponse(c); // Run this thread to shut down our filesystem on the way out. this.shutdownThread = new SingleFileSystemShutdownThread(getFileSystem()); }
3.26
hbase_MiniHBaseCluster_getLiveMasterThreads_rdh
/** * Returns List of live master threads (skips the aborted and the killed) */ public List<JVMClusterUtil.MasterThread> getLiveMasterThreads() { return this.f0.getLiveMasters(); }
3.26
hbase_MiniHBaseCluster_abortRegionServer_rdh
/** * Cause a region server to exit doing basic clean up only on its way out. * * @param serverNumber * Used as index into a list. */ public String abortRegionServer(int serverNumber) { HRegionServer server = getRegionServer(serverNumber); LOG.info("Aborting " + server.toString()); server.abort("Aborting for tests", new Exception("Trace info")); return server.toString(); }
3.26
hbase_MiniHBaseCluster_resumeRegionServer_rdh
/** * Resume the specified region server * * @param serverNumber * Used as index into a list. */ public RegionServerThread resumeRegionServer(int serverNumber) { JVMClusterUtil.RegionServerThread server = f0.getRegionServers().get(serverNumber); LOG.info("Resuming {}", server.toString()); server.resume(); return server; }
3.26
hbase_MiniHBaseCluster_getLiveRegionServerThreads_rdh
/** * Returns List of live region server threads (skips the aborted and the killed) */ public List<JVMClusterUtil.RegionServerThread> getLiveRegionServerThreads() { return this.f0.getLiveRegionServers(); }
3.26
hbase_MiniHBaseCluster_startMaster_rdh
/** * Starts a master thread running * * @return New MasterThread */ @SuppressWarnings(value = "ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD", justification = "Testing only, not a big deal") public MasterThread startMaster() throws IOException { Configuration c = HBaseConfiguration.create(conf); User user = HBaseTestingUtility.getDifferentUser(c, ".hfs." + (index++)); JVMClusterUtil.MasterThread t = null; try { t = f0.addMaster(c, f0.getMasters().size(), user); t.start(); } catch (InterruptedException ie) { throw new IOException("Interrupted adding master to cluster", ie); } conf.set(HConstants.MASTER_ADDRS_KEY, f0.getConfiguration().get(HConstants.MASTER_ADDRS_KEY)); return t;}
3.26
hbase_MiniHBaseCluster_waitOnMaster_rdh
/** * Wait for the specified master to stop. Removes this thread from list of running threads. * * @return Name of master that just went down. */ public String waitOnMaster(final int serverNumber) { return this.f0.waitOnMaster(serverNumber); }
3.26
hbase_MiniHBaseCluster_stopMaster_rdh
/** * Shut down the specified master cleanly * * @param serverNumber * Used as index into a list. * @param shutdownFS * True if we are to shut down the filesystem as part of this master's * shutdown. Usually we do, but you do not want to do this if you are running * multiple masters in a test and you shut down one before the end of the test. * @return the master that was stopped */ public MasterThread stopMaster(int serverNumber, final boolean shutdownFS) { JVMClusterUtil.MasterThread server = f0.getMasters().get(serverNumber); LOG.info("Stopping " + server.toString()); server.getMaster().stop("Stopping master " + serverNumber);return server; }
3.26
hbase_MiniHBaseCluster_countServedRegions_rdh
/** * Counts the total number of regions being served by the currently online region servers by * asking each how many regions they have. Does not look at hbase:meta at all. Count includes * catalog tables. * * @return number of regions being served by all region servers */public long countServedRegions() { long count = 0; for (JVMClusterUtil.RegionServerThread rst : getLiveRegionServerThreads()) { count += rst.getRegionServer().getNumberOfOnlineRegions(); } return count; }
3.26
hbase_MiniHBaseCluster_abortMaster_rdh
/** * Cause a master to exit without shutting down entire cluster. * * @param serverNumber * Used as index into a list. */ public String abortMaster(int serverNumber) { HMaster server = getMaster(serverNumber); LOG.info("Aborting " + server.toString()); server.abort("Aborting for tests", new Exception("Trace info")); return server.toString(); }
3.26
hbase_MiniHBaseCluster_getNumLiveRegionServers_rdh
/** * Returns Number of live region servers in the cluster currently. */ public int getNumLiveRegionServers() { return this.f0.getLiveRegionServers().size(); }
3.26
hbase_MiniHBaseCluster_join_rdh
/** * Wait for Mini HBase Cluster to shut down. */ public void join() { this.f0.join();}
3.26
hbase_MiniHBaseCluster_getRegionServer_rdh
/** * Grab a numbered region server of your choice. * * @return region server */ public HRegionServer getRegionServer(int serverNumber) { return f0.getRegionServer(serverNumber); }
3.26
hbase_MiniHBaseCluster_getMasterThreads_rdh
/** * Returns List of master threads. */ public List<JVMClusterUtil.MasterThread> getMasterThreads() { return this.f0.getMasters(); }
3.26
hbase_MiniHBaseCluster_waitOnRegionServer_rdh
/** * Wait for the specified region server to stop. Removes this thread from list of running threads. * * @return Name of region server that just went down. */ public String waitOnRegionServer(final int serverNumber) { return this.f0.waitOnRegionServer(serverNumber); }
3.26
hbase_MiniHBaseCluster_getMaster_rdh
/** * Returns the master at the specified index, if available. * * @return the HMaster at the given index, or null if none is available. */ public HMaster getMaster(final int serverNumber) { return this.f0.getMaster(serverNumber); }
3.26
hbase_SingleColumnValueExcludeFilter_filterRowCells_rdh
// Here we remove from the row all key values belonging to the tested column. @Override public void filterRowCells(List<Cell> kvs) { Iterator<? extends Cell> it = kvs.iterator(); while (it.hasNext()) { // If the current column is the tested column, remove it from the results. if (CellUtil.matchingColumn(it.next(), this.columnFamily, this.columnQualifier)) { it.remove(); } } }
3.26
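The filterRowCells implementation above relies on Iterator.remove() to delete elements mid-traversal. A self-contained sketch of the same idiom (the strings stand in for Cells; the matching logic is hypothetical):

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

public final class RemoveMatchingDemo {
  public static void main(String[] args) {
    List<String> cells = new ArrayList<>(List.of("cf:a", "cf:tested", "cf:b"));
    // Iterator.remove() is the only safe way to delete during traversal; calling
    // cells.remove(...) here would make the next it.next() throw ConcurrentModificationException.
    for (Iterator<String> it = cells.iterator(); it.hasNext();) {
      if (it.next().equals("cf:tested")) { // stand-in for CellUtil.matchingColumn(...)
        it.remove();
      }
    }
    System.out.println(cells); // [cf:a, cf:b]
  }
}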
hbase_SingleColumnValueExcludeFilter_parseFrom_rdh
/** * Parse a serialized representation of {@link SingleColumnValueExcludeFilter} * * @param pbBytes * A pb serialized {@link SingleColumnValueExcludeFilter} instance * @return An instance of {@link SingleColumnValueExcludeFilter} made from <code>bytes</code> * @throws DeserializationException * if an error occurred * @see #toByteArray */ public static SingleColumnValueExcludeFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.SingleColumnValueExcludeFilter proto; try { proto = FilterProtos.SingleColumnValueExcludeFilter.parseFrom(pbBytes); } catch (InvalidProtocolBufferException e) { throw new DeserializationException(e); } FilterProtos.SingleColumnValueFilter parentProto = proto.getSingleColumnValueFilter(); final CompareOperator compareOp = CompareOperator.valueOf(parentProto.getCompareOp().name()); final ByteArrayComparable comparator; try { comparator = ProtobufUtil.toComparator(parentProto.getComparator()); } catch (IOException ioe) { throw new DeserializationException(ioe); } return new SingleColumnValueExcludeFilter(parentProto.hasColumnFamily() ? parentProto.getColumnFamily().toByteArray() : null, parentProto.hasColumnQualifier() ? parentProto.getColumnQualifier().toByteArray() : null, compareOp, comparator, parentProto.getFilterIfMissing(), parentProto.getLatestVersionOnly());}
3.26
hbase_SingleColumnValueExcludeFilter_areSerializedFieldsEqual_rdh
/** * Returns true if and only if the fields of the filter that are serialized are equal to the * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) { return true; } if (!(o instanceof SingleColumnValueExcludeFilter)) { return false; } return super.areSerializedFieldsEqual(o); }
3.26
hbase_SingleColumnValueExcludeFilter_toByteArray_rdh
/** * Returns The filter serialized using pb */ @Override public byte[] toByteArray() { FilterProtos.SingleColumnValueExcludeFilter.Builder builder = FilterProtos.SingleColumnValueExcludeFilter.newBuilder(); builder.setSingleColumnValueFilter(super.convert()); return builder.build().toByteArray(); }
3.26
hbase_HMobStore_createScanner_rdh
/** * Gets the MobStoreScanner or ReversedMobStoreScanner. In these scanners, additional seeks into * the mob files are performed after the seek in HBase is done. */ @Override protected KeyValueScanner createScanner(Scan scan, ScanInfo scanInfo, NavigableSet<byte[]> targetCols, long readPt) throws IOException { if (MobUtils.isRefOnlyScan(scan)) { Filter refOnlyFilter = new MobReferenceOnlyFilter(); Filter filter = scan.getFilter(); if (filter != null) { scan.setFilter(new FilterList(filter, refOnlyFilter)); } else { scan.setFilter(refOnlyFilter); } } return scan.isReversed() ? new ReversedMobStoreScanner(this, scanInfo, scan, targetCols, readPt) : new MobStoreScanner(this, scanInfo, scan, targetCols, readPt); }
3.26
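The pattern in createScanner, layering a system-imposed filter on top of whatever filter the user already set, is plain AND-composition. A JDK-only analogue using Predicate (the names are hypothetical stand-ins for Filter and FilterList):

import java.util.function.Predicate;

public final class FilterComposeDemo {
  /** ANDs an extra condition onto an optional existing one, like wrapping in a FilterList. */
  static <T> Predicate<T> compose(Predicate<T> existing, Predicate<T> extra) {
    return existing == null ? extra : existing.and(extra);
  }

  public static void main(String[] args) {
    Predicate<String> userFilter = s -> s.startsWith("row"); // stand-in for scan.getFilter()
    Predicate<String> refOnly = s -> s.endsWith("#ref");     // stand-in for MobReferenceOnlyFilter
    Predicate<String> effective = compose(userFilter, refOnly);
    System.out.println(effective.test("row1#ref")); // true
    System.out.println(effective.test("row1"));     // false: rejected by the added condition
  }
}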
hbase_HMobStore_getLocations_rdh
/** * * @param tableName * the table to look up locations for; cannot be null * @return a list of locations, in order: working dir, then archive dir. Will not be null. */ public List<Path> getLocations(TableName tableName) throws IOException { List<Path> locations = map.get(tableName);if (locations == null) { IdLock.Entry lockEntry = keyLock.getLockEntry(tableName.hashCode()); try { locations = map.get(tableName); if (locations == null) { locations = new ArrayList<>(2); locations.add(MobUtils.getMobFamilyPath(conf, tableName, getColumnFamilyDescriptor().getNameAsString())); locations.add(HFileArchiveUtil.getStoreArchivePath(conf, tableName, MobUtils.getMobRegionInfo(tableName).getEncodedName(), getColumnFamilyDescriptor().getNameAsString())); map.put(tableName, locations); } } finally { keyLock.releaseLockEntry(lockEntry); } }return locations; }
3.26
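getLocations is a classic check-lock-recheck cache populate, with IdLock providing per-key mutual exclusion so two threads resolving the same table don't both build the list. With only the JDK, ConcurrentHashMap.computeIfAbsent gives the same guarantee; a sketch with hypothetical paths:

import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public final class LocationCacheDemo {
  private final ConcurrentMap<String, List<String>> cache = new ConcurrentHashMap<>();

  /** The loader runs at most once per key, even when many threads ask concurrently. */
  public List<String> getLocations(String tableName) {
    return cache.computeIfAbsent(tableName,
      tn -> List.of("/mob/" + tn, "/archive/" + tn)); // hypothetical working and archive dirs
  }

  public static void main(String[] args) {
    System.out.println(new LocationCacheDemo().getLocations("t1")); // [/mob/t1, /archive/t1]
  }
}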
hbase_HMobStore_commitFile_rdh
/** * Commits the mob file. * * @param sourceFile * The source file. * @param targetPath * The directory path where the source file is renamed to. */ public void commitFile(final Path sourceFile, Path targetPath) throws IOException { if (sourceFile == null) { return; } Path dstPath = new Path(targetPath, sourceFile.getName()); validateMobFile(sourceFile); if (sourceFile.equals(targetPath)) { LOG.info("File is already in the destination dir: {}", sourceFile); return; } LOG.info(" FLUSH Renaming flushed file from {} to {}", sourceFile, dstPath); Path parent = dstPath.getParent(); if (!getFileSystem().exists(parent)) { getFileSystem().mkdirs(parent); } if (!getFileSystem().rename(sourceFile, dstPath)) { throw new IOException((("Failed rename of " + sourceFile) + " to ") + dstPath); } }
3.26
hbase_HMobStore_validateMobFile_rdh
/** * Validates a mob file by opening and closing it. * * @param path * the path to the mob file */ private void validateMobFile(Path path) throws IOException { HStoreFile storeFile = null; try { storeFile = new HStoreFile(getFileSystem(), path, conf, getCacheConfig(), BloomType.NONE, isPrimaryReplicaStore()); storeFile.initReader(); } catch (IOException e) {LOG.error(("Failed to open mob file [" + path) + "], keeping it in the temp directory.", e); throw e; } finally { if (storeFile != null) { storeFile.closeStoreFile(false); } } }
3.26
hbase_HMobStore_createWriterInTmp_rdh
/** * Creates the writer for the mob file in the temp directory. * * @param date * The date string, in yyyymmdd format. * @param basePath * The base path of the temp directory. * @param maxKeyCount * The maximum key count. * @param compression * The compression algorithm. * @param startKey * The start key. * @param isCompaction * If the writer is used in compaction. * @return The writer for the mob file. */ public StoreFileWriter createWriterInTmp(String date, Path basePath, long maxKeyCount, Compression.Algorithm compression, byte[] startKey, boolean isCompaction, Consumer<Path> writerCreationTracker) throws IOException { MobFileName mobFileName = MobFileName.create(startKey, date, UUID.randomUUID().toString().replaceAll("-", ""), getHRegion().getRegionInfo().getEncodedName()); return createWriterInTmp(mobFileName, basePath, maxKeyCount, compression, isCompaction, writerCreationTracker); }
3.26
hbase_HMobStore_resolve_rdh
/** * Reads the cell from the mob file. * * @param reference * The cell found in HBase; its value is a path to a mob * file. * @param cacheBlocks * Whether the scanner should cache blocks. * @param readPt * the read point. * @param readEmptyValueOnMobCellMiss * Whether to return an empty value when the mob file is * missing or corrupt. * @return The cell found in the mob file. */ public MobCell resolve(Cell reference, boolean cacheBlocks, long readPt, boolean readEmptyValueOnMobCellMiss) throws IOException { MobCell v13 = null; if (MobUtils.hasValidMobRefCellValue(reference)) { String fileName = MobUtils.getMobFileName(reference); Optional<TableName> tableName = MobUtils.getTableName(reference); if (tableName.isPresent()) { List<Path> locations = getLocations(tableName.get()); v13 = readCell(locations, fileName, reference, cacheBlocks, readPt, readEmptyValueOnMobCellMiss); } } if (v13 == null) { LOG.warn("The Cell result is null, assemble a new Cell with the same row,family," + "qualifier,timestamp,type and tags but with an empty value to return."); Cell cell = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(reference.getRowArray(), reference.getRowOffset(), reference.getRowLength()).setFamily(reference.getFamilyArray(), reference.getFamilyOffset(), reference.getFamilyLength()).setQualifier(reference.getQualifierArray(), reference.getQualifierOffset(), reference.getQualifierLength()).setTimestamp(reference.getTimestamp()).setType(reference.getTypeByte()).setValue(HConstants.EMPTY_BYTE_ARRAY).setTags(reference.getTagsArray(), reference.getTagsOffset(), reference.getTagsLength()).build(); v13 = new MobCell(cell); } return v13; }
3.26
hbase_HMobStore_getPath_rdh
/** * Gets the mob file path. * * @return The mob file path. */ public Path getPath() { return mobFamilyPath; }
3.26
hbase_HMobStore_getTempDir_rdh
/** * Gets the temp directory. * * @return The temp directory. */ private Path getTempDir() { return new Path(homePath, MobConstants.TEMP_DIR_NAME); }
3.26
hbase_HMobStore_readCell_rdh
/** * Reads the cell from a mob file. The mob file might be located in different directories: 1. The * working directory. 2. The archive directory. Reads the cell from the files located in both of * the above directories. * * @param locations * The possible locations where the mob files are saved. * @param fileName * The file to be read. * @param search * The cell to be searched. * @param cacheMobBlocks * Whether the scanner should cache blocks. * @param readPt * the read point. * @param readEmptyValueOnMobCellMiss * Whether to return a null value when the mob file is missing * or corrupt. * @return The found cell. Null if there is no such cell. */ private MobCell readCell(List<Path> locations, String fileName, Cell search, boolean cacheMobBlocks, long readPt, boolean readEmptyValueOnMobCellMiss) throws IOException { FileSystem fs = getFileSystem(); IOException ioe = null; for (Path location : locations) { MobFile file = null; Path path = new Path(location, fileName); try { file = f0.openFile(fs, path, getCacheConfig()); return readPt != (-1) ? file.readCell(search, cacheMobBlocks, readPt) : file.readCell(search, cacheMobBlocks); } catch (IOException e) { f0.evictFile(fileName); ioe = e; if ((e instanceof FileNotFoundException) || (e.getCause() instanceof FileNotFoundException)) { LOG.debug(("Failed to read the cell, the mob file " + path) + " doesn't exist", e); } else if (e instanceof CorruptHFileException) { LOG.error(("The mob file " + path) + " is corrupt", e); break; } else { throw e;} } finally { if (file != null) { f0.closeFile(file); } } } LOG.error(((("The mob file " + fileName) + " could not be found in the locations ") + locations) + " or it is corrupt"); if (readEmptyValueOnMobCellMiss) { return null; } else if ((ioe instanceof FileNotFoundException) || (ioe.getCause() instanceof FileNotFoundException)) { // The region is re-opened when FileNotFoundException is thrown. // This is not necessary when MOB files cannot be found, because the store files // in a region only contain the references to MOB files and a re-open on a region // doesn't help fix the lost MOB files. throw new DoNotRetryIOException(ioe); } else { throw ioe; } }
3.26
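readCell's control flow -- try each location in order, treat a missing file as "try the next directory" but stop immediately on corruption -- generalizes to any ordered-fallback read. A JDK-only sketch (the path layout is hypothetical):

import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.util.List;

public final class FallbackReadDemo {
  /** Tries each directory in order; only a missing file falls through to the next one. */
  static byte[] readFirstAvailable(List<Path> locations, String fileName) throws IOException {
    IOException lastMiss = null;
    for (Path dir : locations) {
      try {
        return Files.readAllBytes(dir.resolve(fileName));
      } catch (NoSuchFileException | FileNotFoundException e) {
        lastMiss = e; // not here (e.g. working dir); try the next location (e.g. archive dir)
      }
      // Any other IOException (e.g. corruption) escapes the loop at once, as in readCell above.
    }
    throw lastMiss != null ? lastMiss : new FileNotFoundException(fileName);
  }

  public static void main(String[] args) throws IOException {
    Path tmp = Files.createTempDirectory("archive");
    Files.write(tmp.resolve("mobfile"), new byte[] { 1, 2, 3 });
    // Missing in the first dir, found in the second.
    System.out.println(readFirstAvailable(List.of(Path.of("/nonexistent"), tmp), "mobfile").length);
  }
}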
hbase_HMobStore_createStoreEngine_rdh
/** * Creates the mob store engine. */ @Override protected StoreEngine<?, ?, ?, ?> createStoreEngine(HStore store, Configuration conf, CellComparator cellComparator) throws IOException { MobStoreEngine engine = new MobStoreEngine(); engine.createComponentsOnce(conf, store, cellComparator); return engine; }
3.26
hbase_HMobStore_getConfiguration_rdh
/** * Gets current config. */ public Configuration getConfiguration() { return this.conf; }
3.26
hbase_OrderedBytesBase_isNullable_rdh
// almost all OrderedBytes implementations are nullable. @Override public boolean isNullable() { return true; }
3.26
hbase_OrderedBytesBase_isSkippable_rdh
// almost all OrderedBytes implementations are skippable. @Override public boolean isSkippable() { return true; }
3.26
hbase_MasterWalManager_splitMetaLog_rdh
/** * Specialized method to handle the splitting for meta WAL * * @param serverNames * logs belonging to these servers will be split */ public void splitMetaLog(final Set<ServerName> serverNames) throws IOException { splitLog(serverNames, META_FILTER); }
3.26
hbase_MasterWalManager_splitLog_rdh
/** * This method is the base split method that splits WAL files matching a filter. Callers should * pass the appropriate filter for meta and non-meta WALs. * * @param serverNames * logs belonging to these servers will be split; this will rename the log * directory out from under a soft-failed server */ public void splitLog(final Set<ServerName> serverNames, PathFilter filter) throws IOException { long splitTime = 0; long splitLogSize = 0; List<Path> logDirs = getLogDirs(serverNames); splitLogManager.handleDeadWorkers(serverNames); splitTime = EnvironmentEdgeManager.currentTime();splitLogSize = splitLogManager.splitLogDistributed(serverNames, logDirs, filter); splitTime = EnvironmentEdgeManager.currentTime() - splitTime; if (this.metricsMasterFilesystem != null) { if (filter == META_FILTER) { this.metricsMasterFilesystem.addMetaWALSplit(splitTime, splitLogSize); } else { this.metricsMasterFilesystem.addSplit(splitTime, splitLogSize); } }}
3.26
hbase_MasterWalManager_getSplittingServersFromWALDir_rdh
/** * Get Servernames which are currently splitting; paths have a '-splitting' suffix. */ public Set<ServerName> getSplittingServersFromWALDir() throws IOException { return getServerNamesFromWALDirPath(p -> p.getName().endsWith(AbstractFSWALProvider.SPLITTING_EXT)); }
3.26
hbase_MasterWalManager_checkFileSystem_rdh
/** * Checks to see if the file system is still accessible. If not, sets closed * * @return false if file system is not available */ private boolean checkFileSystem() { if (this.fsOk) { try { FSUtils.checkFileSystemAvailable(this.f0); FSUtils.checkDfsSafeMode(this.conf); } catch (IOException e) { services.abort("Shutting down HBase cluster: file system not available", e); this.fsOk = false; } } return this.fsOk; }
3.26
hbase_MasterWalManager_getServerNamesFromWALDirPath_rdh
/** * Returns listing of ServerNames found by parsing WAL directory paths in FS. */ public Set<ServerName> getServerNamesFromWALDirPath(final PathFilter filter) throws IOException { FileStatus[] walDirForServerNames = getWALDirPaths(filter); return Stream.of(walDirForServerNames).map(s -> { ServerName serverName = AbstractFSWALProvider.getServerNameFromWALDirectoryName(s.getPath()); if (serverName == null) { LOG.warn(("Log folder {} doesn't look like its name includes a " + "region server name; leaving in place. If you see later errors about missing ") + "write ahead logs they may be saved in this location.", s.getPath()); return null; } return serverName; }).filter(s -> s != null).collect(Collectors.toSet()); }
3.26
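getServerNamesFromWALDirPath shows the usual parse-or-skip stream shape: map each directory name through a parser that may fail, drop the failures rather than aborting the whole listing, and collect the rest. A self-contained sketch with a hypothetical name format:

import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public final class WalDirNamesDemo {
  /** Collects parseable server names; unparseable directory names are simply skipped. */
  static Set<String> serverNames(Stream<String> dirNames) {
    return dirNames.map(WalDirNamesDemo::parse)
      .flatMap(Optional::stream) // drops the empties instead of failing the whole listing
      .collect(Collectors.toSet());
  }

  // Hypothetical stand-in for AbstractFSWALProvider.getServerNameFromWALDirectoryName:
  // accepts "host,port,startcode" style names only.
  private static Optional<String> parse(String dirName) {
    return dirName.chars().filter(c -> c == ',').count() == 2
      ? Optional.of(dirName)
      : Optional.empty();
  }

  public static void main(String[] args) {
    System.out.println(serverNames(Stream.of("rs1,16020,1699999999999", "corrupt-dir")));
    // [rs1,16020,1699999999999]
  }
}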
hbase_MasterWalManager_getWALDirPaths_rdh
/** * Returns List of all RegionServer WAL dirs; i.e. this.rootDir/HConstants.HREGION_LOGDIR_NAME. */ public FileStatus[] getWALDirPaths(final PathFilter filter) throws IOException { Path walDirPath = new Path(CommonFSUtils.getWALRootDir(conf), HConstants.HREGION_LOGDIR_NAME); FileStatus[] walDirForServerNames = CommonFSUtils.listStatus(f0, walDirPath, filter); return walDirForServerNames == null ? new FileStatus[0] : walDirForServerNames; } /** * Inspect the log directory to find dead servers which need recovery work * * @return A set of ServerNames which aren't running but still have WAL files left in file system * @deprecated With proc-v2, we can record the crash server with procedure store, so do not need to scan the wal directory to find out the splitting wal directory any more. Leave it here only because {@code RecoverMetaProcedure}
3.26
hbase_MasterWalManager_getOldLogDir_rdh
/** * Get the directory where old logs go * * @return the dir */ Path getOldLogDir() { return this.oldLogDir; }
3.26
hbase_MasterWalManager_archiveMetaLog_rdh
/** * The hbase:meta region may OPEN and CLOSE without issue on a server and then move elsewhere. On * CLOSE, the WAL for the hbase:meta table may not be archived yet (the WAL is only needed if * hbase:meta did not close cleanly). Since the meta region is no longer on this server, the * ServerCrashProcedure won't split these leftover hbase:meta WALs, just leaving them in the WAL * splitting dir. If we try to delete the WAL splitting dir for the server, it fails since the dir * is not totally empty. We can safely archive these hbase:meta logs; then the WAL dir can be * deleted. * * @param serverName * the server to archive meta log */ public void archiveMetaLog(final ServerName serverName) { try { Path logDir = new Path(this.rootDir, AbstractFSWALProvider.getWALDirectoryName(serverName.toString())); Path splitDir = logDir.suffix(AbstractFSWALProvider.SPLITTING_EXT); if (f0.exists(splitDir)) { FileStatus[] logfiles = CommonFSUtils.listStatus(f0, splitDir, META_FILTER); if (logfiles != null) { for (FileStatus status : logfiles) { if (!status.isDir()) { Path newPath = AbstractFSWAL.getWALArchivePath(this.oldLogDir, status.getPath()); if (!CommonFSUtils.renameAndSetModifyTime(f0, status.getPath(), newPath)) { LOG.warn((("Unable to move " + status.getPath()) + " to ") + newPath); } else { LOG.debug((("Archived meta log " + status.getPath()) + " to ") + newPath); } } } } if (!f0.delete(splitDir, false)) { LOG.warn("Unable to delete log dir. Ignoring. " + splitDir); } } } catch (IOException ie) { LOG.warn("Failed archiving meta log for server " + serverName, ie); } }
3.26
hbase_MasterWalManager_getLiveServersFromWALDir_rdh
/** * Get Servernames that COULD BE 'alive'; excludes those that have a '-splitting' suffix as these * are already being split -- they cannot be 'alive'. */ public Set<ServerName> getLiveServersFromWALDir() throws IOException { return getServerNamesFromWALDirPath(p -> !p.getName().endsWith(AbstractFSWALProvider.SPLITTING_EXT)); }
3.26
hbase_MergeTableRegionsProcedure_createMergedRegionInfo_rdh
/** * Create merged region info by looking at passed in <code>regionsToMerge</code> to figure what * extremes for start and end keys to use; merged region needs to have an extent sufficient to * cover all regions-to-merge. */ private static RegionInfo createMergedRegionInfo(final RegionInfo[] regionsToMerge) { byte[] lowestStartKey = null; byte[] highestEndKey = null; // Region Id is a timestamp. Merged region's id can't be less than that of // merging regions else will insert at wrong location in hbase:meta (See HBASE-710). long highestRegionId = -1; for (RegionInfo ri : regionsToMerge) { if (lowestStartKey == null) { lowestStartKey = ri.getStartKey(); } else if (Bytes.compareTo(ri.getStartKey(), lowestStartKey) < 0) { lowestStartKey = ri.getStartKey(); } if (highestEndKey == null) { highestEndKey = ri.getEndKey(); } else if (ri.isLast() || (Bytes.compareTo(ri.getEndKey(), highestEndKey) > 0)) { highestEndKey = ri.getEndKey(); } highestRegionId = (ri.getRegionId() > highestRegionId) ? ri.getRegionId() : highestRegionId; } // Merged region is sorted between two merging regions in META return /* Add one so new merged region is highest */ RegionInfoBuilder.newBuilder(regionsToMerge[0].getTable()).setStartKey(lowestStartKey).setEndKey(highestEndKey).setSplit(false).setRegionId(highestRegionId + 1).build(); }
3.26
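The extremes logic in createMergedRegionInfo is just "smallest start key, largest end key" under lexicographic byte order (plus the isLast special case, since an empty end key means unbounded). A minimal sketch of the covering-range computation, ignoring the empty-key case (KeyRange is a hypothetical stand-in for RegionInfo):

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public final class MergedExtentDemo {
  record KeyRange(byte[] start, byte[] end) {} // hypothetical stand-in for RegionInfo

  /** Smallest range covering all inputs: min of starts, max of ends, byte-lexicographically. */
  static KeyRange cover(KeyRange[] ranges) {
    byte[] lowest = null;
    byte[] highest = null;
    for (KeyRange r : ranges) {
      if (lowest == null || Arrays.compare(r.start(), lowest) < 0) lowest = r.start();
      if (highest == null || Arrays.compare(r.end(), highest) > 0) highest = r.end();
    }
    return new KeyRange(lowest, highest);
  }

  public static void main(String[] args) {
    KeyRange a = new KeyRange("b".getBytes(StandardCharsets.UTF_8), "d".getBytes(StandardCharsets.UTF_8));
    KeyRange b = new KeyRange("d".getBytes(StandardCharsets.UTF_8), "g".getBytes(StandardCharsets.UTF_8));
    KeyRange merged = cover(new KeyRange[] { a, b });
    System.out.println("[" + new String(merged.start(), StandardCharsets.UTF_8) + ", "
      + new String(merged.end(), StandardCharsets.UTF_8) + ")"); // [b, g)
  }
}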
hbase_MergeTableRegionsProcedure_checkRegionsToMerge_rdh
/** * * @throws MergeRegionException * If unable to merge regions for whatever reasons. */ private static void checkRegionsToMerge(MasterProcedureEnv env, final RegionInfo[] regions, final boolean force) throws MergeRegionException { long count = Arrays.stream(regions).distinct().count(); if (regions.length != count) { throw new MergeRegionException((((("Duplicate regions specified; cannot merge a region to " + "itself. Passed in ") + regions.length) + " but only ") + count) + " unique."); } if (count < 2) { throw new MergeRegionException("Need two Regions at least to run a Merge"); } RegionInfo previous = null; for (RegionInfo ri : regions) { if (previous != null) { if (!previous.getTable().equals(ri.getTable())) { String msg = (("Can't merge regions from different tables: " + previous) + ", ") + ri; LOG.warn(msg); throw new MergeRegionException(msg); } if (((!force) && (!ri.isAdjacent(previous))) && (!ri.isOverlap(previous))) { String msg = ((("Unable to merge non-adjacent or non-overlapping regions '" + previous.getShortNameToLog()) + "', '") + ri.getShortNameToLog()) + "' when force=false"; LOG.warn(msg); throw new MergeRegionException(msg); } } if (ri.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { throw new MergeRegionException("Can't merge non-default replicas; " + ri); } try { checkOnline(env, ri); } catch (DoNotRetryRegionException dnrre) { throw new MergeRegionException(dnrre); } previous = ri;} }
3.26
hbase_MergeTableRegionsProcedure_setRegionStateToMerging_rdh
/** * Set the region states to MERGING state */ private void setRegionStateToMerging(final MasterProcedureEnv env) { // Set State.MERGING to regions to be merged RegionStates regionStates = env.getAssignmentManager().getRegionStates(); for (RegionInfo ri : this.regionsToMerge) { regionStates.getRegionStateNode(ri).setState(State.MERGING); } }
3.26
hbase_MergeTableRegionsProcedure_postRollBackMergeRegions_rdh
/** * Action after rollback a merge table regions action. */ private void postRollBackMergeRegions(final MasterProcedureEnv env) throws IOException {final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); if (cpHost != null) { cpHost.postRollBackMergeRegionsAction(regionsToMerge, getUser()); } }
3.26
hbase_MergeTableRegionsProcedure_isRollbackSupported_rdh
/* Check whether we are in the state that can be rolled back */ @Override protected boolean isRollbackSupported(final MergeTableRegionsState state) { switch (state) { case MERGE_TABLE_REGIONS_POST_OPERATION : case MERGE_TABLE_REGIONS_OPEN_MERGED_REGION : case MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION : case MERGE_TABLE_REGIONS_UPDATE_META : // It is not safe to rollback in these states. return false; default : break; } return true; }
3.26
hbase_MergeTableRegionsProcedure_createMergedRegion_rdh
/** * Create merged region. The way the merge works is that we make a 'merges' temporary directory in * the FIRST parent region to merge (Do not change this without also changing the rollback where * we look in this FIRST region for the merge dir). We then collect here references to all the * store files in all the parent regions including those of the FIRST parent region into a * subdirectory, named for the resultant merged region. We then call commitMergeRegion. It finds * this subdirectory of storefile references and moves them under the new merge region (creating * the region layout as side effect). After assign of the new merge region, we will run a * compaction. This will undo the references but the reference files remain in place until the * archiver runs (which it does on a period as a chore in the RegionServer that hosts the merge * region -- see CompactedHFilesDischarger). Once the archiver has moved aside the no-longer used * references, the merge region no longer has references. The catalog janitor will notice when it * runs next and it will remove the old parent regions. */ private void createMergedRegion(final MasterProcedureEnv env) throws IOException { final MasterFileSystem v32 = env.getMasterServices().getMasterFileSystem(); final Path tableDir = CommonFSUtils.getTableDir(v32.getRootDir(), regionsToMerge[0].getTable()); final FileSystem fs = v32.getFileSystem(); List<Path> mergedFiles = new ArrayList<>(); HRegionFileSystem mergeRegionFs = HRegionFileSystem.createRegionOnFileSystem(env.getMasterConfiguration(), fs, tableDir, mergedRegion); for (RegionInfo ri : this.regionsToMerge) { HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(env.getMasterConfiguration(), fs, tableDir, ri, false); mergedFiles.addAll(mergeStoreFiles(env, regionFs, mergeRegionFs, mergedRegion)); } assert mergeRegionFs != null; mergeRegionFs.commitMergedRegion(mergedFiles, env); // Prepare to create merged regions env.getAssignmentManager().getRegionStates().getOrCreateRegionStateNode(mergedRegion).setState(State.MERGING_NEW);}
3.26
hbase_MergeTableRegionsProcedure_postCompletedMergeRegions_rdh
/** * Post merge region action * * @param env * MasterProcedureEnv */ private void postCompletedMergeRegions(final MasterProcedureEnv env) throws IOException { final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); if (cpHost != null) { cpHost.postCompletedMergeRegionsAction(regionsToMerge, mergedRegion, getUser()); } }
3.26
hbase_MergeTableRegionsProcedure_prepareMergeRegion_rdh
/** * Prepare merge and do some checks */ private boolean prepareMergeRegion(final MasterProcedureEnv env) throws IOException { // Fail if we are taking a snapshot of the given table TableName v18 = regionsToMerge[0].getTable(); if (env.getMasterServices().getSnapshotManager().isTakingSnapshot(v18)) { throw new MergeRegionException((("Skip merging regions " + RegionInfo.getShortNameToLog(regionsToMerge)) + ", because we are snapshotting ") + v18); } // Mostly this check is not used because we already check the switch before submitting a merge // procedure. Just to be safe, check the switch again. This procedure can be rolled back if // the switch was set to false after submission. if (!env.getMasterServices().isSplitOrMergeEnabled(MasterSwitchType.MERGE)) { String regionsStr = Arrays.deepToString(this.regionsToMerge); LOG.warn("Merge switch is off! skip merge of " + regionsStr); setFailure(getClass().getSimpleName(), new IOException(("Merge of " + regionsStr) + " failed because merge switch is off")); return false; } if (!env.getMasterServices().getTableDescriptors().get(getTableName()).isMergeEnabled()) { String regionsStr = Arrays.deepToString(regionsToMerge); LOG.warn("Merge is disabled for the table! Skipping merge of {}", regionsStr); setFailure(getClass().getSimpleName(), new IOException(("Merge of " + regionsStr) + " failed as region merge is disabled for the table")); return false; } RegionStates regionStates = env.getAssignmentManager().getRegionStates(); RegionStateStore regionStateStore = env.getAssignmentManager().getRegionStateStore(); for (RegionInfo ri : this.regionsToMerge) { if (regionStateStore.hasMergeRegions(ri)) { String msg = (((((("Skip merging " + RegionInfo.getShortNameToLog(regionsToMerge)) + ", because a parent, ") + RegionInfo.getShortNameToLog(ri)) + ", has a merge qualifier ") + "(if a 'merge column' in parent, it was recently merged but still has outstanding ") + "references to its parents that must be cleared before it can participate in merge -- ") + "major compact it to hurry clearing of its references)"; LOG.warn(msg); throw new MergeRegionException(msg);} RegionState state = regionStates.getRegionState(ri.getEncodedName()); if (state == null) { throw new UnknownRegionException(RegionInfo.getShortNameToLog(ri) + " UNKNOWN (Has it been garbage collected?)"); } if (!state.isOpened()) { throw new MergeRegionException("Unable to merge regions that are NOT online: " + ri); } // Ask the remote regionserver if regions are mergeable. If we get an IOE, report it // along with the failure, so we can see why regions are not mergeable at this time. try { if (!isMergeable(env, state)) { setFailure(getClass().getSimpleName(), new MergeRegionException(((("Skip merging " + RegionInfo.getShortNameToLog(regionsToMerge)) + ", because a parent, ") + RegionInfo.getShortNameToLog(ri)) + ", is not mergeable")); return false; } } catch (IOException e) { IOException ioe = new IOException(RegionInfo.getShortNameToLog(ri) + " NOT mergeable", e); setFailure(getClass().getSimpleName(), ioe); return false; } } // Update region states to Merging setRegionStateToMerging(env); return true; }
3.26
hbase_MergeTableRegionsProcedure_getServerName_rdh
/** * The procedure could be restarted from a different machine. If the variable is null, we need to * retrieve it. * * @param env * MasterProcedureEnv */ private ServerName getServerName(final MasterProcedureEnv env) { if (regionLocation == null) { regionLocation = env.getAssignmentManager().getRegionStates().getRegionServerOfRegion(regionsToMerge[0]); // May still be null here but return null and let caller deal. // Means we lost the in-memory-only location. We are in recovery // or so. The caller should be able to deal w/ a null ServerName. // Let them go to the Balancer to find one to use instead. } return regionLocation; }
3.26
hbase_MergeTableRegionsProcedure_preMergeRegionsCommit_rdh
/** * Pre merge region commit action * * @param env * MasterProcedureEnv */ private void preMergeRegionsCommit(final MasterProcedureEnv env) throws IOException { final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); if (cpHost != null) { @MetaMutationAnnotation final List<Mutation> metaEntries = new ArrayList<>(); cpHost.preMergeRegionsCommit(regionsToMerge, metaEntries, getUser()); try { for (Mutation p : metaEntries) { RegionInfo.parseRegionName(p.getRow()); } } catch (IOException e) { LOG.error("Row key of mutation from coprocessor is not parsable as region name. " + "Mutations from coprocessor should only be for hbase:meta table.", e); throw e; } } }
3.26
hbase_MergeTableRegionsProcedure_postMergeRegionsCommit_rdh
/** * Post merge region action * * @param env * MasterProcedureEnv */ private void postMergeRegionsCommit(final MasterProcedureEnv env) throws IOException { final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); if (cpHost != null) { cpHost.postMergeRegionsCommit(regionsToMerge, mergedRegion, getUser()); } }
3.26
hbase_MergeTableRegionsProcedure_preMergeRegions_rdh
/** * Pre merge region action * * @param env * MasterProcedureEnv */ private void preMergeRegions(final MasterProcedureEnv env) throws IOException {final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); if (cpHost != null) { cpHost.preMergeRegionsAction(regionsToMerge, getUser()); }// TODO: Clean up split and merge. Currently all over the place. try { env.getMasterServices().getMasterQuotaManager().onRegionMerged(this.mergedRegion); } catch (QuotaExceededException e) { // TODO: why is this here? merge requests can be submitted by actors other than the normalizer env.getMasterServices().getRegionNormalizerManager().planSkipped(PlanType.MERGE);throw e; } }
3.26
hbase_MergeTableRegionsProcedure_m1_rdh
/** * Clean up a merged region on rollback after failure. */ private void m1(final MasterProcedureEnv env) throws IOException { final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); TableName tn = this.regionsToMerge[0].getTable(); final Path tabledir = CommonFSUtils.getTableDir(mfs.getRootDir(), tn); final FileSystem fs = mfs.getFileSystem(); // See createMergedRegion above where we specify the merge dir as being in the // FIRST merge parent region. HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(env.getMasterConfiguration(), fs, tabledir, regionsToMerge[0], false); regionFs.cleanupMergedRegion(mergedRegion); }
3.26
hbase_MergeTableRegionsProcedure_getMergedRegion_rdh
/** * Returns the merged region. May be null if called too early or if the merge failed. */ RegionInfo getMergedRegion() { return this.mergedRegion; }
3.26
hbase_MergeTableRegionsProcedure_rollbackCloseRegionsForMerge_rdh
/** * Rollback close regions */ private void rollbackCloseRegionsForMerge(MasterProcedureEnv env) throws IOException { AssignmentManagerUtil.reopenRegionsForRollback(env, Arrays.asList(regionsToMerge), getRegionReplication(env), getServerName(env)); }
3.26
hbase_MergeTableRegionsProcedure_updateMetaForMergedRegions_rdh
/** * Add merged region to META and delete original regions. */ private void updateMetaForMergedRegions(final MasterProcedureEnv env) throws IOException { env.getAssignmentManager().markRegionAsMerged(mergedRegion, getServerName(env), this.regionsToMerge); }
3.26
hbase_ReplicationSource_postShipEdits_rdh
// offsets totalBufferUsed by deducting shipped batchSize. @Override public void postShipEdits(List<Entry> entries, long batchSize) { if (throttler.isEnabled()) { throttler.addPushSize(batchSize); } totalReplicatedEdits.addAndGet(entries.size()); this.manager.releaseBufferQuota(batchSize); }
3.26
hbase_ReplicationSource_sleepForRetries_rdh
/** * Do the sleeping logic * * @param msg * Why we sleep * @param sleepMultiplier * by how many times the default sleeping time is augmented * @return True if <code>sleepMultiplier</code> is &lt; <code>maxRetriesMultiplier</code> */ private boolean sleepForRetries(String msg, int sleepMultiplier) { try { if (LOG.isTraceEnabled()) { LOG.trace("{} {}, sleeping {} times {}", logPeerId(), msg, sleepForRetries, sleepMultiplier); } Thread.sleep(this.sleepForRetries * sleepMultiplier); } catch (InterruptedException e) { if (LOG.isDebugEnabled()) { LOG.debug("{} Interrupted while sleeping between retries", logPeerId()); } Thread.currentThread().interrupt(); }return sleepMultiplier < maxRetriesMultiplier; }
3.26
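sleepForRetries implements a capped linear backoff: the delay is base times the multiplier, and the boolean return tells the caller whether it may still grow the multiplier. A runnable sketch of the caller-side pattern (the failing operation is a stand-in):

public final class RetryLoopDemo {
  /** Sleeps base * multiplier ms; true means the multiplier may still be increased. */
  static boolean sleepForRetries(long baseMillis, int multiplier, int maxMultiplier)
    throws InterruptedException {
    Thread.sleep(baseMillis * multiplier);
    return multiplier < maxMultiplier;
  }

  private static int attempts = 0;

  private static boolean tryOperation() { // stand-in for the real, occasionally failing work
    return ++attempts >= 3;
  }

  public static void main(String[] args) throws InterruptedException {
    int multiplier = 1;
    while (!tryOperation()) {
      if (sleepForRetries(100, multiplier, 5)) {
        multiplier++; // back off a bit longer next time, capped at 5 * 100 ms
      }
    }
    System.out.println("succeeded after " + attempts + " attempts");
  }
}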
hbase_ReplicationSource_checkError_rdh
// log the error, check if the error is OOME, or whether we should abort the server private void checkError(Thread t, Throwable error) { OOMEChecker.exitIfOOME(error, getClass().getSimpleName()); LOG.error("Unexpected exception in {} currentPath={}", t.getName(), getCurrentPath(), error); if (abortOnError) { server.abort("Unexpected exception in " + t.getName(), error); } }
3.26
hbase_ReplicationSource_init_rdh
/** * Instantiation method used by region servers * * @param conf * configuration to use * @param fs * file system to use * @param manager * replication manager to ping to * @param queueStorage * the storage for the replication queues * @param replicationPeer * the peer we are replicating to * @param server * the server for this region server * @param queueData * the id and offsets of our replication queue * @param clusterId * unique UUID for the cluster * @param walFileLengthProvider * used to get the length of the WAL file being written * @param metrics * metrics for replication source */ @Override public void init(Configuration conf, FileSystem fs, ReplicationSourceManager manager, ReplicationQueueStorage queueStorage, ReplicationPeer replicationPeer, Server server, ReplicationQueueData queueData, UUID clusterId, WALFileLengthProvider walFileLengthProvider, MetricsSource metrics) throws IOException { this.server = server; this.conf = HBaseConfiguration.create(conf); this.waitOnEndpointSeconds = this.conf.getInt(WAIT_ON_ENDPOINT_SECONDS, DEFAULT_WAIT_ON_ENDPOINT_SECONDS); decorateConf(); // 1 second this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000);// 5 minutes @ 1 sec per this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 300); this.queueSizePerGroup = this.conf.getInt("hbase.regionserver.maxlogs", 32); this.logQueue = new ReplicationSourceLogQueue(conf, metrics, this); this.queueStorage = queueStorage;this.f0 = replicationPeer; this.manager = manager; this.fs = fs; this.metrics = metrics; this.clusterId = clusterId; this.f1 = queueData.getId(); this.f2 = queueData.getOffsets(); // A defaultBandwidth of '0' means no bandwidth; i.e. no throttling. defaultBandwidth = this.conf.getLong("replication.source.per.peer.node.bandwidth", 0); currentBandwidth = getCurrentBandwidth(); this.throttler = new ReplicationThrottler(((double) (currentBandwidth)) / 10.0); this.walFileLengthProvider = walFileLengthProvider; this.abortOnError = this.conf.getBoolean("replication.source.regionserver.abort", true); LOG.info("queueId={}, ReplicationSource: {}, currentBandwidth={}", f1, replicationPeer.getId(), this.currentBandwidth); }
3.26
hbase_ReplicationSource_getWalEntryFilter_rdh
/** * Call after {@link #initializeWALEntryFilter(UUID)} else it will be null. * * @return WAL Entry Filter Chain to use on WAL files filtering *out* WALEntry edits. */ WALEntryFilter getWalEntryFilter() { return walEntryFilter; }
3.26
hbase_StoreFileReader_passesTimerangeFilter_rdh
/** * Check if this storeFile may contain keys within the TimeRange that have not expired (i.e. not * older than oldestUnexpiredTS). * * @param tr * the timeRange to restrict * @param oldestUnexpiredTS * the oldest timestamp that is not expired, as determined by the column * family's TTL * @return false if queried keys definitely don't exist in this StoreFile */ boolean passesTimerangeFilter(TimeRange tr, long oldestUnexpiredTS) { return this.timeRange == null ? true : this.timeRange.includesTimeRange(tr) && (this.timeRange.getMax() >= oldestUnexpiredTS); } /** * Checks whether the given scan passes the Bloom filter (if present). Only checks Bloom filters * for single-row or single-row-column scans. Bloom filter checking for multi-gets is implemented * as part of the store scanner system (see {@link StoreFileScanner#seek(Cell)} and uses the * lower-level API {@link #passesGeneralRowBloomFilter(byte[], int, int)} and * {@link #passesGeneralRowColBloomFilter(Cell)}
3.26
hbase_StoreFileReader_passesGeneralRowBloomFilter_rdh
/** * A method for checking Bloom filters. Called directly from StoreFileScanner in case of a * multi-column query. * * @return True if passes */ private boolean passesGeneralRowBloomFilter(byte[] row, int rowOffset, int rowLen) { BloomFilter bloomFilter = this.generalBloomFilter; if (bloomFilter == null) { bloomFilterMetrics.incrementEligible(); return true; } // Used in ROW bloom byte[] key = null; if ((rowOffset != 0) || (rowLen != row.length)) { throw new AssertionError("For row-only Bloom filters the row must occupy the whole array"); } key = row; return checkGeneralBloomFilter(key, null, bloomFilter); }
3.26
hbase_StoreFileReader_incrementRefCount_rdh
/** * Indicate that the scanner has started reading with this reader. We need to increment the ref * count so the reader is not closed while some object is still holding the lock. */ void incrementRefCount() { storeFileInfo.increaseRefCount(); }
3.26
hbase_StoreFileReader_getFilterEntries_rdh
/** * The number of Bloom filter entries in this store file, or an estimate thereof, if the Bloom * filter is not loaded. This always returns an upper bound of the number of Bloom filter entries. * * @return an estimate of the number of Bloom filter entries in this file */ public long getFilterEntries() { return generalBloomFilter != null ? generalBloomFilter.getKeyCount() : reader.getEntries(); }
3.26
hbase_StoreFileReader_passesGeneralRowPrefixBloomFilter_rdh
/** * A method for checking Bloom filters. Called directly from StoreFileScanner in case of a * multi-column query. * * @return True if passes */private boolean passesGeneralRowPrefixBloomFilter(Scan scan) { BloomFilter bloomFilter = this.generalBloomFilter; if (bloomFilter == null) {bloomFilterMetrics.incrementEligible();return true; } byte[] row = scan.getStartRow(); byte[] rowPrefix; if (scan.isGetScan()) { rowPrefix = Bytes.copy(row, 0, Math.min(prefixLength, row.length)); } else { // For non-get scans // Find out the common prefix of startRow and stopRow. int commonLength = Bytes.findCommonPrefix(scan.getStartRow(), scan.getStopRow(), scan.getStartRow().length, scan.getStopRow().length, 0, 0); // startRow and stopRow don't have the common prefix. // Or the common prefix length is less than prefixLength if ((commonLength <= 0) || (commonLength < prefixLength)) { return true; } rowPrefix = Bytes.copy(row, 0, prefixLength); }return checkGeneralBloomFilter(rowPrefix, null, bloomFilter); }
3.26
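The prefix Bloom check above hinges on one computation: the scan's start and stop rows must share at least prefixLength leading bytes, otherwise the scan spans several prefixes and the Bloom filter cannot rule anything out. A small sketch of the shared-prefix computation (the prefixLength value is hypothetical):

public final class CommonPrefixDemo {
  /** Length of the longest shared prefix of a and b, like Bytes.findCommonPrefix above. */
  static int commonPrefixLength(byte[] a, byte[] b) {
    int n = Math.min(a.length, b.length);
    int i = 0;
    while (i < n && a[i] == b[i]) {
      i++;
    }
    return i;
  }

  public static void main(String[] args) {
    byte[] start = "user123-a".getBytes();
    byte[] stop = "user123-z".getBytes();
    int prefixLength = 7; // hypothetical Bloom prefix length configured on the store
    // Consult the Bloom filter only when the whole scan range shares the prefix.
    System.out.println(commonPrefixLength(start, stop) >= prefixLength); // true: "user123-" is shared
  }
}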
hbase_StoreFileReader_passesGeneralRowColBloomFilter_rdh
/** * A method for checking Bloom filters. Called directly from StoreFileScanner in case of a * multi-column query. * * @param cell * the cell to check if present in the Bloom filter * @return True if passes */ public boolean passesGeneralRowColBloomFilter(Cell cell) { BloomFilter bloomFilter = this.generalBloomFilter;if (bloomFilter == null) { bloomFilterMetrics.incrementEligible(); return true; } // Used in ROW_COL bloom Cell kvKey = null; // If the incoming key is already a fake rowcol key, use it as is if ((cell.getTypeByte() == Type.Maximum.getCode()) && (cell.getFamilyLength() == 0)) { kvKey = cell; } else { kvKey = PrivateCellUtil.createFirstOnRowCol(cell); } return checkGeneralBloomFilter(null, kvKey, bloomFilter); }
3.26
hbase_StoreFileReader_getRefCount_rdh
/** * Return the ref count associated with the reader whenever a scanner associated with the reader * is opened. */ int getRefCount() { return storeFileInfo.getRefCount(); }
3.26
hbase_StoreFileReader_readCompleted_rdh
/** * Indicate that the scanner has finished reading with this reader. We need to decrement the ref * count, and also, if this is not the common pread reader, we should close it. */ void readCompleted() { storeFileInfo.decreaseRefCount(); if (context.getReaderType() == ReaderType.STREAM) { try { reader.close(false); } catch (IOException e) { LOG.warn("failed to close stream reader", e);}}}
3.26
hbase_SaslAuthMethod_getSaslMechanism_rdh
/** * Returns the SASL mechanism used by this authentication method. */ public String getSaslMechanism() { return saslMech; }
3.26
hbase_SaslAuthMethod_getCode_rdh
/** * Returns the unique value to identify this authentication method among other HBase auth methods. */ public byte getCode() { return code; }
3.26
hbase_SaslAuthMethod_getName_rdh
/** * Returns the unique name to identify this authentication method among other HBase auth methods. */ public String getName() { return name; }
3.26
hbase_SaslAuthMethod_getAuthMethod_rdh
/** * Returns the Hadoop {@link AuthenticationMethod} for this method. */ public AuthenticationMethod getAuthMethod() { return method; }
3.26
hbase_MobStoreScanner_m0_rdh
/** * First reads the cells from HBase. If a cell is a reference cell (which has the * reference tag), the scanner seeks the cell in the mob file and uses the cell found in * the mob file as the result. */ @Override public boolean m0(List<Cell> outResult, ScannerContext ctx) throws IOException { boolean result = super.next(outResult, ctx); if (!rawMobScan) { // retrieve the mob data if (outResult.isEmpty()) { return result; } long mobKVCount = 0; long mobKVSize = 0; for (int i = 0; i < outResult.size(); i++) { Cell cell = outResult.get(i); if (MobUtils.isMobReferenceCell(cell)) { MobCell mobCell = mobStore.resolve(cell, cacheMobBlocks, readPt, readEmptyValueOnMobCellMiss); mobKVCount++; mobKVSize += mobCell.getCell().getValueLength(); outResult.set(i, mobCell.getCell()); // Keep the MobCell here until we ship the RPC or close the scanner. referencedMobCells.add(mobCell); } } mobStore.updateMobScanCellsCount(mobKVCount); mobStore.updateMobScanCellsSize(mobKVSize); } return result; }
3.26
hbase_HbckChore_isRunning_rdh
/** * Returns true if the chore is currently running. While it runs, the HBCK report may still * change. */ public boolean isRunning() { return running; }
3.26
hbase_HbckChore_scanForMergedParentRegions_rdh
/** * Scan hbase:meta to get set of merged parent regions, this is a very heavy scan. * * @return Return generated {@link HashSet} */ private HashSet<String> scanForMergedParentRegions() throws IOException { HashSet<String> mergedParentRegions = new HashSet<>(); // Null tablename means scan all of meta. MetaTableAccessor.scanMetaForTableRegions(this.master.getConnection(), r -> { List<RegionInfo> mergeParents = CatalogFamilyFormat.getMergeRegions(r.rawCells()); if (mergeParents != null) { for (RegionInfo mergeRegion : mergeParents) {if (mergeRegion != null) { // This region is already being merged mergedParentRegions.add(mergeRegion.getEncodedName()); } } } return true; }, null); return mergedParentRegions; }
3.26
hbase_HbckChore_getLastReport_rdh
/** * Returns the last published report, produced by the last successful execution of this chore. */public HbckReport getLastReport() { return lastReport; }
3.26
hbase_ImplType_toString_rdh
/** * Returns <code>-option</code> */ @Override public String toString() { return "-" + option; }
3.26
hbase_ClusterMetrics_getRequestCount_rdh
/** * Returns the number of requests since last report */ default long getRequestCount() { return getLiveServerMetrics().entrySet().stream().flatMap(v -> v.getValue().getRegionMetrics().values().stream()).mapToLong(RegionMetrics::getRequestCount).sum(); }
3.26
hbase_ClusterMetrics_m1_rdh
/** * Returns the number of regions deployed on the cluster */ default int m1() { return getLiveServerMetrics().entrySet().stream().mapToInt(v -> v.getValue().getRegionMetrics().size()).sum(); }
3.26
hbase_ClusterMetrics_getAverageLoad_rdh
/** * Returns the average cluster load */ default double getAverageLoad() { int serverSize = getLiveServerMetrics().size(); if (serverSize == 0) { return 0; } return ((double) (m1())) / ((double) (serverSize)); }
3.26
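getAverageLoad is regions-per-live-server with an explicit guard against dividing by zero when no servers are live. The same shape in a self-contained sketch (the map layout is hypothetical):

import java.util.List;
import java.util.Map;

public final class AverageLoadDemo {
  /** Regions per live server; 0 when there are no live servers (avoids division by zero). */
  static double averageLoad(Map<String, List<String>> regionsByServer) {
    int servers = regionsByServer.size();
    if (servers == 0) {
      return 0;
    }
    int regions = regionsByServer.values().stream().mapToInt(List::size).sum();
    return (double) regions / servers;
  }

  public static void main(String[] args) {
    System.out.println(averageLoad(Map.of("rs1", List.of("r1", "r2"), "rs2", List.of("r3")))); // 1.5
    System.out.println(averageLoad(Map.of())); // 0.0
  }
}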
hbase_ScannerModel_getCacheBlocks_rdh
/** * Returns true if HFile blocks should be cached on the servers for this scan, false otherwise */ @XmlAttribute public boolean getCacheBlocks() { return cacheBlocks; }
3.26
hbase_ScannerModel_getCaching_rdh
/** * Returns the number of rows the scanner fetches at once */ @XmlAttribute public int getCaching() { return f0; }
3.26
hbase_ScannerModel_getColumns_rdh
/** * Returns list of columns of interest in column:qualifier format, or empty for all */ @XmlElement(name = "column") public List<byte[]> getColumns() { return columns; }
3.26
hbase_ScannerModel_setStartRow_rdh
/** * * @param startRow * start row */ public void setStartRow(byte[] startRow) { this.startRow = startRow; }
3.26
hbase_ScannerModel_addColumn_rdh
/** * Add a column to the column set * * @param column * the column name, as &lt;column&gt;(:&lt;qualifier&gt;)? */ public void addColumn(byte[] column) { columns.add(column); }
3.26
hbase_ScannerModel_getEndTime_rdh
/** * Returns the upper bound on timestamps of items of interest */ @XmlAttribute public long getEndTime() { return endTime; }
3.26
hbase_ScannerModel_getJasonProvider_rdh
/** * Get the <code>JacksonJaxbJsonProvider</code> instance. * * @return A <code>JacksonJaxbJsonProvider</code>. */ private static JacksonJaxbJsonProvider getJasonProvider() { return JaxbJsonProviderHolder.INSTANCE; }
3.26
hbase_ScannerModel_setStartTime_rdh
/** * * @param startTime * the lower bound on timestamps of values of interest */ public void setStartTime(long startTime) { this.startTime = startTime; }
3.26
hbase_ScannerModel_setColumns_rdh
/** * * @param columns * list of columns of interest in column:qualifier format, or empty for all */public void setColumns(List<byte[]> columns) { this.columns = columns; }
3.26
hbase_ScannerModel_hasEndRow_rdh
/** * Returns true if an end row was specified */ public boolean hasEndRow() { return !Bytes.equals(endRow, HConstants.EMPTY_END_ROW); }
3.26
hbase_ScannerModel_setCaching_rdh
/** * * @param caching * the number of rows to fetch at once */ public void setCaching(int caching) { this.f0 = caching; }
3.26
hbase_ScannerModel_getFilter_rdh
/** * Returns the filter specification */ @XmlElement public String getFilter() { return filter; }
3.26