name (stringlengths 12-178) | code_snippet (stringlengths 8-36.5k) | score (float64 3.26-3.68) |
---|---|---|
hbase_AbstractWALRoller_walRollFinished_rdh | /**
* Returns true if all WAL rolls have finished
*/
public boolean walRollFinished() {
// TODO add a status field of roll in RollController
return wals.values().stream().noneMatch(rc -> rc.needsRoll(EnvironmentEdgeManager.currentTime())) && isWaiting();
} | 3.26 |
hbase_MunkresAssignment_starInRow_rdh | /**
* Find a starred zero in a specified row. If there are no starred zeroes in the specified row,
* then null will be returned.
*
* @param r
* the index of the row to be searched
* @return pair of row and column indices of starred zero or null
*/
private Pair<Integer, Integer> starInRow(int r) {
for (int c = 0; c < cols; c++) {
if (mask[r][c] == STAR) {
return new Pair<>(r, c);
}
}
return null;
} | 3.26 |
hbase_MunkresAssignment_updateMin_rdh | /**
* A specified row has become covered, and a specified column has become uncovered. The least
* value per row may need to be updated.
*
* @param row
* the index of the row which was just covered
* @param col
* the index of the column which was just uncovered
*/
private void updateMin(int row, int col) {
// If the row is covered we want to ignore it as far as least values go.
leastInRow[row] = Float.POSITIVE_INFINITY;
for (int r = 0; r < rows; r++) {
// Since the column has only just been uncovered, it could not have any
// pending adjustments. Only covered rows can have pending adjustments
// and covered costs do not count toward row minimums. Therefore, we do
// not need to consider rowAdjust[r] or colAdjust[col].
if ((!rowsCovered[r]) && (cost[r][col] < leastInRow[r])) {
leastInRow[r] = cost[r][col];
leastInRowIndex[r] = col;
}
}
} | 3.26 |
hbase_MunkresAssignment_stepThree_rdh | /**
* Corresponds to step 3 of the original algorithm.
*/
private void stepThree() {
// Find the minimum uncovered cost.
float min = leastInRow[0];
for (int r = 1; r < rows; r++) {
if (leastInRow[r] < min) {
min = leastInRow[r];
}
}
// Add the minimum cost to each of the costs in a covered row, or subtract
// the minimum cost from each of the costs in an uncovered column. As an
// optimization, do not actually modify the cost matrix yet, but track the
// adjustments that need to be made to each row and column.
for (int r = 0; r < rows; r++) {
if (rowsCovered[r]) {
rowAdjust[r] += min;
}
}
for (int c = 0; c < cols; c++) {
if (!colsCovered[c]) {
colAdjust[c] -= min;
}
}
// Since the cost matrix is not being updated yet, the minimum uncovered
// cost per row must be updated.
for (int r = 0; r < rows; r++) {
if (!colsCovered[leastInRowIndex[r]]) {
// The least value in this row was in an uncovered column, meaning that
// it would have had the minimum value subtracted from it, and therefore
// will still be the minimum value in that row.
leastInRow[r] -= min;
} else {
// The least value in this row was in a covered column and would not
// have had the minimum value subtracted from it, so the minimum value
// could now be in another column.
for (int c = 0; c < cols; c++) {
if (((cost[r][c] + colAdjust[c]) + rowAdjust[r]) < leastInRow[r]) {
leastInRow[r] = (cost[r][c] + colAdjust[c]) + rowAdjust[r];
leastInRowIndex[r] = c;
}
}
}
}
} | 3.26 |
hbase_MunkresAssignment_findUncoveredZero_rdh | /**
* Find a zero cost assignment which is not covered. If there are no zero cost assignments which
* are uncovered, then null will be returned.
*
* @return pair of row and column indices of an uncovered zero or null
*/
private Pair<Integer, Integer> findUncoveredZero() {
for (int r = 0; r < rows; r++) {
if (leastInRow[r] == 0) {
return new Pair<>(r, leastInRowIndex[r]);
}
}
return null;
} | 3.26 |
hbase_MunkresAssignment_solve_rdh | /**
* Get the optimal assignments. The returned array will have the same number of elements as the
* number of rows in the input cost matrix. Each element will indicate
* which column should be assigned to that row or -1 if no column should be assigned, i.e. if
* result[i] = j then row i should be assigned to column j. Subsequent invocations of this method
* will simply return the same object without additional computation.
*
* @return an array with the optimal assignments
*/
public int[] solve() {
// If this assignment problem has already been solved, return the known
// solution
if (assignments != null) {
return assignments;
}
m0();
// Find the optimal assignments.
while (!testIsDone()) {
while (!stepOne()) {
stepThree();
}
stepTwo();
}
// Extract the assignments from the mask matrix.
if (transposed) {
assignments = new int[cols];
outer : for (int c = 0; c < cols; c++) {
for (int r = 0; r < rows; r++) {
if (mask[r][c] == STAR) {
assignments[c] = r;
continue outer;
}
}
// There is no assignment for this row of the input/output.
assignments[c] = -1;
}
} else {
assignments = new int[rows];
outer : for (int r = 0; r < rows; r++) {
for (int c = 0; c < cols; c++) {
if (mask[r][c] == STAR) {
assignments[r] = c;
continue outer;
}
}
}
}
// Once the solution has been computed, there is no need to keep any of the
// other internal structures. Clear all unnecessary internal references so
// the garbage collector may reclaim that memory.
cost = null;
mask = null;
rowsCovered = null;
colsCovered = null;
path = null;
leastInRow = null;
leastInRowIndex = null;
rowAdjust = null;
colAdjust = null;
return assignments;
} | 3.26 |
hbase_MunkresAssignment_m0_rdh | /**
* Corresponds to the "preliminaries" step of the original algorithm. Guarantees that the matrix
* is an equivalent non-negative matrix with at least one zero in each row.
*/
private void m0() {
for (int r = 0; r < rows; r++) {
// Find the minimum cost of each row.
float min = Float.POSITIVE_INFINITY;
for (int c = 0; c < cols; c++) {
min = Math.min(min, cost[r][c]);
}
// Subtract that minimum cost from each element in the row.
for (int c = 0; c < cols; c++) {
cost[r][c] -= min;
// If the element is now zero and there are no zeroes in the same row
// or column which are already starred, then star this one. There
// must be at least one zero because of subtracting the min cost.
if (((cost[r][c] == 0) && (!rowsCovered[r])) && (!colsCovered[c])) {
mask[r][c] = STAR;
// Cover this row and column so that no other zeroes in them can be
// starred.
rowsCovered[r] = true;
colsCovered[c] = true;
}
}
}
// Clear the covered rows and columns.
Arrays.fill(rowsCovered, false);
Arrays.fill(colsCovered, false);
} | 3.26 |
hbase_MunkresAssignment_starInCol_rdh | /**
* Find a starred zero in the specified column. If there are no starred zeroes in the specified
* column, then null will be returned.
*
* @param c
* the index of the column to be searched
* @return pair of row and column indices of starred zero or null
*/
private Pair<Integer, Integer> starInCol(int c) {
for (int r = 0; r < rows; r++) {
if (mask[r][c] == STAR) {
return new Pair<>(r, c);
}
}
return null;
} | 3.26 |
hbase_MunkresAssignment_testIsDone_rdh | /**
* Test whether the algorithm is done, i.e. we have the optimal assignment. This occurs when there
* is exactly one starred zero in each row.
*
* @return true if the algorithm is done
*/
private boolean testIsDone() {
// Cover all columns containing a starred zero. There can be at most one
// starred zero per column. Therefore, a covered column has an optimal
// assignment.
for (int r = 0; r < rows; r++) {
for (int c = 0; c < cols; c++) {
if (mask[r][c] == STAR) {
colsCovered[c] = true;
}
}
}
// Count the total number of covered columns.
int coveredCols = 0;
for (int c = 0; c < cols; c++) {
coveredCols += (colsCovered[c]) ? 1 : 0;
}
// Apply any row and column adjustments that are pending.
for (int r = 0; r < rows; r++) {
for (int c = 0; c < cols; c++) {
cost[r][c] += rowAdjust[r];
cost[r][c] += colAdjust[c];
}
}
// Clear the pending row and column adjustments.
Arrays.fill(rowAdjust, 0);
Arrays.fill(colAdjust, 0);
// The covers on columns and rows may have been reset, recompute the least
// value for each row.
for (int r = 0; r < rows; r++) {
leastInRow[r] = Float.POSITIVE_INFINITY;
for (int c = 0; c < cols; c++) {
if (((!rowsCovered[r]) && (!colsCovered[c])) && (cost[r][c] < leastInRow[r])) {
leastInRow[r] = cost[r][c];
leastInRowIndex[r] = c;
}
}
}
// If all columns are covered, then we are done. Since there may be more
// columns than rows, we are also done if the number of covered columns is
// at least as great as the number of rows.
return (coveredCols == cols) || (coveredCols >= rows);
} | 3.26 |
hbase_MunkresAssignment_stepTwo_rdh | /**
* Corresponds to step 2 of the original algorithm.
*/
private void stepTwo() {
// Construct a path of alternating starred zeroes and primed zeroes, where
// each starred zero is in the same column as the previous primed zero, and
// each primed zero is in the same row as the previous starred zero. The
// path will always end in a primed zero.
while (true) {
Pair<Integer, Integer> star = starInCol(path.getLast().getSecond());
if (star != null) {
path.offerLast(star);
} else {
break;
}
Pair<Integer, Integer> prime = primeInRow(path.getLast().getFirst());
path.offerLast(prime);
}
// Augment path - unmask all starred zeroes and star all primed zeroes. All
// nodes in the path will be either starred or primed zeroes. The set of
// starred zeroes is independent and now one larger than before.
for (Pair<Integer, Integer> p : path) {
if (mask[p.getFirst()][p.getSecond()] == STAR) {
mask[p.getFirst()][p.getSecond()] = NONE;
} else {
mask[p.getFirst()][p.getSecond()] = STAR;
}
}
// Clear all covers from rows and columns.
Arrays.fill(rowsCovered, false);
Arrays.fill(colsCovered, false);
// Remove the prime mask from all primed zeroes.
for (int r = 0; r < rows; r++) {
for (int c = 0; c < cols; c++) {
if (mask[r][c] == PRIME) {
mask[r][c] = NONE;
}
}
}
} | 3.26 |
hbase_MunkresAssignment_stepOne_rdh | /**
* Corresponds to step 1 of the original algorithm.
*
* @return false if all zeroes are covered
*/
private boolean stepOne() {
while (true) {
Pair<Integer, Integer> zero = findUncoveredZero();
if (zero == null) {
// No uncovered zeroes, need to manipulate the cost matrix in step
// three.
return false;
} else {
// Prime the uncovered zero and find a starred zero in the same row.
mask[zero.getFirst()][zero.getSecond()] = PRIME;
Pair<Integer, Integer> v22 = starInRow(zero.getFirst());
if (v22 != null) {
// Cover the row with both the newly primed zero and the starred zero.
// Since this is the only place where zeroes are primed, and we cover
// it here, and rows are only uncovered when primes are erased, then
// there can be at most one primed uncovered zero.
rowsCovered[v22.getFirst()] = true;
colsCovered[v22.getSecond()] = false;
updateMin(v22.getFirst(), v22.getSecond());
} else {
// Will go to step two after, where a path will be constructed,
// starting from the uncovered primed zero (there is only one). Since
// we have already found it, save it as the first node in the path.
path.clear();
path.offerLast(new Pair<>(zero.getFirst(), zero.getSecond()));
return true;
}
}
}
} | 3.26 |
hbase_BooleanStateStore_set_rdh | /**
* Set the flag on/off.
*
* @param on
* true if the flag should be on, false otherwise
* @throws IOException
* if the operation fails
* @return the previous state
*/
public synchronized boolean set(boolean on) throws IOException {
byte[] state = toByteArray(on);
setState(state);
boolean prevOn = this.on;
this.on = on;
return prevOn;
} | 3.26 |
hbase_BooleanStateStore_get_rdh | /**
* Returns true if the flag is on, otherwise false
*/
public boolean get() {
return on;
} | 3.26 |
hbase_OperationStatus_m0_rdh | /**
*/
public OperationStatusCode m0() {
return code;
} | 3.26 |
hbase_OperationStatus_getExceptionMsg_rdh | /**
*/
public String getExceptionMsg() {
return exceptionMsg;
} | 3.26 |
hbase_WALSplitter_splitWAL_rdh | /**
* WAL splitting implementation, splits one WAL file.
*
* @param walStatus
* should be for an actual WAL file.
*/
SplitWALResult splitWAL(FileStatus walStatus, CancelableProgressable cancel) throws IOException {
Path wal = walStatus.getPath();
Preconditions.checkArgument(walStatus.isFile(), "Not a regular file " + wal.toString());
boolean corrupt = false;
int interval = conf.getInt("hbase.splitlog.report.interval.loglines", 1024);
boolean outputSinkStarted = false;
boolean cancelled = false;
int editsCount = 0;
int editsSkipped = 0;
MonitoredTask status = TaskMonitor.get().createStatus(("Splitting " + wal) + " to temporary staging area.", false, true);
WALStreamReader walReader = null;
this.fileBeingSplit = walStatus;
long startTS = EnvironmentEdgeManager.currentTime();
long length = walStatus.getLen();
String lengthStr = StringUtils.humanSize(length);
createOutputSinkAndEntryBuffers();
try {
String logStr = ((((("Splitting " + wal) + ", size=") + lengthStr) + " (") + length) + "bytes)";
f0.info(logStr);
status.setStatus(logStr);
if ((cancel != null) && (!cancel.progress())) {
cancelled = true;
return new SplitWALResult(false, corrupt);
}
walReader = getReader(walStatus, this.f3, cancel);
if (walReader == null) {
f0.warn("Nothing in {}; empty?", wal);
return new SplitWALResult(true, corrupt);
}
f0.info("Open {} took {}ms", wal, EnvironmentEdgeManager.currentTime() - startTS);
int numOpenedFilesBeforeReporting = conf.getInt("hbase.splitlog.report.openedfiles", 3);
int numOpenedFilesLastCheck = 0;
outputSink.setReporter(cancel);
outputSink.setStatus(status);
outputSink.startWriterThreads();
outputSinkStarted = true;
Entry entry;
startTS = EnvironmentEdgeManager.currentTime();
while ((entry = getNextLogLine(walReader, wal, this.f3)) != null) {
if (WALEdit.isReplicationMarkerEdit(entry.getEdit())) {
// Skip processing the replication marker edits.
if (f0.isDebugEnabled()) {
f0.debug("Ignoring Replication marker edits.");
}
continue;
}
byte[] v28 = entry.getKey().getEncodedRegionName();
String encodedRegionNameAsStr = Bytes.toString(v28);
Long lastFlushedSequenceId = lastFlushedSequenceIds.get(encodedRegionNameAsStr);
if (lastFlushedSequenceId == null) {
if (!isRegionDirPresentUnderRoot(entry.getKey().getTableName(), encodedRegionNameAsStr)) {
// The region directory itself is not present in the FS. This indicates that
// the region/table is already removed. We can just skip all the edits for this
// region. Setting lastFlushedSequenceId as Long.MAX_VALUE so that all edits
// will get skipped by the seqId check below.
// See more details at https://issues.apache.org/jira/browse/HBASE-24189
f0.info("{} no longer in filesystem; skipping all edits.", encodedRegionNameAsStr);
lastFlushedSequenceId = Long.MAX_VALUE;
} else {
if (sequenceIdChecker != null) {
RegionStoreSequenceIds ids = sequenceIdChecker.getLastSequenceId(v28);
Map<byte[], Long> maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR);
for (StoreSequenceId storeSeqId : ids.getStoreSequenceIdList()) {
maxSeqIdInStores.put(storeSeqId.getFamilyName().toByteArray(), storeSeqId.getSequenceId());
}
regionMaxSeqIdInStores.put(encodedRegionNameAsStr, maxSeqIdInStores);
lastFlushedSequenceId = ids.getLastFlushedSequenceId();
if (f0.isDebugEnabled()) {
f0.debug((("Last flushed sequenceid for " +
encodedRegionNameAsStr) + ": ") + TextFormat.shortDebugString(ids));
}
}
if (lastFlushedSequenceId == null) {
lastFlushedSequenceId = -1L;
}
}
lastFlushedSequenceIds.put(encodedRegionNameAsStr, lastFlushedSequenceId);
}
editsCount++;
if (lastFlushedSequenceId >= entry.getKey().getSequenceId()) {
editsSkipped++;
continue;
}
// Don't send Compaction/Close/Open region events to recovered edit type sinks.
if (entry.getEdit().isMetaEdit() && (!outputSink.keepRegionEvent(entry))) {
editsSkipped++;
continue;
}
entryBuffers.appendEntry(entry);
int moreWritersFromLastCheck = this.getNumOpenWriters() - numOpenedFilesLastCheck;
// If sufficient edits have passed, check if we should report progress.
if (((editsCount % interval) == 0) || (moreWritersFromLastCheck > numOpenedFilesBeforeReporting)) {
numOpenedFilesLastCheck = this.getNumOpenWriters();
String countsStr = (((editsCount - (editsSkipped + outputSink.getTotalSkippedEdits())) + " edits, skipped ") + editsSkipped) + " edits.";
status.setStatus("Split " + countsStr);
if ((cancel != null) && (!cancel.progress())) {
cancelled = true;
return new SplitWALResult(false, corrupt);
}
}
}
} catch (InterruptedException ie) {
IOException iie = new InterruptedIOException();
iie.initCause(ie);
throw iie;
} catch (CorruptedLogFileException e) {
f0.warn("Could not parse, corrupt WAL={}", wal, e);
// If splitLogWorkerCoordination, then its old-school zk-coordinated splitting so update
// zk. Otherwise, it is the newer procedure-based WAL split which has no zk component.
if (this.splitLogWorkerCoordination != null) {
// Some tests pass in a csm of null.
splitLogWorkerCoordination.markCorrupted(f1, wal.getName(), walFS);
}
corrupt = true;
} catch (IOException e) {
e = (e instanceof RemoteException) ? ((RemoteException) (e)).unwrapRemoteException() : e;
throw e;
} finally {
final String log = ("Finishing writing output for " + wal) + " so closing down";
f0.debug(log);
status.setStatus(log);
if (null != walReader) {
walReader.close();
}
try {
if (outputSinkStarted) {
// Set cancelled to true as the immediate following statement will reset its value.
// If close() throws an exception, cancelled will have the right value
cancelled = true;
cancelled = outputSink.close() == null;
}
} finally {
long processCost = EnvironmentEdgeManager.currentTime() - startTS;
// See if length got updated post lease recovery
String v39 = (((((((((((((((("Processed " + editsCount) + " edits across ") + outputSink.getNumberOfRecoveredRegions()) + " Regions in ") + processCost) + " ms; skipped=") + editsSkipped) + "; WAL=") + wal) + ", size=") + lengthStr) + ", length=") + length) + ", corrupted=") + corrupt) + ", cancelled=") + cancelled;
f0.info(v39);
status.markComplete(v39);
if (f0.isDebugEnabled()) {
f0.debug("Completed split of {}, journal: {}", wal, status.prettyPrintJournal());
}
}
}
return new SplitWALResult(!cancelled, corrupt);
} | 3.26 |
hbase_WALSplitter_createOutputSinkAndEntryBuffers_rdh | /**
* Setup the output sinks and entry buffers ahead of splitting WAL.
*/
private void createOutputSinkAndEntryBuffers() {
PipelineController controller = new PipelineController();
if (this.hfile) {
this.entryBuffers = new BoundedEntryBuffers(controller, this.bufferSize);
this.outputSink = new BoundedRecoveredHFilesOutputSink(this, controller, this.entryBuffers, this.numWriterThreads);
} else if (this.splitWriterCreationBounded) {
this.entryBuffers = new BoundedEntryBuffers(controller, this.bufferSize);
this.outputSink = new BoundedRecoveredEditsOutputSink(this, controller, this.entryBuffers, this.numWriterThreads);
} else {
this.entryBuffers = new EntryBuffers(controller, this.bufferSize);
this.outputSink = new RecoveredEditsOutputSink(this, controller, this.entryBuffers, this.numWriterThreads);
}
} | 3.26 |
hbase_WALSplitter_getReader_rdh | /**
* Create a new {@link WALStreamReader} for reading logs to split.
*
* @return new Reader instance, caller should close
*/
private WALStreamReader getReader(Path curLogFile, CancelableProgressable reporter) throws IOException {
return walFactory.createStreamReader(walFS, curLogFile, reporter);
} | 3.26 |
hbase_WALSplitter_getNumOpenWriters_rdh | /**
* Get current open writers
*/
private int getNumOpenWriters() {
int result = 0;
if (this.outputSink != null) {
result += this.outputSink.getNumOpenWriters();
}
return result;
} | 3.26 |
hbase_WALSplitter_split_rdh | /**
* Split a folder of WAL files. Delete the directory when done. Used by tools and unit tests. It
* should be package private. It is public only because TestWALObserver is in a different package,
* which uses this method to do log splitting.
*
* @return List of output files created by the split.
*/
public static List<Path> split(Path walRootDir, Path walsDir, Path archiveDir, FileSystem walFS, Configuration conf, final WALFactory factory) throws IOException {
Path rootDir = CommonFSUtils.getRootDir(conf);
FileSystem rootFS = rootDir.getFileSystem(conf);
WALSplitter splitter = new WALSplitter(factory, conf, walRootDir, walFS, rootDir, rootFS);
final List<FileStatus> wals = SplitLogManager.getFileList(conf, Collections.singletonList(walsDir), null);
List<Path> splits = new ArrayList<>();
if (!wals.isEmpty()) {
for (FileStatus wal : wals) {
SplitWALResult splitWALResult = splitter.splitWAL(wal, null);
if (splitWALResult.isFinished()) {
WALSplitUtil.archive(wal.getPath(), splitWALResult.isCorrupt(), archiveDir, walFS, conf);
// splitter.outputSink.splits is marked as final, so no null check is needed
splits.addAll(splitter.outputSink.splits);
}
}
}
if (!walFS.delete(walsDir, true)) {
throw new IOException("Unable to delete src dir " + walsDir);
}
return splits;
} | 3.26 |
hbase_WALSplitter_splitLogFile_rdh | /**
* Splits a WAL file. Used by old {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker} and
* tests. Not used by new procedure-based WAL splitter.
*
* @return false if it is interrupted by the progress-able.
*/
public static boolean splitLogFile(Path walDir, FileStatus logfile, FileSystem walFS, Configuration conf, CancelableProgressable reporter, LastSequenceId idChecker, SplitLogWorkerCoordination splitLogWorkerCoordination, WALFactory factory, RegionServerServices rsServices) throws IOException {
Path rootDir = CommonFSUtils.getRootDir(conf);
FileSystem rootFS = rootDir.getFileSystem(conf);
WALSplitter splitter = new WALSplitter(factory, conf, walDir, walFS, rootDir, rootFS, idChecker, splitLogWorkerCoordination, rsServices);
// splitWAL returns a data structure with whether split is finished and if the file is corrupt.
// We don't need to propagate corruption flag here because it is propagated by the
// SplitLogWorkerCoordination.
return splitter.splitWAL(logfile, reporter).isFinished();
} | 3.26 |
hbase_WALSplitter_createWriter_rdh | /**
* Create a new {@link WALProvider.Writer} for writing log splits.
*
* @return a new Writer instance, caller should close
*/
protected Writer createWriter(Path logfile) throws IOException {
return walFactory.createRecoveredEditsWriter(walFS, logfile);
} | 3.26 |
hbase_WALSplitter_checkForErrors_rdh | /**
* Check for errors in the writer threads. If any is found, rethrow it.
*/
void checkForErrors() throws IOException {
Throwable thrown = this.thrown.get();
if (thrown == null) {
return;
}
if (thrown instanceof IOException) {
throw new IOException(thrown);
} else {
throw new RuntimeException(thrown);
}
} | 3.26 |
hbase_ThriftHBaseServiceHandler_removeScanner_rdh | /**
* Removes the scanner associated with the specified ID from the internal HashMap.
*
* @param id
* of the Scanner to remove
*/
protected void removeScanner(int id) {
scannerMap.invalidate(id);
} | 3.26 |
hbase_ThriftHBaseServiceHandler_addScanner_rdh | /**
* Assigns a unique ID to the scanner and adds the mapping to an internal HashMap.
*
* @param scanner
* to add
* @return Id for this Scanner
*/
private int addScanner(ResultScanner scanner) {
int id = nextScannerId.getAndIncrement();
scannerMap.put(id, scanner);
return id;
} | 3.26 |
hbase_ThriftHBaseServiceHandler_getScanner_rdh | /**
* Returns the Scanner associated with the specified Id.
*
* @param id
* of the Scanner to get
* @return a Scanner, or null if the Id is invalid
*/
private ResultScanner getScanner(int id) {
return scannerMap.getIfPresent(id);
} | 3.26 |
hbase_RawByte_m0_rdh | /**
* Write instance {@code val} into buffer {@code buff}.
*/
public int m0(byte[] buff, int offset, byte val) {
return Bytes.putByte(buff, offset, val);
} | 3.26 |
hbase_RawByte_decodeByte_rdh | /**
* Read a {@code byte} value from the buffer {@code buff}.
*/
public byte decodeByte(byte[] buff, int offset) {
return buff[offset];
} | 3.26 |
hbase_ClusterMetricsBuilder_toOption_rdh | /**
* Convert ClusterMetrics.Option to ClusterStatusProtos.Option
*
* @param option
* a ClusterMetrics.Option
* @return converted ClusterStatusProtos.Option
*/
public static Option toOption(ClusterMetrics.Option option) {
switch (option) {
case HBASE_VERSION :
return Option.HBASE_VERSION;
case LIVE_SERVERS :
return Option.LIVE_SERVERS;
case DEAD_SERVERS :
return Option.DEAD_SERVERS;
case UNKNOWN_SERVERS :
return Option.UNKNOWN_SERVERS;
case REGIONS_IN_TRANSITION :
return Option.REGIONS_IN_TRANSITION;
case CLUSTER_ID :
return Option.CLUSTER_ID;
case MASTER_COPROCESSORS :
return Option.MASTER_COPROCESSORS;
case MASTER :
return Option.MASTER;
case BACKUP_MASTERS :
return Option.BACKUP_MASTERS;
case BALANCER_ON :
return Option.BALANCER_ON;
case SERVERS_NAME :
return Option.SERVERS_NAME;
case MASTER_INFO_PORT :
return Option.MASTER_INFO_PORT;
case TABLE_TO_REGIONS_COUNT :
return Option.TABLE_TO_REGIONS_COUNT;
case TASKS :
return Option.TASKS;
case DECOMMISSIONED_SERVERS :
return Option.DECOMMISSIONED_SERVERS;
// should not reach here
default :
throw new IllegalArgumentException("Invalid option: " + option);
}
} | 3.26 |
hbase_ClusterMetricsBuilder_toOptions_rdh | /**
* Convert an enum set of ClusterMetrics.Option to a list of ClusterStatusProtos.Option
*
* @param options
* the ClusterMetrics options
* @return a list of ClusterStatusProtos.Option
*/
public static List<ClusterStatusProtos.Option> toOptions(EnumSet<ClusterMetrics.Option> options) {
return options.stream().map(ClusterMetricsBuilder::toOption).collect(Collectors.toList());
} | 3.26 |
hbase_Strings_domainNamePointerToHostName_rdh | /**
* Given a PTR string generated via reverse DNS lookup, return everything except the trailing
* period. For example, given host.example.com., return host.example.com
*
* @param dnPtr
* a domain name pointer (PTR) string.
* @return Sanitized hostname with last period stripped off.
*/
public static String domainNamePointerToHostName(String dnPtr) {
if (dnPtr == null) {
return null;
}
return dnPtr.endsWith(".") ? dnPtr.substring(0, dnPtr.length() - 1) : dnPtr;
} | 3.26 |
hbase_Strings_appendKeyValue_rdh | /**
* Append to a StringBuilder a key/value, using the given separators.
*
* @param sb
* StringBuilder to use
* @param key
* Key to append.
* @param value
* Value to append.
* @param separator
* Value to use between key and value.
* @param keyValueSeparator
* Value to use between key/value sets.
* @return Passed <code>sb</code> populated with key/value.
*/
public static StringBuilder appendKeyValue(final StringBuilder sb, final String key, final Object value, final String separator, final String keyValueSeparator) {
if (sb.length() > 0) {
sb.append(keyValueSeparator);
}
return sb.append(key).append(separator).append(value);
} | 3.26 |
hbase_Strings_padFront_rdh | /**
* Push the input string to the right by prepending a padding character, usually a space.
*
* @param input
* the string to pad
* @param padding
* the character to repeat to the left of the input string
* @param length
* the desired total length including the padding
* @return padding characters + input
*/
public static String padFront(String input, char padding, int length) {
if (input.length() > length) {
throw new IllegalArgumentException((("input \"" + input) + "\" longer than maxLength=") + length);
}
int numPaddingCharacters = length - input.length();
return StringUtils.repeat(padding, numPaddingCharacters) + input;
} | 3.26 |
hbase_MoveWithAck_isSuccessfulScan_rdh | /**
* Tries to scan a row from passed region
*/
private void isSuccessfulScan(RegionInfo region) throws IOException {
Scan scan = new Scan().withStartRow(region.getStartKey()).setRaw(true).setOneRowLimit().setMaxResultSize(1L).setCaching(1).setFilter(new FirstKeyOnlyFilter()).setCacheBlocks(false);
try (Table table = f0.getTable(region.getTable());ResultScanner scanner = table.getScanner(scan)) {
scanner.next();
} catch (IOException e) {
LOG.error("Could not scan region: {}", region.getEncodedName(), e);
throw e;
}
} | 3.26 |
hbase_MoveWithAck_isSameServer_rdh | /**
* Returns true if passed region is still on serverName when we look at hbase:meta.
*
* @return true if region is hosted on serverName otherwise false
*/
private boolean isSameServer(RegionInfo region, ServerName serverName) throws IOException {
ServerName serverForRegion = getServerNameForRegion(region, admin, f0);
return (serverForRegion != null) && serverForRegion.equals(serverName);
} | 3.26 |
hbase_MoveWithAck_getServerNameForRegion_rdh | /**
* Get the servername that is up in hbase:meta hosting the given region; this is hostname + port +
* startcode, comma-delimited. Can return null.
*
* @return regionServer hosting the given region
*/
static ServerName getServerNameForRegion(RegionInfo region, Admin admin, Connection conn) throws IOException {
if (!admin.isTableEnabled(region.getTable())) {
return null;
}
HRegionLocation loc = conn.getRegionLocator(region.getTable()).getRegionLocation(region.getStartKey(), region.getReplicaId(), true);
if (loc != null) {
return loc.getServerName();
} else {
return null;
}
} | 3.26 |
hbase_PersistentIOEngine_getFileSize_rdh | /**
* Use the Linux du command to get the file's real size.
*
* @param filePath
* the file
* @return file's real size
* @throws IOException
* if something goes wrong, e.g. the file does not exist
*/
private static long getFileSize(String filePath) throws IOException {
DU.setExecCommand(filePath);
DU.execute();
String size = DU.getOutput().split("\t")[0];
return StringUtils.isEmpty(size.trim()) ? 0 : Long.parseLong(size);
} | 3.26 |
hbase_PersistentIOEngine_calculateChecksum_rdh | /**
* Use a message digest algorithm to calculate a checksum; the default algorithm is MD5.
*
* @return the checksum which is convert to HexString
* @throws IOException
* something happened like file not exists
* @throws NoSuchAlgorithmException
* no such algorithm
*/
protected byte[] calculateChecksum(String algorithm) {
try {
StringBuilder sb = new StringBuilder();
for (String filePath : filePaths) {
File file = new File(filePath);
sb.append(filePath);
sb.append(getFileSize(filePath));
sb.append(file.lastModified());
}
MessageDigest messageDigest = MessageDigest.getInstance(algorithm);
messageDigest.update(Bytes.toBytes(sb.toString()));
return messageDigest.digest();
} catch (IOException ioex) {
LOG.error("Calculating checksum failed, because of ", ioex);
return new byte[0];
} catch (NoSuchAlgorithmException e) {
LOG.error(("No such algorithm : " + algorithm) + "!");
return new byte[0];
}
} | 3.26 |
hbase_PersistentIOEngine_verifyFileIntegrity_rdh | /**
* Verify the cache files' integrity.
*
* @param algorithm
* the checksum algorithm to use
*/
protected void verifyFileIntegrity(byte[] persistentChecksum, String algorithm) throws IOException {
byte[] calculateChecksum = calculateChecksum(algorithm);
if (!Bytes.equals(persistentChecksum, calculateChecksum)) {
throw new IOException((("Mismatch of checksum! The persistent checksum is "
+ Bytes.toString(persistentChecksum)) + ", but the calculate checksum is ") + Bytes.toString(calculateChecksum));
}
} | 3.26 |
hbase_RingBufferEnvelope_load_rdh | /**
* Load the Envelope with {@link RpcCall}
*
* @param namedQueuePayload
* all details of rpc call that would be useful for ring buffer consumers
*/
public void load(NamedQueuePayload namedQueuePayload) {
this.namedQueuePayload = namedQueuePayload;
} | 3.26 |
hbase_RingBufferEnvelope_getPayload_rdh | /**
* Retrieve current namedQueue payload {@link NamedQueuePayload} available on Envelope and free up
* the Envelope
*
* @return Retrieve rpc log details
*/
public NamedQueuePayload getPayload() {
final NamedQueuePayload v0 = this.namedQueuePayload;
this.namedQueuePayload = null;
return v0;
} | 3.26 |
hbase_MetricsConnection_updateTableMetric_rdh | /**
* Report table rpc context to metrics system.
*/
private void updateTableMetric(String methodName, TableName tableName, CallStats stats, Throwable e) {
if (tableMetricsEnabled) {
if (methodName != null) {
String table = ((tableName != null) && StringUtils.isNotEmpty(tableName.getNameAsString())) ?
tableName.getNameAsString() : "unknown";
String metricKey = (methodName + "_") + table;
// update table rpc context to metrics system,
// includes rpc call duration, rpc call request/response size(bytes).
updateRpcGeneric(metricKey, stats);
if (e != null) {
// rpc failure call counter with table name.
getMetric(f0 + metricKey, rpcCounters, counterFactory).inc();
}
}
}
} | 3.26 |
hbase_MetricsConnection_decrConnectionCount_rdh | /**
* Decrement the connection count of the metrics within a scope
*/
private void decrConnectionCount() {
connectionCount.dec();
} | 3.26 |
hbase_MetricsConnection_updateRpc_rdh | /**
* Report RPC context to metrics system.
*/
public void updateRpc(MethodDescriptor method, TableName tableName, Message param, CallStats stats, Throwable e) {
int callsPerServer = stats.getConcurrentCallsPerServer();
if (callsPerServer > 0) {
concurrentCallsPerServerHist.update(callsPerServer);
}
// Update the counter that tracks RPCs by type.
StringBuilder methodName = new StringBuilder();
methodName.append(method.getService().getName()).append("_").append(method.getName());
// Distinguish mutate types.
if ("Mutate".equals(method.getName())) {
final MutationType type = ((MutateRequest) (param)).getMutation().getMutateType();
switch (type) {
case APPEND :
methodName.append("(Append)");
break;
case DELETE :
methodName.append("(Delete)");
break;
case INCREMENT :
methodName.append("(Increment)");
break;
case PUT :
methodName.append("(Put)");
break;
default :
methodName.append("(Unknown)");
}
}
getMetric(CNT_BASE + methodName, rpcCounters, counterFactory).inc();
if (e != null) {
getMetric(f0 + methodName, rpcCounters, counterFactory).inc();
getMetric(TOTAL_EXCEPTION_CNT, rpcCounters, counterFactory).inc();
if (e instanceof RemoteException) {
String fullClassName = ((RemoteException) (e)).getClassName();
String simpleClassName = (fullClassName != null) ? fullClassName.substring(fullClassName.lastIndexOf(".") + 1) : "unknown";
getMetric(REMOTE_EXCEPTION_CNT_BASE + simpleClassName, rpcCounters, counterFactory).inc();
} else {
getMetric(LOCAL_EXCEPTION_CNT_BASE + e.getClass().getSimpleName(), rpcCounters, counterFactory).inc();
}
}
// this implementation is tied directly to protobuf implementation details. would be better
// if we could dispatch based on something static, ie, request Message type.
if (method.getService() == ClientService.getDescriptor()) {
switch (method.getIndex()) {
case 0 :
assert "Get".equals(method.getName());
getTracker.updateRpc(stats);
updateTableMetric(methodName.toString(), tableName, stats, e);
return;
case 1 :
assert "Mutate".equals(method.getName());
final MutationType mutationType = ((MutateRequest) (param)).getMutation().getMutateType();
switch (mutationType) {
case APPEND :
appendTracker.updateRpc(stats);
break;
case DELETE :
deleteTracker.updateRpc(stats);
break;
case INCREMENT :
incrementTracker.updateRpc(stats);
break;
case PUT :
putTracker.updateRpc(stats);
break;
default :
throw new RuntimeException("Unrecognized mutation type " + mutationType);
}
updateTableMetric(methodName.toString(), tableName, stats, e);
return;
case 2 :
assert "Scan".equals(method.getName());
scanTracker.updateRpc(stats);
updateTableMetric(methodName.toString(), tableName, stats, e);
return;
case 3 :
assert "BulkLoadHFile".equals(method.getName());
// use generic implementation
break;
case 4 :
assert "PrepareBulkLoad".equals(method.getName());
// use generic implementation
break;
case 5 :
assert "CleanupBulkLoad".equals(method.getName());
// use generic implementation
break;
case 6 :
assert "ExecService".equals(method.getName());
// use generic implementation
break;
case 7 :
assert "ExecRegionServerService".equals(method.getName());
// use generic implementation
break;
case 8 :
assert "Multi".equals(method.getName());
numActionsPerServerHist.update(stats.getNumActionsPerServer());
multiTracker.updateRpc(stats);
updateTableMetric(methodName.toString(), tableName, stats, e);
return;
default :
throw new RuntimeException("Unrecognized ClientService RPC type " + method.getFullName());
}
}
// Fallback to dynamic registry lookup for DDL methods.
updateRpcGeneric(methodName.toString(), stats);
} | 3.26 |
hbase_MetricsConnection_getMetricScope_rdh | /**
* scope of the metrics object
*/
public String getMetricScope() {
return scope;
} | 3.26 |
hbase_MetricsConnection_getRpcHistograms_rdh | /**
* rpcHistograms metric
*/
public ConcurrentMap<String, Histogram> getRpcHistograms() {
return rpcHistograms;
} | 3.26 |
hbase_MetricsConnection_getConnectionCount_rdh | /**
* Return the connection count of the metrics within a scope
*/
public long getConnectionCount() {
return connectionCount.getCount();
} | 3.26 |
hbase_MetricsConnection_getNumActionsPerServerHist_rdh | /**
* numActionsPerServerHist metric
*/
public Histogram getNumActionsPerServerHist() {
return numActionsPerServerHist;
} | 3.26 |
hbase_MetricsConnection_addThreadPools_rdh | /**
* Add thread pools of additional connections to the metrics
*/
private void addThreadPools(Supplier<ThreadPoolExecutor> batchPool, Supplier<ThreadPoolExecutor> metaPool) {
batchPools.add(batchPool);
metaPools.add(metaPool);
} | 3.26 |
hbase_MetricsConnection_getMetric_rdh | /**
* Get a metric for {@code key} from {@code map}, or create it with {@code factory}.
*/
private <T> T getMetric(String key, ConcurrentMap<String, T> map, NewMetric<T> factory) {
return computeIfAbsent(map, key, () -> factory.newMetric(getClass(), key, scope));
} | 3.26 |
hbase_MetricsConnection_getScope_rdh | /**
* Returns the scope for a MetricsConnection based on the configured {@link #METRICS_SCOPE_KEY} or
* by generating a default from the passed clusterId and connectionObj's hashCode.
*
* @param conf
* configuration for the connection
* @param clusterId
* clusterId for the connection
* @param connectionObj
* either a Connection or AsyncConnectionImpl, the instance creating this
* MetricsConnection.
*/
static String getScope(Configuration conf, String clusterId, Object connectionObj) {
return conf.get(METRICS_SCOPE_KEY, (clusterId + "@") + Integer.toHexString(connectionObj.hashCode()));
} | 3.26 |
hbase_MetricsConnection_getRunnerStats_rdh | /**
* runnerStats metric
*/
public RunnerStats getRunnerStats() {
return runnerStats;
} | 3.26 |
hbase_MetricsConnection_getMultiTracker_rdh | /**
* multiTracker metric
*/
public CallTracker getMultiTracker() {
return multiTracker;
} | 3.26 |
hbase_MetricsConnection_incrNormalRunners_rdh | /**
* Increment the number of normal runner counts.
*/
public void incrNormalRunners() {
this.runnerStats.incrNormalRunners();
} | 3.26 |
hbase_MetricsConnection_getRpcTimers_rdh | /**
* rpcTimers metric
*/
public ConcurrentMap<String, Timer> getRpcTimers() {
return rpcTimers;
} | 3.26 |
hbase_MetricsConnection_getRpcCounters_rdh | /**
* rpcCounters metric
*/
public ConcurrentMap<String, Counter> getRpcCounters() {
return rpcCounters;
} | 3.26 |
hbase_MetricsConnection_getPutTracker_rdh | /**
* putTracker metric
*/
public CallTracker getPutTracker() {
return putTracker;
} | 3.26 |
hbase_MetricsConnection_getScanTracker_rdh | /**
* scanTracker metric
*/
public CallTracker getScanTracker() {
return scanTracker;
} | 3.26 |
hbase_MetricsConnection_getGetTracker_rdh | /**
* getTracker metric
*/
public CallTracker getGetTracker() {
return getTracker;
} | 3.26 |
hbase_MetricsConnection_getAppendTracker_rdh | /**
* appendTracker metric
*/
public CallTracker getAppendTracker() {
return appendTracker;
} | 3.26 |
hbase_MetricsConnection_incrementServerOverloadedBackoffTime_rdh | /**
* Update the overloaded backoff time.
*/
public void incrementServerOverloadedBackoffTime(long time, TimeUnit timeUnit) {
overloadedBackoffTimer.update(time, timeUnit);
} | 3.26 |
hbase_MetricsConnection_getMetaCacheNumClearRegion_rdh | /**
* metaCacheNumClearRegion metric
*/
public Counter getMetaCacheNumClearRegion() {
return metaCacheNumClearRegion;
} | 3.26 |
hbase_MetricsConnection_newCallStats_rdh | /**
* Produce an instance of {@link CallStats} for clients to attach to RPCs.
*/
public static CallStats newCallStats() {
// TODO: instance pool to reduce GC?
return new CallStats();
} | 3.26 |
hbase_MetricsConnection_updateRpcGeneric_rdh | /**
* Update call stats for non-critical-path methods
*/
private void updateRpcGeneric(String methodName, CallStats stats) {
getMetric(DRTN_BASE + methodName, rpcTimers, timerFactory).update(stats.getCallTimeMs(), TimeUnit.MILLISECONDS);
getMetric(REQ_BASE + methodName, rpcHistograms, histogramFactory).update(stats.getRequestSizeBytes());
getMetric(RESP_BASE + methodName, rpcHistograms, histogramFactory).update(stats.getResponseSizeBytes());
} | 3.26 |
hbase_MetricsConnection_incrMetaCacheMiss_rdh | /**
* Increment the number of meta cache misses.
*/
public void incrMetaCacheMiss() {
metaCacheMisses.inc();
} | 3.26 |
hbase_MetricsConnection_getServerStats_rdh | /**
* serverStats metric
*/
public ConcurrentHashMap<ServerName, ConcurrentMap<byte[], RegionStats>> getServerStats() {
return serverStats;
} | 3.26 |
hbase_MetricsConnection_incrDelayRunnersAndUpdateDelayInterval_rdh | /**
* Increment the number of delay runner counts and update delay interval of delay runner.
*/
public void incrDelayRunnersAndUpdateDelayInterval(long interval) {
this.runnerStats.m0();
this.runnerStats.updateDelayInterval(interval);
} | 3.26 |
hbase_MetricsConnection_incrMetaCacheNumClearServer_rdh | /**
* Increment the number of meta cache drops requested for entire RegionServer.
*/
public void incrMetaCacheNumClearServer() {
metaCacheNumClearServer.inc();
} | 3.26 |
hbase_MetricsConnection_incrHedgedReadOps_rdh | /**
* Increment the number of hedged read that have occurred.
*/
public void incrHedgedReadOps() {
hedgedReadOps.inc();
} | 3.26 |
hbase_MetricsConnection_incrHedgedReadWin_rdh | /**
* Increment the number of hedged read returned faster than the original read.
*/
public void incrHedgedReadWin() {
hedgedReadWin.inc();
} | 3.26 |
hbase_MetricsConnection_getHedgedReadWin_rdh | /**
* hedgedReadWin metric
*/
public Counter getHedgedReadWin() {
return hedgedReadWin;
} | 3.26 |
hbase_MetricsConnection_getHedgedReadOps_rdh | /**
* hedgedReadOps metric
*/
public Counter getHedgedReadOps() {
return hedgedReadOps;
} | 3.26 |
hbase_MetricsConnection_incrConnectionCount_rdh | /**
* Increment the connection count of the metrics within a scope
*/
private void incrConnectionCount() {
connectionCount.inc();
} | 3.26 |
hbase_MetricsConnection_getMetaCacheNumClearServer_rdh | /**
* metaCacheNumClearServer metric
*/
public Counter getMetaCacheNumClearServer() {
return metaCacheNumClearServer;
} | 3.26 |
hbase_MetricsConnection_incrMetaCacheHit_rdh | /**
* Increment the number of meta cache hits.
*/
public void incrMetaCacheHit() {
metaCacheHits.inc();
} | 3.26 |
hbase_MetricsConnection_getIncrementTracker_rdh | /**
* incrementTracker metric
*/
public CallTracker getIncrementTracker() {
return incrementTracker;
} | 3.26 |
hbase_MetricsConnection_getDeleteTracker_rdh | /**
* deleteTracker metric
*/
public CallTracker getDeleteTracker() {
return deleteTracker;
} | 3.26 |
hbase_MetricsConnection_incrMetaCacheNumClearRegion_rdh | /**
* Increment the number of meta cache drops requested for individual region.
*/
public void incrMetaCacheNumClearRegion(int count) {
metaCacheNumClearRegion.inc(count);
} | 3.26 |
hbase_ConnectionRegistryFactory_getRegistry_rdh | /**
* Returns The connection registry implementation to use.
*/
static ConnectionRegistry getRegistry(Configuration conf) {
Class<? extends ConnectionRegistry> clazz = conf.getClass(CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, RpcConnectionRegistry.class, ConnectionRegistry.class);
return ReflectionUtils.newInstance(clazz, conf);
} | 3.26 |
hbase_TestingHBaseCluster_create_rdh | /**
* Create a {@link TestingHBaseCluster}. You need to call {@link #start()} of the returned
* {@link TestingHBaseCluster} to actually start the mini testing cluster.
*/
static TestingHBaseCluster create(TestingHBaseClusterOption option) {
return new TestingHBaseClusterImpl(option);
} | 3.26 |
hbase_SaslServerAuthenticationProviders_addProviderIfNotExists_rdh | /**
* Adds the given provider into the map of providers if a mapping for the auth code does not
* already exist in the map.
*/
static void addProviderIfNotExists(SaslServerAuthenticationProvider provider, HashMap<Byte, SaslServerAuthenticationProvider> providers) {
final byte newProviderAuthCode = provider.getSaslAuthMethod().getCode();
final SaslServerAuthenticationProvider alreadyRegisteredProvider = providers.get(newProviderAuthCode);
if (alreadyRegisteredProvider != null) {
throw new RuntimeException(((("Trying to load SaslServerAuthenticationProvider " + provider.getClass()) + ", but ") + alreadyRegisteredProvider.getClass()) + " is already registered with the same auth code");
}
providers.put(newProviderAuthCode, provider);
} | 3.26 |
hbase_SaslServerAuthenticationProviders_createProviders_rdh | /**
* Loads server authentication providers from the classpath and configuration, and then creates
* the SaslServerAuthenticationProviders instance.
*/
static SaslServerAuthenticationProviders createProviders(Configuration conf) {
ServiceLoader<SaslServerAuthenticationProvider> loader = ServiceLoader.load(SaslServerAuthenticationProvider.class);
HashMap<Byte, SaslServerAuthenticationProvider> providers = new HashMap<>();
for (SaslServerAuthenticationProvider provider : loader) {
addProviderIfNotExists(provider, providers);
}
addExtraProviders(conf, providers);
if (LOG.isTraceEnabled()) {
String loadedProviders = providers.values().stream().map(provider -> provider.getClass().getName()).collect(Collectors.joining(", "));
if (loadedProviders.isEmpty()) {
loadedProviders = "None!";
}
LOG.trace("Found SaslServerAuthenticationProviders {}", loadedProviders);
}
// Initialize the providers once, before we get into the RPC path.
providers.forEach((b, provider) -> {
try {
// Give them a copy, just to make sure there is no funny-business going on.
provider.init(new Configuration(conf));
} catch (IOException e) {
LOG.error("Failed to initialize {}", provider.getClass(), e);
throw new RuntimeException("Failed to initialize " + provider.getClass().getName(), e);
}
});
return new SaslServerAuthenticationProviders(conf, providers);
} | 3.26 |
hbase_SaslServerAuthenticationProviders_getNumRegisteredProviders_rdh | /**
* Returns the number of registered providers.
*/
public int getNumRegisteredProviders() {
return providers.size();
} | 3.26 |
hbase_SaslServerAuthenticationProviders_addExtraProviders_rdh | /**
* Adds any providers defined in the configuration.
*/
static void addExtraProviders(Configuration conf, HashMap<Byte, SaslServerAuthenticationProvider> providers) {
for (String implName : conf.getStringCollection(EXTRA_PROVIDERS_KEY)) {
Class<?> clz;
try {
clz = Class.forName(implName);
} catch (ClassNotFoundException e) {
LOG.warn("Failed to find SaslServerAuthenticationProvider class {}", implName, e);
continue;
}
if (!SaslServerAuthenticationProvider.class.isAssignableFrom(clz)) {
LOG.warn("Server authentication class {} is not an instance of " + "SaslServerAuthenticationProvider", clz);continue;
}
try {
SaslServerAuthenticationProvider provider = ((SaslServerAuthenticationProvider) (clz.getConstructor().newInstance()));
addProviderIfNotExists(provider, providers);
} catch (InstantiationException | IllegalAccessException | NoSuchMethodException | InvocationTargetException e) {
LOG.warn("Failed to instantiate {}", clz, e);
}
}
} | 3.26 |
hbase_SaslServerAuthenticationProviders_getInstance_rdh | /**
* Returns a singleton instance of {@link SaslServerAuthenticationProviders}.
*/
public static SaslServerAuthenticationProviders getInstance(Configuration conf) {
SaslServerAuthenticationProviders providers = holder.get();
if (null == providers) {
synchronized(holder) {
// Someone else beat us here
providers = holder.get();
if (null != providers) {
return providers;
}
providers = createProviders(conf);
holder.set(providers);
}
}
return providers;
} | 3.26 |
hbase_SaslServerAuthenticationProviders_selectProvider_rdh | /**
* Selects the appropriate SaslServerAuthenticationProvider from those available. If there is no
* matching provider for the given {@code authByte}, this method will return null.
*/
public SaslServerAuthenticationProvider selectProvider(byte authByte) {
return providers.get(Byte.valueOf(authByte));
} | 3.26 |
hbase_SaslServerAuthenticationProviders_m0_rdh | /**
* Removes the cached singleton instance of {@link SaslServerAuthenticationProviders}.
*/
public static void m0() {
synchronized(holder) {
holder.set(null);
}
} | 3.26 |
hbase_SaslServerAuthenticationProviders_getSimpleProvider_rdh | /**
* Extracts the SIMPLE authentication provider.
*/
public SaslServerAuthenticationProvider getSimpleProvider() {
Optional<SaslServerAuthenticationProvider> opt = providers.values().stream().filter(p -> p instanceof SimpleSaslServerAuthenticationProvider).findFirst();
if (!opt.isPresent()) {
throw new RuntimeException("SIMPLE authentication provider not available when it should be");
}
return opt.get();
} | 3.26 |
hbase_RSGroupBasedLoadBalancer_updateClusterMetrics_rdh | // must be called after calling initialize
@Override
public synchronized void updateClusterMetrics(ClusterMetrics sm) {
assert internalBalancer != null;
internalBalancer.updateClusterMetrics(sm);
} | 3.26 |
hbase_RSGroupBasedLoadBalancer_balanceCluster_rdh | /**
* Balance by RSGroup.
*/
@Override
public synchronized List<RegionPlan> balanceCluster(Map<TableName, Map<ServerName, List<RegionInfo>>> loadOfAllTable)
throws IOException {
if (!isOnline()) {
throw new ConstraintException(RSGroupInfoManager.class.getSimpleName() +
" is not online, unable to perform balance");
}
// Calculate correct assignments and a list of RegionPlan for mis-placed regions
Pair<Map<TableName, Map<ServerName, List<RegionInfo>>>, List<RegionPlan>> correctedStateAndRegionPlans = correctAssignments(loadOfAllTable);
Map<TableName, Map<ServerName, List<RegionInfo>>> correctedLoadOfAllTable = correctedStateAndRegionPlans.getFirst();
List<RegionPlan> regionPlans = correctedStateAndRegionPlans.getSecond();
RSGroupInfo defaultInfo = rsGroupInfoManager.getRSGroup(RSGroupInfo.DEFAULT_GROUP);
// Add RegionPlan
// for the regions which have been placed according to the region server group assignment
// into the movement list
try {
// For each rsgroup
for (RSGroupInfo rsgroup : rsGroupInfoManager.listRSGroups()) {
LOG.debug("Balancing RSGroup={}", rsgroup.getName());
Map<TableName, Map<ServerName, List<RegionInfo>>> loadOfTablesInGroup = new HashMap<>();
for (Map.Entry<TableName, Map<ServerName, List<RegionInfo>>> entry :
correctedLoadOfAllTable.entrySet()) {
TableName tableName = entry.getKey();
RSGroupInfo targetRSGInfo = RSGroupUtil.getRSGroupInfo(masterServices, rsGroupInfoManager, tableName).orElse(defaultInfo);
if (targetRSGInfo.getName().equals(rsgroup.getName())) {
loadOfTablesInGroup.put(tableName, entry.getValue());
}
}
List<RegionPlan> groupPlans = null;
if (!loadOfTablesInGroup.isEmpty()) {
LOG.info("Start Generate Balance plan for group: " + rsgroup.getName());
groupPlans = this.internalBalancer.balanceCluster(loadOfTablesInGroup);
}
if (groupPlans != null) {
regionPlans.addAll(groupPlans);
}
}
} catch (IOException exp) {
LOG.warn("Exception while balancing cluster.", exp);
regionPlans.clear();
}
// Return the whole movement list
return regionPlans;
} | 3.26 |
hbase_RSGroupBasedLoadBalancer_filterServers_rdh | /**
* Filter servers based on the online servers.
* <p/>
* servers is actually a TreeSet (see {@link org.apache.hadoop.hbase.rsgroup.RSGroupInfo}), having
* its contains()'s time complexity as O(logn), which is good enough.
* <p/>
* TODO: consider using HashSet to pursue O(1) for contains() throughout the calling chain if
* needed.
*
* @param servers
* the servers
* @param onlineServers
* List of servers which are online.
* @return the list
*/
private List<ServerName> filterServers(Set<Address> servers, List<ServerName> onlineServers) {
ArrayList<ServerName> finalList = new ArrayList<>();
for (ServerName onlineServer : onlineServers) {
if (servers.contains(onlineServer.getAddress())) {
finalList.add(onlineServer);
}
}
return finalList;
} | 3.26 |
hbase_SpaceLimitingException_getViolationPolicy_rdh | /**
* Returns the violation policy in effect.
*
* @return The violation policy in effect.
*/
public String getViolationPolicy() {
return this.policyName;
} | 3.26 |
hbase_Get_m1_rdh | /* Accessors */
/**
* Set whether blocks should be cached for this Get.
* <p>
* This is true by default. When true, default settings of the table and family are used (this
* will never override caching blocks if the block cache is disabled for that family or entirely).
*
* @param cacheBlocks
* if false, default settings are overridden and blocks will not be cached
*/
public Get m1(boolean cacheBlocks) {
this.cacheBlocks = cacheBlocks;
return this; } | 3.26 |
hbase_Get_getMaxResultsPerColumnFamily_rdh | /**
* Method for retrieving the get's maximum number of values to return per Column Family
*
* @return the maximum number of values to fetch per CF
*/
public int getMaxResultsPerColumnFamily() {
return this.storeLimit;
} | 3.26 |
hbase_Get_getRow_rdh | /**
* Method for retrieving the get's row
*/
@Override
public byte[] getRow() {
return this.row;
} | 3.26 |
hbase_Get_readVersions_rdh | /**
* Get up to the specified number of versions of each column.
*
* @param versions
* specified number of versions for each column
* @throws IOException
* if invalid number of versions
* @return this for invocation chaining
*/
public Get readVersions(int versions) throws IOException {
if (versions <= 0) {
throw new IOException("versions must be positive");
}
this.maxVersions = versions;
return this; } | 3.26 |
hbase_Get_getMaxVersions_rdh | /**
* Method for retrieving the get's maximum number of version
*
* @return the maximum number of version to fetch for this get
*/
public int getMaxVersions() {
return this.maxVersions;
} | 3.26 |
hbase_Get_setMaxResultsPerColumnFamily_rdh | /**
* Set the maximum number of values to return per row per Column Family
*
* @param limit
* the maximum number of values returned / row / CF
* @return this for invocation chaining
*/
public Get setMaxResultsPerColumnFamily(int limit) {
this.storeLimit = limit;
return this;
} | 3.26 |
hbase_Get_numFamilies_rdh | /**
* Method for retrieving the number of families to get from
*
* @return number of families
*/
public int numFamilies() {
return this.familyMap.size();
} | 3.26 |
hbase_Get_hasFamilies_rdh | /**
* Method for checking if any families have been inserted into this Get
*
* @return true if familyMap is non empty false otherwise
*/
public boolean hasFamilies() {
return !this.familyMap.isEmpty();
} | 3.26 |