Columns:
  name          string, length 12 to 178 characters
  code_snippet  string, length 8 to 36.5k characters
  score         float64, values ranging from 3.26 to 3.68
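Each row below is a (name, code_snippet, score) triple: a generated identifier for an HBase method, the method's source flattened onto a single line, and its score. As a reading aid, here is a minimal sketch of how one such row could be modeled in Java; the ScoredSnippet record is a hypothetical helper invented for illustration (it is not part of the dataset), and the sample values are abbreviated from the first row shown below.

// Hypothetical helper type for one preview row; field names mirror the columns above.
public record ScoredSnippet(String name, String codeSnippet, double score) {
    public static void main(String[] args) {
        // Values abbreviated from the first row of the preview.
        ScoredSnippet first = new ScoredSnippet(
            "hbase_ReopenTableRegionsProcedure_setTimeoutFailure_rdh",
            "/** At end of timeout, wake ourselves up so we run again. */ ...",
            3.26);
        System.out.println(first.name() + " -> " + first.score());
    }
}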
hbase_ReopenTableRegionsProcedure_setTimeoutFailure_rdh
/** * At end of timeout, wake ourselves up so we run again. */ @Override protected synchronized boolean setTimeoutFailure(MasterProcedureEnv env) { setState(ProcedureState.RUNNABLE); env.getProcedureScheduler().addFront(this); return false;// 'false' means that this procedure handled the timeout }
3.26
hbase_ResponseConverter_buildEnableCatalogJanitorResponse_rdh
/** * Creates a response for the catalog scan request * * @return A EnableCatalogJanitorResponse */ public static EnableCatalogJanitorResponse buildEnableCatalogJanitorResponse(boolean prevValue) { return EnableCatalogJanitorResponse.newBuilder().setPrevValue(prevValue).build(); }
3.26
hbase_ResponseConverter_buildClearRegionBlockCacheResponse_rdh
/** * Creates a protocol buffer ClearRegionBlockCacheResponse * * @return a ClearRegionBlockCacheResponse */ public static ClearRegionBlockCacheResponse buildClearRegionBlockCacheResponse(final HBaseProtos.CacheEvictionStats cacheEvictionStats) { return AdminProtos.ClearRegionBlockCacheResponse.newBuilder().setStats(cacheEvictionStats).build(); }
3.26
hbase_ResponseConverter_getResults_rdh
/** * Create Results from the cells using the cells meta data. */ public static Result[] getResults(CellScanner cellScanner, ScanResponse response) throws IOException { if (response == null) return null; // If cellscanner, then the number of Results to return is the count of elements in the // cellsPerResult list. Otherwise, it is how many results are embedded inside the response. int noOfResults = (cellScanner != null) ? response.getCellsPerResultCount() : response.getResultsCount(); Result[] results = new Result[noOfResults]; for (int i = 0; i < noOfResults; i++) { if (cellScanner != null) { // Cells are out in cellblocks. Group them up again as Results. How many to read at a // time will be found in getCellsLength -- length here is how many Cells in the i'th Result int noOfCells = response.getCellsPerResult(i); boolean isPartial = (response.getPartialFlagPerResultCount() > i) ? response.getPartialFlagPerResult(i) : false; List<Cell> cells = new ArrayList<>(noOfCells); for (int j = 0; j < noOfCells; j++) { try { if (cellScanner.advance() == false) { // We are not able to retrieve the exact number of cells which ResultCellMeta says us. // We have to scan for the same results again. Throwing DNRIOE as a client retry on // the // same scanner will result in OutOfOrderScannerNextException String msg = ((("Results sent from server=" + noOfResults) + ". But only got ") + i) + " results completely at client. Resetting the scanner to scan again."; LOG.error(msg);throw new DoNotRetryIOException(msg); } } catch (IOException ioe) {// We are getting IOE while retrieving the cells for Results. // We have to scan for the same results again. Throwing DNRIOE as a client retry on the // same scanner will result in OutOfOrderScannerNextException LOG.error("Exception while reading cells from result." + "Resetting the scanner to scan again.", ioe); throw new DoNotRetryIOException("Resetting the scanner.", ioe); } cells.add(cellScanner.current());} results[i] = Result.create(cells, null, response.getStale(), isPartial); } else { // Result is pure pb. results[i] = ProtobufUtil.toResult(response.getResults(i)); } } return results; }
3.26
hbase_ResponseConverter_getCheckAndMutateResult_rdh
/** * Create a CheckAndMutateResult object from a protocol buffer MutateResponse * * @return a CheckAndMutateResult object */ public static CheckAndMutateResult getCheckAndMutateResult(ClientProtos.MutateResponse mutateResponse, CellScanner cells) throws IOException { boolean success = mutateResponse.getProcessed(); Result result = null; if (mutateResponse.hasResult()) { result = ProtobufUtil.toResult(mutateResponse.getResult(), cells); } return new CheckAndMutateResult(success, result); }
3.26
hbase_ResponseConverter_buildException_rdh
/** * Returns NameValuePair of the exception name to a stringified version of the exception. */ public static NameBytesPair buildException(final Throwable t) { NameBytesPair.Builder parameterBuilder = NameBytesPair.newBuilder(); parameterBuilder.setName(t.getClass().getName());parameterBuilder.setValue(ByteString.copyFromUtf8(StringUtils.stringifyException(t))); return parameterBuilder.build(); }
3.26
hbase_ResponseConverter_getResult_rdh
// Start utilities for Client public static SingleResponse getResult(final ClientProtos.MutateRequest request, final ClientProtos.MutateResponse response, final CellScanner cells) throws IOException { SingleResponse singleResponse = new SingleResponse(); SingleResponse.Entry entry = new SingleResponse.Entry(); entry.setResult(ProtobufUtil.toResult(response.getResult(), cells)); entry.setProcessed(response.getProcessed()); singleResponse.setEntry(entry);return singleResponse; }
3.26
hbase_ResponseConverter_buildRunCatalogScanResponse_rdh
/** * Creates a response for the catalog scan request * * @return A RunCatalogScanResponse */ public static RunCatalogScanResponse buildRunCatalogScanResponse(int numCleaned) { return RunCatalogScanResponse.newBuilder().setScanResult(numCleaned).build(); }
3.26
hbase_ResponseConverter_buildGetLastFlushedSequenceIdResponse_rdh
// End utilities for Admin /** * Creates a response for the last flushed sequence Id request * * @return A GetLastFlushedSequenceIdResponse */ public static GetLastFlushedSequenceIdResponse buildGetLastFlushedSequenceIdResponse(RegionStoreSequenceIds ids) { return GetLastFlushedSequenceIdResponse.newBuilder().setLastFlushedSequenceId(ids.getLastFlushedSequenceId()).addAllStoreLastFlushedSequenceId(ids.getStoreSequenceIdList()).build(); }
3.26
hbase_ResponseConverter_getRegionInfos_rdh
// End utilities for Client // Start utilities for Admin /** * Get the list of region info from a GetOnlineRegionResponse * * @param proto * the GetOnlineRegionResponse * @return the list of region info */ public static List<RegionInfo> getRegionInfos(final GetOnlineRegionResponse proto) { if ((proto == null) || (proto.getRegionInfoCount() == 0)) return null; return ProtobufUtil.getRegionInfos(proto); }
3.26
hbase_ResponseConverter_buildGetServerInfoResponse_rdh
/** * A utility to build a GetServerInfoResponse. * * @return the response */ public static GetServerInfoResponse buildGetServerInfoResponse(final ServerName serverName, final int webuiPort) { GetServerInfoResponse.Builder builder = GetServerInfoResponse.newBuilder(); ServerInfo.Builder serverInfoBuilder = ServerInfo.newBuilder(); serverInfoBuilder.setServerName(ProtobufUtil.toServerName(serverName)); if (webuiPort >= 0) { serverInfoBuilder.setWebuiPort(webuiPort); } builder.setServerInfo(serverInfoBuilder.build()); return builder.build(); }
3.26
hbase_ResponseConverter_buildHasPermissionResponse_rdh
/** * Builds a protocol buffer HasPermissionResponse */ public static HasPermissionResponse buildHasPermissionResponse(boolean hasPermission) { HasPermissionResponse.Builder builder = HasPermissionResponse.newBuilder(); builder.setHasPermission(hasPermission); return builder.build(); }
3.26
hbase_ResponseConverter_buildActionResult_rdh
/** * Wrap a throwable to an action result. * * @return an action result builder */ public static Builder buildActionResult(final ClientProtos.Result r) { ResultOrException.Builder builder = ResultOrException.newBuilder(); if (r != null) builder.setResult(r); return builder; }
3.26
hbase_ResponseConverter_buildRunCleanerChoreResponse_rdh
/** * Creates a response for the cleaner chore request * * @return A RunCleanerChoreResponse */ public static RunCleanerChoreResponse buildRunCleanerChoreResponse(boolean ran) { return RunCleanerChoreResponse.newBuilder().setCleanerChoreRan(ran).build(); }
3.26
hbase_ResponseConverter_buildGetOnlineRegionResponse_rdh
/** * A utility to build a GetOnlineRegionResponse. * * @return the response */ public static GetOnlineRegionResponse buildGetOnlineRegionResponse(final List<RegionInfo> regions) { GetOnlineRegionResponse.Builder builder = GetOnlineRegionResponse.newBuilder(); for (RegionInfo region : regions) {builder.addRegionInfo(ProtobufUtil.toRegionInfo(region)); } return builder.build(); }
3.26
hbase_ResponseConverter_setControllerException_rdh
/** * Stores an exception encountered during RPC invocation so it can be passed back through to the * client. * * @param controller * the controller instance provided by the client when calling the service * @param ioe * the exception encountered */ public static void setControllerException(RpcController controller, IOException ioe) { if (controller != null) { if (controller instanceof ServerRpcController) { ((ServerRpcController) (controller)).setFailedOn(ioe); } else { controller.setFailed(StringUtils.stringifyException(ioe)); } } }
3.26
hbase_CacheableDeserializerIdManager_save_rdh
/** * Snapshot a map of the current identifiers to class names for reconstruction on reading out of a * file. */public static Map<Integer, String> save() { // No synchronization here because weakly consistent view should be good enough // The assumed risk is that we might not see a new serializer that comes in while iterating, // but with a synchronized block, we won't see it anyway return registeredDeserializers.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().getClass().getName())); }
3.26
hbase_CacheableDeserializerIdManager_registerDeserializer_rdh
/** * Register the given {@link Cacheable} -- usually an hfileblock instance, these implement the * Cacheable Interface -- deserializer and generate a unique identifier id for it and return this * as our result. * * @return the identifier of given cacheable deserializer * @see #getDeserializer(int) */ public static int registerDeserializer(CacheableDeserializer<Cacheable> cd) { int idx = identifier.incrementAndGet(); // No synchronization here because keys will be unique registeredDeserializers.put(idx, cd); return idx; }
3.26
hbase_CacheableDeserializerIdManager_getDeserializer_rdh
/** * Get the cacheable deserializer registered at the given identifier Id. * * @see #registerDeserializer(CacheableDeserializer) */ public static CacheableDeserializer<Cacheable> getDeserializer(int id) { return registeredDeserializers.get(id); }
3.26
hbase_SpaceQuotaRefresherChore_extractQuotaSnapshot_rdh
/** * Wrapper around {@link QuotaTableUtil#extractQuotaSnapshot(Result, Map)} for testing. */ void extractQuotaSnapshot(Result result, Map<TableName, SpaceQuotaSnapshot> snapshots) { QuotaTableUtil.extractQuotaSnapshot(result, snapshots); }
3.26
hbase_SpaceQuotaRefresherChore_getRegionReportPercent_rdh
/** * Extracts the percent of Regions for a table to have been reported to enable quota violation * state change. * * @param conf * The configuration object. * @return The percent of regions reported to use. */static Double getRegionReportPercent(Configuration conf) { return conf.getDouble(POLICY_REFRESHER_CHORE_REPORT_PERCENT_KEY, POLICY_REFRESHER_CHORE_REPORT_PERCENT_DEFAULT); }
3.26
hbase_SpaceQuotaRefresherChore_getTimeUnit_rdh
/** * Extracts the time unit for the chore period and initial delay from the configuration. The * configuration value for {@link #POLICY_REFRESHER_CHORE_TIMEUNIT_KEY} must correspond to a * {@link TimeUnit} value. * * @param conf * The configuration object. * @return The configured time unit for the chore period and initial delay or the default value. */ static TimeUnit getTimeUnit(Configuration conf) { return TimeUnit.valueOf(conf.get(f0, POLICY_REFRESHER_CHORE_TIMEUNIT_DEFAULT)); }
3.26
hbase_SpaceQuotaRefresherChore_checkQuotaTableExists_rdh
/** * Checks if hbase:quota exists in hbase:meta * * @return true if hbase:quota table is in meta, else returns false. * @throws IOException * throws IOException */ boolean checkQuotaTableExists() throws IOException { try (Admin admin = getConnection().getAdmin()) { return admin.tableExists(QuotaUtil.QUOTA_TABLE_NAME);} }
3.26
hbase_SpaceQuotaRefresherChore_m0_rdh
/** * Extracts the initial delay for the chore from the configuration. * * @param conf * The configuration object. * @return The configured chore initial delay or the default value. */ static long m0(Configuration conf) { return conf.getLong(POLICY_REFRESHER_CHORE_DELAY_KEY, POLICY_REFRESHER_CHORE_DELAY_DEFAULT); }
3.26
hbase_SpaceQuotaRefresherChore_getPeriod_rdh
/** * Extracts the period for the chore from the configuration. * * @param conf * The configuration object. * @return The configured chore period or the default value. */ static int getPeriod(Configuration conf) { return conf.getInt(POLICY_REFRESHER_CHORE_PERIOD_KEY, POLICY_REFRESHER_CHORE_PERIOD_DEFAULT); }
3.26
hbase_SpaceQuotaRefresherChore_isInViolation_rdh
/** * Checks if the given <code>snapshot</code> is in violation, allowing the snapshot to be null. If * the snapshot is null, this is interpreted as no snapshot which implies not in violation. * * @param snapshot * The snapshot to operate on. * @return true if the snapshot is in violation, false otherwise. */ boolean isInViolation(SpaceQuotaSnapshot snapshot) { if (snapshot == null) { return false; } return snapshot.getQuotaStatus().isInViolation(); }
3.26
hbase_SpaceQuotaRefresherChore_fetchSnapshotsFromQuotaTable_rdh
/** * Reads all quota snapshots from the quota table. * * @return The current "view" of space use by each table. */ public Map<TableName, SpaceQuotaSnapshot> fetchSnapshotsFromQuotaTable() throws IOException { try (Table quotaTable = getConnection().getTable(QuotaUtil.QUOTA_TABLE_NAME);ResultScanner scanner = quotaTable.getScanner(QuotaTableUtil.makeQuotaSnapshotScan())) { Map<TableName, SpaceQuotaSnapshot> snapshots = new HashMap<>(); for (Result result : scanner) { try { extractQuotaSnapshot(result, snapshots); } catch (IllegalArgumentException e) { final String msg = "Failed to parse result for row " + Bytes.toString(result.getRow()); LOG.error(msg, e); throw new IOException(msg, e); } } return snapshots; } }
3.26
hbase_BucketEntry_markAsEvicted_rdh
/** * The {@link BucketCache} will try to release its reference to this BucketEntry many times. we * must make sure the idempotent, otherwise it'll decrease the RPC's reference count in advance, * then for RPC memory leak happen. * * @return true if we deallocate this entry successfully. */ boolean markAsEvicted() { if (markedAsEvicted.compareAndSet(false, true)) { return this.release(); } return false;} /** * Check whether have some RPC patch referring this block.<br/> * For {@link IOEngine#usesSharedMemory()} is true(eg.{@link ByteBufferIOEngine}), there're two * case: <br> * 1. If current refCnt is greater than 1, there must be at least one referring RPC path; <br> * 2. If current refCnt is equal to 1 and the markedAtEvicted is true, the it means backingMap has * released its reference, the remaining reference can only be from RPC path. <br> * We use this check to decide whether we can free the block area: when cached size exceed the * acceptable size, our eviction policy will choose those stale blocks without any RPC reference * and the RPC referred block will be excluded. <br/> * <br/> * For {@link IOEngine#usesSharedMemory()} is false(eg.{@link FileIOEngine}), * {@link BucketEntry#refCnt} is always 1 until it is evicted from {@link BucketCache#backingMap}, * so {@link BucketEntry#isRpcRef()}
3.26
hbase_BucketEntry_release_rdh
/** * We've three cases to release refCnt now: <br> * 1. BucketCache#evictBlock, it will release the backingMap's reference by force because we're * closing file or clear the bucket cache or some corruption happen. when all rpc references gone, * then free the area in bucketAllocator. <br> * 2. BucketCache#returnBlock . when rpc shipped, we'll release the block, only when backingMap * also release its refCnt (case.1 will do this) and no other rpc reference, then it will free the * area in bucketAllocator. <br> * 3.evict those block without any rpc reference if cache size exceeded. we'll only free those * blocks with zero rpc reference count. * * @return true to indicate we've decreased to zero and do the de-allocation. */ @Override public boolean release() { return refCnt.release(); }
3.26
hbase_SimplePositionedMutableByteRange_setOffset_rdh
/** * Update the beginning of this range. {@code offset + length} may not be greater than * {@code bytes.length}. Resets {@code position} to 0. the new start of this range. * * @return this. */ @Override public PositionedByteRange setOffset(int offset) { this.position = 0; super.setOffset(offset); return this; } /** * Update the length of this range. {@code offset + length} should not be greater than * {@code bytes.length}. If {@code position} is greater than the new {@code length}, sets * {@code position} to {@code length}
3.26
hbase_MD5Hash_getMD5AsHex_rdh
/** * Given a byte array, returns its MD5 hash as a hex string. Only "length" number of bytes * starting at "offset" within the byte array are used. * * @param key * the key to hash (variable length byte array) * @return MD5 hash as a 32 character hex string. */ public static String getMD5AsHex(byte[] key, int offset, int length) { try { MessageDigest md = MessageDigest.getInstance("MD5"); md.update(key, offset, length); byte[] digest = md.digest(); return new String(Hex.encodeHex(digest)); } catch (NoSuchAlgorithmException e) { // this should never happen unless the JDK is messed up. throw new RuntimeException("Error computing MD5 hash", e); } }
3.26
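For illustration, the method in the row above can be called as in the following sketch; it assumes hbase-common (which provides org.apache.hadoop.hbase.util.MD5Hash and Bytes) is on the classpath, and the key value is made up.

// Hypothetical usage of getMD5AsHex(byte[] key, int offset, int length) from the row above.
public class Md5HexExample {
    public static void main(String[] args) {
        byte[] key = org.apache.hadoop.hbase.util.Bytes.toBytes("row-0001"); // made-up key
        String hex = org.apache.hadoop.hbase.util.MD5Hash.getMD5AsHex(key, 0, key.length);
        System.out.println(hex); // 32-character hex digest of the key
    }
}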
hbase_DefaultMetricsSystemHelper_removeSourceName_rdh
/** * Unfortunately Hadoop tries to be too-clever and permanently keeps track of all names registered * so far as a Source, thus preventing further re-registration of the source with the same name. * In case of dynamic metrics tied to region-lifecycles, this becomes a problem because we would * like to be able to re-register and remove with the same name. Otherwise, it is resource leak. * This ugly code manually removes the name from the UniqueNames map. TODO: May not be needed for * Hadoop versions after YARN-5190. */ public void removeSourceName(String name) { if ((sourceNamesField == null) || (mapField == null)) { return; } try {Object sourceNames = sourceNamesField.get(DefaultMetricsSystem.INSTANCE); HashMap v5 = ((HashMap) (mapField.get(sourceNames))); synchronized(sourceNames) { v5.remove(name); } } catch (Exception ex) { if (LOG.isTraceEnabled()) { LOG.trace("Received exception while trying to access Hadoop Metrics classes via " + "reflection.", ex); } } }
3.26
hbase_KeyValueHeap_seek_rdh
/** * Seeks all scanners at or below the specified seek key. If we earlied-out of a row, we may end * up skipping values that were never reached yet. Rather than iterating down, we want to give the * opportunity to re-seek. * <p> * As individual scanners may run past their ends, those scanners are automatically closed and * removed from the heap. * <p> * This function (and {@link #reseek(Cell)}) does not do multi-column Bloom filter and lazy-seek * optimizations. To enable those, call {@link #requestSeek(Cell, boolean, boolean)}. * * @param seekKey * KeyValue to seek at or after * @return true if KeyValues exist at or after specified key, false if not */ @Override public boolean seek(Cell seekKey) throws IOException { return // This is not a lazy seek // forward (false: this is not a reseek) generalizedSeek(false, seekKey, false, false);// Not using Bloom filters }
3.26
hbase_KeyValueHeap_next_rdh
/** * Gets the next row of keys from the top-most scanner. * <p> * This method takes care of updating the heap. * <p> * This can ONLY be called when you are using Scanners that implement InternalScanner as well as * KeyValueScanner (a {@link StoreScanner}). * * @return true if more rows exist after this one, false if scanner is done */ @Override public boolean next(List<Cell> result, ScannerContext scannerContext) throws IOException { if (this.current == null) { return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); } InternalScanner currentAsInternal = ((InternalScanner) (this.current)); boolean moreCells = currentAsInternal.next(result, scannerContext); Cell pee = this.current.peek(); /* By definition, any InternalScanner must return false only when it has no further rows to be fetched. So, we can close a scanner if it returns false. All existing implementations seem to be fine with this. It is much more efficient to close scanners which are not needed than keep them in the heap. This is also required for certain optimizations. */ if ((pee == null) || (!moreCells)) { // add the scanner that is to be closed this.scannersForDelayedClose.add(this.current); } else { this.heap.add(this.current); } this.current = null; this.current = pollRealKV(); if (this.current == null) { moreCells = scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); } return moreCells; }
3.26
hbase_KeyValueHeap_getComparator_rdh
/** */ public CellComparator getComparator() { return this.kvComparator; }
3.26
hbase_KeyValueHeap_reseek_rdh
/** * This function is identical to the {@link #seek(Cell)} function except that * scanner.seek(seekKey) is changed to scanner.reseek(seekKey). */ @Override public boolean reseek(Cell seekKey) throws IOException { return // This is not a lazy seek // forward (true because this is reseek) generalizedSeek(false, seekKey, true, false);// Not using Bloom filters } /** * {@inheritDoc }
3.26
hbase_KeyValueHeap_getHeap_rdh
/** * Returns the current Heap */ public PriorityQueue<KeyValueScanner> getHeap() { return this.heap; }
3.26
hbase_KeyValueHeap_generalizedSeek_rdh
/** * * @param isLazy * whether we are trying to seek to exactly the given row/col. Enables Bloom * filter and most-recent-file-first optimizations for multi-column get/scan * queries. * @param seekKey * key to seek to * @param forward * whether to seek forward (also known as reseek) * @param useBloom * whether to optimize seeks using Bloom filters */ private boolean generalizedSeek(boolean isLazy, Cell seekKey, boolean forward, boolean useBloom) throws IOException { if ((!isLazy) && useBloom) { throw new IllegalArgumentException("Multi-column Bloom filter " + "optimization requires a lazy seek"); } if (current == null) { return false; } KeyValueScanner scanner = current; try {while (scanner != null) { Cell topKey = scanner.peek(); if (comparator.getComparator().compare(seekKey, topKey) <= 0) { // Top KeyValue is at-or-after Seek KeyValue. We only know that all // scanners are at or after seekKey (because fake keys of // scanners where a lazy-seek operation has been done are not greater // than their real next keys) but we still need to enforce our // invariant that the top scanner has done a real seek. This way // StoreScanner and RegionScanner do not have to worry about fake // keys. heap.add(scanner); scanner = null; current = pollRealKV(); return current != null; } boolean seekResult; if (isLazy && (heap.size() > 0)) { // If there is only one scanner left, we don't do lazy seek. seekResult = scanner.requestSeek(seekKey, forward, useBloom);} else { seekResult = NonLazyKeyValueScanner.doRealSeek(scanner, seekKey, forward); } if (!seekResult) { this.scannersForDelayedClose.add(scanner);} else { heap.add(scanner); }scanner = heap.poll(); if (scanner == null) { current = null; } } } catch (Exception e) { if (scanner != null) { try { scanner.close(); } catch (Exception ce) { LOG.warn("close KeyValueScanner error", ce); } } throw e; } // Heap is returning empty, scanner is done return false; }
3.26
hbase_KeyValueHeap_pollRealKV_rdh
/** * Fetches the top sub-scanner from the priority queue, ensuring that a real seek has been done on * it. Works by fetching the top sub-scanner, and if it has not done a real seek, making it do so * (which will modify its top KV), putting it back, and repeating this until success. Relies on * the fact that on a lazy seek we set the current key of a StoreFileScanner to a KV that is not * greater than the real next KV to be read from that file, so the scanner that bubbles up to the * top of the heap will have global next KV in this scanner heap if (1) it has done a real seek * and (2) its KV is the top among all top KVs (some of which are fake) in the scanner heap. */ protected KeyValueScanner pollRealKV() throws IOException { KeyValueScanner kvScanner = heap.poll(); if (kvScanner == null) { return null; } while ((kvScanner != null) && (!kvScanner.realSeekDone())) { if (kvScanner.peek() != null) { try { kvScanner.enforceSeek(); } catch (IOException ioe) { // Add the item to delayed close set in case it is leak from close this.scannersForDelayedClose.add(kvScanner); throw ioe; } Cell curKV = kvScanner.peek(); if (curKV != null) { KeyValueScanner nextEarliestScanner = heap.peek(); if (nextEarliestScanner == null) { // The heap is empty. Return the only possible scanner. return kvScanner; } // Compare the current scanner to the next scanner. We try to avoid // putting the current one back into the heap if possible. Cell nextKV = nextEarliestScanner.peek(); if ((nextKV == null) || (comparator.compare(curKV, nextKV) < 0)) { // We already have the scanner with the earliest KV, so return it. return kvScanner; } // Otherwise, put the scanner back into the heap and let it compete // against all other scanners (both those that have done a "real // seek" and a "lazy seek"). heap.add(kvScanner); } else {// Close the scanner because we did a real seek and found out there // are no more KVs. this.scannersForDelayedClose.add(kvScanner); } } else { // Close the scanner because it has already run out of KVs even before // we had to do a real seek on it. this.scannersForDelayedClose.add(kvScanner); } kvScanner = heap.poll(); } return kvScanner; }
3.26
hbase_KeyValueHeap_compare_rdh
/** * Compares two KeyValue * * @return less than 0 if left is smaller, 0 if equal etc.. */ public int compare(Cell left, Cell right) { return this.kvComparator.compare(left, right); }
3.26
hbase_BrotliCompressor_maxCompressedLength_rdh
// Package private int maxCompressedLength(int len) { return len + CompressionUtil.compressionOverhead(len); }
3.26
hbase_AsyncBufferedMutatorBuilder_disableWriteBufferPeriodicFlush_rdh
/** * Disable the periodical flush, i.e, set the timeout to 0. */ default AsyncBufferedMutatorBuilder disableWriteBufferPeriodicFlush() { return setWriteBufferPeriodicFlush(0, TimeUnit.NANOSECONDS); }
3.26
hbase_AsyncBufferedMutatorBuilder_setMaxRetries_rdh
/** * Set the max retry times for an operation. Usually it is the max attempt times minus 1. * <p> * Operation timeout and max attempt times(or max retry times) are both limitations for retrying, * we will stop retrying when we reach any of the limitations. * * @see #setMaxAttempts(int) * @see #setOperationTimeout(long, TimeUnit) */ default AsyncBufferedMutatorBuilder setMaxRetries(int maxRetries) { return setMaxAttempts(retries2Attempts(maxRetries)); }
3.26
hbase_AsyncBufferedMutatorBuilder_setWriteBufferPeriodicFlush_rdh
/** * Set the periodical flush interval. If the data in the buffer has not been flush for a long * time, i.e, reach this timeout limit, we will flush it automatically. * <p/> * Notice that, set the timeout to 0 or a negative value means disable periodical flush, not * 'flush immediately'. If you want to flush immediately then you should not use this class, as it * is designed to be 'buffered'. */ default AsyncBufferedMutatorBuilder setWriteBufferPeriodicFlush(long timeout, TimeUnit unit) { throw new UnsupportedOperationException("Not implemented"); }
3.26
hbase_CachedEntryQueue_pollLast_rdh
/** * Returns The last element in this queue, or {@code null} if the queue is empty. */ public Map.Entry<BlockCacheKey, BucketEntry> pollLast() { return queue.pollLast(); }
3.26
hbase_CachedEntryQueue_poll_rdh
/** * Returns The next element in this queue, or {@code null} if the queue is empty. */ public Map.Entry<BlockCacheKey, BucketEntry> poll() { return queue.poll(); }
3.26
hbase_CachedEntryQueue_add_rdh
/** * Attempt to add the specified entry to this queue. * <p> * If the queue is smaller than the max size, or if the specified element is ordered after the * smallest element in the queue, the element will be added to the queue. Otherwise, there is no * side effect of this call. * * @param entry * a bucket entry with key to try to add to the queue */ @SuppressWarnings(value = "NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE", justification = "head can not be null as cacheSize is greater than maxSize," + " which means we have something in the queue") public void add(Map.Entry<BlockCacheKey, BucketEntry> entry) { if (cacheSize < maxSize) { queue.add(entry);cacheSize += entry.getValue().getLength(); } else { BucketEntry head = queue.peek().getValue(); if (BucketEntry.COMPARATOR.compare(entry.getValue(), head) > 0) { cacheSize += entry.getValue().getLength(); cacheSize -= head.getLength(); if (cacheSize > maxSize) { queue.poll(); } else { cacheSize += head.getLength(); }queue.add(entry); } } }
3.26
hbase_ReplicationPeerImpl_getId_rdh
/** * Get the identifier of this peer * * @return string representation of the id (short) */ @Override public String getId() { return id; }
3.26
hbase_ShadedAccessControlUtil_toPermissionAction_rdh
/** * Convert a Permission.Action shaded proto to a client Permission.Action object. */ public static Action toPermissionAction(AccessControlProtos.Permission.Action action) { switch (action) { case READ : return Action.READ; case WRITE : return Action.WRITE; case EXEC : return Action.EXEC; case CREATE : return Action.CREATE; case ADMIN : return Action.ADMIN; } throw new IllegalArgumentException("Unknown action value " + action.name()); }
3.26
hbase_ShadedAccessControlUtil_toUserPermission_rdh
/** * Convert a client user permission to a user permission proto * * @param perm * the client UserPermission * @return the protobuf UserPermission */ public static UserPermission toUserPermission(UserPermission perm) { return AccessControlProtos.UserPermission.newBuilder().setUser(ByteString.copyFromUtf8(perm.getUser())).setPermission(toPermission(perm.getPermission())).build(); }
3.26
hbase_ShadedAccessControlUtil_toPermission_rdh
/** * Convert a client Permission to a Permission shaded proto * * @param perm * the client Permission * @return the protobuf Permission */ public static Permission toPermission(Permission perm) { AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder(); if (perm instanceof NamespacePermission) { NamespacePermission nsPerm = ((NamespacePermission) (perm)); ret.setType(Type.Namespace); AccessControlProtos.NamespacePermission.Builder builder = AccessControlProtos.NamespacePermission.newBuilder(); builder.setNamespaceName(protobuf.ByteString.copyFromUtf8(nsPerm.getNamespace())); Permission[] actions = perm.getActions(); if (actions != null) {for (Permission.Action a : actions) { builder.addAction(toPermissionAction(a)); } } ret.setNamespacePermission(builder);} else if (perm instanceof TablePermission) { TablePermission tablePerm = ((TablePermission) (perm));ret.setType(Type.Table); AccessControlProtos.TablePermission.Builder builder = AccessControlProtos.TablePermission.newBuilder(); builder.setTableName(toProtoTableName(tablePerm.getTableName())); if (tablePerm.hasFamily()) { builder.setFamily(ByteString.copyFrom(tablePerm.getFamily())); } if (tablePerm.hasQualifier()) { builder.setQualifier(ByteString.copyFrom(tablePerm.getQualifier())); } Permission[] actions = perm.getActions(); if (actions != null) { for (Permission.Action a : actions) { builder.addAction(toPermissionAction(a));} } ret.setTablePermission(builder); } else { // perm.getAccessScope() == Permission.Scope.GLOBAL ret.setType(Type.Global); AccessControlProtos.GlobalPermission.Builder builder = AccessControlProtos.GlobalPermission.newBuilder(); Permission[] actions = perm.getActions(); if (actions != null) { for (Permission.Action a : actions) { builder.addAction(toPermissionAction(a)); } }ret.setGlobalPermission(builder); } return ret.build(); }
3.26
hbase_ShadedAccessControlUtil_m0_rdh
/** * Convert a client user permission to a user permission shaded proto. */ public static Action m0(Permission.Action action) { switch (action) { case READ : return Action.READ; case WRITE : return Action.WRITE; case EXEC : return Action.EXEC; case CREATE : return Action.CREATE; case ADMIN : return Action.ADMIN; } throw new IllegalArgumentException("Unknown action value " + action.name()); }
3.26
hbase_ShadedAccessControlUtil_toUserTablePermissions_rdh
/** * Convert a ListMultimap&lt;String, TablePermission&gt; where key is username to a shaded * protobuf UserPermission * * @param perm * the list of user and table permissions * @return the protobuf UserTablePermissions */ public static UsersAndPermissions toUserTablePermissions(ListMultimap<String, UserPermission> perm) { AccessControlProtos.UsersAndPermissions.Builder builder = AccessControlProtos.UsersAndPermissions.newBuilder(); for (Map.Entry<String, Collection<UserPermission>> entry : perm.asMap().entrySet()) { AccessControlProtos.UsersAndPermissions.UserPermissions.Builder userPermBuilder = UsersAndPermissions.UserPermissions.newBuilder(); userPermBuilder.setUser(ByteString.copyFromUtf8(entry.getKey())); for (UserPermission userPerm : entry.getValue()) { userPermBuilder.addPermissions(toPermission(userPerm.getPermission())); } builder.addUserPermissions(userPermBuilder.build()); } return builder.build(); }
3.26
hbase_ShadedAccessControlUtil_toPermissionActions_rdh
/** * Converts a list of Permission.Action shaded proto to an array of client Permission.Action * objects. * * @param protoActions * the list of shaded protobuf Actions * @return the converted array of Actions */ public static Action[] toPermissionActions(List<AccessControlProtos.Permission.Action> protoActions) { Permission[] actions = new Permission.Action[protoActions.size()]; for (int i = 0; i < protoActions.size(); i++) { actions[i] = toPermissionAction(protoActions.get(i)); } return actions; }
3.26
hbase_HFilePrettyPrinter_mobFileExists_rdh
/** * Checks whether the referenced mob file exists. */ private boolean mobFileExists(FileSystem fs, TableName tn, String mobFileName, String family, Set<String> foundMobFiles, Set<String> missingMobFiles) throws IOException { if (foundMobFiles.contains(mobFileName)) { return true; } if (missingMobFiles.contains(mobFileName)) { return false; } String tableName = tn.getNameAsString(); List<Path> locations = mobFileLocations.get(tableName); if (locations == null) { locations = new ArrayList<>(2); locations.add(MobUtils.getMobFamilyPath(getConf(), tn, family)); locations.add(HFileArchiveUtil.getStoreArchivePath(getConf(), tn, MobUtils.getMobRegionInfo(tn).getEncodedName(), family)); mobFileLocations.put(tn.getNameAsString(), locations); } boolean exist = false; for (Path location : locations) { Path mobFilePath = new Path(location, mobFileName); if (fs.exists(mobFilePath)) { exist = true; break; } } if (exist) { evictMobFilesIfNecessary(foundMobFiles, FOUND_MOB_FILES_CACHE_CAPACITY); foundMobFiles.add(mobFileName); } else { evictMobFilesIfNecessary(missingMobFiles, MISSING_MOB_FILES_CACHE_CAPACITY); missingMobFiles.add(mobFileName); } return exist; }
3.26
hbase_HFilePrettyPrinter_build_rdh
/** * Builds a {@link ConsoleReporter} with the given properties. * * @return a {@link ConsoleReporter} */ public SimpleReporter build() { return new SimpleReporter(output, stats, locale, timeZone); }
3.26
hbase_HFilePrettyPrinter_evictMobFilesIfNecessary_rdh
/** * Evicts the cached mob files if the set is larger than the limit. */ private void evictMobFilesIfNecessary(Set<String> mobFileNames, int limit) { if (mobFileNames.size() < limit) { return; } int index = 0; int v52 = limit / 2; Iterator<String> fileNamesItr = mobFileNames.iterator(); while ((index < v52) && fileNamesItr.hasNext()) { fileNamesItr.next(); fileNamesItr.remove(); index++; } }
3.26
hbase_HFilePrettyPrinter_run_rdh
/** * Runs the command-line pretty-printer, and returns the desired command exit code (zero for * success, non-zero for failure). */ @Override public int run(String[] args) { if (getConf() == null) { throw new RuntimeException("A Configuration instance must be provided."); } try { CommonFSUtils.setFsDefault(getConf(), CommonFSUtils.getRootDir(getConf())); if (!parseOptions(args)) { return 1; } } catch (IOException ex) { LOG.error("Error parsing command-line options", ex);return 1; } catch (ParseException ex) { LOG.error("Error parsing command-line options", ex); return 1; } // iterate over all files found for (Path fileName : files) { try { int exitCode = processFile(fileName, false); if (exitCode != 0) { return exitCode; } } catch (IOException ex) { LOG.error("Error reading " + fileName, ex); return -2; } } if (verbose || printKey) { out.println("Scanned kv count -> " + count); } return 0; }
3.26
hbase_HFilePrettyPrinter_newBuilder_rdh
/** * Returns a new {@link Builder} for {@link SimpleReporter}. * * @return a {@link Builder} instance for a {@link SimpleReporter} */ public static Builder newBuilder() { return new Builder(); }
3.26
hbase_HFilePrettyPrinter_outputTo_rdh
/** * Write to the given {@link PrintStream}. * * @param output * a {@link PrintStream} instance. * @return {@code this} */ public Builder outputTo(PrintStream output) { this.output = output; return this; }
3.26
hbase_HFilePrettyPrinter_processFile_rdh
// HBASE-22561 introduces boolean checkRootDir for WebUI specificly public int processFile(Path file, boolean checkRootDir) throws IOException { if (verbose) { out.println("Scanning -> " + file); } if (checkRootDir) { Path rootPath = CommonFSUtils.getRootDir(getConf()); String rootString = rootPath + Path.SEPARATOR; if (!file.toString().startsWith(rootString)) { // First we see if fully-qualified URI matches the root dir. It might // also be an absolute path in the same filesystem, so we prepend the FS // of the root dir and see if that fully-qualified URI matches. FileSystem rootFS = rootPath.getFileSystem(getConf()); String v20 = rootFS.getUri().toString() + file.toString(); if (!v20.startsWith(rootString)) { err.println(((("ERROR, file (" + file) + ") is not in HBase's root directory (") + rootString) + ")"); return -2; } } } FileSystem fs = file.getFileSystem(getConf()); if (!fs.exists(file)) { err.println("ERROR, file doesnt exist: " + file); return -2; } HFile.Reader reader = HFile.createReader(fs, file, CacheConfig.DISABLED, true, getConf()); Map<byte[], byte[]> fileInfo = reader.getHFileInfo(); KeyValueStatsCollector fileStats = null; if (((((verbose || printKey) || checkRow) || checkFamily) || printStats) || checkMobIntegrity) { // scan over file and read key/value's and check if requested HFileScanner scanner = reader.getScanner(getConf(), false, false, false); fileStats = new KeyValueStatsCollector(); boolean shouldScanKeysValues; if (this.isSeekToRow && (!Bytes.equals(row, reader.getFirstRowKey().orElse(null)))) { // seek to the first kv on this row shouldScanKeysValues = scanner.seekTo(PrivateCellUtil.createFirstOnRow(this.row)) != (-1); } else { shouldScanKeysValues = scanner.seekTo(); } if (shouldScanKeysValues) { scanKeysValues(file, fileStats, scanner, row); } } // print meta data if (shouldPrintMeta) { printMeta(reader, fileInfo);} if (printBlockIndex) { out.println("Block Index:"); out.println(reader.getDataBlockIndexReader()); }if (printBlockHeaders) { out.println("Block Headers:"); /* TODO: this same/similar block iteration logic is used in HFileBlock#blockRange and TestLazyDataBlockDecompression. Refactor? */ FSDataInputStreamWrapper fsdis = new FSDataInputStreamWrapper(fs, file); long fileSize = fs.getFileStatus(file).getLen(); FixedFileTrailer trailer = FixedFileTrailer.readFromStream(fsdis.getStream(false), fileSize); long v30 = trailer.getFirstDataBlockOffset(); long max = trailer.getLastDataBlockOffset(); HFileBlock block;while (v30 <= max) { block = /* cacheBlock */ /* pread */ /* isCompaction */ /* updateCacheMetrics */ reader.readBlock(v30, -1, false, false, false, false, null, null); v30 += block.getOnDiskSizeWithHeader(); out.println(block); } } if (printStats) { fileStats.finish(printStatRanges); out.println("Stats:\n" + fileStats); } reader.close(); return 0; }
3.26
hbase_HFilePrettyPrinter_asSeparateLines_rdh
/** * Format a string of the form "k1=v1, k2=v2, ..." into separate lines with a four-space * indentation. */ private static String asSeparateLines(String keyValueStr) { return keyValueStr.replaceAll(", ([a-zA-Z]+=)", (",\n" + FOUR_SPACES) + "$1"); }
3.26
hbase_HFilePrettyPrinter_addStats_rdh
/** * Add the given {@link KeyValueStats} to be reported * * @param stat * the stat to be reported * @return {@code this} */ public Builder addStats(KeyValueStats stat) { this.stats.add(stat); return this; }
3.26
hbase_FlushSnapshotSubprocedure_releaseBarrier_rdh
/** * Hooray! */ public void releaseBarrier() { // NO OP }
3.26
hbase_FlushSnapshotSubprocedure_acquireBarrier_rdh
/** * do nothing, core of snapshot is executed in {@link #insideBarrier} step. */ @Override public void acquireBarrier() throws ForeignException {// NO OP }
3.26
hbase_FlushSnapshotSubprocedure_cleanup_rdh
/** * Cancel threads if they haven't finished. */ @Override public void cleanup(Exception e) { LOG.info(("Aborting all online FLUSH snapshot subprocedure task threads for '" + snapshot.getName()) + "' due to error", e);try { taskManager.cancelTasks(); } catch (InterruptedException e1) { Thread.currentThread().interrupt(); } }
3.26
hbase_FlushSnapshotSubprocedure_insideBarrier_rdh
/** * do a flush snapshot of every region on this rs from the target table. */ @Override public byte[] insideBarrier() throws ForeignException { flushSnapshot(); return new byte[0]; }
3.26
hbase_ImmutableBytesWritable_hashCode_rdh
// Below methods copied from BytesWritable @Override public int hashCode() { int hash = 1; for (int i = offset; i < (offset + length); i++) hash = (31 * hash) + ((int) (bytes[i])); return hash; }
3.26
hbase_ImmutableBytesWritable_set_rdh
/** * Use passed bytes as backing array for this instance. */ public void set(final byte[] b) { m0(b, 0, b.length);}
3.26
hbase_ImmutableBytesWritable_m0_rdh
/** * Use passed bytes as backing array for this instance. */ public void m0(final byte[] b, final int offset, final int length) { this.bytes = b; this.offset = offset; this.length = length; }
3.26
hbase_ImmutableBytesWritable_getLength_rdh
/** * Returns the number of valid bytes in the buffer */ public int getLength() { if (this.bytes == null) { throw new IllegalStateException("Uninitialiized. Null constructor " + "called w/o accompaying readFields invocation"); } return this.length; }
3.26
hbase_ImmutableBytesWritable_toArray_rdh
/** * Convert a list of byte arrays into an array of byte arrays * * @param array * List of byte []. * @return Array of byte []. */ public static byte[][] toArray(final List<byte[]> array) { // List#toArray doesn't work on lists of byte []. byte[][] v6 = new byte[array.size()][]; for (int i = 0; i < array.size(); i++) { v6[i] = array.get(i);} return v6; }
3.26
hbase_ImmutableBytesWritable_copyBytes_rdh
/** * Returns a copy of the bytes referred to by this writable */ public byte[] copyBytes() { return Arrays.copyOfRange(bytes, offset, offset + length); }
3.26
hbase_ReplicationUtils_getAdaptiveTimeout_rdh
/** * Get the adaptive timeout value when performing a retry */ public static int getAdaptiveTimeout(final int initialValue, final int retries) { int ntries = retries; if (ntries >= RETRY_BACKOFF.length) { ntries = RETRY_BACKOFF.length - 1; } if (ntries < 0) { ntries = 0; } return initialValue * HConstants.RETRY_BACKOFF[ntries]; }
3.26
hbase_ReplicationUtils_sleepForRetries_rdh
/** * Do the sleeping logic * * @param msg * Why we sleep * @param sleepForRetries * the base sleep time. * @param sleepMultiplier * by how many times the default sleeping time is augmented * @param maxRetriesMultiplier * the max retry multiplier * @return True if <code>sleepMultiplier</code> is &lt; <code>maxRetriesMultiplier</code> */ public static boolean sleepForRetries(String msg, long sleepForRetries, int sleepMultiplier, int maxRetriesMultiplier) { try { LOG.trace("{}, sleeping {} times {}", msg, sleepForRetries, sleepMultiplier); Thread.sleep(sleepForRetries * sleepMultiplier); } catch (InterruptedException e) { LOG.debug("Interrupted while sleeping between retries"); Thread.currentThread().interrupt(); } return sleepMultiplier < maxRetriesMultiplier; }
3.26
hbase_ReplicationUtils_isReplicationForBulkLoadDataEnabled_rdh
/** * * @param c * Configuration to look at * @return True if replication for bulk load data is enabled. */ public static boolean isReplicationForBulkLoadDataEnabled(final Configuration c) { return c.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT); }
3.26
hbase_TagUtil_carryForwardTags_rdh
/** * Add to <code>tagsOrNull</code> any Tags <code>cell</code> is carrying or null if none. */ public static List<Tag> carryForwardTags(final List<Tag> tagsOrNull, final Cell cell) { Iterator<Tag> itr = PrivateCellUtil.tagsIterator(cell); if (itr == EMPTY_TAGS_ITR) { // If no Tags, return early. return tagsOrNull; } List<Tag> tags = tagsOrNull; if (tags == null) { tags = new ArrayList<>(); } while (itr.hasNext()) { tags.add(itr.next()); } return tags; }
3.26
hbase_TagUtil_carryForwardTTLTag_rdh
/** * Returns Carry forward the TTL tag. */ public static List<Tag> carryForwardTTLTag(final List<Tag> tagsOrNull, final long ttl) { if (ttl == Long.MAX_VALUE) { return tagsOrNull; }List<Tag> tags = tagsOrNull; // If we are making the array in here, given we are the last thing checked, we'll be only thing // in the array so set its size to '1' (I saw this being done in earlier version of // tag-handling). if (tags == null) { tags = new ArrayList<>(1); } else { // Remove existing TTL tags if any Iterator<Tag> v9 = tags.iterator(); while (v9.hasNext()) { Tag v10 = v9.next(); if (v10.getType() == TagType.TTL_TAG_TYPE) { v9.remove(); break; } } } tags.add(new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(ttl))); return tags; }
3.26
hbase_TagUtil_asList_rdh
/** * Creates list of tags from given byte array, expected that it is in the expected tag format. * * @param b * The byte array * @param offset * The offset in array where tag bytes begin * @param length * Total length of all tags bytes * @return List of tags */ public static List<Tag> asList(byte[] b, int offset, int length) { List<Tag> tags = new ArrayList<>(); int pos = offset; while (pos < (offset + length)) { int tagLen = Bytes.readAsInt(b, pos, Tag.TAG_LENGTH_SIZE); tags.add(new ArrayBackedTag(b, pos, tagLen + Tag.TAG_LENGTH_SIZE)); pos += Tag.TAG_LENGTH_SIZE + tagLen; } return tags; }
3.26
hbase_TagUtil_readVIntValuePart_rdh
/** * Reads an int value stored as a VInt at tag's given offset. * * @param tag * The Tag * @param offset * The offset where VInt bytes begin * @return A pair of the int value and number of bytes taken to store VInt * @throws IOException * When varint is malformed and not able to be read correctly */ public static Pair<Integer, Integer> readVIntValuePart(Tag tag, int offset) throws IOException { if (tag.hasArray()) { return StreamUtils.readRawVarint32(tag.getValueArray(), offset); } return StreamUtils.readRawVarint32(tag.getValueByteBuffer(), offset); }
3.26
hbase_TagUtil_fromList_rdh
/** * Write a list of tags into a byte array Note : these are all purely internal APIs. It helps in * cases where we have set of tags and we would want to create a cell out of it. Say in Mobs we * create a reference tags to indicate the presence of mob data. Also note that these are not * exposed to CPs also * * @param tags * The list of tags * @return the serialized tag data as bytes */ public static byte[] fromList(List<Tag> tags) { if ((tags == null) || tags.isEmpty()) { return HConstants.EMPTY_BYTE_ARRAY;} int length = 0; for (Tag tag : tags) { length += tag.getValueLength() + Tag.INFRASTRUCTURE_SIZE; } byte[] b = new byte[length]; int pos = 0; int tlen; for (Tag tag : tags) { tlen = tag.getValueLength(); pos = Bytes.putAsShort(b, pos, tlen + Tag.TYPE_LENGTH_SIZE); pos = Bytes.putByte(b, pos, tag.getType()); if (tag.hasArray()) { pos = Bytes.putBytes(b, pos, tag.getValueArray(), tag.getValueOffset(), tlen); } else { ByteBufferUtils.copyFromBufferToArray(b, tag.getValueByteBuffer(), tag.getValueOffset(), pos, tlen); pos += tlen; } } return b; }
3.26
hbase_MetricRegistryInfo_getMetricsDescription_rdh
/** * Get the description of what this source exposes. */ public String getMetricsDescription() { return metricsDescription; }
3.26
hbase_MetricRegistryInfo_getMetricsName_rdh
/** * Get the name of the metrics that are being exported by this source. Eg. IPC, GC, WAL */ public String getMetricsName() { return metricsName; }
3.26
hbase_MetricRegistryInfo_getMetricsJmxContext_rdh
/** * Get the name of the context in JMX that this source will be exposed through. This is in * ObjectName format. With the default context being Hadoop -&gt; HBase */ public String getMetricsJmxContext() { return metricsJmxContext; }
3.26
hbase_MetricRegistryInfo_getMetricsContext_rdh
/** * Get the metrics context. For hadoop metrics2 system this is usually an all lowercased string. * eg. regionserver, master, thriftserver * * @return The string context used to register this source to hadoop's metrics2 system. */ public String getMetricsContext() { return metricsContext;}
3.26
hbase_SimpleServerCall_done_rdh
/** * Call is done. Execution happened and we returned results to client. It is now safe to cleanup. */ @SuppressWarnings(value = "IS2_INCONSISTENT_SYNC", justification = "Presume the lock on processing request held by caller is protection enough") @Override public void done() { super.done(); this.getConnection().decRpcCount();// Say that we're done with this call. }
3.26
hbase_UserScanQueryMatcher_mergeFilterResponse_rdh
/** * Call this when scan has filter. Decide the desired behavior by checkVersions's MatchCode and * filterCell's ReturnCode. Cell may be skipped by filter, so the column versions in result may be * less than user need. It need to check versions again when filter and columnTracker both include * the cell. <br/> * * <pre> * ColumnChecker FilterResponse Desired behavior * INCLUDE SKIP SKIP * INCLUDE NEXT_COL SEEK_NEXT_COL or SEEK_NEXT_ROW * INCLUDE NEXT_ROW SEEK_NEXT_ROW * INCLUDE SEEK_NEXT_USING_HINT SEEK_NEXT_USING_HINT * INCLUDE INCLUDE INCLUDE * INCLUDE INCLUDE_AND_NEXT_COL INCLUDE_AND_SEEK_NEXT_COL * INCLUDE INCLUDE_AND_SEEK_NEXT_ROW INCLUDE_AND_SEEK_NEXT_ROW * INCLUDE_AND_SEEK_NEXT_COL SKIP SEEK_NEXT_COL * INCLUDE_AND_SEEK_NEXT_COL NEXT_COL SEEK_NEXT_COL or SEEK_NEXT_ROW * INCLUDE_AND_SEEK_NEXT_COL NEXT_ROW SEEK_NEXT_ROW * INCLUDE_AND_SEEK_NEXT_COL SEEK_NEXT_USING_HINT SEEK_NEXT_USING_HINT * INCLUDE_AND_SEEK_NEXT_COL INCLUDE INCLUDE_AND_SEEK_NEXT_COL * INCLUDE_AND_SEEK_NEXT_COL INCLUDE_AND_NEXT_COL INCLUDE_AND_SEEK_NEXT_COL * INCLUDE_AND_SEEK_NEXT_COL INCLUDE_AND_SEEK_NEXT_ROW INCLUDE_AND_SEEK_NEXT_ROW * INCLUDE_AND_SEEK_NEXT_ROW SKIP SEEK_NEXT_ROW * INCLUDE_AND_SEEK_NEXT_ROW NEXT_COL SEEK_NEXT_ROW * INCLUDE_AND_SEEK_NEXT_ROW NEXT_ROW SEEK_NEXT_ROW * INCLUDE_AND_SEEK_NEXT_ROW SEEK_NEXT_USING_HINT SEEK_NEXT_USING_HINT * INCLUDE_AND_SEEK_NEXT_ROW INCLUDE INCLUDE_AND_SEEK_NEXT_ROW * INCLUDE_AND_SEEK_NEXT_ROW INCLUDE_AND_NEXT_COL INCLUDE_AND_SEEK_NEXT_ROW * INCLUDE_AND_SEEK_NEXT_ROW INCLUDE_AND_SEEK_NEXT_ROW INCLUDE_AND_SEEK_NEXT_ROW * </pre> */ private final MatchCode mergeFilterResponse(Cell cell, MatchCode matchCode, ReturnCode filterResponse) { switch (filterResponse) { case SKIP : if (matchCode == MatchCode.INCLUDE) { return MatchCode.SKIP; } else if (matchCode == MatchCode.INCLUDE_AND_SEEK_NEXT_COL) { return MatchCode.SEEK_NEXT_COL; } else if (matchCode == MatchCode.INCLUDE_AND_SEEK_NEXT_ROW) { return MatchCode.SEEK_NEXT_ROW; } break; case NEXT_COL : if ((matchCode == MatchCode.INCLUDE) || (matchCode == MatchCode.INCLUDE_AND_SEEK_NEXT_COL)) { return columns.getNextRowOrNextColumn(cell); } else if (matchCode == MatchCode.INCLUDE_AND_SEEK_NEXT_ROW) { return MatchCode.SEEK_NEXT_ROW; } break; case NEXT_ROW : return MatchCode.SEEK_NEXT_ROW; case SEEK_NEXT_USING_HINT : return MatchCode.SEEK_NEXT_USING_HINT; case INCLUDE : break; case INCLUDE_AND_NEXT_COL : if (matchCode == MatchCode.INCLUDE) { matchCode = MatchCode.INCLUDE_AND_SEEK_NEXT_COL; } break; case INCLUDE_AND_SEEK_NEXT_ROW : matchCode = MatchCode.INCLUDE_AND_SEEK_NEXT_ROW;break; default : throw new RuntimeException("UNEXPECTED"); } // It means it is INCLUDE, INCLUDE_AND_SEEK_NEXT_COL or INCLUDE_AND_SEEK_NEXT_ROW. assert ((matchCode == MatchCode.INCLUDE) || (matchCode == MatchCode.INCLUDE_AND_SEEK_NEXT_COL)) || (matchCode == MatchCode.INCLUDE_AND_SEEK_NEXT_ROW); // We need to make sure that the number of cells returned will not exceed max version in scan // when the match code is INCLUDE* case. if ((curColCell == null) || (!CellUtil.matchingRowColumn(cell, curColCell))) { count = 0; curColCell = cell; } count += 1; if (count > versionsAfterFilter) { // when the number of cells exceed max version in scan, we should return SEEK_NEXT_COL match // code, but if current code is INCLUDE_AND_SEEK_NEXT_ROW, we can optimize to choose the max // step between SEEK_NEXT_COL and INCLUDE_AND_SEEK_NEXT_ROW, which is SEEK_NEXT_ROW. if (matchCode == MatchCode.INCLUDE_AND_SEEK_NEXT_ROW) { matchCode = MatchCode.SEEK_NEXT_ROW; } else { matchCode = MatchCode.SEEK_NEXT_COL;} } if ((matchCode == MatchCode.INCLUDE_AND_SEEK_NEXT_COL) || (matchCode == MatchCode.SEEK_NEXT_COL)) { // Update column tracker to next column, As we use the column hint from the tracker to seek // to next cell (HBASE-19749) columns.doneWithColumn(cell); } return matchCode; }
3.26
hbase_ThriftUtilities_deletesFromThrift_rdh
/** * Converts multiple {@link TDelete}s (Thrift) into a list of {@link Delete}s (HBase). * * @param in * list of <code>TDelete</code>s to convert * @return list of converted <code>Delete</code>s * @see #deleteFromThrift(TDelete) */ public static List<Delete> deletesFromThrift(List<TDelete> in) { List<Delete> out = new ArrayList<>(in.size()); for (TDelete delete : in) { out.add(deleteFromThrift(delete)); } return out; }
3.26
hbase_ThriftUtilities_getFromThrift_rdh
/** * Creates a {@link Get} (HBase) from a {@link TGet} (Thrift). This ignores any timestamps set on * {@link TColumn} objects. * * @param in * the <code>TGet</code> to convert * @return <code>Get</code> object * @throws IOException * if an invalid time range or max version parameter is given */ public static Get getFromThrift(TGet in) throws IOException { Get out = new Get(in.getRow()); // Timestamp overwrites time range if both are set if (in.isSetTimestamp()) { out.setTimestamp(in.getTimestamp()); } else if (in.isSetTimeRange()) { out.setTimeRange(in.getTimeRange().getMinStamp(), in.getTimeRange().getMaxStamp()); } if (in.isSetMaxVersions()) { out.readVersions(in.getMaxVersions()); } if (in.isSetFilterString()) { ParseFilter parseFilter = new ParseFilter(); out.setFilter(parseFilter.parseFilterString(in.getFilterString())); } if (in.isSetAttributes()) { addAttributes(out, in.getAttributes()); } if (in.isSetAuthorizations()) {out.setAuthorizations(new Authorizations(in.getAuthorizations().getLabels())); } if (in.isSetConsistency()) { out.setConsistency(consistencyFromThrift(in.getConsistency())); } if (in.isSetTargetReplicaId()) { out.setReplicaId(in.getTargetReplicaId()); } if (in.isSetCacheBlocks()) { out.setCacheBlocks(in.isCacheBlocks()); } if (in.isSetStoreLimit()) { out.setMaxResultsPerColumnFamily(in.getStoreLimit()); } if (in.isSetStoreOffset()) { out.setRowOffsetPerColumnFamily(in.getStoreOffset()); } if (in.isSetExistence_only()) { out.setCheckExistenceOnly(in.isExistence_only()); } if (in.isSetColumns()) { for (TColumn column : in.getColumns()) { if (column.isSetQualifier()) {out.addColumn(column.getFamily(), column.getQualifier()); } else { out.addFamily(column.getFamily()); } } } if (in.isSetFilterBytes()) { out.setFilter(filterFromThrift(in.getFilterBytes())); }return out; }
3.26
hbase_ThriftUtilities_rowMutationsFromThrift_rdh
/** * Creates a {@link RowMutations} (HBase) from a {@link TRowMutations} (Thrift) * * @param in * the <code>TRowMutations</code> to convert * @return converted <code>RowMutations</code> */ public static RowMutations rowMutationsFromThrift(TRowMutations in) throws IOException { List<TMutation> mutations = in.getMutations(); RowMutations out = new RowMutations(in.getRow(), mutations.size()); for (TMutation mutation : mutations) { if (mutation.isSetPut()) { out.add(putFromThrift(mutation.getPut())); } if (mutation.isSetDeleteSingle()) { out.add(deleteFromThrift(mutation.getDeleteSingle())); } } return out; }
3.26
hbase_ThriftUtilities_putsFromThrift_rdh
/** * Converts multiple {@link TPut}s (Thrift) into a list of {@link Put}s (HBase). * * @param in * list of <code>TPut</code>s to convert * @return list of converted <code>Put</code>s * @see #putFromThrift(TPut) */ public static List<Put> putsFromThrift(List<TPut> in) { List<Put> out = new ArrayList<>(in.size()); for (TPut put : in) { out.add(putFromThrift(put)); } return out; }
3.26
hbase_ThriftUtilities_deleteFromThrift_rdh
/**
 * Creates a {@link Delete} (HBase) from a {@link TDelete} (Thrift).
 *
 * @param in the <code>TDelete</code> to convert
 * @return converted <code>Delete</code>
 */
public static Delete deleteFromThrift(TDelete in) {
  Delete out;
  if (in.isSetColumns()) {
    out = new Delete(in.getRow());
    for (TColumn column : in.getColumns()) {
      if (in.isSetDeleteType()) {
        switch (in.getDeleteType()) {
          case DELETE_COLUMN:
            if (column.isSetTimestamp()) {
              out.addColumn(column.getFamily(), column.getQualifier(), column.getTimestamp());
            } else {
              out.addColumn(column.getFamily(), column.getQualifier());
            }
            break;
          case DELETE_COLUMNS:
            if (column.isSetTimestamp()) {
              out.addColumns(column.getFamily(), column.getQualifier(), column.getTimestamp());
            } else {
              out.addColumns(column.getFamily(), column.getQualifier());
            }
            break;
          case DELETE_FAMILY:
            if (column.isSetTimestamp()) {
              out.addFamily(column.getFamily(), column.getTimestamp());
            } else {
              out.addFamily(column.getFamily());
            }
            break;
          case DELETE_FAMILY_VERSION:
            if (column.isSetTimestamp()) {
              out.addFamilyVersion(column.getFamily(), column.getTimestamp());
            } else {
              throw new IllegalArgumentException(
                "Timestamp is required for TDelete with DeleteFamilyVersion type");
            }
            break;
          default:
            throw new IllegalArgumentException("DeleteType is required for TDelete");
        }
      } else {
        throw new IllegalArgumentException("DeleteType is required for TDelete");
      }
    }
  } else if (in.isSetTimestamp()) {
    out = new Delete(in.getRow(), in.getTimestamp());
  } else {
    out = new Delete(in.getRow());
  }
  if (in.isSetAttributes()) {
    addAttributes(out, in.getAttributes());
  }
  if (in.isSetDurability()) {
    out.setDurability(durabilityFromThrift(in.getDurability()));
  }
  return out;
}
3.26
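For illustration only (not part of the source): a sketch of the DELETE_COLUMNS branch above, assuming the Thrift2-generated TColumn(ByteBuffer family) constructor and setQualifier/setDeleteType setters; imports are omitted as in the surrounding snippets.

// Hypothetical usage sketch: remove all versions of cf:q for one row.
TDelete tDelete = new TDelete(ByteBuffer.wrap(Bytes.toBytes("row-1")));
tDelete.setDeleteType(TDeleteType.DELETE_COLUMNS);
TColumn column = new TColumn(ByteBuffer.wrap(Bytes.toBytes("cf")));
column.setQualifier(Bytes.toBytes("q"));
tDelete.setColumns(Collections.singletonList(column));
Delete delete = deleteFromThrift(tDelete);  // maps to Delete.addColumns(cf, q)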
hbase_ThriftUtilities_resultsFromHBase_rdh
/**
 * Converts multiple {@link Result}s (HBase) into a list of {@link TResult}s (Thrift).
 *
 * @param in array of <code>Result</code>s to convert
 * @return list of converted <code>TResult</code>s
 * @see #resultFromHBase(Result)
 */
public static List<TResult> resultsFromHBase(Result[] in) {
  List<TResult> out = new ArrayList<>(in.length);
  for (Result result : in) {
    // m0 is the single-result converter referenced by the @see tag (resultFromHBase)
    out.add(m0(result));
  }
  return out;
}
3.26
hbase_ThriftUtilities_addAttributes_rdh
/**
 * Adds all the attributes into the Operation object
 */
private static void addAttributes(OperationWithAttributes op,
    Map<ByteBuffer, ByteBuffer> attributes) {
  if ((attributes == null) || attributes.isEmpty()) {
    return;
  }
  for (Map.Entry<ByteBuffer, ByteBuffer> entry : attributes.entrySet()) {
    String name = Bytes.toStringBinary(getBytes(entry.getKey()));
    byte[] value = getBytes(entry.getValue());
    op.setAttribute(name, value);
  }
}
3.26
hbase_ThriftUtilities_putFromThrift_rdh
/**
 * Creates a {@link Put} (HBase) from a {@link TPut} (Thrift)
 *
 * @param in the <code>TPut</code> to convert
 * @return converted <code>Put</code>
 */
public static Put putFromThrift(TPut in) {
  Put out;
  if (in.isSetTimestamp()) {
    out = new Put(in.getRow(), in.getTimestamp());
  } else {
    out = new Put(in.getRow());
  }
  if (in.isSetDurability()) {
    out.setDurability(durabilityFromThrift(in.getDurability()));
  }
  for (TColumnValue columnValue : in.getColumnValues()) {
    try {
      if (columnValue.isSetTimestamp()) {
        out.add(CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(out.getRow())
          .setFamily(columnValue.getFamily()).setQualifier(columnValue.getQualifier())
          .setTimestamp(columnValue.getTimestamp()).setType(Type.Put)
          .setValue(columnValue.getValue()).build());
      } else {
        out.add(CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(out.getRow())
          .setFamily(columnValue.getFamily()).setQualifier(columnValue.getQualifier())
          .setTimestamp(out.getTimestamp()).setType(Type.Put)
          .setValue(columnValue.getValue()).build());
      }
    } catch (IOException e) {
      throw new IllegalArgumentException(e);
    }
  }
  if (in.isSetAttributes()) {
    addAttributes(out, in.getAttributes());
  }
  if (in.getCellVisibility() != null) {
    out.setCellVisibility(new CellVisibility(in.getCellVisibility().getExpression()));
  }
  return out;
}
3.26
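For illustration only (not part of the source): a sketch showing how a per-cell timestamp on a TColumnValue flows into the Cell built above, versus falling back to the Put's own timestamp when unset. The required-field Thrift constructors are assumptions; imports are omitted as in the surrounding snippets.

// Hypothetical usage sketch.
TColumnValue cv = new TColumnValue(ByteBuffer.wrap(Bytes.toBytes("cf")),
  ByteBuffer.wrap(Bytes.toBytes("q")), ByteBuffer.wrap(Bytes.toBytes("value-1")));
cv.setTimestamp(1234567890L);  // per-cell timestamp; otherwise out.getTimestamp() is used
TPut tPut = new TPut(ByteBuffer.wrap(Bytes.toBytes("row-1")), Collections.singletonList(cv));
Put put = putFromThrift(tPut);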
hbase_ThriftUtilities_getsFromThrift_rdh
/**
 * Converts multiple {@link TGet}s (Thrift) into a list of {@link Get}s (HBase).
 *
 * @param in list of <code>TGet</code>s to convert
 * @return list of <code>Get</code> objects
 * @throws IOException if an invalid time range or max version parameter is given
 * @see #getFromThrift(TGet)
 */
public static List<Get> getsFromThrift(List<TGet> in) throws IOException {
  List<Get> out = new ArrayList<>(in.size());
  for (TGet get : in) {
    out.add(getFromThrift(get));
  }
  return out;
}

/**
 * Creates a {@link TResult} (Thrift) from a {@link Result}
3.26
hbase_RegionStateStore_hasGlobalReplicationScope_rdh
// ==========================================================================
//  Table Descriptors helpers
// ==========================================================================
private boolean hasGlobalReplicationScope(TableName tableName) throws IOException {
  return hasGlobalReplicationScope(getDescriptor(tableName));
}
3.26
hbase_RegionStateStore_hasMergeRegions_rdh
/**
 * Check whether the given {@code region} has any 'info:merge*' columns.
 */
public boolean hasMergeRegions(RegionInfo region) throws IOException {
  return CatalogFamilyFormat.hasMergeRegions(getRegionCatalogResult(region).rawCells());
}
3.26
hbase_RegionStateStore_mergeRegions_rdh
// ============================================================================================
//  Update Region Merging State helpers
// ============================================================================================
public void mergeRegions(RegionInfo child, RegionInfo[] parents, ServerName serverName,
    TableDescriptor htd) throws IOException {
  boolean globalScope = htd.hasGlobalReplicationScope();
  long time = HConstants.LATEST_TIMESTAMP;
  List<Mutation> mutations = new ArrayList<>();
  List<RegionInfo> replicationParents = new ArrayList<>();
  for (RegionInfo ri : parents) {
    long seqNum = globalScope ? getOpenSeqNumForParentRegion(ri) : -1;
    // Deletes for merging regions
    mutations.add(MetaTableAccessor.makeDeleteFromRegionInfo(ri, time));
    if (seqNum > 0) {
      mutations.add(ReplicationBarrierFamilyFormat.makePutForReplicationBarrier(ri, seqNum, time));
      replicationParents.add(ri);
    }
  }
  // Put for the merged (child) region
  Put putOfMerged = MetaTableAccessor.makePutFromRegionInfo(child, time);
  putOfMerged = addMergeRegions(putOfMerged, Arrays.asList(parents));
  // Set initial state to CLOSED.
  // NOTE: If initial state is not set to CLOSED then merged region gets added with the
  // default OFFLINE state. If Master gets restarted after this step, start up sequence of
  // master tries to assign this offline region. This is followed by re-assignments of the
  // merged region from resumed {@link MergeTableRegionsProcedure}
  MetaTableAccessor.addRegionStateToPut(putOfMerged, RegionInfo.DEFAULT_REPLICA_ID, State.CLOSED);
  mutations.add(putOfMerged);
  // The merged is a new region, openSeqNum = 1 is fine. ServerName may be null
  // if crash after merge happened but before we got to here.. means in-memory
  // locations of offlined merged, now-closed, regions is lost. Should be ok. We
  // assign the merged region later.
  if (serverName != null) {
    MetaTableAccessor.addLocation(putOfMerged, serverName, 1, child.getReplicaId());
  }
  // Add empty locations for region replicas of the merged region so that number of replicas
  // can be cached whenever the primary region is looked up from meta
  int regionReplication = getRegionReplication(htd);
  for (int i = 1; i < regionReplication; i++) {
    MetaTableAccessor.addEmptyLocation(putOfMerged, i);
  }
  // add parent reference for serial replication
  if (!replicationParents.isEmpty()) {
    ReplicationBarrierFamilyFormat.addReplicationParent(putOfMerged, replicationParents);
  }
  m0(child, mutations);
}
3.26
hbase_RegionStateStore_deleteRegion_rdh
// ============================================================================================
//  Delete Region State helpers
// ============================================================================================
/**
 * Deletes the specified region.
 */
public void deleteRegion(final RegionInfo regionInfo) throws IOException {
  deleteRegions(Collections.singletonList(regionInfo));
}
3.26