name: string (length 12 to 178)
code_snippet: string (length 8 to 36.5k)
score: float64 (values 3.26 to 3.68)
hbase_EncodedDataBlock_getEncodedData_rdh
/** * Returns encoded data with header */ private byte[] getEncodedData() { if (cachedEncodedData != null) { return cachedEncodedData; } cachedEncodedData = encodeData(); return cachedEncodedData; }
3.26
hbase_EncodedDataBlock_getSize_rdh
/** * Find the size of minimal buffer that could store compressed data. * * @return Size in bytes of compressed data. */ public int getSize() { return getEncodedData().length; }
3.26
hbase_ReplicationProtobufUtil_buildReplicateWALEntryRequest_rdh
/** * Create a new ReplicateWALEntryRequest from a list of WAL entries * * @param entries * the WAL entries to be replicated * @param encodedRegionName * alternative region name to use if not null * @param replicationClusterId * Id which will uniquely identify source cluster FS client * configurations in the replication configuration directory * @param sourceBaseNamespaceDir * Path to source cluster base namespace directory * @param sourceHFileArchiveDir * Path to the source cluster hfile archive directory * @return a pair of ReplicateWALEntryRequest and a CellScanner over all the WALEdit values found. */ public static Pair<ReplicateWALEntryRequest, CellScanner> buildReplicateWALEntryRequest(final Entry[] entries, byte[] encodedRegionName, String replicationClusterId, Path sourceBaseNamespaceDir, Path sourceHFileArchiveDir) { // Accumulate all the Cells seen in here. List<List<? extends Cell>> allCells = new ArrayList<>(entries.length); int size = 0; WALEntry.Builder entryBuilder = WALEntry.newBuilder(); ReplicateWALEntryRequest.Builder builder = ReplicateWALEntryRequest.newBuilder(); for (Entry entry : entries) { entryBuilder.clear(); WALProtos.WALKey.Builder keyBuilder; try { keyBuilder = entry.getKey().getBuilder(WALCellCodec.getNoneCompressor()); } catch (IOException e) { throw new AssertionError("There should not throw exception since NoneCompressor do not throw any exceptions", e); } if (encodedRegionName != null) { keyBuilder.setEncodedRegionName(UnsafeByteOperations.unsafeWrap(encodedRegionName)); } entryBuilder.setKey(keyBuilder.build()); WALEdit edit = entry.getEdit(); List<Cell> cells = edit.getCells(); // Add up the size. It is used later serializing out the kvs. for (Cell cell : cells) { size += PrivateCellUtil.estimatedSerializedSizeOf(cell); } // Collect up the cells allCells.add(cells); // Write out how many cells associated with this entry. entryBuilder.setAssociatedCellCount(cells.size()); builder.addEntry(entryBuilder.build()); } if (replicationClusterId != null) { builder.setReplicationClusterId(replicationClusterId); } if (sourceBaseNamespaceDir != null) { builder.setSourceBaseNamespaceDirPath(sourceBaseNamespaceDir.toString()); } if (sourceHFileArchiveDir != null) { builder.setSourceHFileArchiveDirPath(sourceHFileArchiveDir.toString()); } return new Pair<>(builder.build(), getCellScanner(allCells, size)); }
3.26
hbase_ReplicationProtobufUtil_getCellScanner_rdh
/** * Returns <code>cells</code> packaged as a CellScanner */ static CellScanner getCellScanner(final List<List<? extends Cell>> cells, final int size) { return new SizedCellScanner() { private final Iterator<List<? extends Cell>> entries = cells.iterator(); private Iterator<? extends Cell> currentIterator = null; private Cell currentCell; @Override public Cell current() { return this.currentCell; } @Override public boolean advance() { if (this.currentIterator == null) { if (!this.entries.hasNext()) return false; this.currentIterator = this.entries.next().iterator(); } if (this.currentIterator.hasNext()) { this.currentCell = this.currentIterator.next(); return true; } this.currentCell = null; this.currentIterator = null; return advance(); } @Override public long heapSize() { return size; } }; }
3.26
hbase_ReplicationProtobufUtil_replicateWALEntry_rdh
/** * A helper to replicate a list of WAL entries using region server admin * * @param admin * the region server admin * @param entries * Array of WAL entries to be replicated * @param replicationClusterId * Id which will uniquely identify source cluster FS client * configurations in the replication configuration directory * @param sourceBaseNamespaceDir * Path to source cluster base namespace directory * @param sourceHFileArchiveDir * Path to the source cluster hfile archive directory */ public static CompletableFuture<ReplicateWALEntryResponse> replicateWALEntry(AsyncRegionServerAdmin admin, Entry[] entries, String replicationClusterId, Path sourceBaseNamespaceDir, Path sourceHFileArchiveDir, int timeout) { Pair<ReplicateWALEntryRequest, CellScanner> p = buildReplicateWALEntryRequest(entries, null, replicationClusterId, sourceBaseNamespaceDir, sourceHFileArchiveDir); return admin.replicateWALEntry(p.getFirst(), p.getSecond(), timeout); }
3.26
hbase_JobUtil_getStagingDir_rdh
/** * Initializes the staging directory and returns the path. * * @param conf * system configuration * @return staging directory path * @throws IOException * if the ownership on the staging directory is not as expected * @throws InterruptedException * if the thread getting the staging directory is interrupted */ public static Path getStagingDir(Configuration conf) throws IOException, InterruptedException { return JobSubmissionFiles.getStagingDir(new Cluster(conf), conf); }
3.26
hbase_JobUtil_getQualifiedStagingDir_rdh
/** * Initializes the staging directory and returns the qualified path. * * @param conf * conf system configuration * @return qualified staging directory path * @throws IOException * if the ownership on the staging directory is not as expected * @throws InterruptedException * if the thread getting the staging directory is interrupted */ public static Path getQualifiedStagingDir(Configuration conf) throws IOException, InterruptedException { Cluster cluster = new Cluster(conf);Path stagingDir = JobSubmissionFiles.getStagingDir(cluster, conf); return cluster.getFileSystem().makeQualified(stagingDir); }
3.26
hbase_ConfigurationUtil_setKeyValues_rdh
/** * Store a collection of Map.Entry's in conf, with each entry separated by ',' and key values * delimited by delimiter. * * @param conf * configuration to store the collection in * @param key * overall key to store keyValues under * @param keyValues * kvps to be stored under key in conf * @param delimiter * character used to separate each kvp */ public static void setKeyValues(Configuration conf, String key, Collection<Map.Entry<String, String>> keyValues, char delimiter) { List<String> serializedKvps = Lists.newArrayList(); for (Map.Entry<String, String> kvp : keyValues) { serializedKvps.add((kvp.getKey() + delimiter) + kvp.getValue()); } conf.setStrings(key, serializedKvps.toArray(new String[serializedKvps.size()])); }
3.26
hbase_ConfigurationUtil_getKeyValues_rdh
/** * Retrieve a list of key value pairs from configuration, stored under the provided key * * @param conf * configuration to retrieve kvps from * @param key * key under which the key values are stored * @param delimiter * character used to separate each kvp * @return the list of kvps stored under key in conf, or null if the key isn't present. * @see #setKeyValues(Configuration, String, Collection, char) */ public static List<Map.Entry<String, String>> getKeyValues(Configuration conf, String key, char delimiter) { String[] v2 = conf.getStrings(key); if (v2 == null) { return null; } List<Map.Entry<String, String>> rtn = Lists.newArrayList(); for (String kvp : v2) { String[] splitKvp = StringUtils.split(kvp, delimiter); if (splitKvp.length != 2) {throw new IllegalArgumentException((((((("Expected key value pair for configuration key '" + key) + "'") + " to be of form '<key>") + delimiter) + "<value>; was ") + kvp) + " instead"); } rtn.add(new AbstractMap.SimpleImmutableEntry<>(splitKvp[0], splitKvp[1])); } return rtn; }
3.26
hbase_ConfigurationUtil_m0_rdh
/** * Store a collection of Map.Entry's in conf, with each entry separated by ',' and key values * delimited by {@link #KVP_DELIMITER} * * @param conf * configuration to store the collection in * @param key * overall key to store keyValues under * @param keyValues * kvps to be stored under key in conf */ public static void m0(Configuration conf, String key, Collection<Map.Entry<String, String>> keyValues) { setKeyValues(conf, key, keyValues, KVP_DELIMITER); }
3.26
hbase_GsonUtil_createGson_rdh
/** * Create a builder which is used to create a Gson instance. * <p/> * Will set some common configs for the builder. */ public static GsonBuilder createGson() { return new GsonBuilder().setLongSerializationPolicy(LongSerializationPolicy.STRING).registerTypeAdapter(LongAdder.class, new TypeAdapter<LongAdder>() { @Override public void write(JsonWriter out, LongAdder value) throws IOException {out.value(value.longValue()); } @Override public LongAdder read(JsonReader in) throws IOException { LongAdder value = new LongAdder(); value.add(in.nextLong()); return value; } }); }
3.26
hbase_ThriftHttpServlet_doKerberosAuth_rdh
/** * Do the GSS-API kerberos authentication. We already have a logged in subject in the form of * httpUGI, which GSS-API will extract information from. */ private RemoteUserIdentity doKerberosAuth(HttpServletRequest request) throws HttpAuthenticationException { HttpKerberosServerAction action = new HttpKerberosServerAction(request, httpUGI); try { String principal = httpUGI.doAs(action); return new RemoteUserIdentity(principal, action.outToken); } catch (Exception e) { LOG.info("Failed to authenticate with {} kerberos principal", httpUGI.getUserName()); throw new HttpAuthenticationException(e); } }
3.26
hbase_ThriftHttpServlet_getAuthHeader_rdh
/** * Returns the base64 encoded auth header payload * * @throws HttpAuthenticationException * if a remote or network exception occurs */ private String getAuthHeader(HttpServletRequest request) throws HttpAuthenticationException { String authHeader = request.getHeader(HttpHeaders.AUTHORIZATION); // Each http request must have an Authorization header if ((authHeader == null) || authHeader.isEmpty()) { throw new HttpAuthenticationException("Authorization header received " + "from the client is empty."); } String authHeaderBase64String; int beginIndex = (NEGOTIATE + " ").length(); authHeaderBase64String = authHeader.substring(beginIndex); // Authorization header must have a payload if (authHeaderBase64String.isEmpty()) { throw new HttpAuthenticationException("Authorization header received " + "from the client does not contain any data."); } return authHeaderBase64String; }
3.26
hbase_RegionServerFlushTableProcedureManager_buildSubprocedure_rdh
/** * If in a running state, creates the specified subprocedure to flush table regions. Because this * gets the local list of regions to flush and not the set the master had, there is a possibility * of a race where regions may be missed. * * @return Subprocedure to submit to the ProcedureMember. */ public Subprocedure buildSubprocedure(String table, List<String> families) { // don't run the subprocedure if the parent is stop(ping) if (rss.isStopping() || rss.isStopped()) { throw new IllegalStateException(("Can't start flush region subprocedure on RS: " + rss.getServerName()) + ", because stopping/stopped!"); } // check to see if this server is hosting any regions for the table List<HRegion> involvedRegions; try { involvedRegions = getRegionsToFlush(table); } catch (IOException e1) { throw new IllegalStateException("Failed to figure out if there is region to flush.", e1); } // We need to run the subprocedure even if we have no relevant regions. The coordinator // expects participation in the procedure and without sending message the master procedure // will hang and fail. LOG.debug("Launching subprocedure to flush regions for " + table);ForeignExceptionDispatcher exnDispatcher = new ForeignExceptionDispatcher(table); Configuration conf = rss.getConfiguration(); long v4 = conf.getLong(FLUSH_TIMEOUT_MILLIS_KEY, FLUSH_TIMEOUT_MILLIS_DEFAULT); long wakeMillis = conf.getLong(FLUSH_REQUEST_WAKE_MILLIS_KEY, FLUSH_REQUEST_WAKE_MILLIS_DEFAULT); FlushTableSubprocedurePool taskManager = new FlushTableSubprocedurePool(rss.getServerName().toString(), conf, rss); return new FlushTableSubprocedure(member, exnDispatcher, wakeMillis, v4, involvedRegions, table, families, taskManager); }
3.26
hbase_RegionServerFlushTableProcedureManager_stop_rdh
/** * Gracefully shutdown the thread pool. An ongoing HRegion.flush() should not be interrupted * (see HBASE-13877) */ void stop() { if (this.stopped) return; this.stopped = true; this.executor.shutdown(); }
3.26
hbase_RegionServerFlushTableProcedureManager_submitTask_rdh
/** * Submit a task to the pool. NOTE: all must be submitted before you can safely * {@link #waitForOutstandingTasks()}. */ void submitTask(final Callable<Void> task) { Future<Void> f = this.taskPool.submit(task); f0.add(f); }
3.26
hbase_RegionServerFlushTableProcedureManager_getRegionsToFlush_rdh
/** * Get the list of regions to flush for the table on this server. It is possible that if a region * moves somewhere between the calls we'll miss the region. * * @return the list of online regions. Empty list is returned if no regions. */ private List<HRegion> getRegionsToFlush(String table) throws IOException { return ((List<HRegion>) (rss.getRegions(TableName.valueOf(table)))); }
3.26
hbase_RegionServerFlushTableProcedureManager_cancelTasks_rdh
/** * This attempts to cancel out all pending and in progress tasks. Does not interrupt the running * tasks itself. An ongoing HRegion.flush() should not be interrupted (see HBASE-13877). */void cancelTasks() throws InterruptedException { Collection<Future<Void>> tasks = f0; LOG.debug((("cancelling " + tasks.size()) + " flush region tasks ") + name); for (Future<Void> f : tasks) { f.cancel(false); } // evict remaining tasks and futures from taskPool. f0.clear();while (taskPool.poll() != null) { } stop(); }
3.26
hbase_RegionServerFlushTableProcedureManager_initialize_rdh
/** * Initialize this region server flush procedure manager. Uses a zookeeper based member controller. * * @param rss * region server * @throws KeeperException * if the zookeeper cannot be reached */ @Override public void initialize(RegionServerServices rss) throws KeeperException { this.rss = rss; ZKWatcher zkw = rss.getZooKeeper(); this.memberRpcs = new ZKProcedureMemberRpcs(zkw, MasterFlushTableProcedureManager.FLUSH_TABLE_PROCEDURE_SIGNATURE); Configuration conf = rss.getConfiguration(); long keepAlive = conf.getLong(FLUSH_TIMEOUT_MILLIS_KEY, FLUSH_TIMEOUT_MILLIS_DEFAULT); int opThreads = conf.getInt(FLUSH_REQUEST_THREADS_KEY, FLUSH_REQUEST_THREADS_DEFAULT); // create the actual flush table procedure member ThreadPoolExecutor pool = ProcedureMember.defaultPool(rss.getServerName().toString(), opThreads, keepAlive); this.member = new ProcedureMember(memberRpcs, pool, new FlushTableSubprocedureBuilder()); }
3.26
hbase_RegionServerFlushTableProcedureManager_start_rdh
/** * Start accepting flush table requests. */ @Override public void start() { LOG.debug("Start region server flush procedure manager " + rss.getServerName().toString()); this.memberRpcs.start(rss.getServerName().toString(), member); }
3.26
hbase_RegionServerFlushTableProcedureManager_waitForOutstandingTasks_rdh
/** * Wait for all of the currently outstanding tasks submitted via {@link #submitTask(Callable)}. * This *must* be called after all tasks are submitted via submitTask. * * @return <tt>true</tt> on success, <tt>false</tt> otherwise */ boolean waitForOutstandingTasks() throws ForeignException, InterruptedException { LOG.debug("Waiting for local region flush to finish."); int sz = f0.size(); try { // Using the completion service to process the futures. for (int i = 0; i < sz; i++) { Future<Void> f = taskPool.take();f.get(); if (!f0.remove(f)) { LOG.warn("unexpected future" + f); } LOG.debug(((("Completed " + (i + 1)) + "/") + sz) + " local region flush tasks."); } LOG.debug(("Completed " + sz) + " local region flush tasks."); return true; } catch (InterruptedException e) { LOG.warn("Got InterruptedException in FlushSubprocedurePool", e); if (!stopped) { Thread.currentThread().interrupt(); throw new ForeignException("FlushSubprocedurePool", e); } // we are stopped so we can just exit. } catch (ExecutionException e) { Throwable cause = e.getCause(); if (cause instanceof ForeignException) { LOG.warn("Rethrowing ForeignException from FlushSubprocedurePool", e); throw ((ForeignException) (e.getCause())); } else if (cause instanceof DroppedSnapshotException) { // we have to abort the region server according to contract of flush abortable.abort("Received DroppedSnapshotException, aborting", cause); } LOG.warn("Got Exception in FlushSubprocedurePool", e); throw new ForeignException(name, e.getCause()); } finally { cancelTasks(); } return false;}
3.26
hbase_TraceUtil_tracedRunnable_rdh
/** * Wrap the provided {@code runnable} in a {@link Runnable} that is traced. */ public static Runnable tracedRunnable(final Runnable runnable, final Supplier<Span> spanSupplier) { // N.B. This method name follows the convention of this class, i.e., tracedFuture, rather than // the convention of the OpenTelemetry classes, i.e., Context#wrap. return () -> { final Span span = spanSupplier.get(); try (final Scope ignored = span.makeCurrent()) { runnable.run(); span.setStatus(StatusCode.OK); } finally { span.end(); } }; }
3.26
hbase_TraceUtil_trace_rdh
/** * Trace the execution of {@code runnable}. */ public static <T extends Throwable> void trace(final ThrowingRunnable<T> runnable, final Supplier<Span> spanSupplier) throws T { Span span = spanSupplier.get(); try (Scope ignored = span.makeCurrent()) {runnable.run(); span.setStatus(StatusCode.OK); } catch (Throwable e) { setError(span, e); throw e; } finally { span.end(); } }
3.26
hbase_TraceUtil_createSpan_rdh
/** * Create a span with the given {@code kind}. Notice that, OpenTelemetry only expects one * {@link SpanKind#CLIENT} span and one {@link SpanKind#SERVER} span for a traced request, so use * this with caution when you want to create spans with kind other than {@link SpanKind#INTERNAL}. */ private static Span createSpan(String name, SpanKind kind) { return getGlobalTracer().spanBuilder(name).setSpanKind(kind).startSpan(); }
3.26
hbase_TraceUtil_tracedFuture_rdh
/** * Trace an asynchronous operation. */ public static <T> CompletableFuture<T> tracedFuture(Supplier<CompletableFuture<T>> action, String spanName) { Span span = createSpan(spanName); try (Scope ignored = span.makeCurrent()) { CompletableFuture<T> future = action.get();endSpan(future, span); return future; } }
3.26
hbase_TraceUtil_createClientSpan_rdh
/** * Create a span with {@link SpanKind#CLIENT}. */ public static Span createClientSpan(String name) { return createSpan(name, SpanKind.CLIENT); }
3.26
hbase_TraceUtil_endSpan_rdh
/** * Finish the {@code span} when the given {@code future} is completed. */ private static void endSpan(CompletableFuture<?> future, Span span) { FutureUtils.addListener(future, (resp, error) -> { if (error != null) { setError(span, error); } else { span.setStatus(StatusCode.OK); } span.end(); }); }
3.26
hbase_TraceUtil_tracedFutures_rdh
/** * Trace an asynchronous operation, and finish the created {@link Span} when all the given * {@code futures} are completed. */ public static <T> List<CompletableFuture<T>> tracedFutures(Supplier<List<CompletableFuture<T>>> action, Supplier<Span> spanSupplier) { Span span = spanSupplier.get(); try (Scope ignored = span.makeCurrent()) { List<CompletableFuture<T>> futures = action.get(); endSpan(CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])), span); return futures; } }
3.26
hbase_TraceUtil_createRemoteSpan_rdh
/** * Create a span whose parent is from remote, i.e., passed through rpc. * <p/> * We will set the kind of the returned span to {@link SpanKind#SERVER}, as this should be the top * most span at server side. */ public static Span createRemoteSpan(String name, Context ctx) { return getGlobalTracer().spanBuilder(name).setParent(ctx).setSpanKind(SpanKind.SERVER).startSpan();}
3.26
hbase_DateTieredCompactionPolicy_getCompactionBoundariesForMinor_rdh
/** * Returns a list of boundaries for multiple compaction output from minTimestamp to maxTimestamp. */ private static List<Long> getCompactionBoundariesForMinor(CompactionWindow window, boolean singleOutput) { List<Long> boundaries = new ArrayList<>(); boundaries.add(Long.MIN_VALUE); if (!singleOutput) {boundaries.add(window.startMillis()); } return boundaries; }
3.26
hbase_DateTieredCompactionPolicy_selectMinorCompaction_rdh
/** * We receive store files sorted in ascending order by seqId then scan the list of files. If the * current file has a maxTimestamp older than last known maximum, treat this file as it carries * the last known maximum. This way both seqId and timestamp are in the same order. If files carry * the same maxTimestamps, they are ordered by seqId. We then reverse the list so they are ordered * by seqId and maxTimestamp in descending order and build the time windows. All the out-of-order * data into the same compaction windows, guaranteeing contiguous compaction based on sequence id. */ public CompactionRequestImpl selectMinorCompaction(ArrayList<HStoreFile> candidateSelection, boolean mayUseOffPeak, boolean mayBeStuck) throws IOException {long now = EnvironmentEdgeManager.currentTime(); long oldestToCompact = getOldestToCompact(comConf.getDateTieredMaxStoreFileAgeMillis(), now); List<Pair<HStoreFile, Long>> storefileMaxTimestampPairs = Lists.newArrayListWithCapacity(candidateSelection.size()); long maxTimestampSeen = Long.MIN_VALUE; for (HStoreFile storeFile : candidateSelection) { // if there is out-of-order data, // we put them in the same window as the last file in increasing order maxTimestampSeen = Math.max(maxTimestampSeen, storeFile.getMaximumTimestamp().orElse(Long.MIN_VALUE)); storefileMaxTimestampPairs.add(new Pair<>(storeFile, maxTimestampSeen)); } Collections.reverse(storefileMaxTimestampPairs); CompactionWindow window = getIncomingWindow(now); int minThreshold = comConf.getDateTieredIncomingWindowMin(); PeekingIterator<Pair<HStoreFile, Long>> it = Iterators.peekingIterator(storefileMaxTimestampPairs.iterator()); while (it.hasNext()) { if (window.compareToTimestamp(oldestToCompact) < 0) { break; } int compResult = window.compareToTimestamp(it.peek().getSecond()); if (compResult > 0) { // If the file is too old for the window, switch to the next window window = window.nextEarlierWindow(); minThreshold = comConf.getMinFilesToCompact(); } else { // The file is within the target window ArrayList<HStoreFile> fileList = Lists.newArrayList(); // Add all files in the same window. For incoming window // we tolerate files with future data although it is sub-optimal while (it.hasNext() && (window.compareToTimestamp(it.peek().getSecond()) <= 0)) { fileList.add(it.next().getFirst()); } if (fileList.size() >= minThreshold) { if (LOG.isDebugEnabled()) { LOG.debug((("Processing files: " + fileList) + " for window: ") + window); } DateTieredCompactionRequest request = generateCompactionRequest(fileList, window, mayUseOffPeak, mayBeStuck, minThreshold, now); if (request != null) { return request; } }} } // A non-null file list is expected by HStore return new CompactionRequestImpl(Collections.emptyList()); }
3.26
hbase_DateTieredCompactionPolicy_needsCompaction_rdh
/** * Heuristics for guessing whether we need minor compaction. */ @Override @InterfaceAudience.Private public boolean needsCompaction(Collection<HStoreFile> storeFiles, List<HStoreFile> filesCompacting) { ArrayList<HStoreFile> candidates = new ArrayList<>(storeFiles); try { return !selectMinorCompaction(candidates, false, true).getFiles().isEmpty(); } catch (Exception e) { LOG.error("Can not check for compaction: ", e); return false; } }
3.26
hbase_DateTieredCompactionPolicy_getCompactBoundariesForMajor_rdh
/** * Return a list of boundaries for multiple compaction output in ascending order. */ private List<Long> getCompactBoundariesForMajor(Collection<HStoreFile> filesToCompact, long now) { long minTimestamp = filesToCompact.stream().mapToLong(f -> f.getMinimumTimestamp().orElse(Long.MAX_VALUE)).min().orElse(Long.MAX_VALUE); List<Long> boundaries = new ArrayList<>(); // Add startMillis of all windows between now and min timestamp for (CompactionWindow window = getIncomingWindow(now); window.compareToTimestamp(minTimestamp) > 0; window = window.nextEarlierWindow()) { boundaries.add(window.startMillis()); } boundaries.add(Long.MIN_VALUE); Collections.reverse(boundaries); return boundaries; }
3.26
hbase_RestoreTool_getTableArchivePath_rdh
/** * return value represent path for: * ".../user/biadmin/backup1/default/t1_dn/backup_1396650096738/archive/data/default/t1_dn" * * @param tableName * table name * @return path to table archive * @throws IOException * exception */ Path getTableArchivePath(TableName tableName) throws IOException { Path baseDir = new Path(HBackupFileSystem.getTableBackupPath(tableName, backupRootPath, backupId), HConstants.HFILE_ARCHIVE_DIRECTORY); Path dataDir = new Path(baseDir, HConstants.BASE_NAMESPACE_DIR); Path archivePath = new Path(dataDir, tableName.getNamespaceAsString()); Path tableArchivePath = new Path(archivePath, tableName.getQualifierAsString()); if ((!fs.exists(tableArchivePath)) || (!fs.getFileStatus(tableArchivePath).isDirectory())) { LOG.debug(("Folder tableArchivePath: " + tableArchivePath.toString()) + " does not exists"); tableArchivePath = null;// empty table has no archive } return tableArchivePath; }
3.26
hbase_RestoreTool_getRegionList_rdh
/** * Gets region list * * @param tableArchivePath * table archive path * @return RegionList region list * @throws IOException * exception */ ArrayList<Path> getRegionList(Path tableArchivePath) throws IOException { ArrayList<Path> regionDirList = new ArrayList<>(); FileStatus[] v46 = fs.listStatus(tableArchivePath); for (FileStatus v47 : v46) { // here child refer to each region(Name) Path child = v47.getPath(); regionDirList.add(child); } return regionDirList; }
3.26
hbase_RestoreTool_getTableInfoPath_rdh
/** * Returns value represent path for: * ""/$USER/SBACKUP_ROOT/backup_id/namespace/table/.hbase-snapshot/ * snapshot_1396650097621_namespace_table" this path contains .snapshotinfo, .tabledesc (0.96 and * 0.98) this path contains .snapshotinfo, .data.manifest (trunk) * * @param tableName * table name * @return path to table info * @throws IOException * exception */ Path getTableInfoPath(TableName tableName) throws IOException { Path tableSnapShotPath = getTableSnapshotPath(backupRootPath, tableName, backupId); Path tableInfoPath = null; // can't build the path directly as the timestamp values are different FileStatus[] snapshots = fs.listStatus(tableSnapShotPath, new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs)); for (FileStatus snapshot : snapshots) { tableInfoPath = snapshot.getPath(); // SnapshotManifest.DATA_MANIFEST_NAME = "data.manifest"; if (tableInfoPath.getName().endsWith("data.manifest")) { break; } } return tableInfoPath; }
3.26
hbase_RestoreTool_incrementalRestoreTable_rdh
/** * During incremental backup operation. Call WalPlayer to replay WAL in backup image Currently * tableNames and newTablesNames only contain single table, will be expanded to multiple tables in * the future * * @param conn * HBase connection * @param tableBackupPath * backup path * @param logDirs * : incremental backup folders, which contains WAL * @param tableNames * : source tableNames(table names were backuped) * @param newTableNames * : target tableNames(table names to be restored to) * @param incrBackupId * incremental backup Id * @throws IOException * exception */ public void incrementalRestoreTable(Connection conn, Path tableBackupPath, Path[] logDirs, TableName[] tableNames, TableName[] newTableNames, String incrBackupId) throws IOException { try (Admin admin = conn.getAdmin()) { if (tableNames.length != newTableNames.length) { throw new IOException("Number of source tables and target tables does not match!"); } FileSystem fileSys = tableBackupPath.getFileSystem(this.conf); // for incremental backup image, expect the table already created either by user or previous // full backup. Here, check that all new tables exists for (TableName tableName : newTableNames) { if (!admin.tableExists(tableName)) { throw new IOException(("HBase table " + tableName) + " does not exist. Create the table first, e.g. by restoring a full backup."); } } // adjust table schema for (int i = 0; i < tableNames.length; i++) { TableName tableName = tableNames[i]; TableDescriptor tableDescriptor = getTableDescriptor(fileSys, tableName, incrBackupId); if (tableDescriptor == null) { throw new IOException(("Can't find " + tableName) + "'s descriptor."); } LOG.debug((("Found descriptor " + tableDescriptor) + " through ") + incrBackupId); TableName newTableName = newTableNames[i]; TableDescriptor newTableDescriptor = admin.getDescriptor(newTableName); List<ColumnFamilyDescriptor> families = Arrays.asList(tableDescriptor.getColumnFamilies()); List<ColumnFamilyDescriptor> existingFamilies = Arrays.asList(newTableDescriptor.getColumnFamilies()); TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(newTableDescriptor); boolean schemaChangeNeeded = false; for (ColumnFamilyDescriptor family : families) {if (!existingFamilies.contains(family)) { builder.setColumnFamily(family); schemaChangeNeeded = true; } } for (ColumnFamilyDescriptor family : existingFamilies) { if (!families.contains(family)) { builder.removeColumnFamily(family.getName()); schemaChangeNeeded = true; } }if (schemaChangeNeeded) { modifyTableSync(conn, builder.build()); LOG.info((("Changed " + newTableDescriptor.getTableName()) + " to: ") + newTableDescriptor); } } RestoreJob restoreService = BackupRestoreFactory.getRestoreJob(conf); restoreService.run(logDirs, tableNames, restoreRootDir, newTableNames, false); } }
3.26
hbase_RestoreTool_getTableDesc_rdh
/** * Get table descriptor * * @param tableName * is the table backed up * @return {@link TableDescriptor} saved in backup image of the table */ TableDescriptor getTableDesc(TableName tableName) throws IOException { Path tableInfoPath = this.getTableInfoPath(tableName); SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, tableInfoPath); SnapshotManifest manifest = SnapshotManifest.open(conf, fs, tableInfoPath, desc); TableDescriptor tableDescriptor = manifest.getTableDescriptor(); if (!tableDescriptor.getTableName().equals(tableName)) { LOG.error((("couldn't find Table Desc for table: " + tableName) + " under tableInfoPath: ") + tableInfoPath.toString()); LOG.error("tableDescriptor.getNameAsString() = " + tableDescriptor.getTableName().getNameAsString()); throw new FileNotFoundException((("couldn't find Table Desc for table: " + tableName) + " under tableInfoPath: ") + tableInfoPath.toString()); } return tableDescriptor; }
3.26
hbase_RestoreTool_generateBoundaryKeys_rdh
/** * Calculate region boundaries and add all the column families to the table descriptor * * @param regionDirList * region dir list * @return a set of keys to store the boundaries */ byte[][] generateBoundaryKeys(ArrayList<Path> regionDirList) throws IOException { TreeMap<byte[], Integer> map = new TreeMap<>(Bytes.BYTES_COMPARATOR); // Build a set of keys to store the boundaries // calculate region boundaries and add all the column families to the table descriptor for (Path regionDir : regionDirList) { LOG.debug("Parsing region dir: " + regionDir); Path hfofDir = regionDir; if (!fs.exists(hfofDir)) { LOG.warn(("HFileOutputFormat dir " + hfofDir) + " not found"); } FileStatus[] familyDirStatuses = fs.listStatus(hfofDir); if (familyDirStatuses == null) { throw new IOException("No families found in " + hfofDir); } for (FileStatus stat : familyDirStatuses) { if (!stat.isDirectory()) { LOG.warn("Skipping non-directory " + stat.getPath()); continue; } boolean isIgnore = false; String v55 = stat.getPath().getName(); for (String ignore : ignoreDirs) { if (v55.contains(ignore)) { LOG.warn("Skipping non-family directory" + v55); isIgnore = true; break; } } if (isIgnore) { continue; } Path familyDir = stat.getPath(); LOG.debug(((("Parsing family dir [" + familyDir.toString()) + " in region [") + regionDir) + "]"); // Skip _logs, etc if (familyDir.getName().startsWith("_") || familyDir.getName().startsWith(".")) {continue; } // start to parse hfile inside one family dir Path[] hfiles = FileUtil.stat2Paths(fs.listStatus(familyDir)); for (Path hfile : hfiles) { if (((hfile.getName().startsWith("_") || hfile.getName().startsWith(".")) || StoreFileInfo.isReference(hfile.getName())) || HFileLink.isHFileLink(hfile.getName())) { continue; } HFile.Reader reader = HFile.createReader(fs, hfile, conf); final byte[] first; final byte[] last; try { first = reader.getFirstRowKey().get(); last = reader.getLastRowKey().get(); LOG.debug((((("Trying to figure out region boundaries hfile=" + hfile) + " first=") + Bytes.toStringBinary(first)) + " last=") + Bytes.toStringBinary(last)); // To eventually infer start key-end key boundaries Integer value = (map.containsKey(first)) ? ((Integer) (map.get(first))) : 0; map.put(first, value + 1); value = (map.containsKey(last)) ? ((Integer) (map.get(last))) : 0; map.put(last, value - 1); } finally { reader.close(); } } } } return BulkLoadHFilesTool.inferBoundaries(map); }
3.26
hbase_RestoreTool_checkAndCreateTable_rdh
/** * Prepare the table for bulkload, most codes copied from {@code createTable} method in * {@code BulkLoadHFilesTool}. * * @param conn * connection * @param targetTableName * target table name * @param regionDirList * region directory list * @param htd * table descriptor * @param truncateIfExists * truncates table if exists * @throws IOException * exception */ private void checkAndCreateTable(Connection conn, TableName targetTableName, ArrayList<Path> regionDirList, TableDescriptor htd, boolean truncateIfExists) throws IOException { try (Admin admin = conn.getAdmin()) { boolean createNew = false; if (admin.tableExists(targetTableName)) { if (truncateIfExists) { LOG.info(("Truncating exising target table '" + targetTableName) + "', preserving region splits"); admin.disableTable(targetTableName); admin.truncateTable(targetTableName, true); } else {LOG.info(("Using exising target table '" + targetTableName) + "'"); } } else { createNew = true; } if (createNew) { LOG.info(("Creating target table '" + targetTableName) + "'"); byte[][] keys = null; try { if ((regionDirList == null) || (regionDirList.size() == 0)) { admin.createTable(htd); } else { keys = generateBoundaryKeys(regionDirList); // create table using table descriptor and region boundaries admin.createTable(htd, keys); } } catch (NamespaceNotFoundException e) { LOG.warn("There was no namespace and the same will be created"); String namespaceAsString = targetTableName.getNamespaceAsString(); LOG.info(("Creating target namespace '" + namespaceAsString) + "'"); admin.createNamespace(NamespaceDescriptor.create(namespaceAsString).build());if (null == keys) { admin.createTable(htd); } else { admin.createTable(htd, keys); } }} long startTime = EnvironmentEdgeManager.currentTime(); while (!admin.isTableAvailable(targetTableName)) { try { Thread.sleep(100); } catch (InterruptedException ie) { Thread.currentThread().interrupt();}if ((EnvironmentEdgeManager.currentTime() - startTime) > TABLE_AVAILABILITY_WAIT_TIME) { throw new IOException(((("Time out " + TABLE_AVAILABILITY_WAIT_TIME) + "ms expired, table ") + targetTableName) + " is still not available"); } } } }
3.26
hbase_HandlerUtil_getRetryCounter_rdh
/** * Get an exponential backoff retry counter. The base unit is 100 milliseconds, and the max * backoff time is 30 seconds. */ public static RetryCounter getRetryCounter() { return new RetryCounterFactory(new RetryCounter.RetryConfig().setBackoffPolicy(new RetryCounter.ExponentialBackoffPolicy()).setSleepInterval(100).setMaxSleepTime(30000).setMaxAttempts(Integer.MAX_VALUE).setTimeUnit(TimeUnit.MILLISECONDS).setJitter(0.01F)).create(); }
3.26
hbase_Union2_decodeA_rdh
/** * Read an instance of the first type parameter from buffer {@code src}. */ public A decodeA(PositionedByteRange src) { return ((A) (decode(src))); }
3.26
hbase_Union2_decodeB_rdh
/** * Read an instance of the second type parameter from buffer {@code src}. */ public B decodeB(PositionedByteRange src) { return ((B) (decode(src))); }
3.26
hbase_RowCounter_createSubmittableJob_rdh
/** * Returns the JobConf */ public JobConf createSubmittableJob(String[] args) throws IOException { JobConf c = new JobConf(getConf(), getClass()); c.setJobName(NAME); // Columns are space delimited StringBuilder sb = new StringBuilder(); final int columnoffset = 2; for (int i = columnoffset; i < args.length; i++) { if (i > columnoffset) { sb.append(" "); } sb.append(args[i]); } // Second argument is the table name. TableMapReduceUtil.initTableMapJob(args[1], sb.toString(), RowCounter.RowCounterMapper.class, ImmutableBytesWritable.class, Result.class, c); c.setNumReduceTasks(0); // First arg is the output directory. FileOutputFormat.setOutputPath(c, new Path(args[0])); return c; }
3.26
hbase_SpaceLimitSettings_buildProtoAddQuota_rdh
/** * Builds a {@link SpaceQuota} protobuf object given the arguments. * * @param sizeLimit * The size limit of the quota. * @param violationPolicy * The action to take when the quota is exceeded. * @return The protobuf SpaceQuota representation. */private SpaceLimitRequest buildProtoAddQuota(long sizeLimit, SpaceViolationPolicy violationPolicy) { return buildProtoFromQuota(SpaceQuota.newBuilder().setSoftLimit(sizeLimit).setViolationPolicy(ProtobufUtil.toProtoViolationPolicy(violationPolicy)).build()); }
3.26
hbase_SpaceLimitSettings_validateProtoArguments_rdh
/** * Validates that the provided protobuf SpaceQuota has the necessary information to construct a * {@link SpaceLimitSettings}. * * @param proto * The protobuf message to validate. */ static void validateProtoArguments(final QuotaProtos.SpaceQuota proto) { if (!Objects.requireNonNull(proto).hasSoftLimit()) { throw new IllegalArgumentException("Cannot handle SpaceQuota without a soft limit"); } if (!proto.hasViolationPolicy()) { throw new IllegalArgumentException("Cannot handle SpaceQuota without a violation policy"); } }
3.26
hbase_SpaceLimitSettings_validateSizeLimit_rdh
// Helper function to validate sizeLimit private void validateSizeLimit(long sizeLimit) { if (sizeLimit < 0L) { throw new IllegalArgumentException("Size limit must be a non-negative value."); } }
3.26
hbase_SpaceLimitSettings_buildProtoRemoveQuota_rdh
/** * Builds a {@link SpaceQuota} protobuf object to remove a quota. * * @return The protobuf SpaceQuota representation. */private SpaceLimitRequest buildProtoRemoveQuota() { return SpaceLimitRequest.newBuilder().setQuota(SpaceQuota.newBuilder().setRemove(true).build()).build(); }
3.26
hbase_SpaceLimitSettings_getProto_rdh
/** * Returns a copy of the internal state of <code>this</code> */ SpaceLimitRequest getProto() { return proto.toBuilder().build(); }
3.26
hbase_SpaceLimitSettings_fromSpaceQuota_rdh
/** * Constructs a {@link SpaceLimitSettings} from the provided protobuf message and namespace. * * @param namespace * The target namespace for the limit. * @param proto * The protobuf representation. * @return A QuotaSettings. */ static SpaceLimitSettings fromSpaceQuota(final String namespace, final QuotaProtos.SpaceQuota proto) { validateProtoArguments(proto); return new SpaceLimitSettings(namespace, proto.getSoftLimit(), ProtobufUtil.toViolationPolicy(proto.getViolationPolicy())); }
3.26
hbase_SpaceLimitSettings_buildProtoFromQuota_rdh
/** * Build a {@link SpaceLimitRequest} protobuf object from the given {@link SpaceQuota}. * * @param protoQuota * The preconstructed SpaceQuota protobuf * @return A protobuf request to change a space limit quota */ private SpaceLimitRequest buildProtoFromQuota(SpaceQuota protoQuota) { return SpaceLimitRequest.newBuilder().setQuota(protoQuota).build(); }
3.26
hbase_HFileOutputFormat2_createFamilyBloomParamMap_rdh
/** * Runs inside the task to deserialize column family to bloom filter param map from the * configuration. * * @param conf * to read the serialized values from * @return a map from column family to the configured bloom filter param */ @InterfaceAudience.Private static Map<byte[], String> createFamilyBloomParamMap(Configuration conf) { return createFamilyConfValueMap(conf, BLOOM_PARAM_FAMILIES_CONF_KEY); }
3.26
hbase_HFileOutputFormat2_createFamilyBlockSizeMap_rdh
/** * Runs inside the task to deserialize column family to block size map from the configuration. * * @param conf * to read the serialized values from * @return a map from column family to the configured block size */ @InterfaceAudience.Private static Map<byte[], Integer> createFamilyBlockSizeMap(Configuration conf) { Map<byte[], String> stringMap = createFamilyConfValueMap(conf, BLOCK_SIZE_FAMILIES_CONF_KEY); Map<byte[], Integer> blockSizeMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Map.Entry<byte[], String> e : stringMap.entrySet()) { Integer blockSize = Integer.parseInt(e.getValue());blockSizeMap.put(e.getKey(), blockSize); } return blockSizeMap; }
3.26
hbase_HFileOutputFormat2_createFamilyBloomTypeMap_rdh
/** * Runs inside the task to deserialize column family to bloom filter type map from the * configuration. * * @param conf * to read the serialized values from * @return a map from column family to the configured bloom filter type */ @InterfaceAudience.Private static Map<byte[], BloomType> createFamilyBloomTypeMap(Configuration conf) { Map<byte[], String> stringMap = createFamilyConfValueMap(conf, BLOOM_TYPE_FAMILIES_CONF_KEY); Map<byte[], BloomType> bloomTypeMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Map.Entry<byte[], String> e : stringMap.entrySet()) { BloomType bloomType = BloomType.valueOf(e.getValue()); bloomTypeMap.put(e.getKey(), bloomType); } return bloomTypeMap; }
3.26
hbase_HFileOutputFormat2_m0_rdh
/** * Configure HBase cluster key for remote cluster to load region location for locality-sensitive * if it's enabled. It's not necessary to call this method explicitly when the cluster key for * HBase cluster to be used to load region location is configured in the job configuration. Call * this method when another HBase cluster key is configured in the job configuration. For example, * you should call when you load data from HBase cluster A using {@link TableInputFormat} and * generate hfiles for HBase cluster B. Otherwise, HFileOutputFormat2 fetch location from cluster * A and locality-sensitive won't working correctly. * {@link #configureIncrementalLoad(Job, Table, RegionLocator)} calls this method using * {@link Table#getConfiguration} as clusterConf. See HBASE-25608. * * @param job * which has configuration to be updated * @param clusterConf * which contains cluster key of the HBase cluster to be locality-sensitive * @see #configureIncrementalLoad(Job, Table, RegionLocator) * @see #LOCALITY_SENSITIVE_CONF_KEY * @see #REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY * @see #REMOTE_CLUSTER_ZOOKEEPER_CLIENT_PORT_CONF_KEY * @see #REMOTE_CLUSTER_ZOOKEEPER_ZNODE_PARENT_CONF_KEY */ public static void m0(Job job, Configuration clusterConf) { Configuration conf = job.getConfiguration(); if (!conf.getBoolean(LOCALITY_SENSITIVE_CONF_KEY, DEFAULT_LOCALITY_SENSITIVE)) { return;} final String quorum = clusterConf.get(HConstants.ZOOKEEPER_QUORUM); final int clientPort = clusterConf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, HConstants.DEFAULT_ZOOKEEPER_CLIENT_PORT); final String parent = clusterConf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); conf.set(REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY, quorum);conf.setInt(REMOTE_CLUSTER_ZOOKEEPER_CLIENT_PORT_CONF_KEY, clientPort); conf.set(REMOTE_CLUSTER_ZOOKEEPER_ZNODE_PARENT_CONF_KEY, parent); LOG.info((((("ZK configs for remote cluster of bulkload is configured: " + quorum) + ":") + clientPort) + "/") + parent); }
3.26
hbase_HFileOutputFormat2_createFamilyCompressionMap_rdh
/** * Runs inside the task to deserialize column family to compression algorithm map from the * configuration. * * @param conf * to read the serialized values from * @return a map from column family to the configured compression algorithm */ @InterfaceAudience.Private static Map<byte[], Algorithm> createFamilyCompressionMap(Configuration conf) { Map<byte[], String> stringMap = createFamilyConfValueMap(conf, COMPRESSION_FAMILIES_CONF_KEY); Map<byte[], Algorithm> compressionMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Map.Entry<byte[], String> e : stringMap.entrySet()) { Algorithm algorithm = HFileWriterImpl.compressionByName(e.getValue()); compressionMap.put(e.getKey(), algorithm); } return compressionMap; }
3.26
hbase_HFileOutputFormat2_getNewWriter_rdh
/* Create a new StoreFile.Writer. @return A WriterLength, containing a new StoreFile.Writer. */ @SuppressWarnings(value = "BX_UNBOXING_IMMEDIATELY_REBOXED", justification = "Not important") private WriterLength getNewWriter(byte[] tableName, byte[] family, Configuration conf, InetSocketAddress[] favoredNodes) throws IOException { byte[] tableAndFamily = getTableNameSuffixedWithFamily(tableName, family); Path familydir = new Path(outputDir, Bytes.toString(family)); if (writeMultipleTables) { familydir = new Path(outputDir, new Path(getTableRelativePath(tableName), Bytes.toString(family))); }WriterLength wl = new WriterLength(); Algorithm compression = overriddenCompression; compression = (compression == null) ? compressionMap.get(tableAndFamily) : compression; compression = (compression == null) ? defaultCompression : compression; BloomType bloomType = v13.get(tableAndFamily); bloomType = (bloomType == null) ? BloomType.NONE : bloomType; String bloomParam = bloomParamMap.get(tableAndFamily); if (bloomType == BloomType.ROWPREFIX_FIXED_LENGTH) { conf.set(BloomFilterUtil.PREFIX_LENGTH_KEY, bloomParam); } Integer blockSize = blockSizeMap.get(tableAndFamily); blockSize = (blockSize == null) ? HConstants.DEFAULT_BLOCKSIZE : blockSize; DataBlockEncoding encoding = overriddenEncoding; encoding = (encoding == null) ? datablockEncodingMap.get(tableAndFamily) : encoding; encoding = (encoding == null) ? DataBlockEncoding.NONE : encoding; HFileContextBuilder contextBuilder = new HFileContextBuilder().withCompression(compression).withDataBlockEncoding(encoding).withChecksumType(StoreUtils.getChecksumType(conf)).withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)).withBlockSize(blockSize).withColumnFamily(family).withTableName(tableName).withCreateTime(EnvironmentEdgeManager.currentTime()); if (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS) { contextBuilder.withIncludesTags(true); } HFileContext hFileContext = contextBuilder.build(); if (null == favoredNodes) { wl.writer = new StoreFileWriter.Builder(conf, CacheConfig.DISABLED, fs).withOutputDir(familydir).withBloomType(bloomType).withFileContext(hFileContext).build(); } else { wl.writer = new StoreFileWriter.Builder(conf, CacheConfig.DISABLED, new HFileSystem(fs)).withOutputDir(familydir).withBloomType(bloomType).withFileContext(hFileContext).withFavoredNodes(favoredNodes).build(); } this.writers.put(tableAndFamily, wl); return wl; }
3.26
hbase_HFileOutputFormat2_createFamilyConfValueMap_rdh
/** * Run inside the task to deserialize column family to given conf value map. * * @param conf * to read the serialized values from * @param confName * conf key to read from the configuration * @return a map of column family to the given configuration value */ private static Map<byte[], String> createFamilyConfValueMap(Configuration conf, String confName) { Map<byte[], String> v99 = new TreeMap<>(Bytes.BYTES_COMPARATOR); String confVal = conf.get(confName, ""); for (String familyConf : confVal.split("&")) { String[] v102 = familyConf.split("="); if (v102.length != 2) { continue; } try { v99.put(Bytes.toBytes(URLDecoder.decode(v102[0], "UTF-8")), URLDecoder.decode(v102[1], "UTF-8")); } catch (UnsupportedEncodingException e) { // will not happen with UTF-8 encoding throw new AssertionError(e); } } return v99;}
3.26
hbase_HFileOutputFormat2_configureStoragePolicy_rdh
/** * Configure block storage policy for CF after the directory is created. */ static void configureStoragePolicy(final Configuration conf, final FileSystem fs, byte[] tableAndFamily, Path cfPath) { if ((((null == conf) || (null == fs)) || (null == tableAndFamily)) || (null == cfPath)) { return;} String policy = conf.get(STORAGE_POLICY_PROPERTY_CF_PREFIX + Bytes.toString(tableAndFamily), conf.get(STORAGE_POLICY_PROPERTY)); CommonFSUtils.setStoragePolicy(fs, cfPath, policy); }
3.26
hbase_HFileOutputFormat2_configurePartitioner_rdh
/** * Configure <code>job</code> with a TotalOrderPartitioner, partitioning against * <code>splitPoints</code>. Cleans up the partitions file after the job exits. */ static void configurePartitioner(Job job, List<ImmutableBytesWritable> splitPoints, boolean writeMultipleTables) throws IOException { Configuration conf = job.getConfiguration(); // create the partitions file FileSystem v104 = FileSystem.get(conf); String hbaseTmpFsDir = conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY, v104.getHomeDirectory() + "/hbase-staging"); Path partitionsPath = new Path(hbaseTmpFsDir, "partitions_" + UUID.randomUUID()); v104.makeQualified(partitionsPath); writePartitions(conf, partitionsPath, splitPoints, writeMultipleTables); v104.deleteOnExit(partitionsPath); // configure job to use it job.setPartitionerClass(TotalOrderPartitioner.class); TotalOrderPartitioner.setPartitionFile(conf, partitionsPath); }
3.26
hbase_HFileOutputFormat2_writePartitions_rdh
/** * Write out a {@link SequenceFile} that can be read by {@link TotalOrderPartitioner} that * contains the split points in startKeys. */ @SuppressWarnings("deprecation") private static void writePartitions(Configuration conf, Path partitionsPath, List<ImmutableBytesWritable> startKeys, boolean writeMultipleTables) throws IOException { LOG.info("Writing partition information to " + partitionsPath); if (startKeys.isEmpty()) { throw new IllegalArgumentException("No regions passed"); } // We're generating a list of split points, and we don't ever // have keys < the first region (which has an empty start key) // so we need to remove it. Otherwise we would end up with an // empty reducer with index 0 TreeSet<ImmutableBytesWritable> sorted = new TreeSet<>(startKeys); ImmutableBytesWritable first = sorted.first(); if (writeMultipleTables) { first = new ImmutableBytesWritable(MultiTableHFileOutputFormat.getSuffix(sorted.first().get())); } if (!first.equals(HConstants.EMPTY_BYTE_ARRAY)) { throw new IllegalArgumentException("First region of table should have empty start key. Instead has: " + Bytes.toStringBinary(first.get())); } sorted.remove(sorted.first()); // Write the actual file FileSystem fs = partitionsPath.getFileSystem(conf); SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, partitionsPath, ImmutableBytesWritable.class, NullWritable.class); try { for (ImmutableBytesWritable startKey : sorted) { writer.append(startKey, NullWritable.get()); } } finally { writer.close(); } }
3.26
hbase_HFileOutputFormat2_getRegionStartKeys_rdh
/** * Return the start keys of all of the regions in this table, as a list of ImmutableBytesWritable. */private static List<ImmutableBytesWritable> getRegionStartKeys(List<RegionLocator> regionLocators, boolean writeMultipleTables) throws IOException { ArrayList<ImmutableBytesWritable> v56 = new ArrayList<>(); for (RegionLocator regionLocator : regionLocators) { TableName tableName = regionLocator.getName(); LOG.info("Looking up current regions for table " + tableName); byte[][] byteKeys = regionLocator.getStartKeys();for (byte[] byteKey : byteKeys) { byte[] fullKey = byteKey;// HFileOutputFormat2 use case if (writeMultipleTables) { // MultiTableHFileOutputFormat use case fullKey = combineTableNameSuffix(tableName.getName(), byteKey); } if (LOG.isDebugEnabled()) { LOG.debug((("SplitPoint startkey for " + tableName) + ": ") + Bytes.toStringBinary(fullKey)); } v56.add(new ImmutableBytesWritable(fullKey)); } } return v56; }
3.26
hbase_HFileOutputFormat2_configureIncrementalLoad_rdh
/** * Configure a MapReduce Job to perform an incremental load into the given table. This * <ul> * <li>Inspects the table to configure a total order partitioner</li> * <li>Uploads the partitions file to the cluster and adds it to the DistributedCache</li> * <li>Sets the number of reduce tasks to match the current number of regions</li> * <li>Sets the output key/value class to match HFileOutputFormat2's requirements</li> * <li>Sets the reducer up to perform the appropriate sorting (either KeyValueSortReducer or * PutSortReducer)</li> * </ul> * The user should be sure to set the map output value class to either KeyValue or Put before * running this function. */public static void configureIncrementalLoad(Job job, TableDescriptor tableDescriptor, RegionLocator regionLocator) throws IOException { ArrayList<TableInfo> singleTableInfo = new ArrayList<>(); singleTableInfo.add(new TableInfo(tableDescriptor, regionLocator)); configureIncrementalLoad(job, singleTableInfo, HFileOutputFormat2.class); }
3.26
hbase_RegionLocations_mergeLocations_rdh
/** * Merges this RegionLocations list with the given list assuming same range, and keeping the most * up to date version of the HRegionLocation entries from either list according to seqNum. If * seqNums are equal, the location from the argument (other) is taken. * * @param other * the locations to merge with * @return an RegionLocations object with merged locations or the same object if nothing is merged */ @SuppressWarnings("ReferenceEquality") public RegionLocations mergeLocations(RegionLocations other) { assert other != null; HRegionLocation[] newLocations = null; // Use the length from other, since it is coming from meta. Otherwise, // in case of region replication going down, we might have a leak here. int max = other.locations.length; RegionInfo regionInfo = null; for (int i = 0; i < max; i++) { HRegionLocation thisLoc = this.getRegionLocation(i); HRegionLocation otherLoc = other.getRegionLocation(i); if (((regionInfo == null) && (otherLoc != null)) && (otherLoc.getRegion() != null)) { // regionInfo is the first non-null HRI from other RegionLocations. We use it to ensure that // all replica region infos belong to the same region with same region id. regionInfo = otherLoc.getRegion(); } HRegionLocation selectedLoc = selectRegionLocation(thisLoc, otherLoc, true, false); if (selectedLoc != thisLoc) { if (newLocations == null) { newLocations = new HRegionLocation[max]; System.arraycopy(locations, 0, newLocations, 0, i); } } if (newLocations != null) {newLocations[i] = selectedLoc;} } // ensure that all replicas share the same start code. Otherwise delete them if ((newLocations != null) && (regionInfo != null)) { for (int i = 0; i < newLocations.length; i++) {if (newLocations[i] != null) { if (!RegionReplicaUtil.isReplicasForSameRegion(regionInfo, newLocations[i].getRegion())) { newLocations[i] = null; } } } } return newLocations == null ? this : new RegionLocations(newLocations); }
3.26
hbase_RegionLocations_numNonNullElements_rdh
/** * Returns the size of not-null locations * * @return the size of not-null locations */ public int numNonNullElements() { return numNonNullElements; }
3.26
hbase_RegionLocations_size_rdh
/** * Returns the size of the list even if some of the elements might be null. * * @return the size of the list (corresponding to the max replicaId) */ public int size() { return locations.length; }
3.26
hbase_RegionLocations_removeElementsWithNullLocation_rdh
/** * Set the element to null if its getServerName method returns null. Returns null if all the * elements are removed. */ public RegionLocations removeElementsWithNullLocation() { HRegionLocation[] newLocations = new HRegionLocation[locations.length]; boolean hasNonNullElement = false; for (int i = 0; i < locations.length; i++) { if ((locations[i] != null) && (locations[i].getServerName() != null)) { hasNonNullElement = true; newLocations[i] = locations[i]; } } return hasNonNullElement ? new RegionLocations(newLocations) : null; }
3.26
hbase_RegionLocations_getRegionLocation_rdh
/** * Returns the first not-null region location in the list */ public HRegionLocation getRegionLocation() { for (HRegionLocation loc : locations) { if (loc != null) { return loc; } } return null; }
3.26
hbase_RegionLocations_isEmpty_rdh
/** * Returns whether there are non-null elements in the list * * @return whether there are non-null elements in the list */ public boolean isEmpty() { return numNonNullElements == 0; }
3.26
hbase_RegionLocations_getRegionLocationByRegionName_rdh
/**
 * Returns the region location from the list for matching regionName, which can be regionName or
 * encodedRegionName
 * @param regionName regionName or encodedRegionName
 * @return HRegionLocation found or null
 */
public HRegionLocation getRegionLocationByRegionName(byte[] regionName) {
  for (HRegionLocation loc : locations) {
    if (loc != null) {
      if (Bytes.equals(loc.getRegion().getRegionName(), regionName)
        || Bytes.equals(loc.getRegion().getEncodedNameAsBytes(), regionName)) {
        return loc;
      }
    }
  }
  return null;
}
3.26
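The read-side helpers above compose as in this sketch; `locs` is assumed to be a RegionLocations obtained from a meta lookup and is not defined in the snippets themselves.

if (!locs.isEmpty()) {
  HRegionLocation first = locs.getRegionLocation();   // first non-null replica location
  int slots = locs.size();                            // replica slots, including null ones
  int populated = locs.numNonNullElements();          // only the non-null slots
  HRegionLocation sameRegion =
    locs.getRegionLocationByRegionName(first.getRegion().getRegionName());
}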
hbase_BalanceAction_undoAction_rdh
/**
 * Returns an Action which would undo this action
 */
BalanceAction undoAction() {
  return this;
}
3.26
hbase_HFileLink_build_rdh
/**
 * Create an HFileLink instance from table/region/family/hfile location
 * @param conf   {@link Configuration} from which to extract specific archive locations
 * @param table  Table name
 * @param region Region Name
 * @param family Family Name
 * @param hfile  HFile Name
 * @return Link to the file with the specified table/region/family/hfile location
 * @throws IOException on unexpected error.
 */
public static HFileLink build(final Configuration conf, final TableName table, final String region,
    final String family, final String hfile) throws IOException {
  return HFileLink.buildFromHFileLinkPattern(conf, createPath(table, region, family, hfile));
}
3.26
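A hedged sketch of resolving a link's candidate locations with the builder above; the table, region, family, and hfile names are invented for illustration.

Configuration conf = HBaseConfiguration.create();
HFileLink link = HFileLink.build(conf, TableName.valueOf("testtb"),
  "4567abcd4567abcd4567abcd4567abcd", "cf", "abcd1234abcd1234abcd1234abcd1234");
Path origin = link.getOriginPath();     // location under the live table directory
Path archive = link.getArchivePath();   // fallback location under the archive directory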
hbase_HFileLink_m0_rdh
/**
 * Returns the path of the mob hfiles.
 */
public Path m0() {
  return this.f0;
}
3.26
hbase_HFileLink_getArchivePath_rdh
/**
 * Returns the path of the archived hfile.
 */
public Path getArchivePath() {
  return this.archivePath;
}
3.26
hbase_HFileLink_buildFromHFileLinkPattern_rdh
/**
 * @param rootDir          Path to the root directory where hbase files are stored
 * @param archiveDir       Path to the hbase archive directory
 * @param hFileLinkPattern The path of the HFile Link.
 */
public static final HFileLink buildFromHFileLinkPattern(final Path rootDir, final Path archiveDir,
    final Path hFileLinkPattern) {
  Path hfilePath = getHFileLinkPatternRelativePath(hFileLinkPattern);
  Path tempPath = new Path(new Path(rootDir, HConstants.HBASE_TEMP_DIRECTORY), hfilePath);
  Path originPath = new Path(rootDir, hfilePath);
  Path mobPath = new Path(new Path(rootDir, MobConstants.MOB_DIR_NAME), hfilePath);
  Path archivePath = new Path(archiveDir, hfilePath);
  return new HFileLink(originPath, tempPath, mobPath, archivePath);
}
3.26
hbase_HFileLink_m1_rdh
/**
 * Get the Region name of the referenced link
 * @param fileName HFileLink file name
 * @return the name of the referenced Region
 */
public static String m1(final String fileName) {
  Matcher m = REF_OR_HFILE_LINK_PATTERN.matcher(fileName);
  if (!m.matches()) {
    throw new IllegalArgumentException(fileName + " is not a valid HFileLink name!");
  }
  return m.group(3);
}
3.26
hbase_HFileLink_isHFileLink_rdh
/**
 * @param fileName File name to check.
 * @return True if the path is a HFileLink.
 */
public static boolean isHFileLink(String fileName) {
  Matcher m = LINK_NAME_PATTERN.matcher(fileName);
  if (!m.matches()) {
    return false;
  }
  return m.groupCount() > 2 && m.group(4) != null && m.group(3) != null && m.group(2) != null;
}
3.26
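A small sketch of the name check above; the link name follows the table=region-hfile shape and the concrete values are illustrative only.

String linkName = "testtb=4567abcd4567abcd4567abcd4567abcd-abcd1234abcd1234abcd1234abcd1234";
boolean isLink = HFileLink.isHFileLink(linkName);            // true
boolean isPlain = HFileLink.isHFileLink("abcd1234abcd1234"); // false: a plain hfile name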
hbase_HFileLink_createPath_rdh
/**
 * Create an HFileLink relative path for the table/region/family/hfile location
 * @param table  Table name
 * @param region Region Name
 * @param family Family Name
 * @param hfile  HFile Name
 * @return the relative Path to open the specified table/region/family/hfile link
 */
public static Path createPath(final TableName table, final String region, final String family,
    final String hfile) {
  if (HFileLink.isHFileLink(hfile)) {
    return new Path(family, hfile);
  }
  return new Path(family, HFileLink.createHFileLinkName(table, region, hfile));
}
3.26
hbase_HFileLink_createBackReferenceName_rdh
/**
 * Create the back reference name
 */
// package-private for testing
static String createBackReferenceName(final String tableNameStr, final String regionName) {
  return regionName + "." + tableNameStr.replace(TableName.NAMESPACE_DELIM, '=');
}
3.26
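A worked example of the helper above (callable from test code in the same package); the names are hypothetical. The namespace delimiter is swapped for '=' so the result remains a single path component.

String ref = createBackReferenceName("ns1:usertable", "4567abcd4567abcd4567abcd4567abcd");
// ref is "4567abcd4567abcd4567abcd4567abcd.ns1=usertable"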
hbase_HFileLink_getHFileLinkPatternRelativePath_rdh
/**
 * Convert a HFileLink path to a table relative path. e.g. the link:
 * /hbase/test/0123/cf/testtb=4567-abcd becomes: /hbase/testtb/4567/cf/abcd
 * @param path HFileLink path
 * @return Relative table path
 * @throws IOException on unexpected error.
 */
private static Path getHFileLinkPatternRelativePath(final Path path) {
  // table=region-hfile
  Matcher m = REF_OR_HFILE_LINK_PATTERN.matcher(path.getName());
  if (!m.matches()) {
    throw new IllegalArgumentException(path.getName() + " is not a valid HFileLink pattern!");
  }
  // Convert the HFileLink name into a real table/region/cf/hfile path.
  TableName tableName = TableName.valueOf(m.group(1), m.group(2));
  String regionName = m.group(3);
  String hfileName = m.group(4);
  String familyName = path.getParent().getName();
  Path tableDir = CommonFSUtils.getTableDir(new Path("./"), tableName);
  return new Path(tableDir, new Path(regionName, new Path(familyName, hfileName)));
}
3.26
hbase_HFileLink_getHFileFromBackReference_rdh
/**
 * Get the full path of the HFile referenced by the back reference
 * @param conf        {@link Configuration} to read for the archive directory name
 * @param linkRefPath Link Back Reference path
 * @return full path of the referenced hfile
 * @throws IOException on unexpected error.
 */
public static Path getHFileFromBackReference(final Configuration conf, final Path linkRefPath)
    throws IOException {
  return getHFileFromBackReference(CommonFSUtils.getRootDir(conf), linkRefPath);
}
3.26
hbase_HFileLink_create_rdh
/**
 * Create a new HFileLink
 * <p>
 * It also adds a back-reference to the hfile back-reference directory to simplify the
 * reference-count and the cleaning process.
 * @param conf          {@link Configuration} to read for the archive directory name
 * @param fs            {@link FileSystem} on which to write the HFileLink
 * @param dstFamilyPath - Destination path (table/region/cf/)
 * @param familyName    - Destination family name
 * @param dstTableName  - Destination table name
 * @param dstRegionName - Destination region name
 * @param linkedTable   - Linked Table Name
 * @param linkedRegion  - Linked Region Name
 * @param hfileName     - Linked HFile name
 * @param createBackRef - Whether back reference should be created. Defaults to true.
 * @return the file link name.
 * @throws IOException on file or parent directory creation failure
 */
public static String create(final Configuration conf, final FileSystem fs,
    final Path dstFamilyPath, final String familyName, final String dstTableName,
    final String dstRegionName, final TableName linkedTable, final String linkedRegion,
    final String hfileName, final boolean createBackRef) throws IOException {
  String name = createHFileLinkName(linkedTable, linkedRegion, hfileName);
  String refName = createBackReferenceName(dstTableName, dstRegionName);
  // Make sure the destination directory exists
  fs.mkdirs(dstFamilyPath);
  // Make sure the FileLink reference directory exists
  Path archiveStoreDir =
    HFileArchiveUtil.getStoreArchivePath(conf, linkedTable, linkedRegion, familyName);
  Path backRefPath = null;
  if (createBackRef) {
    Path backRefssDir = getBackReferencesDir(archiveStoreDir, hfileName);
    fs.mkdirs(backRefssDir);
    // Create the reference for the link
    backRefPath = new Path(backRefssDir, refName);
    fs.createNewFile(backRefPath);
  }
  try {
    // Create the link
    if (fs.createNewFile(new Path(dstFamilyPath, name))) {
      return name;
    }
  } catch (IOException e) {
    LOG.error("couldn't create the link=" + name + " for " + dstFamilyPath, e);
    // Revert the reference if the link creation failed
    if (createBackRef) {
      fs.delete(backRefPath, false);
    }
    throw e;
  }
  throw new IOException(
    "File link=" + name + " already exists under " + dstFamilyPath + " folder.");
}
3.26
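A hedged sketch of creating a link inside a destination family directory with the method above; `fs` and `conf` are assumed to exist already and all table, region, and hfile names are illustrative.

Path dstFamilyPath = new Path("/hbase/data/default/clonedtb/89abcdef89abcdef89abcdef89abcdef/cf");
String linkName = HFileLink.create(conf, fs, dstFamilyPath, "cf", "clonedtb",
  "89abcdef89abcdef89abcdef89abcdef", TableName.valueOf("testtb"),
  "4567abcd4567abcd4567abcd4567abcd", "abcd1234abcd1234abcd1234abcd1234", true);
// linkName encodes the linked table/region/hfile; a back reference was also written under the
// archived store directory so cleaners keep the source hfile alive while the link exists.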
hbase_HFileLink_getReferencedTableName_rdh
/**
 * Get the Table name of the referenced link
 * @param fileName HFileLink file name
 * @return the name of the referenced Table
 */
public static TableName getReferencedTableName(final String fileName) {
  Matcher m = REF_OR_HFILE_LINK_PATTERN.matcher(fileName);
  if (!m.matches()) {
    throw new IllegalArgumentException(fileName + " is not a valid HFileLink name!");
  }
  return TableName.valueOf(m.group(1), m.group(2));
}
3.26
hbase_HFileLink_getOriginPath_rdh
/**
 * Returns the origin path of the hfile.
 */
public Path getOriginPath() {
  return this.originPath;
}
3.26
hbase_HFileLink_getReferencedHFileName_rdh
/**
 * Get the HFile name of the referenced link
 * @param fileName HFileLink file name
 * @return the name of the referenced HFile
 */
public static String getReferencedHFileName(final String fileName) {
  Matcher m = REF_OR_HFILE_LINK_PATTERN.matcher(fileName);
  if (!m.matches()) {
    throw new IllegalArgumentException(fileName + " is not a valid HFileLink name!");
  }
  return m.group(4);
}
3.26
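Pulling the pieces back out of a link name with the accessors above (m1 is the region-name accessor as named in this extract); the name is illustrative.

String name = "testtb=4567abcd4567abcd4567abcd4567abcd-abcd1234abcd1234abcd1234abcd1234";
TableName table = HFileLink.getReferencedTableName(name);  // testtb
String region = HFileLink.m1(name);                        // 4567abcd4567abcd4567abcd4567abcd
String hfile = HFileLink.getReferencedHFileName(name);     // abcd1234abcd1234abcd1234abcd1234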
hbase_MasterCoprocessorHost_preTruncateRegion_rdh
/**
 * Invoked just before calling the truncate region procedure
 * @param regionInfo region being truncated
 */
public void preTruncateRegion(RegionInfo regionInfo) throws IOException {
  execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
    @Override
    public void call(MasterObserver observer) {
      observer.preTruncateRegion(this, regionInfo);
    }
  });
}
3.26
hbase_MasterCoprocessorHost_postCompletedMergeRegionsAction_rdh
/**
 * Invoked after completing merge regions operation
 * @param regionsToMerge the regions to merge
 * @param mergedRegion   the new merged region
 * @param user           the user
 */
public void postCompletedMergeRegionsAction(final RegionInfo[] regionsToMerge,
    final RegionInfo mergedRegion, final User user) throws IOException {
  execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) {
    @Override
    public void call(MasterObserver observer) throws IOException {
      observer.postCompletedMergeRegionsAction(this, regionsToMerge, mergedRegion);
    }
  });
}
3.26
hbase_MasterCoprocessorHost_preCreateTableRegionsInfos_rdh
/* Implementation of hooks for invoking MasterObservers */
public TableDescriptor preCreateTableRegionsInfos(TableDescriptor desc) throws IOException {
  if (coprocEnvironments.isEmpty()) {
    return desc;
  }
  return execOperationWithResult(
    new ObserverOperationWithResult<MasterObserver, TableDescriptor>(masterObserverGetter, desc) {
      @Override
      protected TableDescriptor call(MasterObserver observer) throws IOException {
        return observer.preCreateTableRegionsInfos(this, getResult());
      }
    });
}
3.26
hbase_MasterCoprocessorHost_preMergeRegionsCommit_rdh
/**
 * Invoked before merge regions operation writes the new region to hbase:meta
 * @param regionsToMerge the regions to merge
 * @param metaEntries    the meta entries
 * @param user           the user
 */
public void preMergeRegionsCommit(final RegionInfo[] regionsToMerge,
    @MetaMutationAnnotation final List<Mutation> metaEntries, final User user) throws IOException {
  execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) {
    @Override
    public void call(MasterObserver observer) throws IOException {
      observer.preMergeRegionsCommitAction(this, regionsToMerge, metaEntries);
    }
  });
}
3.26
hbase_MasterCoprocessorHost_preSplitBeforeMETAAction_rdh
/**
 * This will be called before the update META step as part of split table region procedure.
 * @param user the user
 */
public void preSplitBeforeMETAAction(final byte[] splitKey, final List<Mutation> metaEntries,
    final User user) throws IOException {
  execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) {
    @Override
    public void call(MasterObserver observer) throws IOException {
      observer.preSplitRegionBeforeMETAAction(this, splitKey, metaEntries);
    }
  });
}
3.26
hbase_MasterCoprocessorHost_preSplitRegionAction_rdh
/**
 * Invoked just before a split
 * @param tableName the table where the region belongs to
 * @param splitRow  the split point
 * @param user      the user
 */
public void preSplitRegionAction(final TableName tableName, final byte[] splitRow, final User user)
    throws IOException {
  execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) {
    @Override
    public void call(MasterObserver observer) throws IOException {
      observer.preSplitRegionAction(this, tableName, splitRow);
    }
  });
}
3.26
hbase_MasterCoprocessorHost_preCreateNamespace_rdh
// MasterObserver operations
// //////////////////////////////////////////////////////////////////////////////////////////////
public void preCreateNamespace(final NamespaceDescriptor ns) throws IOException {
  execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
    @Override
    public void call(MasterObserver observer) throws IOException {
      observer.preCreateNamespace(this, ns);
    }
  });
}
3.26
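The host methods in this section fan out to every registered MasterObserver; a hedged sketch of the observer side for the namespace hook above, with a made-up class name and policy.

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

public class NamespaceGuard implements MasterCoprocessor, MasterObserver {
  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
      NamespaceDescriptor ns) throws IOException {
    // Illustrative policy: only allow namespaces with a team_ prefix.
    if (!ns.getName().startsWith("team_")) {
      throw new IOException("namespace " + ns.getName() + " is not allowed");
    }
  }
}

Registered through hbase.coprocessor.master.classes, an exception raised here propagates back through the host's execOperation call and aborts the namespace creation.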
hbase_MasterCoprocessorHost_postCompletedSplitRegionAction_rdh
/**
 * Invoked just after a split
 * @param regionInfoA the new left-hand daughter region
 * @param regionInfoB the new right-hand daughter region
 * @param user        the user
 */
public void postCompletedSplitRegionAction(final RegionInfo regionInfoA,
    final RegionInfo regionInfoB, final User user) throws IOException {
  execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) {
    @Override
    public void call(MasterObserver observer) throws IOException {
      observer.postCompletedSplitRegionAction(this, regionInfoA, regionInfoB);
    }
  });
}
3.26
hbase_MasterCoprocessorHost_postTruncateRegion_rdh
/**
 * Invoked after calling the truncate region procedure
 * @param regionInfo region being truncated
 */
public void postTruncateRegion(RegionInfo regionInfo) throws IOException {
  execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
    @Override
    public void call(MasterObserver observer) {
      observer.postTruncateRegion(this, regionInfo);
    }
  });
}
3.26
hbase_MasterCoprocessorHost_m4_rdh
/**
 * Invoked just before calling the truncate region procedure
 * @param region Region to be truncated
 * @param user   The user
 */
public void m4(final RegionInfo region, User user) throws IOException {
  execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) {
    @Override
    public void call(MasterObserver observer) throws IOException {
      observer.preTruncateRegionAction(this, region);
    }
  });
}
3.26
hbase_MasterCoprocessorHost_postRollBackSplitRegionAction_rdh
/**
 * Invoked just after the rollback of a failed split
 * @param user the user
 */
public void postRollBackSplitRegionAction(final User user) throws IOException {
  execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) {
    @Override
    public void call(MasterObserver observer) throws IOException {
      observer.postRollBackSplitRegionAction(this);
    }
  });
}
3.26
hbase_MasterCoprocessorHost_preMergeRegionsAction_rdh
/**
 * Invoked just before a merge
 * @param regionsToMerge the regions to merge
 * @param user           the user
 */
public void preMergeRegionsAction(final RegionInfo[] regionsToMerge, final User user)
    throws IOException {
  execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) {
    @Override
    public void call(MasterObserver observer) throws IOException {
      observer.preMergeRegionsAction(this, regionsToMerge);
    }
  });
}
3.26
hbase_MasterCoprocessorHost_preSplitAfterMETAAction_rdh
/**
 * This will be called after the update META step as part of split table region procedure.
 * @param user the user
 */
public void preSplitAfterMETAAction(final User user) throws IOException {
  execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) {
    @Override
    public void call(MasterObserver observer) throws IOException {
      observer.preSplitRegionAfterMETAAction(this);
    }
  });
}
3.26
hbase_MasterCoprocessorHost_postMergeRegionsCommit_rdh
/**
 * Invoked after merge regions operation writes the new region to hbase:meta
 * @param regionsToMerge the regions to merge
 * @param mergedRegion   the new merged region
 * @param user           the user
 */
public void postMergeRegionsCommit(final RegionInfo[] regionsToMerge,
    final RegionInfo mergedRegion, final User user) throws IOException {
  execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) {
    @Override
    public void call(MasterObserver observer) throws IOException {
      observer.postMergeRegionsCommitAction(this, regionsToMerge, mergedRegion);
    }
  });
}
3.26
hbase_MasterCoprocessorHost_preSplitRegion_rdh
/**
 * Invoked just before calling the split region procedure
 * @param tableName the table where the region belongs to
 * @param splitRow  the split point
 */
public void preSplitRegion(final TableName tableName, final byte[] splitRow) throws IOException {
  execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
    @Override
    public void call(MasterObserver observer) throws IOException {
      observer.preSplitRegion(this, tableName, splitRow);
    }
  });
}
3.26