Columns: name (string, lengths 12 to 178), code_snippet (string, lengths 8 to 36.5k), score (float64, range 3.26 to 3.68)
hbase_ZKWatcher_keeperException_rdh
/** * Handles KeeperExceptions in client calls. * <p> * This may be temporary but for now this gives one place to deal with these. * <p> * TODO: Currently this method rethrows the exception to let the caller handle it. * * @param ke * the exception to rethrow * @throws KeeperException * if a ZooKeeper operation fails */ public void keeperException(KeeperException ke) throws KeeperException { LOG.error(prefix("Received unexpected KeeperException, re-throwing exception"), ke); throw ke; }
3.26
hbase_ZKWatcher_connectionEvent_rdh
// Connection management /** * Called when there is a connection-related event via the Watcher callback. * <p> * If Disconnected or Expired, this should shut down the cluster. But, since we send a * KeeperException.SessionExpiredException along with the abort call, it's possible for the * Abortable to catch it and try to create a new session with ZooKeeper. This is what the client * does in HCM. * <p> * * @param event * the connection-related event */ private void connectionEvent(WatchedEvent event) { switch (event.getState()) { case SyncConnected : this.identifier = (this.prefix + "-0x") + Long.toHexString(this.recoverableZooKeeper.getSessionId()); // Update our identifier. Otherwise ignore. LOG.debug("{} connected", this.identifier); break; // Note: only Expired aborts the server; Disconnected is transient and ignored case Disconnected : LOG.debug(prefix("Received Disconnected from ZooKeeper, ignoring")); break; case Closed : LOG.debug(prefix("ZooKeeper client closed, ignoring")); break; case Expired : String msg = prefix((this.identifier + " received expired from ") + "ZooKeeper, aborting"); // TODO: One thought is to add call to ZKListener so say, // ZKNodeTracker can zero out its data values. if (this.abortable != null) { this.abortable.abort(msg, new KeeperException.SessionExpiredException()); } break; case ConnectedReadOnly : case SaslAuthenticated : case AuthFailed : break; default : throw new IllegalStateException("Received event is not valid: " + event.getState()); } }
3.26
hbase_ZKWatcher_getMetaReplicaNodes_rdh
/** * Get the znodes corresponding to the meta replicas from ZK * * @return list of znodes * @throws KeeperException * if a ZooKeeper operation fails */ public List<String> getMetaReplicaNodes() throws KeeperException { List<String> childrenOfBaseNode = ZKUtil.listChildrenNoWatch(this, znodePaths.baseZNode);return filterMetaReplicaNodes(childrenOfBaseNode); }
3.26
hbase_ZKWatcher_checkAndSetZNodeAcls_rdh
/** * On master start, we check the znode ACLs under the root directory and set the ACLs properly if * needed. If the cluster goes from an insecure setup to a secure setup, this step is needed so * that the existing znodes created with open permissions are now changed with restrictive perms. */ public void checkAndSetZNodeAcls() { if (!ZKAuthentication.isSecureZooKeeper(getConfiguration())) { LOG.info("not a secure deployment, proceeding"); return; } // Check the base znode's permissions first. Only do the recursion if base znode's perms are not // correct. try { List<ACL> v8 = recoverableZooKeeper.getAcl(znodePaths.baseZNode, new Stat()); if (!isBaseZnodeAclSetup(v8)) { LOG.info("setting znode ACLs"); setZnodeAclsRecursive(znodePaths.baseZNode); } } catch (KeeperException.NoNodeException nne) { return; } catch (InterruptedException ie) { interruptedExceptionNoThrow(ie, false); } catch (IOException | KeeperException e) { LOG.warn("Received exception while checking and setting zookeeper ACLs", e); } }
3.26
hbase_ZKWatcher_isSuperUserId_rdh
/* Validates whether the given ACL id is a superuser. */ public static boolean isSuperUserId(String[] superUsers, Id id) { for (String v22 : superUsers) { // TODO: Validate super group members also when ZK supports setting node ACL for groups. if ((!AuthUtil.isGroupPrincipal(v22)) && new Id("sasl", v22).equals(id)) { return true; } } return false; }
3.26
hbase_ZKWatcher_checkACLForSuperUsers_rdh
/* Validate whether ACL set for all superusers. */ private boolean checkACLForSuperUsers(String[] superUsers, List<ACL> acls) { for (String user : superUsers) { boolean hasAccess = false; // TODO: Validate super group members also when ZK supports setting node ACL for groups. if (!AuthUtil.isGroupPrincipal(user)) { for (ACL acl : acls) {if (user.equals(acl.getId().getId())) { if (acl.getPerms() == Perms.ALL) { hasAccess = true; } else if (LOG.isDebugEnabled()) { LOG.debug(String.format("superuser '%s' does not have correct permissions: have 0x%x, want 0x%x", acl.getId().getId(), acl.getPerms(), Perms.ALL)); } break; } } if (!hasAccess) { return false; } } } return true; }
3.26
hbase_ZKWatcher_isBaseZnodeAclSetup_rdh
/** * Checks whether the ACLs returned from the base znode (/hbase) are set for a secure setup. * * @param acls * acls from zookeeper * @return whether ACLs are set for the base znode * @throws IOException * if getting the current user fails */ private boolean isBaseZnodeAclSetup(List<ACL> acls) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("Checking znode ACLs"); } String[] superUsers = conf.getStrings(Superusers.SUPERUSER_CONF_KEY); // Check whether the ACL is set for all superusers if ((superUsers != null) && (!checkACLForSuperUsers(superUsers, acls))) { return false; } // this assumes that current authenticated user is the same as zookeeper client user // configured via JAAS String hbaseUser = UserGroupInformation.getCurrentUser().getShortUserName(); if (acls.isEmpty()) { if (LOG.isDebugEnabled()) { LOG.debug("ACL is empty"); } return false; } for (ACL acl : acls) { int perms = acl.getPerms(); Id id = acl.getId(); // We should only set at most 3 possible ACLs for 3 Ids. One for everyone, one for superuser // and one for the hbase user if (Ids.ANYONE_ID_UNSAFE.equals(id)) { if (perms != Perms.READ) { if (LOG.isDebugEnabled()) { LOG.debug(String.format("permissions for '%s' are not correct: have 0x%x, want 0x%x", id, perms, Perms.READ)); } return false; } } else if ((superUsers != null) && isSuperUserId(superUsers, id)) { if (perms != Perms.ALL) { if (LOG.isDebugEnabled()) { LOG.debug(String.format("permissions for '%s' are not correct: have 0x%x, want 0x%x", id, perms, Perms.ALL)); } return false; } } else if ("sasl".equals(id.getScheme())) { String name = id.getId(); // If ZooKeeper recorded the Kerberos full name in the ACL, use only the shortname Matcher match = NAME_PATTERN.matcher(name); if (match.matches()) { name = match.group(1); } if (name.equals(hbaseUser)) { if (perms != Perms.ALL) { if (LOG.isDebugEnabled()) { LOG.debug(String.format("permissions for '%s' are not correct: have 0x%x, want 0x%x", id, perms, Perms.ALL)); } return false; } } else { if (LOG.isDebugEnabled()) { LOG.debug("Unexpected shortname in SASL ACL: {}", id); } return false; } } else { if (LOG.isDebugEnabled()) { LOG.debug("unexpected ACL id '{}'", id); } return false; } } return true; }
3.26
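For orientation, here is a minimal sketch of the ACL set that isBaseZnodeAclSetup accepts, built with plain ZooKeeper types (ACL, Id, Perms, Ids). The helper name expectedBaseZnodeAcls is hypothetical, not HBase API; it simply mirrors the three cases the check walks through: world READ, ALL for each SASL superuser, and ALL for the hbase client user.

import java.util.ArrayList;
import java.util.List;
import org.apache.zookeeper.ZooDefs.Ids;
import org.apache.zookeeper.ZooDefs.Perms;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Id;

class ExpectedAcls {
  // Mirrors the three branches of isBaseZnodeAclSetup above.
  static List<ACL> expectedBaseZnodeAcls(String[] superUsers, String hbaseUser) {
    List<ACL> acls = new ArrayList<>();
    acls.add(new ACL(Perms.READ, Ids.ANYONE_ID_UNSAFE)); // one for everyone, read-only
    if (superUsers != null) {
      for (String user : superUsers) { // one per (non-group) superuser
        acls.add(new ACL(Perms.ALL, new Id("sasl", user)));
      }
    }
    acls.add(new ACL(Perms.ALL, new Id("sasl", hbaseUser))); // one for the hbase user
    return acls;
  }
}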
hbase_ZKWatcher_getRecoverableZooKeeper_rdh
/** * Get the connection to ZooKeeper. * * @return connection reference to zookeeper */ public RecoverableZooKeeper getRecoverableZooKeeper() { return recoverableZooKeeper;}
3.26
hbase_ZKWatcher_syncOrTimeout_rdh
/** * Forces a synchronization of this ZooKeeper client connection within a timeout. Enforcing a * timeout lets the callers fail-fast rather than wait forever for the sync to finish. * <p> * Executing this method before running other methods will ensure that the subsequent operations * are up-to-date and consistent as of the time that the sync is complete. * <p> * This is used for compareAndSwap type operations where we need to read the data of an existing * node and delete or transition that node, utilizing the previously read version and data. We * want to ensure that the version read is up-to-date from when we begin the operation. * * @param path * the znode path to sync * @throws KeeperException * if the sync does not complete within the configured timeout */ public void syncOrTimeout(String path) throws KeeperException { final CountDownLatch latch = new CountDownLatch(1); long startTime = EnvironmentEdgeManager.currentTime(); this.recoverableZooKeeper.sync(path, (i, s, o) -> latch.countDown(), null); try { if (!latch.await(zkSyncTimeout, TimeUnit.MILLISECONDS)) { LOG.warn("sync() operation to ZK timed out. Configured timeout: {}ms. This usually points " + "to a ZK side issue. Check ZK server logs and metrics.", zkSyncTimeout); throw new KeeperException.RequestTimeoutException(); } } catch (InterruptedException e) { LOG.warn("Interrupted waiting for ZK sync() to finish.", e); Thread.currentThread().interrupt(); return; } if (LOG.isDebugEnabled()) { // TODO: Switch to a metric once server side ZK watcher metrics are implemented. This is a // useful metric to have since the latency of sync() impacts the callers. LOG.debug("ZK sync() operation took {}ms", EnvironmentEdgeManager.currentTime() - startTime); } }
3.26
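The compare-and-swap usage the Javadoc describes might look like the following sketch. deleteIfUnchanged is a hypothetical caller; ZKUtil.getDataNoWatch and ZKUtil.deleteNode are existing helpers in the same package, but the exact flow here is illustrative only.

// Sync first so the read below reflects all writes completed before this
// point, then act using the version we read (CAS-style delete).
static void deleteIfUnchanged(ZKWatcher zkw, String path) throws KeeperException {
  zkw.syncOrTimeout(path); // fail fast instead of waiting forever
  org.apache.zookeeper.data.Stat stat = new org.apache.zookeeper.data.Stat();
  byte[] data = ZKUtil.getDataNoWatch(zkw, path, stat); // reads data plus version
  if (data != null) {
    ZKUtil.deleteNode(zkw, path, stat.getVersion()); // fails if the node changed meanwhile
  }
}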
hbase_ZKWatcher_getQuorum_rdh
/** * Get the quorum address of this instance. * * @return quorum string of this zookeeper connection instance */ public String getQuorum() { return quorum; }
3.26
hbase_ZKWatcher_interruptedException_rdh
/** * Handles InterruptedExceptions in client calls. * * @param ie * the InterruptedException instance thrown * @throws KeeperException * the exception to throw, transformed from the InterruptedException */ public void interruptedException(InterruptedException ie) throws KeeperException { interruptedExceptionNoThrow(ie, true); // Throw a system error exception to let upper level handle it KeeperException keeperException = new KeeperException.SystemErrorException(); keeperException.initCause(ie); throw keeperException; }
3.26
hbase_ZKWatcher_m3_rdh
/** * Method called from ZooKeeper for events and connection status. * <p> * Valid events are passed along to listeners. Connection status changes are dealt with locally. */ @Override public void m3(WatchedEvent event) { LOG.debug(prefix(((((((("Received ZooKeeper Event, " + "type=") + event.getType()) + ", ") + "state=") + event.getState()) + ", ") + "path=") + event.getPath())); final String spanName = (ZKWatcher.class.getSimpleName() + "-") + identifier; if (!zkEventProcessor.isShutdown()) { zkEventProcessor.execute(TraceUtil.tracedRunnable(() -> processEvent(event), spanName)); } }
3.26
hbase_ZKWatcher_m2_rdh
/** * Returns the number of currently registered listeners */ public int m2() { return listeners.size(); }
3.26
hbase_ZKWatcher_unregisterAllListeners_rdh
/** * Clean all existing listeners */ public void unregisterAllListeners() { listeners.clear(); }
3.26
hbase_ZKWatcher_getZNodePaths_rdh
/** * Get the znodePaths. * <p> * Mainly used for mocking as Mockito cannot mock a field access. */ public ZNodePaths getZNodePaths() { return znodePaths; }
3.26
hbase_ZKWatcher_filterMetaReplicaNodes_rdh
/** * * @param nodes * Input list of znodes * @return Filtered list of znodes from nodes that belong to meta replica(s). */ private List<String> filterMetaReplicaNodes(List<String> nodes) { if ((nodes == null) || nodes.isEmpty()) { return new ArrayList<>(); } List<String> metaReplicaNodes = new ArrayList<>(2); String pattern = conf.get(ZNodePaths.META_ZNODE_PREFIX_CONF_KEY, ZNodePaths.META_ZNODE_PREFIX); for (String child : nodes) { if (child.startsWith(pattern)) { metaReplicaNodes.add(child); } } return metaReplicaNodes; }
3.26
hbase_ZKWatcher_registerListenerFirst_rdh
/** * Register the specified listener to receive ZooKeeper events and add it as the first in the list * of current listeners. * * @param listener * the listener to register */ public void registerListenerFirst(ZKListener listener) { listeners.add(0, listener);}
3.26
hbase_RpcThrottlingException_stringFromMillis_rdh
// Visible for TestRpcThrottlingException protected static String stringFromMillis(long millis) { StringBuilder buf = new StringBuilder(); long hours = millis / ((60 * 60) * 1000); long rem = millis % ((60 * 60) * 1000); long minutes = rem / (60 * 1000); rem = rem % (60 * 1000); long seconds = rem / 1000; long v9 = rem % 1000; if (hours != 0) { buf.append(hours); buf.append(hours > 1 ? "hrs, " : "hr, "); } if (minutes != 0) { buf.append(minutes); buf.append(minutes > 1 ? "mins, " : "min, "); } if (seconds != 0) { buf.append(seconds); buf.append("sec, "); } buf.append(v9); buf.append("ms"); return buf.toString(); }
3.26
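A couple of worked examples of the formatting above (hand-computed; note the method is protected, so callers would need to be in the same package):

// 3,726,015 ms = 1 h + 2 min + 6 s + 15 ms
RpcThrottlingException.stringFromMillis(3726015); // -> "1hr, 2mins, 6sec, 15ms"
// zero-valued hour/minute/second components are skipped; millis are always printed
RpcThrottlingException.stringFromMillis(60000);   // -> "1min, 0ms"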
hbase_UserQuotaState_setQuotas_rdh
/** * Add the quota information of the specified namespace. (This operation is part of the QuotaState * setup) */ public void setQuotas(final String namespace, Quotas quotas) { namespaceLimiters = setLimiter(namespaceLimiters, namespace, quotas);}
3.26
hbase_UserQuotaState_getTableLimiter_rdh
/** * Return the limiter for the specified table associated with this quota. If the table does not * have its own quota limiter the global one will be returned. In case there is no quota limiter * associated with this object a noop limiter will be returned. * * @return the quota limiter for the specified table */ public synchronized QuotaLimiter getTableLimiter(final TableName table) { lastQuery = EnvironmentEdgeManager.currentTime(); if (tableLimiters != null) { QuotaLimiter limiter = tableLimiters.get(table); if (limiter != null) return limiter; } if (namespaceLimiters != null) { QuotaLimiter limiter = namespaceLimiters.get(table.getNamespaceAsString()); if (limiter != null) return limiter; } return getGlobalLimiterWithoutUpdatingLastQuery(); }
3.26
hbase_UserQuotaState_update_rdh
/** * Perform an update of the quota state based on the other quota state object. (This operation is * executed by the QuotaCache) */ @Override public synchronized void update(final QuotaState other) {super.update(other); if (other instanceof UserQuotaState) { UserQuotaState uOther = ((UserQuotaState) (other)); tableLimiters = m0(tableLimiters, uOther.tableLimiters); namespaceLimiters = m0(namespaceLimiters, uOther.namespaceLimiters); bypassGlobals = uOther.bypassGlobals; } else { tableLimiters = null; namespaceLimiters = null; bypassGlobals = false; } }
3.26
hbase_UserQuotaState_isBypass_rdh
/** * Returns true if there is no quota information associated with this object */ @Override public synchronized boolean isBypass() { return (((!bypassGlobals) && (getGlobalLimiterWithoutUpdatingLastQuery() == NoopQuotaLimiter.get())) && ((tableLimiters == null) || tableLimiters.isEmpty())) && ((namespaceLimiters == null) || namespaceLimiters.isEmpty()); }
3.26
hbase_CostFunction_updateWeight_rdh
/** * Add the cost of this cost function to the weight of the candidate generator that is optimized * for this cost function. By default it is the RandomCandidateGenerator for a cost function. * Called once per init or after postAction. * * @param weights * the weights for every generator. */ public void updateWeight(double[] weights) { weights[GeneratorType.RANDOM.ordinal()] += m0(); }
3.26
hbase_CostFunction_scale_rdh
/** * Scale the value between 0 and 1. * * @param min * The min value * @param max * The max value * @param value * The value to be scaled. * @return The scaled value. */ protected static double scale(double min, double max, double value) { if ((((max <= min) || (value <= min)) || (Math.abs(max - min) <= COST_EPSILON)) || (Math.abs(value - min) <= COST_EPSILON)) { return 0; } return Math.max(0.0, Math.min(1.0, (value - min) / (max - min))); }
3.26
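Worked examples of the scaling above (hand-evaluated against the code, assuming COST_EPSILON is a tiny constant such as 1e-9):

scale(0, 100, 25);  // -> 0.25: plain (value - min) / (max - min)
scale(0, 100, 150); // -> 1.0:  clamped from above by Math.min
scale(0, 100, 0);   // -> 0:    value <= min short-circuits to 0
scale(5, 5, 7);     // -> 0:    degenerate range, max <= min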
hbase_CostFunction_prepare_rdh
/** * Called once per LB invocation to give the cost function a chance to initialize its state and * perform any costly calculation. */ void prepare(BalancerClusterState cluster) { this.cluster = cluster; }
3.26
hbase_CostFunction_postAction_rdh
/** * Called once per cluster Action to give the cost function an opportunity to update its state. * postAction() is always called at least once before cost() is called with the cluster that this * action is performed on. */ void postAction(BalanceAction action) { switch (action.getType()) { case NULL : break; case ASSIGN_REGION : AssignRegionAction ar = ((AssignRegionAction) (action)); regionMoved(ar.getRegion(), -1, ar.getServer()); break; case MOVE_REGION : MoveRegionAction mra = ((MoveRegionAction) (action)); regionMoved(mra.getRegion(), mra.getFromServer(), mra.getToServer()); break; case SWAP_REGIONS : SwapRegionsAction a = ((SwapRegionsAction) (action)); regionMoved(a.getFromRegion(), a.getFromServer(), a.getToServer()); regionMoved(a.getToRegion(), a.getToServer(), a.getFromServer()); break; default : throw new RuntimeException("Unknown action: " + action.getType()); } }
3.26
hbase_TableInputFormatBase_setTableRecordReader_rdh
/** * Allows subclasses to set the {@link TableRecordReader} to provide other * {@link TableRecordReader} implementations. */ protected void setTableRecordReader(TableRecordReader tableRecordReader) { this.tableRecordReader = tableRecordReader; }
3.26
hbase_TableInputFormatBase_getRecordReader_rdh
/** * Builds a TableRecordReader. If no TableRecordReader was provided, uses the default. * * @see InputFormat#getRecordReader(InputSplit, JobConf, Reporter) */ public RecordReader<ImmutableBytesWritable, Result> getRecordReader(InputSplit split, JobConf job, Reporter reporter) throws IOException { // In case a subclass uses the deprecated approach or calls initializeTable directly if (table == null) { initialize(job); } // null check in case our child overrides getTable to not throw. try { if (m0() == null) { // initialize() must not have been implemented in the subclass. throw new IOException(INITIALIZATION_ERROR); } } catch (IllegalStateException exception) { throw new IOException(INITIALIZATION_ERROR, exception); } TableSplit tSplit = ((TableSplit) (split)); // if no table record reader was provided use default final TableRecordReader trr = (this.tableRecordReader == null) ? new TableRecordReader() : this.tableRecordReader; trr.setStartRow(tSplit.getStartRow()); trr.setEndRow(tSplit.getEndRow()); trr.setHTable(this.table); trr.setInputColumns(this.inputColumns); trr.setRowFilter(this.rowFilter); trr.init(); return new RecordReader<ImmutableBytesWritable, Result>() { @Override public void close() throws IOException { trr.close(); closeTable(); } @Override public ImmutableBytesWritable createKey() { return trr.createKey(); } @Override public Result createValue() { return trr.createValue(); } @Override public long getPos() throws IOException { return trr.getPos(); } @Override public float getProgress() throws IOException { return trr.getProgress(); } @Override public boolean next(ImmutableBytesWritable key, Result value) throws IOException { return trr.next(key, value); } }; }
3.26
hbase_TableInputFormatBase_getSplits_rdh
/** * Calculates the splits that will serve as input for the map tasks. * <p/> * Splits are created in number equal to the smallest between numSplits and the number of * {@link org.apache.hadoop.hbase.regionserver.HRegion}s in the table. If the number of splits is * smaller than the number of {@link org.apache.hadoop.hbase.regionserver.HRegion}s then splits * are spanned across multiple {@link org.apache.hadoop.hbase.regionserver.HRegion}s and are * grouped as evenly as possible. When splits are uneven, the bigger splits are placed * first in the {@link InputSplit} array. * * @param job * the map task {@link JobConf} * @param numSplits * a hint to calculate the number of splits (mapred.map.tasks). * @return the input splits * @see InputFormat#getSplits(org.apache.hadoop.mapred.JobConf, int) */ public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException { if (this.table == null) { initialize(job); } // null check in case our child overrides getTable to not throw. try { if (m0() == null) { // initialize() must not have been implemented in the subclass. throw new IOException(INITIALIZATION_ERROR); } } catch (IllegalStateException exception) { throw new IOException(INITIALIZATION_ERROR, exception); } byte[][] startKeys = this.regionLocator.getStartKeys(); if ((startKeys == null) || (startKeys.length == 0)) { throw new IOException("Expecting at least one region"); } if ((this.inputColumns == null) || (this.inputColumns.length == 0)) { throw new IOException("Expecting at least one column"); } int realNumSplits = (numSplits > startKeys.length) ? startKeys.length : numSplits; InputSplit[] splits = new InputSplit[realNumSplits]; int middle = startKeys.length / realNumSplits; int startPos = 0; for (int i = 0; i < realNumSplits; i++) { int lastPos = startPos + middle; lastPos = ((startKeys.length % realNumSplits) > i) ? lastPos + 1 : lastPos; String regionLocation = regionLocator.getRegionLocation(startKeys[startPos]).getHostname(); splits[i] = new TableSplit(this.table.getName(), startKeys[startPos], (i + 1) < realNumSplits ? startKeys[lastPos] : HConstants.EMPTY_START_ROW, regionLocation); LOG.info((("split: " + i) + "->") + splits[i]); startPos = lastPos; } return splits; }
3.26
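The split-sizing arithmetic is easiest to see with concrete numbers. The following standalone re-derivation (not HBase code) walks 10 regions into 3 splits; the one leftover region goes to the first split, matching the "bigger splits first" note in the Javadoc:

int numRegions = 10, numSplits = 3;
int realNumSplits = Math.min(numSplits, numRegions); // 3
int middle = numRegions / realNumSplits;             // 3 regions per split
int remainder = numRegions % realNumSplits;          // 1 extra region
int startPos = 0;
for (int i = 0; i < realNumSplits; i++) {
  int lastPos = startPos + middle + (remainder > i ? 1 : 0);
  System.out.println("split " + i + ": regions [" + startPos + ", " + lastPos + ")");
  startPos = lastPos;
}
// prints: split 0: regions [0, 4)   <- the bigger split comes first
//         split 1: regions [4, 7)
//         split 2: regions [7, 10)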
hbase_TableInputFormatBase_closeTable_rdh
/** * Close the Table and related objects that were initialized via * {@link #initializeTable(Connection, TableName)}. */protected void closeTable() throws IOException { close(table, connection); table = null; connection = null; }
3.26
hbase_TableInputFormatBase_setRowFilter_rdh
/** * Allows subclasses to set the {@link Filter} to be used. */ protected void setRowFilter(Filter rowFilter) {this.rowFilter = rowFilter; }
3.26
hbase_TableInputFormatBase_setInputColumns_rdh
/** * @param inputColumns * the columns to be passed in the {@link Result} to the map task. */ protected void setInputColumns(byte[][] inputColumns) { this.inputColumns = inputColumns; }
3.26
hbase_TableInputFormatBase_m0_rdh
/** * Allows subclasses to get the {@link Table}. */ protected Table m0() { if (table == null) { throw new IllegalStateException(f0); } return this.table; }
3.26
hbase_TableInputFormatBase_initializeTable_rdh
/** * Allows subclasses to initialize the table information. * * @param connection * The Connection to the HBase cluster. MUST be unmanaged. We will close. * @param tableName * The {@link TableName} of the table to process. */ protected void initializeTable(Connection connection, TableName tableName) throws IOException { if ((this.table != null) || (this.connection != null)) { LOG.warn("initializeTable called multiple times. Overwriting connection and table " + "reference; TableInputFormatBase will not close these old references when done."); } this.table = connection.getTable(tableName); this.regionLocator = connection.getRegionLocator(tableName); this.connection = connection; }
3.26
hbase_FanOutOneBlockAsyncDFSOutput_recoverAndClose_rdh
/** * The close method to use when an error has occurred. For now we just call recoverFileLease. */ @Override public void recoverAndClose(CancelableProgressable reporter) throws IOException { if (buf != null) { buf.release(); buf = null; } closeDataNodeChannelsAndAwait(); endFileLease(client, fileId); RecoverLeaseFSUtils.recoverFileLease(dfs, new Path(f0), conf, reporter == null ? new CancelOnClose(client) : reporter); }
3.26
hbase_FanOutOneBlockAsyncDFSOutput_close_rdh
/** * End the current block and complete file at namenode. You should call * {@link #recoverAndClose(CancelableProgressable)} if this method throws an exception. */ @Override public void close() throws IOException { endBlock(); state = State.CLOSED; closeDataNodeChannelsAndAwait(); block.setNumBytes(ackedBlockLength); completeFile(client, namenode, f0, clientName, block, fileId); }
3.26
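A hypothetical caller honoring the documented contract, i.e. falling back to recoverAndClose(CancelableProgressable) when close() throws (the closeOutput wrapper is illustrative, not HBase API):

static void closeOutput(FanOutOneBlockAsyncDFSOutput out) throws java.io.IOException {
  try {
    out.close(); // normal path: end the block, complete the file at the namenode
  } catch (java.io.IOException e) {
    out.recoverAndClose(null); // null reporter -> default CancelOnClose behavior
    throw e;
  }
}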
hbase_FanOutOneBlockAsyncDFSOutput_completed_rdh
// all lock-free to make it run faster private void completed(Channel channel) { for (Iterator<Callback> iter = waitingAckQueue.iterator(); iter.hasNext();) { Callback c = iter.next(); // if the current unfinished replicas does not contain us then it means that we have already // acked this one, let's iterate to find the one we have not acked yet. if (c.unfinishedReplicas.remove(channel.id())) { long current = EnvironmentEdgeManager.currentTime(); streamSlowMonitor.checkProcessTimeAndSpeed(datanodeInfoMap.get(channel), c.packetDataLen, current - c.flushTimestamp, c.lastAckTimestamp, c.unfinishedReplicas.size()); c.lastAckTimestamp = current; if (c.unfinishedReplicas.isEmpty()) { // we need to remove first before complete the future. It is possible that after we // complete the future the upper layer will call close immediately before we remove the // entry from waitingAckQueue and lead to an IllegalStateException. And also set the // ackedBlockLength first otherwise we may use a wrong length to commit the block. This // may lead to multiple remove and assign but is OK. The semantic of iter.remove is // removing the entry returned by calling previous next, so if the entry has already been // removed then it is a no-op, and for the assign, the values are the same so no problem. iter.remove(); ackedBlockLength = c.ackedLength; // the future.complete check is to confirm that we are the only one who grabbed the work, // otherwise just give up and return. if (c.future.complete(c.ackedLength)) { // also wake up flush requests which have the same length. while (iter.hasNext()) { Callback maybeDummyCb = iter.next(); if (maybeDummyCb.ackedLength == c.ackedLength) { iter.remove(); maybeDummyCb.future.complete(c.ackedLength); } else { break; } } } } return; } } }
3.26
hbase_FanOutOneBlockAsyncDFSOutput_failed_rdh
// this usually does not happen which means it is not on the critical path so make it synchronized // so that the implementation will not burn up our brain as there are multiple state changes and // checks. private synchronized void failed(Channel channel, Supplier<Throwable> errorSupplier) { if (state == State.CLOSED) { return;} if (state == State.BROKEN) { failWaitingAckQueue(channel, errorSupplier); return; } if (state == State.CLOSING) { Callback c = waitingAckQueue.peekFirst(); if ((c == null) || (!c.unfinishedReplicas.contains(channel.id()))) { // nothing, the endBlock request has already finished. return; } } // disable further write, and fail all pending ack. state = State.BROKEN; failWaitingAckQueue(channel, errorSupplier); datanodeInfoMap.keySet().forEach(NettyFutureUtils::safeClose); }
3.26
hbase_FanOutOneBlockAsyncDFSOutput_flush_rdh
/** * Flush the buffer out to datanodes. * * @param syncBlock * will call hsync if true, otherwise hflush. * @return A CompletableFuture that holds the acked length after flushing. */ @Override public CompletableFuture<Long> flush(boolean syncBlock) { CompletableFuture<Long> future = new CompletableFuture<>(); flush0(future, syncBlock); return future; }
3.26
hbase_ThrottleSettings_getProto_rdh
/** * Returns a copy of the internal state of <code>this</code> */ ThrottleRequest getProto() { return proto.toBuilder().build(); }
3.26
hbase_FileMmapIOEngine_isPersistent_rdh
/** * File IO engine is always able to support persistent storage for the cache */ @Override public boolean isPersistent() { // TODO : HBASE-21981 needed for persistence to really work return true; }
3.26
hbase_FileMmapIOEngine_m1_rdh
/** * Sync the data to file after writing */ @Override public void m1() throws IOException { if (f0 != null) { f0.force(true); } }
3.26
hbase_FileMmapIOEngine_shutdown_rdh
/** * Close the file */ @Override public void shutdown() { try { f0.close(); } catch (IOException ex) {LOG.error("Can't shutdown cleanly", ex); } try { raf.close(); } catch (IOException ex) { LOG.error("Can't shutdown cleanly", ex); } }
3.26
hbase_FileMmapIOEngine_write_rdh
/** * Transfers data from the given byte buffer to the file. * * @param srcBuffer * the given byte buffer from which bytes are to be read * @param offset * The offset in the file where the first byte is to be written */ @Override public void write(ByteBuffer srcBuffer, long offset) throws IOException { bufferArray.write(offset, ByteBuff.wrap(srcBuffer)); }
3.26
hbase_ArrayBackedTag_hasArray_rdh
/** * Returns true, as this tag is backed by a byte array */ @Override public boolean hasArray() { return true; }
3.26
hbase_ArrayBackedTag_getValueOffset_rdh
/** * Returns Offset of actual tag bytes within the backed buffer */ @Override public int getValueOffset() { return this.offset + INFRASTRUCTURE_SIZE; }
3.26
hbase_ArrayBackedTag_getValueArray_rdh
/** * Returns The byte array backing this Tag. */ @Override public byte[] getValueArray() { return this.bytes; }
3.26
hbase_ArrayBackedTag_getType_rdh
/** * Returns the tag type */@Override public byte getType() { return this.type; }
3.26
hbase_ArrayBackedTag_getValueLength_rdh
/** * Returns Length of actual tag bytes within the backed buffer */ @Override public int getValueLength() { return this.length - INFRASTRUCTURE_SIZE; }
3.26
hbase_MobUtils_parseDate_rdh
/** * Parses the string to a date. * * @param dateString * The date as a string, in yyyymmdd format. * @return A date. */ public static Date parseDate(String dateString) throws ParseException { return LOCAL_FORMAT.get().parse(dateString); }
3.26
hbase_MobUtils_isReadEmptyValueOnMobCellMiss_rdh
/** * Indicates whether to return a null value when the mob file is missing or corrupt. The * information is set in the attribute "empty.value.on.mobcell.miss" of scan. * * @param scan * The current scan. * @return True if the readEmptyValueOnMobCellMiss is enabled. */ public static boolean isReadEmptyValueOnMobCellMiss(Scan scan) { byte[] readEmptyValueOnMobCellMiss = scan.getAttribute(MobConstants.EMPTY_VALUE_ON_MOBCELL_MISS); try { return (readEmptyValueOnMobCellMiss != null) && Bytes.toBoolean(readEmptyValueOnMobCellMiss); } catch (IllegalArgumentException e) { return false; } }
3.26
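The client-side counterpart is simply setting the same attribute on the Scan; a minimal sketch:

Scan scan = new Scan();
// opt into returning empty values when a mob file is missing or corrupt
scan.setAttribute(MobConstants.EMPTY_VALUE_ON_MOBCELL_MISS, Bytes.toBytes(true));
// server side: isReadEmptyValueOnMobCellMiss(scan) now returns true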
hbase_MobUtils_removeMobFiles_rdh
/** * Archives the mob files. * * @param conf * The current configuration. * @param fs * The current file system. * @param tableName * The table name. * @param tableDir * The table directory. * @param family * The name of the column family. * @param storeFiles * The files to be deleted. */ public static boolean removeMobFiles(Configuration conf, FileSystem fs, TableName tableName, Path tableDir, byte[] family, Collection<HStoreFile> storeFiles) { try { HFileArchiver.archiveStoreFiles(conf, fs, getMobRegionInfo(tableName), tableDir, family, storeFiles); LOG.info("Table {} {} expired mob files are deleted", tableName, storeFiles.size()); return true; } catch (IOException e) { LOG.error("Failed to delete the mob files, table {}", tableName, e); } return false;}
3.26
hbase_MobUtils_isMobRegionInfo_rdh
/** * Gets whether the current RegionInfo is a mob one. * * @param regionInfo * The current RegionInfo. * @return If true, the current RegionInfo is a mob one. */ public static boolean isMobRegionInfo(RegionInfo regionInfo) { return regionInfo == null ? false : getMobRegionInfo(regionInfo.getTable()).getEncodedName().equals(regionInfo.getEncodedName());}
3.26
hbase_MobUtils_createMobRefCell_rdh
/** * Creates a mob reference KeyValue. The value of the mob reference KeyValue is mobCellValueSize + * mobFileName. * * @param cell * The original Cell. * @param fileName * The mob file name where the mob reference KeyValue is written. * @param tableNameTag * The tag of the current table name. It's very important in cloning the * snapshot. * @return The mob reference KeyValue. */ public static Cell createMobRefCell(Cell cell, byte[] fileName, Tag tableNameTag) { // Append the tags to the KeyValue. // The key is same, the value is the filename of the mob file List<Tag> tags = new ArrayList<>(); // Add the ref tag as the 1st one. tags.add(MobConstants.MOB_REF_TAG); // Add the tag of the source table name, this table is where this mob file is flushed // from. // It's very useful in cloning the snapshot. When reading from the cloning table, we need to // find the original mob files by this table name. For details please see cloning // snapshot for mob files. tags.add(tableNameTag); return createMobRefCell(cell, fileName, TagUtil.fromList(tags)); }
3.26
hbase_MobUtils_hasValidMobRefCellValue_rdh
/** * Indicates whether the current mob ref cell has a valid value. A mob ref cell has a mob * reference tag. The value of a mob ref cell consists of two parts, real mob value length and mob * file name. The real mob value length takes 4 bytes. The remaining part is the mob file name. * * @param cell * The mob ref cell. * @return True if the cell has a valid value. */ public static boolean hasValidMobRefCellValue(Cell cell) { return cell.getValueLength() > Bytes.SIZEOF_INT; }
3.26
hbase_MobUtils_hasMobColumns_rdh
/** * Checks whether this table has mob-enabled columns. * * @param htd * The current table descriptor. * @return Whether this table has mob-enabled columns. */ public static boolean hasMobColumns(TableDescriptor htd) { ColumnFamilyDescriptor[] hcds = htd.getColumnFamilies(); for (ColumnFamilyDescriptor hcd : hcds) { if (hcd.isMobEnabled()) { return true; } } return false; }
3.26
hbase_MobUtils_isMobFileExpired_rdh
/** * Checks if the mob file is expired. * * @param column * The descriptor of the current column family. * @param current * The current time. * @param fileDate * The date string parsed from the mob file name. * @return True if the mob file is expired. */ public static boolean isMobFileExpired(ColumnFamilyDescriptor column, long current, String fileDate) { if (column.getMinVersions() > 0) { return false; } long timeToLive = column.getTimeToLive(); if (Integer.MAX_VALUE == timeToLive) { return false; } Date expireDate = new Date(current - (timeToLive * 1000)); expireDate = new Date(expireDate.getYear(), expireDate.getMonth(), expireDate.getDate()); try { Date date = parseDate(fileDate); if (date.getTime() < expireDate.getTime()) { return true; } } catch (ParseException e) { LOG.warn("Failed to parse the date " + fileDate, e); return false; } return false; }
3.26
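A hand-worked example of the expiry arithmetic above (dates illustrative):

// TTL = 3 days = 259,200 s; minVersions = 0
// current        = 2024-01-10T15:30 (local) -> current - ttl*1000 = 2024-01-07T15:30
// truncated to the day boundary by the (deprecated) Date ctor -> expireDate = 2024-01-07T00:00
// file date "20240106" parses to 2024-01-06T00:00 < expireDate -> expired
// file date "20240107" parses to 2024-01-07T00:00, not strictly less -> kept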
hbase_MobUtils_getTableName_rdh
/** * Get the table name from when this cell was written into a mob hfile as a TableName. * * @param cell * to extract tag from * @return name of table as a TableName. empty if the tag is not found. */ public static Optional<TableName> getTableName(Cell cell) { Optional<Tag> maybe = getTableNameTag(cell); Optional<TableName> name = Optional.empty(); if (maybe.isPresent()) { final Tag tag = maybe.get(); if (tag.hasArray()) { name = Optional.of(TableName.valueOf(tag.getValueArray(), tag.getValueOffset(), tag.getValueLength())); } else { // TODO ByteBuffer handling in tags looks busted. revisit. ByteBuffer buffer = tag.getValueByteBuffer().duplicate(); buffer.mark(); buffer.position(tag.getValueOffset()); buffer.limit(tag.getValueOffset() + tag.getValueLength()); name = Optional.of(TableName.valueOf(buffer));}} return name; }
3.26
hbase_MobUtils_getMobFileName_rdh
/** * Gets the mob file name from the mob ref cell. A mob ref cell has a mob reference tag. The value * of a mob ref cell consists of two parts, real mob value length and mob file name. The real mob * value length takes 4 bytes. The remaining part is the mob file name. * * @param cell * The mob ref cell. * @return The mob file name. */ public static String getMobFileName(Cell cell) { return Bytes.toString(cell.getValueArray(), cell.getValueOffset() + Bytes.SIZEOF_INT, cell.getValueLength() - Bytes.SIZEOF_INT); }
3.26
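Putting the two parses together, the mob ref cell value is a 4-byte length prefix followed by the file name. A small round-trip sketch with HBase's Bytes helper (the file name is made up):

// layout: [4-byte int: real mob value length][mob file name bytes]
byte[] value = Bytes.add(Bytes.toBytes(1048576), // real mob value is 1 MiB
  Bytes.toBytes("d41d8cd98f00b204e9800998ecf8427e20240101")); // hypothetical name
int mobValueLen = Bytes.toInt(value, 0); // the int getMobValueLength extracts
String mobFileName = Bytes.toString(value, Bytes.SIZEOF_INT, value.length - Bytes.SIZEOF_INT);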
hbase_MobUtils_getMobFamilyPath_rdh
/** * Gets the family dir of the mob files. It's * {HBASE_DIR}/mobdir/{namespace}/{tableName}/{regionEncodedName}/{columnFamilyName}. * * @param regionPath * The path of mob region which is a dummy one. * @param familyName * The current family name. * @return The family dir of the mob files. */ public static Path getMobFamilyPath(Path regionPath, String familyName) { return new Path(regionPath, familyName); }
3.26
hbase_MobUtils_getTableNameTag_rdh
/** * Gets the table name tag. * * @param cell * The current cell. * @return The table name tag. */ private static Optional<Tag> getTableNameTag(Cell cell) { Optional<Tag> tag = Optional.empty(); if (cell.getTagsLength() > 0) { tag = PrivateCellUtil.getTag(cell, TagType.MOB_TABLE_NAME_TAG_TYPE); } return tag; }
3.26
hbase_MobUtils_isRefOnlyScan_rdh
/** * Indicates whether it's a reference only scan. The information is set in the attribute * "hbase.mob.scan.ref.only" of scan. If it's a ref only scan, only the cells with ref tag are * returned. * * @param scan * The current scan. * @return True if it's a ref only scan. */ public static boolean isRefOnlyScan(Scan scan) {byte[] refOnly = scan.getAttribute(MobConstants.MOB_SCAN_REF_ONLY); try { return (refOnly != null) && Bytes.toBoolean(refOnly); } catch (IllegalArgumentException e) { return false; } }
3.26
hbase_MobUtils_getMobHome_rdh
/** * Gets the root dir of the mob files under the qualified HBase root dir. It's {rootDir}/mobdir. * * @param rootDir * The qualified path of HBase root directory. * @return The root dir of the mob file. */ public static Path getMobHome(Path rootDir) { return new Path(rootDir, MobConstants.MOB_DIR_NAME); }
3.26
hbase_MobUtils_getMobTableDir_rdh
/** * Gets the table dir of the mob files under the qualified HBase root dir. It's * {rootDir}/mobdir/data/${namespace}/${tableName} * * @param rootDir * The qualified path of HBase root directory. * @param tableName * The name of table. * @return The table dir of the mob file. */ public static Path getMobTableDir(Path rootDir, TableName tableName) { return CommonFSUtils.getTableDir(m1(rootDir), tableName); }
3.26
hbase_MobUtils_isCacheMobBlocks_rdh
/** * Indicates whether the scan contains the information of caching blocks. The information is set * in the attribute "hbase.mob.cache.blocks" of scan. * * @param scan * The current scan. * @return True when the Scan attribute specifies to cache the MOB blocks. */ public static boolean isCacheMobBlocks(Scan scan) { byte[] cache = scan.getAttribute(MobConstants.MOB_CACHE_BLOCKS); try { return (cache != null) && Bytes.toBoolean(cache); } catch (IllegalArgumentException e) { return false; } }
3.26
hbase_MobUtils_deserializeMobFileRefs_rdh
/** * Deserialize the set of referenced mob hfiles from store file metadata. * * @param bytes * compatibly serialized data. can not be null * @return a setmultimap of original table to list of hfile names. will be empty if no values. * @throws IllegalStateException * if there are values but no table name */ public static ImmutableSetMultimap.Builder<TableName, String> deserializeMobFileRefs(byte[] bytes) throws IllegalStateException { ImmutableSetMultimap.Builder<TableName, String> map = ImmutableSetMultimap.builder(); if (bytes.length > 1) { // TODO avoid turning the tablename pieces in to strings. String s = Bytes.toString(bytes); String[] v52 = s.split("//"); for (String tableEnc : v52) { final int v54 = tableEnc.indexOf('/'); if (v54 <= 0) { throw new IllegalStateException("MOB reference data does not match expected encoding: " + "no table name included before list of mob refs."); } TableName table = TableName.valueOf(tableEnc.substring(0, v54)); String[] refs = tableEnc.substring(v54 + 1).split(","); map.putAll(table, refs); } } else if (LOG.isDebugEnabled()) { // array length 1 should be the NULL_VALUE. if (!Arrays.equals(HStoreFile.NULL_VALUE, bytes)) { LOG.debug("Serialized MOB file refs array was treated as the placeholder 'no entries' but" + " didn't have the expected placeholder byte. expected={} and actual={}", Arrays.toString(HStoreFile.NULL_VALUE), Arrays.toString(bytes)); } } return map; }
3.26
hbase_MobUtils_m0_rdh
/** * Sets the attribute of caching blocks in the scan. * * @param scan * The current scan. * @param cacheBlocks * True, set the attribute of caching blocks into the scan, the scanner with * this scan caches blocks. False, the scanner doesn't cache blocks for this * scan. */ public static void m0(Scan scan, boolean cacheBlocks) { scan.setAttribute(MobConstants.MOB_CACHE_BLOCKS, Bytes.toBytes(cacheBlocks)); }
3.26
hbase_MobUtils_hasMobReferenceTag_rdh
/** * Whether the tag list has a mob reference tag. * * @param tags * The tag list. * @return True if the list has a mob reference tag, false if it doesn't. */ public static boolean hasMobReferenceTag(List<Tag> tags) {if (!tags.isEmpty()) { for (Tag tag : tags) { if (tag.getType() == TagType.MOB_REFERENCE_TAG_TYPE) { return true; } } } return false; }
3.26
hbase_MobUtils_isMobRegionName_rdh
/** * Gets whether the current region name follows the pattern of a mob region name. * * @param tableName * The current table name. * @param regionName * The current region name. * @return True if the current region name follows the pattern of a mob region name. */ public static boolean isMobRegionName(TableName tableName, byte[] regionName) { return Bytes.equals(regionName, getMobRegionInfo(tableName).getRegionName()); }
3.26
hbase_MobUtils_isRawMobScan_rdh
/** * Indicates whether it's a raw scan. The information is set in the attribute "hbase.mob.scan.raw" * of scan. For a mob cell, in a normal scan the scanner retrieves the mob cell from the mob * file. In a raw scan, the scanner directly returns the cell in HBase without retrieving the one * in the mob file. * * @param scan * The current scan. * @return True if it's a raw scan. */ public static boolean isRawMobScan(Scan scan) { byte[] raw = scan.getAttribute(MobConstants.MOB_SCAN_RAW); try { return (raw != null) && Bytes.toBoolean(raw); } catch (IllegalArgumentException e) { return false; } }
3.26
hbase_MobUtils_formatDate_rdh
/** * Formats a date to a string. * * @param date * The date. * @return The date as a string, in yyyymmdd format. */ public static String formatDate(Date date) { return LOCAL_FORMAT.get().format(date); }
3.26
hbase_MobUtils_cleanExpiredMobFiles_rdh
/** * Cleans the expired mob files. Cleans the files whose creation date is older than (current - * columnFamily.ttl), and the minVersions of that column family is 0. * * @param fs * The current file system. * @param conf * The current configuration. * @param tableName * The current table name. * @param columnDescriptor * The descriptor of the current column family. * @param cacheConfig * The cacheConfig that disables the block cache. * @param current * The current time. */ public static void cleanExpiredMobFiles(FileSystem fs, Configuration conf, TableName tableName, ColumnFamilyDescriptor columnDescriptor, CacheConfig cacheConfig, long current) throws IOException { long timeToLive = columnDescriptor.getTimeToLive(); if (Integer.MAX_VALUE == timeToLive) { // no need to clean, because the TTL is not set. return; } Calendar calendar = Calendar.getInstance(); calendar.setTimeInMillis(current - (timeToLive * 1000)); calendar.set(Calendar.HOUR_OF_DAY, 0); calendar.set(Calendar.MINUTE, 0); calendar.set(Calendar.SECOND, 0);Date expireDate = calendar.getTime();LOG.info(("MOB HFiles older than " + expireDate.toGMTString()) + " will be deleted!"); FileStatus[] stats = null; Path mobTableDir = CommonFSUtils.getTableDir(m1(conf), tableName); Path path = getMobFamilyPath(conf, tableName, columnDescriptor.getNameAsString()); try { stats = fs.listStatus(path); } catch (FileNotFoundException e) { LOG.warn("Failed to find the mob file " + path, e); } if (null == stats) { // no file found return; } List<HStoreFile> filesToClean = new ArrayList<>(); int deletedFileCount = 0; for (FileStatus file : stats) { String fileName = file.getPath().getName(); try { if (HFileLink.isHFileLink(file.getPath())) { HFileLink hfileLink = HFileLink.buildFromHFileLinkPattern(conf, file.getPath()); fileName = hfileLink.getOriginPath().getName(); } Date fileDate = parseDate(MobFileName.getDateFromName(fileName)); if (LOG.isDebugEnabled()) { LOG.debug("Checking file {}", fileName); } if (fileDate.getTime() < expireDate.getTime()) { if (LOG.isDebugEnabled()) { LOG.debug("{} is an expired file", fileName); } filesToClean.add(new HStoreFile(fs, file.getPath(), conf, cacheConfig, BloomType.NONE, true)); if (filesToClean.size() >= conf.getInt(MOB_CLEANER_BATCH_SIZE_UPPER_BOUND, DEFAULT_MOB_CLEANER_BATCH_SIZE_UPPER_BOUND)) { if (removeMobFiles(conf, fs, tableName, mobTableDir, columnDescriptor.getName(), filesToClean)) { deletedFileCount += filesToClean.size(); } filesToClean.clear(); } } } catch (Exception e) { LOG.error("Cannot parse the fileName " + fileName, e); } } if ((!filesToClean.isEmpty()) && removeMobFiles(conf, fs, tableName, mobTableDir, columnDescriptor.getName(), filesToClean)) { deletedFileCount += filesToClean.size(); } LOG.info("Table {} {} expired mob files in total are deleted", tableName, deletedFileCount); }
3.26
hbase_MobUtils_m1_rdh
/** * Gets the root dir of the mob files. It's {HBASE_DIR}/mobdir. * * @param conf * The current configuration. * @return the root dir of the mob file. */ public static Path m1(Configuration conf) { Path hbaseDir = new Path(conf.get(HConstants.HBASE_DIR)); return m1(hbaseDir); }
3.26
hbase_MobUtils_createWriter_rdh
/** * Creates a writer for the mob file in temp directory. * * @param conf * The current configuration. * @param fs * The current file system. * @param family * The descriptor of the current column family. * @param path * The path for a temp directory. * @param maxKeyCount * The key count. * @param compression * The compression algorithm. * @param cacheConfig * The current cache config. * @param cryptoContext * The encryption context. * @param checksumType * The checksum type. * @param bytesPerChecksum * The bytes per checksum. * @param blocksize * The HFile block size. * @param bloomType * The bloom filter type. * @param isCompaction * If the writer is used in compaction. * @return The writer for the mob file. */public static StoreFileWriter createWriter(Configuration conf, FileSystem fs, ColumnFamilyDescriptor family, Path path, long maxKeyCount, Compression.Algorithm compression, CacheConfig cacheConfig, Encryption.Context cryptoContext, ChecksumType checksumType, int bytesPerChecksum, int blocksize, BloomType bloomType, boolean isCompaction) throws IOException { return createWriter(conf, fs, family, path, maxKeyCount, compression, cacheConfig, cryptoContext, checksumType, bytesPerChecksum, blocksize, bloomType, isCompaction, null); }
3.26
hbase_MobUtils_isMobReferenceCell_rdh
/** * Whether the current cell is a mob reference cell. * * @param cell * The current cell. * @return True if the cell has a mob reference tag, false if it doesn't. */ public static boolean isMobReferenceCell(Cell cell) { return (cell.getTagsLength() > 0) && PrivateCellUtil.getTag(cell, TagType.MOB_REFERENCE_TAG_TYPE).isPresent(); }
3.26
hbase_MobUtils_getMobValueLength_rdh
/** * Gets the mob value length from the mob ref cell. A mob ref cell has a mob reference tag. The * value of a mob ref cell consists of two parts, real mob value length and mob file name. The * real mob value length takes 4 bytes. The remaining part is the mob file name. * * @param cell * The mob ref cell. * @return The real mob value length. */ public static int getMobValueLength(Cell cell) { return PrivateCellUtil.getValueAsInt(cell); }
3.26
hbase_MobUtils_getMobRegionPath_rdh
/** * Gets the region dir of the mob files under the specified root dir. It's * {rootDir}/mobdir/data/{namespace}/{tableName}/{regionEncodedName}. * * @param rootDir * The qualified path of HBase root directory. * @param tableName * The current table name. * @return The region dir of the mob files. */ public static Path getMobRegionPath(Path rootDir, TableName tableName) { Path tablePath = CommonFSUtils.getTableDir(m1(rootDir), tableName); RegionInfo regionInfo = getMobRegionInfo(tableName); return new Path(tablePath, regionInfo.getEncodedName()); }
3.26
hbase_MobUtils_serializeMobFileRefs_rdh
/** * Serialize a set of referenced mob hfiles * * @param mobRefSet * to serialize, may be null * @return byte array to i.e. put into store file metadata. will not be null */ public static byte[] serializeMobFileRefs(SetMultimap<TableName, String> mobRefSet) { if ((mobRefSet != null) && (mobRefSet.size() > 0)) { // Here we rely on the fact that '/' and ',' are not allowed in either table names nor hfile // names for serialization. // // exampleTable/filename1,filename2//example:table/filename5//otherTable/filename3,filename4 // // to approximate the needed capacity we use the fact that there will usually be 1 table name // and each mob filename is around 105 bytes. we pick an arbitrary number to cover "most" // single table name lengths StringBuilder sb = new StringBuilder(100 + (mobRefSet.size() * 105)); boolean doubleSlash = false; for (TableName tableName : mobRefSet.keySet()) { if (doubleSlash) { sb.append("//"); } else { doubleSlash = true; } sb.append(tableName).append("/"); boolean v48 = false; for (String refs : mobRefSet.get(tableName)) { if (v48) { sb.append(","); } else { v48 = true; } sb.append(refs); } } return Bytes.toBytes(sb.toString()); } else { return HStoreFile.NULL_VALUE; } }
3.26
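A round trip with deserializeMobFileRefs (shown earlier) clarifies the encoding; the values are illustrative and the multimap types are HBase's shaded Guava:

SetMultimap<TableName, String> refs = HashMultimap.create();
refs.put(TableName.valueOf("exampleTable"), "filename1");
refs.put(TableName.valueOf("exampleTable"), "filename2");
refs.put(TableName.valueOf("otherTable"), "filename3");
byte[] bytes = MobUtils.serializeMobFileRefs(refs);
// e.g. "exampleTable/filename1,filename2//otherTable/filename3" (set order may vary)
SetMultimap<TableName, String> back = MobUtils.deserializeMobFileRefs(bytes).build();
// back now holds the same mappings; a null/empty input serializes to HStoreFile.NULL_VALUE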
hbase_MobUtils_getQualifiedMobRootDir_rdh
/** * Gets the qualified root dir of the mob files. * * @param conf * The current configuration. * @return The qualified root dir. */ public static Path getQualifiedMobRootDir(Configuration conf) throws IOException { Path hbaseDir = new Path(conf.get(HConstants.HBASE_DIR)); Path mobRootDir = new Path(hbaseDir, MobConstants.MOB_DIR_NAME); FileSystem fs = mobRootDir.getFileSystem(conf); return mobRootDir.makeQualified(fs.getUri(), fs.getWorkingDirectory()); }
3.26
hbase_MobUtils_getMobColumnFamilies_rdh
/** * Get list of Mob column families (if any exists) * * @param htd * table descriptor * @return list of Mob column families */ public static List<ColumnFamilyDescriptor> getMobColumnFamilies(TableDescriptor htd) { List<ColumnFamilyDescriptor> fams = new ArrayList<ColumnFamilyDescriptor>(); ColumnFamilyDescriptor[] hcds = htd.getColumnFamilies(); for (ColumnFamilyDescriptor hcd : hcds) { if (hcd.isMobEnabled()) { fams.add(hcd); } } return fams; }
3.26
hbase_MobUtils_getMobRegionInfo_rdh
/** * Gets the RegionInfo of the mob files. This is a dummy region. The mob files are not saved in a * region in HBase. It's internally used only. * * @return A dummy mob region info. */ public static RegionInfo getMobRegionInfo(TableName tableName) { return RegionInfoBuilder.newBuilder(tableName).setStartKey(MobConstants.MOB_REGION_NAME_BYTES).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false).setRegionId(0).build(); }
3.26
hbase_WALEventTrackerTableAccessor_getRowKey_rdh
/** * Create rowKey: 1. We want RS name to be the leading part of rowkey so that we can query by RS * name filter. WAL name contains rs name as a leading part. 2. Timestamp when the event was * generated. 3. Add state of the wal. Combination of 1 + 2 + 3 is definitely going to create a * unique rowkey. * * @param payload * payload to process * @return rowKey byte[] */public static byte[] getRowKey(final WALEventTrackerPayload payload) { String walName = payload.getWalName(); // converting to string since this will help seeing the timestamp in string format using // hbase shell commands. String timestampStr = String.valueOf(payload.getTimeStamp()); String walState = payload.getState(); final String rowKeyStr = (((walName + DELIMITER) + timestampStr) + DELIMITER) + walState; return Bytes.toBytes(rowKeyStr); }
3.26
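An illustrative rowkey, assuming DELIMITER is the underscore character (the actual constant is defined elsewhere in WALEventTrackerTableAccessor and may differ):

// walName   = "rs-1.example.com%2C16020%2C1700000000000.1700000012345" (leads with the RS name)
// timestamp = 1700000012345 (kept as a string so it is readable in the hbase shell)
// state     = "ACTIVE"
// rowKey    = walName + "_" + "1700000012345" + "_" + "ACTIVE"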
hbase_WALEventTrackerTableAccessor_addWalEventTrackerRows_rdh
/** * Add wal event tracker rows to hbase:waleventtracker table * * @param walEventPayloads * List of walevents to process * @param connection * Connection to use. */ public static void addWalEventTrackerRows(Queue<WALEventTrackerPayload> walEventPayloads, final Connection connection) throws Exception { List<Put> puts = new ArrayList<>(walEventPayloads.size()); for (WALEventTrackerPayload payload : walEventPayloads) { final byte[] rowKey = getRowKey(payload); final Put put = new Put(rowKey); // TODO Do we need to SKIP_WAL ? put.setPriority(HConstants.NORMAL_QOS); put.addColumn(WAL_EVENT_TRACKER_INFO_FAMILY, Bytes.toBytes(RS_COLUMN), Bytes.toBytes(payload.getRsName())).addColumn(WAL_EVENT_TRACKER_INFO_FAMILY, Bytes.toBytes(WAL_NAME_COLUMN), Bytes.toBytes(payload.getWalName())).addColumn(WAL_EVENT_TRACKER_INFO_FAMILY, Bytes.toBytes(TIMESTAMP_COLUMN), Bytes.toBytes(payload.getTimeStamp())).addColumn(WAL_EVENT_TRACKER_INFO_FAMILY, Bytes.toBytes(f0), Bytes.toBytes(payload.getState())).addColumn(WAL_EVENT_TRACKER_INFO_FAMILY, Bytes.toBytes(WAL_LENGTH_COLUMN), Bytes.toBytes(payload.getWalLength())); puts.add(put); } doPut(connection, puts); }
3.26
hbase_HFileWriterImpl_checkKey_rdh
/** * Checks that the given Cell's key does not violate the key order. * * @param cell * Cell whose key to check. * @return true if the key is duplicate * @throws IOException * if the key or the key order is wrong */ protected boolean checkKey(final Cell cell) throws IOException { boolean isDuplicateKey = false; if (cell == null) { throw new IOException("Key cannot be null or empty"); } if (lastCell != null) { int keyComp = PrivateCellUtil.compareKeyIgnoresMvcc(this.hFileContext.getCellComparator(), lastCell, cell); if (keyComp > 0) { String message = getLexicalErrorMessage(cell); throw new IOException(message); } else if (keyComp == 0) { isDuplicateKey = true; }} return isDuplicateKey;}
3.26
hbase_HFileWriterImpl_writeInlineBlocks_rdh
/** * Gives inline block writers an opportunity to contribute blocks. */ private void writeInlineBlocks(boolean closing) throws IOException { for (InlineBlockWriter ibw : inlineBlockWriters) { while (ibw.shouldWriteBlock(closing)) { long offset = outputStream.getPos(); boolean cacheThisBlock = ibw.getCacheOnWrite(); ibw.writeInlineBlock(blockWriter.startWriting(ibw.getInlineBlockType())); blockWriter.writeHeaderAndData(outputStream); ibw.blockWritten(offset, blockWriter.getOnDiskSizeWithHeader(), blockWriter.getUncompressedSizeWithoutHeader()); totalUncompressedBytes += blockWriter.getUncompressedSizeWithHeader(); if (cacheThisBlock) { doCacheOnWrite(offset); } } } }
3.26
hbase_HFileWriterImpl_checkValue_rdh
/** * Checks the given value for validity. */ protected void checkValue(final byte[] value, final int offset, final int length) throws IOException { if (value == null) { throw new IOException("Value cannot be null"); } }
3.26
hbase_HFileWriterImpl_appendFileInfo_rdh
/** * Add to the file info. All added key/value pairs can be obtained using * {@link HFile.Reader#getHFileInfo()}. * * @param k * Key * @param v * Value * @throws IOException * in case the key or the value are invalid */ @Override public void appendFileInfo(final byte[] k, final byte[] v) throws IOException { fileInfo.append(k, v, true); }
3.26
hbase_HFileWriterImpl_m0_rdh
/** * Sets the file info offset in the trailer, finishes up populating fields in the file info, and * writes the file info into the given data output. The reason the data output is not always * {@link #outputStream} is that we store file info as a block in version 2. * * @param trailer * fixed file trailer * @param out * the data output to write the file info to */ protected final void m0(FixedFileTrailer trailer, DataOutputStream out) throws IOException { trailer.setFileInfoOffset(outputStream.getPos()); finishFileInfo(); long startTime = EnvironmentEdgeManager.currentTime(); fileInfo.write(out); HFile.updateWriteLatency(EnvironmentEdgeManager.currentTime() - startTime); }
3.26
hbase_HFileWriterImpl_finishBlock_rdh
/** * Clean up the data block that is currently being written. */ private void finishBlock() throws IOException { if ((!blockWriter.isWriting()) || (blockWriter.blockSizeWritten() == 0)) { return; } // Update the first data block offset if UNSET; used when scanning. if (firstDataBlockOffset == UNSET) { firstDataBlockOffset = outputStream.getPos(); } // Update the last data block offset each time through here. lastDataBlockOffset = outputStream.getPos(); blockWriter.writeHeaderAndData(outputStream); int onDiskSize = blockWriter.getOnDiskSizeWithHeader(); Cell indexEntry = getMidpoint(this.hFileContext.getCellComparator(), f0, firstCellInBlock); dataBlockIndexWriter.addEntry(PrivateCellUtil.getCellKeySerializedAsKeyValueKey(indexEntry), lastDataBlockOffset, onDiskSize); totalUncompressedBytes += blockWriter.getUncompressedSizeWithHeader(); if (cacheConf.shouldCacheDataOnWrite()) { doCacheOnWrite(lastDataBlockOffset); } }
3.26
hbase_HFileWriterImpl_finishInit_rdh
/** * Additional initialization steps */ protected void finishInit(final Configuration conf) { if (blockWriter != null) { throw new IllegalStateException("finishInit called twice"); } blockWriter = new HFileBlock.Writer(conf, blockEncoder, hFileContext, cacheConf.getByteBuffAllocator(), conf.getInt(MAX_BLOCK_SIZE_UNCOMPRESSED, hFileContext.getBlocksize() * 10));// Data block index writer boolean cacheIndexesOnWrite = cacheConf.shouldCacheIndexesOnWrite(); dataBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter(blockWriter, cacheIndexesOnWrite ? cacheConf : null, cacheIndexesOnWrite ? name : null, indexBlockEncoder); dataBlockIndexWriter.setMaxChunkSize(HFileBlockIndex.getMaxChunkSize(conf)); dataBlockIndexWriter.setMinIndexNumEntries(HFileBlockIndex.getMinIndexNumEntries(conf)); inlineBlockWriters.add(dataBlockIndexWriter); // Meta data block index writer metaBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter(); LOG.trace("Initialized with {}", cacheConf); }
3.26
hbase_HFileWriterImpl_doCacheOnWrite_rdh
/**
 * Caches the last written HFile block.
 * @param offset the offset of the block we want to cache. Used to determine the cache key.
 */
private void doCacheOnWrite(long offset) {
  cacheConf.getBlockCache().ifPresent(cache -> {
    HFileBlock cacheFormatBlock = blockWriter.getBlockForCaching(cacheConf);
    try {
      cache.cacheBlock(new BlockCacheKey(name, offset, true, cacheFormatBlock.getBlockType()),
        cacheFormatBlock, cacheConf.isInMemory(), true);
    } finally {
      // refCnt auto-increases when the block is added to the cache; see RAMCache#putIfAbsent
      cacheFormatBlock.release();
    }
  });
}
3.26
hbase_HFileWriterImpl_checkBlockBoundary_rdh
/**
 * At a block boundary, writes all the inline blocks and opens a new block.
 */
protected void checkBlockBoundary() throws IOException {
  boolean shouldFinishBlock = false;
  // This means hbase.writer.unified.encoded.blocksize.ratio was set to something different
  // from 0 and we should use the encoding ratio
  if (encodedBlockSizeLimit > 0) {
    shouldFinishBlock = blockWriter.encodedBlockSizeWritten() >= encodedBlockSizeLimit;
  } else {
    shouldFinishBlock = blockWriter.encodedBlockSizeWritten() >= hFileContext.getBlocksize()
      || blockWriter.blockSizeWritten() >= hFileContext.getBlocksize();
  }
  shouldFinishBlock &= blockWriter.checkBoundariesWithPredicate();
  if (shouldFinishBlock) {
    finishBlock();
    writeInlineBlocks(false);
    newBlock();
  }
}
3.26
hbase_HFileWriterImpl_append_rdh
/**
 * Add key/value to file. Keys must be added in an order that agrees with the Comparator
 * passed on construction.
 * @param cell Cell to add. Cannot be empty nor null.
 */
@Override
public void append(final Cell cell) throws IOException {
  // checkKey uses comparator to check we are writing in order.
  boolean dupKey = checkKey(cell);
  if (!dupKey) {
    checkBlockBoundary();
  }
  if (!blockWriter.isWriting()) {
    newBlock();
  }
  blockWriter.write(cell);
  totalKeyLength += PrivateCellUtil.estimatedSerializedSizeOfKey(cell);
  totalValueLength += cell.getValueLength();
  if (lenOfBiggestCell < PrivateCellUtil.estimatedSerializedSizeOf(cell)) {
    lenOfBiggestCell = PrivateCellUtil.estimatedSerializedSizeOf(cell);
    keyOfBiggestCell = PrivateCellUtil.getCellKeySerializedAsKeyValueKey(cell);
  }
  // Are we the first key in this block?
  if (firstCellInBlock == null) {
    // If cell is big, block will be closed and this firstCellInBlock reference will only
    // last a short while.
    firstCellInBlock = cell;
  }
  // TODO: What if cell is 10MB and we write infrequently? We hold on to cell here
  // indefinitely?
  lastCell = cell;
  entryCount++;
  this.maxMemstoreTS = Math.max(this.maxMemstoreTS, cell.getSequenceId());
  int tagsLength = cell.getTagsLength();
  if (tagsLength > this.maxTagsLength) {
    this.maxTagsLength = tagsLength;
  }
}
3.26
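As a complement, a hedged sketch (not from the dataset) of the caller's side of append: cells must arrive in comparator order, which is why the row keys below are zero-padded; writer construction is elided and the rows are hypothetical.

// Minimal sketch, assuming an already-open HFile.Writer is passed in.
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.util.Bytes;

public class AppendExample {
  static void writeRows(HFile.Writer writer) throws Exception {
    for (int i = 0; i < 3; i++) {
      // Zero-padded rows so lexicographic order matches numeric order.
      KeyValue kv = new KeyValue(Bytes.toBytes(String.format("row-%03d", i)),
        Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("value-" + i));
      writer.append(kv); // an out-of-order key would fail the checkKey ordering check
    }
  }
}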
hbase_HFileWriterImpl_getMidpoint_rdh
/**
 * Try to return a Cell that falls between <code>left</code> and <code>right</code> but that
 * is shorter, i.e. takes up less space. This trick is used when building the HFile block
 * index. It's an optimization and does not always work; in that case we just return the
 * <code>right</code> cell.
 * @return A cell that sorts between <code>left</code> and <code>right</code>.
 */
public static Cell getMidpoint(final CellComparator comparator, final Cell left,
  final Cell right) {
  if (right == null) {
    throw new IllegalArgumentException("right cell can not be null");
  }
  if (left == null) {
    return right;
  }
  // If Cells from meta table, don't mess around. meta table Cells have schema
  // (table,startrow,hash) so can't be treated as plain byte arrays. Just skip
  // out without trying to do this optimization.
  if (comparator instanceof MetaCellComparator) {
    return right;
  }
  byte[] midRow;
  boolean bufferBacked =
    left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell;
  if (bufferBacked) {
    midRow = getMinimumMidpointArray(((ByteBufferExtendedCell) left).getRowByteBuffer(),
      ((ByteBufferExtendedCell) left).getRowPosition(), left.getRowLength(),
      ((ByteBufferExtendedCell) right).getRowByteBuffer(),
      ((ByteBufferExtendedCell) right).getRowPosition(), right.getRowLength());
  } else {
    midRow = getMinimumMidpointArray(left.getRowArray(), left.getRowOffset(),
      left.getRowLength(), right.getRowArray(), right.getRowOffset(), right.getRowLength());
  }
  if (midRow != null) {
    return PrivateCellUtil.createFirstOnRow(midRow);
  }
  // Rows are same. Compare on families.
  if (bufferBacked) {
    midRow = getMinimumMidpointArray(((ByteBufferExtendedCell) left).getFamilyByteBuffer(),
      ((ByteBufferExtendedCell) left).getFamilyPosition(), left.getFamilyLength(),
      ((ByteBufferExtendedCell) right).getFamilyByteBuffer(),
      ((ByteBufferExtendedCell) right).getFamilyPosition(), right.getFamilyLength());
  } else {
    midRow = getMinimumMidpointArray(left.getFamilyArray(), left.getFamilyOffset(),
      left.getFamilyLength(), right.getFamilyArray(), right.getFamilyOffset(),
      right.getFamilyLength());
  }
  if (midRow != null) {
    return PrivateCellUtil.createFirstOnRowFamily(right, midRow, 0, midRow.length);
  }
  // Families are same. Compare on qualifiers.
  if (bufferBacked) {
    midRow = getMinimumMidpointArray(((ByteBufferExtendedCell) left).getQualifierByteBuffer(),
      ((ByteBufferExtendedCell) left).getQualifierPosition(), left.getQualifierLength(),
      ((ByteBufferExtendedCell) right).getQualifierByteBuffer(),
      ((ByteBufferExtendedCell) right).getQualifierPosition(), right.getQualifierLength());
  } else {
    midRow = getMinimumMidpointArray(left.getQualifierArray(), left.getQualifierOffset(),
      left.getQualifierLength(), right.getQualifierArray(), right.getQualifierOffset(),
      right.getQualifierLength());
  }
  if (midRow != null) {
    return PrivateCellUtil.createFirstOnRowCol(right, midRow, 0, midRow.length);
  }
  // No opportunity for optimization. Just return right key.
  return right;
}
3.26
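Since getMidpoint is public static, it can be exercised directly; a hedged sketch follows. The row keys are hypothetical and the expected output is an assumption about the shortening behavior, not a documented guarantee.

// Minimal sketch, assuming CellComparatorImpl.COMPARATOR as the standard non-meta comparator.
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl;
import org.apache.hadoop.hbase.util.Bytes;

public class MidpointExample {
  public static void main(String[] args) {
    Cell left = new KeyValue(Bytes.toBytes("aaaaaaaa"), Bytes.toBytes("f"),
      Bytes.toBytes("q"), Bytes.toBytes("v"));
    Cell right = new KeyValue(Bytes.toBytes("aac"), Bytes.toBytes("f"),
      Bytes.toBytes("q"), Bytes.toBytes("v"));
    // Expect a short fake row (likely "aab") that sorts between the two inputs
    // yet is much shorter than the 8-byte left row.
    Cell mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right);
    System.out.println(
      Bytes.toString(mid.getRowArray(), mid.getRowOffset(), mid.getRowLength()));
  }
}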
hbase_HFileWriterImpl_newBlock_rdh
/**
 * Ready a new block for writing.
 */
protected void newBlock() throws IOException {
  // This is where the next block begins.
  blockWriter.startWriting(BlockType.DATA);
  firstCellInBlock = null;
  if (lastCell != null) {
    f0 = lastCell;
  }
}
3.26
hbase_HFileWriterImpl_appendMetaBlock_rdh
/**
 * Add a meta block to the end of the file. Call before close(). Metadata blocks are
 * expensive. Fill one with a bunch of serialized data rather than do a metadata block per
 * metadata instance. If metadata is small, consider adding to file info using
 * {@link #appendFileInfo(byte[], byte[])}.
 * @param metaBlockName name of the block
 * @param content       will call readFields to get data later (DO NOT REUSE)
 */
@Override
public void appendMetaBlock(String metaBlockName, Writable content) {
  byte[] key = Bytes.toBytes(metaBlockName);
  int i;
  for (i = 0; i < metaNames.size(); ++i) {
    // stop when the current key is greater than our own
    byte[] cur = metaNames.get(i);
    if (Bytes.BYTES_RAWCOMPARATOR.compare(cur, 0, cur.length, key, 0, key.length) > 0) {
      break;
    }
  }
  metaNames.add(i, key);
  metaData.add(i, content);
}
3.26
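A hedged caller-side sketch (not from the dataset): batching serialized data into one meta block before close(), per the Javadoc's advice. The block name and payload are hypothetical; BytesWritable is just a convenient Writable.

// Minimal sketch, assuming an already-open HFile.Writer.
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.BytesWritable;

public class MetaBlockExample {
  static void addMeta(HFile.Writer writer) {
    // One block carrying a batch of serialized data; meta blocks are expensive, so batch.
    writer.appendMetaBlock("MY_BATCH_META", new BytesWritable(Bytes.toBytes("payload")));
    // writer.close() must come after all meta blocks have been appended.
  }
}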
hbase_OrderedInt16_encodeShort_rdh
/**
 * Write instance {@code val} into buffer {@code dst}.
 * @param dst the {@link PositionedByteRange} to write to
 * @param val the value to write to {@code dst}
 * @return the number of bytes written
 */
public int encodeShort(PositionedByteRange dst, short val) {
  return OrderedBytes.encodeInt16(dst, val, order);
}
3.26
hbase_OrderedInt16_decodeShort_rdh
/**
 * Read a {@code short} value from the buffer {@code src}.
 * @param src the {@link PositionedByteRange} to read the {@code short} from
 * @return the {@code short} read from the buffer
 */
public short decodeShort(PositionedByteRange src) {
  return OrderedBytes.decodeInt16(src);
}
3.26
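A hedged round-trip sketch (not from the dataset) tying encodeShort and decodeShort together. The 3-byte buffer size assumes one header byte plus two data bytes for an order-preserving int16; treat that sizing as an assumption.

// Minimal sketch using the predefined ASCENDING instance.
import org.apache.hadoop.hbase.types.OrderedInt16;
import org.apache.hadoop.hbase.util.PositionedByteRange;
import org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;

public class OrderedInt16Example {
  public static void main(String[] args) {
    OrderedInt16 type = OrderedInt16.ASCENDING;
    PositionedByteRange buf = new SimplePositionedMutableByteRange(3);
    int written = type.encodeShort(buf, (short) 1234);
    buf.setPosition(0); // rewind before decoding
    short decoded = type.decodeShort(buf);
    System.out.println(written + " bytes written, decoded " + decoded); // expect 1234
  }
}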
hbase_IndexBuilder_configureJob_rdh
/**
 * Job configuration.
 */
public static Job configureJob(Configuration conf, String[] args) throws IOException {
  String tableName = args[0];
  String columnFamily = args[1];
  System.out.println("****" + tableName);
  conf.set(TableInputFormat.SCAN, TableMapReduceUtil.convertScanToString(new Scan()));
  conf.set(TableInputFormat.INPUT_TABLE, tableName);
  conf.set("index.tablename", tableName);
  conf.set("index.familyname", columnFamily);
  String[] fields = new String[args.length - 2];
  System.arraycopy(args, 2, fields, 0, fields.length);
  conf.setStrings("index.fields", fields);
  Job job = new Job(conf, tableName);
  job.setJarByClass(IndexBuilder.class);
  job.setMapperClass(IndexBuilder.Map.class);
  job.setNumReduceTasks(0);
  job.setInputFormatClass(TableInputFormat.class);
  job.setOutputFormatClass(MultiTableOutputFormat.class);
  return job;
}
3.26
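A hedged driver sketch (not from the dataset) showing how configureJob is typically invoked; argument handling follows the usual MapReduce pattern and the usage string is an assumption about the expected positional arguments.

// Minimal driver sketch: table name, column family, then one or more attribute columns.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.util.GenericOptionsParser;

public class IndexBuilderDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
    if (otherArgs.length < 3) {
      System.err.println("Usage: IndexBuilder <TABLE_NAME> <COLUMN_FAMILY> <ATTR> [<ATTR>...]");
      System.exit(-1);
    }
    Job job = IndexBuilder.configureJob(conf, otherArgs);
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}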
hbase_ParseFilter_registerFilter_rdh
/**
 * Register a new filter with the parser. If a filter is already registered under the same
 * name, its mapping is replaced.
 * @param name        a name for the filter
 * @param filterClass fully qualified class name
 */
public static void registerFilter(String name, String filterClass) {
  if (LOG.isInfoEnabled()) {
    LOG.info("Registering new filter " + name);
  }
  filterHashMap.put(name, filterClass);
}
3.26
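A hedged sketch (not from the dataset) of registering and then resolving a filter name. Aliasing the built-in KeyOnlyFilter keeps the example runnable; in practice the registered class would be your own Filter implementation.

// Minimal sketch: register an alias, then parse a filter string that uses it.
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.ParseFilter;

public class RegisterFilterExample {
  public static void main(String[] args) throws Exception {
    ParseFilter.registerFilter("MyKeyOnly", "org.apache.hadoop.hbase.filter.KeyOnlyFilter");
    // The parser can now resolve the alias inside a filter expression.
    Filter f = new ParseFilter().parseFilterString("MyKeyOnly()");
    System.out.println(f.getClass().getName()); // expect ...filter.KeyOnlyFilter
  }
}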