name (stringlengths 12-178) | code_snippet (stringlengths 8-36.5k) | score (float64 3.26-3.68) |
---|---|---|
hbase_NewVersionBehaviorTracker_add_rdh | // DeleteTracker
@Override
public void add(Cell cell) {
prepare(cell);
byte v9 = cell.getTypeByte();
switch (Type.codeToType(v9)) {
// In the order they are seen. We put the null cq first.
case DeleteFamily :
// Delete all versions of all columns of the specified family
delFamMap.put(cell.getSequenceId(), new DeleteVersionsNode(cell.getTimestamp(), cell.getSequenceId()));
break;
case DeleteFamilyVersion :
// Delete all columns of the specified family and specified version
delFamMap.ceilingEntry(cell.getSequenceId()).getValue().addVersionDelete(cell);
break;
// These two kinds of markers are mixed in with Puts.
case DeleteColumn :
// Delete all versions of the specified column
delColMap.put(cell.getSequenceId(), new DeleteVersionsNode(cell.getTimestamp(), cell.getSequenceId()));
break;
case Delete :
// Delete the specified version of the specified column.
delColMap.ceilingEntry(cell.getSequenceId()).getValue().addVersionDelete(cell);
break;
default :
throw new AssertionError("Unknown delete marker type for " + cell);
}
} | 3.26 |
hbase_GssSaslServerAuthenticationProvider_handle_rdh | /**
* {@inheritDoc }
*/
@Override
public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
AuthorizeCallback ac = null;
for (Callback callback : callbacks) {
if (callback instanceof AuthorizeCallback) {
ac = ((AuthorizeCallback) (callback));
} else {
throw new UnsupportedCallbackException(callback, "Unrecognized SASL GSSAPI Callback");
}
}
if (ac != null) {
String authid = ac.getAuthenticationID();
String authzid = ac.getAuthorizationID();
if (authid.equals(authzid)) {
ac.setAuthorized(true);
} else {
ac.setAuthorized(false);
}
if (ac.isAuthorized()) {
LOG.debug("SASL server GSSAPI callback: setting canonicalized client ID: {}", authzid);
ac.setAuthorizedID(authzid);
}
}
} | 3.26 |
hbase_FixedIntervalRateLimiter_setNextRefillTime_rdh | // This method is strictly for testing purposes only
@Override
public void setNextRefillTime(long nextRefillTime) {
this.nextRefillTime = nextRefillTime;
} | 3.26 |
hbase_UserProvider_setUserProviderForTesting_rdh | /**
* Set the {@link UserProvider} in the given configuration that should be instantiated
*
* @param conf
* to update
* @param provider
* class of the provider to set
*/
public static void setUserProviderForTesting(Configuration conf, Class<? extends UserProvider> provider) {
conf.set(USER_PROVIDER_CONF_KEY, provider.getName());
} | 3.26 |
hbase_UserProvider_getCurrentUserName_rdh | /**
* Returns the userName for the current logged-in user.
*
* @throws IOException
* if the underlying user cannot be obtained
*/
public String getCurrentUserName() throws IOException {
User user = getCurrent();
return user == null ? null : user.getName();
} | 3.26 |
hbase_UserProvider_m0_rdh | // Provide the reload function that uses the executor thread.
@Override
public ListenableFuture<String[]> m0(final String k, String[] oldValue) throws Exception {
return executor.submit(new Callable<String[]>() {
@Override
public String[] call() throws Exception {
return getGroupStrings(k);
}
});
} | 3.26 |
hbase_UserProvider_getCurrent_rdh | /**
* Return the current user within the current execution context
*
* @throws IOException
* if the user cannot be loaded
*/
public User getCurrent() throws IOException {
return User.getCurrent();
} | 3.26 |
hbase_UserProvider_create_rdh | /**
* Wraps an underlying {@code UserGroupInformation} instance.
*
* @param ugi
* The base Hadoop user
*/
public User create(UserGroupInformation ugi) {
if (ugi == null) {
return null;
}
return new User.SecureHadoopUser(ugi, groupCache);
} | 3.26 |
hbase_UserProvider_instantiate_rdh | /**
* Instantiate the {@link UserProvider} specified in the configuration and set the passed
* configuration via {@link UserProvider#setConf(Configuration)}
*
* @param conf
* to read and set on the created {@link UserProvider}
* @return a {@link UserProvider} ready for use.
*/
public static UserProvider instantiate(Configuration conf) {
Class<? extends UserProvider> v2 = conf.getClass(USER_PROVIDER_CONF_KEY,
UserProvider.class, UserProvider.class);
return ReflectionUtils.newInstance(v2, conf);
} | 3.26 |
hbase_UserProvider_shouldLoginFromKeytab_rdh | /**
* In a secure environment, if a user has specified a keytab and principal, an HBase client will try
* to log in with them. Otherwise, the HBase client will try to obtain a ticket (through kinit) from
* the system.
*/
public boolean shouldLoginFromKeytab() {
return User.shouldLoginFromKeytab(this.getConf());
} | 3.26 |
hbase_UserProvider_isHadoopSecurityEnabled_rdh | /**
* Return whether or not Kerberos authentication is configured for Hadoop. For non-secure Hadoop,
* this always returns <code>false</code>. For secure Hadoop, it will return the value from
* {@code UserGroupInformation.isSecurityEnabled()}.
*/
public boolean isHadoopSecurityEnabled() {
return User.isSecurityEnabled();
} | 3.26 |
hbase_UserProvider_login_rdh | /**
* Log in with the given keytab and principal. This can be used for both an SPN (Service Principal
* Name) and a UPN (User Principal Name), which should be in the format clientname@REALM.
*
* @param fileConfKey
* config name for client keytab
* @param principalConfKey
* config name for client principal
* @throws IOException
* underlying exception from UserGroupInformation.loginUserFromKeytab
*/
public void login(String fileConfKey, String principalConfKey) throws IOException {
User.login(getConf().get(fileConfKey), getConf().get(principalConfKey));
} | 3.26 |
hbase_UserProvider_isHBaseSecurityEnabled_rdh | /**
* Returns <tt>true</tt> if security is enabled, <tt>false</tt> otherwise
*/
public boolean isHBaseSecurityEnabled() {
return User.isHBaseSecurityEnabled(this.getConf());
} | 3.26 |
hbase_PageFilter_parseFrom_rdh | /**
* Parse a serialized representation of {@link PageFilter}
*
* @param pbBytes
* A pb serialized {@link PageFilter} instance
* @return An instance of {@link PageFilter} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static PageFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.PageFilter proto;
try {
proto = FilterProtos.PageFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
return new PageFilter(proto.getPageSize());
} | 3.26 |
hbase_PageFilter_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof PageFilter)) {
return false;
}
PageFilter other = ((PageFilter) (o));
return this.getPageSize() == other.getPageSize();
} | 3.26 |
hbase_MetricsRegionServerWrapperImpl_initMobFileCache_rdh | /**
* Initializes the mob file cache.
*/
private void initMobFileCache() {
this.mobFileCache = this.regionServer.getMobFileCache().orElse(null);
} | 3.26 |
hbase_IncrementingEnvironmentEdge_incrementTime_rdh | /**
* Increment the time by the given amount
*/
public synchronized long incrementTime(long amount) {
timeIncrement += amount;
return timeIncrement;
} | 3.26 |
hbase_IncrementingEnvironmentEdge_currentTime_rdh | /**
* {@inheritDoc }
* <p>
* This method increments a known value for the current time each time this method is called. The
* first value is 1.
* </p>
*/
@Override
public synchronized long currentTime() {
return timeIncrement++;
} | 3.26 |
hbase_StripeMultiFileWriter_sanityCheckRight_rdh | /**
* Subclasses can call this method to make sure the last KV is within multi-writer range.
*
* @param right
* The right boundary of the writer.
* @param cell
* The cell whose row has to be checked.
*/
protected void sanityCheckRight(byte[] right, Cell cell) throws IOException {
if ((!Arrays.equals(StripeStoreFileManager.OPEN_KEY, right)) && (comparator.compareRows(cell, right, 0, right.length) >= 0)) {
String error = ((("The last row is higher or equal than the right boundary of [" + Bytes.toString(right)) + "]: [") + Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())) + "]";
LOG.error(error);
throw new IOException(error);
}
} | 3.26 |
hbase_StripeMultiFileWriter_sanityCheckLeft_rdh | /**
* Subclasses can call this method to make sure the first KV is within multi-writer range.
*
* @param left
* The left boundary of the writer.
* @param cell
* The cell whose row has to be checked.
*/
protected void sanityCheckLeft(byte[] left, Cell cell) throws IOException {
if ((!Arrays.equals(StripeStoreFileManager.OPEN_KEY, left)) && (comparator.compareRows(cell, left, 0, left.length) < 0)) {
String error = ((("The first row is lower than the left boundary of [" + Bytes.toString(left)) + "]: [") + Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())) + "]";
LOG.error(error);
throw new IOException(error);
}
} | 3.26 |
hbase_ClusterId_parseFrom_rdh | /**
* Parse the serialized representation of the {@link ClusterId}
*
* @param bytes
* A pb serialized {@link ClusterId} instance with pb magic prefix
* @return An instance of {@link ClusterId} made from <code>bytes</code>
* @see #toByteArray()
*/
public static ClusterId parseFrom(final byte[] bytes) throws DeserializationException {
if (ProtobufUtil.isPBMagicPrefix(bytes)) {
int pblen = ProtobufUtil.lengthOfPBMagic();
ClusterIdProtos.ClusterId.Builder builder = ClusterIdProtos.ClusterId.newBuilder();
ClusterIdProtos.ClusterId cid = null;
try {
ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
cid = builder.build();
} catch (IOException e) {
throw new DeserializationException(e);
}
return convert(cid);
} else {
// Presume it was written out this way, the old way.
return new ClusterId(Bytes.toString(bytes));
}
} | 3.26 |
hbase_ClusterId_toByteArray_rdh | /**
* Returns The clusterid serialized using pb w/ pb magic prefix
*/
public byte[] toByteArray() {
return ProtobufUtil.prependPBMagic(convert().toByteArray());
} | 3.26 |
hbase_ClusterId_convert_rdh | /**
* Returns A {@link ClusterId} made from the passed in <code>cid</code>
*/
public static ClusterId convert(final ClusterIdProtos.ClusterId cid) {
return new ClusterId(cid.getClusterId());
} | 3.26 |
hbase_ClusterId_toString_rdh | /**
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return this.id;
} | 3.26 |
hbase_SnapshotInfo_getSnapshotDescription_rdh | /**
* Returns the snapshot descriptor
*/
public SnapshotDescription getSnapshotDescription() {
return ProtobufUtil.createSnapshotDesc(this.snapshot);
} | 3.26 |
hbase_SnapshotInfo_addStoreFile_rdh | /**
* Add the specified store file to the stats
*
* @param region
* region encoded Name
* @param family
* family name
* @param storeFile
* store file name
* @param filesMap
* store files map for all snapshots, it may be null
* @return the store file information
*/
FileInfo addStoreFile(final RegionInfo region, final String family, final SnapshotRegionManifest.StoreFile storeFile, final Map<Path, Integer> filesMap)
throws IOException {
HFileLink link = HFileLink.build(conf, snapshotTable, region.getEncodedName(), family, storeFile.getName());
boolean isCorrupted = false;
boolean inArchive = false;
long size = -1;
try {
if (fs.exists(link.getArchivePath())) {
inArchive = true;
size = fs.getFileStatus(link.getArchivePath()).getLen();
hfilesArchiveSize.addAndGet(size);
hfilesArchiveCount.incrementAndGet();
// If store file is not shared with other snapshots and tables,
// increase nonSharedHfilesArchiveSize
if ((filesMap != null) && (!isArchivedFileStillReferenced(link.getArchivePath(), filesMap))) {
nonSharedHfilesArchiveSize.addAndGet(size);
}
} else if (fs.exists(link.getMobPath())) {
inArchive = true;
size = fs.getFileStatus(link.getMobPath()).getLen();
hfilesMobSize.addAndGet(size);
hfilesMobCount.incrementAndGet();
} else {
size = link.getFileStatus(fs).getLen();
hfilesSize.addAndGet(size);
hfilesCount.incrementAndGet();
}
isCorrupted = storeFile.hasFileSize() && (storeFile.getFileSize() != size);
if (isCorrupted) {
hfilesCorrupted.incrementAndGet();
}
} catch (FileNotFoundException e) {
hfilesMissing.incrementAndGet();
}
return new FileInfo(inArchive, size, isCorrupted);
} | 3.26 |
hbase_SnapshotInfo_isMissing_rdh | /**
* Returns true if the file is missing
*/
public boolean isMissing() {
return this.size < 0;
} | 3.26 |
hbase_SnapshotInfo_getMissingStoreFilesCount_rdh | /**
* Returns the number of missing store files
*/
public int getMissingStoreFilesCount() {
return hfilesMissing.get();
} | 3.26 |
hbase_SnapshotInfo_printSchema_rdh | /**
* Dump the {@link org.apache.hadoop.hbase.client.TableDescriptor}
*/
private void printSchema() {
System.out.println("Table Descriptor");
System.out.println("----------------------------------------");
System.out.println(snapshotManifest.getTableDescriptor().toString());
System.out.println();
} | 3.26 |
hbase_SnapshotInfo_getSnapshotsFilesMap_rdh | /**
* Returns the map of store files based on path for all snapshots
*
* @param conf
* the {@link Configuration} to use
* @param uniqueHFilesArchiveSize
* pass out the size for store files in archive
* @param uniqueHFilesSize
* pass out the size for store files shared
* @param uniqueHFilesMobSize
* pass out the size for mob store files shared
* @return the map of store files
*/
public static Map<Path, Integer> getSnapshotsFilesMap(final Configuration conf, AtomicLong uniqueHFilesArchiveSize,
AtomicLong uniqueHFilesSize, AtomicLong uniqueHFilesMobSize) throws IOException {
List<SnapshotDescription> snapshotList = m0(conf);
if (snapshotList.isEmpty()) {
return Collections.emptyMap();
}
ConcurrentHashMap<Path, Integer> fileMap = new ConcurrentHashMap<>();
ExecutorService exec = SnapshotManifest.createExecutor(conf, "SnapshotsFilesMapping");
try {
for (final SnapshotDescription snapshot : snapshotList) {
getSnapshotFilesMap(conf, snapshot, exec, fileMap, uniqueHFilesArchiveSize, uniqueHFilesSize, uniqueHFilesMobSize);
}
} finally {
exec.shutdown();
}
return fileMap;
} | 3.26 |
hbase_SnapshotInfo_getSharedStoreFilesSize_rdh | /**
* Returns the total size of the store files shared
*/
public long getSharedStoreFilesSize() {
return hfilesSize.get();
} | 3.26 |
hbase_SnapshotInfo_getMobStoreFilesSize_rdh | /**
* Returns the total size of the store files in the mob store
*/
public long getMobStoreFilesSize() {
return hfilesMobSize.get();
}
/**
*
* @return the total size of the store files in the archive which is not shared with other
snapshots and tables This is only calculated when
{@link #getSnapshotStats(Configuration, SnapshotProtos.SnapshotDescription, Map)} | 3.26 |
hbase_SnapshotInfo_inArchive_rdh | /**
* Returns true if the file is in the archive
*/
public boolean inArchive() {
return this.inArchive;
} | 3.26 |
hbase_SnapshotInfo_getSharedStoreFilePercentage_rdh | /**
* Returns the percentage of the shared store files
*/
public float getSharedStoreFilePercentage() {
return (((float) (hfilesSize.get())) / getStoreFilesSize()) * 100;
} | 3.26 |
hbase_SnapshotInfo_getMobStoreFilePercentage_rdh | /**
* Returns the percentage of the mob store files
*/
public float getMobStoreFilePercentage() {
return (((float) (hfilesMobSize.get())) / getStoreFilesSize()) * 100;
} | 3.26 |
hbase_SnapshotInfo_getMobStoreFilesCount_rdh | /**
* Returns the number of available store files in the mob dir
*/
public int getMobStoreFilesCount() {
return hfilesMobCount.get();
} | 3.26 |
hbase_SnapshotInfo_loadSnapshotInfo_rdh | /**
* Load snapshot info and table descriptor for the specified snapshot
*
* @param snapshotName
* name of the snapshot to load
* @return false if snapshot is not found
*/
private boolean loadSnapshotInfo(final String snapshotName) throws IOException {
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
if (!fs.exists(snapshotDir)) {
LOG.warn((("Snapshot '" + snapshotName) + "' not found in: ") + snapshotDir);
return false;
}
SnapshotProtos.SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
snapshotManifest = SnapshotManifest.open(getConf(), fs, snapshotDir, snapshotDesc);
return true;
} | 3.26 |
hbase_SnapshotInfo_isArchivedFileStillReferenced_rdh | /**
* Check whether, for a given file in the archive, other snapshots or tables still reference it.
*
* @param filePath
* file path in archive
* @param snapshotFilesMap
* a map of snapshot store files to the number of snapshots that refer to
* them.
* @return true if the file is still referenced, false otherwise
*/
private boolean isArchivedFileStillReferenced(final Path filePath, final Map<Path, Integer> snapshotFilesMap) {
Integer c = snapshotFilesMap.get(filePath);
// Check if there are other snapshots or table from clone_snapshot() (via back-reference)
// still reference to it.
if ((c != null) && (c == 1)) {
Path parentDir = filePath.getParent();
Path backRefDir = HFileLink.getBackReferencesDir(parentDir, filePath.getName());
try {
if (CommonFSUtils.listStatus(fs, backRefDir) == null) {
return false;
}
} catch (IOException e) {
// For the purpose of this function, IOException is ignored and treated as
// the file is still being referenced.
}
}
return true;
} | 3.26 |
hbase_SnapshotInfo_getCorruptedStoreFilesCount_rdh | /**
* Returns the number of corrupted store files
*/
public int getCorruptedStoreFilesCount() {
return hfilesCorrupted.get();
} | 3.26 |
hbase_SnapshotInfo_getSnapshotStats_rdh | /**
* Returns the snapshot stats
*
* @param conf
* the {@link Configuration} to use
* @param snapshotDesc
* HBaseProtos.SnapshotDescription to get stats from
* @param filesMap
* {@link Map} store files map for all snapshots, it may be null
* @return the snapshot stats
*/
public static SnapshotStats getSnapshotStats(final Configuration conf, final SnapshotProtos.SnapshotDescription snapshotDesc, final Map<Path, Integer> filesMap) throws IOException {
Path rootDir = CommonFSUtils.getRootDir(conf);
FileSystem fs = FileSystem.get(rootDir.toUri(), conf);
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotDesc, rootDir);
SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc);
final SnapshotStats stats = new SnapshotStats(conf, fs, snapshotDesc);
SnapshotReferenceUtil.concurrentVisitReferencedFiles(conf, fs, manifest, "SnapshotsStatsAggregation", new SnapshotReferenceUtil.SnapshotVisitor() {
@Override
public void storeFile(final RegionInfo regionInfo, final String family, final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
if (!storeFile.hasReference()) {
stats.addStoreFile(regionInfo, family, storeFile, filesMap);
}
}
});
return stats;
}
/**
* Returns the list of available snapshots in the specified location
*
* @param conf
* the {@link Configuration} | 3.26 |
hbase_SnapshotInfo_addLogFile_rdh | /**
* Add the specified log file to the stats
*
* @param server
* server name
* @param logfile
* log file name
* @return the log information
*/
FileInfo addLogFile(final String server, final String logfile) throws IOException {
WALLink logLink = new WALLink(conf, server, logfile);
long v8 = -1;
try {
v8 = logLink.getFileStatus(fs).getLen();
logSize.addAndGet(v8);
logsCount.incrementAndGet();
} catch (FileNotFoundException e) {
logsMissing.incrementAndGet();
}
return new FileInfo(false, v8, false);
} | 3.26 |
hbase_SnapshotInfo_printInfo_rdh | /**
* Dump the {@link SnapshotDescription}
*/
private void printInfo() {
SnapshotProtos.SnapshotDescription snapshotDesc = snapshotManifest.getSnapshotDescription();
SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
System.out.println("Snapshot Info");
System.out.println("----------------------------------------");
System.out.println(" Name: " + snapshotDesc.getName());
System.out.println(" Type: " + snapshotDesc.getType());
System.out.println(" Table: " + snapshotDesc.getTable());
System.out.println(" Format: " + snapshotDesc.getVersion());
System.out.println("Created: " + df.format(new Date(snapshotDesc.getCreationTime())));
System.out.println(" Ttl: " + snapshotDesc.getTtl());
System.out.println(" Owner: " + snapshotDesc.getOwner());
System.out.println();
} | 3.26 |
hbase_SnapshotInfo_isSnapshotCorrupted_rdh | /**
* Returns true if the snapshot is corrupted
*/
public boolean isSnapshotCorrupted() {
return ((hfilesMissing.get() > 0) || (logsMissing.get() > 0)) || (hfilesCorrupted.get() > 0);
} | 3.26 |
hbase_SnapshotInfo_getSnapshotFilesMap_rdh | /**
* Gets the store files map for snapshot
*
* @param conf
* the {@link Configuration} to use
* @param snapshot
* {@link SnapshotDescription} to get stats from
* @param exec
* the {@link ExecutorService} to use
* @param filesMap
* {@link Map} the map to put the mapping entries
* @param uniqueHFilesArchiveSize
* {@link AtomicLong} the accumulated store file size in archive
* @param uniqueHFilesSize
* {@link AtomicLong} the accumulated store file size shared
* @param uniqueHFilesMobSize
* {@link AtomicLong} the accumulated mob store file size shared
*/
private static void getSnapshotFilesMap(final Configuration conf, final SnapshotDescription snapshot, final ExecutorService exec, final ConcurrentHashMap<Path, Integer> filesMap, final AtomicLong uniqueHFilesArchiveSize, final AtomicLong uniqueHFilesSize, final AtomicLong uniqueHFilesMobSize) throws IOException {
SnapshotProtos.SnapshotDescription snapshotDesc = ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot);
Path rootDir = CommonFSUtils.getRootDir(conf);
final FileSystem fs = FileSystem.get(rootDir.toUri(), conf);
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotDesc, rootDir);
SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc);
SnapshotReferenceUtil.concurrentVisitReferencedFiles(conf, fs, manifest, exec, new SnapshotReferenceUtil.SnapshotVisitor() {
@Override
public void storeFile(final RegionInfo regionInfo, final String family, final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
if (!storeFile.hasReference()) {
HFileLink link = HFileLink.build(conf, snapshot.getTableName(), regionInfo.getEncodedName(), family,
storeFile.getName());
long size;
Integer count;
Path v44;
AtomicLong al;
int c = 0;
if (fs.exists(link.getArchivePath())) {
v44 = link.getArchivePath();
al = uniqueHFilesArchiveSize;
size = fs.getFileStatus(v44).getLen();
} else if (fs.exists(link.getMobPath())) {
v44 = link.getMobPath();
al = uniqueHFilesMobSize;
size = fs.getFileStatus(v44).getLen();
} else {
v44 = link.getOriginPath();
al = uniqueHFilesSize;
size = link.getFileStatus(fs).getLen();
}
// If it has been counted, do not double count
count = filesMap.get(v44);
if (count != null) {
c = count.intValue();
} else {
al.addAndGet(size);
}
filesMap.put(v44, ++c);
}
}
});
} | 3.26 |
hbase_SnapshotInfo_isCorrupted_rdh | /**
* Returns true if the file is corrupted
*/
public boolean isCorrupted() {
return this.corrupted;
} | 3.26 |
hbase_SnapshotInfo_getStoreFilesSize_rdh | /**
* Returns the total size of the store files referenced by the snapshot
*/
public long getStoreFilesSize() {
return (hfilesSize.get() + hfilesArchiveSize.get()) + hfilesMobSize.get();
} | 3.26 |
hbase_SnapshotInfo_getLogsCount_rdh | /**
* Returns the number of available log files
*/
public int getLogsCount() {
return logsCount.get();
} | 3.26 |
hbase_SnapshotInfo_getMissingLogsCount_rdh | /**
* Returns the number of missing log files
*/
public int getMissingLogsCount() {
return logsMissing.get();
} | 3.26 |
hbase_SnapshotInfo_printFiles_rdh | /**
* Collect the hfiles and logs statistics of the snapshot and dump the file list if requested and
* the collected information.
*/
private void printFiles(final boolean showFiles, final boolean showStats) throws IOException {
if (showFiles) {
System.out.println("Snapshot Files");
System.out.println("----------------------------------------");
}
// Collect information about hfiles and logs in the snapshot
final SnapshotProtos.SnapshotDescription snapshotDesc = snapshotManifest.getSnapshotDescription();
final String table = snapshotDesc.getTable();
final SnapshotDescription desc = ProtobufUtil.createSnapshotDesc(snapshotDesc);
final SnapshotStats stats = new SnapshotStats(this.getConf(), this.fs, desc);
SnapshotReferenceUtil.concurrentVisitReferencedFiles(getConf(), fs, snapshotManifest, "SnapshotInfo", new SnapshotReferenceUtil.SnapshotVisitor() {
@Override
public void storeFile(final RegionInfo regionInfo, final String family, final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
if (storeFile.hasReference()) {
return;
}
SnapshotStats.FileInfo v20 = stats.addStoreFile(regionInfo, family, storeFile, null);
if (showFiles) {
String state = v20.getStateToString();
System.out.printf("%8s %s/%s/%s/%s %s%n", v20.isMissing() ? "-" : fileSizeToString(v20.getSize()), table, regionInfo.getEncodedName(), family, storeFile.getName(), state == null ? "" : ("(" + state) + ")");
}
}
});
// Dump the stats
System.out.println();
if (stats.isSnapshotCorrupted()) {
System.out.println("**************************************************************");
System.out.printf("BAD SNAPSHOT: %d hfile(s) and %d log(s) missing.%n", stats.getMissingStoreFilesCount(), stats.getMissingLogsCount());System.out.printf(" %d hfile(s) corrupted.%n", stats.getCorruptedStoreFilesCount());
System.out.println("**************************************************************");
}
if (showStats) {
System.out.printf("%d HFiles (%d in archive, %d in mob storage), total size %s " + "(%.2f%% %s shared with the source table, %.2f%% %s in mob dir)%n", stats.getStoreFilesCount(), stats.getArchivedStoreFilesCount(), stats.getMobStoreFilesCount(), fileSizeToString(stats.getStoreFilesSize()), stats.getSharedStoreFilePercentage(), fileSizeToString(stats.getSharedStoreFilesSize()), stats.getMobStoreFilePercentage(), fileSizeToString(stats.getMobStoreFilesSize()));
System.out.printf("%d Logs, total size %s%n", stats.getLogsCount(), fileSizeToString(stats.getLogsSize()));
System.out.println();
}
} | 3.26 |
hbase_SnapshotInfo_getLogsSize_rdh | /**
* Returns the total log size
*/
public long getLogsSize() {
return logSize.get();
} | 3.26 |
hbase_SnapshotInfo_getArchivedStoreFilesCount_rdh | /**
* Returns the number of available store files in the archive
*/
public int getArchivedStoreFilesCount() {
return hfilesArchiveCount.get();
} | 3.26 |
hbase_SnapshotInfo_getSize_rdh | /**
* Returns the file size
*/
public long getSize() {
return this.size;
} | 3.26 |
hbase_SnapshotInfo_getArchivedStoreFileSize_rdh | /**
* Returns the total size of the store files in the archive
*/
public long getArchivedStoreFileSize() {
return hfilesArchiveSize.get();
} | 3.26 |
hbase_DefaultEnvironmentEdge_currentTime_rdh | /**
* {@inheritDoc }
* <p>
* This implementation returns {@link System#currentTimeMillis()}
* </p>
*/
@Override
public long currentTime() {
return System.currentTimeMillis();
} | 3.26 |
hbase_ByteBufferListOutputStream_releaseResources_rdh | /**
* Release the resources it uses (The ByteBuffers) which are obtained from pool. Call this only
* when all the data is fully used. And it must be called at the end of usage else we will leak
* ByteBuffers from pool.
*/
public void releaseResources() {
try {
close();
} catch (IOException e) {
LOG.debug(e.toString(), e);
}
// Return back all the BBs to pool
for (ByteBuff buf : this.allBufs) {
buf.release();
}
this.allBufs = null;
this.curBuf = null;
} | 3.26 |
hbase_ByteBufferListOutputStream_getByteBuffers_rdh | /**
* We can be assured that the buffers returned by this method are all flipped
*
* @return list of bytebuffers
*/
public List<ByteBuffer> getByteBuffers() {
if (!this.lastBufFlipped) {
this.lastBufFlipped = true;
// All the other BBs are already flipped while moving to the new BB.
curBuf.flip();
}
List<ByteBuffer> bbs = new ArrayList<>(this.allBufs.size());
for (SingleByteBuff bb : this.allBufs) {
bbs.add(bb.nioByteBuffers()[0]);
}
return bbs;
} | 3.26 |
hbase_ExpiredMobFileCleaner_cleanExpiredMobFiles_rdh | /**
* Cleans the MOB files when they're expired and their min versions are 0. If the latest timestamp
* of Cells in a MOB file is older than the TTL in the column family, it's regarded as expired.
* This cleaner deletes them. Suppose at a time T0 the cells in a mob file M0 expire. If a user
* starts a scan before T0, those mob cells are visible to it, and the scan may still be running
* after T0. If at a later time T1 the mob file M0 has expired and a cleaner starts, M0 is
* archived rather than deleted, so it can still be read from the archive directory.
*
* @param tableName
* The current table name.
* @param family
* The current family.
*/
public void cleanExpiredMobFiles(String tableName, ColumnFamilyDescriptor family) throws IOException {
Configuration conf = getConf();
TableName tn = TableName.valueOf(tableName);
FileSystem fs = FileSystem.get(conf);
LOG.info((("Cleaning the expired MOB files of " + family.getNameAsString()) + " in ") + tableName);
// disable the block cache.
Configuration copyOfConf = new Configuration(conf);
copyOfConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0F);
CacheConfig cacheConfig = new CacheConfig(copyOfConf);
MobUtils.cleanExpiredMobFiles(fs, conf, tn, family, cacheConfig, EnvironmentEdgeManager.currentTime());
} | 3.26 |
hbase_ScanQueryMatcher_preCheck_rdh | /**
* Check before the delete logic.
*
* @return null means continue.
*/
protected final MatchCode preCheck(Cell cell) {
if (currentRow == null) {
// Since the curCell is null it means we are already sure that we have moved over to the next
// row
return MatchCode.DONE;
}
// if row key is changed, then we know that we have moved over to the next row
if (rowComparator.compareRows(currentRow, cell) != 0) {
return MatchCode.DONE;
}
if (this.columns.done()) {
return MatchCode.SEEK_NEXT_ROW;
}
long timestamp = cell.getTimestamp();
// check if this is a fake cell. The fake cell is an optimization, we should make the scanner
// seek to next column or next row. See StoreFileScanner.requestSeek for more details.
// check for early out based on timestamp alone
if ((timestamp == PrivateConstants.OLDEST_TIMESTAMP) || columns.isDone(timestamp)) {
return columns.getNextRowOrNextColumn(cell);
}
// check if the cell is expired by cell TTL
if (m0(cell, this.oldestUnexpiredTS, this.now)) {
return MatchCode.SKIP;
}
return null;
} | 3.26 |
hbase_ScanQueryMatcher_clearCurrentRow_rdh | /**
* Make {@link #currentRow()} return null.
*/
public void clearCurrentRow() {
currentRow = null;
} | 3.26 |
hbase_ScanQueryMatcher_m0_rdh | /**
* Returns true if the cell is expired
*/
private static boolean m0(final Cell cell, final long oldestTimestamp, final long now) {
// Look for a TTL tag first. Use it instead of the family setting if
// found. If a cell has multiple TTLs, resolve the conflict by using the
// first tag encountered.
Iterator<Tag> i = PrivateCellUtil.tagsIterator(cell);
while (i.hasNext()) {
Tag t = i.next();
if (TagType.TTL_TAG_TYPE == t.getType()) {
// Unlike in schema cell TTLs are stored in milliseconds, no need
// to convert
long ts = cell.getTimestamp();
assert t.getValueLength() == Bytes.SIZEOF_LONG;
long ttl = Tag.getValueAsLong(t);
if ((ts + ttl) < now) {
return true;
}
// Per cell TTLs cannot extend lifetime beyond family settings, so
// fall through to check that
break;
}
}
return false;
} | 3.26 |
hbase_ScanQueryMatcher_checkColumn_rdh | // Used only for testing purposes
static MatchCode checkColumn(ColumnTracker columnTracker, byte[] bytes, int offset, int length, long ttl, byte type, boolean ignoreCount) throws IOException {
KeyValue kv = KeyValueUtil.createFirstOnRow(HConstants.EMPTY_BYTE_ARRAY, 0, 0, HConstants.EMPTY_BYTE_ARRAY, 0, 0, bytes, offset, length);
MatchCode matchCode = columnTracker.checkColumn(kv, type);
if (matchCode == MatchCode.INCLUDE) {
return columnTracker.checkVersions(kv, ttl, type, ignoreCount);
}
return matchCode;
} | 3.26 |
hbase_ScanQueryMatcher_currentRow_rdh | /**
* Returns a cell represent the current row
*/
public Cell currentRow() {
return currentRow;
} | 3.26 |
hbase_ScanQueryMatcher_setToNewRow_rdh | /**
* Set the row when there is change in row
*/
public void setToNewRow(Cell currentRow) {
this.currentRow = currentRow;
columns.reset();
reset();
} | 3.26 |
hbase_ScanQueryMatcher_m1_rdh | /**
*
* @param nextIndexed
* the key of the next entry in the block index (if any)
* @param currentCell
* The Cell we're using to calculate the seek key
* @return result of the compare between the indexed key and the key portion of the passed cell
*/
public int m1(Cell nextIndexed, Cell currentCell) {
ColumnCount nextColumn = columns.getColumnHint();
if (nextColumn == null) {
return PrivateCellUtil.compareKeyBasedOnColHint(rowComparator,
nextIndexed, currentCell, 0, 0, null, 0, 0, PrivateConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode());
} else {
return PrivateCellUtil.compareKeyBasedOnColHint(rowComparator, nextIndexed, currentCell, currentCell.getFamilyOffset(), currentCell.getFamilyLength(), nextColumn.getBuffer(),
nextColumn.getOffset(), nextColumn.getLength(), HConstants.LATEST_TIMESTAMP, Type.Maximum.getCode());
}
} | 3.26 |
hbase_ScanQueryMatcher_getStartKey_rdh | /**
* Returns the start key
*/
public Cell getStartKey() {
return startKey;
} | 3.26 |
hbase_ByteBufferOutputStream_getByteBuffer_rdh | /**
* This flips the underlying BB so be sure to use it _last_!
*/
public ByteBuffer getByteBuffer() {
curBuf.flip();
return curBuf;
} | 3.26 |
hbase_ByteBufferOutputStream_writeInt_rdh | /**
* Writes an <code>int</code> to the underlying output stream as four bytes, high byte first.
*
* @param i
* the <code>int</code> to write
* @throws IOException
* if an I/O error occurs.
*/
@Override
public void writeInt(int i) throws IOException {
checkSizeAndGrow(Bytes.SIZEOF_INT);
ByteBufferUtils.putInt(this.curBuf, i);
} | 3.26 |
hbase_ByteBufferOutputStream_writeTo_rdh | /**
* Writes the complete contents of this byte buffer output stream to the specified output stream
* argument.
*
* @param out
* the output stream to which to write the data.
* @exception IOException
* if an I/O error occurs.
*/
public void writeTo(OutputStream out) throws IOException {
WritableByteChannel channel = Channels.newChannel(out);
ByteBuffer bb = curBuf.duplicate();
bb.flip();
channel.write(bb);
} | 3.26 |
hbase_ByteBufferOutputStream_write_rdh | // OutputStream
@Override
public void write(int b) throws IOException {
checkSizeAndGrow(Bytes.SIZEOF_BYTE);
curBuf.put(((byte) (b)));
} | 3.26 |
hbase_User_getToken_rdh | /**
* Returns the Token of the specified kind associated with this user, or null if the Token is not
* present.
*
* @param kind
* the kind of token
* @param service
* service on which the token is supposed to be used
* @return the token of the specified kind.
*/
public Token<?> getToken(String kind, String service) throws IOException {
for (Token<?> token : ugi.getTokens()) {
if (token.getKind().toString().equals(kind) && ((service != null) && token.getService().toString().equals(service))) {
return token;
}
}
return null;
} | 3.26 |
hbase_User_isHBaseSecurityEnabled_rdh | /**
* Returns whether or not secure authentication is enabled for HBase. Note that HBase security
* requires HDFS security to provide any guarantees, so it is recommended that secure HBase should
* run on secure HDFS.
*/
public static boolean isHBaseSecurityEnabled(Configuration conf) {
return "kerberos".equalsIgnoreCase(conf.get(HBASE_SECURITY_CONF_KEY));
} | 3.26 |
hbase_User_addToken_rdh | /**
* Adds the given Token to the user's credentials.
*
* @param token
* the token to add
*/
public void addToken(Token<? extends TokenIdentifier> token) {
ugi.addToken(token);
} | 3.26 |
hbase_User_getTokens_rdh | /**
* Returns all the tokens stored in the user's credentials.
*/
public Collection<Token<? extends TokenIdentifier>> getTokens() {
return ugi.getTokens();
} | 3.26 |
hbase_User_createUserForTesting_rdh | /**
* Create a user for testing.
*
* @see User#createUserForTesting(org.apache.hadoop.conf.Configuration, String, String[])
*/
public static User createUserForTesting(Configuration conf, String name, String[] groups) {
synchronized(UserProvider.class) {
if (!(UserProvider.groups instanceof TestingGroups)) {
UserProvider.groups = new TestingGroups(UserProvider.groups);
}
}
((TestingGroups) (UserProvider.groups)).setUserGroups(name, groups);
return new SecureHadoopUser(UserGroupInformation.createUserForTesting(name, groups));
} | 3.26 |
hbase_User_create_rdh | /**
* Wraps an underlying {@code UserGroupInformation} instance.
*
* @param ugi
* The base Hadoop user
*/
public static User create(UserGroupInformation ugi) {
if (ugi == null) {
return null;
}
return new SecureHadoopUser(ugi);
} | 3.26 |
hbase_User_getName_rdh | /**
* Returns the full user name. For Kerberos principals this will include the host and realm
* portions of the principal name.
*
* @return User full name.
*/
public String getName() {
return ugi.getUserName();
} | 3.26 |
hbase_User_isSecurityEnabled_rdh | /**
* Returns the result of {@code UserGroupInformation.isSecurityEnabled()}.
*/
public static boolean isSecurityEnabled() {
return UserGroupInformation.isSecurityEnabled();
} | 3.26 |
hbase_User_runAsLoginUser_rdh | /**
* Executes the given action as the login user
*/
@SuppressWarnings({ "rawtypes", "unchecked" })
public static <T> T runAsLoginUser(PrivilegedExceptionAction<T> action) throws IOException {
try {
Class c = Class.forName("org.apache.hadoop.security.SecurityUtil");
Class[] types = new Class[]{ PrivilegedExceptionAction.class };
Object[] args = new Object[]{ action };
return ((T) (Methods.call(c, null, "doAsLoginUser", types, args)));
} catch (Throwable e) {
throw new IOException(e);
}
} | 3.26 |
hbase_User_getCurrent_rdh | /**
* Returns the {@code User} instance within current execution context.
*/
public static User getCurrent() throws IOException {
User user = new SecureHadoopUser();
if (user.getUGI() == null) {
return null;
}
return user;
} | 3.26 |
hbase_User_isLoginFromKeytab_rdh | /**
* Returns true if user credentials are obtained from keytab.
*/
public boolean isLoginFromKeytab() {
return ugi.isFromKeytab();
} | 3.26 |
hbase_User_shouldLoginFromKeytab_rdh | /**
* In a secure environment, if a user has specified a keytab and principal, an HBase client will try
* to log in with them. Otherwise, the HBase client will try to obtain a ticket (through kinit) from
* the system.
*
* @param conf
* configuration file
* @return true if keytab and principal are configured
*/
public static boolean shouldLoginFromKeytab(Configuration conf) {
Optional<String> keytab = Optional.ofNullable(conf.get(AuthUtil.HBASE_CLIENT_KEYTAB_FILE));
Optional<String> principal = Optional.ofNullable(conf.get(AuthUtil.HBASE_CLIENT_KERBEROS_PRINCIPAL));
return keytab.isPresent() && principal.isPresent();
} | 3.26 |
hbase_User_login_rdh | /**
* Log in through the configured keytab and principal.
*
* @param keytabLocation
* location of keytab
* @param principalName
* principal in keytab
* @throws IOException
* exception from UserGroupInformation.loginUserFromKeytab
*/
public static void login(String keytabLocation, String principalName) throws IOException {
if (isSecurityEnabled()) {
UserGroupInformation.loginUserFromKeytab(principalName, keytabLocation);
}
} | 3.26 |
hbase_User_m0_rdh | /**
* Returns the list of groups of which this user is a member. On secure Hadoop this returns the
* group information for the user as resolved on the server. For 0.20 based Hadoop, the group
* names are passed from the client.
*/
public String[] m0() {
return ugi.getGroupNames();
} | 3.26 |
hbase_MetricsStochasticBalancerSourceImpl_updateStochasticCost_rdh | /**
* Reports stochastic load balancer costs to JMX
*/
@Override
public void updateStochasticCost(String tableName, String costFunctionName, String functionDesc, Double cost) {
if (((tableName == null) || (costFunctionName == null)) || (cost == null)) {
return;
}
if (functionDesc != null) {
costFunctionDescs.put(costFunctionName, functionDesc);
}
synchronized(stochasticCosts) {
Map<String, Double> costs = stochasticCosts.get(tableName);
if (costs == null) {
costs = new ConcurrentHashMap<>();
}
costs.put(costFunctionName, cost);
stochasticCosts.put(tableName, costs);
}
} | 3.26 |
hbase_MetricsStochasticBalancerSourceImpl_calcMruCap_rdh | /**
* Calculates the mru cache capacity from the metrics size
*/
private static int calcMruCap(int metricsSize) {
return ((int) (Math.ceil(metricsSize / MRU_LOAD_FACTOR))) + 1;
} | 3.26 |
hbase_HBaseServiceHandler_getTable_rdh | /**
* Creates and returns a Table instance from a given table name.
*
* @param tableName
* name of table
* @return Table object
* @throws IOException
* if getting the table fails
*/
protected Table getTable(final byte[] tableName) throws IOException {
String table = Bytes.toString(tableName);
return connectionCache.getTable(table);
} | 3.26 |
hbase_HBaseServiceHandler_getAdmin_rdh | /**
* Obtain HBaseAdmin. Creates the instance if it is not already created.
*/
protected Admin getAdmin() throws IOException {
return connectionCache.getAdmin();
} | 3.26 |
hbase_RawCell_cloneTags_rdh | /**
* Allows cloning the tags in the cell to a new byte[]
*
* @return the byte[] having the tags
*/
default byte[] cloneTags() {
return PrivateCellUtil.cloneTags(this);
} | 3.26 |
hbase_RawCell_createCell_rdh | /**
* Returns A new cell which is having the extra tags also added to it.
*/
public static Cell createCell(Cell cell, List<Tag> tags) {
return PrivateCellUtil.createCell(cell, tags);
} | 3.26 |
hbase_RawCell_checkForTagsLength_rdh | /**
* Check the length of tags. If it is invalid, throw IllegalArgumentException
*
* @param tagsLength
* the given length of tags
* @throws IllegalArgumentException
* if tagslength is invalid
*/
public static void checkForTagsLength(int tagsLength) {
if (tagsLength > MAX_TAGS_LENGTH) {
throw new IllegalArgumentException((("tagslength " + tagsLength) + " > ") + MAX_TAGS_LENGTH);
}
} | 3.26 |
hbase_RawCell_getTag_rdh | /**
* Returns the specific tag of the given type
*
* @param type
* the type of the tag
* @return the specific tag if available or null
*/
default Optional<Tag> getTag(byte type) {
return PrivateCellUtil.getTag(this, type);
} | 3.26 |
hbase_RawCell_getTags_rdh | /**
* Creates a list of tags in the current cell
*
* @return a list of tags
*/
default Iterator<Tag> getTags() {
return PrivateCellUtil.tagsIterator(this);
} | 3.26 |
hbase_RSGroupAdminServiceImpl_fillTables_rdh | // for backward compatibility
private RSGroupInfo fillTables(RSGroupInfo rsGroupInfo) throws IOException {
return RSGroupUtil.fillTables(rsGroupInfo, master.getTableDescriptors().getAll().values());
} | 3.26 |
hbase_StripeCompactionPolicy_setMajorRange_rdh | /**
* Sets compaction "major range". Major range is the key range for which all the files are
* included, so they can be treated like major-compacted files.
*
* @param startRow
* Left boundary, inclusive.
* @param endRow
* Right boundary, exclusive.
*/
public void setMajorRange(byte[] startRow, byte[] endRow) {
this.majorRangeFromRow = startRow;
this.majorRangeToRow = endRow;
} | 3.26 |
hbase_StripeCompactionPolicy_selectSimpleCompaction_rdh | /**
* Selects the compaction of a single stripe using default policy.
*
* @param sfs
* Files.
* @param allFilesOnly
* Whether a compaction of all-or-none files is needed.
* @return The resulting selection.
*/
private List<HStoreFile> selectSimpleCompaction(List<HStoreFile> sfs, boolean allFilesOnly, boolean isOffpeak, boolean forceCompact) {
int minFilesLocal = Math.max(allFilesOnly ? sfs.size() : 0, this.config.getStripeCompactMinFiles());
int maxFilesLocal = Math.max(this.config.getStripeCompactMaxFiles(), minFilesLocal);
List<HStoreFile> selected = stripePolicy.applyCompactionPolicy(sfs, false, isOffpeak, minFilesLocal, maxFilesLocal);
if ((forceCompact && ((selected == null) || selected.isEmpty())) && (!sfs.isEmpty())) {
return stripePolicy.selectCompactFiles(sfs, maxFilesLocal, isOffpeak);
}
return selected;
} | 3.26 |
hbase_StripeCompactionPolicy_setMajorRangeFull_rdh | /**
* Set major range of the compaction to the entire compaction range. See
* {@link #setMajorRange(byte[], byte[])}.
*/
public void setMajorRangeFull() {
setMajorRange(this.startRow, this.endRow);
} | 3.26 |
hbase_StripeCompactionPolicy_needsSingleStripeCompaction_rdh | /**
*
* @param si
* StoreFileManager.
* @return Whether any stripe potentially needs compaction.
*/
protected boolean needsSingleStripeCompaction(StripeInformationProvider si) {
int minFiles = this.config.getStripeCompactMinFiles();
for (List<HStoreFile> stripe : si.getStripes()) {
if (stripe.size() >= minFiles) {
return true;
}
}
return false;
} | 3.26 |
hbase_TableSnapshotInputFormat_setInput_rdh | /**
* Configures the job to use TableSnapshotInputFormat to read from a snapshot.
*
* @param job
* the job to configure
* @param snapshotName
* the name of the snapshot to read from
* @param restoreDir
* a temporary directory to restore the snapshot into. Current user
* should have write permissions to this directory, and this should not
* be a subdirectory of rootdir. After the job is finished, restoreDir
* can be deleted.
* @param splitAlgo
* split algorithm to generate splits from region
* @param numSplitsPerRegion
* how many input splits to generate per one region
* @throws IOException
* if an error occurs
*/
public static void setInput(JobConf job, String snapshotName, Path restoreDir, RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion)
throws IOException {
TableSnapshotInputFormatImpl.setInput(job, snapshotName, restoreDir, splitAlgo, numSplitsPerRegion);
} | 3.26 |
hbase_StartMiniClusterOption_builder_rdh | /**
* Returns a new builder.
*/
public static Builder builder() {
return new Builder();
} | 3.26 |
hbase_LzoCodec_getBufferSize_rdh | // Package private
static int getBufferSize(Configuration conf) {
return conf.getInt(LZO_BUFFER_SIZE_KEY, conf.getInt(CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_KEY, CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_DEFAULT));
} | 3.26 |