name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hbase_TableDescriptorBuilder_setSplitEnabled_rdh | /**
 * Sets the table region split enabled flag.
 *
 * @param isEnable
 * True to enable region splits.
* @return the modifyable TD
*/
public ModifyableTableDescriptor setSplitEnabled(final boolean isEnable) {
return setValue(SPLIT_ENABLED_KEY, Boolean.toString(isEnable));
} | 3.26 |
hbase_TableDescriptorBuilder_setRegionMemStoreReplication_rdh | /**
* Enable or Disable the memstore replication from the primary region to the replicas. The
* replication will be used only for meta operations (e.g. flush, compaction, ...)
*
* @param memstoreReplication
* true if the new data written to the primary region should be
 * replicated. false if the secondaries can tolerate having new
* data only when the primary flushes the memstore.
* @return the modifyable TD
 */
public ModifyableTableDescriptor setRegionMemStoreReplication(boolean memstoreReplication) {
return setValue(REGION_MEMSTORE_REPLICATION_KEY, Boolean.toString(memstoreReplication));
} | 3.26 |
hbase_TableDescriptorBuilder_toString_rdh | /**
* Returns Name of this table and then a map of all of the column family descriptors.
*/
@Override
public String toString() {
StringBuilder s = new StringBuilder();
s.append('\'').append(Bytes.toString(name.getName())).append('\'');
s.append(m5(true));
families.values().forEach(f -> s.append(", ").append(f));
return s.toString();
} | 3.26 |
hbase_TableDescriptorBuilder_setMergeEnabled_rdh | /**
 * Sets the table region merge enabled flag.
 *
 * @param isEnable
 * True to enable region merges.
* @return the modifyable TD
*/
public ModifyableTableDescriptor setMergeEnabled(final boolean isEnable) {
  return setValue(MERGE_ENABLED_KEY, Boolean.toString(isEnable));
} | 3.26 |
hbase_TableDescriptorBuilder_m7_rdh | /**
* Returns the ColumnFamilyDescriptor for a specific column family with name as specified by the
* parameter column.
*
* @param column
* Column family name
 * @return the ColumnFamilyDescriptor for the passed family name, or null if no such family exists.
*/
@Override
public ColumnFamilyDescriptor m7(final byte[] column) {
return this.families.get(column);
} | 3.26 |
hbase_TableDescriptorBuilder_setMaxFileSize_rdh | /**
 * Sets the maximum size up to which a region can grow, after which a region split is
* triggered. The region size is represented by the size of the biggest store file in that
* region, i.e. If the biggest store file grows beyond the maxFileSize, then the region split is
* triggered. This defaults to a value of 256 MB.
* <p>
 * This is not an absolute value and might vary. If a single row exceeds the
 * maxFileSize, then the storeFileSize will be greater than maxFileSize since a single row cannot
 * be split across multiple regions.
* </p>
*
* @param maxFileSize
* The maximum file size that a store file can grow to before a split is
* triggered.
* @return the modifyable TD
*/
public ModifyableTableDescriptor setMaxFileSize(long maxFileSize) {
return setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
} | 3.26 |
hbase_TableDescriptorBuilder_isMetaRegion_rdh | /**
* Checks if this table is <code> hbase:meta </code> region.
*
* @return true if this table is <code> hbase:meta </code> region
*/
@Override
public boolean isMetaRegion() {
return getOrDefault(IS_META_KEY, Boolean::valueOf, false);
} | 3.26 |
hbase_TableDescriptorBuilder_getRegionReplication_rdh | /**
* Returns the configured replicas per region
*/
@Override
public int getRegionReplication() {
  return getOrDefault(REGION_REPLICATION_KEY, Integer::valueOf, DEFAULT_REGION_REPLICATION);
} | 3.26 |
hbase_TableDescriptorBuilder_setNormalizerTargetRegionCount_rdh | /**
 * Sets the target region count for table normalization.
*
* @param regionCount
* the target region count.
* @return the modifyable TD
*/
public ModifyableTableDescriptor setNormalizerTargetRegionCount(final int regionCount) {
return setValue(NORMALIZER_TARGET_REGION_COUNT_KEY, Integer.toString(regionCount));
} | 3.26 |
hbase_TableDescriptorBuilder_m6_rdh | /**
* Returns true if the read-replicas memstore replication is enabled.
 */
@Override
public boolean m6() {
return getOrDefault(REGION_MEMSTORE_REPLICATION_KEY, Boolean::valueOf, DEFAULT_REGION_MEMSTORE_REPLICATION);
} | 3.26 |
hbase_TableDescriptorBuilder_removeColumnFamily_rdh | /**
* Removes the ColumnFamilyDescriptor with name specified by the parameter column from the table
* descriptor
*
* @param column
* Name of the column family to be removed.
 * @return the ColumnFamilyDescriptor that was removed, or null if the family was not present.
*/
public ColumnFamilyDescriptor removeColumnFamily(final byte[] column) {
return this.families.remove(column);
} | 3.26 |
hbase_TableDescriptorBuilder_isCompactionEnabled_rdh | /**
 * Check if the compaction enabled flag of the table is true. If the flag is false then no
 * minor/major compactions will actually be performed.
*
* @return true if table compaction enabled
*/
@Override
public boolean isCompactionEnabled() {
return getOrDefault(COMPACTION_ENABLED_KEY, Boolean::valueOf, DEFAULT_COMPACTION_ENABLED);
} | 3.26 |
hbase_TableDescriptorBuilder_setValue_rdh | /**
* Setter for storing metadata as a (key, value) pair in {@link #values} map
*
* @param key
* The key.
* @param value
* The value. If null, removes the setting.
*/
public ModifyableTableDescriptor setValue(final Bytes key, final Bytes value) {
if ((value == null) || (value.getLength() == 0)) {
values.remove(key);
} else {
values.put(key, value);
}
return this;
} | 3.26 |
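The same setter is reachable through the public TableDescriptorBuilder API. A minimal sketch of the null-removal behaviour described above, assuming the String overload of setValue forwards to the method shown here; the table name and the key "demo.metadata" are hypothetical:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

class SetValueExample {
  static TableDescriptor demo() {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"));
    builder.setValue("demo.metadata", "42");   // stores the (key, value) pair
    builder.setValue("demo.metadata", null);   // a null value removes the setting again
    return builder.build();                    // the built descriptor carries no "demo.metadata" entry
  }
}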
hbase_TableDescriptorBuilder_setRegionReplication_rdh | /**
* Sets the number of replicas per region.
*
* @param regionReplication
* the replication factor per region
* @return the modifyable TD
*/
public ModifyableTableDescriptor setRegionReplication(int regionReplication) {
  return setValue(REGION_REPLICATION_KEY, Integer.toString(regionReplication));
} | 3.26 |
hbase_TableDescriptorBuilder_getNormalizerTargetRegionCount_rdh | /**
 * Check if there is a target region count. If so, the normalize plan will be calculated based
 * on the target region count.
 *
 * @return the target region count, or -1 if not set
*/
@Override
public int getNormalizerTargetRegionCount() {
return getOrDefault(NORMALIZER_TARGET_REGION_COUNT_KEY, Integer::valueOf, Integer.valueOf(-1));
} | 3.26 |
hbase_TableDescriptorBuilder_hashCode_rdh | /**
* Returns hash code
*/
@Override
public int hashCode() {
int result = this.name.hashCode();
if (this.families.size() > 0) {
for (ColumnFamilyDescriptor e : this.families.values()) {
result ^= e.hashCode();
}
}
result ^= values.hashCode();
return result;
} | 3.26 |
hbase_TableDescriptorBuilder_setRegionSplitPolicyClassName_rdh | /**
* This sets the class associated with the region split policy which determines when a region
* split should occur. The class used by default is defined in
* org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
*
* @param clazz
* the class name
* @return the modifyable TD
*/
public ModifyableTableDescriptor setRegionSplitPolicyClassName(String clazz) {
return setValue(SPLIT_POLICY_KEY, clazz);
} | 3.26 |
hbase_TableDescriptorBuilder_setReplicationScope_rdh | /**
 * Sets the replication scope for all and only the column families already in the builder. Column
 * families added later won't be backfilled with this replication scope.
*
* @param scope
* replication scope
* @return a TableDescriptorBuilder
*/
public TableDescriptorBuilder setReplicationScope(int scope) {
Map<byte[], ColumnFamilyDescriptor> newFamilies = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
newFamilies.putAll(desc.families);
newFamilies.forEach((cf, cfDesc) -> {
desc.removeColumnFamily(cf);
desc.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cfDesc).setScope(scope).build());
});
return this;
} | 3.26 |
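A short sketch of the "existing families only" behaviour described above. The table and family names are illustrative, and HConstants.REPLICATION_SCOPE_GLOBAL is assumed to be the scope value intended for cross-cluster replication:

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

class ReplicationScopeExample {
  static TableDescriptor demo() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf1"))
        // applies only to cf1, which is already present in the builder at this point
        .setReplicationScope(HConstants.REPLICATION_SCOPE_GLOBAL)
        // cf2 is added afterwards and keeps the default (local) scope
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf2"))
        .build();
  }
}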
hbase_TableDescriptorBuilder_setNormalizerTargetRegionSize_rdh | /**
 * Sets the target region size for table normalization.
*
* @param regionSize
* the target region size.
* @return the modifyable TD
*/
public ModifyableTableDescriptor setNormalizerTargetRegionSize(final long regionSize) {
return setValue(NORMALIZER_TARGET_REGION_SIZE_MB_KEY, Long.toString(regionSize));
} | 3.26 |
hbase_TableDescriptorBuilder_setCompactionEnabled_rdh | /**
 * Sets the table compaction enabled flag.
 *
 * @param isEnable
 * True to enable compaction.
* @return the modifyable TD
*/
public ModifyableTableDescriptor setCompactionEnabled(final boolean isEnable) {
return setValue(COMPACTION_ENABLED_KEY, Boolean.toString(isEnable));
} | 3.26 |
hbase_TableDescriptorBuilder_setReadOnly_rdh | /**
* Setting the table as read only sets all the columns in the table as read only. By default all
* tables are modifiable, but if the readOnly flag is set to true then the contents of the table
* can only be read but not modified.
*
* @param readOnly
* True if all of the columns in the table should be read only.
* @return the modifyable TD
*/
public ModifyableTableDescriptor setReadOnly(final boolean readOnly) {
return setValue(READONLY_KEY, Boolean.toString(readOnly));
} | 3.26 |
hbase_TableDescriptorBuilder_getDurability_rdh | /**
* Returns the durability setting for the table.
*
* @return durability setting for the table.
*/
@Override
public Durability getDurability() {
return getOrDefault(DURABILITY_KEY, Durability::valueOf, DEFAULT_DURABLITY);
} | 3.26 |
hbase_TableDescriptorBuilder_isNormalizationEnabled_rdh | /**
 * Check if the normalization enabled flag of the table is true. If the flag is false then the
 * region normalizer won't attempt to normalize this table.
*
* @return true if region normalization is enabled for this table
*/
@Override
public boolean isNormalizationEnabled() {
return getOrDefault(NORMALIZATION_ENABLED_KEY, Boolean::valueOf, false);
} | 3.26 |
hbase_TableDescriptorBuilder_setCoprocessor_rdh | /**
* Add a table coprocessor to this table. The coprocessor type must be
* org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class
* can be loaded or not. Whether a coprocessor is loadable or not will be determined when a
* region is opened.
*
* @throws IOException
* any illegal parameter key/value
* @return the modifyable TD
*/
public ModifyableTableDescriptor setCoprocessor(CoprocessorDescriptor cp) throws IOException {
checkHasCoprocessor(cp.getClassName());
if (cp.getPriority() < 0) {
throw new IOException("Priority must be bigger than or equal with zero, current:" + cp.getPriority());
}
// Validate parameter kvs and then add key/values to kvString.
StringBuilder kvString = new StringBuilder();
for (Map.Entry<String, String> e : cp.getProperties().entrySet()) {
if (!e.getKey().matches(CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) {
throw new IOException("Illegal parameter key = " + e.getKey());
}
if (!e.getValue().matches(CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) {
throw new IOException((("Illegal parameter (" + e.getKey()) + ") value = ") + e.getValue());
}
if (kvString.length() != 0) {
kvString.append(',');
}
kvString.append(e.getKey());
kvString.append('=');
kvString.append(e.getValue());
}
String value = (((((cp.getJarPath().orElse("") + "|") + cp.getClassName()) + "|") + Integer.toString(cp.getPriority())) + "|") + kvString.toString();
return m8(value);
} | 3.26 |
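A hedged usage sketch of the coprocessor attachment described above, built with CoprocessorDescriptorBuilder from the HBase client API; the observer class, jar path, and property are hypothetical and only need to satisfy the key/value patterns validated in the method:

import java.io.IOException;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.CoprocessorDescriptor;
import org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

class CoprocessorExample {
  static TableDescriptor demo() throws IOException {
    CoprocessorDescriptor cp = CoprocessorDescriptorBuilder
        .newBuilder("org.example.MyRegionObserver")            // hypothetical observer class
        .setJarPath("hdfs:///user/hbase/my-coprocessor.jar")   // hypothetical jar location
        .setPriority(Coprocessor.PRIORITY_USER)
        .setProperty("arg1", "value1")                         // must match the key/value patterns checked above
        .build();
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setCoprocessor(cp)   // throws IOException on an illegal property key or value
        .build();
  }
}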
hbase_TableDescriptorBuilder_removeValue_rdh | /**
* Remove metadata represented by the key from the {@link #values} map
*
* @param key
* Key whose key and value we're to remove from TableDescriptor parameters.
* @return the modifyable TD
*/
public ModifyableTableDescriptor removeValue(final byte[] key) {
return removeValue(new Bytes(key));
} | 3.26 |
hbase_TableDescriptorBuilder_getMemStoreFlushSize_rdh | /**
* Returns the size of the memstore after which a flush to filesystem is triggered.
*
* @return memory cache flush size for each hregion, -1 if not set.
* @see #setMemStoreFlushSize(long)
*/
@Override
public long getMemStoreFlushSize() {
return getOrDefault(MEMSTORE_FLUSHSIZE_KEY, Long::valueOf, ((long) (-1)));
} | 3.26 |
hbase_TableDescriptorBuilder_getNormalizerTargetRegionSize_rdh | /**
 * Check if there is a target region size. If so, the normalize plan will be calculated based
 * on the target region size.
 *
 * @return the target region size, or -1 if not set
*/
@Override
public long getNormalizerTargetRegionSize() {
long target_region_size = getOrDefault(NORMALIZER_TARGET_REGION_SIZE_MB_KEY, Long::valueOf, Long.valueOf(-1));
return target_region_size == Long.valueOf(-1) ? getOrDefault(NORMALIZER_TARGET_REGION_SIZE_KEY, Long::valueOf, Long.valueOf(-1)) : target_region_size;
} | 3.26 |
hbase_TableDescriptorBuilder_setColumnFamily_rdh | /**
* Adds a column family. For the updating purpose please use
* {@link #modifyColumnFamily(ColumnFamilyDescriptor)} instead.
*
* @param family
* to add.
* @return the modifyable TD
*/
public ModifyableTableDescriptor setColumnFamily(final ColumnFamilyDescriptor family) {
  if ((family.getName() == null) || (family.getName().length <= 0)) {
    throw new IllegalArgumentException("Family name cannot be null or empty");
  }
  int flength = (family.getName() == null) ? 0 : family.getName().length;
  if (flength > Byte.MAX_VALUE) {
    throw new IllegalArgumentException("The length of family name is bigger than " + Byte.MAX_VALUE);
  }
  if (hasColumnFamily(family.getName())) {
    throw new IllegalArgumentException(("Family '" + family.getNameAsString()) + "' already exists so cannot be added");
  }
  return m4(family);
} | 3.26 |
hbase_TableDescriptorBuilder_isSplitEnabled_rdh | /**
* Check if the split enable flag of the table is true. If flag is false then no split will be
* done.
*
* @return true if table region split enabled
*/
@Override
public boolean isSplitEnabled() {
return getOrDefault(SPLIT_ENABLED_KEY, Boolean::valueOf, DEFAULT_SPLIT_ENABLED);
} | 3.26 |
hbase_TableDescriptorBuilder_getMaxFileSize_rdh | /**
 * Returns the maximum size up to which a region can grow, after which a region split is
* triggered. The region size is represented by the size of the biggest store file in that
* region.
*
* @return max hregion size for table, -1 if not set.
* @see #setMaxFileSize(long)
*/
@Override
public long getMaxFileSize() {
  return getOrDefault(MAX_FILESIZE_KEY, Long::valueOf, ((long) (-1)));
} | 3.26 |
hbase_TableDescriptorBuilder_removeCoprocessor_rdh | /**
* Remove a coprocessor from those set on the table
*
* @param className
* Class name of the co-processor
*/
public void removeCoprocessor(String className) {
Bytes match = null;
Matcher keyMatcher;
Matcher valueMatcher;
for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
if (!keyMatcher.matches()) {
continue;
}
valueMatcher = CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes.toString(e.getValue().get()));
if (!valueMatcher.matches()) {
continue;
}
// get className and compare
String clazz = valueMatcher.group(2).trim();// classname is the 2nd field
// remove the CP if it is present
if (clazz.equals(className.trim())) {
match = e.getKey();
break;
}
}
// if we found a match, remove it
if (match != null) {
this.removeValue(match);
} else {
throw new IllegalArgumentException(String.format("coprocessor with class name %s was not found in the table attribute", className));
}
} | 3.26 |
hbase_TableDescriptorBuilder_isMetaTable_rdh | /**
* Checks if the table is a <code>hbase:meta</code> table
*
* @return true if table is <code> hbase:meta </code> region.
*/
@Override
public boolean isMetaTable() {
return isMetaRegion();
} | 3.26 |
hbase_TableDescriptorBuilder_isMergeEnabled_rdh | /**
* Check if the region merge enable flag of the table is true. If flag is false then no merge
* will be done.
*
* @return true if table region merge enabled
*/
@Override
public boolean isMergeEnabled() {
  return getOrDefault(MERGE_ENABLED_KEY, Boolean::valueOf, DEFAULT_MERGE_ENABLED);
} | 3.26 |
hbase_TableDescriptorBuilder_modifyColumnFamily_rdh | /**
* Modifies the existing column family.
*
* @param family
* to update
* @return this (for chained invocation)
*/
public ModifyableTableDescriptor modifyColumnFamily(final ColumnFamilyDescriptor family) {
if ((family.getName() == null) || (family.getName().length <= 0)) {
throw new IllegalArgumentException("Family name cannot be null or empty");
}
if (!hasColumnFamily(family.getName())) {
throw new IllegalArgumentException(("Column family '" + family.getNameAsString()) + "' does not exist");
}
return m4(family);
} | 3.26 |
hbase_TableDescriptorBuilder_m8_rdh | /**
* Add coprocessor to values Map
*
* @param specStr
 * The coprocessor specification, all in one String
* @return Returns <code>this</code>
*/
private ModifyableTableDescriptor m8(final String specStr) {
if (specStr == null) {
return this;
}
// generate a coprocessor key
int maxCoprocessorNumber = 0;
Matcher keyMatcher;
for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
if (!keyMatcher.matches()) {
  continue;
}
maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)), maxCoprocessorNumber);
}
maxCoprocessorNumber++;
String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
return setValue(new Bytes(Bytes.toBytes(key)), new Bytes(Bytes.toBytes(specStr)));
} | 3.26 |
hbase_TableDescriptorBuilder_hasColumnFamily_rdh | /**
* Checks to see if this table contains the given column family
*
* @param familyName
* Family name or column name.
* @return true if the table contains the specified family name
*/
@Override
public boolean hasColumnFamily(final byte[] familyName) {
return families.containsKey(familyName);
} | 3.26 |
hbase_TableDescriptorBuilder_hasCoprocessor_rdh | /**
* Check if the table has an attached co-processor represented by the name className
*
* @param classNameToMatch
* - Class name of the co-processor
 * @return true if the table has a co-processor with the given className
*/
@Override
public boolean hasCoprocessor(String classNameToMatch) {
return m9().stream().anyMatch(cp -> cp.getClassName().equals(classNameToMatch));
} | 3.26 |
hbase_TableDescriptorBuilder_toByteArray_rdh | /**
* Returns the bytes in pb format
*/
private byte[] toByteArray() {
return ProtobufUtil.prependPBMagic(ProtobufUtil.toTableSchema(this).toByteArray());
} | 3.26 |
hbase_TableDescriptorBuilder_setNormalizationEnabled_rdh | /**
 * Sets the table normalization enabled flag.
 *
 * @param isEnable
 * True to enable normalization.
* @return the modifyable TD
*/
public ModifyableTableDescriptor setNormalizationEnabled(final boolean isEnable) {
return setValue(NORMALIZATION_ENABLED_KEY, Boolean.toString(isEnable));
} | 3.26 |
hbase_TableDescriptorBuilder_m9_rdh | /**
 * Returns the list of attached co-processors represented by their class names.
 *
 * @return The list of co-processor classNames
*/
@Override
public List<CoprocessorDescriptor> m9() {
List<CoprocessorDescriptor> result = new ArrayList<>();
for (Map.Entry<Bytes, Bytes> e : getValues().entrySet()) {
String key = Bytes.toString(e.getKey().get()).trim();
if (CP_HTD_ATTR_KEY_PATTERN.matcher(key).matches()) {
toCoprocessorDescriptor(Bytes.toString(e.getValue().get()).trim()).ifPresent(result::add);
}
}
return result;
} | 3.26 |
hbase_TableDescriptorBuilder_setFlushPolicyClassName_rdh | /**
 * This sets the class associated with the flush policy which determines the stores that
 * need to be flushed when flushing a region. The class used by default is defined in
* org.apache.hadoop.hbase.regionserver.FlushPolicy.
*
* @param clazz
* the class name
* @return the modifyable TD
*/
public ModifyableTableDescriptor setFlushPolicyClassName(String clazz) {
return setValue(FLUSH_POLICY_KEY, clazz);
} | 3.26 |
hbase_TableDescriptorBuilder_getColumnFamilyNames_rdh | /**
* Returns all the column family names of the current table. The map of TableDescriptor contains
* mapping of family name to ColumnFamilyDescriptor. This returns all the keys of the family map
* which represents the column family names of the table.
*
* @return Immutable sorted set of the keys of the families.
*/
@Override
public Set<byte[]> getColumnFamilyNames() {
return Collections.unmodifiableSet(this.families.keySet());
} | 3.26 |
hbase_TableDescriptorBuilder_parseFrom_rdh | /**
*
* @param bytes
* A pb serialized {@link ModifyableTableDescriptor} instance with pb magic prefix
* @return An instance of {@link ModifyableTableDescriptor} made from <code>bytes</code>
* @see #toByteArray()
*/
private static TableDescriptor parseFrom(final byte[] bytes) throws DeserializationException {
if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
throw new DeserializationException("Expected PB encoded ModifyableTableDescriptor");
}
int pblen = ProtobufUtil.lengthOfPBMagic();
HBaseProtos.TableSchema.Builder builder = HBaseProtos.TableSchema.newBuilder();
try {
ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
return ProtobufUtil.toTableDescriptor(builder.build());
} catch (IOException e) {
throw new DeserializationException(e);
}
} | 3.26 |
hbase_TableDescriptorBuilder_newBuilder_rdh | /**
* Copy all values, families, and name from the input.
*
* @param desc
 * The descriptor to copy
* @return A clone of input
*/
public static TableDescriptorBuilder newBuilder(final TableDescriptor desc) {
return new TableDescriptorBuilder(desc);
} | 3.26 |
hbase_TableDescriptorBuilder_setMemStoreFlushSize_rdh | /**
 * Sets the maximum size of the memstore after which the contents of the memstore are
* flushed to the filesystem. This defaults to a size of 64 MB.
*
* @param memstoreFlushSize
* memory cache flush size for each hregion
* @return the modifyable TD
*/
public ModifyableTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
return setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
} | 3.26 |
hbase_TableDescriptorBuilder_equals_rdh | /**
* Compare the contents of the descriptor with another one passed as a parameter. Checks if the
* obj passed is an instance of ModifyableTableDescriptor, if yes then the contents of the
* descriptors are compared.
*
* @param obj
* The object to compare
 * @return true if the contents of the two descriptors exactly match
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
  if (this == obj) {
return true;
}
if (obj instanceof ModifyableTableDescriptor) {
return TableDescriptor.COMPARATOR.compare(this, ((ModifyableTableDescriptor) (obj))) == 0;
}
return false;
} | 3.26 |
hbase_TableDescriptorBuilder_setDurability_rdh | /**
* Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT.
*
* @param durability
* enum value
* @return the modifyable TD
*/
public ModifyableTableDescriptor setDurability(Durability durability) {
return setValue(DURABILITY_KEY, durability.name());
} | 3.26 |
hbase_TableDescriptorBuilder_getValues_rdh | /**
* Getter for fetching an unmodifiable {@link #values} map.
*
* @return unmodifiable map {@link #values}.
* @see #values
*/
@Override
public Map<Bytes, Bytes> getValues() {
// shallow pointer copy
return Collections.unmodifiableMap(values);
} | 3.26 |
hbase_Cell_getType_rdh | /**
* Returns the type of cell in a human readable format using {@link Type}. Note : This does not
* expose the internal types of Cells like {@link KeyValue.Type#Maximum} and
* {@link KeyValue.Type#Minimum}
*
 * @return The data type of this cell: one of Put, Delete, etc.
*/
default Type getType() {
byte byteType = getTypeByte();
Type t = Type.CODE_ARRAY[byteType & 0xff];
if (t != null) {
return t;
}
throw new UnsupportedOperationException("Invalid type of cell " + byteType);
} | 3.26 |
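A small illustrative use of the high-level type exposed above, counting Put cells in a client Result; the helper class and method names in the sketch are hypothetical:

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Result;

class CellTypeExample {
  // Count how many of a Result's raw cells are plain Puts, using Cell.getType().
  static int countPuts(Result result) {
    int puts = 0;
    for (Cell cell : result.rawCells()) {
      if (cell.getType() == Cell.Type.Put) {
        puts++;
      }
    }
    return puts;
  }
}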
hbase_MemStoreFlusher_flushOneForGlobalPressure_rdh | /**
* The memstore across all regions has exceeded the low water mark. Pick one region to flush and
* flush it synchronously (this is called from the flush thread)
*
* @return true if successful
*/
private boolean flushOneForGlobalPressure(FlushType flushType) {
SortedMap<Long, Collection<HRegion>> regionsBySize = null;
switch (flushType) {
case ABOVE_OFFHEAP_HIGHER_MARK :
case ABOVE_OFFHEAP_LOWER_MARK :
regionsBySize = server.getCopyOfOnlineRegionsSortedByOffHeapSize();
break;
case ABOVE_ONHEAP_HIGHER_MARK :
case ABOVE_ONHEAP_LOWER_MARK :
default :
regionsBySize = server.getCopyOfOnlineRegionsSortedByOnHeapSize();
}
Set<HRegion> excludedRegions = new HashSet<>();
double secondaryMultiplier = ServerRegionReplicaUtil.getRegionReplicaStoreFileRefreshMultiplier(conf);
boolean flushedOne = false;
while (!flushedOne) {
// Find the biggest region that doesn't have too many storefiles (might be null!)
HRegion bestFlushableRegion = getBiggestMemStoreRegion(regionsBySize, excludedRegions, true);
// Find the biggest region, total, even if it might have too many flushes.
HRegion bestAnyRegion = getBiggestMemStoreRegion(regionsBySize, excludedRegions, false);
// Find the biggest region that is a secondary region
HRegion v7 = getBiggestMemStoreOfRegionReplica(regionsBySize, excludedRegions);
if (bestAnyRegion == null) {
// If bestAnyRegion is null, assign replica. It may be null too. Next step is check for null
bestAnyRegion = v7;
}
if (bestAnyRegion == null) {
LOG.error("Above memory mark but there are no flushable regions!");
return false;
}
HRegion regionToFlush;
long bestAnyRegionSize;
long bestFlushableRegionSize;
switch (flushType) {
case ABOVE_OFFHEAP_HIGHER_MARK :
case ABOVE_OFFHEAP_LOWER_MARK :
bestAnyRegionSize = bestAnyRegion.getMemStoreOffHeapSize();
bestFlushableRegionSize = getMemStoreOffHeapSize(bestFlushableRegion);
break;
case ABOVE_ONHEAP_HIGHER_MARK :
case ABOVE_ONHEAP_LOWER_MARK :
bestAnyRegionSize = bestAnyRegion.getMemStoreHeapSize();
bestFlushableRegionSize = getMemStoreHeapSize(bestFlushableRegion);
break;
default :
bestAnyRegionSize = bestAnyRegion.getMemStoreDataSize();
bestFlushableRegionSize = getMemStoreDataSize(bestFlushableRegion);
}
if (bestAnyRegionSize > (2 * bestFlushableRegionSize)) {
// Even if it's not supposed to be flushed, pick a region if it's more than twice
// as big as the best flushable one - otherwise when we're under pressure we make
// lots of little flushes and cause lots of compactions, etc, which just makes
// life worse!
if (LOG.isDebugEnabled()) {
LOG.debug(((((((("Under global heap pressure: " + "Region ") + bestAnyRegion.getRegionInfo().getRegionNameAsString()) + " has too many ") +
"store files, but is ") + TraditionalBinaryPrefix.long2String(bestAnyRegionSize, "", 1)) + " vs best flushable region's ") + TraditionalBinaryPrefix.long2String(bestFlushableRegionSize, "", 1)) + ". Choosing the bigger.");
}
regionToFlush = bestAnyRegion;
} else if (bestFlushableRegion == null) {
regionToFlush = bestAnyRegion;
} else {
regionToFlush = bestFlushableRegion;
}
long regionToFlushSize;
long bestRegionReplicaSize;
switch (flushType) {
case ABOVE_OFFHEAP_HIGHER_MARK :
case ABOVE_OFFHEAP_LOWER_MARK :
regionToFlushSize = regionToFlush.getMemStoreOffHeapSize();
bestRegionReplicaSize = getMemStoreOffHeapSize(v7);
break;
case ABOVE_ONHEAP_HIGHER_MARK :
case ABOVE_ONHEAP_LOWER_MARK :
regionToFlushSize = regionToFlush.getMemStoreHeapSize();
bestRegionReplicaSize = getMemStoreHeapSize(v7);
break;
default :
regionToFlushSize = regionToFlush.getMemStoreDataSize();
bestRegionReplicaSize = getMemStoreDataSize(v7);
}
if (((regionToFlush == null) || (regionToFlushSize == 0)) && (bestRegionReplicaSize == 0)) {
// A concurrency issue (such as splitting region) may happen such that the online region
// seen by getCopyOfOnlineRegionsSortedByXX() method is no longer eligible to
// getBiggestMemStoreRegion(). This means that we can come out of the loop
LOG.debug("Above memory mark but there is no flushable region");
  return false;
}
if ((regionToFlush == null) || (((v7 != null) && ServerRegionReplicaUtil.isRegionReplicaStoreFileRefreshEnabled(conf)) && (bestRegionReplicaSize > (secondaryMultiplier * regionToFlushSize)))) {
LOG.info((((("Refreshing storefiles of region " + v7) + " due to global heap pressure. Total memstore off heap size=") + TraditionalBinaryPrefix.long2String(server.getRegionServerAccounting().getGlobalMemStoreOffHeapSize(), "", 1)) + " memstore heap size=") + TraditionalBinaryPrefix.long2String(server.getRegionServerAccounting().getGlobalMemStoreHeapSize(), "", 1));
flushedOne = refreshStoreFilesAndReclaimMemory(v7);
if (!flushedOne) {
LOG.info(("Excluding secondary region " + v7) + " - trying to find a different region to refresh files.");
excludedRegions.add(v7);
}
} else {
LOG.info(((((((((("Flush of region " + regionToFlush) + " due to global heap pressure. ") + "Flush type=") + flushType.toString()) + ", Total Memstore Heap size=") + TraditionalBinaryPrefix.long2String(server.getRegionServerAccounting().getGlobalMemStoreHeapSize(), "", 1)) + ", Total Memstore Off-Heap size=") + TraditionalBinaryPrefix.long2String(server.getRegionServerAccounting().getGlobalMemStoreOffHeapSize(), "", 1)) + ", Region memstore size=") + TraditionalBinaryPrefix.long2String(regionToFlushSize, "", 1));
flushedOne = flushRegion(regionToFlush, true, null, FlushLifeCycleTracker.DUMMY);
if (!flushedOne) {
LOG.info(("Excluding unflushable region " + regionToFlush) + " - trying to find a different region to flush.");
excludedRegions.add(regionToFlush);
}
}
}
  return true;
} | 3.26 |
hbase_MemStoreFlusher_isAboveLowWaterMark_rdh | /**
* Return the FlushType if we're above the low watermark
*/
private FlushType isAboveLowWaterMark() {
return server.getRegionServerAccounting().isAboveLowWaterMark();
} | 3.26 |
hbase_MemStoreFlusher_getMemStoreHeapSize_rdh | /**
 * Returns the memstore heap size, or 0 if <code>r</code> is null
*/
private static long getMemStoreHeapSize(HRegion r) {
return r == null ? 0 : r.getMemStoreHeapSize();
} | 3.26 |
hbase_MemStoreFlusher_m0_rdh | /**
* Returns True if we have been delayed > <code>maximumWait</code> milliseconds.
*/
public boolean m0(final long maximumWait) {
return (EnvironmentEdgeManager.currentTime() - this.createTime) > maximumWait;
} | 3.26 |
hbase_MemStoreFlusher_getMemStoreOffHeapSize_rdh | /**
 * Returns the memstore off-heap size, or 0 if <code>r</code> is null
*/
private static long getMemStoreOffHeapSize(HRegion r) {
return r == null ? 0 : r.getMemStoreOffHeapSize();
} | 3.26 |
hbase_MemStoreFlusher_getMemStoreDataSize_rdh | /**
 * Returns the memstore data size, or 0 if <code>r</code> is null
*/
private static long getMemStoreDataSize(HRegion r) {
return r == null ? 0 : r.getMemStoreDataSize();
} | 3.26 |
hbase_MemStoreFlusher_reclaimMemStoreMemory_rdh | /**
* Check if the regionserver's memstore memory usage is greater than the limit. If so, flush
* regions with the biggest memstores until we're down to the lower limit. This method blocks
* callers until we're down to a safe amount of memstore consumption.
*/
public void reclaimMemStoreMemory() {
Span span = TraceUtil.getGlobalTracer().spanBuilder("MemStoreFluser.reclaimMemStoreMemory").startSpan();
try (Scope scope = span.makeCurrent()) {
FlushType flushType = isAboveHighWaterMark();
if (flushType != FlushType.NORMAL) {
span.addEvent("Force Flush. We're above high water mark.");
long start = EnvironmentEdgeManager.currentTime();
long nextLogTimeMs = start;
synchronized(this.blockSignal) {
boolean blocked = false;
long startTime = 0;
boolean interrupted = false;
try {
flushType = isAboveHighWaterMark();
while ((flushType != FlushType.NORMAL) && (!server.isStopped())) {
if (!blocked) {
startTime = EnvironmentEdgeManager.currentTime();
if (!server.getRegionServerAccounting().isOffheap()) {
logMsg("global memstore heapsize", server.getRegionServerAccounting().getGlobalMemStoreHeapSize(), server.getRegionServerAccounting().getGlobalMemStoreLimit());
} else {
switch (flushType) {
case ABOVE_OFFHEAP_HIGHER_MARK :
logMsg("the global offheap memstore datasize", server.getRegionServerAccounting().getGlobalMemStoreOffHeapSize(), server.getRegionServerAccounting().getGlobalMemStoreLimit());
break;
case ABOVE_ONHEAP_HIGHER_MARK :
logMsg("global memstore heapsize", server.getRegionServerAccounting().getGlobalMemStoreHeapSize(), server.getRegionServerAccounting().getGlobalOnHeapMemStoreLimit());
break;
default :
break;
}
}
}
blocked = true;
wakeupFlushThread();
try {
// we should be able to wait forever, but we've seen a bug where
// we miss a notify, so put a 5 second bound on it at least.
blockSignal.wait(5 * 1000);
} catch (InterruptedException ie) {
LOG.warn("Interrupted while waiting");
interrupted = true;
}
long nowMs = EnvironmentEdgeManager.currentTime();
if (nowMs >= nextLogTimeMs) {
LOG.warn("Memstore is above high water mark and block {} ms", nowMs - start);
nextLogTimeMs = nowMs + 1000;
}
flushType = isAboveHighWaterMark();
}
} finally {
if (interrupted) {
Thread.currentThread().interrupt();
}
}
if (blocked) {
final long v47 = EnvironmentEdgeManager.currentTime() - startTime;
if (v47 > 0) {
this.updatesBlockedMsHighWater.add(v47);
}
LOG.info("Unblocking updates for server "
+ server.toString());
}
}
} else {
flushType = isAboveLowWaterMark();
if (flushType != FlushType.NORMAL) {
  wakeupFlushThread();
}
span.end();
}
}
} | 3.26 |
hbase_MemStoreFlusher_unregisterFlushRequestListener_rdh | /**
* Unregister the listener from MemstoreFlushListeners
*
* @return true when passed listener is unregistered successfully.
*/
@Override
public boolean unregisterFlushRequestListener(final FlushRequestListener listener) {
return this.flushRequestListeners.remove(listener);
} | 3.26 |
hbase_MemStoreFlusher_registerFlushRequestListener_rdh | /**
* Register a MemstoreFlushListener
*/
@Override
public void registerFlushRequestListener(final FlushRequestListener listener) {
  this.flushRequestListeners.add(listener);
} | 3.26 |
hbase_MemStoreFlusher_interruptIfNecessary_rdh | /**
* Only interrupt once it's done with a run through the work loop.
 */
void interruptIfNecessary() {
lock.writeLock().lock();
try {
for (FlushHandler flushHandler : flushHandlers) {
  if (flushHandler != null) {
flushHandler.interrupt();
  }
}
} finally {
lock.writeLock().unlock();
}
} | 3.26 |
hbase_MemStoreFlusher_setGlobalMemStoreLimit_rdh | /**
* Sets the global memstore limit to a new size.
*/
@Override
public void setGlobalMemStoreLimit(long globalMemStoreSize) {
this.server.getRegionServerAccounting().setGlobalMemStoreLimits(globalMemStoreSize);
reclaimMemStoreMemory();
} | 3.26 |
hbase_WALPrettyPrinter_beginPersistentOutput_rdh | /**
 * Enables output as a single, persistent list. At present, only relevant in the case of JSON
 * output.
 */
public void beginPersistentOutput() {
if (persistentOutput) {
return;
}
persistentOutput = true;
firstTxn = true;
if (outputJSON) {
out.print("[");
}
} | 3.26 |
hbase_WALPrettyPrinter_disableValues_rdh | /**
* turns value output off
*/
public void disableValues() {
outputValues = false;
} | 3.26 |
hbase_WALPrettyPrinter_setRowFilter_rdh | /**
 * Sets the row key by which output will be filtered. When not null, it serves as a filter; only
 * log entries from this row will be printed.
 */
public void setRowFilter(String row) {
  this.f0 = row;
} | 3.26 |
hbase_WALPrettyPrinter_endPersistentOutput_rdh | /**
 * Ends output of a single, persistent list. At present, only relevant in the case of JSON output.
 */
public void endPersistentOutput() {
  if (!persistentOutput) {
    return;
  }
  persistentOutput = false;
  if (outputJSON) {
    out.print("]");
  }
} | 3.26 |
hbase_WALPrettyPrinter_setPosition_rdh | /**
 * Sets the initial position from which to start seeking within the given WAL file.
*/
public void setPosition(long position) {
  this.f1 = position;
} | 3.26 |
hbase_WALPrettyPrinter_run_rdh | /**
 * Pass one or more log file names and formatting options and it will dump out a text version of
 * the contents on <code>stdout</code>.
 *
 * @param args
 * Command line arguments
 * @throws IOException
 * Thrown upon file system errors etc.
*/
public static void run(String[] args) throws IOException {
// create options
Options options = new Options();
options.addOption("h", "help", false, "Output help message");
options.addOption("j", "json", false, "Output JSON");
options.addOption("p", "printvals", false, "Print values");
options.addOption("t", "tables", true, "Table names (comma separated) to filter by; eg: test1,test2,test3 ");
options.addOption("r", "region", true, "Region to filter by. Pass encoded region name; e.g. '9192caead6a5a20acb4454ffbc79fa14'");
options.addOption("s", "sequence", true, "Sequence to filter by. Pass sequence number.");
options.addOption("k", "outputOnlyRowKey", false, "Print only row keys");
options.addOption("w", "row",
true, "Row to filter by. Pass row name.");
options.addOption("f", "rowPrefix", true, "Row prefix to filter by.");
options.addOption("g", "goto", true, "Position to seek to in the file");
WALPrettyPrinter v22 = new WALPrettyPrinter();
CommandLineParser parser = new PosixParser();
List<?> files = null;
try {
CommandLine cmd = parser.parse(options, args);
files = cmd.getArgList();
if (files.isEmpty() || cmd.hasOption("h")) {
HelpFormatter formatter = new HelpFormatter();
formatter.printHelp("WAL <filename...>", options, true);
System.exit(-1);
}
// configure the pretty printer using command line options
if (cmd.hasOption("p")) {
v22.enableValues();
}
if (cmd.hasOption("j")) {
v22.enableJSON();
}
if (cmd.hasOption("k")) {
v22.setOutputOnlyRowKey();
}
if (cmd.hasOption("t")) {
v22.setTableFilter(cmd.getOptionValue("t"));
}
if (cmd.hasOption("r")) {
v22.setRegionFilter(cmd.getOptionValue("r"));
}
if (cmd.hasOption("s")) {
v22.setSequenceFilter(Long.parseLong(cmd.getOptionValue("s"))); }
if (cmd.hasOption("w")) {
if (cmd.hasOption("f")) {
throw new ParseException("Row and Row-prefix cannot be supplied together");
}
v22.setRowFilter(cmd.getOptionValue("w"));
}
if (cmd.hasOption("f")) {
if (cmd.hasOption("w")) {
throw new ParseException("Row and Row-prefix cannot be supplied together");
}
v22.setRowPrefixFilter(cmd.getOptionValue("f"));
}
if (cmd.hasOption("g")) {
v22.setPosition(Long.parseLong(cmd.getOptionValue("g")));
}
} catch (ParseException e) {
LOG.error("Failed to parse commandLine arguments", e);
HelpFormatter formatter = new HelpFormatter();
formatter.printHelp("HFile filename(s) ", options, true);
System.exit(-1);
}
// get configuration, file system, and process the given files
Configuration conf = HBaseConfiguration.create();
CommonFSUtils.setFsDefault(conf, CommonFSUtils.getRootDir(conf));
// begin output
v22.beginPersistentOutput();
for (Object f : files) {
Path file = new Path(((String) (f)));
FileSystem fs = file.getFileSystem(conf);
if (!fs.exists(file)) {
System.err.println("ERROR, file doesnt exist: " + file);
return;
}
v22.processFile(conf, file);
}
v22.endPersistentOutput();
} | 3.26 |
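An illustrative programmatic invocation of the CLI described above; the WAL path and row key are placeholders, and the options mirror the flags parsed in run():

import java.io.IOException;
import org.apache.hadoop.hbase.wal.WALPrettyPrinter;

class WalDumpExample {
  public static void main(String[] args) throws IOException {
    // Dump one WAL file as JSON with values, filtered to a single (hypothetical) row key;
    // the WAL path below is purely illustrative.
    WALPrettyPrinter.run(new String[] {
        "-j", "-p", "-w", "myrow",
        "/hbase/WALs/host1,16020,1600000000000/host1%2C16020%2C1600000000000.1600000000001"
    });
  }
}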
hbase_WALPrettyPrinter_setRowPrefixFilter_rdh | /**
 * Sets the row key prefix by which output will be filtered. When not null, it serves as a
 * filter; only log entries with rows having this prefix will be printed.
*/
public void setRowPrefixFilter(String rowPrefix) {
this.rowPrefix = rowPrefix;
} | 3.26 |
hbase_WALPrettyPrinter_setTableFilter_rdh | /**
* Sets the tables filter. Only log entries for these tables are printed.
*
* @param tablesWithDelimiter
* table names separated with comma.
*/
public void setTableFilter(String tablesWithDelimiter) {
Collections.addAll(tableSet, tablesWithDelimiter.split(","));
} | 3.26 |
hbase_WALPrettyPrinter_enableJSON_rdh | /**
* turns JSON output on
*/
public void enableJSON() {
  outputJSON = true;
} | 3.26 |
hbase_WALPrettyPrinter_setRegionFilter_rdh | /**
 * Sets the region by which output will be filtered. When not null, it serves as a filter; only
 * log entries from this region will be printed.
*/
public void setRegionFilter(String region) {
this.region = region;
} | 3.26 |
hbase_WALPrettyPrinter_setSequenceFilter_rdh | /**
 * Sets the sequence id by which output will be filtered. When nonnegative, it serves as a
 * filter; only log entries with this sequence id will be printed.
*/
public void setSequenceFilter(long sequence) {
this.sequence = sequence;
} | 3.26 |
hbase_WALPrettyPrinter_m0_rdh | /**
* turns JSON output off, and turns on "pretty strings" for human consumption
*/
public void m0() {
outputJSON = false;
} | 3.26 |
hbase_WALPrettyPrinter_processFile_rdh | /**
 * Reads a log file and outputs its contents, one transaction at a time, as specified by the
 * currently configured options.
 *
 * @param conf
 * the HBase configuration relevant to this log file
 * @param p
 * the path of the log file to be read
 * @throws IOException
 * may be unable to access the configured filesystem or requested file.
 */
public void processFile(final Configuration conf, final Path p) throws IOException {
FileSystem fs = p.getFileSystem(conf);
if (!fs.exists(p)) {
throw new FileNotFoundException(p.toString());
}
if (!fs.isFile(p)) {
throw new IOException(p + " is not a file");
}
WALStreamReader log = WALFactory.createStreamReader(fs, p, conf, f1 > 0 ? f1 : -1);
if (log instanceof AbstractProtobufWALReader) {
List<String> writerClsNames = ((AbstractProtobufWALReader) (log)).getWriterClsNames();
if ((writerClsNames != null) && (writerClsNames.size() > 0)) {
out.print("Writer Classes: ");
for (int i = 0; i < writerClsNames.size(); i++) {
out.print(writerClsNames.get(i));
if (i != (writerClsNames.size() - 1)) {
out.print(" ");
}
}
out.println();
}
String cellCodecClsName = ((AbstractProtobufWALReader) (log)).getCodecClsName();
if (cellCodecClsName != null) {
out.println("Cell Codec Class: " + cellCodecClsName);
}
}
if (outputJSON && (!persistentOutput)) {
out.print("[");
firstTxn = true;
}
try {
WAL.Entry entry;
while ((entry = log.next()) != null) {
WALKey key = entry.getKey();
WALEdit edit = entry.getEdit();
// begin building a transaction structure
Map<String, Object> txn = key.toStringMap();
long writeTime = key.getWriteTime();
// check output filters
if ((!tableSet.isEmpty()) && (!tableSet.contains(txn.get("table").toString()))) {
continue;
}
if ((sequence >= 0) && (((Long) (txn.get("sequence"))) != sequence)) {
continue;
}
if ((region != null) && (!txn.get("region").equals(region))) {
continue;
}
// initialize list into which we will store atomic actions
List<Map<String, Object>> actions =
new ArrayList<>();for (Cell cell : edit.getCells()) {
// add atomic operation to txn
Map<String, Object> op = new HashMap<>(toStringMap(cell, outputOnlyRowKey, rowPrefix, f0, outputValues));
if (op.isEmpty()) {
continue;
}
actions.add(op);
}
if (actions.isEmpty()) {
continue;
}
txn.put("actions", actions);
if (outputJSON) {
// JSON output is a straightforward "toString" on the txn object
if (firstTxn) {
firstTxn = false;
} else {
out.print(",");
}
// encode and print JSON
out.print(GSON.toJson(txn));
} else {
// Pretty output, complete with indentation by atomic action
if (!outputOnlyRowKey) {
out.println(String.format(outputTmpl, txn.get("sequence"), txn.get("table"), txn.get("region"), new Date(writeTime)));
}
for (int i = 0; i < actions.size(); i++) {
Map<String, Object> op = actions.get(i);
printCell(out, op, outputValues, outputOnlyRowKey);
}
}
if (!outputOnlyRowKey) {
out.println("edit heap size: " + entry.getEdit().heapSize());
out.println("position: " + log.getPosition());
}
}
} finally {
log.close();
}
if (outputJSON && (!persistentOutput)) {
out.print("]");
}
} | 3.26 |
hbase_WALPrettyPrinter_setOutputOnlyRowKey_rdh | /**
* Option to print the row key only in case you just need the row keys from the WAL
*/
public void setOutputOnlyRowKey() {
this.outputOnlyRowKey = true;
} | 3.26 |
hbase_WALActionsListener_preLogRoll_rdh | /**
* The WAL is going to be rolled. The oldPath can be null if this is the first log file from the
* regionserver.
*
* @param oldPath
* the path to the old wal
* @param newPath
* the path to the new wal
 */
default void preLogRoll(Path oldPath, Path newPath) throws IOException {
} | 3.26 |
hbase_WALActionsListener_visitLogEntryBeforeWrite_rdh | /**
* Called before each write.
 */
default void visitLogEntryBeforeWrite(RegionInfo info, WALKey logKey, WALEdit logEdit) {
} | 3.26 |
hbase_WALActionsListener_postAppend_rdh | /**
* For notification post append to the writer. Used by metrics system at least. TODO: Combine this
* with above.
*
* @param entryLen
* approx length of cells in this append.
* @param elapsedTimeMillis
* elapsed time in milliseconds.
* @param logKey
* A WAL key
* @param logEdit
* A WAL edit containing list of cells.
* @throws IOException
* if any network or I/O error occurred
*/
default void postAppend(final long entryLen, final long elapsedTimeMillis, final WALKey logKey, final WALEdit logEdit) throws IOException {
} | 3.26 |
hbase_WALActionsListener_preLogArchive_rdh | /**
* The WAL is going to be archived.
*
* @param oldPath
* the path to the old wal
* @param newPath
* the path to the new wal
*/
default void preLogArchive(Path oldPath, Path newPath) throws IOException {
} | 3.26 |
hbase_WALActionsListener_postSync_rdh | /**
* For notification post writer sync. Used by metrics system at least.
*
* @param timeInNanos
* How long the filesystem sync took in nanoseconds.
* @param handlerSyncs
* How many sync handler calls were released by this call to filesystem sync.
*/
default void postSync(final long timeInNanos, final int handlerSyncs) {
} | 3.26 |
hbase_WALActionsListener_postLogRoll_rdh | /**
* The WAL has been rolled. The oldPath can be null if this is the first log file from the
* regionserver.
*
* @param oldPath
* the path to the old wal
* @param newPath
* the path to the new wal
*/
default void postLogRoll(Path oldPath, Path newPath) throws IOException {
} | 3.26 |
hbase_WALActionsListener_postLogArchive_rdh | /**
* The WAL has been archived.
*
* @param oldPath
* the path to the old wal
* @param newPath
* the path to the new wal
*/
default void postLogArchive(Path oldPath, Path newPath) throws IOException {
} | 3.26 |
hbase_ExtendedCell_deepClone_rdh | /**
* Does a deep copy of the contents to a new memory area and returns it as a new cell.
*
* @return The deep cloned cell
*/
default ExtendedCell deepClone() {
// When being added to the memstore, deepClone() is called and KeyValue has less heap overhead.
return new KeyValue(this);
}
/**
* Extracts the id of the backing bytebuffer of this cell if it was obtained from fixed sized
* chunks as in case of MemstoreLAB
*
* @return the chunk id if the cell is backed by fixed sized Chunks, else return
{@link #CELL_NOT_BASED_ON_CHUNK} | 3.26 |
hbase_ExtendedCell_getSerializedSize_rdh | /**
* Returns Serialized size (defaults to include tag length).
*/
@Override
default int getSerializedSize() {
return getSerializedSize(true);
} | 3.26 |
hbase_ExtendedCell_write_rdh | /**
* Write this Cell into the given buf's offset in a {@link KeyValue} format.
*
* @param buf
* The buffer where to write the Cell.
* @param offset
* The offset within buffer, to write the Cell.
*/
default void write(ByteBuffer buf, int offset) {
KeyValueUtil.appendTo(this, buf, offset, true);
} | 3.26 |
hbase_MovingAverage_measure_rdh | /**
* Measure elapsed time of a measurable method.
*
* @param measurable
* method implements {@link TimeMeasurable}
* @return T it refers to the original return type of the measurable method
*/
public T measure(TimeMeasurable<T> measurable) {
long startTime = start();
LOG.trace("{} - start to measure at: {} ns.", label, startTime);
// Here may throw exceptions which should be taken care by caller, not here.
// If exception occurs, this time wouldn't count.
T result = measurable.measure();
long elapsed = stop(startTime);
LOG.trace("{} - elapse: {} ns.", label, elapsed);
updateMostRecentTime(elapsed);
return result;
} | 3.26 |
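A sketch of how a caller might time a block of work with measure(); it assumes TimeMeasurable is a single-method interface (as the call above suggests) so a lambda can be passed, and it leaves the choice of concrete MovingAverage subclass to the caller:

import org.apache.hadoop.hbase.util.MovingAverage;

class MovingAverageExample {
  // `avg` would typically be one of the concrete subclasses in the same util package;
  // which one is used is not important for this sketch.
  static Long timeIt(MovingAverage<Long> avg) {
    return avg.measure(() -> {
      long sum = 0;
      for (int i = 0; i < 1_000_000; i++) {
        sum += i;   // stand-in for the work being measured
      }
      return sum;
    });
  }
}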
hbase_MovingAverage_stop_rdh | /**
* Mark end time of an execution, and return its interval.
*
* @param startTime
* start time of an execution
* @return elapsed time
*/
protected long stop(long startTime) {
return System.nanoTime() - startTime;
} | 3.26 |
hbase_MovingAverage_start_rdh | /**
* Mark start time of an execution.
*
* @return time in ns.
*/
protected long start() {
return System.nanoTime();
} | 3.26 |
hbase_OrderedFloat32_decodeFloat_rdh | /**
* Read a {@code float} value from the buffer {@code dst}.
*
* @param dst
* the {@link PositionedByteRange} to read the {@code float} from
* @return the {@code float} read from the buffer
*/
public float decodeFloat(PositionedByteRange dst) {
return OrderedBytes.decodeFloat32(dst);
} | 3.26 |
hbase_OrderedFloat32_encodeFloat_rdh | /**
* Write instance {@code val} into buffer {@code buff}.
*
* @param dst
* the {@link PositionedByteRange} to write to
* @param val
* the value to write to {@code dst}
* @return the number of bytes written
 */
public int encodeFloat(PositionedByteRange dst, float val) {
return OrderedBytes.encodeFloat32(dst, val, order);
} | 3.26 |
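A round-trip sketch pairing the encoder above with decodeFloat; the 5-byte buffer size assumes the usual one header byte plus four data bytes for an order-preserving float32 encoding:

import org.apache.hadoop.hbase.types.OrderedFloat32;
import org.apache.hadoop.hbase.util.PositionedByteRange;
import org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;

class OrderedFloat32Example {
  static float roundTrip(float value) {
    // 5 bytes should cover the order-preserving encoding (assumed: 1 header byte + 4 data bytes).
    PositionedByteRange buf = new SimplePositionedMutableByteRange(5);
    OrderedFloat32.ASCENDING.encodeFloat(buf, value);
    buf.setPosition(0);
    return OrderedFloat32.ASCENDING.decodeFloat(buf);
  }
}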
hbase_HBaseRpcServicesBase_checkOOME_rdh | /**
* Check if an OOME and, if so, abort immediately to avoid creating more objects.
*
* @return True if we OOME'd and are aborting.
*/
@Override
public boolean checkOOME(Throwable e) {
return OOMEChecker.exitIfOOME(e, getClass().getSimpleName());
} | 3.26 |
hbase_LogRollBackupSubprocedure_cleanup_rdh | /**
* Cancel threads if they haven't finished.
*/
@Override
public void cleanup(Exception e) {
taskManager.abort("Aborting log roll subprocedure tasks for backup due to error", e);
} | 3.26 |
hbase_LogRollBackupSubprocedure_releaseBarrier_rdh | /**
* Hooray!
*/
public void releaseBarrier() {
// NO OP
} | 3.26 |
hbase_LogRollBackupSubprocedure_insideBarrier_rdh | /**
* do a log roll.
*
* @return some bytes
*/
@Override
public byte[] insideBarrier() throws ForeignException {
rolllog();
return null;
} | 3.26 |
hbase_Timer_updateMillis_rdh | /**
* Update the timer with the given duration in milliseconds
*
* @param durationMillis
* the duration of the event in ms
*/
default void updateMillis(long durationMillis) {
update(durationMillis, TimeUnit.MILLISECONDS);
} | 3.26 |
hbase_Timer_updateNanos_rdh | /**
* Update the timer with the given duration in nanoseconds
*
* @param durationNanos
* the duration of the event in ns
*/
default void updateNanos(long durationNanos) {
  update(durationNanos, TimeUnit.NANOSECONDS);
} | 3.26 |
hbase_Timer_updateMicros_rdh | /**
* Update the timer with the given duration in microseconds
*
* @param durationMicros
* the duration of the event in microseconds
*/
default void updateMicros(long durationMicros) {
update(durationMicros, TimeUnit.MICROSECONDS);
} | 3.26 |
hbase_TableBackupClient_failBackup_rdh | /**
* Fail the overall backup.
*
* @param backupInfo
* backup info
* @param e
* exception
* @throws IOException
* exception
*/
protected void failBackup(Connection conn, BackupInfo backupInfo, BackupManager backupManager, Exception e, String msg, BackupType type,
Configuration conf) throws IOException {
  try {
f1.error(msg + getMessage(e), e);
// If this is a cancel exception, then we've already cleaned.
// set the failure timestamp of the overall backup
backupInfo.setCompleteTs(EnvironmentEdgeManager.currentTime());
// set failure message
backupInfo.setFailedMsg(e.getMessage());
// set overall backup status: failed
backupInfo.setState(BackupState.FAILED);
// compose the backup failed data
String backupFailedData = (((((((("BackupId=" + backupInfo.getBackupId()) + ",startts=") + backupInfo.getStartTs()) + ",failedts=") + backupInfo.getCompleteTs()) + ",failedphase=") + backupInfo.getPhase()) + ",failedmessage=") + backupInfo.getFailedMsg();
f1.error(backupFailedData);
cleanupAndRestoreBackupSystem(conn, backupInfo, conf);
// If backup session is updated to FAILED state - means we
// processed recovery already.
backupManager.updateBackupInfo(backupInfo);
backupManager.finishBackupSession();
f1.error(("Backup " + backupInfo.getBackupId()) + " failed.");} catch (IOException ee) {
f1.error("Please run backup repair tool manually to restore backup system integrity");
throw ee;
}
} | 3.26 |
hbase_TableBackupClient_obtainBackupMetaDataStr_rdh | /**
* Get backup request meta data dir as string.
*
* @param backupInfo
* backup info
* @return meta data dir
*/
protected String obtainBackupMetaDataStr(BackupInfo backupInfo) {
StringBuilder sb = new StringBuilder();
sb.append(("type=" + backupInfo.getType()) + ",tablelist=");
for (TableName table : backupInfo.getTables()) {
sb.append(table + ";");
}
if (sb.lastIndexOf(";") > 0) {
sb.delete(sb.lastIndexOf(";"), sb.lastIndexOf(";") + 1);
}
sb.append(",targetRootDir=" + backupInfo.getBackupRootDir());
return sb.toString();
} | 3.26 |
hbase_TableBackupClient_cleanupExportSnapshotLog_rdh | /**
* Clean up directories with prefix "exportSnapshot-", which are generated when exporting
* snapshots.
*
* @throws IOException
* exception
*/
protected static void cleanupExportSnapshotLog(Configuration conf) throws IOException {
FileSystem fs = CommonFSUtils.getCurrentFileSystem(conf);
Path stagingDir = new Path(conf.get(BackupRestoreConstants.CONF_STAGING_ROOT, fs.getWorkingDirectory().toString()));
FileStatus[] files = CommonFSUtils.listStatus(fs, stagingDir);
if (files == null) {
return;
}
for (FileStatus file : files) {
if (file.getPath().getName().startsWith("exportSnapshot-")) {
f1.debug("Delete log files of exporting snapshot: " + file.getPath().getName());
if (CommonFSUtils.delete(fs, file.getPath(), true) == false) {
f1.warn("Can not delete " + file.getPath());
}
}
}
} | 3.26 |
hbase_TableBackupClient_beginBackup_rdh | /**
* Begin the overall backup.
*
* @param backupInfo
* backup info
* @throws IOException
* exception
*/
protected void beginBackup(BackupManager backupManager, BackupInfo backupInfo) throws IOException {
BackupSystemTable.snapshot(conn);
backupManager.setBackupInfo(backupInfo);
// set the start timestamp of the overall backup
long startTs = EnvironmentEdgeManager.currentTime();
backupInfo.setStartTs(startTs);
// set overall backup status: ongoing
backupInfo.setState(BackupState.RUNNING);
backupInfo.setPhase(BackupPhase.REQUEST);
f1.info(((("Backup " + backupInfo.getBackupId()) + " started at ") + startTs) + ".");
backupManager.updateBackupInfo(backupInfo);
if (f1.isDebugEnabled()) {
f1.debug(("Backup session " + backupInfo.getBackupId()) + " has been started.");
}
} | 3.26 |