name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68)
---|---|---|
hbase_HDFSBlocksDistribution_getBlocksLocalWithSsdWeight_rdh | /**
* Get the blocks local weight with ssd for a given host
*
* @param host
* the host name
* @return the blocks local with ssd weight of the given host
*/
public long getBlocksLocalWithSsdWeight(String host) {
return getBlocksLocalityWeightInternal(host, HostAndWeight::getWeightForSsd);
} | 3.26 |
hbase_HDFSBlocksDistribution_getBlockLocalityIndexForSsd_rdh | /**
* Get the block locality index for a ssd for a given host
*
* @param host
* the host name
* @return the locality index with ssd of the given host
*/
public float getBlockLocalityIndexForSsd(String host) {
if (uniqueBlocksTotalWeight == 0) {
return 0.0F;
} else {
return ((float) (getBlocksLocalityWeightInternal(host, HostAndWeight::getWeightForSsd))) / ((float) (uniqueBlocksTotalWeight));
}
} | 3.26 |
hbase_HDFSBlocksDistribution_add_rdh | /**
* This will add the distribution from input to this object
*
* @param otherBlocksDistribution
* the other hdfs blocks distribution
*/
public void add(HDFSBlocksDistribution otherBlocksDistribution) {
Map<String, HostAndWeight> otherHostAndWeights = otherBlocksDistribution.getHostAndWeights();
for (Map.Entry<String, HostAndWeight> otherHostAndWeight : otherHostAndWeights.entrySet()) {
addHostAndBlockWeight(otherHostAndWeight.getValue().host, otherHostAndWeight.getValue().weight, otherHostAndWeight.getValue().weightForSsd);
}
addUniqueWeight(otherBlocksDistribution.getUniqueBlocksTotalWeight());
} | 3.26 |
hbase_HDFSBlocksDistribution_getWeightForSsd_rdh | /**
* Returns the weight for ssd
*/
public long getWeightForSsd() {
return weightForSsd;
} | 3.26 |
hbase_HDFSBlocksDistribution_m0_rdh | /**
* Return the sorted list of hosts in terms of their weights
*/
public List<String> m0() {
HostAndWeight[] hostAndWeights = getTopHostsWithWeights();
List<String> topHosts = new ArrayList<>(hostAndWeights.length);
for (HostAndWeight haw : hostAndWeights) {
topHosts.add(haw.getHost());
}
return topHosts;
} | 3.26 |
hbase_HDFSBlocksDistribution_getWeight_rdh | /**
* return the weight for a specific host, that will be the total bytes of all blocks on the host
*
* @param host
* the host name
* @return the weight of the given host
*/
public long getWeight(String host) {
long weight = 0;
if (host != null) {
HostAndWeight hostAndWeight = this.hostAndWeights.get(host);
if (hostAndWeight != null) {
weight = hostAndWeight.getWeight();
}
}
return weight;
} | 3.26 |
hbase_HDFSBlocksDistribution_addUniqueWeight_rdh | /**
* add some weight to the total unique weight
*
* @param weight
* the weight
*/
private void addUniqueWeight(long weight) {
uniqueBlocksTotalWeight += weight;
} | 3.26 |
hbase_HDFSBlocksDistribution_addWeight_rdh | /**
* add weight
*
* @param weight
* the weight
* @param weightForSsd
* the weight for ssd
*/
public void addWeight(long weight, long weightForSsd) {
this.weight += weight;
this.weightForSsd += weightForSsd;
} | 3.26 |
hbase_HDFSBlocksDistribution_getBlocksLocalWeight_rdh | /**
* Get the blocks local weight for a given host
*
* @param host
* the host name
* @return the blocks local weight of the given host
*/
public long getBlocksLocalWeight(String host) {
return getBlocksLocalityWeightInternal(host, HostAndWeight::getWeight);
} | 3.26 |
hbase_HDFSBlocksDistribution_getUniqueBlocksTotalWeight_rdh | /**
* Returns the sum of all unique blocks' weight
*/
public long getUniqueBlocksTotalWeight() {
return uniqueBlocksTotalWeight;
} | 3.26 |
hbase_HDFSBlocksDistribution_getHostAndWeights_rdh | /**
* Returns the hosts and their weights
*/
public Map<String, HostAndWeight> getHostAndWeights() {
return this.hostAndWeights;
} | 3.26 |
hbase_HDFSBlocksDistribution_getTopHostsWithWeights_rdh | /**
* Return the sorted list of hosts in terms of their weights
*/
public HostAndWeight[] getTopHostsWithWeights() {
NavigableSet<HostAndWeight> orderedHosts = new TreeSet<>(new HostAndWeight.WeightComparator());
orderedHosts.addAll(this.hostAndWeights.values());
return orderedHosts.descendingSet().toArray(new HostAndWeight[orderedHosts.size()]);
} | 3.26 |
hbase_HDFSBlocksDistribution_addHostsAndBlockWeight_rdh | /**
* add some weight to a list of hosts, update the value of unique block weight
*
* @param hosts
* the list of hosts
* @param weight
* the weight
* @param storageTypes
* the storage types of the block replicas; a host whose replica is on SSD also accrues the weight as SSD weight
*/
public void addHostsAndBlockWeight(String[] hosts, long weight, StorageType[] storageTypes) {
if ((hosts == null) || (hosts.length == 0)) {
// erroneous data
return;
}
addUniqueWeight(weight);
if ((storageTypes != null) && (storageTypes.length == hosts.length)) {
for (int i = 0; i < hosts.length; i++) {
long weightForSsd = 0;
if (storageTypes[i] == StorageType.SSD) {
weightForSsd = weight;
}
addHostAndBlockWeight(hosts[i], weight, weightForSsd);
}
} else {
for (String hostname : hosts) {
addHostAndBlockWeight(hostname, weight, 0);
}
}
} | 3.26 |
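The HDFSBlocksDistribution rows above describe an accumulate-then-query pattern: weights are added per block, then locality is read back per host. The following usage sketch is illustrative only (not a dataset row); it assumes org.apache.hadoop.fs.StorageType and a public no-arg HDFSBlocksDistribution constructor.

// Sketch: one 128 MB block with three replicas, one of them on SSD at host-a.
HDFSBlocksDistribution dist = new HDFSBlocksDistribution();
String[] hosts = { "host-a", "host-b", "host-c" };
StorageType[] types = { StorageType.SSD, StorageType.DISK, StorageType.DISK };
dist.addHostsAndBlockWeight(hosts, 128L * 1024 * 1024, types);
long bytesLocalToA = dist.getBlocksLocalWeight("host-a");       // 128 MB local to host-a
float ssdIndexAtA = dist.getBlockLocalityIndexForSsd("host-a"); // 1.0, since host-a's replica is on SSD
long uniqueBytes = dist.getUniqueBlocksTotalWeight();           // 128 MB, counted once per block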
hbase_RingBufferTruck_m0_rdh | /**
* Unload the truck of its {@link SyncFuture} payload. The internal reference is released.
*/
SyncFuture m0() {
SyncFuture sync = this.sync;
this.sync = null;
this.type = Type.EMPTY;
return sync;
} | 3.26 |
hbase_RingBufferTruck_load_rdh | /**
* Load the truck with a {@link SyncFuture}.
*/
void load(final SyncFuture syncFuture) {
this.sync = syncFuture;
this.type = Type.SYNC;
} | 3.26 |
hbase_RingBufferTruck_unloadAppend_rdh | /**
* Unload the truck of its {@link FSWALEntry} payload. The internal reference is released.
*/
FSWALEntry unloadAppend() {
FSWALEntry entry = this.entry;
this.entry = null;
this.type = Type.EMPTY;
return entry;
} | 3.26 |
hbase_RingBufferTruck_type_rdh | /**
* Returns the type of this truck's payload.
*/
Type type() {
return type;
} | 3.26 |
hbase_RandomRowFilter_parseFrom_rdh | /**
* Parse a serialized representation of {@link RandomRowFilter}
*
* @param pbBytes
* A pb serialized {@link RandomRowFilter} instance
* @return An instance of {@link RandomRowFilter} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static RandomRowFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.RandomRowFilter proto;
try {
proto = FilterProtos.RandomRowFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
return new RandomRowFilter(proto.getChance());
} | 3.26 |
hbase_RandomRowFilter_getChance_rdh | /**
* Returns The chance that a row gets included.
*/
public float getChance() {
return chance;
} | 3.26 |
hbase_RandomRowFilter_toByteArray_rdh | /**
* Returns The filter serialized using pb
*/
@Override
public byte[] toByteArray() {
FilterProtos.RandomRowFilter.Builder builder = FilterProtos.RandomRowFilter.newBuilder();
builder.setChance(this.chance);
return builder.build().toByteArray();
} | 3.26 |
hbase_RandomRowFilter_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof RandomRowFilter)) {
return false;
}
RandomRowFilter other = ((RandomRowFilter) (o));
return this.getChance() == other.getChance();
} | 3.26 |
hbase_RandomRowFilter_setChance_rdh | /**
* Set the chance that a row is included.
*/
public void setChance(float chance) {
this.chance = chance;
} | 3.26 |
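The RandomRowFilter rows above define a pb round trip (toByteArray/parseFrom) plus a chance accessor; a minimal sketch of that round trip follows (illustrative, not a dataset row; parseFrom throws the checked DeserializationException shown in its javadoc).

// Sketch: serialize a RandomRowFilter and rebuild it from the protobuf bytes.
RandomRowFilter original = new RandomRowFilter(0.25f); // keep roughly 25% of rows
byte[] serialized = original.toByteArray();
try {
  RandomRowFilter restored = RandomRowFilter.parseFrom(serialized);
  assert restored.getChance() == original.getChance();
} catch (DeserializationException e) {
  throw new RuntimeException(e); // not expected for bytes we just produced
}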
hbase_GlobalQuotaSettingsImpl_toQuotas_rdh | /**
* Constructs a new {@link Quotas} message from {@code this}.
*/
protected Quotas toQuotas() {
QuotaProtos.Quotas.Builder builder = QuotaProtos.Quotas.newBuilder();
if (getThrottleProto() != null) {
builder.setThrottle(getThrottleProto());
}
if (getBypassGlobals() != null) {
builder.setBypassGlobals(getBypassGlobals());
}
if (getSpaceProto() != null) {
builder.setSpace(getSpaceProto());
}
return builder.build();
} | 3.26 |
hbase_ConnectionOverAsyncConnection_toString_rdh | /**
* An identifier that will remain the same for a given connection.
*/
@Override
public String toString() {
return "connection-over-async-connection-0x" + Integer.toHexString(hashCode());
} | 3.26 |
hbase_ConnectionOverAsyncConnection_createThreadPool_rdh | // only used for executing coprocessor calls, as users may reference the methods in the
// BlockingInterface of the protobuf stub so we have to execute the call in a separated thread...
// Will be removed in 4.0.0 along with the deprecated coprocessor methods in Table and Admin
// interface.
private ThreadPoolExecutor createThreadPool() {
Configuration conf = conn.getConfiguration();
int threads = conf.getInt("hbase.hconnection.threads.max", 256);
long keepAliveTime = conf.getLong("hbase.hconnection.threads.keepalivetime", 60);
BlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>(threads * conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
ThreadPoolExecutor tpe = new ThreadPoolExecutor(threads, threads, keepAliveTime, TimeUnit.SECONDS, workQueue, new ThreadFactoryBuilder().setDaemon(true).setNameFormat(toString() + "-shared-%d").build());
tpe.allowCoreThreadTimeOut(true);
return tpe;
} | 3.26 |
hbase_ConnectionOverAsyncConnection_closePool_rdh | // will be called from AsyncConnection, to avoid infinite loop as in the above method we will call
// AsyncConnection.close.
synchronized void closePool() {
ExecutorService batchPool = this.batchPool;
if (batchPool != null) {
ConnectionUtils.shutdownPool(batchPool);
this.batchPool = null;
}
} | 3.26 |
hbase_ConnectionOverAsyncConnection_getBatchPool_rdh | // only used for executing coprocessor calls, as users may reference the methods in the
// BlockingInterface of the protobuf stub so we have to execute the call in a separated thread...
// Will be removed in 4.0.0 along with the deprecated coprocessor methods in Table and Admin
// interface.
private ExecutorService getBatchPool() throws IOException {
if (batchPool == null) {
synchronized(this) {
if (isClosed()) {
throw new DoNotRetryIOException("Connection is closed");
}
if (batchPool == null) {
this.batchPool = createThreadPool();
}
}
}
return this.batchPool;
} | 3.26 |
hbase_ColumnFamilyDescriptorBuilder_setVersionsWithTimeToLive_rdh | /**
* Retain all versions for a given TTL (retentionInterval), and then only a specific number of
* versions (versionAfterInterval) after that interval elapses.
*
* @param retentionInterval
* Retain all versions for this interval
* @param versionAfterInterval
* Number of versions to retain after retentionInterval
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setVersionsWithTimeToLive(final int retentionInterval, final int versionAfterInterval) {
ModifyableColumnFamilyDescriptor modifyableColumnFamilyDescriptor = setVersions(versionAfterInterval, Integer.MAX_VALUE);
modifyableColumnFamilyDescriptor.setTimeToLive(retentionInterval);
modifyableColumnFamilyDescriptor.setKeepDeletedCells(KeepDeletedCells.TTL);
return modifyableColumnFamilyDescriptor;
} | 3.26 |
hbase_ColumnFamilyDescriptorBuilder_setCacheIndexesOnWrite_rdh | /**
* Set the setCacheIndexesOnWrite flag
*
* @param value
* true if we should cache index blocks on write
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setCacheIndexesOnWrite(boolean value) {
return setValue(CACHE_INDEX_ON_WRITE_BYTES, Boolean.toString(value));
} | 3.26 |
hbase_ColumnFamilyDescriptorBuilder_setCompressionType_rdh | /**
* Compression types supported in hbase. LZO is not bundled as part of the hbase distribution.
* See <a href="http://hbase.apache.org/book.html#lzo.compression">LZO Compression</a> for
* how to enable it.
*
* @param type
* Compression type setting.
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setCompressionType(Compression.Algorithm type) {
return setValue(COMPRESSION_BYTES, type.name());
} | 3.26 |
hbase_ColumnFamilyDescriptorBuilder_setCompressTags_rdh | /**
* Set whether the tags should be compressed along with DataBlockEncoding. When no
* DataBlockEncoding is used, this has no effect.
*
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setCompressTags(boolean compressTags) {
return setValue(COMPRESS_TAGS_BYTES, String.valueOf(compressTags));
} | 3.26 |
hbase_ColumnFamilyDescriptorBuilder_setCacheBloomsOnWrite_rdh | /**
* Set the setCacheBloomsOnWrite flag.
*
* @param value
* true if we should cache bloomfilter blocks on write
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setCacheBloomsOnWrite(boolean value) {
return setValue(CACHE_BLOOMS_ON_WRITE_BYTES, Boolean.toString(value));
} | 3.26 |
hbase_ColumnFamilyDescriptorBuilder_setDFSReplication_rdh | /**
* Set the replication factor to hfile(s) belonging to this family
*
* @param replication
* number of replicas the block(s) belonging to this CF should have, or
* {@link #DEFAULT_DFS_REPLICATION} for the default replication factor set in
* the filesystem
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setDFSReplication(short replication) {
if ((replication < 1) && (replication != DEFAULT_DFS_REPLICATION)) {
throw new IllegalArgumentException("DFS replication factor cannot be less than 1 if explicitly set.");
}
return setValue(DFS_REPLICATION_BYTES, Short.toString(replication));
} | 3.26 |
hbase_ColumnFamilyDescriptorBuilder_setIndexBlockEncoding_rdh | /**
* Set index block encoding algorithm used in block cache.
*
* @param type
* What kind of index block encoding will be used.
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setIndexBlockEncoding(IndexBlockEncoding type) {
return setValue(INDEX_BLOCK_ENCODING_BYTES, type == null ? IndexBlockEncoding.NONE.name() : type.name());
} | 3.26 |
hbase_ColumnFamilyDescriptorBuilder_setTimeToLive_rdh | /**
* Set the time to live
*
* @param timeToLive
* Time-to-live of cell contents, in seconds.
* @return this (for chained invocation)
* @throws org.apache.hadoop.hbase.exceptions.HBaseException
* exception
*/
public ModifyableColumnFamilyDescriptor setTimeToLive(String timeToLive) throws HBaseException {
return setTimeToLive(Integer.parseInt(PrettyPrinter.valueOf(timeToLive, Unit.TIME_INTERVAL)));
} | 3.26 |
hbase_ColumnFamilyDescriptorBuilder_removeConfiguration_rdh | /**
* Remove a configuration setting represented by the key from the {@link #configuration} map.
*
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor removeConfiguration(final String key) {
return setConfiguration(key, null);
} | 3.26 |
hbase_ColumnFamilyDescriptorBuilder_setVersions_rdh | /**
* Set minimum and maximum versions to keep.
*
* @param minVersions
* minimal number of versions
* @param maxVersions
* maximum number of versions
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setVersions(int minVersions, int maxVersions) {
if (minVersions <= 0) {
// TODO: Allow minVersion and maxVersion of 0 to be the way you say "Keep all versions".
// Until there is support, consider 0 or < 0 -- a configuration error.
throw new IllegalArgumentException("Minimum versions must be positive");
}
if (maxVersions < minVersions) {
throw new IllegalArgumentException(((("Unable to set MaxVersion to " + maxVersions) + " and set MinVersion to ") + minVersions) + ", as maximum versions must be >= minimum versions.");
}
m2(minVersions);
setMaxVersions(maxVersions);
return this;
} | 3.26 |
hbase_ColumnFamilyDescriptorBuilder_m2_rdh | /**
* Set minimum versions to retain.
*
* @param minVersions
* The minimum number of versions to keep. (used when timeToLive is set)
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor m2(int minVersions) {
return setValue(MIN_VERSIONS_BYTES, Integer.toString(minVersions));
} | 3.26 |
hbase_ColumnFamilyDescriptorBuilder_isLegalColumnFamilyName_rdh | /**
* Check if the column family name is legal.
*
* @param b
* Family name.
* @return <code>b</code>
* @throws IllegalArgumentException
* If not null and not a legitimate family name: i.e. 'printable'
* and ends in a ':' (Null passes are allowed because
* <code>b</code> can be null when deserializing). Cannot start
* with a '.' either. Also Family can not be an empty value or
* equal "recovered.edits".
*/
public static byte[] isLegalColumnFamilyName(final byte[] b) {
if (b == null) {
return null;
}
Preconditions.checkArgument(b.length != 0, "Column Family name can not be empty");
if (b[0] == '.') {
throw new IllegalArgumentException(("Column Family names cannot start with a " + "period: ") + Bytes.toString(b));
}
for (int i = 0; i < b.length; i++) {
if (((Character.isISOControl(b[i]) || (b[i] == ':')) || (b[i] == '\\')) || (b[i] == '/')) {
throw new IllegalArgumentException((("Illegal character <" + b[i]) + ">. Column Family names cannot contain control characters or colons: ") + Bytes.toString(b));
}
}
byte[] v2 = Bytes.toBytes(HConstants.RECOVERED_EDITS_DIR);
if (Bytes.equals(v2, b)) {
throw new IllegalArgumentException("Column Family name cannot be: " + HConstants.RECOVERED_EDITS_DIR);
}
return b;
} | 3.26 |
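isLegalColumnFamilyName either returns its input or throws; a short validation sketch follows (illustrative, not a dataset row; it assumes the method is reachable as a static on ColumnFamilyDescriptorBuilder, as its row name suggests).

// Sketch: validate family names before building a descriptor.
byte[] ok = ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(Bytes.toBytes("info")); // returns the input bytes
try {
  ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(Bytes.toBytes(".hidden"));
} catch (IllegalArgumentException e) {
  // rejected: family names cannot start with a period
}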
hbase_ColumnFamilyDescriptorBuilder_setCompactionCompressionType_rdh | /**
* Compression types supported in hbase. LZO is not bundled as part of the hbase distribution.
* See <a href="http://hbase.apache.org/book.html#lzo.compression">LZO Compression</a> for
* how to enable it.
*
* @param type
* Compression type setting.
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setCompactionCompressionType(Compression.Algorithm type) {
return setValue(COMPRESSION_COMPACT_BYTES, type.name());
} | 3.26 |
hbase_ColumnFamilyDescriptorBuilder_setStoragePolicy_rdh | /**
* Set the storage policy for use with this family
*
* @param policy
* the policy to set, valid setting includes: <i>"LAZY_PERSIST"</i>,
* <i>"ALL_SSD"</i>, <i>"ONE_SSD"</i>, <i>"HOT"</i>, <i>"WARM"</i>, <i>"COLD"</i>
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setStoragePolicy(String policy) {
return setValue(STORAGE_POLICY_BYTES, policy);
} | 3.26 |
hbase_ColumnFamilyDescriptorBuilder_setPrefetchBlocksOnOpen_rdh | /**
* Set the setPrefetchBlocksOnOpen flag
*
* @param value
* true if we should prefetch blocks into the blockcache on open
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setPrefetchBlocksOnOpen(boolean value) {
return setValue(PREFETCH_BLOCKS_ON_OPEN_BYTES, Boolean.toString(value));
} | 3.26 |
hbase_ColumnFamilyDescriptorBuilder_setConfiguration_rdh | /**
* Setter for storing a configuration setting in {@link #configuration} map.
*
* @param key
* Config key. Same as XML config key e.g. hbase.something.or.other.
* @param value
* String value. If null, removes the configuration.
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setConfiguration(String key, String value) {
if ((value == null) || (value.length() == 0)) {
f1.remove(key);
} else {
f1.put(key, value);
}
return this;
} | 3.26 |
hbase_ColumnFamilyDescriptorBuilder_isNewVersionBehavior_rdh | /**
* By default, HBase only considers the timestamp in versions, so a previous Delete with a higher ts
* will mask a later Put with a lower ts. Set this to true to enable the new semantics of versions. We
* will also consider mvcc in versions. See HBASE-15968 for details.
*/
@Override
public boolean isNewVersionBehavior() {
return getStringOrDefault(NEW_VERSION_BEHAVIOR_BYTES, Boolean::parseBoolean, DEFAULT_NEW_VERSION_BEHAVIOR);
} | 3.26 |
hbase_ColumnFamilyDescriptorBuilder_m4_rdh | /**
* Sets the mob threshold of the family.
*
* @param threshold
* The mob threshold.
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor m4(long threshold) {
return setValue(MOB_THRESHOLD_BYTES, String.valueOf(threshold));
} | 3.26 |
hbase_ColumnFamilyDescriptorBuilder_setDataBlockEncoding_rdh | /**
* Set data block encoding algorithm used in block cache.
*
* @param type
* What kind of data block encoding will be used.
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setDataBlockEncoding(DataBlockEncoding type) {
return setValue(DATA_BLOCK_ENCODING_BYTES, type == null ? DataBlockEncoding.NONE.name() : type.name());
} | 3.26 |
hbase_ColumnFamilyDescriptorBuilder_setInMemory_rdh | /**
* Set the inMemory flag
*
* @param inMemory
* True if we are to favor keeping all values for this column family in the
* HRegionServer cache
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setInMemory(boolean inMemory) {
return setValue(IN_MEMORY_BYTES, Boolean.toString(inMemory));
} | 3.26 |
hbase_ColumnFamilyDescriptorBuilder_setMobCompactPartitionPolicy_rdh | /**
* Set the mob compact partition policy for the family.
*
* @param policy
* policy type
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setMobCompactPartitionPolicy(MobCompactPartitionPolicy policy) {
return setValue(MOB_COMPACT_PARTITION_POLICY_BYTES, policy.name());
} | 3.26 |
hbase_ColumnFamilyDescriptorBuilder_setCacheDataOnWrite_rdh | /**
* Set the setCacheDataOnWrite flag
*
* @param value
* true if we should cache data blocks on write
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setCacheDataOnWrite(boolean value) {
return setValue(CACHE_DATA_ON_WRITE_BYTES, Boolean.toString(value));
} | 3.26 |
hbase_ColumnFamilyDescriptorBuilder_toByteArray_rdh | /**
* Returns This instance serialized with pb with pb magic prefix
*/
private byte[] toByteArray() {
return ProtobufUtil.prependPBMagic(ProtobufUtil.toColumnFamilySchema(this).toByteArray());
} | 3.26 |
hbase_ColumnFamilyDescriptorBuilder_setMobEnabled_rdh | /**
* Enables the mob for the family.
*
* @param isMobEnabled
* Whether to enable the mob for the family.
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setMobEnabled(boolean isMobEnabled) {
return setValue(IS_MOB_BYTES, String.valueOf(isMobEnabled));
} | 3.26 |
hbase_ColumnFamilyDescriptorBuilder_setEncryptionKey_rdh | /**
* Set the raw crypto key attribute for the family
*
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setEncryptionKey(byte[] keyBytes) {
return setValue(ENCRYPTION_KEY_BYTES, new Bytes(keyBytes));
} | 3.26 |
hbase_ColumnFamilyDescriptorBuilder_setMaxVersions_rdh | /**
* Set the maximum number of versions to retain.
*
* @param maxVersions
* maximum number of versions
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setMaxVersions(int maxVersions) {
if (maxVersions <= 0) {
// TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
// Until there is support, consider 0 or < 0 -- a configuration error.
throw new IllegalArgumentException("Maximum versions must be positive");
}
if (maxVersions < this.getMinVersions()) {
throw new IllegalArgumentException(((("Set MaxVersion to " + maxVersions) + " while minVersion is ") + this.getMinVersions()) + ". Maximum versions must be >= minimum versions ");
}
setValue(MAX_VERSIONS_BYTES, Integer.toString(maxVersions));
return this;
} | 3.26 |
hbase_ColumnFamilyDescriptorBuilder_setBlockCacheEnabled_rdh | /**
* Set the blockCacheEnabled flag
*
* @param blockCacheEnabled
* True if hfile DATA type blocks should be cached (We always cache
* INDEX and BLOOM blocks; you cannot turn this off).
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) {
return setValue(BLOCKCACHE_BYTES, Boolean.toString(blockCacheEnabled));
} | 3.26 |
hbase_ColumnFamilyDescriptorBuilder_setEncryptionType_rdh | /**
* Set the encryption algorithm for use with this family
*
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setEncryptionType(String algorithm) {
return setValue(ENCRYPTION_BYTES, algorithm);
} | 3.26 |
hbase_ColumnFamilyDescriptorBuilder_parseFrom_rdh | /**
* Parse the serialized representation of a {@link ModifyableColumnFamilyDescriptor}
*
* @param bytes
* A pb serialized {@link ModifyableColumnFamilyDescriptor} instance with pb magic
* prefix
* @return An instance of {@link ModifyableColumnFamilyDescriptor} made from <code>bytes</code>
* @see #toByteArray()
*/
private static ColumnFamilyDescriptor parseFrom(final byte[] bytes) throws DeserializationException {
if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
throw new DeserializationException("No magic");
}
int pblen = ProtobufUtil.lengthOfPBMagic();
ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
ColumnFamilySchema cfs = null;
try {
ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
cfs = builder.build();
} catch (IOException e) {
throw new DeserializationException(e);
}
return ProtobufUtil.toColumnFamilyDescriptor(cfs);
} | 3.26 |
hbase_ClientZKSyncer_start_rdh | /**
* Starts the syncer
*
* @throws KeeperException
* if error occurs when trying to create base nodes on client ZK
*/
public void start() throws KeeperException {
LOG.debug("Starting " + getClass().getSimpleName());
this.watcher.registerListener(this);
// create base znode on remote ZK
ZKUtil.createWithParents(clientZkWatcher, watcher.getZNodePaths().baseZNode);
// set znodes for client ZK
Set<String> paths = getPathsToWatch();
LOG.debug("ZNodes to watch: {}", paths);
// initialize queues and threads
for (String path : paths) {
startNewSyncThread(path);
}
} | 3.26 |
hbase_ClientZKSyncer_setDataForClientZkUntilSuccess_rdh | /**
* Set data for client ZK and retry until it succeeds. Be very careful to prevent an infinite loop
* when modifying this method
*
* @param node
* the znode to set on client ZK
* @param data
* the data to set to client ZK
* @throws InterruptedException
* if the thread is interrupted during process
*/
private void setDataForClientZkUntilSuccess(String node, byte[] data) throws InterruptedException {
boolean create = false;
while (!server.isStopped()) {
try {
LOG.debug((("Set data for remote " + node) + ", client zk watcher: ") + clientZkWatcher);
if (create) {
ZKUtil.createNodeIfNotExistsNoWatch(clientZkWatcher, node, data, CreateMode.PERSISTENT);
} else {
ZKUtil.setData(clientZkWatcher, node, data);
}
break;
} catch (KeeperException e) {
LOG.debug("Failed to set data for {} to client ZK, will retry later", node, e);
if (e.code() == Code.SESSIONEXPIRED) {
reconnectAfterExpiration();
}
if (e.code() == Code.NONODE) {
create = true;
}
if (e.code() == Code.NODEEXISTS) {
create = false;
}
}
Threads.sleep(HConstants.SOCKET_RETRY_WAIT_MS);
}
} | 3.26 |
hbase_ClientZKSyncer_upsertQueue_rdh | /**
* Update the value of the single element in queue if any, or else insert.
* <p/>
* We only need to synchronize the latest znode value to client ZK rather than synchronize each
* time
*
* @param data
* the data to write to queue
*/
private void upsertQueue(String node, byte[] data) {
ZKData zkData = queues.get(node);
if (zkData != null) {
zkData.set(data);
}
} | 3.26 |
hbase_NoOpIndexBlockEncoder_writeRoot_rdh | /**
* Writes this chunk into the given output stream in the root block index format. This format is
* similar to the {@link HFile} version 1 block index format, except that we store on-disk size of
* the block instead of its uncompressed size.
*
* @param out
* the data output stream to write the block index to. Typically a stream writing into
* an {@link HFile} block.
*/
private void writeRoot(BlockIndexChunk blockIndexChunk, DataOutput out) throws IOException {
for (int i = 0; i < blockIndexChunk.getNumEntries(); ++i) {
out.writeLong(blockIndexChunk.getBlockOffset(i));
out.writeInt(blockIndexChunk.getOnDiskDataSize(i));
Bytes.writeByteArray(out, blockIndexChunk.getBlockKey(i));
}
} | 3.26 |
hbase_NoOpIndexBlockEncoder_writeNonRoot_rdh | /**
* Writes the block index chunk in the non-root index block format. This format contains the
* number of entries, an index of integer offsets for quick binary search on variable-length
* records, and tuples of block offset, on-disk block size, and the first key for each entry.
*/
private void writeNonRoot(BlockIndexChunk blockIndexChunk, DataOutput out) throws IOException {
// The number of entries in the block.
out.writeInt(blockIndexChunk.getNumEntries());
if (blockIndexChunk.getSecondaryIndexOffsetMarks().size() != blockIndexChunk.getBlockKeys().size()) {
throw new IOException(((("Corrupted block index chunk writer: " + blockIndexChunk.getBlockKeys().size()) + " entries but ") + blockIndexChunk.getSecondaryIndexOffsetMarks().size()) + " secondary index items");
}
// For each entry, write a "secondary index" of relative offsets to the
// entries from the end of the secondary index. This works, because at
// read time we read the number of entries and know where the secondary
// index ends.
for (int currentSecondaryIndex : blockIndexChunk.getSecondaryIndexOffsetMarks())
out.writeInt(currentSecondaryIndex);
// We include one other element in the secondary index to calculate the
// size of each entry more easily by subtracting secondary index elements.
out.writeInt(blockIndexChunk.getCurTotalNonRootEntrySize());
for (int i = 0; i < blockIndexChunk.getNumEntries(); ++i) {
out.writeLong(blockIndexChunk.getBlockOffset(i));
out.writeInt(blockIndexChunk.getOnDiskDataSize(i));
out.write(blockIndexChunk.getBlockKey(i));
}
} | 3.26 |
hbase_InclusiveStopFilter_toByteArray_rdh | /**
* Returns The filter serialized using pb
*/
@Override
public byte[] toByteArray() {
FilterProtos.InclusiveStopFilter.Builder builder = FilterProtos.InclusiveStopFilter.newBuilder();
if (this.stopRowKey != null)
builder.setStopRowKey(UnsafeByteOperations.unsafeWrap(this.stopRowKey));
return builder.build().toByteArray();
} | 3.26 |
hbase_InclusiveStopFilter_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof InclusiveStopFilter)) {
return false;
}
InclusiveStopFilter other = ((InclusiveStopFilter) (o));
return Bytes.equals(this.getStopRowKey(), other.getStopRowKey());
} | 3.26 |
hbase_InclusiveStopFilter_parseFrom_rdh | /**
* Parse a serialized representation of {@link InclusiveStopFilter}
*
* @param pbBytes
* A pb serialized {@link InclusiveStopFilter} instance
* @return An instance of {@link InclusiveStopFilter} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static InclusiveStopFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.InclusiveStopFilter proto;
try {
proto = FilterProtos.InclusiveStopFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
return new InclusiveStopFilter(proto.hasStopRowKey() ? proto.getStopRowKey().toByteArray() : null);
} | 3.26 |
hbase_CopyOnWriteArrayMap_comparator_rdh | /* Unsynchronized read operations. No locking. No waiting. No copying. These should all be FAST. */
@Override
public Comparator<? super K> comparator() {
return keyComparator;
} | 3.26 |
hbase_CompactionContext_forceSelect_rdh | /**
* Forces external selection to be applied for this compaction.
*
* @param request
* The pre-cooked request with selection and other settings.
*/
public void forceSelect(CompactionRequestImpl request) {
this.request = request;
} | 3.26 |
hbase_CellBlockBuilder_createCellScanner_rdh | /**
* Create a cell scanner.
*
* @param codec
* to use for cellblock
* @param cellBlock
* to encode
* @return CellScanner to work against the content of <code>cellBlock</code>
* @throws IOException
* if encoding fails
*/
public CellScanner createCellScanner(final Codec codec, final CompressionCodec compressor, final byte[] cellBlock) throws IOException {
// Use this method from Client side to create the CellScanner
if (compressor != null) {
ByteBuffer cellBlockBuf = decompress(compressor, cellBlock);
return codec.getDecoder(new ByteBufferInputStream(cellBlockBuf));
}
// Not making the Decoder over the ByteBuffer purposefully. The Decoder over the BB will
// make Cells directly over the passed BB. This method is called at client side and we don't
// want the Cells to share the same byte[] where the RPC response is being read. Caching of any
// of the Cells at user's app level will make it not possible to GC the response byte[]
return codec.getDecoder(new ByteArrayInputStream(cellBlock));
} | 3.26 |
hbase_CloneSnapshotProcedure_preCloneSnapshot_rdh | /**
* Action before cloning from snapshot.
*
* @param env
* MasterProcedureEnv
*/
private void preCloneSnapshot(final MasterProcedureEnv env) throws IOException, InterruptedException {
if (!getTableName().isSystemTable()) {
// Check and update namespace quota
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
SnapshotManifest manifest = SnapshotManifest.open(env.getMasterConfiguration(), mfs.getFileSystem(), SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, mfs.getRootDir()), snapshot);
ProcedureSyncWait.getMasterQuotaManager(env).checkNamespaceTableAndRegionQuota(getTableName(), manifest.getRegionManifestsMap().size());
}
final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
if (cpHost != null) {
cpHost.preCreateTableAction(tableDescriptor, null, getUser());
}
} | 3.26 |
hbase_CloneSnapshotProcedure_createFilesystemLayout_rdh | /**
* Create regions in file system.
*
* @param env
* MasterProcedureEnv
*/
private List<RegionInfo> createFilesystemLayout(final MasterProcedureEnv env, final TableDescriptor tableDescriptor, final List<RegionInfo> newRegions) throws IOException {
return createFsLayout(env, tableDescriptor, newRegions, new CreateHdfsRegions() {
@Override
public List<RegionInfo> createHdfsRegions(final MasterProcedureEnv env, final Path tableRootDir, final TableName tableName,
final List<RegionInfo> newRegions) throws IOException {
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
final FileSystem fs = mfs.getFileSystem();
final Path rootDir = mfs.getRootDir();
final Configuration conf = env.getMasterConfiguration();
final ForeignExceptionDispatcher monitorException = new ForeignExceptionDispatcher();
getMonitorStatus().setStatus("Clone snapshot - creating regions for table: " + tableName);
try {
// 1. Execute the on-disk Clone
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
SnapshotManifest manifest = SnapshotManifest.open(conf, fs,
snapshotDir, snapshot);
RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper(conf, fs, manifest, tableDescriptor, tableRootDir, monitorException, monitorStatus);
RestoreSnapshotHelper.RestoreMetaChanges metaChanges = restoreHelper.restoreHdfsRegions();
// Clone operation should not have stuff to restore or remove
Preconditions.checkArgument(!metaChanges.hasRegionsToRestore(), "A clone should not have regions to restore");
Preconditions.checkArgument(!metaChanges.hasRegionsToRemove(), "A clone should not have regions to remove");
// At this point the clone is complete. Next step is enabling the table.
String msg = ((("Clone snapshot=" + snapshot.getName()) + " on table=") + tableName) + " completed!";
LOG.info(msg);
monitorStatus.setStatus(msg + " Waiting for table to be enabled...");
// 2. Let the next step to add the regions to meta
return metaChanges.getRegionsToAdd();
} catch (Exception e) {
String msg = (("clone snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot)) + " failed because ") + e.getMessage();
LOG.error(msg, e);
IOException rse = new RestoreSnapshotException(msg, e, ProtobufUtil.createSnapshotDesc(snapshot));
// these handlers aren't futures so we need to register the error here.
monitorException.receive(new ForeignException("Master CloneSnapshotProcedure", rse));
throw rse;
}
}
});
} | 3.26 |
hbase_CloneSnapshotProcedure_createFsLayout_rdh | /**
* Create region layout in file system.
*
* @param env
* MasterProcedureEnv
*/
private List<RegionInfo> createFsLayout(final MasterProcedureEnv env, final TableDescriptor tableDescriptor, List<RegionInfo> newRegions, final CreateHdfsRegions hdfsRegionHandler) throws IOException {
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
// 1. Create Table Descriptor
// using a copy of descriptor, table will be created enabling first
final Path tableDir = CommonFSUtils.getTableDir(mfs.getRootDir(), tableDescriptor.getTableName());
if (CommonFSUtils.isExists(mfs.getFileSystem(), tableDir)) {
// if the region dirs exist, will cause exception and unlimited retry (see HBASE-24546)
LOG.warn("temp table dir already exists on disk: {}, will be deleted.", tableDir);
CommonFSUtils.deleteDirectory(mfs.getFileSystem(), tableDir);
}
((FSTableDescriptors) (env.getMasterServices().getTableDescriptors())).createTableDescriptorForTableDirectory(tableDir, TableDescriptorBuilder.newBuilder(tableDescriptor).build(), false);
// 2. Create Regions
newRegions = hdfsRegionHandler.createHdfsRegions(env, mfs.getRootDir(), tableDescriptor.getTableName(), newRegions);
return newRegions;
} | 3.26 |
hbase_CloneSnapshotProcedure_postCloneSnapshot_rdh | /**
* Action after cloning from snapshot.
*
* @param env
* MasterProcedureEnv
*/
private void postCloneSnapshot(final MasterProcedureEnv env) throws IOException, InterruptedException {
final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
if (cpHost != null) {
final RegionInfo[] regions = (newRegions == null) ? null : newRegions.toArray(new RegionInfo[newRegions.size()]);
cpHost.postCompletedCreateTableAction(tableDescriptor, regions, getUser());
}
} | 3.26 |
hbase_CloneSnapshotProcedure_getRestoreAcl_rdh | /**
* Exposed for Testing: HBASE-26462
*/
@RestrictedApi(explanation = "Should only be called in tests", link = "", allowedOnPath = ".*/src/test/.*")
public boolean getRestoreAcl() {
return restoreAcl;
} | 3.26 |
hbase_CloneSnapshotProcedure_addRegionsToMeta_rdh | /**
* Add regions to hbase:meta table.
*
* @param env
* MasterProcedureEnv
*/
private void addRegionsToMeta(final MasterProcedureEnv env) throws IOException {
newRegions = CreateTableProcedure.addTableToMeta(env, tableDescriptor, newRegions);
// TODO: parentsToChildrenPairMap is always empty, which makes updateMetaParentRegions()
// a no-op. This part seems unnecessary. Figure out. - Appy 12/21/17
RestoreSnapshotHelper.RestoreMetaChanges metaChanges = new RestoreSnapshotHelper.RestoreMetaChanges(tableDescriptor, parentsToChildrenPairMap);
metaChanges.updateMetaParentRegions(env.getMasterServices().getConnection(), newRegions);
} | 3.26 |
hbase_CloneSnapshotProcedure_getMonitorStatus_rdh | /**
* Set up monitor status if it is not created.
*/
private MonitoredTask getMonitorStatus() {
if (monitorStatus == null) {
monitorStatus = TaskMonitor.get().createStatus((("Cloning snapshot '" + snapshot.getName()) + "' to table ") + getTableName());
}
return monitorStatus;
} | 3.26 |
hbase_CloneSnapshotProcedure_prepareClone_rdh | /**
* Action before any real action of cloning from snapshot.
*
* @param env
* MasterProcedureEnv
*/
private void prepareClone(final MasterProcedureEnv env) throws IOException {
final TableName tableName = getTableName();
if (env.getMasterServices().getTableDescriptors().exists(tableName)) {
throw new TableExistsException(tableName);
}
// check whether ttl has expired for this snapshot
if (SnapshotDescriptionUtils.isExpiredSnapshot(snapshot.getTtl(), snapshot.getCreationTime(), EnvironmentEdgeManager.currentTime())) {
throw new SnapshotTTLExpiredException(ProtobufUtil.createSnapshotDesc(snapshot));
}
validateSFT();
} | 3.26 |
hbase_ClusterStatusTracker_setClusterDown_rdh | /**
* Sets the cluster as down by deleting the znode.
*
* @throws KeeperException
* unexpected zk exception
*/
public void setClusterDown() throws KeeperException {
try {
ZKUtil.deleteNode(watcher, watcher.getZNodePaths().clusterStateZNode);
} catch (KeeperException.NoNodeException nne) {
LOG.warn((("Attempted to set cluster as down but already down, cluster " + "state node (") + watcher.getZNodePaths().clusterStateZNode) + ") not found");
}
} | 3.26 |
hbase_ClusterStatusTracker_setClusterUp_rdh | /**
* Sets the cluster as up.
*
* @throws KeeperException
* unexpected zk exception
*/
public void setClusterUp() throws KeeperException {
byte[] upData = toByteArray();
try {
ZKUtil.createAndWatch(watcher, watcher.getZNodePaths().clusterStateZNode, upData);
} catch (KeeperException.NodeExistsException nee) {
ZKUtil.setData(watcher, watcher.getZNodePaths().clusterStateZNode, upData);
}
} | 3.26 |
hbase_TableSchemaModel_getName_rdh | /**
* Returns the table name
*/
@XmlAttribute
public String getName() {
return name;
} | 3.26 |
hbase_TableSchemaModel_getColumns_rdh | /**
* Returns the columns
*/
@XmlElement(name = "ColumnSchema")
public List<ColumnSchemaModel> getColumns() {
return columns;
} | 3.26 |
hbase_TableSchemaModel_toString_rdh | /* (non-Javadoc)
@see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{ NAME=> '");
sb.append(name);
sb.append('\'');
for (Map.Entry<QName, Object> e : attrs.entrySet()) {
sb.append(", ");
sb.append(e.getKey().getLocalPart());
sb.append(" => '");
sb.append(e.getValue().toString());
sb.append('\'');
}
sb.append(", COLUMNS => [ ");Iterator<ColumnSchemaModel> i = columns.iterator();
while (i.hasNext()) {
ColumnSchemaModel family = i.next();
sb.append(family.toString());
if (i.hasNext()) {
sb.append(',');
}
sb.append(' ');
}
sb.append("] }");
return sb.toString();
} | 3.26 |
hbase_TableSchemaModel___getIsMeta_rdh | // getters and setters for common schema attributes
// cannot be standard bean type getters and setters, otherwise this would
// confuse JAXB
/**
* Returns true if the IS_META attribute exists and is true
*/
public boolean __getIsMeta() {
Object o = attrs.get(IS_META);
return (o != null) && Boolean.parseBoolean(o.toString());
} | 3.26 |
hbase_TableSchemaModel_setColumns_rdh | /**
*
* @param columns
* the columns to set
*/
public void setColumns(List<ColumnSchemaModel> columns) {
this.columns = columns;
} | 3.26 |
hbase_TableSchemaModel_addAttribute_rdh | /**
* Add an attribute to the table descriptor
*
* @param name
* attribute name
* @param value
* attribute value
*/
@JsonAnySetter
public void addAttribute(String name, Object value) {
attrs.put(new QName(name), value);
} | 3.26 |
hbase_TableSchemaModel_setName_rdh | /**
*
* @param name
* the table name
*/
public void setName(String name) {
this.name = name;
} | 3.26 |
hbase_TableSchemaModel___getReadOnly_rdh | /**
* Returns true if the READONLY attribute exists and is true
*/
public boolean __getReadOnly() {
Object o = attrs.get(READONLY);
return o != null ? Boolean.parseBoolean(o.toString()) : TableDescriptorBuilder.DEFAULT_READONLY;
} | 3.26 |
hbase_TableSchemaModel_addColumnFamily_rdh | /**
* Add a column family to the table descriptor
*
* @param family
* the column family model
*/
public void addColumnFamily(ColumnSchemaModel family) {
columns.add(family);
} | 3.26 |
hbase_TableSchemaModel___setIsMeta_rdh | /**
*
* @param value
* desired value of IS_META attribute
*/
public void __setIsMeta(boolean value) {
attrs.put(IS_META, Boolean.toString(value));
} | 3.26 |
hbase_TableSchemaModel_m0_rdh | /**
* Returns true if the IS_ROOT attribute exists and is true
*/
public boolean m0() {
Object o = attrs.get(IS_ROOT);
return (o != null) && Boolean.parseBoolean(o.toString());
} | 3.26 |
hbase_TableSchemaModel_getAny_rdh | /**
* Returns the map for holding unspecified (user) attributes
*/
@XmlAnyAttribute
@JsonAnyGetter
public Map<QName, Object> getAny() {
return attrs;
} | 3.26 |
hbase_TableSchemaModel_getColumnFamily_rdh | /**
* Retrieve the column family at the given index from the table descriptor
*
* @param index
* the index
* @return the column family model
*/
public ColumnSchemaModel getColumnFamily(int index) {
return columns.get(index);
} | 3.26 |
hbase_TableSchemaModel_getTableDescriptor_rdh | /**
* Returns a table descriptor
*/
@JsonIgnore
public TableDescriptor getTableDescriptor() {
TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(TableName.valueOf(getName()));
for (Map.Entry<QName, Object> e : getAny().entrySet()) {
tableDescriptorBuilder.setValue(e.getKey().getLocalPart(), e.getValue().toString());
}
for (ColumnSchemaModel column : getColumns()) {
ColumnFamilyDescriptorBuilder cfdb = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(column.getName()));
for (Map.Entry<QName, Object> e : column.getAny().entrySet()) {
cfdb.setValue(e.getKey().getLocalPart(), e.getValue().toString());
}
tableDescriptorBuilder.setColumnFamily(cfdb.build());
}
return tableDescriptorBuilder.build();
} | 3.26 |
hbase_TableSchemaModel___setIsRoot_rdh | /**
*
* @param value
* desired value of IS_ROOT attribute
*/
public void __setIsRoot(boolean value) {
attrs.put(IS_ROOT, Boolean.toString(value));
} | 3.26 |
hbase_TableSchemaModel_getAttribute_rdh | /**
* Return a table descriptor value as a string. Calls toString() on the object stored in the
* descriptor value map.
*
* @param name
* the attribute name
* @return the attribute value
*/
public String getAttribute(String name) {
Object o = attrs.get(new QName(name));
return o != null ? o.toString() : null;
} | 3.26 |
hbase_TableSchemaModel___setReadOnly_rdh | /**
*
* @param value
* desired value of READONLY attribute
*/
public void __setReadOnly(boolean value) {
attrs.put(READONLY, Boolean.toString(value));
} | 3.26 |
hbase_TBoundedThreadPoolServer_shutdownServer_rdh | /**
* Loop until {@link ExecutorService#awaitTermination} finally does return without an interrupted
* exception. If we don't do this, then we'll shut down prematurely. We want to let the executor
* service clear its task queue, closing client sockets appropriately.
*/
private void shutdownServer() {
executorService.shutdown();
long msLeftToWait = serverOptions.stopTimeoutUnit.toMillis(serverOptions.stopTimeoutVal);
long timeMillis = EnvironmentEdgeManager.currentTime();
LOG.info((("Waiting for up to " + msLeftToWait) + " ms to finish processing") + " pending requests");
boolean interrupted = false;
while (msLeftToWait >= 0) {
try {
executorService.awaitTermination(msLeftToWait, TimeUnit.MILLISECONDS);
break;
} catch (InterruptedException ix) {
long timePassed = EnvironmentEdgeManager.currentTime() - timeMillis;
msLeftToWait -= timePassed;
timeMillis += timePassed;
interrupted = true;
}
}
LOG.info(("Interrupting all worker threads and waiting for " + TIME_TO_WAIT_AFTER_SHUTDOWN_MS) +
" ms longer");
// This will interrupt all the threads, even those running a task.
executorService.shutdownNow();
Threads.sleepWithoutInterrupt(TIME_TO_WAIT_AFTER_SHUTDOWN_MS);
// Preserve the interrupted status.
if (interrupted) {
Thread.currentThread().interrupt();
}
LOG.info("Thrift server shutdown complete");
} | 3.26 |
hbase_TBoundedThreadPoolServer_run_rdh | /**
* Loops on processing a client forever
*/
@Override
public void run() {
TProcessor processor = null;
TTransport inputTransport = null;
TTransport outputTransport = null;
TProtocol inputProtocol = null;
TProtocol outputProtocol = null;
try {
processor = processorFactory_.getProcessor(client);
inputTransport = inputTransportFactory_.getTransport(client);
outputTransport = outputTransportFactory_.getTransport(client);
inputProtocol = inputProtocolFactory_.getProtocol(inputTransport);
outputProtocol = outputProtocolFactory_.getProtocol(outputTransport);
// we check stopped_ first to make sure we're not supposed to be shutting
// down. this is necessary for graceful shutdown.
while (true) {
if (f0) {
break;
}
processor.process(inputProtocol, outputProtocol);
}
} catch (TTransportException ttx) {
// Assume the client died and continue silently
} catch (TException tx) {
LOG.error("Thrift error occurred during processing of message.", tx);
} catch (Exception x) {
LOG.error("Error occurred during processing of message.", x);
}
if (inputTransport != null) {
inputTransport.close();
}
if (outputTransport != null) {
outputTransport.close();
}
} | 3.26 |
hbase_CheckAndMutateResult_isSuccess_rdh | /**
* Returns Whether the CheckAndMutate operation is successful or not
*/
public boolean isSuccess() {
return success;
} | 3.26 |
hbase_CheckAndMutateResult_getResult_rdh | /**
* Returns It is used only for CheckAndMutate operations with Increment/Append. Otherwise null
*/
public Result getResult() {
return result;
} | 3.26 |
hbase_ConnectionSpanBuilder_populateConnectionAttributes_rdh | /**
* Static utility method that performs the primary logic of this builder. It is visible to other
* classes in this package so that other builders can use this functionality as a mix-in.
*
* @param attributes
* the attributes map to be populated.
* @param connectionStringSupplier
* the source of the {@code db.connection_string} attribute value.
* @param userSupplier
* the source of the {@code db.user} attribute value.
*/
static void populateConnectionAttributes(final Map<AttributeKey<?>, Object> attributes, final Supplier<String> connectionStringSupplier, final Supplier<User> userSupplier) {
attributes.put(DB_SYSTEM, DB_SYSTEM_VALUE);
attributes.put(DB_CONNECTION_STRING, connectionStringSupplier.get());
attributes.put(DB_USER, Optional.ofNullable(userSupplier.get()).map(Object::toString).orElse(null));
} | 3.26 |
hbase_CreateNamespaceProcedure_prepareCreate_rdh | /**
* Action before any real action of creating namespace.
*
* @param env
* MasterProcedureEnv
*/
private boolean prepareCreate(final MasterProcedureEnv env) throws IOException {
if (getTableNamespaceManager(env).doesNamespaceExist(nsDescriptor.getName())) {
setFailure("master-create-namespace", new NamespaceExistException(("Namespace " + nsDescriptor.getName()) + " already exists"));
return false;
}
getTableNamespaceManager(env).validateTableAndRegionCount(nsDescriptor);
checkNamespaceRSGroup(env, nsDescriptor);
return true;
} | 3.26 |