name | code_snippet | score |
---|---|---|
hbase_StorageClusterStatusModel_setMaxHeapSizeMB_rdh | /**
*
* @param maxHeapSizeMB
* the maximum heap size, in MB
*/
public void setMaxHeapSizeMB(int maxHeapSizeMB) {
this.maxHeapSizeMB = maxHeapSizeMB;
} | 3.26 |
hbase_StorageClusterStatusModel_setWriteRequestsCount_rdh | /**
*
* @param writeRequestsCount
* The current total write requests made to region
*/
public void setWriteRequestsCount(long writeRequestsCount) {
this.writeRequestsCount = writeRequestsCount;
} | 3.26 |
hbase_StorageClusterStatusModel_getAverageLoad_rdh | /**
* Returns the average load of the region servers in the cluster
*/
@XmlAttribute
public double getAverageLoad() {
return averageLoad;
} | 3.26 |
hbase_StorageClusterStatusModel_getRequests_rdh | /**
* Returns the number of requests per second processed by the region server
*/
@XmlAttribute
public long getRequests() {
return requests;
} | 3.26 |
hbase_StorageClusterStatusModel_setCpRequestsCount_rdh | /**
*
* @param cpRequestsCount
* The current total coprocessor requests made to region
*/
public void setCpRequestsCount(long cpRequestsCount) {
this.cpRequestsCount = cpRequestsCount;
} | 3.26 |
hbase_StorageClusterStatusModel_m3_rdh | /**
* Returns the list of live nodes
*/
// workaround https://github.com/FasterXML/jackson-dataformat-xml/issues/192
@XmlElement(name = "Node")
@XmlElementWrapper(name = "LiveNodes")
@JsonProperty("LiveNodes")
public List<Node> m3() {
return liveNodes;
} | 3.26 |
hbase_StorageClusterStatusModel_setDeadNodes_rdh | /**
*
* @param nodes
* the list of dead node names
*/
public void setDeadNodes(List<String> nodes) {
this.deadNodes = nodes;
} | 3.26 |
hbase_StorageClusterStatusModel_setStorefiles_rdh | /**
*
* @param storefiles
* the number of store files
*/
public void setStorefiles(int storefiles) {
this.storefiles = storefiles;
} | 3.26 |
hbase_StorageClusterStatusModel_setTotalStaticBloomSizeKB_rdh | /**
*
* @param totalStaticBloomSizeKB
* The total size of all Bloom filter blocks, not just loaded
* into the block cache, in KB.
*/
public void setTotalStaticBloomSizeKB(int totalStaticBloomSizeKB) {
this.totalStaticBloomSizeKB = totalStaticBloomSizeKB;
} | 3.26 |
hbase_StorageClusterStatusModel_getTotalCompactingKVs_rdh | /**
* Returns The total number of compacting key-values
*/
@XmlAttribute
public long getTotalCompactingKVs() {
return totalCompactingKVs;
} | 3.26 |
hbase_StorageClusterStatusModel_getStartCode_rdh | /**
* Returns the region server's start code
*/
@XmlAttribute
public long getStartCode() {
return startCode;
} | 3.26 |
hbase_StorageClusterStatusModel_getCpRequestsCount_rdh | /**
* Returns the current total coprocessor requests made to region
*/
@XmlAttribute
public long getCpRequestsCount() {
return cpRequestsCount;
} | 3.26 |
hbase_StorageClusterStatusModel_getMemStoreSizeMB_rdh | /**
* Returns memstore size, in MB
*/
@XmlAttribute
public int getMemStoreSizeMB() {
return memstoreSizeMB;
} | 3.26 |
hbase_StorageClusterStatusModel_addLiveNode_rdh | /**
* Add a live node to the cluster representation.
*
* @param name
* the region server name
* @param startCode
* the region server's start code
* @param heapSizeMB
* the current heap size, in MB
* @param maxHeapSizeMB
* the maximum heap size, in MB
*/
public Node addLiveNode(String name, long startCode, int heapSizeMB, int maxHeapSizeMB) {
Node node = new Node(name, startCode);
node.setHeapSizeMB(heapSizeMB);
node.setMaxHeapSizeMB(maxHeapSizeMB);
liveNodes.add(node);
return node;
} | 3.26 |
hbase_StorageClusterStatusModel_setName_rdh | /**
*
* @param name
* the region server's hostname
*/
public void setName(String name) {
this.name = name;
} | 3.26 |
hbase_StorageClusterStatusModel_getStorefileSizeMB_rdh | /**
* Returns the total size of store files, in MB
*/
@XmlAttribute
public int getStorefileSizeMB() {
return storefileSizeMB;
} | 3.26 |
hbase_AbstractHBaseSaslRpcClient_m0_rdh | /**
* Release resources used by wrapped saslClient
*/
public void m0() {
SaslUtil.safeDispose(saslClient);
} | 3.26 |
hbase_AbstractHBaseSaslRpcClient_getInitialResponse_rdh | /**
* Computes the initial response a client sends to a server to begin the SASL challenge/response
* handshake. If the client's SASL mechanism does not have an initial response, an empty token
* will be returned without querying the evaluateChallenge method, as authentication processing
* must be started by the client.
*
* @return The client's initial response to send the server (which may be empty).
*/
public byte[] getInitialResponse() throws SaslException {
if (saslClient.hasInitialResponse()) {
return saslClient.evaluateChallenge(EMPTY_TOKEN);
}
return EMPTY_TOKEN;
} | 3.26 |
hbase_CompoundConfiguration_size_rdh | // TODO: This method overestimates the number of configuration settings -- if a value is masked
// by an overriding config or map, it will be counted multiple times.
@Override
public int size() {
int ret = 0;
if (mutableConf != null) {
ret += mutableConf.size();
}
for (ImmutableConfigMap m : this.configs) {
ret += m.size();
}
return ret;
} | 3.26 |
hbase_CompoundConfiguration_m2_rdh | /**
* Add String map to config list. This map is generally created by HTableDescriptor or
* HColumnDescriptor, but can be abstractly used. The added configuration overrides the previous
* ones if there are name collisions.
*
* @return this, for builder pattern
*/
public CompoundConfiguration m2(final Map<String, String> map) {
freezeMutableConf();
// put new map at the front of the list (top priority)
this.configs.add(0, new ImmutableConfigMap() {
private final Map<String, String> m = map;
@Override
public Iterator<Map.Entry<String, String>> iterator() {
return map.entrySet().iterator();
}
@Override
public String get(String key) {
return m.get(key);
}
@Override
public String getRaw(String key) {
return get(key);
}
@Override
public Class<?> getClassByName(String name) throws ClassNotFoundException {
return null;
}
@Override
public int size() {
return m.size();
}
@Override
public String toString() {
return m.toString();
}
});
return this;
} | 3.26 |
hbase_CompoundConfiguration_freezeMutableConf_rdh | /**
* If set has been called, it will create a mutableConf. This converts the mutableConf to an
* immutable one and resets it to allow a new mutable conf. This is used when a new map or conf is
* added to the compound configuration to preserve proper override semantics.
*/
void freezeMutableConf() {
if (mutableConf == null) {
// do nothing if there is no current mutableConf
return;
}
this.configs.add(0, new ImmutableConfWrapper(mutableConf));
mutableConf = null;
} | 3.26 |
hbase_CompoundConfiguration_addBytesMap_rdh | /**
* Add Bytes map to config list. This map is generally created by HTableDescriptor or
* HColumnDescriptor, but can be abstractly used. The added configuration overrides the previous
* ones if there are name collisions.
*
* @return this, for builder pattern
*/
public CompoundConfiguration addBytesMap(final Map<Bytes, Bytes> map) {
freezeMutableConf();
// put new map at the front of the list (top priority)
this.configs.add(0, new ImmutableConfigMap() {
private final Map<Bytes, Bytes> m = map;
@Override
public Iterator<Map.Entry<String, String>> iterator() {
Map<String, String> ret = new HashMap<>();
for (Map.Entry<Bytes, Bytes> entry : map.entrySet()) {
String v2 = Bytes.toString(entry.getKey().get());
String val = (entry.getValue() == null) ? null : Bytes.toString(entry.getValue().get());
ret.put(v2, val);
}
return ret.entrySet().iterator();
}
@Override
public String get(String key) {
Bytes ibw = new Bytes(Bytes.toBytes(key));
if (!m.containsKey(ibw))
return null;
Bytes value = m.get(ibw);
if ((value == null) || (value.get() == null))
return null;
return Bytes.toString(value.get());
}
@Override
public String getRaw(String key) {
return get(key);
}
@Override
public Class<?> getClassByName(String name) throws ClassNotFoundException {
return null;
}
@Override
public int size() {
return m.size();
}
@Override
public String toString() {
return m.toString();
}
});
return this;
} | 3.26 |
hbase_CompoundConfiguration_get_rdh | /**
* Get the value of the <code>name</code>. If the key is deprecated, it returns the value of the
* first key which replaces the deprecated key and is not null. If no such property exists, then
* <code>defaultValue</code> is returned. The CompoundConfiguration does not do property
* substitution. To do so we need Configuration.getProps to be protected or package visible.
* Though in hadoop2 it is protected, in hadoop1 the method is private and not accessible. All of
* the get* methods call this overridden get method.
*
* @param name
* property name.
* @param defaultValue
* default value.
* @return property value, or <code>defaultValue</code> if the property doesn't exist.
*/
@Override
public String get(String name, String defaultValue) {
String ret = get(name);
return ret == null ? defaultValue : ret;
} | 3.26 |
hbase_CompoundConfiguration_clear_rdh | /**
* *********************************************************************************************
* These methods are unsupported, and no code using CompoundConfiguration depends upon them.
* Quickly abort upon any attempts to use them.
* ********************************************************************************************
*/
@Override
public void clear() {
throw new UnsupportedOperationException("Immutable Configuration");
} | 3.26 |
hbase_CompoundConfiguration_m1_rdh | /**
* Add Hadoop Configuration object to config list. The added configuration overrides the previous
* ones if there are name collisions.
*
* @param conf
* configuration object
* @return this, for builder pattern
*/
public CompoundConfiguration m1(final Configuration conf) {
freezeMutableConf();
if (conf instanceof CompoundConfiguration) {
this.configs.addAll(0, ((CompoundConfiguration) (conf)).configs);
return this;
}
// put new config at the front of the list (top priority)
this.configs.add(0, new ImmutableConfWrapper(conf));
return this;
} | 3.26 |
hbase_TableOutputFormat_write_rdh | /**
* Writes a key/value pair into the table.
*
* @param key
* The key.
* @param value
* The value.
* @throws IOException
* When writing fails.
* @see RecordWriter#write(Object, Object)
*/
@Override
public void write(KEY key, Mutation value) throws IOException {
if ((!(value instanceof Put)) && (!(value instanceof Delete))) {
throw new IOException("Pass a Delete or a Put");
}
mutator.mutate(value);
} | 3.26 |
hbase_TableOutputFormat_getOutputCommitter_rdh | /**
* Returns the output committer.
*
* @param context
* The current context.
* @return The committer.
* @throws IOException
* When creating the committer fails.
* @throws InterruptedException
* When the job is aborted.
* @see OutputFormat#getOutputCommitter(TaskAttemptContext)
*/
@Override
public OutputCommitter getOutputCommitter(TaskAttemptContext context) throws IOException, InterruptedException {
return new TableOutputCommitter();
} | 3.26 |
hbase_TableOutputFormat_getRecordWriter_rdh | /**
* Creates a new record writer. Be aware that the baseline javadoc gives the impression that there
* is a single {@link RecordWriter} per job but in HBase, it is more natural if we give you a new
* RecordWriter per call of this method. You must close the returned RecordWriter when done.
* Failure to do so will drop writes.
*
* @param context
* The current task context.
* @return The newly created writer instance.
* @throws IOException
* When creating the writer fails.
* @throws InterruptedException
* When the job is cancelled.
*/
@Override
public RecordWriter<KEY, Mutation> getRecordWriter(TaskAttemptContext context) throws IOException, InterruptedException {
return new TableRecordWriter();
} | 3.26 |
hbase_TableOutputFormat_checkOutputSpecs_rdh | /**
* Checks if the output table exists and is enabled.
*
* @param context
* The current context.
* @throws IOException
* When the check fails.
* @throws InterruptedException
* When the job is aborted.
* @see OutputFormat#checkOutputSpecs(JobContext)
*/
@Override
public void checkOutputSpecs(JobContext context) throws IOException, InterruptedException {
Configuration hConf = getConf();
if (hConf == null) {
hConf = context.getConfiguration();
}
try (Connection connection = ConnectionFactory.createConnection(hConf); Admin admin = connection.getAdmin()) {
TableName tableName = TableName.valueOf(hConf.get(OUTPUT_TABLE));
if (!admin.tableExists(tableName)) {
throw new TableNotFoundException("Can't write, table does not exist:" + tableName.getNameAsString());
}
if (!admin.isTableEnabled(tableName)) {
throw new TableNotEnabledException("Can't write, table is not enabled: " + tableName.getNameAsString());
}
}
} | 3.26 |
hbase_TableOutputFormat_close_rdh | /**
* Closes the writer, in this case flush table commits.
*
* @param context
* The context.
* @throws IOException
* When closing the writer fails.
* @see RecordWriter#close(TaskAttemptContext)
*/
@Override
public void close(TaskAttemptContext context) throws IOException {
try {
if (mutator != null) {
mutator.close();
}
} finally {
if (connection != null) {
connection.close();
}
}
} | 3.26 |
hbase_CompressionUtil_roundInt2_rdh | /**
* Round up to the next power of two, unless the value would become negative (ints are signed), in
* which case just return Integer.MAX_VALUE.
*/
public static int roundInt2(int v) {
v = Integer.highestOneBit(v) << 1;
if (v < 0) {
return Integer.MAX_VALUE;
}
return v;
} | 3.26 |
hbase_CompressionUtil_compressionOverhead_rdh | /**
* Most compression algorithms can be presented with pathological input that causes an expansion
* rather than a compression. Hadoop's compression API requires that we calculate additional
* buffer space required for the worst case. There is a formula developed for gzip that applies as
* a ballpark to all LZ variants. It should be good enough for now and has been tested as such
* with a range of different inputs.
*/
public static int compressionOverhead(int bufferSize) {
// Given an input buffer of 'buffersize' bytes we presume a worst case expansion of
// 32 bytes (block header) and an additional 1/6th of the input size.
return (bufferSize / 6) + 32;
} | 3.26 |
hbase_TableScanResource_getIterator_rdh | // jackson needs an iterator for streaming
@JsonProperty("Row")
public Iterator<RowModel> getIterator() {
return Row.iterator();
} | 3.26 |
hbase_PendingWatcher_prepare_rdh | /**
* Associates the substantial watcher of processing events. This method should be called once, and
* {@code watcher} should be non-null. This method is expected to call as soon as possible because
* the event processing, being invoked by the ZooKeeper event thread, is uninterruptibly blocked
* until this method is called.
*/
void prepare(Watcher watcher) {
pending.prepare(watcher);
} | 3.26 |
hbase_RefCountingMap_remove_rdh | /**
* Decrements the ref count of k, and removes from map if ref count == 0.
*
* @param k
* the key to remove
* @return the value associated with the specified key or null if key is removed from map.
*/
V remove(K k) {
Payload<V> p = map.computeIfPresent(k, (k1, v) -> (--v.refCount) <= 0 ? null : v);
return p == null ? null : p.v;
} | 3.26 |
hbase_ColumnSchemaModel_addAttribute_rdh | /**
* Add an attribute to the column family schema
*
* @param name
* the attribute name
* @param value
* the attribute value
*/
@JsonAnySetter
public void addAttribute(String name, Object value) {
attrs.put(new QName(name), value);
} | 3.26 |
hbase_ColumnSchemaModel___getTTL_rdh | /**
* Returns the value of the TTL attribute or its default if it is unset
*/
public int __getTTL() {
Object o = attrs.get(TTL);
return o != null ? Integer.parseInt(o.toString()) : ColumnFamilyDescriptorBuilder.DEFAULT_TTL;
} | 3.26 |
hbase_ColumnSchemaModel___setVersions_rdh | /**
*
* @param value
* the desired value of the VERSIONS attribute
*/
public void __setVersions(int value) {
attrs.put(VERSIONS, Integer.toString(value));
} | 3.26 |
hbase_ColumnSchemaModel___setBlocksize_rdh | /**
*
* @param value
* the desired value of the BLOCKSIZE attribute
*/
public void __setBlocksize(int value) {
attrs.put(BLOCKSIZE, Integer.toString(value));
} | 3.26 |
hbase_ColumnSchemaModel___getBlockcache_rdh | // getters and setters for common schema attributes
// cannot be standard bean type getters and setters, otherwise this would
// confuse JAXB
/**
* Returns true if the BLOCKCACHE attribute is present and true
*/
public boolean __getBlockcache() {
Object o = attrs.get(BLOCKCACHE);
return o != null ? Boolean.parseBoolean(o.toString()) : ColumnFamilyDescriptorBuilder.DEFAULT_BLOCKCACHE;
} | 3.26 |
hbase_ColumnSchemaModel___setCompression_rdh | /**
*
* @param value
* the desired value of the COMPRESSION attribute
*/
public void __setCompression(String value) {
attrs.put(COMPRESSION, value);
} | 3.26 |
hbase_ColumnSchemaModel___getBloomfilter_rdh | /**
* Returns the value of the BLOOMFILTER attribute or its default if unset
*/
public String __getBloomfilter() {
Object o = attrs.get(BLOOMFILTER);
return o != null ? o.toString() : ColumnFamilyDescriptorBuilder.DEFAULT_BLOOMFILTER.name();
} | 3.26 |
hbase_ColumnSchemaModel___getInMemory_rdh | /**
* Returns true if the IN_MEMORY attribute is present and true
*/
public boolean __getInMemory() {
Object o = attrs.get(IN_MEMORY);
return o != null ? Boolean.parseBoolean(o.toString()) : ColumnFamilyDescriptorBuilder.DEFAULT_IN_MEMORY;
} | 3.26 |
hbase_ColumnSchemaModel_getAny_rdh | /**
* Returns the map for holding unspecified (user) attributes
*/
@XmlAnyAttribute
@JsonAnyGetter
public Map<QName, Object> getAny() {
return attrs;
} | 3.26 |
hbase_ColumnSchemaModel_toString_rdh | /* (non-Javadoc)
@see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{ NAME => '");
sb.append(name);
sb.append('\'');
for (Map.Entry<QName, Object> e : attrs.entrySet()) {
sb.append(", ");
sb.append(e.getKey().getLocalPart());
sb.append(" => '");
sb.append(e.getValue().toString());
sb.append('\'');
}
sb.append(" }");
return sb.toString();
} | 3.26 |
hbase_ColumnSchemaModel___setTTL_rdh | /**
*
* @param value
* the desired value of the TTL attribute
*/
public void __setTTL(int value) {
attrs.put(TTL, Integer.toString(value));
} | 3.26 |
hbase_ColumnSchemaModel_getAttribute_rdh | /**
*
* @param name
* the attribute name
* @return the attribute value
*/
public String getAttribute(String name) {
Object v0 = attrs.get(new QName(name));
return v0 != null ? v0.toString() : null;
} | 3.26 |
hbase_ColumnSchemaModel_getName_rdh | /**
* Returns the column name
*/
@XmlAttribute
public String getName() {
return name;
} | 3.26 |
hbase_ColumnSchemaModel___getVersions_rdh | /**
* Returns the value of the VERSIONS attribute or its default if it is unset
*/
public int __getVersions() {
Object o = attrs.get(VERSIONS);
return o != null ? Integer.parseInt(o.toString()) : ColumnFamilyDescriptorBuilder.DEFAULT_MAX_VERSIONS;
} | 3.26 |
hbase_ColumnSchemaModel___setBlockcache_rdh | /**
*
* @param value
* the desired value of the BLOCKCACHE attribute
*/
public void __setBlockcache(boolean value) {
attrs.put(BLOCKCACHE, Boolean.toString(value));
} | 3.26 |
hbase_ColumnSchemaModel___getCompression_rdh | /**
* Returns the value of the COMPRESSION attribute or its default if unset
*/
public String __getCompression() {
Object o = attrs.get(COMPRESSION);
return o != null ? o.toString() : ColumnFamilyDescriptorBuilder.DEFAULT_COMPRESSION.name();
} | 3.26 |
hbase_ColumnSchemaModel_setName_rdh | /**
*
* @param name
* the column name
*/
public void setName(String name) {
this.name = name;
} | 3.26 |
hbase_ColumnSchemaModel___setInMemory_rdh | /**
*
* @param value
* the desired value of the IN_MEMORY attribute
*/
public void __setInMemory(boolean value) {
attrs.put(IN_MEMORY, Boolean.toString(value));
} | 3.26 |
hbase_Union3_decodeC_rdh | /**
* Read an instance of the third type parameter from buffer {@code src}.
*/
public C decodeC(PositionedByteRange src) {
return ((C) (decode(src)));
} | 3.26 |
hbase_HFileArchiver_getFileSystem_rdh | /**
* Returns the {@link FileSystem} on which this file resides
*/
public FileSystem getFileSystem() {
return this.fs;
} | 3.26 |
hbase_HFileArchiver_deleteRegionWithoutArchiving_rdh | /**
* Without regard for backup, delete a region. Should be used with caution.
*
* @param regionDir
* {@link Path} to the region to be deleted.
* @param fs
* FileSystem from which to delete the region
* @return <tt>true</tt> on successful deletion, <tt>false</tt> otherwise
* @throws IOException
* on filesystem operation failure
*/
private static boolean deleteRegionWithoutArchiving(FileSystem fs, Path regionDir) throws IOException {
if (fs.delete(regionDir, true)) {
LOG.debug("Deleted {}", regionDir);
return true;
}
LOG.debug("Failed to delete directory {}", regionDir);
return false;
} | 3.26 |
hbase_HFileArchiver_archiveStoreFile_rdh | /**
* Archive the store file
*
* @param fs
* the filesystem where the store files live
* @param regionInfo
* region hosting the store files
* @param conf
* {@link Configuration} to examine to determine the archive directory
* @param tableDir
* {@link Path} to where the table is being stored (for building the archive
* path)
* @param family
* the family hosting the store files
* @param storeFile
* file to be archived
* @throws IOException
* if the files could not be correctly disposed.
*/
public static void archiveStoreFile(Configuration conf, FileSystem fs, RegionInfo regionInfo, Path tableDir, byte[] family, Path storeFile) throws IOException {
Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family);
// make sure we don't archive if we can't and that the archive dir exists
if (!fs.mkdirs(storeArchiveDir)) {
throw new IOException(((("Could not make archive directory (" + storeArchiveDir) + ") for store:") + Bytes.toString(family)) + ", deleting compacted files instead.");
}
// do the actual archive
long start = EnvironmentEdgeManager.currentTime();
File file = new FileablePath(fs, storeFile);
if (!resolveAndArchiveFile(storeArchiveDir, file, Long.toString(start))) {
throw new IOException(((((("Failed to archive/delete the file for region:" + regionInfo.getRegionNameAsString()) + ", family:") + Bytes.toString(family)) + " into ") + storeArchiveDir) + ". Something is probably awry on the filesystem.");
}
}
/**
* Resolve any conflict with an existing archive file via timestamp-append renaming of the
* existing file and then archive the passed in files.
*
* @param fs
* {@link FileSystem} | 3.26 |
hbase_HFileArchiver_getThreadFactory_rdh | // We need this method instead of Threads.getNamedThreadFactory() to pass some tests.
// The difference from Threads.getNamedThreadFactory() is that it doesn't fix ThreadGroup for
// new threads. If we use Threads.getNamedThreadFactory(), we will face ThreadGroup related
// issues in some tests.
private static ThreadFactory getThreadFactory() {
return new ThreadFactory()
{
final AtomicInteger threadNumber = new AtomicInteger(1);
@Override
public Thread newThread(Runnable r) {
final String name = "HFileArchiver-" + threadNumber.getAndIncrement();
Thread t = new Thread(r, name);
t.setDaemon(true);
return t;
}
};
} | 3.26 |
hbase_HFileArchiver_deleteStoreFilesWithoutArchiving_rdh | /**
* Just do a simple delete of the given store files
* <p>
* A best effort is made to delete each of the files, rather than bailing on the first failure.
* <p>
*
* @param compactedFiles
* store files to delete from the file system.
* @throws IOException
* if a file cannot be deleted. All files will be attempted to be deleted before
* throwing the exception, rather than failing at the first file.
*/
private static void deleteStoreFilesWithoutArchiving(Collection<HStoreFile> compactedFiles) throws IOException {
LOG.debug("Deleting files without archiving.");
List<IOException> errors = new ArrayList<>(0);
for (HStoreFile hsf : compactedFiles) {
try {
hsf.deleteStoreFile();
} catch (IOException e) {
LOG.error("Failed to delete {}", hsf.getPath());
errors.add(e);
}
}
if (errors.size() > 0) {
throw MultipleIOException.createIOException(errors);
}
} | 3.26 |
hbase_HFileArchiver_exists_rdh | /**
* Returns true if the region exists in the filesystem.
*/
public static boolean exists(Configuration conf, FileSystem fs, RegionInfo info) throws IOException {
Path rootDir = CommonFSUtils.getRootDir(conf);
Path regionDir = FSUtils.getRegionDirFromRootDir(rootDir, info);
return fs.exists(regionDir);
} | 3.26 |
hbase_HFileArchiver_archiveRegions_rdh | /**
* Archive the specified regions in parallel.
*
* @param conf
* the configuration to use
* @param fs
* {@link FileSystem} from which to remove the region
* @param rootDir
* {@link Path} to the root directory where hbase files are stored (for
* building the archive path)
* @param tableDir
* {@link Path} to where the table is being stored (for building the archive
* path)
* @param regionDirList
* {@link Path} to where regions are being stored (for building the archive
* path)
* @throws IOException
* if the request cannot be completed
*/
public static void archiveRegions(Configuration conf, FileSystem fs, Path rootDir, Path tableDir, List<Path> regionDirList) throws IOException {
List<Future<Void>> v10 = new ArrayList<>(regionDirList.size());
for (Path regionDir : regionDirList) {
Future<Void> future = getArchiveExecutor(conf).submit(() -> {
archiveRegion(fs, rootDir, tableDir, regionDir);
return null;
});
v10.add(future);
}
try {
for (Future<Void> future : v10) {
future.get();
}
} catch (InterruptedException e) {
throw new InterruptedIOException(e.getMessage());
} catch (ExecutionException e) {
throw new IOException(e.getCause());
}
} | 3.26 |
hbase_HFileArchiver_archiveFamilyByFamilyDir_rdh | /**
* Removes from the specified region the store files of the specified column family, either by
* archiving them or outright deletion
*
* @param fs
* the filesystem where the store files live
* @param conf
* {@link Configuration} to examine to determine the archive directory
* @param parent
* Parent region hosting the store files
* @param familyDir
* {@link Path} to where the family is being stored
* @param family
* the family hosting the store files
* @throws IOException
* if the files could not be correctly disposed.
*/
public static void archiveFamilyByFamilyDir(FileSystem fs, Configuration conf, RegionInfo parent, Path familyDir, byte[] family) throws IOException {
FileStatus[] storeFiles = CommonFSUtils.listStatus(fs, familyDir);
if (storeFiles == null) {
LOG.debug("No files to dispose of in {}, family={}", parent.getRegionNameAsString(), Bytes.toString(family));
return;
}
FileStatusConverter getAsFile = new FileStatusConverter(fs);
Collection<File> toArchive = Stream.of(storeFiles).map(getAsFile).collect(Collectors.toList());
Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, parent, family);
// do the actual archive
List<File> failedArchive = m1(fs, storeArchiveDir, toArchive, EnvironmentEdgeManager.currentTime());
if (!failedArchive.isEmpty()) {
throw new FailedArchiveException(((((("Failed to archive/delete all the files for region:" + Bytes.toString(parent.getRegionName())) +
", family:") + Bytes.toString(family)) + " into ") + storeArchiveDir) + ". Something is probably awry on the filesystem.",
failedArchive.stream().map(FUNC_FILE_TO_PATH).collect(Collectors.toList()));
}
} | 3.26 |
hbase_HFileArchiver_archiveFamily_rdh | /**
* Remove from the specified region the store files of the specified column family, either by
* archiving them or outright deletion
*
* @param fs
* the filesystem where the store files live
* @param conf
* {@link Configuration} to examine to determine the archive directory
* @param parent
* Parent region hosting the store files
* @param tableDir
* {@link Path} to where the table is being stored (for building the archive path)
* @param family
* the family hosting the store files
* @throws IOException
* if the files could not be correctly disposed.
*/
public static void archiveFamily(FileSystem fs, Configuration conf, RegionInfo parent, Path tableDir, byte[] family) throws IOException {
Path familyDir = new Path(tableDir, new Path(parent.getEncodedName(), Bytes.toString(family)));
archiveFamilyByFamilyDir(fs, conf, parent, familyDir, family);
} | 3.26 |
hbase_HFileArchiver_moveAndClose_rdh | /**
* Move the file to the given destination
*
* @return <tt>true</tt> on success
*/
public boolean moveAndClose(Path dest) throws IOException {
this.m2();
Path p = this.getPath();
return CommonFSUtils.renameAndSetModifyTime(fs, p, dest);
} | 3.26 |
hbase_HFileArchiver_archiveRegion_rdh | /**
* Cleans up all the files for a HRegion by archiving the HFiles to the archive directory
*
* @param conf
* the configuration to use
* @param fs
* the file system object
* @param info
* RegionInfo for region to be deleted
*/
public static void archiveRegion(Configuration conf, FileSystem fs, RegionInfo info) throws IOException {
Path rootDir = CommonFSUtils.getRootDir(conf);
archiveRegion(fs, rootDir, CommonFSUtils.getTableDir(rootDir, info.getTable()), FSUtils.getRegionDirFromRootDir(rootDir, info));
}
/**
* Remove an entire region from the table directory via archiving the region's hfiles.
*
* @param fs
* {@link FileSystem} from which to remove the region
* @param rootdir
* {@link Path} to the root directory where hbase files are stored (for building
* the archive path)
* @param tableDir
* {@link Path} to where the table is being stored (for building the archive
* path)
* @param regionDir
* {@link Path} | 3.26 |
hbase_HFileArchiver_archiveStoreFiles_rdh | /**
* Remove the store files, either by archiving them or outright deletion
*
* @param conf
* {@link Configuration} to examine to determine the archive directory
* @param fs
* the filesystem where the store files live
* @param regionInfo
* {@link RegionInfo} of the region hosting the store files
* @param family
* the family hosting the store files
* @param compactedFiles
* files to be disposed of. No further reading of these files should be
* attempted; otherwise likely to cause an {@link IOException}
* @throws IOException
* if the files could not be correctly disposed.
*/
public static void archiveStoreFiles(Configuration conf, FileSystem fs, RegionInfo regionInfo, Path tableDir,
byte[] family, Collection<HStoreFile> compactedFiles) throws IOException {
Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family);
archive(fs, regionInfo, family, compactedFiles, storeArchiveDir);
}
/**
* Archive recovered edits using existing logic for archiving store files. This is currently only
* relevant when <b>hbase.region.archive.recovered.edits</b> is true, as recovered edits shouldn't
* be kept after replay. In theory, we could use very same method available for archiving store
* files, but supporting WAL dir and store files on different FileSystems added the need for extra
* validation of the passed FileSystem instance and the path where the archiving edits should be
* placed.
*
* @param conf
* {@link Configuration} to determine the archive directory.
* @param fs
* the filesystem used for storing WAL files.
* @param regionInfo
* {@link RegionInfo} | 3.26 |
hbase_ExecutorService_dumpTo_rdh | /**
* Dump a textual representation of the executor's status to the given writer.
*
* @param out
* the stream to write to
* @param indent
* a string prefix for each line, used for indentation
*/
public void dumpTo(Writer out, String indent) throws IOException {
out.write(((indent + "Status for executor: ") + executor) + "\n");
out.write(indent + "=======================================\n");
out.write((((indent + queuedEvents.size()) + " events queued, ") + running.size()) + " running\n");
if (!queuedEvents.isEmpty()) {
out.write(indent + "Queued:\n");
for (EventHandler e : queuedEvents) {
out.write(((indent + " ") + e) + "\n");
}
out.write("\n");
}
if (!running.isEmpty()) {
out.write(indent + "Running:\n");
for (RunningEventStatus stat : running) {
out.write(((((indent + " Running on thread '") + stat.threadInfo.getThreadName()) + "': ") + stat.event) + "\n");
out.write(ThreadMonitoring.formatThreadInfo(stat.threadInfo, indent + " "));
out.write("\n");
}
}
out.flush();
} | 3.26 |
hbase_ExecutorService_m1_rdh | /**
* Submit the event to the queue for handling.
*/
void m1(final EventHandler event) {
// If there is a listener for this type, make sure we call the before
// and after process methods.
this.threadPoolExecutor.execute(event);
} | 3.26 |
hbase_ExecutorService_delayedSubmit_rdh | // Submit the handler after the given delay. Used for retrying.
public void delayedSubmit(EventHandler eh, long delay, TimeUnit unit) {
ListenableFuture<?> future = delayedSubmitTimer.schedule(() -> submit(eh), delay, unit);
future.addListener(() -> {
try {
future.get();
} catch (Exception e) {
LOG.error("Failed to submit the event handler {} to executor", eh, e); }
}, MoreExecutors.directExecutor());
} | 3.26 |
hbase_ExecutorService_getExecutorLazily_rdh | /**
* Initialize the executor lazily. Note that if an executor needs to be initialized lazily, then all
* paths should use this method to get the executor, should not start executor by using
* {@link ExecutorService#startExecutorService(ExecutorConfig)}
*/
public ThreadPoolExecutor getExecutorLazily(ExecutorConfig config) {
return executorMap.computeIfAbsent(config.getName(), executorName -> new Executor(config)).getThreadPoolExecutor();
} | 3.26 |
hbase_ExecutorService_startExecutorService_rdh | /**
* Start an executor service with a given name. If there was a service already started with the
* same name, this throws a RuntimeException.
*
* @param config
* Configuration to use for the executor.
*/
public void startExecutorService(final ExecutorConfig config) {
final String name = config.getName();
Executor hbes = this.executorMap.compute(name, (key, value) -> {
if (value != null) {
throw new RuntimeException(("An executor service with the name " + key) + " is already running!");
}
return new Executor(config);
});
LOG.debug("Starting executor service name={}, corePoolSize={}, maxPoolSize={}", name, hbes.threadPoolExecutor.getCorePoolSize(), hbes.threadPoolExecutor.getMaximumPoolSize());
} | 3.26 |
hbase_ExecutorService_setAllowCoreThreadTimeout_rdh | /**
* Allows timing out of core threads. Good to set this for non-critical thread pools for release
* of unused resources. Refer to {@link ThreadPoolExecutor#allowCoreThreadTimeOut} for
* additional details.
*/
public ExecutorConfig setAllowCoreThreadTimeout(boolean allowCoreThreadTimeout) {
this.allowCoreThreadTimeout = allowCoreThreadTimeout;
return this;
} | 3.26 |
hbase_IOEngine_usesSharedMemory_rdh | /**
* If the IOEngine uses shared memory, then when reading a Cacheable from it, the result refers to
* the same memory area as used by the engine for caching it.
*
* @return true when IOEngine using shared memory.
*/
default boolean usesSharedMemory() {
return false;
} | 3.26 |
hbase_ForeignException_serialize_rdh | /**
* Converts a ForeignException to an array of bytes.
*
* @param source
* the name of the external exception source
* @param t
* the "local" external exception (local)
* @return protobuf serialized version of ForeignException
*/
public static byte[] serialize(String source, Throwable t) {
GenericExceptionMessage.Builder v4 = GenericExceptionMessage.newBuilder();
v4.setClassName(t.getClass().getName());
if (t.getMessage() != null) {
v4.setMessage(t.getMessage());
}
// set the stack trace, if there is one
List<StackTraceElementMessage> stack = ForeignException.toStackTraceElementMessages(t.getStackTrace());
if (stack != null) {
v4.addAllTrace(stack);
}
GenericExceptionMessage payload = v4.build();
ForeignExceptionMessage.Builder exception = ForeignExceptionMessage.newBuilder();
exception.setGenericException(payload).setSource(source);
ForeignExceptionMessage eem = exception.build();
return eem.toByteArray();
} | 3.26 |
hbase_ForeignException_isRemote_rdh | /**
* The cause of a ForeignException can be an exception that was generated on a local in process
* thread, or a thread from a 'remote' separate process. If the cause is a ProxyThrowable, we know
* it came from deserialization which usually means it came from not only another thread, but also
* from a remote thread.
*
* @return true if went through deserialization, false if locally generated
*/
public boolean isRemote() {
return getCause() instanceof ProxyThrowable;
} | 3.26 |
hbase_ForeignException_deserialize_rdh | /**
* Takes a series of bytes and tries to generate an ForeignException instance for it.
*
* @return the ForeignExcpetion instance
* @throws InvalidProtocolBufferException
* if there was deserialization problem this is thrown.
*/
public static ForeignException deserialize(byte[] bytes) throws IOException {
// figure out the data we need to pass
ForeignExceptionMessage eem = ForeignExceptionMessage.parseFrom(bytes);
GenericExceptionMessage gem = eem.getGenericException();
StackTraceElement[] trace = ForeignException.toStackTrace(gem.getTraceList());
ProxyThrowable dfe = new ProxyThrowable(gem.getMessage(), trace);
ForeignException e = new ForeignException(eem.getSource(), dfe);
return e;
}
/**
* Unwind a serialized array of {@link StackTraceElementMessage}s to a {@link StackTraceElement} | 3.26 |
hbase_ForeignException_toStackTraceElementMessages_rdh | /**
* Convert a stack trace to a list of {@link StackTraceElementMessage}s.
*
* @param trace
* the stack trace to convert to protobuf message
* @return <tt>null</tt> if the passed stack is <tt>null</tt>.
*/
private static List<StackTraceElementMessage> toStackTraceElementMessages(StackTraceElement[] trace) {
// if there is no stack trace, ignore it and just return the message
if (trace == null)
return null;
// build the stack trace for the message
List<StackTraceElementMessage> pbTrace = new ArrayList<>(trace.length);
for (StackTraceElement elem : trace) {
StackTraceElementMessage.Builder stackBuilder = StackTraceElementMessage.newBuilder();
stackBuilder.setDeclaringClass(elem.getClassName());
stackBuilder.setFileName(elem.getFileName());
stackBuilder.setLineNumber(elem.getLineNumber());
stackBuilder.setMethodName(elem.getMethodName());
pbTrace.add(stackBuilder.build());
}
return pbTrace;
} | 3.26 |
hbase_BinaryPrefixComparator_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the comparator that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(ByteArrayComparable other) {
if (other == this) {
return true;
}
if (!(other instanceof BinaryPrefixComparator)) {
return false;
}
return super.areSerializedFieldsEqual(other);
} | 3.26 |
hbase_BinaryPrefixComparator_toByteArray_rdh | /**
* Returns The comparator serialized using pb
*/
@Override
public byte[] toByteArray() {
ComparatorProtos.BinaryPrefixComparator.Builder builder = ComparatorProtos.BinaryPrefixComparator.newBuilder();
builder.setComparable(ProtobufUtil.toByteArrayComparable(this.value));
return builder.build().toByteArray();
} | 3.26 |
hbase_BinaryPrefixComparator_parseFrom_rdh | /**
* Parse a serialized representation of {@link BinaryPrefixComparator}
*
* @param pbBytes
* A pb serialized {@link BinaryPrefixComparator} instance
* @return An instance of {@link BinaryPrefixComparator} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static BinaryPrefixComparator parseFrom(final byte[] pbBytes) throws DeserializationException {
ComparatorProtos.BinaryPrefixComparator proto;
try {
proto = ComparatorProtos.BinaryPrefixComparator.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
return new BinaryPrefixComparator(proto.getComparable().getValue().toByteArray());
} | 3.26 |
hbase_RegionMover_filename_rdh | /**
* Path of file where regions will be written to during unloading/read from during loading
*
* @return RegionMoverBuilder object
*/
public RegionMoverBuilder filename(String filename) {
this.filename = filename;
return this;
} | 3.26 |
hbase_RegionMover_rackManager_rdh | /**
* Set specific rackManager implementation. This setter method is for testing purpose only.
*
* @param rackManager
* rackManager impl
* @return RegionMoverBuilder object
*/
@InterfaceAudience.Private
public RegionMoverBuilder rackManager(RackManager rackManager) {
this.rackManager = rackManager;
return this;
} | 3.26 |
hbase_RegionMover_readServersFromFile_rdh | /**
*
* @param filename
* The file should have 'host:port' per line
* @return List of servers from the file in format 'hostname:port'.
*/
private List<String> readServersFromFile(String filename) throws IOException {
List<String> servers = new ArrayList<>();
if (filename != null) {
try {
Files.readAllLines(Paths.get(filename)).stream().map(String::trim).filter(((Predicate<String>) (String::isEmpty)).negate()).map(String::toLowerCase).forEach(servers::add);
} catch (IOException e) {
LOG.error("Exception while reading servers from file,", e);
throw e;
}
}
return servers;
} | 3.26 |
hbase_RegionMover_designatedFile_rdh | /**
* Set the designated file. Designated file contains hostnames where region moves. Designated
* file should have 'host:port' per line. Port is mandatory here as we can have many RS running
* on a single host.
*
* @param designatedFile
* The designated file
* @return RegionMoverBuilder object
*/
public RegionMoverBuilder designatedFile(String designatedFile) {
this.designatedFile = designatedFile;
return this;
} | 3.26 |
hbase_RegionMover_ack_rdh | /**
* Set ack/noAck mode.
* <p>
* In ack mode regions are acknowledged before and after moving, and the move is retried
* hbase.move.retries.max times; if unsuccessful we quit with exit code 1. NoAck mode is a best
* effort mode: each region movement is tried once. This can be used during graceful shutdown as
* even if we have a stuck region, upon shutdown it'll be reassigned anyway.
* <p>
*
* @return RegionMoverBuilder object
*/
public RegionMoverBuilder ack(boolean ack) {
this.ack = ack;
return this;
} | 3.26 |
hbase_RegionMover_unloadFromRack_rdh | /**
* Unload regions from given {@link #hostname} using ack/noAck mode and {@link #maxthreads}. In
* noAck mode we do not make sure that the region is successfully online on the target region
* server, hence it is best effort. We do not unload regions to hostnames given in
* {@link #excludeFile}. If designatedFile is present with some contents, we will unload regions
* to hostnames provided in {@link #designatedFile}. While unloading regions, destination
* RegionServers are selected from different rack i.e regions should not move to any RegionServers
* that belong to same rack as source RegionServer.
*
* @return true if unloading succeeded, false otherwise
*/
public boolean unloadFromRack() throws InterruptedException, ExecutionException, TimeoutException {
return unloadRegions(true);
} | 3.26 |
hbase_RegionMover_unload_rdh | /**
* Unload regions from given {@link #hostname} using ack/noAck mode and {@link #maxthreads}. In
* noAck mode we do not make sure that the region is successfully online on the target region
* server, hence it is best effort. We do not unload regions to hostnames given in
* {@link #excludeFile}. If designatedFile is present with some contents, we will unload regions
* to hostnames provided in {@link #designatedFile}
*
* @return true if unloading succeeded, false otherwise
*/
public boolean unload() throws InterruptedException, ExecutionException, TimeoutException {
return unloadRegions(false);
} | 3.26 |
hbase_RegionMover_stripServer_rdh | /**
* Remove the servername whose hostname and port portion matches from the passed array of servers.
* Returns as side-effect the servername removed.
*
* @return server removed from list of Region Servers
*/
private ServerName stripServer(List<ServerName> regionServers, String hostname, int port) {
for (Iterator<ServerName> iter = regionServers.iterator(); iter.hasNext();) {
ServerName server = iter.next();
if (server.getAddress().getHostName().equalsIgnoreCase(hostname) && (server.getAddress().getPort() == port)) {
iter.remove();
return server;
}
}
return null;
} | 3.26 |
hbase_RegionMover_isolateRegions_rdh | /**
* Isolate regions specified in {@link #isolateRegionIdArray} on {@link #hostname} in ack mode,
* and unload regions from given {@link #hostname} using ack/noAck mode and {@link #maxthreads}.
* In noAck mode we do not make sure that the region is successfully online on the target region
* server, hence it is best effort. We do not unload regions to hostnames given in
* {@link #excludeFile}. If designatedFile is present with some contents, we will unload regions
* to hostnames provided in {@link #designatedFile}
*
* @return true if region isolation succeeded, false otherwise
*/
public boolean isolateRegions() throws ExecutionException, InterruptedException, TimeoutException {
return unloadRegions(false, isolateRegionIdArray);
} | 3.26 |
hbase_RegionMover_writeFile_rdh | /**
* Write the number of regions moved in the first line followed by regions moved in subsequent
* lines
*/
private void writeFile(String filename, List<RegionInfo> movedRegions) throws IOException {
try (DataOutputStream dos = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(filename)))) {
dos.writeInt(movedRegions.size());
for (RegionInfo region : movedRegions) {
Bytes.writeByteArray(dos, RegionInfo.toByteArray(region));
}
} catch (IOException e) {
LOG.error(("ERROR: Was Not able to write regions moved to output file but moved " + movedRegions.size()) + " regions", e);
throw e;
}
} | 3.26 |
hbase_RegionMover_includeExcludeRegionServers_rdh | /**
* Designates or excludes the servername whose hostname and port portion matches the list given in
* the file. Example:<br>
* If you want to designate RSs, suppose designatedFile has RS1, regionServers has RS1, RS2 and
* RS3. When we call includeExcludeRegionServers(designatedFile, regionServers, true), RS2 and RS3
* are removed from regionServers list so that regions can move to only RS1. If you want to
* exclude RSs, suppose excludeFile has RS1, regionServers has RS1, RS2 and RS3. When we call
* includeExcludeRegionServers(excludeFile, servers, false), RS1 is removed from regionServers
* list so that regions can move to only RS2 and RS3.
*/
private void includeExcludeRegionServers(String fileName, List<ServerName> regionServers, boolean isInclude) throws IOException {
if (fileName != null) {
List<String> servers = readServersFromFile(fileName);
if (servers.isEmpty()) {
LOG.warn("No servers provided in the file: {}." + fileName);
return;
}
Iterator<ServerName> i = regionServers.iterator();
while (i.hasNext()) {
String rs = i.next().getServerName();
String rsPort = (rs.split(ServerName.SERVERNAME_SEPARATOR)[0].toLowerCase() + ":") + rs.split(ServerName.SERVERNAME_SEPARATOR)[1];
if (isInclude != servers.contains(rsPort)) {
i.remove();
}
}
}
} | 3.26 |
hbase_RegionMover_isolateRegionIdArray_rdh | /**
* Set the region ID to isolate on the region server.
*/
public RegionMoverBuilder isolateRegionIdArray(List<String> isolateRegionIdArray) {
this.isolateRegionIdArray = isolateRegionIdArray;
return this;
} | 3.26 |
hbase_RegionMover_stripMaster_rdh | /**
* Exclude master from list of RSs to move regions to
*/
private void stripMaster(List<ServerName> regionServers) throws IOException {
ServerName master = admin.getClusterMetrics(EnumSet.of(Option.MASTER)).getMasterName();
stripServer(regionServers, master.getHostname(), master.getPort());
} | 3.26 |
hbase_RegionMover_timeout_rdh | /**
* Set the timeout for the load/unload operation, in seconds. This is a global timeout; the thread
* pool for movers also has a separate timeout, which is hbase.move.wait.max * number of regions to
* load/unload.
*
* @param timeout
* in seconds
* @return RegionMoverBuilder object
*/
public RegionMoverBuilder timeout(int timeout) {
this.timeout = timeout;
return this;
} | 3.26 |
hbase_RegionMover_load_rdh | /**
* Loads the specified {@link #hostname} with regions listed in the {@link #filename} RegionMover
* Object has to be created using {@link #RegionMover(RegionMoverBuilder)}
*
* @return true if loading succeeded, false otherwise
*/
public boolean load() throws ExecutionException, InterruptedException, TimeoutException {
ExecutorService loadPool = Executors.newFixedThreadPool(1);
Future<Boolean> loadTask = loadPool.submit(getMetaRegionMovePlan());
boolean isMetaMoved = waitTaskToFinish(loadPool, loadTask, "loading");
if (!isMetaMoved) {
return false;
}
loadPool = Executors.newFixedThreadPool(1);
loadTask = loadPool.submit(getNonMetaRegionsMovePlan());
return waitTaskToFinish(loadPool, loadTask, "loading");
} | 3.26 |
hbase_RegionMover_build_rdh | /**
* This method builds the appropriate RegionMover object which can then be used to load/unload
* using load and unload methods
*
* @return RegionMover object
*/
public RegionMover build() throws IOException {
return new RegionMover(this);
} | 3.26 |
hbase_ZKProcedureCoordinator_abort_rdh | /**
* Receive a notification and propagate it to the local coordinator
*
* @param abortNode
* full znode path to the failed procedure information
*/
protected void abort(String abortNode) {
String procName = ZKUtil.getNodeName(abortNode);
ForeignException ee = null;
try {
byte[] data = ZKUtil.getData(zkProc.getWatcher(), abortNode);
if ((data == null) || (data.length == 0)) {
// ignore
return;
} else if (!ProtobufUtil.isPBMagicPrefix(data)) {
LOG.warn(("Got an error notification for op:" + abortNode) + " but we can't read the information. Killing the procedure.");
// we got a remote exception, but we can't describe it
ee = new ForeignException(coordName, "Data in abort node is illegally formatted. ignoring content.");
} else {
data = Arrays.copyOfRange(data, ProtobufUtil.lengthOfPBMagic(), data.length);
ee = ForeignException.deserialize(data);
}
} catch (IOException e) {
LOG.warn(("Got an error notification for op:" + abortNode) + " but we can't read the information. Killing the procedure.");
// we got a remote exception, but we can't describe it
ee = new ForeignException(coordName, e);
} catch (KeeperException e) {
coordinator.rpcConnectionFailure(("Failed to get data for abort node:" + abortNode) + zkProc.getAbortZnode(), new IOException(e));
} catch (InterruptedException e) {
coordinator.rpcConnectionFailure(("Failed to get data for abort node:" + abortNode) + zkProc.getAbortZnode(), new IOException(e));
Thread.currentThread().interrupt();
}
coordinator.abortProcedure(procName, ee);
} | 3.26 |
hbase_ZKProcedureCoordinator_getZkProcedureUtil_rdh | /**
* Used in testing
*/
final ZKProcedureUtil getZkProcedureUtil() {
return zkProc;
} | 3.26 |
hbase_ZKProcedureCoordinator_sendGlobalBarrierAcquire_rdh | /**
* The "acquire" phase. The coordinator creates a new procType/acquired/ znode dir. If znodes
* appear, first acquire to relevant listener or sets watch waiting for notification of the
* acquire node
*
* @param proc
* the Procedure
* @param info
* data to be stored in the acquire node
* @param nodeNames
* children of the acquire phase
* @throws IOException
* if any failure occurs.
*/
@Override
public final void sendGlobalBarrierAcquire(Procedure proc, byte[] info, List<String> nodeNames) throws IOException, IllegalArgumentException {
String procName = proc.getName();
// start watching for the abort node
String abortNode = zkProc.getAbortZNode(procName);
try {
// check to see if the abort node already exists
if (ZKUtil.watchAndCheckExists(zkProc.getWatcher(), abortNode)) {
abort(abortNode);
}
// If we get an abort node watch triggered here, we'll go complete creating the acquired
// znode but then handle the acquire znode and bail out
} catch (KeeperException e) {
String msg = "Failed while watching abort node:" + abortNode;
LOG.error(msg, e);
throw new IOException(msg, e);
}
// create the acquire barrier
String acquire = zkProc.getAcquiredBarrierNode(procName);
LOG.debug("Creating acquire znode:" + acquire);
try {
// notify all the procedure listeners to look for the acquire node
byte[] data = ProtobufUtil.prependPBMagic(info);
ZKUtil.createWithParents(zkProc.getWatcher(), acquire, data);
// loop through all the children of the acquire phase and watch for them
for (String node : nodeNames) {
String znode = ZNodePaths.joinZNode(acquire, node);
LOG.debug("Watching for acquire node:" + znode);if (ZKUtil.watchAndCheckExists(zkProc.getWatcher(), znode)) {
coordinator.memberAcquiredBarrier(procName, node);
}
}
} catch (KeeperException e) {
String v7 = "Failed while creating acquire node:" + acquire;
LOG.error(v7, e);
throw new IOException(v7, e);
}
} | 3.26 |
hbase_ZKProcedureCoordinator_sendAbortToMembers_rdh | /**
* This is the abort message being sent by the coordinator to a member. TODO: this code isn't
* actually used, but can be used to issue a cancellation from the coordinator.
*/
@Override
public final void sendAbortToMembers(Procedure proc, ForeignException ee) {
String procName = proc.getName();
LOG.debug(("Aborting procedure '" + procName) + "' in zk");
String procAbortNode = zkProc.getAbortZNode(procName);
try {
LOG.debug("Creating abort znode:" + procAbortNode);
String source = (ee.getSource() == null) ? coordName : ee.getSource();
byte[] errorInfo = ProtobufUtil.prependPBMagic(ForeignException.serialize(source, ee));
// first create the znode for the procedure
ZKUtil.createAndFailSilent(zkProc.getWatcher(), procAbortNode,
errorInfo);
LOG.debug("Finished creating abort node:" + procAbortNode);
} catch (KeeperException e) {
// possible that we get this error for the procedure if we already reset the zk state, but in
// that case we should still get an error for that procedure anyways
zkProc.logZKTree(zkProc.baseZNode);
coordinator.rpcConnectionFailure(((("Failed to post zk node:" + procAbortNode) + " to abort procedure '") + procName) + "'", new IOException(e));
}
} | 3.26 |