name (string) | code_snippet (string) | score (float64) |
---|---|---|
hbase_ByteBufferUtils_skip_rdh | /**
* Increment position in buffer.
*
* @param buffer
* In this buffer.
* @param length
* By that many bytes.
*/
public static void skip(ByteBuffer buffer, int length) {
buffer.position(buffer.position() + length);
} | 3.26 |
hbase_ByteBufferUtils_putLong_rdh | /**
* Put a long value out to the given ByteBuffer's current position in big-endian format. This also
* advances the position in buffer by long size.
*
* @param buffer
* the ByteBuffer to write to
* @param val
* long to write out
*/
public static void putLong(ByteBuffer buffer, long val) {
ConverterHolder.BEST_CONVERTER.putLong(buffer, val);
} | 3.26 |
hbase_ByteBufferUtils_arePartsEqual_rdh | /**
* Check whether two parts in the same buffer are equal.
*
* @param buffer
* In which buffer there are parts
* @param offsetLeft
* Beginning of first part.
* @param lengthLeft
* Length of the first part.
* @param offsetRight
* Beginning of the second part.
* @param lengthRight
* Length of the second part.
* @return True if equal
*/
public static boolean arePartsEqual(ByteBuffer buffer, int offsetLeft, int lengthLeft, int offsetRight, int lengthRight) {
if (lengthLeft != lengthRight) {
return false;
}
if (buffer.hasArray()) {
return 0 == Bytes.compareTo(buffer.array(), buffer.arrayOffset() + offsetLeft, lengthLeft, buffer.array(), buffer.arrayOffset() + offsetRight, lengthRight);
}
for (int i = 0; i < lengthRight; ++i) {
if (buffer.get(offsetLeft + i) != buffer.get(offsetRight + i)) {
return false;
}
}
return true;
} | 3.26 |
hbase_ByteBufferUtils_toStringBinary_rdh | // For testing purpose
public static String toStringBinary(final ByteBuffer b, int off, int len) {
StringBuilder result = new StringBuilder();
// Just in case we are passed a 'len' that is > buffer length...
if (off >= b.capacity()) {
return result.toString();
}
if ((off + len) > b.capacity()) {
len = b.capacity() - off;
}
for (int i = off; i < (off + len); ++i) {
int ch = b.get(i) & 0xff;
if (((((ch >= '0') && (ch <= '9')) || ((ch >= 'A') && (ch <= 'Z'))) || ((ch >= 'a') && (ch <= 'z'))) || (" `~!@#$%^&*()-_=+[]{}|;:'\",.<>/?".indexOf(ch) >= 0)) {
result.append(((char) (ch)));
} else {
result.append(String.format("\\x%02X", ch));
}
}
return result.toString();
} | 3.26 |
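A minimal usage sketch for the `toStringBinary` snippet above; it assumes the method lives on HBase's `org.apache.hadoop.hbase.util.ByteBufferUtils`, as the row name suggests, and only illustrates the printable-vs-escaped output format.

```java
import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.util.ByteBufferUtils;

public class ToStringBinaryDemo {
  public static void main(String[] args) {
    // Printable ASCII is kept as-is, everything else is hex-escaped.
    ByteBuffer bb = ByteBuffer.wrap(new byte[] { 'k', 'e', 'y', 0x00, (byte) 0xFF });
    // Expected output per the logic above: key\x00\xFF
    System.out.println(ByteBufferUtils.toStringBinary(bb, 0, bb.capacity()));
  }
}
```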
hbase_ByteBufferUtils_longFitsIn_rdh | /**
* Check how many bytes are required to store value.
*
* @param value
* Value which size will be tested.
* @return How many bytes are required to store value.
*/
public static int longFitsIn(final long value) {
if (value < 0) {
return 8;
}
if (value < (1L << (4 * 8))) {
// no more than 4 bytes
if (value < (1L << (2 * 8))) {
if (value < (1L << (1 * 8))) {
return 1;
}
return 2;
}
if (value < (1L << (3 * 8))) {
return 3;
}
return 4;
}
// more than 4 bytes
if (value < (1L << (6 * 8))) {
if (value < (1L << (5 * 8))) {
return 5;
}
return 6;
}
if (value < (1L << (7 * 8))) {
return 7;
}
return 8;
} | 3.26 |
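A few boundary values that follow from the shift checks above; this sketch assumes the usual `ByteBufferUtils` class from the row name.

```java
import org.apache.hadoop.hbase.util.ByteBufferUtils;

public class LongFitsInDemo {
  public static void main(String[] args) {
    System.out.println(ByteBufferUtils.longFitsIn(0L));       // 1 -- fits in a single byte
    System.out.println(ByteBufferUtils.longFitsIn(255L));     // 1 -- still below 1 << 8
    System.out.println(ByteBufferUtils.longFitsIn(256L));     // 2
    System.out.println(ByteBufferUtils.longFitsIn(1L << 32)); // 5 -- just past the 4-byte range
    System.out.println(ByteBufferUtils.longFitsIn(-1L));      // 8 -- negative values take all 8 bytes
  }
}
```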
hbase_ByteBufferUtils_readAsInt_rdh | /**
* Converts a ByteBuffer to an int value
*
* @param buf
* The ByteBuffer
* @param offset
* Offset to int value
* @param length
* Number of bytes used to store the int value.
* @return the int value
* @throws IllegalArgumentException
* if there are not enough bytes left in the buffer after the given offset
*/
public static int readAsInt(ByteBuffer buf, int offset, final int length) {
if ((offset + length) > buf.limit()) {
throw new IllegalArgumentException(((((("offset (" + offset) + ") + length (") + length) + ") exceed the") + " limit of the buffer: ") + buf.limit());
}
int n = 0;
for (int i = offset; i < (offset + length); i++) {
n <<= 8;
n ^= toByte(buf, i) & 0xff;
}
return n;
} | 3.26 |
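A worked example of the big-endian accumulation above, again assuming the `ByteBufferUtils` class from the row name.

```java
import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.util.ByteBufferUtils;

public class ReadAsIntDemo {
  public static void main(String[] args) {
    ByteBuffer buf = ByteBuffer.wrap(new byte[] { 0x01, 0x02, 0x03 });
    // Bytes are accumulated big-endian: (0x01 << 8) ^ 0x02 = 258
    System.out.println(ByteBufferUtils.readAsInt(buf, 0, 2));
    // (0x02 << 8) ^ 0x03 = 515
    System.out.println(ByteBufferUtils.readAsInt(buf, 1, 2));
    // readAsInt(buf, 2, 2) would throw IllegalArgumentException: offset + length > limit
  }
}
```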
hbase_ByteBufferUtils_compareTo_rdh | // The below two methods show up in lots of places. Versions of them in commons util and in
// Cassandra. In guava too? They are copied from ByteBufferUtils. They are here as static
// privates. Seems to make code smaller and make Hotspot happier (comes of compares and study
// of compiled code via jitwatch).
public static int compareTo(byte[] buf1, int o1, int l1, ByteBuffer buf2, int o2, int l2) {
return ComparerHolder.BEST_COMPARER.compareTo(buf1, o1, l1, buf2, o2, l2);
} | 3.26 |
hbase_ByteBufferUtils_writeVLong_rdh | /**
* Similar to {@link WritableUtils#writeVLong(java.io.DataOutput, long)}, but writes to a
* {@link ByteBuffer}.
*/
public static void writeVLong(ByteBuffer out, long i) {
if ((i >= (-112)) && (i <= 127)) {
out.put(((byte) (i)));
return;
}
int len = -112;
if (i < 0) {
i ^= -1L;// take one's complement
len = -120;
}
long v36 = i;
while (v36 != 0) {
v36 = v36 >> 8;
len--;
}
out.put(((byte) (len)));
len = (len < (-120)) ? -(len + 120) : -(len + 112);
for (int idx = len; idx != 0; idx--) {
int shiftbits = (idx - 1) * 8;
long mask = 0xffL << shiftbits;
out.put(((byte) ((i & mask) >> shiftbits)));
}
} | 3.26 |
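A small sketch of the WritableUtils-style vlong behaviour described above; it assumes the `ByteBufferUtils` class from the row name and only inspects how many bytes each value consumes.

```java
import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.util.ByteBufferUtils;

public class WriteVLongDemo {
  public static void main(String[] args) {
    ByteBuffer out = ByteBuffer.allocate(9);
    ByteBufferUtils.writeVLong(out, 127L);
    System.out.println(out.position()); // 1 -- values in [-112, 127] fit in a single byte
    out.clear();
    ByteBufferUtils.writeVLong(out, 128L);
    System.out.println(out.position()); // 2 -- a length marker byte plus one payload byte
    out.clear();
    ByteBufferUtils.writeVLong(out, Long.MAX_VALUE);
    System.out.println(out.position()); // 9 -- a length marker byte plus eight payload bytes
  }
}
```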
hbase_ByteBufferUtils_toBytes_rdh | /**
* Copy the given number of bytes from specified offset into a new byte[]
*
* @param buffer
* input bytebuffer to read
* @param offset
* input offset where Bytes are
* @param length
* the number of bytes to read
* @return a new byte[] containing the bytes in the specified range
*/
public static byte[] toBytes(ByteBuffer buffer, int offset, int length) {
byte[] output = new byte[length];
for (int i = 0; i < length; i++) {
output[i] = buffer.get(offset + i);
}
return output; } | 3.26 |
hbase_ByteBufferUtils_toLong_rdh | /**
* Reads a long value at the given buffer's offset.
*
* @param buffer
* input byte buffer to read
* @param offset
* input offset where Long is
* @return long value at offset
*/
public static long toLong(ByteBuffer buffer, int offset) {
return ConverterHolder.BEST_CONVERTER.toLong(buffer, offset);} | 3.26 |
hbase_ByteBufferUtils_copyFromBufferToBuffer_rdh | /**
* Copy from one buffer to another from given offset. This will be absolute positional copying and
* won't affect the position of any of the buffers.
*
* @param in
* input bytebuffer
* @param out
* destination bytebuffer
* @param sourceOffset
* offset of source buffer
* @param destinationOffset
* offset of destination buffer
* @param length
* the number of bytes to copy
*/
public static void copyFromBufferToBuffer(ByteBuffer in, ByteBuffer out, int sourceOffset, int destinationOffset, int length) {
if (in.hasArray() && out.hasArray()) {
System.arraycopy(in.array(), sourceOffset + in.arrayOffset(), out.array(), out.arrayOffset() + destinationOffset, length);
} else if (UNSAFE_AVAIL) {
UnsafeAccess.copy(in, sourceOffset, out, destinationOffset, length);
} else {
ByteBuffer outDup = out.duplicate();
outDup.position(destinationOffset);
ByteBuffer inDup = in.duplicate();
inDup.position(sourceOffset).limit(sourceOffset + length);
outDup.put(inDup);
}
// We used to return a result but disabled; return destinationOffset + length;
}
/**
* Copy from one buffer to another from given offset.
* <p>
* Note : This will advance the position marker of {@code out} but not change the position maker
* for {@code in} | 3.26 |
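A hedged usage sketch for the five-argument overload shown above (the trailing Javadoc fragment belongs to a different, position-advancing overload); it assumes HBase's `ByteBufferUtils` and demonstrates that the absolute copy leaves both positions untouched.

```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hbase.util.ByteBufferUtils;

public class CopyBufferDemo {
  public static void main(String[] args) {
    ByteBuffer src = ByteBuffer.wrap("hello world".getBytes(StandardCharsets.UTF_8));
    ByteBuffer dst = ByteBuffer.allocate(16);
    // Absolute copy of "world" (5 bytes starting at offset 6) into dst starting at offset 0.
    ByteBufferUtils.copyFromBufferToBuffer(src, dst, 6, 0, 5);
    // Neither buffer's position is advanced by the copy.
    System.out.println(src.position() + " " + dst.position()); // 0 0
    System.out.println(new String(dst.array(), 0, 5, StandardCharsets.UTF_8)); // world
  }
}
```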
hbase_ByteBufferUtils_readLong_rdh | /**
* Read long which was written to fitInBytes bytes and increment position.
*
* @param fitInBytes
* In how many bytes given long is stored.
* @return The value of parsed long.
*/
public static long readLong(ByteBuffer in, final int fitInBytes) {
long v58 = 0;
for (int i = 0; i < fitInBytes; ++i) {
v58 |= (in.get() & 0xffL) << (8L * i);
}
return v58;
} | 3.26 |
hbase_ByteBufferUtils_toShort_rdh | /**
* Reads a short value at the given buffer's offset.
*
* @param buffer
* input byte buffer to read
* @param offset
* input offset where short is
* @return short value at offset
*/
public static short toShort(ByteBuffer buffer, int offset) {
return ConverterHolder.BEST_CONVERTER.m0(buffer, offset);
} | 3.26 |
hbase_ByteBufferUtils_hashCode_rdh | /**
* Compute a hash code over a range of a ByteBuffer.
*
* @param buf
* ByteBuffer to hash
* @param offset
* offset to start from
* @param length
* length to hash
*/
public static int hashCode(ByteBuffer buf, int offset, int length) {
int hash = 1;
for (int i = offset; i < (offset + length); i++) {
hash = (31 * hash) + ((int) (toByte(buf, i)));
}
return hash;
} | 3.26 |
hbase_ByteBufferUtils_readCompressedInt_rdh | /**
* Read integer from buffer coded in 7 bits and increment position.
*
* @return Read integer.
*/
public static int readCompressedInt(ByteBuffer buffer) {
byte b = buffer.get();
if ((b & NEXT_BIT_MASK) != 0) {
return (b & VALUE_MASK) + (readCompressedInt(buffer) << NEXT_BIT_SHIFT);
}
return b & VALUE_MASK;
} | 3.26 |
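A worked decode of the 7-bit encoding above. The constant values are assumptions (VALUE_MASK = 0x7F, NEXT_BIT_MASK = 0x80, NEXT_BIT_SHIFT = 7) since they are not shown in the snippet.

```java
import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.util.ByteBufferUtils;

public class ReadCompressedIntDemo {
  public static void main(String[] args) {
    // 300 = 0b1_0010_1100: the low 7 bits (44) with the continuation bit set, then the remaining 2.
    ByteBuffer buffer = ByteBuffer.wrap(new byte[] { (byte) 0xAC, 0x02 });
    // (0xAC & 0x7F) + (2 << 7) = 44 + 256 = 300
    System.out.println(ByteBufferUtils.readCompressedInt(buffer));
  }
}
```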
hbase_ByteBufferUtils_searchDelimiterIndexInReverse_rdh | /**
* Find index of passed delimiter walking from end of buffer backwards.
*
* @return Index of delimiter
*/
public static int searchDelimiterIndexInReverse(ByteBuffer b, int offset, int length, int delimiter) {
for (int i = (offset + length) - 1; i >= offset; i--) {
if (b.get(i) == delimiter) {
return i;
}
}
return -1;
} | 3.26 |
hbase_BufferedMutator_getWriteBufferSize_rdh | /**
* Returns the maximum size in bytes of the write buffer for this HTable.
* <p>
* The default value comes from the configuration parameter {@code hbase.client.write.buffer}.
*
* @return The size of the write buffer in bytes.
*/
default long getWriteBufferSize() {
throw new UnsupportedOperationException("The BufferedMutator::getWriteBufferSize has not been implemented");
}
/**
* Set rpc timeout for this mutator instance
*
* @deprecated Since 3.0.0, will be removed in 4.0.0. Please set this through the
{@link BufferedMutatorParams} | 3.26 |
hbase_BufferedMutator_disableWriteBufferPeriodicFlush_rdh | /**
* Disable periodic flushing of the write buffer.
*/
default void disableWriteBufferPeriodicFlush() {
setWriteBufferPeriodicFlush(0, MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS);
} | 3.26 |
hbase_BufferedMutator_setWriteBufferPeriodicFlush_rdh | /**
* Sets the maximum time before the buffer is automatically flushed.
*
* @param timeoutMs
* The maximum number of milliseconds how long records may be buffered before
* they are flushed. Set to 0 to disable.
* @param timerTickMs
* The number of milliseconds between each check if the timeout has been
* exceeded. Must be 100ms (as defined in
* {@link #MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS}) or larger to avoid
* performance problems.
*/
default void setWriteBufferPeriodicFlush(long timeoutMs, long timerTickMs) {
throw new UnsupportedOperationException("The BufferedMutator::setWriteBufferPeriodicFlush has not been implemented");
} | 3.26 |
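A hedged usage sketch tying the periodic-flush setter above to a concrete `BufferedMutator`; the `Connection`, table name, and column family are assumptions, and a real mutator implementation is expected to override the defaults shown here (which only throw).

```java
// Sketch only: assumes the usual org.apache.hadoop.hbase.client imports and an open Connection.
static void writeWithPeriodicFlush(Connection conn) throws IOException {
  try (BufferedMutator mutator = conn.getBufferedMutator(TableName.valueOf("demo"))) {
    // Flush buffered mutations at most 10s after they are added, checking once per second.
    mutator.setWriteBufferPeriodicFlush(10_000L, 1_000L);
    Put put = new Put(Bytes.toBytes("row1"));
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    mutator.mutate(put);
  } // close() flushes anything still buffered
}
```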
hbase_ThriftConnection_getTableBuilder_rdh | /**
* Get a TableBuilder to build a ThriftTable; ThriftTable is NOT thread safe
*
* @return a TableBuilder
* @throws IOException
* IOException
*/
@Override
public TableBuilder getTableBuilder(TableName tableName, ExecutorService pool) {
return new TableBuilder() {
@Override
public TableBuilder setOperationTimeout(int timeout) {
return this;
}
@Override
public TableBuilder setRpcTimeout(int timeout) {
return this;
}
@Override
public TableBuilder setReadRpcTimeout(int timeout) {
return this;
}
@Override
public TableBuilder setWriteRpcTimeout(int timeout) {
return this;
}
@Override
public TableBuilder setRequestAttribute(String key, byte[] value) {
return this;
}
@Override
public Table build() {
try {
Pair<THBaseService.Client, TTransport> v18 = clientBuilder.getClient();
return new ThriftTable(tableName, v18.getFirst(), v18.getSecond(), conf);
} catch (IOException ioE) {
throw new RuntimeException(ioE);
}
}
};
} | 3.26 |
hbase_ThriftConnection_getAdmin_rdh | /**
* Get a ThriftAdmin; ThriftAdmin is NOT thread safe
*
* @return a ThriftAdmin
* @throws IOException
* IOException
*/
@Override
public Admin getAdmin() throws IOException {
Pair<THBaseService.Client, TTransport> client = clientBuilder.getClient();
return new ThriftAdmin(client.getFirst(), client.getSecond(), conf);
} | 3.26 |
hbase_ForeignExceptionDispatcher_addListener_rdh | /**
* Listen for failures to a given process. This method should only be used during initialization
* and not added to after exceptions are accepted.
*
* @param errorable
* listener for the errors. may be null.
*/
public synchronized void addListener(ForeignExceptionListener errorable) {
this.listeners.add(errorable);
} | 3.26 |
hbase_ForeignExceptionDispatcher_dispatch_rdh | /**
* Sends an exception to all listeners.
*
* @param e
* {@link ForeignException} containing the cause. Can be null.
*/
private void dispatch(ForeignException e) {
// update all the listeners with the passed error
for (ForeignExceptionListener l : listeners) {
l.receive(e);
}
} | 3.26 |
hbase_ClientUtils_utf8_rdh | /**
* Helper to translate a byte buffer to UTF8 strings
*
* @param bb
* byte buffer
* @return UTF8 decoded string value
*/
public static String utf8(final ByteBuffer bb) {
// performance is not very critical here so we always copy the BB to a byte array
byte[] buf = new byte[bb.remaining()];
// duplicate so the get will not change the position of the original bb
bb.duplicate().get(buf);
return utf8(buf);
} | 3.26 |
hbase_ClientUtils_getLoginContext_rdh | /**
* To authenticate the demo client, kinit should be invoked ahead. Here we try to get the Kerberos
* credential from the ticket cache
*
* @return LoginContext Object
* @throws LoginException
* Exception thrown if unable to get LoginContext
*/
public static LoginContext getLoginContext() throws LoginException {
return new LoginContext(StringUtils.EMPTY, new Subject(), null, new Configuration() {
@Override
public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
Map<String, String> options = new HashMap<>();
options.put("useKeyTab", "false");
options.put("storeKey", "false");
options.put("doNotPrompt", "true");
options.put("useTicketCache", "true");
options.put("renewTGT", "true");
options.put("refreshKrb5Config", "true");
options.put("isInitiator", "true");
String ticketCache = System.getenv("KRB5CCNAME");
if (ticketCache != null) {
options.put("ticketCache", ticketCache);
}
options.put("debug", "true");
return new AppConfigurationEntry[]{ new AppConfigurationEntry("com.sun.security.auth.module.Krb5LoginModule", AppConfigurationEntry.LoginModuleControlFlag.REQUIRED, options) };
}
});
} | 3.26 |
hbase_ClientUtils_printRow_rdh | /**
* copy values into a TreeMap to get them in sorted order and print it
*
* @param rowResult
* Holds row name and then a map of columns to cells
*/
public static void printRow(final TRowResult rowResult) {
TreeMap<String, TCell> sorted = new TreeMap<>();
for (Map.Entry<ByteBuffer, TCell> column : rowResult.columns.entrySet()) {
sorted.put(utf8(column.getKey().array()), column.getValue());
}
StringBuilder rowStr = new StringBuilder();
for (Map.Entry<String, TCell> entry : sorted.entrySet()) {
rowStr.append(entry.getKey());
rowStr.append(" => ");
rowStr.append(utf8(entry.getValue().value.array()));
rowStr.append("; ");
}
System.out.println((("row: " + utf8(rowResult.row.array())) + ", cols: ") + rowStr);
} | 3.26 |
hbase_RegionHDFSBlockLocationFinder_mapHostNameToServerName_rdh | /**
* Map hostname to ServerName. The output ServerName list will have the same order as the input hosts.
*
* @param hosts
* the list of hosts
* @return ServerName list
*/
@RestrictedApi(explanation = "Should only be called in tests", link = "", allowedOnPath = ".*/src/test/.*|.*/RegionHDFSBlockLocationFinder.java")
@SuppressWarnings("MixedMutabilityReturnType")
List<ServerName> mapHostNameToServerName(List<String> hosts) {
if ((hosts == null) || (status == null)) {
if (hosts == null) {
LOG.warn("RegionLocationFinder top hosts is null");
}
return Collections.emptyList();
}
List<ServerName> topServerNames = new ArrayList<>();
Collection<ServerName> regionServers = status.getLiveServerMetrics().keySet();
// create a mapping from hostname to ServerName for fast lookup
Map<String, List<ServerName>> hostToServerName = new HashMap<>();
for (ServerName sn : regionServers) {
String host = sn.getHostname();
if (!hostToServerName.containsKey(host)) {
hostToServerName.put(host, new ArrayList<>());
}
hostToServerName.get(host).add(sn);
}
for (String host : hosts) {
if (!hostToServerName.containsKey(host)) {
continue;
}
for (ServerName sn : hostToServerName.get(host)) {
// it is possible that HDFS is up ( thus host is valid ),
// but RS is down ( thus sn is null )
if (sn != null) {
topServerNames.add(sn);
}
}
}
return topServerNames;
} | 3.26 |
hbase_RegionHDFSBlockLocationFinder_scheduleFullRefresh_rdh | /**
* Refresh all the region locations.
*
* @return true if user created regions got refreshed.
*/
private boolean scheduleFullRefresh() {
ClusterInfoProvider service = this.provider;
// Protect from anything being null while starting up.
if (service == null) {
return false;
}
// TODO: Should this refresh all the regions or only the ones assigned?
boolean includesUserTables = false;
for (final RegionInfo hri : service.getAssignedRegions()) {
cache.refresh(hri);
includesUserTables |= !hri.getTable().isSystemTable();
}
return includesUserTables;
} | 3.26 |
hbase_RegionHDFSBlockLocationFinder_refreshLocalityChangedRegions_rdh | /**
* If locality for a region has changed, that pretty certainly means our cache is out of date.
* Compare oldStatus and newStatus, refreshing any regions which have moved or changed locality.
*/
private void refreshLocalityChangedRegions(ClusterMetrics oldStatus, ClusterMetrics newStatus) {
if ((oldStatus == null) || (newStatus == null)) {
LOG.debug("Skipping locality-based refresh due to oldStatus={}, newStatus={}", oldStatus, newStatus);
return;
}
Map<ServerName, ServerMetrics> oldServers = oldStatus.getLiveServerMetrics();
Map<ServerName, ServerMetrics> newServers = newStatus.getLiveServerMetrics();
Map<String, RegionInfo> regionsByName = new HashMap<>(cache.asMap().size());
for (RegionInfo regionInfo : cache.asMap().keySet()) {
regionsByName.put(regionInfo.getEncodedName(), regionInfo);
}
for (Map.Entry<ServerName, ServerMetrics> v5 : newServers.entrySet()) {
Map<byte[], RegionMetrics> newRegions = v5.getValue().getRegionMetrics();
for (Map.Entry<byte[], RegionMetrics> regionEntry : newRegions.entrySet()) {
String encodedName = RegionInfo.encodeRegionName(regionEntry.getKey());
RegionInfo region = regionsByName.get(encodedName);
if (region == null) {
continue;
}
float newLocality = regionEntry.getValue().getDataLocality();
float oldLocality = getOldLocality(v5.getKey(), regionEntry.getKey(), oldServers);
if (Math.abs(newLocality - oldLocality) > EPSILON) {
LOG.debug("Locality for region {} changed from {} to {}, refreshing cache", region.getEncodedName(), oldLocality, newLocality);
cache.refresh(region);
}
}
}
} | 3.26 |
hbase_RegionHDFSBlockLocationFinder_getDescriptor_rdh | /**
* return TableDescriptor for a given tableName
*
* @param tableName
* the table name
*/
private TableDescriptor getDescriptor(TableName tableName) throws IOException {
ClusterInfoProvider service = this.provider;
if (service == null) {
return null;
}
return service.getTableDescriptor(tableName);
} | 3.26 |
hbase_RegionHDFSBlockLocationFinder_internalGetTopBlockLocation_rdh | /**
* Returns an ordered list of hosts that are hosting the blocks for this region. The weight of
* each host is the sum of the block lengths of all files on that host, so the first host in the
* list is the server which holds the most bytes of the given region's HFiles.
*
* @param region
* region
* @return ordered list of hosts holding blocks of the specified region
*/
private HDFSBlocksDistribution internalGetTopBlockLocation(RegionInfo region) {
try {
TableDescriptor tableDescriptor = getDescriptor(region.getTable());
if (tableDescriptor != null) {
HDFSBlocksDistribution blocksDistribution = provider.computeHDFSBlocksDistribution(getConf(), tableDescriptor, region);
return blocksDistribution;
}
} catch (IOException ioe) {
LOG.warn("IOException during HDFSBlocksDistribution computation for region = {}", region.getEncodedName(),
ioe);
}
return EMPTY_BLOCK_DISTRIBUTION;
} | 3.26 |
hbase_RegionHDFSBlockLocationFinder_createCache_rdh | /**
* Create a cache for region to list of servers
*
* @return A new Cache.
*/
private LoadingCache<RegionInfo, HDFSBlocksDistribution> createCache() {
return CacheBuilder.newBuilder().expireAfterWrite(CACHE_TIME, TimeUnit.MILLISECONDS).build(loader);
} | 3.26 |
hbase_StoreFileTrackerFactory_create_rdh | /**
* Used at master side when splitting/merging regions, as we do not have a Store, thus no
* StoreContext at master side.
*/
public static StoreFileTracker create(Configuration conf, TableDescriptor td, ColumnFamilyDescriptor cfd, HRegionFileSystem regionFs) {
StoreContext ctx = StoreContext.getBuilder().withColumnFamilyDescriptor(cfd).withRegionFileSystem(regionFs).withFamilyStoreDirectoryPath(regionFs.getStoreDir(cfd.getNameAsString())).build();
return StoreFileTrackerFactory.create(mergeConfigurations(conf, td, cfd), true, ctx);
} | 3.26 |
hbase_StoreFileTrackerFactory_createForMigration_rdh | /**
* Create store file tracker to be used as source or destination for
* {@link MigrationStoreFileTracker}.
*/
static StoreFileTrackerBase createForMigration(Configuration conf, String configName, boolean isPrimaryReplica, StoreContext ctx) {
Class<? extends StoreFileTrackerBase> tracker = getStoreFileTrackerClassForMigration(conf, configName);
// prevent nest of MigrationStoreFileTracker, it will cause infinite recursion.
if (MigrationStoreFileTracker.class.isAssignableFrom(tracker)) {
throw new IllegalArgumentException(((("Should not specify " + configName) + " as ") + Trackers.MIGRATION) + " because it can not be nested");
}
LOG.debug("instantiating StoreFileTracker impl {} as {}", tracker.getName(), configName);
return ReflectionUtils.newInstance(tracker, conf, isPrimaryReplica, ctx);
} | 3.26 |
hbase_BaseLoadBalancer_updateBalancerStatus_rdh | /**
* Updates the balancer status tag reported to JMX
*/
@Override
public void updateBalancerStatus(boolean status) {
metricsBalancer.balancerStatus(status);
} | 3.26 |
hbase_BaseLoadBalancer_randomAssignment_rdh | /**
* Used to assign a single region to a random server.
*/
private ServerName randomAssignment(BalancerClusterState cluster, RegionInfo regionInfo, List<ServerName> servers) {
int numServers = servers.size(); // servers is not null, numServers > 1
ServerName sn = null;
final int maxIterations = numServers * 4;
int iterations = 0;
List<ServerName> usedSNs = new ArrayList<>(servers.size());
Random rand = ThreadLocalRandom.current();
do {
int i = rand.nextInt(numServers);
sn = servers.get(i);
if (!usedSNs.contains(sn)) {
usedSNs.add(sn);
}
} while (cluster.wouldLowerAvailability(regionInfo, sn) && ((iterations++) < maxIterations));
if (iterations >= maxIterations) {
// We have reached the max. Means the servers that we collected is still lowering the
// availability
for (ServerName unusedServer : servers) {
if (!usedSNs.contains(unusedServer)) {
// check if any other unused server is there for us to use.
// If so use it. Else we have not other go but to go with one of them
if (!cluster.wouldLowerAvailability(regionInfo, unusedServer)) {
sn = unusedServer;
break;
}}
}
}
cluster.doAssignRegion(regionInfo, sn);
return sn;
} | 3.26 |
hbase_BaseLoadBalancer_roundRobinAssignment_rdh | /**
* Round-robin a list of regions to a list of servers
*/
private void roundRobinAssignment(BalancerClusterState cluster, List<RegionInfo> regions, List<ServerName> servers, Map<ServerName, List<RegionInfo>> assignments) {
Random rand = ThreadLocalRandom.current();
List<RegionInfo> unassignedRegions = new ArrayList<>();
int numServers = servers.size();
int numRegions = regions.size();
int max = ((int) (Math.ceil(((float) (numRegions)) / numServers)));
int serverIdx = 0;
if (numServers > 1) {
serverIdx = rand.nextInt(numServers);
}
int regionIdx = 0;
for (int j = 0; j < numServers; j++) {
ServerName server = servers.get((j + serverIdx) % numServers);
List<RegionInfo> serverRegions = new ArrayList<>(max);
for (int i = regionIdx; i < numRegions; i += numServers) {
RegionInfo region = regions.get(i % numRegions);
if (cluster.wouldLowerAvailability(region, server)) {
unassignedRegions.add(region);
} else {
serverRegions.add(region);
cluster.doAssignRegion(region, server);
}
}
assignments.put(server, serverRegions);
regionIdx++;
}
List<RegionInfo> lastFewRegions = new ArrayList<>();
// assign the remaining by going through the list and try to assign to servers one-by-one
serverIdx = rand.nextInt(numServers);
for (RegionInfo region : unassignedRegions) {
boolean assigned = false;
for (int j = 0; j < numServers; j++) {
// try all servers one by one
ServerName server = servers.get((j + serverIdx) % numServers);
if (cluster.wouldLowerAvailability(region, server)) {
continue;
} else {
assignments.computeIfAbsent(server, k -> new ArrayList<>()).add(region);
cluster.doAssignRegion(region, server);
serverIdx = ((j + serverIdx) + 1) % numServers;// remain from next server
assigned = true;
break;
}
}
if (!assigned) {
lastFewRegions.add(region);
}
}
// just sprinkle the rest of the regions on random regionservers. The balanceCluster will
// make it optimal later. we can end up with this if numReplicas > numServers.
for (RegionInfo region : lastFewRegions) {
int i = rand.nextInt(numServers);
ServerName server = servers.get(i);
assignments.computeIfAbsent(server, k -> new ArrayList<>()).add(region);
cluster.doAssignRegion(region, server);
}
} | 3.26 |
hbase_BaseLoadBalancer_getRegionAssignmentsByServer_rdh | // return a modifiable map, as we may add more entries into the returned map.
private Map<ServerName, List<RegionInfo>> getRegionAssignmentsByServer(Collection<RegionInfo> regions) {
return provider != null ? new HashMap<>(provider.getSnapShotOfAssignment(regions)) : new HashMap<>();
} | 3.26 |
hbase_SimpleRpcServerResponder_processResponse_rdh | /**
* Process the response for this call. You need to have the lock on
* {@link org.apache.hadoop.hbase.ipc.SimpleServerRpcConnection#responseWriteLock}
*
* @return true if we processed the call fully, false otherwise.
*/
private boolean processResponse(SimpleServerRpcConnection conn, RpcResponse resp) throws IOException {
boolean error = true;
BufferChain buf = resp.getResponse();
if (conn.useWrap) {
buf = wrapWithSasl(conn.saslServer, buf);
}
try {
// Send as much data as we can in the non-blocking fashion
long numBytes = this.simpleRpcServer.channelWrite(conn.channel, buf);
if (numBytes < 0) {
throw new HBaseIOException("Error writing on the socket " + conn);
}
error = false;
} finally {
if (error) {
SimpleRpcServer.LOG.debug(conn + ": output error -- closing");
// We will be closing this connection itself. Mark this call as done so that all the
// buffer(s) it got from pool can get released
resp.done();
this.simpleRpcServer.closeConnection(conn);
}
}
if (!buf.hasRemaining()) {
resp.done();
return true;
} else {
// set the serve time when the response has to be sent later
conn.lastSentTime = EnvironmentEdgeManager.currentTime();
return false;// Socket can't take more, we will have to come back.
}
} | 3.26 |
hbase_SimpleRpcServerResponder_doRespond_rdh | //
// Enqueue a response from the application.
//
void doRespond(SimpleServerRpcConnection conn, RpcResponse resp) throws IOException {
boolean added = false;
// If there is already a write in progress, we don't wait. This allows to free the handlers
// immediately for other tasks.
if (conn.responseQueue.isEmpty() && conn.responseWriteLock.tryLock()) {
try {
if (conn.responseQueue.isEmpty()) {
// If we're alone, we can try to do a direct call to the socket. It's
// an optimization to save on context switches and data transfer between cores..
if (processResponse(conn, resp)) {
return;// we're done.
}
// Too big to fit, putting ahead.
conn.responseQueue.addFirst(resp);
added = true;// We will register to the selector later, outside of the lock.
}
} finally {
conn.responseWriteLock.unlock();
}
}
if (!added) {
conn.responseQueue.addLast(resp);
}
registerForWrite(conn);
} | 3.26 |
hbase_SimpleRpcServerResponder_registerWrites_rdh | /**
* Take the list of the connections that want to write, and register them in the selector.
*/
private void registerWrites() {
Iterator<SimpleServerRpcConnection> it = writingCons.iterator();
while (it.hasNext()) {
SimpleServerRpcConnection c = it.next();
it.remove();
SelectionKey sk = c.channel.keyFor(writeSelector);
try {
if (sk == null) {
try {
c.channel.register(writeSelector, SelectionKey.OP_WRITE, c);
} catch (ClosedChannelException e) {
// ignore: the client went away.
if (SimpleRpcServer.LOG.isTraceEnabled())
SimpleRpcServer.LOG.trace("ignored", e);
}
} else {
sk.interestOps(SelectionKey.OP_WRITE);
}
} catch (CancelledKeyException e) {// ignore: the client went away.
if (SimpleRpcServer.LOG.isTraceEnabled())
SimpleRpcServer.LOG.trace("ignored", e);
}
}
} | 3.26 |
hbase_SimpleRpcServerResponder_purge_rdh | /**
* If there were some calls that have not been sent out for a long time, we close the connection.
*
* @return the time of the purge.
*/
private long purge(long lastPurgeTime) {
long now = EnvironmentEdgeManager.currentTime();
if (now < (lastPurgeTime + this.simpleRpcServer.purgeTimeout)) {
return lastPurgeTime;
}
ArrayList<SimpleServerRpcConnection> v9 = new ArrayList<>();
// get the list of channels from list of keys.
synchronized(writeSelector.keys()) {
for (SelectionKey key : writeSelector.keys()) {
SimpleServerRpcConnection connection = ((SimpleServerRpcConnection) (key.attachment()));
if (connection == null) {
throw new IllegalStateException("Coding error: SelectionKey key without attachment.");
}
if ((connection.lastSentTime > 0) && (now > (connection.lastSentTime + this.simpleRpcServer.purgeTimeout))) {
v9.add(connection);
}
}
}
// Seems safer to close the connection outside of the synchronized loop...
for (SimpleServerRpcConnection connection : v9) {
this.simpleRpcServer.closeConnection(connection);
}
return now;
} | 3.26 |
hbase_SimpleRpcServerResponder_registerForWrite_rdh | /**
* Add a connection to the list of connections that want to write.
*/
public void registerForWrite(SimpleServerRpcConnection c) {
if (writingCons.add(c)) {
writeSelector.wakeup();
}
} | 3.26 |
hbase_RoundRobinTableInputFormat_configure_rdh | /**
* Adds a configuration to the Context disabling remote rpc'ing to figure Region size when
* calculating InputSplits. See up in super-class TIF where we rpc to every server to find the
* size of all involved Regions. Here we disable this super-class action. This means InputSplits
* will have a length of zero. If all InputSplits have zero length, the ordering done
* in here will 'pass-through' Hadoop's length-first sort. The superclass TIF will ask every node
* for the current size of each of the participating Table Regions. It does this because it wants
* to schedule the biggest Regions first (This fixation comes of hadoop itself -- see JobSubmitter
* where it sorts inputs by size). This extra diligence takes time and is of no utility in this
* RRTIF where spread is of more import than size-first. Also, if a rolling restart is happening
* when we go to launch the job, the job launch may fail because the request for Region size fails
* -- even after retries -- because rolled RegionServer may take a while to come online: e.g. it
* takes Java 90 seconds to allocate a 160G heap. The RegionServer is offline during this time. The job
* launch will fail with 'Connection rejected'. So, we set 'hbase.regionsizecalculator.enable' to
* false here in RRTIF.
*
* @see #unconfigure()
*/
void configure() {
if (getConf().get(HBASE_REGIONSIZECALCULATOR_ENABLE) != null) {
this.hbaseRegionsizecalculatorEnableOriginalValue = getConf().getBoolean(HBASE_REGIONSIZECALCULATOR_ENABLE, true);
}
getConf().setBoolean(HBASE_REGIONSIZECALCULATOR_ENABLE, false);
} | 3.26 |
hbase_RoundRobinTableInputFormat_unconfigure_rdh | /**
*
* @see #configure()
*/
void unconfigure() {
if (this.hbaseRegionsizecalculatorEnableOriginalValue == null) {
getConf().unset(HBASE_REGIONSIZECALCULATOR_ENABLE);
} else {
getConf().setBoolean(HBASE_REGIONSIZECALCULATOR_ENABLE, this.hbaseRegionsizecalculatorEnableOriginalValue);
}
} | 3.26 |
hbase_RoundRobinTableInputFormat_getSuperSplits_rdh | /**
* Call super-classes' getSplits. Have it out here as its own method so can be overridden.
*/
List<InputSplit> getSuperSplits(JobContext context) throws IOException {
return super.getSplits(context);
} | 3.26 |
hbase_RoundRobinTableInputFormat_roundRobin_rdh | /**
* Spread the splits list so as to avoid clumping on RegionServers. Order splits so every server
* gets one split before a server gets a second, and so on; i.e. round-robin the splits amongst
* the servers in the cluster.
*/
List<InputSplit> roundRobin(List<InputSplit> inputs) throws IOException {
if ((inputs == null) || inputs.isEmpty()) {
return inputs;
}
List<InputSplit> result = new ArrayList<>(inputs.size());
// Prepare a hashmap with each region server as key and list of Input Splits as value
Map<String, List<InputSplit>> regionServerSplits = new HashMap<>();
for (InputSplit is : inputs) {
if (is instanceof TableSplit) {
String regionServer = ((TableSplit) (is)).getRegionLocation();
if ((regionServer != null) && (!StringUtils.isBlank(regionServer))) {
regionServerSplits.computeIfAbsent(regionServer, k -> new ArrayList<>()).add(is);
continue;
}
}
// If TableSplit or region server not found, add it anyways.
result.add(is);
}
// Write out splits in a manner that spreads splits for a RegionServer to avoid 'clumping'.
while (!regionServerSplits.isEmpty()) {
Iterator<List<InputSplit>> iter = regionServerSplits.values().iterator();
while (iter.hasNext()) {
List<InputSplit> inputSplitListForRegion = iter.next();
if (!inputSplitListForRegion.isEmpty()) {
result.add(inputSplitListForRegion.remove(0));
}
if (inputSplitListForRegion.isEmpty()) {
iter.remove();
}
}
}
return result;
} | 3.26 |
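The interleaving loop above, restated as a tiny self-contained sketch over plain strings so the "one split per server per pass" ordering is easy to see; server and split names are made up for illustration.

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class RoundRobinOrderDemo {
  public static void main(String[] args) {
    Map<String, List<String>> byServer = new LinkedHashMap<>();
    byServer.put("rs1", new ArrayList<>(Arrays.asList("rs1-a", "rs1-b", "rs1-c")));
    byServer.put("rs2", new ArrayList<>(Arrays.asList("rs2-a", "rs2-b")));
    byServer.put("rs3", new ArrayList<>(Arrays.asList("rs3-a")));
    List<String> ordered = new ArrayList<>();
    // Same shape as the while/iterator loop above: take the head split of every server per pass.
    while (!byServer.isEmpty()) {
      Iterator<List<String>> it = byServer.values().iterator();
      while (it.hasNext()) {
        List<String> splits = it.next();
        if (!splits.isEmpty()) {
          ordered.add(splits.remove(0));
        }
        if (splits.isEmpty()) {
          it.remove();
        }
      }
    }
    System.out.println(ordered); // [rs1-a, rs2-a, rs3-a, rs1-b, rs2-b, rs1-c]
  }
}
```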
hbase_RoundRobinTableInputFormat_main_rdh | /**
* Pass table name as argument. Set the zk ensemble to use with the System property
* 'hbase.zookeeper.quorum'
*/
public static void main(String[] args) throws IOException {
TableInputFormat tif = new RoundRobinTableInputFormat();
final Configuration configuration = HBaseConfiguration.create();
configuration.setBoolean("hbase.regionsizecalculator.enable", false);
configuration.set(HConstants.ZOOKEEPER_QUORUM, System.getProperty(HConstants.ZOOKEEPER_QUORUM, "localhost"));
configuration.set(TableInputFormat.INPUT_TABLE, args[0]);
tif.setConf(configuration);
List<InputSplit> splits = tif.getSplits(new JobContextImpl(configuration, new JobID()));
for (InputSplit split : splits) {
System.out.println(split);
}
} | 3.26 |
hbase_Append_setReturnResults_rdh | /**
* True (default) if the append operation should return the results. A client that is not
* interested in the result can save network bandwidth by setting this to false.
*/
@Override
public Append setReturnResults(boolean returnResults) {
super.setReturnResults(returnResults);
return this;
} | 3.26 |
hbase_Append_getTimeRange_rdh | /**
* Gets the TimeRange used for this append.
*/
public TimeRange getTimeRange() {
return this.tr;
} | 3.26 |
hbase_Append_isReturnResults_rdh | /**
* Returns current setting for returnResults
*/
// This method makes public the superclasses's protected method.
@Override
public boolean isReturnResults() {
return super.isReturnResults();
} | 3.26 |
hbase_Append_addColumn_rdh | /**
* Add the specified column and value to this Append operation.
*
* @param family
* family name
* @param qualifier
* column qualifier
* @param value
* value to append to specified column
*/
public Append addColumn(byte[] family, byte[] qualifier, byte[] value) {
KeyValue kv = new KeyValue(this.row, family, qualifier, this.ts, Type.Put, value);
return add(kv);
} | 3.26 |
hbase_Append_add_rdh | /**
* Add column and value to this Append operation.
*
* @return This instance
*/ @Override
public Append add(final Cell cell) {
try {
super.add(cell);
} catch (IOException e) {
// we eat the exception of wrong row for BC..
LOG.error(e.toString(), e);
}
return this;
} | 3.26 |
hbase_Append_setTimeRange_rdh | /**
* Sets the TimeRange to be used on the Get for this append.
* <p>
* This is useful for when you have counters that only last for specific periods of time (ie.
* counters that are partitioned by time). By setting the range of valid times for this append,
* you can potentially gain some performance with a more optimal Get operation. Be careful adding
* the time range to this class as you will update the old cell if the time range doesn't include
* the latest cells.
* <p>
* This range is used as [minStamp, maxStamp).
*
* @param minStamp
* minimum timestamp value, inclusive
* @param maxStamp
* maximum timestamp value, exclusive
*/
public Append setTimeRange(long minStamp, long maxStamp) {
tr = TimeRange.between(minStamp, maxStamp);
return this;
} | 3.26 |
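A hedged end-to-end sketch tying the Append rows above together; the `Table`, row key, and column family are assumptions, not part of the snippets.

```java
// Sketch only: assumes the usual org.apache.hadoop.hbase.client imports and an existing Table.
static void appendSuffix(Table table) throws IOException {
  Append append = new Append(Bytes.toBytes("row1"));
  append.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("-suffix"));
  append.setTimeRange(0L, System.currentTimeMillis()); // [minStamp, maxStamp)
  append.setReturnResults(false); // we do not need the new value shipped back
  table.append(append); // the append is applied server-side regardless of returnResults
}
```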
hbase_SnapshotQuotaObserverChore_getSnapshotsFromTables_rdh | /**
* Computes a mapping of originating {@code TableName} to snapshots, when the {@code TableName}
* exists in the provided {@code Set}.
*/
Multimap<TableName, String> getSnapshotsFromTables(Admin admin, Set<TableName> tablesToFetchSnapshotsFrom) throws IOException {
Multimap<TableName, String> snapshotsToCompute = HashMultimap.create();
for (SnapshotDescription sd : admin.listSnapshots()) {
TableName tn = sd.getTableName();
if (tablesToFetchSnapshotsFrom.contains(tn)) {
snapshotsToCompute.put(tn, sd.getName());
}
}
return snapshotsToCompute;
} | 3.26 |
hbase_SnapshotQuotaObserverChore_pruneTableSnapshots_rdh | /**
* Removes the snapshot entries that are present in Quota table but not in snapshotsToComputeSize
*
* @param snapshotsToComputeSize
* list of snapshots to be persisted
*/
void pruneTableSnapshots(Multimap<TableName, String> snapshotsToComputeSize) throws IOException {
Multimap<TableName, String> existingSnapshotEntries = QuotaTableUtil.getTableSnapshots(conn);
Multimap<TableName, String> snapshotEntriesToRemove = HashMultimap.create();
for (Entry<TableName, Collection<String>> entry : existingSnapshotEntries.asMap().entrySet()) {
TableName tn = entry.getKey();
Set<String> setOfSnapshots = new HashSet<>(entry.getValue());
for (String snapshot : snapshotsToComputeSize.get(tn)) {
setOfSnapshots.remove(snapshot);
}
for (String snapshot : setOfSnapshots) {
snapshotEntriesToRemove.put(tn, snapshot);
}
}
removeExistingTableSnapshotSizes(snapshotEntriesToRemove);
} | 3.26 |
hbase_SnapshotQuotaObserverChore_getPeriod_rdh | /**
* Extracts the period for the chore from the configuration.
*
* @param conf
* The configuration object.
* @return The configured chore period or the default value.
*/
static int getPeriod(Configuration conf) {
return conf.getInt(SNAPSHOT_QUOTA_CHORE_PERIOD_KEY, SNAPSHOT_QUOTA_CHORE_PERIOD_DEFAULT);
} | 3.26 |
hbase_SnapshotQuotaObserverChore_getSnapshotsToComputeSize_rdh | /**
* Fetches each table with a quota (table or namespace quota), and then fetch the name of each
* snapshot which was created from that table.
*
* @return A mapping of table to snapshots created from that table
*/
Multimap<TableName, String> getSnapshotsToComputeSize() throws IOException {
Set<TableName> tablesToFetchSnapshotsFrom = new HashSet<>();
QuotaFilter filter = new QuotaFilter();
filter.addTypeFilter(QuotaType.SPACE);
try (Admin admin = conn.getAdmin()) {
// Pull all of the tables that have quotas (direct, or from namespace)
for (QuotaSettings qs : QuotaRetriever.open(conf, filter)) {
if (qs.getQuotaType() == QuotaType.SPACE) {
String ns = qs.getNamespace();
TableName tn = qs.getTableName();
if (((null == ns) && (null == tn)) || ((null != ns) && (null != tn))) {
throw new IllegalStateException("Expected either one of namespace and tablename to be null but not both");
}
// Collect either the table name itself, or all of the tables in the namespace
if (null != ns) {
tablesToFetchSnapshotsFrom.addAll(Arrays.asList(admin.listTableNamesByNamespace(ns)));
} else {
tablesToFetchSnapshotsFrom.add(tn);
}
}
}
// Fetch all snapshots that were created from these tables
return getSnapshotsFromTables(admin, tablesToFetchSnapshotsFrom);
}
} | 3.26 |
hbase_SnapshotQuotaObserverChore_persistSnapshotSizesForNamespaces_rdh | /**
* Writes the size used by snapshots for each namespace to the quota table.
*/
void persistSnapshotSizesForNamespaces(Map<String, Long> snapshotSizesByNamespace) throws IOException {
try (Table quotaTable = conn.getTable(QuotaUtil.QUOTA_TABLE_NAME)) {
quotaTable.put(snapshotSizesByNamespace.entrySet().stream().map(e -> QuotaTableUtil.createPutForNamespaceSnapshotSize(e.getKey(), e.getValue())).collect(Collectors.toList()));
}
} | 3.26 |
hbase_SnapshotQuotaObserverChore_pruneNamespaceSnapshots_rdh | /**
* Removes the snapshot entries that are present in Quota table but not in snapshotsToComputeSize
*
* @param snapshotsToComputeSize
* list of snapshots to be persisted
*/
void pruneNamespaceSnapshots(Multimap<TableName, String> snapshotsToComputeSize) throws IOException {
Set<String> existingSnapshotEntries = QuotaTableUtil.getNamespaceSnapshots(conn);
for (TableName tableName : snapshotsToComputeSize.keySet()) {
existingSnapshotEntries.remove(tableName.getNamespaceAsString());
}
// here existingSnapshotEntries is left with the entries to be removed
removeExistingNamespaceSnapshotSizes(existingSnapshotEntries);
} | 3.26 |
hbase_SnapshotQuotaObserverChore_getInitialDelay_rdh | /**
* Extracts the initial delay for the chore from the configuration.
*
* @param conf
* The configuration object.
* @return The configured chore initial delay or the default value.
*/
static long getInitialDelay(Configuration conf) {
return conf.getLong(SNAPSHOT_QUOTA_CHORE_DELAY_KEY, SNAPSHOT_QUOTA_CHORE_DELAY_DEFAULT);
} | 3.26 |
hbase_SnapshotQuotaObserverChore_getTimeUnit_rdh | /**
* Extracts the time unit for the chore period and initial delay from the configuration. The
* configuration value for {@link #SNAPSHOT_QUOTA_CHORE_TIMEUNIT_KEY} must correspond to a
* {@link TimeUnit} value.
*
* @param conf
* The configuration object.
* @return The configured time unit for the chore period and initial delay or the default value.
*/
static TimeUnit getTimeUnit(Configuration conf) {
return TimeUnit.valueOf(conf.get(SNAPSHOT_QUOTA_CHORE_TIMEUNIT_KEY, SNAPSHOT_QUOTA_CHORE_TIMEUNIT_DEFAULT));
} | 3.26 |
hbase_SnapshotQuotaObserverChore_getNotifierForTable_rdh | /**
* Returns the correct instance of {@link FileArchiverNotifier} for the given table name.
*
* @param tn
* The table name
* @return A {@link FileArchiverNotifier} instance
*/
FileArchiverNotifier getNotifierForTable(TableName tn) {
return FileArchiverNotifierFactoryImpl.getInstance().get(conn, conf, fs, tn);
} | 3.26 |
hbase_SnapshotQuotaObserverChore_computeSnapshotSizes_rdh | /**
* Computes the size of each snapshot provided given the current files referenced by the table.
*
* @param snapshotsToComputeSize
* The snapshots to compute the size of
* @return A mapping of table to snapshot created from that table and the snapshot's size.
*/
Map<String, Long> computeSnapshotSizes(Multimap<TableName, String> snapshotsToComputeSize) throws IOException {
final Map<String, Long> snapshotSizesByNamespace = new HashMap<>();
final long start = System.nanoTime();
for (Entry<TableName, Collection<String>> entry : snapshotsToComputeSize.asMap().entrySet()) {
final TableName tn = entry.getKey();
final Collection<String> snapshotNames = entry.getValue();
// Get our notifier instance, this is tracking archivals that happen out-of-band of this chore
FileArchiverNotifier notifier = getNotifierForTable(tn);
// The total size consumed by all snapshots against this table
long totalSnapshotSize = notifier.computeAndStoreSnapshotSizes(snapshotNames);
// Bucket that size into the appropriate namespace
snapshotSizesByNamespace.merge(tn.getNamespaceAsString(), totalSnapshotSize, Long::sum);
}
// Update the amount of time it took to compute the size of the snapshots for a table
if (metrics != null) {
metrics.incrementSnapshotSizeComputationTime((System.nanoTime() - start) / 1000000);
}
return snapshotSizesByNamespace;
} | 3.26 |
hbase_ReplicationQueueInfo_extractDeadServersFromZNodeString_rdh | /**
* Parse dead server names from queue id. servername can contain "-" such as
* "ip-10-46-221-101.ec2.internal", so we need skip some "-" during parsing for the following
* cases: 2-ip-10-46-221-101.ec2.internal,52170,1364333181125-<server name>-...
*/
private static void extractDeadServersFromZNodeString(String deadServerListStr, List<ServerName> result) {
if (((deadServerListStr == null) || (result == null)) || deadServerListStr.isEmpty()) {
return;
}
// valid server name delimiter "-" has to be after "," in a server name
int seenCommaCnt = 0;
int startIndex = 0;
int v3 = deadServerListStr.length();
for (int i = 0; i < v3; i++) {
switch (deadServerListStr.charAt(i)) {
case ',' :
seenCommaCnt += 1;
break;
case '-' :
if (seenCommaCnt >= 2) {
if (i > startIndex) {
String serverName = deadServerListStr.substring(startIndex, i);
if (ServerName.isFullServerName(serverName)) {
result.add(ServerName.valueOf(serverName));
} else {
LOG.error("Found invalid server name:" + serverName);
}
startIndex = i + 1;
}
seenCommaCnt = 0;
}
break;
default :
break;
}
}
// add tail
if (startIndex < (v3 - 1)) {
String serverName = deadServerListStr.substring(startIndex, v3);
if (ServerName.isFullServerName(serverName)) {
result.add(ServerName.valueOf(serverName));
} else {
LOG.error("Found invalid server name at the end:" + serverName);
}
}
LOG.debug("Found dead servers:" + result);
} | 3.26 |
hbase_SampleUploader_configureJob_rdh | /**
* Job configuration.
*/
public static Job configureJob(Configuration conf, String[] args) throws IOException {
Path inputPath = new Path(args[0]);
String tableName = args[1];
Job v9 = new Job(conf, (NAME + "_") + tableName);
v9.setJarByClass(SampleUploader.Uploader.class);
FileInputFormat.setInputPaths(v9, inputPath);
v9.setInputFormatClass(SequenceFileInputFormat.class);
v9.setMapperClass(SampleUploader.Uploader.class);
// No reducers. Just write straight to table. Call initTableReducerJob
// because it sets up the TableOutputFormat.
TableMapReduceUtil.initTableReducerJob(tableName, null, v9);
v9.setNumReduceTasks(0);
return v9;
} | 3.26 |
hbase_SampleUploader_run_rdh | /**
* Main entry point.
*
* @param otherArgs
* The command line parameters after ToolRunner handles standard.
* @throws Exception
* When running the job fails.
*/
@Override
public int run(String[] otherArgs) throws Exception {
if (otherArgs.length != 2) {
System.err.println("Wrong number of arguments: " + otherArgs.length);
System.err.println(("Usage: " + NAME) + " <input> <tablename>");return -1;
}
Job job =
configureJob(getConf(), otherArgs);
return job.waitForCompletion(true) ? 0 : 1;
} | 3.26 |
hbase_InfoServer_addUnprivilegedServlet_rdh | /**
* Adds a servlet in the server that any user can access.
*
* @see HttpServer#addUnprivilegedServlet(String, ServletHolder)
*/
public void addUnprivilegedServlet(String name, String pathSpec, ServletHolder holder) {
if (name != null) {
holder.setName(name);
}
this.httpServer.addUnprivilegedServlet(pathSpec, holder);
} | 3.26 |
hbase_InfoServer_canUserModifyUI_rdh | /**
* Returns true if and only if UI authentication (spnego) is enabled, UI authorization is enabled,
* and the requesting user is defined as an administrator. If the UI is set to readonly, this
* method always returns false.
*/
public static boolean canUserModifyUI(HttpServletRequest req, ServletContext ctx, Configuration conf) {
if (conf.getBoolean("hbase.master.ui.readonly", false)) {
return false;
}
String remoteUser = req.getRemoteUser();
if (("kerberos".equalsIgnoreCase(conf.get(HttpServer.HTTP_UI_AUTHENTICATION)) && conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) && (remoteUser != null)) {
return HttpServer.userHasAdministratorAccess(ctx, remoteUser);
}
return false;
} | 3.26 |
hbase_InfoServer_addPrivilegedServlet_rdh | /**
* Adds a servlet in the server that only administrators can access.
*
* @see HttpServer#addPrivilegedServlet(String, String, Class)
*/
public void addPrivilegedServlet(String name, String pathSpec, Class<? extends HttpServlet> clazz) {
this.httpServer.addPrivilegedServlet(name, pathSpec, clazz);
} | 3.26 |
hbase_InfoServer_buildAdminAcl_rdh | /**
* Builds an ACL that will restrict the users who can issue commands to endpoints on the UI which
* are meant only for administrators.
*/
AccessControlList buildAdminAcl(Configuration conf) {
final String userGroups = conf.get(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_USERS_KEY, null);
final String adminGroups = conf.get(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_GROUPS_KEY, null);
if ((userGroups == null) && (adminGroups == null)) {
// Backwards compatibility - if the user doesn't have anything set, allow all users in.
return new AccessControlList("*", null);
}
return new AccessControlList(userGroups, adminGroups);
}
/**
* Explicitly invoke {@link #addPrivilegedServlet(String, String, Class)} or
* {@link #addUnprivilegedServlet(String, String, Class)} instead of this method. This method will
* add a servlet which any authenticated user can access.
*
* @deprecated Use {@link #addUnprivilegedServlet(String, String, Class)} or
{@link #addPrivilegedServlet(String, String, Class)} | 3.26 |
hbase_InfoServer_getPort_rdh | /**
*
* @return the port of the info server
* @deprecated Since 0.99.0
*/
@Deprecated
public int getPort() {
return this.httpServer.getPort();
} | 3.26 |
hbase_MutableSegment_add_rdh | /**
* Adds the given cell into the segment
*
* @param cell
* the cell to add
* @param mslabUsed
* whether using MSLAB
*/
public void add(Cell cell, boolean mslabUsed, MemStoreSizing memStoreSizing, boolean sizeAddedPreOperation) {
internalAdd(cell, mslabUsed, memStoreSizing, sizeAddedPreOperation);
} | 3.26 |
hbase_RSGroupInfo_setConfiguration_rdh | /**
* Setter for storing a configuration setting in {@link #configuration} map.
*
* @param key
* Config key.
* @param value
* String value.
*/
public void setConfiguration(String key, String value) {
configuration.put(key, Objects.requireNonNull(value));
} | 3.26 |
hbase_RSGroupInfo_addAllServers_rdh | /**
* Adds the given servers to the group.
*/
public void addAllServers(Collection<Address> hostPort) {
servers.addAll(hostPort);
} | 3.26 |
hbase_RSGroupInfo_getServers_rdh | /**
* Get list of servers.
*/
public Set<Address> getServers() {
return servers;
} | 3.26 |
hbase_RSGroupInfo_removeConfiguration_rdh | /**
* Remove a config setting represented by the key from the {@link #configuration} map
*/
public void removeConfiguration(final String key) {
configuration.remove(key);
} | 3.26 |
hbase_RSGroupInfo_getName_rdh | /**
* Get group name.
*/
public String getName() {
return name;
} | 3.26 |
hbase_RSGroupInfo_getConfiguration_rdh | /**
* Getter for fetching an unmodifiable {@link #configuration} map.
*/
public Map<String, String> getConfiguration() {
// shallow pointer copy
return Collections.unmodifiableMap(configuration);
} | 3.26 |
hbase_RSGroupInfo_removeServer_rdh | /**
* Remove given server from the group.
*/
public boolean removeServer(Address hostPort) {
return servers.remove(hostPort);
} | 3.26 |
hbase_RSGroupInfo_containsServer_rdh | /**
* Returns true if a server with hostPort is found
*/
public boolean containsServer(Address hostPort) {
return servers.contains(hostPort);
} | 3.26 |
hbase_RSGroupInfo_addServer_rdh | /**
* Adds the given server to the group.
*/
public void addServer(Address hostPort) {
servers.add(hostPort);
} | 3.26 |
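A brief hedged sketch of the RSGroupInfo accessors above; the `RSGroupInfo` instance, hostname, and configuration key/value are assumptions, and `Address.fromParts` is assumed to be the usual HBase helper for building host:port addresses.

```java
// Sketch only: assumes org.apache.hadoop.hbase.net.Address and an existing RSGroupInfo.
static void manageGroup(RSGroupInfo group) {
  Address rs = Address.fromParts("rs1.example.com", 16020);
  group.addServer(rs);
  boolean present = group.containsServer(rs); // true
  group.setConfiguration("demo.key", "demo-value"); // illustrative key/value only
  System.out.println(present + " " + group.getConfiguration());
  group.removeServer(rs);
}
```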
hbase_SplitLogWorker_start_rdh | /**
* start the SplitLogWorker thread
*/
public void start() {
worker = new Thread(null, this, "SplitLogWorker-" + server.getServerName().toShortString());
worker.start();
} | 3.26 |
hbase_SplitLogWorker_splitLog_rdh | /**
* Returns Result either DONE, RESIGNED, or ERR.
*/
static Status splitLog(String filename, CancelableProgressable p, Configuration conf, RegionServerServices server, LastSequenceId sequenceIdChecker, WALFactory factory) {
Path walDir;
FileSystem fs;
try {
walDir = CommonFSUtils.getWALRootDir(conf);
fs = walDir.getFileSystem(conf);
} catch (IOException e) {
LOG.warn("Resigning, could not find root dir or fs", e);
return Status.RESIGNED;
}
try {
if (!processSyncReplicationWAL(filename, conf, server, fs, walDir)) {
return Status.DONE;
}
} catch (IOException e) {
LOG.warn("failed to process sync replication wal {}", filename, e);
return Status.RESIGNED;
}
// TODO have to correctly figure out when log splitting has been
// interrupted or has encountered a transient error and when it has
// encountered a bad non-retry-able persistent error.
try {
SplitLogWorkerCoordination splitLogWorkerCoordination = (server.getCoordinatedStateManager() == null) ? null : server.getCoordinatedStateManager().getSplitLogWorkerCoordination();
if (!WALSplitter.splitLogFile(walDir, fs.getFileStatus(new Path(walDir, filename)), fs, conf, p, sequenceIdChecker, splitLogWorkerCoordination, factory, server)) {
return Status.PREEMPTED;
}
} catch (InterruptedIOException iioe) {
LOG.warn("Resigning, interrupted splitting WAL {}", filename, iioe);
return Status.RESIGNED;
} catch (IOException e) {
if (e instanceof FileNotFoundException) {
// A wal file may not exist anymore. Nothing can be recovered so move on
LOG.warn("Done, WAL {} does not exist anymore", filename, e);
return Status.DONE;
}
Throwable cause = e.getCause();
if ((e instanceof RetriesExhaustedException) && (((cause instanceof NotServingRegionException) || (cause instanceof ConnectException)) || (cause instanceof SocketTimeoutException))) {
LOG.warn("Resigning, can't connect to target regionserver splitting WAL {}", filename, e);
return Status.RESIGNED;
} else if (cause instanceof InterruptedException) {
LOG.warn("Resigning, interrupted splitting WAL {}", filename, e);
return Status.RESIGNED;
}
LOG.warn("Error splitting WAL {}", filename, e);
return Status.ERR;
}
LOG.debug("Done splitting WAL {}", filename);
return Status.DONE;
} | 3.26 |
hbase_SplitLogWorker_m0_rdh | /**
* Returns the number of tasks processed by coordination. This method is used by tests only
*/
public int m0() {
return coordination.getTaskReadySeq();
} | 3.26 |
hbase_SplitLogWorker_processSyncReplicationWAL_rdh | // returns whether we need to continue the split work
private static boolean processSyncReplicationWAL(String name, Configuration conf, RegionServerServices server,
FileSystem fs, Path walDir) throws IOException {
Path walFile = new Path(walDir, name);
String filename = walFile.getName();
Optional<String> optSyncPeerId = AbstractWALProvider.getSyncReplicationPeerIdFromWALName(filename);
if (!optSyncPeerId.isPresent()) {
return true;
}
String peerId = optSyncPeerId.get();
ReplicationPeerImpl peer = server.getReplicationSourceService().getReplicationPeers().getPeer(peerId);
if ((peer == null) || (!peer.getPeerConfig().isSyncReplication())) {
return true;
}
Pair<SyncReplicationState, SyncReplicationState> stateAndNewState = peer.getSyncReplicationStateAndNewState();
if (stateAndNewState.getFirst().equals(SyncReplicationState.ACTIVE) && stateAndNewState.getSecond().equals(SyncReplicationState.NONE)) {
// copy the file to remote and overwrite the previous one
String remoteWALDir = peer.getPeerConfig().getRemoteWALDir();
Path remoteWALDirForPeer = ReplicationUtils.getPeerRemoteWALDir(remoteWALDir, peerId);
Path tmpRemoteWAL = new Path(remoteWALDirForPeer, filename + ".tmp");
FileSystem remoteFs = ReplicationUtils.getRemoteWALFileSystem(conf, remoteWALDir);
try (FSDataInputStream in = fs.open(walFile);
FSDataOutputStream out = remoteFs.createNonRecursive(tmpRemoteWAL, true, CommonFSUtils.getDefaultBufferSize(remoteFs), remoteFs.getDefaultReplication(tmpRemoteWAL), remoteFs.getDefaultBlockSize(tmpRemoteWAL), null)) {
IOUtils.copy(in, out);
}
Path toCommitRemoteWAL = new Path(remoteWALDirForPeer, filename + ReplicationUtils.RENAME_WAL_SUFFIX);
// Some FileSystem implementations may not support atomic rename so we need to do it in two
// phases
FSUtils.renameFile(remoteFs, tmpRemoteWAL, toCommitRemoteWAL);
FSUtils.renameFile(remoteFs, toCommitRemoteWAL, new Path(remoteWALDirForPeer, filename));
} else if ((stateAndNewState.getFirst().equals(SyncReplicationState.ACTIVE) && stateAndNewState.getSecond().equals(SyncReplicationState.STANDBY)) || stateAndNewState.getFirst().equals(SyncReplicationState.STANDBY)) {
// check whether we still need to process this file
// actually we only write wal file which name is ended with .syncrep in A state, and after
// transiting to a state other than A, we will reopen all the regions so the data in the wal
// will be flushed so the wal file will be archived soon. But it is still possible that there
// is a server crash when we are transiting from A to S, to simplify the logic of the transit
// procedure, here we will also check the remote snapshot directory in state S, so that we do
// not need wait until all the wal files with .syncrep suffix to be archived before finishing
// the procedure.
String remoteWALDir = peer.getPeerConfig().getRemoteWALDir();
Path remoteSnapshotDirForPeer = ReplicationUtils.getPeerSnapshotWALDir(remoteWALDir, peerId);
FileSystem remoteFs = ReplicationUtils.getRemoteWALFileSystem(conf, remoteWALDir);
if (remoteFs.exists(new Path(remoteSnapshotDirForPeer, filename))) {
// the file has been replayed when the remote cluster transited from S to DA, and the
// content will be replicated back to us, so give up splitting it.
LOG.warn("Giveup splitting {} since it has been replayed in the remote cluster and " +
"the content will be replicated back", filename);
return false;
}
}
return true;
} | 3.26 |
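The copy-to-remote branch above publishes the WAL through two renames because not every FileSystem implementation guarantees an atomic rename. A small sketch of the same two-phase publish pattern using java.nio.file instead of the Hadoop FileSystem API; the ".rename" suffix here is only a stand-in for the real rename suffix constant:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Hypothetical sketch of the two-phase publish: copy to a ".tmp" file first, then move it
// through an intermediate name before the final name, so a reader never sees a half-copied WAL.
public class TwoPhaseWalPublish {
  static void publish(Path srcWal, Path remoteDir, String name) throws IOException {
    Path tmp = remoteDir.resolve(name + ".tmp");
    Path toCommit = remoteDir.resolve(name + ".rename"); // stand-in for the real rename suffix
    Path finalWal = remoteDir.resolve(name);

    Files.copy(srcWal, tmp, StandardCopyOption.REPLACE_EXISTING);        // full copy, may be slow
    Files.move(tmp, toCommit, StandardCopyOption.REPLACE_EXISTING);      // phase 1: mark as complete
    Files.move(toCommit, finalWal, StandardCopyOption.REPLACE_EXISTING); // phase 2: publish
  }
}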
hbase_SplitLogWorker_stopTask_rdh | /**
* If the worker is doing a task i.e. splitting a log file then stop the task. It doesn't exit the
* worker thread.
*/
public void stopTask() {
LOG.info("Sending interrupt to stop the worker thread");
worker.interrupt(); // TODO interrupt often gets swallowed, do what else?
} | 3.26 |
hbase_SplitLogWorker_stop_rdh | /**
* stop the SplitLogWorker thread
*/
public void stop() {
coordination.stopProcessingTasks();
stopTask();
} | 3.26 |
hbase_RegionReplicationBufferManager_decrease_rdh | /**
* Called after you ship the edits out.
*/
public void decrease(long size) {
pendingSize.addAndGet(-size);
} | 3.26 |
hbase_RegionReplicationBufferManager_increase_rdh | /**
* Return whether we should just drop all the edits, if we have reached the hard limit of max
* pending size.
*
* @return {@code true} means OK, {@code false} means drop all the edits.
*/
public boolean increase(long size) {
long sz = pendingSize.addAndGet(size);
if (sz > softMaxPendingSize) {
executor.execute(this::flush);
}
return sz <= maxPendingSize;
} | 3.26 |
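The two methods above implement a soft/hard back-pressure scheme: crossing the soft limit schedules an asynchronous flush, crossing the hard limit tells the caller to drop the edits, and decrease() gives the bytes back once they are shipped. A standalone sketch of the same pattern; the thresholds and the flush body are placeholders, not HBase's actual configuration:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicLong;

// Hypothetical sketch of the soft/hard limit pattern: past the soft limit schedule an async
// flush, past the hard limit the caller is told to drop the edits.
public class PendingSizeSketch {
  private final AtomicLong pendingSize = new AtomicLong();
  private final long softMax = 64 * 1024 * 1024;   // assumed soft limit
  private final long hardMax = 128 * 1024 * 1024;  // assumed hard limit
  private final ExecutorService executor = Executors.newSingleThreadExecutor();

  public boolean increase(long size) {
    long sz = pendingSize.addAndGet(size);
    if (sz > softMax) {
      executor.execute(this::flush);   // relieve pressure in the background
    }
    return sz <= hardMax;              // false => caller should drop the edits
  }

  public void decrease(long size) {
    pendingSize.addAndGet(-size);      // called after the edits are shipped
  }

  private void flush() {
    // placeholder: flush so pending entries can be released
  }
}

Callers would pair each successful increase() with a decrease() once the batch is shipped; otherwise the pending size never drains.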
hbase_ColumnCount_setCount_rdh | /**
* Set the current count to a new count
*
* @param count
* new count to set
*/
public void setCount(int count) {
this.count = count;
} | 3.26 |
hbase_ColumnCount_getLength_rdh | /**
* Returns the length
*/
public int getLength() {
return this.length;
} | 3.26 |
hbase_ColumnCount_increment_rdh | /**
* Increment the current version count
*
* @return current count
*/
public int increment() {
return ++count;
} | 3.26 |
hbase_ColumnCount_getOffset_rdh | /**
* Returns the offset
*/
public int getOffset() {
return this.offset;
}
hbase_ColumnCount_getBuffer_rdh | /**
* Returns the buffer
*/
public byte[] getBuffer() {
return this.bytes;
} | 3.26 |
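Taken together, the ColumnCount accessors above describe a small mutable holder: a column qualifier given as a (buffer, offset, length) slice plus a version counter that is bumped as matching cells are seen. A hypothetical sketch of the same shape, not the actual HBase class:

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

// Hypothetical sketch of a (buffer, offset, length) column slice with a mutable version count.
public class ColumnCountSketch {
  private final byte[] bytes;
  private final int offset;
  private final int length;
  private int count;

  public ColumnCountSketch(byte[] bytes, int offset, int length, int count) {
    this.bytes = bytes;
    this.offset = offset;
    this.length = length;
    this.count = count;
  }

  public int increment() { return ++count; }              // one more version seen for this column
  public void setCount(int count) { this.count = count; }
  public byte[] getBuffer() { return bytes; }
  public int getOffset() { return offset; }
  public int getLength() { return length; }

  public static void main(String[] args) {
    byte[] backing = "cf:qualifier".getBytes(StandardCharsets.UTF_8);
    // track only the "qualifier" part of the backing array
    ColumnCountSketch col = new ColumnCountSketch(backing, 3, backing.length - 3, 0);
    col.increment();
    col.increment();
    System.out.println(new String(Arrays.copyOfRange(backing, col.getOffset(),
        col.getOffset() + col.getLength()), StandardCharsets.UTF_8) + " -> " + col.increment());
    // prints: qualifier -> 3
  }
}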
hbase_CompactionPipeline_replaceAtIndex_rdh | // replacing one segment in the pipeline with a new one exactly at the same index
// need to be called only within synchronized block
@SuppressWarnings(value = "VO_VOLATILE_INCREMENT", justification = "replaceAtIndex is invoked under a synchronize block so safe")
private void replaceAtIndex(int idx, ImmutableSegment newSegment) {
pipeline.set(idx, newSegment);
readOnlyCopy = new LinkedList<>(pipeline);
// the version increment is indeed needed, because the swap uses removeAll() method of the
// linked-list that compares the objects to find what to remove.
// The flattening changes the segment object completely (creation pattern) and so
// swap will not proceed correctly after concurrent flattening.
version++;
} | 3.26 |
hbase_CompactionPipeline_flattenOneSegment_rdh | /**
* If the caller holds the current version, go over the pipeline and try to flatten each
* segment. Flattening replaces the ConcurrentSkipListMap based CellSet with a CellArrayMap based one.
* Flattening a segment that is not initially based on ConcurrentSkipListMap has no effect.
* Return after one segment is successfully flattened.
*
* @return true iff a segment was successfully flattened
*/
public boolean flattenOneSegment(long requesterVersion, CompactingMemStore.IndexType idxType, MemStoreCompactionStrategy.Action action) {
if (requesterVersion != version) {
LOG.warn((("Segment flattening failed, because versions do not match. Requester version: " + requesterVersion) + ", actual version: ") + version);
return false;
}
synchronized (pipeline) {
if (requesterVersion != version) {
LOG.warn("Segment flattening failed, because versions do not match");
return false;
}
int i = -1;
for (ImmutableSegment s : pipeline) {
i++;
if (s.canBeFlattened()) {
s.waitForUpdates(); // to ensure all updates preceding the in-memory flush of s have completed
if (s.isEmpty()) {
// after s.waitForUpdates() returns there are no updates pending; if there are no cells in s,
// we can skip it.
continue;
}
// size to be updated
MemStoreSizing newMemstoreAccounting = new NonThreadSafeMemStoreSizing();
ImmutableSegment newS = SegmentFactory.instance().createImmutableSegmentByFlattening(((CSLMImmutableSegment) (s)), idxType, newMemstoreAccounting, action);
replaceAtIndex(i, newS);
if (region != null) {
// Update the global memstore size counter upon flattening; there is no change in the
// data size
MemStoreSize mss = newMemstoreAccounting.getMemStoreSize();
region.addMemStoreSize(mss.getDataSize(), mss.getHeapSize(), mss.getOffHeapSize(), mss.getCellsCount());
}
LOG.debug("Compaction pipeline segment {} flattened", s);
return true;
}
}
}
// do not update the global memstore size counter and do not increase the version,
// because all the cells remain in place
return false;
} | 3.26 |
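flattenOneSegment only proceeds if the caller's version matches, and it re-checks under the pipeline lock before mutating: the optimistic read avoids taking the lock for stale requesters, and every structural change bumps the version so concurrent swaps and flattenings cannot race each other. A self-contained sketch of that version-guard pattern, not the real CompactionPipeline:

import java.util.ArrayList;
import java.util.List;

// Hypothetical sketch of the version guard used above: check optimistically, re-check under the
// lock, and bump the version on every structural change so stale callers back off instead of
// racing a concurrent swap.
public class VersionedPipelineSketch<T> {
  private final List<T> pipeline = new ArrayList<>();
  private volatile long version = 0;

  public synchronized void append(T segment) {
    pipeline.add(segment);
    version++;
  }

  public long getVersion() {
    return version;
  }

  public boolean replaceIfCurrent(long requesterVersion, int idx, T newSegment) {
    if (requesterVersion != version) {
      return false;                      // cheap early exit, no lock taken
    }
    synchronized (this) {
      if (requesterVersion != version) {
        return false;                    // lost the race to another structural change
      }
      pipeline.set(idx, newSegment);     // caller must pass a valid index
      version++;                         // invalidate anyone holding the old version
      return true;
    }
  }
}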
hbase_CompactionPipeline_swapSuffix_rdh | /**
* Must be called under the {@link CompactionPipeline#pipeline} Lock.
*/
private void swapSuffix(List<? extends Segment> suffix, ImmutableSegment segment, boolean closeSegmentsInSuffix) {
matchAndRemoveSuffixFromPipeline(suffix);
if (segment != null) {
pipeline.addLast(segment);
}
// During index merge we won't be closing the segments undergoing the merge. Segment#close()
// will release the MSLAB chunks to pool. But in case of index merge there won't be any data copy
// from old MSLABs. So the new cells in the new segment also refer to the same chunks. In case of data
// compaction, we would have copied the cells data from old MSLAB chunks into a new chunk
// created for the result segment. So we can release the chunks associated with the compacted
// segments.
if (closeSegmentsInSuffix) {
for (Segment itemInSuffix : suffix) {
itemInSuffix.close();
}
}
} | 3.26 |
hbase_CompactionPipeline_validateSuffixList_rdh | // debug method
private boolean validateSuffixList(LinkedList<ImmutableSegment> suffix) {
if (suffix.isEmpty()) {
// empty suffix is always valid
return true;
}
Iterator<ImmutableSegment> pipelineBackwardIterator = pipeline.descendingIterator();
Iterator<ImmutableSegment> suffixBackwardIterator = suffix.descendingIterator();
ImmutableSegment v46;
ImmutableSegment pipelineCurrent;
for (; suffixBackwardIterator.hasNext();) {
if (!pipelineBackwardIterator.hasNext()) {
// a suffix longer than pipeline is invalid
return false;
}
v46 = suffixBackwardIterator.next();
pipelineCurrent = pipelineBackwardIterator.next();
if (v46 != pipelineCurrent) {
// non-matching suffix
return false;
}
}
// suffix matches pipeline suffix
return true;
} | 3.26 |
hbase_CompactionPipeline_matchAndRemoveSuffixFromPipeline_rdh | /**
* Checks that the {@link Segment}s in the suffix input parameter are the same as the {@link Segment}s in
* {@link CompactionPipeline#pipeline} one by one from the last element to the first element of
* suffix. If matched, remove suffix from {@link CompactionPipeline#pipeline}. <br/>
* Must be called under the {@link CompactionPipeline#pipeline} Lock.
*/
private void matchAndRemoveSuffixFromPipeline(List<? extends Segment> suffix) {
if (suffix.isEmpty()) {
return;
}
if (pipeline.size() < suffix.size()) {
throw new IllegalStateException(((("CODE-BUG:pipleine size:[" +
pipeline.size()) + "],suffix size:[") + suffix.size()) + "],pipeline size must greater than or equals suffix size");
}
ListIterator<? extends Segment> suffixIterator = suffix.listIterator(suffix.size());
ListIterator<? extends Segment> pipelineIterator = pipeline.listIterator(pipeline.size());
int count = 0;
while (suffixIterator.hasPrevious()) {
Segment suffixSegment = suffixIterator.previous();
Segment pipelineSegment = pipelineIterator.previous();
if (suffixSegment != pipelineSegment) {
throw new IllegalStateException(((((("CODE-BUG:suffix last:[" + count) + "]") + suffixSegment) + " is not pipleline segment:[") + pipelineSegment) + "]");
}
count++;
}
for (int index = 1; index <= count; index++) {
pipeline.pollLast();
}
} | 3.26 |
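matchAndRemoveSuffixFromPipeline walks the suffix and the pipeline tail backwards, comparing elements by object identity, and only after every element matches does it poll that many entries off the tail. A generic, self-contained sketch of that tail-matching removal (not the HBase class itself):

import java.util.LinkedList;
import java.util.List;
import java.util.ListIterator;

// Hypothetical sketch of the tail-matching removal above: compare the suffix against the
// pipeline tail by object identity (not equals), and only then drop that many elements.
public class SuffixRemovalSketch<T> {
  private final LinkedList<T> pipeline = new LinkedList<>();

  public void removeMatchingSuffix(List<? extends T> suffix) {
    if (suffix.isEmpty()) {
      return;
    }
    if (pipeline.size() < suffix.size()) {
      throw new IllegalStateException("suffix longer than pipeline");
    }
    ListIterator<? extends T> s = suffix.listIterator(suffix.size());
    ListIterator<T> p = pipeline.listIterator(pipeline.size());
    int matched = 0;
    while (s.hasPrevious()) {
      if (s.previous() != p.previous()) {   // identity comparison, same as the original
        throw new IllegalStateException("suffix does not match pipeline tail");
      }
      matched++;
    }
    for (int i = 0; i < matched; i++) {
      pipeline.pollLast();                  // drop exactly the matched tail
    }
  }
}

Comparing by identity rather than equals() matters here because flattening replaces a segment object in place, so an equals()-based removal could remove a newer segment than intended.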