name (stringlengths 12-178) | code_snippet (stringlengths 8-36.5k) | score (float64 3.26-3.68) |
---|---|---|
hbase_StripeStoreFileManager_rowEquals_rdh | /**
* Compare two keys for equality.
*/
private final boolean rowEquals(byte[] k1, byte[] k2) {
return Bytes.equals(k1, 0, k1.length, k2, 0, k2.length);
} | 3.26 |
hbase_StripeStoreFileManager_findStripeIndexByEndRow_rdh | /**
* Finds the stripe index by end row.
*/
private final int findStripeIndexByEndRow(byte[] endRow) {
    assert !isInvalid(endRow);
    if (isOpen(endRow)) {
      return state.f0.length;
    }
return Arrays.binarySearch(state.f0, endRow, Bytes.BYTES_COMPARATOR);
} | 3.26 |
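The lookup above relies on `Arrays.binarySearch` semantics: a non-negative index is returned only on an exact match against a stripe end row, while a negative value (`-(insertionPoint) - 1`) means no stripe ends at that row, which callers such as `processNewCandidateStripes` treat as a bad range. A minimal plain-Java sketch of those semantics, with hypothetical row values (the real code compares with `Bytes.BYTES_COMPARATOR`):

```java
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Comparator;

public class BinarySearchSemantics {
  public static void main(String[] args) {
    // Hypothetical, sorted stripe end rows (stand-in for state.f0).
    byte[][] endRows = {
      "bbb".getBytes(StandardCharsets.UTF_8),
      "ddd".getBytes(StandardCharsets.UTF_8),
      "fff".getBytes(StandardCharsets.UTF_8)
    };
    Comparator<byte[]> cmp =
      Comparator.comparing(b -> new String(b, StandardCharsets.UTF_8));

    // Exact match on an end row: returns the stripe index (1 here).
    System.out.println(Arrays.binarySearch(endRows, "ddd".getBytes(StandardCharsets.UTF_8), cmp));
    // No stripe ends at "ccc": returns -(insertionPoint) - 1 == -2,
    // i.e. a negative value that the caller rejects as a bad range.
    System.out.println(Arrays.binarySearch(endRows, "ccc".getBytes(StandardCharsets.UTF_8), cmp));
  }
}
```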
hbase_StripeStoreFileManager_processNewCandidateStripes_rdh | /**
* See {@link #addCompactionResults(Collection, Collection)} - updates the stripe list with new
* candidate stripes/removes old stripes; produces new set of stripe end rows.
*
* @param newStripes
* New stripes - files by end row.
*/
private void processNewCandidateStripes(TreeMap<byte[], HStoreFile> newStripes) {
// Validate that the removed and added aggregate ranges still make for a full key space.
    boolean hasStripes = !this.stripeFiles.isEmpty();
this.stripeEndRows = new ArrayList<>(Arrays.asList(StripeStoreFileManager.this.state.f0));
int removeFrom = 0;
byte[] firstStartRow = startOf(newStripes.firstEntry().getValue());
byte[] v95 = newStripes.lastKey();
if ((!hasStripes) && ((!isOpen(firstStartRow)) || (!isOpen(v95)))) {
throw new IllegalStateException("Newly created stripes do not cover the entire key space.");
}
boolean canAddNewStripes = true;
Collection<HStoreFile> filesForL0 = null;
if (hasStripes) {
// Determine which stripes will need to be removed because they conflict with new stripes.
// The new boundaries should match old stripe boundaries, so we should get exact matches.
if (isOpen(firstStartRow)) {
removeFrom = 0;
} else {
removeFrom = findStripeIndexByEndRow(firstStartRow);
        if (removeFrom < 0) {
throw new IllegalStateException("Compaction is trying to add a bad range.");
}
++removeFrom;
}
      int removeTo = findStripeIndexByEndRow(v95);
if (removeTo < 0) {
throw new IllegalStateException("Compaction is trying to add a bad range.");
}
// See if there are files in the stripes we are trying to replace.
ArrayList<HStoreFile> conflictingFiles = new ArrayList<>();
for (int v100 = removeTo; v100 >= removeFrom; --v100) {
conflictingFiles.addAll(this.stripeFiles.get(v100));
}
if (!conflictingFiles.isEmpty()) {
// This can be caused by two things - concurrent flush into stripes, or a bug.
// Unfortunately, we cannot tell them apart without looking at timing or something
// like that. We will assume we are dealing with a flush and dump it into L0.
if (isFlush) {
long newSize = StripeCompactionPolicy.getTotalFileSize(newStripes.values());
LOG.warn(("Stripes were created by a flush, but results of size " + newSize) + " cannot be added because the stripes have changed");
canAddNewStripes = false;
filesForL0 = newStripes.values();
} else {
          long oldSize = StripeCompactionPolicy.getTotalFileSize(conflictingFiles);
          LOG.info((((conflictingFiles.size() + " conflicting files (likely created by a flush) ") + " of size ") + oldSize) + " are moved to L0 due to concurrent stripe change");
filesForL0 = conflictingFiles;
}
if (filesForL0 != null) {
for (HStoreFile sf : filesForL0) {
insertFileIntoStripe(getLevel0Copy(), sf);
}
l0Results.addAll(filesForL0);
}
}
if (canAddNewStripes) {
// Remove old empty stripes.
int originalCount = this.stripeFiles.size();
for (int removeIndex = removeTo; removeIndex >= removeFrom; --removeIndex) {
if (removeIndex != (originalCount - 1)) {
this.stripeEndRows.remove(removeIndex);
}
this.stripeFiles.remove(removeIndex);
}
}
}
if (!canAddNewStripes) {
return;// Files were already put into L0.
}
// Now, insert new stripes. The total ranges match, so we can insert where we removed.
byte[] previousEndRow = null;
int insertAt = removeFrom;
for (Map.Entry<byte[], HStoreFile> newStripe : newStripes.entrySet()) {
if (previousEndRow != null) { // Validate that the ranges are contiguous.
assert !isOpen(previousEndRow);
byte[] startRow = startOf(newStripe.getValue());
if (!rowEquals(previousEndRow, startRow)) {
          throw new IllegalStateException(("The new stripes produced by " + (isFlush ? "flush" : "compaction")) + " are not contiguous");
}
}
// Add the new stripe.
ArrayList<HStoreFile> tmp = new ArrayList<>();
tmp.add(newStripe.getValue());
stripeFiles.add(insertAt, tmp);
      previousEndRow = newStripe.getKey();
if (!isOpen(previousEndRow)) {
stripeEndRows.add(insertAt, previousEndRow);
      }
      ++insertAt;
}
} | 3.26 |
hbase_StripeStoreFileManager_updateCandidateFilesForRowKeyBefore_rdh | /**
* See {@link StoreFileManager#getCandidateFilesForRowKeyBefore(KeyValue)} and
* {@link StoreFileManager#updateCandidateFilesForRowKeyBefore(Iterator, KeyValue, Cell)} for
 * details on these methods.
*/
@Override public Iterator<HStoreFile> updateCandidateFilesForRowKeyBefore(Iterator<HStoreFile> candidateFiles, final KeyValue targetKey, final Cell candidate) {
StripeStoreFileManager.KeyBeforeConcatenatedLists.Iterator original = ((KeyBeforeConcatenatedLists.Iterator) (candidateFiles));
    assert original != null;
ArrayList<List<HStoreFile>> components = original.getComponents();
for (int firstIrrelevant = 0; firstIrrelevant < components.size(); ++firstIrrelevant) {
HStoreFile sf = components.get(firstIrrelevant).get(0);
byte[] endKey = endOf(sf);
// Entries are ordered as such: L0, then stripes in reverse order. We never remove
// level 0; we remove the stripe, and all subsequent ones, as soon as we find the
// first one that cannot possibly have better candidates.
if (((!isInvalid(endKey)) && (!isOpen(endKey))) && (nonOpenRowCompare(targetKey, endKey) >= 0)) {
original.removeComponents(firstIrrelevant);
break;
      }
    }
return original;
} | 3.26 |
hbase_StripeStoreFileManager_isInvalid_rdh | /**
* Checks whether the key is invalid (e.g. from an L0 file, or non-stripe-compacted files).
*/
private static final boolean isInvalid(byte[] key) {
// No need to use Arrays.equals because INVALID_KEY is null
return key == INVALID_KEY;
} | 3.26 |
hbase_StripeStoreFileManager_getStripeFilesSize_rdh | /**
* Gets the total size of all files in the stripe.
*
* @param stripeIndex
* Stripe index.
* @return Size.
*/
  private long getStripeFilesSize(int stripeIndex) {
    long result = 0;
    for (HStoreFile sf : state.stripeFiles.get(stripeIndex)) {
result += sf.getReader().length();
}
return result;
} | 3.26 |
hbase_StripeStoreFileManager_getStripeCopy_rdh | /**
*
* @param index
* Index of the stripe we need.
* @return A lazy stripe copy from current stripes.
*/
private final ArrayList<HStoreFile> getStripeCopy(int index) {
List<HStoreFile> stripeCopy = this.stripeFiles.get(index);
ArrayList<HStoreFile> v81 = null;
if (stripeCopy instanceof ImmutableList<?>) {
      v81 = new ArrayList<>(stripeCopy);
this.stripeFiles.set(index, v81);
} else {
v81 = ((ArrayList<HStoreFile>) (stripeCopy));
}
return v81;
} | 3.26 |
hbase_StripeStoreFileManager_nonOpenRowCompare_rdh | /**
* Compare two keys. Keys must not be open (isOpen(row) == false).
*/
private final int nonOpenRowCompare(byte[] k1, byte[] k2) {
assert (!isOpen(k1)) && (!isOpen(k2));
return Bytes.compareTo(k1, k2);
} | 3.26 |
hbase_StripeStoreFileManager_loadUnclassifiedStoreFiles_rdh | /**
* Loads initial store files that were picked up from some physical location pertaining to this
* store (presumably). Unlike adding files after compaction, assumes empty initial sets, and is
* forgiving with regard to stripe constraints - at worst, many/all files will go to level 0.
*
* @param storeFiles
* Store files to add.
*/
private void loadUnclassifiedStoreFiles(List<HStoreFile> storeFiles) {
LOG.debug(("Attempting to load " + storeFiles.size()) + " store files.");
TreeMap<byte[], ArrayList<HStoreFile>> candidateStripes = new TreeMap<>(MAP_COMPARATOR);
ArrayList<HStoreFile> level0Files = new ArrayList<>();
// Separate the files into tentative stripes; then validate. Currently, we rely on metadata.
// If needed, we could dynamically determine the stripes in future.
for (HStoreFile sf : storeFiles) {
byte[] startRow = startOf(sf);
byte[] endRow = endOf(sf);
// Validate the range and put the files into place.
if (isInvalid(startRow) || isInvalid(endRow)) {
insertFileIntoStripe(level0Files, sf);// No metadata - goes to L0.
ensureLevel0Metadata(sf);
} else if (((!isOpen(startRow)) && (!isOpen(endRow))) &&
(nonOpenRowCompare(startRow, endRow) >= 0)) {
LOG.error(((((("Unexpected metadata - start row [" + Bytes.toString(startRow))
+ "], end row [") + Bytes.toString(endRow)) + "] in file [") + sf.getPath()) + "], pushing to L0");
insertFileIntoStripe(level0Files, sf);// Bad metadata - goes to L0 also.
ensureLevel0Metadata(sf);
} else {
ArrayList<HStoreFile> stripe = candidateStripes.get(endRow);
if (stripe == null) {
stripe = new ArrayList<>();
candidateStripes.put(endRow, stripe);
}
insertFileIntoStripe(stripe, sf);
}
}
// Possible improvement - for variable-count stripes, if all the files are in L0, we can
// instead create single, open-ended stripe with all files.
    boolean hasOverlaps = false;
byte[] expectedStartRow = null;// first stripe can start wherever
    Iterator<Map.Entry<byte[], ArrayList<HStoreFile>>> v42 = candidateStripes.entrySet().iterator();
    while (v42.hasNext()) {
Map.Entry<byte[], ArrayList<HStoreFile>> entry = v42.next();
ArrayList<HStoreFile> files = entry.getValue();
// Validate the file start rows, and remove the bad ones to level 0.
for (int i = 0; i < files.size(); ++i) {
HStoreFile sf = files.get(i);
byte[] startRow = startOf(sf);
if (expectedStartRow == null) {
expectedStartRow = startRow;// ensure that first stripe is still consistent
} else if (!rowEquals(expectedStartRow, startRow)) {
hasOverlaps = true;
LOG.warn(((("Store file doesn't fit into the tentative stripes - expected to start at [" + Bytes.toString(expectedStartRow)) + "], but starts at [") + Bytes.toString(startRow)) + "], to L0 it goes");
HStoreFile badSf = files.remove(i);
insertFileIntoStripe(level0Files, badSf);
ensureLevel0Metadata(badSf);
--i;
}
}
// Check if any files from the candidate stripe are valid. If so, add a stripe.
byte[] endRow = entry.getKey();
if (!files.isEmpty()) {
expectedStartRow = endRow;// Next stripe must start exactly at that key.
} else {
v42.remove();
}
}
// In the end, there must be open ends on two sides. If not, and there were no errors i.e.
// files are consistent, they might be coming from a split. We will treat the boundaries
// as open keys anyway, and log the message.
// If there were errors, we'll play it safe and dump everything into L0.
if (!candidateStripes.isEmpty()) {
HStoreFile firstFile = candidateStripes.firstEntry().getValue().get(0);
boolean isOpen = isOpen(startOf(firstFile)) && isOpen(candidateStripes.lastKey());
if (!isOpen) {
LOG.warn(((("The range of the loaded files does not cover full key space: from [" + Bytes.toString(startOf(firstFile))) + "], to [") + Bytes.toString(candidateStripes.lastKey())) + "]");
if (!hasOverlaps) {
ensureEdgeStripeMetadata(candidateStripes.firstEntry().getValue(), true);
ensureEdgeStripeMetadata(candidateStripes.lastEntry().getValue(), false);
} else {
LOG.warn("Inconsistent files, everything goes to L0.");
        for (ArrayList<HStoreFile> files : candidateStripes.values()) {
for (HStoreFile sf : files) {
insertFileIntoStripe(level0Files, sf);
ensureLevel0Metadata(sf);
          }
        }
candidateStripes.clear();
}
}
}
// Copy the results into the fields.
    State state = new State();
    state.level0Files = ImmutableList.copyOf(level0Files);
state.stripeFiles = new ArrayList<>(candidateStripes.size());
state.f0 = new byte[Math.max(0, candidateStripes.size() - 1)][];
ArrayList<HStoreFile> newAllFiles = new ArrayList<>(level0Files);
int i = candidateStripes.size() - 1;
for (Map.Entry<byte[], ArrayList<HStoreFile>> entry : candidateStripes.entrySet()) {
state.stripeFiles.add(ImmutableList.copyOf(entry.getValue()));
newAllFiles.addAll(entry.getValue());
if (i > 0) {
state.f0[state.stripeFiles.size() - 1] = entry.getKey();
}
--i;
}
state.allFilesCached = ImmutableList.copyOf(newAllFiles);
this.state = state;
debugDumpState("Files loaded");
} | 3.26 |
hbase_StripeStoreFileManager_getCandidateFilesForRowKeyBefore_rdh | /**
* See {@link StoreFileManager#getCandidateFilesForRowKeyBefore(KeyValue)} for details on this
 * method.
*/
@Override
public Iterator<HStoreFile> getCandidateFilesForRowKeyBefore(final KeyValue targetKey) {
KeyBeforeConcatenatedLists v3 = new KeyBeforeConcatenatedLists();
// Order matters for this call.
v3.addSublist(state.level0Files);
if (!state.stripeFiles.isEmpty()) {
int lastStripeIndex = findStripeForRow(CellUtil.cloneRow(targetKey), false);
      for (int stripeIndex = lastStripeIndex; stripeIndex >= 0; --stripeIndex) {
v3.addSublist(state.stripeFiles.get(stripeIndex));
}
}
    return v3.iterator();
  } | 3.26 |
hbase_StripeStoreFileManager_isOpen_rdh | /**
* Checks whether the key indicates an open interval boundary (i.e. infinity).
*/
private static final boolean isOpen(byte[] key) {
    return (key != null) && (key.length == 0);
  } | 3.26 |
hbase_RawBytes_encode_rdh | /**
* Write {@code val} into {@code dst}, respecting {@code voff} and {@code vlen}.
*
* @param dst
* the {@link PositionedByteRange} to write to
* @param val
* the value to write to {@code dst}
* @param voff
* the offset in {@code dst} where to write {@code val} to
* @param vlen
* the length of {@code val}
* @return number of bytes written
*/
public int encode(PositionedByteRange dst, byte[] val, int voff, int vlen) {
Bytes.putBytes(dst.getBytes(), dst.getOffset() + dst.getPosition(), val, voff, vlen);
dst.setPosition(dst.getPosition() + vlen);
return vlen;
} | 3.26 |
hbase_RawBytes_decode_rdh | /**
* Read a {@code byte[]} from the buffer {@code src}.
*
* @param src
* the {@link PositionedByteRange} to read the {@code byte[]} from
* @param length
* the length to read from the buffer
* @return the {@code byte[]} read from the buffer
*/
public byte[] decode(PositionedByteRange src, int length) {
byte[] val = new byte[length];
src.get(val);
return val;
} | 3.26 |
hbase_RecordFilter_newBuilder_rdh | /* For FilterBuilder */
public static FilterBuilder newBuilder(Field field) {
return new FilterBuilder(field, false);
} | 3.26 |
hbase_RecordFilter_parse_rdh | /* Parse a filter string and build a RecordFilter instance. */
public static RecordFilter parse(String filterString, List<Field> fields, boolean ignoreCase) {
int index = 0;
boolean not = isNot(filterString);
if (not) {
index += 1;
}
StringBuilder fieldString = new StringBuilder();
while ((((filterString.length() > index) && (filterString.charAt(index) != '<')) && (filterString.charAt(index) != '>')) && (filterString.charAt(index) != '=')) {
fieldString.append(filterString.charAt(index++));
}
if ((fieldString.length() == 0) || (filterString.length() == index)) {
return null;
}
Field field = getField(fields, fieldString.toString());
if (field == null) {
return null;
}
StringBuilder operatorString = new StringBuilder();
while ((filterString.length() > index) && (((filterString.charAt(index) == '<') || (filterString.charAt(index) == '>')) || (filterString.charAt(index) == '=')))
{
operatorString.append(filterString.charAt(index++));
}
Operator operator = getOperator(operatorString.toString());
if (operator == null) {
return null;
}
String value = filterString.substring(index);
    FieldValue fieldValue = getFieldValue(field, value);
if (fieldValue == null) {
return null;
    }
    return new RecordFilter(ignoreCase, not, field, operator, fieldValue);
} | 3.26 |
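A hedged usage sketch of the grammar `parse` accepts: an optional leading negation character handled by `isNot()`, a field name, an operator built from `<`, `>`, `=`, and a value; anything malformed returns `null`. The field name below is an assumption about hbtop's `Field` enum, not verified here:

```java
// Assumes hbtop's Field enum exposes a field whose header is "NAMESPACE";
// the field name and values are illustrative only.
List<Field> fields = Arrays.asList(Field.values());

RecordFilter eq   = RecordFilter.parse("NAMESPACE=default", fields, true);   // equality filter
RecordFilter neg  = RecordFilter.parse("!NAMESPACE=default", fields, true);  // leading negation
RecordFilter none = RecordFilter.parse("NAMESPACE", fields, true);           // null: no operator or value
```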
hbase_FileChangeWatcher_setState_rdh | /**
* Sets the state to <code>newState</code>.
*
* @param newState
* the new state.
*/
private synchronized void setState(State newState) {
state = newState;
this.notifyAll();
} | 3.26 |
hbase_FileChangeWatcher_waitForState_rdh | /**
* Blocks until the current state becomes <code>desiredState</code>. Currently only used by tests,
* thus package-private.
*
* @param desiredState
* the desired state.
* @throws InterruptedException
* if the current thread gets interrupted.
*/
synchronized void waitForState(State desiredState) throws InterruptedException {
    while (this.state != desiredState) {
      this.wait();
    }
  } | 3.26 |
hbase_FileChangeWatcher_stop_rdh | /**
* Tells the background thread to stop. Does not wait for it to exit.
*/
public void stop() {
if (compareAndSetState(new State[]{ State.RUNNING, State.STARTING }, State.STOPPING)) {
watcherThread.interrupt();
}
} | 3.26 |
hbase_FileChangeWatcher_getState_rdh | /**
* Returns the current {@link FileChangeWatcher.State}.
*
* @return the current state.
*/
public synchronized State getState() {
return state;
} | 3.26 |
hbase_FileChangeWatcher_start_rdh | /**
* Tells the background thread to start. Does not wait for it to be running. Calling this method
* more than once has no effect.
*/
public void start() {
if (!compareAndSetState(State.NEW, State.STARTING)) {
// If previous state was not NEW, start() has already been called.
return;
}
this.watcherThread.start();
} | 3.26 |
hbase_ValueFilter_parseFrom_rdh | /**
* Parse a serialized representation of {@link ValueFilter}
*
* @param pbBytes
* A pb serialized {@link ValueFilter} instance
* @return An instance of {@link ValueFilter} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static ValueFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.ValueFilter proto;
try {
proto = FilterProtos.ValueFilter.parseFrom(pbBytes);
    } catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
final CompareOperator valueCompareOp = CompareOperator.valueOf(proto.getCompareFilter().getCompareOp().name());
ByteArrayComparable valueComparator = null;
try {
if (proto.getCompareFilter().hasComparator()) {
valueComparator = ProtobufUtil.toComparator(proto.getCompareFilter().getComparator());
}
} catch (IOException ioe) {
throw new DeserializationException(ioe);
}
    return new ValueFilter(valueCompareOp, valueComparator);
} | 3.26 |
hbase_ValueFilter_toByteArray_rdh | /**
* Returns The filter serialized using pb
*/
@Override
public byte[] toByteArray() {
FilterProtos.ValueFilter.Builder builder = FilterProtos.ValueFilter.newBuilder();
builder.setCompareFilter(super.convert());
return builder.build().toByteArray();
} | 3.26 |
hbase_ValueFilter_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof ValueFilter)) {
return false;
}
return super.areSerializedFieldsEqual(o);
} | 3.26 |
hbase_Reference_createBottomReference_rdh | /**
 * Returns a {@link Reference} that points at the bottom half of an hfile.
*/
public static Reference createBottomReference(final byte[] splitRow) {
return new Reference(splitRow, Range.bottom);
} | 3.26 |
hbase_Reference_toByteArray_rdh | /**
* Use this when writing to a stream and you want to use the pb mergeDelimitedFrom (w/o the
* delimiter, pb reads to EOF which may not be what you want).
*
* @return This instance serialized as a delimited protobuf w/ a magic pb prefix.
*/
byte[] toByteArray() throws IOException {
return ProtobufUtil.prependPBMagic(convert().toByteArray());
} | 3.26 |
hbase_Reference_createTopReference_rdh | /**
 * Returns a {@link Reference} that points at the top half of an hfile.
*/
public static Reference createTopReference(final byte[] splitRow) {
    return new Reference(splitRow, Range.top);
  } | 3.26 |
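Together with `createBottomReference`, this is how the two daughters of a region split refer to halves of a parent store file. A hedged sketch; the split row below is hypothetical:

```java
// After a split at splitRow, each daughter region references one half of the
// parent hfile; the Reference carries the split row and which half it covers.
byte[] splitRow = Bytes.toBytes("row-5000");                   // hypothetical split point
Reference bottom = Reference.createBottomReference(splitRow);  // rows before splitRow
Reference top    = Reference.createTopReference(splitRow);     // rows from splitRow onward
```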
hbase_Reference_getFileRegion_rdh | /**
 * Returns the {@link Range} (top or bottom) of the file this reference covers.
 */
public Range getFileRegion() {
return this.region;
} | 3.26 |
hbase_Reference_read_rdh | /**
* Read a Reference from FileSystem.
*
* @return New Reference made from passed <code>p</code>
*/
public static Reference read(final FileSystem fs, final Path p) throws IOException {
InputStream in = fs.open(p);
try {
// I need to be able to move back in the stream if this is not a pb serialization so I can
// do the Writable decoding instead.
in = (in.markSupported()) ? in : new BufferedInputStream(in);
int pblen = ProtobufUtil.lengthOfPBMagic();
in.mark(pblen);
byte[] pbuf = new byte[pblen];
      IOUtils.readFully(in, pbuf, 0, pblen);
// WATCHOUT! Return in middle of function!!!
if (ProtobufUtil.isPBMagicPrefix(pbuf))
return convert(FSProtos.Reference.parseFrom(in));
// Else presume Writables. Need to reset the stream since it didn't start w/ pb.
      // We won't bother rewriting the Reference as a pb since Reference is transitory.
in.reset();
Reference r = new Reference();
      DataInputStream v6 = new DataInputStream(in);
// Set in = dis so it gets the close below in the finally on our way out.
in = v6;
r.readFields(v6);
return r;
} finally {
in.close();
}
} | 3.26 |
hbase_Reference_toString_rdh | /**
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return "" + this.region;
} | 3.26 |
hbase_Reference_getSplitKey_rdh | /**
 * Returns the split key (the row this reference was created with).
 */
public byte[] getSplitKey() {
    return f0;
  } | 3.26 |
hbase_TableRegionModel_toString_rdh | /* (non-Javadoc)
@see java.lang.Object#toString()
 */
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(getName());
sb.append(" [\n id=");
sb.append(id);
sb.append("\n startKey='");
sb.append(Bytes.toString(startKey));
sb.append("'\n endKey='");
sb.append(Bytes.toString(endKey));
if (location != null) {
sb.append("'\n location='");
sb.append(location);
}
sb.append("'\n]\n");
return sb.toString();
} | 3.26 |
hbase_TableRegionModel_m0_rdh | /**
*
* @param startKey
* the start key
*/
public void m0(byte[] startKey) {
this.startKey = startKey;
} | 3.26 |
hbase_TableRegionModel_getEndKey_rdh | /**
* Returns the end key
*/
@XmlAttribute
public byte[] getEndKey() {
return endKey;
} | 3.26 |
hbase_TableRegionModel_setLocation_rdh | /**
*
* @param location
* the name and port of the region server hosting the region
*/
public void setLocation(String location) {
this.location = location;
} | 3.26 |
hbase_TableRegionModel_setId_rdh | /**
*
* @param id
* the region's encoded id
*/
public void setId(long id) {
this.id = id;
} | 3.26 |
hbase_TableRegionModel_getStartKey_rdh | /**
* Returns the start key
*/
@XmlAttribute
public byte[] getStartKey() {
return startKey;
} | 3.26 |
hbase_TableRegionModel_setName_rdh | /**
*
* @param name
* region printable name
*/
public void setName(String name) {
String split[] = name.split(",");
this.table = split[0];
this.startKey = Bytes.toBytes(split[1]);
    String tail = split[2];
    split = tail.split("\\.");
id = Long.parseLong(split[0]);
} | 3.26 |
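`setName` assumes the printable region-name layout `<table>,<startKey>,<regionId>[.<encodedName>.]`: it splits on commas and then takes everything before the first `.` in the tail as the numeric id. A hedged example with made-up values, assuming the model's default constructor:

```java
// Hypothetical region name; only the first three comma-separated parts and the
// leading portion of the tail (before '.') are used by setName().
TableRegionModel region = new TableRegionModel();
region.setName("testtable,row0500,1628000000000.9f3a1c2b4d5e6f708192a3b4c5d6e7f8.");
// table    -> "testtable"
// startKey -> Bytes.toBytes("row0500")
// id       -> 1628000000000L (parsed from the tail before the first '.')
```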
hbase_TableRegionModel_getName_rdh | /**
* Returns the region name
*/
@XmlAttribute
  public String getName() {
    byte[] tableNameAsBytes = Bytes.toBytes(this.table);
    TableName tableName = TableName.valueOf(tableNameAsBytes);
    byte[] nameAsBytes = RegionInfo.createRegionName(tableName, this.startKey, this.id, !tableName.isSystemTable());
    return Bytes.toString(nameAsBytes);
} | 3.26 |
hbase_TableRegionModel_getLocation_rdh | /**
* Returns the name and port of the region server hosting the region
*/
@XmlAttribute
public String getLocation() {
return location;
} | 3.26 |
hbase_TableRegionModel_setEndKey_rdh | /**
*
* @param endKey
* the end key
*/
public void setEndKey(byte[] endKey) {
this.endKey = endKey;
} | 3.26 |
hbase_TableRegionModel_getId_rdh | /**
* Returns the encoded region id
*/
@XmlAttribute
public long getId() {
return id;
} | 3.26 |
hbase_VisibilityClient_listLabels_rdh | /**
* Retrieve the list of visibility labels defined in the system.
*
* @param connection
* The Connection instance to use.
* @param regex
* The regular expression to filter which labels are returned.
* @return labels The list of visibility labels defined in the system.
*/
public static ListLabelsResponse listLabels(Connection connection, final String regex) throws Throwable {
try (Table table = connection.getTable(LABELS_TABLE_NAME)) {
Batch.Call<VisibilityLabelsService, ListLabelsResponse> callable = new Batch.Call<VisibilityLabelsService, ListLabelsResponse>() {
ServerRpcController controller = new ServerRpcController();
CoprocessorRpcUtils.BlockingRpcCallback<ListLabelsResponse> rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>();
@Override
public ListLabelsResponse call(VisibilityLabelsService service) throws IOException {
ListLabelsRequest.Builder v14 = ListLabelsRequest.newBuilder();
if (regex != null) {
// Compile the regex here to catch any regex exception earlier.
              Pattern pattern = Pattern.compile(regex);
              v14.setRegex(pattern.toString());
            }
service.listLabels(controller, v14.build(), rpcCallback);
ListLabelsResponse response = rpcCallback.get();
if (controller.failedOnException()) {
throw controller.getFailedOn();
}
return response;
}
      };
      Map<byte[], ListLabelsResponse> result = table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, callable);
return result.values().iterator().next();// There will be exactly one region for labels
// table and so one entry in result Map.
}
} | 3.26 |
hbase_VisibilityClient_getAuths_rdh | /**
* Get the authorization for a given user
*
* @param connection
* the Connection instance to use
* @param user
* the user
* @return labels the given user is globally authorized for
*/
public static GetAuthsResponse getAuths(Connection connection, final String user) throws Throwable {
try (Table table = connection.getTable(LABELS_TABLE_NAME)) {
Batch.Call<VisibilityLabelsService, GetAuthsResponse> callable = new Batch.Call<VisibilityLabelsService, GetAuthsResponse>() {
ServerRpcController controller = new ServerRpcController();
CoprocessorRpcUtils.BlockingRpcCallback<GetAuthsResponse> rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>();
@Override
public GetAuthsResponse call(VisibilityLabelsService service) throws IOException {
GetAuthsRequest.Builder getAuthReqBuilder = GetAuthsRequest.newBuilder();
getAuthReqBuilder.setUser(UnsafeByteOperations.unsafeWrap(Bytes.toBytes(user)));
service.getAuths(controller, getAuthReqBuilder.build(), rpcCallback);
GetAuthsResponse response = rpcCallback.get();
if (controller.failedOnException()) {
throw controller.getFailedOn();
}
return response;
}
};
Map<byte[], GetAuthsResponse> result = table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, callable);
return result.values().iterator().next();// There will be exactly one region for labels
// table and so one entry in result Map.
}
} | 3.26 |
hbase_VisibilityClient_addLabels_rdh | /**
* Utility method for adding labels to the system.
*/
public static VisibilityLabelsResponse addLabels(Connection connection, final String[] labels) throws Throwable {
try (Table table = connection.getTable(LABELS_TABLE_NAME)) {
      Batch.Call<VisibilityLabelsService, VisibilityLabelsResponse> callable = new Batch.Call<VisibilityLabelsService, VisibilityLabelsResponse>() {
        ServerRpcController controller = new ServerRpcController();
        CoprocessorRpcUtils.BlockingRpcCallback<VisibilityLabelsResponse> rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>();
@Override
public VisibilityLabelsResponse call(VisibilityLabelsService service)
throws IOException {
VisibilityLabelsRequest.Builder builder = VisibilityLabelsRequest.newBuilder();
for (String label : labels) {
if (label.length() > 0) {
VisibilityLabel.Builder newBuilder = VisibilityLabel.newBuilder();
newBuilder.setLabel(UnsafeByteOperations.unsafeWrap(Bytes.toBytes(label)));
builder.addVisLabel(newBuilder.build());
}
}
service.addLabels(controller, builder.build(), rpcCallback);
VisibilityLabelsResponse response = rpcCallback.get();
if (controller.failedOnException()) {
throw controller.getFailedOn();
}
return response;
}
};
Map<byte[], VisibilityLabelsResponse> result = table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, callable);
return result.values().iterator().next();// There will be exactly one region for labels
// table and so one entry in result Map.
}
} | 3.26 |
hbase_VisibilityClient_clearAuths_rdh | /**
* Removes given labels from user's globally authorized list of labels.
*/
public static VisibilityLabelsResponse clearAuths(Connection connection, final String[] auths, final String user) throws Throwable {
return setOrClearAuths(connection, auths, user, false);
} | 3.26 |
hbase_VisibilityClient_addLabel_rdh | /**
* Utility method for adding label to the system.
*/
  public static VisibilityLabelsResponse addLabel(Connection connection, final String label) throws Throwable {
return addLabels(connection, new String[]{ label });
} | 3.26 |
hbase_VisibilityClient_setAuths_rdh | /**
* Sets given labels globally authorized for the user.
*/
public static VisibilityLabelsResponse setAuths(Connection connection, final String[] auths, final String user) throws Throwable {
return setOrClearAuths(connection, auths, user, true);
} | 3.26 |
hbase_DeleteTableProcedure_cleanRegionsInMeta_rdh | /**
* There may be items for this table still up in hbase:meta in the case where the info:regioninfo
* column was empty because of some write error. Remove ALL rows from hbase:meta that have to do
* with this table.
* <p/>
* See HBASE-12980.
*/
private static void cleanRegionsInMeta(final MasterProcedureEnv env, final TableName tableName) throws IOException {
Scan tableScan = MetaTableAccessor.getScanForTableName(env.getMasterConfiguration(), tableName).setFilter(new KeyOnlyFilter());
long now = EnvironmentEdgeManager.currentTime();
List<Delete> deletes = new ArrayList<>();
try (Table metaTable = env.getMasterServices().getConnection().getTable(TableName.META_TABLE_NAME);ResultScanner scanner = metaTable.getScanner(tableScan)) {
for (; ;) {
Result result = scanner.next();
if (result == null) {
break;
        }
        deletes.add(new Delete(result.getRow(), now));
}
if (!deletes.isEmpty()) {
LOG.warn((((("Deleting some vestigial " + deletes.size()) + " rows of ") + tableName) + " from ") + TableName.META_TABLE_NAME);
metaTable.delete(deletes);
}
}
} | 3.26 |
hbase_TableName_createTableNameIfNecessary_rdh | /**
* Check that the object does not exist already. There are two reasons for creating the objects
* only once: 1) With 100K regions, the table names take ~20MB. 2) Equals becomes much faster as
* it's resolved with a reference and an int comparison.
*/
  private static TableName createTableNameIfNecessary(ByteBuffer bns, ByteBuffer qns) {
for (TableName tn : tableCache) {
if (Bytes.equals(tn.getQualifier(), qns) && Bytes.equals(tn.m1(), bns)) {
return tn;
}
}
TableName newTable = new TableName(bns, qns);
if (tableCache.add(newTable)) {// Adds the specified element if it is not already present
return newTable;
}
// Someone else added it. Let's find it.
    for (TableName tn : tableCache) {
if (Bytes.equals(tn.getQualifier(), qns) && Bytes.equals(tn.m1(), bns)) {
return tn;
}
}
// this should never happen.
throw new IllegalStateException(newTable + " was supposed to be in the cache");
} | 3.26 |
hbase_TableName_toBytes_rdh | /**
* Returns A pointer to TableName as String bytes.
 */
public byte[] toBytes() {
return name;
} | 3.26 |
hbase_TableName_isLegalNamespaceName_rdh | /**
* Valid namespace characters are alphabetic characters, numbers, and underscores.
*/
public static void isLegalNamespaceName(final byte[] namespaceName, final int start, final int end) {
if ((end - start) < 1) {
throw new IllegalArgumentException("Namespace name must not be empty");
}
String nsString = new String(namespaceName, start, end - start, StandardCharsets.UTF_8);
if (nsString.equals(DISALLOWED_TABLE_NAME)) {
// Per https://zookeeper.apache.org/doc/r3.4.10/zookeeperProgrammers.html#ch_zkDataModel
// A znode named "zookeeper" is disallowed by zookeeper.
throw new IllegalArgumentException(("Tables may not be named '" + DISALLOWED_TABLE_NAME) + "'");
}
for (int i = 0; i < nsString.length(); i++) {
// Treat the string as a char-array as some characters may be multi-byte
      char c = nsString.charAt(i);
// ZooKeeper also has limitations, but Character.isAlphabetic omits those all
// See https://zookeeper.apache.org/doc/r3.4.10/zookeeperProgrammers.html#ch_zkDataModel
if ((Character.isAlphabetic(c) || Character.isDigit(c))
|| (c == '_')) {
continue;
}
throw new IllegalArgumentException(((((("Illegal character <" + c) + "> at ") + i) + ". Namespaces may only contain ") + "'alphanumeric characters' from any language and digits: ") + nsString);
}
} | 3.26 |
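A hedged illustration of the rule above: namespace names may contain only alphanumerics and underscores, must be non-empty, and may not be the reserved word "zookeeper". The values are examples only:

```java
byte[] ok = Bytes.toBytes("my_ns1");
TableName.isLegalNamespaceName(ok, 0, ok.length);       // passes: alphanumerics and '_'

byte[] bad = Bytes.toBytes("my-ns");
// TableName.isLegalNamespaceName(bad, 0, bad.length);  // would throw IllegalArgumentException:
                                                        // '-' is allowed in qualifiers, not namespaces
```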
hbase_TableName_valueOf_rdh | /**
* Construct a TableName
*
* @throws IllegalArgumentException
* if fullName equals old root or old meta. Some code depends on
* this.
*/
public static TableName valueOf(String name) {
for (TableName tn : tableCache) {
if (name.equals(tn.getNameAsString())) {
return tn;
}
}
final int namespaceDelimIndex = name.indexOf(NAMESPACE_DELIM);
if (namespaceDelimIndex < 0) {
return createTableNameIfNecessary(ByteBuffer.wrap(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME), ByteBuffer.wrap(Bytes.toBytes(name)));
} else {
// indexOf is by character, not byte (consider multi-byte characters)
String ns = name.substring(0, namespaceDelimIndex);
      String qualifier = name.substring(namespaceDelimIndex + 1);
return createTableNameIfNecessary(ByteBuffer.wrap(Bytes.toBytes(ns)), ByteBuffer.wrap(Bytes.toBytes(qualifier)));
}
} | 3.26 |
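A hedged usage sketch of the resolution above: a name without the `:` delimiter falls into the default namespace, and repeated lookups return the cached instance (see `createTableNameIfNecessary`), which is why `equals` can short-circuit on reference equality. Assumes the standard `TableName` accessors:

```java
import org.apache.hadoop.hbase.TableName;

public class TableNameDemo {
  public static void main(String[] args) {
    TableName plain = TableName.valueOf("mytable");        // no delimiter -> default namespace
    TableName qualified = TableName.valueOf("myns:mytable");

    System.out.println(plain.getNamespaceAsString());      // "default"
    System.out.println(qualified.getQualifierAsString());  // "mytable"

    // The cache makes equals() cheap: the same string resolves to the same object.
    System.out.println(plain == TableName.valueOf("mytable")); // true
  }
}
```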
hbase_TableName_isLegalFullyQualifiedTableName_rdh | /**
* Check passed byte array, "tableName", is legal user-space table name.
*
* @return Returns passed <code>tableName</code> param
* @throws IllegalArgumentException
* if passed a tableName is null or is made of other than 'word'
* characters or underscores: i.e.
* <code>[\p{IsAlphabetic}\p{Digit}.-:]</code>. The ':' is used
* to delimit the namespace from the table name and can be used
* for nothing else. Namespace names can only contain 'word'
* characters <code>[\p{IsAlphabetic}\p{Digit}]</code> or '_'
* Qualifier names can only contain 'word' characters
* <code>[\p{IsAlphabetic}\p{Digit}]</code> or '_', '.' or '-'.
* The name may not start with '.' or '-'. Valid fully qualified
* table names: foo:bar, namespace=>foo, table=>bar
* org:foo.bar, namespace=org, table=>foo.bar
 */
public static byte[] isLegalFullyQualifiedTableName(final byte[] tableName) {
if ((tableName == null) || (tableName.length <= 0)) {
throw new IllegalArgumentException("Name is null or empty");
}
int namespaceDelimIndex = ArrayUtils.lastIndexOf(tableName, ((byte) (NAMESPACE_DELIM)));
if (namespaceDelimIndex < 0) {
isLegalTableQualifierName(tableName);
} else {
isLegalNamespaceName(tableName, 0, namespaceDelimIndex);
isLegalTableQualifierName(tableName, namespaceDelimIndex + 1, tableName.length);
}
return tableName;
} | 3.26 |
hbase_TableName_getADummyTableName_rdh | /**
* It is used to create table names for old META, and ROOT table. These tables are not really
* legal tables. They are not added into the cache.
*
* @return a dummy TableName instance (with no validation) for the passed qualifier
*/
private static TableName getADummyTableName(String qualifier) {
return new TableName(qualifier);
} | 3.26 |
hbase_TableName_isLegalTableQualifierName_rdh | /**
* Qualifier names can only contain 'word' characters <code>[\p{IsAlphabetic}\p{Digit}]</code> or
* '_', '.' or '-'. The name may not start with '.' or '-'.
*
* @param qualifierName
* byte array containing the qualifier name
* @param start
* start index
* @param end
* end index (exclusive)
*/
public static void isLegalTableQualifierName(final byte[] qualifierName, int start, int end) {
isLegalTableQualifierName(qualifierName, start, end, false);
} | 3.26 |
hbase_RawBytesFixedLength_encode_rdh | /**
* Write {@code val} into {@code buff}, respecting {@code offset} and {@code length}.
*/
public int encode(PositionedByteRange dst, byte[] val, int voff, int vlen) {
return ((RawBytes) (base)).encode(dst, val, voff, vlen);
} | 3.26 |
hbase_RawBytesFixedLength_decode_rdh | /**
* Read a {@code byte[]} from the buffer {@code src}.
*/
public byte[] decode(PositionedByteRange src, int length) {
return ((RawBytes) (base)).decode(src, length);
} | 3.26 |
hbase_RowResource_checkAndDelete_rdh | /**
* Validates the input request parameters, parses columns from CellSetModel, and invokes
* checkAndDelete on HTable.
*
* @param model
* instance of CellSetModel
* @return Response 200 OK, 304 Not modified, 400 Bad request
*/
Response checkAndDelete(final CellSetModel model) {
Table table = null;
    Delete delete = null;
try {
table = servlet.getTable(tableResource.getName());
if (model.getRows().size() != 1) {
servlet.getMetrics().incrementFailedDeleteRequests(1);
return Response.status(Status.BAD_REQUEST).type(MIMETYPE_TEXT).entity("Bad request: Number of rows specified is not 1." + CRLF).build();
}
RowModel rowModel = model.getRows().get(0);
byte[] key = rowModel.getKey();
if (key == null) {
key = rowspec.getRow();
}
      if (key == null) {
        servlet.getMetrics().incrementFailedDeleteRequests(1);
return Response.status(Status.BAD_REQUEST).type(MIMETYPE_TEXT).entity("Bad request: Row key found to be null." + CRLF).build();
}
List<CellModel> cellModels = rowModel.getCells();
int cellModelCount = cellModels.size();
delete = new Delete(key);
boolean retValue;
CellModel valueToDeleteCell = rowModel.getCells().get(cellModelCount - 1);
      byte[] valueToDeleteColumn = valueToDeleteCell.getColumn();
      if (valueToDeleteColumn == null) {
        try {
valueToDeleteColumn = rowspec.getColumns()[0];
        } catch (final ArrayIndexOutOfBoundsException e) {
          servlet.getMetrics().incrementFailedDeleteRequests(1);
return Response.status(Status.BAD_REQUEST).type(MIMETYPE_TEXT).entity("Bad request: Column not specified for check." + CRLF).build();
}
}
byte[][] parts;
// Copy all the cells to the Delete request if extra cells are sent
if (cellModelCount > 1) {
for (int v61 = 0, n = cellModelCount - 1; v61 < n; v61++) {
CellModel cell = cellModels.get(v61);
byte[] col = cell.getColumn();
if (col == null) {
servlet.getMetrics().incrementFailedPutRequests(1);
return Response.status(Status.BAD_REQUEST).type(MIMETYPE_TEXT).entity("Bad request: Column found to be null."
+ CRLF).build();
}
parts = CellUtil.parseColumn(col);
if (parts.length == 1) {
// Only Column Family is specified
delete.addFamily(parts[0], cell.getTimestamp());
} else if (parts.length == 2) {
delete.addColumn(parts[0], parts[1], cell.getTimestamp());
} else {
servlet.getMetrics().incrementFailedDeleteRequests(1);
return Response.status(Status.BAD_REQUEST).type(MIMETYPE_TEXT).entity("Bad request: Column to delete incorrectly specified." + CRLF).build();
          }
        }
}
parts = CellUtil.parseColumn(valueToDeleteColumn);
if (parts.length == 2) {
if (parts[1].length != 0) {
// To support backcompat of deleting a cell
// if that is the only cell passed to the rest api
if (cellModelCount == 1) {
delete.addColumns(parts[0], parts[1]);
}
retValue = table.checkAndMutate(key, parts[0]).qualifier(parts[1]).ifEquals(valueToDeleteCell.getValue()).thenDelete(delete);
} else {
// The case of empty qualifier.
          if (cellModelCount == 1) {
delete.addColumns(parts[0], Bytes.toBytes(StringUtils.EMPTY));
}
retValue = table.checkAndMutate(key, parts[0]).ifEquals(valueToDeleteCell.getValue()).thenDelete(delete);
}
} else {
servlet.getMetrics().incrementFailedDeleteRequests(1);
return Response.status(Status.BAD_REQUEST).type(MIMETYPE_TEXT).entity("Bad request: Column to check incorrectly specified." + CRLF).build();
      }
      if (LOG.isTraceEnabled()) {
LOG.trace((("CHECK-AND-DELETE " + delete.toString()) + ", returns ") + retValue);
}
if (!retValue) {
servlet.getMetrics().incrementFailedDeleteRequests(1);
return Response.status(Status.NOT_MODIFIED).type(MIMETYPE_TEXT).entity(" Delete check failed." + CRLF).build();
}
ResponseBuilder response = Response.ok();
servlet.getMetrics().incrementSucessfulDeleteRequests(1);
return response.build();
} catch (Exception e) {
      servlet.getMetrics().incrementFailedDeleteRequests(1);
      return processException(e);
} finally {
if (table != null) {
try {
table.close();
} catch (IOException ioe) {
LOG.debug("Exception received while closing the table", ioe);
}
}
}
} | 3.26 |
hbase_RowResource_increment_rdh | /**
* Validates the input request parameters, parses columns from CellSetModel, and invokes Increment
* on HTable.
*
* @param model
* instance of CellSetModel
* @return Response 200 OK, 304 Not modified, 400 Bad request
*/
Response increment(final CellSetModel model) {
Table table = null;
Increment increment = null;
try {
table = servlet.getTable(tableResource.getName());
if (model.getRows().size() != 1) {
servlet.getMetrics().incrementFailedIncrementRequests(1);
return Response.status(Status.BAD_REQUEST).type(MIMETYPE_TEXT).entity("Bad request: Number of rows specified is not 1." + CRLF).build();
}
RowModel rowModel = model.getRows().get(0);
byte[] key = rowModel.getKey();
if (key == null) {
key = rowspec.getRow();
}
if (key == null) {
servlet.getMetrics().incrementFailedIncrementRequests(1);
return Response.status(Status.BAD_REQUEST).type(MIMETYPE_TEXT).entity("Bad request: Row key found to be null." + CRLF).build();
}
increment = new Increment(key);
increment.setReturnResults(returnResult);
int i = 0;
for (CellModel cell : rowModel.getCells()) {
byte[] v84 = cell.getColumn();
if (v84 == null) {
try {
v84 = rowspec.getColumns()[i++];
} catch (ArrayIndexOutOfBoundsException e) {
v84 = null;
}
}
        if (v84 == null) {
          servlet.getMetrics().incrementFailedIncrementRequests(1);
return Response.status(Status.BAD_REQUEST).type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF).build();
}
byte[][] parts = CellUtil.parseColumn(v84);
        if (parts.length != 2) {
          servlet.getMetrics().incrementFailedIncrementRequests(1);
return Response.status(Status.BAD_REQUEST).type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly specified." + CRLF).build();
}
increment.addColumn(parts[0], parts[1], Long.parseLong(Bytes.toStringBinary(cell.getValue())));
}
if (LOG.isDebugEnabled()) {
LOG.debug("INCREMENT " + increment.toString());
}
Result result = table.increment(increment);
if (returnResult) {
if (result.isEmpty()) {
servlet.getMetrics().incrementFailedIncrementRequests(1);
return Response.status(Status.NOT_MODIFIED).type(MIMETYPE_TEXT).entity("Increment return empty." + CRLF).build();
}
CellSetModel rModel = new CellSetModel();
RowModel rRowModel = new RowModel(result.getRow());
for (Cell cell : result.listCells()) {
rRowModel.addCell(new CellModel(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), cell.getTimestamp(), CellUtil.cloneValue(cell)));
}
        rModel.addRow(rRowModel); // add the row built from the increment result, not the request row
servlet.getMetrics().incrementSucessfulIncrementRequests(1);
return Response.ok(rModel).build();
}
ResponseBuilder response = Response.ok();
servlet.getMetrics().incrementSucessfulIncrementRequests(1);
return response.build();
} catch (Exception e) {
servlet.getMetrics().incrementFailedIncrementRequests(1);
return processException(e);
} finally {
if (table != null) {
try {
table.close();
} catch (IOException ioe) {
LOG.debug("Exception received while closing the table " +
table.getName(), ioe);
}
}
}
} | 3.26 |
hbase_RowResource_append_rdh | /**
* Validates the input request parameters, parses columns from CellSetModel, and invokes Append on
* HTable.
*
* @param model
* instance of CellSetModel
* @return Response 200 OK, 304 Not modified, 400 Bad request
*/
Response append(final CellSetModel model) {
Table table = null;
Append append = null;
try {
table = servlet.getTable(tableResource.getName());
if (model.getRows().size() != 1) {
servlet.getMetrics().incrementFailedAppendRequests(1);
        return Response.status(Status.BAD_REQUEST).type(MIMETYPE_TEXT).entity("Bad request: Number of rows specified is not 1." + CRLF).build();
}
RowModel rowModel = model.getRows().get(0);
byte[] key = rowModel.getKey();
if (key == null) {
key = rowspec.getRow();
      }
      if (key == null) {
servlet.getMetrics().incrementFailedAppendRequests(1);
return Response.status(Status.BAD_REQUEST).type(MIMETYPE_TEXT).entity("Bad request: Row key found to be null." + CRLF).build();
}
append = new Append(key);
append.setReturnResults(returnResult);
int i = 0;
for (CellModel cell : rowModel.getCells()) {
byte[] col = cell.getColumn();
if (col == null) {
try {
col = rowspec.getColumns()[i++];
} catch (ArrayIndexOutOfBoundsException e) {
            col = null;
}
}
if (col == null) {
servlet.getMetrics().incrementFailedAppendRequests(1);
return Response.status(Status.BAD_REQUEST).type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF).build();
}
        byte[][] parts = CellUtil.parseColumn(col);
        if (parts.length != 2) {
servlet.getMetrics().incrementFailedAppendRequests(1);
return Response.status(Status.BAD_REQUEST).type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly specified." + CRLF).build();
}
append.addColumn(parts[0], parts[1], cell.getValue());
}
if (LOG.isDebugEnabled()) {
LOG.debug("APPEND " + append.toString());
}
Result result = table.append(append);
if (returnResult) {
if (result.isEmpty()) {
servlet.getMetrics().incrementFailedAppendRequests(1);
return Response.status(Status.NOT_MODIFIED).type(MIMETYPE_TEXT).entity("Append return empty." + CRLF).build();
}
CellSetModel rModel = new CellSetModel();
RowModel rRowModel = new RowModel(result.getRow());
for (Cell cell : result.listCells()) {
rRowModel.addCell(new CellModel(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), cell.getTimestamp(), CellUtil.cloneValue(cell)));
}
rModel.addRow(rRowModel);
servlet.getMetrics().incrementSucessfulAppendRequests(1);
return Response.ok(rModel).build();
}
servlet.getMetrics().incrementSucessfulAppendRequests(1);
return Response.ok().build();
} catch (Exception e) {
servlet.getMetrics().incrementFailedAppendRequests(1);
return processException(e);
} finally {
if (table != null) {
try {
table.close();
} catch (IOException ioe) {
LOG.debug("Exception received while closing the table" + table.getName(), ioe);
}
}
}
} | 3.26 |
hbase_RowResource_updateBinary_rdh | // This currently supports only update of one row at a time.
Response updateBinary(final byte[] message, final HttpHeaders headers, final boolean replace) {
servlet.getMetrics().incrementRequests(1);
if (servlet.isReadOnly()) {
servlet.getMetrics().incrementFailedPutRequests(1);
return Response.status(Status.FORBIDDEN).type(MIMETYPE_TEXT).entity("Forbidden" + CRLF).build();
}
Table table = null;
try {
byte[] row = rowspec.getRow();
byte[][] columns = rowspec.getColumns();
byte[] column = null;
if (columns != null) {
column = columns[0];
}
      long timestamp = HConstants.LATEST_TIMESTAMP;
      List<String> vals = headers.getRequestHeader("X-Row");
      if ((vals != null) && (!vals.isEmpty())) {
        row = Bytes.toBytes(vals.get(0));
}
vals = headers.getRequestHeader("X-Column");
if ((vals != null) && (!vals.isEmpty())) {
        column = Bytes.toBytes(vals.get(0));
}
      vals = headers.getRequestHeader("X-Timestamp");
if ((vals != null) && (!vals.isEmpty())) {
timestamp = Long.parseLong(vals.get(0));
}
if (column == null) {
servlet.getMetrics().incrementFailedPutRequests(1);
return Response.status(Status.BAD_REQUEST).type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF).build();
}
Put put = new Put(row);
byte parts[][] = CellUtil.parseColumn(column);
if (parts.length != 2) {
return Response.status(Status.BAD_REQUEST).type(MIMETYPE_TEXT).entity("Bad request" + CRLF).build();
}
put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()).setFamily(parts[0]).setQualifier(parts[1]).setTimestamp(timestamp).setType(Type.Put).setValue(message).build());
table = servlet.getTable(tableResource.getName());
table.put(put);
if (LOG.isTraceEnabled()) {
LOG.trace("PUT "
+ put.toString());
}
servlet.getMetrics().incrementSucessfulPutRequests(1);
return Response.ok().build();
} catch (Exception e) {
servlet.getMetrics().incrementFailedPutRequests(1);
return processException(e);
} finally {
if (table != null) {
try {
table.close();
} catch (IOException ioe) {
LOG.debug("Exception received while closing the table", ioe);
}
}
}
} | 3.26 |
hbase_RowResource_checkAndPut_rdh | /**
* Validates the input request parameters, parses columns from CellSetModel, and invokes
* checkAndPut on HTable.
*
* @param model
* instance of CellSetModel
* @return Response 200 OK, 304 Not modified, 400 Bad request
*/
  Response checkAndPut(final CellSetModel model) {
Table table = null;
try {
      table = servlet.getTable(tableResource.getName());
      if (model.getRows().size() != 1) {
        servlet.getMetrics().incrementFailedPutRequests(1);
return Response.status(Status.BAD_REQUEST).type(MIMETYPE_TEXT).entity("Bad request: Number of rows specified is not 1." + CRLF).build();
}
RowModel rowModel = model.getRows().get(0);
byte[] key = rowModel.getKey();
if (key == null) {
key = rowspec.getRow();
}
List<CellModel> cellModels = rowModel.getCells();
int cellModelCount = cellModels.size();
if ((key == null) || (cellModelCount <= 1)) {
servlet.getMetrics().incrementFailedPutRequests(1);
return Response.status(Status.BAD_REQUEST).type(MIMETYPE_TEXT).entity("Bad request: Either row key is null or no data found for columns specified." + CRLF).build();
}
Put put = new Put(key);
boolean retValue;
CellModel valueToCheckCell = cellModels.get(cellModelCount - 1);
byte[] valueToCheckColumn = valueToCheckCell.getColumn();
byte[][] valueToPutParts = CellUtil.parseColumn(valueToCheckColumn);
if ((valueToPutParts.length == 2) && (valueToPutParts[1].length > 0)) {
CellModel valueToPutCell = null;
// Copy all the cells to the Put request
// and track if the check cell's latest value is also sent
for (int i = 0, n = cellModelCount - 1; i < n; i++) {
CellModel cell = cellModels.get(i);
byte[] v48 = cell.getColumn();
if (v48 == null) {
servlet.getMetrics().incrementFailedPutRequests(1);
return Response.status(Status.BAD_REQUEST).type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF).build();}
byte[][] parts = CellUtil.parseColumn(v48);
if (parts.length != 2) {
return Response.status(Status.BAD_REQUEST).type(MIMETYPE_TEXT).entity("Bad request" + CRLF).build();
}
put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()).setFamily(parts[0]).setQualifier(parts[1]).setTimestamp(cell.getTimestamp()).setType(Type.Put).setValue(cell.getValue()).build());
          if (Bytes.equals(v48, valueToCheckCell.getColumn())) {
            valueToPutCell = cell;
}
}
if (valueToPutCell == null) {
          servlet.getMetrics().incrementFailedPutRequests(1);
          return Response.status(Status.BAD_REQUEST).type(MIMETYPE_TEXT).entity("Bad request: The column to put and check do not match." + CRLF).build();
} else {
retValue = table.checkAndMutate(key, valueToPutParts[0]).qualifier(valueToPutParts[1]).ifEquals(valueToCheckCell.getValue()).thenPut(put);
}
} else {
servlet.getMetrics().incrementFailedPutRequests(1);
return Response.status(Status.BAD_REQUEST).type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly specified." + CRLF).build();
}
if (LOG.isTraceEnabled()) {
LOG.trace((("CHECK-AND-PUT " + put.toString()) + ", returns ") + retValue);
      }
      if (!retValue) {
servlet.getMetrics().incrementFailedPutRequests(1);
return Response.status(Status.NOT_MODIFIED).type(MIMETYPE_TEXT).entity("Value not Modified" + CRLF).build();}
ResponseBuilder response = Response.ok();
servlet.getMetrics().incrementSucessfulPutRequests(1);
return response.build();
} catch (Exception e) {
servlet.getMetrics().incrementFailedPutRequests(1);
return processException(e);
} finally {
if (table != null) {
try {
          table.close();
        } catch (IOException ioe) {
          LOG.debug("Exception received while closing the table", ioe);
}
}
}
} | 3.26 |
hbase_CleanerChore_checkAndDeleteFiles_rdh | /**
* Run the given files through each of the cleaners to see if it should be deleted, deleting it if
* necessary.
*
* @param files
* List of FileStatus for the files to check (and possibly delete)
* @return true iff successfully deleted all files
 */
private boolean checkAndDeleteFiles(List<FileStatus> files) {
if (files == null) {
return true;
}
// first check to see if the path is valid
    List<FileStatus> validFiles = Lists.newArrayListWithCapacity(files.size());
List<FileStatus> invalidFiles = Lists.newArrayList();
for (FileStatus file : files) {
if (validate(file.getPath())) {
validFiles.add(file);
} else {
LOG.warn(("Found a wrongly formatted file: " + file.getPath()) + " - will delete it.");
invalidFiles.add(file);
      }
    }
Iterable<FileStatus> deletableValidFiles = validFiles;
// check each of the cleaners for the valid files
for (T cleaner : cleanersChain) {
if (cleaner.isStopped() || this.getStopper().isStopped()) {
LOG.warn((("A file cleaner" + this.getName()) + " is stopped, won't delete any more files in:") + this.oldFileDir);
return false;
}
Iterable<FileStatus> filteredFiles = cleaner.getDeletableFiles(deletableValidFiles);
// trace which cleaner is holding on to each file
if (LOG.isTraceEnabled()) {
ImmutableSet<FileStatus> filteredFileSet = ImmutableSet.copyOf(filteredFiles);
for (FileStatus file : deletableValidFiles) {
if (!filteredFileSet.contains(file)) {
LOG.trace((file.getPath() + " is not deletable according to:") + cleaner);
}
}
}
deletableValidFiles = filteredFiles;
}
Iterable<FileStatus> filesToDelete = Iterables.concat(invalidFiles, deletableValidFiles);
return deleteFiles(filesToDelete) == files.size();
} | 3.26 |
hbase_CleanerChore_traverseAndDelete_rdh | /**
* Attempts to clean up a directory(its subdirectories, and files) in a
* {@link java.util.concurrent.ThreadPoolExecutor} concurrently. We can get the final result by
* calling result.get().
*/
  private void traverseAndDelete(Path dir, boolean root, CompletableFuture<Boolean> result) {
try {
// Step.1: List all files under the given directory.
List<FileStatus> allPaths = Arrays.asList(fs.listStatus(dir));
List<FileStatus> subDirs = allPaths.stream().filter(FileStatus::isDirectory).collect(Collectors.toList());
List<FileStatus> files = allPaths.stream().filter(FileStatus::isFile).collect(Collectors.toList());
// Step.2: Try to delete all the deletable files.
boolean allFilesDeleted = files.isEmpty() || deleteAction(() -> checkAndDeleteFiles(files), "files", dir);
// Step.3: Start to traverse and delete the sub-directories.
List<CompletableFuture<Boolean>> futures = new ArrayList<>();
if (!subDirs.isEmpty()) {
if (sortDirectories) {
m0(subDirs);
}
// Submit the request of sub-directory deletion.
subDirs.forEach(subDir -> {
          if (!shouldExclude(subDir)) {
            CompletableFuture<Boolean> subFuture = new CompletableFuture<>();
pool.execute(() -> traverseAndDelete(subDir.getPath(), false, subFuture));
futures.add(subFuture);
}
});
}
// Step.4: Once all sub-files & sub-directories are deleted, then can try to delete the
// current directory asynchronously.
FutureUtils.addListener(CompletableFuture.allOf(futures.toArray(new CompletableFuture[futures.size()])), (voidObj, e) -> {
if (e != null) {
result.completeExceptionally(FutureUtils.unwrapCompletionException(e));
return;
        }
        try {
          boolean allSubDirsDeleted = futures.stream().allMatch(CompletableFuture::join);
boolean deleted = (allFilesDeleted && allSubDirsDeleted) && isEmptyDirDeletable(dir);
if (deleted && (!root)) {
// If and only if files and sub-dirs under current dir are deleted successfully, and
// the empty directory can be deleted, and it is not the root dir then task will
// try to delete it.
deleted = deleteAction(() -> fs.delete(dir, false), "dir", dir);
}
result.complete(deleted);
} catch (Exception ie) {
// Must handle the inner exception here, otherwise the result may get stuck if one
// sub-directory get some failure.
result.completeExceptionally(ie);
}
      });
    } catch (Exception e) {
if (e instanceof FileNotFoundException) {
LOG.debug("Dir dose not exist, {}", dir);
} else {
LOG.error("Failed to traverse and delete the path: {}", dir, e);
}
result.completeExceptionally(e);
}
} | 3.26 |
hbase_CleanerChore_isEmptyDirDeletable_rdh | /**
* Check if a empty directory with no subdirs or subfiles can be deleted
*
* @param dir
* Path of the directory
* @return True if the directory can be deleted, otherwise false
*/
private boolean isEmptyDirDeletable(Path dir) {
for (T cleaner : cleanersChain) {
if (cleaner.isStopped() || this.getStopper().isStopped()) {
LOG.warn("A file cleaner {} is stopped, won't delete the empty directory {}", this.getName(), dir);
return false;
}
if (!cleaner.isEmptyDirDeletable(dir)) {
// If one of the cleaner need the empty directory, skip delete it
return false;
}
}
return true;
} | 3.26 |
hbase_CleanerChore_m0_rdh | /**
* Sort the given list in (descending) order of the space each element takes
*
* @param dirs
* the list to sort, element in it should be directory (not file)
*/
private void m0(List<FileStatus> dirs) {
    if ((dirs == null) || (dirs.size() < 2)) {
      // no need to sort for empty or single directory
      return;
    }
dirs.sort(new Comparator<FileStatus>() {
HashMap<FileStatus, Long> directorySpaces = new HashMap<>();
@Override
public int compare(FileStatus f1, FileStatus f2) {
long f1ConsumedSpace = getSpace(f1);
long f2ConsumedSpace = getSpace(f2);
return Long.compare(f2ConsumedSpace, f1ConsumedSpace);
}
private long getSpace(FileStatus f) {
Long cached = directorySpaces.get(f);
if (cached != null) {
return cached;
}
try {
long space = (f.isDirectory()) ? fs.getContentSummary(f.getPath()).getSpaceConsumed() : f.getLen();
directorySpaces.put(f, space);
return space;
} catch (IOException e) {
LOG.trace("Failed to get space consumed by path={}",
f, e);
return -1;
}
}
});
} | 3.26 |
hbase_CleanerChore_deleteAction_rdh | /**
* Perform a delete on a specified type.
*
* @param deletion
* a delete
* @param type
* possible values are 'files', 'subdirs', 'dirs'
* @return true if it deleted successfully, false otherwise
*/
private boolean deleteAction(Action<Boolean> deletion, String type, Path dir) {
boolean deleted;
try { LOG.trace("Start deleting {} under {}", type, dir);
deleted = deletion.act();
} catch (PathIsNotEmptyDirectoryException exception) {
// N.B. HDFS throws this exception when we try to delete a non-empty directory, but
// LocalFileSystem throws a bare IOException. So some test code will get the verbose
// message below.
LOG.debug("Couldn't delete '{}' yet because it isn't empty w/exception.", dir, exception);
deleted = false;
} catch (IOException ioe) {
if (LOG.isTraceEnabled()) {
LOG.trace("Could not delete {} under {}; will retry. If it keeps happening, " + "quote the exception when asking on mailing list.", type, dir, ioe);
} else {
LOG.info("Could not delete {} under {} because {}; will retry. If it keeps happening, enable" + "TRACE-level logging and quote the exception when asking on mailing list.", type, dir, ioe.getMessage());
}
deleted = false;
} catch (Exception e) {
LOG.info("unexpected exception: ", e);
deleted = false;
}
LOG.trace("Finish deleting {} under {}, deleted = {}", type, dir, deleted);
return deleted;
} | 3.26 |
hbase_CleanerChore_calculatePoolSize_rdh | /**
* Calculate size for cleaner pool.
*
* @param poolSize
* size from configuration
* @return size of pool after calculation
*/
static int calculatePoolSize(String poolSize) {
if (poolSize.matches("[1-9][0-9]*")) {
// If poolSize is an integer, return it directly,
// but cap it at the number of available processors.
int size = Math.min(Integer.parseInt(poolSize), AVAIL_PROCESSORS);
if (size == AVAIL_PROCESSORS) {
  LOG.warn("Use full core processors to scan dir, size={}", size);
}
return size;
} else if (poolSize.matches("0.[0-9]+|1.0")) {
// if poolSize is a double, return poolSize * availableProcessors;
// Ensure that we always return at least one.
int computedThreads = ((int) (AVAIL_PROCESSORS * Double.parseDouble(poolSize)));
if (computedThreads < 1) {
LOG.debug("Computed {} threads for CleanerChore, using 1 instead", computedThreads);
return 1;
}
return computedThreads;
} else {
LOG.error(((((("Unrecognized value: " + poolSize) + " for ") + CHORE_POOL_SIZE) + ", use default config: ") + DEFAULT_CHORE_POOL_SIZE) + " instead.");
return calculatePoolSize(DEFAULT_CHORE_POOL_SIZE);
}
} | 3.26 |
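
A usage sketch of the two accepted formats, assuming an 8-core machine and package-visible access to the method (it is not public); the expected values in the comments follow the rules above.

int a = CleanerChore.calculatePoolSize("4");    // 4  - integer taken as-is
int b = CleanerChore.calculatePoolSize("16");   // 8  - capped at the processor count (warns)
int c = CleanerChore.calculatePoolSize("0.25"); // 2  - 8 * 0.25
int d = CleanerChore.calculatePoolSize("0.01"); // 1  - fractional sizes never drop below one thread
int e = CleanerChore.calculatePoolSize("zero"); // falls back to DEFAULT_CHORE_POOL_SIZE (logs an error)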
hbase_CleanerChore_triggerCleanerNow_rdh | /**
* Trigger the cleaner immediately and return a CompletableFuture for getting the result. Return
* {@code true} means all the old files have been deleted, otherwise {@code false}.
*/
public synchronized CompletableFuture<Boolean> triggerCleanerNow() throws InterruptedException {
for (; ;) {
if (f0 != null) {
return f0;
}
forceRun = true;
if (!triggerNow()) {
return CompletableFuture.completedFuture(false);
}
wait();
}
} | 3.26 |
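
A hedged caller sketch: the returned future resolves to true only when every old file was removed during the triggered run. The receiver name is invented and checked exceptions are propagated for brevity.

boolean everythingDeleted = hfileCleaner.triggerCleanerNow().get(); // blocks until the run finishes
if (!everythingDeleted) {
  // Some files were skipped or failed; they will be retried on the next scheduled run.
}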
hbase_CleanerChore_deleteFiles_rdh | /**
* Delete the given files
*
* @param filesToDelete
* files to delete
* @return number of deleted files
*/
protected int deleteFiles(Iterable<FileStatus> filesToDelete) {
int deletedFileCount = 0;
for (FileStatus file : filesToDelete) {
Path filePath = file.getPath();
LOG.trace("Removing {} from archive", filePath);
try {
boolean success = this.fs.delete(filePath, false);
if (success) {
deletedFileCount++;
} else {
LOG.warn(("Attempted to delete:" + filePath) + ", but couldn't. Run cleaner chain and attempt to delete on next pass.");
}
} catch (IOException e) {
e = (e instanceof RemoteException) ? ((RemoteException) (e)).unwrapRemoteException() : e;
LOG.warn("Error while deleting: " + filePath, e);
}
}
return deletedFileCount;
} | 3.26 |
hbase_CleanerChore_shouldExclude_rdh | /**
 * Check if a path should be excluded from cleaning
*/
private boolean shouldExclude(FileStatus f) {
if (!f.isDirectory()) {
return false;
}
if ((excludeDirs != null) && (!excludeDirs.isEmpty())) {
for (String dirPart : excludeDirs) {
// since we make excludeDirs end with '/',
// if a path contains the dirPart, the path should be excluded
if (f.getPath().toString().contains(dirPart)) {
return true;
}
}
}
return false;
} | 3.26 |
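
An illustration of the substring match above; the exclude entry and paths are made up, and entries are assumed to already end with '/' as the comment requires.

List<String> excludeDirs = List.of("/archive/data/ns/excluded/");
Path kept     = new Path("hdfs://nn/hbase/archive/data/ns/kept/table1");
Path excluded = new Path("hdfs://nn/hbase/archive/data/ns/excluded/table1");
// excluded.toString().contains(excludeDirs.get(0)) -> true,  so the cleaner skips that subtree
// kept.toString().contains(excludeDirs.get(0))     -> false, so it is traversed and cleaned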
hbase_CleanerChore_initCleanerChain_rdh | /**
* Validate the file to see if it even belongs in the directory. If it is valid, then the file
* will go through the cleaner delegates, but otherwise the file is just deleted.
*
* @param file
* full {@link Path} of the file to be checked
* @return <tt>true</tt> if the file is valid, <tt>false</tt> otherwise
*/
protected abstract boolean validate(Path file);
/**
* Instantiate and initialize all the file cleaners set in the configuration
*
* @param confKey
* key to get the file cleaner classes from the configuration
*/
private void initCleanerChain(String confKey) {
this.cleanersChain = new ArrayList<>();
String[] cleaners = conf.getStrings(confKey);
if (cleaners != null) {
  for (String className : cleaners) {
className = className.trim();
if (className.isEmpty()) {
continue;
}
T logCleaner = newFileCleaner(className, conf);
if (logCleaner != null) {
LOG.info("Initialize cleaner={}", className);
this.cleanersChain.add(logCleaner);
}
}
}
} | 3.26 |
hbase_CleanerChore_newFileCleaner_rdh | /**
* A utility method to create new instances of LogCleanerDelegate based on the class name of the
* LogCleanerDelegate.
*
* @param className
* fully qualified class name of the LogCleanerDelegate
* @param conf
* used configuration
* @return the new instance
*/
private T newFileCleaner(String className, Configuration conf) {
try {
Class<? extends FileCleanerDelegate> c = Class.forName(className).asSubclass(FileCleanerDelegate.class);
@SuppressWarnings("unchecked")
T cleaner = ((T)
(c.getDeclaredConstructor().newInstance()));
cleaner.setConf(conf);
cleaner.init(this.params);
return cleaner;
} catch (Exception e) {
  LOG.warn("Can NOT create CleanerDelegate={}", className, e);
// skipping if can't instantiate
return null;
}
} | 3.26 |
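
A hedged configuration sketch of how delegates reach this factory: the chore reads a comma-separated list of fully qualified class names from the configured key and instantiates each one reflectively as above. The key and delegate class shown are believed to be the standard HFile-cleaner ones, but treat them as illustrative.

Configuration conf = HBaseConfiguration.create();
// Plug in cleaner delegates by fully qualified class name; each entry goes through newFileCleaner().
conf.setStrings("hbase.master.hfilecleaner.plugins",
  "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner");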
hbase_CandidateGenerator_pickRandomRegion_rdh | /**
 * From a list of regions, pick a random one. Null can be returned, which
 * {@link StochasticLoadBalancer#balanceCluster(Map)} recognizes as a signal to try a region move
 * rather than a swap.
*
* @param cluster
* The state of the cluster
* @param server
* index of the server
* @param chanceOfNoSwap
* Chance that this will decide to try a move rather than a swap.
* @return a random {@link RegionInfo} or null if an asymmetrical move is suggested.
*/
int pickRandomRegion(BalancerClusterState cluster, int server, double chanceOfNoSwap) {
  // Check to see if this is just a move.
  if ((cluster.regionsPerServer[server].length == 0) || (ThreadLocalRandom.current().nextFloat() < chanceOfNoSwap)) {
// signal a move only.
return -1;
}
int rand = ThreadLocalRandom.current().nextInt(cluster.regionsPerServer[server].length);
return cluster.regionsPerServer[server][rand];
} | 3.26 |
hbase_Call_setTimeout_rdh | /**
* called from timeoutTask, prevent self cancel
*/
public void setTimeout(IOException error) {
synchronized(this) {
if (done) {
return;
}
this.done = true;
this.error = error;
}
callback.run(this);
} | 3.26 |
hbase_Call_setException_rdh | /**
* Set the exception when there is an error. Notify the caller the call is done.
*
* @param error
* exception thrown by the call; either local or remote
*/
public void setException(IOException error) {
synchronized(this) {
if (done) {
  return;
}
this.done = true;
this.error = error;
}
callComplete();
} | 3.26 |
hbase_Call_m0_rdh | /**
* Set the return value when there is no error. Notify the caller the call is done.
*
* @param response
* return value of the call.
* @param cells
* Can be null
*/
public void m0(Message response, final CellScanner cells) {
synchronized(this) {
if (done) {
return;
}
this.done = true;
this.response = response;
this.cells = cells;
}
callComplete();
} | 3.26
hbase_Call_toShortString_rdh | /**
* Builds a simplified {@link #toString()} that includes just the id and method name.
*/
public String toShortString() {
return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE).append("id", id).append("methodName", md.getName()).toString();
} | 3.26 |
hbase_MetricsTableAggregateSourceImpl_getMetrics_rdh | /**
* Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all
* expectations of java programmers. Instead of returning anything Hadoop metrics expects
* getMetrics to push the metrics into the collector.
*
* @param collector
* the collector
* @param all
* get all the metrics regardless of when they last changed.
*/
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
MetricsRecordBuilder mrb = collector.addRecord(metricsName);
if (tableSources != null) {
for (MetricsTableSource tableMetricSource : tableSources.values()) {
  if (tableMetricSource instanceof MetricsTableSourceImpl) {
((MetricsTableSourceImpl) (tableMetricSource)).snapshot(mrb, all);
}
}
mrb.addGauge(Interns.info(NUM_TABLES, NUMBER_OF_TABLES_DESC), tableSources.size());
metricsRegistry.snapshot(mrb, all);
}
} | 3.26 |
hbase_RegionGroupingProvider_getStrategy_rdh | /**
* instantiate a strategy from a config property. requires conf to have already been set (as well
* as anything the provider might need to read).
*/
RegionGroupingStrategy getStrategy(final Configuration conf, final String key, final String defaultValue) throws IOException {
Class<? extends RegionGroupingStrategy> clazz;
try {
clazz = Strategies.valueOf(conf.get(key, defaultValue)).clazz;
} catch (IllegalArgumentException exception) {
// Fall back to them specifying a class name
// Note that the passed default class shouldn't actually be used, since the above only fails
// when there is a config value present.
clazz = conf.getClass(key, RegionGroupingProvider.IdentityGroupingStrategy.class, RegionGroupingProvider.RegionGroupingStrategy.class);
}
LOG.info("Instantiating RegionGroupingStrategy of type " + clazz);
try {
final RegionGroupingStrategy result = clazz.getDeclaredConstructor().newInstance();
result.init(conf, providerId);
return result;
} catch (Exception e) {
LOG.error("couldn't set up region grouping strategy, check config key " + REGION_GROUPING_STRATEGY);
LOG.debug("Exception details for failure to load region grouping strategy.", e);
throw new IOException("couldn't set up region grouping strategy", e);
}
} | 3.26 |
hbase_LoadBalancerFactory_getDefaultLoadBalancerClass_rdh | /**
* The default {@link LoadBalancer} class.
*
* @return The Class for the default {@link LoadBalancer}.
*/
public static Class<? extends LoadBalancer> getDefaultLoadBalancerClass() {
return StochasticLoadBalancer.class;
} | 3.26
hbase_LoadBalancerFactory_getLoadBalancer_rdh | /**
* Create a loadbalancer from the given conf.
*
* @return A {@link LoadBalancer}
*/
public static LoadBalancer getLoadBalancer(Configuration conf) {
  // Create the balancer
Class<? extends LoadBalancer> v0 = conf.getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, getDefaultLoadBalancerClass(), LoadBalancer.class);
return ReflectionUtils.newInstance(v0);
} | 3.26 |
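
A usage sketch: the config key resolves through HConstants.HBASE_MASTER_LOADBALANCER_CLASS, and SimpleLoadBalancer is used here purely as an example alternative to the stochastic default.

Configuration conf = HBaseConfiguration.create();
// Unset -> StochasticLoadBalancer; override to swap in another implementation.
conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
  SimpleLoadBalancer.class, LoadBalancer.class);
LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(conf);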
hbase_AbstractByteRange_deepCopyToNewArray_rdh | //
// methods for duplicating the current instance
//
@Override
public byte[] deepCopyToNewArray() {
byte[] result = new byte[f1];
System.arraycopy(bytes, f0, result, 0, f1);
return result;
} | 3.26 |
hbase_AbstractByteRange_getVLong_rdh | // Copied from com.google.protobuf.CodedInputStream v2.5.0 readRawVarint64
@Override
public long getVLong(int index) {
int shift = 0;
long result = 0;
while (shift < 64) {
final byte b = get(index++);
result |= ((long) (b & 0x7f)) << shift;
if ((b & 0x80) == 0) {
break;
}
shift += 7;
}
return result;
} | 3.26 |
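
A stand-alone copy of the decoding loop above plus a worked example: the varint bytes 0xAC 0x02 carry the 7-bit groups 0x2C and 0x02, so the value is 44 + (2 << 7) = 300. The sketch operates on a plain byte[] only so it can be run without a ByteRange instance.

public class VarintExample {
  static long decodeVLong(byte[] bytes, int index) {
    int shift = 0;
    long result = 0;
    while (shift < 64) {
      final byte b = bytes[index++];
      result |= ((long) (b & 0x7f)) << shift; // low 7 bits are payload
      if ((b & 0x80) == 0) {                  // high bit clear -> last byte
        break;
      }
      shift += 7;
    }
    return result;
  }

  public static void main(String[] args) {
    System.out.println(decodeVLong(new byte[] { (byte) 0xAC, 0x02 }, 0)); // 300
  }
}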
hbase_AbstractByteRange_compareTo_rdh | /**
* Bitwise comparison of each byte in the array. Unsigned comparison, not paying attention to
* java's signed bytes.
*/
@Override
public int compareTo(ByteRange other) {
return Bytes.compareTo(bytes, f0, f1, other.getBytes(), other.getOffset(), other.getLength());
} | 3.26 |
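
A small illustration of why the "unsigned" note matters: as a signed Java byte, 0x80 is -128 and would sort before 0x7F, but Bytes.compareTo treats bytes as unsigned, so ranges starting with 0x80 sort after those starting with 0x7F.

byte[] a = { (byte) 0x7F };
byte[] b = { (byte) 0x80 };
// Signed view: 127 vs -128, so a > b. Unsigned view (what HBase keys need): 0x7F < 0x80.
System.out.println(Bytes.compareTo(a, b) < 0); // true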
hbase_AbstractByteRange_getBytes_rdh | //
// methods for managing the backing array and range viewport
//
@Override
public byte[] getBytes() {
return bytes;
} | 3.26
hbase_AbstractByteRange_getVLongSize_rdh | // end of copied from protobuf
public static int getVLongSize(long val) {
  int rPos = 0;
while ((val & (~0x7f)) != 0) {
val >>>= 7;
rPos++;
}
return rPos + 1;
} | 3.26 |
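
The rule is one byte per started 7-bit group. Assuming assertions are enabled, these checks pass for the method as written above.

assert AbstractByteRange.getVLongSize(0L)   == 1;
assert AbstractByteRange.getVLongSize(127L) == 1;  // fits in 7 bits
assert AbstractByteRange.getVLongSize(128L) == 2;  // needs an 8th bit
assert AbstractByteRange.getVLongSize(300L) == 2;
assert AbstractByteRange.getVLongSize(-1L)  == 10; // all 64 bits set; the unsigned shift ends the loop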
hbase_AbstractByteRange_get_rdh | //
// methods for retrieving data
//
@Override
public byte get(int index) {
return bytes[f0 + index];
} | 3.26 |
hbase_AbstractByteRange_isEmpty_rdh | /**
* Returns true when {@code range} is of zero length, false otherwise.
*/
public static boolean isEmpty(ByteRange range) {
return (range == null) || (range.getLength() == 0);
} | 3.26 |
hbase_AbstractByteRange_hashCode_rdh | //
// methods used for comparison
//
@Override
public int hashCode() {
if (isHashCached()) {
// hash is already calculated and cached
return hash;
}
if (this.isEmpty()) {
  // return 0 for empty ByteRange
hash = 0;
return hash;
}
int off = f0;
hash = 0;
for (int i = 0; i < f1; i++) {
hash = (31 * hash) + bytes[off++];
}
return hash;
} | 3.26 |
hbase_MemcachedBlockCache_evictBlocksByHfileName_rdh | /**
* This method does nothing so that memcached can handle all evictions.
*/
@Override
public int evictBlocksByHfileName(String hfileName) {
return 0;
} | 3.26 |
hbase_Log4jUtils_getMethod_rdh | // load class when calling to avoid introducing class not found exception on log4j when loading
// this class even without calling any of the methods below.
private static Method getMethod(String methodName, Class<?>... args) {
try {
Class<?> clazz = Class.forName(INTERNAL_UTILS_CLASS_NAME);
return clazz.getDeclaredMethod(methodName, args);
} catch (ClassNotFoundException | NoSuchMethodException e) {
  throw new AssertionError("should not happen", e);
}
} | 3.26 |
hbase_Log4jUtils_enableDebug_rdh | /**
* Switches the logger for the given class to DEBUG level.
*
* @param clazz
* The class for which to switch to debug logging.
*/
public static void enableDebug(Class<?> clazz) {
setLogLevel(clazz.getName(), "DEBUG");
} | 3.26 |
hbase_Log4jUtils_disableZkAndClientLoggers_rdh | /**
* Disables Zk- and HBase client logging
*/
public static void disableZkAndClientLoggers() {
// disable zookeeper log to avoid it mess up command output
setLogLevel("org.apache.zookeeper", "OFF");
// disable hbase zookeeper tool log to avoid it mess up command output
setLogLevel("org.apache.hadoop.hbase.zookeeper", "OFF");
// disable hbase client log to avoid it mess up command output
setLogLevel("org.apache.hadoop.hbase.client", "OFF");
} | 3.26 |
hbase_FileIOEngine_read_rdh | /**
* Transfers data from file to the given byte buffer
*
* @param be
* an {@link BucketEntry} which maintains an (offset, len, refCnt)
* @return the {@link Cacheable} with block data inside.
* @throws IOException
* if any IO error happen.
*/
@Override
public Cacheable read(BucketEntry be) throws IOException {
long offset = be.offset();
int length = be.getLength();
Preconditions.checkArgument(length >= 0, "Length of read can not be less than 0.");
ByteBuff dstBuff = be.allocator.allocate(length);
if (length != 0) {
try {
accessFile(readAccessor, dstBuff, offset);
// The buffer created out of the fileChannel is formed by copying the data from the file
// Hence in this case there is no shared memory that we point to. Even if the BucketCache
// evicts this buffer from the file the data is already copied and there is no need to
// ensure that the results are not corrupted before consuming them.
if (dstBuff.limit() != length) {
throw new IllegalArgumentIOException(((("Only " + dstBuff.limit()) + " bytes read, ") + length) + " expected");
}
} catch (IOException ioe) {
  dstBuff.release();
throw ioe;
}
}
if (maintainPersistence) {
dstBuff.rewind();
long cachedNanoTime = dstBuff.getLong();
if (be.getCachedTime() != cachedNanoTime) {
dstBuff.release();
throw new HBaseIOException((("The cached time recorded within the cached block: " + cachedNanoTime) + " differs from its bucket entry: ") + be.getCachedTime());
}
dstBuff.limit(length);
dstBuff = dstBuff.slice();
} else {
dstBuff.rewind();
}
return be.wrapAsCacheable(dstBuff);
} | 3.26 |
hbase_FileIOEngine_shutdown_rdh | /**
* Close the file
*/
@Override
public void shutdown() {
for (int i = 0; i < filePaths.length; i++) {
try {
if (fileChannels[i] != null) {
fileChannels[i].close();
}
if (rafs[i] != null) {
  rafs[i].close();
}
} catch (IOException ex) {
LOG.error(("Failed closing " + filePaths[i]) + " when shudown the IOEngine", ex);
}
}
} | 3.26 |
hbase_FileIOEngine_getAbsoluteOffsetInFile_rdh | /**
* Get the absolute offset in given file with the relative global offset.
*
* @return the absolute offset
*/
private long getAbsoluteOffsetInFile(int fileNum, long globalOffset) {
return globalOffset - (fileNum * sizePerFile);
} | 3.26 |
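
A worked example of the arithmetic, assuming (purely for illustration) backing files of 1 GiB each: a global offset of 1.5 GiB belongs to file 1 and maps to a local offset of 0.5 GiB within it.

long sizePerFile  = 1024L * 1024 * 1024;                    // 1 GiB per file (assumed)
long globalOffset = sizePerFile + sizePerFile / 2;          // 1.5 GiB into the combined space
int  fileNum      = (int) (globalOffset / sizePerFile);     // -> 1
long offsetInFile = globalOffset - (fileNum * sizePerFile); // -> 536870912 bytes (0.5 GiB)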
hbase_FileIOEngine_m0_rdh | /**
* Transfers data from the given byte buffer to file
*
* @param srcBuffer
* the given byte buffer from which bytes are to be read
* @param offset
* The offset in the file where the first byte to be written
*/
@Override
public void m0(ByteBuffer srcBuffer, long offset) throws IOException {
m0(ByteBuff.wrap(srcBuffer), offset);
} | 3.26 |
hbase_FileIOEngine_isPersistent_rdh | /**
* File IO engine is always able to support persistent storage for the cache
*/
@Override
public boolean isPersistent() {
return true;
} | 3.26 |
hbase_FileIOEngine_sync_rdh | /**
* Sync the data to file after writing
*/
@Override
public void sync() throws IOException {
for (int i = 0; i < fileChannels.length; i++) {
try {
if (fileChannels[i] != null) {
fileChannels[i].force(true);
}
} catch (IOException ie) {
LOG.warn("Failed syncing data to " + this.filePaths[i]);
throw ie;
}
}
} | 3.26 |