name | code_snippet | score |
---|---|---|
hbase_WALInputFormat_getSplits_rdh | /**
* implementation shared with deprecated HLogInputFormat
*/
List<InputSplit> getSplits(final JobContext context, final String startKey, final String endKey) throws IOException, InterruptedException {
Configuration conf = context.getConfiguration();
boolean ignoreMissing = conf.getBoolean(WALPlayer.IGNORE_MISSING_FILES, false);
Path[] inputPaths = getInputPaths(conf);
// get delegation token for the filesystem
TokenCache.obtainTokensForNamenodes(context.getCredentials(), inputPaths, conf);
long startTime = conf.getLong(startKey, Long.MIN_VALUE);
long endTime = conf.getLong(endKey, Long.MAX_VALUE);
List<FileStatus> allFiles = new ArrayList<FileStatus>();
for (Path inputPath : inputPaths) {
FileSystem fs = inputPath.getFileSystem(conf);
try {
List<FileStatus> files = getFiles(fs, inputPath, startTime, endTime);
allFiles.addAll(files);
} catch (FileNotFoundException e) {
if (ignoreMissing) {
LOG.warn(("File " + inputPath) + " is missing. Skipping it.");
continue;
}
throw e;
}
}
List<InputSplit> splits = new ArrayList<InputSplit>(allFiles.size());
for (FileStatus file : allFiles) {
splits.add(new WALSplit(file.getPath().toString(), file.getLen(), startTime, endTime));
}
return splits;
} | 3.26 |
hbase_WALInputFormat_getFiles_rdh | /**
*
* @param startTime
* If file looks like it has a timestamp in its name, we'll check if newer or
* equal to this value else we will filter out the file. If name does not seem to
* have a timestamp, we will just return it w/o filtering.
* @param endTime
* If file looks like it has a timestamp in its name, we'll check if older or
* equal to this value else we will filter out the file. If name does not seem to
* have a timestamp, we will just return it w/o filtering.
*/
private List<FileStatus> getFiles(FileSystem fs, Path dir, long startTime, long endTime) throws IOException {
List<FileStatus> result = new ArrayList<>();
LOG.debug(("Scanning " + dir.toString()) + " for WAL files");
RemoteIterator<LocatedFileStatus> iter = fs.listLocatedStatus(dir);
if (!iter.hasNext()) {
return Collections.emptyList();
}
while (iter.hasNext()) {
LocatedFileStatus file = iter.next();
if (file.isDirectory()) {
// Recurse into sub directories
result.addAll(getFiles(fs, file.getPath(), startTime, endTime));
} else {
addFile(result, file, startTime, endTime);
}
}
// TODO: These results should be sorted? Results could be content of recovered.edits directory
// -- null padded increasing numeric -- or a WAL file w/ timestamp suffix or timestamp and
// then meta suffix. See AbstractFSWALProvider#WALStartTimeComparator
return result;
} | 3.26 |
hbase_ScanDeleteTracker_update_rdh | // should not be called at all even (!)
@Override
public void update() {
this.reset();
} | 3.26 |
hbase_ScanDeleteTracker_reset_rdh | // called between every row.
@Override
public void reset() {
hasFamilyStamp = false;
familyStamp = 0L;
familyVersionStamps.clear();
deleteCell = null;
} | 3.26 |
hbase_ScanDeleteTracker_isDeleted_rdh | /**
* Check if the specified Cell buffer has been deleted by a previously seen delete.
*
* @param cell
* - current cell to check if deleted by a previously seen delete
*/
@Override
public DeleteResult isDeleted(Cell cell) {
long timestamp = cell.getTimestamp();
if (hasFamilyStamp && (timestamp <= familyStamp)) {
return DeleteResult.FAMILY_DELETED;
}
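// a DeleteFamilyVersion marker deletes only the cells that carry exactly that timestamp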
if (familyVersionStamps.contains(Long.valueOf(timestamp))) {
return DeleteResult.FAMILY_VERSION_DELETED;
}
if (deleteCell != null) {
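// the comparison is negated, so it is equivalent to comparing deleteCell's qualifier against the current cell's qualifier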
int ret = -this.comparator.compareQualifiers(cell, deleteCell);
if (ret == 0) {
if (deleteType == Type.DeleteColumn.getCode()) {
return DeleteResult.COLUMN_DELETED;
}
// Delete (aka DeleteVersion)
// If the timestamp is the same, keep this one
if (timestamp == deleteTimestamp) {
return DeleteResult.VERSION_DELETED;
}
// use assert or not?
assert timestamp < deleteTimestamp;
// different timestamp, let's clear the buffer.
deleteCell = null;
} else if (ret < 0) {
// Next column case.
deleteCell = null;
} else {
throw new IllegalStateException((((((("isDelete failed: deleteBuffer=" + Bytes.toStringBinary(deleteCell.getQualifierArray(), deleteCell.getQualifierOffset(), deleteCell.getQualifierLength())) + ", qualifier=") + Bytes.toStringBinary(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength())) + ", timestamp=") + timestamp) + ", comparison result: ") + ret);
}
}
return DeleteResult.NOT_DELETED;
} | 3.26 |
hbase_ScanDeleteTracker_add_rdh | /**
* Add the specified Cell to the list of deletes to check against for this row operation.
* <p>
* This is called when a Delete is encountered.
*
* @param cell
* - the delete cell
*/
@Override
public void add(Cell cell) {
long timestamp = cell.getTimestamp();
byte type = cell.getTypeByte();
if ((!hasFamilyStamp) || (timestamp > familyStamp)) {
if (type == Type.DeleteFamily.getCode()) {
hasFamilyStamp = true;
familyStamp = timestamp;
return;
} else if (type == Type.DeleteFamilyVersion.getCode()) {
familyVersionStamps.add(timestamp);
return;
}
if ((deleteCell != null) && (type < deleteType)) {
// same column, so ignore less specific delete
if (CellUtil.matchingQualifier(cell, deleteCell)) {
return;
}
}
// new column, or more general delete type
deleteCell = cell;
deleteType = type;
deleteTimestamp = timestamp;
}
// missing else is never called.
} | 3.26 |
hbase_RegionInfoDisplay_getStartKeyForDisplay_rdh | /**
* Get the start key for display. Optionally hide the real start key.
*
* @return the startkey
*/
public static byte[] getStartKeyForDisplay(RegionInfo ri, Configuration conf) {
boolean displayKey = conf.getBoolean(DISPLAY_KEYS_KEY, true);
if (displayKey) {
return ri.getStartKey();
}
return HIDDEN_START_KEY;
} | 3.26 |
hbase_RegionInfoDisplay_getRegionNameForDisplay_rdh | /**
* Get the region name for display. Optionally hide the start key.
*
* @return region name bytes
*/
public static byte[] getRegionNameForDisplay(RegionInfo ri, Configuration conf) {
boolean displayKey = conf.getBoolean(DISPLAY_KEYS_KEY, true);
if (displayKey || ri.getTable().equals(TableName.META_TABLE_NAME)) {
return ri.getRegionName();
} else {
// create a modified regionname with the startkey replaced but preserving
// the other parts including the encodedname.
try {
byte[][] regionNameParts = RegionInfo.parseRegionName(ri.getRegionName());
regionNameParts[1] = HIDDEN_START_KEY;// replace the real startkey
int len = 0;
// get the total length
for (byte[] b : regionNameParts) {
len += b.length;
}
byte[] encodedRegionName = Bytes.toBytes(RegionInfo.encodeRegionName(ri.getRegionName()));
len += encodedRegionName.length;
// allocate some extra bytes for the delimiters and the last '.'
byte[] modifiedName = new byte[(len + regionNameParts.length) + 1];
int lengthSoFar = 0;
int loopCount = 0;
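// join the parts with ',' except after the region id part, where the replica-id delimiter is used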
for (byte[] b : regionNameParts) {
System.arraycopy(b, 0, modifiedName, lengthSoFar, b.length);
lengthSoFar += b.length;
if ((loopCount++) == 2) {
modifiedName[lengthSoFar++] = RegionInfo.REPLICA_ID_DELIMITER;
} else {
modifiedName[lengthSoFar++] = HConstants.DELIMITER;
}
}
// replace the last comma with '.'
modifiedName[lengthSoFar - 1] = RegionInfo.ENC_SEPARATOR;
System.arraycopy(encodedRegionName, 0, modifiedName, lengthSoFar, encodedRegionName.length);
lengthSoFar += encodedRegionName.length;
modifiedName[lengthSoFar] = RegionInfo.ENC_SEPARATOR;
return modifiedName;
} catch (IOException e) {
// LOG.warn("Encountered exception " + e);
throw new RuntimeException(e);
}
}
} | 3.26 |
hbase_RegionInfoDisplay_getRegionNameAsStringForDisplay_rdh | /**
* Get the region name for display. Optionally hide the start key.
*
* @return region name as String
*/
public static String getRegionNameAsStringForDisplay(RegionInfo ri, Configuration conf) {
return Bytes.toStringBinary(getRegionNameForDisplay(ri, conf));
} | 3.26 |
hbase_SortedList_get_rdh | /**
* Returns a reference to the unmodifiable list currently backing the SortedList. Changes to the
* SortedList will not be reflected in this list. Use this method to get a reference for iterating
* over using the RandomAccess pattern.
*/
public List<E> get() {
// FindBugs: UG_SYNC_SET_UNSYNC_GET complaint. Fix!!
return f0;
} | 3.26 |
hbase_BufferedMutatorParams_setWriteBufferPeriodicFlushTimeoutMs_rdh | /**
* Set the max timeout before the buffer is automatically flushed.
*/
public BufferedMutatorParams setWriteBufferPeriodicFlushTimeoutMs(long timeoutMs) {
this.writeBufferPeriodicFlushTimeoutMs = timeoutMs;
return this;
} | 3.26 |
hbase_BufferedMutatorParams_listener_rdh | /**
* Override the default error handler. Default handler simply rethrows the exception.
*/
public BufferedMutatorParams listener(BufferedMutator.ExceptionListener listener) {
this.listener = listener;
return this;
} | 3.26 |
hbase_BufferedMutatorParams_writeBufferSize_rdh | /**
* Override the write buffer size specified by the provided {@link Connection}'s
* {@link org.apache.hadoop.conf.Configuration} instance, via the configuration key
* {@code hbase.client.write.buffer}.
*/
public BufferedMutatorParams writeBufferSize(long writeBufferSize) {
this.writeBufferSize = writeBufferSize;
return this;
} | 3.26 |
hbase_BufferedMutatorParams_maxKeyValueSize_rdh | /**
* Override the maximum key-value size specified by the provided {@link Connection}'s
* {@link org.apache.hadoop.conf.Configuration} instance, via the configuration key
* {@code hbase.client.keyvalue.maxsize}.
*/
public BufferedMutatorParams maxKeyValueSize(int maxKeyValueSize) {
this.maxKeyValueSize = maxKeyValueSize;
return this;
} | 3.26 |
hbase_BufferedMutatorParams_opertationTimeout_rdh | /**
*
* @deprecated Since 2.3.0, will be removed in 4.0.0. Use {@link #operationTimeout(int)}
*/
@Deprecated
public BufferedMutatorParams opertationTimeout(final int operationTimeout) {
this.operationTimeout = operationTimeout;
return this;
} | 3.26 |
hbase_SpaceViolationPolicyEnforcementFactory_createWithoutViolation_rdh | /**
* Creates the "default" {@link SpaceViolationPolicyEnforcement} for a table that isn't in
* violation. This is used to have uniform policy checking for tables in and not in violation of their quotas. This
* policy will still verify that new bulk loads do not exceed the configured quota limit.
*
* @param rss
* RegionServerServices instance the policy enforcement should use.
* @param tableName
* The target HBase table.
* @param snapshot
* The current quota snapshot for the {@code tableName}, can be null.
*/
public SpaceViolationPolicyEnforcement createWithoutViolation(RegionServerServices rss, TableName tableName, SpaceQuotaSnapshot snapshot) {
if (snapshot == null) {
// If we have no snapshot, this is equivalent to no quota for this table.
// We should use the (singleton) instance of this policy, which does nothing.
return MissingSnapshotViolationPolicyEnforcement.getInstance();
}
// We have a snapshot which means that there is a quota set on this table, but it's not in
// violation of that quota. We need to construct a policy for this table.
SpaceQuotaStatus status = snapshot.getQuotaStatus();
if (status.isInViolation()) {
throw new IllegalArgumentException((tableName + " is in violation. Logic error. Snapshot=") + snapshot);
}
// We have a unique size snapshot to use. Create an instance for this tablename + snapshot.
DefaultViolationPolicyEnforcement enforcement = new DefaultViolationPolicyEnforcement();
enforcement.initialize(rss, tableName, snapshot);
return enforcement;
} | 3.26 |
hbase_SpaceViolationPolicyEnforcementFactory_create_rdh | /**
* Constructs the appropriate {@link SpaceViolationPolicyEnforcement} for tables that are in
* violation of their space quota.
*/
public SpaceViolationPolicyEnforcement create(RegionServerServices rss, TableName tableName, SpaceQuotaSnapshot snapshot) {
SpaceViolationPolicyEnforcement enforcement;
SpaceQuotaStatus status = snapshot.getQuotaStatus();
if (!status.isInViolation()) {
throw new IllegalArgumentException((tableName + " is not in violation. Snapshot=") + snapshot);
}
switch (status.getPolicy().get()) {
case DISABLE :
enforcement = new DisableTableViolationPolicyEnforcement();
break;
case NO_WRITES_COMPACTIONS :
enforcement = new NoWritesCompactionsViolationPolicyEnforcement();
break;
case NO_WRITES :
enforcement = new NoWritesViolationPolicyEnforcement();
break;
case NO_INSERTS :
enforcement = new NoInsertsViolationPolicyEnforcement();
break;
default :
throw new IllegalArgumentException("Unhandled SpaceViolationPolicy: " + status.getPolicy());
}
enforcement.initialize(rss, tableName, snapshot);
return enforcement;
} | 3.26 |
hbase_SpaceViolationPolicyEnforcementFactory_getInstance_rdh | /**
* Returns an instance of this factory.
*/
public static SpaceViolationPolicyEnforcementFactory getInstance() {
return INSTANCE;
} | 3.26 |
hbase_AsyncBufferedMutator_getPeriodicalFlushTimeout_rdh | /**
* Returns the periodical flush interval, 0 means disabled.
*/
default long getPeriodicalFlushTimeout(TimeUnit unit) {
throw new UnsupportedOperationException("Not implemented");
} | 3.26 |
hbase_SegmentFactory_createMutableSegment_rdh | // create mutable segment
public MutableSegment createMutableSegment(final Configuration conf, CellComparator comparator, MemStoreSizing memstoreSizing) {
MemStoreLAB memStoreLAB = MemStoreLAB.newInstance(conf);
return generateMutableSegment(conf, comparator, memStoreLAB, memstoreSizing);
} | 3.26 |
hbase_SegmentFactory_createImmutableSegment_rdh | // ****** private methods to instantiate concrete store segments **********//
private ImmutableSegment createImmutableSegment(final Configuration conf, final CellComparator comparator, MemStoreSegmentsIterator iterator, MemStoreLAB memStoreLAB, int numOfCells, MemStoreCompactionStrategy.Action action, CompactingMemStore.IndexType idxType) {
ImmutableSegment res = null;
switch (idxType) {
case CHUNK_MAP :
res = new CellChunkImmutableSegment(comparator, iterator, memStoreLAB, numOfCells, action);
break;
case CSLM_MAP :
assert false; // non-flat segment can not be created here
break;
case ARRAY_MAP :
res = new CellArrayImmutableSegment(comparator, iterator, memStoreLAB, numOfCells, action);
break;
}
return res;
} | 3.26 |
hbase_SegmentFactory_createImmutableSegmentByCompaction_rdh | // create new flat immutable segment from compacting old immutable segments
// for compaction
public ImmutableSegment createImmutableSegmentByCompaction(final Configuration conf, final CellComparator comparator, MemStoreSegmentsIterator iterator, int numOfCells, CompactingMemStore.IndexType idxType, MemStoreCompactionStrategy.Action action) throws IOException {
MemStoreLAB memStoreLAB = MemStoreLAB.newInstance(conf);
return createImmutableSegment(conf, comparator, iterator, memStoreLAB, numOfCells, action, idxType);
} | 3.26 |
hbase_SegmentFactory_createCompositeImmutableSegment_rdh | // create composite immutable segment from a list of segments
// for snapshot consisting of multiple segments
public CompositeImmutableSegment createCompositeImmutableSegment(final CellComparator comparator, List<ImmutableSegment> segments) {
return new CompositeImmutableSegment(comparator, segments);
} | 3.26 |
hbase_SegmentFactory_createImmutableSegmentByMerge_rdh | // create new flat immutable segment from merging old immutable segments
// for merge
public ImmutableSegment createImmutableSegmentByMerge(final Configuration conf, final CellComparator comparator, MemStoreSegmentsIterator iterator, int numOfCells, List<ImmutableSegment> segments, CompactingMemStore.IndexType idxType, MemStoreCompactionStrategy.Action action) throws IOException {
MemStoreLAB memStoreLAB = getMergedMemStoreLAB(conf, segments);
return createImmutableSegment(conf, comparator, iterator, memStoreLAB, numOfCells, action, idxType);
} | 3.26 |
hbase_Triple_create_rdh | // ctor cannot infer types w/o warning but a method can.
public static <A, B, C> Triple<A, B, C> create(A first, B second, C third) {
return new Triple<>(first, second, third);
} | 3.26 |
hbase_AbstractClientScanner_initScanMetrics_rdh | /**
* Check and initialize if application wants to collect scan metrics
*/
protected void initScanMetrics(Scan scan) {
// check if application wants to collect scan metrics
if (scan.isScanMetricsEnabled()) {
scanMetrics = new ScanMetrics();
}
} | 3.26 |
hbase_AbstractClientScanner_getScanMetrics_rdh | /**
* Used internally for accumulating metrics on a scan. To enable collection of metrics on a Scanner,
* call {@link Scan#setScanMetricsEnabled(boolean)}.
*
* @return Returns the running {@link ScanMetrics} instance or null if scan metrics not enabled.
*/
@Override
public ScanMetrics getScanMetrics() {
return scanMetrics;
} | 3.26 |
hbase_AsyncConnection_getBufferedMutator_rdh | /**
* Retrieve an {@link AsyncBufferedMutator} for performing client-side buffering of writes.
* <p>
* The returned instance will use default configs. Use
* {@link #getBufferedMutatorBuilder(TableName, ExecutorService)} if you want to customize some
* configs.
*
* @param tableName
* the name of the table
* @param pool
* the thread pool to use for executing callback
* @return an {@link AsyncBufferedMutator} for the supplied tableName.
*/
default AsyncBufferedMutator getBufferedMutator(TableName tableName, ExecutorService pool) {
return getBufferedMutatorBuilder(tableName, pool).build();
} | 3.26 |
hbase_AsyncConnection_getTable_rdh | /**
* Retrieve an {@link AsyncTable} implementation for accessing a table.
* <p>
* The returned instance will use default configs. Use {@link #getTableBuilder(TableName)} if you
* want to customize some configs.
* <p>
* This method no longer checks table existence. If the table does not exist, an exception will
* be thrown only when the first operation is attempted.
* <p>
* The returned {@code CompletableFuture} will be finished directly in the rpc framework's
* callback thread, so typically you should not do any time consuming work inside these methods.
* And also the observer style scan API will use {@link AdvancedScanResultConsumer} which is
* designed for experts only. Only use it when you know what you are doing.
*
* @param tableName
* the name of the table
* @return an AsyncTable to use for interactions with this table
* @see #getTableBuilder(TableName)
*/
default AsyncTable<AdvancedScanResultConsumer> getTable(TableName tableName) {
return getTableBuilder(tableName).build();
} | 3.26 |
hbase_AsyncConnection_m0_rdh | /**
* Retrieve an {@link AsyncTable} implementation for accessing a table.
* <p>
* This method no longer checks table existence. An exception will be thrown if the table does not
* exist only when the first operation is attempted.
*
* @param tableName
* the name of the table
* @param pool
* the thread pool to use for executing callback
* @return an AsyncTable to use for interactions with this table
*/
default AsyncTable<ScanResultConsumer> m0(TableName tableName, ExecutorService pool) {
return getTableBuilder(tableName, pool).build();
} | 3.26 |
hbase_AsyncConnection_getAdmin_rdh | /**
* Retrieve an {@link AsyncAdmin} implementation to administer an HBase cluster.
* <p>
* The returned instance will use default configs. Use {@link #getAdminBuilder(ExecutorService)}
* if you want to customize some configs.
*
* @param pool
* the thread pool to use for executing callback
* @return an {@link AsyncAdmin} instance for cluster administration
*/
default AsyncAdmin getAdmin(ExecutorService pool) {
return getAdminBuilder(pool).build();
} | 3.26 |
hbase_StateMachineProcedure_isRollbackSupported_rdh | /**
* Used by the default implementation of abort() to know if the current state can be aborted and
* rollback can be triggered.
*/
protected boolean isRollbackSupported(final TState state) {
return false;
} | 3.26 |
hbase_StateMachineProcedure_setNextState_rdh | /**
* Set the next state for the procedure.
*
* @param stateId
* the ordinal() of the state enum (or state id)
*/
private void setNextState(final int stateId) {
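// record the transition; the state-id history array grows in chunks of 8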
if ((states == null) || (states.length == stateCount)) {
int newCapacity = stateCount + 8;
if (states != null) {
states = Arrays.copyOf(states, newCapacity);
} else {
states = new int[newCapacity];
}
}
states[stateCount++] = stateId;
} | 3.26 |
hbase_StateMachineProcedure_failIfAborted_rdh | /**
* If the procedure has more states then abort it, otherwise the procedure is finished and the
* abort can be ignored.
*/
protected final void failIfAborted() {
if (aborted.get()) {
if (hasMoreState()) {
setAbortFailure(getClass().getSimpleName(), "abort requested");
} else {
LOG.warn((("Ignoring abort request on state='" + getCurrentState()) + "' for ") + this);
}
}
} | 3.26 |
hbase_StateMachineProcedure_getCurrentStateId_rdh | /**
* This method is used from test code as it cannot be assumed that state transition will happen
* sequentially. Some procedures may skip steps/states, some may add intermediate steps in the
* future.
*/
public int getCurrentStateId() {
return getStateId(getCurrentState());
} | 3.26 |
hbase_StateMachineProcedure_addChildProcedure_rdh | /**
* Add a child procedure to execute
*
* @param subProcedure
* the child procedure
*/
protected <T extends Procedure<TEnvironment>> void addChildProcedure(@SuppressWarnings("unchecked")
T...
subProcedure) {
if (subProcedure ==
null) {return;
}
final int len = subProcedure.length;
if (len == 0) {
return;
}
if (subProcList == null) {
subProcList = new ArrayList<>(len);
}
for (int i = 0; i < len; ++i) {
Procedure<TEnvironment> proc = subProcedure[i];
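// child procedures inherit this procedure's owner if they don't have one yet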
if (!proc.hasOwner()) {
proc.setOwner(getOwner());
}
subProcList.add(proc);
}
} | 3.26 |
hbase_ColumnValueFilter_getCompareOperator_rdh | /**
* Returns operator
*/
public CompareOperator getCompareOperator() {
return op;
} | 3.26 |
hbase_ColumnValueFilter_getFamily_rdh | /**
* Returns the column family
*/
public byte[] getFamily() {
return family;
} | 3.26 |
hbase_ColumnValueFilter_compareValue_rdh | /**
* This method is used to determine whether a cell should be included or filtered out.
*
* @param op
* one of operators {@link CompareOperator}
* @param comparator
* comparator used to compare cells.
* @param cell
* cell to be compared.
* @return true if the cell should be filtered out, false if it should be included.
*/
private boolean compareValue(final CompareOperator op, final ByteArrayComparable comparator, final Cell cell) {
if (op == CompareOperator.NO_OP) {
return true;
}
int compareResult = PrivateCellUtil.compareValue(cell, comparator);
return CompareFilter.compare(op, compareResult);
} | 3.26 |
hbase_ColumnValueFilter_getQualifier_rdh | /**
* Returns the qualifier
*/
public byte[] getQualifier() {
return qualifier;
} | 3.26 |
hbase_ColumnValueFilter_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
} else if (!(o instanceof ColumnValueFilter)) {
return false;
}
ColumnValueFilter other = ((ColumnValueFilter) (o));
return ((Bytes.equals(this.getFamily(), other.getFamily()) && Bytes.equals(this.getQualifier(), other.getQualifier())) && this.getCompareOperator().equals(other.getCompareOperator())) && this.getComparator().areSerializedFieldsEqual(other.getComparator());
} | 3.26 |
hbase_ColumnValueFilter_convert_rdh | /**
* Returns a protobuf (pb) instance representing this filter.
*/
ColumnValueFilter convert() {
FilterProtos.ColumnValueFilter.Builder builder = FilterProtos.ColumnValueFilter.newBuilder();
builder.setFamily(UnsafeByteOperations.unsafeWrap(this.family));
builder.setQualifier(UnsafeByteOperations.unsafeWrap(this.qualifier));
builder.setCompareOp(HBaseProtos.CompareType.valueOf(this.op.name()));
builder.setComparator(ProtobufUtil.toComparator(this.comparator));
return builder.build();
} | 3.26 |
hbase_ColumnValueFilter_parseFrom_rdh | /**
* Parse a serialized representation of {@link ColumnValueFilter}
*
* @param pbBytes
* A pb serialized {@link ColumnValueFilter} instance
* @return An instance of {@link ColumnValueFilter} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static ColumnValueFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.ColumnValueFilter proto;
try {
proto = FilterProtos.ColumnValueFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
final CompareOperator compareOp = CompareOperator.valueOf(proto.getCompareOp().name());
final ByteArrayComparable v8;
try {
v8 = ProtobufUtil.toComparator(proto.getComparator());
} catch (IOException ioe) {
throw new DeserializationException(ioe);
}
return new ColumnValueFilter(proto.getFamily().toByteArray(), proto.getQualifier().toByteArray(), compareOp, v8);
} | 3.26 |
hbase_ColumnValueFilter_createFilterFromArguments_rdh | /**
* Creates this filter by reflection; it is used by {@link ParseFilter}.
*
* @param filterArguments
* arguments for creating a ColumnValueFilter
* @return a ColumnValueFilter
*/
public static Filter createFilterFromArguments(ArrayList<byte[]> filterArguments) {
Preconditions.checkArgument(filterArguments.size() == 4, "Expect 4 arguments: %s", filterArguments.size());
byte[] family = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0));
byte[] qualifier = ParseFilter.removeQuotesFromByteArray(filterArguments.get(1));
CompareOperator operator = ParseFilter.createCompareOperator(filterArguments.get(2));
ByteArrayComparable comparator = ParseFilter.createComparator(ParseFilter.removeQuotesFromByteArray(filterArguments.get(3)));
if ((comparator instanceof RegexStringComparator) || (comparator instanceof SubstringComparator)) {
if ((operator != CompareOperator.EQUAL) && (operator != CompareOperator.NOT_EQUAL)) {
throw new IllegalArgumentException("A regexstring comparator and substring comparator " + "can only be used with EQUAL and NOT_EQUAL");
}
}
return new ColumnValueFilter(family, qualifier, operator, comparator);
} | 3.26 |
hbase_ColumnValueFilter_getComparator_rdh | /**
* Returns the comparator
*/
public ByteArrayComparable getComparator() {
return comparator;
} | 3.26 |
hbase_ZkSplitLogWorkerCoordination_attemptToOwnTask_rdh | /**
* Try to own the task by transitioning the zk node data from UNASSIGNED to OWNED.
* <p>
* This method is also used to periodically heartbeat the task progress by transitioning the node
* from OWNED to OWNED.
* <p>
*
* @param isFirstTime
* shows whether it's the first attempt.
* @param zkw
* zk watcher
* @param server
* name
* @param task
* to own
* @param taskZKVersion
* version of the task in zk
* @return non-negative integer value when task can be owned by current region server otherwise -1
*/
protected static int attemptToOwnTask(boolean isFirstTime, ZKWatcher zkw, ServerName server, String task, int taskZKVersion) {
int latestZKVersion = FAILED_TO_OWN_TASK;
try {
SplitLogTask slt = new SplitLogTask.Owned(server);
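// setData with the expected version is a conditional update; it fails with BADVERSION if another worker changed the node first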
Stat stat = zkw.getRecoverableZooKeeper().setData(task, slt.toByteArray(), taskZKVersion);
if (stat == null) {
LOG.warn("zk.setData() returned null for path " + task);
SplitLogCounters.tot_wkr_task_heartbeat_failed.increment();
return FAILED_TO_OWN_TASK;
}
latestZKVersion = stat.getVersion();
SplitLogCounters.tot_wkr_task_heartbeat.increment();
return latestZKVersion;
} catch (KeeperException e) {
if (!isFirstTime) {
if (e.code().equals(Code.NONODE)) {
LOG.warn("NONODE failed to assert ownership for " + task, e);
} else if (e.code().equals(Code.BADVERSION)) {
LOG.warn("BADVERSION failed to assert ownership for " + task, e);
} else {
LOG.warn("failed to assert ownership for " + task, e);
}
}
} catch (InterruptedException e1) {
LOG.warn((("Interrupted while trying to assert ownership of " + task) + " ") + StringUtils.stringifyException(e1));
Thread.currentThread().interrupt();
}
SplitLogCounters.tot_wkr_task_heartbeat_failed.increment();
return FAILED_TO_OWN_TASK;
} | 3.26 |
hbase_ZkSplitLogWorkerCoordination_endTask_rdh | /* Next part is related to WALSplitterHandler */
/**
* endTask() can fail and the only way to recover out of it is for the
* {@link org.apache.hadoop.hbase.master.SplitLogManager} to timeout the task node.
*/
@Override
public void endTask(SplitLogTask slt, LongAdder ctr, SplitTaskDetails details) {
ZkSplitTaskDetails zkDetails = ((ZkSplitTaskDetails) (details));
String task = zkDetails.getTaskNode();
int taskZKVersion = zkDetails.getCurTaskZKVersion().intValue();
try {
if (ZKUtil.setData(watcher, task, slt.toByteArray(), taskZKVersion)) {
LOG.info((("successfully transitioned task " + task) + " to final state ") + slt);
ctr.increment();
return;
}
LOG.warn(((("failed to transistion task " + task) + " to end state ")
+ slt) + " because of version mismatch ");
} catch (KeeperException.BadVersionException bve) {
LOG.warn(((("transisition task " + task) + " to ") + slt) + " failed because of version mismatch", bve);
} catch (KeeperException.NoNodeException e) {
LOG.error(HBaseMarkers.FATAL, ((("logic error - end task " + task) + " ") + slt) + " failed because task doesn't exist", e);
} catch (KeeperException e) {
LOG.warn((("failed to end task, " + task) + " ") + slt, e);
}
SplitLogCounters.tot_wkr_final_transition_failed.increment();
} | 3.26 |
hbase_ZkSplitLogWorkerCoordination_getDataSetWatchFailure_rdh | /* Support functions for ZooKeeper async callback */
void getDataSetWatchFailure(String path) {
synchronized(grabTaskLock) {
if (workerInGrabTask) {
// currentTask can change but that's ok
String taskpath = currentTask;
if ((taskpath != null) && taskpath.equals(path)) {
LOG.info("retrying data watch on " + path);
SplitLogCounters.tot_wkr_get_data_retry.increment();
getDataSetWatchAsync();
} else {
// no point setting a watch on the task which this worker is not
// working upon anymore
}
}
}
} | 3.26 |
hbase_ZkSplitLogWorkerCoordination_init_rdh | /**
* Override setter from {@link SplitLogWorkerCoordination}
*/
@Override
public void init(RegionServerServices server, Configuration conf, TaskExecutor splitExecutor, SplitLogWorker worker) {
this.server = server;
this.worker = worker;
this.splitTaskExecutor = splitExecutor;
maxConcurrentTasks = conf.getInt(HBASE_SPLIT_WAL_MAX_SPLITTER, DEFAULT_HBASE_SPLIT_WAL_MAX_SPLITTER);
reportPeriod = conf.getInt("hbase.splitlog.report.period", conf.getInt(HConstants.HBASE_SPLITLOG_MANAGER_TIMEOUT, ZKSplitLogManagerCoordination.DEFAULT_TIMEOUT) / 3);
} | 3.26 |
hbase_ZkSplitLogWorkerCoordination_nodeChildrenChanged_rdh | /**
* Override handler from {@link ZKListener}
*/
@Override
public void nodeChildrenChanged(String path) {
if (path.equals(watcher.getZNodePaths().splitLogZNode)) {
if (LOG.isTraceEnabled()) {
LOG.trace("tasks arrived or departed on " + path);
}
synchronized(taskReadySeq) {
this.taskReadySeq.incrementAndGet();
taskReadySeq.notify();
}
}
} | 3.26 |
hbase_ZkSplitLogWorkerCoordination_submitTask_rdh | /**
* Submit a log split task to executor service
*
* @param curTask
* task to submit
* @param curTaskZKVersion
* current version of task
*/
void submitTask(final String curTask, final int curTaskZKVersion, final int reportPeriod) {
final MutableInt zkVersion = new MutableInt(curTaskZKVersion);
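// the reporter heartbeats the task znode at most once per reportPeriod so the master does not time the task out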
CancelableProgressable reporter = new CancelableProgressable() {
private long last_report_at = 0;
@Override
public boolean progress() {
long t = EnvironmentEdgeManager.currentTime();
if ((t - last_report_at) > reportPeriod) {
last_report_at = t;
int latestZKVersion = attemptToOwnTask(false, watcher, server.getServerName(), curTask, zkVersion.intValue());
if (latestZKVersion < 0) {
LOG.warn("Failed to heartbeat the task" + curTask);
return false;
}
zkVersion.setValue(latestZKVersion);
}
return true;
}
};
ZkSplitLogWorkerCoordination.ZkSplitTaskDetails splitTaskDetails = new ZkSplitLogWorkerCoordination.ZkSplitTaskDetails();
splitTaskDetails.setTaskNode(curTask);
splitTaskDetails.setCurTaskZKVersion(zkVersion);
WALSplitterHandler hsh = new WALSplitterHandler(server, this, splitTaskDetails, reporter, this.tasksInProgress, splitTaskExecutor);
server.getExecutorService().submit(hsh);
} | 3.26 |
hbase_ZkSplitLogWorkerCoordination_areSplittersAvailable_rdh | /**
* Returns true if more splitters are available, otherwise false.
*/
private boolean areSplittersAvailable() {
return (maxConcurrentTasks - tasksInProgress.get()) > 0;
} | 3.26 |
hbase_ZkSplitLogWorkerCoordination_grabTask_rdh | /**
* try to grab a 'lock' on the task zk node to own and execute the task.
* <p>
*
* @param path
* zk node for the task
* @return true if the task was grabbed successfully, false otherwise
*/
private boolean grabTask(String path) {
Stat stat = new Stat();
byte[] data;
synchronized(grabTaskLock) {
currentTask = path;
workerInGrabTask = true;
if (Thread.interrupted()) {
return false;
}
}
try {
try {
if ((data = ZKUtil.getDataNoWatch(watcher, path, stat)) == null) {
SplitLogCounters.tot_wkr_failed_to_grab_task_no_data.increment();
return false;
}
} catch (KeeperException e) {
LOG.warn("Failed to get data for znode " + path, e); SplitLogCounters.tot_wkr_failed_to_grab_task_exception.increment();
return false;
}
SplitLogTask slt;
try {
slt = SplitLogTask.parseFrom(data);
} catch (DeserializationException e) {
LOG.warn("Failed parse data for znode " + path, e);
SplitLogCounters.tot_wkr_failed_to_grab_task_exception.increment();
return false;
}
if (!slt.isUnassigned()) {
SplitLogCounters.tot_wkr_failed_to_grab_task_owned.increment();
return false;
}
currentVersion = attemptToOwnTask(true, watcher, server.getServerName(), path, stat.getVersion());
if (currentVersion < 0) {
SplitLogCounters.tot_wkr_failed_to_grab_task_lost_race.increment();
return false;
}
if (ZKSplitLog.isRescanNode(watcher, currentTask)) {
ZkSplitLogWorkerCoordination.ZkSplitTaskDetails splitTaskDetails = new ZkSplitLogWorkerCoordination.ZkSplitTaskDetails();
splitTaskDetails.setTaskNode(currentTask);
splitTaskDetails.setCurTaskZKVersion(new MutableInt(currentVersion));
endTask(new SplitLogTask.Done(server.getServerName()), SplitLogCounters.tot_wkr_task_acquired_rescan, splitTaskDetails);
return false;
}
LOG.info((("worker " + server.getServerName()) + " acquired task ") + path);SplitLogCounters.tot_wkr_task_acquired.increment();
getDataSetWatchAsync();
submitTask(path, currentVersion, reportPeriod);
// after a successful submit, sleep a little bit to allow other RSs to grab the rest tasks
try {
int sleepTime = ThreadLocalRandom.current().nextInt(500) + 500;
Thread.sleep(sleepTime);
} catch (InterruptedException e) {
LOG.warn("Interrupted while yielding for other region servers", e);
Thread.currentThread().interrupt();
}
return true;
} finally {
synchronized(grabTaskLock) {
workerInGrabTask = false;
// clear the interrupt from stopTask() otherwise the next task will
// suffer
Thread.interrupted();
}
}
} | 3.26 |
hbase_ZkSplitLogWorkerCoordination_taskLoop_rdh | /**
* Wait for tasks to become available at /hbase/splitlog zknode. Grab a task one at a time. This
* policy puts an upper-limit on the number of simultaneous log splitting that could be happening
* in a cluster.
* <p>
* Synchronization using <code>taskReadySeq</code> ensures that it will try to grab every task
* that has been put up
*/
@Override
public void taskLoop() throws InterruptedException {
while (!shouldStop) {
int seq_start = taskReadySeq.get();
List<String> paths;
paths = getTaskList();
if (paths == null) {
LOG.warn(("Could not get tasks, did someone remove " + watcher.getZNodePaths().splitLogZNode) + " ... worker thread exiting.");
return;
}
// shuffle the paths to prevent different split log workers from starting from the same log file
// after the meta log (if any)
Collections.shuffle(paths);
// pick meta wal firstly
int offset = 0;
for (int i = 0; i < paths.size(); i++) {
if (AbstractFSWALProvider.isMetaFile(paths.get(i))) {
offset = i;
break;
}
}
int numTasks = paths.size();
boolean taskGrabbed = false;
for (int i = 0; i < numTasks; i++) {
while (!shouldStop) {
if (this.areSplittersAvailable()) {
if (LOG.isTraceEnabled()) {
LOG.trace(("Current region server " +
server.getServerName()) + " is ready to take more tasks, will get task list and try grab tasks again.");
}
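// start from the meta WAL index (offset) and wrap around the shuffled list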
int idx = (i + offset) % paths.size();
// don't call ZKSplitLog.getNodeName() because that will lead to
// double encoding of the path name
taskGrabbed |= grabTask(ZNodePaths.joinZNode(watcher.getZNodePaths().splitLogZNode, paths.get(idx)));
break;
} else {
if (LOG.isTraceEnabled()) {
LOG.trace(((("Current region server " + server.getServerName()) + " has ") + this.tasksInProgress.get()) + " tasks in progress and can't take more.");
}
Thread.sleep(100);
}
}
if (shouldStop) {
return;
}
}
if ((!taskGrabbed) && (!shouldStop)) {
// did not grab any tasks; sleep a little bit to reduce zk requests.
Thread.sleep(1000);
}
SplitLogCounters.tot_wkr_task_grabing.increment();
synchronized(taskReadySeq) {
while (seq_start == taskReadySeq.get()) {
taskReadySeq.wait(checkInterval);
}
}
}
} | 3.26 |
hbase_ZkSplitLogWorkerCoordination_nodeDataChanged_rdh | /**
* Override handler from {@link ZKListener}
*/
@Override
public void nodeDataChanged(String path) {
// there will be a self generated dataChanged event every time attemptToOwnTask()
// heartbeats the task znode by upping its version
synchronized(grabTaskLock) {
if (workerInGrabTask) {// currentTask can change
String taskpath = currentTask;
if ((taskpath != null) && taskpath.equals(path)) {
getDataSetWatchAsync();
}
}
}
} | 3.26 |
hbase_RowMutations_add_rdh | /**
* Add a list of mutations
*
* @param mutations
* The data to send.
* @throws IOException
* if the row of added mutation doesn't match the original row
*/
public RowMutations add(List<? extends Mutation> mutations) throws IOException {
for (Mutation mutation : mutations) {
if (!Bytes.equals(row, mutation.getRow())) {
throw new WrongRowIOException(((("The row in the recently added Mutation <" + Bytes.toStringBinary(mutation.getRow())) + "> doesn't match the original one <") + Bytes.toStringBinary(this.row)) + ">");
}
}
this.mutations.addAll(mutations);
return this;
} | 3.26 |
hbase_RowMutations_of_rdh | /**
* Create a {@link RowMutations} with the specified mutations.
*
* @param mutations
* the mutations to send
* @throws IOException
* if any row in mutations is different to another
*/
public static RowMutations of(List<? extends Mutation> mutations) throws IOException {
if (CollectionUtils.isEmpty(mutations)) {
throw new IllegalArgumentException("Cannot instantiate a RowMutations by empty list");
}
return new RowMutations(mutations.get(0).getRow(), mutations.size()).add(mutations);
} | 3.26 |
hbase_RowMutations_getMutations_rdh | /**
* Returns An unmodifiable list of the current mutations.
*/
public List<Mutation> getMutations() {
return Collections.unmodifiableList(mutations);
} | 3.26 |
hbase_RemoteWithExtrasException_isDoNotRetry_rdh | /**
* Returns True if origin exception was a do not retry type.
*/
public boolean isDoNotRetry() {
return this.doNotRetry;
} | 3.26 |
hbase_RemoteWithExtrasException_isServerOverloaded_rdh | /**
* Returns True if the server was considered overloaded when the exception was thrown.
*/
public boolean isServerOverloaded() {
return serverOverloaded;
} | 3.26 |
hbase_RemoteWithExtrasException_getPort_rdh | /**
* Returns -1 if not set
*/
public int getPort() {
return this.port;
} | 3.26 |
hbase_TableSpanBuilder_populateTableNameAttributes_rdh | /**
* Static utility method that performs the primary logic of this builder. It is visible to other
* classes in this package so that other builders can use this functionality as a mix-in.
*
* @param attributes
* the attributes map to be populated.
* @param tableName
* the source of attribute values.
*/
static void populateTableNameAttributes(final Map<AttributeKey<?>, Object> attributes, final TableName tableName) {
attributes.put(DB_NAME, tableName.getNamespaceAsString());
attributes.put(TABLE_KEY, tableName.getNameAsString());
} | 3.26 |
hbase_Replication_startReplicationService_rdh | /**
* If replication is enabled and this cluster is a master, it starts the replication service.
*/
@Override
public void startReplicationService() throws IOException {
this.replicationManager.init();
this.server.getChoreService().scheduleChore(new ReplicationStatisticsChore("ReplicationSourceStatistics", server, ((int) (TimeUnit.SECONDS.toMillis(statsPeriodInSecond)))));
LOG.info("{} started", this.server.toString());
} | 3.26 |
hbase_Replication_stopReplicationService_rdh | /**
* Stops replication service.
*/
@Override
public void stopReplicationService() {
this.replicationManager.join();
} | 3.26 |
hbase_Replication_getReplicationManager_rdh | /**
* Get the replication sources manager
*
* @return the replication sources manager
*/
public ReplicationSourceManager getReplicationManager() {
return this.replicationManager;
} | 3.26 |
hbase_RowPrefixFixedLengthBloomContext_getRowPrefixCell_rdh | /**
*
* @param cell
* the cell
* @return the new cell created by row prefix
*/
private Cell getRowPrefixCell(Cell cell) {
byte[] row = CellUtil.copyRow(cell);
return ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row, 0, Math.min(prefixLength, row.length)).setType(Type.Put).build();
} | 3.26 |
hbase_LockStatus_getExclusiveLockProcIdOwner_rdh | /**
* Return the id of the procedure which holds the exclusive lock, if exists. Or a negative value
* which means no one holds the exclusive lock.
* <p/>
* Notice that, in HBase, we assume that the procedure id is positive, or at least non-negative.
*/
default long getExclusiveLockProcIdOwner() {
Procedure<?> proc = getExclusiveLockOwnerProcedure();
return proc != null ? proc.getProcId() : -1L;
} | 3.26 |
hbase_LockStatus_isLocked_rdh | /**
* Return whether this lock has already been held,
* <p/>
* Notice that, holding the exclusive lock or shared lock are both considered as locked, i.e, this
* method usually equals to {@code hasExclusiveLock() || getSharedLockCount() > 0}.
*/
default boolean isLocked() {
return m0() || (getSharedLockCount() > 0);
} | 3.26 |
hbase_JVM_getSystemLoadAverage_rdh | /**
* Get the system load average
*
* @see java.lang.management.OperatingSystemMXBean#getSystemLoadAverage
*/
public double getSystemLoadAverage() {
return osMbean.getSystemLoadAverage();
} | 3.26 |
hbase_JVM_isAmd64_rdh | /**
* Check if the arch is amd64;
*
* @return whether this is amd64 or not.
*/
public static boolean isAmd64() {
return amd64;
} | 3.26 |
hbase_JVM_runUnixMXBeanMethod_rdh | /**
* Load the implementation of UnixOperatingSystemMXBean for Oracle jvm and runs the desired
* method.
*
* @param mBeanMethodName
* : method to run from the interface UnixOperatingSystemMXBean
* @return the method result
*/
private Long runUnixMXBeanMethod(String mBeanMethodName) {
Object unixos;
Class<?> classRef;
Method mBeanMethod;
try {
classRef = Class.forName("com.sun.management.UnixOperatingSystemMXBean");
if (classRef.isInstance(osMbean)) {
mBeanMethod = classRef.getMethod(mBeanMethodName);
unixos = classRef.cast(osMbean);
return ((Long) (mBeanMethod.invoke(unixos)));
}
} catch (Exception e) {
LOG.warn("Not able to load class or method for" + " com.sun.management.UnixOperatingSystemMXBean.", e);
}
return null;
}
hbase_JVM_getFreeMemory_rdh | /**
* Return the physical free memory reported by the OS, not the JVM's free heap (which is not very
* useful because it depends on the GC). The OS value gives a better hint about whether the
* machine is overloaded or not.
*/
public long getFreeMemory() {
if (ibmvendor) {
return 0;
}
Long r = runUnixMXBeanMethod("getFreePhysicalMemorySize");
return r != null ? r : -1;
} | 3.26 |
hbase_JVM_getOpenFileDescriptorCount_rdh | /**
* Get the number of opened filed descriptor for the runtime jvm. If Oracle java, it will use the
* com.sun.management interfaces. Otherwise, this methods implements it (linux only).
*
* @return number of open file descriptors for the jvm
*/
public long getOpenFileDescriptorCount() {
Long ofdc;
if (!ibmvendor) {
ofdc = runUnixMXBeanMethod("getOpenFileDescriptorCount");
return ofdc != null ? ofdc : -1;
}
InputStream inputStream = null;
InputStreamReader inputStreamReader = null;
BufferedReader bufferedReader = null;
try {
// need to get the PID number of the process first
RuntimeMXBean rtmbean = ManagementFactory.getRuntimeMXBean();
String rtname = rtmbean.getName();
Iterator<String> pidhost = Splitter.on('@').split(rtname).iterator();
// using linux bash commands to retrieve info
Process p = Runtime.getRuntime().exec(new String[]{ "bash", "-c", ("ls /proc/" + pidhost.next()) + "/fdinfo | wc -l" });
inputStream = p.getInputStream();
inputStreamReader = new InputStreamReader(inputStream, StandardCharsets.UTF_8);
bufferedReader = new BufferedReader(inputStreamReader);
String openFileDesCount;
if ((openFileDesCount = bufferedReader.readLine()) != null) {
return Long.parseLong(openFileDesCount);
}
} catch (IOException ie) {
LOG.warn("Not able to get the number of open file descriptors", ie);
} finally {
if (bufferedReader != null) {
try {
bufferedReader.close();
} catch (IOException e) {
LOG.warn("Not able to close the BufferedReader", e);
}
}
if (inputStreamReader != null) {
try {
inputStreamReader.close();
} catch (IOException e) {
LOG.warn("Not able to close the InputStreamReader", e);
}
}
if (inputStream != null) {
try {
inputStream.close();
} catch (IOException e) {
LOG.warn("Not able to close the InputStream", e);
}
}
}
return -1;
} | 3.26 |
hbase_JVM_getMaxFileDescriptorCount_rdh | /**
* Get the number of the maximum file descriptors the system can use. If Oracle java, it will use
* the com.sun.management interfaces. Otherwise, this methods implements it (linux only).
*
* @return max number of file descriptors the operating system can use.
*/
public long getMaxFileDescriptorCount() {
Long mfdc;
if (!ibmvendor) {
mfdc = runUnixMXBeanMethod("getMaxFileDescriptorCount");
return mfdc != null ? mfdc : -1;
}
InputStream in = null;
BufferedReader v20 = null;
try {
// using linux bash commands to retrieve info
Process p = Runtime.getRuntime().exec(new String[]{ "bash", "-c", "ulimit -n" });in = p.getInputStream();
v20 = new BufferedReader(new InputStreamReader(in,
StandardCharsets.UTF_8));
String maxFileDesCount;
if ((maxFileDesCount = v20.readLine()) != null) {
return Long.parseLong(maxFileDesCount);
}
} catch (IOException ie) {
LOG.warn("Not able to get the max number of file descriptors", ie);
} finally {
if (v20 != null) {
try {
v20.close();
} catch (IOException e) {
LOG.warn("Not able to close the reader", e);
}
}
if (in != null) {
try {
in.close();
} catch (IOException e) {
LOG.warn("Not able to close the InputStream", e);
}
}
}
return -1;
} | 3.26 |
hbase_JVM_isGZIPOutputStreamFinishBroken_rdh | /**
* Check if the finish() method of GZIPOutputStream is broken
*
* @return whether GZIPOutputStream.finish() is broken.
*/
public static boolean isGZIPOutputStreamFinishBroken() {
return ibmvendor && JVMVersion.contains("1.6.0");
} | 3.26 |
hbase_JVM_getNumberOfRunningProcess_rdh | /**
* Workaround to get the current number of process running. Approach is the one described here:
* http://stackoverflow.com/questions/54686/how-to-get-a-list-of-current-open-windows-process-with-java
*/
@SuppressWarnings(value = "RV_DONT_JUST_NULL_CHECK_READLINE", justification = "used by testing")
public int getNumberOfRunningProcess() {
if (!isUnix()) {
return 0;
}
InputStream inputStream = null;
InputStreamReader inputStreamReader = null;
BufferedReader bufferedReader = null;
try {
int count = 0;
Process p = Runtime.getRuntime().exec("ps -e");
inputStream = p.getInputStream();
inputStreamReader = new InputStreamReader(inputStream, StandardCharsets.UTF_8);
bufferedReader = new BufferedReader(inputStreamReader);
while (bufferedReader.readLine() != null) {
count++;
}
return count - 1;// -1 because there is a headline
} catch (IOException e) {
return -1;
} finally {
if (bufferedReader != null) {
try {
bufferedReader.close();
} catch (IOException e) {
LOG.warn("Not able to close the BufferedReader", e);
}
}
if (inputStreamReader != null) {
try {
inputStreamReader.close();
} catch (IOException e) {
LOG.warn("Not able to close the InputStreamReader", e);
}
}
if (inputStream != null) {
try {
inputStream.close();
} catch (IOException e) {
LOG.warn("Not able to close the InputStream", e);
}
}
}
} | 3.26 |
hbase_JVM_isAarch64_rdh | /**
* Check if the arch is aarch64;
*
* @return whether this is aarch64 or not.
*/
public static boolean isAarch64() {
return aarch64;
} | 3.26 |
hbase_JVM_isUnix_rdh | /**
* Check if the OS is unix.
*
* @return whether this is unix or not.
*/
public static boolean isUnix() {
if (windows) {
return false;
}
return ibmvendor ? linux : true;
} | 3.26 |
hbase_FavoredNodeLoadBalancer_availableServersContains_rdh | // Do a check of the hostname and port and return the servername from the servers list
// that matched (the favoredNode will have a startcode of -1 but we want the real
// server with the legit startcode).
private ServerName availableServersContains(List<ServerName> servers, ServerName favoredNode) {
for (ServerName server : servers) {
if (ServerName.isSameAddress(favoredNode, server)) {
return server;
}
}
return null;
} | 3.26 |
hbase_CoprocessorWhitelistMasterObserver_verifyCoprocessors_rdh | /**
* Perform the validation checks for a coprocessor to determine if the path is white listed or
* not.
*
* @throws IOException
* if path is not included in whitelist or a failure occurs in processing
* @param ctx
* as passed in from the coprocessor
* @param htd
* as passed in from the coprocessor
*/
private static void verifyCoprocessors(ObserverContext<MasterCoprocessorEnvironment> ctx, TableDescriptor htd) throws IOException {
Collection<String> paths = ctx.getEnvironment().getConfiguration().getStringCollection(f0);
for (CoprocessorDescriptor cp : htd.getCoprocessorDescriptors()) {
if (cp.getJarPath().isPresent()) {
if (paths.stream().noneMatch(p -> {
Path wlPath = new Path(p);
if (validatePath(new Path(cp.getJarPath().get()), wlPath)) {
LOG.debug(String.format("Coprocessor %s found in directory %s", cp.getClassName(), p));
return true;
}
return false;
})) {
throw new IOException(String.format("Loading %s DENIED in %s", cp.getClassName(), f0));}
}
}
} | 3.26 |
hbase_CoprocessorWhitelistMasterObserver_validatePath_rdh | /**
* Validates a single whitelist path against the coprocessor path
*
* @param coprocPath
* the path to the coprocessor including scheme
* @param wlPath
* can be: 1) a "*" to wildcard all coprocessor paths 2) a specific filesystem
* (e.g. hdfs://my-cluster/) 3) a wildcard path to be evaluated by
* {@link FilenameUtils#wildcardMatch(String, String)} path can specify scheme
* or not (e.g. "file:///usr/hbase/coprocessors" or for all filesystems
* "/usr/hbase/coprocessors")
* @return if the path was found under the wlPath
*/
private static boolean validatePath(Path coprocPath, Path wlPath) {
// verify if all are allowed
if (wlPath.toString().equals("*")) {
return true;
}
// verify we are on the same filesystem if wlPath has a scheme
if (!wlPath.isAbsoluteAndSchemeAuthorityNull()) {
String wlPathScheme = wlPath.toUri().getScheme();
String coprocPathScheme = coprocPath.toUri().getScheme();
String wlPathHost = wlPath.toUri().getHost();
String coprocPathHost = coprocPath.toUri().getHost();
if (wlPathScheme != null) {
wlPathScheme = wlPathScheme.toString().toLowerCase();
} else {
wlPathScheme = "";
}
if (wlPathHost != null) {
wlPathHost = wlPathHost.toString().toLowerCase();
} else {
wlPathHost = "";
}
if (coprocPathScheme != null) {
coprocPathScheme = coprocPathScheme.toString().toLowerCase();
} else {
coprocPathScheme = "";
}
if (coprocPathHost != null) {
coprocPathHost = coprocPathHost.toString().toLowerCase();
} else {
coprocPathHost = "";
}
if ((!wlPathScheme.equals(coprocPathScheme)) || (!wlPathHost.equals(coprocPathHost))) {
return false;
}
}
// allow any on this file-system (file systems were verified to be the same above)
if (wlPath.isRoot()) {
return true;
}// allow "loose" matches stripping scheme
if (FilenameUtils.wildcardMatch(Path.getPathWithoutSchemeAndAuthority(coprocPath).toString(), Path.getPathWithoutSchemeAndAuthority(wlPath).toString())) {
return true;
}
return false;
} | 3.26 |
hbase_RegionServerSnapshotManager_waitForOutstandingTasks_rdh | /**
* Wait for all of the currently outstanding tasks submitted via {@link #submitTask(Callable)}.
* This *must* be called after all tasks are submitted via submitTask.
*
* @return <tt>true</tt> on success, <tt>false</tt> otherwise
* @throws SnapshotCreationException
* if the snapshot failed while we were waiting
*/
boolean waitForOutstandingTasks() throws ForeignException, InterruptedException {
LOG.debug("Waiting for local region snapshots to finish.");int sz = futures.size();
try {// Using the completion service to process the futures that finish first first.
for (int i = 0; i < sz; i++)
{
Future<Void> f = taskPool.take();
f.get();
if (!futures.remove(f)) {
LOG.warn("unexpected future" + f);
}
LOG.debug(((("Completed " + (i + 1)) + "/")
+ sz) + " local region snapshots.");
}LOG.debug(("Completed " + sz) + " local region snapshots.");
return true;
} catch (InterruptedException e) {
LOG.warn("Got InterruptedException in SnapshotSubprocedurePool", e);
if (!stopped) {
Thread.currentThread().interrupt();
throw new ForeignException("SnapshotSubprocedurePool", e);
}
// we are stopped so we can just exit.
} catch (ExecutionException e) {
Throwable cause = e.getCause();
if (cause instanceof ForeignException) {
LOG.warn("Rethrowing ForeignException from SnapshotSubprocedurePool", e);
throw ((ForeignException) (e.getCause()));
} else if (cause instanceof DroppedSnapshotException) {
// we have to abort the region server according to contract of flush
abortable.abort("Received DroppedSnapshotException, aborting", cause);
}
LOG.warn("Got Exception in SnapshotSubprocedurePool", e);
throw new ForeignException(name, e.getCause());
} finally {
cancelTasks();
}
return false;
} | 3.26 |
hbase_RegionServerSnapshotManager_initialize_rdh | /**
* Create a default snapshot handler - uses a zookeeper based member controller.
*
* @param rss
* region server running the handler
* @throws KeeperException
* if the zookeeper cluster cannot be reached
*/
@Override
public void initialize(RegionServerServices rss) throws KeeperException {
this.rss = rss;
ZKWatcher zkw = rss.getZooKeeper();
this.f0 = new ZKProcedureMemberRpcs(zkw, SnapshotManager.ONLINE_SNAPSHOT_CONTROLLER_DESCRIPTION);
// read in the snapshot request configuration properties
Configuration conf = rss.getConfiguration();
long keepAlive = conf.getLong(SNAPSHOT_TIMEOUT_MILLIS_KEY, SNAPSHOT_TIMEOUT_MILLIS_DEFAULT);
int opThreads = conf.getInt(SNAPSHOT_REQUEST_THREADS_KEY, SNAPSHOT_REQUEST_THREADS_DEFAULT);
// create the actual snapshot procedure member
ThreadPoolExecutor pool = ProcedureMember.defaultPool(rss.getServerName().toString(), opThreads, keepAlive);
this.member = new ProcedureMember(f0, pool, new SnapshotSubprocedureBuilder());
} | 3.26 |
hbase_RegionServerSnapshotManager_buildSubprocedure_rdh | /**
* If in a running state, creates the specified subprocedure for handling an online snapshot.
* Because this gets the local list of regions to snapshot and not the set the master had, there
* is a possibility of a race where regions may be missed. This detected by the master in the
* snapshot verification step.
*
* @return Subprocedure to submit to the ProcedureMember.
*/
public Subprocedure buildSubprocedure(SnapshotDescription snapshot) {
// don't run a snapshot if the parent is stop(ping)
if (rss.isStopping() || rss.isStopped()) {
throw new IllegalStateException(("Can't start snapshot on RS: " + rss.getServerName()) + ", because stopping/stopped!");
}
// check to see if this server is hosting any regions for the snapshots
// check to see if we have regions for the snapshot
List<HRegion> involvedRegions;
try {
involvedRegions = getRegionsToSnapshot(snapshot);
} catch (IOException e1) {
throw new IllegalStateException("Failed to figure out if we should handle a snapshot - " + "something has gone awry with the online regions.", e1);
}
// We need to run the subprocedure even if we have no relevant regions. The coordinator
// expects participation in the procedure and without sending message the snapshot attempt
// will hang and fail.
LOG.debug((((("Launching subprocedure for snapshot " + snapshot.getName()) + " from table ") + snapshot.getTable()) + " type ") + snapshot.getType());
ForeignExceptionDispatcher exnDispatcher = new ForeignExceptionDispatcher(snapshot.getName());
Configuration conf = rss.getConfiguration();
long timeoutMillis = conf.getLong(SNAPSHOT_TIMEOUT_MILLIS_KEY, SNAPSHOT_TIMEOUT_MILLIS_DEFAULT);
long wakeMillis = conf.getLong(SNAPSHOT_REQUEST_WAKE_MILLIS_KEY, SNAPSHOT_REQUEST_WAKE_MILLIS_DEFAULT);
switch (snapshot.getType()) {
case FLUSH :
SnapshotSubprocedurePool taskManager = new SnapshotSubprocedurePool(rss.getServerName().toString(), conf, rss);
return new FlushSnapshotSubprocedure(member, exnDispatcher, wakeMillis, timeoutMillis,
involvedRegions, snapshot, taskManager);
case SKIPFLUSH:
  /* This takes an online snapshot without forcing a coordinated flush, to prevent a pause. The
     snapshot type is defined inside the snapshot description. FlushSnapshotSubprocedure
     should be renamed to distributedSnapshotSubprocedure, and the flush() behavior can be
     turned on/off based on the flush type. To minimize the code change, the class name is not
     changed.
   */
SnapshotSubprocedurePool taskManager2 = new SnapshotSubprocedurePool(rss.getServerName().toString(), conf, rss);
return new FlushSnapshotSubprocedure(member, exnDispatcher, wakeMillis, timeoutMillis, involvedRegions, snapshot, taskManager2);
default:
throw new UnsupportedOperationException("Unrecognized snapshot type:" + snapshot.getType());
}
}
/**
* Determine if the snapshot should be handled on this server. NOTE: This is racy -- the master
* expects a list of regionservers. This means if a region moves somewhere between the calls we'll
* miss some regions. For example, a region move during a snapshot could result in a region to be
* skipped or done twice. This is manageable because the {@link MasterSnapshotVerifier} | 3.26 |
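For context, a hedged client-side sketch of how the two snapshot types dispatched above are usually requested. It assumes the Admin#snapshot(String, TableName, SnapshotType) overload and the SnapshotType enum values FLUSH and SKIPFLUSH; verify both against your HBase version.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotRequestSketch {
  // FLUSH forces each region to flush before the snapshot; SKIPFLUSH skips the flush,
  // so edits still only in the memstore may not be captured.
  static void takeSnapshots(Admin admin, TableName table) throws IOException {
    admin.snapshot("snap-flush", table, SnapshotType.FLUSH);
    admin.snapshot("snap-skipflush", table, SnapshotType.SKIPFLUSH);
  }
}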
hbase_RegionServerSnapshotManager_submitTask_rdh | /**
* Submit a task to the pool. NOTE: all must be submitted before you can safely
* {@link #waitForOutstandingTasks()}. This version does not support issuing tasks from multiple
* concurrent table snapshots requests.
*/
void submitTask(final Callable<Void> task) {
Future<Void> f = this.taskPool.submit(task);
futures.add(f);
} | 3.26 |
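A short sketch of the calling sequence the note above implies: every task is submitted before the caller blocks on the pool. This is an illustrative fragment, not actual HBase code; the real caller is the flush snapshot subprocedure, and the loop body here is a placeholder.

// Illustrative caller, not actual HBase code.
void runRegionTasks(SnapshotSubprocedurePool taskManager, List<HRegion> regions)
    throws Exception {
  for (HRegion region : regions) {
    taskManager.submitTask(() -> {
      // placeholder body: the real task flushes/snapshots a single region
      return null;
    });
  }
  // Safe only once all tasks have been submitted:
  taskManager.waitForOutstandingTasks();
}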
hbase_RegionServerSnapshotManager_cancelTasks_rdh | /**
* This attempts to cancel all pending and in-progress tasks (cancellation is requested without interrupting running threads).
*/
void cancelTasks() throws InterruptedException {
  Collection<Future<Void>> tasks = futures;
  LOG.debug("cancelling " + tasks.size() + " tasks for snapshot " + name);
for (Future<Void> f : tasks) {
// TODO Ideally we'd interrupt hbase threads when we cancel. However it seems that there
// are places in the HBase code where row/region locks are taken and not released in a
// finally block. Thus we cancel without interrupting. Cancellations will be slower to
// complete but we won't suffer from unreleased locks due to poor code discipline.
f.cancel(false);
}
// evict remaining tasks and futures from taskPool.
futures.clear();
while (taskPool.poll() != null) {
}
stop();
} | 3.26 |
hbase_RegionServerSnapshotManager_start_rdh | /**
* Start accepting snapshot requests.
*/
@Override
public void start() {
  LOG.debug("Start Snapshot Manager " + rss.getServerName().toString());
  this.f0.start(rss.getServerName().toString(), member);
} | 3.26 |
hbase_RegionServerSnapshotManager_stop_rdh | /**
* Abruptly shutdown the thread pool. Call when exiting a region server.
*/
void stop() {
if (this.stopped)
return;
this.stopped = true;
this.executor.shutdown();
} | 3.26 |
hbase_JvmPauseMonitor_main_rdh | /**
* Simple 'main' to facilitate manual testing of the pause monitor. This main function just leaks
* memory into a list. Running this class with a 1GB heap will very quickly go into "GC hell" and
* result in log messages about the GC pauses.
*/
public static void main(String[] args) throws Exception {
new JvmPauseMonitor(new Configuration()).start();
List<String> list = Lists.newArrayList();
int i = 0;
while (true) {
list.add(String.valueOf(i++));
}
} | 3.26 |
hbase_StoreHotnessProtector_m0_rdh | /**
* {@link #init(Configuration)} is called for every Store that opens on a RegionServer. Here we
* make a lightweight attempt to log this message once per RegionServer, rather than per-Store.
* The goal is just to draw attention to this feature if debugging overload due to heavy writes.
*/
private static void m0() {
if (!loggedDisableMessage) {
LOG.info("StoreHotnessProtector is disabled. Set {} > 0 to enable, " + "which may help mitigate load under heavy write pressure.", PARALLEL_PUT_STORE_THREADS_LIMIT);
loggedDisableMessage = true;
}
} | 3.26 |
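If the protector is wanted, the knob named above is an integer limit on concurrent puts per store. A hedged sketch of enabling it follows; the import path, the constant's visibility, and the chosen value of 10 are assumptions to verify against your HBase version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.regionserver.throttle.StoreHotnessProtector;

public class HotnessConfigSketch {
  static Configuration enableProtector() {
    Configuration conf = new Configuration();
    // Any value > 0 turns the protector on, per the log message above.
    conf.setInt(StoreHotnessProtector.PARALLEL_PUT_STORE_THREADS_LIMIT, 10);
    return conf;
  }
}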
hbase_TimeRange_isAllTime_rdh | /**
* Check if it is for all time
*
* @return true if it is for all time
*/
public boolean isAllTime() {
  return allTime;
} | 3.26 |
hbase_TimeRange_from_rdh | /**
* Represents the time interval [minStamp, Long.MAX_VALUE)
*
* @param minStamp
* the minimum timestamp value, inclusive
*/
public static TimeRange from(long minStamp) {
check(minStamp, INITIAL_MAX_TIMESTAMP);
return new TimeRange(minStamp, INITIAL_MAX_TIMESTAMP);
} | 3.26 |
hbase_TimeRange_getMax_rdh | /**
* Returns the biggest timestamp that should be considered
*/
public long getMax() {
return f0;
} | 3.26 |
hbase_TimeRange_getMin_rdh | /**
* Returns the smallest timestamp that should be considered
*/
public long getMin() {
return minStamp;
} | 3.26 |
hbase_TimeRange_between_rdh | /**
* Represents the time interval [minStamp, maxStamp)
*
* @param minStamp
* the minimum timestamp, inclusive
* @param maxStamp
* the maximum timestamp, exclusive
*/
public static TimeRange between(long minStamp, long maxStamp) {
check(minStamp, maxStamp);
return new TimeRange(minStamp, maxStamp);
} | 3.26 |
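These factories mirror how time windows are passed to the client read API; a short hedged sketch (it assumes Get#setTimeRange takes the raw min/max pair with the same [min, max) semantics):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;

public class TimeRangeGetSketch {
  static Get getWithinWindow(byte[] row, long minStamp, long maxStamp) throws IOException {
    // Equivalent window to TimeRange.between(minStamp, maxStamp)
    return new Get(row).setTimeRange(minStamp, maxStamp);
  }
}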
hbase_TimeRange_withinTimeRange_rdh | /**
* Check if the specified timestamp is within this TimeRange.
* <p/>
* Returns true if within interval [minStamp, maxStamp), false if not.
*
* @param timestamp
* timestamp to check
* @return true if within TimeRange, false if not
*/
public boolean withinTimeRange(long timestamp) {
assert timestamp >= 0;
if (this.allTime) {
return true;
}
// check if >= minStamp
return (minStamp <= timestamp) && (timestamp < f0);
} | 3.26 |
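A tiny worked example of the half-open check above, using the between factory from this same class:

TimeRange tr = TimeRange.between(10L, 20L);
boolean atLower = tr.withinTimeRange(10L); // true: minStamp is inclusive
boolean atUpper = tr.withinTimeRange(20L); // false: maxStamp is exclusive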
hbase_TimeRange_includesTimeRange_rdh | /**
* Check if the range has any overlap with TimeRange
*
* @param tr
* TimeRange
* @return True if there is overlap, false otherwise
*/
// This method came from TimeRangeTracker. We used to go there for this function but better
// to come here to the immutable, unsynchronized datastructure at read time.
public boolean includesTimeRange(final TimeRange tr) {
if (this.allTime) {
return true;
}
assert tr.getMin() >= 0;
return (getMin() < tr.getMax()) && (getMax() >= tr.getMin());
} | 3.26 |
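And a worked example of the overlap condition above:

TimeRange a = TimeRange.between(5L, 10L);
TimeRange b = TimeRange.between(9L, 20L);
boolean overlaps = a.includesTimeRange(b); // true: 5 < 20 && 10 >= 9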
hbase_TimeRange_until_rdh | /**
* Represents the time interval [0, maxStamp)
*
* @param maxStamp
* the maximum timestamp value, exclusive
*/
public static TimeRange until(long maxStamp) {
check(INITIAL_MIN_TIMESTAMP, maxStamp);
return new TimeRange(INITIAL_MIN_TIMESTAMP, maxStamp);
} | 3.26 |
hbase_TimeRange_withinOrAfterTimeRange_rdh | /**
* Check if the specified timestamp is within or after this TimeRange.
* <p>
* Returns true if greater than or equal to minStamp, false if not.
*
* @param timestamp
* timestamp to check
* @return true if within or after TimeRange, false if not
*/
public boolean withinOrAfterTimeRange(long timestamp) {
  assert timestamp >= 0;
  if (allTime) {
return true;
}
// check if >= minStamp
return timestamp >= minStamp;
} | 3.26 |
hbase_AsyncTable_toRow_rdh | /**
* Specify a stop row
*
* @param endKey
* select regions up to and including the region containing this row, exclusive.
*/
default CoprocessorServiceBuilder<S, R> toRow(byte[] endKey) {
return toRow(endKey, false);
} | 3.26 |