name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68)
---|---|---|
hbase_ParseFilter_reduce_rdh | /**
 * This function is called while parsing the filterString, whenever an operator is encountered
* <p>
*
* @param operatorStack
* the stack containing the operators and parenthesis
* @param filterStack
* the stack containing the filters
* @param operator
* the operator found while parsing the filterString
*/
public void reduce(Stack<ByteBuffer> operatorStack, Stack<Filter> filterStack, ByteBuffer operator) {
    while (!operatorStack.empty() && !ParseConstants.LPAREN_BUFFER.equals(operatorStack.peek())
      && hasHigherPriority(operatorStack.peek(), operator)) {
      filterStack.push(popArguments(operatorStack, filterStack));
    }
} | 3.26 |
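The reduce step is a standard operator-precedence (shunting-yard style) reduction: while the operator on top of the stack binds tighter than the incoming one, it is collapsed into a filter. A hedged sketch that drives reduce() by hand the way parseFilterString does; the filter choices are illustrative:

```java
import java.nio.ByteBuffer;
import java.util.Stack;
import org.apache.hadoop.hbase.filter.*;
import org.apache.hadoop.hbase.util.Bytes;

public class ReduceExample {
  public static void main(String[] args) {
    Stack<ByteBuffer> operatorStack = new Stack<>();
    Stack<Filter> filterStack = new Stack<>();
    filterStack.push(new PrefixFilter(Bytes.toBytes("row")));
    operatorStack.push(ParseConstants.AND_BUFFER);
    filterStack.push(new KeyOnlyFilter());
    // AND binds tighter than OR, so the pending AND collapses into a
    // MUST_PASS_ALL FilterList before the caller pushes the OR.
    new ParseFilter().reduce(operatorStack, filterStack, ParseConstants.OR_BUFFER);
    System.out.println(filterStack.peek());
  }
}
```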
hbase_ParseFilter_parseComparator_rdh | /**
* Splits a column in comparatorType:comparatorValue form into separate byte arrays
* <p>
*
* @param comparator
* the comparator
* @return the parsed arguments of the comparator as a 2D byte array
*/
public static byte[][] parseComparator(byte[] comparator) {
    final int index =
      Bytes.searchDelimiterIndex(comparator, 0, comparator.length, ParseConstants.COLON);
    if (index == -1) {
      throw new IllegalArgumentException("Incorrect comparator");
    }
    byte[][] result = new byte[2][0];
    result[0] = new byte[index];
    System.arraycopy(comparator, 0, result[0], 0, index);
    final int len = comparator.length - (index + 1);
    result[1] = new byte[len];
    System.arraycopy(comparator, index + 1, result[1], 0, len);
return result;
} | 3.26 |
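Only the first colon delimits, so the comparator value itself may contain colons. A hedged usage sketch:

```java
import org.apache.hadoop.hbase.filter.ParseFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class ParseComparatorExample {
  public static void main(String[] args) {
    byte[][] parsed = ParseFilter.parseComparator(Bytes.toBytes("binary:abc"));
    System.out.println(Bytes.toString(parsed[0])); // "binary"
    System.out.println(Bytes.toString(parsed[1])); // "abc"
  }
}
```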
hbase_ParseFilter_checkForWhile_rdh | /**
* Checks if the current index of filter string we are on is the beginning of the keyword 'WHILE'
* <p>
*
* @param filterStringAsByteArray
* filter string given by the user
* @param indexOfWhile
* index at which an 'W' was read
* @return true if the keyword 'WHILE' is at the current index
*/
public static boolean checkForWhile(byte[] filterStringAsByteArray, int indexOfWhile) throws CharacterCodingException {
try {
      return filterStringAsByteArray[indexOfWhile] == ParseConstants.W
        && filterStringAsByteArray[indexOfWhile + 1] == ParseConstants.H
        && filterStringAsByteArray[indexOfWhile + 2] == ParseConstants.I
        && filterStringAsByteArray[indexOfWhile + 3] == ParseConstants.L
        && filterStringAsByteArray[indexOfWhile + 4] == ParseConstants.E
        && (indexOfWhile == 0
          || filterStringAsByteArray[indexOfWhile - 1] == ParseConstants.WHITESPACE
          || filterStringAsByteArray[indexOfWhile - 1] == ParseConstants.RPAREN
          || filterStringAsByteArray[indexOfWhile - 1] == ParseConstants.LPAREN)
        && (filterStringAsByteArray[indexOfWhile + 5] == ParseConstants.WHITESPACE
          || filterStringAsByteArray[indexOfWhile + 5] == ParseConstants.LPAREN);
    } catch (ArrayIndexOutOfBoundsException e) {
      return false;
    }
} | 3.26 |
hbase_ParseFilter_convertByteArrayToInt_rdh | /**
* Converts an int expressed in a byte array to an actual int
* <p>
* This doesn't use Bytes.toInt because that assumes that there will be {@link Bytes#SIZEOF_INT}
* bytes available.
* <p>
*
* @param numberAsByteArray
* the int value expressed as a byte array
* @return the int value
*/
public static int convertByteArrayToInt(byte[] numberAsByteArray) {
    long tempResult = ParseFilter.convertByteArrayToLong(numberAsByteArray);
    if (tempResult > Integer.MAX_VALUE) {
      throw new IllegalArgumentException("Integer Argument too large");
    } else if (tempResult < Integer.MIN_VALUE) {
      throw new IllegalArgumentException("Integer Argument too small");
    }
    return (int) tempResult;
} | 3.26 |
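Because the value is first parsed as a long, out-of-range inputs fail loudly instead of silently truncating. A hedged usage sketch:

```java
import org.apache.hadoop.hbase.filter.ParseFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class ConvertIntExample {
  public static void main(String[] args) {
    System.out.println(ParseFilter.convertByteArrayToInt(Bytes.toBytes("42")));  // 42
    System.out.println(ParseFilter.convertByteArrayToInt(Bytes.toBytes("-7"))); // -7
    // "2147483648" exceeds Integer.MAX_VALUE and throws IllegalArgumentException
  }
}
```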
hbase_ParseFilter_createUnescapdArgument_rdh | /**
 * Removes the single quote that escapes another single quote, returning the unescaped argument
* <p>
*
* @param filterStringAsByteArray
* filter string given by user
* @param argumentStartIndex
* start index of the argument
* @param argumentEndIndex
* end index of the argument
* @return returns an unescaped argument
*/
public static byte[] createUnescapdArgument(byte[] filterStringAsByteArray, int argumentStartIndex, int argumentEndIndex) {
    int unescapedArgumentLength = 2;
    for (int i = argumentStartIndex + 1; i <= argumentEndIndex - 1; i++) {
      unescapedArgumentLength++;
      if (filterStringAsByteArray[i] == ParseConstants.SINGLE_QUOTE && i != argumentEndIndex - 1
        && filterStringAsByteArray[i + 1] == ParseConstants.SINGLE_QUOTE) {
        i++; // an escaped quote pair contributes one output byte, so skip its twin
      }
    }
    byte[] unescapedArgument = new byte[unescapedArgumentLength];
    int count = 1;
    unescapedArgument[0] = '\'';
    for (int i = argumentStartIndex + 1; i <= argumentEndIndex - 1; i++) {
      if (filterStringAsByteArray[i] == ParseConstants.SINGLE_QUOTE && i != argumentEndIndex - 1
        && filterStringAsByteArray[i + 1] == ParseConstants.SINGLE_QUOTE) {
        unescapedArgument[count++] = filterStringAsByteArray[i + 1];
        i++;
      } else {
        unescapedArgument[count++] = filterStringAsByteArray[i];
      }
    }
    unescapedArgument[unescapedArgumentLength - 1] = '\'';
    return unescapedArgument;
} | 3.26 |
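Within the filter language a literal single quote is written as two single quotes; this routine collapses each doubled quote while keeping the surrounding quotes. A hedged sketch (the index arithmetic assumes the quotes sit where shown):

```java
import org.apache.hadoop.hbase.filter.ParseFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class UnescapeExample {
  public static void main(String[] args) {
    byte[] filterString = Bytes.toBytes("PrefixFilter('it''s')");
    int start = 13;                     // index of the opening quote
    int end = filterString.length - 2;  // index of the closing quote
    byte[] unescaped = ParseFilter.createUnescapdArgument(filterString, start, end);
    System.out.println(Bytes.toString(unescaped)); // 'it's' (still quote-wrapped)
  }
}
```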
hbase_ParseFilter_m0_rdh | /**
* Converts a boolean expressed in a byte array to an actual boolean
* <p>
 * This doesn't use Bytes.toBoolean because Bytes.toBoolean(byte[]) assumes that 1 stands for
 * true and 0 for false. Here, the byte arrays representing "true" and "false" are parsed
* <p>
*
* @param booleanAsByteArray
* the boolean value expressed as a byte array
* @return the boolean value
*/
public static boolean m0(byte[] booleanAsByteArray) {
if (booleanAsByteArray == null) {
throw new IllegalArgumentException("convertByteArrayToBoolean called with a null array");
}
    if (booleanAsByteArray.length == 4
      && (booleanAsByteArray[0] == 't' || booleanAsByteArray[0] == 'T')
      && (booleanAsByteArray[1] == 'r' || booleanAsByteArray[1] == 'R')
      && (booleanAsByteArray[2] == 'u' || booleanAsByteArray[2] == 'U')
      && (booleanAsByteArray[3] == 'e' || booleanAsByteArray[3] == 'E')) {
      return true;
    } else if (booleanAsByteArray.length == 5
      && (booleanAsByteArray[0] == 'f' || booleanAsByteArray[0] == 'F')
      && (booleanAsByteArray[1] == 'a' || booleanAsByteArray[1] == 'A')
      && (booleanAsByteArray[2] == 'l' || booleanAsByteArray[2] == 'L')
      && (booleanAsByteArray[3] == 's' || booleanAsByteArray[3] == 'S')
      && (booleanAsByteArray[4] == 'e' || booleanAsByteArray[4] == 'E')) {
      return false;
    } else {
      throw new IllegalArgumentException("Incorrect Boolean Expression");
    }
} | 3.26 |
hbase_ParseFilter_isQuoteUnescaped_rdh | /**
* Returns a boolean indicating whether the quote was escaped or not
* <p>
*
* @param array
* byte array in which the quote was found
* @param quoteIndex
* index of the single quote
* @return returns true if the quote was unescaped
*/
public static boolean isQuoteUnescaped(byte[] array, int quoteIndex) {
    if (array == null) {
      throw new IllegalArgumentException("isQuoteUnescaped called with a null array");
    }
    return quoteIndex == array.length - 1
      || array[quoteIndex + 1] != ParseConstants.SINGLE_QUOTE;
} | 3.26 |
hbase_ParseFilter_getSupportedFilters_rdh | /**
* Return a Set of filters supported by the Filter Language
*/
public Set<String> getSupportedFilters() {
return filterHashMap.keySet();
} | 3.26 |
hbase_ParseFilter_convertByteArrayToLong_rdh | /**
* Converts a long expressed in a byte array to an actual long
* <p>
 * This doesn't use Bytes.toLong because that assumes that there will be {@link Bytes#SIZEOF_LONG}
* bytes available.
* <p>
*
* @param numberAsByteArray
* the long value expressed as a byte array
* @return the long value
*/
public static long convertByteArrayToLong(byte[] numberAsByteArray) {
if (numberAsByteArray == null) {
throw new IllegalArgumentException("convertByteArrayToLong called with a null array");
}
int i = 0;
long result = 0;
boolean isNegative = false;
    if (numberAsByteArray[i] == ParseConstants.MINUS_SIGN) {
      i++;
      isNegative = true;
    }
while (i != numberAsByteArray.length) {
if ((numberAsByteArray[i] < ParseConstants.ZERO) || (numberAsByteArray[i] > ParseConstants.NINE)) {
throw new IllegalArgumentException("Byte Array should only contain digits");
}
result = (result * 10) + (numberAsByteArray[i] - ParseConstants.ZERO);
if (result < 0) {
throw new IllegalArgumentException("Long Argument too large");
}
i++;
}
if (isNegative) {
return -result;
} else {
return result;
}
} | 3.26 |
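The input here is ASCII decimal, not a binary encoding, so any digit count works. A hedged usage sketch:

```java
import org.apache.hadoop.hbase.filter.ParseFilter;

public class ConvertLongExample {
  public static void main(String[] args) {
    // "123" is three ASCII bytes here; Bytes.toLong would instead require
    // exactly eight raw big-endian bytes.
    long v = ParseFilter.convertByteArrayToLong(new byte[] { '1', '2', '3' });
    System.out.println(v); // 123
  }
}
```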
hbase_ParseFilter_checkForOr_rdh | /**
* Checks if the current index of filter string we are on is the beginning of the keyword 'OR'
* <p>
*
* @param filterStringAsByteArray
* filter string given by the user
* @param indexOfOr
* index at which an 'O' was read
* @return true if the keyword 'OR' is at the current index
*/
public static boolean checkForOr(byte[] filterStringAsByteArray, int indexOfOr) throws CharacterCodingException, ArrayIndexOutOfBoundsException {
try {
      if (filterStringAsByteArray[indexOfOr] == ParseConstants.O
        && filterStringAsByteArray[indexOfOr + 1] == ParseConstants.R
        && (filterStringAsByteArray[indexOfOr - 1] == ParseConstants.WHITESPACE
          || filterStringAsByteArray[indexOfOr - 1] == ParseConstants.RPAREN)
        && (filterStringAsByteArray[indexOfOr + 2] == ParseConstants.WHITESPACE
          || filterStringAsByteArray[indexOfOr + 2] == ParseConstants.LPAREN)) {
return true;
} else {
return false;
}
} catch (ArrayIndexOutOfBoundsException e) {
return false;
}
} | 3.26 |
hbase_ParseFilter_hasHigherPriority_rdh | /**
 * Returns whether operator a has higher precedence than operator b
 * <p>
 * If a has higher precedence than b, it returns true. If they have the same or lower precedence,
 * it returns false.
*/
public boolean hasHigherPriority(ByteBuffer a, ByteBuffer b) {
    return operatorPrecedenceHashMap.get(a) - operatorPrecedenceHashMap.get(b) < 0;
} | 3.26 |
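The precedence map itself is not among these rows; a plausible sketch, assuming the usual filter-language ordering where smaller values bind tighter (SKIP/WHILE, then AND, then OR):

```java
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hbase.filter.ParseConstants;

public class PrecedenceSketch {
  // Hypothetical contents of operatorPrecedenceHashMap; smaller = binds tighter.
  static final Map<ByteBuffer, Integer> PRECEDENCE = new HashMap<>();
  static {
    PRECEDENCE.put(ParseConstants.SKIP_BUFFER, 1);
    PRECEDENCE.put(ParseConstants.WHILE_BUFFER, 1);
    PRECEDENCE.put(ParseConstants.AND_BUFFER, 2);
    PRECEDENCE.put(ParseConstants.OR_BUFFER, 3);
  }
  // Under this map, hasHigherPriority(AND_BUFFER, OR_BUFFER) is true.
}
```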
hbase_ParseFilter_extractFilterSimpleExpression_rdh | /**
* Extracts a simple filter expression from the filter string given by the user
* <p>
* A simpleFilterExpression is of the form: FilterName('arg', 'arg', 'arg') The user given filter
* string can have many simpleFilterExpressions combined using operators.
* <p>
* This function extracts a simpleFilterExpression from the larger filterString given the start
* offset of the simpler expression
* <p>
*
* @param filterStringAsByteArray
* filter string given by the user
* @param filterExpressionStartOffset
* start index of the simple filter expression
* @return byte array containing the simple filter expression
*/
public byte[] extractFilterSimpleExpression(byte[] filterStringAsByteArray, int filterExpressionStartOffset)
throws CharacterCodingException {
int quoteCount = 0;
for (int i = filterExpressionStartOffset; i < filterStringAsByteArray.length; i++) {
      if (filterStringAsByteArray[i] == ParseConstants.SINGLE_QUOTE) {
        if (isQuoteUnescaped(filterStringAsByteArray, i)) {
          quoteCount++;
        } else {
          // To skip the next quote that has been escaped
          i++;
        }
      }
if ((filterStringAsByteArray[i] == ParseConstants.RPAREN) && ((quoteCount % 2) == 0)) {
byte[] filterSimpleExpression = new byte[(i - filterExpressionStartOffset) + 1];
Bytes.putBytes(filterSimpleExpression, 0, filterStringAsByteArray, filterExpressionStartOffset, (i - filterExpressionStartOffset) + 1);
return filterSimpleExpression;
}
}
throw new IllegalArgumentException("Incorrect Filter String");
} | 3.26 |
hbase_ParseFilter_popArguments_rdh | /**
 * Pops an operator from the operator stack and the number of filters it requires from the
 * filterStack, then evaluates them into a single filter
* <p>
*
* @param operatorStack
* the stack containing the operators
* @param filterStack
* the stack containing the filters
* @return the evaluated filter
*/
public static Filter popArguments(Stack<ByteBuffer> operatorStack, Stack<Filter> filterStack) {
    ByteBuffer argumentOnTopOfStack = operatorStack.peek();
    if (argumentOnTopOfStack.equals(ParseConstants.OR_BUFFER)) {
      // The top of the stack is an OR
      try {
        ArrayList<Filter> listOfFilters = new ArrayList<>();
        while (!operatorStack.empty() && operatorStack.peek().equals(ParseConstants.OR_BUFFER)) {
          Filter filter = filterStack.pop();
          listOfFilters.add(0, filter);
          operatorStack.pop();
        }
        Filter filter = filterStack.pop();
        listOfFilters.add(0, filter);
        Filter orFilter = new FilterList(Operator.MUST_PASS_ONE, listOfFilters);
        return orFilter;
      } catch (EmptyStackException e) {
        throw new IllegalArgumentException("Incorrect input string - an OR needs two filters");
      }
    } else if (argumentOnTopOfStack.equals(ParseConstants.AND_BUFFER)) {
      // The top of the stack is an AND
      try {
        ArrayList<Filter> listOfFilters = new ArrayList<>();
        while (!operatorStack.empty() && operatorStack.peek().equals(ParseConstants.AND_BUFFER)) {
          Filter filter = filterStack.pop();
          listOfFilters.add(0, filter);
          operatorStack.pop();
        }
        Filter filter = filterStack.pop();
        listOfFilters.add(0, filter);
        Filter andFilter = new FilterList(Operator.MUST_PASS_ALL, listOfFilters);
        return andFilter;
      } catch (EmptyStackException e) {
        throw new IllegalArgumentException("Incorrect input string - an AND needs two filters");
      }
    } else if (argumentOnTopOfStack.equals(ParseConstants.SKIP_BUFFER)) {
      // The top of the stack is a SKIP
      try {
        Filter wrappedFilter = filterStack.pop();
        Filter skipFilter = new SkipFilter(wrappedFilter);
        operatorStack.pop();
        return skipFilter;
      } catch (EmptyStackException e) {
        throw new IllegalArgumentException("Incorrect input string - a SKIP wraps a filter");
      }
    } else if (argumentOnTopOfStack.equals(ParseConstants.WHILE_BUFFER)) {
      // The top of the stack is a WHILE
      try {
        Filter wrappedFilter = filterStack.pop();
        Filter whileMatchFilter = new WhileMatchFilter(wrappedFilter);
        operatorStack.pop();
        return whileMatchFilter;
      } catch (EmptyStackException e) {
        throw new IllegalArgumentException("Incorrect input string - a WHILE wraps a filter");
      }
    } else if (argumentOnTopOfStack.equals(ParseConstants.LPAREN_BUFFER)) {
      // The top of the stack is a LPAREN
      try {
        Filter filter = filterStack.pop();
        operatorStack.pop();
        return filter;
      } catch (EmptyStackException e) {
        throw new IllegalArgumentException("Incorrect Filter String");
      }
    } else {
      throw new IllegalArgumentException("Incorrect arguments on operatorStack");
    }
} | 3.26 |
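A hedged example of the OR branch: consecutive ORs collapse into a single MUST_PASS_ONE FilterList rather than a nested pair.

```java
import java.nio.ByteBuffer;
import java.util.Stack;
import org.apache.hadoop.hbase.filter.*;
import org.apache.hadoop.hbase.util.Bytes;

public class PopArgumentsExample {
  public static void main(String[] args) {
    Stack<ByteBuffer> ops = new Stack<>();
    Stack<Filter> filters = new Stack<>();
    filters.push(new FirstKeyOnlyFilter());
    ops.push(ParseConstants.OR_BUFFER);
    filters.push(new KeyOnlyFilter());
    ops.push(ParseConstants.OR_BUFFER);
    filters.push(new PrefixFilter(Bytes.toBytes("r")));
    FilterList or = (FilterList) ParseFilter.popArguments(ops, filters);
    System.out.println(or.getOperator()); // MUST_PASS_ONE, three member filters
  }
}
```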
hbase_ParseFilter_getFilterArguments_rdh | /**
* Returns the arguments of the filter from the filter string
* <p>
*
* @param filterStringAsByteArray
* filter string given by the user
* @return an ArrayList containing the arguments of the filter in the filter string
*/
public static ArrayList<byte[]> getFilterArguments(byte[] filterStringAsByteArray) {
    int argumentListStartIndex = Bytes.searchDelimiterIndex(filterStringAsByteArray, 0,
      filterStringAsByteArray.length, ParseConstants.LPAREN);
    if (argumentListStartIndex == -1) {
      throw new IllegalArgumentException("Incorrect argument list");
    }
    int argumentStartIndex = 0;
    int argumentEndIndex = 0;
    ArrayList<byte[]> filterArguments = new ArrayList<>();
    for (int i = argumentListStartIndex + 1; i < filterStringAsByteArray.length; i++) {
      if (filterStringAsByteArray[i] == ParseConstants.WHITESPACE
        || filterStringAsByteArray[i] == ParseConstants.COMMA
        || filterStringAsByteArray[i] == ParseConstants.RPAREN) {
        continue;
      }
      // The argument is in single quotes - for example 'prefix'
      if (filterStringAsByteArray[i] == ParseConstants.SINGLE_QUOTE) {
        argumentStartIndex = i;
        for (int j = argumentStartIndex + 1; j < filterStringAsByteArray.length; j++) {
          if (filterStringAsByteArray[j] == ParseConstants.SINGLE_QUOTE) {
            if (isQuoteUnescaped(filterStringAsByteArray, j)) {
              argumentEndIndex = j;
              i = j + 1;
              byte[] filterArgument = createUnescapdArgument(filterStringAsByteArray,
                argumentStartIndex, argumentEndIndex);
              filterArguments.add(filterArgument);
              break;
            } else {
              // To jump over the second escaped quote
              j++;
            }
          } else if (j == filterStringAsByteArray.length - 1) {
            throw new IllegalArgumentException("Incorrect argument list");
          }
        }
      } else {
        // The argument is an integer, boolean, comparison operator like <, >, != etc
        argumentStartIndex = i;
        for (int j = argumentStartIndex; j < filterStringAsByteArray.length; j++) {
          if (filterStringAsByteArray[j] == ParseConstants.WHITESPACE
            || filterStringAsByteArray[j] == ParseConstants.COMMA
            || filterStringAsByteArray[j] == ParseConstants.RPAREN) {
            argumentEndIndex = j - 1;
            i = j;
            byte[] filterArgument = new byte[(argumentEndIndex - argumentStartIndex) + 1];
            Bytes.putBytes(filterArgument, 0, filterStringAsByteArray, argumentStartIndex,
              (argumentEndIndex - argumentStartIndex) + 1);
            filterArguments.add(filterArgument);
            break;
          } else if (j == filterStringAsByteArray.length - 1) {
            throw new IllegalArgumentException("Incorrect argument list");
          }
        }
      }
    }
    return filterArguments;
} | 3.26 |
hbase_ParseFilter_getAllFilters_rdh | /**
* Returns all known filters
*
* @return an unmodifiable map of filters
*/
public static Map<String, String> getAllFilters() {
    return Collections.unmodifiableMap(filterHashMap);
} | 3.26 |
hbase_ParseFilter_parseFilterString_rdh | /**
* Parses the filterString and constructs a filter using it
* <p>
*
* @param filterStringAsByteArray
* filter string given by the user
* @return filter object we constructed
*/
public Filter parseFilterString(byte[] filterStringAsByteArray) throws CharacterCodingException {
    // stack for the operators and parenthesis
    Stack<ByteBuffer> operatorStack = new Stack<>();
    // stack for the filter objects
    Stack<Filter> filterStack = new Stack<>();
    Filter filter = null;
    for (int i = 0; i < filterStringAsByteArray.length; i++) {
      if (filterStringAsByteArray[i] == ParseConstants.LPAREN) {
        // LPAREN found
        operatorStack.push(ParseConstants.LPAREN_BUFFER);
      } else if (filterStringAsByteArray[i] == ParseConstants.WHITESPACE
        || filterStringAsByteArray[i] == ParseConstants.TAB) {
        // WHITESPACE or TAB found
        continue;
      } else if (checkForOr(filterStringAsByteArray, i)) {
        // OR found
        i += OR_ARRAY.length - 1;
        reduce(operatorStack, filterStack, ParseConstants.OR_BUFFER);
        operatorStack.push(ParseConstants.OR_BUFFER);
      } else if (checkForAnd(filterStringAsByteArray, i)) {
        // AND found
        i += AND_ARRAY.length - 1;
        reduce(operatorStack, filterStack, ParseConstants.AND_BUFFER);
        operatorStack.push(ParseConstants.AND_BUFFER);
      } else if (checkForSkip(filterStringAsByteArray, i)) {
        // SKIP found
        i += SKIP_ARRAY.length - 1;
        reduce(operatorStack, filterStack, ParseConstants.SKIP_BUFFER);
        operatorStack.push(ParseConstants.SKIP_BUFFER);
      } else if (checkForWhile(filterStringAsByteArray, i)) {
        // WHILE found
        i += WHILE_ARRAY.length - 1;
        reduce(operatorStack, filterStack, ParseConstants.WHILE_BUFFER);
        operatorStack.push(ParseConstants.WHILE_BUFFER);
      } else if (filterStringAsByteArray[i] == ParseConstants.RPAREN) {
        // RPAREN found
        if (operatorStack.empty()) {
          throw new IllegalArgumentException("Mismatched parenthesis");
        }
        ByteBuffer argumentOnTopOfStack = operatorStack.peek();
        if (argumentOnTopOfStack.equals(ParseConstants.LPAREN_BUFFER)) {
          operatorStack.pop();
          continue;
        }
        while (!argumentOnTopOfStack.equals(ParseConstants.LPAREN_BUFFER)) {
          filterStack.push(popArguments(operatorStack, filterStack));
          if (operatorStack.empty()) {
            throw new IllegalArgumentException("Mismatched parenthesis");
          }
          argumentOnTopOfStack = operatorStack.pop();
        }
      } else {
        // SimpleFilterExpression found
        byte[] filterSimpleExpression = extractFilterSimpleExpression(filterStringAsByteArray, i);
        i += filterSimpleExpression.length - 1;
        filter = parseSimpleFilterExpression(filterSimpleExpression);
        filterStack.push(filter);
      }
    }
    // Finished parsing filterString
    while (!operatorStack.empty()) {
      filterStack.push(popArguments(operatorStack, filterStack));
    }
    if (filterStack.empty()) {
      throw new IllegalArgumentException("Incorrect Filter String");
    }
    filter = filterStack.pop();
    if (!filterStack.empty()) {
      throw new IllegalArgumentException("Incorrect Filter String");
    }
    return filter;
} | 3.26 |
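A hedged end-to-end example of the whole parse loop, using filters the filter language supports:

```java
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.ParseFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class ParseFilterStringExample {
  public static void main(String[] args) throws Exception {
    ParseFilter parser = new ParseFilter();
    Filter f = parser.parseFilterString(
      Bytes.toBytes("(PrefixFilter('row') AND KeyOnlyFilter()) OR FirstKeyOnlyFilter()"));
    // AND binds tighter than OR, so the result is an OR FilterList whose
    // first member is an AND FilterList of the two inner filters.
    System.out.println(f);
  }
}
```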
hbase_ParseFilter_checkForAnd_rdh | /**
* Checks if the current index of filter string we are on is the beginning of the keyword 'AND'
* <p>
*
* @param filterStringAsByteArray
* filter string given by the user
* @param indexOfAnd
* index at which an 'A' was read
* @return true if the keyword 'AND' is at the current index
*/
public static boolean checkForAnd(byte[] filterStringAsByteArray, int indexOfAnd) throws CharacterCodingException {
try {
      if (filterStringAsByteArray[indexOfAnd] == ParseConstants.A
        && filterStringAsByteArray[indexOfAnd + 1] == ParseConstants.N
        && filterStringAsByteArray[indexOfAnd + 2] == ParseConstants.D
        && (filterStringAsByteArray[indexOfAnd - 1] == ParseConstants.WHITESPACE
          || filterStringAsByteArray[indexOfAnd - 1] == ParseConstants.RPAREN)
        && (filterStringAsByteArray[indexOfAnd + 3] == ParseConstants.WHITESPACE
          || filterStringAsByteArray[indexOfAnd + 3] == ParseConstants.LPAREN)) {
return true;
} else {
return false;
}
} catch (ArrayIndexOutOfBoundsException e) {
return false;
}
} | 3.26 |
hbase_ParseFilter_getFilterName_rdh | /**
* Returns the filter name given a simple filter expression
* <p>
*
* @param filterStringAsByteArray
* a simple filter expression
* @return name of filter in the simple filter expression
*/
public static byte[] getFilterName(byte[] filterStringAsByteArray) {
    int filterNameStartIndex = 0;
    int filterNameEndIndex = 0;
    for (int i = filterNameStartIndex; i < filterStringAsByteArray.length; i++) {
      if (filterStringAsByteArray[i] == ParseConstants.LPAREN
        || filterStringAsByteArray[i] == ParseConstants.WHITESPACE) {
        filterNameEndIndex = i;
        break;
      }
    }
    if (filterNameEndIndex == 0) {
      throw new IllegalArgumentException("Incorrect Filter Name");
    }
    byte[] filterName = new byte[filterNameEndIndex - filterNameStartIndex];
    Bytes.putBytes(filterName, 0, filterStringAsByteArray, 0,
      filterNameEndIndex - filterNameStartIndex);
    return filterName;
} | 3.26 |
hbase_ParseFilter_checkForSkip_rdh | /**
* Checks if the current index of filter string we are on is the beginning of the keyword 'SKIP'
* <p>
*
* @param filterStringAsByteArray
* filter string given by the user
* @param indexOfSkip
* index at which an 'S' was read
* @return true if the keyword 'SKIP' is at the current index
*/
public static boolean checkForSkip(byte[] filterStringAsByteArray, int indexOfSkip)
    throws CharacterCodingException {
    try {
      return filterStringAsByteArray[indexOfSkip] == ParseConstants.S
        && filterStringAsByteArray[indexOfSkip + 1] == ParseConstants.K
        && filterStringAsByteArray[indexOfSkip + 2] == ParseConstants.I
        && filterStringAsByteArray[indexOfSkip + 3] == ParseConstants.P
        && (indexOfSkip == 0
          || filterStringAsByteArray[indexOfSkip - 1] == ParseConstants.WHITESPACE
          || filterStringAsByteArray[indexOfSkip - 1] == ParseConstants.RPAREN
          || filterStringAsByteArray[indexOfSkip - 1] == ParseConstants.LPAREN)
        && (filterStringAsByteArray[indexOfSkip + 4] == ParseConstants.WHITESPACE
          || filterStringAsByteArray[indexOfSkip + 4] == ParseConstants.LPAREN);
    } catch (ArrayIndexOutOfBoundsException e) {
      return false;
    }
} | 3.26 |
hbase_ParseFilter_parseSimpleFilterExpression_rdh | /**
* Constructs a filter object given a simple filter expression
* <p>
*
* @param filterStringAsByteArray
* filter string given by the user
* @return filter object we constructed
*/
public Filter parseSimpleFilterExpression(byte[] filterStringAsByteArray) throws CharacterCodingException {
String filterName = Bytes.toString(getFilterName(filterStringAsByteArray));
ArrayList<byte[]> filterArguments = getFilterArguments(filterStringAsByteArray);
if (!filterHashMap.containsKey(filterName)) {
throw new IllegalArgumentException(("Filter Name " + filterName) + " not supported");
}
filterName = filterHashMap.get(filterName);
final String methodName = "createFilterFromArguments";
try {
Class<?> c = Class.forName(filterName);
Class<?>[] argTypes = new Class[]{ ArrayList.class };
Method m = c.getDeclaredMethod(methodName, argTypes);
return ((Filter) (m.invoke(null, filterArguments)));
} catch (ClassNotFoundException e) {
LOG.error("Could not find class {}", filterName, e);
} catch (NoSuchMethodException e) {
LOG.error("Could not find method {} in {}", methodName, filterName, e);} catch (IllegalAccessException e) {
LOG.error("Unable to access specified class {}", filterName, e);
} catch (InvocationTargetException e) {
LOG.error("Method {} threw an exception for {}", methodName, filterName, e);
}
throw new IllegalArgumentException("Incorrect filter string " + new String(filterStringAsByteArray, StandardCharsets.UTF_8));
} | 3.26 |
hbase_ParseFilter_removeQuotesFromByteArray_rdh | /**
 * Takes a quoted byte array and converts it into an unquoted byte array. For example: given a
 * byte array representing 'abc', it returns a byte array representing abc
* <p>
*
* @param quotedByteArray
* the quoted byte array
* @return Unquoted byte array
*/
public static byte[] removeQuotesFromByteArray(byte[] quotedByteArray) {
    if (quotedByteArray == null || quotedByteArray.length < 2
      || quotedByteArray[0] != ParseConstants.SINGLE_QUOTE
      || quotedByteArray[quotedByteArray.length - 1] != ParseConstants.SINGLE_QUOTE) {
      throw new IllegalArgumentException("removeQuotesFromByteArray needs a quoted byte array");
    } else {
      byte[] unquotedByteArray = new byte[quotedByteArray.length - 2];
      Bytes.putBytes(unquotedByteArray, 0, quotedByteArray, 1, quotedByteArray.length - 2);
      return unquotedByteArray;
}
} | 3.26 |
hbase_ParseFilter_createCompareOperator_rdh | /**
* Takes a compareOperator symbol as a byte array and returns the corresponding CompareOperator
*
* @param compareOpAsByteArray
 * the compare operator symbol as a byte array
* @return the Compare Operator
*/
public static CompareOperator createCompareOperator(byte[] compareOpAsByteArray) {
ByteBuffer compareOp = ByteBuffer.wrap(compareOpAsByteArray);
    if (compareOp.equals(ParseConstants.LESS_THAN_BUFFER)) {
      return CompareOperator.LESS;
    } else if (compareOp.equals(ParseConstants.LESS_THAN_OR_EQUAL_TO_BUFFER)) {
      return CompareOperator.LESS_OR_EQUAL;
    } else if (compareOp.equals(ParseConstants.GREATER_THAN_BUFFER)) {
      return CompareOperator.GREATER;
    } else if (compareOp.equals(ParseConstants.GREATER_THAN_OR_EQUAL_TO_BUFFER)) {
      return CompareOperator.GREATER_OR_EQUAL;
    } else if (compareOp.equals(ParseConstants.NOT_EQUAL_TO_BUFFER)) {
      return CompareOperator.NOT_EQUAL;
    } else if (compareOp.equals(ParseConstants.EQUAL_TO_BUFFER)) {
      return CompareOperator.EQUAL;
    } else {
      throw new IllegalArgumentException("Invalid compare operator");
    }
} | 3.26 |
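A hedged usage sketch mapping a filter-language symbol to its enum value:

```java
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.filter.ParseFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class CompareOperatorExample {
  public static void main(String[] args) {
    CompareOperator op = ParseFilter.createCompareOperator(Bytes.toBytes("<="));
    System.out.println(op); // LESS_OR_EQUAL
  }
}
```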
hbase_Compressor_writeCompressed_rdh | /**
* Compresses and writes an array to a DataOutput
*
* @param data
 * the array to write.
 * @param offset
 * the offset in the array at which the data to write starts
 * @param length
 * the number of bytes to write
* @param out
* the DataOutput to write into
* @param dict
* the dictionary to use for compression
*/
@Deprecated
static void writeCompressed(byte[] data, int offset, int length, DataOutput out, Dictionary dict) throws IOException {
short dictIdx = Dictionary.NOT_IN_DICTIONARY;
if (dict != null) {
dictIdx = dict.findEntry(data, offset, length);
}
    if (dictIdx == Dictionary.NOT_IN_DICTIONARY) {
      // not in dict
      out.writeByte(Dictionary.NOT_IN_DICTIONARY);
      WritableUtils.writeVInt(out, length);
      out.write(data, offset, length);
    } else {
      out.writeShort(dictIdx);
    }
} | 3.26 |
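The wire format is therefore either a literal (a NOT_IN_DICTIONARY status byte, a vint length, then the raw bytes) or a two-byte dictionary index. A minimal sketch of the literal branch, assuming no dictionary hit and that the Dictionary constant is importable as shown:

```java
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.hbase.io.util.Dictionary;
import org.apache.hadoop.io.WritableUtils;

public class LiteralEncodingSketch {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bos);
    byte[] data = "region-name".getBytes();
    out.writeByte(Dictionary.NOT_IN_DICTIONARY); // status byte: literal follows
    WritableUtils.writeVInt(out, data.length);   // vint length prefix
    out.write(data);                             // raw bytes
    System.out.println(bos.size());
  }
}
```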
hbase_Compressor_main_rdh | /**
* Command line tool to compress and uncompress WALs.
*/
public static void main(String[] args) throws IOException {
if (((args.length != 2) || args[0].equals("--help")) || args[0].equals("-h")) {
m0();
System.exit(-1);
}
Path inputPath = new Path(args[0]);
Path outputPath = new Path(args[1]);
transformFile(inputPath, outputPath);
} | 3.26 |
hbase_Compressor_uncompressIntoArray_rdh | /**
* Reads a compressed entry into an array. The output into the array ends up length-prefixed.
*
* @param to
* the array to write into
* @param offset
* array offset to start writing to
* @param in
* the DataInput to read from
* @param dict
* the dictionary to use for compression
* @return the length of the uncompressed data
*/
@Deprecated
  static int uncompressIntoArray(byte[] to, int offset, DataInput in, Dictionary dict)
    throws IOException {
byte status = in.readByte();
if (status == Dictionary.NOT_IN_DICTIONARY) {
// status byte indicating that data to be read is not in dictionary.
// if this isn't in the dictionary, we need to add to the dictionary.
int length = WritableUtils.readVInt(in);
in.readFully(to, offset, length);
dict.addEntry(to, offset, length);
return length;
} else {
// the status byte also acts as the higher order byte of the dictionary
// entry
short dictIdx = toShort(status, in.readByte());
byte[] entry;
try {
entry = dict.getEntry(dictIdx);
      } catch (Exception ex) {
        throw new IOException("Unable to uncompress the log entry", ex);
      }
      if (entry == null) {
        throw new IOException("Missing dictionary entry for index " + dictIdx);
      }
      // now we write the uncompressed value.
      Bytes.putBytes(to, offset, entry, 0, entry.length);
return entry.length;
}
} | 3.26 |
hbase_Compressor_readCompressed_rdh | /**
* Reads the next compressed entry and returns it as a byte array
*
* @param in
* the DataInput to read from
* @param dict
* the dictionary we use for our read.
* @return the uncompressed array.
*/
@Deprecated
  static byte[] readCompressed(DataInput in, Dictionary dict) throws IOException {
byte status = in.readByte();
if (status == Dictionary.NOT_IN_DICTIONARY) {
int length = WritableUtils.readVInt(in);
// if this isn't in the dictionary, we need to add to the dictionary.
byte[] arr = new byte[length];
in.readFully(arr);
if (dict != null) {
dict.addEntry(arr, 0, length);
}
return arr;
} else {
// Status here is the higher-order byte of index of the dictionary entry
// (when its not Dictionary.NOT_IN_DICTIONARY -- dictionary indices are
// shorts).
short dictIdx = toShort(status, in.readByte());
byte[] entry = dict.getEntry(dictIdx);
if (entry == null) {
throw new IOException("Missing dictionary entry for index " + dictIdx);
}
return entry;
}
} | 3.26 |
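toShort itself is not among these rows; a plausible reconstruction, treating the status byte as the high-order byte of the dictionary index:

```java
public class ToShortSketch {
  // Assumption: this mirrors the toShort helper used above, which is not shown here.
  static short toShort(byte higher, byte lower) {
    return (short) ((higher << 8) | (lower & 0xFF));
  }

  public static void main(String[] args) {
    System.out.println(toShort((byte) 0x01, (byte) 0x02)); // 258
  }
}
```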
hbase_MetricsMaster_setNumSpaceQuotas_rdh | /**
* Sets the number of space quotas defined.
*
* @see MetricsMasterQuotaSource#updateNumSpaceQuotas(long)
*/
public void setNumSpaceQuotas(final long numSpaceQuotas) {
masterQuotaSource.updateNumSpaceQuotas(numSpaceQuotas);
} | 3.26 |
hbase_MetricsMaster_incrementSnapshotObserverTime_rdh | /**
* Sets the execution time of a period of the {@code SnapshotQuotaObserverChore}.
*/
public void incrementSnapshotObserverTime(final long executionTime) {
    masterQuotaSource.incrementSnapshotObserverChoreTime(executionTime);
} | 3.26 |
hbase_MetricsMaster_incrementReadRequests_rdh | /**
*
* @param inc
* How much to add to read requests.
*/
public void incrementReadRequests(final long inc) {
masterSource.incReadRequests(inc);
} | 3.26 |
hbase_MetricsMaster_incrementRequests_rdh | /**
*
* @param inc
* How much to add to requests.
*/
public void incrementRequests(final long inc) {
    masterSource.incRequests(inc);
  } | 3.26 |
hbase_MetricsMaster_getServerCrashProcMetrics_rdh | /**
 * Returns the set of metrics for the server crash procedure
*/
public ProcedureMetrics getServerCrashProcMetrics() {
return serverCrashProcMetrics;
} | 3.26 |
hbase_MetricsMaster_incrementSnapshotFetchTime_rdh | /**
* Sets the execution time to fetch the mapping of snapshots to originating table.
*/
public void incrementSnapshotFetchTime(long executionTime) {
masterQuotaSource.incrementSnapshotObserverSnapshotFetchTime(executionTime);
} | 3.26 |
hbase_MetricsMaster_convertToProcedureMetrics_rdh | /**
* This is utility function that converts {@link OperationMetrics} to {@link ProcedureMetrics}.
* NOTE: Procedure framework in hbase-procedure module accesses metrics common to most procedures
* through {@link ProcedureMetrics} interface. Metrics source classes in hbase-hadoop-compat
* module provides similar interface {@link OperationMetrics} that contains metrics common to most
* operations. As both hbase-procedure and hbase-hadoop-compat are lower level modules used by
* hbase-server (this) module and there is no dependency between them, this method does the
* required conversion.
*/
public static ProcedureMetrics convertToProcedureMetrics(final OperationMetrics metrics) {
    return new ProcedureMetrics() {
      @Override
      public Counter getSubmittedCounter() {
        return metrics.getSubmittedCounter();
      }

      @Override
      public Histogram getTimeHisto() {
        return metrics.getTimeHisto();
      }

      @Override
      public Counter m1() {
        return metrics.getFailedCounter();
      }
    };
} | 3.26 |
hbase_MetricsMaster_setNumRegionSizeReports_rdh | /**
* Sets the number of region size reports the master currently has in memory.
*
* @see MetricsMasterQuotaSource#updateNumCurrentSpaceQuotaRegionSizeReports(long)
*/
public void setNumRegionSizeReports(final long numRegionReports) {
masterQuotaSource.updateNumCurrentSpaceQuotaRegionSizeReports(numRegionReports);
} | 3.26 |
hbase_MetricsMaster_incrementQuotaObserverTime_rdh | /**
* Sets the execution time of a period of the QuotaObserverChore.
*
* @param executionTime
* The execution time in milliseconds.
* @see MetricsMasterQuotaSource#incrementSpaceQuotaObserverChoreTime(long)
*/
public void incrementQuotaObserverTime(final long executionTime) {
    masterQuotaSource.incrementSpaceQuotaObserverChoreTime(executionTime);
} | 3.26 |
hbase_MetricsMaster_m0_rdh | /**
*
* @param inc
* How much to add to write requests.
*/
public void m0(final long inc) {
masterSource.incWriteRequests(inc);
} | 3.26 |
hbase_MetricsMaster_setNumTableInSpaceQuotaViolation_rdh | /**
 * Sets the number of tables in violation of a space quota.
*
* @see MetricsMasterQuotaSource#updateNumTablesInSpaceQuotaViolation(long)
*/
public void setNumTableInSpaceQuotaViolation(final long numTablesInViolation) {
masterQuotaSource.updateNumTablesInSpaceQuotaViolation(numTablesInViolation);
} | 3.26 |
hbase_MetricsMaster_incrementSnapshotSizeComputationTime_rdh | /**
* Sets the execution time to compute the size of a single snapshot.
*/
public void incrementSnapshotSizeComputationTime(final long executionTime) {
masterQuotaSource.incrementSnapshotObserverSnapshotComputationTime(executionTime);
} | 3.26 |
hbase_MetricsMaster_setNumNamespacesInSpaceQuotaViolation_rdh | /**
* Sets the number of namespaces in violation of a space quota.
*
* @see MetricsMasterQuotaSource#updateNumNamespacesInSpaceQuotaViolation(long)
*/
public void setNumNamespacesInSpaceQuotaViolation(final long numNamespacesInViolation) {
masterQuotaSource.updateNumNamespacesInSpaceQuotaViolation(numNamespacesInViolation);
} | 3.26 |
hbase_MetricsMaster_getMetricsSource_rdh | // for unit-test usage
public MetricsMasterSource getMetricsSource() {
return masterSource;
} | 3.26 |
hbase_AbstractViolationPolicyEnforcement_getFileSize_rdh | /**
* Computes the size of a single file on the filesystem. If the size cannot be computed for some
* reason, a {@link SpaceLimitingException} is thrown, as the file may violate a quota. If the
* provided path does not reference a file, an {@link IllegalArgumentException} is thrown.
*
* @param fs
* The FileSystem which the path refers to a file upon
* @param path
* The path on the {@code fs} to a file whose size is being checked
* @return The size in bytes of the file
*/
long getFileSize(FileSystem fs, String path) throws SpaceLimitingException {
    final FileStatus status;
    try {
      status = fs.getFileStatus(new Path(Objects.requireNonNull(path)));
    } catch (IOException e) {
      throw new SpaceLimitingException(getPolicyName(),
        "Could not verify length of file to bulk load: " + path, e);
}
if (!status.isFile()) {
throw new IllegalArgumentException(path + " is not a file.");
}
return status.getLen();
} | 3.26 |
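Under the hood this is a plain Hadoop FileStatus lookup; a minimal sketch of the same check, with an illustrative path:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSizeSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // "/staging/cf/hfile" is a hypothetical path, not one from the source.
    FileStatus status = fs.getFileStatus(new Path("/staging/cf/hfile"));
    if (!status.isFile()) {
      throw new IllegalArgumentException(status.getPath() + " is not a file.");
    }
    System.out.println(status.getLen()); // size in bytes, as getFileSize returns
  }
}
```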
hbase_AccessControlFilter_toByteArray_rdh | /**
* Returns The filter serialized using pb
*/
@Override
public byte[] toByteArray() {
// no implementation, server-side use only
throw new UnsupportedOperationException("Serialization not supported. Intended for server-side use only.");
} | 3.26 |
hbase_AccessControlFilter_parseFrom_rdh | /**
*
* @param pbBytes
* A pb serialized {@link AccessControlFilter} instance
* @return An instance of {@link AccessControlFilter} made from <code>bytes</code>
* @throws org.apache.hadoop.hbase.exceptions.DeserializationException
* @see #toByteArray()
*/
public static AccessControlFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
// no implementation, server-side use only
throw new UnsupportedOperationException("Serialization not supported. Intended for server-side use only.");
} | 3.26 |
hbase_SnapshotManager_snapshotEnabledTable_rdh | /**
* Take a snapshot of an enabled table.
*
* @param snapshot
* description of the snapshot to take.
* @throws IOException
* if the snapshot could not be started or filesystem for snapshot temporary
* directory could not be determined
*/
private synchronized void snapshotEnabledTable(SnapshotDescription snapshot) throws IOException {
// setup the snapshot
prepareWorkingDirectory(snapshot);
// Take the snapshot of the enabled table
EnabledTableSnapshotHandler handler = new EnabledTableSnapshotHandler(snapshot, master, this);
snapshotTable(snapshot, handler);
} | 3.26 |
hbase_SnapshotManager_deleteSnapshot_rdh | /**
* Delete the specified snapshot
*
* @throws SnapshotDoesNotExistException
* If the specified snapshot does not exist.
* @throws IOException
* For filesystem IOExceptions
*/
public void deleteSnapshot(SnapshotDescription snapshot) throws IOException {
// check to see if it is completed
if (!isSnapshotCompleted(snapshot)) {
throw new SnapshotDoesNotExistException(ProtobufUtil.createSnapshotDesc(snapshot));
}
String snapshotName = snapshot.getName();
// first create the snapshot description and check to see if it exists
FileSystem fs = master.getMasterFileSystem().getFileSystem();
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
    // Get snapshot info from file system. The one passed as parameter is a "fake" snapshotInfo with
    // just the "name" and it does not contain the "real" snapshot information
    snapshot = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
    // call coproc pre hook
    MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost();
    SnapshotDescription snapshotPOJO = null;
    if (cpHost != null) {
      snapshotPOJO = ProtobufUtil.createSnapshotDesc(snapshot);
      cpHost.preDeleteSnapshot(snapshotPOJO);
    }
    LOG.debug("Deleting snapshot: " + snapshotName);
    // delete the existing snapshot
if (!fs.delete(snapshotDir, true)) {
throw new HBaseSnapshotException("Failed to delete snapshot directory: " + snapshotDir);
}
// call coproc post hook
if (cpHost != null) {
cpHost.postDeleteSnapshot(snapshotPOJO);
}
} | 3.26 |
hbase_SnapshotManager_restoreSnapshot_rdh | /**
* Restore the specified snapshot. The restore will fail if the destination table has a snapshot
* or restore in progress.
*
* @param snapshot
* Snapshot Descriptor
* @param tableDescriptor
* Table Descriptor
* @param nonceKey
* unique identifier to prevent duplicated RPC
* @param restoreAcl
* true to restore acl of snapshot
* @return procId the ID of the restore snapshot procedure
*/
private synchronized long restoreSnapshot(final SnapshotDescription snapshot,
    final TableDescriptor tableDescriptor, final NonceKey nonceKey, final boolean restoreAcl)
    throws HBaseSnapshotException {
    final TableName tableName = tableDescriptor.getTableName();
// make sure we aren't running a snapshot on the same table
if (isTableTakingAnySnapshot(tableName)) {
throw new RestoreSnapshotException("Snapshot in progress on the restore table=" + tableName);
}
// make sure we aren't running a restore on the same table
if (isRestoringTable(tableName)) {
throw new RestoreSnapshotException("Restore already in progress on the table=" + tableName);
}
    try {
      long procId = master.getMasterProcedureExecutor().submitProcedure(
        new RestoreSnapshotProcedure(master.getMasterProcedureExecutor().getEnvironment(),
          tableDescriptor, snapshot, restoreAcl),
        nonceKey);
      this.restoreTableToProcIdMap.put(tableName, procId);
      return procId;
    } catch (Exception e) {
      String msg = ("Couldn't restore the snapshot="
        + ClientSnapshotDescriptionUtils.toString(snapshot)) + " on table=" + tableName;
      LOG.error(msg, e);
      throw new RestoreSnapshotException(msg, e);
    }
  } | 3.26 |
hbase_SnapshotManager_sanityCheckBeforeSnapshot_rdh | /**
* Check if the snapshot can be taken. Currently we have some limitations, for zk-coordinated
* snapshot, we don't allow snapshot with same name or taking multiple snapshots of a table at the
* same time, for procedure-coordinated snapshot, we don't allow snapshot with same name.
*
* @param snapshot
* description of the snapshot being checked.
* @param checkTable
* check if the table is already taking a snapshot. For zk-coordinated snapshot,
* we need to check if another zk-coordinated snapshot is in progress, for the
* snapshot procedure, this is unnecessary.
* @return the table descriptor of the table
*/
private synchronized TableDescriptor sanityCheckBeforeSnapshot(SnapshotDescription snapshot, boolean checkTable) throws IOException {
// check to see if we already completed the snapshot
if (isSnapshotCompleted(snapshot)) {
throw new SnapshotExistsException(("Snapshot '" + snapshot.getName()) + "' already stored on the filesystem.", ProtobufUtil.createSnapshotDesc(snapshot));
}
LOG.debug("No existing snapshot, attempting snapshot...");
// stop tracking "abandoned" handlers
cleanupSentinels();
TableName snapshotTable = TableName.valueOf(snapshot.getTable());
// make sure we aren't already running a snapshot
if (isTakingSnapshot(snapshot, checkTable)) {
throw new SnapshotCreationException((("Rejected taking " + ClientSnapshotDescriptionUtils.toString(snapshot)) + " because we are already running another snapshot")
+ " on the same table or with the same name");
}
// make sure we aren't running a restore on the same table
    if (isRestoringTable(snapshotTable)) {
      throw new SnapshotCreationException(
        ("Rejected taking " + ClientSnapshotDescriptionUtils.toString(snapshot))
          + " because we already have a restore in progress on the same snapshot.");
    }
    // check to see if the table exists
    TableDescriptor desc = null;
    try {
desc = master.getTableDescriptors().get(TableName.valueOf(snapshot.getTable()));
} catch (FileNotFoundException e) {
String msg = ("Table:" + snapshot.getTable()) + " info doesn't exist!";
LOG.error(msg);
throw new SnapshotCreationException(msg, e, ProtobufUtil.createSnapshotDesc(snapshot));
} catch (IOException e) {
throw new SnapshotCreationException("Error while geting table description for table " + snapshot.getTable(), e, ProtobufUtil.createSnapshotDesc(snapshot));
}
if (desc == null) {
throw new SnapshotCreationException(("Table '" + snapshot.getTable()) + "' doesn't exist, can't take snapshot.", ProtobufUtil.createSnapshotDesc(snapshot));
}
return desc;
} | 3.26 |
hbase_SnapshotManager_checkSnapshotSupport_rdh | /**
* Called at startup, to verify if snapshot operation is supported, and to avoid starting the
 * master if there are snapshots present but the cleaners needed are missing. Otherwise we can end
* up with snapshot data loss.
*
* @param conf
* The {@link Configuration} object to use
* @param mfs
* The MasterFileSystem to use
* @throws IOException
* in case of file-system operation failure
 * @throws UnsupportedOperationException
 * in case cleaners are missing and there are snapshots in the
 * system
 */
  private void checkSnapshotSupport(final Configuration conf, final MasterFileSystem mfs)
    throws IOException, UnsupportedOperationException {
// Verify if snapshot is disabled by the user
String enabled = conf.get(HBASE_SNAPSHOT_ENABLED);
boolean snapshotEnabled = conf.getBoolean(HBASE_SNAPSHOT_ENABLED, false);
boolean userDisabled = ((enabled != null) && (enabled.trim().length() > 0)) && (!snapshotEnabled);
// Extract cleaners from conf
Set<String> hfileCleaners = new HashSet<>();
    String[] cleaners = conf.getStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
    if (cleaners != null) {
      Collections.addAll(hfileCleaners, cleaners);
    }
    Set<String> logCleaners = new HashSet<>();
    cleaners = conf.getStrings(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS);
    if (cleaners != null) {
      Collections.addAll(logCleaners, cleaners);
    }
// check if an older version of snapshot directory was present
Path oldSnapshotDir = new Path(mfs.getRootDir(), HConstants.OLD_SNAPSHOT_DIR_NAME);
FileSystem fs = mfs.getFileSystem();
List<SnapshotDescription> ss = getCompletedSnapshots(new Path(rootDir, oldSnapshotDir), false);
if ((ss != null) && (!ss.isEmpty())) {
LOG.error("Snapshots from an earlier release were found under: " + oldSnapshotDir);
LOG.error("Please rename the directory as " + HConstants.SNAPSHOT_DIR_NAME);
}
// If the user has enabled the snapshot, we force the cleaners to be present
// otherwise we still need to check if cleaners are enabled or not and verify
// that there're no snapshot in the .snapshot folder.
if (snapshotEnabled) {
// Inject snapshot cleaners, if snapshot.enable is true
hfileCleaners.add(SnapshotHFileCleaner.class.getName());
hfileCleaners.add(HFileLinkCleaner.class.getName());
// If sync acl to HDFS feature is enabled, then inject the cleaner
if (SnapshotScannerHDFSAclHelper.isAclSyncToHdfsEnabled(conf)) {
hfileCleaners.add(SnapshotScannerHDFSAclCleaner.class.getName());
}
// Set cleaners conf
conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, hfileCleaners.toArray(new String[hfileCleaners.size()]));
conf.setStrings(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS, logCleaners.toArray(new String[logCleaners.size()]));
} else {
// There may be restore tables if snapshot is enabled and then disabled, so add
// HFileLinkCleaner, see HBASE-26670 for more details.
hfileCleaners.add(HFileLinkCleaner.class.getName());
conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, hfileCleaners.toArray(new String[hfileCleaners.size()]));
// Verify if SnapshotHFileCleaner are present
snapshotEnabled = hfileCleaners.contains(SnapshotHFileCleaner.class.getName());
// Warn if the cleaners are enabled but the snapshot.enabled property is false/not set.
if (snapshotEnabled) {
LOG.warn(((("Snapshot log and hfile cleaners are present in the configuration, " + "but the '")
+ HBASE_SNAPSHOT_ENABLED)
+ "' property ") + (userDisabled ? "is set to 'false'." : "is not set."));
}
}
// Mark snapshot feature as enabled if cleaners are present and user has not disabled it.
this.isSnapshotSupported = snapshotEnabled && (!userDisabled);
// If cleaners are not enabled, verify that there're no snapshot in the .snapshot folder
// otherwise we end up with snapshot data loss.
    if (!snapshotEnabled) {
      LOG.info("Snapshot feature is not enabled, missing log and hfile cleaners.");
Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(mfs.getRootDir());
if (fs.exists(snapshotDir)) {
FileStatus[] snapshots = CommonFSUtils.listStatus(fs, snapshotDir, new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
if (snapshots != null) {
LOG.error("Snapshots are present, but cleaners are not enabled.");
checkSnapshotSupport();
}
}
}
} | 3.26 |
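A hedged configuration sketch showing the knob this startup check reads; the key strings are assumed to match the constants referenced above:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SnapshotConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // HBASE_SNAPSHOT_ENABLED; when true, the snapshot cleaners are injected.
    conf.setBoolean("hbase.snapshot.enabled", true);
    System.out.println(conf.get("hbase.master.hfilecleaner.plugins"));
  }
}
```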
hbase_SnapshotManager_cleanupCompletedRestoreInMap_rdh | /**
* Remove the procedures that are marked as finished
*/
private synchronized void cleanupCompletedRestoreInMap() {
    ProcedureExecutor<MasterProcedureEnv> procExec = master.getMasterProcedureExecutor();
    Iterator<Map.Entry<TableName, Long>> it = restoreTableToProcIdMap.entrySet().iterator();
    while (it.hasNext()) {
      Map.Entry<TableName, Long> entry = it.next();
      Long procId = entry.getValue();
if (procExec.isRunning() && procExec.isFinished(procId)) {
it.remove();
}
}
} | 3.26 |
hbase_SnapshotManager_isSnapshotDone_rdh | /**
* Check if the specified snapshot is done
*
* @return true if snapshot is ready to be restored, false if it is still being taken.
* @throws IOException
* IOException if error from HDFS or RPC
* @throws UnknownSnapshotException
* if snapshot is invalid or does not exist.
*/
public boolean isSnapshotDone(SnapshotDescription expected) throws IOException {
// check the request to make sure it has a snapshot
if (expected == null) {
throw new UnknownSnapshotException("No snapshot name passed in request, can't figure out which snapshot you want to check.");
}
Long procId = snapshotToProcIdMap.get(expected);
if (procId != null) {
if (master.getMasterProcedureExecutor().isRunning()) {
return master.getMasterProcedureExecutor().isFinished(procId);
} else {
return false;
}
}
String ssString = ClientSnapshotDescriptionUtils.toString(expected);
// check to see if the sentinel exists,
// and if the task is complete removes it from the in-progress snapshots map.
SnapshotSentinel handler = removeSentinelIfFinished(this.snapshotHandlers, expected);
// stop tracking "abandoned" handlers
cleanupSentinels();
if (handler == null) {
// If there's no handler in the in-progress map, it means one of the following:
// - someone has already requested the snapshot state
// - the requested snapshot was completed long time ago (cleanupSentinels() timeout)
// - the snapshot was never requested
// In those cases returns to the user the "done state" if the snapshots exists on disk,
// otherwise raise an exception saying that the snapshot is not running and doesn't exist.
if (!isSnapshotCompleted(expected)) {
throw new UnknownSnapshotException(("Snapshot " + ssString) + " is not currently running or one of the known completed snapshots.");
}
// was done, return true;
return true;
}
// pass on any failure we find in the sentinel
try {
handler.rethrowExceptionIfFailed();
} catch (ForeignException e) {
// Give some procedure info on an exception.
      String status;
      Procedure p = coordinator.getProcedure(expected.getName());
      if (p != null) {
        status = p.getStatus();
      } else {
        status = (expected.getName() + " not found in proclist ") + coordinator.getProcedureNames();
}
throw new HBaseSnapshotException((("Snapshot " + ssString) + " had an error. ") + status, e, ProtobufUtil.createSnapshotDesc(expected));
}
// check to see if we are done
if (handler.isFinished()) {
LOG.debug(("Snapshot '" + ssString) + "' has completed, notifying client.");
return true;
} else if (LOG.isDebugEnabled()) {
LOG.debug(("Snapshoting '" + ssString) + "' is still in progress!");
}
return false;
} | 3.26 |
hbase_SnapshotManager_cloneSnapshot_rdh | /**
* Clone the specified snapshot into a new table. The operation will fail if the destination table
* has a snapshot or restore in progress.
*
* @param snapshot
* Snapshot Descriptor
* @param tableDescriptor
* Table Descriptor of the table to create
* @param nonceKey
* unique identifier to prevent duplicated RPC
* @return procId the ID of the clone snapshot procedure
*/
synchronized long cloneSnapshot(final SnapshotDescription snapshot,
    final TableDescriptor tableDescriptor, final NonceKey nonceKey, final boolean restoreAcl,
    final String customSFT) throws HBaseSnapshotException {
    TableName tableName = tableDescriptor.getTableName();
    // make sure we aren't running a snapshot on the same table
    if (isTableTakingAnySnapshot(tableName)) {
      throw new RestoreSnapshotException("Snapshot in progress on the restore table=" + tableName);
    }
    // make sure we aren't running a restore on the same table
    if (isRestoringTable(tableName)) {
      throw new RestoreSnapshotException("Restore already in progress on the table=" + tableName);
    }
    try {
      long procId = master.getMasterProcedureExecutor().submitProcedure(
        new CloneSnapshotProcedure(master.getMasterProcedureExecutor().getEnvironment(),
          tableDescriptor, snapshot, restoreAcl, customSFT),
        nonceKey);
      this.restoreTableToProcIdMap.put(tableName, procId);
      return procId;
    } catch (Exception e) {
      String msg = ("Couldn't clone the snapshot="
        + ClientSnapshotDescriptionUtils.toString(snapshot)) + " on table=" + tableName;
      LOG.error(msg, e);
      throw new RestoreSnapshotException(msg, e);
    }
} | 3.26 |
hbase_SnapshotManager_cleanupSentinels_rdh | /**
* Remove the sentinels that are marked as finished and the completion time has exceeded the
* removal timeout.
*
* @param sentinels
* map of sentinels to clean
*/
private synchronized void cleanupSentinels(final Map<TableName, SnapshotSentinel> sentinels) {
long currentTime = EnvironmentEdgeManager.currentTime();
long sentinelsCleanupTimeoutMillis = master.getConfiguration().getLong(HBASE_SNAPSHOT_SENTINELS_CLEANUP_TIMEOUT_MILLIS, SNAPSHOT_SENTINELS_CLEANUP_TIMEOUT_MILLS_DEFAULT);
Iterator<Map.Entry<TableName, SnapshotSentinel>> it = sentinels.entrySet().iterator();
while (it.hasNext()) {
      Map.Entry<TableName, SnapshotSentinel> entry = it.next();
      SnapshotSentinel sentinel = entry.getValue();
if (sentinel.isFinished() && ((currentTime - sentinel.getCompletionTimestamp()) > sentinelsCleanupTimeoutMillis)) {
it.remove();
}
}
} | 3.26 |
hbase_SnapshotManager_snapshotDisabledTable_rdh | /**
* Take a snapshot of a disabled table.
*
* @param snapshot
* description of the snapshot to take. Modified to be {@link Type#DISABLED}.
* @throws IOException
* if the snapshot could not be started or filesystem for snapshot temporary
* directory could not be determined
*/
private synchronized void snapshotDisabledTable(SnapshotDescription snapshot) throws IOException {
// setup the snapshot
prepareWorkingDirectory(snapshot);
// set the snapshot to be a disabled snapshot, since the client doesn't know about that
snapshot = snapshot.toBuilder().setType(Type.DISABLED).build();
// Take the snapshot of the disabled table
DisabledTableSnapshotHandler handler = new DisabledTableSnapshotHandler(snapshot, master, this);
snapshotTable(snapshot, handler);
} | 3.26 |
hbase_SnapshotManager_isRestoringTable_rdh | /**
* Verify if the restore of the specified table is in progress.
*
* @param tableName
* table under restore
* @return <tt>true</tt> if there is a restore in progress of the specified table.
*/
private synchronized boolean isRestoringTable(final TableName tableName) {
Long procId = this.restoreTableToProcIdMap.get(tableName);
if (procId == null) {
return false;
}
ProcedureExecutor<MasterProcedureEnv> procExec = master.getMasterProcedureExecutor();
if (procExec.isRunning() && (!procExec.isFinished(procId))) {
return true;
    } else {
      this.restoreTableToProcIdMap.remove(tableName);
return false;
}
} | 3.26 |
hbase_SnapshotManager_restoreOrCloneSnapshot_rdh | /**
* Restore or Clone the specified snapshot
*
* @param nonceKey
* unique identifier to prevent duplicated RPC
*/
public long restoreOrCloneSnapshot(final SnapshotDescription reqSnapshot, final NonceKey nonceKey, final boolean restoreAcl, String customSFT) throws IOException {
FileSystem fs = master.getMasterFileSystem().getFileSystem();
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(reqSnapshot, rootDir);
// check if the snapshot exists
if (!fs.exists(snapshotDir)) {
LOG.error(("A Snapshot named '" + reqSnapshot.getName()) + "' does not exist.");throw new SnapshotDoesNotExistException(ProtobufUtil.createSnapshotDesc(reqSnapshot));
}
// Get snapshot info from file system. The reqSnapshot is a "fake" snapshotInfo with
// just the snapshot "name" and table name to restore. It does not contains the "real" snapshot
// information.
SnapshotDescription snapshot = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
SnapshotManifest manifest = SnapshotManifest.open(master.getConfiguration(), fs, snapshotDir, snapshot);
TableDescriptor snapshotTableDesc = manifest.getTableDescriptor();
    TableName tableName = TableName.valueOf(reqSnapshot.getTable());
// sanity check the new table descriptor
TableDescriptorChecker.sanityCheck(master.getConfiguration(), snapshotTableDesc);
// stop tracking "abandoned" handlers
cleanupSentinels();
// Verify snapshot validity
SnapshotReferenceUtil.verifySnapshot(master.getConfiguration(), fs, manifest);
// Execute the restore/clone operation
long procId;
if (master.getTableDescriptors().exists(tableName)) {
      procId =
        restoreSnapshot(reqSnapshot, tableName, snapshot, snapshotTableDesc, nonceKey, restoreAcl);
} else {
procId = cloneSnapshot(reqSnapshot, tableName, snapshot, snapshotTableDesc, nonceKey, restoreAcl, customSFT);
}
return procId;
} | 3.26 |
hbase_SnapshotManager_removeSentinelIfFinished_rdh | /**
* Return the handler if it is currently live and has the same snapshot target name. The handler
* is removed from the sentinels map if completed.
*
* @param sentinels
* live handlers
* @param snapshot
* snapshot description
* @return null if doesn't match, else a live handler.
*/
private synchronized SnapshotSentinel removeSentinelIfFinished(final Map<TableName, SnapshotSentinel> sentinels, final SnapshotDescription snapshot) {
if (!snapshot.hasTable()) {
return null;
}
TableName snapshotTable = TableName.valueOf(snapshot.getTable());
SnapshotSentinel h = sentinels.get(snapshotTable);
if (h == null) {
return null;
}
    if (!h.getSnapshot().getName().equals(snapshot.getName())) {
      // the specified snapshot is not the one currently running
      return null;
    }
}
// Remove from the "in-progress" list once completed
if (h.isFinished()) {
sentinels.remove(snapshotTable);
}
return h;
} | 3.26 |
hbase_SnapshotManager_prepareWorkingDirectory_rdh | /**
* Check to make sure that we are OK to run the passed snapshot. Checks to make sure that we
* aren't already running a snapshot or restore on the requested table.
*
* @param snapshot
* description of the snapshot we want to start
* @throws HBaseSnapshotException
* if the filesystem could not be prepared to start the snapshot
*/
public synchronized void prepareWorkingDirectory(SnapshotDescription snapshot) throws HBaseSnapshotException {
Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir, master.getConfiguration());
try {
FileSystem workingDirFS = workingDir.getFileSystem(master.getConfiguration());
// delete the working directory, since we aren't running the snapshot. Likely leftovers
// from a failed attempt.
workingDirFS.delete(workingDir, true);
// recreate the working directory for the snapshot
if (!workingDirFS.mkdirs(workingDir)) {
throw new SnapshotCreationException(("Couldn't create working directory (" + workingDir) + ") for snapshot", ProtobufUtil.createSnapshotDesc(snapshot));
}
updateWorkingDirAclsIfRequired(workingDir, workingDirFS);
} catch (HBaseSnapshotException e) {
throw e;
} catch (IOException e) {
throw new SnapshotCreationException("Exception while checking to see if snapshot could be started.", e, ProtobufUtil.createSnapshotDesc(snapshot));
}
}
/**
* If the parent dir of the snapshot working dir (e.g. /hbase/.hbase-snapshot) has non-empty ACLs,
* use them for the current working dir (e.g. /hbase/.hbase-snapshot/.tmp/{snapshot-name}).
*/ | 3.26
hbase_SnapshotManager_cleanupCompletedSnapshotInMap_rdh | /**
* Remove the procedures that are marked as finished
*/
private synchronized void cleanupCompletedSnapshotInMap() {
ProcedureExecutor<MasterProcedureEnv> procExec = master.getMasterProcedureExecutor();
Iterator<Map.Entry<SnapshotDescription, Long>> it = snapshotToProcIdMap.entrySet().iterator();
while (it.hasNext()) {
Map.Entry<SnapshotDescription, Long> entry = it.next();
Long procId = entry.getValue();
if (procExec.isRunning() && procExec.isFinished(procId)) {
it.remove();
}
}
} | 3.26 |
hbase_SnapshotManager_isTakingSnapshot_rdh | /**
* Check to see if the specified table has a snapshot in progress. Since the introduction of the
* SnapshotProcedure this is a little different from before: for zk-coordinated snapshots we only
* need to consider tables in snapshotHandlers, but for
* {@link org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure} and
* {@link org.apache.hadoop.hbase.master.assignment.SplitTableRegionProcedure} we also need to
* consider tables in snapshotToProcIdMap; for the snapshot procedure itself we don't need to
* check whether the table is being snapshotted.
*
* @param tableName
* name of the table being snapshotted.
* @param checkProcedure
* true if we should check tables in snapshotToProcIdMap
* @return <tt>true</tt> if there is a snapshot in progress on the specified table.
*/
private synchronized boolean isTakingSnapshot(TableName tableName, boolean checkProcedure) {
SnapshotSentinel handler = this.snapshotHandlers.get(tableName);
if ((handler != null) && (!handler.isFinished())) {
return true;
}
if (checkProcedure) {
for (Map.Entry<SnapshotDescription, Long> entry : snapshotToProcIdMap.entrySet()) {
if (TableName.valueOf(entry.getKey().getTable()).equals(tableName) && (!master.getMasterProcedureExecutor().isFinished(entry.getValue()))) {
return true;
}
}
}
return false;
} | 3.26
hbase_SnapshotManager_getCompletedSnapshots_rdh | /**
* Gets the list of all completed snapshots.
*
* @param snapshotDir
* snapshot directory
* @param withCpCall
* Whether to call CP hooks
* @return list of SnapshotDescriptions
* @throws IOException
* File system exception
*/
private List<SnapshotDescription> getCompletedSnapshots(Path snapshotDir, boolean withCpCall) throws IOException {
List<SnapshotDescription> snapshotDescs = new ArrayList<>();
// first create the snapshot root path and check to see if it exists
FileSystem fs = master.getMasterFileSystem().getFileSystem();
if (snapshotDir == null) {
snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
}
// if there are no snapshots, return an empty list
if (!fs.exists(snapshotDir)) {
return snapshotDescs;
}
// ignore all the snapshots in progress
FileStatus[] snapshots = fs.listStatus(snapshotDir, new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost();
withCpCall = withCpCall && (cpHost != null);
// loop through all the completed snapshots
for (FileStatus snapshot : snapshots) {
Path info = new Path(snapshot.getPath(), SnapshotDescriptionUtils.SNAPSHOTINFO_FILE);
// if the snapshot is bad
if (!fs.exists(info)) {
LOG.error(("Snapshot information for " + snapshot.getPath()) + " doesn't exist");
continue;
}
FSDataInputStream in = null;
try {
in = fs.open(info);
SnapshotDescription desc = SnapshotDescription.parseFrom(in);
SnapshotDescription descPOJO = (withCpCall) ? ProtobufUtil.createSnapshotDesc(desc) : null;
if (withCpCall) {
try {
cpHost.preListSnapshot(descPOJO);
} catch (AccessDeniedException e) {
LOG.warn((("Current user does not have access to " + desc.getName()) + " snapshot. ") + "Either you should be owner of this snapshot or admin user.");
// Skip this and try for next snapshot
continue;
}
}
snapshotDescs.add(desc);
// call coproc post hook
if (withCpCall) {
cpHost.postListSnapshot(descPOJO);
}
} catch (IOException e) {
LOG.warn("Found a corrupted snapshot " + snapshot.getPath(), e);
} finally {
if (in != null) {
in.close();
}
}
}
return snapshotDescs;
} | 3.26 |
hbase_SnapshotManager_getCoordinator_rdh | /**
* Returns distributed commit coordinator for all running snapshots
*/
ProcedureCoordinator getCoordinator() {
return coordinator;
}
/**
* Check to see if the snapshot is one of the currently completed snapshots. Returns true if the
* snapshot exists in the "completed snapshots folder".
*
* @param snapshot
* expected snapshot to check
* @return <tt>true</tt> if the snapshot is stored on the {@link FileSystem}
*/ | 3.26
hbase_SnapshotManager_isTakingAnySnapshot_rdh | /**
* The snapshot operation proceeds as follows: <br>
* 1. Create a snapshot handler and do some initialization; <br>
* 2. Put the handler into snapshotHandlers. <br>
* So when deciding whether any snapshot is being taken, we should consider the takingSnapshotLock,
* snapshotHandlers and snapshotToProcIdMap.
*
* @return true to indicate that there are some running snapshots.
*/
public synchronized boolean isTakingAnySnapshot() {
return ((this.takingSnapshotLock.getReadHoldCount() > 0) || (this.snapshotHandlers.size() > 0)) || (this.snapshotToProcIdMap.size() > 0);
} | 3.26 |
hbase_SnapshotManager_setSnapshotHandlerForTesting_rdh | /**
* Set the handler for the current snapshot
* <p>
* Exposed for TESTING
*
* @param handler
* handler the master should use. TODO: get rid of this if possible; repackage and
* modify tests.
*/
public synchronized void setSnapshotHandlerForTesting(final TableName tableName, final SnapshotSentinel handler) {
if (handler != null) {
this.snapshotHandlers.put(tableName, handler);
} else {
this.snapshotHandlers.remove(tableName);
}
} | 3.26 |
hbase_SnapshotManager_snapshotTable_rdh | /**
* Take a snapshot using the specified handler. On failure the snapshot's temporary working
* directory is removed. NOTE: prepareToTakeSnapshot(), called before this one, takes care of
* rejecting the snapshot request if the table is busy with another snapshot/restore operation.
*
* @param snapshot
* the snapshot description
* @param handler
* the snapshot handler
*/
private synchronized void snapshotTable(SnapshotDescription snapshot, final TakeSnapshotHandler handler) throws IOException {
try {
handler.prepare();
this.f0.submit(handler);
this.snapshotHandlers.put(TableName.valueOf(snapshot.getTable()), handler);
} catch (Exception e) {
// cleanup the working directory by trying to delete it from the fs.
Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir, master.getConfiguration());
FileSystem workingDirFs = workingDir.getFileSystem(master.getConfiguration());
try {
if (!workingDirFs.delete(workingDir, true)) {
LOG.error((("Couldn't delete working directory (" + workingDir) + " for snapshot:") + ClientSnapshotDescriptionUtils.toString(snapshot));}
} catch (IOException e1) {
LOG.error((("Couldn't delete working directory (" + workingDir) + " for snapshot:") + ClientSnapshotDescriptionUtils.toString(snapshot));
}
// fail the snapshot
throw new SnapshotCreationException("Could not build snapshot handler", e, ProtobufUtil.createSnapshotDesc(snapshot));
}
} | 3.26 |
hbase_SnapshotManager_takeSnapshot_rdh | /**
* Take a snapshot based on the enabled/disabled state of the table.
*
* @throws HBaseSnapshotException
* when a snapshot specific exception occurs.
* @throws IOException
* when some sort of generic IO exception occurs.
*/
public void takeSnapshot(SnapshotDescription snapshot) throws IOException {
this.takingSnapshotLock.readLock().lock();
try {
takeSnapshotInternal(snapshot);
} finally {
this.takingSnapshotLock.readLock().unlock();
}
} | 3.26 |
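A minimal client-side sketch of what drives this path, assuming the standard Admin#snapshot API; the names used here are hypothetical.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeSnapshotExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the snapshot completes or fails; on the master side the
      // read lock of takingSnapshotLock is held for the duration, as above.
      admin.snapshot("myTable-snap-1", TableName.valueOf("myTable"));
    }
  }
}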
hbase_SnapshotManager_stop_rdh | //
// Implementing Stoppable interface
//
@Override
public void stop(String why) {
// short circuit
if (this.stopped) {
return;
}
// make sure we get stopped
this.stopped = true;
// pass the stop onto take snapshot handlers
for (SnapshotSentinel snapshotHandler : this.snapshotHandlers.values()) {
snapshotHandler.cancel(why);
}
if (snapshotHandlerChoreCleanerTask != null) {
snapshotHandlerChoreCleanerTask.cancel(true);
}
try {
if (coordinator != null) {
coordinator.close();
}
} catch (IOException e) {
LOG.error("stop ProcedureCoordinator error", e);
}
} | 3.26 |
hbase_SnapshotManager_resetTempDir_rdh | /**
* Cleans up any zk-coordinated snapshots in the snapshot/.tmp directory that were left from
* failed snapshot attempts. For unfinished procedure2-coordinated snapshots, keep the working
* directory.
*
* @throws IOException
* if we can't reach the filesystem
*/
private void resetTempDir() throws IOException {
Set<String> workingProcedureCoordinatedSnapshotNames = snapshotToProcIdMap.keySet().stream().map(s -> s.getName()).collect(Collectors.toSet());
Path tmpdir = SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir, master.getConfiguration());
FileSystem tmpFs = tmpdir.getFileSystem(master.getConfiguration());
FileStatus[] workingSnapshotDirs = CommonFSUtils.listStatus(tmpFs, tmpdir);
if (workingSnapshotDirs == null) {
return;
}
for (FileStatus workingSnapshotDir : workingSnapshotDirs) {
String workingSnapshotName = workingSnapshotDir.getPath().getName();
if (!workingProcedureCoordinatedSnapshotNames.contains(workingSnapshotName)) {
try {
if (tmpFs.delete(workingSnapshotDir.getPath(), true)) {
LOG.info("Deleted unfinished zk-coordinated snapshot working directory {}", workingSnapshotDir.getPath());
} else {
LOG.warn("Couldn't delete unfinished zk-coordinated snapshot working directory {}", workingSnapshotDir.getPath());
}
} catch (IOException e) {
LOG.warn("Couldn't delete unfinished zk-coordinated snapshot working directory {}", workingSnapshotDir.getPath(), e);
}
} else {
LOG.debug("find working directory of unfinished procedure {}", workingSnapshotName);
}
}
} | 3.26 |
hbase_StreamSlowMonitor_checkProcessTimeAndSpeed_rdh | /**
* Check if the packet process time shows that the relevant datanode is a slow node.
*
* @param datanodeInfo
* the datanode that processed the packet
* @param packetDataLen
* the data length of the packet (in bytes)
* @param processTimeMs
* the process time (in ms) of the packet on the datanode
* @param lastAckTimestamp
* the last acked timestamp of the packet on another datanode
* @param unfinished
* the number of datanode replicas the packet has not yet finished flushing to
*/
public void checkProcessTimeAndSpeed(DatanodeInfo datanodeInfo, long packetDataLen, long processTimeMs, long lastAckTimestamp, int unfinished) {
long current = EnvironmentEdgeManager.currentTime();
// Here are two conditions used to determine whether a datanode is slow,
// 1. For small packet, we just have a simple time limit, without considering
// the size of the packet.
// 2. For large packet, we will calculate the speed, and check if the speed is too slow.
boolean slow = ((packetDataLen <= minLengthForSpeedCheck) && (processTimeMs > slowPacketAckMs))
|| ((packetDataLen > minLengthForSpeedCheck)
&& ((((double) (packetDataLen)) / processTimeMs) < minPacketFlushSpeedKBs));
if (slow) {
// Check if there is a large ack timestamp gap between replicas;
// this tries to avoid misjudgments caused by GC STW.
if (((lastAckTimestamp > 0) && ((current - lastAckTimestamp) > (slowPacketAckMs / 2))) || ((lastAckTimestamp <= 0) && (unfinished == 0))) {
LOG.info("Slow datanode: {}, data length={}, duration={}ms, unfinishedReplicas={}, " + "lastAckTimestamp={}, monitor name: {}", datanodeInfo, packetDataLen, processTimeMs, unfinished, lastAckTimestamp, this.name);
if (addSlowAckData(datanodeInfo, packetDataLen, processTimeMs)) {
excludeDatanodeManager.tryAddExcludeDN(datanodeInfo, "slow packet ack");
}
}
}
} | 3.26 |
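A self-contained sketch of the two slow-packet conditions above, using illustrative (not default) thresholds. Note that bytes per millisecond is numerically equal to kilobytes per second, which is why the computed speed can be compared directly against a KB/s threshold.

public class SlowPacketCheckExample {
  public static void main(String[] args) {
    long minLengthForSpeedCheck = 64 * 1024; // bytes; illustrative threshold
    long slowPacketAckMs = 6000;             // ms; illustrative threshold
    double minPacketFlushSpeedKBs = 20.0;    // KB/s, i.e. bytes/ms

    // Small packet: only the plain time limit applies.
    long packetDataLen = 1024;
    long processTimeMs = 7000;
    boolean slowSmall = packetDataLen <= minLengthForSpeedCheck
        && processTimeMs > slowPacketAckMs;
    System.out.println("small packet slow? " + slowSmall); // true: 7000ms > 6000ms

    // Large packet: the flush speed is checked instead.
    packetDataLen = 128 * 1024;
    processTimeMs = 10000;
    double speedKBs = (double) packetDataLen / processTimeMs; // ~13.1 bytes/ms = ~13.1 KB/s
    boolean slowLarge = packetDataLen > minLengthForSpeedCheck
        && speedKBs < minPacketFlushSpeedKBs;
    System.out.println("large packet slow? " + slowLarge);   // true: 13.1 < 20
  }
}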
hbase_HBCKServerCrashProcedure_getRegionsOnCrashedServer_rdh | /**
* If no Regions found in Master context, then we will search hbase:meta for references to the
* passed server. Operator may have passed ServerName because they have found references to
* 'Unknown Servers'. They are using HBCKSCP to clear them out.
*/
@Override
@SuppressWarnings(value = "NP_NULL_ON_SOME_PATH_EXCEPTION", justification = "FindBugs seems confused on ps in below.")
List<RegionInfo> getRegionsOnCrashedServer(MasterProcedureEnv env) {
// Super will return an immutable list (empty if nothing on this server).
List<RegionInfo> ris = super.getRegionsOnCrashedServer(env);
if (!ris.isEmpty()) {
return ris;
}
// Nothing found in in-master context. Check for 'Unknown Servers' in hbase:meta.
// If super list is empty, then allow that an operator scheduled an SCP because they are trying
// to purge 'Unknown Servers' -- servers that are neither online nor in dead servers
// list but that ARE in hbase:meta and so showing as unknown in places like 'HBCK Report'.
// This mis-accounting does not happen in normal circumstance but may arise in-extremis
// when cluster has been damaged in operation.
UnknownServerVisitor visitor = new UnknownServerVisitor(env.getMasterServices().getConnection(), getServerName());
try {
MetaTableAccessor.scanMetaForTableRegions(env.getMasterServices().getConnection(), visitor, null);
} catch (IOException ioe) {
LOG.warn("Failed scan of hbase:meta for 'Unknown Servers'", ioe);
return ris;
}
LOG.info("Found {} mentions of {} in hbase:meta of OPEN/OPENING Regions: {}", visitor.getReassigns().size(), getServerName(), visitor.getReassigns().stream().map(RegionInfo::getEncodedName).collect(Collectors.joining(",")));
return visitor.getReassigns();
} | 3.26 |
hbase_HBCKServerCrashProcedure_isMatchingRegionLocation_rdh | /**
* The RegionStateNode will not have a location if a confirm of an OPEN fails. On fail, the
* RegionStateNode regionLocation is set to null. This is 'looser' than the test done in the
* superclass. The HBCKSCP has been scheduled by an operator via hbck2 probably at the behest of a
* report of an 'Unknown Server' in the 'HBCK Report'. Let the operator's operation succeed even in
* the case where the region location in the RegionStateNode is null.
*/
@Override
protected boolean isMatchingRegionLocation(RegionStateNode rsn) {
return super.isMatchingRegionLocation(rsn) || (rsn.getRegionLocation() == null);
} | 3.26 |
hbase_RegionSplitter_split2_rdh | /**
* Divide 2 numbers in half (for split algorithm)
*
* @param a
* number #1
* @param b
* number #2
* @return the midpoint of the 2 numbers
*/
public BigInteger split2(BigInteger a, BigInteger b) {
return a.add(b).divide(BigInteger.valueOf(2)).abs();
} | 3.26 |
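A quick check of the midpoint arithmetic, self-contained on java.math.BigInteger:

import java.math.BigInteger;

public class Split2Example {
  public static void main(String[] args) {
    BigInteger a = BigInteger.valueOf(100);
    BigInteger b = BigInteger.valueOf(300);
    // (a + b) / 2, then abs(), mirroring split2 above.
    BigInteger mid = a.add(b).divide(BigInteger.valueOf(2)).abs();
    System.out.println(mid); // 200
  }
}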
hbase_RegionSplitter_convertToBytes_rdh | /**
* Returns an array of bytes corresponding to an array of BigIntegers
*
* @param bigIntegers
* numbers to convert
* @return bytes corresponding to the bigIntegers
*/
public byte[][] convertToBytes(BigInteger[] bigIntegers) {
byte[][] returnBytes = new byte[bigIntegers.length][];
for (int i = 0; i < bigIntegers.length; i++) {
returnBytes[i] = convertToByte(bigIntegers[i]);
}
return returnBytes;
} | 3.26 |
hbase_RegionSplitter_convertToByte_rdh | /**
* Returns the bytes corresponding to the BigInteger
*
* @param bigInteger
* number to convert
* @return corresponding bytes
*/
public byte[] convertToByte(BigInteger bigInteger) {
return convertToByte(bigInteger, rowComparisonLength);
} | 3.26 |
hbase_RegionSplitter_getTableDirAndSplitFile_rdh | /**
* Compute the table directory and the split file path for the given table.
*
* @return A Pair where the first item is the table dir and the second is the split file.
* @throws IOException
* if a remote or network exception occurs
*/
private static Pair<Path, Path> getTableDirAndSplitFile(final Configuration conf, final TableName tableName) throws IOException {
Path hbDir = CommonFSUtils.getRootDir(conf);
Path tableDir = CommonFSUtils.getTableDir(hbDir, tableName);
Path splitFile = new Path(tableDir, "_balancedSplit");
return new Pair<>(tableDir, splitFile);
} | 3.26 |
hbase_RegionSplitter_getRegionServerCount_rdh | /**
* Alternative to getCurrentNrHRS, which is no longer available.
*
* @return Rough count of regionservers out on cluster.
* @throws IOException
* if a remote or network exception occurs
*/
private static int getRegionServerCount(final Connection connection) throws IOException {
try (Admin admin = connection.getAdmin()) {
Collection<ServerName> servers = admin.getRegionServers();
return (servers == null) || servers.isEmpty() ? 0 : servers.size();
}
} | 3.26 |
hbase_RegionSplitter_main_rdh | /**
* The main function for the RegionSplitter application. Common uses:
* <p>
* <ul>
* <li>create a table named 'myTable' with 60 pre-split regions containing 2 column families
* 'test' & 'rs', assuming the keys are hex-encoded ASCII:
* <ul>
* <li>bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -c 60 -f test:rs myTable
* HexStringSplit
* </ul>
* <li>create a table named 'myTable' with 50 pre-split regions, assuming the keys are
* decimal-encoded ASCII:
* <ul>
* <li>bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -c 50 myTable DecimalStringSplit
* </ul>
* <li>perform a rolling split of 'myTable' (i.e. 60 => 120 regions), with 2 outstanding splits at
* a time, assuming keys are uniformly distributed bytes:
* <ul>
* <li>bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -r -o 2 myTable UniformSplit
* </ul>
* </ul>
* There are three SplitAlgorithms built into RegionSplitter: HexStringSplit, DecimalStringSplit,
* and UniformSplit. These are different strategies for choosing region boundaries. See their
* source code for details. Usage: RegionSplitter <TABLE> <SPLITALGORITHM> <-c
* <# regions> -f <family:family:...> | -r [-o <# outstanding splits>]> [-D
* <conf.param=value>]
*
* @throws IOException HBase IO problem
* @throws InterruptedException user requested exit
* @throws ParseException problem parsing user input
*/
@SuppressWarnings("static-access")
public static void main(String[] args) throws IOException, InterruptedException, ParseException {
Configuration conf = HBaseConfiguration.create();
// parse user input
Options opt = new Options();
opt.addOption(OptionBuilder.withArgName("property=value").hasArg().withDescription("Override HBase Configuration Settings").create("D"));
opt.addOption(OptionBuilder.withArgName("region count").hasArg().withDescription("Create a new table with a pre-split number of regions").create("c"));opt.addOption(OptionBuilder.withArgName("family:family:...").hasArg().withDescription("Column Families to create with new table. Required with -c").create("f"));
opt.addOption("h", false, "Print this usage help");
opt.addOption("r", false, "Perform a rolling split of an existing region");
opt.addOption(OptionBuilder.withArgName("count").hasArg().withDescription("Max outstanding splits that have unfinished major compactions").create("o"));
opt.addOption(null, "firstrow", true, "First Row in Table for Split Algorithm");
opt.addOption(null, "lastrow", true, "Last Row in Table for Split Algorithm");
opt.addOption(null, "risky", false, "Skip verification steps to complete quickly. " + "STRONGLY DISCOURAGED for production systems. ");
CommandLine cmd = new GnuParser().parse(opt, args);
if (cmd.hasOption("D")) { for (String confOpt : cmd.getOptionValues("D")) {
String[] kv = confOpt.split("=", 2);
if (kv.length == 2) {
conf.set(kv[0], kv[1]);
LOG.debug((("-D configuration override: " + kv[0]) + "=") + kv[1]);
} else {
throw new ParseException("-D option format invalid: " + confOpt);
}
}
}
if (cmd.hasOption("risky")) {
conf.setBoolean("split.verify", false);
}
boolean createTable = cmd.hasOption("c") && cmd.hasOption("f");
boolean rollingSplit = cmd.hasOption("r");
boolean oneOperOnly = createTable ^ rollingSplit;
if (((2 != cmd.getArgList().size()) || (!oneOperOnly)) || cmd.hasOption("h")) {
new HelpFormatter().printHelp("bin/hbase regionsplitter <TABLE> <SPLITALGORITHM>\n"
+ "SPLITALGORITHM is the java class name of a class implementing "
+ "SplitAlgorithm, or one of the special strings HexStringSplit or "
+ "DecimalStringSplit or UniformSplit, which are built-in split algorithms. "
+ "HexStringSplit treats keys as hexadecimal ASCII, and "
+ "DecimalStringSplit treats keys as decimal ASCII, and "
+ "UniformSplit treats keys as arbitrary bytes.", opt);
return;
}
TableName tableName = TableName.valueOf(cmd.getArgs()[0]);
String splitAlgoName = cmd.getArgs()[1];
SplitAlgorithm splitAlgo = newSplitAlgoInstance(conf, splitAlgoName);
if (cmd.hasOption("firstrow")) {
splitAlgo.setFirstRow(cmd.getOptionValue("firstrow"));
}
if (cmd.hasOption("lastrow")) {
splitAlgo.setLastRow(cmd.getOptionValue("lastrow"));
}
if (createTable) {
conf.set("split.count", cmd.getOptionValue("c"));
createPresplitTable(tableName, splitAlgo, cmd.getOptionValue("f").split(":"), conf);
}
if (rollingSplit) {
if (cmd.hasOption("o")) {
conf.set("split.outstanding", cmd.getOptionValue("o"));
}
rollingSplit(tableName, splitAlgo, conf);
}
} | 3.26 |
hbase_RegionSplitter_newSplitAlgoInstance_rdh | /**
* Instantiate a SplitAlgorithm by class name; built-ins may be referenced by simple class name.
*
* @throws IOException
* if the specified SplitAlgorithm class couldn't be instantiated
*/
public static SplitAlgorithm newSplitAlgoInstance(Configuration conf, String splitClassName) throws IOException {
Class<?> splitClass;
// For split algorithms builtin to RegionSplitter, the user can specify
// their simple class name instead of a fully qualified class name.
if (splitClassName.equals(RegionSplitter.HexStringSplit.class.getSimpleName())) {
splitClass = RegionSplitter.HexStringSplit.class;
} else if (splitClassName.equals(RegionSplitter.DecimalStringSplit.class.getSimpleName())) {
splitClass = RegionSplitter.DecimalStringSplit.class;
} else if (splitClassName.equals(RegionSplitter.UniformSplit.class.getSimpleName())) {
splitClass = RegionSplitter.UniformSplit.class;
} else {
try {
splitClass = conf.getClassByName(splitClassName);
} catch (ClassNotFoundException e) {
throw new IOException("Couldn't load split class " + splitClassName, e); }
if (splitClass == null) {
throw new IOException("Failed loading split class " + splitClassName);
}
if (!RegionSplitter.SplitAlgorithm.class.isAssignableFrom(splitClass)) {
throw new IOException("Specified split class doesn't implement SplitAlgorithm");
}
}
try {
return splitClass.asSubclass(RegionSplitter.SplitAlgorithm.class).getDeclaredConstructor().newInstance();
} catch (Exception e) {
throw new IOException("Problem loading split algorithm: ", e);
}
} | 3.26 |
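A usage sketch, assuming the built-in HexStringSplit and the SplitAlgorithm#split(int) contract used elsewhere in RegionSplitter (n regions yield n-1 split points):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.RegionSplitter;

public class SplitAlgoExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The built-ins may be named by simple class name, as the method above allows.
    RegionSplitter.SplitAlgorithm algo =
        RegionSplitter.newSplitAlgoInstance(conf, "HexStringSplit");
    // 10 regions => 9 split points.
    for (byte[] split : algo.split(10)) {
      System.out.println(Bytes.toStringBinary(split));
    }
  }
}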
hbase_SnapshotScannerHDFSAclController_filterUsersToRemoveNsAccessAcl_rdh | /**
* Remove a table user's access HDFS acl from the namespace directory if the user has no
* permissions on global, the ns of the table, or other tables of the ns. E.g. Bob has 'ns1:t1'
* read permission; when deleting 'ns1:t1', if Bob has global read permission, '@ns1' read
* permission or 'ns1:other_tables' read permission, then skip removing Bob's access acl in
* ns1Dirs; otherwise, remove Bob's access acl.
*
* @param aclTable
* acl table
* @param tableName
* the name of the table
* @param tablesUsers
* table users set
* @return users whose access acl will be removed from the namespace of the table
* @throws IOException
* if an error occurred
*/
private Set<String> filterUsersToRemoveNsAccessAcl(Table aclTable, TableName tableName, Set<String> tablesUsers) throws IOException {
Set<String> removeUsers = new HashSet<>();
byte[] namespace = tableName.getNamespace();
for (String user : tablesUsers) {
List<byte[]> userEntries = SnapshotScannerHDFSAclStorage.getUserEntries(aclTable, user);
boolean remove = true;
for (byte[] entry : userEntries) {
if ((PermissionStorage.isGlobalEntry(entry) || (PermissionStorage.isNamespaceEntry(entry) && Bytes.equals(PermissionStorage.fromNamespaceEntry(entry), namespace))) || ((PermissionStorage.isTableEntry(entry) && (!Bytes.equals(tableName.getName(), entry))) && Bytes.equals(TableName.valueOf(entry).getNamespace(), namespace))) {
remove = false;
break;
}
}
if (remove) {
removeUsers.add(user);
}
}
return removeUsers;
} | 3.26 |
hbase_SnapshotScannerHDFSAclController_isHdfsAclSet_rdh | /**
* Check if the user's global/namespace/table HDFS acls are already set
*/
private boolean isHdfsAclSet(Table aclTable, String userName, String namespace, TableName tableName) throws IOException {
boolean isSet = SnapshotScannerHDFSAclStorage.hasUserGlobalHdfsAcl(aclTable, userName);
if (namespace != null) {
isSet = isSet || SnapshotScannerHDFSAclStorage.hasUserNamespaceHdfsAcl(aclTable, userName, namespace);
}
if (tableName != null) {
isSet = (isSet || SnapshotScannerHDFSAclStorage.hasUserNamespaceHdfsAcl(aclTable, userName, tableName.getNamespaceAsString())) || SnapshotScannerHDFSAclStorage.hasUserTableHdfsAcl(aclTable, userName, tableName);
}
return isSet;
} | 3.26 |
hbase_AuthFilter_getConfiguration_rdh | /**
* Returns the configuration to be used by the authentication filter to initialize the
* authentication handler. This filter retrieves all HBase configurations and passes those starting
* with REST_PREFIX to the authentication handler. It is useful to support plugging in different
* authentication handlers.
*/
@Override
protected Properties getConfiguration(String configPrefix, FilterConfig filterConfig) throws ServletException {
Properties props = super.getConfiguration(configPrefix, filterConfig);
// setting the cookie path to root '/' so it is used for all resources.
props.setProperty(AuthenticationFilter.COOKIE_PATH, "/");
Configuration conf = null;
// Dirty hack to get at the RESTServer's configuration. These should be pulled out
// of the FilterConfig.
if (RESTServer.conf != null) {
conf = RESTServer.conf;
} else {
conf = HBaseConfiguration.create();
}
for (Map.Entry<String, String> entry : conf) {
String name = entry.getKey();
if (name.startsWith(REST_PREFIX)) {
String value = entry.getValue();
if (name.equals(REST_AUTHENTICATION_PRINCIPAL)) {
try {
String machineName = Strings.domainNamePointerToHostName(DNS.getDefaultHost(conf.get(REST_DNS_INTERFACE, "default"), conf.get(REST_DNS_NAMESERVER, "default")));
value = SecurityUtil.getServerPrincipal(value, machineName);
} catch (IOException ie) {
throw new ServletException("Failed to retrieve server principal", ie);
}
}
if (LOG.isTraceEnabled()) {
LOG.trace((("Setting property " + name) + "=") + value);}
name = name.substring(REST_PREFIX_LEN);
props.setProperty(name, value);
}
}
return props;
} | 3.26 |
hbase_AccessControlUtil_buildGrantRequest_rdh | /**
* Create a request to grant user global permissions.
*
* @param username
* the short name of the user to grant permissions
* @param mergeExistingPermissions
* whether to merge the new permissions with the existing ones
* @param actions
* the permissions to be granted
* @return A {@link AccessControlProtos} GrantRequest
*/
public static GrantRequest buildGrantRequest(String username, boolean mergeExistingPermissions, AccessControlProtos.Permission.Action... actions) {
AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder();
AccessControlProtos.GlobalPermission.Builder permissionBuilder = AccessControlProtos.GlobalPermission.newBuilder();
for (AccessControlProtos.Permission.Action a : actions) {
permissionBuilder.addAction(a);
}
ret.setType(Type.Global).setGlobalPermission(permissionBuilder);
return AccessControlProtos.GrantRequest.newBuilder().setUserPermission(AccessControlProtos.UserPermission.newBuilder().setUser(ByteString.copyFromUtf8(username)).setPermission(ret)).setMergeExistingPermissions(mergeExistingPermissions).build();
} | 3.26 |
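A minimal construction sketch matching the (corrected) signature above; the user name is hypothetical and the import path for the generated protos is an assumption:

import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
import org.apache.hadoop.hbase.security.access.AccessControlUtil;

public class GrantRequestExample {
  public static void main(String[] args) {
    // Global READ+WRITE grant for user "bob", merging with existing permissions.
    AccessControlProtos.GrantRequest req = AccessControlUtil.buildGrantRequest(
        "bob", true,
        AccessControlProtos.Permission.Action.READ,
        AccessControlProtos.Permission.Action.WRITE);
    System.out.println(req);
  }
}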
hbase_AccessControlUtil_m1_rdh | /**
* Converts a list of Permission.Action proto to an array of client Permission.Action objects.
*
* @param protoActions
* the list of protobuf Actions
* @return the converted array of Actions
*/
public static Permission.Action[] m1(List<AccessControlProtos.Permission.Action> protoActions) {
Permission.Action[] actions = new Permission.Action[protoActions.size()];
for (int i = 0; i < protoActions.size(); i++) {
actions[i] = toPermissionAction(protoActions.get(i));
}
return actions;
} | 3.26 |
hbase_AccessControlUtil_revoke_rdh | /**
* A utility used to revoke a user's namespace permissions.
* <p>
* It's also called by the shell, in case you want to find references.
*
* @param controller
* RpcController
* @param protocol
* the AccessControlService protocol proxy
* @param userShortName
* the short name of the user to revoke permissions
* @param namespace
* optional namespace name
* @param actions
* the permissions to be revoked
* @throws ServiceException
* on failure
* @deprecated Use {@link Admin#revoke(UserPermission)} instead.
*/
@Deprecated
public static void revoke(RpcController controller, AccessControlService.BlockingInterface protocol, String userShortName, String namespace, Permission.Action... actions) throws ServiceException {
List<AccessControlProtos.Permission.Action> permActions = Lists.newArrayListWithCapacity(actions.length);
for (Permission.Action a : actions) {
permActions.add(toPermissionAction(a));
}
AccessControlProtos.RevokeRequest request = buildRevokeRequest(userShortName, namespace, permActions.toArray(new AccessControlProtos.Permission.Action[actions.length]));
protocol.revoke(controller, request);
} | 3.26 |
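Since this method is deprecated, here is a hedged sketch of the replacement path via Admin#revoke(UserPermission), assuming the HBase 2.2+ Permission builder API; the names are hypothetical:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class RevokeNamespaceExample {
  // Revoke a user's READ permission on a namespace.
  static void revokeNamespaceRead(Admin admin, String user, String namespace)
      throws IOException {
    Permission perm = Permission.newBuilder(namespace)
        .withActions(Permission.Action.READ).build();
    admin.revoke(new UserPermission(user, perm));
  }
}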
hbase_AccessControlUtil_toTablePermission_rdh | /**
* Converts a TablePermission proto to a client TablePermission object.
*
* @param proto
* the protobuf TablePermission
* @return the converted TablePermission
*/
public static TablePermission toTablePermission(AccessControlProtos.TablePermission proto) {
Permission.Action[] actions = m1(proto.getActionList());
if (!proto.hasTableName()) {
throw new IllegalStateException("TableName cannot be empty");
}
TableName table = ProtobufUtil.toTableName(proto.getTableName());
byte[] family = null;
byte[] qualifier = null;
if (proto.hasFamily()) {
family = proto.getFamily().toByteArray();
}
if (proto.hasQualifier()) {
qualifier = proto.getQualifier().toByteArray();
}
return new TablePermission(table, family, qualifier, actions);
} | 3.26 |
hbase_AccessControlUtil_toPermissionAction_rdh | /**
* Convert a client Permission.Action to a Permission.Action proto
*
* @param action
* the client Action
* @return the protobuf Action
*/
public static Action toPermissionAction(Permission.Action action) {
switch (action) {
case READ :
return Action.READ;
case WRITE :
return Action.WRITE;
case EXEC :
return Action.EXEC;
case CREATE :
return Action.CREATE;
case ADMIN :
return Action.ADMIN;
}
throw new IllegalArgumentException("Unknown action value " + action.name()); } | 3.26 |
hbase_AccessControlUtil_getUserPermissions_rdh | /**
* A utility used to get permissions for selected namespace based on the specified user name.
*
* @param controller
* RpcController
* @param protocol
* the AccessControlService protocol proxy
* @param namespace
* name of the namespace
* @param userName
* User name; if empty then all user permissions will be retrieved.
* @return the list of user permissions
* @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
*/
@Deprecated
public static List<UserPermission> getUserPermissions(RpcController controller, AccessControlService.BlockingInterface protocol, byte[] namespace, String userName) throws ServiceException {
AccessControlProtos.GetUserPermissionsRequest.Builder builder = AccessControlProtos.GetUserPermissionsRequest.newBuilder();
if (namespace != null) {
builder.setNamespaceName(UnsafeByteOperations.unsafeWrap(namespace));
}
if (!StringUtils.isEmpty(userName)) {
builder.setUserName(ByteString.copyFromUtf8(userName));
}
builder.setType(Type.Namespace);
AccessControlProtos.GetUserPermissionsRequest request = builder.build();
AccessControlProtos.GetUserPermissionsResponse response = protocol.getUserPermissions(controller, request);
List<UserPermission> perms = new ArrayList<>(response.getUserPermissionCount());
for (AccessControlProtos.UserPermission perm : response.getUserPermissionList()) {
perms.add(toUserPermission(perm));
}
return perms;
} | 3.26 |
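As the @deprecated tag suggests, the current route goes through the Admin API; a hedged sketch assuming the HBase 2.2+ GetUserPermissionsRequest builder, with hypothetical names:

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.security.access.GetUserPermissionsRequest;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class NamespacePermissionsExample {
  // Fetch permissions for one user within a namespace.
  static List<UserPermission> nsPermissions(Admin admin) throws IOException {
    GetUserPermissionsRequest request =
        GetUserPermissionsRequest.newBuilder("my_ns").withUserName("bob").build();
    return admin.getUserPermissions(request);
  }
}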
hbase_AccessControlUtil_toUserPermission_rdh | /**
* Convert a protobuf UserTablePermissions to a ListMultimap<Username, UserPermission>
*
* @param proto
* the proto UsersAndPermissions
* @return a ListMultimap with user and its permissions
*/
public static ListMultimap<String, UserPermission> toUserPermission(AccessControlProtos.UsersAndPermissions proto) {
ListMultimap<String, UserPermission> userPermission = ArrayListMultimap.create();
AccessControlProtos.UsersAndPermissions.UserPermissions userPerm;
for (int i = 0; i < proto.getUserPermissionsCount(); i++) {
userPerm = proto.getUserPermissions(i);
String username = userPerm.getUser().toStringUtf8();
for (int j = 0; j < userPerm.getPermissionsCount(); j++) {
userPermission.put(username, new UserPermission(username, toPermission(userPerm.getPermissions(j))));
}
}
return userPermission;
} | 3.26 |
hbase_AccessControlUtil_m0_rdh | /**
* Create a request to grant user namespace permissions.
*
* @param username
* the short name of the user to grant permissions
* @param namespace
* optional namespace the permissions apply to
* @param actions
* the permissions to be granted
* @return A {@link AccessControlProtos} GrantRequest
*/
public static GrantRequest m0(String username, String namespace, boolean mergeExistingPermissions, AccessControlProtos.Permission.Action... actions) {
AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder();
AccessControlProtos.NamespacePermission.Builder permissionBuilder = AccessControlProtos.NamespacePermission.newBuilder();
for (AccessControlProtos.Permission.Action a : actions) {
permissionBuilder.addAction(a);
}
if (namespace != null) {
permissionBuilder.setNamespaceName(ByteString.copyFromUtf8(namespace));
}
ret.setType(Type.Namespace).setNamespacePermission(permissionBuilder);
return AccessControlProtos.GrantRequest.newBuilder().setUserPermission(AccessControlProtos.UserPermission.newBuilder().setUser(ByteString.copyFromUtf8(username)).setPermission(ret)).setMergeExistingPermissions(mergeExistingPermissions).build();
}
/**
* Create a request to revoke user global permissions.
*
* @param username
* the short user name whose permissions are to be revoked
* @param actions
* the permissions to be revoked
* @return A {@link AccessControlProtos} RevokeRequest
*/ | 3.26
hbase_AccessControlUtil_toPermission_rdh | /**
* Convert a protobuf UserTablePermissions to a ListMultimap<Username, Permission>
*
* @param proto
* the proto UsersAndPermissions
* @return a ListMultimap with user and its permissions
*/
public static ListMultimap<String, Permission> toPermission(AccessControlProtos.UsersAndPermissions proto) {
ListMultimap<String, Permission> perms = ArrayListMultimap.create();
AccessControlProtos.UsersAndPermissions.UserPermissions userPerm;
for (int i = 0; i < proto.getUserPermissionsCount(); i++) {
userPerm = proto.getUserPermissions(i);
String username = userPerm.getUser().toStringUtf8();
for (int j = 0; j < userPerm.getPermissionsCount(); j++) {
perms.put(username, toPermission(userPerm.getPermissions(j)));
}
}
return perms;
} | 3.26 |
hbase_AccessControlUtil_m2_rdh | /**
* A utility used to get permissions for selected namespace.
* <p>
* It's also called by the shell, in case you want to find references.
*
* @param controller
* RpcController
* @param protocol
* the AccessControlService protocol proxy
* @param namespace
* name of the namespace
* @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
*/
@Deprecated
public static List<UserPermission> m2(RpcController controller, AccessControlService.BlockingInterface protocol, byte[] namespace) throws ServiceException {
return getUserPermissions(controller, protocol, namespace, HConstants.EMPTY_STRING);
} | 3.26 |
hbase_AccessControlUtil_buildGetUserPermissionsResponse_rdh | /**
* Converts the permissions list into a protocol buffer GetUserPermissionsResponse
*/
public static GetUserPermissionsResponse buildGetUserPermissionsResponse(final List<UserPermission> permissions) {
GetUserPermissionsResponse.Builder builder = GetUserPermissionsResponse.newBuilder();
for (UserPermission perm : permissions) {
builder.addUserPermission(toUserPermission(perm));
}
return builder.build();
} | 3.26 |
hbase_AccessControlUtil_toUserTablePermissions_rdh | /**
* Convert a ListMultimap<String, TablePermission> where key is username to a protobuf
* UserPermission
*
* @param perm
* the list of user and table permissions
* @return the protobuf UserTablePermissions
*/
public static UsersAndPermissions toUserTablePermissions(ListMultimap<String, UserPermission> perm) {
AccessControlProtos.UsersAndPermissions.Builder builder = AccessControlProtos.UsersAndPermissions.newBuilder();
for (Map.Entry<String, Collection<UserPermission>> entry : perm.asMap().entrySet()) {
AccessControlProtos.UsersAndPermissions.UserPermissions.Builder userPermBuilder = UsersAndPermissions.UserPermissions.newBuilder();
userPermBuilder.setUser(ByteString.copyFromUtf8(entry.getKey()));
for (UserPermission userPerm : entry.getValue()) {
userPermBuilder.addPermissions(toPermission(userPerm.getPermission()));
}
builder.addUserPermissions(userPermBuilder.build());
}
return builder.build();
} | 3.26 |
hbase_AccessControlUtil_buildRevokeRequest_rdh | /**
* Create a request to revoke user table permissions.
*
* @param username
* the short user name whose permissions are to be revoked
* @param tableName
* optional table name the permissions apply
* @param family
* optional column family
* @param qualifier
* optional qualifier
* @param actions
* the permissions to be revoked
* @return A {@link AccessControlProtos} RevokeRequest
*/
public static RevokeRequest buildRevokeRequest(String username, TableName tableName, byte[] family, byte[] qualifier, AccessControlProtos.Permission.Action... actions) {
AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder();
AccessControlProtos.TablePermission.Builder permissionBuilder = AccessControlProtos.TablePermission.newBuilder();
for (AccessControlProtos.Permission.Action a : actions) {
permissionBuilder.addAction(a);
}
if (tableName != null) {
permissionBuilder.setTableName(ProtobufUtil.toProtoTableName(tableName));
}
if (family != null) {
permissionBuilder.setFamily(UnsafeByteOperations.unsafeWrap(family));
}
if (qualifier != null) {
permissionBuilder.setQualifier(UnsafeByteOperations.unsafeWrap(qualifier));
}
ret.setType(Type.Table).setTablePermission(permissionBuilder);
return AccessControlProtos.RevokeRequest.newBuilder().setUserPermission(AccessControlProtos.UserPermission.newBuilder().setUser(ByteString.copyFromUtf8(username)).setPermission(ret)).build();
} | 3.26 |
hbase_AccessControlUtil_hasPermission_rdh | /**
* Validates whether specified user has permission to perform actions on the mentioned table,
* column family or column qualifier.
*
* @param controller
* RpcController
* @param protocol
* the AccessControlService protocol proxy
* @param tableName
* Table name, it shouldn't be null or empty.
* @param columnFamily
* The column family. Optional argument, can be empty. If empty then
* validation will happen at table level.
* @param columnQualifier
* The column qualifier. Optional argument, can be empty. If empty then
* validation will happen at table and column family level. columnQualifier
* will not be considered if columnFamily is passed as null or empty.
* @param userName
* User name, it shouldn't be null or empty.
* @param actions
* Actions
* @return true if access allowed, otherwise false
* @deprecated Use {@link Admin#hasUserPermissions(String, List)} instead.
*/
@Deprecated
public static boolean hasPermission(RpcController controller, AccessControlService.BlockingInterface protocol, TableName tableName, byte[] columnFamily, byte[] columnQualifier, String userName, Permission.Action[] actions) throws ServiceException {
AccessControlProtos.TablePermission.Builder tablePermissionBuilder = AccessControlProtos.TablePermission.newBuilder();
tablePermissionBuilder.setTableName(ProtobufUtil.toProtoTableName(tableName));
if (Bytes.len(columnFamily) > 0) {
tablePermissionBuilder.setFamily(UnsafeByteOperations.unsafeWrap(columnFamily));
}
if (Bytes.len(columnQualifier) > 0) {
tablePermissionBuilder.setQualifier(UnsafeByteOperations.unsafeWrap(columnQualifier));
}
for (Permission.Action a : actions) {
tablePermissionBuilder.addAction(toPermissionAction(a));
}
AccessControlProtos.HasPermissionRequest request = AccessControlProtos.HasPermissionRequest.newBuilder().setTablePermission(tablePermissionBuilder).setUserName(ByteString.copyFromUtf8(userName)).build();
AccessControlProtos.HasPermissionResponse response = protocol.hasPermission(controller, request);
return response.getHasPermission();
} | 3.26 |
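The non-deprecated equivalent goes through Admin#hasUserPermissions; a hedged sketch assuming the HBase 2.2+ Permission builder, with hypothetical names:

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.util.Bytes;

public class HasPermissionExample {
  // Check whether "bob" may READ family "cf" of table "t1".
  static boolean canRead(Admin admin) throws IOException {
    Permission perm = Permission.newBuilder(TableName.valueOf("t1"))
        .withFamily(Bytes.toBytes("cf"))
        .withActions(Permission.Action.READ).build();
    List<Boolean> results = admin.hasUserPermissions("bob", Arrays.asList(perm));
    return results.get(0);
  }
}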
hbase_AccessControlUtil_grant_rdh | /**
* A utility used to grant a user table permissions. The permissions will be for a
* table/column family/qualifier.
* <p>
* It's also called by the shell, in case you want to find references.
*
* @param protocol
* the AccessControlService protocol proxy
* @param userShortName
* the short name of the user to grant permissions
* @param tableName
* optional table name
* @param f
* optional column family
* @param q
* optional qualifier
* @param actions
* the permissions to be granted
* @deprecated Use {@link Admin#grant(UserPermission, boolean)} instead.
*/
@Deprecated
public static void grant(RpcController controller, AccessControlService.BlockingInterface protocol, String userShortName, TableName tableName, byte[] f, byte[] q, boolean mergeExistingPermissions, Permission.Action... actions) throws ServiceException {
List<AccessControlProtos.Permission.Action> permActions = Lists.newArrayListWithCapacity(actions.length);
for (Permission.Action a : actions) {
permActions.add(toPermissionAction(a));
}
AccessControlProtos.GrantRequest request = buildGrantRequest(userShortName, tableName, f, q, mergeExistingPermissions, permActions.toArray(new AccessControlProtos.Permission.Action[actions.length]));
protocol.grant(controller, request);
}
/**
* A utility used to grant a user namespace permissions.
* <p>
* It's also called by the shell, in case you want to find references.
*
* @param controller
* RpcController
* @param protocol
* the AccessControlService protocol proxy
* @param namespace
* the namespace to grant permissions on
* @param actions
* the permissions to be granted
* @deprecated Use {@link Admin#grant(UserPermission, boolean)} instead.
*/ | 3.26
hbase_ProxyUserAuthenticationFilter_getDoasFromHeader_rdh | /**
* The purpose of this function is to get the doAs parameter of an HTTP request case-insensitively.
*
* @return doAs parameter if exists or null otherwise
*/
public static String getDoasFromHeader(final HttpServletRequest request) {
String doas = null;
final Enumeration<String> headers = request.getHeaderNames();
while (headers.hasMoreElements()) {
String header = headers.nextElement();
if (header.toLowerCase().equals("doas")) {
doas = request.getHeader(header);
break;
}
}
return doas;
} | 3.26 |
hbase_HFileBlockDefaultEncodingContext_close_rdh | /**
* Releases the compressor this writer uses to compress blocks into the compressor pool.
*/
@Override
public void close() {
if (compressor != null) {
this.fileContext.getCompression().returnCompressor(compressor);
compressor = null;
}
} | 3.26 |
hbase_HFileBlockDefaultEncodingContext_prepareEncoding_rdh | /**
* Prepare to start a new encoding.
*/
public void prepareEncoding(DataOutputStream out) throws IOException {
if ((encodingAlgo != null) && (encodingAlgo != DataBlockEncoding.NONE)) {
encodingAlgo.writeIdInBytes(out);
}
} | 3.26 |
hbase_AbstractWALRoller_waitUntilWalRollFinished_rdh | /**
* Wait until all wals have been rolled after calling {@link #requestRollAll()}.
*/
public void waitUntilWalRollFinished() throws InterruptedException {
while (!walRollFinished()) {
Thread.sleep(100);
}
} | 3.26 |
hbase_AbstractWALRoller_checkLowReplication_rdh | /**
* We need to check low replication periodically; see HBASE-18132.
*/
private void checkLowReplication(long now) {
try {
for (Entry<WAL, RollController> entry : wals.entrySet()) {
WAL wal = entry.getKey();
boolean needRollAlready = entry.getValue().needsRoll(now);
if (needRollAlready || (!(wal instanceof AbstractFSWAL))) {
continue;
}
((AbstractFSWAL<?>) (wal)).checkLogLowReplication(checkLowReplicationInterval);
}
} catch (Throwable e) {
LOG.warn("Failed checking low replication", e);
}
} | 3.26 |