name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_SnappyDecompressor_decompress_rdh | /**
* Fills specified buffer with uncompressed data. Returns actual number
* of bytes of uncompressed data. A return value of 0 indicates that
* {@link #needsInput()} should be called in order to determine if more
* input data is required.
*
* @param b
* Buffer for the uncompressed data
* @param off
* Start offset of the data
* @param len
* Size of the buffer
* @return The actual number of bytes of uncompressed data.
* @throws IOException
* raised on errors performing I/O.
*/
@Override
public int decompress(byte[] b, int off, int len) throws IOException {
if (b == null) {
throw new NullPointerException();
}
if (((off < 0) || (len < 0)) || (off > (b.length - len))) {
throw new ArrayIndexOutOfBoundsException();
}
int n = 0;
// Check if there is uncompressed data
n = uncompressedDirectBuf.remaining();
if (n > 0) {
n = Math.min(n, len);
((ByteBuffer) (uncompressedDirectBuf)).get(b, off, n);
return n;
}
if (compressedDirectBufLen > 0) {
// Re-initialize the snappy's output direct buffer
uncompressedDirectBuf.rewind();
uncompressedDirectBuf.limit(directBufferSize);
// Decompress data
n = decompressDirectBuf();
uncompressedDirectBuf.limit(n);
if (userBufLen <= 0) {
finished = true;
}
// Get at most 'len' bytes
n = Math.min(n, len);
((ByteBuffer) (uncompressedDirectBuf)).get(b, off, n);
}
return n;
} | 3.26 |
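The row above implements the pull side of Hadoop's `Decompressor` contract described in its javadoc. As a reading aid (not part of the dataset row), here is a minimal driver-loop sketch for that contract; the `Decompressor` method names are the standard `org.apache.hadoop.io.compress.Decompressor` API, while the class name and stream wiring are assumptions for illustration:

```java
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.hadoop.io.compress.Decompressor;

/** Minimal sketch of the needsInput()/decompress() loop described in the javadoc above. */
final class DecompressLoopSketch {
  static void copyDecompressed(Decompressor decompressor, InputStream compressedIn,
      OutputStream out) throws IOException {
    byte[] input = new byte[64 * 1024];
    byte[] output = new byte[64 * 1024];
    while (!decompressor.finished()) {
      if (decompressor.needsInput()) {
        int read = compressedIn.read(input);   // feed more compressed bytes
        if (read < 0) {
          break;                               // no further input available
        }
        decompressor.setInput(input, 0, read);
      }
      int n = decompressor.decompress(output, 0, output.length);
      if (n > 0) {
        out.write(output, 0, n);               // n bytes of uncompressed data
      }
    }
  }
}
```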
hadoop_GetApplicationAttemptsResponsePBImpl_initLocalApplicationAttemptsList_rdh | // Once this is called, applicationAttemptList will never be null - until a getProto
// is called.
private void initLocalApplicationAttemptsList() {
if (this.applicationAttemptList != null) {
return;
}
GetApplicationAttemptsResponseProtoOrBuilder p = (viaProto) ? proto : builder;
List<ApplicationAttemptReportProto> list = p.getApplicationAttemptsList();
applicationAttemptList = new ArrayList<ApplicationAttemptReport>();
for (ApplicationAttemptReportProto a : list) {
applicationAttemptList.add(convertFromProtoFormat(a));
}
} | 3.26 |
hadoop_WordListAnonymizerUtility_isKnownData_rdh | /**
* Checks if the given data is known.
*/
public static boolean isKnownData(String data, String[] knownWords) {
// check if the data is known content
// TODO [Chunking] Do this for sub-strings of data
for (String kd : knownWords) {
if (data.equals(kd)) {
return true;
}
}
return false;
} | 3.26 |
hadoop_WordListAnonymizerUtility_extractSuffix_rdh | /**
* Extracts a known suffix from the given data.
*
* @throws RuntimeException
* if the data doesn't have a suffix.
* Use {@link #hasSuffix(String, String[])} to make sure that the
* given data has a suffix.
*/
public static String[] extractSuffix(String data, String[] suffixes) { // check if they end in known suffixes
String suffix = "";
for (String ks : suffixes) {
if (data.endsWith(ks)) {
suffix = ks;
// strip off the suffix which will get appended later
data = data.substring(0, data.length() - suffix.length());
return new String[]{ data, suffix };
}
}
// throw exception
throw new RuntimeException("Data [" + data + "] doesn't have a suffix from known suffixes [" + StringUtils.join(suffixes, ',') + "]");
}
/**
* Checks if the given data is known. This API uses {@link #KNOWN_WORDS} | 3.26 |
hadoop_WordListAnonymizerUtility_hasSuffix_rdh | /**
* Checks if the given data has a known suffix.
*/
public static boolean hasSuffix(String data, String[] suffixes) {
// check if they end in known suffixes
for (String ks : suffixes) {
if (data.endsWith(ks)) {
return true;
}
}
return false;
} | 3.26 |
hadoop_WordListAnonymizerUtility_needsAnonymization_rdh | /**
* Checks if the data needs anonymization. Typically, data types which are
* numeric in nature don't need anonymization.
*/
public static boolean needsAnonymization(String data) {
// Numeric data doesn't need anonymization
// Currently this doesn't support inputs like
// - 12.3
// - 12.3f
// - 90L
// - 1D
if (StringUtils.isNumeric(data)) {
return false;
}
return true;// by default return true
} | 3.26 |
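The four WordListAnonymizerUtility rows above (isKnownData, extractSuffix, hasSuffix, needsAnonymization) are typically used together. A small hedged sketch of that flow follows; the word and suffix arrays are made-up inputs, and the sketch assumes it is compiled alongside the utility class:

```java
/** Hedged usage sketch for the WordListAnonymizerUtility helpers shown above. */
final class AnonymizerSketch {
  public static void main(String[] args) {
    String[] knownWords = {"default", "tmp"};        // hypothetical dictionaries
    String[] knownSuffixes = {".xml", ".jar"};

    String data = "core-site.xml";
    if (WordListAnonymizerUtility.needsAnonymization(data)) {   // "1234" would return false
      if (WordListAnonymizerUtility.hasSuffix(data, knownSuffixes)) {
        // extractSuffix() returns {strippedData, suffix}; check hasSuffix() first,
        // because extractSuffix() throws a RuntimeException when nothing matches.
        String[] parts = WordListAnonymizerUtility.extractSuffix(data, knownSuffixes);
        System.out.println(parts[0] + " / " + parts[1]);        // core-site / .xml
      }
      System.out.println(WordListAnonymizerUtility.isKnownData(data, knownWords)); // false
    }
  }
}
```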
hadoop_ByteBufferDecodingState_checkOutputBuffers_rdh | /**
* Check and ensure the buffers are of the desired length and type, direct
* buffers or not.
*
* @param buffers
* the buffers to check
*/
void checkOutputBuffers(ByteBuffer[] buffers) {
for (ByteBuffer buffer : buffers) {
if (buffer == null) {
throw new HadoopIllegalArgumentException("Invalid buffer found, not allowing null");
}
if (buffer.remaining() != decodeLength) {
throw new HadoopIllegalArgumentException("Invalid buffer, not of length " + decodeLength);
}
if (buffer.isDirect() != usingDirectBuffer) {
throw new HadoopIllegalArgumentException("Invalid buffer, isDirect should be " + usingDirectBuffer);
}
}
} | 3.26 |
hadoop_ByteBufferDecodingState_convertToByteArrayState_rdh | /**
* Convert to a ByteArrayDecodingState when it's backed by on-heap arrays.
*/
ByteArrayDecodingState convertToByteArrayState() {
int[] inputOffsets = new int[inputs.length];
int[] outputOffsets = new int[outputs.length];
byte[][] newInputs = new byte[inputs.length][];
byte[][] newOutputs = new byte[outputs.length][];
ByteBuffer buffer;
for (int i = 0; i < inputs.length; ++i) {
buffer = inputs[i];
if (buffer != null) {
inputOffsets[i] = buffer.arrayOffset() + buffer.position();
newInputs[i] = buffer.array();
}
}
for (int i = 0; i < outputs.length; ++i) {
buffer = outputs[i];
outputOffsets[i] = buffer.arrayOffset() + buffer.position();
newOutputs[i] = buffer.array();
}
ByteArrayDecodingState baeState = new ByteArrayDecodingState(decoder, decodeLength, erasedIndexes, newInputs, inputOffsets, newOutputs, outputOffsets);
return baeState;
} | 3.26 |
hadoop_ByteBufferDecodingState_checkInputBuffers_rdh | /**
* Check and ensure the buffers are of the desired length and type, direct
* buffers or not.
*
* @param buffers
* the buffers to check
*/
void checkInputBuffers(ByteBuffer[] buffers) {
int v9 = 0;
for (ByteBuffer buffer : buffers) {
if (buffer == null) {
continue;
}
if (buffer.remaining() != decodeLength) {
throw new HadoopIllegalArgumentException("Invalid buffer, not of length " + decodeLength);
}
if (buffer.isDirect() != usingDirectBuffer) {
throw new HadoopIllegalArgumentException("Invalid buffer, isDirect should be " + usingDirectBuffer);
}
v9++;
}
if (v9 < decoder.getNumDataUnits()) {
throw new HadoopIllegalArgumentException("No enough valid inputs are provided, not recoverable");
}
} | 3.26 |
hadoop_TimelineReaderUtils_joinAndEscapeStrings_rdh | /**
* Join different strings in the passed string array delimited by passed
* delimiter with delimiter and escape character escaped using passed escape
* char.
*
* @param strs
* strings to be joined.
* @param delimiterChar
* delimiter used to join strings.
* @param escapeChar
* escape character used to escape delimiter and escape
* char.
* @return a single string joined using delimiter and properly escaped.
*/
static String joinAndEscapeStrings(final String[] strs, final char delimiterChar, final char escapeChar) {
int len = strs.length;
// Escape each string in string array.
for (int v12 = 0; v12 < len; v12++) {
if (strs[v12] == null) {
return null;
}
strs[v12] = escapeString(strs[v12], delimiterChar, escapeChar);
}
// Join the strings after they have been escaped.
return StringUtils.join(strs, delimiterChar);
} | 3.26 |
hadoop_TimelineReaderUtils_split_rdh | /**
* Split the passed string along the passed delimiter character while looking
* for escape char to interpret the split parts correctly. For delimiter or
* escape character to be interpreted as part of the string, they have to be
* escaped by putting an escape character in front.
*
* @param str
* string to be split.
* @param delimiterChar
* delimiter used for splitting.
* @param escapeChar
* delimiter and escape character will be escaped using this
* character.
* @return a list of strings after split.
* @throws IllegalArgumentException
* if string is not properly escaped.
*/
static List<String> split(final String str, final char delimiterChar, final char escapeChar) throws IllegalArgumentException {
if (str == null) {
return null;
}
int len = str.length();
if (len == 0) {
return Collections.emptyList();
}
List<String> list = new ArrayList<String>();
// Keeps track of offset of the passed string.
int offset = 0;
// Indicates start offset from which characters will be copied from original
// string to destination string. Resets when an escape or delimiter char is
// encountered.
int startOffset = 0;
StringBuilder builder = new StringBuilder(len);
// Iterate over the string till we reach the end.
while (offset < len) {
if (str.charAt(offset) == escapeChar) {
// An escape character must be followed by a delimiter or escape char
// but we have reached the end and have no further character to look at.
if ((offset + 1) >= len) {
throw new IllegalArgumentException("Escape char not properly escaped.");
}
char nextChar = str.charAt(offset + 1);
// Next character must be a delimiter or an escape char.
if ((nextChar != escapeChar) && (nextChar != delimiterChar)) {
throw new IllegalArgumentException("Escape char or delimiter char not properly escaped.");
}
// Copy contents from the offset where last escape or delimiter char was
// encountered.
if (startOffset < offset) {
builder.append(str.substring(startOffset, offset));
}
builder.append(nextChar);
offset += 2;
// Reset the start offset as an escape char has been encountered.
startOffset = offset;
continue;
} else if (str.charAt(offset) == delimiterChar) {
// A delimiter has been encountered without an escape character.
// String needs to be split here. Copy remaining chars and add the
// string to list.
builder.append(str.substring(startOffset, offset));
list.add(builder.toString().trim());
// Reset the start offset as a delimiter has been encountered.
startOffset = ++offset;
builder = new StringBuilder(len - offset);
continue;
}
offset++;
}
// Copy rest of the characters.
if (!str.isEmpty()) {
builder.append(str.substring(startOffset));
}
// Add the last part of delimited string to list.
list.add(builder.toString().trim());
return list;
} | 3.26 |
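The joinAndEscapeStrings and split rows above are inverse operations. A round-trip sketch may help; both methods are package-private, so this assumes the code sits in the same package as TimelineReaderUtils, and the delimiter and escape characters are made up:

```java
import java.util.List;

/** Round-trip sketch for joinAndEscapeStrings()/split(); same-package access is assumed. */
final class EscapeRoundTripSketch {
  public static void main(String[] args) {
    String[] parts = {"flow!name", "cluster", "user"};   // note the embedded delimiter
    // joinAndEscapeStrings escapes the array entries in place, then joins with '!':
    // the embedded '!' gets a '#' escape prefix, giving "flow#!name!cluster!user".
    String joined = TimelineReaderUtils.joinAndEscapeStrings(parts, '!', '#');
    System.out.println(joined);

    // split() honours the '#' escapes and recovers the original three parts.
    List<String> back = TimelineReaderUtils.split(joined, '!', '#');
    System.out.println(back);   // [flow!name, cluster, user]
  }
}
```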
hadoop_BaseTableRW_getResultScanner_rdh | /**
*
* @param hbaseConf
* used to read settings that override defaults
* @param conn
* used to create table from
* @param scan
* that specifies what you want to read from this table.
* @return scanner for the table.
* @throws IOException
* if any exception occurs while getting the scanner.
*/
public ResultScanner getResultScanner(Configuration hbaseConf, Connection conn, Scan scan) throws IOException {
Table v3 = conn.getTable(getTableName(hbaseConf));
return v3.getScanner(scan);
} | 3.26 |
hadoop_BaseTableRW_m0_rdh | /**
* Used to create a type-safe mutator for this table.
*
* @param hbaseConf
* used to read table name.
* @param conn
* used to create a table from.
* @return a type safe {@link BufferedMutator} for the entity table.
* @throws IOException
* if any exception occurs while creating mutator for the
* table.
*/
public TypedBufferedMutator<T> m0(Configuration hbaseConf, Connection conn) throws IOException {
TableName tableName = this.getTableName(hbaseConf);
// Plain buffered mutator
BufferedMutator bufferedMutator = conn.getBufferedMutator(tableName);
// Now make this thing type safe.
// This is how service initialization should hang on to this variable, with
// the proper type
TypedBufferedMutator<T> table = new TypedBufferedMutator<T>(bufferedMutator);
return table;
} | 3.26 |
hadoop_BaseTableRW_getTableName_rdh | /**
* Get the table name for this table.
*
* @param conf
* HBase configuration from which table name will be fetched.
* @return A {@link TableName} object.
*/
public TableName getTableName(Configuration conf) {
String tableName = conf.get(tableNameConfName, defaultTableName);
return getTableName(conf, tableName);
} | 3.26 |
hadoop_BaseTableRW_m1_rdh | /**
* Get the table name based on the input config parameters.
*
* @param conf
* HBase configuration from which table name will be fetched.
* @param tableNameInConf
* the table name parameter in conf.
* @param defaultTableName
* the default table name.
* @return A {@link TableName} object.
*/
public static TableName m1(Configuration conf, String tableNameInConf, String defaultTableName) {
String tableName = conf.get(tableNameInConf, defaultTableName);
return getTableName(conf, tableName);
} | 3.26 |
hadoop_Retryer_continueRetry_rdh | /**
* Returns true if retrying should continue, false otherwise.
*
* @return true if the caller should retry, false otherwise.
*/
public boolean continueRetry() {
if (this.delay >= this.maxDelay) {
return false;
}
try {
Thread.sleep(this.perRetryDelay);
} catch (InterruptedException e) {
// Ignore the exception, as required by the semantics of this class.
}
this.delay += this.perRetryDelay;
return true;
} | 3.26 |
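A polling-loop sketch around the two Retryer methods in these rows; the three-argument constructor (per-retry delay, max delay, status-update interval, in milliseconds) is an assumption, since only the fields are visible here:

```java
import java.util.function.BooleanSupplier;

/** Polling-loop sketch; the Retryer constructor shown below is an assumption. */
final class RetryerLoopSketch {
  static boolean waitFor(BooleanSupplier done) {
    Retryer retryer = new Retryer(10L, 1000L, 100L);   // assumed (perRetryDelay, maxDelay, statusUpdateInterval)
    do {
      if (done.getAsBoolean()) {
        return true;                                   // condition satisfied
      }
      if (retryer.updateStatus()) {
        System.out.println("still waiting ...");       // periodic progress report
      }
    } while (retryer.continueRetry());                 // sleeps perRetryDelay between checks
    return done.getAsBoolean();                        // final check once the delay budget is spent
  }
}
```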
hadoop_Retryer_updateStatus_rdh | /**
* Returns true if status update interval has been reached.
*
* @return true if status update interval has been reached.
*/
public boolean updateStatus() {
return (this.delay > 0) && ((this.delay % this.statusUpdateInterval) == 0);
} | 3.26 |
hadoop_YarnConfigurationStore_getUser_rdh | /**
* Get user who requested configuration change.
*
* @return user who requested configuration change
*/
public String getUser() {
return f0;
} | 3.26 |
hadoop_YarnConfigurationStore_getUpdates_rdh | /**
* Get key-value configuration updates.
*
* @return map of configuration updates
*/
public Map<String, String> getUpdates() {
return updates;
} | 3.26 |
hadoop_NvidiaGPUPluginForRuntimeV2_topologyAwareSchedule_rdh | /**
* Topology Aware schedule algorithm.
* It doesn't consider CPU affinity or NUMA or bus bandwidths.
* It supports two policies, "spread" and "pack", which can be set by the container's
* environment variable. "Pack" is the default and prefers the faster
* GPU-GPU link. "Spread" prefers the faster CPU-GPU link.
* It can potentially be extended to take GPU attributes like GPU chip memory
* into consideration.
*/
@VisibleForTesting
public void topologyAwareSchedule(Set<Device> allocation, int count, Map<String, String> envs, Set<Device> availableDevices, Map<Integer, List<Map.Entry<Set<Device>, Integer>>> cTable) {
int num = 0;
String policy = envs.get(TOPOLOGY_POLICY_ENV_KEY);
if (policy == null) {
policy = TOPOLOGY_POLICY_PACK;
}
/**
* Get combinations from costTable given the count of device want to
* allocate.
*/
if (cTable == null) {
f0.error("No cost table initialized!");
return;
}
List<Map.Entry<Set<Device>, Integer>> combinationsToCost = cTable.get(count);
Iterator<Map.Entry<Set<Device>, Integer>> iterator = combinationsToCost.iterator();
// the container needs spread policy
if (policy.equalsIgnoreCase(TOPOLOGY_POLICY_SPREAD)) {
// loop from high cost to low cost
iterator = ((LinkedList) (combinationsToCost)).descendingIterator();
}
while (iterator.hasNext()) {
Map.Entry<Set<Device>, Integer> element = iterator.next();
if (availableDevices.containsAll(element.getKey())) {
allocation.addAll(element.getKey());
f0.info("Topology scheduler allocated: " + allocation);
return;
}
}
f0.error("Unknown error happened in topology scheduler");
} | 3.26 |
hadoop_NvidiaGPUPluginForRuntimeV2_buildCostTable_rdh | /**
* Generate all combinations of devices and their costs and populate the
* cost table (cTable).
*/
private void buildCostTable(Map<Integer, List<Map.Entry<Set<Device>, Integer>>> cTable, Set<Device> ltfDevices) {
Device[] deviceList = new Device[ltfDevices.size()];
ltfDevices.toArray(deviceList);
generateAllDeviceCombination(cTable, deviceList, deviceList.length);
} | 3.26 |
hadoop_NvidiaGPUPluginForRuntimeV2_combinationRecursive_rdh | /**
* Populate combination to cost map recursively.
*
* @param cTc
* combinationToCost map.
* The key is device set, the value is cost
* @param allDevices
* all devices used to assign value to subDevicelist
* @param subDeviceList
* store a subset of devices temporary
* @param start
* start index in the allDevices
* @param end
* last index in the allDevices
* @param index
* dynamic index in subDeviceList need to be assigned
* @param r
* the length of the subDeviceList
*/
void combinationRecursive(Map<Set<Device>, Integer> cTc, Device[] allDevices, Device[] subDeviceList, int start, int end, int index, int r) {
// sub device list's length is ready to compute the cost
if (index == r) {
Set<Device> oneSet = new TreeSet<>(Arrays.asList(subDeviceList));
int cost = computeCostOfDevices(subDeviceList);
cTc.put(oneSet, cost);
return;
}
for (int i = start; i <= end; i++) {
subDeviceList[index] = allDevices[i];
combinationRecursive(cTc, allDevices, subDeviceList, i + 1, end, index + 1, r);
}
} | 3.26 |
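combinationRecursive is the classic "n choose r" subset enumeration with a reusable scratch array. A standalone sketch of the same index bookkeeping over plain strings (independent of the Device and cost types) is below:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/** Standalone n-choose-r enumeration mirroring combinationRecursive's index handling. */
final class CombinationSketch {
  static void combinations(String[] all, String[] sub, int start, int end,
      int index, int r, List<List<String>> out) {
    if (index == r) {                                   // sub[0..r-1] is complete
      out.add(new ArrayList<>(Arrays.asList(sub)));
      return;
    }
    for (int i = start; i <= end; i++) {
      sub[index] = all[i];                              // place element, then recurse on the rest
      combinations(all, sub, i + 1, end, index + 1, r, out);
    }
  }

  public static void main(String[] args) {
    String[] gpus = {"GPU0", "GPU1", "GPU2"};
    List<List<String>> out = new ArrayList<>();
    combinations(gpus, new String[2], 0, gpus.length - 1, 0, 2, out);
    System.out.println(out);   // [[GPU0, GPU1], [GPU0, GPU2], [GPU1, GPU2]]
  }
}
```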
hadoop_NvidiaGPUPluginForRuntimeV2_m1_rdh | // Get the topology metrics info from nvidia-smi
public String m1() throws IOException {
return Shell.execCommand(environment, new String[]{ f1, "topo", "-m" }, MAX_EXEC_TIMEOUT_MS);
} | 3.26 |
hadoop_NvidiaGPUPluginForRuntimeV2_parseTopo_rdh | /**
* A typical sample topo output:
* GPU0 GPU1 GPU2 GPU3 CPU Affinity
* GPU0 X PHB SOC SOC 0-31
* GPU1 PHB X SOC SOC 0-31
* GPU2 SOC SOC X PHB 0-31
* GPU3 SOC SOC PHB X 0-31
*
*
* Legend:
*
* X = Self
* SOC = Connection traversing PCIe as well as the SMP link between
* CPU sockets(e.g. QPI)
* PHB = Connection traversing PCIe as well as a PCIe Host Bridge
* (typically the CPU)
* PXB = Connection traversing multiple PCIe switches
* (without traversing the PCIe Host Bridge)
* PIX = Connection traversing a single PCIe switch
* NV# = Connection traversing a bonded set of # NVLinks
*/
public void parseTopo(String topo, Map<String, Integer> deviceLinkToWeight) {
String[] lines = topo.split("\n");
int rowMinor;
int colMinor;
String legend;
String tempType;
for (String oneLine : lines) {
oneLine = oneLine.trim();
if (oneLine.isEmpty()) {
continue;
}
// To the end. No more metrics info
if (oneLine.startsWith("Legend")) {
break;
}
// Skip header
if (oneLine.contains("Affinity")) {
continue;
}
String[] tokens = oneLine.split("\\s+");
String name = tokens[0];
rowMinor = Integer.parseInt(name.substring(name.lastIndexOf("U") + 1));
for (int v50 = 1; v50 < tokens.length; v50++) {
tempType = tokens[v50];
colMinor = v50 - 1;
// self, skip
if (tempType.equals("X")) {
continue;
}
if (tempType.equals("SOC") || tempType.equals("SYS")) {
populateGraphEdgeWeight(DeviceLinkType.P2PLinkCrossCPUSocket, rowMinor, colMinor, deviceLinkToWeight);
continue;
}
if (tempType.equals("PHB") || tempType.equals("NODE")) {
populateGraphEdgeWeight(DeviceLinkType.P2PLinkSameCPUSocket, rowMinor, colMinor, deviceLinkToWeight);
continue;
}
if (tempType.equals("PXB")) {
populateGraphEdgeWeight(DeviceLinkType.P2PLinkMultiSwitch, rowMinor, colMinor, deviceLinkToWeight);
continue;
}
if (tempType.equals("PIX")) {
populateGraphEdgeWeight(DeviceLinkType.P2PLinkSingleSwitch, rowMinor, colMinor, deviceLinkToWeight);
continue;
}
if (tempType.equals("NV1")) {
populateGraphEdgeWeight(DeviceLinkType.P2PLinkNVLink1, rowMinor, colMinor, deviceLinkToWeight);
continue;
}
if (tempType.equals("NV2")) {
populateGraphEdgeWeight(DeviceLinkType.P2PLinkNVLink2, rowMinor, colMinor, deviceLinkToWeight);
continue;
}
if (tempType.equals("NV3")) {
populateGraphEdgeWeight(DeviceLinkType.P2PLinkNVLink3, rowMinor, colMinor, deviceLinkToWeight);
continue;
}
if (tempType.equals("NV4")) {
populateGraphEdgeWeight(DeviceLinkType.P2PLinkNVLink4, rowMinor, colMinor, deviceLinkToWeight);
continue;
}
if (tempType.equals("NV5")) {
populateGraphEdgeWeight(DeviceLinkType.P2PLinkNVLink5, rowMinor, colMinor, deviceLinkToWeight);
continue;
}
if (tempType.equals("NV6")) {
populateGraphEdgeWeight(DeviceLinkType.P2PLinkNVLink6, rowMinor, colMinor, deviceLinkToWeight);
continue;
}
if (tempType.equals("NV7")) {
populateGraphEdgeWeight(DeviceLinkType.P2PLinkNVLink7, rowMinor, colMinor, deviceLinkToWeight);
continue;
}
if (tempType.equals("NV8")) {
populateGraphEdgeWeight(DeviceLinkType.P2PLinkNVLink8, rowMinor, colMinor, deviceLinkToWeight);
continue;
}
if (tempType.equals("NV9")) {
populateGraphEdgeWeight(DeviceLinkType.P2PLinkNVLink9, rowMinor, colMinor, deviceLinkToWeight);
continue;
}
}// end one line handling
}
} | 3.26 |
hadoop_NvidiaGPUPluginForRuntimeV2_getMajorNumber_rdh | // Get major number from device name.
private String getMajorNumber(String devName) {
String output = null;
// output "major:minor" in hex
try {
f0.debug("Get major numbers from /dev/{}", devName);
output = shellExecutor.getMajorMinorInfo(devName);
String[] strs = output.trim().split(":");
f0.debug("stat output:{}", output);
output = Integer.toString(Integer.parseInt(strs[0], 16));
} catch (IOException e) {
String msg = "Failed to get major number from reading /dev/" + devName;
f0.warn(msg);
} catch (NumberFormatException e) {
f0.error("Failed to parse device major number from stat output");
output = null;
}
return output;
} | 3.26 |
hadoop_ClasspathConstructor_buildLibDir_rdh | /**
* Build a lib dir path
*
* @param pathToLibDir
* path to the directory; may or may not end with a
* trailing space
* @return a path to a lib dir that is compatible with the java classpath
*/
public String buildLibDir(String pathToLibDir) {
String dir = appendDirectoryTerminator(pathToLibDir);
dir += "*";
return dir;
} | 3.26 |
hadoop_ClasspathConstructor_getPathElements_rdh | /**
* Get a copy of the path list
*
* @return the JARs
*/
public List<String> getPathElements() {
return Collections.unmodifiableList(pathElements);
} | 3.26 |
hadoop_ClasspathConstructor_splitClasspath_rdh | /**
* Split a classpath. This uses the local path separator so MUST NOT
* be used to work with remote classpaths
*
* @param localpath
* local path
* @return the split classpath entries
*/
public Collection<String> splitClasspath(String localpath) {
String separator = System.getProperty("path.separator");
return StringUtils.getStringCollection(localpath, separator);
} | 3.26 |
hadoop_ClasspathConstructor_yarnApplicationClasspath_rdh | /**
* Get the list of JARs from the YARN settings
*
* @param config
* configuration
*/
public List<String> yarnApplicationClasspath(Configuration config) {
String[] cp = config.getTrimmedStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH, YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH);
return cp != null ? Arrays.asList(cp) : new ArrayList<String>(0);
} | 3.26 |
hadoop_ClasspathConstructor_localJVMClasspath_rdh | /**
* Get the local JVM classpath split up
*
* @return the list of entries on the JVM classpath env var
*/
public Collection<String> localJVMClasspath() {
return splitClasspath(System.getProperty("java.class.path"));
} | 3.26 |
hadoop_ClasspathConstructor_insert_rdh | /**
* Insert a path at the front of the list. This places it ahead of
* the standard YARN artifacts
*
* @param path
* path to the JAR. Absolute or relative -on the target
* system
*/
public void insert(String path) {
pathElements.add(0, path);
} | 3.26 |
hadoop_ClasspathConstructor_append_rdh | /**
* Append an entry
*
* @param path
* path
*/
public void append(String path) {
pathElements.add(path);
} | 3.26 |
hadoop_SignerFactory_isSignerRegistered_rdh | /**
* Check if the signer has already been registered.
*
* @param signerType
* signer to get
* @return true if the signer is registered.
*/
public static boolean isSignerRegistered(String signerType) {
return SIGNERS.containsKey(signerType);
} | 3.26 |
hadoop_SignerFactory_createSigner_rdh | /**
* Create an instance of the given signer.
*
* @param signerType
* The signer type.
* @param configKey
* Config key used to configure the signer.
* @return The new signer instance.
* @throws InstantiationIOException
* instantiation problems.
* @throws IOException
* on any other problem.
*/
public static Signer createSigner(String signerType, String configKey) throws IOException {
if (S3_V2_SIGNER.equals(signerType)) {
throw unavailable(null, null, configKey, S3_V2_SIGNER + " is no longer supported");
}
if (!isSignerRegistered(signerType)) {
throw unavailable(null, null, configKey, "unknown signer type: " + signerType);
}
Class<?> signerClass = SIGNERS.get(signerType);
String className = signerClass.getName();
LOG.debug("Signer class from {} and key {} is {}", signerType, configKey, className);
Signer signer = S3AUtils.getInstanceFromReflection(className, null, null, Signer.class, "create", configKey);
return signer;
} | 3.26 |
hadoop_KMSAudit_getAuditLoggerClasses_rdh | /**
* Read the KMSAuditLogger classes from configuration. If any loggers fail to
* load, a RuntimeException will be thrown.
*
* @param conf
* The configuration.
* @return Collection of KMSAuditLogger classes.
*/
private Set<Class<? extends KMSAuditLogger>> getAuditLoggerClasses(final Configuration conf) {
Set<Class<? extends KMSAuditLogger>> result = new HashSet<>();
// getTrimmedStringCollection will remove duplicates.
Collection<String> classes = conf.getTrimmedStringCollection(KMSConfiguration.KMS_AUDIT_LOGGER_KEY);
if (classes.isEmpty()) {
LOG.info("No audit logger configured, using default.");
result.add(SimpleKMSAuditLogger.class);
return result;
}
for (String c : classes) {
try {
Class<?> cls = conf.getClassByName(c);
result.add(cls.asSubclass(KMSAuditLogger.class));
} catch (ClassNotFoundException cnfe) {
throw new RuntimeException("Failed to load " + c + ", please check configuration " + KMSConfiguration.KMS_AUDIT_LOGGER_KEY, cnfe);
}
}
return result;
} | 3.26 |
hadoop_KMSAudit_op_rdh | /**
* Logs to the audit service a single operation on the KMS or on a key.
*
* @param opStatus
* The outcome of the audited event
* @param op
* The operation being audited (either {@link KMS.KMSOp} or
* {@link Type} N.B this is passed as an {@link Object} to allow
* either enum to be passed in.
* @param ugi
* The user's security context
* @param key
* The String name of the key if applicable
* @param remoteHost
* The hostname of the requesting service
* @param extraMsg
* Any extra details for auditing
*/
private void op(final OpStatus opStatus, final Object op, final UserGroupInformation ugi, final String key, final String remoteHost, final String extraMsg) {
final String user = (ugi == null) ? null : ugi.getUserName();
if ((((!Strings.isNullOrEmpty(user)) && (!Strings.isNullOrEmpty(key))) && (op != null)) && AGGREGATE_OPS_WHITELIST.contains(op)) {
String cacheKey = createCacheKey(user, key, op);
if (opStatus == KMSAuditLogger.OpStatus.UNAUTHORIZED) {
cache.invalidate(cacheKey);
logEvent(opStatus, new AuditEvent(op, ugi, key, remoteHost, extraMsg));
} else {
try {
AuditEvent event = cache.get(cacheKey, new Callable<AuditEvent>() {
@Override
public KMSAuditLogger.AuditEvent call() throws Exception {
return new AuditEvent(op, ugi, key, remoteHost, extraMsg);
}
});
// Log first access (initialized as -1 so
// incrementAndGet() == 0 implies first access)
if (event.getAccessCount().incrementAndGet() == 0) {
event.getAccessCount().incrementAndGet();
logEvent(opStatus, event);
}
} catch (ExecutionException ex) {
throw new RuntimeException(ex);
}
}
} else {
logEvent(opStatus, new AuditEvent(op, ugi, key, remoteHost, extraMsg));
}
} | 3.26 |
hadoop_KMSAudit_initializeAuditLoggers_rdh | /**
* Create a collection of KMSAuditLoggers from configuration, and initialize
* them. If any logger fails to be created or initialized, a RuntimeException
* is thrown.
*/
private void initializeAuditLoggers(Configuration conf) {
Set<Class<? extends KMSAuditLogger>> classes = getAuditLoggerClasses(conf);
Preconditions.checkState(!classes.isEmpty(), "Should have at least 1 audit logger.");
for (Class<? extends KMSAuditLogger> c : classes) {
final KMSAuditLogger logger = ReflectionUtils.newInstance(c, conf);
auditLoggers.add(logger);
}
for (KMSAuditLogger logger : auditLoggers) {
try {
LOG.info("Initializing audit logger {}", logger.getClass());
logger.initialize(conf);
} catch (Exception ex) {
throw new RuntimeException("Failed to initialize " + logger.getClass().getName(), ex);
}
}
} | 3.26 |
hadoop_DiskValidatorFactory_getInstance_rdh | /**
* Returns {@link DiskValidator} instance corresponding to its name.
* The diskValidator parameter can be "basic" for {@link BasicDiskValidator}
* or "read-write" for {@link ReadWriteDiskValidator}.
*
* @param diskValidator
* canonical class name, for example, "basic"
* @throws DiskErrorException
* if the class cannot be located
* @return disk validator.
*/
@SuppressWarnings("unchecked")
public static DiskValidator getInstance(String diskValidator)
throws DiskErrorException {
@SuppressWarnings("rawtypes")
Class clazz;
if (diskValidator.equalsIgnoreCase(BasicDiskValidator.NAME)) {
clazz = BasicDiskValidator.class;
} else if (diskValidator.equalsIgnoreCase(ReadWriteDiskValidator.NAME)) {
clazz = ReadWriteDiskValidator.class;
} else {
try {
clazz = Class.forName(diskValidator);
} catch (ClassNotFoundException cnfe) {
throw new DiskErrorException(diskValidator + " DiskValidator class not found.", cnfe);
}
}
return getInstance(clazz);
} | 3.26 |
hadoop_BufferPuller_close_rdh | /**
* Closes the iterator so that the underlying streams can be closed.
*/
@Override
public void close() throws IOException {
if (closed) {
return;
}
if (null != nativeReader) {
nativeReader.close();
}
closed = true;
} | 3.26 |
hadoop_YarnAuthorizationProvider_destroy_rdh | /**
* Destroy the {@link YarnAuthorizationProvider} instance.
* This method is called only in Tests.
*/
@VisibleForTesting
public static void destroy() {
synchronized(YarnAuthorizationProvider.class) {
if (authorizer != null) {
LOG.debug("{} is destroyed.", authorizer.getClass().getName());
authorizer = null;
}
}
} | 3.26 |
hadoop_TaskAttemptsInfo_getTaskAttempts_rdh | // XmlElementRef annotation should be used to identify the exact type of a list element
// otherwise metadata will be added to XML attributes,
// it can lead to incorrect JSON marshaling
@XmlElementRef
public List<TaskAttemptInfo> getTaskAttempts() {
return taskAttempts;
} | 3.26 |
hadoop_NativeRuntime_createNativeObject_rdh | /**
* Create a native object. We use it to create native handlers
*/
public static synchronized long createNativeObject(String clazz) {
assertNativeLibraryLoaded();
final long ret = JNICreateNativeObject(clazz.getBytes(StandardCharsets.UTF_8));
if (ret == 0) {
LOG.warn(("Can't create NativeObject for class " + clazz) + ", probably not exist.");
}
return ret;
} | 3.26 |
hadoop_NativeRuntime_releaseNativeObject_rdh | /**
* Destroy a native object. We use it to destroy native handlers
*/
public static synchronized void releaseNativeObject(long addr) {
assertNativeLibraryLoaded();
m1(addr);
} | 3.26 |
hadoop_NativeRuntime_reportStatus_rdh | /**
* Get the status report from native space
*/
public static void reportStatus(TaskReporter reporter) throws IOException {
assertNativeLibraryLoaded();
synchronized(reporter) {
final byte[] v2 = JNIUpdateStatus();
final DataInputBuffer ib = new DataInputBuffer();
ib.reset(v2, v2.length);
final FloatWritable progress = new FloatWritable();
progress.readFields(ib);
reporter.setProgress(progress.get());
final Text status = new Text();
status.readFields(ib);
if (status.getLength() > 0) {
reporter.setStatus(status.toString());
}
final IntWritable numCounters = new IntWritable();
numCounters.readFields(ib);
if (numCounters.get() == 0) {
return;
}
final Text group = new Text();
final Text name = new Text();
final LongWritable amount = new LongWritable();
for (int i = 0; i < numCounters.get(); i++) {
group.readFields(ib);
name.readFields(ib);
amount.readFields(ib);
reporter.incrCounter(group.toString(), name.toString(), amount.get());
}
}
} | 3.26 |
hadoop_NativeRuntime_registerLibrary_rdh | /**
* Register a customized library
*/
public static synchronized long registerLibrary(String libraryName, String clazz) {
assertNativeLibraryLoaded();
final long ret = JNIRegisterModule(libraryName.getBytes(StandardCharsets.UTF_8), clazz.getBytes(StandardCharsets.UTF_8));
if (ret != 0) {
LOG.warn(("Can't create NativeObject for class " + clazz) + ", probably not exist.");
}
return ret;
} | 3.26 |
hadoop_SysInfo_newInstance_rdh | /**
* Return default OS instance.
*
* @throws UnsupportedOperationException
* If cannot determine OS.
* @return Default instance for the detected OS.
*/
public static SysInfo newInstance() {
if (Shell.LINUX) {
return new SysInfoLinux();
}
if (Shell.WINDOWS) {
return new SysInfoWindows();
}
throw new UnsupportedOperationException("Could not determine OS");
} | 3.26 |
hadoop_ReInitializeContainerRequest_newInstance_rdh | /**
* Creates a new instance of the ReInitializationContainerRequest.
*
* @param containerId
* Container Id.
* @param containerLaunchContext
* Container Launch Context.
* @param autoCommit
* AutoCommit.
* @return ReInitializationContainerRequest.
*/
@Public
@Unstable
public static ReInitializeContainerRequest newInstance(ContainerId containerId, ContainerLaunchContext containerLaunchContext, boolean autoCommit) {
ReInitializeContainerRequest record = Records.newRecord(ReInitializeContainerRequest.class);
record.setContainerId(containerId);
record.setContainerLaunchContext(containerLaunchContext);
record.setAutoCommit(autoCommit);
return record;
} | 3.26 |
hadoop_ColumnRWHelper_getPutTimestamp_rdh | /**
* Figures out the cell timestamp used in the Put For storing.
* Will supplement the timestamp if required. Typically done for flow run
* table.If we supplement the timestamp, we left shift the timestamp and
* supplement it with the AppId id so that there are no collisions in the flow
* run table's cells.
*/
private static long getPutTimestamp(Long timestamp, boolean supplementTs, Attribute[] attributes) {
if (timestamp == null) {
timestamp = System.currentTimeMillis();
}
if (!supplementTs) {
return timestamp;
} else {
String appId = getAppIdFromAttributes(attributes);
long supplementedTS = TimestampGenerator.getSupplementedTimestamp(timestamp, appId);
return supplementedTS;
}
} | 3.26 |
hadoop_ColumnRWHelper_store_rdh | /**
* Sends a Mutation to the table. The mutations will be buffered and sent over
* the wire as part of a batch.
*
* @param rowKey
* identifying the row to write. Nothing gets written when null.
* @param tableMutator
* used to modify the underlying HBase table. Caller is
* responsible to pass a mutator for the table that actually has this
* column.
* @param qualifier
* column qualifier. Nothing gets written when null.
* @param timestamp
* version timestamp. When null the server timestamp will be
* used.
* @param attributes
* attributes for the mutation that are used by the
* coprocessor to set/read the cell tags.
* @param inputValue
* the value to write to the rowKey and column qualifier.
* Nothing gets written when null.
* @throws IOException
* if there is any exception encountered while doing
* store operation(sending mutation to the table).
*/
public static void store(byte[] rowKey, TypedBufferedMutator<?> tableMutator, ColumnPrefix<?> columnPrefix, String qualifier, Long timestamp, Object inputValue, Attribute... attributes) throws IOException {
// Null check
if (qualifier == null) {
throw new IOException("Cannot store column with null qualifier in " + tableMutator.getName().getNameAsString());
}
byte[] columnQualifier = columnPrefix.getColumnPrefixBytes(qualifier);
Attribute[] combinedAttributes = columnPrefix.getCombinedAttrsWithAggr(attributes);
store(rowKey,
tableMutator, columnPrefix.getColumnFamilyBytes(), columnQualifier, timestamp, columnPrefix.supplementCellTimeStamp(), inputValue, columnPrefix.getValueConverter(), combinedAttributes);
} | 3.26 |
hadoop_RollbackResponse_newInstance_rdh | /**
* Create new instance of a Rollback response.
*
* @return Rollback Response.
*/
@Private
@Unstable
public static RollbackResponse newInstance() {
return Records.newRecord(RollbackResponse.class);
} | 3.26 |
hadoop_HdfsFileStatus_build_rdh | /**
*
* @return An {@link HdfsFileStatus} instance from these parameters.
*/
public HdfsFileStatus build() {
if (((null == locations) && (!isdir)) && (null == symlink)) {
return new HdfsNamedFileStatus(length, isdir, replication, blocksize, f0, atime, f1, flags, f2, group, symlink, path, fileId, childrenNum, feInfo, storagePolicy, ecPolicy);
}
return new HdfsLocatedFileStatus(length, isdir, replication, blocksize, f0, atime, f1, flags, f2, group, symlink, path, fileId, childrenNum, feInfo, storagePolicy, ecPolicy, locations);
} | 3.26 |
hadoop_HdfsFileStatus_symlink_rdh | /**
* Set symlink bytes for this entity (default = null).
*
* @param symlink
* Symlink bytes (see
* {@link DFSUtilClient#bytes2String(byte[])})
* @return This Builder instance
*/
public Builder symlink(byte[] symlink) {
this.symlink = (null == symlink) ? null :
Arrays.copyOf(symlink, symlink.length);
return this;
} | 3.26 |
hadoop_HdfsFileStatus_storagePolicy_rdh | /**
* Set the storage policy for this entity
* (default = {@link HdfsConstants#BLOCK_STORAGE_POLICY_ID_UNSPECIFIED}).
*
* @param storagePolicy
* Storage policy
* @return This Builder instance
*/
public Builder storagePolicy(byte storagePolicy) {
this.storagePolicy = storagePolicy;
return this;
} | 3.26 |
hadoop_HdfsFileStatus_getLocalName_rdh | /**
* Get the string representation of the local name.
*
* @return the local name in string
*/
default String getLocalName() {
return DFSUtilClient.bytes2String(getLocalNameInBytes());
} | 3.26 |
hadoop_HdfsFileStatus_replication_rdh | /**
* Set the replication of this entity (default = 0).
*
* @param replication
* Number of replicas
* @return This Builder instance
*/
public Builder replication(int replication) {
this.replication = replication;
return this;
} | 3.26 |
hadoop_HdfsFileStatus_flags_rdh | /**
* Set {@link Flags} for this entity
* (default = {@link EnumSet#noneOf(Class)}).
*
* @param flags
* Flags
* @return This builder instance
*/
public Builder flags(EnumSet<Flags> flags) {
this.flags = flags;
return this;
} | 3.26 |
hadoop_HdfsFileStatus_makeQualified_rdh | /**
* Resolve the short name of the Path given the URI, parent provided. This
* FileStatus reference will not contain a valid Path until it is resolved
* by this method.
*
* @param defaultUri
* FileSystem to fully qualify HDFS path.
* @param parent
* Parent path of this element.
* @return Reference to this instance.
*/
default FileStatus makeQualified(URI defaultUri, Path parent) {
// fully-qualify path
setPath(m1(parent).makeQualified(defaultUri, null));
return ((FileStatus) (this)); // API compatibility
} | 3.26 |
hadoop_HdfsFileStatus_m1_rdh | /**
* Get the full path.
*
* @param parent
* the parent path
* @return the full path
*/
default Path m1(Path parent) {
if (isEmptyLocalName()) {
return parent;
}
return new Path(parent, getLocalName());
} | 3.26 |
hadoop_HdfsFileStatus_mtime_rdh | /**
* Set the modification time of this entity (default = 0).
*
* @param mtime
* Last modified time
* @return This Builder instance
*/
public Builder mtime(long mtime) {
this.f0 = mtime;
return this;
} | 3.26 |
hadoop_HdfsFileStatus_isdir_rdh | /**
* Set the isDir flag for the entity (default = false).
*
* @param isdir
* True if the referent is a directory.
* @return This Builder instance
*/
public Builder isdir(boolean isdir) {
this.isdir = isdir;
return this;
} | 3.26 |
hadoop_HdfsFileStatus_group_rdh | /**
* Set the group for this entity (default = null).
*
* @param group
* Group
* @return This Builder instance
*/
public Builder group(String group) {
this.group = group;
return this;
} | 3.26 |
hadoop_HdfsFileStatus_perm_rdh | /**
* Set the permission mask of this entity (default = null).
*
* @param permission
* Permission bitmask
* @return This Builder instance
*/
public Builder perm(FsPermission permission) {
this.f1 = permission;
return this;
} | 3.26 |
hadoop_HdfsFileStatus_m0_rdh | /**
* Set the length of the entity (default = 0).
*
* @param length
* Entity length
* @return This Builder instance
*/
public Builder m0(long length) {
this.length = length;
return this;
} | 3.26 |
hadoop_HdfsFileStatus_feInfo_rdh | /**
* Set the encryption info for this entity (default = null).
*
* @param feInfo
* Encryption info
* @return This Builder instance
*/
public Builder feInfo(FileEncryptionInfo feInfo) {
this.feInfo = feInfo;
return this;
} | 3.26 |
hadoop_HdfsFileStatus_blocksize_rdh | /**
* Set the blocksize of this entity (default = 0).
*
* @param blocksize
* Target, default blocksize
* @return This Builder instance
*/
public Builder blocksize(long blocksize) {
this.blocksize = blocksize;
return this;
} | 3.26 |
hadoop_HdfsFileStatus_getFullName_rdh | /**
* Get the string representation of the full path name.
*
* @param parent
* the parent path
* @return the full path in string
*/
default String getFullName(String parent) {
if (isEmptyLocalName()) {
return parent;
}
StringBuilder fullName = new StringBuilder(parent);
if (!parent.endsWith(Path.SEPARATOR)) {
fullName.append(Path.SEPARATOR);
}
fullName.append(getLocalName());
return fullName.toString();
} | 3.26 |
hadoop_HdfsFileStatus_locations_rdh | /**
* Set the block locations for this entity (default = null).
*
* @param locations
* HDFS locations
* (see {@link HdfsLocatedFileStatus#makeQualifiedLocated(URI, Path)})
* @return This Builder instance
*/
public Builder locations(LocatedBlocks locations) {
this.locations = locations;
return this;
} | 3.26 |
hadoop_HdfsFileStatus_isEmptyLocalName_rdh | /**
* Check if the local name is empty.
*
* @return true if the name is empty
*/
default boolean isEmptyLocalName() {
return getLocalNameInBytes().length == 0; } | 3.26 |
hadoop_HdfsFileStatus_atime_rdh | /**
* Set the access time of this entity (default = 0).
*
* @param atime
* Last accessed time
* @return This Builder instance
*/
public Builder atime(long atime) {
this.atime = atime;
return this;
} | 3.26 |
hadoop_HdfsFileStatus_fileId_rdh | /**
* Set the fileId for this entity (default = -1).
*
* @param fileId
* FileId
* @return This Builder instance
*/
public Builder fileId(long fileId) {
this.fileId = fileId;
return this;
} | 3.26 |
hadoop_HdfsFileStatus_children_rdh | /**
* Set the number of children for this entity (default = 0).
*
* @param childrenNum
* Number of children
* @return This Builder instance
*/
public Builder children(int childrenNum) {
this.childrenNum = childrenNum;
return this;
} | 3.26 |
hadoop_HdfsFileStatus_ecPolicy_rdh | /**
* Set the erasure coding policy for this entity (default = null).
*
* @param ecPolicy
* Erasure coding policy
* @return This Builder instance
*/
public Builder ecPolicy(ErasureCodingPolicy ecPolicy) {
this.ecPolicy = ecPolicy;
return this;
} | 3.26 |
hadoop_HdfsFileStatus_owner_rdh | /**
* Set the owner for this entity (default = null).
*
* @param owner
* Owner
* @return This Builder instance
*/
public Builder owner(String owner) {
this.f2 = owner;
return this;
} | 3.26 |
hadoop_HdfsFileStatus_convert_rdh | /**
* Set redundant flags for compatibility with existing applications.
*/
static FsPermission convert(boolean isdir, boolean symlink, FsPermission p, Set<Flags> f) {
if (p instanceof FsPermissionExtension) {
// verify flags are set consistently
assert p.getAclBit() == f.contains(HdfsFileStatus.Flags.HAS_ACL);
assert p.getEncryptedBit() == f.contains(HdfsFileStatus.Flags.HAS_CRYPT);
assert p.getErasureCodedBit() == f.contains(HdfsFileStatus.Flags.HAS_EC);
return p;
}
if (null == p) {
if (isdir) {
p = FsPermission.getDirDefault();
} else if (symlink) {
p = FsPermission.getDefault();
} else {
p = FsPermission.getFileDefault();
}
}
return new FsPermissionExtension(p, f.contains(Flags.HAS_ACL), f.contains(Flags.HAS_CRYPT), f.contains(Flags.HAS_EC));
} | 3.26 |
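The HdfsFileStatus rows above are all setters of one fluent Builder. A hedged chaining sketch follows; the no-argument Builder() constructor, the import location, and the owner/group values are assumptions, and the obfuscated setter name `m0` (length) is used exactly as it appears in these rows:

```java
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;   // assumed location of the class above

final class HdfsFileStatusBuilderSketch {
  static HdfsFileStatus sample() {
    // Assumes a public no-arg Builder constructor; values are illustrative only.
    return new HdfsFileStatus.Builder()
        .m0(1024L)                           // length (obfuscated setter name in the rows above)
        .isdir(false)
        .replication(3)
        .blocksize(128L * 1024 * 1024)
        .mtime(System.currentTimeMillis())
        .atime(System.currentTimeMillis())
        .owner("hdfs")                       // hypothetical owner/group
        .group("supergroup")
        .build();                            // no locations set, so build() returns the non-located variant
  }
}
```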
hadoop_YarnRegistryViewForProviders_registerSelf_rdh | /**
* Add a service under a path for the current user.
*
* @param record
* service record
* @param deleteTreeFirst
* perform recursive delete of the path first
* @return the path the service was created at
* @throws IOException
*/
public String registerSelf(ServiceRecord record, boolean deleteTreeFirst) throws IOException {
f0 = putService(user, serviceClass, instanceName, record, deleteTreeFirst);
setSelfRegistration(record);
return f0;
} | 3.26 |
hadoop_YarnRegistryViewForProviders_deleteComponent_rdh | /**
* Delete a component.
*
* @param containerId
* component name
* @throws IOException
*/
public void deleteComponent(ComponentInstanceId instanceId, String containerId) throws IOException {
String path = RegistryUtils.componentPath(user, serviceClass, instanceName, containerId);
LOG.info((instanceId + ": Deleting registry path ") + path);
registryOperations.delete(path, false);
} | 3.26 |
hadoop_YarnRegistryViewForProviders_putService_rdh | /**
* Add a service under a path, optionally purging any history.
*
* @param username
* user
* @param serviceClass
* service class to use under ~user
* @param serviceName
* name of the service
* @param record
* service record
* @param deleteTreeFirst
* perform recursive delete of the path first.
* @return the path the service was created at
* @throws IOException
*/
public String putService(String username, String serviceClass, String serviceName, ServiceRecord record, boolean deleteTreeFirst) throws IOException {
String path = RegistryUtils.servicePath(username, serviceClass, serviceName);
if (deleteTreeFirst) {
registryOperations.delete(path, true);
}
registryOperations.mknode(RegistryPathUtils.parentOf(path), true);
registryOperations.bind(path, record, BindFlags.OVERWRITE);
return path;
} | 3.26 |
hadoop_YarnRegistryViewForProviders_putComponent_rdh | /**
* Add a component.
*
* @param serviceClass
* service class to use under ~user
* @param componentName
* component name
* @param record
* record to put
* @throws IOException
*/
public void putComponent(String serviceClass, String serviceName, String componentName, ServiceRecord record) throws IOException {
String path = RegistryUtils.componentPath(user, serviceClass, serviceName, componentName);
String parentPath = RegistryPathUtils.parentOf(path);
if (!registryOperations.exists(parentPath)) {
registryOperations.mknode(parentPath, true);
}
registryOperations.bind(path, record, BindFlags.OVERWRITE);
} | 3.26 |
hadoop_YarnRegistryViewForProviders_m0_rdh | /**
* List components.
*
* @return a list of components
* @throws IOException
*/
public List<String> m0() throws IOException {
String path = RegistryUtils.componentListPath(user, serviceClass, instanceName);
return registryOperations.list(path);
} | 3.26 |
hadoop_YarnRegistryViewForProviders_getSelfRegistrationPath_rdh | /**
* Get the path to where the service has registered itself.
* Null until the service is registered
*
* @return the service registration path.
*/
public String getSelfRegistrationPath() {
return f0;
} | 3.26 |
hadoop_YarnRegistryViewForProviders_deleteChildren_rdh | /**
* Delete the children of a path -but not the path itself.
* It is not an error if the path does not exist
*
* @param path
* path to delete
* @param recursive
* flag to request recursive deletes
* @throws IOException
* IO problems
*/
public void deleteChildren(String path, boolean recursive) throws IOException {
List<String> childNames = null;
try {
childNames = registryOperations.list(path);
} catch (PathNotFoundException e) {
return;
}
for (String childName : childNames) {
String child = join(path, childName);
registryOperations.delete(child, recursive);
}
} | 3.26 |
hadoop_YarnRegistryViewForProviders_getComponent_rdh | /**
* Get a component.
*
* @param componentName
* component name
* @return the service record
* @throws IOException
*/
public ServiceRecord getComponent(String componentName) throws IOException {
String path = RegistryUtils.componentPath(user, serviceClass, instanceName, componentName);
LOG.info("Resolving path {}", path);
return registryOperations.resolve(path);
} | 3.26 |
hadoop_YarnRegistryViewForProviders_getAbsoluteSelfRegistrationPath_rdh | /**
* Get the absolute path to where the service has registered itself.
* This includes the base registry path
* Null until the service is registered
*
* @return the service registration path.
*/
public String getAbsoluteSelfRegistrationPath() {
if (f0 == null) {
return null;
}
String root = registryOperations.getConfig().getTrimmed(RegistryConstants.KEY_REGISTRY_ZK_ROOT, RegistryConstants.DEFAULT_ZK_REGISTRY_ROOT);
return RegistryPathUtils.join(root, f0);
} | 3.26 |
hadoop_WebAppProxyServer_startServer_rdh | /**
* Start proxy server.
*
* @return proxy server instance.
*/
protected static WebAppProxyServer startServer(Configuration configuration) throws Exception {
WebAppProxyServer proxy = new WebAppProxyServer();
ShutdownHookManager.get().addShutdownHook(new CompositeServiceShutdownHook(proxy), SHUTDOWN_HOOK_PRIORITY);
proxy.init(configuration);
proxy.start();
return proxy;
} | 3.26 |
hadoop_WebAppProxyServer_doSecureLogin_rdh | /**
* Log in as the Kerberos principal designated for the proxy
*
* @param conf
* the configuration holding this information in it.
* @throws IOException
* on any error.
*/
protected void doSecureLogin(Configuration conf) throws IOException {
InetSocketAddress socAddr = getBindAddress(conf);
SecurityUtil.login(conf, YarnConfiguration.PROXY_KEYTAB, YarnConfiguration.PROXY_PRINCIPAL, socAddr.getHostName());
} | 3.26 |
hadoop_WebAppProxyServer_getBindAddress_rdh | /**
* Retrieve PROXY bind address from configuration
*
* @param conf
* @return InetSocketAddress
*/
public static InetSocketAddress getBindAddress(Configuration conf) {
return conf.getSocketAddr(YarnConfiguration.PROXY_BIND_HOST, YarnConfiguration.PROXY_ADDRESS, YarnConfiguration.DEFAULT_PROXY_ADDRESS, YarnConfiguration.DEFAULT_PROXY_PORT);
} | 3.26 |
hadoop_ReplicaBeingWritten_hashCode_rdh | // Object
@Override
public int hashCode() {
return super.hashCode();
} | 3.26 |
hadoop_ReplicaBeingWritten_equals_rdh | // Object
@Override
public boolean equals(Object o) {
return super.equals(o);
} | 3.26 |
hadoop_ReplicaBeingWritten_getState_rdh | // ReplicaInfo
@Override
public ReplicaState getState() {
return ReplicaState.RBW;
} | 3.26 |
hadoop_MapReduceTrackingUriPlugin_m0_rdh | /**
* Gets the URI to access the given application on MapReduce history server
*
* @param id
* the ID for which a URI is returned
* @return the tracking URI
* @throws URISyntaxException
*/
@Override
public URI m0(ApplicationId id) throws URISyntaxException {
String jobSuffix = id.toString().replaceFirst("^application_", "job_");
String historyServerAddress = MRWebAppUtil.getJHSWebappURLWithScheme(getConf());
return new URI((historyServerAddress + "/jobhistory/job/") + jobSuffix);
} | 3.26 |
hadoop_Servers_parse_rdh | /**
* Parses a space and/or comma separated sequence of server specifications
* of the form <i>hostname</i> or <i>hostname:port</i>. If
* the specs string is null, defaults to localhost:defaultPort.
*
* @param specs
* server specs (see description)
* @param defaultPort
* the default port if not specified
* @return a list of InetSocketAddress objects.
*/
public static List<InetSocketAddress> parse(String specs, int defaultPort) {
List<InetSocketAddress> result = Lists.newArrayList();
if (specs == null) {
result.add(new InetSocketAddress("localhost", defaultPort));
} else {
String[] specStrings = specs.split("[ ,]+");
for (String specString : specStrings) {
result.add(NetUtils.createSocketAddr(specString, defaultPort));
}
}
return result;
} | 3.26 |
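A quick illustration of the spec format Servers.parse accepts; the host names are made up and the import reflects the helper's assumed package:

```java
import java.net.InetSocketAddress;
import java.util.List;
import org.apache.hadoop.metrics2.util.Servers;   // assumed location of the helper above

final class ServersParseSketch {
  public static void main(String[] args) {
    // Comma/space separated specs, with and without an explicit port.
    List<InetSocketAddress> addrs = Servers.parse("nn1.example.com:8020, nn2.example.com", 8020);
    System.out.println(addrs);                      // nn2.example.com falls back to port 8020
    System.out.println(Servers.parse(null, 8020));  // [localhost:8020]
  }
}
```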
hadoop_TrustedChannelResolver_isTrusted_rdh | /**
* Identify boolean value indicating whether a channel is trusted or not.
*
* @param peerAddress
* address of the peer
* @return true if the channel is trusted and false otherwise.
*/
public boolean isTrusted(InetAddress peerAddress) {
return false;
} | 3.26 |
hadoop_TrustedChannelResolver_getInstance_rdh | /**
* Returns an instance of TrustedChannelResolver.
* Looks up the configuration to see if there is custom class specified.
*
* @return TrustedChannelResolver
*/
public static TrustedChannelResolver getInstance(Configuration conf) {
Class<? extends TrustedChannelResolver> v0 = conf.getClass(HdfsClientConfigKeys.DFS_TRUSTEDCHANNEL_RESOLVER_CLASS, TrustedChannelResolver.class, TrustedChannelResolver.class);
return ReflectionUtils.newInstance(v0, conf);
} | 3.26 |
hadoop_SuccessData_joinMap_rdh | /**
* Join any map of string to value into a string, sorting the keys first.
*
* @param map
* map to join
* @param prefix
* prefix before every entry
* @param middle
* string between key and value
* @param suffix
* suffix to each entry
* @return a string for reporting.
*/
protected static String joinMap(Map<String, ?> map, String prefix, String middle, String suffix) {
if (map == null) {
return "";
}
List<String> list = new ArrayList<>(map.keySet());
Collections.sort(list);
StringBuilder sb = new StringBuilder(list.size() * 32);
for (String k : list) {
sb.append(prefix).append(k).append(middle).append(map.get(k)).append(suffix);
}
return sb.toString();
} | 3.26 |
hadoop_SuccessData_m0_rdh | /**
* Get the success flag.
*
* @return did the job succeed?
*/
public boolean m0() {
return success;
} | 3.26 |
hadoop_SuccessData_dumpDiagnostics_rdh | /**
* Dump the diagnostics (if any) to a string.
*
* @param prefix
* prefix before every entry
* @param middle
* string between key and value
* @param suffix
* suffix to each entry
* @return the dumped string
*/
public String dumpDiagnostics(String prefix, String middle, String suffix) {
return joinMap(diagnostics, prefix, middle, suffix);
} | 3.26 |
hadoop_SuccessData_dumpMetrics_rdh | /**
* Dump the metrics (if any) to a string.
* The metrics are sorted for ease of viewing.
*
* @param prefix
* prefix before every entry
* @param middle
* string between key and value
* @param suffix
* suffix to each entry
* @return the dumped string
*/
public String dumpMetrics(String prefix, String middle, String suffix) {
return joinMap(metrics, prefix, middle, suffix);
} | 3.26 |
hadoop_SuccessData_getHostname_rdh | /**
*
* @return host which created the file (implicitly: committed the work).
*/
| 3.26 |
hadoop_SuccessData_addDiagnostic_rdh | /**
* Add a diagnostics entry.
*
* @param key
* name
* @param value
* value
*/
public void addDiagnostic(String key, String value) {
diagnostics.put(key, value);
} | 3.26 |
hadoop_SuccessData_getJobId_rdh | /**
*
* @return Job ID, if known.
*/
public String getJobId() {
return jobId;
} | 3.26 |