name (stringlengths 12–178) | code_snippet (stringlengths 8–36.5k) | score (float64 3.26–3.68) |
---|---|---|
hadoop_TwoColumnLayout_content_rdh | /**
*
* @return the class that will render the content of the page.
*/
protected Class<? extends SubView> content() {
return LipsumBlock.class;
} | 3.26 |
hadoop_TwoColumnLayout_postHead_rdh | /**
* Do what needs to be done after the header is rendered.
*
* @param html
* the html to use to render.
*/
protected void postHead(Page.HTML<__> html) {
} | 3.26 |
hadoop_TwoColumnLayout_setTableStyles_rdh | /**
* Sets up a table to be a consistent style.
*
* @param html
* the HTML to use to render.
* @param tableId
* the ID of the table to set styles on.
* @param innerStyles
* any other styles to add to the table.
*/
protected void setTableStyles(Page.HTML<__> html, String tableId, String... innerStyles) {
  List<String> styles = Lists.newArrayList();
styles.add(join('#', tableId, "_paginate span {font-weight:normal}"));
styles.add(join('#', tableId, " .progress {width:8em}"));
styles.add(join('#', tableId, "_processing {top:-1.5em; font-size:1em;"));
styles.add(" color:#000; background:#fefefe}");
for (String style : innerStyles) {
styles.add(join('#', tableId, " ", style));
}
html.style(styles.toArray());
} | 3.26 |
hadoop_TwoColumnLayout_footer_rdh | /**
*
* @return the class that will render the footer.
 */
protected Class<? extends SubView> footer() {
return FooterBlock.class;
} | 3.26 |
hadoop_TwoColumnLayout_nav_rdh | /**
*
* @return the class that will render the navigation bar.
*/
protected Class<? extends SubView> nav() {
return NavBlock.class;
} | 3.26 |
hadoop_MultipleInputs_getInputFormatMap_rdh | /**
* Retrieves a map of {@link Path}s to the {@link InputFormat} class
* that should be used for them.
*
* @param conf
 * The configuration of the job
* @see #addInputPath(JobConf, Path, Class)
* @return A map of paths to inputformats for the job
 */
static Map<Path, InputFormat> getInputFormatMap(JobConf conf) {
  Map<Path, InputFormat> m = new HashMap<Path, InputFormat>();
String[] pathMappings = conf.get("mapreduce.input.multipleinputs.dir.formats").split(",");
for (String pathMapping : pathMappings) {
String[] split = pathMapping.split(";");
InputFormat inputFormat;
try {
inputFormat = ((InputFormat) (ReflectionUtils.newInstance(conf.getClassByName(split[1]), conf)));
} catch (ClassNotFoundException e) {
throw new RuntimeException(e);
}
m.put(new Path(split[0]), inputFormat);
}
return m;
} | 3.26 |
hadoop_MultipleInputs_getMapperTypeMap_rdh | /**
* Retrieves a map of {@link Path}s to the {@link Mapper} class that
* should be used for them.
*
* @param conf
 * The configuration of the job
* @see #addInputPath(JobConf, Path, Class, Class)
* @return A map of paths to mappers for the job
*/
@SuppressWarnings("unchecked")
static Map<Path, Class<? extends Mapper>> getMapperTypeMap(JobConf conf) {
  if (conf.get("mapreduce.input.multipleinputs.dir.mappers") == null) {
return Collections.emptyMap();
}
Map<Path, Class<? extends Mapper>> m = new HashMap<Path, Class<? extends Mapper>>();
String[] pathMappings = conf.get("mapreduce.input.multipleinputs.dir.mappers").split(",");
for (String pathMapping : pathMappings) {
String[] split = pathMapping.split(";");
Class<? extends Mapper> v13;
try {
  v13 = ((Class<? extends Mapper>) (conf.getClassByName(split[1])));
} catch (ClassNotFoundException e) {
throw new RuntimeException(e);
}
m.put(new Path(split[0]), v13);
}
return m;
} | 3.26 |
hadoop_MultipleInputs_addInputPath_rdh | /**
* Add a {@link Path} with a custom {@link InputFormat} and
* {@link Mapper} to the list of inputs for the map-reduce job.
*
* @param conf
* The configuration of the job
* @param path
* {@link Path} to be added to the list of inputs for the job
* @param inputFormatClass
* {@link InputFormat} class to use for this path
* @param mapperClass
* {@link Mapper} class to use for this path
*/
public static void addInputPath(JobConf conf, Path path, Class<? extends InputFormat> inputFormatClass, Class<? extends Mapper> mapperClass) {
addInputPath(conf, path, inputFormatClass);
String mapperMapping = (path.toString() + ";") + mapperClass.getName();
String mappers = conf.get("mapreduce.input.multipleinputs.dir.mappers");
conf.set("mapreduce.input.multipleinputs.dir.mappers", mappers == null ? mapperMapping : (mappers + ",") + mapperMapping);
conf.setMapperClass(DelegatingMapper.class);
} | 3.26 |
hadoop_CredentialInitializationException_retryable_rdh | /**
* This exception is not going to go away if you try calling it again.
*
* @return false, always.
*/
@Override
public boolean retryable() {
return false;
} | 3.26 |
hadoop_FileSystemStorageStatistics_isTracked_rdh | /**
* Return true if a statistic is being tracked.
*
* @return True only if the statistic is being tracked.
*/
@Override
public boolean isTracked(String key) {
for (String v2 : KEYS) {
if (v2.equals(key)) {
return true;
}
}
return false;
} | 3.26 |
hadoop_ResourceRequest_resourceName_rdh | /**
* Set the <code>resourceName</code> of the request.
*
* @see ResourceRequest#setResourceName(String)
* @param resourceName
* <code>resourceName</code> of the request
* @return {@link ResourceRequestBuilder}
*/
@Public
@Stable
public ResourceRequestBuilder resourceName(String resourceName) {
resourceRequest.setResourceName(resourceName);
return this;
} | 3.26 |
hadoop_ResourceRequest_setAllocationRequestId_rdh | /**
* Set the optional <em>ID</em> corresponding to this allocation request. This
* ID is an identifier for different {@code ResourceRequest}s from the <b>same
* application</b>. The allocated {@code Container}(s) received as part of the
* {@code AllocateResponse} response will have the ID corresponding to the
* original {@code ResourceRequest} for which the RM made the allocation.
* <p>
* The scheduler may return multiple {@code AllocateResponse}s corresponding
* to the same ID as and when scheduler allocates {@code Container}(s).
* <b>Applications</b> can continue to completely ignore the returned ID in
* the response and use the allocation for any of their outstanding requests.
* <p>
* If one wishes to replace an entire {@code ResourceRequest} corresponding to
* a specific ID, they can simply cancel the corresponding {@code ResourceRequest} and submit a new one afresh.
* <p>
* If the ID is not set, scheduler will continue to work as previously and all
* allocated {@code Container}(s) will have the default ID, -1.
*
* @param allocationRequestID
* the <em>ID</em> corresponding to this allocation
* request.
*/
@Public
@Evolving
public void setAllocationRequestId(long allocationRequestID) {
  throw new UnsupportedOperationException();
} | 3.26 |
hadoop_ResourceRequest_allocationRequestId_rdh | /**
* Set the <code>allocationRequestId</code> of the request.
*
* @see ResourceRequest#setAllocationRequestId(long)
* @param allocationRequestId
* <code>allocationRequestId</code> of the request
* @return {@link ResourceRequestBuilder}
*/
@Public
@Evolving
public ResourceRequestBuilder allocationRequestId(long allocationRequestId) {
  resourceRequest.setAllocationRequestId(allocationRequestId);
  return this;
} | 3.26 |
hadoop_ResourceRequest_capability_rdh | /**
* Set the <code>capability</code> of the request.
*
* @see ResourceRequest#setCapability(Resource)
* @param capability
* <code>capability</code> of the request
* @return {@link ResourceRequestBuilder}
*/
@Public
@Stable
public ResourceRequestBuilder capability(Resource capability) {
resourceRequest.setCapability(capability);
return this;
} | 3.26 |
hadoop_ResourceRequest_build_rdh | /**
* Return generated {@link ResourceRequest} object.
*
* @return {@link ResourceRequest}
 */
@Public
@Stable
public ResourceRequest build() {
return resourceRequest;
} | 3.26 |
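Taken together, the builder methods in these rows compose into one fluent request; a hedged sketch of typical use, where the priority, sizing and request ID values are arbitrary examples rather than values from the snippets:

```java
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;

public final class ResourceRequestExample {
  static ResourceRequest threeContainersAnywhere() {
    // Ask for 3 containers of 2 GB / 1 vcore with no locality constraint.
    return ResourceRequest.newBuilder()
        .priority(Priority.newInstance(1))
        .resourceName(ResourceRequest.ANY)         // "*"
        .capability(Resource.newInstance(2048, 1))
        .numContainers(3)
        .relaxLocality(true)
        .allocationRequestId(42L)
        .build();
  }
}
```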
hadoop_ResourceRequest_relaxLocality_rdh | /**
* Set the <code>relaxLocality</code> of the request.
*
* @see ResourceRequest#setRelaxLocality(boolean)
* @param relaxLocality
* <code>relaxLocality</code> of the request
* @return {@link ResourceRequestBuilder}
*/
@Public
@Stable
public ResourceRequestBuilder relaxLocality(boolean relaxLocality) {
  resourceRequest.setRelaxLocality(relaxLocality);
  return this;
} | 3.26 |
hadoop_ResourceRequest_priority_rdh | /**
* Set the <code>priority</code> of the request.
*
* @see ResourceRequest#setPriority(Priority)
* @param priority
* <code>priority</code> of the request
* @return {@link ResourceRequestBuilder}
*/
@Public
@Stable
public ResourceRequestBuilder priority(Priority priority) {
resourceRequest.setPriority(priority);
return this;
} | 3.26 |
hadoop_ResourceRequest_nodeLabelExpression_rdh | /**
* Set the <code>nodeLabelExpression</code> of the request.
*
* @see ResourceRequest#setNodeLabelExpression(String)
* @param nodeLabelExpression
* <code>nodeLabelExpression</code> of the request
* @return {@link ResourceRequestBuilder}
*/
@Public
@Evolving
public ResourceRequestBuilder nodeLabelExpression(String nodeLabelExpression) {
resourceRequest.setNodeLabelExpression(nodeLabelExpression);
  return this;
} | 3.26 |
hadoop_ResourceRequest_numContainers_rdh | /**
* Set the <code>numContainers</code> of the request.
*
* @see ResourceRequest#setNumContainers(int)
* @param numContainers
* <code>numContainers</code> of the request
* @return {@link ResourceRequestBuilder}
 */
@Public
@Stable
public ResourceRequestBuilder numContainers(int numContainers) {
resourceRequest.setNumContainers(numContainers);
return this;
} | 3.26 |
hadoop_ResourceRequest_getAllocationRequestId_rdh | /**
* Get the optional <em>ID</em> corresponding to this allocation request. This
* ID is an identifier for different {@code ResourceRequest}s from the <b>same
* application</b>. The allocated {@code Container}(s) received as part of the
* {@code AllocateResponse} response will have the ID corresponding to the
* original {@code ResourceRequest} for which the RM made the allocation.
* <p>
* The scheduler may return multiple {@code AllocateResponse}s corresponding
* to the same ID as and when scheduler allocates {@code Container}(s).
* <b>Applications</b> can continue to completely ignore the returned ID in
* the response and use the allocation for any of their outstanding requests.
* <p>
* If one wishes to replace an entire {@code ResourceRequest} corresponding to
* a specific ID, they can simply cancel the corresponding {@code ResourceRequest} and submit a new one afresh.
*
* @return the <em>ID</em> corresponding to this allocation request.
*/
@Public
@Evolving
public long getAllocationRequestId() {
throw new UnsupportedOperationException();
} | 3.26 |
hadoop_ResourceRequest_clone_rdh | /**
* Clone a ResourceRequest object (shallow copy). Please keep it loaded with
* all (new) fields
*
* @param rr
* the object to copy from
* @return the copied object
*/
@Public
@Evolving
public static ResourceRequest clone(ResourceRequest rr) {
// Please keep it loaded with all (new) fields
return ResourceRequest.newBuilder()
    .priority(rr.getPriority())
    .resourceName(rr.m1())
    .capability(rr.getCapability())
    .numContainers(rr.getNumContainers())
    .relaxLocality(rr.getRelaxLocality())
    .nodeLabelExpression(rr.getNodeLabelExpression())
    .executionTypeRequest(rr.getExecutionTypeRequest())
    .allocationRequestId(rr.getAllocationRequestId())
    .build();
} | 3.26 |
hadoop_ResourceRequest_m2_rdh | /**
* Set the <code>ExecutionTypeRequest</code> of the requested container.
*
* @param execSpec
* ExecutionTypeRequest of the requested container
*/
@Public
@Evolving
public void m2(ExecutionTypeRequest execSpec) {
  throw new UnsupportedOperationException();
} | 3.26 |
hadoop_SchedulingResponse_getApplicationId_rdh | /**
* Get Application Id.
*
* @return Application Id.
*/
public ApplicationId getApplicationId() {
return this.applicationId;
} | 3.26 |
hadoop_SchedulingResponse_isSuccess_rdh | /**
* Returns true if Scheduler was able to accept and commit this request.
*
* @return isSuccessful.
*/
public boolean isSuccess() {
return this.isSuccess;
} | 3.26 |
hadoop_SchedulingResponse_getSchedulingRequest_rdh | /**
* Get Scheduling Request.
*
* @return Scheduling Request.
*/
public SchedulingRequest getSchedulingRequest() {
  return this.schedulingRequest;
} | 3.26 |
hadoop_MultipleOutputFormat_getRecordWriter_rdh | /**
* Create a composite record writer that can write key/value data to different
* output files
*
* @param fs
* the file system to use
* @param job
* the job conf for the job
* @param name
 * the leaf file name for the output file (such as "part-00000")
* @param arg3
* a progressable for reporting progress.
* @return a composite record writer
* @throws IOException
*/
public RecordWriter<K, V> getRecordWriter(FileSystem fs, JobConf job, String name, Progressable arg3) throws IOException {
final FileSystem myFS = fs;
final String myName = generateLeafFileName(name);
final JobConf myJob = job;
final Progressable myProgressable = arg3;
return new RecordWriter<K, V>() {
// a cache storing the record writers for different output files.
TreeMap<String, RecordWriter<K, V>> recordWriters = new TreeMap<String, RecordWriter<K, V>>();
public void write(K key, V value) throws IOException {
// get the file name based on the key
String v4 = generateFileNameForKeyValue(key, value, myName);
// get the file name based on the input file name
String finalPath = getInputFileBasedOutputFileName(myJob, v4);
// get the actual key
K actualKey = generateActualKey(key, value);
V actualValue = generateActualValue(key, value);
RecordWriter<K, V> rw = this.recordWriters.get(finalPath);
if (rw == null) {
// if we don't have the record writer yet for the final path, create
// one
// and add it to the cache
rw = getBaseRecordWriter(myFS, myJob, finalPath, myProgressable);
this.recordWriters.put(finalPath, rw);
}
rw.write(actualKey, actualValue);
}
public void close(Reporter reporter) throws IOException {
Iterator<String> keys = this.recordWriters.keySet().iterator();
while (keys.hasNext()) {
RecordWriter<K, V> rw = this.recordWriters.get(keys.next());
rw.close(reporter);
}
this.recordWriters.clear();
}
};
} | 3.26 |
hadoop_MultipleOutputFormat_generateFileNameForKeyValue_rdh | /**
* Generate the file output file name based on the given key and the leaf file
* name. The default behavior is that the file name does not depend on the
* key.
*
* @param key
* the key of the output data
* @param name
* the leaf file name
* @return generated file name
*/
protected String generateFileNameForKeyValue(K key, V value, String name) {
return name;
} | 3.26 |
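generateFileNameForKeyValue is the usual extension point of MultipleOutputFormat: overriding it routes each record into a per-key file while getRecordWriter (above) caches one writer per resolved path. A hedged sketch against the concrete MultipleTextOutputFormat subclass; the key-as-directory naming scheme is just an example:

```java
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.lib.MultipleTextOutputFormat;

/** Writes each record under a directory named after its key, e.g. 2024-05-01/part-00000. */
public class KeyPartitionedTextOutputFormat extends MultipleTextOutputFormat<Text, Text> {
  @Override
  protected String generateFileNameForKeyValue(Text key, Text value, String name) {
    return key.toString() + "/" + name;
  }
}
```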
hadoop_MultipleOutputFormat_generateLeafFileName_rdh | /**
* Generate the leaf name for the output file name. The default behavior does
* not change the leaf file name (such as part-00000)
*
* @param name
* the leaf file name for the output file
* @return the given leaf file name
*/
protected String generateLeafFileName(String name) {
return name;
} | 3.26 |
hadoop_MultipleOutputFormat_generateActualValue_rdh | /**
* Generate the actual value from the given key and value. The default behavior is that
* the actual value is equal to the given value
*
* @param key
* the key of the output data
* @param value
* the value of the output data
* @return the actual value derived from the given key/value
*/
protected V generateActualValue(K key, V value) {
return value;
}
/**
* Generate the outfile name based on a given name and the input file name. If
* the {@link JobContext#MAP_INPUT_FILE} | 3.26 |
hadoop_RegexMountPoint_resolve_rdh | /**
* Get resolved path from regex mount points.
* E.g. link: ^/user/(?<username>\\w+) => s3://$user.apache.com/_${user}
* srcPath: is /user/hadoop/dir1
* resolveLastComponent: true
* then return value is s3://hadoop.apache.com/_hadoop
*
* @param srcPath
* - the src path to resolve
* @param resolveLastComponent
* - whether resolve the path after last `/`
* @return mapped path of the mount point.
*/
public InodeTree.ResolveResult<T> resolve(final String srcPath, final boolean resolveLastComponent) {
String pathStrToResolve = getPathToResolve(srcPath, resolveLastComponent);
for (RegexMountPointInterceptor interceptor : interceptorList) {
pathStrToResolve = interceptor.interceptSource(pathStrToResolve);
}
LOGGER.debug((("Path to resolve:" + pathStrToResolve) + ", srcPattern:") + getSrcPathRegex());
Matcher srcMatcher = getSrcPattern().matcher(pathStrToResolve);
String parsedDestPath = getDstPath();
int mappedCount = 0;
String resolvedPathStr = "";
while (srcMatcher.find()) {
resolvedPathStr = pathStrToResolve.substring(0, srcMatcher.end());
Map<String, Set<String>> varMap = getVarInDestPathMap();
for (Map.Entry<String, Set<String>> entry : varMap.entrySet()) {
  String regexGroupNameOrIndexStr = entry.getKey();
Set<String> groupRepresentationStrSetInDest = entry.getValue();
parsedDestPath = replaceRegexCaptureGroupInPath(parsedDestPath, srcMatcher, regexGroupNameOrIndexStr, groupRepresentationStrSetInDest);
}
  ++mappedCount;
}
if (0 == mappedCount) {
  return null;
}
Path remainingPath = getRemainingPathStr(srcPath, resolvedPathStr);
for (RegexMountPointInterceptor interceptor : interceptorList) {
  parsedDestPath = interceptor.interceptResolvedDestPathStr(parsedDestPath);
  remainingPath = interceptor.interceptRemainingPath(remainingPath);
}
InodeTree.ResolveResult resolveResult = inodeTree.buildResolveResultForRegexMountPoint(ResultKind.EXTERNAL_DIR, resolvedPathStr, parsedDestPath, remainingPath);
return resolveResult;
} | 3.26 |
hadoop_RegexMountPoint_initialize_rdh | /**
* Initialize regex mount point.
*
* @throws IOException
*/
public void initialize() throws IOException {
try {
srcPattern = Pattern.compile(srcPathRegex);
} catch (PatternSyntaxException ex) {
throw new IOException((("Failed to initialized mount point due to bad src path regex:" + srcPathRegex) + ", dstPath:") + dstPath, ex);
}
varInDestPathMap = getVarListInString(dstPath);
initializeInterceptors();
} | 3.26 |
hadoop_RegexMountPoint_getRegexGroupValueFromMather_rdh | /**
* Get matched capture group value from regex matched string. E.g.
 * Regex: ^/user/(?<username>\\w+), regexGroupNameOrIndexStr: username
 * then /user/hadoop should return hadoop when calling
 * getRegexGroupValueFromMather(matcher, username)
 * or getRegexGroupValueFromMather(matcher, 1)
*
* @param srcMatcher
* - the matcher to be use
* @param regexGroupNameOrIndexStr
* - the regex group name or index
* @return - Null if no matched group named regexGroupNameOrIndexStr found.
*/
private String getRegexGroupValueFromMather(Matcher srcMatcher, String regexGroupNameOrIndexStr) {
if (regexGroupNameOrIndexStr.matches("\\d+")) {
// group index
int groupIndex = Integer.parseUnsignedInt(regexGroupNameOrIndexStr);
if ((groupIndex >= 0) && (groupIndex <= srcMatcher.groupCount())) {
return srcMatcher.group(groupIndex);
}
} else {
// named group in regex
return srcMatcher.group(regexGroupNameOrIndexStr);
}
  return null;
} | 3.26 |
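The named-versus-indexed branch above follows plain java.util.regex semantics; a standalone sketch of both lookups against the pattern used in the javadoc (the sample path is illustrative):

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class RegexGroupLookupDemo {
  public static void main(String[] args) {
    Matcher m = Pattern.compile("^/user/(?<username>\\w+)").matcher("/user/hadoop");
    if (m.find()) {
      System.out.println(m.group("username")); // lookup by group name  -> hadoop
      System.out.println(m.group(1));          // lookup by group index -> hadoop
    }
  }
}
```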
hadoop_RegexMountPoint_getVarListInString_rdh | /**
* Get $var1 and $var2 style variables in string.
*
* @param input
* - the string to be process.
* @return */
public static Map<String, Set<String>> getVarListInString(String input) {
Map<String, Set<String>> varMap = new HashMap<>();
Matcher matcher = VAR_PATTERN_IN_DEST.matcher(input);
while (matcher.find()) {
// $var or ${var}
String varName = matcher.group(0);
// var or {var}
String strippedVarName = matcher.group(1);
if (strippedVarName.startsWith("{")) {
// {varName} = > varName
strippedVarName = strippedVarName.substring(1, strippedVarName.length() - 1);
}
varMap.putIfAbsent(strippedVarName, new HashSet<>());
varMap.get(strippedVarName).add(varName);
} | 3.26 |
hadoop_AutoRefreshNoHARMFailoverProxyProvider_performFailover_rdh | /**
* Stop the current proxy when performFailover.
*
* @param currentProxy
* currentProxy.
*/
@Override
public synchronized void performFailover(T currentProxy) {
RPC.stopProxy(proxy);
  proxy = null;
} | 3.26 |
hadoop_TextOutputFormat_writeObject_rdh | /**
* Write the object to the byte stream, handling Text as a special
* case.
*
* @param o
* the object to print
* @throws IOException
* if the write throws, we pass it on
*/
private void writeObject(Object o) throws IOException {
if (o instanceof Text) {
Text to = ((Text) (o));
  out.write(to.getBytes(), 0, to.getLength());
} else {
out.write(o.toString().getBytes(StandardCharsets.UTF_8));
}
} | 3.26 |
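The Text special case above exists because a Text object may own a backing array longer than its logical length, so only the first getLength() bytes are valid. A small sketch of the two code paths; the stream setup and values are illustrative:

```java
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.io.Text;

public class TextWriteDemo {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buffer);

    Text key = new Text("key");
    out.write(key.getBytes(), 0, key.getLength());                // Text path: raw valid bytes only
    out.write("\t".getBytes(StandardCharsets.UTF_8));
    out.write(Integer.valueOf(42).toString().getBytes(StandardCharsets.UTF_8)); // generic path

    System.out.println(buffer.toString("UTF-8"));                 // key<TAB>42
  }
}
```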
hadoop_Exec_getOutput_rdh | /**
* Get every line consumed from the input.
*
* @return Every line consumed from the input
*/
public List<String> getOutput() {
return output;
} | 3.26 |
hadoop_Exec_run_rdh | /**
* Runs the specified command and saves each line of the command's output to
* the given list and each line of the command's stderr to the other list.
*
* @param command
* List containing command and all arguments
* @param output
* List in/out parameter to receive command output
* @param errors
* List in/out parameter to receive command stderr
* @return int exit code of command
*/
public int run(List<String> command, List<String> output, List<String> errors) {
int retCode = 1;
ProcessBuilder pb = new ProcessBuilder(command);
try {
Process p = pb.start();
OutputBufferThread stdOut = new OutputBufferThread(p.getInputStream());
  OutputBufferThread stdErr = new OutputBufferThread(p.getErrorStream());
  stdOut.start();
stdErr.start();
retCode = p.waitFor();
if (retCode != 0) {
mojo.getLog().warn((command + " failed with error code ") + retCode);
  for (String s : stdErr.getOutput()) {
    mojo.getLog().debug(s);
}
}
stdOut.join();
stdErr.join();
output.addAll(stdOut.getOutput());
if (errors != null) {
errors.addAll(stdErr.getOutput());
}
}
catch (IOException ioe) {
  mojo.getLog().warn((command + " failed: ") + ioe.toString());
} catch (InterruptedException ie) {
  mojo.getLog().warn((command + " failed: ") + ie.toString());
}
return retCode;
} | 3.26 |
hadoop_Exec_addEnvironment_rdh | /**
* Add environment variables to a ProcessBuilder.
*
* @param pb
* The ProcessBuilder
* @param env
* A map of environment variable names to values.
*/
public static void addEnvironment(ProcessBuilder pb, Map<String, String> env) {
  if (env == null) {
return;
}
Map<String, String> v7 = pb.environment();
for (Map.Entry<String, String> entry : env.entrySet()) {
String val = entry.getValue();
if (val == null) {
  val = "";
}
v7.put(entry.getKey(), val);
}
} | 3.26 |
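addEnvironment simply merges the map into ProcessBuilder.environment(), substituting an empty string for null values. A hedged standalone sketch of the same merge; the printenv command assumes a Unix-like environment:

```java
import java.util.HashMap;
import java.util.Map;

public class EnvMergeDemo {
  public static void main(String[] args) throws Exception {
    Map<String, String> extra = new HashMap<>();
    extra.put("JAVA_HEAP_MAX", "-Xmx1g");
    extra.put("OPTIONAL_FLAG", null);   // becomes "" just like addEnvironment()

    ProcessBuilder pb = new ProcessBuilder("printenv", "JAVA_HEAP_MAX");
    Map<String, String> env = pb.environment();
    for (Map.Entry<String, String> entry : extra.entrySet()) {
      env.put(entry.getKey(), entry.getValue() == null ? "" : entry.getValue());
    }
    pb.inheritIO().start().waitFor();   // prints -Xmx1g
  }
}
```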
hadoop_Exec_envToString_rdh | /**
* Pretty-print the environment to a StringBuilder.
*
* @param env
* A map of environment variable names to values to print.
* @return The pretty-printed string.
 */
public static String envToString(Map<String, String> env) {
  StringBuilder bld = new StringBuilder();
  bld.append("{");
  if (env != null) {
for (Map.Entry<String, String> entry : env.entrySet()) {
String val = entry.getValue();
if (val == null) {
val = "";
}
bld.append("\n ").append(entry.getKey()).append(" = '").append(val).append("'\n");
}
}
bld.append("}");
return bld.toString();
} | 3.26 |
hadoop_TaskAttemptContextImpl_getStatus_rdh | /**
* Get the last set status message.
*
* @return the current status message
*/
public String getStatus() {
return status;
} | 3.26 |
hadoop_TaskAttemptContextImpl_progress_rdh | /**
* Report progress.
*/
@Override
public void progress() {
reporter.progress();
} | 3.26 |
hadoop_TaskAttemptContextImpl_setStatus_rdh | /**
* Set the current status of the task to the given string.
*/
@Override
public void setStatus(String status) {
String normalizedStatus = Task.normalizeStatus(status, conf);
setStatusString(normalizedStatus);
reporter.setStatus(normalizedStatus);
} | 3.26 |
hadoop_TaskAttemptContextImpl_getTaskAttemptID_rdh | /**
* Get the unique name for this task attempt.
*/
public TaskAttemptID getTaskAttemptID() {
return taskId;
} | 3.26 |
hadoop_ValidationFailure_verify_rdh | /**
* Verify that a condition holds.
*
* @param expression
* expression which must be true
* @param message
* message to raise on a failure
* @param args
* arguments for the message formatting
* @throws ValidationFailure
* on a failure
*/
public static void verify(boolean expression, String message, Object... args)
    throws ValidationFailure {
  if (!expression) {
    throw new ValidationFailure(message, args);
  }
} | 3.26 |
hadoop_HttpFSAuthenticationFilter_getConfiguration_rdh | /**
* Returns the hadoop-auth configuration from HttpFSServer's configuration.
* <p>
* It returns all HttpFSServer's configuration properties prefixed with
* <code>hadoop.http.authentication</code>. The
* <code>hadoop.http.authentication</code> prefix is removed from the
* returned property names.
*
* @param configPrefix
* parameter not used.
* @param filterConfig
* parameter not used.
* @return hadoop-auth configuration read from HttpFSServer's configuration.
*/
@Override
protected Properties getConfiguration(String configPrefix, FilterConfig filterConfig) throws ServletException {
Configuration conf = HttpFSServerWebApp.get().getConfig();
Properties props = HttpServer2.getFilterProperties(conf, new ArrayList<>(Arrays.asList(CONF_PREFIXES)));
String signatureSecretFile = props.getProperty(SIGNATURE_SECRET_FILE, null);
if (signatureSecretFile == null) {
throw new RuntimeException("Undefined property: " + SIGNATURE_SECRET_FILE);
}
if (!isRandomSecret(filterConfig)) {
try (Reader reader = new InputStreamReader(Files.newInputStream(Paths.get(signatureSecretFile)), StandardCharsets.UTF_8)) {
StringBuilder secret = new StringBuilder();
int c = reader.read();
while (c > (-1)) {
secret.append(((char) (c)));
c = reader.read();
}
String secretString = secret.toString();
if (secretString.isEmpty()) {
throw new RuntimeException("No secret in HttpFs signature secret file: " + signatureSecretFile);
}
    props.setProperty(AuthenticationFilter.SIGNATURE_SECRET, secretString);
  } catch (IOException ex) {
throw new RuntimeException(("Could not read HttpFS signature " + "secret file: ") + signatureSecretFile);
}
}
setAuthHandlerClass(props);
String dtkind = WebHdfsConstants.WEBHDFS_TOKEN_KIND.toString();
if (conf.getBoolean(HttpFSServerWebServer.SSL_ENABLED_KEY, false)) {
dtkind = WebHdfsConstants.SWEBHDFS_TOKEN_KIND.toString();
}
props.setProperty(KerberosDelegationTokenAuthenticationHandler.TOKEN_KIND, dtkind);
return props;
} | 3.26 |
hadoop_TaskPool_revertWith_rdh | /**
* Task to revert with after another task failed.
*
* @param task
* task to execute
* @return the builder
*/
public Builder<I> revertWith(Task<I, ?> task) {
this.revertTask = task;
return this;
} | 3.26 |
hadoop_TaskPool_m0_rdh | /**
* Throw one exception, adding the others as suppressed
* exceptions attached to the one thrown.
* This method never completes normally.
*
* @param exceptions
* collection of exceptions
* @param <E>
* class of exceptions
* @throws E
* an extracted exception.
 */
private static <E extends Exception> void m0(Collection<Exception> exceptions) throws E {
Iterator<Exception> iter = exceptions.iterator();
Exception e = iter.next();
Class<? extends Exception> exceptionClass = e.getClass();
while (iter.hasNext()) {
Exception other = iter.next();
if (!exceptionClass.isInstance(other)) {
e.addSuppressed(other);
}
}
TaskPool.<E>castAndThrow(e);
} | 3.26 |
hadoop_TaskPool_run_rdh | /**
* Execute the task across the data.
*
* @param task
* task to execute
* @param <E>
* exception which may be raised in execution.
* @return true if the operation executed successfully
* @throws E
* any exception raised.
* @throws IOException
* IOExceptions raised by remote iterator or in execution.
*/
public <E extends Exception> boolean run(Task<I, E> task) throws E, IOException {
requireNonNull(items, "items");
if (!items.hasNext()) {
// if there are no items, return without worrying about
// execution pools, errors etc.
return true;
}
if (service != null) {
// thread pool, so run in parallel
return runParallel(task);
} else {
// single threaded execution.
return runSingleThreaded(task);
  }
} | 3.26 |
hadoop_TaskPool_castAndThrow_rdh | /**
* Raise an exception of the declared type.
* This method never completes normally.
*
* @param e
* exception
* @param <E>
* class of exceptions
* @throws E
* a recast exception.
*/
@SuppressWarnings("unchecked")
private static <E extends Exception> void castAndThrow(Exception e) throws E {
if (e instanceof RuntimeException) {
  throw ((RuntimeException) (e));
}
throw ((E) (e));
} | 3.26 |
hadoop_TaskPool_runSingleThreaded_rdh | /**
* Single threaded execution.
*
* @param task
* task to execute
* @param <E>
* exception which may be raised in execution.
* @return true if the operation executed successfully
* @throws E
* any exception raised.
* @throws IOException
* IOExceptions raised by remote iterator or in execution.
*/
private <E extends Exception> boolean runSingleThreaded(Task<I, E> task) throws E, IOException {
List<I> succeeded = new ArrayList<>();
List<Exception> exceptions = new ArrayList<>();
RemoteIterator<I> iterator = items;
boolean threw = true;
try {
while (iterator.hasNext()) {
I item = iterator.next();
try {
task.run(item);
    succeeded.add(item);
  } catch (Exception e) {
exceptions.add(e);
if (onFailure != null) {
try {
onFailure.run(item, e);
} catch (Exception failException) {
LOG.error("Failed to clean up on failure", e);
// keep going
}
}
if (f0) {
break;
}
}
}
threw = false;
} catch (IOException iteratorIOE) {
  // an IOE is raised here during iteration
LOG.debug("IOException when iterating through {}", iterator, iteratorIOE);
throw iteratorIOE;
} finally {
// threw handles exceptions that were *not* caught by the catch block,
// and exceptions that were caught and possibly handled by onFailure
// are kept in exceptions.
if (threw || (!exceptions.isEmpty())) {
if (revertTask != null) {
boolean failed = false;
for (I item : succeeded) {
try {
revertTask.run(item);
} catch (Exception e) {
LOG.error("Failed to revert task", e);
failed = true;
// keep going
}
if (stopRevertsOnFailure && failed) {
break;
}
}
}
if (abortTask != null) {
  boolean failed = false;
  while (iterator.hasNext()) {
    try {
abortTask.run(iterator.next());
} catch (Exception e) {
failed = true;
LOG.error("Failed to abort task", e);
// keep going
}
if (stopAbortsOnFailure && failed) {
break;
}
}
    }
  }
}
if ((!suppressExceptions) && (!exceptions.isEmpty())) {
TaskPool.<E>m0(exceptions);
}
return exceptions.isEmpty();
} | 3.26 |
hadoop_TaskPool_stopRevertsOnFailure_rdh | /**
* Stop trying to revert if one operation fails.
*
* @return the builder
*/
public Builder<I> stopRevertsOnFailure() {
this.stopRevertsOnFailure = true;
return this;
} | 3.26 |
hadoop_TaskPool_foreach_rdh | /**
* Create a task builder for the remote iterator.
*
* @param items
* item source.
* @param <I>
* type of result.
* @return builder.
*/
public static <I> Builder<I> foreach(RemoteIterator<I> items) {
return new Builder<>(items);
} | 3.26 |
hadoop_TaskPool_sleepInterval_rdh | /**
* Set the sleep interval.
*
* @param value
* new value
* @return the builder
*/
public Builder<I> sleepInterval(final int value) {
sleepInterval = value;
return this;
} | 3.26 |
hadoop_TaskPool_onFailure_rdh | /**
* Task to invoke on failure.
*
* @param task
* task
* @return the builder
*/
public Builder<I> onFailure(FailureTask<I, ?> task) {
this.onFailure = task;
return this;
} | 3.26 |
hadoop_TaskPool_stopAbortsOnFailure_rdh | /**
* Stop trying to abort if one operation fails.
*
* @return the builder
*/
public Builder<I> stopAbortsOnFailure() {
this.stopAbortsOnFailure = true;
return this;
} | 3.26 |
hadoop_TaskPool_resetStatisticsContext_rdh | /**
* Reset the statistics context if it was set earlier.
* This unbinds the current thread from any statistics
* context.
*/
private void resetStatisticsContext() {
if (ioStatisticsContext != null) {
IOStatisticsContext.setThreadIOStatisticsContext(null);
}
} | 3.26 |
hadoop_TaskPool_suppressExceptions_rdh | /**
* Suppress exceptions from tasks.
* RemoteIterator exceptions are not suppressable.
*
* @param suppress
* new value
* @return the builder.
*/
public Builder<I> suppressExceptions(boolean suppress) {
  this.suppressExceptions = suppress;
  return this;
} | 3.26 |
hadoop_TaskPool_executeWith_rdh | /**
* Declare executor service: if null, the tasks are executed in a single
* thread.
*
* @param submitter
* service to schedule tasks with.
* @return this builder.
*/
public Builder<I> executeWith(@Nullable Submitter submitter) {
this.service = submitter;
return this;
} | 3.26 |
hadoop_TaskPool_abortWith_rdh | /**
* Task to abort with after another task failed.
*
* @param task
* task to execute
* @return the builder
*/
public Builder<I> abortWith(Task<I, ?> task) {
this.abortTask = task;
return this;
} | 3.26 |
hadoop_TaskPool_runParallel_rdh | /**
* Parallel execution.
* All tasks run within the same IOStatisticsContext as the
* thread calling this method.
*
* @param task
* task to execute
* @param <E>
* exception which may be raised in execution.
* @return true if the operation executed successfully
* @throws E
* any exception raised.
* @throws IOException
* IOExceptions raised by remote iterator or in execution.
*/
private <E extends Exception> boolean runParallel(final Task<I, E> task) throws E, IOException {
final Queue<I> succeeded = new ConcurrentLinkedQueue<>();
final Queue<Exception> exceptions = new ConcurrentLinkedQueue<>();
final AtomicBoolean taskFailed = new AtomicBoolean(false);
final AtomicBoolean abortFailed = new AtomicBoolean(false);
final AtomicBoolean revertFailed = new AtomicBoolean(false);
List<Future<?>> futures = new ArrayList<>();
ioStatisticsContext = IOStatisticsContext.getCurrentIOStatisticsContext();
IOException iteratorIOE = null;
final RemoteIterator<I> iterator = this.items;
try {
while (iterator.hasNext()) {
final I item = iterator.next();
// submit a task for each item that will either run or abort the task
futures.add(service.submit(() -> {
setStatisticsContext();
try {
if (!(f0 && taskFailed.get())) {
// prepare and run the task
boolean threw = true;
try {
LOG.debug("Executing task");
task.run(item);
succeeded.add(item);
LOG.debug("Task succeeded");
threw = false;
} catch (Exception e) {
taskFailed.set(true);
exceptions.add(e);
LOG.info("Task failed {}", e.toString());
LOG.debug("Task failed", e);
if (onFailure != null) {
try {
onFailure.run(item, e);
} catch (Exception failException) {
LOG.warn("Failed to clean up on failure", e);
// swallow the exception
}
}
} finally {
if (threw) {
taskFailed.set(true);
}
}
} else if (abortTask != null) {
// abort the task instead of running it
if (stopAbortsOnFailure && abortFailed.get()) {
  return;
}
boolean failed = true;
try {
LOG.info("Aborting task");
abortTask.run(item);
failed = false;
} catch (Exception e) {
LOG.error("Failed to abort task", e);
// swallow the exception
} finally {
if (failed) {
abortFailed.set(true);
}
}
}
} finally {
resetStatisticsContext();
}
}));
}
} catch (IOException e) {
// iterator failure.
LOG.debug("IOException when iterating through {}", iterator, e);
iteratorIOE = e;
// mark as a task failure so all submitted tasks will halt/abort
taskFailed.set(true);
}
// let the above tasks complete (or abort)
waitFor(futures, sleepInterval);
int futureCount = futures.size();
futures.clear();
if (taskFailed.get() && (revertTask != null)) {
// at least one task failed, revert any that succeeded
LOG.info("Reverting all {} succeeded tasks from {} futures", succeeded.size(), futureCount);
for (final I item : succeeded) {
  futures.add(service.submit(() -> {
if (stopRevertsOnFailure && revertFailed.get()) {
return;
}
boolean failed = true;
setStatisticsContext();
try {
revertTask.run(item);
failed = false;
} catch (Exception e) {
LOG.error("Failed to revert task", e);
// swallow the exception
} finally {
if (failed) {
  revertFailed.set(true);
}
resetStatisticsContext();
}
}));
}
// let the revert tasks complete
waitFor(futures, sleepInterval);
}
// give priority to execution exceptions over
// iterator exceptions.
if ((!suppressExceptions) && (!exceptions.isEmpty())) {
// there's an exception list to build up, cast and throw.
TaskPool.<E>m0(exceptions);
}
// raise any iterator exception.
// this can not be suppressed.
if (iteratorIOE != null) {
throw iteratorIOE;
}
// return true if all tasks succeeded.
return !taskFailed.get();
} | 3.26 |
hadoop_EntityRowKey_getRowKeyAsString_rdh | /**
* Constructs a row key for the entity table as follows:
* <p>
* {@code userName!clusterId!flowName!flowRunId!AppId!
* entityType!entityIdPrefix!entityId}.
* </p>
*
* @return String representation of row key.
*/
public String getRowKeyAsString() {
return entityRowKeyConverter.encodeAsString(this);
} | 3.26 |
hadoop_EntityRowKey_getRowKey_rdh | /**
* Constructs a row key for the entity table as follows:
* {@code userName!clusterId!flowName!flowRunId!AppId!entityType!entityId}.
* Typically used while querying a specific entity.
*
* @return byte array with the row key.
*/
public byte[] getRowKey() {
return entityRowKeyConverter.encode(this);
} | 3.26 |
hadoop_EntityRowKey_parseRowKey_rdh | /**
* Given the raw row key as bytes, returns the row key as an object.
*
* @param rowKey
* byte representation of row key.
* @return An <cite>EntityRowKey</cite> object.
*/
public static EntityRowKey parseRowKey(byte[] rowKey) {
return new EntityRowKeyConverter().decode(rowKey);
} | 3.26 |
hadoop_EntityRowKey_parseRowKeyFromString_rdh | /**
* Given the encoded row key as string, returns the row key as an object.
*
* @param encodedRowKey
* String representation of row key.
* @return A <cite>EntityRowKey</cite> object.
*/
public static EntityRowKey parseRowKeyFromString(String encodedRowKey) {
return new EntityRowKeyConverter().decodeFromString(encodedRowKey);
} | 3.26 |
hadoop_MappableBlockLoaderFactory_createCacheLoader_rdh | /**
* Create a specific cache loader according to the configuration.
* If persistent memory volume is not configured, return a cache loader
* for DRAM cache. Otherwise, return a cache loader for pmem cache.
 */
public static MappableBlockLoader createCacheLoader(DNConf conf) {
if ((conf.getPmemVolumes() == null) || (conf.getPmemVolumes().length == 0)) {
return new MemoryMappableBlockLoader();
}
if (NativeIO.isAvailable() && NativeIO.POSIX.isPmdkAvailable()) {
return new NativePmemMappableBlockLoader();
}
return new PmemMappableBlockLoader();
} | 3.26 |
hadoop_CalculationContext_getCurrentMaximumCapacityEntry_rdh | /**
* A shorthand to return the maximum capacity vector entry for the currently evaluated child and
* resource name.
*
* @param label
* node label
* @return capacity vector entry
*/
public QueueCapacityVectorEntry getCurrentMaximumCapacityEntry(String label) {
return queue.getConfiguredMaxCapacityVector(label).getResource(resourceName);
} | 3.26 |
hadoop_CalculationContext_getCurrentMinimumCapacityEntry_rdh | /**
* A shorthand to return the minimum capacity vector entry for the currently evaluated child and
* resource name.
*
* @param label
* node label
* @return capacity vector entry
*/
public QueueCapacityVectorEntry getCurrentMinimumCapacityEntry(String label) {
return queue.getConfiguredCapacityVector(label).getResource(resourceName);
} | 3.26 |
hadoop_AzureBlobFileSystem_statIncrement_rdh | /**
* Increment of an Abfs statistic.
*
* @param statistic
* AbfsStatistic that needs increment.
*/
private void statIncrement(AbfsStatistic statistic) {
incrementStatistic(statistic);
} | 3.26 |
hadoop_AzureBlobFileSystem_checkException_rdh | /**
* Given a path and exception, choose which IOException subclass
* to create.
 * Will return if and only if the error code is in the list of allowed
* error codes.
*
* @param path
* path of operation triggering exception; may be null
* @param exception
* the exception caught
* @param allowedErrorCodesList
* varargs list of error codes.
* @throws IOException
* if the exception error code is not on the allowed list.
*/
@VisibleForTesting
public static void checkException(final Path path, final AzureBlobFileSystemException exception, final AzureServiceErrorCode... allowedErrorCodesList) throws IOException {
if (exception instanceof AbfsRestOperationException) {
AbfsRestOperationException ere = ((AbfsRestOperationException) (exception));
if (ArrayUtils.contains(allowedErrorCodesList, ere.getErrorCode())) {
return;
}
// AbfsRestOperationException.getMessage() contains full error info including path/uri.
String message = ere.getMessage();
switch (ere.getStatusCode()) {
case HttpURLConnection.HTTP_NOT_FOUND :
  throw ((IOException) (new FileNotFoundException(message).initCause(exception)));
case HTTP_CONFLICT :
throw ((IOException) (new FileAlreadyExistsException(message).initCause(exception)));
case HttpURLConnection.HTTP_FORBIDDEN :
case HttpURLConnection.HTTP_UNAUTHORIZED :
throw ((IOException) (new AccessDeniedException(message).initCause(exception)));
default :
throw ere;
}
} else if (exception instanceof SASTokenProviderException) {
throw exception;
} else {
if (path == null) {
throw exception;
}
// record info of path
throw new PathIOException(path.toString(), exception);
}
}
/**
* Gets the root cause of a provided {@link Throwable}. If there is no cause for the
* {@link Throwable} provided into this function, the original {@link Throwable} is returned.
*
* @param throwable
* starting {@link Throwable}
* @return root cause {@link Throwable} | 3.26 |
hadoop_AzureBlobFileSystem_access_rdh | /**
* Checks if the user can access a path. The mode specifies which access
* checks to perform. If the requested permissions are granted, then the
* method returns normally. If access is denied, then the method throws an
* {@link AccessControlException}.
*
* @param path
* Path to check
* @param mode
* type of access to check
* @throws AccessControlException
* if access is denied
* @throws java.io.FileNotFoundException
* if the path does not exist
* @throws IOException
* see specific implementation
*/
@Override
public void access(final Path path, final FsAction mode) throws IOException {
LOG.debug("AzureBlobFileSystem.access path : {}, mode : {}", path, mode);
Path v89 = makeQualified(path);
try {
  TracingContext tracingContext = new TracingContext(clientCorrelationId, fileSystemId, FSOperationType.ACCESS, tracingHeaderFormat, listener);
this.abfsStore.access(v89, mode, tracingContext);
} catch (AzureBlobFileSystemException ex) {
checkCheckAccessException(path, ex);
}
} | 3.26 |
hadoop_AzureBlobFileSystem_getDelegationToken_rdh | /**
* Get a delegation token from remote service endpoint if
* 'fs.azure.enable.kerberos.support' is set to 'true', and
* 'fs.azure.enable.delegation.token' is set to 'true'.
*
* @param renewer
* the account name that is allowed to renew the token.
* @return delegation token
* @throws IOException
* thrown when getting the current user.
*/
@Override
public synchronized Token<?> getDelegationToken(final String renewer) throws IOException {
statIncrement(CALL_GET_DELEGATION_TOKEN);
return this.delegationTokenEnabled ? this.delegationTokenManager.getDelegationToken(renewer) : super.getDelegationToken(renewer);
} | 3.26 |
hadoop_AzureBlobFileSystem_trailingPeriodCheck_rdh | /**
* Performs a check for (.) until root in the path to throw an exception.
* The purpose is to differentiate between dir/dir1 and dir/dir1.
* Without the exception the behavior seen is dir1. will appear
* to be present without it's actual creation as dir/dir1 and dir/dir1. are
* treated as identical.
*
* @param path
* the path to be checked for trailing period (.)
* @throws IllegalArgumentException
* if the path has a trailing period (.)
*/
private void trailingPeriodCheck(Path path) throws IllegalArgumentException {
while (!path.isRoot()) {
  String v35 = path.toString();
if (v35.length() != 0) {
if (v35.charAt(v35.length() - 1) == '.') {
throw new IllegalArgumentException("ABFS does not allow files or directories to end with a dot.");
}
    path = path.getParent();
  } else {
break;
}
}
} | 3.26 |
hadoop_AzureBlobFileSystem_getAclStatus_rdh | /**
* Gets the ACL of a file or directory.
*
* @param path
* Path to get
* @return AbfsAclStatus describing the ACL of the file or directory
* @throws IOException
* if an ACL could not be read
*/
@Override
public AclStatus getAclStatus(final Path path) throws IOException {
LOG.debug("AzureBlobFileSystem.getAclStatus path: {}", path);
TracingContext tracingContext = new TracingContext(clientCorrelationId, fileSystemId, FSOperationType.GET_ACL_STATUS, true, tracingHeaderFormat, listener);
if (!getIsNamespaceEnabled(tracingContext)) {
  throw new UnsupportedOperationException("getAclStatus is only supported by storage account with the " + "hierarchical namespace enabled.");
}
Path qualifiedPath = makeQualified(path);
try {
return abfsStore.getAclStatus(qualifiedPath, tracingContext);
} catch (AzureBlobFileSystemException ex) {
checkException(path, ex);
return null;
}
} | 3.26 |
hadoop_AzureBlobFileSystem_exists_rdh | /**
* Incrementing exists() calls from superclass for statistic collection.
*
* @param f
* source path.
* @return true if the path exists.
* @throws IOException
 */
@Override
public boolean exists(Path f) throws IOException {
statIncrement(CALL_EXIST);
return super.exists(f);
} | 3.26 |
hadoop_AzureBlobFileSystem_setPermission_rdh | /**
* Set permission of a path.
*
* @param path
* The path
* @param permission
* Access permission
*/
@Override
public void setPermission(final Path path, final FsPermission permission) throws IOException {
LOG.debug("AzureBlobFileSystem.setPermission path: {}", path);
TracingContext tracingContext = new TracingContext(clientCorrelationId, fileSystemId, FSOperationType.SET_PERMISSION, true, tracingHeaderFormat, listener);
if (!getIsNamespaceEnabled(tracingContext)) {
super.setPermission(path, permission);
return;
}
if (permission == null) {
  throw new IllegalArgumentException("The permission can't be null");
}
Path qualifiedPath = makeQualified(path);
try {
abfsStore.setPermission(qualifiedPath, permission, tracingContext);
} catch (AzureBlobFileSystemException ex) {
checkException(path, ex);
  }
} | 3.26 |
hadoop_AzureBlobFileSystem_openFileWithOptions_rdh | /**
* Takes config and other options through
* {@link org.apache.hadoop.fs.impl.OpenFileParameters}. Ensure that
* FileStatus entered is up-to-date, as it will be used to create the
* InputStream (with info such as contentLength, eTag)
*
* @param path
* The location of file to be opened
* @param parameters
* OpenFileParameters instance; can hold FileStatus,
* Configuration, bufferSize and mandatoryKeys
*/
@Override
protected CompletableFuture<FSDataInputStream> openFileWithOptions(final Path path, final OpenFileParameters parameters) throws IOException {
LOG.debug("AzureBlobFileSystem.openFileWithOptions path: {}", path);
AbstractFSBuilderImpl.rejectUnknownMandatoryKeys(parameters.getMandatoryKeys(), FS_OPTION_OPENFILE_STANDARD_OPTIONS, "for " + path);
return LambdaUtils.eval(new CompletableFuture<>(), () -> open(path, Optional.of(parameters)));
} | 3.26 |
hadoop_AzureBlobFileSystem_modifyAclEntries_rdh | /**
* Modifies ACL entries of files and directories. This method can add new ACL
* entries or modify the permissions on existing ACL entries. All existing
* ACL entries that are not specified in this call are retained without
* changes. (Modifications are merged into the current ACL.)
*
* @param path
* Path to modify
* @param aclSpec
* List of AbfsAclEntry describing modifications
* @throws IOException
* if an ACL could not be modified
*/
@Override
public void modifyAclEntries(final Path path, final List<AclEntry> aclSpec) throws IOException {
LOG.debug("AzureBlobFileSystem.modifyAclEntries path: {}", path);
TracingContext tracingContext = new TracingContext(clientCorrelationId, fileSystemId, FSOperationType.MODIFY_ACL, true, tracingHeaderFormat, listener);
if (!getIsNamespaceEnabled(tracingContext)) {
  throw new UnsupportedOperationException("modifyAclEntries is only supported by storage accounts with the " + "hierarchical namespace enabled.");
}
if ((aclSpec == null) || aclSpec.isEmpty()) {
  throw new IllegalArgumentException("The value of the aclSpec parameter is invalid.");
}
Path qualifiedPath = makeQualified(path);
try {
abfsStore.modifyAclEntries(qualifiedPath, aclSpec, tracingContext);
} catch (AzureBlobFileSystemException ex) {
checkException(path, ex);
}
} | 3.26 |
hadoop_AzureBlobFileSystem_setXAttr_rdh | /**
* Set the value of an attribute for a path.
*
* @param path
* The path on which to set the attribute
* @param name
* The attribute to set
* @param value
* The byte value of the attribute to set (encoded in latin-1)
* @param flag
* The mode in which to set the attribute
* @throws IOException
* If there was an issue setting the attribute on Azure
* @throws IllegalArgumentException
* If name is null or empty or if value is null
*/
@Override
public void setXAttr(final Path path, final String name, final byte[] value, final EnumSet<XAttrSetFlag> flag) throws IOException {
LOG.debug("AzureBlobFileSystem.setXAttr path: {}", path);
if (((name == null) || name.isEmpty()) || (value == null)) {
throw new IllegalArgumentException("A valid name and value must be specified.");
}
Path qualifiedPath = makeQualified(path);
try {
TracingContext tracingContext = new TracingContext(clientCorrelationId, fileSystemId, FSOperationType.SET_ATTR, true, tracingHeaderFormat, listener);
Hashtable<String, String> properties;
String xAttrName = ensureValidAttributeName(name);
if (path.isRoot()) {
  properties = abfsStore.getFilesystemProperties(tracingContext);
} else {
  properties = abfsStore.getPathStatus(qualifiedPath, tracingContext);
}
boolean xAttrExists = properties.containsKey(xAttrName);
XAttrSetFlag.validate(name, xAttrExists, flag);
String xAttrValue = abfsStore.decodeAttribute(value);
properties.put(xAttrName, xAttrValue);
if (path.isRoot()) {
abfsStore.setFilesystemProperties(properties, tracingContext);
} else {
  abfsStore.setPathProperties(qualifiedPath, properties, tracingContext);
}
} catch (AzureBlobFileSystemException ex) {
checkException(path, ex);
}
} | 3.26 |
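From a client's perspective the method above is reached through the generic FileSystem xattr API; a hedged sketch of such a call (the abfs URI and the attribute name/value are placeholders, not values taken from the snippet):

```java
import java.nio.charset.StandardCharsets;
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;

public class SetXAttrDemo {
  public static void main(String[] args) throws Exception {
    // Placeholder container/account/path, not real values.
    Path file = new Path("abfs://container@account.dfs.core.windows.net/data/file.txt");
    try (FileSystem fs = FileSystem.get(file.toUri(), new Configuration())) {
      fs.setXAttr(file, "user.owner-team",
          "analytics".getBytes(StandardCharsets.ISO_8859_1),  // latin-1, per the javadoc above
          EnumSet.of(XAttrSetFlag.CREATE));                   // fail if the attribute already exists
    }
  }
}
```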
hadoop_AzureBlobFileSystem_m0_rdh | /**
* Set owner of a path (i.e. a file or a directory).
* The parameters owner and group cannot both be null.
*
* @param path
* The path
* @param owner
* If it is null, the original username remains unchanged.
* @param group
* If it is null, the original groupname remains unchanged.
*/
@Override
public void m0(final Path path, final String owner, final String group) throws IOException {
LOG.debug("AzureBlobFileSystem.setOwner path: {}", path);
TracingContext tracingContext = new TracingContext(clientCorrelationId, fileSystemId, FSOperationType.SET_OWNER, true, tracingHeaderFormat, listener);
if (!getIsNamespaceEnabled(tracingContext)) {
super.setOwner(path, owner, group);
return;
}
if (((owner == null) || owner.isEmpty()) && ((group == null) || group.isEmpty())) {
throw new IllegalArgumentException("A valid owner or group must be specified.");
}
Path qualifiedPath = makeQualified(path);
try {
abfsStore.setOwner(qualifiedPath, owner, group, tracingContext);
} catch (AzureBlobFileSystemException ex) {
checkException(path, ex);
}
} | 3.26 |
hadoop_AzureBlobFileSystem_removeDefaultAcl_rdh | /**
* Removes all default ACL entries from files and directories.
*
* @param path
* Path to modify
* @throws IOException
* if an ACL could not be modified
*/
@Override
public void removeDefaultAcl(final Path path) throws IOException {
LOG.debug("AzureBlobFileSystem.removeDefaultAcl path: {}", path);
TracingContext tracingContext = new TracingContext(clientCorrelationId, fileSystemId, FSOperationType.REMOVE_DEFAULT_ACL, true, tracingHeaderFormat, listener);
if (!getIsNamespaceEnabled(tracingContext)) {
throw new UnsupportedOperationException("removeDefaultAcl is only supported by storage accounts with the " + "hierarchical namespace enabled.");
}
Path qualifiedPath = makeQualified(path);
try {
  abfsStore.removeDefaultAcl(qualifiedPath, tracingContext);
} catch (AzureBlobFileSystemException ex) {
  checkException(path, ex);
}
} | 3.26 |
hadoop_AzureBlobFileSystem_listLocatedStatus_rdh | /**
* Incremental listing of located status entries,
* preserving etags.
*
* @param path
* path to list
* @param filter
* a path filter
* @return iterator of results.
* @throws FileNotFoundException
* source path not found.
* @throws IOException
* other values.
*/
@Override
protected RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path path, final PathFilter filter) throws FileNotFoundException, IOException {
LOG.debug("AzureBlobFileSystem.listStatusIterator path : {}", path);
// get a paged iterator over the source data, filtering out non-matching
// entries.
final RemoteIterator<FileStatus> sourceEntries = filteringRemoteIterator(listStatusIterator(path), st -> filter.accept(st.getPath()));
// and then map that to a remote iterator of located file status
// entries, propagating any etags.
return mappingRemoteIterator(sourceEntries, st -> new AbfsLocatedFileStatus(st, st.isFile() ? getFileBlockLocations(st, 0, st.getLen()) : null));
} | 3.26 |
hadoop_AzureBlobFileSystem_getDelegationTokenManager_rdh | /**
* Get any Delegation Token manager created by the filesystem.
*
* @return the DT manager or null.
 */
@VisibleForTesting
AbfsDelegationTokenManager getDelegationTokenManager() {
return delegationTokenManager;
} | 3.26 |
hadoop_AzureBlobFileSystem_getOwnerUserPrimaryGroup_rdh | /**
* Get the group name of the owner of the FS.
*
* @return primary group name
*/
public String getOwnerUserPrimaryGroup() {
  return abfsStore.getPrimaryGroup();
} | 3.26 |
hadoop_AzureBlobFileSystem_commitSingleFileByRename_rdh | /**
* Resilient commit support.
* Provided as a nested class to avoid contaminating the
* FS instance with too many private methods which end up
* being used widely (as has happened to the S3A FS)
 */
public class ResilientCommitByRenameImpl implements ResilientCommitByRename {
  /**
* Perform the rename.
* This will be rate limited, as well as able to recover
* from rename errors if the etag was passed in.
*
* @param source
* path to source file
* @param dest
* destination of rename.
* @param sourceEtag
* etag of source file. may be null or empty
* @return the outcome of the operation
* @throws IOException
* any rename failure which was not recovered from.
*/
public Pair<Boolean, Duration> commitSingleFileByRename(final Path source, final Path dest, @Nullable final String sourceEtag) throws IOException {
LOG.debug("renameFileWithEtag source: {} dest: {} etag {}", source, dest, sourceEtag);
statIncrement(CALL_RENAME);
trailingPeriodCheck(dest);
Path qualifiedSrcPath = makeQualified(source);
Path qualifiedDstPath = makeQualified(dest);
TracingContext tracingContext = new TracingContext(clientCorrelationId, fileSystemId, FSOperationType.RENAME, true, tracingHeaderFormat, listener);
if (qualifiedSrcPath.equals(qualifiedDstPath)) {
// rename to itself is forbidden
throw new PathIOException(qualifiedSrcPath.toString(), "cannot rename object onto self");
}
// acquire one IO permit
final Duration waitTime = rateLimiting.acquire(1);
try {
  final boolean recovered = abfsStore.rename(qualifiedSrcPath, qualifiedDstPath, tracingContext, sourceEtag);
  return Pair.of(recovered, waitTime);
} catch (AzureBlobFileSystemException ex) {
LOG.debug("Rename operation failed. ", ex);
checkException(source, ex);
// never reached
    return null;
  }
} | 3.26 |
hadoop_AzureBlobFileSystem_getIOStatistics_rdh | /**
* Getter for IOStatistic instance in AzureBlobFilesystem.
*
* @return the IOStatistic instance from abfsCounters.
*/
@Override
public IOStatistics getIOStatistics() {
  return abfsCounters != null ? abfsCounters.getIOStatistics() : null;
} | 3.26 |
hadoop_AzureBlobFileSystem_createResilientCommitSupport_rdh | /**
* Private method to create resilient commit support.
*
* @return a new instance
* @param path
* destination path
* @throws IOException
* problem probing store capabilities
* @throws UnsupportedOperationException
* if the store lacks this support
*/
@InterfaceAudience.Private
public ResilientCommitByRename createResilientCommitSupport(final Path path) throws IOException {
if (!hasPathCapability(path, CommonPathCapabilities.ETAGS_PRESERVED_IN_RENAME)) {
throw new UnsupportedOperationException("Resilient commit support not available for " + path);
}
return new ResilientCommitByRenameImpl();
} | 3.26 |
hadoop_AzureBlobFileSystem_getCanonicalServiceName_rdh | /**
* If Delegation tokens are enabled, the canonical service name of
* this filesystem is the filesystem URI.
*
* @return either the filesystem URI as a string, or null.
*/
@Override
public String getCanonicalServiceName() {
String name = null;
if (delegationTokenManager != null) {
name = delegationTokenManager.getCanonicalServiceName();
}
return name != null ? name : super.getCanonicalServiceName();
} | 3.26 |
hadoop_AzureBlobFileSystem_getOwnerUser_rdh | /**
* Get the username of the FS.
*
* @return the short name of the user who instantiated the FS
*/
public String getOwnerUser() {
return abfsStore.getUser();
} | 3.26 |
hadoop_AzureBlobFileSystem_removeAclEntries_rdh | /**
* Removes ACL entries from files and directories. Other ACL entries are
* retained.
*
* @param path
* Path to modify
* @param aclSpec
* List of AclEntry describing entries to remove
* @throws IOException
* if an ACL could not be modified
*/
@Override
public void removeAclEntries(final Path path, final List<AclEntry> aclSpec) throws IOException {
LOG.debug("AzureBlobFileSystem.removeAclEntries path: {}", path);
TracingContext tracingContext = new TracingContext(clientCorrelationId, fileSystemId, FSOperationType.REMOVE_ACL_ENTRIES, true, tracingHeaderFormat, listener);
if (!getIsNamespaceEnabled(tracingContext)) {
  throw new UnsupportedOperationException("removeAclEntries is only supported by storage accounts with the " + "hierarchical namespace enabled.");
}
if ((aclSpec == null) || aclSpec.isEmpty()) {
throw new IllegalArgumentException("The aclSpec argument is invalid.");
}
Path qualifiedPath = makeQualified(path);
try {
abfsStore.removeAclEntries(qualifiedPath, aclSpec, tracingContext);
} catch (AzureBlobFileSystemException ex) {
checkException(path, ex);
}
} | 3.26 |
hadoop_AzureBlobFileSystem_incrementStatistic_rdh | /**
* Method for incrementing AbfsStatistic by a long value.
*
* @param statistic
* the Statistic to be incremented.
*/
private void incrementStatistic(AbfsStatistic statistic) {
if (abfsCounters != null) {
abfsCounters.incrementCounter(statistic, 1);
}
} | 3.26 |
hadoop_AzureBlobFileSystem_breakLease_rdh | /**
* Break the current lease on an ABFS file if it exists. A lease that is broken cannot be
* renewed. A new lease may be obtained on the file immediately.
*
* @param f
* file name
* @throws IOException
* on any exception while breaking the lease
*/
public void breakLease(final Path f) throws IOException {
LOG.debug("AzureBlobFileSystem.breakLease path: {}", f);
Path qualifiedPath = makeQualified(f);
try (DurationInfo ignored = new DurationInfo(LOG, false, "Break lease for %s", qualifiedPath)) {
TracingContext tracingContext = new TracingContext(clientCorrelationId, fileSystemId, FSOperationType.BREAK_LEASE, tracingHeaderFormat, listener);
abfsStore.breakLease(qualifiedPath, tracingContext);
} catch (AzureBlobFileSystemException ex) {
checkException(f, ex);
}
} | 3.26 |
hadoop_AzureBlobFileSystem_getInstrumentationMap_rdh | /**
* Returns the counter() map in IOStatistics containing all the counters
* and their values.
*
* @return Map of IOStatistics counters.
*/
@VisibleForTesting
Map<String, Long> getInstrumentationMap() {
  return abfsCounters.toMap();
} | 3.26 |
hadoop_RegistryPathStatus_hashCode_rdh | /**
* The hash code is derived from the path.
*
* @return hash code for storing the path in maps.
*/
@Override
public int hashCode() {
  return path != null ? path.hashCode() : 0;
} | 3.26 |
hadoop_RegistryPathStatus_equals_rdh | /**
* Equality operator checks size, time and path of the entries.
* It does <i>not</i> check {@link #children}.
*
* @param other
* the other entry
* @return true if the entries are considered equal.
*/
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if ((other == null) || (getClass() != other.getClass())) {
return false;
}
RegistryPathStatus status = ((RegistryPathStatus) (other));
if (size != status.size) {
return false;
}
if (time != status.time) {
return false;
}
if (path != null ? !path.equals(status.path) : status.path != null) {
return false;
}
return true;
} | 3.26 |
hadoop_TaggedInputSplit_getInputSplit_rdh | /**
* Retrieves the original InputSplit.
*
* @return The InputSplit that was tagged
*/
public InputSplit getInputSplit() {
  return inputSplit;
} | 3.26 |
hadoop_TaggedInputSplit_getInputFormatClass_rdh | /**
* Retrieves the InputFormat class to use for this split.
*
* @return The InputFormat class to use
*/
public Class<? extends InputFormat> getInputFormatClass() {
return inputFormatClass;
} | 3.26 |
hadoop_TaggedInputSplit_getMapperClass_rdh | /**
* Retrieves the Mapper class to use for this split.
*
* @return The Mapper class to use
*/
public Class<? extends Mapper> getMapperClass() {
return mapperClass;
} | 3.26 |
hadoop_InputWriter_initialize_rdh | /**
* Initializes the InputWriter. This method has to be called before calling
* any of the other methods.
 */
public void initialize(PipeMapRed pipeMapRed) throws IOException {
  // nothing here yet, but that might change in the future
} | 3.26 |
hadoop_ContainerContext_getExecutionType_rdh | /**
* Get {@link ExecutionType} the execution type of the container
* being initialized or stopped.
*
* @return the execution type of the container
 */
public ExecutionType getExecutionType() {
return executionType;
} | 3.26 |
hadoop_ContainerContext_getUser_rdh | /**
* Get user of the container being initialized or stopped.
*
* @return the user
*/
public String getUser() {
return user;
} | 3.26 |