name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68)
---|---|---|
hadoop_RemoteMethod_m0_rdh | /**
* Get the interface/protocol for this method. For example, ClientProtocol or
* NamenodeProtocol.
*
* @return Protocol for this method.
*/
public Class<?> m0() {
return this.protocol;
} | 3.26 |
hadoop_RemoteMethod_getTypes_rdh | /**
* Get the calling types for this method.
*
* @return An array of calling types.
*/
public Class<?>[] getTypes() {
return Arrays.copyOf(this.types, this.types.length);
} | 3.26 |
hadoop_RemoteMethod_getMethodName_rdh | /**
* Get the name of the method.
*
* @return Name of the method.
*/
public String getMethodName() {
return this.methodName;
} | 3.26 |
hadoop_FederationPolicyInitializationContext_setHomeSubcluster_rdh | /**
* Sets in the context the home sub-cluster. Useful for default policy
* behaviors.
*
* @param homeSubcluster
* value to set.
*/
public void setHomeSubcluster(SubClusterId homeSubcluster) {
this.homeSubcluster = homeSubcluster;
} | 3.26 |
hadoop_FederationPolicyInitializationContext_setFederationStateStoreFacade_rdh | /**
* Setter for the {@link FederationStateStoreFacade}.
*
* @param federationStateStoreFacade
* the facade.
*/
public void setFederationStateStoreFacade(FederationStateStoreFacade federationStateStoreFacade) {
this.federationStateStoreFacade = federationStateStoreFacade;
} | 3.26 |
hadoop_FederationPolicyInitializationContext_setSubClusterPolicyConfiguration_rdh | /**
* Setter for the {@link SubClusterPolicyConfiguration}.
*
* @param fedPolicyConfiguration
* the {@link SubClusterPolicyConfiguration} to
* be used for initialization.
*/
public void setSubClusterPolicyConfiguration(SubClusterPolicyConfiguration fedPolicyConfiguration) {
this.federationPolicyConfiguration = fedPolicyConfiguration;
}
/**
* Getter for the {@link SubClusterResolver}.
*
* @return the {@link SubClusterResolver} | 3.26 |
hadoop_FederationPolicyInitializationContext_setFederationSubclusterResolver_rdh | /**
* Setter for the {@link SubClusterResolver}.
*
* @param federationSubclusterResolver
* the {@link SubClusterResolver} to be
* used for initialization.
*/
public void setFederationSubclusterResolver(SubClusterResolver federationSubclusterResolver) {
this.federationSubclusterResolver = federationSubclusterResolver;
} | 3.26 |
hadoop_FederationPolicyInitializationContext_getHomeSubcluster_rdh | /**
* Returns the current home sub-cluster. Useful for default policy behaviors.
*
* @return the home sub-cluster.
*/
public SubClusterId getHomeSubcluster() {
return homeSubcluster;
} | 3.26 |
hadoop_FederationPolicyInitializationContext_getFederationStateStoreFacade_rdh | /**
* Getter for the {@link FederationStateStoreFacade}.
*
* @return the facade.
*/
public FederationStateStoreFacade getFederationStateStoreFacade() {
return federationStateStoreFacade;
} | 3.26 |
hadoop_StripedReconstructor_initDecoderIfNecessary_rdh | // Initialize decoder
protected void initDecoderIfNecessary() {
if (decoder == null) {
decoder = CodecUtil.createRawDecoder(conf, ecPolicy.getCodecName(), coderOptions);
}
} | 3.26 |
hadoop_StripedReconstructor_initDecodingValidatorIfNecessary_rdh | // Initialize decoding validator
protected void initDecodingValidatorIfNecessary() {
if (isValidationEnabled && (validator == null)) {
validator = new DecodingValidator(decoder);
}
} | 3.26 |
hadoop_StripedReconstructor_getXmits_rdh | /**
* Get the xmits that _will_ be used for this reconstruction task.
*/
int getXmits() {
return stripedReader.getXmits();
} | 3.26 |
hadoop_ReservationClientUtil_createMRReservation_rdh | /**
* Creates a request that envelopes an MR job, picking the max number of maps and
* reducers, max durations, and max resources per container.
*
* @param reservationId
* the id of the reservation
* @param name
* the name of a reservation
* @param maxMapRes
* maximum resources used by any mapper
* @param numberMaps
* number of mappers
* @param maxMapDur
* maximum duration of any mapper
* @param maxRedRes
* maximum resources used by any reducer
* @param numberReduces
* number of reducers
* @param maxRedDur
* maximum duration of any reducer
* @param arrival
* start time of valid range for reservation
* @param deadline
* deadline for this reservation
* @param queueName
* queue to submit to
* @return a submission request
*/
@SuppressWarnings("checkstyle:parameternumber")
public static ReservationSubmissionRequest createMRReservation(ReservationId reservationId, String name, Resource maxMapRes, int numberMaps, long maxMapDur, Resource maxRedRes, int numberReduces, long maxRedDur, long arrival, long deadline, String queueName) {
ReservationRequest mapRR = ReservationRequest.newInstance(maxMapRes, numberMaps, numberMaps, maxMapDur);
ReservationRequest redRR = ReservationRequest.newInstance(maxRedRes, numberReduces, numberReduces, maxRedDur);
List<ReservationRequest> listResReq = new ArrayList<ReservationRequest>();
listResReq.add(mapRR);
listResReq.add(redRR);
ReservationRequests reservationRequests = ReservationRequests.newInstance(listResReq, ReservationRequestInterpreter.R_ORDER_NO_GAP);
ReservationDefinition resDef = ReservationDefinition.newInstance(arrival, deadline, reservationRequests, name);
// outermost request
return ReservationSubmissionRequest.newInstance(resDef, queueName, reservationId);
} | 3.26 |
hadoop_AuthenticationFilterInitializer_initFilter_rdh | /**
* Initializes hadoop-auth AuthenticationFilter.
* <p>
* Propagates to hadoop-auth AuthenticationFilter configuration all Hadoop
* configuration properties prefixed with "hadoop.http.authentication."
*
* @param container
* The filter container
* @param conf
* Configuration for run-time parameters
*/
@Override
public void initFilter(FilterContainer container, Configuration conf) {
Map<String, String> filterConfig = getFilterConfigMap(conf, PREFIX);
container.addFilter("authentication", AuthenticationFilter.class.getName(), filterConfig);
} | 3.26 |
hadoop_SelectBinding_buildCsvInput_rdh | /**
* Build the CSV input format for a request.
*
* @param ownerConf
* FS owner configuration
* @param builderOptions
* options on the specific request
* @return the input format
* @throws IllegalArgumentException
* argument failure
* @throws IOException
* validation failure
*/
public InputSerialization buildCsvInput(final Configuration ownerConf, final Configuration builderOptions) throws IllegalArgumentException, IOException {
String headerInfo = opt(builderOptions, ownerConf,
CSV_INPUT_HEADER,
CSV_INPUT_HEADER_OPT_DEFAULT, true).toUpperCase(Locale.ENGLISH);
String commentMarker = xopt(builderOptions, ownerConf, CSV_INPUT_COMMENT_MARKER, CSV_INPUT_COMMENT_MARKER_DEFAULT);
String fieldDelimiter = xopt(builderOptions, ownerConf, CSV_INPUT_INPUT_FIELD_DELIMITER, CSV_INPUT_FIELD_DELIMITER_DEFAULT);
String recordDelimiter = xopt(builderOptions, ownerConf, CSV_INPUT_RECORD_DELIMITER, CSV_INPUT_RECORD_DELIMITER_DEFAULT);
String quoteCharacter = xopt(builderOptions, ownerConf, CSV_INPUT_QUOTE_CHARACTER, CSV_INPUT_QUOTE_CHARACTER_DEFAULT);
String quoteEscapeCharacter = xopt(builderOptions, ownerConf, CSV_INPUT_QUOTE_ESCAPE_CHARACTER, CSV_INPUT_QUOTE_ESCAPE_CHARACTER_DEFAULT);
// CSV input
CSVInput.Builder csvBuilder = CSVInput.builder().fieldDelimiter(fieldDelimiter).recordDelimiter(recordDelimiter).comments(commentMarker).quoteCharacter(quoteCharacter);
if (StringUtils.isNotEmpty(quoteEscapeCharacter)) {
csvBuilder.quoteEscapeCharacter(quoteEscapeCharacter);
}
csvBuilder.fileHeaderInfo(headerInfo);
InputSerialization.Builder inputSerialization = InputSerialization.builder().csv(csvBuilder.build());
String compression = opt(builderOptions, ownerConf, SELECT_INPUT_COMPRESSION, COMPRESSION_OPT_NONE, true).toUpperCase(Locale.ENGLISH);
if (isNotEmpty(compression)) {
inputSerialization.compressionType(compression);
}
return inputSerialization.build();
} | 3.26 |
hadoop_SelectBinding_expandBackslashChars_rdh | /**
* Perform escaping.
*
* @param src
* source string.
* @return the replaced value
*/
static String expandBackslashChars(String src) {
// backslash substitution must come last
return src.replace("\\n", "\n").replace("\\\"", "\"").replace("\\t", "\t").replace("\\r", "\r").replace("\\\"", "\"").replace("\\\\", "\\");
} | 3.26 |
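The expansion above is what turns delimiter options written as two-character escapes (a backslash followed by t, n, r, or a quote) into real control characters. A minimal standalone sketch of the same substitutions, kept outside the Hadoop class purely for illustration (the class and method names here are invented):

public class BackslashExpansionDemo {
  // Applies the same substitutions as above: \n, \", \t and \r escapes are
  // expanded, and the double-backslash substitution is applied last.
  static String expand(String src) {
    return src.replace("\\n", "\n")
        .replace("\\\"", "\"")
        .replace("\\t", "\t")
        .replace("\\r", "\r")
        .replace("\\\\", "\\");
  }

  public static void main(String[] args) {
    System.out.println(expand("\\t").equals("\t"));   // true: becomes a real tab
    System.out.println(expand("\\n").equals("\n"));   // true: becomes a real newline
    System.out.println(expand("\\\"").equals("\""));  // true: becomes a double quote
  }
}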
hadoop_SelectBinding_m0_rdh | /**
* Build and execute a select request.
*
* @param readContext
* the read context, which includes the source path.
* @param expression
* the SQL expression.
* @param builderOptions
* query options
* @param objectAttributes
* object attributes from a HEAD request
* @return an FSDataInputStream whose wrapped stream is a SelectInputStream
* @throws IllegalArgumentException
* argument failure
* @throws IOException
* failure building, validating or executing the request.
* @throws PathIOException
* source path is a directory.
*/
@Retries.RetryTranslated
public FSDataInputStream m0(final S3AReadOpContext readContext, final String expression, final Configuration builderOptions, final S3ObjectAttributes objectAttributes) throws IOException {
return new FSDataInputStream(executeSelect(readContext, objectAttributes, builderOptions, buildSelectRequest(readContext.getPath(), expression, builderOptions)));
} | 3.26 |
hadoop_SelectBinding_buildRequest_rdh | /**
* Build the select request from the configuration built up
* in {@code S3AFileSystem.openFile(Path)} and the default
* options in the cluster configuration.
*
* Options are picked up in the following order.
* <ol>
* <li> Options in {@code openFileOptions}.</li>
* <li> Options in the owning filesystem configuration.</li>
* <li>The default values in {@link SelectConstants}</li>
* </ol>
*
* @param requestBuilder
* request to build up
* @param expression
* SQL expression
* @param builderOptions
* the options which came in from the openFile builder.
* @throws IllegalArgumentException
* if an option is somehow invalid.
* @throws IOException
* if an option is somehow invalid.
*/
void buildRequest(final SelectObjectContentRequest.Builder requestBuilder, final String expression, final Configuration builderOptions) throws IllegalArgumentException, IOException {
Preconditions.checkArgument(StringUtils.isNotEmpty(expression), "No expression provided in parameter " + SELECT_SQL);
final Configuration ownerConf = operations.getConf();
String inputFormat = builderOptions.get(SELECT_INPUT_FORMAT, SELECT_FORMAT_CSV).toLowerCase(Locale.ENGLISH);
Preconditions.checkArgument(SELECT_FORMAT_CSV.equals(inputFormat), "Unsupported input format %s", inputFormat);
String outputFormat = builderOptions.get(SELECT_OUTPUT_FORMAT, SELECT_FORMAT_CSV).toLowerCase(Locale.ENGLISH);
Preconditions.checkArgument(SELECT_FORMAT_CSV.equals(outputFormat), "Unsupported output format %s", outputFormat);
requestBuilder.expressionType(ExpressionType.SQL);
requestBuilder.expression(expandBackslashChars(expression));
requestBuilder.inputSerialization(buildCsvInput(ownerConf, builderOptions));
requestBuilder.outputSerialization(buildCSVOutput(ownerConf, builderOptions));
} | 3.26 |
hadoop_SelectBinding_isSelectEnabled_rdh | /**
* Static probe for select being enabled.
*
* @param conf
* configuration
* @return true iff select is enabled.
*/
public static boolean isSelectEnabled(Configuration conf) {
return conf.getBoolean(FS_S3A_SELECT_ENABLED, true);
} | 3.26 |
hadoop_SelectBinding_toString_rdh | /**
* Stringify the given SelectObjectContentRequest, as its
* toString() operator doesn't.
*
* @param request
* request to convert to a string
* @return a string to print. Does not contain secrets.
*/
public static String toString(final SelectObjectContentRequest request) {
StringBuilder sb = new StringBuilder();
sb.append("SelectObjectContentRequest{").append("bucket name=").append(request.bucket()).append("; key=").append(request.key()).append("; expressionType=").append(request.expressionType()).append("; expression=").append(request.expression());
InputSerialization input = request.inputSerialization();
if (input != null) {
sb.append("; Input").append(input.toString());
} else {
sb.append("; Input Serialization: none");
}
OutputSerialization out = request.outputSerialization();
if (out != null) {
sb.append("; Output").append(out.toString());
} else {
sb.append("; Output Serialization: none");
}
return sb.append("}").toString();
} | 3.26 |
hadoop_SelectBinding_executeSelect_rdh | /**
* Execute the select request.
*
* @param readContext
* read context
* @param objectAttributes
* object attributes from a HEAD request
* @param builderOptions
* the options which came in from the openFile builder.
* @param request
* the built up select request.
* @return a SelectInputStream
* @throws IOException
* failure
* @throws PathIOException
* source path is a directory.
*/
@Retries.RetryTranslated
private SelectInputStream executeSelect(final S3AReadOpContext readContext, final S3ObjectAttributes objectAttributes, final Configuration builderOptions, final SelectObjectContentRequest request) throws IOException {
Path path = readContext.getPath();
if (readContext.getDstFileStatus().isDirectory()) {
throw new PathIOException(path.toString(), ("Can't select " + path) + " because it is a directory");
}
boolean sqlInErrors = builderOptions.getBoolean(SELECT_ERRORS_INCLUDE_SQL, errorsIncludeSql);
String expression = request.expression();
final String errorText = (sqlInErrors) ? expression : "Select";
if (sqlInErrors) {
LOG.info("Issuing SQL request {}", expression);
}
SelectEventStreamPublisher selectPublisher = operations.select(path, request, errorText);
return new SelectInputStream(readContext, objectAttributes, selectPublisher);
} | 3.26 |
hadoop_SelectBinding_xopt_rdh | /**
* Get an option with backslash arguments transformed.
* These are not trimmed, so whitespace is significant.
*
* @param selectOpts
* options in the select call
* @param fsConf
* filesystem conf
* @param base
* base option name
* @param defVal
* default value
* @return the transformed value
*/
static String xopt(Configuration selectOpts, Configuration fsConf, String base, String defVal) {
return expandBackslashChars(opt(selectOpts, fsConf, base, defVal, false));
} | 3.26 |
hadoop_SelectBinding_buildCSVOutput_rdh | /**
* Build CSV output format for a request.
*
* @param ownerConf
* FS owner configuration
* @param builderOptions
* options on the specific request
* @return the output format
* @throws IllegalArgumentException
* argument failure
* @throws IOException
* validation failure
*/
public OutputSerialization buildCSVOutput(final Configuration ownerConf, final Configuration builderOptions) throws IllegalArgumentException, IOException {
String fieldDelimiter = xopt(builderOptions, ownerConf, CSV_OUTPUT_FIELD_DELIMITER, CSV_OUTPUT_FIELD_DELIMITER_DEFAULT);
String recordDelimiter = xopt(builderOptions, ownerConf, CSV_OUTPUT_RECORD_DELIMITER, CSV_OUTPUT_RECORD_DELIMITER_DEFAULT);
String quoteCharacter = xopt(builderOptions, ownerConf, CSV_OUTPUT_QUOTE_CHARACTER, CSV_OUTPUT_QUOTE_CHARACTER_DEFAULT);
String quoteEscapeCharacter = xopt(builderOptions, ownerConf, CSV_OUTPUT_QUOTE_ESCAPE_CHARACTER, CSV_OUTPUT_QUOTE_ESCAPE_CHARACTER_DEFAULT);
String quoteFields = xopt(builderOptions, ownerConf, CSV_OUTPUT_QUOTE_FIELDS, CSV_OUTPUT_QUOTE_FIELDS_ALWAYS).toUpperCase(Locale.ENGLISH);
CSVOutput.Builder csvOutputBuilder = CSVOutput.builder().quoteCharacter(quoteCharacter).quoteFields(QuoteFields.fromValue(quoteFields)).fieldDelimiter(fieldDelimiter).recordDelimiter(recordDelimiter);
if (!quoteEscapeCharacter.isEmpty()) {
csvOutputBuilder.quoteEscapeCharacter(quoteEscapeCharacter);
}
// output is CSV, always
return OutputSerialization.builder().csv(csvOutputBuilder.build()).build();
} | 3.26 |
hadoop_SelectBinding_buildSelectRequest_rdh | /**
* Build a select request.
*
* @param path
* source path.
* @param expression
* the SQL expression.
* @param builderOptions
* config to extract other query options from
* @return the request to serve
* @throws IllegalArgumentException
* argument failure
* @throws IOException
* problem building/validating the request
*/
public SelectObjectContentRequest buildSelectRequest(final Path path,
final String expression, final Configuration builderOptions) throws IOException {
Preconditions.checkState(isEnabled(), "S3 Select is not enabled for %s", path);
SelectObjectContentRequest.Builder request = operations.newSelectRequestBuilder(path);
buildRequest(request, expression, builderOptions);
return request.build();
} | 3.26 |
hadoop_SelectBinding_opt_rdh | /**
* Resolve an option.
*
* @param builderOptions
* the options which came in from the openFile builder.
* @param fsConf
* configuration of the owning FS.
* @param base
* base option (no s3a: prefix)
* @param defVal
* default value. Must not be null.
* @param trim
* should the result be trimmed.
* @return the possibly trimmed value.
*/
static String opt(Configuration builderOptions, Configuration fsConf, String base, String defVal, boolean trim) {
String r = builderOptions.get(base, fsConf.get(base, defVal));
return trim ? r.trim() : r;
} | 3.26 |
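The lookup above implements the precedence described earlier for buildRequest: the openFile builder options win, then the owning filesystem configuration, then the supplied default. A minimal sketch of that ordering with plain org.apache.hadoop.conf.Configuration objects (the option key below is hypothetical, chosen only to show the precedence):

import org.apache.hadoop.conf.Configuration;

public class OptionResolutionDemo {
  // Mirrors the lookup above: builder options win, then the owning FS
  // configuration, then the supplied default.
  static String opt(Configuration builderOptions, Configuration fsConf,
      String base, String defVal, boolean trim) {
    String r = builderOptions.get(base, fsConf.get(base, defVal));
    return trim ? r.trim() : r;
  }

  public static void main(String[] args) {
    Configuration fsConf = new Configuration(false);
    Configuration builderOptions = new Configuration(false);
    String key = "fs.s3a.select.example.option"; // hypothetical key, illustration only

    System.out.println(opt(builderOptions, fsConf, key, "default", true)); // "default"
    fsConf.set(key, "from-fs-conf");
    System.out.println(opt(builderOptions, fsConf, key, "default", true)); // "from-fs-conf"
    builderOptions.set(key, "from-open-file");
    System.out.println(opt(builderOptions, fsConf, key, "default", true)); // "from-open-file"
  }
}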
hadoop_SelectBinding_isEnabled_rdh | /**
* Is the service supported?
*
* @return true iff select is enabled.
*/
public boolean isEnabled() {
return enabled;
} | 3.26 |
hadoop_CommitContext_getSinglePendingFileSerializer_rdh | /**
* Get a serializer for .pending files.
*
* @return a serializer.
*/
public JsonSerialization<SinglePendingCommit> getSinglePendingFileSerializer() {
return singleCommitSerializer.getForCurrentThread();
} | 3.26 |
hadoop_CommitContext_buildSubmitters_rdh | /**
* Build the submitters and thread pools if the number of committerThreads
* is greater than zero.
* This should only be called in constructors; it is synchronized to keep
* SpotBugs happy.
*/
private synchronized void buildSubmitters() {
if (committerThreads != 0) {
outerSubmitter = new PoolSubmitter(buildThreadPool(committerThreads));
}
}
/**
* Returns an {@link ExecutorService} for parallel tasks. The number of
* threads in the thread-pool is set by fs.s3a.committer.threads.
* If num-threads is 0, this will raise an exception.
* The threads have a lifespan set by
* {@link InternalCommitterConstants#THREAD_KEEP_ALIVE_TIME}.
* When the thread pool is full, the caller-runs
* policy takes over.
*
* @param numThreads
* thread count, may be negative.
* @return an {@link ExecutorService} | 3.26 |
hadoop_CommitContext_getJobId_rdh | /**
* Get the job ID.
*
* @return job ID.
*/
public String getJobId() {
return jobId;
} | 3.26 |
hadoop_CommitContext_isCollectIOStatistics_rdh | /**
* Collecting thread level IO statistics?
*
* @return true if thread level IO stats should be collected.
*/
public boolean isCollectIOStatistics() {
return collectIOStatistics;
} | 3.26 |
hadoop_CommitContext_m0_rdh | /**
* IOStatistics context of the created thread.
*
* @return the IOStatistics.
*/
public IOStatisticsContext m0() {
return ioStatisticsContext;
} | 3.26 |
hadoop_CommitContext_getPendingSetSerializer_rdh | /**
* Get a serializer for .pendingset files.
*
* @return a serializer.
*/
public JsonSerialization<PendingSet> getPendingSetSerializer() {
return pendingSetSerializer.getForCurrentThread();
} | 3.26 |
hadoop_CommitContext_abortMultipartCommit_rdh | /**
* See {@link CommitOperations#abortMultipartCommit(String, String)}.
*
* @param destKey
* destination key
* @param uploadId
* upload to cancel
* @throws FileNotFoundException
* if the abort ID is unknown
* @throws IOException
* on any failure
*/
public void abortMultipartCommit(final String destKey, final String uploadId) throws IOException {
commitOperations.abortMultipartCommit(destKey, uploadId);
} | 3.26 |
hadoop_CommitContext_destroyThreadPools_rdh | /**
* Destroy any thread pools; wait for that to finish,
* but don't overreact if it doesn't finish in time.
*/
private synchronized void destroyThreadPools() {
try {
IOUtils.cleanupWithLogger(LOG, outerSubmitter, innerSubmitter);
} finally {
outerSubmitter = null;
innerSubmitter = null;
}
} | 3.26 |
hadoop_CommitContext_getOuterSubmitter_rdh | /**
* Return a submitter.
* If created with 0 threads, this returns null so
* TaskPool knows to run it in the current thread.
*
* @return a submitter or null
*/
public synchronized Submitter getOuterSubmitter() {
return outerSubmitter;
} | 3.26 |
hadoop_CommitContext_revertCommit_rdh | /**
* See {@link CommitOperations#revertCommit(SinglePendingCommit)}.
*
* @param commit
* pending commit
* @throws IOException
* failure
*/
public void revertCommit(final SinglePendingCommit commit) throws IOException {
commitOperations.revertCommit(commit);
} | 3.26 |
hadoop_CommitContext_getJobContext_rdh | /**
* Job Context.
*
* @return job context.
*/
public JobContext getJobContext() {
return jobContext;
} | 3.26 |
hadoop_CommitContext_submit_rdh | /**
* Forward to the submitter, wrapping in task
* context setting, so as to ensure that all operations
* have job/task attributes.
*
* @param task
* task to execute
* @return the future.
*/
@Override
public Future<?> submit(Runnable task) {
return executor.submit(() -> {
auditContextUpdater.updateCurrentAuditContext();
try {
task.run();
} finally {
auditContextUpdater.resetCurrentAuditContext();
}
});
} | 3.26 |
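The wrapper above is a general pattern: establish per-thread context before the task runs and always clear it in a finally block, so pooled threads never leak job or task attribution. A minimal standalone sketch of that pattern using a plain ExecutorService and a ThreadLocal in place of the audit context (the job/task ids are invented for the example):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class ContextWrappingSubmitDemo {
  // Generic version of the wrapping above: set some thread-local context
  // before the task runs and always clear it afterwards.
  static Future<?> submitWithContext(ExecutorService executor, Runnable task,
      Runnable setContext, Runnable resetContext) {
    return executor.submit(() -> {
      setContext.run();
      try {
        task.run();
      } finally {
        resetContext.run();
      }
    });
  }

  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newSingleThreadExecutor();
    ThreadLocal<String> auditContext = new ThreadLocal<>();
    Future<?> f = submitWithContext(pool,
        () -> System.out.println("context = " + auditContext.get()),
        () -> auditContext.set("job-0001/task-0002"), // hypothetical ids
        auditContext::remove);
    f.get();
    pool.shutdown();
  }
}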
hadoop_CommitContext_commit_rdh | /**
* Commit a single pending commit; exceptions are caught
* and converted to an outcome.
* See {@link CommitOperations#commit(SinglePendingCommit, String)}.
*
* @param commit
* entry to commit
* @param origin
* origin path/string for outcome text
* @return the outcome
*/
public MaybeIOE commit(SinglePendingCommit commit, String origin) {
return commitOperations.commit(commit, origin);
}
/**
* See {@link CommitOperations#abortSingleCommit(SinglePendingCommit)} | 3.26 |
hadoop_CommitContext_commitOrFail_rdh | /**
* Commit the operation, throwing an exception on any failure.
* See {@code CommitOperations#commitOrFail(SinglePendingCommit)}.
*
* @param commit
* commit to execute
* @throws IOException
* on a failure
*/
public void commitOrFail(SinglePendingCommit commit) throws IOException {
commitOperations.commitOrFail(commit);
} | 3.26 |
hadoop_CommitContext_getConf_rdh | /**
* Job configuration.
*
* @return configuration (never null)
*/
public Configuration getConf() {
return conf;
} | 3.26 |
hadoop_CommitContext_maybeResetIOStatisticsContext_rdh | /**
* Reset the IOStatistics context if statistics are being
* collected.
* Logs at info.
*/
public void maybeResetIOStatisticsContext() {
if (collectIOStatistics) {
LOG.info("Resetting IO statistics context {}", ioStatisticsContext.getID());
ioStatisticsContext.reset();
}
} | 3.26 |
hadoop_CommitContext_switchToIOStatisticsContext_rdh | /**
* Switch to the context IOStatistics context,
* if needed.
*/
public void switchToIOStatisticsContext() {
IOStatisticsContext.setThreadIOStatisticsContext(ioStatisticsContext);
} | 3.26 |
hadoop_CommitContext_getInnerSubmitter_rdh | /**
* Return a submitter. As this pool is used less often,
* create it on demand.
* If created with 0 threads, this returns null so
* TaskPool knows to run it in the current thread.
*
* @return a submitter or null
*/
public synchronized Submitter getInnerSubmitter() {
if ((innerSubmitter == null) && (committerThreads > 0)) {
innerSubmitter = new PoolSubmitter(buildThreadPool(committerThreads));
}
return innerSubmitter;
} | 3.26 |
hadoop_MountTableStoreImpl_m0_rdh | /**
* Check parent path permission recursively. It needs WRITE permission
* of the nearest parent entry and other EXECUTE permission.
*
* @param src
* mount entry being checked
* @throws AccessControlException
* if mount table cannot be accessed
*/
private void m0(final String src) throws IOException {
String parent = src.substring(0, src.lastIndexOf(Path.SEPARATOR));
checkMountTableEntryPermission(parent, FsAction.WRITE);
while (!parent.isEmpty()) {
parent = parent.substring(0, parent.lastIndexOf(Path.SEPARATOR));
checkMountTableEntryPermission(parent, FsAction.EXECUTE);
}
} | 3.26 |
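The loop above asks for WRITE permission on the nearest parent of the mount entry and EXECUTE on every ancestor above it. A small standalone sketch that prints the paths such a walk visits for an assumed entry (/a/b/c is invented for the example; the empty string at the end corresponds to the root entry):

public class ParentPermissionWalkDemo {
  public static void main(String[] args) {
    // Assumed mount entry path, for illustration only.
    String src = "/a/b/c";

    // Same walk as above: WRITE on the nearest parent, EXECUTE on every
    // ancestor above it.
    String parent = src.substring(0, src.lastIndexOf('/'));
    System.out.println("WRITE   -> " + parent);            // /a/b
    while (!parent.isEmpty()) {
      parent = parent.substring(0, parent.lastIndexOf('/'));
      System.out.println("EXECUTE -> " + parent);          // /a, then "" (root)
    }
  }
}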
hadoop_MountTableStoreImpl_checkMountTableEntryPermission_rdh | /**
* Whether a mount table entry can be accessed by the current context.
*
* @param src
* mount entry being accessed
* @param action
* type of action being performed on the mount entry
* @throws AccessControlException
* if mount table cannot be accessed
*/
private void checkMountTableEntryPermission(String src, FsAction action) throws IOException {
final MountTable partial = MountTable.newInstance();
partial.setSourcePath(src);
final Query<MountTable> v1 = new Query<>(partial);
final MountTable entry = getDriver().get(getRecordClass(), v1);
if (entry != null) {
RouterPermissionChecker pc = RouterAdminServer.getPermissionChecker();
if (pc != null) {
pc.checkPermission(entry, action);
}
}
} | 3.26 |
hadoop_NMContainerStatus_getAllocationTags_rdh | /**
* Get and set the Allocation tags associated with the container.
*
* @return Allocation tags.
*/
public Set<String> getAllocationTags() {
return Collections.emptySet();
} | 3.26 |
hadoop_NMContainerStatus_getExecutionType_rdh | /**
* Get the <code>ExecutionType</code> of the container.
*
* @return <code>ExecutionType</code> of the container
*/
public ExecutionType getExecutionType() {
return ExecutionType.GUARANTEED;
} | 3.26 |
hadoop_NMContainerStatus_newInstance_rdh | // Used by tests only
public static NMContainerStatus newInstance(ContainerId containerId, int version, ContainerState containerState, Resource allocatedResource, String diagnostics, int containerExitStatus, Priority priority, long creationTime) {
return newInstance(containerId, version, containerState, allocatedResource, diagnostics, containerExitStatus, priority, creationTime, CommonNodeLabelsManager.NO_LABEL, ExecutionType.GUARANTEED, -1);
} | 3.26 |
hadoop_WorkReport_getRetry_rdh | /**
*
* @return Number of unsuccessful attempts to process work.
*/
public int getRetry() {
return retry;
} | 3.26 |
hadoop_WorkReport_getException_rdh | /**
*
* @return Exception thrown while processing work.
*/
public Exception getException() {
return exception;
} | 3.26 |
hadoop_WorkReport_getSuccess_rdh | /**
*
* @return True if the work was processed successfully.
*/
public boolean getSuccess() {
return success;
} | 3.26 |
hadoop_BlockStorageMovementNeeded_markScanCompleted_rdh | /**
* Mark directory scan is completed.
*/
public synchronized void markScanCompleted() {
this.fullyScanned = true;
} | 3.26 |
hadoop_BlockStorageMovementNeeded_get_rdh | /**
* Gets the satisfier files for which a block storage movement check is necessary
* and the movement should be made if required.
*
* @return satisfier files
*/
public synchronized ItemInfo get() {
return storageMovementNeeded.poll();
} | 3.26 |
hadoop_BlockStorageMovementNeeded_add_rdh | /**
* Add the itemInfo to tracking list for which storage movement expected if
* necessary.
*
* @param itemInfo
* - child in the directory
* @param scanCompleted
* - Indicates whether the ItemInfo start id directory has no more
* elements to scan.
*/
@VisibleForTesting
public synchronized void add(ItemInfo itemInfo, boolean scanCompleted) {
storageMovementNeeded.add(itemInfo);
// This represents sps start id is file, so no need to update pending dir
// stats.
if (itemInfo.getStartPath() == itemInfo.getFile()) {
return;
}
updatePendingDirScanStats(itemInfo.getStartPath(), 1, scanCompleted);
} | 3.26 |
hadoop_BlockStorageMovementNeeded_removeItemTrackInfo_rdh | /**
* Decrease the pending child count for a directory once one file's blocks have
* moved successfully. Remove the SPS xAttr if the pending child count is zero.
*/
public synchronized void removeItemTrackInfo(ItemInfo trackInfo, boolean isSuccess) throws IOException {
if (trackInfo.isDir()) {
// If track is part of some start inode then reduce the pending
// directory work count.
long startId = trackInfo.getStartPath();
if (!ctxt.isFileExist(startId)) {
// directory deleted just remove it.
this.pendingWorkForDirectory.remove(startId);
} else {
DirPendingWorkInfo pendingWork = pendingWorkForDirectory.get(startId);
if (pendingWork != null) {
pendingWork.decrementPendingWorkCount();
if (pendingWork.m0()) {
ctxt.removeSPSHint(startId);
pendingWorkForDirectory.remove(startId);
}
}
}
} else {
// Remove xAttr if trackID doesn't exist in
// storageMovementAttemptedItems or file policy satisfied.
ctxt.removeSPSHint(trackInfo.getFile());
}
} | 3.26 |
hadoop_BlockStorageMovementNeeded_clearQueuesWithNotification_rdh | /**
* Clean all the movements in spsDirsToBeTraveresed/storageMovementNeeded
* and notify to clean up required resources.
*/
public synchronized void clearQueuesWithNotification() {
// Remove xAttr from directories
Long trackId;
while ((trackId = ctxt.getNextSPSPath()) != null) {
try {
// Remove xAttr for file
ctxt.removeSPSHint(trackId);
} catch (IOException ie) {
LOG.warn("Failed to remove SPS xattr for track id " + trackId, ie);
}
}
// File's directly added to storageMovementNeeded, So try to remove
// xAttr for file
ItemInfo itemInfo;
while ((itemInfo = get()) != null) {
try {
// Remove xAttr for file
if (!itemInfo.isDir()) {
ctxt.removeSPSHint(itemInfo.getFile());
}
} catch (IOException ie) {
LOG.warn("Failed to remove SPS xattr for track id " + itemInfo.getFile(), ie);
}
}
this.clearAll();
} | 3.26 |
hadoop_BlockStorageMovementNeeded_m0_rdh | /**
* Return true if all the pending work is done and directory fully
* scanned, otherwise false.
*/
public synchronized boolean m0() {
return (pendingWorkCount <= 0) && fullyScanned;
} | 3.26 |
hadoop_BlockStorageMovementNeeded_addPendingWorkCount_rdh | /**
* Increment the pending work count for directory.
*/
public synchronized void addPendingWorkCount(int count) {
this.pendingWorkCount = this.pendingWorkCount + count;
} | 3.26 |
hadoop_BlockStorageMovementNeeded_size_rdh | /**
* Returns queue size.
*/
public synchronized int size() {
return storageMovementNeeded.size();
} | 3.26 |
hadoop_BlockStorageMovementNeeded_decrementPendingWorkCount_rdh | /**
* Decrement the pending work count for a directory once one track info is
* completed.
*/
public synchronized void decrementPendingWorkCount() {
this.pendingWorkCount--;
} | 3.26 |
hadoop_BlockStorageMovementNeeded_addAll_rdh | /**
* Add the itemInfo list to tracking list for which storage movement expected
* if necessary.
*
* @param startPath
* - start path
* @param itemInfoList
* - List of child in the directory
* @param scanCompleted
* - Indicates whether the start id directory has no more elements to
* scan.
*/
@VisibleForTesting
public synchronized void addAll(long startPath, List<ItemInfo> itemInfoList, boolean scanCompleted) {
storageMovementNeeded.addAll(itemInfoList);
updatePendingDirScanStats(startPath, itemInfoList.size(), scanCompleted);
} | 3.26 |
hadoop_FSDirAppendOp_computeQuotaDeltaForUCBlock_rdh | /**
* Compute quota change for converting a complete block to a UC block.
*/
private static QuotaCounts computeQuotaDeltaForUCBlock(FSNamesystem fsn, INodeFile file) {
final QuotaCounts delta = new QuotaCounts.Builder().build();
final BlockInfo lastBlock = file.getLastBlock();
if (lastBlock != null) {
final long diff = file.getPreferredBlockSize() - lastBlock.getNumBytes();
final short repl = lastBlock.getReplication();
delta.addStorageSpace(diff * repl);
final BlockStoragePolicy policy = fsn.getFSDirectory().getBlockStoragePolicySuite().getPolicy(file.getStoragePolicyID());
List<StorageType> types = policy.chooseStorageTypes(repl);
for (StorageType v25 : types) {
if (v25.supportTypeQuota()) {
delta.addTypeSpace(v25, diff);
}
}
}
return delta;
} | 3.26 |
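The storage-space part of that delta is simply (preferred block size - last block length) x replication: the space the append may still consume in the partial block, on every replica. A small sketch of the arithmetic with assumed numbers (the sizes below are invented for the example):

public class UcBlockQuotaDeltaDemo {
  public static void main(String[] args) {
    // Assumed example values, purely for illustration.
    long preferredBlockSize = 128L * 1024 * 1024; // 128 MiB
    long lastBlockNumBytes = 100L * 1024 * 1024;  // 100 MiB already written
    short replication = 3;

    // Same arithmetic as the delta above: the remaining space in the last
    // block may still be filled by the append, on every replica.
    long diff = preferredBlockSize - lastBlockNumBytes;
    long storageSpaceDelta = diff * replication;

    System.out.println(storageSpaceDelta); // 88080384 bytes (3 x 28 MiB)
  }
}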
hadoop_FSDirAppendOp_appendFile_rdh | /**
* Append to an existing file.
* <p>
*
* The method returns the last block of the file if this is a partial block,
* which can still be used for writing more data. The client uses the
* returned block locations to form the data pipeline for this block.<br>
* The {@link LocatedBlock} will be null if the last block is full.
* The client then allocates a new block with the next call using
* {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#addBlock}.
* <p>
*
* For description of parameters and exceptions thrown see
* {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#append}
*
* @param fsn
* namespace
* @param srcArg
* path name
* @param pc
* permission checker to check fs permission
* @param holder
* client name
* @param clientMachine
* client machine info
* @param newBlock
* if the data is appended to a new block
* @param logRetryCache
* whether to record RPC ids in editlog for retry cache
* rebuilding
* @return the last block with status
*/
static LastBlockWithStatus appendFile(final FSNamesystem fsn, final String srcArg, final FSPermissionChecker pc, final String holder, final String clientMachine, final boolean newBlock, final boolean logRetryCache) throws IOException {
assert fsn.hasWriteLock();
final LocatedBlock lb;
final FSDirectory fsd = fsn.getFSDirectory();
final INodesInPath iip;
fsd.writeLock();
try {
iip = fsd.resolvePath(pc, srcArg, DirOp.WRITE);
// Verify that the destination does not exist as a directory already
final INode inode = iip.getLastINode();
final String path = iip.getPath();
if ((inode != null) && inode.isDirectory()) {
throw new FileAlreadyExistsException(("Cannot append to directory " + path) + "; already exists as a directory.");
}
if (fsd.isPermissionEnabled()) {
fsd.checkPathAccess(pc, iip, FsAction.WRITE);
}
if (inode == null) {
throw new FileNotFoundException((("Failed to append to non-existent file " + path) + " for client ") + clientMachine);
}
final INodeFile file = INodeFile.valueOf(inode, path, true);
if (file.isStriped() && (!newBlock)) {
throw new UnsupportedOperationException(("Append on EC file without new block is not supported. Use " + CreateFlag.NEW_BLOCK) + " create flag while appending file.");
}
BlockManager blockManager = fsd.getBlockManager();
final BlockStoragePolicy lpPolicy = blockManager.getStoragePolicy("LAZY_PERSIST");
if ((lpPolicy != null) && (lpPolicy.getId() == file.getStoragePolicyID())) {
throw new UnsupportedOperationException("Cannot append to lazy persist file " + path);
}
// Opening an existing file for append - may need to recover lease.
fsn.recoverLeaseInternal(RecoverLeaseOp.APPEND_FILE, iip, path, holder, clientMachine, false);
final BlockInfo lastBlock = file.getLastBlock();
// Check that the block has at least minimum replication.
if (lastBlock != null) {
if (lastBlock.getBlockUCState() == BlockUCState.COMMITTED) {
throw new RetriableException(new NotReplicatedYetException(((("append: lastBlock=" + lastBlock) + " of src=") + path) + " is COMMITTED but not yet COMPLETE."));
} else if (lastBlock.isComplete() && (!blockManager.isSufficientlyReplicated(lastBlock))) {
throw new IOException(((("append: lastBlock=" + lastBlock) + " of src=") + path) + " is not sufficiently replicated yet.");
}
}
lb = prepareFileForAppend(fsn, iip, holder, clientMachine, newBlock, true, logRetryCache);
} catch (IOException ie) {
NameNode.stateChangeLog.warn("DIR* NameSystem.append: " + ie.getMessage());
throw ie;
} finally {
fsd.writeUnlock();
}
HdfsFileStatus stat = FSDirStatAndListingOp.getFileInfo(fsd, iip, false, false);
if (lb != null) {
NameNode.stateChangeLog.debug("DIR* NameSystem.appendFile: file {} for {} at {} block {} block" + " size {}", srcArg, holder, clientMachine, lb.getBlock(), lb.getBlock().getNumBytes());
}
return new LastBlockWithStatus(lb, stat);
} | 3.26 |
hadoop_FSDirAppendOp_prepareFileForAppend_rdh | /**
* Convert current node to under construction.
* Recreate in-memory lease record.
*
* @param fsn
* namespace
* @param iip
* inodes in the path containing the file
* @param leaseHolder
* identifier of the lease holder on this file
* @param clientMachine
* identifier of the client machine
* @param newBlock
* if the data is appended to a new block
* @param writeToEditLog
* whether to persist this change to the edit log
* @param logRetryCache
* whether to record RPC ids in editlog for retry cache
* rebuilding
* @return the last block locations if the block is partial or null otherwise
* @throws IOException
*/
static LocatedBlock prepareFileForAppend(final FSNamesystem fsn, final INodesInPath iip, final String leaseHolder, final String clientMachine, final boolean newBlock, final boolean writeToEditLog, final boolean logRetryCache) throws IOException {
assert fsn.hasWriteLock();
final INodeFile file = iip.getLastINode().asFile();
final QuotaCounts delta = verifyQuotaForUCBlock(fsn, file, iip);
file.recordModification(iip.getLatestSnapshotId());
file.toUnderConstruction(leaseHolder, clientMachine);
fsn.getLeaseManager().addLease(file.getFileUnderConstructionFeature().getClientName(), file.getId());
LocatedBlock ret = null;
if (!newBlock) {
FSDirectory fsd = fsn.getFSDirectory();
ret = fsd.getBlockManager().convertLastBlockToUnderConstruction(file, 0);
if ((ret != null) && (delta != null)) {
Preconditions.checkState(delta.getStorageSpace() >= 0, "appending to" + " a block with size larger than the preferred block size");
fsd.writeLock();
try {
fsd.updateCountNoQuotaCheck(iip, iip.length() - 1, delta);
} finally {
fsd.writeUnlock();
}
}
} else {
BlockInfo lastBlock = file.getLastBlock();
if (lastBlock != null) {
ExtendedBlock blk = new ExtendedBlock(fsn.getBlockPoolId(), lastBlock);
ret = new LocatedBlock(blk, DatanodeInfo.EMPTY_ARRAY);
}
}
if (writeToEditLog) {
final String path = iip.getPath();
if (NameNodeLayoutVersion.supports(Feature.APPEND_NEW_BLOCK, fsn.getEffectiveLayoutVersion())) {
fsn.getEditLog().logAppendFile(path, file, newBlock, logRetryCache);
} else {
fsn.getEditLog().logOpenFile(path, file, false, logRetryCache);
}
}
return ret;
} | 3.26 |
hadoop_CommitterEventHandler_touchz_rdh | // If job commit is repeatable, then we should allow
// startCommitFile/endCommitSuccessFile/endCommitFailureFile to be written
// by other AM before.
private void touchz(Path p, boolean overwrite) throws IOException {
fs.create(p, overwrite).close();
} | 3.26 |
hadoop_OBSWriteOperationHelper_writeSuccessful_rdh | /**
* Callback on a successful write.
*
* @param destKey
* object key
*/
void writeSuccessful(final String destKey) {
LOG.debug("Finished write to {}", destKey);
} | 3.26 |
hadoop_OBSWriteOperationHelper_completeMultipartUpload_rdh | /**
* Complete a multipart upload operation.
*
* @param destKey
* Object key
* @param uploadId
* multipart operation Id
* @param partETags
* list of partial uploads
* @return the result
* @throws ObsException
* on problems.
*/
CompleteMultipartUploadResult completeMultipartUpload(final String destKey, final String uploadId, final List<PartEtag> partETags) throws ObsException {
Preconditions.checkNotNull(uploadId);
Preconditions.checkNotNull(partETags);
Preconditions.checkArgument(!partETags.isEmpty(), "No partitions have been uploaded");
LOG.debug("Completing multipart upload {} with {} parts", uploadId, partETags.size());
// a copy of the list is required, so that the OBS SDK doesn't
// attempt to sort an unmodifiable list.
return obs.completeMultipartUpload(new CompleteMultipartUploadRequest(bucket, destKey, uploadId, new ArrayList<>(partETags)));
} | 3.26 |
hadoop_OBSWriteOperationHelper_putObject_rdh | /**
* PUT an object directly (i.e. not via the transfer manager).
*
* @param putObjectRequest
* the request
* @return the upload initiated
* @throws IOException
* on problems
*/
PutObjectResult putObject(final PutObjectRequest putObjectRequest) throws IOException {
try {
return OBSCommonUtils.putObjectDirect(owner, putObjectRequest);
} catch (ObsException e) {
throw OBSCommonUtils.translateException("put", putObjectRequest.getObjectKey(), e);
}
} | 3.26 |
hadoop_OBSWriteOperationHelper_newUploadPartRequest_rdh | /**
* Create request for uploading one part of a multipart task.
*
* @param destKey
* destination object key
* @param uploadId
* upload id
* @param partNumber
* part number
* @param size
* data size
* @param uploadStream
* upload stream for the part
* @return part upload request
*/
UploadPartRequest newUploadPartRequest(final String destKey, final String uploadId, final int partNumber, final int size, final InputStream uploadStream) {
Preconditions.checkNotNull(uploadId);
Preconditions.checkArgument(uploadStream != null, "Data source");
Preconditions.checkArgument(size > 0, "Invalid partition size %s", size);
Preconditions.checkArgument((partNumber > 0) && (partNumber <= PART_NUMBER));
LOG.debug("Creating part upload request for {} #{} size {}", uploadId, partNumber, size);
UploadPartRequest request = new UploadPartRequest();
request.setUploadId(uploadId);
request.setBucketName(bucket);
request.setObjectKey(destKey);
request.setPartSize(((long) (size)));
request.setPartNumber(partNumber);
request.setInput(uploadStream);
if (owner.getSse().isSseCEnable()) {
request.setSseCHeader(owner.getSse().getSseCHeader());
}
return request;
} | 3.26 |
hadoop_OBSWriteOperationHelper_m0_rdh | /**
* Create a {@link PutObjectRequest} request. If {@code length} is set, the
* metadata is configured with the size of the upload.
*
* @param destKey
* key of object
* @param inputStream
* source data
* @param length
* size, if known. Use -1 for not known
* @return the request
*/
PutObjectRequest m0(final String destKey, final InputStream inputStream, final long length) {
return OBSCommonUtils.newPutObjectRequest(owner, destKey, newObjectMetadata(length), inputStream);
} | 3.26 |
hadoop_OBSWriteOperationHelper_newPutRequest_rdh | /**
* Create a {@link PutObjectRequest} request to upload a file.
*
* @param destKey
* object key for request
* @param sourceFile
* source file
* @return the request
*/
PutObjectRequest newPutRequest(final String destKey, final File sourceFile) {
int length = ((int) (sourceFile.length()));
return OBSCommonUtils.newPutObjectRequest(owner, destKey, newObjectMetadata(length), sourceFile);
} | 3.26 |
hadoop_OBSWriteOperationHelper_newObjectMetadata_rdh | /**
* Create a new object metadata instance. Any standard metadata headers are
* added here, for example: encryption.
*
* @param length
* size, if known. Use -1 for not known
* @return a new metadata instance
*/
public ObjectMetadata newObjectMetadata(final long length) {
return OBSObjectBucketUtils.newObjectMetadata(length);
} | 3.26 |
hadoop_GetClusterNodeLabelsResponsePBImpl_setNodeLabels_rdh | /**
*
* @deprecated Use {@link #setNodeLabelList(List)} instead.
*/
@Override
@Deprecated
public void setNodeLabels(Set<String> labels) {
List<NodeLabel> list = new ArrayList<>();
for (String s : labels) {
list.add(NodeLabel.newInstance(s));
}
setNodeLabelList(list);
} | 3.26 |
hadoop_ValidateRenamedFilesStage_addFileCommitted_rdh | /**
* Add a file entry to the list of committed files.
*
* @param entry
* entry
*/
private synchronized void addFileCommitted(FileEntry entry) {
filesCommitted.add(entry);
} | 3.26 |
hadoop_ValidateRenamedFilesStage_getFilesCommitted_rdh | /**
* Get the list of files committed.
*
* @return a possibly empty list.
*/
private synchronized List<FileEntry> getFilesCommitted() {
return filesCommitted;
} | 3.26 |
hadoop_ValidateRenamedFilesStage_validateOneFile_rdh | /**
* Validate a file.
*
* @param entry
* entry to probe for
* @throws IOException
* IO problem.
* @throws OutputValidationException
* if the entry is not valid
*/
private void validateOneFile(FileEntry entry) throws IOException {
updateAuditContext(OP_STAGE_JOB_VALIDATE_OUTPUT);
// report progress back
progress();
// look validate the file.
// raising an FNFE if the file isn't there.
FileStatus destStatus;
final Path sourcePath = entry.getSourcePath();
Path destPath = entry.getDestPath();
try {
destStatus = getFileStatus(destPath);
// it must be a file
if (!destStatus.isFile()) {
throw new OutputValidationException(destPath, (("Expected a file renamed from " + sourcePath) + "; found ") + destStatus);
}
final long sourceSize = entry.getSize();
final long destSize = destStatus.getLen();
// etags, if the source had one.
final String sourceEtag = entry.getEtag();
if (getOperations().storePreservesEtagsThroughRenames(destStatus.getPath()) && isNotBlank(sourceEtag)) {
final String destEtag = ManifestCommitterSupport.getEtag(destStatus);
if (!sourceEtag.equals(destEtag)) {
LOG.warn("Etag of dest file {}: {} does not match that of manifest entry {}", destPath, destStatus, entry);
throw new OutputValidationException(destPath, String.format((("Expected the file" + " renamed from %s") + " with etag %s and length %s")
+ " but found a file with etag %s and length %d", sourcePath, sourceEtag, sourceSize, destEtag, destSize));
}
}
// check the expected length after any etag validation
if (destSize != sourceSize) {
LOG.warn("Length of dest file {}: {} does not match that of manifest entry {}",
destPath, destStatus, entry);
throw new OutputValidationException(destPath, String.format((("Expected the file" + " renamed from %s") + " with length %d") + " but found a file of length %d", sourcePath, sourceSize, destSize));
}
} catch (FileNotFoundException e) {
// file didn't exist
throw new OutputValidationException(destPath, "Expected a file, but it was not found", e);
}
addFileCommitted(entry);
} | 3.26 |
hadoop_ErasureCoderOptions_allowVerboseDump_rdh | /**
* Allow dump verbose debug info or not.
*
* @return true if verbose debug info is desired, false otherwise
*/
public boolean allowVerboseDump() {
return f1;
} | 3.26 |
hadoop_ErasureCoderOptions_getNumParityUnits_rdh | /**
* The number of parity output units for the coding. A unit can be a byte,
* chunk, buffer or even a block.
*
* @return count of parity output units
*/
public int getNumParityUnits() {
return numParityUnits;
} | 3.26 |
hadoop_ErasureCoderOptions_getNumAllUnits_rdh | /**
* The number of all the involved units in the coding.
*
* @return count of all the data units and parity units
*/
public int getNumAllUnits() {
return numAllUnits;
} | 3.26 |
hadoop_ErasureCoderOptions_getNumDataUnits_rdh | /**
* The number of data input units for the coding. A unit can be a byte,
* chunk or buffer or even a block.
*
* @return count of data input units
*/
public int getNumDataUnits() {
return f0;
} | 3.26 |
hadoop_ErasureCoderOptions_allowChangeInputs_rdh | /**
* Allow changing input buffer content (not positions). Maybe better
* performance if not allowed.
*
* @return true if allowing input content to be changed, false otherwise
*/
public boolean allowChangeInputs() {
return allowChangeInputs;
} | 3.26 |
hadoop_ResourceInformation_getValue_rdh | /**
* Integer value of the resource.
*
* @return value
*/
@ApiModelProperty("Integer value of the resource.")
@JsonProperty("value")
public Long getValue() {
return value;
} | 3.26 |
hadoop_ResourceInformation_toIndentedString_rdh | /**
* Convert the given object to string with each line indented by 4 spaces
* (except the first line).
*/
private String toIndentedString(Object o) {
if (o == null) {
return "null";
}
return o.toString().replace("\n", "\n ");
} | 3.26 |
hadoop_ResourceInformation_getUnit_rdh | /**
*
* @return unit
*/
@ApiModelProperty("")
@JsonProperty("unit")
public String getUnit() {
return unit == null ? "" : unit;
} | 3.26 |
hadoop_OBSPosixBucketUtils_fsRemoveKeysByDepth_rdh | // Batch delete sub objects one depth at a time, so that parents and children
// never end up in the same batch.
// A batch deletion might be split into several concurrent deletions to improve
// performance, but it cannot guarantee that an object is deleted before its children.
private static void fsRemoveKeysByDepth(final OBSFileSystem owner, final FileStatus[] arFileStatus) throws ObsException, IOException {
if (arFileStatus.length <= 0) {
// exit fast if there is no keys to delete
return;
}
// Find all leaf keys in the list.
String key;
int depth = Integer.MAX_VALUE;
List<KeyAndVersion> leafKeys = new ArrayList<>(owner.getMaxEntriesToDelete());
for (int idx = arFileStatus.length - 1; idx >= 0; idx--) {
if (leafKeys.size() >= owner.getMaxEntriesToDelete()) {
OBSCommonUtils.removeKeys(owner, leafKeys, true, false);
}
key = OBSCommonUtils.pathToKey(owner, arFileStatus[idx].getPath());
// Check file.
if (!arFileStatus[idx].isDirectory()) {
// A file must be a leaf.
leafKeys.add(new KeyAndVersion(key, null));
continue;
}
// Check leaf folder at current depth.
int keyDepth = fsGetObjectKeyDepth(key);
if (keyDepth == depth) {
// Any key at current depth must be a leaf.
leafKeys.add(new KeyAndVersion(key,
null));
continue;
}
if (keyDepth < depth) {
// The last batch delete at current depth.
OBSCommonUtils.removeKeys(owner, leafKeys, true, false);
// Go on at the upper depth.
depth = keyDepth;
leafKeys.add(new KeyAndVersion(key, null));
continue;
}
LOG.warn("The objects list is invalid because it isn't sorted by" + " path depth.");
throw new ObsException("System failure");
}
// The last batch delete at the minimum depth of all keys.
OBSCommonUtils.removeKeys(owner, leafKeys, true, false);
} | 3.26 |
hadoop_OBSPosixBucketUtils_fsDelete_rdh | // Recursively delete a folder that might be not empty.
static boolean fsDelete(final OBSFileSystem owner, final FileStatus status, final boolean recursive) throws IOException, ObsException {
long startTime = System.currentTimeMillis();
long threadId = Thread.currentThread().getId();
Path f = status.getPath();
String key = OBSCommonUtils.pathToKey(owner, f);
if (!status.isDirectory()) {
LOG.debug("delete: Path is a file");
trashObjectIfNeed(owner, key);
} else {
LOG.debug("delete: Path is a directory: {} - recursive {}", f, recursive);
key = OBSCommonUtils.maybeAddTrailingSlash(key);
boolean isEmptyDir = OBSCommonUtils.isFolderEmpty(owner, key);
if (key.equals("")) {
return OBSCommonUtils.rejectRootDirectoryDelete(owner.getBucket(), isEmptyDir, recursive);
}
if ((!recursive) && (!isEmptyDir)) {
LOG.warn("delete: Path is not empty: {} - recursive {}", f, recursive);
throw new PathIsNotEmptyDirectoryException(f.toString());
}
if (isEmptyDir) {
LOG.debug("delete: Deleting fake empty directory {} - recursive {}",
f, recursive);
OBSCommonUtils.deleteObject(owner, key);
} else {
LOG.debug("delete: Deleting objects for directory prefix {} to " + "delete - recursive {}", f, recursive);
trashFolderIfNeed(owner, key, f);
}
}
long endTime = System.currentTimeMillis();
LOG.debug("delete Path:{} thread:{}, timeUsedInMilliSec:{}", f,
threadId, endTime - startTime);
return true;
} | 3.26 |
hadoop_OBSPosixBucketUtils_fsCreateFolder_rdh | // Used to create a folder
static void fsCreateFolder(final OBSFileSystem owner, final String objectName) throws ObsException {
for (int retryTime = 1; retryTime < OBSCommonUtils.MAX_RETRY_TIME; retryTime++) {
try {
innerFsCreateFolder(owner, objectName);
return;
} catch (ObsException e) {
LOG.warn("Failed to create folder [{}], retry time [{}], " + "exception [{}]", objectName, retryTime, e);
try {
Thread.sleep(OBSCommonUtils.DELAY_TIME);
} catch (InterruptedException ie) {
throw e;
}
}
}
innerFsCreateFolder(owner, objectName);
} | 3.26 |
hadoop_OBSPosixBucketUtils_fsRenameToNewObject_rdh | /**
* Used to rename a source object to a destination object that did not exist
* before the rename.
*
* @param owner
* OBS File System instance
* @param srcKey
* source object key
* @param dstKey
* destination object key
* @throws IOException
* io exception
*/
static void fsRenameToNewObject(final OBSFileSystem owner, final String srcKey, final String dstKey) throws IOException {
String v16 = srcKey;
String newdstKey = dstKey;
v16 = OBSCommonUtils.maybeDeleteBeginningSlash(v16);
newdstKey = OBSCommonUtils.maybeDeleteBeginningSlash(newdstKey);
if (v16.endsWith("/")) {
// Rename folder.
fsRenameToNewFolder(owner, v16, newdstKey);
} else {
// Rename file.
innerFsRenameFile(owner, v16,
newdstKey);
}
} | 3.26 |
hadoop_OBSPosixBucketUtils_fsRemoveFile_rdh | // Delete a file.
private static int fsRemoveFile(final OBSFileSystem owner, final String sonObjectKey, final List<KeyAndVersion> files) throws IOException {
files.add(new KeyAndVersion(sonObjectKey));
if (files.size() == owner.getMaxEntriesToDelete()) { // batch delete files.
OBSCommonUtils.removeKeys(owner, files, true, false);
return owner.getMaxEntriesToDelete();
}
return 0;
} | 3.26 |
hadoop_OBSPosixBucketUtils_renameBasedOnPosix_rdh | /**
* The inner rename operation based on Posix bucket.
*
* @param owner
* OBS File System instance
* @param src
* source path to be renamed from
* @param dst
* destination path to be renamed to
* @return boolean
* @throws RenameFailedException
* if some criteria for a state changing rename
* was not met. This means work didn't happen;
* it's not something which is reported upstream
* to the FileSystem APIs, for which the
* semantics of "false" are pretty vague.
* @throws IOException
* on IO failure.
*/
static boolean renameBasedOnPosix(final OBSFileSystem owner, final Path src, final Path dst) throws IOException {
Path dstPath = dst;
String srcKey = OBSCommonUtils.pathToKey(owner, src);
String dstKey = OBSCommonUtils.pathToKey(owner, dstPath);
if (srcKey.isEmpty()) {
LOG.error("rename: src [{}] is root directory", src);
return false;
}
try {
FileStatus dstStatus = owner.getFileStatus(dstPath);
if (dstStatus.isDirectory()) {
String newDstString = OBSCommonUtils.maybeAddTrailingSlash(dstPath.toString());
String filename = srcKey.substring(OBSCommonUtils.pathToKey(owner, src.getParent()).length() + 1);
dstPath = new Path(newDstString + filename);
dstKey = OBSCommonUtils.pathToKey(owner, dstPath);
LOG.debug("rename: dest is an existing directory and will be " + "changed to [{}]", dstPath);if (owner.exists(dstPath)) {
LOG.error(((("rename: failed to rename " + src) + " to ") + dstPath) + " because destination exists");
return
false;
}
} else if (srcKey.equals(dstKey)) {LOG.warn("rename: src and dest refer to the same " + "file or directory: {}", dstPath);
return true;
} else {
LOG.error(((("rename: failed to rename " + src) + " to ") + dstPath) + " because destination exists");
return false;
}
} catch (FileNotFoundException e) {
// if destination does not exist, do not change the
// destination key, and just do rename.
LOG.debug("rename: dest [{}] does not exist", dstPath);
} catch (FileConflictException e) {
Path parent = dstPath.getParent();
if (!OBSCommonUtils.pathToKey(owner, parent).isEmpty()) {
FileStatus dstParentStatus = owner.getFileStatus(parent);
if (!dstParentStatus.isDirectory()) {
throw new ParentNotDirectoryException(parent + " is not a directory");
}
}
}
if (dstKey.startsWith(srcKey) && (dstKey.equals(srcKey) || (dstKey.charAt(srcKey.length()) == Path.SEPARATOR_CHAR))) {
LOG.error("rename: dest [{}] cannot be a descendant of src [{}]", dstPath, src);return
false;
}
return innerFsRenameWithRetry(owner, src, dstPath, srcKey, dstKey);
} | 3.26 |
hadoop_OBSPosixBucketUtils_fsNonRecursivelyDelete_rdh | // List all sub objects first, then delete them in batches.
private static void fsNonRecursivelyDelete(final OBSFileSystem owner, final Path parent) throws IOException, ObsException {
// List sub objects sorted by path depth.
FileStatus[] arFileStatus = OBSCommonUtils.innerListStatus(owner, parent, true);
// Remove sub objects one depth by one depth to avoid that parents and
// children in a same batch.
fsRemoveKeys(owner, arFileStatus);
// Delete parent folder that should has become empty.
OBSCommonUtils.deleteObject(owner, OBSCommonUtils.pathToKey(owner, parent));
} | 3.26 |
hadoop_OBSPosixBucketUtils_fsGetObjectKeyDepth_rdh | /**
* Get the depth of an absolute path, that is the number of '/' in the path.
*
* @param key
* object key
* @return depth
*/
static int fsGetObjectKeyDepth(final String key) {
int depth = 0;
for (int idx = key.indexOf('/'); idx >= 0; idx = key.indexOf('/', idx + 1)) {
depth++;
}
return key.endsWith("/") ? depth - 1 : depth;
} | 3.26 |
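As a quick check of the depth rule above (count the '/' characters, and a trailing slash does not add a level), a standalone sketch repeating the same loop on invented keys:

public class KeyDepthDemo {
  // Same counting rule as above: number of '/' characters,
  // minus one when the key ends with a trailing slash.
  static int depth(String key) {
    int d = 0;
    for (int idx = key.indexOf('/'); idx >= 0; idx = key.indexOf('/', idx + 1)) {
      d++;
    }
    return key.endsWith("/") ? d - 1 : d;
  }

  public static void main(String[] args) {
    System.out.println(depth("a/b/c"));  // 2
    System.out.println(depth("a/b/c/")); // 2 -- trailing slash is not an extra level
    System.out.println(depth("a"));      // 0
  }
}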
hadoop_OBSPosixBucketUtils_innerFsGetObjectStatus_rdh | // Used to get the status of a file or folder in a file-gateway bucket.
static OBSFileStatus innerFsGetObjectStatus(final OBSFileSystem owner, final Path f) throws IOException {
final Path path = OBSCommonUtils.qualify(owner, f);
String key = OBSCommonUtils.pathToKey(owner, path);
LOG.debug("Getting path status for {} ({})", path, key);
if (key.isEmpty()) {
LOG.debug("Found root directory");
return new OBSFileStatus(path, owner.getUsername());
}
try {
final GetAttributeRequest getAttrRequest = new GetAttributeRequest(owner.getBucket(), key);
ObsFSAttribute meta = owner.getObsClient().getAttribute(getAttrRequest);
owner.getSchemeStatistics().incrementReadOps(1);
if (fsIsFolder(meta)) { LOG.debug("Found file (with /): fake directory");
return new OBSFileStatus(path, OBSCommonUtils.dateToLong(meta.getLastModified()), owner.getUsername());
} else {
LOG.debug("Found file (with /): real file? should not happen: {}", key);
return new OBSFileStatus(meta.getContentLength(), OBSCommonUtils.dateToLong(meta.getLastModified()), path, owner.getDefaultBlockSize(path), owner.getUsername());
}
} catch (ObsException e) {
if (e.getResponseCode() == OBSCommonUtils.NOT_FOUND_CODE) {
LOG.debug("Not Found: {}", path);
throw new FileNotFoundException("No such file or directory: " + path);
}
if (e.getResponseCode() == OBSCommonUtils.CONFLICT_CODE) {
throw new FileConflictException("file conflicts: " + e.getResponseStatus());
}
throw OBSCommonUtils.translateException("getFileStatus", path, e);
}
} | 3.26 |
hadoop_OBSPosixBucketUtils_fsIsFolder_rdh | /**
* Used to judge whether an object is a file or a folder.
*
* @param attr
* posix object attribute
* @return is posix folder
*/
static boolean fsIsFolder(final ObsFSAttribute attr) {
final int ifDir = 0x4000;
int mode = attr.getMode();
// object mode is -1 when the object is migrated from
// object bucket to posix bucket.
// -1 is a file, not folder.
if (mode < 0) {
return false;
}
return (mode & ifDir) != 0;
} | 3.26 |
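The check above tests the POSIX directory bit (0x4000, S_IFDIR) in the object's mode, with a negative mode treated as a file for objects migrated from an object bucket. A standalone sketch of the same rule on assumed mode values (the example modes are invented):

public class ModeBitDemo {
  private static final int IF_DIR = 0x4000; // POSIX S_IFDIR bit, as used above

  // Same rule as above: a negative mode (migrated object) is treated as a file,
  // otherwise the directory bit decides.
  static boolean isFolder(int mode) {
    if (mode < 0) {
      return false;
    }
    return (mode & IF_DIR) != 0;
  }

  public static void main(String[] args) {
    System.out.println(isFolder(0x41ED)); // true: 040755, a directory
    System.out.println(isFolder(0x81A4)); // false: 0100644, a regular file
    System.out.println(isFolder(-1));     // false: migrated object, treated as a file
  }
}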
hadoop_OBSPosixBucketUtils_fsRenameToNewFolder_rdh | /**
* Used to rename a source folder to a destination folder that did not exist
* before the rename.
*
* @param owner
* OBS File System instance
* @param src
* source folder key
* @param dst
* destination folder key that not existed before rename
* @throws IOException
* any io exception
* @throws ObsException
* any obs operation exception
*/
static void fsRenameToNewFolder(final OBSFileSystem owner, final String src, final String dst) throws IOException, ObsException {
LOG.debug("RenameFolder path {} to {}", src, dst);
try {
RenameRequest renameObjectRequest = new RenameRequest();
renameObjectRequest.setBucketName(owner.getBucket());
renameObjectRequest.setObjectKey(src);
renameObjectRequest.setNewObjectKey(dst);
owner.getObsClient().renameFolder(renameObjectRequest);
owner.getSchemeStatistics().incrementWriteOps(1);
} catch (ObsException e) {
throw OBSCommonUtils.translateException(((("renameFile(" + src) + ", ") + dst) + ")", src, e);
}
} | 3.26 |
hadoop_OBSPosixBucketUtils_fsRemoveSubdir_rdh | // Delete a sub dir.
private static int fsRemoveSubdir(final OBSFileSystem owner, final String subdirKey, final List<KeyAndVersion> subdirList) throws IOException {
fsRecursivelyDeleteDir(owner, subdirKey, false);
subdirList.add(new KeyAndVersion(subdirKey));
if (subdirList.size() == owner.getMaxEntriesToDelete()) {
// batch delete subdirs.
OBSCommonUtils.removeKeys(owner, subdirList, true, false);
return owner.getMaxEntriesToDelete();
}
return 0;
} | 3.26 |
hadoop_TwoColumnLayout_preHead_rdh | /**
* Do what needs to be done before the header is rendered. This usually
* involves setting page variables for Javascript and CSS rendering.
*
* @param html
* the html to use to render.
*/
protected void preHead(Page.HTML<__> html) {
} | 3.26 |
hadoop_TwoColumnLayout_render_rdh | /**
* A simpler two column layout implementation with a header, a navigation bar
* on the left, content on the right, and a footer. Works with resizable themes.
*
* @see TwoColumnCssLayout
*/
@InterfaceAudience.LimitedPrivate({ "YARN", "MapReduce" })
public class TwoColumnLayout extends HtmlPage {
/* (non-Javadoc)
@see org.apache.hadoop.yarn.webapp.view.HtmlPage#render(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
*/
@Override
protected void render(Page.HTML<__> html) {
preHead(html);
html.title($(TITLE)).link(root_url("static", "yarn.css")).style("#layout { height: 100%; }", "#layout thead td { height: 3em; }", "#layout #navcell { width: 11em; padding: 0 1em; }", "#layout td.content { padding-top: 0 }", "#layout tbody { vertical-align: top; }", "#layout tfoot td { height: 4em; }").__(JQueryUI.class);
postHead(html);
JQueryUI.jsnotice(html);
html.table("#layout.ui-widget-content").thead().tr().td().$colspan(2).__(header()).__().__().__().tfoot().tr().td().$colspan(2).__(footer()).__().__().__().tbody().tr().td().$id("navcell").__(nav()).__().td().$class("content").__(content()).__().__().__().__().__();
} | 3.26 |
hadoop_TwoColumnLayout_header_rdh | /**
*
* @return the class that will render the header of the page.
*/
protected Class<? extends SubView> header() {
return HeaderBlock.class;
} | 3.26 |