name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_YarnServerSecurityUtils_updateAMRMToken_rdh | /**
* Update the new AMRMToken into the ugi used for RM proxy.
*
* @param token
* the new AMRMToken sent by RM
* @param user
* ugi used for RM proxy
* @param conf
* configuration
*/
public static void updateAMRMToken(Token token, UserGroupInformation user, Configuration conf) {
Token<AMRMTokenIdentifier> amrmToken = new Token<AMRMTokenIdentifier>(token.getIdentifier().array(), token.getPassword().array(), new Text(token.getKind()), new Text(token.getService()));
// Preserve the token service sent by the RM when adding the token
// to ensure we replace the previous token setup by the RM.
// Afterwards we can update the service address for the RPC layer.
user.addToken(amrmToken);
amrmToken.setService(ClientRMProxy.getAMRMTokenService(conf));
} | 3.26 |
hadoop_YarnServerSecurityUtils_parseCredentials_rdh | /**
* Parses the container launch context and returns a Credentials instance that
* contains all the tokens from the launch context.
*
* @param launchContext
* ContainerLaunchContext.
* @return the credential instance
* @throws IOException
* if there are I/O errors.
*/
public static Credentials parseCredentials(ContainerLaunchContext launchContext) throws IOException {
Credentials credentials = new Credentials();
ByteBuffer tokens = launchContext.getTokens();
if (tokens != null) {
DataInputByteBuffer buf = new DataInputByteBuffer();
tokens.rewind();
buf.reset(tokens);
credentials.readTokenStorageStream(buf);
if (LOG.isDebugEnabled()) {
for (Token<? extends TokenIdentifier> tk : credentials.getAllTokens()) {
LOG.debug("{}={}", tk.getService(), tk);
}
}
}
return credentials;
} | 3.26 |
hadoop_YarnServerSecurityUtils_selectAMRMTokenIdentifier_rdh | // Obtain the needed AMRMTokenIdentifier from the remote-UGI. RPC layer
// currently sets only the required id, but iterate through anyways just to be
// sure.
private static AMRMTokenIdentifier selectAMRMTokenIdentifier(UserGroupInformation remoteUgi) throws IOException {
AMRMTokenIdentifier result = null;
Set<TokenIdentifier> tokenIds = remoteUgi.getTokenIdentifiers();
for (TokenIdentifier tokenId : tokenIds) {
if (tokenId instanceof AMRMTokenIdentifier) {
  result = ((AMRMTokenIdentifier) (tokenId));
break;
}
}
return result;
} | 3.26 |
hadoop_YarnServerSecurityUtils_authorizeRequest_rdh | /**
* Authorizes the current request and returns the AMRMTokenIdentifier for the
* current application.
*
* @return the AMRMTokenIdentifier instance for the current user
* @throws YarnException
* exceptions from yarn servers.
*/
public static AMRMTokenIdentifier authorizeRequest() throws YarnException {
UserGroupInformation remoteUgi;
try {
remoteUgi = UserGroupInformation.getCurrentUser();
} catch (IOException e) {
String msg = ("Cannot obtain the user-name for authorizing ApplicationMaster. " + "Got exception: ") + StringUtils.stringifyException(e);
LOG.warn(msg);
throw RPCUtil.getRemoteException(msg);
}
boolean tokenFound = false;
String message = "";
AMRMTokenIdentifier appTokenIdentifier = null;
try {
appTokenIdentifier = selectAMRMTokenIdentifier(remoteUgi);
if (appTokenIdentifier == null) {
tokenFound = false;
message = "No AMRMToken found for user " + remoteUgi.getUserName();
} else {
  tokenFound = true;
}
} catch (IOException e) {
tokenFound = false;
message = "Got exception while looking for AMRMToken for user " + remoteUgi.getUserName();
}
if (!tokenFound) {
LOG.warn(message);
throw RPCUtil.getRemoteException(message);
}
return appTokenIdentifier;
} | 3.26 |
hadoop_CopyOutputFormat_getOutputCommitter_rdh | /**
* {@inheritDoc }
*/
@Override
public OutputCommitter getOutputCommitter(TaskAttemptContext context) throws IOException {
return new CopyCommitter(getOutputPath(context), context);
} | 3.26 |
hadoop_CopyOutputFormat_setCommitDirectory_rdh | /**
* Setter for the final directory for DistCp (where copied files will be
* moved, atomically).
*
* @param job
* The Job on whose configuration the working-directory is to be set.
* @param commitDirectory
* The path to use for final commit.
*/
public static void setCommitDirectory(Job job, Path commitDirectory) {
job.getConfiguration().set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, commitDirectory.toString());
} | 3.26 |
hadoop_CopyOutputFormat_getWorkingDirectory_rdh | /**
* Getter for the working directory.
*
* @param job
* The Job from whose configuration the working-directory is to
* be retrieved.
* @return The working-directory Path.
*/
public static Path getWorkingDirectory(Job job) {
return getWorkingDirectory(job.getConfiguration());
} | 3.26 |
hadoop_CopyOutputFormat_getCommitDirectory_rdh | /**
* Getter for the final commit-directory.
*
* @param job
* The Job from whose configuration the commit-directory is to be
* retrieved.
* @return The commit-directory Path.
*/
public static Path getCommitDirectory(Job job) {
return getCommitDirectory(job.getConfiguration());
} | 3.26 |
hadoop_CopyOutputFormat_checkOutputSpecs_rdh | /**
* {@inheritDoc }
*/
@Override
public void checkOutputSpecs(JobContext context) throws IOException {
Configuration conf = context.getConfiguration();
if (getCommitDirectory(conf) == null) {
throw new IllegalStateException("Commit directory not configured");
}
Path workingPath = getWorkingDirectory(conf);
if (workingPath == null) {
throw new IllegalStateException("Working directory not configured");
}
// get delegation token for outDir's file system
TokenCache.obtainTokensForNamenodes(context.getCredentials(), new Path[]{ workingPath }, conf);
} | 3.26 |
hadoop_CopyOutputFormat_setWorkingDirectory_rdh | /**
* Setter for the working directory for DistCp (where files will be copied
* before they are moved to the final commit-directory).
*
* @param job
* The Job on whose configuration the working-directory is to be set.
* @param workingDirectory
* The path to use as the working directory.
*/
public static void setWorkingDirectory(Job job, Path workingDirectory) {
job.getConfiguration().set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, workingDirectory.toString());
} | 3.26 |
hadoop_ApplicationACLsManager_checkAccess_rdh | /**
* If authorization is enabled, checks whether the user (in the callerUGI) is
* authorized to perform the access specified by 'applicationAccessType' on
* the application by checking if the user is applicationOwner or part of
* application ACL for the specific access-type.
* <ul>
* <li>The owner of the application can have all access-types on the
* application</li>
* <li>For all other users/groups application-acls are checked</li>
* </ul>
*
* @param callerUGI
* UserGroupInformation for the user.
* @param applicationAccessType
* Application Access Type.
* @param applicationOwner
* Application Owner.
* @param applicationId
* ApplicationId.
* @return true if the user has permission, false otherwise.
*/
public boolean checkAccess(UserGroupInformation callerUGI, ApplicationAccessType applicationAccessType, String applicationOwner, ApplicationId applicationId) {
LOG.debug("Verifying access-type {} for {} on application {} owned by {}", applicationAccessType, callerUGI, applicationId, applicationOwner);
String user = callerUGI.getShortUserName();
if (!areACLsEnabled()) {
  return true;
}
AccessControlList applicationACL = DEFAULT_YARN_APP_ACL;
Map<ApplicationAccessType, AccessControlList> acls = this.applicationACLS.get(applicationId);
if (acls == null) {
LOG.debug("ACL not found for application {} owned by {}." + " Using default [{}]", applicationId, applicationOwner, YarnConfiguration.DEFAULT_YARN_APP_ACL);
} else {
AccessControlList applicationACLInMap = acls.get(applicationAccessType);
if (applicationACLInMap != null) {
applicationACL = applicationACLInMap;
} else {
LOG.debug("ACL not found for access-type {} for application {}" + " owned by {}. Using default [{}]", applicationAccessType, applicationId, applicationOwner, YarnConfiguration.DEFAULT_YARN_APP_ACL);
}
}
// Allow application-owner for any type of access on the application
if ((this.adminAclsManager.isAdmin(callerUGI) || user.equals(applicationOwner)) || applicationACL.isUserAllowed(callerUGI)) {
return true;
}
return false;
} | 3.26 |
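
A minimal usage sketch of the access check above. It assumes the `ApplicationACLsManager(Configuration)` constructor and the `ApplicationId.newInstance(long, int)` factory present in recent YARN releases; the owner name and application id are placeholders.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;

public class CheckAccessExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // With ACLs disabled (the yarn.acl.enable default), checkAccess() short-circuits to true.
    ApplicationACLsManager aclsManager = new ApplicationACLsManager(conf);

    UserGroupInformation caller = UserGroupInformation.getCurrentUser();
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1); // placeholder id

    boolean allowed = aclsManager.checkAccess(
        caller, ApplicationAccessType.VIEW_APP, "alice", appId); // "alice" is a hypothetical owner
    System.out.println("view allowed: " + allowed);
  }
}
```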
hadoop_ApplicationACLsManager_isAdmin_rdh | /**
* Check if the given user is an admin.
*
* @param calledUGI
* UserGroupInformation for the user
* @return true if the user is an admin, false otherwise
*/
public final boolean isAdmin(final UserGroupInformation calledUGI) {
return this.adminAclsManager.isAdmin(calledUGI);
} | 3.26 |
hadoop_BaseNMTokenSecretManager_createNMToken_rdh | /**
* Helper function for creating NMTokens.
*
* @param applicationAttemptId
* application AttemptId.
* @param nodeId
* node Id.
* @param applicationSubmitter
* application Submitter.
* @return NMToken.
*/
public Token createNMToken(ApplicationAttemptId applicationAttemptId, NodeId nodeId, String applicationSubmitter) {
byte[] password;
NMTokenIdentifier identifier;
this.readLock.lock();
try {
identifier = new NMTokenIdentifier(applicationAttemptId, nodeId, applicationSubmitter, this.currentMasterKey.getMasterKey().getKeyId());
password = this.createPassword(identifier);
} finally {
this.readLock.unlock();
}
return newInstance(password, identifier);
} | 3.26 |
hadoop_BaseNMTokenSecretManager_createIdentifier_rdh | /**
* It is required for RPC
*/
@Override
public NMTokenIdentifier createIdentifier() {
  return new NMTokenIdentifier();
} | 3.26 |
hadoop_RecordComparator_define_rdh | /**
* Register an optimized comparator for a {@link Record} implementation.
*
* @param c
* record class for which a raw comparator is provided
* @param comparator
* Raw comparator instance for class c
*/
public static synchronized void define(Class c, RecordComparator comparator) {
WritableComparator.define(c, comparator);
} | 3.26 |
hadoop_CurrentJHParser_canParse_rdh | /**
* Can this parser parse the input?
*
* @param input
* @return Whether this parser can parse the input.
* @throws IOException
 */
public static boolean canParse(InputStream input) throws IOException {
  final DataInputStream in = new ForkedDataInputStream(input);
  try {
    final EventReader reader = new EventReader(in);
    try {
reader.getNextEvent();
} catch (IOException e) {
return false;
} finally {
reader.close();
}
} catch (IOException e) {
return false;
}
return true;
} | 3.26 |
hadoop_S3ClientFactory_withRegion_rdh | /**
* Set region.
*
* @param value
* new value
* @return the builder
*/
public S3ClientCreationParameters withRegion(final String value) {
region = value;
return this;
} | 3.26 |
hadoop_S3ClientFactory_getExecutionInterceptors_rdh | /**
* List of execution interceptors to include in the chain
* of interceptors in the SDK.
*
* @return the interceptors list
*/
public List<ExecutionInterceptor> getExecutionInterceptors() {
return executionInterceptors;
} | 3.26 |
hadoop_S3ClientFactory_withMetrics_rdh | /**
* Metrics binding. This is the S3A-level
* statistics interface, which will be wired
* up to the AWS callbacks.
*
* @param statistics
* statistics implementation
* @return this object
*/
public S3ClientCreationParameters withMetrics(@Nullable
final StatisticsFromAwsSdk statistics) {
metrics = statistics;
return this;
} | 3.26 |
hadoop_S3ClientFactory_getRegion_rdh | /**
* Get the region.
*
* @return the region
*/
public String getRegion() {
return region;
} | 3.26 |
hadoop_S3ClientFactory_getMinimumPartSize_rdh | /**
* Get the minimum part size for transfer parts.
*
* @return part size
*/
public long getMinimumPartSize() {
return minimumPartSize;
} | 3.26 |
hadoop_S3ClientFactory_withCredentialSet_rdh | /**
* Set credentials.
*
* @param value
* new value
* @return the builder
*/
public S3ClientCreationParameters withCredentialSet(final AwsCredentialsProvider value) {
credentialSet = value;
return this;
} | 3.26 |
hadoop_S3ClientFactory_withPathStyleAccess_rdh | /**
* Set path access option.
*
* @param value
* new value
* @return the builder
*/
public S3ClientCreationParameters withPathStyleAccess(final boolean value) {
pathStyleAccess = value;
return this;
} | 3.26 |
hadoop_S3ClientFactory_withExecutionInterceptors_rdh | /**
* List of execution interceptors.
*
* @param interceptors
* interceptors list.
* @return this object
*/
public S3ClientCreationParameters withExecutionInterceptors(@Nullable
final List<ExecutionInterceptor> interceptors) {
executionInterceptors = interceptors;
  return this;
} | 3.26 |
hadoop_S3ClientFactory_getMultiPartThreshold_rdh | /**
* Get the threshold for multipart operations.
*
* @return multipart threshold
*/
public long getMultiPartThreshold() {
  return multiPartThreshold;
} | 3.26 |
hadoop_S3ClientFactory_isMultipartCopy_rdh | /**
* Get the multipart flag.
*
* @return multipart flag
*/
public boolean isMultipartCopy() {
  return multipartCopy;
} | 3.26 |
hadoop_S3ClientFactory_m1_rdh | /**
* Set endpoint.
*
* @param value
* new value
* @return the builder
*/
public S3ClientCreationParameters m1(final String value) {
endpoint = value;
return this;
} | 3.26 |
hadoop_S3ClientFactory_withPathUri_rdh | /**
* Set full s3a path.
* added in HADOOP-18330
*
* @param value
* new value
* @return the builder
*/
public S3ClientCreationParameters withPathUri(final URI value) {
pathUri = value;
return this;
} | 3.26 |
hadoop_S3ClientFactory_withUserAgentSuffix_rdh | /**
* Set UA suffix.
*
* @param value
* new value
* @return the builder
*/
public S3ClientCreationParameters withUserAgentSuffix(final String value) {
userAgentSuffix = value;
return this;
} | 3.26 |
hadoop_S3ClientFactory_withHeader_rdh | /**
* Add a custom header.
*
* @param header
* header name
* @param value
* new value
* @return the builder
*/
public S3ClientCreationParameters
withHeader(String header, String value) {
headers.put(header, value);
return this;
} | 3.26 |
hadoop_S3ClientFactory_getHeaders_rdh | /**
* Get the map of headers.
*
* @return (mutable) header map
*/
public Map<String, String> getHeaders() {
return headers;
} | 3.26 |
hadoop_S3ClientFactory_getPathUri_rdh | /**
* Get the full s3 path.
* added in HADOOP-18330
*
* @return path URI
*/
public URI getPathUri() {
return pathUri;
} | 3.26 |
hadoop_S3ClientFactory_getTransferManagerExecutor_rdh | /**
* Get the executor that the transfer manager will use to execute background tasks.
*
* @return the transfer manager executor
*/
public Executor getTransferManagerExecutor() {
  return transferManagerExecutor;
} | 3.26 |
hadoop_S3ClientFactory_withMinimumPartSize_rdh | /**
* Set the minimum part size for transfer parts.
*
* @param value
* new value
* @return the builder
*/
public S3ClientCreationParameters
withMinimumPartSize(final long value) {
minimumPartSize = value;
return this;
} | 3.26 |
hadoop_S3ClientFactory_withTransferManagerExecutor_rdh | /**
* Set the executor that the transfer manager will use to execute background tasks.
*
* @param value
* new value
* @return the builder
*/
public S3ClientCreationParameters withTransferManagerExecutor(final Executor value) {
transferManagerExecutor = value;
return this;
} | 3.26 |
hadoop_S3ClientFactory_withMultipartThreshold_rdh | /**
* Set the threshold for multipart operations.
*
* @param value
* new value
* @return the builder
*/
public S3ClientCreationParameters withMultipartThreshold(final long value) {
multiPartThreshold = value;
return this;
} | 3.26 |
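
Taken together, the `with*` setters above form a fluent builder over `S3ClientCreationParameters`. A sketch of how a caller might chain them, assuming the nested class is instantiated with its no-argument constructor as the S3A code does internally; the region, suffix, header and size values are purely illustrative.

```java
import org.apache.hadoop.fs.s3a.S3ClientFactory;

public class S3ClientParametersExample {
  public static void main(String[] args) {
    // Sketch only: the values below are placeholders, not recommended settings.
    S3ClientFactory.S3ClientCreationParameters parameters =
        new S3ClientFactory.S3ClientCreationParameters()
            .withRegion("us-east-1")
            .withPathStyleAccess(true)
            .withUserAgentSuffix("example-app")        // hypothetical UA suffix
            .withHeader("x-example-header", "demo")    // hypothetical custom header
            .withMinimumPartSize(64L * 1024 * 1024)
            .withMultipartThreshold(128L * 1024 * 1024);

    // The getters shown above mirror the setters.
    System.out.println("part size = " + parameters.getMinimumPartSize());
    System.out.println("headers   = " + parameters.getHeaders());
  }
}
```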
hadoop_ZoneReencryptionStatus_setZoneName_rdh | /**
* Set the zone name. The zone name is resolved from inode id and set during
* a listReencryptionStatus call, for the crypto admin to consume.
*/
public void setZoneName(final String name) {
Preconditions.checkNotNull(name, "zone name cannot be null");
zoneName = name;
} | 3.26 |
hadoop_AvailableSpaceResolver_getSubclusterInfo_rdh | /**
* Get the mapping from NamespaceId to subcluster space info. It gets this
* mapping from the subclusters through expensive calls (e.g., RPC) and uses
* caching to avoid too many calls. The cache might be updated asynchronously
* to reduce latency.
*
* @return NamespaceId to {@link SubclusterAvailableSpace}.
*/
@Override
protected Map<String, SubclusterAvailableSpace> getSubclusterInfo(MembershipStore membershipStore) {
Map<String, SubclusterAvailableSpace> v1 = new HashMap<>();
try {
// Get the Namenode's available space info from the subclusters
// from the Membership store.
GetNamenodeRegistrationsRequest request = GetNamenodeRegistrationsRequest.newInstance();
GetNamenodeRegistrationsResponse response = membershipStore.getNamenodeRegistrations(request);
final List<MembershipState> nns = response.getNamenodeMemberships();
for (MembershipState nn : nns) {
  try {
    String nsId = nn.getNameserviceId();
    long availableSpace = nn.getStats().getAvailableSpace();
    v1.put(nsId, new SubclusterAvailableSpace(nsId, availableSpace));
  } catch (Exception e) {
    LOG.error("Cannot get stats info for {}: {}.", nn, e.getMessage());
  }
}
} catch (IOException ioe) {
LOG.error("Cannot get Namenodes from the State Store.", ioe);
}
return v1;
} | 3.26 |
hadoop_ImageLoader_getLoader_rdh | /**
* Factory for obtaining version of image loader that can read
* a particular image format.
*/
@InterfaceAudience.Private
public class LoaderFactory {
// Java doesn't support static methods on interfaces, which necessitates
// this factory class
/**
* Find an image loader capable of interpreting the specified
* layout version number. If none, return null;
*
* @param version
* fsimage layout version number to be processed
* @return ImageLoader that can interpret specified version, or null
*/
public static ImageLoader getLoader(int version) {
// Easy to add more image processors as they are written
ImageLoader[] loaders = new ImageLoader[]{ new ImageLoaderCurrent() };
    for (ImageLoader l : loaders) {
if (l.canLoadVersion(version))
return l;
}
return null;
} | 3.26 |
hadoop_ParserValidator_validate_rdh | /**
* Validates the input parameters for the {@link LogParser}.
*
* @param logs
* input log streams to the {@link LogParser}.
* @return whether the input parameters are valid or not.
*/
public final boolean validate(final InputStream logs) {
// TODO
return true;
} | 3.26 |
hadoop_ContainerRollingLogAppender_getContainerLogDir_rdh | /**
* Getter/Setter methods for log4j.
*
* @return containerLogDir.
*/
public String getContainerLogDir() {
return this.containerLogDir;
} | 3.26 |
hadoop_RejectPlacementRule_setConfig_rdh | /**
* The Reject rule does not use any configuration. Override and ignore all
* configuration.
*
* @param initArg
* the config to be set
*/
@Override
public void setConfig(Object initArg) {
// This rule ignores all config, just log and return
LOG.debug("RejectPlacementRule instantiated");
} | 3.26 |
hadoop_BufferData_getChecksum_rdh | /**
* Computes CRC32 checksum of the given buffer's contents.
*
* @param buffer
* the buffer whose content's checksum is to be computed.
* @return the computed checksum.
*/
  public static long getChecksum(ByteBuffer buffer) {
ByteBuffer tempBuffer = buffer.duplicate();
tempBuffer.rewind();
CRC32 crc32 = new CRC32();
crc32.update(tempBuffer);
return crc32.getValue();
} | 3.26 |
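
The checksum helper above uses only the JDK. A standalone sketch of the same duplicate-then-rewind pattern, which leaves the original buffer's position untouched:

```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32;

public class ChecksumExample {
  /** Mirrors BufferData.getChecksum(): checksum a duplicate so the caller's position survives. */
  static long checksum(ByteBuffer buffer) {
    ByteBuffer copy = buffer.duplicate(); // shares the bytes, has an independent position
    copy.rewind();                        // checksum the whole content, not just the remainder
    CRC32 crc32 = new CRC32();
    crc32.update(copy);
    return crc32.getValue();
  }

  public static void main(String[] args) {
    ByteBuffer buf = ByteBuffer.wrap("hello".getBytes(StandardCharsets.UTF_8));
    buf.position(3); // pretend a reader is part-way through
    System.out.println("crc32 = " + checksum(buf));
    System.out.println("position unchanged: " + buf.position()); // still 3
  }
}
```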
hadoop_BufferData_updateState_rdh | /**
* Updates the current state to the specified value.
* Asserts that the current state is as expected.
*
* @param newState
* the state to transition to.
* @param expectedCurrentState
* the collection of states from which
* transition to {@code newState} is allowed.
* @throws IllegalArgumentException
* if newState is null.
* @throws IllegalArgumentException
* if expectedCurrentState is null.
*/
public synchronized void updateState(State newState, State... expectedCurrentState) {
Validate.checkNotNull(newState, "newState");
Validate.checkNotNull(expectedCurrentState, "expectedCurrentState");
this.throwIfStateIncorrect(expectedCurrentState);
this.f0 = newState;
} | 3.26 |
hadoop_BufferData_setPrefetch_rdh | /**
* Indicates that a prefetch operation is in progress.
*
* @param actionFuture
* the {@code Future} of a prefetch action.
* @throws IllegalArgumentException
* if actionFuture is null.
*/
public synchronized void setPrefetch(Future<Void> actionFuture) {
Validate.checkNotNull(actionFuture, "actionFuture");
this.updateState(State.PREFETCHING, State.BLANK);
this.action = actionFuture;
} | 3.26 |
hadoop_BufferData_getBuffer_rdh | /**
* Gets the buffer associated with this block.
*
* @return the buffer associated with this block.
*/
public ByteBuffer getBuffer() {
return this.buffer;
} | 3.26 |
hadoop_BufferData_setDone_rdh | /**
* Indicates that this block is no longer of use and can be reclaimed.
*/
public synchronized void setDone() {
if (this.checksum != 0) {
if (getChecksum(this.buffer) != this.checksum) {
throw new IllegalStateException("checksum changed after setReady()");
}
}
this.f0 = State.DONE;
this.action = null;
} | 3.26 |
hadoop_BufferData_setReady_rdh | /**
* Marks the completion of reading data into the buffer.
* The buffer cannot be modified once in this state.
*
* @param expectedCurrentState
* the collection of states from which transition to READY is allowed.
*/
public synchronized void setReady(State... expectedCurrentState) {
  if (this.checksum != 0) {
throw new IllegalStateException("Checksum cannot be changed once set");
}
this.buffer = this.buffer.asReadOnlyBuffer();
this.checksum = getChecksum(this.buffer);
this.buffer.rewind();
this.updateState(State.READY, expectedCurrentState);
} | 3.26 |
hadoop_BufferData_getBlockNumber_rdh | /**
* Gets the id of this block.
*
* @return the id of this block.
*/
public int getBlockNumber() {
return this.blockNumber;
} | 3.26 |
hadoop_BufferData_throwIfStateIncorrect_rdh | /**
* Helper that asserts the current state is one of the expected values.
*
* @param states
* the collection of allowed states.
* @throws IllegalArgumentException
* if states is null.
*/
public void throwIfStateIncorrect(State... states) {
Validate.checkNotNull(states, "states");
if (this.stateEqualsOneOf(states)) {
return;
}
List<String> statesStr = new ArrayList<String>();
for (State s : states) {
  statesStr.add(s.toString());
}
String message = String.format("Expected buffer state to be '%s' but found: %s", String.join(" or ", statesStr), this);
throw new IllegalStateException(message);
} | 3.26 |
hadoop_BufferData_getState_rdh | /**
* Gets the state of this block.
*
* @return the state of this block.
*/
public State getState() {
return this.f0;
} | 3.26 |
hadoop_BufferData_setCaching_rdh | /**
* Indicates that a caching operation is in progress.
*
* @param actionFuture
* the {@code Future} of a caching action.
* @throws IllegalArgumentException
* if actionFuture is null.
*/
public synchronized void setCaching(Future<Void> actionFuture) {
Validate.checkNotNull(actionFuture, "actionFuture");
this.throwIfStateIncorrect(State.PREFETCHING, State.READY);
this.f0 = State.CACHING;
this.action = actionFuture;
} | 3.26 |
hadoop_FileSystemMultipartUploader_innerComplete_rdh | /**
* The upload complete operation.
*
* @param multipartUploadId
* the ID of the upload
* @param filePath
* path
* @param handleMap
* map of handles
* @return the path handle
* @throws IOException
* failure
*/
private PathHandle innerComplete(UploadHandle multipartUploadId,
    Path filePath, Map<Integer, PartHandle> handleMap) throws IOException {
checkPath(filePath);
checkUploadId(multipartUploadId.toByteArray());
checkPartHandles(handleMap);
List<Map.Entry<Integer, PartHandle>> handles = new ArrayList<>(handleMap.entrySet());
handles.sort(Comparator.comparingInt(Map.Entry::getKey));
List<Path> partHandles = handles.stream().map(pair -> {
byte[] byteArray = pair.getValue().toByteArray();
return new Path(new String(byteArray, 0, byteArray.length, StandardCharsets.UTF_8));
}).collect(Collectors.toList());
int count = partHandles.size();
// built up to identify duplicates - if the size of this set is
// below that of the number of parts, then there's a duplicate entry.
Set<Path> v15 = new HashSet<>(count);
v15.addAll(partHandles);
Preconditions.checkArgument(v15.size() == count, "Duplicate PartHandles");
byte[] uploadIdByteArray = multipartUploadId.toByteArray();
Path collectorPath = new Path(new String(uploadIdByteArray, 0, uploadIdByteArray.length, StandardCharsets.UTF_8));
boolean emptyFile = totalPartsLen(partHandles) == 0;
if (emptyFile) {
fs.create(filePath).close();
} else {
Path filePathInsideCollector = mergePaths(collectorPath, new Path(Path.SEPARATOR + filePath.getName()));
fs.create(filePathInsideCollector).close();
fs.concat(filePathInsideCollector, partHandles.toArray(new Path[handles.size()]));
new InternalOperations().rename(fs, filePathInsideCollector, filePath, Rename.OVERWRITE);
}
fs.delete(collectorPath, true);
return getPathHandle(filePath);
} | 3.26 |
hadoop_S3ListResult_hasPrefixesOrObjects_rdh | /**
* Does this listing have prefixes or objects?
*
* @return true if the result is non-empty
*/
public boolean hasPrefixesOrObjects() {
return (!getCommonPrefixes().isEmpty()) || (!getS3Objects().isEmpty());
} | 3.26 |
hadoop_S3ListResult_representsEmptyDirectory_rdh | /**
* Does this listing represent an empty directory?
*
* @param dirKey
* directory key
* @return true if the list is considered empty.
*/
public boolean representsEmptyDirectory(final String dirKey) {
// If looking for an empty directory, the marker must exist but
// no children.
// So the listing must contain the marker entry only as an object,
// and prefixes is null
List<String> keys = objectKeys();
  return ((keys.size() == 1) && keys.contains(dirKey)) && getCommonPrefixes().isEmpty();
} | 3.26 |
hadoop_S3ListResult_v1_rdh | /**
* Restricted constructors to ensure v1 or v2, not both.
*
* @param result
* v1 result
* @return new list result container
*/
  public static S3ListResult v1(ListObjectsResponse result) {
return new S3ListResult(requireNonNull(result), null);
} | 3.26 |
hadoop_S3ListResult_m0_rdh | /**
* Dump the result at debug level.
*
* @param log
* log to use
*/
public void m0(Logger log) {
Collection<CommonPrefix> prefixes = getCommonPrefixes();
Collection<S3Object> v2 = getS3Objects();
log.debug("Prefix count = {}; object count={}", prefixes.size(), v2.size());
for (S3Object s3Object : v2) {
log.debug("Summary: {} {}", s3Object.key(), s3Object.size());
}
for (CommonPrefix prefix : prefixes) {
log.debug("Prefix: {}", prefix.prefix());
}
} | 3.26 |
hadoop_S3ListResult_isV1_rdh | /**
* Is this a v1 API result or v2?
*
* @return true if v1, false if v2
*/
public boolean isV1() {
return v1Result != null;
} | 3.26 |
hadoop_S3ListResult_objectKeys_rdh | /**
* Get the list of keys in the list result.
*
* @return a possibly empty list
*/
private List<String> objectKeys() {
return getS3Objects().stream().map(S3Object::key).collect(Collectors.toList());
} | 3.26 |
hadoop_S3ListResult_v2_rdh | /**
* Restricted constructors to ensure v1 or v2, not both.
*
* @param result
* v2 result
* @return new list result container
*/
public static S3ListResult v2(ListObjectsV2Response result) {
return new S3ListResult(null, requireNonNull(result));
} | 3.26 |
hadoop_ContainerSimulator_createFromTaskContainerDefinition_rdh | /**
* Invoked when AM schedules containers to allocate.
*
* @param def
* The task's definition object.
* @return ContainerSimulator object
*/
public static ContainerSimulator createFromTaskContainerDefinition(TaskContainerDefinition def) {
return new ContainerSimulator(def.getResource(), def.getDuration(), def.getHostname(), def.getPriority(), def.getType(), def.getExecutionType(), def.getAllocationId(), def.getRequestDelay());
} | 3.26 |
hadoop_ResourceCalculatorProcessTree_initialize_rdh | /**
* Initialize the object.
*
* @throws YarnException
* Throws an exception on error.
*/
public void initialize() throws YarnException {
} | 3.26 |
hadoop_S3AInMemoryInputStream_ensureCurrentBuffer_rdh | /**
* Ensures that a non-empty valid buffer is available for immediate reading.
* It returns true when at least one such buffer is available for reading.
* It returns false on reaching the end of the stream.
*
* @return true if at least one such buffer is available for reading, false otherwise.
*/
@Override
protected boolean ensureCurrentBuffer() throws IOException {
if (isClosed()) {
return false;
}
if (getBlockData().getFileSize() == 0) {
return false;
}
FilePosition filePosition = getFilePosition();
if (filePosition.isValid()) {
// Update current position (lazy seek).
filePosition.setAbsolute(getNextReadPos());
} else {
// Read entire file into buffer.
buffer.clear();
int numBytesRead = getReader().read(buffer, 0, buffer.capacity());
      if (numBytesRead <= 0) {
return false;
}
BufferData data = new BufferData(0, buffer);
filePosition.setData(data, 0, getNextReadPos());
}
return filePosition.buffer().hasRemaining();
} | 3.26 |
hadoop_CompressionCodec_createOutputStreamWithCodecPool_rdh | /**
* Create an output stream with a codec taken from the global CodecPool.
*
* @param codec
* The codec to use to create the output stream.
* @param conf
* The configuration to use if we need to create a new codec.
* @param out
* The output stream to wrap.
* @return The new output stream
* @throws IOException
*/
static CompressionOutputStream createOutputStreamWithCodecPool(CompressionCodec codec, Configuration conf, OutputStream out) throws IOException {
Compressor compressor = CodecPool.getCompressor(codec, conf);
CompressionOutputStream stream = null;
try {
stream = codec.createOutputStream(out,
compressor);
} finally {
if (stream == null) {
CodecPool.returnCompressor(compressor);
} else {
stream.setTrackedCompressor(compressor);
}
}
return stream;
} | 3.26 |
hadoop_CompressionCodec_createInputStreamWithCodecPool_rdh | /**
* Create an input stream with a codec taken from the global CodecPool.
*
* @param codec
* The codec to use to create the input stream.
* @param conf
* The configuration to use if we need to create a new codec.
* @param in
* The input stream to wrap.
* @return The new input stream
* @throws IOException
*/
static CompressionInputStream createInputStreamWithCodecPool(CompressionCodec codec, Configuration conf, InputStream in) throws IOException {
Decompressor decompressor = CodecPool.getDecompressor(codec);
CompressionInputStream stream = null;
try {
stream = codec.createInputStream(in, decompressor);
} finally {
if (stream == null) {
CodecPool.returnDecompressor(decompressor);
} else {
stream.setTrackedDecompressor(decompressor);
}
}
return stream;
} | 3.26 |
hadoop_PairedDurationTrackerFactory_asDuration_rdh | /**
*
* @return the global duration
*/
@Override
public Duration asDuration() {
return firstDuration.asDuration();
} | 3.26 |
hadoop_DefaultCodec_createDirectDecompressor_rdh | /**
* {@inheritDoc }
*/
@Override
public DirectDecompressor createDirectDecompressor() {
return ZlibFactory.getZlibDirectDecompressor(conf);
} | 3.26 |
hadoop_SequenceFileAsTextRecordReader_nextKeyValue_rdh | /**
* Read key/value pair in a line.
*/
public synchronized boolean nextKeyValue() throws IOException, InterruptedException {
if (!sequenceFileRecordReader.nextKeyValue()) {
return false;
}
if (f0 == null) {
f0 = new Text();
}
if (value == null) {
value = new Text();
}
f0.set(sequenceFileRecordReader.getCurrentKey().toString());
value.set(sequenceFileRecordReader.getCurrentValue().toString());
return true;
} | 3.26 |
hadoop_GetClusterNodeAttributesRequest_newInstance_rdh | /**
* Create new instance of GetClusterNodeAttributesRequest.
*
* @return GetClusterNodeAttributesRequest is returned.
*/
public static GetClusterNodeAttributesRequest newInstance() {
return Records.newRecord(GetClusterNodeAttributesRequest.class);
} | 3.26 |
hadoop_RouterMetricsService_getNamenodeMetrics_rdh | /**
* Get the Namenode metrics.
*
* @return Namenode metrics.
*/
public NamenodeBeanMetrics getNamenodeMetrics() {
return this.nnMetrics;
} | 3.26 |
hadoop_RouterMetricsService_m0_rdh | /**
* Get the metrics system for the Router Client.
*
* @return Router Client metrics.
*/
public RouterClientMetrics m0() {
return this.routerClientMetrics;
} | 3.26 |
hadoop_RouterMetricsService_getRouterMetrics_rdh | /**
* Get the metrics system for the Router.
*
* @return Router metrics.
*/
public RouterMetrics getRouterMetrics() {
return this.routerMetrics;
} | 3.26 |
hadoop_RouterMetricsService_getJvmMetrics_rdh | /**
* Get the JVM metrics for the Router.
*
* @return JVM metrics.
*/
public JvmMetrics getJvmMetrics() {
if (this.routerMetrics == null) {
return null;
}
return this.routerMetrics.getJvmMetrics();
} | 3.26 |
hadoop_RouterMetricsService_getRBFMetrics_rdh | /**
* Get the federation metrics.
*
* @return Federation metrics.
*/
public RBFMetrics getRBFMetrics() {
return this.rbfMetrics;
} | 3.26 |
hadoop_DefaultAppReportFetcher_getApplicationReport_rdh | /**
* Get an application report for the specified application id from the RM and
* fall back to the Application History Server if not found in RM.
*
* @param appId
* id of the application to get.
* @return the ApplicationReport for the appId.
* @throws YarnException
* on any error.
* @throws IOException
* connection exception.
*/
@Override
public FetchedAppReport getApplicationReport(ApplicationId appId) throws YarnException, IOException {
return super.getApplicationReport(applicationsManager, appId);
} | 3.26 |
hadoop_AHSController_logs_rdh | /**
* Render the logs page.
*/
public void logs() {
render(AHSLogsPage.class);
} | 3.26 |
hadoop_JobBase_getDoubleValue_rdh | /**
*
* @param name
* the counter name
* @return return the value of the given counter.
*/
protected Double getDoubleValue(Object name) {
return this.doubleCounters.get(name);
} | 3.26 |
hadoop_JobBase_setLongValue_rdh | /**
* Set the given counter to the given value
*
* @param name
* the counter name
* @param value
* the value for the counter
*/
protected void setLongValue(Object name, long value) {
this.longCounters.put(name, Long.valueOf(value));
} | 3.26 |
hadoop_JobBase_addDoubleValue_rdh | /**
* Increment the given counter by the given incremental value. If the counter
* does not exist, one is created with value 0.
*
* @param name
* the counter name
* @param inc
* the incremental value
* @return the updated value.
*/
protected Double addDoubleValue(Object name, double inc) {
Double val = this.doubleCounters.get(name);
Double v3 = null;
if (val == null) {
v3 = new Double(inc);
} else {
v3 = new Double(val.doubleValue() + inc);
}
this.doubleCounters.put(name, v3);
return v3;
} | 3.26 |
hadoop_JobBase_configure_rdh | /**
* Initializes a new instance from a {@link JobConf}.
*
* @param job
* the configuration
*/
public void configure(JobConf job) {
this.longCounters = new TreeMap<Object, Long>();
this.doubleCounters = new TreeMap<Object, Double>();
} | 3.26 |
hadoop_JobBase_addLongValue_rdh | /**
* Increment the given counter by the given incremental value. If the counter
* does not exist, one is created with value 0.
*
* @param name
* the counter name
* @param inc
* the incremental value
* @return the updated value.
*/
protected Long addLongValue(Object name, long inc) {
Long val = this.longCounters.get(name);
Long retv = null;
if (val == null) {
retv = Long.valueOf(inc);
} else {
retv = Long.valueOf(val.longValue() + inc);
}
this.longCounters.put(name, retv);
return retv;
} | 3.26 |
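
The counter helpers above all follow the same read-or-initialize-then-put pattern over a `TreeMap`. A standalone JDK-only sketch of the `addLongValue` contract, using `Map.merge` as a shorthand for the null check:

```java
import java.util.Map;
import java.util.TreeMap;

public class CounterExample {
  private final Map<Object, Long> longCounters = new TreeMap<>();

  /** Same contract as JobBase.addLongValue: start at 0 if absent, add inc, return the total. */
  long addLongValue(Object name, long inc) {
    return longCounters.merge(name, inc, Long::sum);
  }

  public static void main(String[] args) {
    CounterExample counters = new CounterExample();
    counters.addLongValue("records", 1);
    counters.addLongValue("records", 4);
    System.out.println(counters.addLongValue("records", 0)); // prints 5
  }
}
```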
hadoop_JobBase_setDoubleValue_rdh | /**
* Set the given counter to the given value
*
* @param name
* the counter name
* @param value
* the value for the counter
*/
  protected void setDoubleValue(Object name, double value) {
this.doubleCounters.put(name, new Double(value));
} | 3.26 |
hadoop_JobBase_getReport_rdh | /**
* log the counters
*/
protected String getReport() {
  StringBuffer v4 = new StringBuffer();
  Iterator iter = this.longCounters.entrySet().iterator();
  while (iter.hasNext()) {
    Entry e = ((Entry) (iter.next()));
    v4.append(e.getKey().toString()).append("\t").append(e.getValue()).append("\n");
}
iter = this.doubleCounters.entrySet().iterator();
while (iter.hasNext()) {
Entry e = ((Entry) (iter.next()));
v4.append(e.getKey().toString()).append("\t").append(e.getValue()).append("\n");
}
return v4.toString();
} | 3.26 |
hadoop_JobBase_report_rdh | /**
* log the counters
*/
protected void report() {
LOG.info(getReport());
} | 3.26 |
hadoop_JobBase_getLongValue_rdh | /**
*
* @param name
* the counter name
* @return return the value of the given counter.
*/
protected Long getLongValue(Object name) {
return this.longCounters.get(name);
} | 3.26 |
hadoop_OSSListRequest_v2_rdh | /**
* Restricted constructors to ensure v1 or v2, not both.
*
* @param request
* v2 request
* @return new list request container
*/
public static OSSListRequest v2(ListObjectsV2Request request) {
return new OSSListRequest(null, request);
} | 3.26 |
hadoop_OSSListRequest_isV1_rdh | /**
* Is this a v1 API request or v2?
*
* @return true if v1, false if v2
*/
public boolean isV1() {
return v1Request != null;
} | 3.26 |
hadoop_FTPInputStream_markSupported_rdh | // Not supported.
@Override
public boolean markSupported() {
return false;
} | 3.26 |
hadoop_FTPInputStream_seek_rdh | // We don't support seek.
@Override
public void seek(long pos) throws IOException {
throw new IOException("Seek not supported");
} | 3.26 |
hadoop_GlobExpander_expandLeftmost_rdh | /**
* Expand the leftmost outer curly bracket pair containing a
* slash character ("/") in <code>filePattern</code>.
*
* @param filePatternWithOffset
* @return expanded file patterns
* @throws IOException
*/
private static List<StringWithOffset> expandLeftmost(StringWithOffset filePatternWithOffset) throws IOException {
String filePattern = filePatternWithOffset.string;
int leftmost = m0(filePattern, filePatternWithOffset.offset);
if (leftmost == (-1)) {
return null;
}
int curlyOpen = 0;
StringBuilder prefix = new StringBuilder(filePattern.substring(0, leftmost));
StringBuilder v8 = new StringBuilder();
List<String> alts = new ArrayList<String>();
StringBuilder alt = new StringBuilder();
StringBuilder cur = prefix;
for (int i = leftmost; i < filePattern.length(); i++) {
char c = filePattern.charAt(i);
if (cur == v8) {
cur.append(c);
} else if (c == '\\') {
i++;
if (i >= filePattern.length()) {
throw new IOException(((("Illegal file pattern: " + "An escaped character does not present for glob ") + filePattern) + " at ") + i);
}
c = filePattern.charAt(i);
cur.append(c);
} else if (c == '{') {
      if ((curlyOpen++) == 0) {
alt.setLength(0);
cur = alt;
} else {
  cur.append(c);
}
} else if ((c == '}') && (curlyOpen > 0)) {
if ((--curlyOpen) == 0) {
alts.add(alt.toString());
alt.setLength(0);
cur = v8;
} else {
cur.append(c);
}
} else if (c == ',') {
if (curlyOpen == 1) {
alts.add(alt.toString());
alt.setLength(0);
} else {
cur.append(c);
}
} else {
cur.append(c);
}
}
List<StringWithOffset> exp = new ArrayList<StringWithOffset>();
for (String string : alts) {
      exp.add(new StringWithOffset((prefix + string) + v8, prefix.length()));
}
return exp;
} | 3.26 |
hadoop_GlobExpander_expand_rdh | /**
* Expand globs in the given <code>filePattern</code> into a collection of
* file patterns so that in the expanded set no file pattern has a slash
* character ("/") in a curly bracket pair.
* <p>
* Some examples of how the filePattern is expanded:<br>
* <pre>
* <b>
* filePattern - Expanded file pattern </b>
* {a/b} - a/b
* /}{a/b} - /}a/b
* p{a/b,c/d}s - pa/bs, pc/ds
* {a/b,c/d,{e,f}} - a/b, c/d, {e,f}
* {a/b,c/d}{e,f} - a/b{e,f}, c/d{e,f}
* {a,b}/{b,{c/d,e/f}} - {a,b}/b, {a,b}/c/d, {a,b}/e/f
* {a,b}/{c/\d} - {a,b}/c/d
* </pre>
*
* @param filePattern
* file pattern.
* @return expanded file patterns
* @throws IOException
* raised on errors performing I/O.
*/
public static List<String> expand(String filePattern) throws IOException {
List<String> fullyExpanded = new ArrayList<String>();
List<StringWithOffset> toExpand =
new ArrayList<StringWithOffset>();
toExpand.add(new StringWithOffset(filePattern, 0));
while (!toExpand.isEmpty()) {
StringWithOffset path = toExpand.remove(0);
List<StringWithOffset> expanded = expandLeftmost(path);
if (expanded == null) {
fullyExpanded.add(path.string);
} else {
toExpand.addAll(0, expanded);
}
}
return fullyExpanded;
} | 3.26 |
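
A hedged usage sketch of the expansion above; the expected outputs are taken straight from the javadoc table. `GlobExpander` is an internal class in `org.apache.hadoop.fs`, so the sketch assumes the calling code can see it (for example, a test in the same package).

```java
// Sketch only: assumes GlobExpander is visible from the calling code.
static void demo() throws IOException {
  List<String> simple = GlobExpander.expand("p{a/b,c/d}s");
  System.out.println(simple);   // [pa/bs, pc/ds], per the javadoc table

  List<String> nested = GlobExpander.expand("{a/b,c/d,{e,f}}");
  System.out.println(nested);   // [a/b, c/d, {e,f}]
}
```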
hadoop_VolumeFailureSummary_getLastVolumeFailureDate_rdh | /**
* Returns the date/time of the last volume failure in milliseconds since
* epoch.
*
* @return date/time of last volume failure in milliseconds since epoch
*/
public long getLastVolumeFailureDate() {
return this.lastVolumeFailureDate;
} | 3.26 |
hadoop_VolumeFailureSummary_getFailedStorageLocations_rdh | /**
* Returns each storage location that has failed, sorted.
*
* @return each storage location that has failed, sorted
*/
public String[] getFailedStorageLocations() {
return this.failedStorageLocations;
} | 3.26 |
hadoop_VolumeFailureSummary_getEstimatedCapacityLostTotal_rdh | /**
* Returns estimate of capacity lost. This is said to be an estimate, because
* in some cases it's impossible to know the capacity of the volume, such as if
* we never had a chance to query its capacity before the failure occurred.
*
* @return estimate of capacity lost in bytes
*/
  public long getEstimatedCapacityLostTotal() {
return this.estimatedCapacityLostTotal;
} | 3.26 |
hadoop_Paths_getRelativePath_rdh | /**
* Using {@code URI#relativize()}, build the relative path from the
* base path to the full path.
* If {@code childPath} is not a child of {@code basePath} the outcome
* is undefined.
*
* @param basePath
* base path
* @param fullPath
* full path under the base path.
* @return the relative path
*/
public static String getRelativePath(Path basePath, Path fullPath) {
return basePath.toUri().relativize(fullPath.toUri()).getPath();
} | 3.26 |
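
The helper above delegates to `java.net.URI#relativize`. A JDK-only sketch of the same behaviour, including the case the javadoc calls undefined, where the second path is not under the first:

```java
import java.net.URI;

public class RelativizeExample {
  public static void main(String[] args) {
    URI base = URI.create("s3a://bucket/output/");
    URI child = URI.create("s3a://bucket/output/year=2024/part-0000");

    // Child of the base: a clean relative path comes back.
    System.out.println(base.relativize(child).getPath()); // year=2024/part-0000

    // Not under the base: relativize() hands back the second URI unchanged,
    // which is why getRelativePath() declares the outcome undefined.
    URI other = URI.create("s3a://bucket/elsewhere/file");
    System.out.println(base.relativize(other).getPath()); // /elsewhere/file
  }
}
```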
hadoop_Paths_getPartitions_rdh | /**
* Get the set of partitions from the list of files being staged.
* This is all immediate parents of those files. If a file is in the root
* dir, the partition is declared to be
* {@link StagingCommitterConstants#TABLE_ROOT}.
*
* @param attemptPath
* path for the attempt
* @param taskOutput
* list of output files.
* @return list of partitions.
* @throws IOException
* IO failure
*/
public static Set<String> getPartitions(Path attemptPath, List<? extends FileStatus> taskOutput) throws IOException {
// get a list of partition directories
    Set<String> partitions = new LinkedHashSet<>();
for (FileStatus fileStatus : taskOutput) {
// sanity check the output paths
Path outputFile = fileStatus.getPath();
if (!fileStatus.isFile()) {
throw new PathIsDirectoryException(outputFile.toString());
}
String partition = getPartition(getRelativePath(attemptPath, outputFile));
partitions.add(partition != null ? partition : TABLE_ROOT);
}
return partitions;
} | 3.26 |
hadoop_Paths_getPartition_rdh | /**
* Returns the partition of a relative file path, or null if the path is a
* file name with no relative directory.
*
* @param relative
* a relative file path
* @return the partition of the relative file path
*/
protected static String getPartition(String relative) {
return getParent(relative);
} | 3.26 |
hadoop_Paths_getStagingUploadsParentDirectory_rdh | /**
* Build a qualified parent path for the temporary multipart upload commit
* directory built by {@link #getMultipartUploadCommitsDirectory(Configuration, String)}.
*
* @param conf
* configuration defining default FS.
* @param uuid
* uuid of job
* @return a path which can be used for temporary work
* @throws IOException
* on an IO failure.
*/
public static Path getStagingUploadsParentDirectory(Configuration conf, String uuid) throws IOException {
return getMultipartUploadCommitsDirectory(conf, uuid).getParent();
} | 3.26 |
hadoop_Paths_path_rdh | /**
* Varargs constructor of paths. Not very efficient.
*
* @param parent
* parent path
* @param child
* child entries. "" elements are skipped.
* @return the full child path.
*/
public static Path path(Path parent, String... child) {
Path p = parent;
    for (String c : child) {
if (!c.isEmpty()) {
p = new Path(p, c);
}
}
return p;
} | 3.26 |
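
A small usage sketch of the varargs helper above. The bucket and path names are placeholders, and the import assumes the S3A staging committer's `Paths` class; empty strings are skipped, so optional components can be passed straight through.

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3a.commit.staging.Paths; // assumed location of the helper

public class PathsExample {
  public static void main(String[] args) {
    Path base = new Path("s3a://example-bucket/output"); // placeholder bucket
    String optionalPartition = "";                       // empty entries are skipped

    Path full = Paths.path(base, "year=2024", optionalPartition, "part-0000");
    System.out.println(full); // s3a://example-bucket/output/year=2024/part-0000
  }
}
```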
hadoop_Paths_getLocalTaskAttemptTempDir_rdh | /**
* Get the task attempt temporary directory in the local filesystem.
* This must be unique to all tasks on all jobs running on all processes
* on this host.
* It's constructed as uuid+task-attempt-ID, relying on UUID to be unique
* for each job.
*
* @param conf
* configuration
* @param uuid
* some UUID, such as a job UUID
* @param attemptID
* attempt ID
* @return a local task attempt directory.
* @throws IOException
* IO problem.
*/
public static Path getLocalTaskAttemptTempDir(final Configuration conf, final String uuid, final TaskAttemptID attemptID) throws IOException {
try {
      final LocalDirAllocator allocator = new LocalDirAllocator(Constants.BUFFER_DIR);
String name = (uuid + "-") + attemptID;
return tempFolders.get(name, () -> {
return FileSystem.getLocal(conf).makeQualified(allocator.getLocalPathForWrite(name, conf));
});
} catch (ExecutionException | UncheckedExecutionException e) {
Throwable cause = e.getCause();
if (cause instanceof RuntimeException) {
throw ((RuntimeException) (cause));
}
      if (cause instanceof IOException) {
throw ((IOException) (cause));
}
throw new IOException(e);
}
} | 3.26 |