name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_AllocateRequest_schedulingRequests_rdh | /**
* Set the <code>schedulingRequests</code> of the request.
*
* @see AllocateRequest#setSchedulingRequests(List)
* @param schedulingRequests
* <code>SchedulingRequest</code> of the request
* @return {@link AllocateRequestBuilder}
*/
@Public
@Unstable
public AllocateRequestBuilder schedulingRequests(List<SchedulingRequest> schedulingRequests) {
allocateRequest.setSchedulingRequests(schedulingRequests);
    return this;
  }
/**
* Set the <code>trackingUrl</code> of the request.
*
* @see AllocateRequest#setTrackingUrl(String)
* @param trackingUrl
* new tracking url
* @return {@link AllocateRequestBuilder} | 3.26 |
hadoop_AllocateRequest_updateRequests_rdh | /**
* Set the <code>updateRequests</code> of the request.
*
* @see AllocateRequest#setUpdateRequests(List)
* @param updateRequests
* <code>updateRequests</code> of the request
* @return {@link AllocateRequestBuilder}
*/
@Public
@Unstable
public AllocateRequestBuilder updateRequests(List<UpdateContainerRequest> updateRequests) {
allocateRequest.setUpdateRequests(updateRequests);
return this;
} | 3.26 |
hadoop_AllocateRequest_responseId_rdh | /**
* Set the <code>responseId</code> of the request.
*
* @see AllocateRequest#setResponseId(int)
* @param responseId
* <code>responseId</code> of the request
* @return {@link AllocateRequestBuilder}
*/
@Public
@Stable
public AllocateRequestBuilder responseId(int responseId) {
allocateRequest.setResponseId(responseId);
return this;
}
/**
* Set the <code>progress</code> of the request.
*
* @see AllocateRequest#setProgress(float)
* @param progress
* <code>progress</code> of the request
* @return {@link AllocateRequestBuilder} | 3.26 |
hadoop_AllocateRequest_askList_rdh | /**
* Set the <code>askList</code> of the request.
*
* @see AllocateRequest#setAskList(List)
* @param askList
* <code>askList</code> of the request
* @return {@link AllocateRequestBuilder}
*/
@Public
@Stable
public AllocateRequestBuilder askList(List<ResourceRequest> askList) {
allocateRequest.setAskList(askList);
return this;
} | 3.26 |
hadoop_AllocateRequest_build_rdh | /**
* Return generated {@link AllocateRequest} object.
*
* @return {@link AllocateRequest}
*/
@Public
@Stable
public AllocateRequest build() {
return allocateRequest;
} | 3.26 |
hadoop_AllocateRequest_releaseList_rdh | /**
* Set the <code>releaseList</code> of the request.
*
* @see AllocateRequest#setReleaseList(List)
* @param releaseList
* <code>releaseList</code> of the request
* @return {@link AllocateRequestBuilder}
*/
@Public
@Stable
public AllocateRequestBuilder releaseList(List<ContainerId> releaseList) {
allocateRequest.setReleaseList(releaseList);
return this;
} | 3.26 |
hadoop_AllocateRequest_setSchedulingRequests_rdh | /**
* Set the list of Scheduling requests to inform the
* <code>ResourceManager</code> about the application's resource requirements
* (potentially including allocation tags and placement constraints).
*
* @param schedulingRequests
* list of {@link SchedulingRequest} to update
* the <code>ResourceManager</code> about the application's resource
* requirements.
*/
@Public
@Unstable
public void setSchedulingRequests(List<SchedulingRequest> schedulingRequests) {
} | 3.26 |
hadoop_HostSet_match_rdh | /**
* The function that checks whether there exists an entry foo in the set
* so that addr <= foo.
*/
  boolean match(InetSocketAddress addr) {
int port = addr.getPort();
Collection<Integer> ports = addrs.get(addr.getAddress());
    boolean exactMatch = ports.contains(port);
    boolean genericMatch = ports.contains(0);
return exactMatch || genericMatch;
} | 3.26 |
hadoop_HostSet_matchedBy_rdh | /**
* The function that checks whether there exists an entry foo in the set
* so that foo <= addr.
*/
boolean matchedBy(InetSocketAddress addr) {
Collection<Integer> ports = addrs.get(addr.getAddress());
return addr.getPort() == 0 ? !ports.isEmpty() : ports.contains(addr.getPort());
} | 3.26 |
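An illustrative reading of the two HostSet snippets above: an entry stored with port 0 acts as a wildcard, so match() accepts an address when the set holds either that exact host:port or a host:0 entry, while matchedBy() called with a port-0 address simply asks whether the set contains any entry for that host at all.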
hadoop_MutableGaugeFloat_toString_rdh | /**
*
* @return the value of the metric
*/
public String toString() {
    return value.toString();
} | 3.26 |
hadoop_OBSFileSystem_append_rdh | /**
* Append to an existing file (optional operation).
*
* @param f
* the existing file to be appended
* @param bufferSize
* the size of the buffer to be used
* @param progress
* for reporting progress if it is not null
* @throws IOException
* indicating that append is not supported
 */
  @Override
  public FSDataOutputStream append(final Path f, final int bufferSize,
      final Progressable progress) throws IOException {
    if (!isFsBucket()) {
throw new UnsupportedOperationException("non-posix bucket. Append is not supported " + "by OBSFileSystem");
}
f0.debug("append: Append file {}.", f);
    String key = OBSCommonUtils.pathToKey(this, f);
// get the status or throw an FNFE
FileStatus status = getFileStatus(f);
long objectLen = status.getLen();
// if the thread reaches here, there is something at the path
if (status.isDirectory()) {
// path references a directory: automatic error
throw new FileAlreadyExistsException(f + " is a directory");
}
return new FSDataOutputStream(new OBSBlockOutputStream(this, key, objectLen, new SemaphoredDelegatingExecutor(boundedMultipartUploadThreadPool, blockOutputActiveBlocks, true), true), null);
} | 3.26 |
hadoop_OBSFileSystem_exists_rdh | /**
* Check if a path exists.
*
* @param f
* source path
* @return true if the path exists
* @throws IOException
* IO failure
*/
  @Override
  public boolean exists(final Path f) throws IOException {
try {
return getFileStatus(f) != null;
} catch (FileNotFoundException | FileConflictException e) {
return false;
}
} | 3.26 |
hadoop_OBSFileSystem_listStatus_rdh | /**
* This public interface is provided specially for Huawei MRS. List the
* statuses of the files/directories in the given path if the path is a
 * directory. When recursive is true, iterate over all objects in the given path
 * and its subdirectories.
*
* @param f
* given path
* @param recursive
 * 		whether to iterate over objects in subdirectories
 * @return the statuses of the files/directories in the given path
* @throws FileNotFoundException
* when the path does not exist
* @throws IOException
* see specific implementation
*/
  public FileStatus[] listStatus(final Path f, final boolean recursive) throws FileNotFoundException, IOException {
    long startTime = System.currentTimeMillis();
long threadId = Thread.currentThread().getId();
try {
      FileStatus[] statuses = OBSCommonUtils.innerListStatus(this, f, recursive);
long endTime = System.currentTimeMillis();
f0.debug("List status for path:{}, thread:{}, timeUsedInMilliSec:{}", f, threadId, endTime - startTime);
return statuses;
} catch (ObsException e) {
throw OBSCommonUtils.translateException("listStatus with recursive flag[" + (recursive ? "true] " : "false] "), f, e);
}
} | 3.26 |
hadoop_OBSFileSystem_isReadTransformEnabled_rdh | /**
* Get read transform switch stat.
*
* @return is read transform enabled
*/
boolean isReadTransformEnabled() {
return readTransformEnable;
} | 3.26 |
hadoop_OBSFileSystem_innerGetFileStatus_rdh | /**
* Inner implementation without retry for {@link #getFileStatus(Path)}.
*
* @param f
* the path we want information from
* @return a FileStatus object
* @throws IOException
* on IO failure
*/
@VisibleForTesting
OBSFileStatus innerGetFileStatus(final Path f) throws IOException {
if (enablePosix) {
return OBSPosixBucketUtils.innerFsGetObjectStatus(this, f);
}
return OBSObjectBucketUtils.innerGetObjectStatus(this, f);
}
/**
* Return the {@link ContentSummary} of a given {@link Path}.
*
* @param f
* path to use
* @return the {@link ContentSummary} | 3.26 |
hadoop_OBSFileSystem_getSse_rdh | /**
* Return server-side encryption wrapper used by this filesystem instance.
*
* @return the server-side encryption wrapper
*/
SseWrapper getSse() {
return sse;
} | 3.26 |
hadoop_OBSFileSystem_mkdirs_rdh | /**
* Make the given path and all non-existent parents into directories. Has the
* semantics of Unix {@code 'mkdir -p'}. Existence of the directory hierarchy
* is not an error.
*
* @param path
* path to create
* @param permission
* to apply to f
* @return true if a directory was created
* @throws FileAlreadyExistsException
* there is a file at the path specified
* @throws IOException
* other IO problems
*/
@Override
public boolean mkdirs(final Path path, final FsPermission permission) throws IOException, FileAlreadyExistsException {
try {
return OBSCommonUtils.innerMkdirs(this, path);
} catch (ObsException e) {
throw OBSCommonUtils.translateException("mkdirs", path, e);
}
} | 3.26 |
hadoop_OBSFileSystem_getReadAheadRange_rdh | /**
* Return the read ahead range used by this filesystem.
*
* @return read ahead range
*/
@VisibleForTesting
long getReadAheadRange() {
return readAheadRange;
} | 3.26 |
hadoop_OBSFileSystem_getUri_rdh | /**
* Return a URI whose scheme and authority identify this FileSystem.
*
* @return the URI of this filesystem.
*/
@Override
public URI getUri() {
return uri;
}
/**
* Return the default port for this FileSystem.
*
* @return -1 to indicate the port is undefined, which agrees with the
contract of {@link URI#getPort()} | 3.26 |
hadoop_OBSFileSystem_isFsBucket_rdh | /**
* Is posix bucket or not.
*
* @return is it posix bucket
*/
boolean isFsBucket() {
return enablePosix;
} | 3.26 |
hadoop_OBSFileSystem_canonicalizeUri_rdh | /**
* Canonicalize the given URI.
*
* @param rawUri
* the URI to be canonicalized
* @return the canonicalized URI
*/
@Override
protected URI canonicalizeUri(final URI rawUri) {
return OBSLoginHelper.canonicalizeUri(rawUri, getDefaultPort());
} | 3.26 |
hadoop_OBSFileSystem_getCopyPartSize_rdh | /**
* Return copy part size.
*
* @return copy part size
*/
long getCopyPartSize() {
return copyPartSize;
} | 3.26 |
hadoop_OBSFileSystem_getBoundedCopyPartThreadPool_rdh | /**
* Return bounded thread pool for copy part.
*
* @return the bounded thread pool for copy part
*/
ThreadPoolExecutor getBoundedCopyPartThreadPool() {
return boundedCopyPartThreadPool;
} | 3.26 |
hadoop_OBSFileSystem_getObsClient_rdh | /**
* Return the OBS client used by this filesystem.
*
* @return OBS client
*/
@VisibleForTesting
ObsClient getObsClient() {
return obs;
} | 3.26 |
hadoop_OBSFileSystem_m2_rdh | /**
* Override {@code getCanonicalServiceName} and return {@code null} since
* delegation token is not supported.
*/
@Override
public String m2() {
// Does not support Token
return null;
} | 3.26 |
hadoop_OBSFileSystem_getWorkingDirectory_rdh | /**
* Return the current working directory for the given file system.
*
* @return the directory pathname
*/
@Override
public Path getWorkingDirectory() {
return workingDir;
} | 3.26 |
hadoop_OBSFileSystem_isEnableTrash_rdh | /**
* Return a flag that indicates if fast delete is enabled.
*
* @return the flag
*/
boolean isEnableTrash() {
return enableTrash;
} | 3.26 |
hadoop_OBSFileSystem_getUsername_rdh | /**
* Return the username of the filesystem.
*
* @return the short name of the user who instantiated the filesystem
*/
String getUsername() {
return username;
} | 3.26 |
hadoop_OBSFileSystem_getObsListing_rdh | /**
* Return the OBSListing instance used by this filesystem.
*
* @return the OBSListing instance
*/
OBSListing getObsListing() {
return obsListing;
} | 3.26 |
hadoop_OBSFileSystem_open_rdh | /**
* Open an FSDataInputStream at the indicated Path.
*
* @param f
* the file path to open
* @param bufferSize
* the size of the buffer to be used
* @return the FSDataInputStream for the file
* @throws IOException
* on any failure to open the file
*/
  @Override
  public FSDataInputStream open(final Path f, final int bufferSize) throws IOException {
f0.debug("Opening '{}' for reading.", f);
final FileStatus fileStatus = getFileStatus(f);
if (fileStatus.isDirectory()) {
throw new FileNotFoundException(("Can't open " + f) + " because it is a directory");
    }
    return new FSDataInputStream(new OBSInputStream(bucket, OBSCommonUtils.pathToKey(this, f), fileStatus.getLen(), obs, statistics, readAheadRange, this));
} | 3.26 |
hadoop_OBSFileSystem_getSchemeStatistics_rdh | /**
* Return the {@link Statistics} instance used by this filesystem.
*
* @return the used {@link Statistics} instance
*/
Statistics getSchemeStatistics() {
return statistics;
} | 3.26 |
hadoop_OBSFileSystem_initCannedAcls_rdh | /**
* Initialize bucket acl for upload, write operation.
*
* @param conf
* the configuration to use for the FS.
*/
private void initCannedAcls(final Configuration conf) {
// No canned acl in obs
String cannedACLName = conf.get(OBSConstants.CANNED_ACL, OBSConstants.DEFAULT_CANNED_ACL);
if (!cannedACLName.isEmpty()) {
switch (cannedACLName) {
case "Private" :
case "PublicRead" :
case "PublicReadWrite" :
case "AuthenticatedRead" :
case "LogDeliveryWrite" :
case "BucketOwnerRead" :case "BucketOwnerFullControl" :
cannedACL = new AccessControlList();
break;
default :
cannedACL
= null;
}
} else {
cannedACL = null;
}
} | 3.26 |
hadoop_OBSFileSystem_getBoundedListThreadPool_rdh | /**
* Return bounded thread pool for list.
*
* @return bounded thread pool for list
*/
ThreadPoolExecutor getBoundedListThreadPool() {
return boundedListThreadPool;
} | 3.26 |
hadoop_OBSFileSystem_create_rdh | /**
* Create an FSDataOutputStream at the indicated Path with write-progress
* reporting.
*
* @param f
* the file name to create
* @param permission
* permission of
* @param flags
* {@link CreateFlag}s to use for this stream
* @param bufferSize
* the size of the buffer to be used
* @param replication
* required block replication for the file
* @param blkSize
* block size
* @param progress
* progress
* @param checksumOpt
* check sum option
* @throws IOException
* io exception
*/
@Override
@SuppressWarnings("checkstyle:parameternumber")
  public FSDataOutputStream create(final Path f, final FsPermission permission,
      final EnumSet<CreateFlag> flags, final int bufferSize, final short replication,
      final long blkSize, final Progressable progress, final ChecksumOpt checksumOpt) throws IOException {
    f0.debug("create: Creating new file {}, flags:{}, isFsBucket:{}", f, flags, isFsBucket());
    if ((null != flags) && flags.contains(CreateFlag.APPEND)) {
if (!isFsBucket()) {
throw new UnsupportedOperationException("non-posix bucket. Append is not supported by " + "OBSFileSystem");
}
String key = OBSCommonUtils.pathToKey(this, f);
FileStatus status;
long objectLen = 0;
try {
// get the status or throw an FNFE
status = getFileStatus(f);
objectLen = status.getLen();
// if the thread reaches here, there is something at the path
if (status.isDirectory()) {
// path references a directory: automatic error
throw new FileAlreadyExistsException(f + " is a directory");
}
} catch (FileNotFoundException e) {
f0.debug("FileNotFoundException, create: Creating new file {}", f);
}
      return new FSDataOutputStream(new OBSBlockOutputStream(this, key, objectLen, new SemaphoredDelegatingExecutor(boundedMultipartUploadThreadPool, blockOutputActiveBlocks, true), true), null);
} else {
return create(f, permission, (flags == null) || flags.contains(CreateFlag.OVERWRITE), bufferSize, replication, blkSize, progress);
}
} | 3.26 |
hadoop_OBSFileSystem_getBoundedCopyThreadPool_rdh | /**
* Return bounded thread pool for copy.
*
* @return the bounded thread pool for copy
*/
ThreadPoolExecutor getBoundedCopyThreadPool() {
return boundedCopyThreadPool;
} | 3.26 |
hadoop_OBSFileSystem_getBucket_rdh | /**
* Return the bucket of this filesystem.
*
* @return the bucket
*/
String getBucket() {
return bucket;
} | 3.26 |
hadoop_OBSFileSystem_close_rdh | /**
* Close the filesystem. This shuts down all transfers.
*
* @throws IOException
* IO problem
*/
  @Override
  public void close() throws IOException {
f0.debug("This Filesystem closed by user, clear resource.");
if (closed.getAndSet(true)) {
// already closed
return;
}
try {
super.close();
} finally {
OBSCommonUtils.shutdownAll(boundedMultipartUploadThreadPool, boundedCopyThreadPool,
boundedDeleteThreadPool, boundedCopyPartThreadPool, boundedListThreadPool);
}
} | 3.26 |
hadoop_OBSFileSystem_getMaxKeys_rdh | /**
* Return the maximum number of keys to get while paging through a directory
* listing.
*
* @return the maximum number of keys
*/
int getMaxKeys() {
return maxKeys;
} | 3.26 |
hadoop_OBSFileSystem_m0_rdh | /**
* Return the part size for multipart upload used by {@link OBSBlockOutputStream}.
*
* @return the part size
*/
  long m0() {
    return partSize;
} | 3.26 |
hadoop_OBSFileSystem_getMultiDeleteThreshold_rdh | /**
* Return the minimum number of objects in one multi-object delete call.
*
* @return the minimum number of objects in one multi-object delete call
*/
int getMultiDeleteThreshold() {
return multiDeleteThreshold;
} | 3.26 |
hadoop_OBSFileSystem_getTrashDir_rdh | /**
* Return trash directory for fast delete.
*
* @return the trash directory
*/
String getTrashDir() {
return trashDir;
} | 3.26 |
hadoop_OBSFileSystem_isEnableMultiObjectDelete_rdh | /**
* Return a flag that indicates if multi-object delete is enabled.
*
* @return the flag
*/
boolean isEnableMultiObjectDelete() {
return enableMultiObjectDelete;
} | 3.26 |
hadoop_OBSFileSystem_delete_rdh | /**
* Delete a Path. This operation is at least {@code O(files)}, with added
* overheads to enumerate the path. It is also not atomic.
*
* @param f
* the path to delete
* @param recursive
* if path is a directory and set to true, the directory is
* deleted else throws an exception. In case of a file the
* recursive can be set to either true or false
* @return true if delete is successful else false
* @throws IOException
* due to inability to delete a directory or file
*/
@Override
public boolean delete(final Path f, final boolean recursive) throws IOException {
try {
FileStatus status = getFileStatus(f);
f0.debug("delete: path {} - recursive {}",
status.getPath(), recursive);
if (enablePosix) {
return OBSPosixBucketUtils.fsDelete(this, status, recursive);
}
return OBSObjectBucketUtils.objectDelete(this, status, recursive);
    } catch (FileNotFoundException e) {
f0.warn("Couldn't delete {} - does not exist", f);
return false;
} catch (ObsException e) {
throw OBSCommonUtils.translateException("delete", f, e);
}
} | 3.26 |
hadoop_OBSFileSystem_copyFromLocalFile_rdh | /**
* Copy the {@code src} file on the local disk to the filesystem at the given
* {@code dst} name.
*
* @param delSrc
* whether to delete the src
* @param overwrite
* whether to overwrite an existing file
* @param src
* path
* @param dst
* path
* @throws FileAlreadyExistsException
* if the destination file exists and
* overwrite == false
* @throws IOException
* IO problem
*/
@Override
public void copyFromLocalFile(final boolean delSrc, final boolean overwrite, final Path src, final Path dst) throws FileAlreadyExistsException, IOException {
try {
super.copyFromLocalFile(delSrc, overwrite, src, dst);
    } catch (ObsException e) {
      throw OBSCommonUtils.translateException(((("copyFromLocalFile(" + src) + ", ") + dst) + ")", src, e);
    }
} | 3.26 |
hadoop_OBSFileSystem_getFileStatus_rdh | /**
* Return a file status object that represents the path.
*
* @param f
* the path we want information from
* @return a FileStatus object
* @throws FileNotFoundException
* when the path does not exist
* @throws IOException
* on other problems
*/
@Override
public FileStatus getFileStatus(final Path f) throws FileNotFoundException, IOException {
for (int v41 = 1; v41 < OBSCommonUtils.MAX_RETRY_TIME; v41++) {
try {
return innerGetFileStatus(f);
} catch (FileNotFoundException | FileConflictException e) {
throw e;
} catch (IOException e) {
f0.warn("Failed to get file status for [{}], retry time [{}], " + "exception [{}]", f, v41, e);
try {
Thread.sleep(OBSCommonUtils.DELAY_TIME);
} catch (InterruptedException ie) {
throw e;
}
}
}
return innerGetFileStatus(f);
} | 3.26 |
hadoop_OBSFileSystem_checkPath_rdh | /**
* Check that a Path belongs to this FileSystem. Unlike the superclass, this
* version does not look at authority, but only hostname.
*
* @param path
* the path to check
* @throws IllegalArgumentException
* if there is an FS mismatch
*/
@Override
public void checkPath(final Path path) {
OBSLoginHelper.checkPath(getConf(), getUri(), path, getDefaultPort());
} | 3.26 |
hadoop_OBSFileSystem_getDefaultBlockSize_rdh | /**
* Imitate HDFS to return the number of bytes that large input files should be
* optimally split into to minimize I/O time. The given path will be used to
* locate the actual filesystem. The full path does not have to exist.
*
* @param f
* path of file
* @return the default block size for the path's filesystem
*/
@Override
public long getDefaultBlockSize(final Path f) {
return blockSize;
} | 3.26 |
hadoop_OBSFileSystem_rename_rdh | /**
* Rename Path src to Path dst.
*
* @param src
* path to be renamed
* @param dst
* new path after rename
* @return true if rename is successful
* @throws IOException
* on IO failure
*/
@Override
public boolean rename(final Path src, final Path dst) throws IOException {
long startTime = System.currentTimeMillis();
long threadId = Thread.currentThread().getId();
f0.debug("Rename path {} to {} start", src, dst);
try {
if (enablePosix) {
return OBSPosixBucketUtils.renameBasedOnPosix(this, src, dst);
} else {
return OBSObjectBucketUtils.renameBasedOnObject(this, src, dst);
}
    } catch (ObsException e) {
      throw OBSCommonUtils.translateException(((("rename(" + src) + ", ") + dst) + ")", src, e);
} catch (RenameFailedException e) {
f0.error(e.getMessage());
return e.getExitCode();
} catch (FileNotFoundException e) {
f0.error(e.toString());
      return false;
    } finally {
      long endTime = System.currentTimeMillis();
f0.debug("Rename path {} to {} finished, thread:{}, " + "timeUsedInMilliSec:{}.", src, dst, threadId, endTime - startTime);
}
} | 3.26 |
hadoop_OBSFileSystem_toString_rdh | /**
* Return a string that describes this filesystem instance.
*
* @return the string
*/
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("OBSFileSystem{");
sb.append("uri=").append(uri);sb.append(", workingDir=").append(workingDir);
sb.append(", partSize=").append(partSize);
sb.append(", enableMultiObjectsDelete=").append(enableMultiObjectDelete);
sb.append(", maxKeys=").append(maxKeys);
if (cannedACL != null) {
sb.append(", cannedACL=").append(cannedACL.toString());
}
sb.append(", readAheadRange=").append(readAheadRange);
sb.append(", blockSize=").append(getDefaultBlockSize());if (blockFactory !=
null) {
sb.append(", blockFactory=").append(blockFactory);
}
sb.append(", boundedMultipartUploadThreadPool=").append(boundedMultipartUploadThreadPool);
sb.append(", statistics {").append(statistics).append("}");
sb.append(", metrics {").append("}");
sb.append('}');
return sb.toString();
} | 3.26 |
hadoop_OBSFileSystem_createNonRecursive_rdh | /**
* Open an FSDataOutputStream at the indicated Path with write-progress
* reporting. Same as create(), except fails if parent directory doesn't
* already exist.
*
* @param path
* the file path to create
* @param permission
* file permission
* @param flags
* {@link CreateFlag}s to use for this stream
* @param bufferSize
* the size of the buffer to be used
* @param replication
* required block replication for the file
* @param blkSize
* block size
* @param progress
* the progress reporter
* @throws IOException
* IO failure
*/
@Override
  public FSDataOutputStream createNonRecursive(final Path path, final FsPermission permission,
      final EnumSet<CreateFlag> flags, final int bufferSize, final short replication,
      final long blkSize, final Progressable progress) throws IOException {
Path parent = path.getParent();
if ((parent != null) && (!getFileStatus(parent).isDirectory())) {
// expect this to raise an exception if there is no parent
throw new FileAlreadyExistsException("Not a directory: " + parent);
}
return create(path, permission, flags.contains(CreateFlag.OVERWRITE), bufferSize, replication, blkSize, progress);
} | 3.26 |
hadoop_OBSFileSystem_isObsClientDFSListEnable_rdh | /**
* Return a flag that indicates if OBS client specific depth first search
* (DFS) list is enabled.
*
* @return the flag
*/
  boolean isObsClientDFSListEnable() {
    return obsClientDFSListEnable;
} | 3.26 |
hadoop_OBSFileSystem_getBlockFactory_rdh | /**
* Return the block factory used by {@link OBSBlockOutputStream}.
*
* @return the block factory
 */
  BlockFactory getBlockFactory() {
return blockFactory;
} | 3.26 |
hadoop_OBSFileSystem_getListParallelFactor_rdh | /**
* Return list parallel factor.
*
* @return the list parallel factor
*/
int getListParallelFactor() {
return listParallelFactor;
} | 3.26 |
hadoop_OBSFileSystem_getWriteHelper_rdh | /**
* Return the write helper used by {@link OBSBlockOutputStream}.
*
* @return the write helper
 */
  OBSWriteOperationHelper getWriteHelper() {
return writeHelper;
} | 3.26 |
hadoop_OBSFileSystem_getCannedACL_rdh | /**
* Get the bucket acl of user setting.
*
* @return bucket acl {@link AccessControlList}
*/
AccessControlList getCannedACL() {
return cannedACL;
} | 3.26 |
hadoop_OBSFileSystem_initialize_rdh | /**
* Initialize a FileSystem. Called after a new FileSystem instance is
* constructed.
*
* @param name
* a URI whose authority section names the host, port,
* etc. for this FileSystem
* @param originalConf
* the configuration to use for the FS. The
* bucket-specific options are patched over the base ones
* before any use is made of the config.
*/
@Override
  public void initialize(final URI name, final Configuration originalConf) throws IOException {
uri = URI.create((name.getScheme() + "://") + name.getAuthority());
bucket = name.getAuthority();
// clone the configuration into one with propagated bucket options
Configuration conf = OBSCommonUtils.propagateBucketOptions(originalConf, bucket);
    OBSCommonUtils.patchSecurityCredentialProviders(conf);
    super.initialize(name, conf);
setConf(conf);
try {
// Username is the current user at the time the FS was instantiated.
username = UserGroupInformation.getCurrentUser().getShortUserName();
workingDir = new Path("/user", username).makeQualified(this.uri, this.getWorkingDirectory());
Class<? extends OBSClientFactory> obsClientFactoryClass = conf.getClass(OBSConstants.OBS_CLIENT_FACTORY_IMPL, OBSConstants.DEFAULT_OBS_CLIENT_FACTORY_IMPL, OBSClientFactory.class);
obs = ReflectionUtils.newInstance(obsClientFactoryClass, conf).createObsClient(name);
sse = new SseWrapper(conf);
OBSCommonUtils.verifyBucketExists(this);
enablePosix = OBSCommonUtils.getBucketFsStatus(obs, bucket);
      maxKeys = OBSCommonUtils.intOption(conf, OBSConstants.MAX_PAGING_KEYS, OBSConstants.DEFAULT_MAX_PAGING_KEYS, 1);
obsListing = new OBSListing(this);
partSize = OBSCommonUtils.getMultipartSizeProperty(conf, OBSConstants.MULTIPART_SIZE, OBSConstants.DEFAULT_MULTIPART_SIZE);
// check but do not store the block size
blockSize = OBSCommonUtils.longBytesOption(conf, OBSConstants.FS_OBS_BLOCK_SIZE, OBSConstants.DEFAULT_FS_OBS_BLOCK_SIZE, 1);
enableMultiObjectDelete = conf.getBoolean(OBSConstants.ENABLE_MULTI_DELETE, true);
maxEntriesToDelete = conf.getInt(OBSConstants.MULTI_DELETE_MAX_NUMBER, OBSConstants.DEFAULT_MULTI_DELETE_MAX_NUMBER);
enableMultiObjectDeleteRecursion = conf.getBoolean(OBSConstants.MULTI_DELETE_RECURSION, true);
obsContentSummaryEnable = conf.getBoolean(OBSConstants.OBS_CONTENT_SUMMARY_ENABLE, true);
readAheadRange = OBSCommonUtils.longBytesOption(conf, OBSConstants.READAHEAD_RANGE, OBSConstants.DEFAULT_READAHEAD_RANGE, 0);
readTransformEnable = conf.getBoolean(OBSConstants.READ_TRANSFORM_ENABLE, true);
multiDeleteThreshold = conf.getInt(OBSConstants.MULTI_DELETE_THRESHOLD, OBSConstants.MULTI_DELETE_DEFAULT_THRESHOLD);
initThreadPools(conf);
writeHelper = new OBSWriteOperationHelper(this);
initCannedAcls(conf);
OBSCommonUtils.initMultipartUploads(this, conf);
String blockOutputBuffer = conf.getTrimmed(OBSConstants.FAST_UPLOAD_BUFFER, OBSConstants.FAST_UPLOAD_BUFFER_DISK);
partSize = OBSCommonUtils.ensureOutputParameterInRange(OBSConstants.MULTIPART_SIZE, partSize);
blockFactory = OBSDataBlocks.createFactory(this, blockOutputBuffer);
blockOutputActiveBlocks = OBSCommonUtils.intOption(conf, OBSConstants.FAST_UPLOAD_ACTIVE_BLOCKS, OBSConstants.DEFAULT_FAST_UPLOAD_ACTIVE_BLOCKS, 1);
f0.debug("Using OBSBlockOutputStream with buffer = {}; block={};" + " queue limit={}", blockOutputBuffer, partSize, blockOutputActiveBlocks);
enableTrash = conf.getBoolean(OBSConstants.TRASH_ENABLE, OBSConstants.DEFAULT_TRASH);
if (enableTrash) {
if (!isFsBucket()) {
String errorMsg = String.format("The bucket [%s] is not posix. not supported for " + "trash.", bucket);
f0.warn(errorMsg);
enableTrash = false;
trashDir = null;
        } else {
          trashDir = conf.get(OBSConstants.TRASH_DIR);
if (StringUtils.isEmpty(trashDir)) {
String errorMsg = String.format((("The trash feature(fs.obs.trash.enable) is " + "enabled, but the ") + "configuration(fs.obs.trash.dir [%s]) ") + "is empty.", trashDir);
f0.error(errorMsg);
            throw new ObsException(errorMsg);
}
trashDir = OBSCommonUtils.maybeAddBeginningSlash(trashDir);
trashDir = OBSCommonUtils.maybeAddTrailingSlash(trashDir);
}
}
} catch (ObsException e) {
throw OBSCommonUtils.translateException("initializing ", new Path(name), e);
}
} | 3.26 |
hadoop_OBSFileSystem_isEnableMultiObjectDeleteRecursion_rdh | /**
* Return a flag that indicates if multi-object delete recursion is enabled.
*
* @return the flag
*/
boolean isEnableMultiObjectDeleteRecursion() {
return enableMultiObjectDeleteRecursion;
} | 3.26 |
hadoop_OBSFileSystem_getScheme_rdh | /**
* Return the protocol scheme for the FileSystem.
*
* @return "obs"
*/
@Override
public String getScheme() {
return "obs";
} | 3.26 |
hadoop_OBSFileSystem_getMaxEntriesToDelete_rdh | /**
* Return maximum number of entries in one multi-object delete call.
*
* @return the maximum number of entries in one multi-object delete call
*/
int getMaxEntriesToDelete() {
return maxEntriesToDelete;
} | 3.26 |
hadoop_Sets_union_rdh | /**
* Returns the union of two sets as an unmodifiable set.
* The returned set contains all elements that are contained in either
* backing set.
*
* <p>Results are undefined if {@code set1} and {@code set2} are sets
* based on different equivalence relations (as {@link HashSet},
* {@link TreeSet}, and the {@link Map#keySet} of an
* {@code IdentityHashMap} all are).
*
* @param set1
* set1.
* @param set2
* set2.
* @param <E>
* Generics Type E.
 * @return the union of the two sets, as an unmodifiable {@code Set}.
*/
public static <E> Set<E> union(final Set<E> set1, final Set<E> set2) {
if (set1 == null) {
throw new NullPointerException("set1");
}
if (set2 == null) {
throw new NullPointerException("set2");
}
Set<E> newSet = new HashSet<>(set1);
newSet.addAll(set2);
return Collections.unmodifiableSet(newSet);
} | 3.26 |
hadoop_Sets_difference_rdh | /**
* Returns the difference of two sets as an unmodifiable set.
* The returned set contains all elements that are contained by {@code set1}
* and not contained by {@code set2}.
*
* <p>Results are undefined if {@code set1} and {@code set2} are sets based
* on different equivalence relations (as {@code HashSet}, {@code TreeSet},
* and the keySet of an {@code IdentityHashMap} all are).
*
* This method is used to find difference for HashSets. For TreeSets with
* strict order requirement, recommended method is
* {@link #differenceInTreeSets(Set, Set)}.
*
* @param set1
* set1.
* @param set2
* set2.
* @param <E>
* Generics Type E.
 * @return the difference of the two sets, as an unmodifiable {@code Set}.
*/
public static <E> Set<E> difference(final Set<E> set1, final Set<E> set2) {
if (set1 == null) {
throw new NullPointerException("set1");
}
if (set2 == null) {
throw new NullPointerException("set2");
}
Set<E> newSet = new HashSet<>(set1);
newSet.removeAll(set2);
return Collections.unmodifiableSet(newSet);
} | 3.26 |
hadoop_Sets_symmetricDifference_rdh | /**
* Returns the symmetric difference of two sets as an unmodifiable set.
* The returned set contains all elements that are contained in either
* {@code set1} or {@code set2} but not in both. The iteration order of the
* returned set is undefined.
*
* <p>Results are undefined if {@code set1} and {@code set2} are sets based
* on different equivalence relations (as {@code HashSet}, {@code TreeSet},
* and the keySet of an {@code IdentityHashMap} all are).
*
* @param set1
* set1.
* @param set2
* set2.
* @param <E>
* Generics Type E.
 * @return the symmetric difference of the two sets, as an unmodifiable {@code Set}.
*/
public static <E> Set<E> symmetricDifference(final Set<E> set1, final Set<E> set2) {
if (set1 == null) {
throw new NullPointerException("set1");
}
if (set2 == null) {
throw new NullPointerException("set2");
}
Set<E> intersection = new HashSet<>(set1);
intersection.retainAll(set2);
Set<E> symmetricDifference = new HashSet<>(set1);
symmetricDifference.addAll(set2);
symmetricDifference.removeAll(intersection);
return Collections.unmodifiableSet(symmetricDifference);
} | 3.26 |
hadoop_Sets_intersection_rdh | /**
* Returns the intersection of two sets as an unmodifiable set.
* The returned set contains all elements that are contained by both backing
* sets.
*
* <p>Results are undefined if {@code set1} and {@code set2} are sets based
* on different equivalence relations (as {@code HashSet}, {@code TreeSet},
* and the keySet of an {@code IdentityHashMap} all are).
*
* @param set1
* set1.
* @param set2
* set2.
* @param <E>
* Generics Type E.
 * @return the intersection of the two sets, as an unmodifiable {@code Set}.
*/
public static <E> Set<E> intersection(final Set<E> set1, final Set<E> set2) {
if (set1 == null) {
throw new NullPointerException("set1");
}
if (set2 == null) {
throw new NullPointerException("set2");
}
Set<E> newSet = new HashSet<>(set1);
newSet.retainAll(set2);
return Collections.unmodifiableSet(newSet);
} | 3.26 |
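A minimal usage sketch of the set-algebra helpers shown above (union, difference, symmetricDifference, intersection); the demo class name and sample values are hypothetical, and the import assumes the Sets class lives at org.apache.hadoop.util.Sets as in recent Hadoop releases:

// Illustrative only: exercises the helpers from the snippets above.
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.util.Sets;

public class SetsUsageSketch {
  public static void main(String[] args) {
    Set<String> a = new HashSet<>(Arrays.asList("x", "y", "z"));
    Set<String> b = new HashSet<>(Arrays.asList("y", "z", "w"));
    System.out.println(Sets.union(a, b));               // x, y, z, w (unmodifiable, iteration order undefined)
    System.out.println(Sets.difference(a, b));          // x
    System.out.println(Sets.intersection(a, b));        // y, z
    System.out.println(Sets.symmetricDifference(a, b)); // x, w
  }
}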
hadoop_Sets_newConcurrentHashSet_rdh | /**
* Creates a thread-safe set backed by a hash map. The set is backed by a
* {@link ConcurrentHashMap} instance, and thus carries the same concurrency
* guarantees.
*
* <p>Unlike {@code HashSet}, this class does NOT allow {@code null} to be
* used as an element. The set is serializable.
*
* @param <E>
* Generics Type.
* @return a new, empty thread-safe {@code Set}
 */
  public static <E> Set<E> newConcurrentHashSet() {
return Collections.newSetFromMap(new ConcurrentHashMap<E, Boolean>());
} | 3.26 |
hadoop_Sets_newTreeSet_rdh | /**
* Creates a <i>mutable</i> {@code TreeSet} instance containing the given
* elements sorted by their natural ordering.
*
* <p><b>Note:</b> if mutability is not required, use
* ImmutableSortedSet#copyOf(Iterable) instead.
*
* <p><b>Note:</b> If {@code elements} is a {@code SortedSet} with an
* explicit comparator, this method has different behavior than
* {@link TreeSet#TreeSet(SortedSet)}, which returns a {@code TreeSet}
* with that comparator.
*
* <p><b>Note for Java 7 and later:</b> this method is now unnecessary and
* should be treated as deprecated. Instead, use the {@code TreeSet}
* constructor directly, taking advantage of the new
* <a href="http://goo.gl/iz2Wi">"diamond" syntax</a>.
*
* <p>This method is just a small convenience for creating an empty set and
* then calling Iterables#addAll. This method is not very useful and will
* likely be deprecated in the future.
*
* @param <E>
* Generics Type E.
* @param elements
* the elements that the set should contain
* @return a new {@code TreeSet} containing those elements (minus duplicates)
*/
public static <E extends Comparable> TreeSet<E> newTreeSet(Iterable<? extends E> elements) {
    TreeSet<E> set = newTreeSet();
    addAll(set, elements);
return set;
} | 3.26 |
hadoop_Sets_newHashSet_rdh | /**
* Creates a <i>mutable</i> {@code HashSet} instance containing the given
* elements. A very thin convenience for creating an empty set and then
* calling Iterators#addAll.
*
* <p><b>Note:</b> if mutability is not required and the elements are
* non-null, use ImmutableSet#copyOf(Iterator) instead.</p>
*
* <p><b>Note:</b> if {@code E} is an {@link Enum} type, you should create
* an {@link EnumSet} instead.</p>
*
* <p>Overall, this method is not very useful and will likely be deprecated
* in the future.</p>
*
* @param <E>
* Generics Type E.
* @param elements
* elements.
 * @return a new, mutable {@code HashSet} containing those elements (minus duplicates).
*/
public static <E> HashSet<E> newHashSet(Iterator<? extends E> elements) {
HashSet<E> set = newHashSet();
addAll(set, elements);
return set;
}
/**
* Returns a new hash set using the smallest initial table size that can hold
* {@code expectedSize} elements without resizing. Note that this is not what
* {@link HashSet#HashSet(int)} does, but it is what most users want and
* expect it to do.
*
* <p>This behavior can't be broadly guaranteed, but has been tested with
* OpenJDK 1.7 and 1.8.</p>
*
* @param expectedSize
* the number of elements you expect to add to the
* returned set
* @param <E>
* Generics Type E.
* @return a new, empty hash set with enough capacity to hold
{@code expectedSize} | 3.26 |
hadoop_Sets_differenceInTreeSets_rdh | /**
* Returns the difference of two sets as an unmodifiable set.
* The returned set contains all elements that are contained by {@code set1}
* and not contained by {@code set2}.
*
* <p>Results are undefined if {@code set1} and {@code set2} are sets based
* on different equivalence relations (as {@code HashSet}, {@code TreeSet},
* and the keySet of an {@code IdentityHashMap} all are).
*
* This method is used to find difference for TreeSets. For HashSets,
* recommended method is {@link #difference(Set, Set)}.
*
* @param <E>
* Generics Type E.
* @param set1
* set1.
* @param set2
* set2.
 * @return the difference of the two sets, as an unmodifiable {@code Set}.
*/
public static <E> Set<E> differenceInTreeSets(final Set<E> set1, final Set<E> set2) {
if (set1 == null) {
throw new NullPointerException("set1");
}
if (set2 == null) {
throw new NullPointerException("set2");
}
Set<E> newSet = new TreeSet<>(set1);
newSet.removeAll(set2);
return Collections.unmodifiableSet(newSet);
} | 3.26 |
hadoop_Sets_capacity_rdh | /**
* Returns a capacity that is sufficient to keep the map from being resized
* as long as it grows no larger than expectedSize and the load factor
* is ≥ its default (0.75).
* The implementation of this method is adapted from Guava version 27.0-jre.
*/
private static int capacity(int expectedSize) {
if (expectedSize < 3) {
if (expectedSize < 0) {
throw new IllegalArgumentException("expectedSize cannot be negative but was: " + expectedSize);
}
return expectedSize + 1;
}
if (expectedSize < MAX_POWER_OF_TWO) {
// This is the calculation used in JDK8 to resize when a putAll
// happens; it seems to be the most conservative calculation we
// can make. 0.75 is the default load factor.
      return ((int) ((((float) (expectedSize)) / 0.75F) + 1.0F));
}
return Integer.MAX_VALUE;// any large value
} | 3.26 |
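A worked instance of the sizing arithmetic above (illustrative numbers): capacity(16) evaluates to (int) (16 / 0.75F + 1.0F) = 22; the backing HashMap then rounds that up to the next power of two, 32, whose resize threshold of 32 * 0.75 = 24 comfortably holds the 16 expected elements without rehashing.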
hadoop_RetryReason_getAbbreviation_rdh | /**
* Method to get correct abbreviation for a given set of exception, statusCode,
* storageStatusCode.
*
* @param ex
* exception caught during server communication.
* @param statusCode
* statusCode in the server response.
* @param storageErrorMessage
* storageErrorMessage in the server response.
 * @return abbreviation for the given set of exception, statusCode, storageStatusCode.
*/
static String getAbbreviation(Exception ex, Integer statusCode, String storageErrorMessage) {
String result = null;
    for (RetryReasonCategory v1 : rankedReasonCategories) {
final String abbreviation = v1.captureAndGetAbbreviation(ex, statusCode, storageErrorMessage);
if (abbreviation != null) {
result = abbreviation;
}
}
return result;
} | 3.26 |
hadoop_LoadManifestsStage_coalesceDirectories_rdh | /**
* Coalesce all directories and clear the entry in the manifest.
* There's only ever one writer at a time, which it is hoped reduces
 * contention. The filtering happens before the lock is acquired: if there are no new directories,
* the write lock is never needed.
*
* @param manifest
* manifest to process
 * @return the number of directories created.
*/
@VisibleForTesting
int coalesceDirectories(final TaskManifest manifest) {
// build a list of dirs to create.
// this scans the map
    final List<DirEntry> toCreate = manifest.getDestDirectories().stream()
        .filter(e -> !f0.containsKey(e)).collect(Collectors.toList());
if (!toCreate.isEmpty()) {
// need to add more directories;
// still a possibility that they may be created between the
// filtering and this thread having the write lock.
synchronized(f0) {
toCreate.forEach(entry -> {
f0.putIfAbsent(entry.getDir(), entry);
});
}
}
return toCreate.size();
} | 3.26 |
hadoop_LoadManifestsStage_toString_rdh | /**
* To String includes all summary info except statistics.
*
* @return string value
*/
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("SummaryInfo{");
sb.append("manifestCount=").append(getManifestCount());
sb.append(", fileCount=").append(getFileCount());
sb.append(", directoryCount=").append(getDirectoryCount());
sb.append(", totalFileSize=").append(byteCountToDisplaySize(m0()));
sb.append('}');
return sb.toString();
} | 3.26 |
hadoop_LoadManifestsStage_getIOStatistics_rdh | /**
* Get the IOStatistics.
*
* @return aggregate IOStatistics
*/
  @Override
  public IOStatisticsSnapshot getIOStatistics() {
return iostatistics;
} | 3.26 |
hadoop_LoadManifestsStage_add_rdh | /**
* Add all statistics; synchronized.
*
* @param manifest
* manifest to add.
*/
  public synchronized void add(TaskManifest manifest) {
manifestCount.incrementAndGet();
iostatistics.aggregate(manifest.getIOStatistics());
fileCount.addAndGet(manifest.getFilesToCommit().size());
directoryCount.addAndGet(manifest.getDestDirectories().size());
    totalFileSize.addAndGet(manifest.getTotalFileSize());
    taskIDs.add(manifest.getTaskID());
taskAttemptIDs.add(manifest.getTaskAttemptID());
} | 3.26 |
hadoop_LoadManifestsStage_executeStage_rdh | /**
* Load the manifests.
*
* @param arguments
* stage arguments
* @return the summary and a list of manifests.
* @throws IOException
* IO failure.
*/
@Override
protected LoadManifestsStage.Result executeStage(final LoadManifestsStage.Arguments arguments) throws IOException {
EntryFileIO entryFileIO = new EntryFileIO(getStageConfig().getConf());
final Path manifestDir = getTaskManifestDir();
LOG.info("{}: Executing Manifest Job Commit with manifests in {}", getName(), manifestDir);
final Path entrySequenceData = arguments.getEntrySequenceData();
// the entry writer for queuing data.
entryWriter = entryFileIO.launchEntryWriter(entryFileIO.createWriter(entrySequenceData), arguments.queueCapacity);
try {
// sync fs before the list
msync(manifestDir);
// build a list of all task manifests successfully committed,
// which will break out if the writing is stopped (due to any failure)
final RemoteIterator<FileStatus> manifestFiles = haltableRemoteIterator(listManifests(), () -> entryWriter.isActive());
processAllManifests(manifestFiles);
maybeAddIOStatistics(getIOStatistics(), manifestFiles);
LOG.info("{}: Summary of {} manifests loaded in {}: {}", getName(), summaryInfo.manifestCount, manifestDir, summaryInfo);
// close cleanly
entryWriter.close();
// if anything failed, raise it.
entryWriter.maybeRaiseWriteException();// collect any stats
} catch (EntryWriteException e) {
// something went wrong while writing.
// raise anything on the write thread,
entryWriter.maybeRaiseWriteException();
// falling back to that from the worker thread
      throw e;
    } finally {
// close which is a no-op if the clean close was invoked;
// it is not a no-op if something went wrong with reading/parsing/processing
// the manifests.
entryWriter.close();
}
    // new array to free up the map
    final LoadedManifestData loadedManifestData = new LoadedManifestData(
        new ArrayList<>(f0.values()), entrySequenceData, entryWriter.getCount());
return new LoadManifestsStage.Result(summaryInfo, loadedManifestData);
} | 3.26 |
hadoop_LoadManifestsStage_processAllManifests_rdh | /**
* Load and process all the manifests.
*
* @param manifestFiles
* list of manifest files.
* @throws IOException
* failure to load/parse/queue
*/
private void processAllManifests(final RemoteIterator<FileStatus> manifestFiles) throws IOException {
trackDurationOfInvocation(getIOStatistics(), OP_LOAD_ALL_MANIFESTS, () -> TaskPool.foreach(manifestFiles).executeWith(getIOProcessors()).stopOnFailure().run(this::processOneManifest));
} | 3.26 |
hadoop_LoadManifestsStage_processOneManifest_rdh | /**
* Method invoked to process one manifest.
*
* @param status
* file to process.
* @throws IOException
* failure to load/parse/queue
*/
private void processOneManifest(FileStatus status) throws IOException {
    updateAuditContext(OP_LOAD_ALL_MANIFESTS);
    TaskManifest manifest = fetchTaskManifest(status);
progress();
// update the directories
final int created = coalesceDirectories(manifest);
final String attemptID = manifest.getTaskAttemptID();
LOG.debug("{}: task attempt {} added {} directories", getName(), attemptID, created);
// add to the summary.
summaryInfo.add(manifest);
// clear the manifest extra data so if
// blocked waiting for queue capacity,
// memory use is reduced.
manifest.setIOStatistics(null);
manifest.getExtraData().clear();
// queue those files.
    final boolean enqueued = entryWriter.enqueue(manifest.getFilesToCommit());
    if (!enqueued) {
LOG.warn("{}: Failed to write manifest for task {}", getName(), attemptID);
throw new EntryWriteException(attemptID);
}
} | 3.26 |
hadoop_RouterObserverReadProxyProvider_isRead_rdh | /**
* Check if a method is read-only.
*
* @return whether the 'method' is a read-only operation.
*/
private static boolean isRead(Method method) {
if (!method.isAnnotationPresent(ReadOnly.class)) {
return false;
}
return !method.getAnnotationsByType(ReadOnly.class)[0].activeOnly();
} | 3.26 |
hadoop_RouterObserverReadProxyProvider_getProxyAsClientProtocol_rdh | /**
* Return the input proxy, cast as a {@link ClientProtocol}. This catches any
* {@link ClassCastException} and wraps it in a more helpful message. This
* should ONLY be called if the caller is certain that the proxy is, in fact,
* a {@link ClientProtocol}.
*/
private ClientProtocol getProxyAsClientProtocol(T proxy) {
    assert proxy instanceof ClientProtocol : ("BUG: Attempted to use proxy of class "
        + proxy.getClass()) + " as if it was a ClientProtocol.";
    return ((ClientProtocol) (proxy));
  } | 3.26 |
hadoop_RouterObserverReadProxyProvider_m0_rdh | /**
* This will call {@link ClientProtocol#msync()} on the active NameNode
* (via the {@link #innerProxy}) to update the state of this client, only
* if at least {@link #autoMsyncPeriodMs} ms has elapsed since the last time
* an msync was performed.
*
* @see #autoMsyncPeriodMs
*/
private void m0() throws IOException {
if (autoMsyncPeriodMs == 0) {
// Always msync
getProxyAsClientProtocol(innerProxy.getProxy().proxy).msync();
} else if (autoMsyncPeriodMs > 0) {
if ((Time.monotonicNow() - lastMsyncTimeMs) > autoMsyncPeriodMs) {
synchronized(this) {
// Use a synchronized block so that only one thread will msync
// if many operations are submitted around the same time.
// Re-check the entry criterion since the status may have changed
// while waiting for the lock.
if ((Time.monotonicNow() - lastMsyncTimeMs) > autoMsyncPeriodMs) {
getProxyAsClientProtocol(innerProxy.getProxy().proxy).msync();
lastMsyncTimeMs = Time.monotonicNow();
}
}
}
}
} | 3.26 |
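The auto-msync logic above is a double-checked, time-based throttle: a cheap unsynchronized time check, then a re-check under the lock before the expensive call. A standalone sketch of the same pattern (hypothetical class, not part of the Hadoop API):

// Hypothetical sketch of the double-checked time-based throttle used above.
public class PeriodicAction {
  private final long periodMs;
  private volatile long lastRunMs;

  public PeriodicAction(long periodMs) {
    this.periodMs = periodMs;
  }

  public void maybeRun(Runnable action) {
    // cheap, unsynchronized check first
    if (System.nanoTime() / 1_000_000 - lastRunMs > periodMs) {
      synchronized (this) {
        long now = System.nanoTime() / 1_000_000; // monotonic clock, in ms
        if (now - lastRunMs > periodMs) {         // re-check under the lock
          action.run();
          lastRunMs = now;
        }
      }
    }
  }
}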
hadoop_NodeAllocation_transformToTree_rdh | // In node allocation, transform each activity to a tree-like structure
// for frontend activity display.
// eg: root
// / \
// a b
// / \
// app1 app2
// / \
// CA1 CA2
// CA means Container Attempt
  public void transformToTree() {
List<ActivityNode> allocationTree = new ArrayList<>();
if (f0 == null) {
Set<String> names = Collections.newSetFromMap(new ConcurrentHashMap<>());
ListIterator<AllocationActivity> ite = allocationOperations.listIterator(allocationOperations.size());
while (ite.hasPrevious()) {
String name = ite.previous().getName();
        if (name != null) {
if (!names.contains(name)) {
names.add(name);
} else {
ite.remove();
}
}
}
for (AllocationActivity allocationOperation : allocationOperations) {
ActivityNode node = allocationOperation.createTreeNode();
String name = node.getName();
        for (int i = allocationTree.size() - 1; i > (-1); i--) {
if (allocationTree.get(i).getParentName().equals(name)) {
node.addChild(allocationTree.get(i));
allocationTree.remove(i);
          } else {
            break;
          }
        }
        allocationTree.add(node);
}
f0 = allocationTree.get(0);
}
} | 3.26 |
hadoop_DirectoryDiffListFactory_randomLevel_rdh | /**
* Returns the level of a skip list node.
*
* @return A value in the range 0 to maxLevels.
*/
public static int randomLevel() {
final Random r = ThreadLocalRandom.current();
    for (int level = 0; level < maxLevels; level++) {
      // skip to the next level with probability 1/skipInterval
      if (r.nextInt(skipInterval) > 0) {
        return level;
}
}
return maxLevels;
} | 3.26 |
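Under the loop above, a node reaches level k only if the random draw hit 0 on each of the first k iterations, so node levels are geometrically distributed: with skipInterval = 3, for example, about 2/3 of nodes stay at level 0, 2/9 reach level 1, 2/27 reach level 2, and so on (an illustrative calculation, capped at maxLevels).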
hadoop_ResourceSkyline_setJobFinishTime_rdh | /**
* Set jobFinishTime.
*
* @param jobFinishTimeConfig
* jobFinishTime.
*/
public final void setJobFinishTime(final long jobFinishTimeConfig) {
    this.jobFinishTime = jobFinishTimeConfig;
  }
/**
* Get the resource spec of the job's allocated {@code container}s.
* <p> Key assumption: during job's lifespan, its allocated {@code container}s
* have the same {@link Resource} spec.
*
* @return the {@link Resource} spec of the job's allocated
{@code container} | 3.26 |
hadoop_ResourceSkyline_setJobId_rdh | /**
* Set jobId.
*
* @param jobIdConfig
* jobId.
 */
  public final void setJobId(final String jobIdConfig) {
    this.f0 = jobIdConfig;
  } | 3.26 |
hadoop_ResourceSkyline_setContainerSpec_rdh | /**
* Set containerSpec.
*
* @param containerSpecConfig
* containerSpec.
*/
public final void setContainerSpec(final Resource containerSpecConfig) {
this.containerSpec = containerSpecConfig;
}
/**
* Get the list of {@link Resource}s allocated to the job.
*
* @return the {@link RLESparseResourceAllocation} which contains the list of
{@link Resource} | 3.26 |
hadoop_ResourceSkyline_getJobInputDataSize_rdh | /**
* Get the job's input data size.
*
* @return job's input data size.
*/
public final double getJobInputDataSize() {
return jobInputDataSize;
} | 3.26 |
hadoop_ResourceSkyline_getJobFinishTime_rdh | /**
* Get the job's finish time.
*
* @return job's finish time.
*/
public final long getJobFinishTime() {
return jobFinishTime;
} | 3.26 |
hadoop_ResourceSkyline_setSkylineList_rdh | /**
* Set skylineList.
*
* @param skylineListConfig
* skylineList.
 */
  public final void setSkylineList(final RLESparseResourceAllocation skylineListConfig) {
    this.skylineList = skylineListConfig;
} | 3.26 |
hadoop_ResourceSkyline_setJobSubmissionTime_rdh | /**
* Set jobSubmissionTime.
*
* @param jobSubmissionTimeConfig
* jobSubmissionTime.
*/
public final void setJobSubmissionTime(final long jobSubmissionTimeConfig) {
this.jobSubmissionTime = jobSubmissionTimeConfig;
} | 3.26 |
hadoop_ResourceSkyline_getJobId_rdh | /**
* Get the id of the job.
*
* @return the id of this job.
*/
public final String getJobId() {
    return f0;
  } | 3.26 |
hadoop_ResourceSkyline_setJobInputDataSize_rdh | /**
* Set jobInputDataSize.
*
* @param jobInputDataSizeConfig
* jobInputDataSize.
*/
public final void setJobInputDataSize(final double jobInputDataSizeConfig) {
this.jobInputDataSize = jobInputDataSizeConfig;
} | 3.26 |
hadoop_TimelineEntities_addEntity_rdh | /**
* Add a single entity into the existing entity list
*
* @param entity
* a single entity
*/
public void addEntity(TimelineEntity entity) {
entities.add(entity);
} | 3.26 |
hadoop_TimelineEntities_addEntities_rdh | /**
 * Add a list of entities into the existing entity list
*
* @param entities
* a list of entities
*/
public void addEntities(List<TimelineEntity> entities) {
this.entities.addAll(entities);
} | 3.26 |
hadoop_TimelineEntities_setEntities_rdh | /**
* Set the entity list to the given list of entities
*
* @param entities
* a list of entities
*/
public void setEntities(List<TimelineEntity> entities) {
this.entities = entities;
} | 3.26 |
hadoop_TimelineEntities_getEntities_rdh | /**
* Get a list of entities
*
* @return a list of entities
*/
@XmlElement(name = "entities")
public List<TimelineEntity> getEntities() {
return entities;
} | 3.26 |
hadoop_MetricStringBuilder_tuple_rdh | /**
* Add any key,val pair to the string, between the prefix and suffix,
* separated by the separator.
*
* @param key
* key
* @param value
* value
* @return this instance
*/
  public MetricStringBuilder tuple(String key, String value) {
builder.append(prefix).append(key).append(separator).append(value).append(suffix);
return this;
} | 3.26 |
hadoop_SkylineStoreValidator_validate_rdh | /**
* Check if pipelineId is <em>null</em> or resourceOverTime is <em>null</em>.
*
* @param pipelineId
* the id of the recurring pipeline.
* @param resourceOverTime
* predicted {@code Resource} allocation to be added.
* @throws SkylineStoreException
* if input parameters are invalid.
*/
public final void validate(final String pipelineId, final RLESparseResourceAllocation resourceOverTime) throws SkylineStoreException {
validate(pipelineId);
if (resourceOverTime == null) {
      StringBuilder sb = new StringBuilder();
      sb.append(("Resource allocation for " + pipelineId) + " is null.");
LOGGER.error(sb.toString());
      throw new NullRLESparseResourceAllocationException(sb.toString());
    }
} | 3.26 |
hadoop_S3ADtFetcher_getServiceName_rdh | /**
 * Returns the service name for S3A, which is also a valid URL prefix.
*/
public Text getServiceName() {
return new Text(SERVICE_NAME);
} | 3.26 |
hadoop_SlowPeerReports_equals_rdh | /**
 * Return true if the two objects represent the same set of slow peer
* entries. Primarily for unit testing convenience.
 */
  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
}
if (!(o instanceof SlowPeerReports)) {
return false;
}
SlowPeerReports that = ((SlowPeerReports) (o));
return slowPeers.equals(that.slowPeers);
} | 3.26 |
hadoop_ExcessRedundancyMap_remove_rdh | /**
* Remove the redundancy corresponding to the given datanode and the given
* block.
*
* @return true if the block is removed.
*/
synchronized boolean remove(DatanodeDescriptor dn, BlockInfo blk) {
    final LightWeightHashSet<BlockInfo> set = map.get(dn.getDatanodeUuid());
    if (set == null) {
return false;
}
final boolean removed = set.remove(blk);
if (removed) {
size.decrementAndGet();
blockLog.debug("BLOCK* ExcessRedundancyMap.remove({}, {})", dn, blk);
if (set.isEmpty()) {
map.remove(dn.getDatanodeUuid());
}
    }
    return removed;
} | 3.26 |
hadoop_ExcessRedundancyMap_size_rdh | /**
*
* @return the number of redundancies in this map.
*/
  long size() {
    return size.get();
} | 3.26 |