name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68)
---|---|---|
hadoop_OBSCommonUtils_objectRepresentsDirectory_rdh | /**
 * Predicate: does the object represent a directory?
*
* @param name
* object name
* @param size
* object size
 * @return true if it meets the criteria for being an object that represents a directory
*/
public static boolean objectRepresentsDirectory(final String name, final long size) {
return ((!name.isEmpty()) && (name.charAt(name.length() - 1) == '/')) && (size == 0L);
} | 3.26 |
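
A minimal standalone sketch of the predicate above, using illustrative object keys and sizes (not taken from the OBS code):

```java
public class DirectoryPredicateDemo {
  // Mirrors the predicate above: a key names a directory marker when it is
  // non-empty, ends with '/', and the object has zero length.
  static boolean objectRepresentsDirectory(String name, long size) {
    return !name.isEmpty() && name.charAt(name.length() - 1) == '/' && size == 0L;
  }

  public static void main(String[] args) {
    System.out.println(objectRepresentsDirectory("data/logs/", 0L));    // true: directory marker
    System.out.println(objectRepresentsDirectory("data/logs/", 42L));   // false: non-empty object
    System.out.println(objectRepresentsDirectory("data/file.txt", 0L)); // false: no trailing slash
  }
}
```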
hadoop_OBSCommonUtils_closeAll_rdh | /**
* Close the Closeable objects and <b>ignore</b> any Exception or null
* pointers. (This is the SLF4J equivalent of that in {@code IOUtils}).
*
* @param closeables
* the objects to close
*/
static void closeAll(final Closeable... closeables) {
for (Closeable c : closeables) {
if (c != null) {
try {
if (LOG != null) {
LOG.debug("Closing {}", c);
}
c.close();
} catch (Exception e) {
if ((LOG != null) && LOG.isDebugEnabled()) {
            LOG.debug("Exception in closing {}", c, e);
          }
}
}
}
} | 3.26 |
hadoop_OBSCommonUtils_innerMkdirs_rdh | /**
* Make the given path and all non-existent parents into directories.
*
* @param owner
* the owner OBSFileSystem instance
* @param path
* path to create
* @return true if a directory was created
* @throws FileAlreadyExistsException
* there is a file at the path specified
* @throws IOException
* other IO problems
* @throws ObsException
* on failures inside the OBS SDK
*/
static boolean innerMkdirs(final OBSFileSystem owner, final Path path) throws IOException, FileAlreadyExistsException, ObsException {
LOG.debug("Making directory: {}", path);
FileStatus fileStatus;
try {
fileStatus = owner.getFileStatus(path);
if (fileStatus.isDirectory()) {
return true;
} else {
throw new FileAlreadyExistsException("Path is a file: " + path);
}
} catch (FileNotFoundException e) {
Path fPart = path.getParent();
do {
try {
fileStatus = owner.getFileStatus(fPart);
if (fileStatus.isDirectory()) {
break;
}
if (fileStatus.isFile()) {
          throw new FileAlreadyExistsException(String.format(
              "Can't make directory for path '%s' since it is a file.", fPart));
        }
} catch (FileNotFoundException fnfe) {
        LOG.debug("File {} not found; ignoring.", path);
}
fPart = fPart.getParent();
    } while (fPart != null);
String key = pathToKey(owner, path);
if (owner.isFsBucket()) {
OBSPosixBucketUtils.fsCreateFolder(owner, key);
} else {
OBSObjectBucketUtils.createFakeDirectory(owner, key);
}
return true;
}
} | 3.26 |
hadoop_OBSCommonUtils_maybeDeleteBeginningSlash_rdh | /**
 * Delete the leading '/' from an OBS key, if present.
*
* @param key
* object key
* @return new key
*/
static String maybeDeleteBeginningSlash(final String key) {
return (!StringUtils.isEmpty(key)) && key.startsWith("/") ? key.substring(1) : key;
} | 3.26 |
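
A minimal sketch of the same slash-stripping logic, with a plain null/empty check standing in for StringUtils.isEmpty; the sample keys are illustrative:

```java
public class LeadingSlashDemo {
  // Equivalent logic with a plain null/empty check in place of StringUtils.isEmpty().
  static String maybeDeleteBeginningSlash(String key) {
    return (key != null && !key.isEmpty() && key.startsWith("/"))
        ? key.substring(1)
        : key;
  }

  public static void main(String[] args) {
    System.out.println(maybeDeleteBeginningSlash("/user/alice/file")); // "user/alice/file"
    System.out.println(maybeDeleteBeginningSlash("user/alice/file"));  // unchanged
    System.out.println(maybeDeleteBeginningSlash(""));                 // unchanged ""
  }
}
```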
hadoop_OBSCommonUtils_appendFile_rdh | /**
* Append File.
*
* @param owner
* the owner OBSFileSystem instance
* @param appendFileRequest
* append object request
* @throws IOException
* on any failure to append file
*/
static void appendFile(final OBSFileSystem owner, final WriteFileRequest appendFileRequest) throws IOException {
long len = 0;
if (appendFileRequest.getFile() != null) {
len = appendFileRequest.getFile().length();
}
try {
LOG.debug("Append file, key {} position {} size {}", appendFileRequest.getObjectKey(), appendFileRequest.getPosition(), len);
owner.getObsClient().writeFile(appendFileRequest);
owner.getSchemeStatistics().incrementWriteOps(1);
      owner.getSchemeStatistics().incrementBytesWritten(len);
    } catch (ObsException e) {
      throw translateException("AppendFile", appendFileRequest.getObjectKey(), e);
    }
} | 3.26 |
hadoop_OBSCommonUtils_m1_rdh | // Used to check if a folder is empty or not.
static boolean m1(final OBSFileSystem owner, final String key) throws FileNotFoundException, ObsException {
    for (int retryTime = 1; retryTime < MAX_RETRY_TIME; retryTime++) {
try {
return innerIsFolderEmpty(owner, key);
} catch (ObsException e) {
LOG.warn("Failed to check empty folder for [{}], retry time [{}], " + "exception [{}]", key, retryTime, e);
try {
Thread.sleep(DELAY_TIME);
      } catch (InterruptedException ie) {
        throw e;
      }
}
}
return innerIsFolderEmpty(owner, key);
} | 3.26 |
hadoop_OBSCommonUtils_dateToLong_rdh | /**
 * Date to long conversion. Handles null Dates that can be returned by OBS
 * by returning 0.
*
* @param date
* date from OBS query
* @return timestamp of the object
*/
public static long dateToLong(final Date date) {
if (date == null) {
return 0L;
}
return (date.getTime() / OBSConstants.SEC2MILLISEC_FACTOR) *
OBSConstants.SEC2MILLISEC_FACTOR;
} | 3.26 |
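
A hedged sketch of the conversion above, assuming OBSConstants.SEC2MILLISEC_FACTOR is 1000 (milliseconds per second); the timestamp is illustrative:

```java
import java.util.Date;

public class DateToLongDemo {
  // Assumes the factor is 1000; this mirrors the null handling and the
  // truncation to whole-second granularity shown above.
  private static final long SEC2MILLISEC_FACTOR = 1000L;

  static long dateToLong(Date date) {
    if (date == null) {
      return 0L;
    }
    return (date.getTime() / SEC2MILLISEC_FACTOR) * SEC2MILLISEC_FACTOR;
  }

  public static void main(String[] args) {
    System.out.println(dateToLong(null));                      // 0
    System.out.println(dateToLong(new Date(1699999999123L)));  // 1699999999000: millis dropped
  }
}
```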
hadoop_OBSCommonUtils_deleteObject_rdh | /**
* Delete an object. Increments the {@code OBJECT_DELETE_REQUESTS} and write
* operation statistics.
*
* @param owner
* the owner OBSFileSystem instance
* @param key
* key to blob to delete.
* @throws IOException
* on any failure to delete object
*/
  static void deleteObject(final OBSFileSystem owner, final String key) throws IOException {
blockRootDelete(owner.getBucket(), key);
ObsException lastException = null;
for (int retryTime = 1; retryTime <= MAX_RETRY_TIME; retryTime++) {
try {
owner.getObsClient().deleteObject(owner.getBucket(), key);
owner.getSchemeStatistics().incrementWriteOps(1);
return;
} catch (ObsException e) {
lastException = e;
        LOG.warn("Delete path failed with [{}], retry time [{}] - request id [{}] - error code [{}] - error message [{}]", e.getResponseCode(), retryTime, e.getErrorRequestId(), e.getErrorCode(), e.getErrorMessage());
if (retryTime < MAX_RETRY_TIME) {
try {
Thread.sleep(DELAY_TIME);
} catch (InterruptedException ie) {
throw translateException("delete", key, e);
          }
        }
}
}
throw translateException(String.format("retry max times [%s] delete failed", MAX_RETRY_TIME), key, lastException);
} | 3.26 |
hadoop_OBSCommonUtils_continueListObjects_rdh | /**
* List the next set of objects.
*
* @param owner
* the owner OBSFileSystem instance
* @param objects
* paged result
* @return the next result object
* @throws IOException
* on any failure to list the next set of objects
*/
static ObjectListing continueListObjects(final OBSFileSystem owner, final ObjectListing objects) throws IOException {
if (((objects.getDelimiter() == null) && owner.isFsBucket()) && owner.isObsClientDFSListEnable()) {
return OBSFsDFSListing.fsDFSContinueListObjects(owner, ((OBSFsDFSListing) (objects)));
}
return commonContinueListObjects(owner, objects);
} | 3.26 |
hadoop_OBSCommonUtils_keyToQualifiedPath_rdh | /**
* Convert a key to a fully qualified path.
*
* @param owner
* the owner OBSFileSystem instance
* @param key
* input key
* @return the fully qualified path including URI scheme and bucket name.
*/
static Path keyToQualifiedPath(final OBSFileSystem owner, final String key) {
return qualify(owner, keyToPath(key));
} | 3.26 |
hadoop_OBSCommonUtils_createListObjectsRequest_rdh | /**
* Create a {@code ListObjectsRequest} request against this bucket.
*
* @param owner
* the owner OBSFileSystem instance
* @param key
* key for request
* @param delimiter
* any delimiter
* @return the request
*/
static ListObjectsRequest createListObjectsRequest(final OBSFileSystem owner, final String key, final String delimiter) {
return createListObjectsRequest(owner, key, delimiter, -1);
} | 3.26 |
hadoop_OBSCommonUtils_rejectRootDirectoryDelete_rdh | /**
* Implements the specific logic to reject root directory deletion. The caller
* must return the result of this call, rather than attempt to continue with
* the delete operation: deleting root directories is never allowed. This
* method simply implements the policy of when to return an exit code versus
* raise an exception.
*
* @param bucket
* bucket name
* @param isEmptyDir
* flag indicating if the directory is empty
* @param recursive
* recursive flag from command
* @return a return code for the operation
* @throws PathIOException
* if the operation was explicitly rejected.
*/
static boolean rejectRootDirectoryDelete(final String bucket, final boolean isEmptyDir,
final boolean recursive)
throws IOException {
    LOG.info("obs delete the {} root directory of {}", bucket, recursive);
    if (isEmptyDir) {
return true;
}
if (recursive) {
return false;
} else {
// reject
throw new PathIOException(bucket, "Cannot delete root path");
    }
  } | 3.26 |
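
A minimal standalone sketch of the same delete policy, with a plain IOException standing in for PathIOException and an illustrative bucket name:

```java
import java.io.IOException;

public class RootDeletePolicyDemo {
  // Mirrors the policy above: empty root -> report success, recursive ->
  // report failure without deleting, otherwise reject with an exception.
  static boolean rejectRootDirectoryDelete(String bucket, boolean isEmptyDir, boolean recursive) throws IOException {
    if (isEmptyDir) {
      return true;
    }
    if (recursive) {
      return false;
    }
    throw new IOException("Bucket " + bucket + ": cannot delete root path");
  }

  public static void main(String[] args) throws IOException {
    System.out.println(rejectRootDirectoryDelete("demo-bucket", true, false));  // true
    System.out.println(rejectRootDirectoryDelete("demo-bucket", false, true));  // false
    try {
      rejectRootDirectoryDelete("demo-bucket", false, false);
    } catch (IOException e) {
      System.out.println("rejected: " + e.getMessage()); // non-recursive delete is rejected
    }
  }
}
```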
hadoop_OBSCommonUtils_stringify_rdh | /**
* String information about a summary entry for debug messages.
*
* @param summary
* summary object
* @return string value
*/
static String stringify(final ObsObject summary) {
return (summary.getObjectKey() + " size=") + summary.getMetadata().getContentLength();
} | 3.26 |
hadoop_OBSCommonUtils_longBytesOption_rdh | /**
* Get a long option not smaller than the minimum allowed value, supporting
* memory prefixes K,M,G,T,P.
*
* @param conf
* configuration
* @param key
* key to look up
* @param defVal
* default value
* @param min
* minimum value
* @return the value
* @throws IllegalArgumentException
* if the value is below the minimum
*/
static long longBytesOption(final Configuration conf, final String key, final long defVal, final long min) {
long v = conf.getLongBytes(key, defVal);
Preconditions.checkArgument(v >= min, String.format("Value of %s: %d is below the minimum value %d", key, v, min));
LOG.debug("Value of {} is {}", key, v);
return v;
} | 3.26 |
hadoop_OBSCommonUtils_keyToPath_rdh | /**
 * Convert a key back to a path.
*
* @param key
* input key
* @return the path from this key
*/
static Path keyToPath(final String key) {
return new Path("/" + key);
} | 3.26 |
hadoop_OBSCommonUtils_pathToKey_rdh | /**
* Turns a path (relative or otherwise) into an OBS key.
*
* @param owner
* the owner OBSFileSystem instance
* @param path
* input path, may be relative to the working dir
* @return a key excluding the leading "/", or, if it is the root path, ""
*/
static String pathToKey(final OBSFileSystem owner, final Path path) {
Path absolutePath = path;
if (!path.isAbsolute()) {
absolutePath = new Path(owner.getWorkingDirectory(), path);
}
if ((absolutePath.toUri().getScheme() != null) && absolutePath.toUri().getPath().isEmpty()) {
return "";
}
return absolutePath.toUri().getPath().substring(1);
} | 3.26 |
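
A simplified, string-only sketch of how keyToPath and pathToKey round-trip; the real methods go through org.apache.hadoop.fs.Path and the filesystem's working directory, which this sketch deliberately ignores:

```java
public class KeyPathRoundTripDemo {
  // String-only stand-ins for the two conversions above.
  static String keyToPath(String key) {
    return "/" + key;
  }

  static String pathToKey(String absolutePath) {
    // Root ("/") maps to the empty key; otherwise drop the leading slash.
    return absolutePath.equals("/") ? "" : absolutePath.substring(1);
  }

  public static void main(String[] args) {
    String key = "warehouse/db/table/part-0000";
    String path = keyToPath(key);                     // "/warehouse/db/table/part-0000"
    System.out.println(path);
    System.out.println(pathToKey(path).equals(key));  // true: round trip preserves the key
    System.out.println("\"" + pathToKey("/") + "\""); // "" for the root path
  }
}
```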
hadoop_OBSCommonUtils_uploadPart_rdh | /**
* Upload part of a multi-partition file. Increments the write and put
* counters. <i>Important: this call does not close any input stream in the
* request.</i>
*
* @param owner
* the owner OBSFileSystem instance
* @param request
* request
* @return the result of the operation.
* @throws ObsException
* on problems
*/
static UploadPartResult uploadPart(final OBSFileSystem owner, final UploadPartRequest request) throws ObsException {
long len = request.getPartSize();
UploadPartResult uploadPartResult = owner.getObsClient().uploadPart(request);
owner.getSchemeStatistics().incrementWriteOps(1);
owner.getSchemeStatistics().incrementBytesWritten(len);
return uploadPartResult;
} | 3.26 |
hadoop_OBSCommonUtils_createFileStatus_rdh | /**
 * Create a file status instance from a listing.
*
* @param keyPath
* path to entry
* @param summary
* summary from OBS
* @param blockSize
* block size to declare.
* @param owner
* owner of the file
* @return a status entry
*/
static OBSFileStatus createFileStatus(final Path keyPath,
final ObsObject summary, final long blockSize, final String owner) {
if (objectRepresentsDirectory(summary.getObjectKey(), summary.getMetadata().getContentLength())) {
return new OBSFileStatus(keyPath, owner);
} else {
return new OBSFileStatus(summary.getMetadata().getContentLength(), dateToLong(summary.getMetadata().getLastModified()), keyPath, blockSize, owner);
}
} | 3.26 |
hadoop_OBSCommonUtils_initMultipartUploads_rdh | /**
 * Initialize multi-part uploads and purge any that are older than the value
 * of PURGE_EXISTING_MULTIPART_AGE.
*
* @param owner
* the owner OBSFileSystem instance
* @param conf
* the configuration to use for the FS
* @throws IOException
* on any failure to initialize multipart upload
*/
static void initMultipartUploads(final OBSFileSystem owner, final Configuration conf) throws IOException {
boolean purgeExistingMultipart = conf.getBoolean(OBSConstants.PURGE_EXISTING_MULTIPART,
OBSConstants.DEFAULT_PURGE_EXISTING_MULTIPART);
long purgeExistingMultipartAge = longOption(conf, OBSConstants.PURGE_EXISTING_MULTIPART_AGE, OBSConstants.DEFAULT_PURGE_EXISTING_MULTIPART_AGE, 0);
if (!purgeExistingMultipart) {
return;
}
    final Date purgeBefore = new Date(new Date().getTime() - (purgeExistingMultipartAge * 1000));
    try {
      ListMultipartUploadsRequest request = new ListMultipartUploadsRequest(owner.getBucket());
      while (true) {
// List + purge
MultipartUploadListing uploadListing = owner.getObsClient().listMultipartUploads(request);
for (MultipartUpload upload : uploadListing.getMultipartTaskList()) {
if (upload.getInitiatedDate().compareTo(purgeBefore) < 0) {
owner.getObsClient().abortMultipartUpload(new AbortMultipartUploadRequest(owner.getBucket(), upload.getObjectKey(), upload.getUploadId()));
}
}
if (!uploadListing.isTruncated()) {
break;
}
request.setUploadIdMarker(uploadListing.getNextUploadIdMarker());
request.setKeyMarker(uploadListing.getNextKeyMarker());
}
    } catch (ObsException e) {
      if (e.getResponseCode() == FORBIDDEN_CODE) {
        LOG.debug("Failed to purge multipart uploads against {}, FS may be read only", owner.getBucket(), e);
} else {
throw translateException("purging multipart uploads", owner.getBucket(), e);
}
}
} | 3.26 |
hadoop_OBSCommonUtils_m0_rdh | /**
* Initiate a {@code listObjects} operation, incrementing metrics in the
* process.
*
* @param owner
* the owner OBSFileSystem instance
* @param request
* request to initiate
* @return the results
* @throws IOException
* on any failure to list objects
*/
  static ObjectListing m0(final OBSFileSystem owner, final ListObjectsRequest request) throws IOException {
    if ((((request.getDelimiter() == null) && (request.getMarker() == null)) && owner.isFsBucket()) && owner.isObsClientDFSListEnable()) {
return OBSFsDFSListing.fsDFSListObjects(owner, request);
}
return commonListObjects(owner, request);
} | 3.26 |
hadoop_OBSCommonUtils_intOption_rdh | /**
 * Get an integer option not smaller than the minimum allowed value.
*
* @param conf
* configuration
* @param key
* key to look up
* @param defVal
* default value
* @param min
* minimum value
* @return the value
* @throws IllegalArgumentException
* if the value is below the minimum
*/
static int intOption(final Configuration conf, final String key, final int defVal, final int min) {
int v = conf.getInt(key, defVal);
Preconditions.checkArgument(v >= min, String.format("Value of %s: %d is below the minimum value %d", key, v, min));
LOG.debug("Value of {} is {}", key, v);
return v;
} | 3.26 |
hadoop_OBSCommonUtils_getPassword_rdh | /**
* Get a password from a configuration, or, if a value is passed in, pick that
* up instead.
*
* @param conf
* configuration
* @param key
* key to look up
* @param val
* current value: if non empty this is used instead of querying
* the configuration.
* @return a password or "".
* @throws IOException
* on any problem
*/
  private static String getPassword(final Configuration conf, final String key, final String val) throws IOException {
    return StringUtils.isEmpty(val) ? lookupPassword(conf, key) : val;
} | 3.26 |
hadoop_OBSCommonUtils_newPutObjectRequest_rdh | /**
* Create a {@link PutObjectRequest} request. The metadata is assumed to have
* been configured with the size of the operation.
*
* @param owner
* the owner OBSFileSystem instance
* @param key
* key of object
* @param metadata
* metadata header
* @param inputStream
* source data.
* @return the request
*/
static PutObjectRequest newPutObjectRequest(final OBSFileSystem owner, final String key, final ObjectMetadata metadata, final InputStream inputStream) {
Preconditions.checkNotNull(inputStream);
PutObjectRequest putObjectRequest = new PutObjectRequest(owner.getBucket(), key, inputStream);
putObjectRequest.setAcl(owner.getCannedACL());
putObjectRequest.setMetadata(metadata);
if (owner.getSse().isSseCEnable()) {
putObjectRequest.setSseCHeader(owner.getSse().getSseCHeader());
} else if (owner.getSse().isSseKmsEnable()) {
putObjectRequest.setSseKmsHeader(owner.getSse().getSseKmsHeader());
}
return putObjectRequest;
} | 3.26 |
hadoop_ContainerReInitEvent_getResourceSet_rdh | /**
* Get the ResourceSet.
*
* @return ResourceSet.
*/
public ResourceSet getResourceSet() {
return resourceSet;
} | 3.26 |
hadoop_ContainerReInitEvent_getReInitLaunchContext_rdh | /**
* Get the Launch Context to be used for upgrade.
*
* @return ContainerLaunchContext
*/
public ContainerLaunchContext getReInitLaunchContext() {
return reInitLaunchContext;
} | 3.26 |
hadoop_MutableCSConfigurationProvider_getInitSchedulerConfig_rdh | // Unit test can overwrite this method
protected Configuration getInitSchedulerConfig() {
Configuration initialSchedConf = new Configuration(false);
initialSchedConf.addResource(YarnConfiguration.CS_CONFIGURATION_FILE);
return initialSchedConf;
} | 3.26 |
hadoop_EmptyIOStatisticsStore_getInstance_rdh | /**
* Get the single instance of this class.
*
* @return a shared, empty instance.
*/
static IOStatisticsStore getInstance() {
return INSTANCE;
} | 3.26 |
hadoop_ManifestSuccessData_toJson_rdh | /**
* To JSON.
*
* @return json string value.
* @throws IOException
* failure
*/
public String toJson() throws IOException {
return serializer().toJson(this);
} | 3.26 |
hadoop_ManifestSuccessData_dumpDiagnostics_rdh | /**
* Dump the diagnostics (if any) to a string.
*
* @param prefix
* prefix before every entry
* @param middle
* string between key and value
* @param suffix
* suffix to each entry
* @return the dumped string
*/
public String dumpDiagnostics(String prefix, String middle, String suffix) {
return joinMap(diagnostics, prefix, middle, suffix);
} | 3.26 |
hadoop_ManifestSuccessData_getFilenames_rdh | /**
*
* @return a list of filenames in the commit.
*/
public List<String> getFilenames() {
return filenames;
} | 3.26 |
hadoop_ManifestSuccessData_serializer_rdh | /**
* Get a JSON serializer for this class.
*
* @return a serializer.
*/
public static JsonSerialization<ManifestSuccessData> serializer() {
return new JsonSerialization<>(ManifestSuccessData.class, false, true);
} | 3.26 |
hadoop_ManifestSuccessData_getCommitter_rdh | /**
*
* @return committer name.
*/
public String getCommitter() {
return committer;
} | 3.26 |
hadoop_ManifestSuccessData_recordJobFailure_rdh | /**
* Note a failure by setting success flag to false,
* then add the exception to the diagnostics.
*
* @param thrown
* throwable
*/
public void recordJobFailure(Throwable thrown) {
setSuccess(false);
String stacktrace = ExceptionUtils.getStackTrace(thrown);
diagnostics.put(DiagnosticKeys.EXCEPTION, thrown.toString());
    diagnostics.put(DiagnosticKeys.STACKTRACE, stacktrace);
  } | 3.26 |
hadoop_ManifestSuccessData_getDate_rdh | /**
*
* @return timestamp as date; no expectation of parseability.
*/
public String getDate() {
    return date;
  } | 3.26 |
hadoop_ManifestSuccessData_getMetrics_rdh | /**
*
* @return any metrics.
*/
public Map<String, Long> getMetrics() {
return metrics;
} | 3.26 |
hadoop_ManifestSuccessData_getDescription_rdh | /**
*
* @return any description text.
*/
public String getDescription() {
return description;
} | 3.26 |
hadoop_ManifestSuccessData_dumpMetrics_rdh | /**
* Dump the metrics (if any) to a string.
* The metrics are sorted for ease of viewing.
*
* @param prefix
* prefix before every entry
* @param middle
* string between key and value
* @param suffix
* suffix to each entry
* @return the dumped string
*/
public String dumpMetrics(String prefix, String middle, String suffix) {
return joinMap(metrics, prefix, middle, suffix);
} | 3.26 |
hadoop_ManifestSuccessData_setFilenamePaths_rdh | /**
 * Set the list of filename paths.
 *
 * @param paths
 * the paths to marshall into the filenames list
 */
@JsonIgnore
public void setFilenamePaths(List<Path> paths) {
  setFilenames(new ArrayList<>(paths.stream().map(AbstractManifestData::marshallPath).collect(Collectors.toList())));
} | 3.26 |
hadoop_ManifestSuccessData_getHostname_rdh | /**
*
* @return host which created the file (implicitly: committed the work).
 */
public String getHostname() {
return hostname;
} | 3.26 |
hadoop_ManifestSuccessData_setSuccess_rdh | /**
* Set the success flag.
*
* @param success
* did the job succeed?
*/
public void setSuccess(boolean success)
{
this.success = success;
} | 3.26 |
hadoop_ManifestSuccessData_getJobId_rdh | /**
*
* @return Job ID, if known.
*/
public String getJobId() {
return jobId;
} | 3.26 |
hadoop_ManifestSuccessData_load_rdh | /**
* Load an instance from a file, then validate it.
*
* @param fs
* filesystem
* @param path
* path
* @return the loaded instance
* @throws IOException
* IO failure
*/
public static ManifestSuccessData load(FileSystem fs, Path path) throws IOException {
LOG.debug("Reading success data from {}", path);
ManifestSuccessData instance = serializer().load(fs, path);
instance.validate();
return instance;
} | 3.26 |
hadoop_QueryResult_getRecords_rdh | /**
* Get the result of the query.
*
* @return List of records.
*/
public List<T> getRecords() {
return this.records;
} | 3.26 |
hadoop_QueryResult_getTimestamp_rdh | /**
 * The timestamp in driver time of this query.
*
* @return Timestamp in driver time.
*/
public long getTimestamp() {
return this.timestamp;
} | 3.26 |
hadoop_ReadStatistics_getTotalZeroCopyBytesRead_rdh | /**
*
* @return The total number of zero-copy bytes read.
*/
public synchronized long getTotalZeroCopyBytesRead() {
  return f0;
} | 3.26 |
hadoop_ReadStatistics_getRemoteBytesRead_rdh | /**
*
* @return The total number of bytes read which were not local.
*/
public synchronized long getRemoteBytesRead() {
return totalBytesRead - totalLocalBytesRead;
} | 3.26 |
hadoop_ReadStatistics_getTotalEcDecodingTimeMillis_rdh | /**
* Return the total time in milliseconds used for erasure coding decoding.
*/
public synchronized long getTotalEcDecodingTimeMillis() {
return totalEcDecodingTimeMillis;
} | 3.26 |
hadoop_ReadStatistics_getTotalShortCircuitBytesRead_rdh | /**
*
* @return The total short-circuit local bytes read.
*/
public synchronized long getTotalShortCircuitBytesRead() {
return totalShortCircuitBytesRead;
} | 3.26 |
hadoop_AbstractSchedulerPlanFollower_m0_rdh | /**
* Resizes reservations based on currently available resources.
*/
private Resource m0(ResourceCalculator rescCalculator, Resource availablePlanResources, Resource totalReservationResources, Resource reservationResources) {
return Resources.multiply(availablePlanResources, Resources.ratio(rescCalculator, reservationResources,
totalReservationResources));
} | 3.26 |
hadoop_AbstractSchedulerPlanFollower_arePlanResourcesLessThanReservations_rdh | /**
* Check if plan resources are less than expected reservation resources.
*/
private boolean arePlanResourcesLessThanReservations(ResourceCalculator rescCalculator, Resource clusterResources, Resource planResources, Resource reservedResources) {
return Resources.greaterThan(rescCalculator, clusterResources, reservedResources, planResources);
} | 3.26 |
hadoop_AbstractSchedulerPlanFollower_getReservationQueueName_rdh | // Schedulers have different ways of naming queues. See YARN-2773
protected String getReservationQueueName(String planQueueName, String reservationId) {
return reservationId;
} | 3.26 |
hadoop_AbstractSchedulerPlanFollower_calculateReservationToPlanRatio_rdh | /**
* Calculates ratio of reservationResources to planResources.
*/
private float calculateReservationToPlanRatio(ResourceCalculator rescCalculator, Resource clusterResources, Resource planResources, Resource reservationResources) {
    return Resources.divide(rescCalculator, clusterResources, reservationResources, planResources);
  } | 3.26 |
hadoop_AbstractSchedulerPlanFollower_sortByDelta_rdh | /**
* Sort in the order from the least new amount of resources asked (likely
* negative) to the highest. This prevents "order-of-operation" errors related
* to exceeding 100% capacity temporarily.
*
* @param currentReservations
* the currently active reservations
* @param now
* the current time
* @param plan
* the {@link Plan} that is being considered
* @return the sorted list of {@link ReservationAllocation}s
 */
protected List<ReservationAllocation> sortByDelta(List<ReservationAllocation> currentReservations, long now, Plan plan) {
Collections.sort(currentReservations, new ReservationAllocationComparator(now, this, plan));
return currentReservations;
} | 3.26 |
hadoop_AbstractSchedulerPlanFollower_cleanupExpiredQueues_rdh | /**
* First sets entitlement of queues to zero to prevent new app submission.
* Then move all apps in the set of queues to the parent plan queue's default
* reservation queue if move is enabled. Finally cleanups the queue by killing
* any apps (if move is disabled or move failed) and removing the queue
*
* @param planQueueName
* the name of {@code PlanQueue}
* @param shouldMove
* flag to indicate if any running apps should be moved or
* killed
* @param toRemove
* the remnant apps to clean up
* @param defReservationQueue
* the default {@code ReservationQueue} of the
* {@link Plan}
*/
protected void cleanupExpiredQueues(String planQueueName, boolean shouldMove, Set<String> toRemove, String defReservationQueue) {
for (String expiredReservationId : toRemove) {
try {
// reduce entitlement to 0
String expiredReservation = getReservationQueueName(planQueueName, expiredReservationId);
setQueueEntitlement(planQueueName, expiredReservation, 0.0F, 0.0F);
if (shouldMove) {
moveAppsInQueueSync(expiredReservation, defReservationQueue);
}
List<ApplicationAttemptId> appsInQueue = f0.getAppsInQueue(expiredReservation);
int size = (appsInQueue == null) ? 0 : appsInQueue.size();
if (size > 0) {
f0.killAllAppsInQueue(expiredReservation);
LOG.info("Killing applications in queue: {}", expiredReservation);
} else {
f0.removeQueue(expiredReservation);
LOG.info(("Queue: " + expiredReservation) + " removed");
}
} catch (YarnException e) {
LOG.warn("Exception while trying to expire reservation: {}", expiredReservationId, e);
}
}
} | 3.26 |
hadoop_ValueAggregatorBaseDescriptor_configure_rdh | /**
* get the input file name.
*
* @param job
* a job configuration object
 */
public void configure(JobConf job) {
super.configure(job);
maxNumItems = job.getLong("aggregate.max.num.unique.values", Long.MAX_VALUE);
} | 3.26 |
hadoop_ValueAggregatorBaseDescriptor_generateValueAggregator_rdh | /**
*
* @param type
* the aggregation type
* @return a value aggregator of the given type.
*/
public static ValueAggregator generateValueAggregator(String type) {
  ValueAggregator retv = null;
  if (type.compareToIgnoreCase(LONG_VALUE_SUM) == 0) {
    retv = new LongValueSum();
  }
  if (type.compareToIgnoreCase(f0) == 0) {
    retv = new LongValueMax();
  } else if (type.compareToIgnoreCase(LONG_VALUE_MIN) == 0) {
    retv = new LongValueMin();
} else if (type.compareToIgnoreCase(STRING_VALUE_MAX) == 0) {
retv = new StringValueMax();
} else if (type.compareToIgnoreCase(STRING_VALUE_MIN) == 0) {
retv = new StringValueMin();
} else if (type.compareToIgnoreCase(DOUBLE_VALUE_SUM) == 0) {
retv = new DoubleValueSum();
} else if (type.compareToIgnoreCase(UNIQ_VALUE_COUNT) == 0) {
retv = new UniqValueCount(maxNumItems);
} else if (type.compareToIgnoreCase(VALUE_HISTOGRAM) == 0) {
retv = new ValueHistogram();
}
return retv;
} | 3.26 |
hadoop_FilePosition_absolute_rdh | /**
* Gets the current absolute position within this file.
*
* @return the current absolute position within this file.
*/
public long absolute() {
throwIfInvalidBuffer();
  return bufferStartOffset + relative();
} | 3.26 |
hadoop_FilePosition_isValid_rdh | /**
* Determines if the current position is valid.
*
* @return true if the current position is valid, false otherwise.
*/
public boolean isValid() {
return buffer != null;
} | 3.26 |
hadoop_FilePosition_blockNumber_rdh | /**
* Gets the id of the current block.
*
* @return the id of the current block.
*/
public int blockNumber() {
throwIfInvalidBuffer();
return blockData.getBlockNumber(bufferStartOffset);
} | 3.26 |
hadoop_FilePosition_setData_rdh | /**
* Associates a buffer with this file.
*
* @param bufferData
* the buffer associated with this file.
* @param startOffset
* Start offset of the buffer relative to the start of a file.
* @param readOffset
* Offset where reading starts relative to the start of a file.
* @throws IllegalArgumentException
* if bufferData is null.
* @throws IllegalArgumentException
* if startOffset is negative.
* @throws IllegalArgumentException
* if readOffset is negative.
* @throws IllegalArgumentException
* if readOffset is outside the range [startOffset, buffer end].
*/
public void setData(BufferData bufferData, long startOffset, long readOffset) {
checkNotNull(bufferData, "bufferData");
checkNotNegative(startOffset, "startOffset");
checkNotNegative(readOffset, "readOffset");
  checkWithinRange(readOffset, "readOffset", startOffset, startOffset + bufferData.getBuffer().limit());
  data = bufferData;
  buffer = bufferData.getBuffer().duplicate();
bufferStartOffset = startOffset;
readStartOffset = readOffset;
setAbsolute(readOffset);
resetReadStats();
} | 3.26 |
hadoop_FilePosition_m0_rdh | /**
* Determines whether the given absolute position lies within the current buffer.
*
* @param pos
* the position to check.
* @return true if the given absolute position lies within the current buffer, false otherwise.
 */
public boolean m0(long pos) {
throwIfInvalidBuffer();
long bufferEndOffset = bufferStartOffset + buffer.limit();
return (pos >= bufferStartOffset) && (pos <= bufferEndOffset);
} | 3.26 |
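
A standalone arithmetic sketch of the containment check above; the offsets and buffer size are illustrative:

```java
public class BufferContainmentDemo {
  // Mirrors the containment test above: an absolute file position lies in the
  // current buffer when it falls in [bufferStartOffset, bufferStartOffset + limit].
  static boolean within(long pos, long bufferStartOffset, int bufferLimit) {
    long bufferEndOffset = bufferStartOffset + bufferLimit;
    return pos >= bufferStartOffset && pos <= bufferEndOffset;
  }

  public static void main(String[] args) {
    long start = 8 * 1024 * 1024; // buffer holds data starting at the 8 MB offset
    int limit = 4 * 1024 * 1024;  // 4 MB of valid data in the buffer
    System.out.println(within(start + 100, start, limit));   // true
    System.out.println(within(start + limit, start, limit)); // true: end is inclusive here
    System.out.println(within(start - 1, start, limit));     // false
  }
}
```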
hadoop_FilePosition_setAbsolute_rdh | /**
* If the given {@code pos} lies within the current buffer, updates the current position to
* the specified value and returns true; otherwise returns false without changing the position.
*
* @param pos
* the absolute position to change the current position to if possible.
* @return true if the given current position was updated, false otherwise.
*/
public boolean setAbsolute(long pos) {
  if (isValid() && m0(pos)) {
    int relativePos = (int) (pos - bufferStartOffset);
    buffer.position(relativePos);
return true;
} else {
return false;
}
} | 3.26 |
hadoop_FilePosition_invalidate_rdh | /**
* Marks the current position as invalid.
*/
public void invalidate() {
  buffer = null;
  bufferStartOffset = -1;
  data = null;
} | 3.26 |
hadoop_FilePosition_relative_rdh | /**
* Gets the current position within this file relative to the start of the associated buffer.
*
* @return the current position within this file relative to the start of the associated buffer.
*/
public int relative() {
throwIfInvalidBuffer();
return buffer.position();
} | 3.26 |
hadoop_FilePosition_bufferStartOffset_rdh | /**
* Gets the start of the current block's absolute offset.
*
* @return the start of the current block's absolute offset.
 */
public long bufferStartOffset() {
throwIfInvalidBuffer();
return bufferStartOffset;
} | 3.26 |
hadoop_FilePosition_isLastBlock_rdh | /**
* Determines whether the current block is the last block in this file.
*
* @return true if the current block is the last block in this file, false otherwise.
*/
public boolean isLastBlock() {
return blockData.isLastBlock(blockNumber());
} | 3.26 |
hadoop_FilePosition_bufferFullyRead_rdh | /**
* Determines whether the current buffer has been fully read.
*
* @return true if the current buffer has been fully read, false otherwise.
*/
public boolean bufferFullyRead() {
throwIfInvalidBuffer();
  return ((bufferStartOffset == readStartOffset) && (relative() == buffer.limit())) && (f0 == buffer.limit());
} | 3.26 |
hadoop_AbstractManifestData_validateCollectionClass_rdh | /**
* Verify that all instances in a collection are of the given class.
*
* @param it
* iterator
* @param classname
* classname to require
* @throws IOException
* on a failure
*/
void validateCollectionClass(Iterable it, Class classname) throws IOException {
for (Object o : it) {
verify(o.getClass().equals(classname), "Collection element is not a %s: %s", classname, o.getClass());
}
} | 3.26 |
hadoop_AbstractManifestData_unmarshallPath_rdh | /**
* Convert a string path to Path type, by way of a URI.
*
* @param path
* path as a string
* @return path value
* @throws RuntimeException
* marshalling failure.
*/
public static Path unmarshallPath(String path) {
try {
return new Path(new URI(requireNonNull(path, "No path")));
  } catch (URISyntaxException e) {
throw new RuntimeException((("Failed to parse \"" + path) + "\" : ") + e, e);
}
} | 3.26 |
hadoop_AbstractManifestData_marshallPath_rdh | /**
* Convert a path to a string which can be included in the JSON.
*
* @param path
* path
* @return a string value, or, if path==null, null.
*/
public static String marshallPath(@Nullable Path path) {
return path != null ? path.toUri().toString() : null;
} | 3.26 |
hadoop_ApplicationEntity_getApplicationEvent_rdh | /**
*
* @param te
* TimelineEntity object.
* @param eventId
* event with this id needs to be fetched
* @return TimelineEvent if TimelineEntity contains the desired event.
*/
public static TimelineEvent getApplicationEvent(TimelineEntity te, String eventId) {
if (m0(te)) {
for (TimelineEvent event : te.getEvents()) {
if (event.getId().equals(eventId)) {
return event;
}
}
}
return null;
} | 3.26 |
hadoop_ApplicationEntity_m0_rdh | /**
* Checks if the input TimelineEntity object is an ApplicationEntity.
*
* @param te
* TimelineEntity object.
* @return true if input is an ApplicationEntity, false otherwise
*/
public static boolean m0(TimelineEntity te) {
return te == null ? false : te.getType().equals(TimelineEntityType.YARN_APPLICATION.toString());
} | 3.26 |
hadoop_BigDecimalSplitter_tryDivide_rdh | /**
* Divide numerator by denominator. If impossible in exact mode, use rounding.
*/
protected BigDecimal tryDivide(BigDecimal numerator, BigDecimal denominator) {
try {
return numerator.divide(denominator);
} catch (ArithmeticException ae) {
return numerator.divide(denominator, BigDecimal.ROUND_HALF_UP);
}
} | 3.26 |
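
A minimal sketch of the same exact-then-rounded division, using RoundingMode.HALF_UP in place of the deprecated ROUND_HALF_UP constant:

```java
import java.math.BigDecimal;
import java.math.RoundingMode;

public class TryDivideDemo {
  // Same idea as tryDivide above: exact division first, rounded division as a
  // fallback for non-terminating decimal expansions such as 10/3.
  static BigDecimal tryDivide(BigDecimal numerator, BigDecimal denominator) {
    try {
      return numerator.divide(denominator);
    } catch (ArithmeticException ae) {
      return numerator.divide(denominator, RoundingMode.HALF_UP);
    }
  }

  public static void main(String[] args) {
    System.out.println(tryDivide(new BigDecimal("10"), new BigDecimal("4"))); // 2.5 (exact)
    System.out.println(tryDivide(new BigDecimal("10"), new BigDecimal("3"))); // 3 (rounded)
  }
}
```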
hadoop_BigDecimalSplitter_split_rdh | /**
* Returns a list of BigDecimals one element longer than the list of input splits.
* This represents the boundaries between input splits.
* All splits are open on the top end, except the last one.
*
* So the list [0, 5, 8, 12, 18] would represent splits capturing the intervals:
*
* [0, 5)
* [5, 8)
* [8, 12)
* [12, 18] note the closed interval for the last split.
*/
List<BigDecimal> split(BigDecimal numSplits, BigDecimal minVal, BigDecimal maxVal) throws SQLException {
List<BigDecimal> splits = new ArrayList<BigDecimal>();
// Use numSplits as a hint. May need an extra task if the size doesn't
// divide cleanly.
BigDecimal splitSize = tryDivide(maxVal.subtract(minVal), numSplits);
if (splitSize.compareTo(MIN_INCREMENT) < 0) {
splitSize = MIN_INCREMENT;
LOG.warn("Set BigDecimal splitSize to MIN_INCREMENT");
    }
    BigDecimal curVal = minVal;
while (curVal.compareTo(maxVal) <= 0) {
splits.add(curVal);
curVal = curVal.add(splitSize);
}
if ((splits.get(splits.size() - 1).compareTo(maxVal) != 0) || (splits.size() == 1)) {
// We didn't end on the maxVal. Add that to the end of the list.
splits.add(maxVal);
}
return splits;
} | 3.26 |
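
A hedged standalone sketch of the boundary loop for the [0, 18] example in the javadoc; tryDivide is inlined and the MIN_INCREMENT guard (assumed to be a tiny constant) is omitted for brevity:

```java
import java.math.BigDecimal;
import java.math.RoundingMode;
import java.util.ArrayList;
import java.util.List;

public class SplitBoundariesDemo {
  // Standalone version of the boundary computation above.
  static List<BigDecimal> split(BigDecimal numSplits, BigDecimal minVal, BigDecimal maxVal) {
    BigDecimal splitSize;
    try {
      splitSize = maxVal.subtract(minVal).divide(numSplits);
    } catch (ArithmeticException ae) {
      splitSize = maxVal.subtract(minVal).divide(numSplits, 10, RoundingMode.HALF_UP);
    }
    List<BigDecimal> splits = new ArrayList<>();
    BigDecimal curVal = minVal;
    while (curVal.compareTo(maxVal) <= 0) {
      splits.add(curVal);
      curVal = curVal.add(splitSize);
    }
    // Make sure the closed upper bound is always present.
    if (splits.get(splits.size() - 1).compareTo(maxVal) != 0 || splits.size() == 1) {
      splits.add(maxVal);
    }
    return splits;
  }

  public static void main(String[] args) {
    // Four splits over [0, 18] -> boundaries [0, 4.5, 9.0, 13.5, 18.0].
    System.out.println(split(new BigDecimal("4"), new BigDecimal("0"), new BigDecimal("18")));
  }
}
```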
hadoop_Abfs_finalize_rdh | /**
* Close the file system; the FileContext API doesn't have an explicit close.
*/
@Override
protected void finalize() throws Throwable {
fsImpl.close();
super.finalize();
} | 3.26 |
hadoop_FieldSelectionMapper_m0_rdh | /**
* The identify function. Input key/value pair is written directly to output.
*/
public void m0(K key, V val, Context context) throws IOException, InterruptedException {
FieldSelectionHelper helper = new FieldSelectionHelper(FieldSelectionHelper.emptyText, FieldSelectionHelper.emptyText);
helper.extractOutputKeyValue(key.toString(), val.toString(), fieldSeparator, mapOutputKeyFieldList, mapOutputValueFieldList, allMapValueFieldsFrom, ignoreInputKey, true);
context.write(helper.getKey(), helper.getValue());
} | 3.26 |
hadoop_FileBasedCopyListing_getBytesToCopy_rdh | /**
* {@inheritDoc }
*/
@Override
protected long getBytesToCopy() {
return globbedListing.getBytesToCopy();
}
/**
* {@inheritDoc } | 3.26 |
hadoop_FileBasedCopyListing_validatePaths_rdh | /**
* {@inheritDoc }
*/
@Override
protected void validatePaths(DistCpContext context) throws IOException, InvalidInputException {
} | 3.26 |
hadoop_FileBasedCopyListing_doBuildListing_rdh | /**
* Implementation of CopyListing::buildListing().
* Iterates over all source paths mentioned in the input-file.
*
* @param pathToListFile
* Path on HDFS where the listing file is written.
* @param context
* Distcp context with associated input options.
* @throws IOException
*/
  @Override
  public void doBuildListing(Path pathToListFile, DistCpContext context) throws IOException {
context.setSourcePaths(fetchFileList(context.getSourceFileListing()));
globbedListing.buildListing(pathToListFile, context);
} | 3.26 |
hadoop_SendRequestIntercept_bind_rdh | /**
* Binds a new lister to the operation context so the WASB file system can
* appropriately intercept sends and allow concurrent OOB I/Os. This
* by-passes the blob immutability check when reading streams.
*
* @param opContext
 * the operation context associated with this request.
*/
  public static void bind(OperationContext opContext) {
opContext.getSendingRequestEventHandler().addListener(new SendRequestIntercept());
} | 3.26 |
hadoop_SendRequestIntercept_eventOccurred_rdh | /**
* Handler which processes the sending request event from Azure SDK. The
 * handler simply resets the conditional header to make all read requests
* unconditional if reads with concurrent OOB writes are allowed.
*
* @param sendEvent
* - send event context from Windows Azure SDK.
*/
@Override
public void eventOccurred(SendingRequestEvent sendEvent) {
if (!(sendEvent.getConnectionObject() instanceof HttpURLConnection)) {
// Pass if there is no HTTP connection associated with this send
// request.
return;
}
// Capture the HTTP URL connection object and get size of the payload for
// the request.
HttpURLConnection urlConnection = ((HttpURLConnection) (sendEvent.getConnectionObject()));
    // Determine whether this is a download request by checking that the
    // request method is a "GET" operation.
if (urlConnection.getRequestMethod().equalsIgnoreCase("GET")) {
// If concurrent reads on OOB writes are allowed, reset the if-match
// condition on the conditional header.
urlConnection.setRequestProperty(HeaderConstants.IF_MATCH, ALLOW_ALL_REQUEST_PRECONDITIONS);
}
} | 3.26 |
hadoop_AbfsCountersImpl_getRegistry_rdh | /**
* Getter for MetricRegistry.
*
 * @return the MetricsRegistry, or null.
 */
private MetricsRegistry getRegistry() {
  return registry;
} | 3.26 |
hadoop_AbfsCountersImpl_formString_rdh | /**
* {@inheritDoc }
*
* Method to aggregate all the counters in the MetricRegistry and form a
* string with prefix, separator and suffix.
*
* @param prefix
* string that would be before metric.
* @param separator
* string that would be between metric name and value.
* @param suffix
* string that would be after metric value.
* @param all
* gets all the values even if unchanged.
* @return a String with all the metrics and their values.
*/
@Override
public String formString(String prefix, String separator, String suffix, boolean all) {
MetricStringBuilder metricStringBuilder = new MetricStringBuilder(null, prefix, separator, suffix);
registry.snapshot(metricStringBuilder, all);
return metricStringBuilder.toString();
} | 3.26 |
hadoop_AbfsCountersImpl_trackDuration_rdh | /**
* Tracks the duration of a statistic.
*
* @param key
* name of the statistic.
* @return DurationTracker for that statistic.
*/
@Override
public DurationTracker trackDuration(String key) {
return ioStatisticsStore.trackDuration(key);
} | 3.26 |
hadoop_AbfsCountersImpl_lookupCounter_rdh | /**
* Look up counter by name.
*
* @param name
* name of counter.
* @return counter if found, else null.
*/
private MutableCounterLong lookupCounter(String name) {
MutableMetric metric = lookupMetric(name);
if (metric == null) {
return null;
}
if (!(metric instanceof MutableCounterLong)) {
throw new IllegalStateException((("Metric " + name) + " is not a MutableCounterLong: ") + metric);
}
return ((MutableCounterLong) (metric));
} | 3.26 |
hadoop_AbfsCountersImpl_incrementCounter_rdh | /**
* {@inheritDoc }
*
* Increment a statistic with some value.
*
* @param statistic
* AbfsStatistic need to be incremented.
* @param value
* long value to be incremented by.
*/
@Override
public void incrementCounter(AbfsStatistic statistic, long value) {
ioStatisticsStore.incrementCounter(statistic.getStatName(), value);
MutableCounterLong v5 = lookupCounter(statistic.getStatName());
if (v5 != null) {
v5.incr(value);
}
} | 3.26 |
hadoop_AbfsCountersImpl_lookupMetric_rdh | /**
* Look up a Metric from registered set.
*
* @param name
* name of metric.
* @return the metric or null.
 */
private MutableMetric lookupMetric(String name) {
return getRegistry().get(name);
} | 3.26 |
hadoop_AbfsCountersImpl_toMap_rdh | /**
* {@inheritDoc }
*
* Map of all the counters for testing.
*
* @return a map of the IOStatistics counters.
*/
@VisibleForTesting
@Override
public Map<String, Long> toMap() {
return ioStatisticsStore.counters();
} | 3.26 |
hadoop_AbfsCountersImpl_createCounter_rdh | /**
* Create a counter in the registry.
*
* @param stats
* AbfsStatistic whose counter needs to be made.
* @return counter or null.
*/
  private MutableCounterLong createCounter(AbfsStatistic stats) {
return registry.newCounter(stats.getStatName(), stats.getStatDescription(), 0L);
} | 3.26 |
hadoop_AbfsCountersImpl_getIOStatistics_rdh | /**
* Returning the instance of IOStatisticsStore used to collect the metrics
* in AbfsCounters.
*
* @return instance of IOStatistics.
 */
@Override
public IOStatistics getIOStatistics() {
return ioStatisticsStore;
} | 3.26 |
hadoop_LocalResolver_getNamenodesSubcluster_rdh | /**
* Get the Namenode mapping from the subclusters from the Membership store. As
* the Routers are usually co-located with Namenodes, we also check for the
* local address for this Router here.
*
* @return NN IP -> Subcluster.
*/
private Map<String, String> getNamenodesSubcluster(MembershipStore membershipStore) {
// Manage requests from this hostname (127.0.0.1)
String v16 = "127.0.0.1";
String localHostname = v16;
try {
localHostname = InetAddress.getLocalHost().getHostName();
} catch (UnknownHostException e) {
LOG.error("Cannot get local host name");
}
    Map<String, String> ret = new HashMap<>();
try {
// Get the values from the store
GetNamenodeRegistrationsRequest request = GetNamenodeRegistrationsRequest.newInstance();
GetNamenodeRegistrationsResponse response = membershipStore.getNamenodeRegistrations(request);
final List<MembershipState> nns = response.getNamenodeMemberships();
for (MembershipState nn : nns) {
try {
          String nsId = nn.getNameserviceId();
          String rpcAddress = nn.getRpcAddress();
          String v25 = HostAndPort.fromString(rpcAddress).getHost();
          ret.put(v25, nsId);
if (v25.equals(localHostname)) {
ret.put(v16, nsId);
}
InetAddress addr = InetAddress.getByName(v25);
String ipAddr = addr.getHostAddress();
ret.put(ipAddr, nsId);
} catch (Exception e) {
LOG.error("Cannot get address for {}: {}", nn, e.getMessage());
}
}
    } catch (IOException ioe) {
LOG.error("Cannot get Namenodes from the State Store", ioe);
}
return ret;
} | 3.26 |
hadoop_LocalResolver_chooseFirstNamespace_rdh | /**
* Get the local name space. This relies on the RPC Server to get the address
* from the client.
*
* TODO we only support DN and NN locations, we need to add others like
* Resource Managers.
*
* @param path
* Path ignored by this policy.
* @param loc
* Federated location with multiple destinations.
* @return Local name space. Null if we don't know about this machine.
*/
@Override
protected String chooseFirstNamespace(String path, PathLocation loc) {
String localSubcluster = null;
String clientAddr = getClientAddr();
Map<String, String> subclusterInfo = getSubclusterMapping();
if (subclusterInfo != null) {
      localSubcluster = subclusterInfo.get(clientAddr);
      if (localSubcluster != null) {
        LOG.debug("Local namespace for {} is {}", clientAddr, localSubcluster);
      } else {
        LOG.error("Cannot get local namespace for {}", clientAddr);
      }
    } else {
      LOG.error("Cannot get node mapping when resolving {} at {} from {}", path, loc, clientAddr);
    }
    return localSubcluster;
} | 3.26 |
hadoop_LocalResolver_getSubclusterInfo_rdh | /**
* Get the mapping from nodes to subcluster. It gets this mapping from the
* subclusters through expensive calls (e.g., RPC) and uses caching to avoid
* too many calls. The cache might be updated asynchronously to reduce
* latency.
*
* @return Node IP to Subcluster.
*/
@Override
protected Map<String, String> getSubclusterInfo(MembershipStore membershipStore) {
Map<String, String> mapping = new HashMap<>();
Map<String, String> dnSubcluster = getDatanodesSubcluster();
if (dnSubcluster != null) {
mapping.putAll(dnSubcluster);
}
Map<String, String> nnSubcluster = getNamenodesSubcluster(membershipStore);
    if (nnSubcluster != null) {
      mapping.putAll(nnSubcluster);
    }
return mapping;
} | 3.26 |
hadoop_LocalResolver_getDatanodesSubcluster_rdh | /**
* Get the Datanode mapping from the subclusters from the Namenodes. This
* needs to be done as a privileged action to use the user for the Router and
* not the one from the client in the RPC call.
*
* @return DN IP -> Subcluster.
*/
private Map<String, String> getDatanodesSubcluster() {
final RouterRpcServer rpcServer = getRpcServer();
if (rpcServer == null) {
LOG.error("Cannot access the Router RPC server");
      return null;
}
Map<String, String> ret = new HashMap<>();
try {
// We need to get the DNs as a privileged user
UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
      Map<String, DatanodeStorageReport[]> dnMap = loginUser.doAs(new PrivilegedAction<Map<String, DatanodeStorageReport[]>>() {
        @Override
public Map<String, DatanodeStorageReport[]> run() {
try {
return rpcServer.getDatanodeStorageReportMap(DatanodeReportType.ALL);
} catch (IOException e) {
LOG.error("Cannot get the datanodes from the RPC server", e);
            return null;
          }
}
});
for (Entry<String, DatanodeStorageReport[]> entry : dnMap.entrySet()) {
String nsId = entry.getKey();
DatanodeStorageReport[] dns = entry.getValue();
for (DatanodeStorageReport dn : dns) {
DatanodeInfo dnInfo = dn.getDatanodeInfo();
String ipAddr = dnInfo.getIpAddr();
ret.put(ipAddr, nsId);
}
}
} catch (IOException e) {
LOG.error("Cannot get Datanodes from the Namenodes: {}", e.getMessage());
}
return ret;
} | 3.26 |
hadoop_CachingGetSpaceUsed_incDfsUsed_rdh | /**
* Increment the cached value of used space.
*
* @param value
* dfs used value.
*/
public void incDfsUsed(long value) {
used.addAndGet(value);
} | 3.26 |
hadoop_CachingGetSpaceUsed_initRefreshThread_rdh | /**
 * runImmediately should be set to true if the first refresh was skipped.
 *
 * @param runImmediately
 * whether to run the first refresh immediately; the default should be false.
*/
private void initRefreshThread(boolean runImmediately) {
if (refreshInterval > 0) {
refreshUsed = new Thread(new RefreshThread(this, runImmediately), "refreshUsed-" + dirPath);
refreshUsed.setDaemon(true);
      refreshUsed.start();
    } else {
running.set(false);
refreshUsed = null;
    }
  } | 3.26 |
hadoop_CachingGetSpaceUsed_setShouldFirstRefresh_rdh | /**
* Reset that if we need to do the first refresh.
*
* @param shouldFirstRefresh
* The flag value to set.
*/
protected void setShouldFirstRefresh(boolean shouldFirstRefresh) {
this.shouldFirstRefresh = shouldFirstRefresh;
} | 3.26 |
hadoop_CachingGetSpaceUsed_getRefreshInterval_rdh | /**
* How long in between runs of the background refresh.
*
* @return refresh interval.
*/
@VisibleForTesting
public long getRefreshInterval() {
return refreshInterval;
} | 3.26 |
hadoop_CachingGetSpaceUsed_setUsed_rdh | /**
* Reset the current used data amount. This should be called
* when the cached value is re-computed.
*
* @param usedValue
* new value that should be the disk usage.
*/
protected void setUsed(long usedValue) {
this.used.set(usedValue);
} | 3.26 |
hadoop_CachingGetSpaceUsed_getDirPath_rdh | /**
*
* @return The directory path being monitored.
*/
public String getDirPath() {
return dirPath;
} | 3.26 |