name (string, length 12–178) | code_snippet (string, length 8–36.5k) | score (float64, 3.26–3.68) |
---|---|---|
hadoop_ManifestCommitterSupport_maybeAddIOStatistics_rdh | /**
* If the object is an IOStatisticsSource, get and add
* its IOStatistics.
*
* @param o
* source object.
*/
public static void maybeAddIOStatistics(IOStatisticsAggregator ios, Object o) {
  if (o instanceof IOStatisticsSource) {
    ios.aggregate(((IOStatisticsSource) o).getIOStatistics());
  }
} | 3.26 |
hadoop_ManifestCommitterSupport_getPendingJobAttemptsPath_rdh | /**
* Get the location of pending job attempts.
*
* @param out
* the base output directory.
* @return the location of pending job attempts.
*/
public static Path getPendingJobAttemptsPath(Path out) {
return new Path(out, PENDING_DIR_NAME);
} | 3.26 |
hadoop_ManifestCommitterSupport_getEtag_rdh | /**
* Get an etag from a FileStatus which MUST BE
* an implementation of EtagSource and
* whose etag MUST NOT BE null/empty.
*
* @param status
* the status; may be null.
* @return the etag or null if not provided
*/
public static String getEtag(FileStatus status) {
  if (status instanceof EtagSource) {
    return ((EtagSource) status).getEtag();
  } else {
    return null;
  }
} | 3.26 |
hadoop_ManifestCommitterSupport_buildJobUUID_rdh | /**
 * Build a Job UUID from the job conf (if
 * {@link ManifestCommitterConstants#SPARK_WRITE_UUID} is set)
 * or from the MR job ID.
 *
 * @param conf
 * job/task configuration
 * @param jobId
 * job ID from YARN or Spark.
 * @return (a job ID, source)
 */
public static Pair<String, String> buildJobUUID(Configuration conf, JobID jobId) {
String jobUUID = conf.getTrimmed(SPARK_WRITE_UUID, "");
if (jobUUID.isEmpty()) {
jobUUID = jobId.toString();
return Pair.of(jobUUID, JOB_ID_SOURCE_MAPREDUCE);
} else {
return Pair.of(jobUUID, SPARK_WRITE_UUID);
}
} | 3.26 |
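As a hedged illustration of the two branches above, the sketch below first lets the method fall back to the MR job ID and then sets a Spark write UUID so that value wins. The package paths of the committer classes and the commons-lang3 Pair are assumptions; only the method signature comes from the snippet itself.

```java
// Sketch only: the committer package paths below are assumptions, not verified imports.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.mapreduce.lib.output.committer.manifest.impl.ManifestCommitterSupport;
import static org.apache.hadoop.mapreduce.lib.output.committer.manifest.ManifestCommitterConstants.SPARK_WRITE_UUID;

public class JobUuidExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    JobID jobId = new JobID("202401011200", 7);

    // No Spark UUID in the conf: falls back to the MR job ID.
    Pair<String, String> fromMr = ManifestCommitterSupport.buildJobUUID(conf, jobId);
    System.out.println(fromMr.getLeft() + " from " + fromMr.getRight());

    // With a Spark write UUID set, that value and its config key are returned instead.
    conf.set(SPARK_WRITE_UUID, "a1b2c3d4-spark-uuid");
    Pair<String, String> fromSpark = ManifestCommitterSupport.buildJobUUID(conf, jobId);
    System.out.println(fromSpark.getLeft() + " from " + fromSpark.getRight());
  }
}
```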
hadoop_ManifestCommitterSupport_createManifestOutcome_rdh | /**
* Create success/outcome data.
*
* @param stageConfig
* configuration.
 * @param stage
 * stage name, recorded as a diagnostic.
* @return a _SUCCESS object with some diagnostics.
*/
public static ManifestSuccessData createManifestOutcome(StageConfig stageConfig, String stage) {
final ManifestSuccessData outcome = new ManifestSuccessData();
outcome.setJobId(stageConfig.getJobId());
outcome.setJobIdSource(stageConfig.getJobIdSource());
outcome.setCommitter(MANIFEST_COMMITTER_CLASSNAME);
// real timestamp
outcome.setTimestamp(System.currentTimeMillis());
final ZonedDateTime now = ZonedDateTime.now();
outcome.setDate(now.toString());
outcome.setHostname(NetUtils.getLocalHostname());
// add some extra diagnostics which can still be parsed by older
// builds of test applications.
// Audit Span information can go in here too, in future.
try {
outcome.putDiagnostic(PRINCIPAL, UserGroupInformation.getCurrentUser().getShortUserName());
} catch (IOException ignored) {
// don't know who we are? exclude from the diagnostics.
}
    outcome.putDiagnostic(STAGE, stage);
    return outcome;
} | 3.26 |
hadoop_ManifestCommitterSupport_getAppAttemptId_rdh | /**
* Get the Application Attempt Id for this job
* by looking for {@link MRJobConfig#APPLICATION_ATTEMPT_ID}
* in the configuration, falling back to 0 if unset.
* For spark it will always be 0, for MR it will be set in the AM
* to the {@code ApplicationAttemptId} the AM is launched with.
*
* @param conf
* job configuration.
* @return the Application Attempt Id for the job.
*/
public static int getAppAttemptId(Configuration conf) {
return conf.getInt(MRJobConfig.APPLICATION_ATTEMPT_ID, INITIAL_APP_ATTEMPT_ID);
} | 3.26 |
hadoop_ManifestCommitterSupport_manifestPathForTask_rdh | /**
* Get the path in the job attempt dir for a manifest for a task.
*
* @param manifestDir
* manifest directory
* @param taskId
* taskID.
* @return the final path to rename the manifest file to
*/
public static Path manifestPathForTask(Path manifestDir, String taskId) {
return new Path(manifestDir, taskId + MANIFEST_SUFFIX);
} | 3.26 |
hadoop_AbstractMultipartUploader_close_rdh | /**
* Perform any cleanup.
* The upload is not required to support any operations after this.
*
* @throws IOException
* problems on close.
 */
@Override
public void close() throws IOException {
} | 3.26 |
hadoop_AbstractMultipartUploader_abortUploadsUnderPath_rdh | /**
* {@inheritDoc }.
*
* @param path
* path to abort uploads under.
* @return a future to -1.
* @throws IOException
* raised on errors performing I/O.
*/
public CompletableFuture<Integer> abortUploadsUnderPath(Path path)
    throws IOException {
  m0(path);
  CompletableFuture<Integer> f = new CompletableFuture<>();
  f.complete(-1);
  return f;
} | 3.26 |
hadoop_AbstractMultipartUploader_checkPutArguments_rdh | /**
* Check all the arguments to the
* {@link MultipartUploader#putPart(UploadHandle, int, Path, InputStream, long)}
* operation.
*
* @param filePath
* Target path for upload (as {@link #startUpload(Path)}).
* @param inputStream
* Data for this part. Implementations MUST close this
* stream after reading in the data.
* @param partNumber
* Index of the part relative to others.
* @param uploadId
* Identifier from {@link #startUpload(Path)}.
* @param lengthInBytes
* Target length to read from the stream.
* @throws IllegalArgumentException
* invalid argument
*/
protected void checkPutArguments(Path filePath, InputStream inputStream,
    int partNumber, UploadHandle uploadId, long lengthInBytes)
    throws IllegalArgumentException {
  m0(filePath);
  checkArgument(inputStream != null, "null inputStream");
  checkArgument(partNumber > 0, "Invalid part number: %d", partNumber);
  checkArgument(uploadId != null, "null uploadId");
  checkArgument(lengthInBytes >= 0, "Invalid part length: %d", lengthInBytes);
} | 3.26 |
hadoop_AbstractMultipartUploader_checkUploadId_rdh | /**
* Utility method to validate uploadIDs.
*
* @param uploadId
* Upload ID
* @throws IllegalArgumentException
* invalid ID
*/
protected void checkUploadId(byte[] uploadId) throws IllegalArgumentException {
checkArgument(uploadId != null, "null uploadId");
checkArgument(uploadId.length > 0, "Empty UploadId is not valid");
} | 3.26 |
hadoop_AbstractMultipartUploader_m0_rdh | /**
* Validate a path.
*
* @param path
* path to check.
*/
protected void m0(Path path) {
Objects.requireNonNull(path, "null path");
checkArgument(path.toString().startsWith(basePath.toString()), "Path %s is not under %s", path, basePath);
} | 3.26 |
hadoop_AbstractMultipartUploader_checkPartHandles_rdh | /**
* Utility method to validate partHandles.
*
* @param partHandles
* handles
* @throws IllegalArgumentException
* if the parts are invalid
*/
protected void checkPartHandles(Map<Integer, PartHandle> partHandles) {
checkArgument(!partHandles.isEmpty(), "Empty upload");
partHandles.keySet().stream().forEach(key -> checkArgument(key > 0, "Invalid part handle index %s", key));
} | 3.26 |
hadoop_MkdirOperation_getPathStatusExpectingDir_rdh | /**
 * Get the status of a path, optimized for paths
* where there is a directory marker or child entries.
*
* Under a magic path, there's no check for a file,
* just the listing.
*
* @param path
* path to probe.
* @return the status
* @throws IOException
* failure
*/
private S3AFileStatus getPathStatusExpectingDir(final Path path) throws IOException {
  S3AFileStatus status = probePathStatusOrNull(path, StatusProbeEnum.DIRECTORIES);
if ((status == null) && (!isMagicPath)) {
status = probePathStatusOrNull(path, StatusProbeEnum.FILE);
}
return status;
} | 3.26 |
hadoop_MkdirOperation_execute_rdh | /**
* Make the given path and all non-existent parents into
* directories.
*
* @return true if a directory was created or already existed
* @throws FileAlreadyExistsException
* there is a file at the path specified
* @throws IOException
* other IO problems
*/
@Override
@Retries.RetryTranslated
public Boolean execute() throws IOException {
LOG.debug("Making directory: {}",
dir);
if (dir.isRoot()) {
// fast exit for root.
return true;
}
// get the file status of the path.
  // this is done even for a magic path, to avoid always issuing PUT
  // requests. Doing that without a check would seem to be an
  // optimization, but it is not, because
  // 1. PUT is slower than HEAD
  // 2. Write capacity is less than read capacity on a shard
  // 3. It adds needless entries in versioned buckets, slowing
  //    down subsequent operations.
  FileStatus fileStatus = getPathStatusExpectingDir(dir);
if (fileStatus != null) {
if (fileStatus.isDirectory()) {
return true;
} else {
throw new FileAlreadyExistsException("Path is a file: " + dir);
}
}
// file status was null
// is the path magic?
// If so, we declare success without looking any further
if (isMagicPath) {
// Create the marker file immediately,
// and don't delete markers
f0.createFakeDirectory(dir, true);
return true;
}
// Walk path to root, ensuring closest ancestor is a directory, not file
Path fPart = dir.getParent();
try {
while ((fPart != null) && (!fPart.isRoot())) {
fileStatus = getPathStatusExpectingDir(fPart);
if (fileStatus == null) {
// nothing at this path, so validate the parent
fPart = fPart.getParent();
          continue;
        }
if (fileStatus.isDirectory()) {
// the parent dir exists. All is good.
break;
}
// there's a file at the parent entry
throw new FileAlreadyExistsException(String.format("Can't make directory for path '%s' since it is a file.", fPart));
}
} catch (AccessDeniedException e) {
LOG.info("mkdirs({}}: Access denied when looking" + " for parent directory {}; skipping checks", dir, fPart);
LOG.debug("{}", e, e);
} | 3.26 |
hadoop_MkdirOperation_probePathStatusOrNull_rdh | /**
* Get the status of a path, downgrading FNFE to null result.
*
* @param path
* path to probe.
* @param probes
* probes to exec
* @return the status or null
* @throws IOException
* failure other than FileNotFound
*/
private S3AFileStatus probePathStatusOrNull(final Path path, final Set<StatusProbeEnum> probes) throws IOException {
try {
return f0.probePathStatus(path, probes);
} catch (FileNotFoundException fnfe) {
return null;
}
} | 3.26 |
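The probe-or-null idiom above (catch FileNotFoundException, return null, let every other IOException propagate) generalizes beyond S3A. Below is a minimal, self-contained sketch of the same pattern; the `IOProbe` functional interface is introduced purely for illustration and is not part of any Hadoop API.

```java
import java.io.FileNotFoundException;
import java.io.IOException;

public final class ProbeOrNull {

  /** Hypothetical functional interface for an IO call that may throw. */
  @FunctionalInterface
  interface IOProbe<T> {
    T probe() throws IOException;
  }

  /** Run the probe, mapping "not found" to null and rethrowing everything else. */
  static <T> T probeOrNull(IOProbe<T> probe) throws IOException {
    try {
      return probe.probe();
    } catch (FileNotFoundException fnfe) {
      return null;   // absence is an expected outcome, not an error
    }
  }

  public static void main(String[] args) throws IOException {
    String result = probeOrNull(() -> { throw new FileNotFoundException("missing"); });
    System.out.println(result);   // prints: null
  }
}
```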
hadoop_CommitTaskStage_getTaskManifest_rdh | /**
* Get the manifest.
*
* @return The manifest.
*/
public TaskManifest getTaskManifest() {
return taskManifest;
} | 3.26 |
hadoop_CommitTaskStage_getPath_rdh | /**
* Get the manifest path.
*
* @return The path the manifest was saved to.
*/
public Path getPath() {
return path;
} | 3.26 |
hadoop_CommitTaskStage_executeStage_rdh | /**
* Scan the task attempt dir then save the manifest.
* A snapshot of the IOStats will be included in the manifest;
* this includes the scan time.
*
* @param arguments
* arguments to the function.
* @return the path the manifest was saved to, and the manifest.
* @throws IOException
* IO failure.
*/
@Override
protected CommitTaskStage.Result executeStage(final Void arguments) throws IOException {
LOG.info("{}: Committing task \"{}\"", getName(), getTaskAttemptId());
// execute the scan
final TaskAttemptScanDirectoryStage scanStage = new TaskAttemptScanDirectoryStage(getStageConfig());
TaskManifest manifest = scanStage.apply(arguments);
// add the scan as task commit. It's not quite, as it doesn't include
// the saving, but ...
scanStage.addExecutionDurationToStatistics(getIOStatistics(), OP_STAGE_TASK_COMMIT);
// save a snapshot of the IO Statistics
final IOStatisticsSnapshot manifestStats = snapshotIOStatistics();
manifestStats.aggregate(getIOStatistics());
manifest.setIOStatistics(manifestStats);
// Now save with rename
Path manifestPath = new SaveTaskManifestStage(getStageConfig()).apply(manifest);
return new CommitTaskStage.Result(manifestPath, manifest);
} | 3.26 |
hadoop_ValueAggregatorMapper_map_rdh | /**
* the map function. It iterates through the value aggregator descriptor
* list to generate aggregation id/value pairs and emit them.
*/
public void map(K1 key, V1 value, Context context) throws IOException, InterruptedException {
Iterator<?> iter = ValueAggregatorJobBase.aggregatorDescriptorList.iterator();
while (iter.hasNext()) {
      ValueAggregatorDescriptor ad = (ValueAggregatorDescriptor) iter.next();
      Iterator<Entry<Text, Text>> ens = ad.generateKeyValPairs(key, value).iterator();
while (ens.hasNext()) {
Entry<Text, Text> en = ens.next();
context.write(en.getKey(), en.getValue());
}
}
} | 3.26 |
hadoop_ExternalSPSFilePathCollector_processPath_rdh | /**
* Recursively scan the given path and add the file info to SPS service for
* processing.
*/
private long processPath(Long startID, String childPath) {
long pendingWorkCount = 0;// to be satisfied file counter
for (byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME; ;) {
final DirectoryListing children;
try {
children = dfs.getClient().listPaths(childPath, lastReturnedName, false);
} catch (IOException e) {
f0.warn(("Failed to list directory " + childPath) + ". Ignore the directory and continue.", e);
return pendingWorkCount;
}
      if (children == null) {
        if (f0.isDebugEnabled()) {
          f0.debug("The scanning start dir/sub dir " + childPath
              + " does not have any children.");
        }
        return pendingWorkCount;
      }
for (HdfsFileStatus child : children.getPartialListing()) {
if (child.isFile()) {
service.addFileToProcess(new ItemInfo(startID, child.getFileId()), false);
checkProcessingQueuesFree();
pendingWorkCount++;// increment to be satisfied file count
} else {
          String childFullPathName = child.getFullName(childPath);
if (child.isDirectory()) {
if (!childFullPathName.endsWith(Path.SEPARATOR)) {
childFullPathName = childFullPathName + Path.SEPARATOR;
}
pendingWorkCount += processPath(startID, childFullPathName);
}
}
}
      if (children.hasMore()) {
        lastReturnedName = children.getLastName();
      } else {
        return pendingWorkCount;
      }
}
} | 3.26 |
hadoop_ExternalSPSFilePathCollector_remainingCapacity_rdh | /**
* Returns queue remaining capacity.
*/
public int remainingCapacity() {
int size = service.processingQueueSize();
int remainingSize = 0;
if (size < maxQueueLimitToScan) {
remainingSize = maxQueueLimitToScan - size;
}
if (f0.isDebugEnabled()) {
f0.debug("SPS processing Q -> maximum capacity:{}, current size:{}," + " remaining size:{}", maxQueueLimitToScan, size, remainingSize);
}
return remainingSize;
} | 3.26 |
hadoop_RawErasureDecoder_m0_rdh | /**
 * Should be called when releasing this coder. It is a good chance to release
 * encoding or decoding buffers.
 */
public void m0() {
// Nothing to do here.
} | 3.26 |
hadoop_RawErasureDecoder_allowChangeInputs_rdh | /**
* Allow change into input buffers or not while perform encoding/decoding.
*
* @return true if it's allowed to change inputs, false otherwise
*/
public boolean allowChangeInputs() {
return coderOptions.allowChangeInputs();
} | 3.26 |
hadoop_RawErasureDecoder_decode_rdh | /**
* Decode with inputs and erasedIndexes, generates outputs. More see above.
*
 * Note, for both input and output ECChunks, no mixing of on-heap buffers and
 * direct buffers is allowed.
*
* @param inputs
* input buffers to read data from
* @param erasedIndexes
* indexes of erased units in the inputs array
* @param outputs
* output buffers to put decoded data into according to
* erasedIndexes, ready for read after the call
* @throws IOException
* if the decoder is closed
*/
public synchronized void decode(ECChunk[] inputs, int[] erasedIndexes, ECChunk[] outputs) throws IOException {
ByteBuffer[] newInputs = CoderUtil.toBuffers(inputs);
ByteBuffer[] newOutputs = CoderUtil.toBuffers(outputs);
decode(newInputs, erasedIndexes, newOutputs);
} | 3.26 |
hadoop_ExternalSPSBeanMetrics_close_rdh | /**
* Unregister the JMX interfaces.
 */
public void close() {
if (externalSPSBeanName != null) {
MBeans.unregister(externalSPSBeanName);
externalSPSBeanName = null;
}
} | 3.26 |
hadoop_Sender_op_rdh | /**
 * Initialize an operation.
 */
private static void op(final DataOutput out, final Op op) throws IOException {
  out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
op.write(out);
} | 3.26 |
hadoop_CreateDirsMojo_getTestsThreadCount_rdh | /**
* Get the real number of parallel threads.
*
* @return int number of threads
*/
public int getTestsThreadCount() {
int threadCount = 1;
if (testsThreadCount != null) {
String trimProp = testsThreadCount.trim();
if (trimProp.endsWith("C")) {
      double multiplier = Double.parseDouble(trimProp.substring(0, trimProp.length() - 1));
      double calculated = multiplier * ((double) Runtime.getRuntime().availableProcessors());
threadCount = (calculated > 0.0) ? Math.max(((int) (calculated)), 1) : 0;
} else {
threadCount = Integer.parseInt(testsThreadCount);
}
}
return threadCount;
} | 3.26 |
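To make the "C" multiplier arithmetic concrete, here is a self-contained re-implementation of the parsing rule above (for example, "0.5C" on an 8-core machine yields 4 threads). It mirrors the snippet's logic rather than calling the Maven plugin class; names are illustrative only.

```java
public final class ThreadCountExample {

  /** Same parsing rule as the snippet: "NC" multiplies the core count, otherwise parse an int. */
  static int resolveThreadCount(String prop, int availableProcessors) {
    if (prop == null) {
      return 1;
    }
    String trimmed = prop.trim();
    if (trimmed.endsWith("C")) {
      double multiplier = Double.parseDouble(trimmed.substring(0, trimmed.length() - 1));
      double calculated = multiplier * availableProcessors;
      return calculated > 0.0 ? Math.max((int) calculated, 1) : 0;
    }
    return Integer.parseInt(trimmed);
  }

  public static void main(String[] args) {
    System.out.println(resolveThreadCount("0.5C", 8));  // 4
    System.out.println(resolveThreadCount("2C", 8));    // 16
    System.out.println(resolveThreadCount("3", 8));     // 3
    System.out.println(resolveThreadCount("0.1C", 4));  // 1 (rounded up to at least one thread)
  }
}
```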
hadoop_NativeAzureFileSystemHelper_logAllLiveStackTraces_rdh | /* Helper method that logs stack traces from all live threads. */
public static void logAllLiveStackTraces() {
for (Map.Entry<Thread, StackTraceElement[]> entry : Thread.getAllStackTraces().entrySet()) {
f0.debug("Thread " + entry.getKey().getName());
    StackTraceElement[] trace = entry.getValue();
for (int j = 0; j < trace.length; j++) {
f0.debug("\tat " + trace[j]);
}
}
} | 3.26 |
hadoop_NativeAzureFileSystemHelper_validateReadArgs_rdh | /**
* Validation code, based on
* {@code FSInputStream.validatePositionedReadArgs()}.
*
* @param buffer
* destination buffer
* @param offset
* offset within the buffer
* @param length
* length of bytes to read
* @throws EOFException
* if the position is negative
* @throws IndexOutOfBoundsException
* if there isn't space for the amount of
* data requested.
* @throws IllegalArgumentException
* other arguments are invalid.
*/
static void validateReadArgs(byte[] buffer, int offset, int length) throws EOFException {
Preconditions.checkArgument(length >= 0, "length is negative");
Preconditions.checkArgument(buffer != null, "Null buffer");
  if ((buffer.length - offset) < length) {
    throw new IndexOutOfBoundsException(FSExceptionMessages.TOO_MANY_BYTES_FOR_DEST_BUFFER
        + ": request length=" + length
        + ", with offset =" + offset
        + "; buffer capacity =" + (buffer.length - offset));
  }
} | 3.26 |
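The buffer arithmetic above is easy to get wrong, so here is a standalone sketch of the same positioned-read validation using plain JDK checks instead of the Hadoop/Guava helpers; the class and method names are illustrative, and the EOFException in the signature is kept only to mirror the snippet.

```java
import java.io.EOFException;

public final class ReadArgsCheck {

  /** Validate (buffer, offset, length) the same way the snippet does. */
  static void validateReadArgs(byte[] buffer, int offset, int length) throws EOFException {
    if (length < 0) {
      throw new IllegalArgumentException("length is negative");
    }
    if (buffer == null) {
      throw new IllegalArgumentException("Null buffer");
    }
    if (buffer.length - offset < length) {
      throw new IndexOutOfBoundsException(
          "request length=" + length + ", with offset=" + offset
              + "; buffer capacity=" + (buffer.length - offset));
    }
  }

  public static void main(String[] args) throws EOFException {
    byte[] buf = new byte[16];
    validateReadArgs(buf, 8, 8);    // fits exactly: 16 - 8 >= 8
    try {
      validateReadArgs(buf, 12, 8); // only 4 bytes left after the offset
    } catch (IndexOutOfBoundsException expected) {
      System.out.println("rejected: " + expected.getMessage());
    }
  }
}
```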
hadoop_EnumSetParam_toString_rdh | /**
* Convert an EnumSet to a string of comma separated values.
*/
public static <E extends Enum<E>> String toString(EnumSet<E> set) {
if ((set == null) || set.isEmpty()) {
return "";
} else {
final StringBuilder b = new StringBuilder();
final Iterator<E> i = set.iterator();
b.append(i.next());
      while (i.hasNext()) {
        b.append(',').append(i.next());
}
return b.toString();
}
} | 3.26 |
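A quick, self-contained demonstration of the comma-join logic above against a throwaway enum, so the empty-set and multi-element cases are visible; it re-implements the loop rather than depending on the WebHDFS parameter class, and the enum is invented for the example.

```java
import java.util.EnumSet;
import java.util.Iterator;

public final class EnumSetJoinExample {

  enum Flag { CREATE, OVERWRITE, APPEND }

  /** Same shape as the snippet: empty set -> "", otherwise first element then ",element". */
  static <E extends Enum<E>> String join(EnumSet<E> set) {
    if (set == null || set.isEmpty()) {
      return "";
    }
    StringBuilder b = new StringBuilder();
    Iterator<E> i = set.iterator();
    b.append(i.next());
    while (i.hasNext()) {
      b.append(',').append(i.next());
    }
    return b.toString();
  }

  public static void main(String[] args) {
    System.out.println(join(EnumSet.noneOf(Flag.class)));           // ""
    System.out.println(join(EnumSet.of(Flag.CREATE, Flag.APPEND))); // "CREATE,APPEND"
  }
}
```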
hadoop_ImageVisitor_visitEnclosingElement_rdh | // Convenience methods to automatically convert value types to strings
void visitEnclosingElement(ImageElement element, ImageElement key, int value) throws IOException {
visitEnclosingElement(element, key, Integer.toString(value));
} | 3.26 |
hadoop_ImageVisitor_visit_rdh | // Convenience methods to automatically convert numeric value types to strings
void visit(ImageElement element, int value) throws IOException {
visit(element, Integer.toString(value));
} | 3.26 |
hadoop_ProtobufWrapperLegacy_isUnshadedProtobufMessage_rdh | /**
* Is a message an unshaded protobuf message?
*
* @param payload
* payload
* @return true if protobuf.jar is on the classpath and the payload is a Message
*/
public static boolean isUnshadedProtobufMessage(Object payload) {
if (PROTOBUF_KNOWN_NOT_FOUND.get()) {
// protobuf is known to be absent. fail fast without examining
// jars or generating exceptions.
return false;
}
// load the protobuf message class.
// if it does not load, then the payload is guaranteed not to be
// an unshaded protobuf message
// this relies on classloader caching for performance
try {
Class<?> protobufMessageClazz = Class.forName("com.google.protobuf.Message");
return protobufMessageClazz.isAssignableFrom(payload.getClass());
} catch (ClassNotFoundException e) {
PROTOBUF_KNOWN_NOT_FOUND.set(true);
return false;
}
} | 3.26 |
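The snippet's trick (probe for a class once, cache a negative result so later calls skip the classloader entirely) works for any optional dependency. Below is a generic, self-contained sketch of that pattern; `OptionalClassProbe` is an invented name, not a Hadoop class.

```java
import java.util.concurrent.atomic.AtomicBoolean;

public final class OptionalClassProbe {

  /** Set once we learn the optional class is absent, so later calls fail fast. */
  private static final AtomicBoolean KNOWN_NOT_FOUND = new AtomicBoolean(false);

  private static final String OPTIONAL_CLASS = "com.google.protobuf.Message";

  /** True iff the optional class is on the classpath and payload is an instance of it. */
  static boolean isInstanceOfOptionalClass(Object payload) {
    if (KNOWN_NOT_FOUND.get()) {
      return false;                       // absence already established; no reflection
    }
    try {
      Class<?> clazz = Class.forName(OPTIONAL_CLASS);   // classloader caches the lookup
      return clazz.isAssignableFrom(payload.getClass());
    } catch (ClassNotFoundException e) {
      KNOWN_NOT_FOUND.set(true);          // remember the miss for every future call
      return false;
    }
  }

  public static void main(String[] args) {
    System.out.println(isInstanceOfOptionalClass("a plain string"));  // false either way
  }
}
```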
hadoop_BalanceProcedure_delayMillisBeforeRetry_rdh | /**
* The time in milliseconds the procedure should wait before retry.
*/
public long delayMillisBeforeRetry() {
return delayDuration;
} | 3.26 |
hadoop_BalanceProcedure_name_rdh | /**
* Get the procedure name.
*/
public String name() {
return name;
} | 3.26 |
hadoop_BalanceProcedure_isSchedulerShutdown_rdh | /**
* The active flag.
*/
protected boolean isSchedulerShutdown() {
  return job.isSchedulerShutdown();
} | 3.26 |
hadoop_BalanceProcedure_nextProcedure_rdh | /**
* Get the next procedure.
*/
public String nextProcedure() {
return nextProcedure;
} | 3.26 |
hadoop_AbstractAuditSpanImpl_close_rdh | /**
* Invoke {@link AuditSpan#deactivate()}.
* This is final: subclasses MUST override the
* {@code deactivate()} method.
*/
@Override
public final void close() {
deactivate();
} | 3.26 |
hadoop_ProcessIdFileReader_getProcessId_rdh | /**
* Get the process id from specified file path.
* Parses each line to find a valid number
* and returns the first one found.
*
* @return Process Id if obtained from path specified else null
* @throws IOException
*/
public static String getProcessId(Path path) throws IOException {
if (path == null) {
throw new IOException("Trying to access process id from a null path");
}
LOG.debug("Accessing pid from pid file {}", path); String processId = null;
BufferedReader bufReader = null; try {
File file = new File(path.toString());
if (file.exists()) {
FileInputStream fis = new FileInputStream(file);
bufReader = new BufferedReader(new InputStreamReader(fis, "UTF-8"));
while (true) {
String line = bufReader.readLine();
if (line == null) {
break;
}
String temp = line.trim();
if (!temp.isEmpty()) {
if (Shell.WINDOWS) {
// On Windows, pid is expected to be a container ID, so find first
// line that parses successfully as a container ID.
            try {
ContainerId.fromString(temp);
processId = temp;
break;
} catch (Exception e) {
// do nothing
}
} else {
// Otherwise, find first line containing a numeric pid.
try {
long pid = Long.parseLong(temp);
              if (pid > 0) {
                processId = temp;
break;
}
} catch (Exception e) {
// do nothing
}
}
}
}
}
} finally {
if (bufReader != null) {
bufReader.close();
}
}LOG.debug("Got pid {} from path {}", processId != null ? processId : "null", path);
return processId;
} | 3.26 |
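A minimal usage sketch for the pid-file reader above. The package of ProcessIdFileReader is an assumption, and the pid-file path is a placeholder; only the static method signature comes from the snippet.

```java
// Sketch only: the ProcessIdFileReader package below is an assumption.
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.util.ProcessIdFileReader;

public class PidFileExample {
  public static void main(String[] args) throws Exception {
    // Placeholder path; on Linux the file is expected to hold a numeric pid on some line.
    Path pidFile = new Path("/tmp/container_example.pid");
    String pid = ProcessIdFileReader.getProcessId(pidFile);
    System.out.println(pid == null ? "no pid found" : "pid=" + pid);
  }
}
```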
hadoop_JobContextImpl_getJobConf_rdh | /**
* Get the job Configuration
*
* @return JobConf
*/
public JobConf getJobConf() {
return job;
} | 3.26 |
hadoop_JobContextImpl_getProgressible_rdh | /**
* Get the progress mechanism for reporting progress.
*
* @return progress mechanism
*/
public Progressable getProgressible() {
  return progress;
} | 3.26 |
hadoop_AWSRequestAnalyzer_writing_rdh | /**
* A write request of some form.
*
* @param verb
* verb
* @param key
* object/prefix, etc.
* @param size
* nullable size
* @return request info
*/
private RequestInfo writing(final String verb, final String key, final Number size) {
return request(verb, true, key, size);
} | 3.26 |
hadoop_AWSRequestAnalyzer_request_rdh | /**
* A request.
*
* @param verb
* verb
* @param mutating
* does this update the store
* @param key
* object/prefix, etc.
* @param size
* nullable size
* @return request info
*/
private RequestInfo request(final String verb, final boolean mutating, final String key, final Number size) {
return new RequestInfo(verb, mutating, key, size);
} | 3.26 |
hadoop_AWSRequestAnalyzer_m0_rdh | /**
* A read request.
*
* @param verb
* verb
* @param key
* object/prefix, etc.
* @param size
* nullable size
* @return request info
*/
private RequestInfo m0(final String verb, final String key, final Number size) {
  return request(verb, false, key, size);
} | 3.26 |
hadoop_AWSRequestAnalyzer_isRequestMultipartIO_rdh | /**
* Predicate which returns true if the request is part of the
* multipart upload API -and which therefore must be rejected
* if multipart upload is disabled.
*
* @param request
* request
* @return true if the transfer manager creates them.
*/
public static boolean isRequestMultipartIO(final Object request) {
  return (request instanceof UploadPartCopyRequest)
      || (request instanceof CompleteMultipartUploadRequest)
      || (request instanceof CreateMultipartUploadRequest)
      || (request instanceof UploadPartRequest);
} | 3.26 |
hadoop_AWSRequestAnalyzer_isRequestNotAlwaysInSpan_rdh | /**
* Predicate which returns true if the request is of a kind which
* could be outside a span because of how the AWS SDK generates them.
*
* @param request
* request
* @return true if the transfer manager creates them.
*/
public static boolean isRequestNotAlwaysInSpan(final Object request) {
  return (request instanceof UploadPartCopyRequest)
      || (request instanceof CompleteMultipartUploadRequest)
      || (request instanceof GetBucketLocationRequest);
} | 3.26 |
hadoop_AWSRequestAnalyzer_analyze_rdh | /**
* Given an AWS request, try to analyze it to operation,
* read/write and path.
*
* @param request
* request.
* @return information about the request.
*/
public RequestInfo analyze(SdkRequest request) {
// this is where Scala's case statement would massively
// simplify life.
// Please Keep in Alphabetical Order.
if (request instanceof AbortMultipartUploadRequest) {
return writing(MULTIPART_UPLOAD_ABORTED, ((AbortMultipartUploadRequest) (request)).key(), 0);
} else if (request instanceof CompleteMultipartUploadRequest) {
CompleteMultipartUploadRequest r = ((CompleteMultipartUploadRequest) (request));
return writing(MULTIPART_UPLOAD_COMPLETED, r.key(), r.multipartUpload().parts().size());
} else if (request instanceof CreateMultipartUploadRequest) {
return writing(MULTIPART_UPLOAD_STARTED, ((CreateMultipartUploadRequest) (request)).key(), 0);
} else if (request instanceof DeleteObjectRequest) {
// DeleteObject: single object
return writing(OBJECT_DELETE_REQUEST, ((DeleteObjectRequest) (request)).key(), 1);
} else if (request instanceof DeleteObjectsRequest) {
// DeleteObjects: bulk delete
// use first key as the path
DeleteObjectsRequest r = ((DeleteObjectsRequest) (request));
List<ObjectIdentifier> objectIdentifiers = r.delete().objects();
return writing(OBJECT_BULK_DELETE_REQUEST, objectIdentifiers.isEmpty() ? null : objectIdentifiers.get(0).key(), objectIdentifiers.size());
    } else if (request instanceof GetBucketLocationRequest) {
      GetBucketLocationRequest r = (GetBucketLocationRequest) request;
      return m0(STORE_EXISTS_PROBE, r.bucket(), 0);
} else if (request instanceof GetObjectRequest) {
GetObjectRequest r = ((GetObjectRequest) (request));
return m0(ACTION_HTTP_GET_REQUEST, r.key(), sizeFromRangeHeader(r.range()));
} else if (request instanceof HeadObjectRequest) {
return m0(ACTION_HTTP_HEAD_REQUEST, ((HeadObjectRequest) (request)).key(), 0);
    } else if (request instanceof ListMultipartUploadsRequest) {
      ListMultipartUploadsRequest r = (ListMultipartUploadsRequest) request;
return m0(MULTIPART_UPLOAD_LIST, r.prefix(), r.maxUploads());
} else if (request instanceof ListObjectsRequest) {
ListObjectsRequest r = ((ListObjectsRequest) (request));
return m0(OBJECT_LIST_REQUEST, r.prefix(), r.maxKeys());
} else if (request instanceof ListObjectsV2Request) {
ListObjectsV2Request r = ((ListObjectsV2Request) (request));
return m0(OBJECT_LIST_REQUEST, r.prefix(), r.maxKeys());
} else if (request instanceof PutObjectRequest) {
PutObjectRequest r = ((PutObjectRequest) (request));
return writing(OBJECT_PUT_REQUEST, r.key(), 0);
} else if (request instanceof SelectObjectContentRequest) {
SelectObjectContentRequest r = ((SelectObjectContentRequest) (request));
return m0(OBJECT_SELECT_REQUESTS, r.key(), 1);
} else if (request instanceof UploadPartRequest) {
UploadPartRequest r = ((UploadPartRequest) (request));
      return writing(MULTIPART_UPLOAD_PART_PUT, r.key(), r.contentLength());
}
// no explicit support, return classname
return writing(request.getClass().getName(), null, 0);
} | 3.26 |
hadoop_AbfsClientContextBuilder_build_rdh | /**
* Build the context and get the instance with the properties selected.
*
* @return an instance of AbfsClientContext.
*/
public AbfsClientContext build() {
// validate the values
return new AbfsClientContext(exponentialRetryPolicy, abfsPerfTracker, abfsCounters);
} | 3.26 |
hadoop_AbfsClientThrottlingIntercept_updateMetrics_rdh | /**
* Updates the metrics for successful and failed read and write operations.
*
* @param operationType
* Only applicable for read and write operations.
* @param abfsHttpOperation
* Used for status code and data transferred.
*/
@Override
public void updateMetrics(AbfsRestOperationType operationType, AbfsHttpOperation abfsHttpOperation) {
if (abfsHttpOperation == null) {
return;
}
int status = abfsHttpOperation.getStatusCode();
long contentLength = 0;
// If the socket is terminated prior to receiving a response, the HTTP
// status may be 0 or -1. A status less than 200 or greater than or equal
// to 500 is considered an error.
boolean isFailedOperation = (status < HttpURLConnection.HTTP_OK) || (status >= HttpURLConnection.HTTP_INTERNAL_ERROR);
// If status code is 503, it is considered as a throttled operation.
boolean isThrottledOperation = status == HTTP_UNAVAILABLE;
    switch (operationType) {
case Append :
contentLength = abfsHttpOperation.getBytesSent();
if (contentLength == 0) {
/* Signifies the case where we could not update the bytesSent due to
throttling but there were some expectedBytesToBeSent.
*/
if (updateBytesTransferred(isThrottledOperation, abfsHttpOperation)) {
LOG.debug("Updating metrics due to throttling for path {}", abfsHttpOperation.getConnUrl().getPath());
contentLength = abfsHttpOperation.getExpectedBytesToBeSent();
}
}
if (contentLength > 0) {
writeThrottler.addBytesTransferred(contentLength, isFailedOperation);
}
break;
case ReadFile :
String range = abfsHttpOperation.getConnection().getRequestProperty(HttpHeaderConfigurations.RANGE);
contentLength = getContentLengthIfKnown(range);
if (contentLength > 0) {
readThrottler.addBytesTransferred(contentLength, isFailedOperation);
}
break;
default :
        break;
    }
} | 3.26 |
hadoop_AbfsClientThrottlingIntercept_setAnalyzer_rdh | /**
* Sets the analyzer for the intercept.
*
* @param name
* Name of the analyzer.
* @param abfsConfiguration
* The configuration.
* @return AbfsClientThrottlingAnalyzer instance.
*/
private AbfsClientThrottlingAnalyzer setAnalyzer(String name, AbfsConfiguration abfsConfiguration) {
return new AbfsClientThrottlingAnalyzer(name, abfsConfiguration);
} | 3.26 |
hadoop_AbfsClientThrottlingIntercept_initializeSingleton_rdh | /**
* Creates a singleton object of the AbfsClientThrottlingIntercept.
* which is shared across all filesystem instances.
*
* @param abfsConfiguration
* configuration set.
* @return singleton object of intercept.
*/
static AbfsClientThrottlingIntercept initializeSingleton(AbfsConfiguration abfsConfiguration) {
if (singleton == null) {
LOCK.lock();
try {
if (singleton == null) {
singleton = new AbfsClientThrottlingIntercept(abfsConfiguration);
LOG.debug("Client-side throttling is enabled for the ABFS file system.");
}
} finally {
LOCK.unlock();
}
}
return singleton;
} | 3.26 |
hadoop_AbfsClientThrottlingIntercept_updateBytesTransferred_rdh | /**
* Updates the metrics for the case when response code signifies throttling
* but there are some expected bytes to be sent.
*
* @param isThrottledOperation
* returns true if status code is HTTP_UNAVAILABLE
* @param abfsHttpOperation
* Used for status code and data transferred.
* @return true if the operation is throttled and has some bytes to transfer.
*/
private boolean updateBytesTransferred(boolean isThrottledOperation, AbfsHttpOperation abfsHttpOperation) {
return isThrottledOperation && (abfsHttpOperation.getExpectedBytesToBeSent() > 0);
} | 3.26 |
hadoop_AbfsClientThrottlingIntercept_getWriteThrottler_rdh | /**
* Returns the analyzer for write operations.
*
* @return AbfsClientThrottlingAnalyzer for write.
*/
AbfsClientThrottlingAnalyzer getWriteThrottler() {
  return writeThrottler;
} | 3.26 |
hadoop_AbfsClientThrottlingIntercept_getReadThrottler_rdh | /**
* Returns the analyzer for read operations.
*
* @return AbfsClientThrottlingAnalyzer for read.
*/
AbfsClientThrottlingAnalyzer getReadThrottler() {
return readThrottler;
} | 3.26 |
hadoop_AbfsClientThrottlingIntercept_sendingRequest_rdh | /**
* Called before the request is sent. Client-side throttling
* uses this to suspend the request, if necessary, to minimize errors and
* maximize throughput.
*/
@Override
public void sendingRequest(AbfsRestOperationType operationType, AbfsCounters abfsCounters) {
switch (operationType) {
case ReadFile :
if (readThrottler.suspendIfNecessary() && (abfsCounters != null)) {
abfsCounters.incrementCounter(AbfsStatistic.READ_THROTTLES, 1);
}
break;
case Append :
if (writeThrottler.suspendIfNecessary() && (abfsCounters != null)) {
        abfsCounters.incrementCounter(AbfsStatistic.WRITE_THROTTLES, 1);
}
break;
default :
break;
}
} | 3.26 |
hadoop_RollingWindow_isStaleNow_rdh | /**
* Check whether the last time that the bucket was updated is no longer
* covered by rolling window.
*
* @param time
* the current time
* @return true if the bucket state is stale
*/
boolean isStaleNow(long time) {
long utime = updateTime.get();
return (utime == (-1)) || ((time - utime) >= windowLenMs);
} | 3.26 |
hadoop_RollingWindow_inc_rdh | /**
* Increment the bucket. It assumes that staleness check is already
* performed. We do not need to update the {@link #updateTime} because as
* long as the {@link #updateTime} belongs to the current view of the
* rolling window, the algorithm works fine.
*
* @param delta
*/
void inc(long delta) {
value.addAndGet(delta);
} | 3.26 |
hadoop_RollingWindow_incAt_rdh | /**
* When an event occurs at the specified time, this method reflects that in
* the rolling window.
* <p>
*
* @param time
* the time at which the event occurred
* @param delta
* the delta that will be added to the window
*/
public void incAt(long time, long delta) {
int bi = computeBucketIndex(time);
Bucket bucket = buckets[bi];
// If the last time the bucket was updated is out of the scope of the
// rolling window, reset the bucket.
if (bucket.isStaleNow(time)) {
      bucket.safeReset(time);
    }
bucket.inc(delta);
} | 3.26 |
hadoop_RollingWindow_safeReset_rdh | /**
* Safely reset the bucket state considering concurrent updates (inc) and
* resets.
*
* @param time
* the current time
*/
void safeReset(long time) {
// At any point in time, only one thread is allowed to reset the
// bucket
synchronized(this) {
if (isStaleNow(time)) {
// reset the value before setting the time, it allows other
// threads to safely assume that the value is updated if the
// time is not stale
value.set(0);
updateTime.set(time);
}
// else a concurrent thread has already reset it: do nothing
}
} | 3.26 |
hadoop_RollingWindow_getSum_rdh | /**
* Get value represented by this window at the specified time
* <p>
*
* If time lags behind the latest update time, the new updates are still
* included in the sum
*
* @param time
* @return number of events occurred in the past period
*/
public long getSum(long time) {
long sum = 0;
for (Bucket bucket : buckets) {
boolean stale = bucket.isStaleNow(time);
if (!stale) {
sum += bucket.value.get();
}
      if (LOG.isDebugEnabled()) {
        long bucketTime = bucket.updateTime.get();
        String timeStr = new Date(bucketTime).toString();
        LOG.debug("Sum: + " + sum + " Bucket: updateTime: " + timeStr + " ("
            + bucketTime + ") isStale " + stale + " at " + time);
      }
    }
    return sum;
} | 3.26 |
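To show how incAt and getSum interact with bucket staleness in the snippets above, here is a simplified, single-threaded re-implementation of the bucketized rolling-window idea, written only for illustration; it is not the Hadoop class, and its bucket indexing is a deliberately minimal stand-in.

```java
import java.util.concurrent.atomic.AtomicLong;

/** Simplified re-implementation of the bucketized rolling-window idea, for illustration only. */
public final class MiniRollingWindow {

  private final long windowLenMs;
  private final long bucketLenMs;
  private final long[] updateTime;   // last write time per bucket, -1 = never written
  private final AtomicLong[] value;  // event count per bucket

  MiniRollingWindow(long windowLenMs, int numBuckets) {
    this.windowLenMs = windowLenMs;
    this.bucketLenMs = windowLenMs / numBuckets;
    this.updateTime = new long[numBuckets];
    this.value = new AtomicLong[numBuckets];
    for (int i = 0; i < numBuckets; i++) {
      updateTime[i] = -1;
      value[i] = new AtomicLong();
    }
  }

  private boolean isStale(int bucket, long now) {
    long t = updateTime[bucket];
    return t == -1 || now - t >= windowLenMs;
  }

  /** Record delta events at the given time, resetting the bucket if its last use is stale. */
  void incAt(long time, long delta) {
    int bucket = (int) ((time / bucketLenMs) % value.length);
    if (isStale(bucket, time)) {
      value[bucket].set(0);
      updateTime[bucket] = time;
    }
    value[bucket].addAndGet(delta);
  }

  /** Sum of all buckets whose last update still falls inside the window ending at time. */
  long getSum(long time) {
    long sum = 0;
    for (int i = 0; i < value.length; i++) {
      if (!isStale(i, time)) {
        sum += value[i].get();
      }
    }
    return sum;
  }

  public static void main(String[] args) {
    MiniRollingWindow w = new MiniRollingWindow(60_000, 6);  // 1-minute window, 10s buckets
    w.incAt(0, 3);          // 3 events at t=0s
    w.incAt(30_000, 2);     // 2 events at t=30s
    System.out.println(w.getSum(40_000));  // 5: both buckets are inside the window
    System.out.println(w.getSum(80_000));  // 2: the t=0 bucket has aged out
  }
}
```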
hadoop_RouterView_preHead_rdh | /**
* View for the Router Web UI.
 */
public class RouterView extends TwoColumnLayout {
  @Override
protected void preHead(Page.HTML<__> html) {
commonPreHead(html);
setTitle("Router");
} | 3.26 |
hadoop_StateStoreMetrics_setLocationCache_rdh | /**
* set the count of the location cache access information.
*
* @param name
* Name of the record.
* @param count
* count of the record.
*/
public void setLocationCache(String name, long count) {
MutableGaugeLong counter = ((MutableGaugeLong) (registry.get(name)));
if (counter == null) {
counter = registry.newGauge(name, name, count);
}
counter.set(count);
} | 3.26 |
hadoop_StateStoreMetrics_setCacheLoading_rdh | /**
* Set the cache loading metrics for the state store interface.
*
* @param name
* Name of the record of the cache.
* @param value
* The time duration interval as the cache value.
*/
public void setCacheLoading(String name, long value) {
String cacheLoad = ("Cache" + name) + "Load";
MutableRate cacheLoadMetric = cacheLoadMetrics.get(cacheLoad);
if (cacheLoadMetric == null) {
cacheLoadMetric = registry.newRate(cacheLoad, name, false);
cacheLoadMetrics.put(cacheLoad, cacheLoadMetric);
}
cacheLoadMetrics.get(cacheLoad).add(value);
} | 3.26 |
hadoop_StateStoreMetrics_setCacheSize_rdh | /**
* Set the size of the cache for a State Store interface.
*
* @param name
* Name of the record to cache.
* @param size
* Number of records.
*/
public void setCacheSize(String name, int size) {
String counterName = ("Cache" + name) + "Size";
MutableGaugeInt v2 = cacheSizes.get(counterName);
if (v2 == null) {
v2 = registry.newGauge(counterName, name, size);
      cacheSizes.put(counterName, v2);
    }
v2.set(size);
} | 3.26 |
hadoop_StateStoreMetrics_getCacheLoadMetrics_rdh | /**
* Retrieve unmodifiable map of cache loading metrics.
*
* @return unmodifiable map of cache loading metrics.
*/
@VisibleForTesting
public Map<String, MutableRate> getCacheLoadMetrics() {
return Collections.unmodifiableMap(cacheLoadMetrics);
} | 3.26 |
hadoop_EntityColumn_getColumnQualifier_rdh | /**
*
* @return the column name value
*/
private String getColumnQualifier() {
return columnQualifier;
} | 3.26 |
hadoop_FsServerDefaults_write_rdh | // /////////////////////////////////////////
// Writable
// /////////////////////////////////////////
@Override
@InterfaceAudience.Private
public void write(DataOutput out) throws IOException {
out.writeLong(blockSize);
out.writeInt(bytesPerChecksum);
out.writeInt(writePacketSize);
out.writeShort(replication);
out.writeInt(fileBufferSize);
WritableUtils.writeEnum(out, checksumType);
out.writeByte(f0);
} | 3.26 |
hadoop_SCMStore_cleanResourceReferences_rdh | /**
* Clean all resource references to a cache resource that contain application
* ids pointing to finished applications. If the resource key does not exist,
* do nothing.
*
* @param key
* a unique identifier for a resource
* @throws YarnException
*/
@Private
public void cleanResourceReferences(String key) throws YarnException {
  Collection<SharedCacheResourceReference> refs = getResourceReferences(key);
  if (!refs.isEmpty()) {
Set<SharedCacheResourceReference> refsToRemove = new HashSet<SharedCacheResourceReference>();
for (SharedCacheResourceReference r : refs) {
if (!appChecker.isApplicationActive(r.getAppId())) {
// application in resource reference is dead, it is safe to remove the
// reference
refsToRemove.add(r);
}
}
if (refsToRemove.size() > 0) {
      removeResourceReferences(key, refsToRemove, false);
    }
}
} | 3.26 |
hadoop_SCMStore_createAppCheckerService_rdh | /**
* Create an instance of the AppChecker service via reflection based on the
* {@link YarnConfiguration#SCM_APP_CHECKER_CLASS} parameter.
*
* @param conf
* @return an instance of the AppChecker class
*/
@Private
@SuppressWarnings("unchecked")
public static AppChecker createAppCheckerService(Configuration conf) {
Class<? extends AppChecker> defaultCheckerClass;
try {
defaultCheckerClass = ((Class<? extends AppChecker>) (Class.forName(YarnConfiguration.DEFAULT_SCM_APP_CHECKER_CLASS)));
} catch (Exception e) {
throw new YarnRuntimeException("Invalid default scm app checker class" + YarnConfiguration.DEFAULT_SCM_APP_CHECKER_CLASS, e);
}
AppChecker checker = ReflectionUtils.newInstance(conf.getClass(YarnConfiguration.SCM_APP_CHECKER_CLASS, defaultCheckerClass, AppChecker.class), conf);
return checker;
} | 3.26 |
hadoop_StageAllocatorLowCostAligned_numCanFit_rdh | // numCanFit() - returns the maximal number of requestedResources can be
// allocated during the durationInterval without violating
// capacity constraints
public int numCanFit() {
return gangsCanFit;
} | 3.26 |
hadoop_StageAllocatorLowCostAligned_canAllocate_rdh | // canAllocate() - boolean function, returns whether requestedResources
// can be allocated during the durationInterval without
// violating capacity constraints
public boolean canAllocate() {
return gangsCanFit > 0;
} | 3.26 |
hadoop_LocalCacheDirectoryManager_incrementFileCountForPath_rdh | /**
* Increment the file count for a relative directory within the cache
*
* @param relPath
* the relative path
*/
public synchronized void incrementFileCountForPath(String relPath) {
relPath = (relPath == null) ? "" : relPath.trim();
Directory subDir = knownDirectories.get(relPath);
if (subDir == null) {
int dirnum = Directory.getDirectoryNumber(relPath);
totalSubDirectories = Math.max(dirnum, totalSubDirectories);
subDir = new Directory(dirnum);
nonFullDirectories.add(subDir);
knownDirectories.put(subDir.getRelativePath(), subDir);
}
if (subDir.incrementAndGetCount() >= perDirectoryFileLimit) {
nonFullDirectories.remove(subDir);
}
} | 3.26 |
hadoop_LocalCacheDirectoryManager_getCacheDirectoryRoot_rdh | /**
* Given a path to a directory within a local cache tree return the
* root of the cache directory.
*
* @param path
* the directory within a cache directory
* @return the local cache directory root or null if not found
 */
public static Path getCacheDirectoryRoot(Path path) {
while (path != null) {
String name = path.getName();
if (name.length() != 1) {
return path;
}
int dirnum = DIRECTORIES_PER_LEVEL;
try {
dirnum = Integer.parseInt(name, DIRECTORIES_PER_LEVEL);
} catch (NumberFormatException e) {
}
    if (dirnum >= DIRECTORIES_PER_LEVEL) {
      return path;
    }
    path = path.getParent();
  }
return path;
} | 3.26 |
hadoop_LocalCacheDirectoryManager_decrementFileCountForPath_rdh | /**
* This method will reduce the file count for the directory represented by
* path. The root directory of this Local cache directory manager is
* represented by an empty string.
*/
public synchronized void decrementFileCountForPath(String relPath) {
relPath = (relPath == null) ? "" : relPath.trim();
Directory subDir = knownDirectories.get(relPath);
int oldCount = subDir.getCount();
if ((subDir.decrementAndGetCount() < perDirectoryFileLimit) && (oldCount >= perDirectoryFileLimit)) {
nonFullDirectories.add(subDir);
}
} | 3.26 |
hadoop_LocalCacheDirectoryManager_getRelativePathForLocalization_rdh | /**
* This method will return relative path from the first available vacant
* directory.
*
* @return {@link String} relative path for localization
*/
public synchronized String getRelativePathForLocalization() {
if (nonFullDirectories.isEmpty()) {
totalSubDirectories++;
Directory newDir = new Directory(totalSubDirectories);
nonFullDirectories.add(newDir);
knownDirectories.put(newDir.getRelativePath(), newDir);
}
Directory subDir = nonFullDirectories.peek();
    if (subDir.incrementAndGetCount() >= perDirectoryFileLimit) {
      nonFullDirectories.remove();
    }
    return subDir.getRelativePath();
} | 3.26 |
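The methods above share one piece of bookkeeping: a queue of not-yet-full directories, incremented on allocation and re-armed on deletion. The toy sketch below shows only that bookkeeping; it is not the real class, its flat numbering is not the real hierarchical naming scheme, and the per-directory limit is hard-coded instead of read from configuration.

```java
import java.util.ArrayDeque;
import java.util.HashMap;
import java.util.Map;
import java.util.Queue;

/** Toy version of "bounded files per cache directory", for illustration only. */
public final class MiniCacheDirManager {

  private static final int PER_DIR_LIMIT = 3;   // toy limit; the real limit is configurable

  private final Map<String, Integer> counts = new HashMap<>();
  private final Queue<String> nonFull = new ArrayDeque<>();
  private int totalSubDirs = 0;

  /** Pick a directory with spare capacity, creating a new numbered one if needed. */
  String allocate() {
    if (nonFull.isEmpty()) {
      String newDir = String.valueOf(totalSubDirs++);
      counts.put(newDir, 0);
      nonFull.add(newDir);
    }
    String dir = nonFull.peek();
    int newCount = counts.merge(dir, 1, Integer::sum);
    if (newCount >= PER_DIR_LIMIT) {
      nonFull.remove();            // directory is now full
    }
    return dir;
  }

  /** A file was deleted: the directory may become eligible again. */
  void release(String dir) {
    int oldCount = counts.get(dir);
    counts.put(dir, oldCount - 1);
    if (oldCount >= PER_DIR_LIMIT && oldCount - 1 < PER_DIR_LIMIT) {
      nonFull.add(dir);
    }
  }

  public static void main(String[] args) {
    MiniCacheDirManager m = new MiniCacheDirManager();
    for (int i = 0; i < 7; i++) {
      System.out.println("file " + i + " -> dir " + m.allocate());
    }
    // dirs 0,0,0,1,1,1,2 : each directory holds at most 3 files
  }
}
```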
hadoop_SlowPeerTracker_isSlowPeerTrackerEnabled_rdh | /**
* If SlowPeerTracker is enabled, return true, else returns false.
*
* @return true if slow peer tracking is enabled, else false.
*/
public boolean isSlowPeerTrackerEnabled() {
return true;
} | 3.26 |
hadoop_SlowPeerTracker_getSlowNodes_rdh | /**
* Returns all tracking slow peers.
*
 * @param numNodes
 * maximum number of slow nodes to return.
 * @return list of slow node identifiers.
 */
public List<String> getSlowNodes(int numNodes) {
Collection<SlowPeerJsonReport> jsonReports = getJsonReports(numNodes);
ArrayList<String> slowNodes = new ArrayList<>();
    for (SlowPeerJsonReport jsonReport : jsonReports) {
slowNodes.add(jsonReport.getSlowNode());
}
if (!slowNodes.isEmpty()) {
f0.warn("Slow nodes list: " + slowNodes);
}
return slowNodes;
} | 3.26 |
hadoop_SlowPeerTracker_addReport_rdh | /**
* Add a new report. DatanodeIds can be the DataNodeIds or addresses
* We don't care as long as the caller is consistent.
*
* @param slowNode
* DataNodeId of the peer suspected to be slow.
* @param reportingNode
* DataNodeId of the node reporting on its peer.
* @param slowNodeMetrics
* Aggregate latency metrics of slownode as reported by the
* reporting node.
*/
public void addReport(String slowNode, String reportingNode, OutlierMetrics slowNodeMetrics) {
  ConcurrentMap<String, LatencyWithLastReportTime> nodeEntries = allReports.get(slowNode);
if (nodeEntries == null) {
// putIfAbsent guards against multiple writers.
    allReports.putIfAbsent(slowNode, new ConcurrentHashMap<>());
    nodeEntries = allReports.get(slowNode);
}
// Replace the existing entry from this node, if any.
nodeEntries.put(reportingNode, new LatencyWithLastReportTime(timer.monotonicNow(), slowNodeMetrics));
} | 3.26 |
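The insert-if-absent dance in addReport (get, putIfAbsent, get again) is a standard way to lazily create per-key sub-maps without locking. The self-contained sketch below shows the same idiom with plain JDK types, plus the computeIfAbsent one-liner that modern code often prefers; the class and values are invented for the example.

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public final class PerKeySubMapExample {

  private static final ConcurrentMap<String, ConcurrentMap<String, Long>> REPORTS =
      new ConcurrentHashMap<>();

  /** Same shape as addReport: lazily create the inner map, then record the entry. */
  static void addReport(String slowNode, String reportingNode, long latency) {
    ConcurrentMap<String, Long> nodeEntries = REPORTS.get(slowNode);
    if (nodeEntries == null) {
      // putIfAbsent guards against two reporters racing to create the map.
      REPORTS.putIfAbsent(slowNode, new ConcurrentHashMap<>());
      nodeEntries = REPORTS.get(slowNode);
    }
    nodeEntries.put(reportingNode, latency);
  }

  /** Equivalent, more compact form. */
  static void addReportCompact(String slowNode, String reportingNode, long latency) {
    REPORTS.computeIfAbsent(slowNode, k -> new ConcurrentHashMap<>())
        .put(reportingNode, latency);
  }

  public static void main(String[] args) {
    addReport("dn-1", "dn-2", 420L);
    addReportCompact("dn-1", "dn-3", 380L);
    System.out.println(REPORTS);   // {dn-1={dn-2=420, dn-3=380}} (ordering may vary)
  }
}
```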
hadoop_SlowPeerTracker_getReportsForNode_rdh | /**
* Retrieve the non-expired reports that mark a given DataNode
* as slow. Stale reports are excluded.
*
* @param slowNode
* target node Id.
* @return set of reports which implicate the target node as being slow.
*/
public Set<SlowPeerLatencyWithReportingNode> getReportsForNode(String slowNode) {
final ConcurrentMap<String, LatencyWithLastReportTime> nodeEntries = allReports.get(slowNode);
if ((nodeEntries == null) || nodeEntries.isEmpty()) {
return Collections.emptySet();
}
return filterNodeReports(nodeEntries, timer.monotonicNow());
} | 3.26 |
hadoop_SlowPeerTracker_getJsonReports_rdh | /**
* Retrieve reports in a structure for generating JSON, limiting the
* output to the top numNodes nodes i.e nodes with the most reports.
*
* @param numNodes
* number of nodes to return. This is to limit the
* size of the generated JSON.
*/
private Collection<SlowPeerJsonReport> getJsonReports(int numNodes) {
if (allReports.isEmpty()) {
return Collections.emptyList();
}
final PriorityQueue<SlowPeerJsonReport> topNReports = new PriorityQueue<>(allReports.size(), (o1, o2) -> Ints.compare(o1.getSlowPeerLatencyWithReportingNodes().size(), o2.getSlowPeerLatencyWithReportingNodes().size()));
final long now = timer.monotonicNow();
    for (Map.Entry<String, ConcurrentMap<String, LatencyWithLastReportTime>> entry
        : allReports.entrySet()) {
SortedSet<SlowPeerLatencyWithReportingNode> validReports = filterNodeReports(entry.getValue(), now);
if (!validReports.isEmpty()) {
if (topNReports.size() < numNodes) {
topNReports.add(new SlowPeerJsonReport(entry.getKey(), validReports));
} else if ((topNReports.peek() != null) && (topNReports.peek().getSlowPeerLatencyWithReportingNodes().size() < validReports.size())) {
// Remove the lowest element
topNReports.poll();
topNReports.add(new SlowPeerJsonReport(entry.getKey(), validReports));
}
}
}
return topNReports;
} | 3.26 |
hadoop_SlowPeerTracker_filterNodeReports_rdh | /**
* Filter the given reports to return just the valid ones.
*
* @param reports
* Current set of reports.
* @param now
* Current time.
* @return Set of valid reports that were created within last reportValidityMs millis.
*/
private SortedSet<SlowPeerLatencyWithReportingNode> filterNodeReports(ConcurrentMap<String, LatencyWithLastReportTime> reports, long now) {
final SortedSet<SlowPeerLatencyWithReportingNode> validReports = new TreeSet<>();
for (Map.Entry<String, LatencyWithLastReportTime> entry : reports.entrySet()) {
if ((now - entry.getValue().getTime()) < reportValidityMs) {
        OutlierMetrics outlierMetrics = entry.getValue().getLatency();
validReports.add(new SlowPeerLatencyWithReportingNode(entry.getKey(), outlierMetrics.getActualLatency(), outlierMetrics.getMedian(), outlierMetrics.getMad(), outlierMetrics.getUpperLimitLatency()));
}
}
return validReports;
} | 3.26 |
hadoop_SlowPeerTracker_getReportsForAllDataNodes_rdh | /**
* Retrieve all reports for all nodes. Stale reports are excluded.
*
* @return map from SlowNodeId {@literal ->} (set of nodes reporting peers).
*/
public Map<String, SortedSet<SlowPeerLatencyWithReportingNode>> getReportsForAllDataNodes() {
if (allReports.isEmpty()) {
return ImmutableMap.of();
}
final Map<String, SortedSet<SlowPeerLatencyWithReportingNode>> v2 = new HashMap<>();
final long now = timer.monotonicNow();
for (Map.Entry<String, ConcurrentMap<String, LatencyWithLastReportTime>> entry : allReports.entrySet()) {
      SortedSet<SlowPeerLatencyWithReportingNode> validReports = filterNodeReports(entry.getValue(), now);
      if (!validReports.isEmpty()) {
v2.put(entry.getKey(), validReports);
}
}
    return v2;
} | 3.26 |
hadoop_InternalOperations_rename_rdh | // rename w/ OVERWRITE
@SuppressWarnings("deprecation")
public void rename(FileSystem fs, final Path src, final Path dst, final Options... options) throws IOException {
  fs.rename(src, dst, options);
} | 3.26 |
hadoop_ShortWritable_hashCode_rdh | /**
* hash code
*/
@Override
public int hashCode() {
return value;
} | 3.26 |
hadoop_ShortWritable_set_rdh | /**
* Set the value of this ShortWritable.
*
* @param value
* input value.
*/
public void set(short value) {
  this.value = value;
} | 3.26 |
hadoop_ShortWritable_toString_rdh | /**
* Short values in string format
*/
@Override
public String toString() {
return Short.toString(value);
} | 3.26 |
hadoop_ShortWritable_write_rdh | /**
* write short value
*/
@Override
public void write(DataOutput out) throws IOException {
out.writeShort(value);
} | 3.26 |
hadoop_ShortWritable_m0_rdh | /**
* Compares two ShortWritable.
*/
@Override
public int m0(ShortWritable o) {
short thisValue = this.value;
short thatValue = o.value;
return thisValue < thatValue ? -1 : thisValue == thatValue ? 0 : 1;
} | 3.26 |
hadoop_ShortWritable_readFields_rdh | /**
* read the short value
*/
@Override
public void readFields(DataInput in) throws IOException {
value = in.readShort();
} | 3.26 |
hadoop_ShortWritable_get_rdh | /**
*
* @return Return the value of this ShortWritable.
*/
public short get() {
return value;
} | 3.26 |
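A small round-trip demo tying the ShortWritable methods above together (set, write, readFields, get). It assumes the class lives in org.apache.hadoop.io and has the usual no-argument constructor that Writable implementations provide.

```java
// Assumption: ShortWritable has a public no-arg constructor, as Writable implementations need.
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.ShortWritable;

public class ShortWritableRoundTrip {
  public static void main(String[] args) throws IOException {
    ShortWritable original = new ShortWritable();
    original.set((short) 1234);

    // Serialize via write(DataOutput).
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    original.write(new DataOutputStream(bytes));

    // Deserialize via readFields(DataInput) into a fresh instance.
    ShortWritable copy = new ShortWritable();
    copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

    System.out.println(copy.get());   // 1234
  }
}
```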
hadoop_LocalTempDir_tempFile_rdh | /**
* Create a temp file.
*
* @param conf
* configuration to use when creating the allocator
* @param prefix
* filename prefix
* @param size
* file size, or -1 if not known
* @return the temp file. The file has been created.
* @throws IOException
* IO failure
*/
public static File tempFile(Configuration conf, String prefix, long size) throws IOException {
return getAllocator(conf, BUFFER_DIR).createTmpFileForWrite(prefix, size, conf);
} | 3.26 |
hadoop_LocalTempDir_tempPath_rdh | /**
* Get a temporary path.
*
* @param conf
* configuration to use when creating the allocator
* @param prefix
* filename prefix
* @param size
* file size, or -1 if not known
* @return the temp path.
* @throws IOException
* IO failure
*/
public static Path tempPath(Configuration conf, String prefix, long size) throws IOException {
return getAllocator(conf, BUFFER_DIR).getLocalPathForWrite(prefix, size, conf);
} | 3.26 |
hadoop_HttpFSServerWebApp_get_rdh | /**
* Returns HttpFSServer server singleton, configuration and services are
* accessible through it.
*
* @return the HttpFSServer server singleton.
*/
public static HttpFSServerWebApp get() {
return SERVER;
} | 3.26 |
hadoop_HttpFSServerWebApp_getAdminGroup_rdh | /**
* Returns HttpFSServer admin group.
*
* @return httpfs admin group.
*/
public String getAdminGroup() {
return adminGroup;
} | 3.26 |
hadoop_HttpFSServerWebApp_init_rdh | /**
* Initializes the HttpFSServer server, loads configuration and required
* services.
*
* @throws ServerException
* thrown if HttpFSServer server could not be
* initialized.
*/
@Override
public void init() throws ServerException {
if (SERVER != null) {
throw new RuntimeException("HttpFSServer server already initialized");
}
SERVER = this;
super.init();
adminGroup = getConfig().get(getPrefixedName(CONF_ADMIN_GROUP), "admin");
LOG.info("Connects to Namenode [{}]", get().get(FileSystemAccess.class).getFileSystemConfiguration().getTrimmed(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
setMetrics(getConfig());
} | 3.26 |
hadoop_HttpFSServerWebApp_getMetrics_rdh | /**
* gets the HttpFSServerMetrics instance.
*
* @return the HttpFSServerMetrics singleton.
*/
public static HttpFSServerMetrics getMetrics() {
return metrics;
} | 3.26 |
hadoop_HttpFSServerWebApp_destroy_rdh | /**
* Shutdowns all running services.
*/
@Override
public void destroy() {
SERVER = null;
if (metrics != null) {
metrics.shutdown();
}
super.destroy();
} | 3.26 |
hadoop_MultiSchemeDelegationTokenAuthenticationHandler_authenticate_rdh | /**
* This method is overridden to restrict HTTP authentication schemes
* available for delegation token management functionality. The
* authentication schemes to be used for delegation token management are
* configured using {@link DELEGATION_TOKEN_SCHEMES_PROPERTY}
*
* The basic logic here is to check if the current request is for delegation
* token management. If yes then check if the request contains an
* "Authorization" header. If it is missing, then return the HTTP 401
* response with WWW-Authenticate header for each scheme configured for
* delegation token management.
*
* It is also possible for a client to preemptively send Authorization header
* for a scheme not configured for delegation token management. We detect
* this case and return the HTTP 401 response with WWW-Authenticate header
* for each scheme configured for delegation token management.
*
* If a client has sent a request with "Authorization" header for a scheme
* configured for delegation token management, then it is forwarded to
* underlying {@link MultiSchemeAuthenticationHandler} for actual
* authentication.
*
* Finally all other requests (excluding delegation token management) are
* forwarded to underlying {@link MultiSchemeAuthenticationHandler} for
* actual authentication.
*/
@Override
public AuthenticationToken authenticate(HttpServletRequest request, HttpServletResponse response)
    throws IOException, AuthenticationException {
String authorization = request.getHeader(HttpConstants.AUTHORIZATION_HEADER);
if (isManagementOperation(request)) {
boolean schemeConfigured = false;
if (authorization != null) {
for (String scheme : delegationAuthSchemes) {
        if (AuthenticationHandlerUtil.matchAuthScheme(scheme, authorization)) {
schemeConfigured = true;
break;
}
}
}
if (!schemeConfigured) {
response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
for (String scheme : delegationAuthSchemes) {
response.addHeader(WWW_AUTHENTICATE, scheme);
}
return null;
}
}
    return super.authenticate(request, response);
} | 3.26 |
hadoop_GetSubClusterPolicyConfigurationRequest_newInstance_rdh | /**
* GetSubClusterPolicyConfigurationRequest is a request to the
* {@code FederationPolicyStore} to get the configuration of a policy for a
* given queue.
*/
@Private
@Unstable
public abstract class GetSubClusterPolicyConfigurationRequest {
@Private
@Unstable
public static GetSubClusterPolicyConfigurationRequest newInstance(String queueName) {
GetSubClusterPolicyConfigurationRequest request = Records.newRecord(GetSubClusterPolicyConfigurationRequest.class);
request.m0(queueName);
return request;
} | 3.26 |