name (string, 12–178 chars) | code_snippet (string, 8–36.5k chars) | score (float64, 3.26–3.68)
---|---|---|
hadoop_ServletUtil_htmlFooter_rdh | /**
* HTML footer to be added in the jsps.
*
* @return the HTML footer.
*/
public static String htmlFooter() {
return HTML_TAIL;
} | 3.26 |
hadoop_CloseableReferenceCount_setClosed_rdh | /**
* Mark the status as closed.
*
* Once the status is closed, it cannot be reopened.
*
* @return The current reference count.
* @throws ClosedChannelException
* If someone else closes the object
* before we do.
*/
public int setClosed() throws ClosedChannelException {
while (true) {
int curBits = status.get();
if ((curBits & STATUS_CLOSED_MASK) != 0) {
throw new ClosedChannelException();
}
if (status.compareAndSet(curBits, curBits | STATUS_CLOSED_MASK)) {
return curBits & (~STATUS_CLOSED_MASK);
}
}
} | 3.26 |
hadoop_CloseableReferenceCount_getReferenceCount_rdh | /**
* Get the current reference count.
*
* @return The current reference count.
*/
public int getReferenceCount() {
return status.get() & (~STATUS_CLOSED_MASK);
} | 3.26 |
hadoop_CloseableReferenceCount_isOpen_rdh | /**
* Return true if the status is currently open.
*
* @return True if the status is currently open.
*/
public boolean isOpen() {
return (status.get() & STATUS_CLOSED_MASK) == 0;
} | 3.26 |
hadoop_CloseableReferenceCount_unreferenceCheckClosed_rdh | /**
* Decrement the reference count, checking to make sure that the
* CloseableReferenceCount is not closed.
*
* @throws AsynchronousCloseException
* If the status is closed.
*/
public void unreferenceCheckClosed() throws ClosedChannelException {
int newVal = status.decrementAndGet();
if ((newVal & STATUS_CLOSED_MASK) != 0) {
throw new AsynchronousCloseException();
}
} | 3.26 |
hadoop_CloseableReferenceCount_reference_rdh | /**
* Increment the reference count.
*
* @throws ClosedChannelException
* If the status is closed.
*/
public void reference() throws ClosedChannelException {
int curBits = status.incrementAndGet();
if ((curBits & STATUS_CLOSED_MASK) != 0) {
status.decrementAndGet();
throw new ClosedChannelException();
}
} | 3.26 |
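The three CloseableReferenceCount methods above share one AtomicInteger whose high bit marks "closed" and whose low bits carry the reference count. Below is a minimal standalone sketch of that pattern; the mask value and class shape are assumptions for illustration, not the actual Hadoop class.

```java
import java.nio.channels.ClosedChannelException;
import java.util.concurrent.atomic.AtomicInteger;

/** Standalone sketch of the closed-bit + counter pattern; the mask bit is an assumption. */
public class RefCountSketch {
    private static final int STATUS_CLOSED_MASK = 1 << 30; // assumed bit position
    private final AtomicInteger status = new AtomicInteger(0);

    public void reference() throws ClosedChannelException {
        int curBits = status.incrementAndGet();
        if ((curBits & STATUS_CLOSED_MASK) != 0) {
            status.decrementAndGet();          // roll back the speculative increment
            throw new ClosedChannelException();
        }
    }

    public int setClosed() throws ClosedChannelException {
        while (true) {
            int curBits = status.get();
            if ((curBits & STATUS_CLOSED_MASK) != 0) {
                throw new ClosedChannelException(); // someone else closed first
            }
            if (status.compareAndSet(curBits, curBits | STATUS_CLOSED_MASK)) {
                return curBits & ~STATUS_CLOSED_MASK; // remaining references
            }
        }
    }

    public static void main(String[] args) throws Exception {
        RefCountSketch rc = new RefCountSketch();
        rc.reference();
        System.out.println("refs at close: " + rc.setClosed()); // prints 1
    }
}
```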
hadoop_IngressPortBasedResolver_getServerProperties_rdh | /**
* Identify the Sasl Properties to be used for a connection with a client.
*
* @param clientAddress
* client's address
* @param ingressPort
* the port that the client is connecting
* @return the sasl properties to be used for the connection.
*/
@Override
@VisibleForTesting
public Map<String, String> getServerProperties(InetAddress clientAddress, int ingressPort) {
  LOG.debug("Resolving SASL properties for " + clientAddress + " " + ingressPort);
  if (!f1.containsKey(ingressPort)) {
    LOG.warn("An un-configured port is being requested " + ingressPort + " using default");
    return getDefaultProperties();
}
return f1.get(ingressPort);
} | 3.26 |
hadoop_QueueCapacityUpdateContext_addUpdateWarning_rdh | /**
* Adds an update warning to the context.
*
* @param warning
* warning during update phase
*/
public void addUpdateWarning(QueueUpdateWarning warning) {
warnings.add(warning);
} | 3.26 |
hadoop_QueueCapacityUpdateContext_getUpdatedClusterResource_rdh | /**
* Returns the overall cluster resource available for the update phase of empty label.
*
* @return cluster resource
*/
public Resource getUpdatedClusterResource() {
return updatedClusterResource;
} | 3.26 |
hadoop_QueueCapacityUpdateContext_getUpdateWarnings_rdh | /**
* Returns all update warnings occurred in this update phase.
*
* @return update warnings
*/
public List<QueueUpdateWarning> getUpdateWarnings() {
return warnings;
} | 3.26 |
hadoop_AggregatedLogsPage_preHead_rdh | /* (non-Javadoc)
@see org.apache.hadoop.yarn.server.nodemanager.webapp.NMView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
*/
@Override
protected void preHead(Page.HTML<__> html) {
String logEntity = $(ENTITY_STRING);
if (logEntity == null || logEntity.isEmpty()) {
  logEntity = $(CONTAINER_ID);
}
if (logEntity == null || logEntity.isEmpty()) {
  logEntity = "UNKNOWN";
}
set(TITLE, join("Logs for ", logEntity));
set(ACCORDION_ID, "nav");
set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}");
} | 3.26 |
hadoop_ReverseZoneUtils_getSubnetCountForReverseZones_rdh | /**
* When splitting the reverse zone, return the number of subnets needed,
* given the range and netmask.
*
* @param conf
* the Hadoop configuration.
* @return The number of subnets given the range and netmask.
*/
protected static long getSubnetCountForReverseZones(Configuration conf) {
String subnet = conf.get(KEY_DNS_ZONE_SUBNET);
String mask = conf.get(KEY_DNS_ZONE_MASK);
String range = conf.get(KEY_DNS_SPLIT_REVERSE_ZONE_RANGE);
int parsedRange;
try {
parsedRange = Integer.parseInt(range);
} catch (NumberFormatException e) {
  LOG.error("The supplied range is not a valid integer: Supplied range: {}", range);
throw e;
}
if (parsedRange < 0) {
String msg = String.format("Range cannot be negative: Supplied range: %d", parsedRange);
LOG.error(msg);
throw new IllegalArgumentException(msg);
}
long ipCount;
try {
SubnetUtils subnetUtils = new SubnetUtils(subnet, mask);
subnetUtils.setInclusiveHostCount(true);
ipCount = subnetUtils.getInfo().getAddressCountLong();
} catch (IllegalArgumentException e) {
LOG.error("The subnet or mask is invalid: Subnet: {} Mask: {}", subnet, mask);
throw e;
}
if (parsedRange == 0) {
return ipCount;
}
return ipCount / parsedRange;
} | 3.26 |
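As a quick sanity check on the arithmetic above: with inclusive host counting, the address count is determined entirely by the mask's prefix length, and dividing by the configured range gives the subnet count. A hedged worked example, without commons-net on the classpath (the /19 prefix and range of 256 are made-up values):

```java
/** Worked example of the subnet-count arithmetic; the inputs are illustrative only. */
public class ReverseZoneCountSketch {
    public static void main(String[] args) {
        int prefixLength = 19;                        // e.g. mask 255.255.224.0
        long ipCount = 1L << (32 - prefixLength);     // 8192 addresses, hosts counted inclusively
        long range = 256;                             // addresses per reverse zone
        System.out.println(ipCount / range);          // 32 subnets
    }
}
```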
hadoop_ReverseZoneUtils_getReverseZoneNetworkAddress_rdh | /**
* Given a baseIp, range and index, return the network address for the
* reverse zone.
*
* @param baseIp
* base ip address to perform calculations against.
* @param range
* number of ip addresses per subnet.
* @param index
* the index of the subnet to calculate.
* @return the calculated ip address.
* @throws UnknownHostException
* if an invalid ip is provided.
*/
protected static String getReverseZoneNetworkAddress(String baseIp, int range, int index) throws UnknownHostException {
if (index < 0) {
  throw new IllegalArgumentException(String.format("Invalid index provided, must be positive: %d", index));
}
if (range < 0) {
  throw new IllegalArgumentException(String.format("Invalid range provided, cannot be negative: %d", range));
}
return calculateIp(baseIp, range, index);
} | 3.26 |
hadoop_AbstractRouterPolicy_prefilterSubClusters_rdh | /**
* Filter chosen SubCluster based on reservationId.
*
* @param reservationId
* the globally unique identifier for a reservation.
* @param activeSubClusters
* the map of ids to info for all active subclusters.
* @return the chosen sub-cluster
* @throws YarnException
* if the policy fails to choose a sub-cluster
*/
protected Map<SubClusterId, SubClusterInfo> prefilterSubClusters(ReservationId reservationId, Map<SubClusterId, SubClusterInfo> activeSubClusters) throws YarnException {
// if a reservation exists limit scope to the sub-cluster this
// reservation is mapped to
if (reservationId != null) {
// note this might throw YarnException if the reservation is
// unknown. This is to be expected, and should be handled by
// policy invoker.
FederationStateStoreFacade stateStoreFacade = getPolicyContext().getFederationStateStoreFacade();
SubClusterId resSubCluster = stateStoreFacade.getReservationHomeSubCluster(reservationId);
SubClusterInfo subClusterInfo = activeSubClusters.get(resSubCluster);
return Collections.singletonMap(resSubCluster, subClusterInfo);
}
return activeSubClusters;
}
/**
* Simply picks from alphabetically-sorted active subclusters based on the
* hash of the queue name. Jobs of the same queue will all be routed to the
* same sub-cluster, as long as the number of active sub-clusters and their
* names remain the same.
*
* @param appContext
* the {@link ApplicationSubmissionContext} that
* has to be routed to an appropriate subCluster for execution.
* @param blackLists
* the list of subClusters as identified by
* {@link SubClusterId} to blackList from the selection of the home
* subCluster.
* @return a hash-based chosen {@link SubClusterId} | 3.26 |
hadoop_AbstractOperationAuditor_getOptions_rdh | /**
* Get the options this auditor was initialized with.
*
* @return options.
*/
protected OperationAuditorOptions getOptions() {
return options;
} | 3.26 |
hadoop_AbstractOperationAuditor_getIOStatistics_rdh | /**
* Get the IOStatistics Store.
*
* @return the IOStatistics store updated with statistics.
*/
public IOStatisticsStore getIOStatistics() {
return iostatistics;
} | 3.26 |
hadoop_AbstractOperationAuditor_init_rdh | /**
* Sets the IOStats and then calls init().
*
* @param opts
* options to initialize with.
*/
@Override
public void init(final OperationAuditorOptions opts) {
this.options = opts;
this.iostatistics = opts.getIoStatisticsStore();
init(opts.getConfiguration());
} | 3.26 |
hadoop_AbstractOperationAuditor_createSpanID_rdh | /**
* Create a span ID.
*
* @return a unique span ID.
*/
protected final String createSpanID() {
return String.format("%s-%08d", auditorID, SPAN_ID_COUNTER.incrementAndGet());
} | 3.26 |
hadoop_DeletedDirTracker_shouldDelete_rdh | /**
* Should a file or directory be deleted?
* The cache of deleted directories will be updated with the path
* of the status if it references a directory.
*
* @param status
* file/path to check
* @return true if the path should be deleted.
*/
boolean shouldDelete(CopyListingFileStatus status) {
Path path = status.getPath();
Preconditions.checkArgument(!path.isRoot(), "Root Dir");
if (status.isDirectory()) {
boolean deleted = isDirectoryOrAncestorDeleted(path);
// even if an ancestor has been deleted, add this entry as
// a deleted directory.
directories.put(path, path);
return !deleted;
} else {
return !isInDeletedDirectory(path);
}
} | 3.26 |
hadoop_DeletedDirTracker_isInDeletedDirectory_rdh | /**
* Probe for a path being deleted by virtue of the fact that an
* ancestor dir has already been deleted.
*
* @param path
* path to check
* @return true if the parent dir is deleted.
*/
private boolean isInDeletedDirectory(Path path) {
Preconditions.checkArgument(!path.isRoot(), "Root Dir");
return isDirectoryOrAncestorDeleted(path.getParent());
} | 3.26 |
hadoop_DeletedDirTracker_size_rdh | /**
* Return the current size of the tracker, as in #of entries in the cache.
*
* @return tracker size.
*/
long size() {
return directories.size();
} | 3.26 |
hadoop_DeletedDirTracker_isDirectoryOrAncestorDeleted_rdh | /**
* Recursive scan for a directory being in the cache of deleted paths.
*
* @param dir
* directory to look for.
* @return true iff the path or a parent is in the cache.
*/
boolean isDirectoryOrAncestorDeleted(Path dir) {
if (dir == null) {
// at root
return false;
} else if (isContained(dir)) {
// cache hit
return true;
} else {
// cache miss, check parent
return isDirectoryOrAncestorDeleted(dir.getParent());
}
} | 3.26 |
hadoop_DeletedDirTracker_isContained_rdh | /**
* Is a path directly contained in the set of deleted directories.
*
* @param dir
* directory to probe
* @return true if this directory is recorded as being deleted.
*/
boolean isContained(Path dir) {
return directories.getIfPresent(dir) != null;
} | 3.26 |
hadoop_EagerKeyGeneratorKeyProviderCryptoExtension_rollNewVersion_rdh | /**
* Roll a new version of the given key generating the material for it.
* <p>
* Due to the caching on the ValueQueue, even after a rollNewVersion call,
* {@link #generateEncryptedKey(String)} may still return an old key - even
* when we drain the queue here, the async thread may later fill in old keys.
* This is acceptable since old version keys are still able to decrypt, and
* client shall make no assumptions that it will get a new versioned key
* after rollNewVersion.
*/
@Override
public KeyVersion rollNewVersion(String name) throws NoSuchAlgorithmException, IOException {
KeyVersion keyVersion = super.rollNewVersion(name);
getExtension().drain(name);
return keyVersion;
} | 3.26 |
hadoop_SelectTool_run_rdh | /**
* Execute the select operation.
*
* @param args
* argument list
* @param out
* output stream
* @return an exit code
* @throws IOException
* IO failure
* @throws ExitUtil.ExitException
* managed failure
*/
public int run(String[] args, PrintStream out) throws IOException, ExitException {
final List<String> parsedArgs;
try {
parsedArgs = parseArgs(args);
} catch (CommandFormat.UnknownOptionException e) {
errorln(getUsage());
throw new ExitUtil.ExitException(EXIT_USAGE, e.getMessage(), e);
}
if (parsedArgs.size() < 2) {
errorln(getUsage());
throw new ExitUtil.ExitException(EXIT_USAGE, TOO_FEW_ARGUMENTS);
}
// read mandatory arguments
final String file = parsedArgs.get(0);
final Path v5 = new Path(file);
String expression = parsedArgs.get(1);
println(out, "selecting file %s with query %s", v5, expression);
// and the optional arguments to adjust the configuration.
final Optional<String> header = getOptValue(OPT_HEADER);
header.ifPresent(h -> println(out, "Using header option %s", h));
Path destPath = getOptValue(OPT_OUTPUT).map(output -> {
println(out, "Saving output to %s", output);
return new Path(output);
}).orElse(null);
final boolean toConsole = destPath == null;
// expected lines are only checked if empty
final Optional<Integer> expectedLines = (toConsole) ? getIntValue(OPT_EXPECTED) : Optional.empty();
final Optional<Integer> limit = getIntValue(OPT_LIMIT);
if (limit.isPresent()) {
final int l = limit.get();
println(out, "Using line limit %s", l);
if (expression.toLowerCase(Locale.ENGLISH).contains(" limit ")) {
println(out, "line limit already specified in SELECT expression");
} else {
expression = (expression + " LIMIT ") + l;
}
}
// now bind to the filesystem.
FileSystem fs = bindFilesystem(v5.getFileSystem(getConf()));
if (!fs.hasPathCapability(v5, S3_SELECT_CAPABILITY)) {
// capability disabled
throw new ExitUtil.ExitException(EXIT_SERVICE_UNAVAILABLE, (SELECT_IS_DISABLED + " for ") + file);
}
linesRead = 0;
selectDuration = new OperationDuration();
// open and scan the stream.
final FutureDataInputStreamBuilder builder = fs.openFile(v5).must(SELECT_SQL, expression);
header.ifPresent(h -> builder.must(CSV_INPUT_HEADER, h));
getOptValue(OPT_COMPRESSION).ifPresent(compression -> builder.must(SELECT_INPUT_COMPRESSION, compression.toUpperCase(Locale.ENGLISH)));
getOptValue(OPT_INPUTFORMAT).ifPresent(opt -> {
if (!"csv".equalsIgnoreCase(opt)) {
throw invalidArgs("Unsupported input format %s", opt);
}
});
getOptValue(OPT_OUTPUTFORMAT).ifPresent(opt -> {
  if (!"csv".equalsIgnoreCase(opt)) {
throw invalidArgs("Unsupported output format %s", opt);
}
});
// turn on SQL error reporting.
builder.opt(SELECT_ERRORS_INCLUDE_SQL, true);
FSDataInputStream stream;
try (DurationInfo ignored = new DurationInfo(LOG, "Selecting stream")) {stream = FutureIO.awaitFuture(builder.build());
} catch (FileNotFoundException e) {
// the source file is missing.
throw notFound(e);
}
try {
if (toConsole) {
// logging to console
bytesRead = 0;
@SuppressWarnings("IOResourceOpenedButNotSafelyClosed")
Scanner scanner = new Scanner(new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8)));
scanner.useDelimiter("\n");
while (scanner.hasNextLine()) {
linesRead++;
String l = scanner.nextLine();
bytesRead += l.length() + 1;
println(out, "%s", l);
}
} else {
// straight dump of whole file; no line counting
FileSystem destFS = destPath.getFileSystem(getConf());
try (DurationInfo ignored = new DurationInfo(LOG, "Copying File");
     OutputStream destStream = destFS.createFile(destPath).overwrite(true).build()) {
bytesRead = IOUtils.copy(stream, destStream);
}
}
// close the stream.
// this will take time if there's a lot of data remaining
try (DurationInfo ignored = new DurationInfo(LOG, "Closing stream")) {
stream.close();
}
// generate a meaningful result depending on the operation
String result = (toConsole) ? String.format("%s lines", linesRead) : String.format("%s bytes", bytesRead);
// print some statistics
selectDuration.finished();
println(out, "Read %s in time %s", result, selectDuration.getDurationString());
println(out, "Bytes Read: %,d bytes", bytesRead);
println(out, "Bandwidth: %,.1f MiB/s", bandwidthMBs(bytesRead, selectDuration.value()));
} finally {
cleanupWithLogger(LOG, stream);
}LOG.debug("Statistics {}", stream);expectedLines.ifPresent(l -> {if (l != linesRead) {
throw exitException(EXIT_FAIL, "Expected %d rows but the operation returned %d", l, linesRead);
}
});
out.flush();
return EXIT_SUCCESS;
} | 3.26 |
hadoop_SelectTool_bandwidthMBs_rdh | /**
* Work out the bandwidth in MB/s.
*
* @param bytes
* bytes
* @param durationMillisNS
* duration in milliseconds
* @return the number of megabytes/second of the recorded operation
*/
public static double bandwidthMBs(long bytes, long durationMillisNS) {
return durationMillisNS > 0 ? ((bytes / 1048576.0) * 1000) / durationMillisNS : 0;
} | 3.26 |
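Despite the parameter name, the formula treats the duration as milliseconds: bytes are scaled to MiB, multiplied by 1000 ms/s, and divided by the elapsed milliseconds. A small worked example (the 10 MiB / 2000 ms figures are illustrative only):

```java
/** Worked example of the bandwidth formula above with made-up numbers. */
public class BandwidthExample {
    public static void main(String[] args) {
        long bytes = 10L * 1024 * 1024;                           // 10 MiB read
        long durationMillis = 2000;                               // in 2 seconds
        double mbPerSec = (bytes / 1048576.0) * 1000 / durationMillis;
        System.out.println(mbPerSec);                             // 5.0 MiB/s
    }
}
```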
hadoop_SelectTool_getLinesRead_rdh | /**
* Number of lines read, when printing to the console.
*
* @return line count. 0 if writing direct to a file.
*/
public long getLinesRead() {
return linesRead;
} | 3.26 |
hadoop_ListResultEntrySchema_group_rdh | /**
* Get the group value.
*
* @return the group value
*/
public String group() {
return group;
} | 3.26 |
hadoop_ListResultEntrySchema_withName_rdh | /**
* Set the name value.
*
* @param name
* the name value to set
* @return the ListEntrySchema object itself.
*/
public ListResultEntrySchema withName(String name) {
this.name = name;
return this;
} | 3.26 |
hadoop_ListResultEntrySchema_eTag_rdh | /**
* Get the etag value.
*
* @return the etag value
*/
public String eTag() {
return eTag;
} | 3.26 |
hadoop_ListResultEntrySchema_withLastModified_rdh | /**
* Set the lastModified value.
*
* @param lastModified
* the lastModified value to set
* @return the ListEntrySchema object itself.
*/
public ListResultEntrySchema withLastModified(String lastModified) {
this.lastModified = lastModified;
return this;
} | 3.26 |
hadoop_ListResultEntrySchema_withIsDirectory_rdh | /**
* Set the isDirectory value.
*
* @param isDirectory
* the isDirectory value to set
* @return the ListEntrySchema object itself.
*/
public ListResultEntrySchema withIsDirectory(final Boolean isDirectory) {
this.isDirectory = isDirectory;
return this;
} | 3.26 |
hadoop_ListResultEntrySchema_isDirectory_rdh | /**
* Get the isDirectory value.
*
* @return the isDirectory value
*/
public Boolean isDirectory() {
return isDirectory;
} | 3.26 |
hadoop_ListResultEntrySchema_withGroup_rdh | /**
* Set the group value.
*
* @param group
* the group value to set
* @return the ListEntrySchema object itself.
*/
public ListResultEntrySchema withGroup(final String group) {
this.group = group;
return this;
} | 3.26 |
hadoop_ListResultEntrySchema_withETag_rdh | /**
* Set the eTag value.
*
* @param eTag
* the eTag value to set
* @return the ListEntrySchema object itself.
*/
public ListResultEntrySchema withETag(final String eTag) {
this.eTag = eTag;
return this;
} | 3.26 |
hadoop_ListResultEntrySchema_contentLength_rdh | /**
* Get the contentLength value.
*
* @return the contentLength value
*/
public Long contentLength() {
return contentLength;
} | 3.26 |
hadoop_ListResultEntrySchema_name_rdh | /**
* Get the name value.
*
* @return the name value
*/
public String name() {
return name;
} | 3.26 |
hadoop_ListResultEntrySchema_withOwner_rdh | /**
* Set the owner value.
*
* @param owner
* the owner value to set
* @return the ListEntrySchema object itself.
*/
public ListResultEntrySchema withOwner(final String owner) {
  this.owner = owner;
  return this;
} | 3.26 |
hadoop_ListResultEntrySchema_withContentLength_rdh | /**
* Set the contentLength value.
*
* @param contentLength
* the contentLength value to set
* @return the ListEntrySchema object itself.
*/
public ListResultEntrySchema withContentLength(final Long contentLength) {
this.contentLength = contentLength;
return this;
} | 3.26 |
hadoop_ListResultEntrySchema_owner_rdh | /**
* Get the owner value.
*
* @return the owner value
*/
public String owner() {
return owner;
} | 3.26 |
hadoop_ListResultEntrySchema_withPermissions_rdh | /**
* Set the permissions value.
*
* @param permissions
* the permissions value to set
* @return the ListEntrySchema object itself.
*/
public ListResultEntrySchema withPermissions(final String permissions) {
this.permissions = permissions;
return this;
} | 3.26 |
hadoop_ListResultEntrySchema_permissions_rdh | /**
* Get the permissions value.
*
* @return the permissions value
*/
public String permissions() {
return permissions;
} | 3.26 |
hadoop_ServiceRecord_m0_rdh | /**
* Look up an internal endpoint
*
* @param api
* API
* @return the endpoint or null if there was no match
*/
public Endpoint m0(String api) {
return findByAPI(internal, api);
} | 3.26 |
hadoop_ServiceRecord_clone_rdh | /**
* Shallow clone: all endpoints will be shared across instances
*
* @return a clone of the instance
* @throws CloneNotSupportedException
*/
@Override
protected Object clone() throws CloneNotSupportedException {
return super.clone();
} | 3.26 |
hadoop_ServiceRecord_set_rdh | /**
* Handle unknown attributes by storing them in the
* {@link #attributes} map
*
* @param key
* attribute name
* @param value
* attribute value.
*/
@JsonAnySetter
public void set(String key, Object value) {
attributes.put(key, value.toString());
} | 3.26 |
hadoop_ServiceRecord_attributes_rdh | /**
* The map of "other" attributes set when parsing. These
* are not included in the JSON value of this record when it
* is generated.
*
* @return a map of any unknown attributes in the deserialized JSON.
*/
@JsonAnyGetter
public Map<String, String> attributes() {
return attributes;
} | 3.26 |
hadoop_ServiceRecord_addExternalEndpoint_rdh | /**
* Add an external endpoint
*
* @param endpoint
* endpoint to set
*/
public void addExternalEndpoint(Endpoint endpoint) {
Preconditions.checkArgument(endpoint != null);
endpoint.validate();
external.add(endpoint);
} | 3.26 |
hadoop_ServiceRecord_getExternalEndpoint_rdh | /**
* Look up an external endpoint
*
* @param api
* API
* @return the endpoint or null if there was no match
*/
public Endpoint getExternalEndpoint(String api) {
return findByAPI(external, api);
} | 3.26 |
hadoop_ServiceRecord_addInternalEndpoint_rdh | /**
* Add an internal endpoint
*
* @param endpoint
* endpoint to set
*/
public void addInternalEndpoint(Endpoint endpoint) {
Preconditions.checkArgument(endpoint != null);
endpoint.validate();
internal.add(endpoint);
} | 3.26 |
hadoop_ServiceRecord_get_rdh | /**
* Get the "other" attribute with a specific key
*
* @param key
* key to look up
* @return the value or null
*/
public String get(String key) {
return attributes.get(key);
} | 3.26 |
hadoop_RouterCacheAdmin_getRemoteMap_rdh | /**
* Returns a map with the CacheDirectiveInfo mapped to each location.
*
* @param path
* CacheDirectiveInfo to be mapped to the locations.
* @param locations
* the locations to map.
* @return map with CacheDirectiveInfo mapped to the locations.
*/
private Map<RemoteLocation, CacheDirectiveInfo> getRemoteMap(CacheDirectiveInfo path, final List<RemoteLocation> locations) {
final Map<RemoteLocation, CacheDirectiveInfo> dstMap = new HashMap<>();
for (RemoteLocation location : locations) {
  dstMap.put(location, path);
}
return dstMap;
} | 3.26 |
hadoop_AclFeature_getEntryAt_rdh | /**
* Get the entry at the specified position
*
* @param pos
* Position of the entry to be obtained
* @return integer representation of AclEntry
* @throws IndexOutOfBoundsException
* if pos out of bound
*/
int getEntryAt(int pos) {
  if (pos < 0 || pos >= entries.length) {
throw new IndexOutOfBoundsException("Invalid position for AclEntry");
}
return entries[pos];
} | 3.26 |
hadoop_AclFeature_m0_rdh | /**
* Get the number of entries present
*/
int m0() {
return entries.length;
} | 3.26 |
hadoop_ProducerConsumer_run_rdh | /**
* The worker continuously gets an item from input queue, process it and
* then put the processed result into output queue. It waits to get an item
* from input queue if there's none.
*/
public void run() {
while (true) {
WorkRequest<T> work;
try {
work = inputQueue.take();
} catch (InterruptedException e) {
// It is assumed that if an interrupt occurs while taking a work
// out from input queue, the interrupt is likely triggered by
// ProducerConsumer.shutdown(). Therefore, exit the thread.
LOG.debug("Interrupted while waiting for requests from inputQueue.");
return;
}
boolean isDone = false;
while (!isDone) {
  try {
// if the interrupt happens while the work is being processed,
// go back to process the same work again.
WorkReport<R> result = processor.processItem(work);
f0.put(result);
isDone = true;
} catch (InterruptedException ie) {
LOG.debug("Worker thread was interrupted while processing an item," + " or putting into outputQueue. Retrying...");
}
}
}
} | 3.26 |
hadoop_ProducerConsumer_take_rdh | /**
* Blocking take from ProducerConsumer output queue that can be interrupted.
*
* @throws InterruptedException
* if interrupted before an element becomes
* available.
* @return item returned by processor's processItem().
*/
public WorkReport<R> take() throws InterruptedException {
WorkReport<R> v1 = f0.take();
workCnt.decrementAndGet();
return v1;
} | 3.26 |
hadoop_ProducerConsumer_addWorker_rdh | /**
* Add another worker that will consume WorkRequest{@literal <T>} items
* from input queue, process each item using supplied processor, and for
* every processed item output WorkReport{@literal <R>} to output queue.
*
* @param processor
* Processor implementing WorkRequestProcessor interface.
*/
public void addWorker(WorkRequestProcessor<T, R> processor) {
  executor.execute(new Worker(processor));
} | 3.26 |
hadoop_ProducerConsumer_put_rdh | /**
* Blocking put workRequest to ProducerConsumer input queue.
*
* @param workRequest
* item to be processed.
*/
public void put(WorkRequest<T> workRequest) {
boolean isDone = false;
while (!isDone) {
try {
inputQueue.put(workRequest);
workCnt.incrementAndGet();
isDone = true;
} catch (InterruptedException ie) {
LOG.error("Could not put workRequest into inputQueue. Retrying...");
}
}
} | 3.26 |
hadoop_ProducerConsumer_blockingTake_rdh | /**
* Blocking take from ProducerConsumer output queue (catches exceptions and
* retries forever).
*
* @return item returned by processor's processItem().
*/
public WorkReport<R> blockingTake() {
while (true) {
try {
WorkReport<R> report = f0.take();
workCnt.decrementAndGet();
return report;
} catch (InterruptedException ie) {
LOG.debug("Retrying in blockingTake...");
}
}
} | 3.26 |
hadoop_ProducerConsumer_shutdown_rdh | /**
* Shutdown ProducerConsumer worker thread-pool without waiting for
* completion of any pending work.
*/
public void shutdown() {
if (hasWork()) {
LOG.warn("Shutdown() is called but there are still unprocessed work!");
}
executor.shutdownNow();
} | 3.26 |
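Put together, the ProducerConsumer methods above form a bounded work pipeline: put() feeds an input queue, worker threads run processItem() and push reports, and blockingTake()/shutdown() drain and tear it down. A self-contained sketch of the same flow using plain java.util.concurrent types (the queue sizes and the squaring "processor" are arbitrary stand-ins, not the Hadoop classes):

```java
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

/** Standalone sketch of the worker loop: take a request, process it, put a report. */
public class ProducerConsumerSketch {
    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<Integer> input = new ArrayBlockingQueue<>(10);
        BlockingQueue<Integer> output = new ArrayBlockingQueue<>(10);
        ExecutorService executor = Executors.newFixedThreadPool(2);

        // Each worker blocks on the input queue and exits when interrupted by shutdownNow().
        Runnable worker = () -> {
            while (true) {
                try {
                    int work = input.take();
                    output.put(work * work);      // the "processItem" step
                } catch (InterruptedException e) {
                    return;                        // shutdown signal
                }
            }
        };
        executor.execute(worker);
        executor.execute(worker);

        for (int i = 1; i <= 3; i++) {
            input.put(i);                          // like ProducerConsumer.put(...)
        }
        for (int i = 0; i < 3; i++) {
            System.out.println(output.take());     // like ProducerConsumer.blockingTake()
        }
        executor.shutdownNow();                    // like ProducerConsumer.shutdown()
    }
}
```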
hadoop_AbfsDtFetcher_getScheme_rdh | /**
* Get the scheme for this specific fetcher.
*
* @return a scheme.
*/
protected String getScheme() {
return FileSystemUriSchemes.ABFS_SCHEME;
} | 3.26 |
hadoop_AbfsDtFetcher_addDelegationTokens_rdh | /**
* Returns Token object via FileSystem, null if bad argument.
*
* @param conf
* - a Configuration object used with FileSystem.get()
* @param creds
* - a Credentials object to which token(s) will be added
* @param renewer
* - the renewer to send with the token request
* @param url
* - the URL to which the request is sent
* @return a Token, or null if fetch fails.
*/
public Token<?> addDelegationTokens(Configuration conf, Credentials creds,
    String renewer, String url) throws Exception {
if (!url.startsWith(getServiceName().toString())) {
url = (getServiceName().toString() + "://") + url;
}
FileSystem fs = FileSystem.get(URI.create(url), conf);
Token<?> token = fs.getDelegationToken(renewer);
if (token == null) {
throw new IOException(FETCH_FAILED + ": " + url);
}
creds.addToken(token.getService(), token);
return token;
} | 3.26 |
hadoop_AbfsDtFetcher_getServiceName_rdh | /**
* Returns the service name for the scheme.
*/
public Text getServiceName() {
return new Text(getScheme());
} | 3.26 |
hadoop_NameCache_put_rdh | /**
* Add a given name to the cache or track its use count.
* If the name already exists, then the internal value is returned.
*
* @param name
* name to be looked up
* @return internal value for the name if found; otherwise null
*/
K put(final K name) {
K internal = cache.get(name);
if (internal != null) {
lookups++;
return internal;
}
// Track the usage count only during initialization
if (!initialized) {
UseCount useCount = transientMap.get(name);
if (useCount != null) {
  useCount.increment();
if (useCount.get() >= useThreshold) {
promote(name);
}
return useCount.value;
}
useCount = new UseCount(name);
transientMap.put(name, useCount);
}
return null;
} | 3.26 |
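The put() method above implements interning with a promotion threshold: names are counted in a transient map until they have been used often enough, at which point a single canonical instance is cached and returned. Below is a compact standalone sketch of the same idea; the threshold of 2 and the class name are assumptions, not Hadoop's NameCache.

```java
import java.util.HashMap;
import java.util.Map;

/** Standalone sketch of promote-on-threshold interning; the threshold value is an assumption. */
public class NameCacheSketch<K> {
    private final int useThreshold;
    private final Map<K, K> cache = new HashMap<>();
    private final Map<K, Integer> transientMap = new HashMap<>();

    public NameCacheSketch(int useThreshold) {
        this.useThreshold = useThreshold;
    }

    /** Returns the interned instance once a name has been seen useThreshold times, else null. */
    K put(K name) {
        K interned = cache.get(name);
        if (interned != null) {
            return interned;                     // already promoted
        }
        int count = transientMap.merge(name, 1, Integer::sum);
        if (count >= useThreshold) {
            transientMap.remove(name);
            cache.put(name, name);               // promote the frequently used name
            return name;
        }
        return null;
    }

    public static void main(String[] args) {
        NameCacheSketch<String> names = new NameCacheSketch<>(2);
        System.out.println(names.put("user"));   // null: seen once
        System.out.println(names.put("user"));   // "user": promoted on second use
    }
}
```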
hadoop_NameCache_m0_rdh | /**
* Size of the cache
*
* @return Number of names stored in the cache
*/
int m0() {
return cache.size();
} | 3.26 |
hadoop_NameCache_promote_rdh | /**
* Promote a frequently used name to the cache
*/
private void promote(final K name) {
transientMap.remove(name);
cache.put(name, name);
lookups += useThreshold;
} | 3.26 |
hadoop_NameCache_initialized_rdh | /**
* Mark the name cache as initialized. The use count is no longer tracked
* and the transient map used for initializing the cache is discarded to
* save heap space.
*/
void initialized() {
LOG.info("initialized with " + m0() + " entries " + lookups + " lookups");
this.initialized = true;
transientMap.clear();
transientMap = null;
} | 3.26 |
hadoop_NameCache_getLookupCount_rdh | /**
* Lookup count when a lookup for a name returned cached object
*
* @return number of successful lookups
*/
int getLookupCount() {
return lookups;
} | 3.26 |
hadoop_JobDefinition_getParams_rdh | // Currently unused
public Map<String, String> getParams() {
return params;
} | 3.26 |
hadoop_XORErasureDecoder_getOutputBlocks_rdh | /**
* Which blocks were erased ? For XOR it's simple we only allow and return one
* erased block, either data or parity.
*
* @param blockGroup
* blockGroup.
* @return output blocks to recover
*/
@Override
protected ECBlock[] getOutputBlocks(ECBlockGroup blockGroup) {
/**
* If more than one block (either data or parity) is erased, then it's not
* feasible to recover. We don't have the check here since it will be done
* by the upper level: the ErasureCoder call can be avoided if it is not
* possible to recover at all.
*/
int erasedNum = getNumErasedBlocks(blockGroup);
ECBlock[] outputBlocks = new ECBlock[erasedNum];
int idx = 0;
for (int i = 0; i < getNumParityUnits(); i++) {
if (blockGroup.getParityBlocks()[i].isErased()) {
  outputBlocks[idx++] = blockGroup.getParityBlocks()[i];
}
}
for (int i = 0; i < getNumDataUnits(); i++) {
  if (blockGroup.getDataBlocks()[i].isErased()) {
    outputBlocks[idx++] = blockGroup.getDataBlocks()[i];
  }
}
return outputBlocks;
} | 3.26 |
hadoop_UnionStorageStatistics_isTracked_rdh | /**
* Return true if a statistic is being tracked.
*
* @return True only if the statistic is being tracked.
*/
@Override
public boolean isTracked(String key) {
for (StorageStatistics stat : stats) {
if (stat.isTracked(key)) {
return true;
}
}
return false;
} | 3.26 |
hadoop_IntegerSplitter_split_rdh | /**
* Returns a list of longs one element longer than the list of input splits.
* This represents the boundaries between input splits.
* All splits are open on the top end, except the last one.
*
* So the list [0, 5, 8, 12, 18] would represent splits capturing the intervals:
*
* [0, 5)
* [5, 8)
* [8, 12)
* [12, 18] note the closed interval for the last split.
*/
List<Long> split(long numSplits, long minVal, long maxVal) throws SQLException {
List<Long> splits = new ArrayList<Long>();
// Use numSplits as a hint. May need an extra task if the size doesn't
// divide cleanly.
long splitSize = (maxVal - minVal) / numSplits;
if (splitSize < 1) {
splitSize = 1;
}
long v13 = minVal;
while (v13 <= maxVal) {
splits.add(v13);
v13 += splitSize;
}
if ((splits.get(splits.size() - 1) != maxVal) || (splits.size() == 1)) {
// We didn't end on the maxVal. Add that to the end of the list.
splits.add(maxVal);
}
return splits;
} | 3.26 |
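To see the boundary arithmetic above in action: a hint of 4 splits over [0, 18] gives a split size of 4, boundaries 0, 4, 8, 12, 16, and then maxVal is appended because the loop did not land on it. A hedged re-implementation (the SQLException is dropped since no database is involved):

```java
import java.util.ArrayList;
import java.util.List;

/** Standalone re-implementation of the split logic above, just to show the boundary arithmetic. */
public class SplitSketch {
    static List<Long> split(long numSplits, long minVal, long maxVal) {
        List<Long> splits = new ArrayList<>();
        long splitSize = Math.max((maxVal - minVal) / numSplits, 1); // hint, may leave a remainder
        for (long v = minVal; v <= maxVal; v += splitSize) {
            splits.add(v);
        }
        if (splits.get(splits.size() - 1) != maxVal || splits.size() == 1) {
            splits.add(maxVal);                                      // close the final interval on maxVal
        }
        return splits;
    }

    public static void main(String[] args) {
        // A hint of 4 splits over [0, 18] yields one extra interval.
        System.out.println(split(4, 0, 18));   // [0, 4, 8, 12, 16, 18]
    }
}
```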
hadoop_VolumeManagerImpl_getAdaptorByDriverName_rdh | /**
* Returns a CsiAdaptorProtocol client by the given driver name,
* returns null if no adaptor is found for the driver, that means
* the driver has not registered with the volume manager yet and hence is not valid.
*
* @param driverName
* the name of the driver
* @return CsiAdaptorProtocol client or null if driver not registered
*/
public CsiAdaptorProtocol getAdaptorByDriverName(String driverName) {
return csiAdaptorMap.get(driverName);
} | 3.26 |
hadoop_VolumeManagerImpl_initCsiAdaptorCache_rdh | // it is then added to the cache. Note, we don't allow two drivers
// specified with same driver-name even version is different.
private void initCsiAdaptorCache(final Map<String, CsiAdaptorProtocol> adaptorMap, Configuration conf) throws IOException, YarnException {
LOG.info("Initializing cache for csi-driver-adaptors");
String[] addresses = conf.getStrings(YarnConfiguration.NM_CSI_ADAPTOR_ADDRESSES);
if ((addresses != null) && (addresses.length > 0)) {
for (String addr : addresses) {
LOG.info("Found csi-driver-adaptor socket address: " + addr);
InetSocketAddress address = NetUtils.createSocketAddr(addr);
YarnRPC rpc = YarnRPC.create(conf);
UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
CsiAdaptorProtocol adaptorClient = NMProxy.createNMProxy(conf, CsiAdaptorProtocol.class, currentUser, rpc, address);
// Attempt to resolve the driver by contacting to
// the diver's identity service on the given address.
// If the call fails, the initialization also fails
// in order to avoid running into an inconsistent state.
LOG.info("Retrieving info from csi-driver-adaptor on address " + addr);
GetPluginInfoResponse response = adaptorClient.getPluginInfo(GetPluginInfoRequest.newInstance());
if (!Strings.isNullOrEmpty(response.getDriverName())) {
String driverName = response.getDriverName();
if (adaptorMap.containsKey(driverName)) {
throw new YarnException(("Duplicate driver adaptor found," + " driver name: ") + driverName);
}
adaptorMap.put(driverName, adaptorClient);
LOG.info((("CSI Adaptor added to the cache, adaptor name: " + driverName) + ", driver version: ") + response.getVersion());
}
}
}
} | 3.26 |
hadoop_ResourceSet_getLocalizationStatuses_rdh | /**
* Get all the localization statuses.
*
* @return the localization statuses.
*/
public List<LocalizationStatus> getLocalizationStatuses() {
  List<LocalizationStatus> statuses = new ArrayList<>();
localizedResources.forEach((key, path) -> {
LocalizationStatus status = LocalizationStatus.newInstance(key, LocalizationState.COMPLETED);
statuses.add(status);
});
pendingResources.forEach((lrReq, keys) -> keys.forEach(key -> {
LocalizationStatus status = LocalizationStatus.newInstance(key, LocalizationState.PENDING);
statuses.add(status);
}));
synchronized(resourcesFailedToBeLocalized) {
statuses.addAll(resourcesFailedToBeLocalized);
}
return statuses;
} | 3.26 |
hadoop_ResourceSet_storeSharedCacheUploadPolicy_rdh | /**
* Store the resource's shared cache upload policies
* Given LocalResourceRequest can be shared across containers in
* LocalResourcesTrackerImpl, we preserve the upload policies here.
* In addition, it is possible for the application to create several
* "identical" LocalResources as part of
* ContainerLaunchContext.setLocalResources with different symlinks.
* There is a corner case where these "identical" local resources have
* different upload policies. For that scenario, upload policy will be set to
* true as long as there is at least one LocalResource entry with
* upload policy set to true.
*/
private void storeSharedCacheUploadPolicy(LocalResourceRequest resourceRequest, Boolean uploadPolicy) {
Boolean storedUploadPolicy = resourcesUploadPolicies.get(resourceRequest);
if ((storedUploadPolicy == null) || ((!storedUploadPolicy) && uploadPolicy)) {
resourcesUploadPolicies.put(resourceRequest, uploadPolicy);
}
} | 3.26 |
hadoop_ResourceSet_resourceLocalized_rdh | /**
* Called when resource localized.
*
* @param request
* The original request for the localized resource
* @param location
* The path where the resource is localized
* @return The list of symlinks for the localized resources.
*/
public Set<String> resourceLocalized(LocalResourceRequest request, Path location) {
Set<String> v8 = pendingResources.remove(request);
if (v8 == null) {
return null;
} else {
for (String symlink : v8) {
localizedResources.put(symlink, location);
}
return v8;
}
} | 3.26 |
hadoop_ExtensionHelper_close_rdh | /**
* Close an extension if it is closeable.
* Any error raised is caught and logged.
*
* @param extension
* extension instance.
*/
public static void close(Object extension) {
ifBoundDTExtension(extension, v -> {
IOUtils.closeStreams(v);
return null;
});
} | 3.26 |
hadoop_ExtensionHelper_getUserAgentSuffix_rdh | /**
* Invoke {@link BoundDTExtension#getUserAgentSuffix()} or
* return the default value.
*
* @param extension
* extension to invoke
* @param def
* default if the class is of the wrong type.
* @return a user agent suffix
*/
public static String getUserAgentSuffix(Object extension, String def) {
return ifBoundDTExtension(extension, BoundDTExtension::getUserAgentSuffix).orElse(def);
}
/**
* Invoke {@link BoundDTExtension#getCanonicalServiceName()} | 3.26 |
hadoop_ExtensionHelper_bind_rdh | /**
* If the passed in extension class implements {@link BoundDTExtension}
* then it will have its {@link BoundDTExtension#bind(URI, Configuration)}
* method called.
*
* @param extension
* extension to examine and maybe invoke
* @param uri
* URI of the filesystem.
* @param conf
* configuration of this extension.
* @throws IOException
* failure during binding.
*/
public static void bind(Object extension, URI uri, Configuration conf) throws IOException {
if (extension instanceof BoundDTExtension) {
((BoundDTExtension) (extension)).bind(uri, conf);
}
} | 3.26 |
hadoop_VolumeStates_addVolumeIfAbsent_rdh | /**
* Add volume if it is not yet added.
* If a new volume is added with the same {@link VolumeId}
* as an existing volume, the existing volume will be returned.
*
* @param volume
* volume to add
* @return volume added or existing volume
*/
public Volume addVolumeIfAbsent(Volume volume) {
if (volume.getVolumeId() != null) {
return volumeStates.putIfAbsent(volume.getVolumeId(), volume);
} else {
// for dynamical provisioned volumes,
// the volume ID might not be available at time being.
// we can make up one from the combination of driver+volumeName+timestamp;
// once the real volume ID is generated, we should replace it.
return volume;
}
} | 3.26 |
hadoop_ByteBufferEncodingState_convertToByteArrayState_rdh | /**
* Convert to a ByteArrayEncodingState when it's backed by on-heap arrays.
*/
ByteArrayEncodingState convertToByteArrayState() {
int[] inputOffsets = new int[inputs.length];
int[] outputOffsets = new int[outputs.length];
byte[][] newInputs = new byte[inputs.length][];
byte[][] newOutputs = new byte[outputs.length][];
ByteBuffer buffer;
for (int i = 0; i < inputs.length; ++i) {
buffer = inputs[i];
inputOffsets[i] = buffer.arrayOffset() + buffer.position();
newInputs[i] = buffer.array();
}
for (int i = 0; i < outputs.length; ++i) {
buffer = outputs[i];
outputOffsets[i] = buffer.arrayOffset() + buffer.position();
newOutputs[i] = buffer.array();
}
ByteArrayEncodingState baeState = new ByteArrayEncodingState(encoder, encodeLength, newInputs, inputOffsets, newOutputs, outputOffsets);
return baeState;
} | 3.26 |
hadoop_ByteBufferEncodingState_checkBuffers_rdh | /**
* Check and ensure the buffers are of the desired length and type, direct
* buffers or not.
*
* @param buffers
* the buffers to check
*/
void checkBuffers(ByteBuffer[] buffers) {
for (ByteBuffer buffer : buffers) {
if (buffer == null) {
throw new HadoopIllegalArgumentException("Invalid buffer found, not allowing null");
}
if (buffer.remaining() != encodeLength) {
throw new HadoopIllegalArgumentException("Invalid buffer, not of length " + encodeLength);
}
if (buffer.isDirect() != usingDirectBuffer) {
throw new HadoopIllegalArgumentException("Invalid buffer, isDirect should be " + usingDirectBuffer);
}
}
} | 3.26 |
hadoop_SWebHdfs_createSWebHdfsFileSystem_rdh | /**
* Returns a new {@link SWebHdfsFileSystem}, with the given configuration.
*
* @param conf
* configuration
* @return new SWebHdfsFileSystem
*/
private static SWebHdfsFileSystem createSWebHdfsFileSystem(Configuration conf) {
SWebHdfsFileSystem fs = new SWebHdfsFileSystem();
fs.setConf(conf);
return fs;
} | 3.26 |
hadoop_TextInputWriter_writeUTF8_rdh | // Write an object to the output stream using UTF-8 encoding
protected void writeUTF8(Object object) throws IOException {
byte[] bval;
int v1;
if (object instanceof BytesWritable) {
BytesWritable val = ((BytesWritable) (object));
bval = val.getBytes();
v1 = val.getLength();
} else if (object instanceof Text) {
  Text val = ((Text) (object));
  bval = val.getBytes();
  v1 = val.getLength();
} else {
String sval = object.toString();
bval = sval.getBytes(StandardCharsets.UTF_8);
v1 = bval.length;
}
clientOut.write(bval, 0, v1);
} | 3.26 |
hadoop_GetClusterNodeLabelsResponse_newInstance_rdh | /**
* Creates a new instance.
*
* @param labels
* Node labels
* @return response
* @deprecated Use {@link #newInstance(List)} instead.
*/
@Deprecated
public static GetClusterNodeLabelsResponse newInstance(Set<String> labels) {
  List<NodeLabel> list = new ArrayList<>();
for (String label : labels) {
list.add(NodeLabel.newInstance(label));
}
return newInstance(list);
} | 3.26 |
hadoop_PartHandle_toByteArray_rdh | /**
*
* @return Serialized form in bytes.
*/
default byte[] toByteArray() {
ByteBuffer bb = bytes();
byte[] ret = new byte[bb.remaining()];
bb.get(ret);
return ret;
} | 3.26 |
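The default toByteArray() above is just a defensive copy of the handle's ByteBuffer into a fresh array. A tiny illustration of that copy in isolation (the "upload-part-01" payload is made up):

```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

/** Minimal illustration of the ByteBuffer-to-array copy used by toByteArray(). */
public class PartHandleCopySketch {
    public static void main(String[] args) {
        ByteBuffer bb = ByteBuffer.wrap("upload-part-01".getBytes(StandardCharsets.UTF_8));
        byte[] ret = new byte[bb.remaining()];
        bb.get(ret);                                   // consumes the buffer's remaining bytes
        System.out.println(new String(ret, StandardCharsets.UTF_8));
    }
}
```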
hadoop_MapReduceBase_configure_rdh | /**
* Default implementation that does nothing.
*/
public void configure(JobConf job) {
} | 3.26 |
hadoop_MapReduceBase_close_rdh | /**
* Default implementation that does nothing.
*/
public void close() throws IOException {
} | 3.26 |
hadoop_AuxiliaryService_setAuxiliaryLocalPathHandler_rdh | /**
* Method that sets the local dirs path handler for this Auxiliary Service.
*
* @param auxiliaryLocalPathHandler
* the pathHandler for this auxiliary service
*/
public void setAuxiliaryLocalPathHandler(AuxiliaryLocalPathHandler auxiliaryLocalPathHandler) {
this.auxiliaryLocalPathHandler = auxiliaryLocalPathHandler;
} | 3.26 |
hadoop_AuxiliaryService_initializeContainer_rdh | /**
* A new container is started on this NodeManager. This is a signal to
* this {@link AuxiliaryService} about the container initialization.
* This method is called when the NodeManager receives the container launch
* command from the ApplicationMaster and before the container process is
* launched.
*
* @param initContainerContext
* context for the container's initialization
*/
public void initializeContainer(ContainerInitializationContext initContainerContext) {
} | 3.26 |
hadoop_AuxiliaryService_stopContainer_rdh | /**
* A container is finishing on this NodeManager. This is a signal to this
* {@link AuxiliaryService} about the same.
*
* @param stopContainerContext
* context for the container termination
*/
public void stopContainer(ContainerTerminationContext stopContainerContext) {
} | 3.26 |
hadoop_AuxiliaryService_setRecoveryPath_rdh | /**
* Set the path for this auxiliary service to use for storing state
* that will be used during recovery.
*
* @param recoveryPath
* where recoverable state should be stored
*/
public void setRecoveryPath(Path recoveryPath) {
this.f0 = recoveryPath;
} | 3.26 |
hadoop_ResourceTypeInfo_copy_rdh | /**
* Copies the content of the source ResourceTypeInfo object to the
* destination object, overwriting all properties of the destination object.
*
* @param src
* Source ResourceTypeInfo object
* @param dst
* Destination ResourceTypeInfo object
*/
public static void copy(ResourceTypeInfo src, ResourceTypeInfo dst) {
dst.setName(src.getName());
dst.setResourceType(src.getResourceType());
dst.setDefaultUnit(src.getDefaultUnit());
} | 3.26 |
hadoop_ResourceTypeInfo_newInstance_rdh | /**
* Create a new instance of ResourceTypeInfo from name, units.
*
* @param name
* name of resource type
* @param units
* units of resource type
* @return the new ResourceTypeInfo object
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public static ResourceTypeInfo newInstance(String name, String units) {
return ResourceTypeInfo.newInstance(name, units, ResourceTypes.COUNTABLE);
} | 3.26 |
hadoop_OpportunisticContainerContext_matchAllocationToOutstandingRequest_rdh | /**
* This method matches a returned list of Container Allocations to any
* outstanding OPPORTUNISTIC ResourceRequest.
*
* @param capability
* Capability
* @param allocations
* Allocations.
*/
public void matchAllocationToOutstandingRequest(Resource capability, List<Allocation> allocations) {
for (OpportunisticContainerAllocator.Allocation allocation : allocations) {
SchedulerRequestKey schedulerKey = SchedulerRequestKey.extractFrom(allocation.getContainer());
Map<Resource, EnrichedResourceRequest> asks = outstandingOpReqs.get(schedulerKey);
if (asks == null) {
  continue;
}
EnrichedResourceRequest err = asks.get(capability);
if (err != null) {
int numContainers = err.getRequest().getNumContainers();
numContainers--;
err.getRequest().setNumContainers(numContainers);
if (numContainers == 0) {
asks.remove(capability);
if (asks.size() == 0) {
outstandingOpReqs.remove(schedulerKey);
}
} else if (!ResourceRequest.isAnyLocation(allocation.getResourceName())) {
err.removeLocation(allocation.getResourceName());
}
getOppSchedulerMetrics().addAllocateOLatencyEntry(Time.monotonicNow() - err.getTimestamp());
}
}
} | 3.26 |
hadoop_OpportunisticContainerContext_addToOutstandingReqs_rdh | /**
* Takes a list of ResourceRequests (asks), extracts the key information viz.
* (Priority, ResourceName, Capability) and adds to the outstanding
* OPPORTUNISTIC outstandingOpReqs map. The nested map is required to enforce
* the current YARN constraint that only a single ResourceRequest can exist at
* a give Priority and Capability.
*
* @param resourceAsks
* the list with the {@link ResourceRequest}s
*/
public void addToOutstandingReqs(List<ResourceRequest> resourceAsks) {
for (ResourceRequest request : resourceAsks) {
SchedulerRequestKey schedulerKey = SchedulerRequestKey.create(request);
Map<Resource, EnrichedResourceRequest> reqMap = outstandingOpReqs.get(schedulerKey);
if (reqMap == null) {
  reqMap = new HashMap<>();
outstandingOpReqs.put(schedulerKey, reqMap);
}
EnrichedResourceRequest eReq = reqMap.get(request.getCapability());
if (eReq == null) {
eReq = new EnrichedResourceRequest(request);
reqMap.put(request.getCapability(), eReq);
}
// Set numContainers only for ANY request
if (ResourceRequest.isAnyLocation(request.getResourceName())) {
eReq.getRequest().setResourceName(ResourceRequest.ANY);
eReq.getRequest().setNumContainers(request.getNumContainers());
} else {
eReq.addLocation(request.getResourceName(), request.getNumContainers());
}
if (ResourceRequest.isAnyLocation(request.getResourceName())) {
LOG.info("# of outstandingOpReqs in ANY (at priority = "
    + schedulerKey.getPriority()
    + ", allocationReqId = " + schedulerKey.getAllocationRequestId()
    + ", with capability = " + request.getCapability() + " ) : "
    + ", with location = " + request.getResourceName() + " ) : "
    + ", numContainers = " + eReq.getRequest().getNumContainers());
}
}
} | 3.26 |
hadoop_DiskBalancerWorkItem_setMaxDiskErrors_rdh | /**
* Sets maximum disk errors to tolerate before we fail this copy step.
*
* @param maxDiskErrors
* long
*/
public void setMaxDiskErrors(long maxDiskErrors) {
this.maxDiskErrors = maxDiskErrors;
} | 3.26 |
hadoop_DiskBalancerWorkItem_setSecondsElapsed_rdh | /**
* Sets number of seconds elapsed.
*
* This is updated whenever we update the other counters.
*
* @param secondsElapsed
* - seconds elapsed.
*/
public void setSecondsElapsed(long secondsElapsed) {
this.secondsElapsed = secondsElapsed;
} | 3.26 |
hadoop_DiskBalancerWorkItem_toJson_rdh | /**
* returns a serialized json string.
*
* @return String - json
* @throws IOException
*/
public String toJson() throws IOException {
return MAPPER.writeValueAsString(this);
} | 3.26 |
hadoop_DiskBalancerWorkItem_setStartTime_rdh | /**
* Sets the Start time.
*
* @param startTime
* - Time stamp for start of execution.
*/
public void setStartTime(long startTime) {
this.startTime = startTime;
} | 3.26 |