name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68)
---|---|---|
hadoop_CachingGetSpaceUsed_getJitter_rdh | /**
* Randomize the refresh interval timing by this amount, the actual interval will be chosen
* uniformly between {@code interval-jitter} and {@code interval+jitter}.
*
* @return between interval-jitter and interval+jitter.
*/
@VisibleForTesting
public long getJitter() {
return jitter;
} | 3.26 |
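For illustration only, a minimal assumed sketch (not the actual CachingGetSpaceUsed code) of how a refresh thread could turn interval and jitter into the uniformly distributed sleep time the javadoc describes:

import java.util.concurrent.ThreadLocalRandom;

final class JitteredIntervalSketch {
  // Pick a sleep time uniformly in [interval - jitter, interval + jitter].
  static long nextRefreshMillis(long interval, long jitter) {
    if (jitter <= 0) {
      return interval;
    }
    return ThreadLocalRandom.current().nextLong(interval - jitter, interval + jitter + 1);
  }
}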
hadoop_CachingGetSpaceUsed_getUsed_rdh | /**
*
* @return an estimate of space used in the directory path.
*/
@Override
public long getUsed() throws IOException {
return Math.max(used.get(), 0);
} | 3.26 |
hadoop_CachingGetSpaceUsed_running_rdh | /**
* Is the background thread running.
*/
boolean running() {
  return running.get();
} | 3.26 |
hadoop_BoundedAppender_append_rdh | /**
* Append a {@link CharSequence} considering {@link #limit}, truncating
* from the head of {@code csq} or {@link #messages} when necessary.
*
* @param csq
* the {@link CharSequence} to append
* @return this
*/
public BoundedAppender append(final CharSequence csq) {
appendAndCount(csq);
checkAndCut();
return this;
} | 3.26 |
hadoop_BoundedAppender_length_rdh | /**
 * Get the current length of the messages, accounting for truncation,
 * excluding the header and ellipses.
*
* @return current length
*/
public int length() {
  return f0.length();
} | 3.26 |
hadoop_BoundedAppender_toString_rdh | /**
 * Get a string representation of the actual contents, also displaying a
 * header and ellipses when a truncation has occurred.
*
* @return String representation of the {@link #messages}
*/
@Override
public String toString() {
  if (f0.length() < totalCharacterCount) {
    return String.format(TRUNCATED_MESSAGES_TEMPLATE, f0.length(), totalCharacterCount, f0.toString());
  }
  return f0.toString();
} | 3.26 |
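A hedged usage sketch tying the three BoundedAppender methods above together; the constructor taking a character limit is assumed from the javadoc, not copied from the source:

// Assumed: BoundedAppender(int limit) keeps at most `limit` characters.
BoundedAppender diagnostics = new BoundedAppender(64);
diagnostics.append("container exited with status 143; ")
           .append("node manager restarted");
// length() counts only the retained characters (no header, no ellipses);
// toString() adds a "truncated" header once older text has been cut from the head.
System.out.println(diagnostics.length());
System.out.println(diagnostics);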
hadoop_LoggingStateChangeListener_stateChanged_rdh | /**
* Callback for a state change event: log it
*
* @param service
* the service that has changed.
*/
@Override
public void stateChanged(Service service) {
log.info((("Entry to state " + service.getServiceState()) + " for ")
+ service.getName());
} | 3.26 |
hadoop_MetricsAnnotations_makeSource_rdh | /**
 * Make a metrics source from an annotated object.
*
* @param source
* the annotated object.
* @return a metrics source
*/
public static MetricsSource makeSource(Object source) {
return new MetricsSourceBuilder(source, DefaultMetricsFactory.getAnnotatedMetricsFactory()).build();
} | 3.26 |
hadoop_Preconditions_checkState_rdh | /**
 * Check that an expression involving one or more parameters to the calling method is true.
*
* <p>The message of the exception is {@code msgSupplier.get()}.</p>
*
* @param expression
* a boolean expression
* @param msgSupplier
 * a {@link Supplier} whose {@link Supplier#get()} result is used as the
 * exception message if it can be evaluated; otherwise
 * the message is {@link #CHECK_STATE_EX_MESSAGE}
* @throws IllegalStateException
* if {@code expression} is false
*/
public static void checkState(final boolean expression, final Supplier<String> msgSupplier) {
if (!expression) {
String msg;
try {
// note that we can get NPE evaluating the message itself;
// but we do not want this to override the actual NPE.
msg = msgSupplier.get();
} catch (Exception e) {
LOG.debug("Error formatting message", e);
msg = CHECK_STATE_EX_MESSAGE;
}
throw new IllegalStateException(msg);
}
} | 3.26 |
hadoop_Preconditions_getDefaultNullMSG_rdh | /* @VisibleForTesting */
static String getDefaultNullMSG() {
  return f0;
} | 3.26 |
hadoop_Preconditions_getDefaultCheckArgumentMSG_rdh | /* @VisibleForTesting */
static String getDefaultCheckArgumentMSG() {
return CHECK_ARGUMENT_EX_MESSAGE;
} | 3.26 |
hadoop_Preconditions_checkArgument_rdh | /**
 * Check that an expression involving one or more parameters to the calling method is true.
*
* <p>The message of the exception is {@code msgSupplier.get()}.</p>
*
* @param expression
* a boolean expression
* @param msgSupplier
 * a {@link Supplier} whose {@link Supplier#get()} result is used as the
 * exception message if it can be evaluated; otherwise
 * the message is {@link #CHECK_ARGUMENT_EX_MESSAGE}
* @throws IllegalArgumentException
* if {@code expression} is false
*/
public static void checkArgument(final boolean expression, final Supplier<String> msgSupplier) {
if (!expression) {
String msg;
try {
// note that we can get NPE evaluating the message itself;
// but we do not want this to override the actual NPE.
msg = msgSupplier.get();
} catch (Exception e) {
  LOG.debug("Error formatting message", e);
msg = CHECK_ARGUMENT_EX_MESSAGE;
}
throw new IllegalArgumentException(msg);
}
} | 3.26 |
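An illustrative (assumed) caller of these Supplier-based checks; the point is that the message string is only built when the check fails, and an exception thrown by the supplier itself is logged at debug level instead of masking the real failure:

final long bytesRead = 1024;
final boolean running = true;
Preconditions.checkArgument(bytesRead >= 0,
    () -> "bytesRead must be non-negative but was " + bytesRead);
Preconditions.checkState(running,
    () -> String.format("service %s is not running", "demo-service"));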
hadoop_Preconditions_getDefaultCheckStateMSG_rdh | /* @VisibleForTesting */
static String getDefaultCheckStateMSG() {
return CHECK_STATE_EX_MESSAGE;
} | 3.26 |
hadoop_Preconditions_checkNotNull_rdh | /**
* Preconditions that the specified argument is not {@code null},
 * throwing a NullPointerException otherwise.
*
* <p>The message of the exception is {@code msgSupplier.get()}.</p>
*
* @param <T>
* the object type
* @param obj
* the object to check
* @param msgSupplier
 * a {@link Supplier} whose {@link Supplier#get()} result is used as the
 * exception message if it can be evaluated; otherwise
 * the message is {@link #VALIDATE_IS_NOT_NULL_EX_MESSAGE}
* @return the validated object (never {@code null} for method chaining)
* @throws NullPointerException
* if the object is {@code null}
*/
public static <T> T checkNotNull(final T obj, final Supplier<String> msgSupplier) {
if (obj == null) {
String msg;
try {
// note that we can get NPE evaluating the message itself;
// but we do not want this to override the actual NPE.
msg = msgSupplier.get();
} catch (Exception e) {
// ideally we want to log the error to capture. This may cause log files
// to bloat. On the other hand, swallowing the exception may hide a bug
// in the caller. Debug level is a good compromise between the two
// concerns.
LOG.debug("Error formatting message", e);
msg = f0;
}
throw new NullPointerException(msg);
}
return obj;
} | 3.26 |
hadoop_AuxServiceRecord_launchTime_rdh | /**
* The time when the service was created, e.g. 2016-03-16T01:01:49.000Z.
 */
public AuxServiceRecord launchTime(Date time) {
  this.launchTime = (time == null) ? null : ((Date) (time.clone()));
return this;
} | 3.26 |
hadoop_AuxServiceRecord_name_rdh | /**
* A unique service name.
*/
public AuxServiceRecord name(String n) {
  this.name = n;
return this;
} | 3.26 |
hadoop_AuxServiceRecord_version_rdh | /**
* Version of the service.
*/
public AuxServiceRecord version(String v) {
this.version = v;
return this;
} | 3.26 |
hadoop_AuxServiceRecord_configuration_rdh | /**
 * Config properties of a service. Configurations provided at the
* service/global level are available to all the components. Specific
* properties can be overridden at the component level.
*/
public AuxServiceRecord configuration(AuxServiceConfiguration conf) {
this.configuration = conf;
return this;
} | 3.26 |
hadoop_AuxServiceRecord_toIndentedString_rdh | /**
* Convert the given object to string with each line indented by 4 spaces
* (except the first line).
*/
private String toIndentedString(Object o) {
  if (o == null) {
    return "null";
  }
  return o.toString().replace("\n", "\n    ");
} | 3.26 |
hadoop_AuxServiceRecord_description_rdh | /**
* Description of the service.
*/
public AuxServiceRecord description(String d) {
this.description = d;
return this;
} | 3.26 |
hadoop_LocalityMulticastAMRMProxyPolicy_getSubClusterForUnResolvedRequest_rdh | /**
* For requests whose location cannot be resolved, choose an active and
* enabled sub-cluster to forward this requestId to.
*/
private SubClusterId getSubClusterForUnResolvedRequest(long allocationId) {
if (unResolvedRequestLocation.containsKey(allocationId)) {
return unResolvedRequestLocation.get(allocationId);
}
int id = rand.nextInt(activeAndEnabledSC.size());
for (SubClusterId subclusterId : activeAndEnabledSC) {
if (id == 0) {
unResolvedRequestLocation.put(allocationId, subclusterId);
return subclusterId;
}
id--;
}
throw new RuntimeException((("Should not be here. activeAndEnabledSC size = " + activeAndEnabledSC.size()) + " id = ") + id);
}
/**
* Return all known subclusters associated with an allocation id.
*
* @param allocationId
* the allocation id considered
* @return the list of {@link SubClusterId} | 3.26 |
hadoop_LocalityMulticastAMRMProxyPolicy_addLocalizedNodeRR_rdh | /**
 * Add a localized node request to the answer, and keep track of statistics
 * on a per-allocation-id and per-subcluster basis.
*/
private void addLocalizedNodeRR(SubClusterId targetId, ResourceRequest rr) {
Preconditions.checkArgument(!ResourceRequest.isAnyLocation(rr.getResourceName()));
if (rr.getNumContainers() > 0) {
if (!countContainersPerRM.containsKey(rr.getAllocationRequestId())) {
countContainersPerRM.put(rr.getAllocationRequestId(), new HashMap<>());
}
if (!countContainersPerRM.get(rr.getAllocationRequestId()).containsKey(targetId)) {
countContainersPerRM.get(rr.getAllocationRequestId()).put(targetId, new AtomicLong(0));
}
countContainersPerRM.get(rr.getAllocationRequestId()).get(targetId).addAndGet(rr.getNumContainers());
if (!totNumLocalizedContainers.containsKey(rr.getAllocationRequestId())) {
totNumLocalizedContainers.put(rr.getAllocationRequestId(), new AtomicLong(0));
}
totNumLocalizedContainers.get(rr.getAllocationRequestId()).addAndGet(rr.getNumContainers());
}
internalAddToAnswer(targetId, rr, false);
} | 3.26 |
hadoop_LocalityMulticastAMRMProxyPolicy_prettyPrintRequests_rdh | /**
* Print a list of Resource Requests into a one line string.
*
* @param response
* list of ResourceRequest
* @param max
* number of ResourceRequest to print
* @return the printed one line string
 */
public static String prettyPrintRequests(List<ResourceRequest> response, int max) {
  StringBuilder builder = new StringBuilder();
  for (ResourceRequest rr : response) {
    builder.append("[id:").append(rr.getAllocationRequestId())
        .append(" loc:").append(rr.getResourceName())
        .append(" num:").append(rr.getNumContainers())
        .append(" pri:").append(rr.getPriority() != null ? rr.getPriority().getPriority() : -1)
        .append("], ");
    if (max != (-1)) {
      if ((max--) <= 0) {
        break;
      }
}
}
return builder.toString();
} | 3.26 |
hadoop_LocalityMulticastAMRMProxyPolicy_computeIntegerAssignment_rdh | /**
* Split the integer into bins according to the weights.
*
* @param totalNum
* total number of containers to split
* @param weightsList
* the weights for each subcluster
* @return the container allocation after split
* @throws YarnException
* if fails
*/
@VisibleForTesting
protected ArrayList<Integer> computeIntegerAssignment(int totalNum, ArrayList<Float> weightsList) throws YarnException {
int i;
int residue;
ArrayList<Integer> ret = new ArrayList<>();
float totalWeight = 0;
float totalNumFloat = totalNum;
if (weightsList.size() == 0) {
  return ret;
}
for (i = 0; i < weightsList.size(); i++) {
ret.add(0);
if (weightsList.get(i) > 0) {
totalWeight += weightsList.get(i);
}
}
if (totalWeight == 0) {
StringBuilder sb = new StringBuilder();
for (Float weight : weightsList) {
sb.append(weight + ", ");
}
throw new FederationPolicyException("No positive value found in weight array " + sb.toString());
}
// First pass, do flooring for all bins
residue = totalNum;
for (i = 0; i < weightsList.size(); i++) {
if (weightsList.get(i) > 0) {
int base = ((int) ((totalNumFloat * weightsList.get(i)) / totalWeight));
ret.set(i, ret.get(i) + base);
residue -= base;
  }
}
// By now residue < weights.length, assign one a time
for (i = 0; i < residue; i++) {
int index = FederationPolicyUtils.getWeightedRandom(weightsList);
ret.set(index, ret.get(index) + 1);
}
return ret;
} | 3.26 |
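A worked example (made-up numbers) of the two-pass split above:

// totalNum = 10, weightsList = [0.45, 0.35, 0.20], totalWeight = 1.0
// First pass (flooring): floor(10*0.45)=4, floor(10*0.35)=3, floor(10*0.20)=2 -> residue = 10 - 9 = 1
// Second pass: the single leftover container goes to one bin chosen by a weighted
// random draw, so a possible result is [5, 3, 2], [4, 4, 2] or [4, 3, 3].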
hadoop_LocalityMulticastAMRMProxyPolicy_m1_rdh | /**
* Return the set of sub-clusters that are both active and allowed by our
* policy (weight > 0).
*
* @return a set of active and enabled {@link SubClusterId}s
*/
private Set<SubClusterId> m1() {
return activeAndEnabledSC;
} | 3.26 |
hadoop_LocalityMulticastAMRMProxyPolicy_getNumLocalizedContainers_rdh | /**
* Returns the number of containers matching an allocation Id that are
* localized in the targetId subcluster.
*/
private long getNumLocalizedContainers(long allocationId, SubClusterId targetId) {
AtomicLong c = countContainersPerRM.get(allocationId).get(targetId);
return c == null ? 0 : c.get();
} | 3.26 |
hadoop_LocalityMulticastAMRMProxyPolicy_getAnswer_rdh | /**
* Return the answer accumulated so far.
*
* @return the answer
*/
private Map<SubClusterId, List<ResourceRequest>> getAnswer() {
Iterator<Entry<SubClusterId, List<ResourceRequest>>> answerIter = answer.entrySet().iterator();
// Remove redundant rack RR before returning the answer
while (answerIter.hasNext()) {
Entry<SubClusterId, List<ResourceRequest>> entry = answerIter.next();
SubClusterId scId = entry.getKey();
Set<Long> mask = maskForRackDeletion.get(scId);
if (mask != null) {
Iterator<ResourceRequest> rrIter = entry.getValue().iterator();
while (rrIter.hasNext()) {
ResourceRequest rr = rrIter.next();
if (!mask.contains(rr.getAllocationRequestId())) {
rrIter.remove();
}
}
}
if ((mask == null) || (entry.getValue().size() == 0)) {
answerIter.remove();
LOG.info("removing {} from output because it has only rack RR", scId);
}
}
return answer;
} | 3.26 |
hadoop_LocalityMulticastAMRMProxyPolicy_chooseSubClusterIdForMaxLoadSC_rdh | /**
* Check if the current target subcluster is over max load, and if it is
* reroute it.
*
* @param targetId
* the original target subcluster id
* @param maxThreshold
* the max load threshold to reroute
* @param activeAndEnabledSCs
* the list of active and enabled subclusters
* @return targetId if it is within maxThreshold, otherwise a new id
*/
private SubClusterId chooseSubClusterIdForMaxLoadSC(SubClusterId targetId, int maxThreshold, Set<SubClusterId> activeAndEnabledSCs) {
ArrayList<Float> weight = new ArrayList<>();
ArrayList<SubClusterId> scIds = new ArrayList<>();
int targetLoad = m0(targetId);
if ((targetLoad == (-1)) || (!activeAndEnabledSCs.contains(targetId))) {
// Probably a SC that's not active and enabled. Forcing a reroute
targetLoad = Integer.MAX_VALUE;
}
/* Prepare the weight for a random draw among all known SCs.
For SC with pending bigger than maxThreshold / 2, use maxThreshold /
pending as weight. We multiplied by maxThreshold so that the weight
won't be too small in value.
For SC with pending less than maxThreshold / 2, we cap the weight at 2
= (maxThreshold / (maxThreshold / 2)) so that SC with small pending
will not get a huge weight and thus get swamped.
*/
for (SubClusterId sc : activeAndEnabledSCs) {
int scLoad = m0(sc);
if (scLoad > targetLoad) {
// Never mind if it is not the most loaded SC
return targetId;
}
if (scLoad <= (maxThreshold / 2)) {
weight.add(2.0F);
} else {
weight.add(((float) (maxThreshold)) / scLoad);
}
scIds.add(sc);
}
if (weight.size() == 0) {
return targetId;
}
return scIds.get(FederationPolicyUtils.getWeightedRandom(weight));
} | 3.26 |
hadoop_LocalityMulticastAMRMProxyPolicy_getHeadroomWeighting_rdh | /**
* Compute the weighting based on available headroom. This is proportional to
* the available headroom memory announced by RM, or to 1/N for RMs we have
* not seen yet. If all RMs report zero headroom, we fallback to 1/N again.
*/
private float getHeadroomWeighting(SubClusterId targetId, AllocationBookkeeper allocationBookkeeper) {
// baseline weight for all RMs
float headroomWeighting = 1 / ((float) (allocationBookkeeper.m1().size()));
// if we have headroom information for this sub-cluster (and we are safe
// from /0 issues)
if (headroom.containsKey(targetId)
&& (allocationBookkeeper.totHeadroomMemory > 0)) {
// compute which portion of the RMs that are active/enabled have reported
// their headroom (needed as adjustment factor)
// (note: getActiveAndEnabledSC should never be null/zero)
float ratioHeadroomKnown =
allocationBookkeeper.totHeadRoomEnabledRMs /
((float) (allocationBookkeeper.m1().size()));
// headroomWeighting is the ratio of headroom memory in the targetId
// cluster / total memory. The ratioHeadroomKnown factor is applied to
// adjust for missing information and ensure sum of allocated containers
// closely approximate what the user asked (small excess).
headroomWeighting = (headroom.get(targetId).getMemorySize() / allocationBookkeeper.totHeadroomMemory) * ratioHeadroomKnown;
}
return headroomWeighting;
} | 3.26 |
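A small numeric illustration of the weighting above (all figures assumed): 4 sub-clusters are active and enabled, 2 of them have reported headroom totalling 8 GB, and the target sub-cluster reported 6 GB:

// baseline (no headroom known) = 1 / 4                 = 0.25
// ratioHeadroomKnown           = 2 / 4                 = 0.5
// headroomWeighting            = (6 GB / 8 GB) * 0.5   = 0.375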
hadoop_LocalityMulticastAMRMProxyPolicy_m0_rdh | /**
 * Get the load data of the subCluster.
*
* @param subClusterId
* subClusterId.
* @return The number of pending containers for the subCluster.
*/
private int m0(SubClusterId subClusterId) {
EnhancedHeadroom headroomData = this.enhancedHeadroom.get(subClusterId);
if (headroomData == null) {
return -1;
}
// Use new data from enhanced headroom
boolean useActiveCoreEnabled = conf.getBoolean(LOAD_BASED_SC_SELECTOR_USE_ACTIVE_CORE, DEFAULT_LOAD_BASED_SC_SELECTOR_USE_ACTIVE_CORE);
// If we consider the number of vCores in the subCluster
if (useActiveCoreEnabled) {
// If the vcore of the subCluster is less than or equal to 0,
// it means that containers cannot be scheduled to this subCluster,
// and we will return a very large number, indicating that the subCluster is unavailable.
if (headroomData.getTotalActiveCores() <= 0) {
return Integer.MAX_VALUE;
}
// Multiply by a constant factor, to ensure the numerator > denominator.
// We will normalize the PendingCount, using PendingCount * multiplier / TotalActiveCores.
long multiplier = conf.getLong(LOAD_BASED_SC_SELECTOR_MULTIPLIER, DEFAULT_LOAD_BASED_SC_SELECTOR_MULTIPLIER);
double v58 = headroomData.getNormalizedPendingCount(multiplier) / headroomData.getTotalActiveCores();
if (v58 > Integer.MAX_VALUE) {
return Integer.MAX_VALUE;
} else {
return ((int) (v58));
}
} else {
// If the number of vcores in the subCluster is not considered,
// we directly return the number of pending containers in the subCluster.
return headroomData.getTotalPendingCount();
}
} | 3.26 |
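An assumed numeric example of the normalization described in the comments above (the exact behaviour of getNormalizedPendingCount is inferred from those comments, not verified):

// pending containers = 3000, multiplier = 10000, total active cores = 2000
// normalized load = 3000 * 10000 / 2000 = 15000, returned as this sub-cluster's load
// with useActiveCoreEnabled = false the raw pending count, 3000, is returned instead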
hadoop_LocalityMulticastAMRMProxyPolicy_getLocalityBasedWeighting_rdh | /**
* Compute the weight to assign to a subcluster based on how many local
 * requests a subcluster is the target of.
 */
private float getLocalityBasedWeighting(long reqId, SubClusterId targetId, AllocationBookkeeper allocationBookkeeper) {
  float totWeight = allocationBookkeeper.getTotNumLocalizedContainers(reqId);
  float localWeight = allocationBookkeeper.getNumLocalizedContainers(reqId, targetId);
  return totWeight > 0 ? localWeight / totWeight : 0;
} | 3.26 |
hadoop_LocalityMulticastAMRMProxyPolicy_getTotNumLocalizedContainers_rdh | /**
 * Return the total number of containers coming from localized requests
* matching an allocation Id.
*/
private long getTotNumLocalizedContainers(long allocationId) {
AtomicLong c = totNumLocalizedContainers.get(allocationId);
return c == null ? 0 : c.get();
} | 3.26 |
hadoop_LocalityMulticastAMRMProxyPolicy_addAnyRR_rdh | /**
* Add an ANY request to the final answer.
*/
private void addAnyRR(SubClusterId targetId, ResourceRequest rr) {
  Preconditions.checkArgument(ResourceRequest.isAnyLocation(rr.getResourceName()));
internalAddToAnswer(targetId, rr, false);
} | 3.26 |
hadoop_LocalityMulticastAMRMProxyPolicy_getPolicyConfigWeighting_rdh | /**
* Compute the "weighting" to give to a sublcuster based on the configured
* policy weights (for the active subclusters).
*/
private float getPolicyConfigWeighting(SubClusterId targetId, AllocationBookkeeper allocationBookkeeper) {
float totWeight = allocationBookkeeper.totPolicyWeight;
Float v45 = allocationBookkeeper.policyWeights.get(targetId);
return (v45 != null) && (totWeight > 0) ? v45 / totWeight : 0;
} | 3.26 |
hadoop_LocalityMulticastAMRMProxyPolicy_routeNodeRequestIfNeeded_rdh | /**
 * When a certain subcluster is too loaded, reroute Node requests going there.
*
* @param targetId
* current subClusterId where request is sent
* @param maxThreshold
* threshold for Pending count
* @param activeAndEnabledSCs
* list of active sc
* @return subClusterId target sc id
*/
protected SubClusterId routeNodeRequestIfNeeded(SubClusterId targetId, int maxThreshold, Set<SubClusterId> activeAndEnabledSCs) {
// If targetId is not in the active and enabled SC list, reroute the traffic
if (activeAndEnabledSCs.contains(targetId)) {
int targetPendingCount = m0(targetId);
if ((targetPendingCount == (-1)) || (targetPendingCount < maxThreshold)) {
return targetId;
}
}
SubClusterId scId = chooseSubClusterIdForMaxLoadSC(targetId, maxThreshold, activeAndEnabledSCs);
return scId;
} | 3.26 |
hadoop_LocalityMulticastAMRMProxyPolicy_splitAnyRequests_rdh | /**
* It splits a list of non-localized resource requests among sub-clusters.
*/
private void splitAnyRequests(List<ResourceRequest> originalResourceRequests,
AllocationBookkeeper allocationBookkeeper) throws YarnException {
for (ResourceRequest resourceRequest : originalResourceRequests) {
// FIRST: pick the target set of subclusters (based on whether this RR
// is associated with other localized requests via an allocationId)
Long allocationId = resourceRequest.getAllocationRequestId();
Set<SubClusterId> targetSubclusters;
if (allocationBookkeeper.getSubClustersForId(allocationId) != null) {
targetSubclusters = allocationBookkeeper.getSubClustersForId(allocationId);
} else {
targetSubclusters = allocationBookkeeper.m1();
}
// SECOND: pick how much to ask each RM for each request
splitIndividualAny(resourceRequest, targetSubclusters, allocationBookkeeper);
}
} | 3.26 |
hadoop_LocalityMulticastAMRMProxyPolicy_addRackRR_rdh | /**
* Add a rack-local request to the final answer.
*/
private void addRackRR(SubClusterId targetId,
ResourceRequest rr) {
Preconditions.checkArgument(!ResourceRequest.isAnyLocation(rr.getResourceName()));
internalAddToAnswer(targetId, rr, true);
} | 3.26 |
hadoop_LocalityMulticastAMRMProxyPolicy_isActiveAndEnabled_rdh | /**
 * Returns true if the subcluster is both active and enabled.
*/
private boolean isActiveAndEnabled(SubClusterId targetId) {
if (targetId == null) {
return false;
} else {
return m1().contains(targetId);
}
} | 3.26 |
hadoop_LocalityMulticastAMRMProxyPolicy_splitIndividualAny_rdh | /**
* Return a projection of this ANY {@link ResourceRequest} that belongs to
* this sub-cluster. This is done based on the "count" of the containers that
 * require locality in each subcluster (if any) or based on the "weights" and
* headroom.
*/
private void splitIndividualAny(ResourceRequest originalResourceRequest, Set<SubClusterId> targetSubclusters, AllocationBookkeeper allocationBookkeeper) throws YarnException {
long allocationId = originalResourceRequest.getAllocationRequestId();
int numContainer = originalResourceRequest.getNumContainers();
// If the ANY request has 0 containers to begin with we must forward it to
// any RM we have previously contacted (this might be the user way
// to cancel a previous request).
if (numContainer == 0) {
for (SubClusterId targetId : headroom.keySet()) {
allocationBookkeeper.addAnyRR(targetId, originalResourceRequest);
}
return;
}
// List preserves iteration order
List<SubClusterId> targetSCs = new ArrayList<>(targetSubclusters);
// Compute the distribution weights
ArrayList<Float> weightsList = new ArrayList<>();
for (SubClusterId targetId : targetSCs) {
  // If ANY is associated with localized asks, split based on their ratio
  if (allocationBookkeeper.getSubClustersForId(allocationId) != null) {
    weightsList.add(getLocalityBasedWeighting(allocationId, targetId, allocationBookkeeper));
  } else {
    // split ANY based on load and policy configuration
    float headroomWeighting = getHeadroomWeighting(targetId, allocationBookkeeper);
    float policyWeighting = getPolicyConfigWeighting(targetId, allocationBookkeeper);
    // hrAlpha controls how much the headroom influences the decision
    weightsList.add((f0 * headroomWeighting) + ((1 - f0) * policyWeighting));
}
}
// Compute the integer container counts for each sub-cluster
ArrayList<Integer> containerNums = computeIntegerAssignment(numContainer, weightsList);
int i = 0;
for (SubClusterId targetId : targetSCs) {
  // if the calculated request is non-empty add it to the answer
  if (containerNums.get(i) > 0) {
    ResourceRequest out = ResourceRequest.clone(originalResourceRequest);
    out.setNumContainers(containerNums.get(i));
    if (ResourceRequest.isAnyLocation(out.getResourceName())) {
      allocationBookkeeper.addAnyRR(targetId, out);
    } else {
      allocationBookkeeper.addRackRR(targetId, out);
    }
  }
  i++;
}
} | 3.26 |
hadoop_StoreContextBuilder_setEnableCSE_rdh | /**
 * Set the client-side encryption flag.
*
* @param value
* value indicating if client side encryption is enabled or not.
* @return builder instance.
 */
public StoreContextBuilder setEnableCSE(boolean value) {
  isCSEEnabled = value;
  return this;
} | 3.26 |
hadoop_StoreContextBuilder_setAuditor_rdh | /**
* Set builder value.
*
* @param value
* new value
* @return the builder
*/
public StoreContextBuilder setAuditor(final AuditSpanSource<AuditSpanS3A> value) {
auditor = value;
return this;
} | 3.26 |
hadoop_PlacementConstraintManager_validateConstraint_rdh | /**
* Validate a placement constraint and the set of allocation tags that will
* enable it.
*
* @param sourceTags
* the associated allocation tags
* @param placementConstraint
* the constraint
* @return true if constraint and tags are valid
*/
default boolean validateConstraint(Set<String> sourceTags, PlacementConstraint placementConstraint) {
return true;
} | 3.26 |
hadoop_EntityColumnPrefix_getColumnPrefix_rdh | /**
*
* @return the column name value
*/
public String getColumnPrefix() {
return columnPrefix;
} | 3.26 |
hadoop_ReservationInterval_isOverlap_rdh | /**
* Returns whether the interval is active at the specified instant of time
*
* @param tick
* the instance of the time to check
* @return true if active, false otherwise
*/
public boolean isOverlap(long tick) {
return (startTime <= tick) && (tick <= endTime);
} | 3.26 |
hadoop_ReservationInterval_getEndTime_rdh | /**
* Get the end time of the reservation interval
*
* @return the endTime
*/
public long getEndTime() {
  return endTime;
} | 3.26 |
hadoop_ReservationInterval_getStartTime_rdh | /**
* Get the start time of the reservation interval
*
* @return the startTime
*/
public long getStartTime() {
  return startTime;
} | 3.26 |
hadoop_Times_elapsed_rdh | // A valid elapsed is supposed to be non-negative. If finished/current time
// is ahead of the started time, return -1 to indicate invalid elapsed time,
// and record a warning log.
public static long elapsed(long started, long finished, boolean isRunning) {
if ((finished > 0) && (started > 0)) {
long elapsed = finished - started;
if (elapsed >= 0) {
return elapsed;
} else {
LOG.warn((("Finished time " + finished) + " is ahead of started time ") + started);
return -1;
}
}
if (isRunning) {
long current = System.currentTimeMillis();
long elapsed = (started > 0) ? current - started : 0;
if (elapsed >= 0) {
return elapsed;
} else {
LOG.warn((("Current time " + current) + " is ahead of started time ") + started);
return -1;
}
} else {
return -1;
}
} | 3.26 |
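Hedged usage sketch of the helper above:

long started = System.currentTimeMillis();
// ... attempt runs ...
long finished = System.currentTimeMillis();
long completedRun = Times.elapsed(started, finished, false); // finished - started
long stillRunning = Times.elapsed(started, 0, true);         // measured against "now"
long invalid = Times.elapsed(0, 0, false);                    // -1, nothing to measure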
hadoop_Times_parseISO8601ToLocalTimeInMillis_rdh | /**
* Given ISO formatted string with format "yyyy-MM-dd'T'HH:mm:ss.SSSZ", return
* epoch time for local Time zone.
*
* @param isoString
* in format of "yyyy-MM-dd'T'HH:mm:ss.SSSZ".
* @return epoch time for local time zone.
* @throws ParseException
* if given ISO formatted string can not be parsed.
*/
public static long parseISO8601ToLocalTimeInMillis(String isoString) throws ParseException {
if (isoString == null) {
  throw new ParseException("Invalid input.", -1);
}
return Instant.from(ISO_OFFSET_DATE_TIME.parse(isoString)).toEpochMilli();
} | 3.26 |
hadoop_Times_formatISO8601_rdh | /**
 * Given a time stamp, returns an ISO-8601 formatted string in the format
* "yyyy-MM-dd'T'HH:mm:ss.SSSZ".
*
* @param ts
* to be formatted in ISO format.
* @return ISO 8601 formatted string.
*/
public static String formatISO8601(long ts) {
return ISO_OFFSET_DATE_TIME.format(Instant.ofEpochMilli(ts));
} | 3.26 |
hadoop_MutableGaugeInt_toString_rdh | /**
*
* @return the value of the metric
*/
public String toString() {
  return value.toString();
} | 3.26 |
hadoop_MutableGaugeInt_set_rdh | /**
* Set the value of the metric
*
* @param value
* to set
 */
public void set(int value) {
this.value.set(value);
setChanged();
} | 3.26 |
hadoop_MutableGaugeInt_decr_rdh | /**
* decrement by delta
*
* @param delta
* of the decrement
*/
public void decr(int delta) {
value.addAndGet(-delta);
setChanged();
} | 3.26 |
hadoop_MutableGaugeInt_incr_rdh | /**
* Increment by delta
*
* @param delta
* of the increment
*/
public void incr(int delta) {
value.addAndGet(delta);
setChanged();
} | 3.26 |
hadoop_BalancerBandwidthCommand_m0_rdh | /**
* Get current value of the max balancer bandwidth in bytes per second.
*
 * @return bandwidth Balancer bandwidth in bytes per second for this datanode.
*/
public long m0() {
return this.bandwidth;
} | 3.26 |
hadoop_QuotaUsage_getStorageTypeHeader_rdh | /**
 * Return the header with the storage types.
*
* @param storageTypes
* storage types.
* @return storage header string
*/
public static String getStorageTypeHeader(List<StorageType> storageTypes) {
StringBuilder header = new StringBuilder();
for (StorageType st : storageTypes) {
/* the field length is 13/17 for quota and remain quota
as the max length for quota name is ARCHIVE_QUOTA
and remain quota name REM_ARCHIVE_QUOTA
*/
String storageName = st.toString();
header.append(String.format(STORAGE_TYPE_SUMMARY_FORMAT, storageName + "_QUOTA", ("REM_" + storageName) + "_QUOTA"));
}
return header.toString();
} | 3.26 |
hadoop_QuotaUsage_m0_rdh | /**
* Return storage type consumed.
*
* @param type
* storage type.
* @return type consumed.
*/
public long m0(StorageType type) {
return typeConsumed != null ? typeConsumed[type.ordinal()] : 0L;
} | 3.26 |
hadoop_QuotaUsage_getFileAndDirectoryCount_rdh | /**
* Return the directory count.
*
* @return file and directory count.
*/
public long getFileAndDirectoryCount() {
return fileAndDirectoryCount;
} | 3.26 |
hadoop_QuotaUsage_getSpaceConsumed_rdh | /**
* Return (disk) space consumed.
*
* @return space consumed.
*/
public long getSpaceConsumed() {
return spaceConsumed;
} | 3.26 |
hadoop_QuotaUsage_isTypeQuotaSet_rdh | /**
* Return true if any storage type quota has been set.
*
 * @return true if any storage type quota has been set, false otherwise.
*/
public boolean isTypeQuotaSet() {
if (typeQuota != null) {
for (StorageType t : StorageType.getTypesSupportingQuota()) {
if (typeQuota[t.ordinal()] > 0L) {
return true;
}
}
}
return false;
} | 3.26 |
hadoop_QuotaUsage_getSpaceQuota_rdh | /**
* Return (disk) space quota.
*
* @return space quota.
*/
public long getSpaceQuota() {
return spaceQuota;
} | 3.26 |
hadoop_QuotaUsage_getHeader_rdh | /**
* Return the header of the output.
*
* @return the header of the output
*/
public static String getHeader() {
return QUOTA_HEADER;
} | 3.26 |
hadoop_QuotaUsage_toString_rdh | /**
* Return the string representation of the object in the output format.
 * If hOption is false, file sizes are returned in bytes;
 * if hOption is true, file sizes are returned in human-readable form.
*
* @param hOption
* a flag indicating if human readable output if to be used
* @param tOption
* type option.
* @param types
* storage types.
* @return the string representation of the object.
*/
public String toString(boolean hOption, boolean tOption, List<StorageType> types) {
if (tOption) {
return getTypesQuotaUsage(hOption, types);
}
return getQuotaUsage(hOption);
} | 3.26 |
hadoop_QuotaUsage_formatSize_rdh | /**
* Formats a size to be human readable or in bytes.
*
* @param size
* value to be formatted
* @param humanReadable
* flag indicating human readable or not
* @return String representation of the size
*/
private String formatSize(long size, boolean humanReadable) {
return humanReadable ? StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1) : String.valueOf(size);
} | 3.26 |
hadoop_QuotaUsage_getQuota_rdh | /**
* Return the directory quota.
*
* @return quota.
*/
public long getQuota() {
  return quota;
} | 3.26 |
hadoop_RouterQuotaManager_getPaths_rdh | /**
* Get children paths (can include itself) under specified federation path.
*
* @param parentPath
* Federated path.
* @return Set of children paths.
*/
public Set<String> getPaths(String parentPath) {
readLock.lock();
try {
String from = parentPath;
String to = parentPath + Character.MAX_VALUE;
SortedMap<String, RouterQuotaUsage> subMap = this.cache.subMap(from, to);
Set<String> validPaths = new HashSet<>();
if (subMap != null) {
for (String path : subMap.keySet()) {
if (isParentEntry(path, parentPath)) {
validPaths.add(path);
}
}
}
return validPaths;
} finally {
readLock.unlock();
}
} | 3.26 |
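The range trick above — bounding the submap with parentPath + Character.MAX_VALUE — can be shown in isolation with a plain TreeMap (paths and values here are invented):

import java.util.SortedMap;
import java.util.TreeMap;

public class SubMapRangeDemo {
  public static void main(String[] args) {
    TreeMap<String, Long> cache = new TreeMap<>();
    cache.put("/a", 1L);
    cache.put("/a/b", 2L);
    cache.put("/ab", 3L); // shares the "/a" prefix but is not a child path
    // every key >= "/a" and < "/a" + '\uffff' falls inside the range
    SortedMap<String, Long> candidates =
        cache.subMap("/a", "/a" + Character.MAX_VALUE);
    System.out.println(candidates.keySet()); // [/a, /a/b, /ab]
    // "/ab" survives the range scan, which is why getPaths() re-checks every
    // key with isParentEntry(path, parentPath) before accepting it.
  }
}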
hadoop_RouterQuotaManager_updateQuota_rdh | /**
* Update quota in cache. The usage will be preserved.
*
* @param path
* Mount table path.
* @param quota
* Corresponding quota value.
*/
public void updateQuota(String path, RouterQuotaUsage quota) {
writeLock.lock();
try {
RouterQuotaUsage.Builder builder = new RouterQuotaUsage.Builder().quota(quota.getQuota()).spaceQuota(quota.getSpaceQuota());
RouterQuotaUsage current = this.cache.get(path);
if (current != null) {
builder.fileAndDirectoryCount(current.getFileAndDirectoryCount()).spaceConsumed(current.getSpaceConsumed());
}
this.cache.put(path, builder.build());
} finally {
writeLock.unlock();
}
} | 3.26 |
hadoop_RouterQuotaManager_isQuotaSet_rdh | /**
* Check if the quota was set.
*
* @param quota
* the quota usage.
* @return True if the quota is set.
*/
public static boolean isQuotaSet(QuotaUsage quota) {
if (quota != null) {
long nsQuota = quota.getQuota();
long ssQuota = quota.getSpaceQuota();
// once nsQuota or ssQuota was set, this mount table is quota set
if (((nsQuota != HdfsConstants.QUOTA_RESET) || (ssQuota != HdfsConstants.QUOTA_RESET)) || Quota.orByStorageType(t -> quota.getTypeQuota(t) != HdfsConstants.QUOTA_RESET)) {
return true;
}
}
return false;
} | 3.26 |
hadoop_RouterQuotaManager_clear_rdh | /**
* Clean up the cache.
*/
public void clear() {
writeLock.lock();
try {
this.cache.clear();
} finally {
writeLock.unlock();
}
} | 3.26 |
hadoop_RouterQuotaManager_getQuotaUsage_rdh | /**
 * Get the quota usage of the nearest ancestor whose quota has been set.
*
* @param path
* The path being written.
* @return RouterQuotaUsage Quota usage.
*/
public RouterQuotaUsage getQuotaUsage(String path) {
readLock.lock();
try {
RouterQuotaUsage quotaUsage = this.cache.get(path);
if ((quotaUsage != null) && isQuotaSet(quotaUsage)) {
return quotaUsage;
}
// If not found, look for its parent path usage value.
int pos = path.lastIndexOf(Path.SEPARATOR);
if (pos != (-1)) {
String parentPath = path.substring(0, pos);
return getQuotaUsage(parentPath);
}
} finally {
readLock.unlock();
}
return null;
} | 3.26 |
hadoop_RouterQuotaManager_remove_rdh | /**
* Remove the entity from cache.
*
* @param path
* Mount table path.
*/
public void remove(String path) {
writeLock.lock();
try {
this.cache.remove(path);
} finally {
writeLock.unlock();
}
} | 3.26 |
hadoop_RouterQuotaManager_put_rdh | /**
* Put new entity into cache.
*
* @param path
* Mount table path.
* @param quotaUsage
* Corresponding cache value.
*/
public void put(String path, RouterQuotaUsage quotaUsage) {
writeLock.lock();
try {
this.cache.put(path, quotaUsage);
} finally {
writeLock.unlock();
}
} | 3.26 |
hadoop_RouterQuotaManager_isMountEntry_rdh | /**
* Is the path a mount entry.
*
* @param path
* the path.
* @return {@code true} if path is a mount entry; {@code false} otherwise.
*/
boolean isMountEntry(String path) {
readLock.lock();
try {
return this.cache.containsKey(path);
} finally {
readLock.unlock();
}
} | 3.26 |
hadoop_RouterQuotaManager_getAll_rdh | /**
* Get all the mount quota paths.
*
* @return All the mount quota paths.
*/
public Set<String> getAll() {
readLock.lock();
try {
return this.cache.keySet();
} finally {
readLock.unlock();
  }
} | 3.26 |
hadoop_RouterQuotaManager_getParentsContainingQuota_rdh | /**
* Get parent paths (including itself) and quotas of the specified federation
* path. Only parents containing quota are returned.
*
* @param childPath
* Federated path.
* @return TreeMap of parent paths and quotas.
*/
TreeMap<String, RouterQuotaUsage> getParentsContainingQuota(String childPath) {
TreeMap<String, RouterQuotaUsage> res = new TreeMap<>();
readLock.lock();
try {
Entry<String, RouterQuotaUsage> entry = this.cache.floorEntry(childPath);
while (entry != null) {
String mountPath = entry.getKey();
RouterQuotaUsage quota = entry.getValue();
if (isQuotaSet(quota) && isParentEntry(childPath, mountPath)) {
res.put(mountPath, quota);
}
entry = this.cache.lowerEntry(mountPath);
}
return res;
} finally {
readLock.unlock();
}
} | 3.26 |
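A traced example (assumed mount entries) of the floorEntry/lowerEntry walk above, with childPath = "/a/b/data" and cached entries /a, /a/b and /c:

// floorEntry("/a/b/data") -> "/a/b"  kept if its quota is set and it is a parent of the child
// lowerEntry("/a/b")      -> "/a"    kept on the same conditions
// lowerEntry("/a")        -> null    the walk stops
// "/c" is never visited because it sorts above "/a/b/data" in the TreeMap.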
hadoop_Tristate_isBoolean_rdh | /**
* Does this value map to a boolean.
*
* @return true if the state is one of true or false.
*/
public boolean isBoolean() {
  return mapping.isPresent();
} | 3.26 |
hadoop_Tristate_m0_rdh | /**
* Build a tristate from a boolean.
*
* @param b
* source optional
* @return a tristate derived from the argument.
*/
public static Tristate m0(Optional<Boolean> b) {
return b.map(Tristate::fromBool).orElse(UNKNOWN);
} | 3.26 |
hadoop_UriUtils_extractAccountNameFromHostName_rdh | /**
* Extracts the account name from the host name.
*
* @param hostName
* the fully-qualified domain name of the storage service
* endpoint (e.g. {account}.dfs.core.windows.net.
* @return the storage service account name.
*/
public static String extractAccountNameFromHostName(final String hostName) {
if ((hostName == null) || hostName.isEmpty()) {
return null;
}
if (!containsAbfsUrl(hostName)) {
return null;
}
String[] splitByDot = hostName.split("\\.");
if (splitByDot.length == 0) {
return null;
}
return splitByDot[0];
} | 3.26 |
hadoop_UriUtils_generateUniqueTestPath_rdh | /**
* Generate unique test path for multiple user tests.
*
* @return root test path
*/
public static String generateUniqueTestPath() {
String testUniqueForkId = System.getProperty("test.unique.fork.id");
return testUniqueForkId == null ? "/test" : ("/" + testUniqueForkId) + "/test";
} | 3.26 |
hadoop_UriUtils_maskUrlQueryParameters_rdh | /**
* Generic function to mask a set of query parameters partially/fully and
* return the resultant query string
*
* @param keyValueList
* List of NameValuePair instances for query keys/values
* @param queryParamsForFullMask
* values for these params will appear as "XXXX"
* @param queryParamsForPartialMask
* values will be masked with 'X', except for
* the last PARTIAL_MASK_VISIBLE_LEN characters
* @param queryLen
* to initialize StringBuilder for the masked query
* @return the masked url query part
*/
public static String maskUrlQueryParameters(List<NameValuePair> keyValueList, Set<String> queryParamsForFullMask, Set<String> queryParamsForPartialMask, int queryLen) {
  StringBuilder maskedUrl = new StringBuilder(queryLen);
  for (NameValuePair keyValuePair : keyValueList) {
    String key = keyValuePair.getName();
if (key.isEmpty()) {
throw new IllegalArgumentException("Query param key should not be empty");
}
String value = keyValuePair.getValue();
maskedUrl.append(key);
maskedUrl.append(EQUAL);
if ((value != null) && (!value.isEmpty())) {
// no mask
if (queryParamsForFullMask.contains(key)) {
maskedUrl.append(FULL_MASK);
} else if (queryParamsForPartialMask.contains(key)) {
int valueLen = value.length();
int maskedLen = (valueLen > PARTIAL_MASK_VISIBLE_LEN) ? PARTIAL_MASK_VISIBLE_LEN : valueLen / 2;
maskedUrl.append(value, 0, valueLen - maskedLen);
maskedUrl.append(StringUtils.repeat(CHAR_MASK, maskedLen));
} else {
maskedUrl.append(value);
}
}
maskedUrl.append(AND_MARK);
}
maskedUrl.deleteCharAt(maskedUrl.length() - 1);
return maskedUrl.toString();
} | 3.26 |
hadoop_UriUtils_containsAbfsUrl_rdh | /**
* Checks whether a string includes abfs url.
*
* @param string
* the string to check.
* @return true if string has abfs url.
*/
public static boolean containsAbfsUrl(final String string) {
if ((string == null) || string.isEmpty()) {
return false;
}
return ABFS_URI_PATTERN.matcher(string).matches();
} | 3.26 |
hadoop_HistoryServerStateStoreService_serviceInit_rdh | /**
* Initialize the state storage
*
* @param conf
* the configuration
* @throws IOException
*/
@Override
public void serviceInit(Configuration conf) throws IOException {
initStorage(conf);
} | 3.26 |
hadoop_HistoryServerStateStoreService_serviceStop_rdh | /**
* Shutdown the state storage.
*
* @throws IOException
*/
@Override
public void serviceStop() throws IOException {
closeStorage();
} | 3.26 |
hadoop_HistoryServerStateStoreService_serviceStart_rdh | /**
* Start the state storage for use
*
* @throws IOException
*/
@Override
public void serviceStart() throws IOException {
  startStorage();
} | 3.26 |
hadoop_DomainNameResolverFactory_newInstance_rdh | /**
* This function gets the instance based on the config.
*
* @param conf
* Configuration
* @param configKey
* config key name.
* @return Domain name resolver.
*/
public static DomainNameResolver newInstance(Configuration conf, String configKey) {
Class<? extends DomainNameResolver> resolverClass = conf.getClass(configKey, DNSDomainNameResolver.class, DomainNameResolver.class);
  return ReflectionUtils.newInstance(resolverClass, conf);
} | 3.26 |
hadoop_TimelineMetricCalculator_sum_rdh | /**
* Sum up two Numbers.
*
* @param n1
* Number n1
* @param n2
* Number n2
* @return Number represent to (n1 + n2).
*/
public static Number sum(Number n1, Number n2) {
if (n1 == null) {
return n2;
} else if (n2 == null) {
return n1;
}
if ((n1 instanceof Integer) || (n1 instanceof Long)) {
return n1.longValue() + n2.longValue();
}
if ((n1 instanceof Float) || (n1 instanceof Double)) {
  return n1.doubleValue() + n2.doubleValue();
}
// TODO throw warnings/exceptions for other types of number.
return null;
} | 3.26 |
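Assumed examples of the null and type handling above:

Number a = TimelineMetricCalculator.sum(5, null);    // 5  (a null operand is treated as absent)
Number b = TimelineMetricCalculator.sum(2L, 3L);     // 5L (integral types add as long)
Number c = TimelineMetricCalculator.sum(1.5d, 2.5d); // 4.0d (floating types add as double)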
hadoop_TimelineMetricCalculator_sub_rdh | /**
* Subtract operation between two Numbers.
*
* @param n1
* Number n1
* @param n2
* Number n2
* @return Number represent to (n1 - n2).
*/
public static Number sub(Number n1, Number n2) {
if (n1 == null) {
throw new YarnRuntimeException("Number to be subtracted shouldn't be null.");
} else if (n2 == null) {
return n1;
}
if ((n1 instanceof Integer) || (n1 instanceof Long)) {
return n1.longValue() - n2.longValue();
}
if ((n1 instanceof Float) || (n1 instanceof Double)) {
return n1.doubleValue() - n2.doubleValue();
}
// TODO throw warnings/exceptions for other types of number.
return null;
} | 3.26 |
hadoop_ManifestPrinter_println_rdh | /**
* Print a line to the output stream.
*
* @param format
* format string
* @param args
* arguments.
*/
private void println(String format, Object... args) {
out.format(format, args);
out.println();
} | 3.26 |
hadoop_ManifestPrinter_loadAndPrintManifest_rdh | /**
* Load and print a manifest.
*
* @param fs
* filesystem.
* @param path
* path
* @throws IOException
* failure to load
* @return the manifest
*/
public ManifestSuccessData loadAndPrintManifest(FileSystem fs, Path path) throws IOException {
// load the manifest
println("Manifest file: %s", path);
final ManifestSuccessData success = ManifestSuccessData.load(fs, path);
printManifest(success);
return success;
} | 3.26 |
hadoop_ManifestPrinter_field_rdh | /**
* Print a field, if non-null.
*
* @param name
* field name.
* @param value
* value.
*/
private void field(String name, Object value) {
if (value != null) {
println("%s: %s", name, value);
}
} | 3.26 |
hadoop_UpdateContainerSchedulerEvent_isIncrease_rdh | /**
* isIncrease.
*
* @return isIncrease.
*/
public boolean isIncrease() {
return containerEvent.isIncrease();
} | 3.26 |
hadoop_UpdateContainerSchedulerEvent_isExecTypeUpdate_rdh | /**
* isExecTypeUpdate.
*
* @return isExecTypeUpdate.
*/
public boolean isExecTypeUpdate() {
return containerEvent.isExecTypeUpdate();
} | 3.26 |
hadoop_UpdateContainerSchedulerEvent_getOriginalToken_rdh | /**
* Original Token before update.
*
* @return Container Token.
*/
public ContainerTokenIdentifier getOriginalToken() {
return this.originalToken;
} | 3.26 |
hadoop_UpdateContainerSchedulerEvent_isResourceChange_rdh | /**
* isResourceChange.
*
* @return isResourceChange.
*/
public boolean isResourceChange() {
return containerEvent.isResourceChange();
} | 3.26 |
hadoop_UpdateContainerSchedulerEvent_getUpdatedToken_rdh | /**
* Update Container Token.
*
* @return Container Token.
*/
public ContainerTokenIdentifier getUpdatedToken() {
return containerEvent.getUpdatedToken();
} | 3.26 |
hadoop_S3ARemoteObjectReader_read_rdh | /**
 * Starts reading at {@code offset} and reads up to {@code size} bytes into {@code buffer}.
*
* @param buffer
* the buffer into which data is returned
* @param offset
* the absolute offset into the underlying file where reading starts.
* @param size
* the number of bytes to be read.
* @return number of bytes actually read.
* @throws IOException
* if there is an error reading from the file.
* @throws IllegalArgumentException
* if buffer is null.
* @throws IllegalArgumentException
* if offset is outside of the range [0, file size].
* @throws IllegalArgumentException
* if size is zero or negative.
*/
public int read(ByteBuffer buffer, long offset, int size) throws IOException {
  Validate.checkNotNull(buffer, "buffer");
  Validate.checkWithinRange(offset, "offset", 0, this.remoteObject.size());
  Validate.checkPositiveInteger(size, "size");
  if (this.closed) {
    return -1;
  }
  int reqSize = ((int) (Math.min(size, this.remoteObject.size() - offset)));
  return readOneBlockWithRetries(buffer, offset, reqSize);
} | 3.26 |
hadoop_IOStatisticsStore_addSample_rdh | /**
* Add a statistics sample as a min, max and mean and count.
*
* @param key
* key to add.
* @param count
* count.
*/
default void addSample(String key, long count) {
incrementCounter(key, count);
addMeanStatisticSample(key, count);
addMaximumSample(key, count);
addMinimumSample(key, count);
} | 3.26 |
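The default method above fans one sample out to four statistics; spelled out with an assumed store instance and an invented key:

// store is an IOStatisticsStore obtained elsewhere.
store.addSample("stream_read_bytes", 4096);
// equivalent to:
// store.incrementCounter("stream_read_bytes", 4096);
// store.addMeanStatisticSample("stream_read_bytes", 4096);
// store.addMaximumSample("stream_read_bytes", 4096);
// store.addMinimumSample("stream_read_bytes", 4096);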
hadoop_IOStatisticsStore_incrementCounter_rdh | /**
* Increment a counter by one.
*
* No-op if the counter is unknown.
*
* @param key
* statistics key
* @return old value or, if the counter is unknown: 0
*/
default long incrementCounter(String key) {
return incrementCounter(key, 1);
} | 3.26 |
hadoop_ExitStatus_getExitCode_rdh | /**
*
* @return the command line exit code.
*/
public int getExitCode() {
  return code;
} | 3.26 |
hadoop_ServletUtil_getParameter_rdh | /**
* Get a parameter from a ServletRequest.
* Return null if the parameter contains only white spaces.
*
* @param request
* request.
* @param name
* name.
* @return get a parameter from a ServletRequest.
*/
public static String getParameter(ServletRequest request, String name) {
String s = request.getParameter(name);
if (s == null) {
  return null;
}
s = s.trim();
return s.length() == 0 ? null : s;
} | 3.26 |
hadoop_ServletUtil_getRawPath_rdh | /**
* Parse the path component from the given request and return w/o decoding.
*
* @param request
* Http request to parse
* @param servletName
* the name of servlet that precedes the path
* @return path component, null if the default charset is not supported
*/
public static String getRawPath(final HttpServletRequest request, String servletName) {
Preconditions.checkArgument(request.getRequestURI().startsWith(servletName + "/"));
return request.getRequestURI().substring(servletName.length());
} | 3.26 |