Dataset schema (three fields per row):
  name          string, length 12 to 178
  code_snippet  string, length 8 to 36.5k
  score         float64, values ranging from 3.26 to 3.68
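For reference, the sketch below models a single row of this schema as a plain Java value class; the class and accessor names are illustrative assumptions, not something defined by the dataset itself.

// Minimal sketch of one dataset row (assumed names; only the columns above are given facts).
public final class ScoredSnippet {
    private final String name;        // e.g. "hadoop_AbstractDTService_getCanonicalUri_rdh"
    private final String codeSnippet; // the flattened Java method source, 8 to ~36.5k characters
    private final double score;       // quality score, observed between 3.26 and 3.68

    public ScoredSnippet(String name, String codeSnippet, double score) {
        this.name = name;
        this.codeSnippet = codeSnippet;
        this.score = score;
    }

    public String getName() { return name; }
    public String getCodeSnippet() { return codeSnippet; }
    public double getScore() { return score; }
}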
hadoop_AbstractDTService_getCanonicalUri_rdh
/** * Get the canonical URI of the filesystem, which is what is * used to identify the tokens. * * @return the URI. */ public URI getCanonicalUri() { return canonicalUri; }
3.26
hadoop_AbstractDTService_requireServiceState_rdh
/** * Require that the service is in a given state. * * @param state * desired state. * @throws IllegalStateException * if the condition is not met */ protected void requireServiceState(final STATE state) throws IllegalStateException { Preconditions.checkState(isInState(state), "Required State: %s; Actual State %s", state, getServiceState()); }
3.26
hadoop_AbstractDTService_bindToFileSystem_rdh
/** * Bind to the filesystem. * Subclasses can use this to perform their own binding operations - * but they must always call their superclass implementation. * This <i>Must</i> be called before calling {@code init()}. * * <b>Important:</b> * This binding will happen during FileSystem.initialize(); the FS * is not live for actual use and will not yet have interacted with * AWS services. * * @param uri * the canonical URI of the FS. * @param context * store context * @param delegationOperations * delegation operations * @throws IOException * failure. */ public void bindToFileSystem(final URI uri, final StoreContext context, final DelegationOperations delegationOperations) throws IOException { requireServiceState(STATE.NOTINITED); Preconditions.checkState(canonicalUri == null, "bindToFileSystem called twice"); this.canonicalUri = requireNonNull(uri); this.storeContext = requireNonNull(context); this.owner = context.getOwner(); this.policyProvider = delegationOperations; }
3.26
hadoop_CommonCallableSupplier_maybeAwaitCompletion_rdh
/** * Block awaiting completion for any non-null future passed in; * No-op if a null arg was supplied. * * @param future * future * @throws IOException * if one of the called futures raised an IOE. * @throws RuntimeException * if one of the futures raised one. */ public static void maybeAwaitCompletion(@Nullable final CompletableFuture<Void> future) throws IOException { if (future != null) { waitForCompletion(future); } }
3.26
hadoop_CommonCallableSupplier_waitForCompletionIgnoringExceptions_rdh
/** * Wait for a single future to complete, ignoring exceptions raised. * * @param future * future to wait for. * @param <T> * Generics Type T. */ public static <T> void waitForCompletionIgnoringExceptions(@Nullable final CompletableFuture<T> future) { if (future != null) { try (DurationInfo ignore = new DurationInfo(LOG, false, "Waiting for task completion")) { future.join(); } catch (Exception e) { LOG.debug("Ignoring exception raised in task completion: ", e); }}}
3.26
hadoop_CommonCallableSupplier_waitForCompletion_rdh
/** * Wait for a single future to complete, extracting IOEs afterwards. * * @param <T> * Generics Type T. * @param future * future to wait for. * @throws IOException * if one of the called futures raised an IOE. * @throws RuntimeException * if one of the futures raised one. */ public static <T> void waitForCompletion(final CompletableFuture<T> future) throws IOException { try (DurationInfo ignore = new DurationInfo(LOG, false, "Waiting for task completion")) { future.join(); } catch (CancellationException e) { throw new IOException(e); } catch (CompletionException e) { raiseInnerCause(e); } }
3.26
hadoop_CommonCallableSupplier_submit_rdh
/** * Submit a callable into a completable future. * RTEs are rethrown. * Non RTEs are caught and wrapped; IOExceptions to * {@code RuntimeIOException} instances. * * @param executor * executor. * @param call * call to invoke * @param <T> * type * @return the future to wait for */ @SuppressWarnings("unchecked") public static <T> CompletableFuture<T> submit(final Executor executor, final Callable<T> call) { return CompletableFuture.supplyAsync(new CommonCallableSupplier<T>(call), executor);}
3.26
hadoop_DefaultAuditLogger_setCallerContextEnabled_rdh
/** * Enable or disable CallerContext. * * @param value * true, enable CallerContext, otherwise false to disable it. */ void setCallerContextEnabled(final boolean value) { isCallerContextEnabled = value; }
3.26
hadoop_AbfsRestOperation_executeHttpOperation_rdh
/** * Executes a single HTTP operation to complete the REST operation. If it * fails, there may be a retry. The retryCount is incremented with each * attempt. */ private boolean executeHttpOperation(final int retryCount, TracingContext tracingContext) throws AzureBlobFileSystemException { AbfsHttpOperation httpOperation; try { // initialize the HTTP request and open the connection httpOperation = createHttpOperation(); incrementCounter(AbfsStatistic.CONNECTIONS_MADE, 1); tracingContext.constructHeader(httpOperation, failureReason); signRequest(httpOperation, hasRequestBody ? f1 : 0); } catch (IOException e) { LOG.debug("Auth failure: {}, {}", method, url); throw new AbfsRestOperationException(-1, null, "Auth failure: " + e.getMessage(), e); } try { // dump the headers AbfsIoUtils.dumpHeadersToDebugLog("Request Headers", httpOperation.getConnection().getRequestProperties()); intercept.sendingRequest(operationType, abfsCounters); if (hasRequestBody) { // HttpUrlConnection requires httpOperation.sendRequest(buffer, bufferOffset, f1); incrementCounter(AbfsStatistic.SEND_REQUESTS, 1); incrementCounter(AbfsStatistic.BYTES_SENT, f1); } httpOperation.processResponse(buffer, bufferOffset, f1); incrementCounter(AbfsStatistic.GET_RESPONSES, 1); // Only increment bytesReceived counter when the status code is 2XX. if ((httpOperation.getStatusCode() >= HttpURLConnection.HTTP_OK) && (httpOperation.getStatusCode() <= HttpURLConnection.HTTP_PARTIAL)) { incrementCounter(AbfsStatistic.BYTES_RECEIVED, httpOperation.getBytesReceived()); } else if (httpOperation.getStatusCode() == HttpURLConnection.HTTP_UNAVAILABLE) { incrementCounter(AbfsStatistic.SERVER_UNAVAILABLE, 1); }} catch (UnknownHostException ex) { String hostname = null; hostname = httpOperation.getHost(); failureReason = RetryReason.getAbbreviation(ex, null, null); LOG.warn("Unknown host name: {}. Retrying to resolve the host name...", hostname); if (!f0.getRetryPolicy().shouldRetry(retryCount, -1)) { throw new InvalidAbfsRestOperationException(ex, retryCount); } return false; } catch (IOException ex) { if (LOG.isDebugEnabled()) { LOG.debug("HttpRequestFailure: {}, {}", httpOperation, ex); } failureReason = RetryReason.getAbbreviation(ex, -1, ""); if (!f0.getRetryPolicy().shouldRetry(retryCount, -1)) { throw new InvalidAbfsRestOperationException(ex, retryCount); } return false; } finally { int status = httpOperation.getStatusCode(); /* A status less than 300 (2xx range) or greater than or equal to 500 (5xx range) should contribute to throttling metrics being updated. Less than 200 or greater than or equal to 500 show failed operations. 2xx range contributes to successful operations. 3xx range is for redirects and 4xx range is for user errors. These should not be a part of throttling backoff computation. */boolean updateMetricsResponseCode = (status < HttpURLConnection.HTTP_MULT_CHOICE) || (status >= HttpURLConnection.HTTP_INTERNAL_ERROR); if (updateMetricsResponseCode) { intercept.updateMetrics(operationType, httpOperation); } } LOG.debug("HttpRequest: {}: {}", operationType, httpOperation); if (f0.getRetryPolicy().shouldRetry(retryCount, httpOperation.getStatusCode())) { int status = httpOperation.getStatusCode(); failureReason = RetryReason.getAbbreviation(null, status, httpOperation.getStorageErrorMessage()); return false; } result = httpOperation; return true;}
3.26
hadoop_AbfsRestOperation_hasResult_rdh
/** * Checks if there is a non-null HTTP response. * * @return true if there is a non-null HTTP response from the ABFS call. */ public boolean hasResult() { return result != null; }
3.26
hadoop_AbfsRestOperation_incrementCounter_rdh
/** * Incrementing Abfs counters with a long value. * * @param statistic * the Abfs statistic that needs to be incremented. * @param value * the value to be incremented by. */ private void incrementCounter(AbfsStatistic statistic, long value) { if (abfsCounters != null) { abfsCounters.incrementCounter(statistic, value); } }
3.26
hadoop_AbfsRestOperation_createHttpOperation_rdh
/** * Creates new object of {@link AbfsHttpOperation} with the url, method, and * requestHeaders fields of the AbfsRestOperation object. */ @VisibleForTesting AbfsHttpOperation createHttpOperation() throws IOException { return new AbfsHttpOperation(url, method, requestHeaders); }
3.26
hadoop_AbfsRestOperation_completeExecute_rdh
/** * Executes the REST operation with retry, by issuing one or more * HTTP operations. * * @param tracingContext * TracingContext instance to track correlation IDs */ void completeExecute(TracingContext tracingContext) throws AzureBlobFileSystemException { // see if we have latency reports from the previous requests String latencyHeader = getClientLatency(); if ((latencyHeader != null) && (!latencyHeader.isEmpty())) { AbfsHttpHeader httpHeader = new AbfsHttpHeader(HttpHeaderConfigurations.X_MS_ABFS_CLIENT_LATENCY, latencyHeader); requestHeaders.add(httpHeader); } retryCount = 0; LOG.debug("First execution of REST operation - {}", operationType);while (!executeHttpOperation(retryCount, tracingContext)) { try { ++retryCount; tracingContext.setRetryCount(retryCount); LOG.debug("Retrying REST operation {}. RetryCount = {}", operationType, retryCount); Thread.sleep(f0.getRetryPolicy().getRetryInterval(retryCount)); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); } } int status = result.getStatusCode(); /* If even after exhausting all retries, the http status code has an invalid value it qualifies for InvalidAbfsRestOperationException. All http status code less than 1xx range are considered as invalid status codes. */ if (status < HTTP_CONTINUE) { throw new InvalidAbfsRestOperationException(null, retryCount); } if (status >= HttpURLConnection.HTTP_BAD_REQUEST) { throw new AbfsRestOperationException(result.getStatusCode(), result.getStorageErrorCode(), result.getStorageErrorMessage(), null, result); } LOG.trace("{} REST operation complete", operationType); }
3.26
hadoop_AbfsRestOperation_signRequest_rdh
/** * Sign an operation. * * @param httpOperation * operation to sign * @param bytesToSign * how many bytes to sign for shared key auth. * @throws IOException * failure */ @VisibleForTesting public void signRequest(final AbfsHttpOperation httpOperation, int bytesToSign) throws IOException { switch (f0.getAuthType()) { case Custom : case OAuth : LOG.debug("Authenticating request with OAuth2 access token"); httpOperation.getConnection().setRequestProperty(HttpHeaderConfigurations.AUTHORIZATION, f0.getAccessToken()); break; case SAS : // do nothing; the SAS token should already be appended to the query string httpOperation.setMaskForSAS();// mask sig/oid from url for logs break; case SharedKey : default : // sign the HTTP request LOG.debug("Signing request with shared key"); // sign the HTTP request f0.getSharedKeyCredentials().signRequest(httpOperation.getConnection(), bytesToSign); break; } }
3.26
hadoop_AbfsRestOperation_createNewTracingContext_rdh
/** * Creates a new Tracing context before entering the retry loop of a rest operation. * This will ensure all rest operations have unique * tracing context that will be used for all the retries. * * @param tracingContext * original tracingContext. * @return tracingContext new tracingContext object created from original one. */ @VisibleForTesting public TracingContext createNewTracingContext(final TracingContext tracingContext) { return new TracingContext(tracingContext); }
3.26
hadoop_AbfsRestOperation_execute_rdh
/** * Execute an AbfsRestOperation. Track the Duration of a request if * abfsCounters isn't null. * * @param tracingContext * TracingContext instance to track correlation IDs */ public void execute(TracingContext tracingContext) throws AzureBlobFileSystemException { // Since this might be a sub-sequential or parallel rest operation // triggered by a single file system call, using a new tracing context. lastUsedTracingContext = createNewTracingContext(tracingContext); try { IOStatisticsBinding.trackDurationOfInvocation(abfsCounters, AbfsStatistic.getStatNameFromHttpCall(method), () -> completeExecute(lastUsedTracingContext)); } catch (AzureBlobFileSystemException aze) { throw aze; } catch (IOException e) { throw new UncheckedIOException("Error while tracking Duration of an " + "AbfsRestOperation call", e); } }
3.26
hadoop_ContainerServiceRecordProcessor_createAInfo_rdh
/** * Creates a container A (IPv4) record descriptor. * * @param record * service record. * @throws Exception * if the descriptor creation yields an issue. */ protected void createAInfo(ServiceRecord record) throws Exception { AContainerRecordDescriptor recordInfo = new AContainerRecordDescriptor(getPath(), record); registerRecordDescriptor(Type.A, recordInfo); }
3.26
hadoop_ContainerServiceRecordProcessor_init_rdh
/** * Initializes the descriptor parameters. * * @param serviceRecord * the service record. */ @Override protected void init(ServiceRecord serviceRecord) { super.init(serviceRecord); try { this.setTarget(getIpv6Address(getTarget())); } catch (UnknownHostException e) { throw new IllegalStateException(e); } }
3.26
hadoop_ContainerServiceRecordProcessor_createTXTInfo_rdh
/** * Create a container TXT record descriptor. * * @param serviceRecord * the service record. * @throws Exception * if the descriptor creation yields an issue. */ protected void createTXTInfo(ServiceRecord serviceRecord) throws Exception { TXTContainerRecordDescriptor txtInfo = new TXTContainerRecordDescriptor(getPath(), serviceRecord); registerRecordDescriptor(Type.TXT, txtInfo); }
3.26
hadoop_ContainerServiceRecordProcessor_getRecordTypes_rdh
/** * Returns the record types associated with a container service record. * * @return the record type array */ @Override public int[] getRecordTypes() { return new int[]{ Type.A, Type.AAAA, Type.PTR, Type.TXT }; }
3.26
hadoop_ContainerServiceRecordProcessor_createAAAAInfo_rdh
/** * Creates a container AAAA (IPv6) record descriptor. * * @param record * the service record * @throws Exception * if the descriptor creation yields an issue. */ protected void createAAAAInfo(ServiceRecord record) throws Exception { AAAAContainerRecordDescriptor recordInfo = new AAAAContainerRecordDescriptor(getPath(), record); registerRecordDescriptor(Type.AAAA, recordInfo); }
3.26
hadoop_ContainerServiceRecordProcessor_createPTRInfo_rdh
/** * Creates a container PTR record descriptor. * * @param record * the service record. * @throws Exception * if the descriptor creation yields an issue. */ protected void createPTRInfo(ServiceRecord record) throws Exception { PTRContainerRecordDescriptor ptrInfo = new PTRContainerRecordDescriptor(getPath(), record); registerRecordDescriptor(Type.PTR, ptrInfo); }
3.26
hadoop_ContainerServiceRecordProcessor_initTypeToInfoMapping_rdh
/** * Initializes the DNS record type to descriptor mapping based on the * provided service record. * * @param serviceRecord * the registry service record. * @throws Exception * if an issue arises. */ @Override public void initTypeToInfoMapping(ServiceRecord serviceRecord) throws Exception { if (serviceRecord.get(YarnRegistryAttributes.YARN_IP) != null) { for (int type : getRecordTypes()) { switch (type) { case Type.A : createAInfo(serviceRecord); break; case Type.AAAA : createAAAAInfo(serviceRecord); break; case Type.PTR : createPTRInfo(serviceRecord);break; case Type.TXT : createTXTInfo(serviceRecord); break; default : throw new IllegalArgumentException("Unknown type " + type); } } } }
3.26
hadoop_StringInterner_m0_rdh
/** * Interns and returns a reference to the representative instance * for any of a collection of string instances that are equal to each other. * Retains strong reference to the instance, * thus preventing it from being garbage-collected. * * @param sample * string instance to be interned * @return strong reference to interned string instance */ public static String m0(String sample) { if (sample == null) { return null; } return STRONG_INTERNER.intern(sample); }
3.26
hadoop_StringInterner_weakIntern_rdh
/** * Interns and returns a reference to the representative instance * for any of a collection of string instances that are equal to each other. * Retains weak reference to the instance, * and so does not prevent it from being garbage-collected. * * @param sample * string instance to be interned * @return weak reference to interned string instance */ public static String weakIntern(String sample) { if (sample == null) { return null; } return sample.intern(); }
3.26
hadoop_StringInterner_internStringsInArray_rdh
/** * Interns all the strings in the given array in place, * returning the same array. * * @param strings * strings. * @return the same array. */ public static String[] internStringsInArray(String[] strings) { for (int i = 0; i < strings.length; i++) { strings[i] = weakIntern(strings[i]); } return strings; }
3.26
hadoop_CSQueueStore_remove_rdh
/** * Method for removing a queue from the store by name. * * @param name * A deterministic name for the queue to be removed */ public void remove(String name) { CSQueue queue = get(name); if (queue != null) { remove(queue); } }
3.26
hadoop_CSQueueStore_updateGetMapForShortName_rdh
/** * This method will update the getMap for the short name provided, depending * on how many queues are present with the same shortname. * * @param shortName * The short name of the queue to be updated */ private void updateGetMapForShortName(String shortName) { // we protect the root, since root can be both a full path and a short name // we simply deny adding root as a shortname to the getMap. if (shortName.equals(CapacitySchedulerConfiguration.ROOT)) { return; }// getting all queues with the same short name Set<String> fullNames = this.shortNameToLongNames.get(shortName); // if there is only one queue we add it to the getMap if ((fullNames != null) && (fullNames.size() == 1)) { getMap.put(shortName, fullNameQueues.get(fullNames.iterator().next())); } else { // in all other cases using only shortName cannot unambiguously identify // a queue getMap.remove(shortName); } }
3.26
hadoop_CSQueueStore_getQueues_rdh
/** * Returns all queues as a list. * * @return List containing all the queues */ public Collection<CSQueue> getQueues() { try { modificationLock.readLock().lock(); return ImmutableList.copyOf(fullNameQueues.values()); } finally { modificationLock.readLock().unlock(); } }
3.26
hadoop_CSQueueStore_getFullNameQueues_rdh
/** * This getter method will return an immutable map with all the queues with * queue path as the key. * * @return Map containing all queues and having path as key */ Map<String, CSQueue> getFullNameQueues() { return ImmutableMap.copyOf(fullNameQueues); }
3.26
hadoop_CSQueueStore_clear_rdh
/** * Clears the store, removes all queue references. */ public void clear() { try { modificationLock.writeLock().lock(); fullNameQueues.clear(); shortNameToLongNames.clear(); getMap.clear(); } finally { modificationLock.writeLock().unlock(); } }
3.26
hadoop_CSQueueStore_getByFullName_rdh
/** * Returns a queue by looking it up by its fully qualified name. * * @param fullName * The full name/path of the queue * @return The queue or null if none found */CSQueue getByFullName(String fullName) { if (fullName == null) { return null; }try { modificationLock.readLock().lock(); return fullNameQueues.getOrDefault(fullName, null); } finally { modificationLock.readLock().unlock(); } }
3.26
hadoop_CSQueueStore_getShortNameQueues_rdh
/** * This getter method will return an immutable map with all queues * which can be unambiguously referenced by short name, using short name * as the key. * * @return Map containing queues and having short name as key */ @VisibleForTesting Map<String, CSQueue> getShortNameQueues() { // this is not the most efficient way to create a short named list // but this method is only used in tests try { modificationLock.readLock().lock(); return ImmutableMap.copyOf(// making a map from the stream // filtering the list to contain only unambiguous short names // keeping queues where get(queueShortname) == queue // these are the ambiguous references // getting all queues from path->queue map fullNameQueues.entrySet().stream().filter(entry -> getMap.get(entry.getValue().getQueueShortName()) == entry.getValue()).collect(// using the queue's short name as key // using the queue as value Collectors.toMap(entry -> entry.getValue().getQueueShortName(), entry -> entry.getValue()))); } finally { modificationLock.readLock().unlock(); }}
3.26
hadoop_CSQueueStore_get_rdh
/** * Getter method for a queue; it can find queues by both full and * short names. * * @param name * Full or short name of the queue * @return the queue */ public CSQueue get(String name) { if (name == null) { return null; } try { modificationLock.readLock().lock(); return getMap.getOrDefault(name, null); } finally { modificationLock.readLock().unlock(); } }
3.26
hadoop_CSQueueStore_isAmbiguous_rdh
/** * Check for name ambiguity: returns true if there are at least two queues * with the same short name. Queue named "root" is protected, and it will * always return the root queue regardless of ambiguity. * * @param shortName * The short name to be checked for ambiguity * @return true if there are at least two queues found, false otherwise */ boolean isAmbiguous(String shortName) { if (shortName == null) { return false; } boolean ret = true; try { modificationLock.readLock().lock(); Set<String> fullNamesSet = this.shortNameToLongNames.get(shortName); if ((fullNamesSet == null) || (fullNamesSet.size() <= 1)) { ret = false; } } finally { modificationLock.readLock().unlock();} return ret; }
3.26
hadoop_CSQueueStore_add_rdh
/** * Method for adding a queue to the store. * * @param queue * Queue to be added */ public void add(CSQueue queue) { String fullName = queue.getQueuePath(); String shortName = queue.getQueueShortName(); try { modificationLock.writeLock().lock(); fullNameQueues.put(fullName, queue); getMap.put(fullName, queue); // we only update short queue name ambiguity for non root queues if (!shortName.equals(CapacitySchedulerConfiguration.ROOT)) { // getting or creating the ambiguity set for the current queue Set<String> fullNamesSet = this.shortNameToLongNames.getOrDefault(shortName, new HashSet<>()); // adding the full name to the queue fullNamesSet.add(fullName); this.shortNameToLongNames.put(shortName, fullNamesSet); } // updating the getMap references for the queue updateGetMapForShortName(shortName); } finally { modificationLock.writeLock().unlock(); } }
3.26
hadoop_ScriptBasedNodeLabelsProvider_cleanUp_rdh
/** * Method used to terminate the Node Labels Fetch script. */ @Override public void cleanUp() { if (f0 != null) { f0.cleanUp(); } }
3.26
hadoop_ScriptBasedNodeLabelsProvider_serviceInit_rdh
/* Method which initializes the values for the script path and interval time. */ @Override protected void serviceInit(Configuration conf) throws Exception { String nodeLabelsScriptPath = conf.get(YarnConfiguration.NM_SCRIPT_BASED_NODE_LABELS_PROVIDER_PATH); long v1 = conf.getLong(YarnConfiguration.NM_NODE_LABELS_PROVIDER_FETCH_TIMEOUT_MS, YarnConfiguration.DEFAULT_NM_NODE_LABELS_PROVIDER_FETCH_TIMEOUT_MS); String[] scriptArgs = conf.getStrings(YarnConfiguration.NM_SCRIPT_BASED_NODE_LABELS_PROVIDER_SCRIPT_OPTS, new String[]{ }); verifyConfiguredScript(nodeLabelsScriptPath); long taskInterval = conf.getLong(YarnConfiguration.NM_NODE_LABELS_PROVIDER_FETCH_INTERVAL_MS, YarnConfiguration.DEFAULT_NM_NODE_LABELS_PROVIDER_FETCH_INTERVAL_MS); this.setIntervalTime(taskInterval); this.f0 = new NodeLabelScriptRunner(nodeLabelsScriptPath, scriptArgs, v1, this);super.serviceInit(conf); }
3.26
hadoop_PlacementFactory_getPlacementRule_rdh
/** * Create a new {@link PlacementRule} based on the rule class from the * configuration. This is used to instantiate rules by the scheduler which * does not resolve the class before this call. * * @param ruleStr * The name of the class to instantiate * @param conf * The configuration object to set for the rule * @return Created class instance * @throws ClassNotFoundException * no definition for the class with the specified name could be found. */ public static PlacementRule getPlacementRule(String ruleStr, Configuration conf) throws ClassNotFoundException { Class<? extends PlacementRule> ruleClass = Class.forName(ruleStr).asSubclass(PlacementRule.class); LOG.info("Using PlacementRule implementation - " + ruleClass); return ReflectionUtils.newInstance(ruleClass, conf); }
3.26
hadoop_RmSingleLineParser_aggregateSkyline_rdh
/** * Aggregates different jobs' {@link ResourceSkyline}s within the same * pipeline together. * * @param resourceSkyline * newly extracted {@link ResourceSkyline}. * @param recurrenceId * the {@link RecurrenceId} which the resourceSkyline * belongs to. * @param skylineRecords * a {@link Map} which stores the * {@link ResourceSkyline}s for all pipelines during this parsing. */ private void aggregateSkyline(final ResourceSkyline resourceSkyline, final RecurrenceId recurrenceId, final Map<RecurrenceId, List<ResourceSkyline>> skylineRecords) { List<ResourceSkyline> v0 = skylineRecords.get(recurrenceId); if (v0 == null) { v0 = new ArrayList<ResourceSkyline>(); skylineRecords.put(recurrenceId, v0);} v0.add(resourceSkyline); }
3.26
hadoop_ReplicaUnderConstruction_getState_rdh
/** * Get replica state as reported by the data-node. */ ReplicaState getState() { return state; }
3.26
hadoop_ReplicaUnderConstruction_equals_rdh
// Block @Override public boolean equals(Object obj) { // Sufficient to rely on super's implementation return (this == obj) || super.equals(obj); }
3.26
hadoop_ReplicaUnderConstruction_getChosenAsPrimary_rdh
/** * Whether the replica was chosen for recovery. */ boolean getChosenAsPrimary() { return chosenAsPrimary; }
3.26
hadoop_ReplicaUnderConstruction_setChosenAsPrimary_rdh
/** * Set whether this replica was chosen for recovery. */ void setChosenAsPrimary(boolean chosenAsPrimary) { this.chosenAsPrimary = chosenAsPrimary; }
3.26
hadoop_ReplicaUnderConstruction_hashCode_rdh
// Block @Overridepublic int hashCode() { return super.hashCode(); }
3.26
hadoop_ReplicaUnderConstruction_m0_rdh
/** * Set replica state. */ void m0(HdfsServerConstants.ReplicaState s) { state = s; }
3.26
hadoop_ReplicaUnderConstruction_getExpectedStorageLocation_rdh
/** * Expected block replica location as assigned when the block was allocated. * This defines the pipeline order. * It is not guaranteed, but expected, that the data-node actually has * the replica. */ DatanodeStorageInfo getExpectedStorageLocation() { return expectedLocation; }
3.26
hadoop_ReplicaUnderConstruction_isAlive_rdh
/** * Whether the data-node the replica belongs to is alive. */ boolean isAlive() { return expectedLocation.getDatanodeDescriptor().isAlive(); }
3.26
hadoop_LoggingAuditor_activate_rdh
/** * Activate: log at TRACE. * * @return this span. */ @Override public AuditSpanS3A activate() { LOG.trace("[{}] {} Activate {}", currentThreadID(), getSpanId(), getDescription()); return this; }
3.26
hadoop_LoggingAuditor_attachRangeFromRequest_rdh
/** * Attach Range of data for GetObject Request. * * @param request * the sdk request to be modified * @param executionAttributes * execution attributes for this request */ private void attachRangeFromRequest(SdkHttpRequest request, ExecutionAttributes executionAttributes) { String operationName = executionAttributes.getAttribute(AwsExecutionAttribute.OPERATION_NAME); if ((operationName != null) && operationName.equals("GetObject")) { if ((request.headers() != null) && (request.headers().get("Range") != null)) { String[] rangeHeader = request.headers().get("Range").get(0).split("="); // only set header if range unit is bytes if (rangeHeader[0].equals("bytes")) { referrer.set(AuditConstants.PARAM_RANGE, rangeHeader[1]); } } }}
3.26
hadoop_LoggingAuditor_serviceInit_rdh
/** * Service init, look for jobID and attach as an attribute in log entries. * This is where the warning span is created, so the relevant attributes * (and filtering options) are applied. * * @param conf * configuration * @throws Exception * failure */ @Override protected void serviceInit(final Configuration conf) throws Exception { super.serviceInit(conf); rejectOutOfSpan = conf.getBoolean(REJECT_OUT_OF_SPAN_OPERATIONS, false); // attach the job ID if there is one in the configuration used // to create this file. String jobID = extractJobID(conf); if (jobID != null) { addAttribute(AuditConstants.PARAM_JOB_ID, jobID); } headerEnabled = getConfig().getBoolean(REFERRER_HEADER_ENABLED, REFERRER_HEADER_ENABLED_DEFAULT); filters = conf.getTrimmedStringCollection(REFERRER_HEADER_FILTER); final CommonAuditContext currentContext = currentAuditContext(); warningSpan = new WarningSpan(OUTSIDE_SPAN, currentContext, createSpanID(), null, null); isMultipartUploadEnabled = conf.getBoolean(MULTIPART_UPLOADS_ENABLED, DEFAULT_MULTIPART_UPLOAD_ENABLED);}
3.26
hadoop_LoggingAuditor_beforeExecution_rdh
/** * Handle requests made without a real context by logging and * incrementing the failure count. * Some requests (e.g. copy part) are not expected in spans due * to how they are executed; these do not trigger failures. * * @param context * The current state of the execution, including * the unmodified SDK request from the service * client call. * @param executionAttributes * A mutable set of attributes scoped * to one specific request/response * cycle that can be used to give data * to future lifecycle methods. */ @Override public void beforeExecution(Context.BeforeExecution context, ExecutionAttributes executionAttributes) { String error = "executing a request outside an audit span " + f0.analyze(context.request()); final String unaudited = (((getSpanId() + " ") + UNAUDITED_OPERATION) + " ") + error; if (isRequestNotAlwaysInSpan(context.request())) { // can get by auditing during a copy, so don't overreact LOG.debug(unaudited); } else { final RuntimeException ex = new AuditFailureException(unaudited); LOG.debug(unaudited, ex); if (rejectOutOfSpan) { throw ex; } } // now hand off to the superclass for its normal preparation super.beforeExecution(context, executionAttributes); }
3.26
hadoop_LoggingAuditor_m0_rdh
/** * Get the referrer; visible for tests. * * @return the referrer. */ HttpReferrerAuditHeader m0() { return referrer; }
3.26
hadoop_LoggingAuditor_deactivate_rdh
/** * Log at TRACE. */ @Override public void deactivate() { LOG.trace("[{}] {} Deactivate {}", currentThreadID(), getSpanId(), getDescription()); }
3.26
hadoop_LoggingAuditor_getLastHeader_rdh
/** * Get the last header used. * * @return the last referrer header generated. */ public String getLastHeader() { return lastHeader; }
3.26
hadoop_LoggingAuditor_addAttribute_rdh
/** * Add an attribute. * * @param key * key * @param value * value */ public final void addAttribute(String key, String value) { attributes.put(key, value); }
3.26
hadoop_LoggingAuditor_getDescription_rdh
/** * Get the span description built in the constructor. * * @return description text. */ protected String getDescription() { return description; }
3.26
hadoop_LoggingAuditor_attachDeleteKeySizeAttribute_rdh
/** * For delete requests, attach delete key size as a referrer attribute. * * @param request * the request object. */ private void attachDeleteKeySizeAttribute(SdkRequest request) { if (request instanceof DeleteObjectsRequest) { int v10 = ((DeleteObjectsRequest) (request)).delete().objects().size(); referrer.set(DELETE_KEYS_SIZE, String.valueOf(v10)); } else if (request instanceof DeleteObjectRequest) { String key = ((DeleteObjectRequest) (request)).key(); if ((key != null) && (key.length() > 0)) { referrer.set(DELETE_KEYS_SIZE, "1"); }} }
3.26
hadoop_LoggingAuditor_set_rdh
/** * Pass to the HTTP referrer. * {@inheritDoc } */ @Override public void set(final String key, final String value) { referrer.set(key, value); }
3.26
hadoop_LoggingAuditor_prepareActiveContext_rdh
/** * Get/Prepare the active context for a span. * * @return the common audit context. */ private CommonAuditContext prepareActiveContext() { return currentAuditContext(); }
3.26
hadoop_NoopAuditor_createAndStartNoopAuditor_rdh
/** * Create, init and start an instance. * * @param conf * configuration. * @param activationCallbacks * Activation callbacks. * @return a started instance. */ public static NoopAuditor createAndStartNoopAuditor(Configuration conf, NoopSpan.SpanActivationCallbacks activationCallbacks) {NoopAuditor noop = new NoopAuditor(activationCallbacks); final OperationAuditorOptions options = OperationAuditorOptions.builder().withConfiguration(conf).withIoStatisticsStore(iostatisticsStore().build()); noop.init(options); noop.start(); return noop; }
3.26
hadoop_OutputReader_initialize_rdh
/** * Initializes the OutputReader. This method has to be called before * calling any of the other methods. */ public void initialize(PipeMapRed pipeMapRed) throws IOException { // nothing here yet, but that might change in the future }
3.26
hadoop_ExpressionFactory_addClass_rdh
/** * Register the given class as handling the given list of expression names. * * @param expressionClass * the class implementing the expression names * @param names * one or more command names that will invoke this class * @throws IOException * if the expression is not of an expected type */ void addClass(Class<? extends Expression> expressionClass, String... names) throws IOException { for (String name : names) expressionMap.put(name, expressionClass); }
3.26
hadoop_ExpressionFactory_registerExpression_rdh
/** * Invokes "static void registerExpression(FindExpressionFactory)" on the * given class. This method abstracts the contract between the factory and the * expression class. Do not assume that directly invoking registerExpression * on the given class will have the same effect. * * @param expressionClass * class to allow an opportunity to register */ void registerExpression(Class<? extends Expression> expressionClass) { try { Method register = expressionClass.getMethod(REGISTER_EXPRESSION_METHOD, ExpressionFactory.class); if (register != null) { register.invoke(null, this); } } catch (Exception e) {throw new RuntimeException(StringUtils.stringifyException(e)); } }
3.26
hadoop_ExpressionFactory_isExpression_rdh
/** * Determines whether the given expression name represents an actual * expression. * * @param expressionName * name of the expression * @return true if expressionName represents an expression */ boolean isExpression(String expressionName) { return expressionMap.containsKey(expressionName); }
3.26
hadoop_ExpressionFactory_getExpression_rdh
/** * Get an instance of the requested expression * * @param expressionName * name of the command to lookup * @param conf * the Hadoop configuration * @return the {@link Expression} or null if the expression is unknown */ Expression getExpression(String expressionName, Configuration conf) { if (conf == null) throw new NullPointerException("configuration is null"); Class<? extends Expression> expressionClass = expressionMap.get(expressionName); Expression instance = createExpression(expressionClass, conf); return instance; }
3.26
hadoop_ExpressionFactory_createExpression_rdh
/** * Creates an instance of the requested {@link Expression} class. * * @param expressionClassname * name of the {@link Expression} class to be instantiated * @param conf * the Hadoop configuration * @return a new instance of the requested {@link Expression} class */ Expression createExpression(String expressionClassname, Configuration conf) { try { Class<? extends Expression> expressionClass = Class.forName(expressionClassname).asSubclass(Expression.class); return createExpression(expressionClass, conf); } catch (ClassNotFoundException e) { throw new IllegalArgumentException("Invalid classname " + expressionClassname); } }
3.26
hadoop_AbstractClientRequestInterceptor_shutdown_rdh
/** * Disposes the {@link ClientRequestInterceptor}. */ @Override public void shutdown() { if (this.nextInterceptor != null) { this.nextInterceptor.shutdown(); } }
3.26
hadoop_AbstractClientRequestInterceptor_setConf_rdh
/** * Sets the {@link Configuration}. */@Override public void setConf(Configuration conf) { this.conf = conf; if (this.nextInterceptor != null) { this.nextInterceptor.setConf(conf); } }
3.26
hadoop_AbstractClientRequestInterceptor_getConf_rdh
/** * Gets the {@link Configuration}. */ @Override public Configuration getConf() { return this.conf; }
3.26
hadoop_AbstractClientRequestInterceptor_init_rdh
/** * Initializes the {@link ClientRequestInterceptor}. */ @Override public void init(String userName) { this.user = RouterServerUtil.setupUser(userName); if (this.nextInterceptor != null) { this.nextInterceptor.init(userName); } }
3.26
hadoop_AbstractClientRequestInterceptor_setNextInterceptor_rdh
/** * Sets the {@link ClientRequestInterceptor} in the chain. */ @Override public void setNextInterceptor(ClientRequestInterceptor nextInterceptor) { this.nextInterceptor = nextInterceptor; }
3.26
hadoop_AbstractClientRequestInterceptor_getNextInterceptor_rdh
/** * Gets the next {@link ClientRequestInterceptor} in the chain. */ @Override public ClientRequestInterceptor getNextInterceptor() { return this.nextInterceptor; }
3.26
hadoop_FederationNamenodeServiceState_getState_rdh
// When the nameservice is disabled. public static FederationNamenodeServiceState getState(HAServiceState state) { switch (state) { case ACTIVE : return FederationNamenodeServiceState.ACTIVE; case OBSERVER : return FederationNamenodeServiceState.OBSERVER; case STANDBY : return FederationNamenodeServiceState.STANDBY; case INITIALIZING : return FederationNamenodeServiceState.UNAVAILABLE; case STOPPING : return FederationNamenodeServiceState.UNAVAILABLE; default : return FederationNamenodeServiceState.UNAVAILABLE;} }
3.26
hadoop_RouterClientRMService_createRequestInterceptorChain_rdh
/** * This method creates and returns reference of the first interceptor in the * chain of request interceptor instances. * * @return the reference of the first interceptor in the chain */ @VisibleForTesting protected ClientRequestInterceptor createRequestInterceptorChain() { Configuration conf = getConfig(); return RouterServerUtil.createRequestInterceptorChain(conf, YarnConfiguration.ROUTER_CLIENTRM_INTERCEPTOR_CLASS_PIPELINE, YarnConfiguration.DEFAULT_ROUTER_CLIENTRM_INTERCEPTOR_CLASS, ClientRequestInterceptor.class); }
3.26
hadoop_RouterClientRMService_init_rdh
/** * Initializes the wrapper with the specified parameters. * * @param interceptor * the first interceptor in the pipeline */ public synchronized void init(ClientRequestInterceptor interceptor) { this.f0 = interceptor; }
3.26
hadoop_RouterClientRMService_finalize_rdh
/** * Shutdown the chain of interceptors when the object is destroyed. */ @Override protected void finalize() { f0.shutdown(); }
3.26
hadoop_RouterClientRMService_getRootInterceptor_rdh
/** * Gets the root request interceptor. * * @return the root request interceptor */public synchronized ClientRequestInterceptor getRootInterceptor() { return f0; }
3.26
hadoop_RouterClientRMService_getPipelines_rdh
/** * Gets the Request interceptor chains for all the users. * * @return the request interceptor chains. */ @VisibleForTesting protected Map<String, RequestInterceptorChainWrapper> getPipelines() { return this.userPipelineMap;}
3.26
hadoop_RouterClientRMService_initializePipeline_rdh
/** * Initializes the request interceptor pipeline for the specified application. * * @param user */ private RequestInterceptorChainWrapper initializePipeline(String user) { synchronized(this.userPipelineMap) { if (this.userPipelineMap.containsKey(user)) { LOG.info("Request to start an already existing user: {}" + " was received, so ignoring.", user);return userPipelineMap.get(user); } RequestInterceptorChainWrapper v46 = new RequestInterceptorChainWrapper(); try { // We should init the pipeline instance after it is created and then // add to the map, to ensure thread safe. LOG.info("Initializing request processing pipeline for application for the user: {}.", user); ClientRequestInterceptor interceptorChain = this.createRequestInterceptorChain(); interceptorChain.init(user); // We set the RouterDelegationTokenSecretManager instance to the interceptorChain // and let the interceptor use it. if (routerDTSecretManager != null) { interceptorChain.setTokenSecretManager(routerDTSecretManager); } v46.init(interceptorChain); } catch (Exception e) { LOG.error("Init ClientRequestInterceptor error for user: {}.", user, e); throw e; } this.userPipelineMap.put(user, v46); return v46; } }
3.26
hadoop_WeightedPolicyInfo_setAMRMPolicyWeights_rdh
/** * Setter method for AMRMProxy weights. * * @param policyWeights * the amrmproxy weights. */ public void setAMRMPolicyWeights(Map<SubClusterIdInfo, Float> policyWeights) { this.amrmPolicyWeights = policyWeights; }
3.26
hadoop_WeightedPolicyInfo_getRouterPolicyWeights_rdh
/** * Getter of the router weights. * * @return the router weights. */ public Map<SubClusterIdInfo, Float> getRouterPolicyWeights() { return routerPolicyWeights; }
3.26
hadoop_WeightedPolicyInfo_getHeadroomAlpha_rdh
/** * Return the parameter headroomAlpha, used by policies that balance * weight-based and load-based considerations in their decisions. * * For policies that use this parameter, values close to 1 indicate that most * of the decision should be based on currently observed headroom from various * sub-clusters, values close to zero, indicate that the decision should be * mostly based on weights and practically ignore current load. * * @return the value of headroomAlpha. */ public float getHeadroomAlpha() { return headroomAlpha; }
3.26
hadoop_WeightedPolicyInfo_getAMRMPolicyWeights_rdh
/** * Getter for AMRMProxy weights. * * @return the AMRMProxy weights. */ public Map<SubClusterIdInfo, Float> getAMRMPolicyWeights() { return amrmPolicyWeights; }
3.26
hadoop_WeightedPolicyInfo_fromByteBuffer_rdh
/** * Deserializes a {@link WeightedPolicyInfo} from a byte UTF-8 JSON * representation. * * @param bb * the input byte representation. * @return the {@link WeightedPolicyInfo} represented. * @throws FederationPolicyInitializationException * if a deserialization error * occurs. */ public static WeightedPolicyInfo fromByteBuffer(ByteBuffer bb) throws FederationPolicyInitializationException { if (jsonjaxbContext == null) { throw new FederationPolicyInitializationException("JSONJAXBContext should" + " not be null."); } try { JSONUnmarshaller unmarshaller = jsonjaxbContext.createJSONUnmarshaller(); final byte[] bytes = new byte[bb.remaining()]; bb.get(bytes); String params = new String(bytes, StandardCharsets.UTF_8); WeightedPolicyInfo weightedPolicyInfo = unmarshaller.unmarshalFromJSON(new StringReader(params), WeightedPolicyInfo.class); return weightedPolicyInfo; } catch (JAXBException j) { throw new FederationPolicyInitializationException(j); } }
3.26
hadoop_WeightedPolicyInfo_toByteBuffer_rdh
/** * Converts the policy into a byte array representation in the input * {@link ByteBuffer}. * * @return byte array representation of this policy configuration. * @throws FederationPolicyInitializationException * if a serialization error * occurs. */ public ByteBuffer toByteBuffer() throws FederationPolicyInitializationException { if (jsonjaxbContext == null) { throw new FederationPolicyInitializationException("JSONJAXBContext should" + " not be null."); } try { String s = toJSONString(); return ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8)); } catch (JAXBException j) { throw new FederationPolicyInitializationException(j); } }
3.26
hadoop_WeightedPolicyInfo_setHeadroomAlpha_rdh
/** * Set the parameter headroomAlpha, used by policies that balance weight-based * and load-based considerations in their decisions. * * For policies that use this parameter, values close to 1 indicate that most * of the decision should be based on currently observed headroom from various * sub-clusters, values close to zero, indicate that the decision should be * mostly based on weights and practically ignore current load. * * @param headroomAlpha * the value to use for balancing. */ public void setHeadroomAlpha(float headroomAlpha) { this.headroomAlpha = headroomAlpha; }
3.26
hadoop_WeightedPolicyInfo_setRouterPolicyWeights_rdh
/** * Setter method for Router weights. * * @param policyWeights * the router weights. */ public void setRouterPolicyWeights(Map<SubClusterIdInfo, Float> policyWeights) { this.routerPolicyWeights = policyWeights; }
3.26
hadoop_LoggedLocation_setUnknownAttribute_rdh
// for input parameter ignored. @JsonAnySetter public void setUnknownAttribute(String attributeName, Object ignored) {if (!alreadySeenAnySetterAttributes.contains(attributeName)) { alreadySeenAnySetterAttributes.add(attributeName); System.err.println(("In LoggedLocation, we saw the unknown attribute " + attributeName) + "."); } }
3.26
hadoop_LoggedLocation_compareStrings_rdh
// I'll treat this as an atomic object type private void compareStrings(List<NodeName> c1, List<NodeName> c2, TreePath loc, String eltname) throws DeepInequalityException { if ((c1 == null) && (c2 == null)) { return; } TreePath recursePath = new TreePath(loc, eltname); if (((c1 == null) || (c2 == null)) || (c1.size() != c2.size())) { throw new DeepInequalityException(eltname + " miscompared", recursePath); } for (NodeName n1 : c1) { boolean found = false; for (NodeName n2 : c2) { if (n1.getValue().equals(n2.getValue())) { found = true; break; } } if (!found) { throw new DeepInequalityException(((eltname + " miscompared [") + n1.getValue()) + "]", recursePath); } } }
3.26
hadoop_BinaryPartitioner_setLeftOffset_rdh
/** * Set the subarray to be used for partitioning to * <code>bytes[offset:]</code> in Python syntax. * * @param conf * configuration object * @param offset * left Python-style offset */ public static void setLeftOffset(Configuration conf, int offset) {conf.setInt(LEFT_OFFSET_PROPERTY_NAME, offset); }
3.26
hadoop_BinaryPartitioner_getPartition_rdh
/** * Use (the specified slice of the array returned by) * {@link BinaryComparable#getBytes()} to partition. */ @Override public int getPartition(BinaryComparable key, V value, int numPartitions) { int length = key.getLength(); int leftIndex = (leftOffset + length) % length; int rightIndex = (rightOffset + length) % length;int hash = WritableComparator.hashBytes(key.getBytes(), leftIndex, (rightIndex - leftIndex) + 1); return (hash & Integer.MAX_VALUE) % numPartitions; }
3.26
hadoop_BinaryPartitioner_setOffsets_rdh
/** * Set the subarray to be used for partitioning to * <code>bytes[left:(right+1)]</code> in Python syntax. * * @param conf * configuration object * @param left * left Python-style offset * @param right * right Python-style offset */ public static void setOffsets(Configuration conf, int left, int right) { conf.setInt(LEFT_OFFSET_PROPERTY_NAME, left); conf.setInt(RIGHT_OFFSET_PROPERTY_NAME, right); }
3.26
hadoop_BinaryPartitioner_setRightOffset_rdh
/** * Set the subarray to be used for partitioning to * <code>bytes[:(offset+1)]</code> in Python syntax. * * @param conf * configuration object * @param offset * right Python-style offset */ public static void setRightOffset(Configuration conf, int offset) { conf.setInt(RIGHT_OFFSET_PROPERTY_NAME, offset); }
3.26
hadoop_StartupProgressServlet_writeNumberFieldIfDefined_rdh
/** * Writes a JSON number field only if the value is defined. * * @param json * JsonGenerator to receive output * @param key * String key to put * @param value * long value to put * @throws IOException * if there is an I/O error */ private static void writeNumberFieldIfDefined(JsonGenerator json, String key, long value) throws IOException { if (value != Long.MIN_VALUE) { json.writeNumberField(key, value); }}
3.26
hadoop_StartupProgressServlet_writeStringFieldIfNotNull_rdh
/** * Writes a JSON string field only if the value is non-null. * * @param json * JsonGenerator to receive output * @param key * String key to put * @param value * String value to put * @throws IOException * if there is an I/O error */ private static void writeStringFieldIfNotNull(JsonGenerator json, String key, String value) throws IOException { if (value != null) { json.writeStringField(key, value); } }
3.26
hadoop_Interns_m0_rdh
/** * Get a metrics tag. * * @param info * of the tag * @param value * of the tag * @return an interned metrics tag */ public static MetricsTag m0(MetricsInfo info, String value) { return Tags.INSTANCE.cache.add(info, value); }
3.26
hadoop_Interns_info_rdh
/** * Get a metric info object. * * @param name * Name of metric info object * @param description * Description of metric info object * @return an interned metric info object */ public static MetricsInfo info(String name, String description) { return Info.INSTANCE.cache.add(name, description); }
3.26
hadoop_Interns_tag_rdh
/** * Get a metrics tag. * * @param name * of the tag * @param description * of the tag * @param value * of the tag * @return an interned metrics tag */ public static MetricsTag tag(String name, String description, String value) { return Tags.INSTANCE.cache.add(info(name, description), value); }
3.26
hadoop_OpenFileCtxCache_getEntryToEvict_rdh
/** * The entry to be evicted is based on the following rules:<br> * 1. if the OpenFileCtx has any pending task, it will not be chosen.<br> * 2. if there is an inactive OpenFileCtx, the first one found is evicted. <br> * 3. For OpenFileCtx entries that don't belong to group 1 or 2, the idlest one * is selected. If it's idle longer than OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT, it * will be evicted. Otherwise, the whole eviction request fails. */ @VisibleForTesting Entry<FileHandle, OpenFileCtx> getEntryToEvict() { Iterator<Entry<FileHandle, OpenFileCtx>> it = openFileMap.entrySet().iterator(); if (LOG.isTraceEnabled()) { LOG.trace("openFileMap size:" + size()); } Entry<FileHandle, OpenFileCtx> idlest = null; while (it.hasNext()) { Entry<FileHandle, OpenFileCtx> pairs = it.next(); OpenFileCtx ctx = pairs.getValue(); if (!ctx.getActiveState()) { if (LOG.isDebugEnabled()) { LOG.debug("Got one inactive stream: " + ctx); } return pairs; } if (ctx.hasPendingWork()) { // Always skip files with pending work. continue; } if (idlest == null) { idlest = pairs;} else if (ctx.getLastAccessTime() < idlest.getValue().getLastAccessTime()) { idlest = pairs; }} if (idlest == null) { LOG.warn("No eviction candidate. All streams have pending work."); return null; } else { long idleTime = Time.monotonicNow() - idlest.getValue().getLastAccessTime(); if (idleTime < NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT) { if (LOG.isDebugEnabled()) { LOG.debug("idlest stream's idle time:" + idleTime); } LOG.warn("All opened streams are busy, can't remove any from cache."); return null; } else { return idlest; } } }
3.26