Dataset schema (each row below is the triple name / code_snippet / score):
name: string, length 12 to 178
code_snippet: string, length 8 to 36.5k
score: float64, range 3.26 to 3.68
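As a rough illustration of how rows with this schema could be consumed, here is a minimal Java sketch that assumes a hypothetical JSON Lines export carrying the three fields above; the file name and export format are assumptions, not part of the dataset.

// Minimal sketch, assuming a hypothetical JSON Lines export with the fields
// "name", "code_snippet" and "score"; the file name is made up.
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.stream.Stream;

public final class ScoreScan {
  public static void main(String[] args) throws IOException {
    ObjectMapper mapper = new ObjectMapper();
    try (Stream<String> lines = Files.lines(Paths.get("code_scores.jsonl"))) {
      lines.forEach(line -> {
        try {
          JsonNode row = mapper.readTree(line);
          // Print only the snippet identifier and its quality score.
          System.out.printf("%s -> %.2f%n",
              row.get("name").asText(), row.get("score").asDouble());
        } catch (IOException e) {
          throw new RuntimeException(e);
        }
      });
    }
  }
}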
hadoop_LogAggregationWebUtils_getLogStartIndex_rdh
/** * Parse start index from html. * * @param html * the html * @param startStr * the start index string * @return the startIndex */ public static long getLogStartIndex(Block html, String startStr) throws NumberFormatException { long start = -4096; if ((startStr != null) && (!startStr.isEmpty())) { start = Long.parseLong(startStr); } return start;}
3.26
hadoop_UpdateApplicationTimeoutsResponse_newInstance_rdh
/** * <p> * The response sent by the <code>ResourceManager</code> to the client on update * application timeout. * </p> * <p> * A response without exception means that the update has completed * successfully. * </p> */ @Public @Unstable
3.26
hadoop_AWSAuditEventCallbacks_requestCreated_rdh
/** * Callback when a request is created in the S3A code. * This is called in {@code RequestFactoryImpl} after * each request is created. * It is not invoked on any AWS requests created in the SDK. * Avoid raising exceptions or talking to any remote service; * this callback is for annotation rather than validation. * * @param builder * the request builder. */ default void requestCreated(SdkRequest.Builder builder) { }
3.26
hadoop_FederationProxyProviderUtil_createRMProxy_rdh
/** * Create a proxy for the specified protocol in the context of Federation. For * non-HA, this is a direct connection to the ResourceManager address. When HA * is enabled, the proxy handles the failover between the ResourceManagers as * well. * * @param configuration * Configuration to generate {@link ClientRMProxy} * @param protocol * Protocol for the proxy * @param subClusterId * the unique identifier of the sub-cluster * @param user * the user on whose behalf the proxy is being created * @param token * the auth token to use for connection * @param <T> * Type information of the proxy * @return Proxy to the RM * @throws IOException * on failure */ @Public @Unstable public static <T> T createRMProxy(Configuration configuration, final Class<T> protocol, SubClusterId subClusterId, UserGroupInformation user, Token<? extends TokenIdentifier> token) throws IOException { final YarnConfiguration config = new YarnConfiguration(configuration); updateConfForFederation(config, subClusterId.getId()); return AMRMClientUtils.createRMProxy(config, protocol, user, token); }
3.26
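A minimal caller-side sketch of the createRMProxy helper above; the package locations of FederationProxyProviderUtil and SubClusterId are assumed (hadoop-yarn-server-common), the "sc-1" sub-cluster id is hypothetical, and the AMRMToken is supplied by the caller.

// A sketch, not a drop-in client: package locations and the "sc-1" id are assumptions.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
import org.apache.hadoop.yarn.server.federation.failover.FederationProxyProviderUtil;
import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;

public final class FederatedAmClient {
  /** Build an ApplicationMasterProtocol proxy for one (hypothetical) sub-cluster. */
  public static ApplicationMasterProtocol connect(Configuration conf,
      Token<? extends TokenIdentifier> amrmToken) throws IOException {
    SubClusterId subClusterId = SubClusterId.newInstance("sc-1"); // hypothetical id
    UserGroupInformation user = UserGroupInformation.getCurrentUser();
    return FederationProxyProviderUtil.createRMProxy(
        conf, ApplicationMasterProtocol.class, subClusterId, user, amrmToken);
  }
}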
hadoop_FederationProxyProviderUtil_updateConfForFederation_rdh
/** * Update the conf for Federation with the given subClusterId. * * @param conf * configuration * @param subClusterId * subclusterId for the conf */ public static void updateConfForFederation(Configuration conf, String subClusterId) { conf.set(YarnConfiguration.RM_CLUSTER_ID, subClusterId); /* In a Federation setting, we will connect to not just the local cluster RM but also multiple external RMs. The membership information of all the RMs that are currently participating in Federation is available in the central FederationStateStore. So we will: 1. obtain the RM service addresses from FederationStateStore using the FederationRMFailoverProxyProvider. 2. disable traditional HA as that depends on local configuration lookup for RMs using indexes. 3. enable federation failover if traditional HA is enabled so that the appropriate failover RetryPolicy is initialized. */ conf.setBoolean(YarnConfiguration.FEDERATION_ENABLED, true); conf.setClass(YarnConfiguration.CLIENT_FAILOVER_PROXY_PROVIDER, FederationRMFailoverProxyProvider.class, RMFailoverProxyProvider.class); if (HAUtil.isHAEnabled(conf)) { conf.setBoolean(YarnConfiguration.FEDERATION_FAILOVER_ENABLED, true); conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, false); } }
3.26
hadoop_TimelinePutResponse_addError_rdh
/** * Add a single {@link TimelinePutError} instance into the existing list * * @param error * a single {@link TimelinePutError} instance */ public void addError(TimelinePutError error) { errors.add(error); } /** * Add a list of {@link TimelinePutError} instances into the existing list * * @param errors * a list of {@link TimelinePutError}
3.26
hadoop_TimelinePutResponse_setErrors_rdh
/** * Set the list to the given list of {@link TimelinePutError} instances * * @param errors * a list of {@link TimelinePutError} instances */ public void setErrors(List<TimelinePutError> errors) { this.errors.clear(); this.errors.addAll(errors); }
3.26
hadoop_TimelinePutResponse_setErrorCode_rdh
/** * Set the error code to the given error code * * @param errorCode * an error code */ public void setErrorCode(int errorCode) { this.errorCode = errorCode; }
3.26
hadoop_TimelinePutResponse_setEntityType_rdh
/** * Set the entity type * * @param entityType * the entity type */ public void setEntityType(String entityType) { this.entityType = entityType; }
3.26
hadoop_TimelinePutResponse_setEntityId_rdh
/** * Set the entity Id * * @param entityId * the entity Id */ public void setEntityId(String entityId) { this.entityId = entityId; }
3.26
hadoop_TimelinePutResponse_getEntityType_rdh
/** * Get the entity type * * @return the entity type */ @XmlElement(name = "entitytype") public String getEntityType() { return entityType; }
3.26
hadoop_TimelinePutResponse_getEntityId_rdh
/** * Get the entity Id * * @return the entity Id */ @XmlElement(name = "entity") public String getEntityId() { return entityId; }
3.26
hadoop_TimelinePutResponse_getErrors_rdh
/** * Get a list of {@link TimelinePutError} instances * * @return a list of {@link TimelinePutError} instances */ @XmlElement(name = "errors") public List<TimelinePutError> getErrors() { return errors; }
3.26
hadoop_TimelinePutResponse_getErrorCode_rdh
/** * Get the error code * * @return an error code */ @XmlElement(name = "errorcode") public int getErrorCode() { return errorCode; }
3.26
hadoop_Canceler_cancel_rdh
/** * Requests that the current operation be canceled if it is still running. * This does not block until the cancellation is successful. * * @param reason * the reason why cancellation is requested */ public void cancel(String reason) { this.cancelReason = reason; }
3.26
hadoop_BatchedRequests_getApplicationId_rdh
/** * Get Application Id. * * @return Application Id. */ public ApplicationId getApplicationId() { return applicationId; }
3.26
hadoop_BatchedRequests_getSchedulingRequests_rdh
/** * Get Collection of SchedulingRequests in this batch. * * @return Collection of Scheduling Requests. */ @Override public Collection<SchedulingRequest> getSchedulingRequests() { return requests; }
3.26
hadoop_BatchedRequests_iterator_rdh
/** * Exposes SchedulingRequest Iterator interface which can be used * to traverse requests using different heuristics i.e. Tag Popularity * * @return SchedulingRequest Iterator. */ @Override public Iterator<SchedulingRequest> iterator() { switch (this.iteratorType) { case SERIAL : return new SerialIterator(requests); case POPULAR_TAGS : return new PopularTagsIterator(requests); default : return null; } }
3.26
hadoop_BatchedRequests_addToBatch_rdh
/** * Add a Scheduling request to the batch. * * @param req * Scheduling Request. */ public void addToBatch(SchedulingRequest req) { requests.add(req); }
3.26
hadoop_BatchedRequests_getPlacementAttempt_rdh
/** * Get placement attempt. * * @return PlacementAlgorithmOutput placement Attempt. */ public int getPlacementAttempt() { return placementAttempt; }
3.26
hadoop_BatchedRequests_getIteratorType_rdh
/** * Get Iterator type. * * @return Iterator type. */ public IteratorType getIteratorType() { return iteratorType; }
3.26
hadoop_IOStatisticsSnapshot_clear_rdh
/** * Clear all the maps. */ public synchronized void clear() { counters.clear(); gauges.clear(); minimums.clear(); maximums.clear(); meanStatistics.clear(); }
3.26
hadoop_IOStatisticsSnapshot_snapshot_rdh
/** * Take a snapshot. * * This completely overwrites the map data with the statistics * from the source. * * @param source * statistics source. */ public synchronized void snapshot(IOStatistics source) { checkNotNull(source); counters = snapshotMap(source.counters()); gauges = snapshotMap(source.gauges()); minimums = snapshotMap(source.minimums()); maximums = snapshotMap(source.maximums()); meanStatistics = snapshotMap(source.meanStatistics(), MeanStatistic::copy); }
3.26
hadoop_IOStatisticsSnapshot_serializer_rdh
/** * Get a JSON serializer for this class. * * @return a serializer. */ public static JsonSerialization<IOStatisticsSnapshot> serializer() { return new JsonSerialization<>(IOStatisticsSnapshot.class, false, true); }
3.26
hadoop_IOStatisticsSnapshot_requiredSerializationClasses_rdh
/** * What classes are needed to deserialize this class? * Needed to securely unmarshall this from untrusted sources. * * @return a list of required classes to deserialize the data. */ public static List<Class> requiredSerializationClasses() { return Arrays.stream(DESERIALIZATION_CLASSES).collect(Collectors.toList()); }
3.26
hadoop_IOStatisticsSnapshot_aggregate_rdh
/** * Aggregate the current statistics with the * source reference passed in. * * The operation is synchronized. * * @param source * source; may be null * @return true if a merge took place. */ @Override public synchronized boolean aggregate(@Nullable IOStatistics source) { if (source == null) { return false; } aggregateMaps(counters, source.counters(), IOStatisticsBinding::aggregateCounters, IOStatisticsBinding::passthroughFn);aggregateMaps(gauges, source.gauges(), IOStatisticsBinding::aggregateGauges, IOStatisticsBinding::passthroughFn); aggregateMaps(minimums, source.minimums(), IOStatisticsBinding::aggregateMinimums, IOStatisticsBinding::passthroughFn); aggregateMaps(maximums, source.maximums(), IOStatisticsBinding::aggregateMaximums, IOStatisticsBinding::passthroughFn); aggregateMaps(meanStatistics, source.meanStatistics(), IOStatisticsBinding::aggregateMeanStatistics, MeanStatistic::copy); return true; }
3.26
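The clear/snapshot/serializer/aggregate rows above describe one common pattern: merge the statistics of finished streams into a running snapshot and emit them as JSON. A minimal sketch, assuming the stream is an IOStatisticsSource (as the S3A and ABFS streams are); otherwise retrieveIOStatistics simply returns null.

// Sketch of the aggregate-then-serialize flow documented above.
import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.statistics.IOStatistics;
import org.apache.hadoop.fs.statistics.IOStatisticsSnapshot;
import org.apache.hadoop.fs.statistics.IOStatisticsSupport;

public final class StatsRollup {
  private final IOStatisticsSnapshot totals = new IOStatisticsSnapshot();

  /** Merge the statistics of one finished stream into the running totals. */
  public void add(FSDataInputStream in) {
    IOStatistics source = IOStatisticsSupport.retrieveIOStatistics(in);
    if (source != null) {
      totals.aggregate(source); // aggregate() is synchronized on the snapshot
    }
  }

  /** Serialize the aggregated snapshot to JSON for logs or telemetry. */
  public String toJson() throws IOException {
    return IOStatisticsSnapshot.serializer().toJson(totals);
  }
}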
hadoop_IOStatisticsSnapshot_readObject_rdh
/** * Deserialize by loading each TreeMap, and building concurrent * hash maps from them. * * @param s * ObjectInputStream. * @throws IOException * raised on errors performing I/O. * @throws ClassNotFoundException * class not found exception */ private void readObject(final ObjectInputStream s) throws IOException, ClassNotFoundException { // read in core s.defaultReadObject(); // and rebuild a concurrent hashmap from every serialized tree map // read back from the stream. counters = new ConcurrentHashMap<>(((TreeMap<String, Long>) (s.readObject()))); gauges = new ConcurrentHashMap<>(((TreeMap<String, Long>) (s.readObject()))); minimums = new ConcurrentHashMap<>(((TreeMap<String, Long>) (s.readObject()))); maximums = new ConcurrentHashMap<>(((TreeMap<String, Long>) (s.readObject())));meanStatistics = new ConcurrentHashMap<>(((TreeMap<String, MeanStatistic>) (s.readObject()))); }
3.26
hadoop_IOStatisticsSnapshot_writeObject_rdh
/** * Serialize by converting each map to a TreeMap, and saving that * to the stream. * * @param s * ObjectOutputStream. * @throws IOException * raised on errors performing I/O. */ private synchronized void writeObject(ObjectOutputStream s) throws IOException { // Write out the core s.defaultWriteObject(); s.writeObject(new TreeMap<String, Long>(counters)); s.writeObject(new TreeMap<String, Long>(gauges)); s.writeObject(new TreeMap<String, Long>(minimums)); s.writeObject(new TreeMap<String, Long>(maximums)); s.writeObject(new TreeMap<String, MeanStatistic>(meanStatistics)); }
3.26
hadoop_IOStatisticsSnapshot_createMaps_rdh
/** * Create the maps. */ private synchronized void createMaps() { counters = new ConcurrentHashMap<>(); gauges = new ConcurrentHashMap<>(); minimums = new ConcurrentHashMap<>(); maximums = new ConcurrentHashMap<>(); meanStatistics = new ConcurrentHashMap<>(); }
3.26
hadoop_ReconfigurationTaskStatus_stopped_rdh
/** * Return true if the latest reconfiguration task has finished and there is * no other active task running. * * @return true if endTime &gt; 0; false if not. */ public boolean stopped() { return endTime > 0; }
3.26
hadoop_ReconfigurationTaskStatus_hasTask_rdh
/** * Return true if * - A reconfiguration task has finished or * - an active reconfiguration task is running. * * @return true if startTime &gt; 0; false if not. */ public boolean hasTask() { return startTime > 0; }
3.26
hadoop_AppIdKeyConverter_getKeySize_rdh
/** * Returns the size of app id after encoding. * * @return size of app id after encoding. */ public static int getKeySize() { return Bytes.SIZEOF_LONG + Bytes.SIZEOF_INT; }
3.26
hadoop_RejectedSchedulingRequest_newInstance_rdh
/** * Create new RejectedSchedulingRequest. * * @param reason * Rejection Reason. * @param request * Rejected Scheduling Request. * @return RejectedSchedulingRequest. */public static RejectedSchedulingRequest newInstance(RejectionReason reason, SchedulingRequest request) { RejectedSchedulingRequest instance = Records.newRecord(RejectedSchedulingRequest.class); instance.setReason(reason); instance.setRequest(request); return instance; }
3.26
hadoop_TracingContext_getHeader_rdh
/** * Return header representing the request associated with the tracingContext * * @return Header string set into X_MS_CLIENT_REQUEST_ID */ public String getHeader() { return header; }
3.26
hadoop_TracingContext_constructHeader_rdh
/** * Concatenate all identifiers separated by (:) into a string and set into * X_MS_CLIENT_REQUEST_ID header of the http operation * * @param httpOperation * AbfsHttpOperation instance to set header into * connection * @param previousFailure * Failure seen before this API trigger on same operation * from AbfsClient. */ public void constructHeader(AbfsHttpOperation httpOperation, String previousFailure) { clientRequestId = UUID.randomUUID().toString(); switch (format) { case ALL_ID_FORMAT : // Optional IDs (e.g. streamId) may be empty header = (((((((((((clientCorrelationID + ":") + clientRequestId) + ":") + fileSystemID) + ":") + m0(retryCount > 0)) + ":") + streamID) + ":") + opType) + ":") + retryCount; header = addFailureReasons(header, previousFailure); break; case TWO_ID_FORMAT : header = (clientCorrelationID + ":") + clientRequestId; break; default : header = clientRequestId;// case SINGLE_ID_FORMAT } if (listener != null) { // for testing listener.callTracingHeaderValidator(header, format); } httpOperation.setRequestProperty(HttpHeaderConfigurations.X_MS_CLIENT_REQUEST_ID, header); /* In case the primaryRequestId is an empty-string and if it is the first try to API call (previousFailure shall be null), maintain the last part of clientRequestId's UUID in primaryRequestIdForRetry. This field shall be used as primaryRequestId part of the x-ms-client-request-id header in case of retry of the same API-request. */ if (primaryRequestId.isEmpty() && (previousFailure == null)) { String[] clientRequestIdParts = clientRequestId.split("-"); primaryRequestIdForRetry = clientRequestIdParts[clientRequestIdParts.length - 1];} } /** * Provide value to be used as primaryRequestId part of x-ms-client-request-id header. * * @param isRetry * define if it's for a retry case. * @return {@link #primaryRequestIdForRetry}:If the {@link #primaryRequestId} is an empty-string, and it's a retry iteration. {@link #primaryRequestId}
3.26
hadoop_QueueStateHelper_setQueueState_rdh
/** * Sets the current state of the queue based on its previous state, its parent's state and its * configured state. * * @param queue * the queue whose state is set */ public static void setQueueState(AbstractCSQueue queue) { QueueState v0 = queue.getState(); QueueState configuredState = queue.getQueueContext().getConfiguration().getConfiguredState(queue.getQueuePath()); QueueState parentState = (queue.getParent() == null) ? null : queue.getParent().getState(); // verify that we cannot set any value for State other than RUNNING/STOPPED if ((configuredState != null) && (!VALID_STATE_CONFIGURATIONS.contains(configuredState))) { throw new IllegalArgumentException("Invalid queue state configuration." + " We can only use RUNNING or STOPPED.");} if (v0 == null) { initializeState(queue, configuredState, parentState); } else { reinitializeState(queue, v0, configuredState); } }
3.26
hadoop_CsiConfigUtils_getCsiAdaptorAddressForDriver_rdh
/** * Resolve the CSI adaptor address for a CSI driver from configuration. * Expected configuration property name is * yarn.nodemanager.csi-driver-adaptor.${driverName}.address. * * @param driverName * driver name. * @param conf * configuration. * @return adaptor service address * @throws YarnException * exceptions from yarn servers. */ public static InetSocketAddress getCsiAdaptorAddressForDriver(String driverName, Configuration conf) throws YarnException { String v2 = (YarnConfiguration.NM_CSI_ADAPTOR_PREFIX + driverName) + YarnConfiguration.NM_CSI_ADAPTOR_ADDRESS_SUFFIX; String errorMessage = ((("Failed to load CSI adaptor address for driver " + driverName) + ", configuration property ") + v2) + " is not defined or invalid."; try { InetSocketAddress address = conf.getSocketAddr(v2, null, -1); if (address == null) { throw new YarnException(errorMessage); } return address; } catch (IllegalArgumentException e) { throw new YarnException(errorMessage); } }
3.26
hadoop_JobTokenSecretManager_addTokenForJob_rdh
/** * Add the job token of a job to cache * * @param jobId * the job that owns the token * @param token * the job token */ public void addTokenForJob(String jobId, Token<JobTokenIdentifier> token) { SecretKey tokenSecret = createSecretKey(token.getPassword()); synchronized(currentJobTokens) { currentJobTokens.put(jobId, tokenSecret); } }
3.26
hadoop_JobTokenSecretManager_createIdentifier_rdh
/** * Create an empty job token identifier * * @return a newly created empty job token identifier */ @Override public JobTokenIdentifier createIdentifier() { return new JobTokenIdentifier(); }
3.26
hadoop_JobTokenSecretManager_createPassword_rdh
/** * Create a new password/secret for the given job token identifier. * * @param identifier * the job token identifier * @return token password/secret */ @Override public byte[] createPassword(JobTokenIdentifier identifier) { byte[] result = createPassword(identifier.getBytes(), masterKey); return result; }
3.26
hadoop_JobTokenSecretManager_retrieveTokenSecret_rdh
/** * Look up the token password/secret for the given jobId. * * @param jobId * the jobId to look up * @return token password/secret as SecretKey * @throws InvalidToken */ public SecretKey retrieveTokenSecret(String jobId) throws InvalidToken { SecretKey tokenSecret = null; synchronized(currentJobTokens) { tokenSecret = currentJobTokens.get(jobId); } if (tokenSecret == null) { throw new InvalidToken(("Can't find job token for job " + jobId) + " !!"); } return tokenSecret; }
3.26
hadoop_JobTokenSecretManager_computeHash_rdh
/** * Compute the HMAC hash of the message using the key * * @param msg * the message to hash * @param key * the key to use * @return the computed hash */ public static byte[] computeHash(byte[] msg, SecretKey key) { return createPassword(msg, key); }
3.26
hadoop_JobTokenSecretManager_createSecretKey_rdh
/** * Convert the byte[] to a secret key * * @param key * the byte[] to create the secret key from * @return the secret key */ public static SecretKey createSecretKey(byte[] key) { return SecretManager.createSecretKey(key); }
3.26
hadoop_JobTokenSecretManager_retrievePassword_rdh
/** * Look up the token password/secret for the given job token identifier. * * @param identifier * the job token identifier to look up * @return token password/secret as byte[] * @throws InvalidToken */ @Override public byte[] retrievePassword(JobTokenIdentifier identifier) throws InvalidToken { return retrieveTokenSecret(identifier.getJobId().toString()).getEncoded(); }
3.26
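The static helpers above (createSecretKey, computeHash) are what shuffle-style message authentication is built from. A small sketch, assuming the class lives at org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager and that the caller already holds the job token password bytes and an expected hash; only methods shown in the rows above are used.

// Sketch combining the static helpers documented above; the inputs are placeholders.
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import javax.crypto.SecretKey;
import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;

public final class UrlHashCheck {
  /** Recompute the HMAC of a message (e.g. a shuffle URL) and compare it to the expected value. */
  public static boolean verify(byte[] jobTokenPassword, String msg, byte[] expectedHash) {
    SecretKey key = JobTokenSecretManager.createSecretKey(jobTokenPassword);
    byte[] hash = JobTokenSecretManager.computeHash(
        msg.getBytes(StandardCharsets.UTF_8), key);
    // Arrays.equals is fine for a sketch; production code should prefer a constant-time compare.
    return Arrays.equals(hash, expectedHash);
  }
}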
hadoop_BlockMissingException_getFile_rdh
/** * Returns the name of the corrupted file. * * @return name of corrupted file */ public String getFile() { return filename; }
3.26
hadoop_BlockMissingException_m0_rdh
/** * Returns the offset at which this file is corrupted * * @return offset of corrupted file */ public long m0() { return offset; }
3.26
hadoop_CompressionOutputStream_getIOStatistics_rdh
/** * Return any IOStatistics provided by the underlying stream. * * @return IO stats from the inner stream. */ @Override public IOStatistics getIOStatistics() { return IOStatisticsSupport.retrieveIOStatistics(out); }
3.26
hadoop_UpdateContainerTokenEvent_isExecTypeUpdate_rdh
/** * Is this update an ExecType Update. * * @return isExecTypeUpdate. */public boolean isExecTypeUpdate() { return isExecTypeUpdate; }
3.26
hadoop_UpdateContainerTokenEvent_getUpdatedToken_rdh
/** * Update Container Token. * * @return Container Token. */ public ContainerTokenIdentifier getUpdatedToken() { return updatedToken; }
3.26
hadoop_UpdateContainerTokenEvent_isIncrease_rdh
/** * Is this a container Increase. * * @return isIncrease. */ public boolean isIncrease() { return isIncrease; }
3.26
hadoop_UpdateContainerTokenEvent_isResourceChange_rdh
/** * Is this update a ResourceChange. * * @return isResourceChange. */ public boolean isResourceChange() {return isResourceChange; }
3.26
hadoop_ReencryptionHandler_startUpdaterThread_rdh
/** * Start the re-encryption updater thread. */ void startUpdaterThread() { updaterExecutor = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setDaemon(true).setNameFormat("reencryptionUpdaterThread #%d").build()); updaterExecutor.execute(reencryptionUpdater); }
3.26
hadoop_ReencryptionHandler_checkINodeReady_rdh
/** * Check whether zone is ready for re-encryption. Throws IOE if it's not. 1. * If EZ is deleted. 2. if the re-encryption is canceled. 3. If NN is not * active or is in safe mode. * * @throws IOException * if zone does not exist / is cancelled, or if NN is not ready * for write. */ @Override protected void checkINodeReady(long zoneId) throws IOException { final ZoneReencryptionStatus zs = getReencryptionStatus().getZoneStatus(zoneId); if (zs == null) {throw new IOException(("Zone " + zoneId) + " status cannot be found."); } if (zs.isCanceled()) { throw new IOException("Re-encryption is canceled for zone " + zoneId); } dir.getFSNamesystem().checkNameNodeSafeMode("NN is in safe mode, cannot re-encrypt."); // re-encryption should be cancelled when NN goes to standby. Just // double checking for sanity. dir.getFSNamesystem().checkOperation(OperationCategory.WRITE); }
3.26
hadoop_ReencryptionHandler_unprotectedGetTracker_rdh
/** * Get the tracker without holding the FSDirectory lock. * The submissions object is protected by object lock. */ synchronized ZoneSubmissionTracker unprotectedGetTracker(final long zoneId) { return submissions.get(zoneId); }
3.26
hadoop_ReencryptionHandler_run_rdh
/** * Main loop. It takes at most 1 zone per scan, and executes until the zone * is completed. * {@link #reencryptEncryptionZone(long)}. */ @Override public void run() { f0.info("Starting up re-encrypt thread with interval={} millisecond.", interval); while (true) { try { synchronized(this) { wait(interval); } traverser.checkPauseForTesting(); } catch (InterruptedException ie) { f0.info("Re-encrypt handler interrupted. Exiting"); Thread.currentThread().interrupt(); return; } final Long zoneId; dir.getFSNamesystem().readLock(); try { zoneId = getReencryptionStatus().getNextUnprocessedZone(); if (zoneId == null) { // empty queue. continue; } f0.info("Executing re-encrypt commands on zone {}. Current zones:{}", zoneId, getReencryptionStatus()); getReencryptionStatus().markZoneStarted(zoneId); resetSubmissionTracker(zoneId); } finally { dir.getFSNamesystem().readUnlock("reEncryptThread"); } try { reencryptEncryptionZone(zoneId); } catch (RetriableException | SafeModeException re) { f0.info("Re-encryption caught exception, will retry", re); getReencryptionStatus().markZoneForRetry(zoneId);} catch (IOException ioe) { f0.warn("IOException caught when re-encrypting zone {}", zoneId, ioe); } catch (InterruptedException ie) { f0.info("Re-encrypt handler interrupted. Exiting."); Thread.currentThread().interrupt(); return; } catch (Throwable t) { f0.error("Re-encrypt handler thread exiting. Exception caught when" + " re-encrypting zone {}.", zoneId, t); return; } } }
3.26
hadoop_ReencryptionHandler_resetSubmissionTracker_rdh
/** * Reset the zone submission tracker for re-encryption. * * @param zoneId */ private synchronized void resetSubmissionTracker(final long zoneId) { ZoneSubmissionTracker zst = submissions.get(zoneId); if (zst == null) { zst = new ZoneSubmissionTracker(); submissions.put(zoneId, zst); } else { zst.reset(); } }
3.26
hadoop_ReencryptionHandler_restoreFromLastProcessedFile_rdh
/** * Restore the re-encryption from the progress inside ReencryptionStatus. * This means start from exactly the lastProcessedFile (LPF), skipping all * earlier paths in lexicographic order. Lexicographically-later directories * on the LPF parent paths are added to subdirs. */ private void restoreFromLastProcessedFile(final long zoneId, final ZoneReencryptionStatus zs) throws IOException, InterruptedException { final INodeDirectory parent; final byte[] startAfter; final INodesInPath lpfIIP = dir.getINodesInPath(zs.getLastCheckpointFile(), DirOp.READ); parent = lpfIIP.getLastINode().getParent(); startAfter = lpfIIP.getLastINode().getLocalNameBytes();traverser.traverseDir(parent, zoneId, startAfter, new ZoneTraverseInfo(zs.getEzKeyVersionName())); }
3.26
hadoop_ReencryptionHandler_addDummyTracker_rdh
/** * Add a dummy tracker (with 1 task that has 0 files to re-encrypt) * for the zone. This is necessary to complete the re-encryption in case * no file in the entire zone needs re-encryption at all. We cannot simply * update zone status and set zone xattrs, because in the handler we only hold * readlock, and setting xattrs requires upgrading to a writelock. * * @param zoneId */ void addDummyTracker(final long zoneId, ZoneSubmissionTracker zst) { assert dir.hasReadLock();if (zst == null) { zst = new ZoneSubmissionTracker(); } zst.setSubmissionDone(); final Future future = batchService.submit(new EDEKReencryptCallable(zoneId, new ReencryptionBatch(), this)); zst.addTask(future); synchronized(this) { submissions.put(zoneId, zst); } }
3.26
hadoop_ReencryptionHandler_stopThreads_rdh
/** * Stop the re-encryption updater thread, as well as all EDEK re-encryption * tasks submitted. */void stopThreads() { assert dir.hasWriteLock(); synchronized(this) { for (ZoneSubmissionTracker zst : submissions.values()) { zst.cancelAllTasks(); } }if (updaterExecutor != null) { updaterExecutor.shutdownNow(); } }
3.26
hadoop_ReencryptionHandler_notifyNewSubmission_rdh
/** * Called when a new zone is submitted for re-encryption. This will interrupt * the background thread if it's waiting for the next * DFS_NAMENODE_REENCRYPT_SLEEP_INTERVAL_KEY. */ synchronized void notifyNewSubmission() {f0.debug("Notifying handler for new re-encryption command."); this.notify(); }
3.26
hadoop_ReencryptionHandler_submitCurrentBatch_rdh
/** * Submit the current batch to the thread pool. * * @param zoneId * Id of the EZ INode * @throws IOException * @throws InterruptedException */ @Override protected void submitCurrentBatch(final Long zoneId) throws IOException, InterruptedException { if (currentBatch.isEmpty()) { return; } ZoneSubmissionTracker zst; synchronized(ReencryptionHandler.this) { zst = submissions.get(zoneId);if (zst == null) { zst = new ZoneSubmissionTracker(); submissions.put(zoneId, zst); } Future future = batchService.submit(new EDEKReencryptCallable(zoneId, currentBatch, reencryptionHandler)); zst.addTask(future); } f0.info("Submitted batch (start:{}, size:{}) of zone {} to re-encrypt.", currentBatch.getFirstFilePath(), currentBatch.size(), zoneId); currentBatch = new ReencryptionBatch(reencryptBatchSize); // flip the pause flag if this is nth submission. // The actual pause need to happen outside of the lock. if (pauseAfterNthSubmission > 0) { if ((--pauseAfterNthSubmission) == 0) { shouldPauseForTesting = true; } } }
3.26
hadoop_ReencryptionHandler_reencryptEncryptionZone_rdh
/** * Re-encrypts a zone by recursively iterating all paths inside the zone, * in lexicographic order. * Files are re-encrypted, and subdirs are processed during iteration. * * @param zoneId * the Zone's id. * @throws IOException * @throws InterruptedException */ void reencryptEncryptionZone(final long zoneId) throws IOException, InterruptedException { throttleTimerAll.reset().start(); throttleTimerLocked.reset(); final INode zoneNode; final ZoneReencryptionStatus zs; traverser.readLock(); try { zoneNode = dir.getInode(zoneId); // start re-encrypting the zone from the beginning if (zoneNode == null) { f0.info("Directory with id {} removed during re-encrypt, skipping", zoneId); return; } if (!zoneNode.isDirectory()) { f0.info("Cannot re-encrypt directory with id {} because it's not a" + " directory.", zoneId); return; } zs = getReencryptionStatus().getZoneStatus(zoneId); assert zs != null; // Only costly log FullPathName here once, and use id elsewhere. f0.info("Re-encrypting zone {}(id={})", zoneNode.getFullPathName(), zoneId); if (zs.getLastCheckpointFile() == null) { // new re-encryption traverser.traverseDir(zoneNode.asDirectory(), zoneId, HdfsFileStatus.EMPTY_NAME, new ZoneTraverseInfo(zs.getEzKeyVersionName())); } else { // resuming from a past re-encryption restoreFromLastProcessedFile(zoneId, zs); } // save the last batch and mark complete traverser.submitCurrentBatch(zoneId); f0.info("Submission completed of zone {} for re-encryption.", zoneId); reencryptionUpdater.markZoneSubmissionDone(zoneId); } finally { traverser.readUnlock(); } }
3.26
hadoop_ReencryptionHandler_throttle_rdh
/** * Throttles the ReencryptionHandler in 3 aspects: * 1. Prevents generating more Callables than the CPU could possibly * handle. * 2. Prevents generating more Callables than the ReencryptionUpdater * can handle, under its own throttling. * 3. Prevents contending FSN/FSD read locks. This is done based * on the DFS_NAMENODE_REENCRYPT_THROTTLE_LIMIT_RATIO_KEY configuration. * <p> * Item 1 and 2 are to control NN heap usage. * * @throws InterruptedException */ @VisibleForTesting @Override protected void throttle() throws InterruptedException { assert !dir.hasReadLock(); assert !dir.getFSNamesystem().hasReadLock(); final int numCores = Runtime.getRuntime().availableProcessors(); if (taskQueue.size() >= numCores) { f0.debug("Re-encryption handler throttling because queue size {} is" + "larger than number of cores {}", taskQueue.size(), numCores); while (taskQueue.size() >= numCores) { Thread.sleep(100); } } // 2. if tasks are piling up on the updater, don't create new callables // until the queue size goes down. final int maxTasksPiled = Runtime.getRuntime().availableProcessors() * 2; int numTasks = numTasksSubmitted(); if (numTasks >= maxTasksPiled) { f0.debug("Re-encryption handler throttling because total tasks pending" + " re-encryption updater is {}", numTasks); while (numTasks >= maxTasksPiled) { Thread.sleep(500);numTasks = numTasksSubmitted(); } } // 3. if (throttleLimitHandlerRatio >= 1.0) {return; } final long expect = ((long) (throttleTimerAll.now(TimeUnit.MILLISECONDS) * throttleLimitHandlerRatio)); final long actual = throttleTimerLocked.now(TimeUnit.MILLISECONDS); if (f0.isDebugEnabled()) { f0.debug("Re-encryption handler throttling expect: {}, actual: {}," + " throttleTimerAll:{}", expect, actual, throttleTimerAll.now(TimeUnit.MILLISECONDS)); } if ((expect - actual) < 0) { // in case throttleLimitHandlerRatio is very small, expect will be 0. // so sleepMs should not be calculated from expect, to really meet the // ratio. e.g. if ratio is 0.001, expect = 0 and actual = 1, sleepMs // should be 1000 - throttleTimerAll.now() final long sleepMs = ((long) (actual / throttleLimitHandlerRatio)) - throttleTimerAll.now(TimeUnit.MILLISECONDS); f0.debug("Throttling re-encryption, sleeping for {} ms", sleepMs); Thread.sleep(sleepMs); } throttleTimerAll.reset().start();throttleTimerLocked.reset(); }
3.26
hadoop_AltKerberosAuthenticationHandler_authenticate_rdh
/** * It enforces the Kerberos SPNEGO authentication sequence returning an * {@link AuthenticationToken} only after the Kerberos SPNEGO sequence has * completed successfully (in the case of Java access) and only after the * custom authentication implemented by the subclass in alternateAuthenticate * has completed successfully (in the case of browser access). * * @param request * the HTTP client request. * @param response * the HTTP client response. * @return an authentication token if the request is authorized or null * @throws IOException * thrown if an IO error occurred * @throws AuthenticationException * thrown if an authentication error occurred */ @Override public AuthenticationToken authenticate(HttpServletRequest request, HttpServletResponse response) throws IOException, AuthenticationException { AuthenticationToken token; if (m0(request.getHeader("User-Agent"))) { token = alternateAuthenticate(request, response); } else { token = super.authenticate(request, response); } return token; }
3.26
hadoop_AltKerberosAuthenticationHandler_m0_rdh
/** * This method parses the User-Agent String and returns whether or not it * refers to a browser. If it's not a browser, then Kerberos authentication * will be used; if it is a browser, alternateAuthenticate from the subclass * will be used. * <p> * A User-Agent String is considered to be a browser if it does not contain * any of the values from alt-kerberos.non-browser.user-agents; the default * behavior is to consider everything a browser unless it contains one of: * "java", "curl", "wget", or "perl". Subclasses can optionally override * this method to use different behavior. * * @param userAgent * The User-Agent String, or null if there isn't one * @return true if the User-Agent String refers to a browser, false if not */ protected boolean m0(String userAgent) { if (userAgent == null) { return false; } userAgent = userAgent.toLowerCase(Locale.ENGLISH); boolean isBrowser = true; for (String nonBrowserUserAgent : nonBrowserUserAgents) { if (userAgent.contains(nonBrowserUserAgent)) { isBrowser = false; break; } } return isBrowser; }
3.26
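The two rows above describe the AltKerberosAuthenticationHandler contract: browser requests go to alternateAuthenticate, everything else falls back to Kerberos SPNEGO. A minimal subclass sketch; the X-Alt-Auth-User header and the trivial trust model are made up, and a real handler would validate an SSO cookie or assertion instead.

// Sketch of a subclass of the handler documented above; the header name and the
// trust-the-header logic are hypothetical and for illustration only.
import java.io.IOException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.server.AltKerberosAuthenticationHandler;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;

public class HeaderAltKerberosHandler extends AltKerberosAuthenticationHandler {
  @Override
  public AuthenticationToken alternateAuthenticate(HttpServletRequest request,
      HttpServletResponse response) throws IOException, AuthenticationException {
    String user = request.getHeader("X-Alt-Auth-User"); // hypothetical header
    if (user == null || user.isEmpty()) {
      response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "missing alternate auth header");
      return null; // no token: the request is not authenticated
    }
    return new AuthenticationToken(user, user, getType());
  }
}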
hadoop_AllocationFileQueueParser_loadQueue_rdh
/** * Loads a queue from a queue element in the configuration file. */ private void loadQueue(String parentName, Element element, QueueProperties.Builder builder) throws AllocationConfigurationException { String queueName = FairSchedulerUtilities.trimQueueName(element.getAttribute("name"));if (queueName.contains(".")) { throw new AllocationConfigurationException((("Bad fair scheduler config " + "file: queue name (") + queueName) + ") shouldn't contain period."); } if (queueName.isEmpty()) { throw new AllocationConfigurationException(("Bad fair scheduler config " + "file: queue name shouldn't be empty or ") + "consist only of whitespace."); } if (parentName != null) { queueName = (parentName + ".") + queueName; } NodeList v4 = element.getChildNodes(); boolean isLeaf = true; boolean isReservable = false; boolean isMaxAMShareSet = false; for (int j = 0; j < v4.getLength(); j++) { Node fieldNode = v4.item(j); if (!(fieldNode instanceof Element)) { continue;} Element field = ((Element) (fieldNode)); if (MIN_RESOURCES.equals(field.getTagName())) { String text = getTrimmedTextData(field); ConfigurableResource val = FairSchedulerConfiguration.parseResourceConfigValue(text, 0L); builder.minQueueResources(queueName, val.getResource()); } else if (MAX_RESOURCES.equals(field.getTagName())) { String text = getTrimmedTextData(field); ConfigurableResource val = FairSchedulerConfiguration.parseResourceConfigValue(text); builder.maxQueueResources(queueName, val); } else if (MAX_CHILD_RESOURCES.equals(field.getTagName())) { String text = getTrimmedTextData(field); ConfigurableResource val = FairSchedulerConfiguration.parseResourceConfigValue(text); builder.maxChildQueueResources(queueName, val); } else if (MAX_RUNNING_APPS.equals(field.getTagName())) { String text = getTrimmedTextData(field); int val = Integer.parseInt(text); builder.queueMaxApps(queueName, val); } else if (MAX_AMSHARE.equals(field.getTagName())) { String text = getTrimmedTextData(field); float val = Float.parseFloat(text); val = Math.min(val, 1.0F); builder.queueMaxAMShares(queueName, val); isMaxAMShareSet = true; } else if (MAX_CONTAINER_ALLOCATION.equals(field.getTagName())) { String text = getTrimmedTextData(field); ConfigurableResource val = FairSchedulerConfiguration.parseResourceConfigValue(text); builder.queueMaxContainerAllocation(queueName, val.getResource()); } else if (WEIGHT.equals(field.getTagName())) { String text = getTrimmedTextData(field); double val = Double.parseDouble(text); builder.queueWeights(queueName, ((float) (val))); } else if (MIN_SHARE_PREEMPTION_TIMEOUT.equals(field.getTagName())) { String v25 = getTrimmedTextData(field); long val = Long.parseLong(v25) * 1000L; builder.minSharePreemptionTimeouts(queueName, val); } else if (FAIR_SHARE_PREEMPTION_TIMEOUT.equals(field.getTagName())) {String text = getTrimmedTextData(field); long val = Long.parseLong(text) * 1000L; builder.fairSharePreemptionTimeouts(queueName, val); } else if (FAIR_SHARE_PREEMPTION_THRESHOLD.equals(field.getTagName())) { String text = getTrimmedTextData(field); float val = Float.parseFloat(text); val = Math.max(Math.min(val, 1.0F), 0.0F); builder.fairSharePreemptionThresholds(queueName, val); } else if (SCHEDULING_POLICY.equals(field.getTagName()) || SCHEDULING_MODE.equals(field.getTagName())) { String text = getTrimmedTextData(field); SchedulingPolicy policy = SchedulingPolicy.parse(text); builder.queuePolicies(queueName, policy); } else if (ACL_SUBMIT_APPS.equals(field.getTagName())) { String text = ((Text) (field.getFirstChild())).getData(); 
builder.queueAcls(queueName, AccessType.SUBMIT_APP, new AccessControlList(text)); } else if (ACL_ADMINISTER_APPS.equals(field.getTagName())) { String text = ((Text) (field.getFirstChild())).getData(); builder.queueAcls(queueName, AccessType.ADMINISTER_QUEUE, new AccessControlList(text)); } else if (ACL_ADMINISTER_RESERVATIONS.equals(field.getTagName())) { String text = ((Text) (field.getFirstChild())).getData(); builder.reservationAcls(queueName, ReservationACL.ADMINISTER_RESERVATIONS, new AccessControlList(text)); } else if (ACL_LIST_RESERVATIONS.equals(field.getTagName())) { String text = ((Text) (field.getFirstChild())).getData();builder.reservationAcls(queueName, ReservationACL.LIST_RESERVATIONS, new AccessControlList(text)); } else if (ACL_SUBMIT_RESERVATIONS.equals(field.getTagName())) { String text = ((Text) (field.getFirstChild())).getData(); builder.reservationAcls(queueName, ReservationACL.SUBMIT_RESERVATIONS, new AccessControlList(text)); } else if (RESERVATION.equals(field.getTagName())) { isReservable = true; builder.reservableQueues(queueName); builder.configuredQueues(FSQueueType.PARENT, queueName); } else if (ALLOW_PREEMPTION_FROM.equals(field.getTagName())) { String text = getTrimmedTextData(field); if (!Boolean.parseBoolean(text)) { builder.nonPreemptableQueues(queueName); } } else if (QUEUE.endsWith(field.getTagName()) || POOL.equals(field.getTagName())) { loadQueue(queueName, field, builder); isLeaf = false; } } // if a leaf in the alloc file is marked as type='parent' // then store it as a parent queue if (isLeaf && (!"parent".equals(element.getAttribute("type")))) { // reservable queue has been already configured as parent if (!isReservable) { builder.configuredQueues(FSQueueType.LEAF, queueName); } } else { if (isReservable) { throw new AllocationConfigurationException(getErrorString(queueName, RESERVATION)); } else if (isMaxAMShareSet) { throw new AllocationConfigurationException(getErrorString(queueName, MAX_AMSHARE)); } builder.configuredQueues(FSQueueType.PARENT, queueName); } // Set default acls if not defined // The root queue defaults to all access for (QueueACL acl : QueueACL.values()) { AccessType accessType = SchedulerUtils.toAccessType(acl); if (!builder.isAclDefinedForAccessType(queueName, accessType)) { AccessControlList defaultAcl = (queueName.equals(ROOT)) ? EVERYBODY_ACL : NOBODY_ACL; builder.queueAcls(queueName, accessType, defaultAcl); } } checkMinAndMaxResource(builder.getMinQueueResources(), builder.getMaxQueueResources(), queueName); }
3.26
hadoop_AllocationFileQueueParser_getErrorString_rdh
/** * Set up the error string based on the supplied parent queueName and element. * * @param parentQueueName * the parent queue name. * @param element * the element that should not be present for the parent queue. * @return the error string. */private String getErrorString(String parentQueueName, String element) { return (((((("The configuration settings" + " for ") + parentQueueName) + " are invalid. A queue element that ") + "contains child queue elements or that has the type='parent' ") + "attribute cannot also include a ") + element) + " element."; }
3.26
hadoop_AllocationFileQueueParser_parse_rdh
// Load queue elements. A root queue can either be included or omitted. If // it's included, all other queues must be inside it. public QueueProperties parse() throws AllocationConfigurationException {QueueProperties.Builder queuePropertiesBuilder = new QueueProperties.Builder(); for (Element element : elements) { String parent = ROOT; if (element.getAttribute("name").equalsIgnoreCase(ROOT)) { if (elements.size() > 1) { throw new AllocationConfigurationException("If configuring root queue," + " no other queues can be placed alongside it."); } parent = null; } loadQueue(parent, element, queuePropertiesBuilder); } return queuePropertiesBuilder.build(); }
3.26
hadoop_HdfsDtFetcher_getServiceName_rdh
/** * Returns the service name for HDFS, which is also a valid URL prefix. */ public Text getServiceName() { return new Text(SERVICE_NAME); }
3.26
hadoop_HdfsDtFetcher_addDelegationTokens_rdh
/** * Returns Token object via FileSystem, null if bad argument. * * @param conf * - a Configuration object used with FileSystem.get() * @param creds * - a Credentials object to which token(s) will be added * @param renewer * - the renewer to send with the token request * @param url * - the URL to which the request is sent * @return a Token, or null if fetch fails. */ public Token<?> addDelegationTokens(Configuration conf, Credentials creds, String renewer, String url) throws Exception { if (!url.startsWith(getServiceName().toString())) { url = (getServiceName().toString() + "://") + url; } FileSystem fs = FileSystem.get(URI.create(url), conf); Token<?> token = fs.getDelegationToken(renewer); if (token == null) { LOG.error(FETCH_FAILED); throw new IOException(FETCH_FAILED); } creds.addToken(token.getService(), token); return token; }
3.26
hadoop_ContainerInfo_getAllocatedResources_rdh
/** * Return a map of the allocated resources. The map key is the resource name, * and the value is the resource value. * * @return the allocated resources map */ public Map<String, Long> getAllocatedResources() { return Collections.unmodifiableMap(allocatedResources); }
3.26
hadoop_DocumentStoreTimelineReaderImpl_applyFilters_rdh
// for honoring all filters from {@link TimelineEntityFilters} private Set<TimelineEntity> applyFilters(TimelineEntityFilters filters, TimelineDataToRetrieve dataToRetrieve, List<TimelineEntityDocument> entityDocs) throws IOException { Set<TimelineEntity> timelineEntities = new HashSet<>(); for (TimelineEntityDocument entityDoc : entityDocs) { final TimelineEntity timelineEntity = entityDoc.fetchTimelineEntity(); if (DocumentStoreUtils.isFilterNotMatching(filters, timelineEntity)) { continue; } TimelineEntity entityToBeReturned = DocumentStoreUtils.createEntityToBeReturned(entityDoc, dataToRetrieve); timelineEntities.add(entityToBeReturned);} return timelineEntities; }
3.26
hadoop_NMStateStoreService_serviceStart_rdh
/** * Start the state storage for use */ @Override public void serviceStart() throws IOException { startStorage(); }
3.26
hadoop_NMStateStoreService_serviceInit_rdh
/** * Initialize the state storage */ @Override public void serviceInit(Configuration conf) throws IOException { initStorage(conf); }
3.26
hadoop_NMStateStoreService_serviceStop_rdh
/** * Shutdown the state storage. */ @Override public void serviceStop() throws IOException { closeStorage(); }
3.26
hadoop_NMStateStoreService_releaseAssignedResources_rdh
/** * Delete the assigned resources of a container of specific resourceType. * * @param containerId * Container Id * @param resourceType * resource Type * @throws IOException * while releasing resources */ public void releaseAssignedResources(ContainerId containerId, String resourceType) throws IOException { }
3.26
hadoop_HadoopUncaughtExceptionHandler_uncaughtException_rdh
/** * Uncaught exception handler. * If an error is raised: shutdown * The state of the system is unknown at this point -attempting * a clean shutdown is dangerous. Instead: exit * * @param thread * thread that failed * @param exception * the raised exception */ @Override public void uncaughtException(Thread thread, Throwable exception) { if (ShutdownHookManager.get().isShutdownInProgress()) { LOG.error("Thread {} threw an error during shutdown: {}.", thread.toString(), exception, exception); } else if (exception instanceof Error) { try { LOG.error("Thread {} threw an error: {}. Shutting down", thread.toString(), exception, exception); } catch (Throwable err) { // We don't want to not exit because of an issue with logging } if (exception instanceof OutOfMemoryError) { // After catching an OOM java says it is undefined behavior, so don't // even try to clean up or we can get stuck on shutdown. try { System.err.println("Halting due to Out Of Memory Error..."); } catch (Throwable err) { // Again we don't want to exit because of logging issues. } ExitUtil.haltOnOutOfMemory(((OutOfMemoryError) (exception))); } else { // error other than OutOfMemory ExitUtil.ExitException ee = ServiceLauncher.convertToExitException(exception); ExitUtil.terminate(ee.status, ee); } } else { // simple exception in a thread. There's a policy decision here: // terminate the process vs. keep going after a thread has failed // base implementation: do nothing but log LOG.error("Thread {} threw an exception: {}", thread.toString(), exception, exception); if (delegate != null) { delegate.uncaughtException(thread, exception); } } }
3.26
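A short sketch of wiring the handler above in as the JVM-wide default at process start; the package location and the no-arg constructor are assumed from the service launcher module, and the snippet above also shows an optional delegate that ordinary (non-Error) exceptions can be forwarded to.

// Sketch: install the uncaught-exception policy documented above for every thread.
import org.apache.hadoop.service.launcher.HadoopUncaughtExceptionHandler;

public final class Bootstrap {
  public static void main(String[] args) {
    // From now on, an Error in any thread triggers the halt/terminate logic above,
    // while ordinary exceptions are only logged (or passed to an optional delegate).
    Thread.setDefaultUncaughtExceptionHandler(new HadoopUncaughtExceptionHandler());
    // ... start services ...
  }
}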
hadoop_AzureADAuthenticator_getHttpErrorCode_rdh
/** * Gets Http error status code. * * @return http error code. */ public int getHttpErrorCode() {return this.httpErrorCode; }
3.26
hadoop_AzureADAuthenticator_getTokenUsingRefreshToken_rdh
/** * Gets Azure Active Directory token using refresh token. * * @param authEndpoint * the OAuth 2.0 token endpoint associated * with the user's directory (obtain from * Active Directory configuration) * @param clientId * the client ID (GUID) of the client web app obtained from Azure Active Directory configuration * @param refreshToken * the refresh token * @return {@link AzureADToken} obtained using the refresh token * @throws IOException * throws IOException if there is a failure in connecting to Azure AD */ public static AzureADToken getTokenUsingRefreshToken(final String authEndpoint, final String clientId, final String refreshToken) throws IOException { QueryParams qp = new QueryParams(); qp.add("grant_type", "refresh_token"); qp.add("refresh_token", refreshToken); if (clientId != null) {qp.add("client_id", clientId); } LOG.debug("AADToken: starting to fetch token using refresh token for client ID " + clientId); return getTokenCall(authEndpoint, qp.serialize(), null, null); }
3.26
hadoop_AzureADAuthenticator_getTokenUsingClientCreds_rdh
/** * Gets Azure Active Directory token using the user ID and password of * a service principal (that is, Web App in Azure Active Directory). * * Azure Active Directory allows users to set up a web app as a * service principal. Users can optionally obtain service principal keys * from AAD. This method gets a token using a service principal's client ID * and keys. In addition, it needs the token endpoint associated with the * user's directory. * * @param authEndpoint * the OAuth 2.0 token endpoint associated * with the user's directory (obtain from * Active Directory configuration) * @param clientId * the client ID (GUID) of the client web app * obtained from Azure Active Directory configuration * @param clientSecret * the secret key of the client web app * @return {@link AzureADToken} obtained using the creds * @throws IOException * throws IOException if there is a failure in connecting to Azure AD */ public static AzureADToken getTokenUsingClientCreds(String authEndpoint, String clientId, String clientSecret) throws IOException { Preconditions.checkNotNull(authEndpoint, "authEndpoint"); Preconditions.checkNotNull(clientId, "clientId"); Preconditions.checkNotNull(clientSecret, "clientSecret"); boolean isVersion2AuthenticationEndpoint = authEndpoint.contains("/oauth2/v2.0/"); QueryParams qp = new QueryParams(); if (isVersion2AuthenticationEndpoint) { qp.add("scope", SCOPE); } else { qp.add("resource", RESOURCE_NAME);} qp.add("grant_type", "client_credentials"); qp.add("client_id", clientId); qp.add("client_secret", clientSecret);LOG.debug("AADToken: starting to fetch token using client creds for client ID " + clientId); return getTokenCall(authEndpoint, qp.serialize(), null, null); }
3.26
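A small sketch of calling the client-credentials helper above directly; the package is assumed to be org.apache.hadoop.fs.azurebfs.oauth2, the endpoint, client id and secret are placeholders, and in the ABFS connector these values normally come from the fs.azure.account.oauth2.* settings rather than literals.

// Sketch of a direct call to the helper documented above; all literals are placeholders.
import java.io.IOException;
import org.apache.hadoop.fs.azurebfs.oauth2.AzureADAuthenticator;
import org.apache.hadoop.fs.azurebfs.oauth2.AzureADToken;

public final class TokenProbe {
  public static void main(String[] args) throws IOException {
    String authEndpoint = "https://login.microsoftonline.com/<tenant-id>/oauth2/token"; // placeholder
    AzureADToken token = AzureADAuthenticator.getTokenUsingClientCreds(
        authEndpoint, "<client-id>", "<client-secret>");
    System.out.println("token acquired: " + (token != null));
  }
}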
hadoop_AzureADAuthenticator_getTokenFromMsi_rdh
/** * Gets AAD token from the local virtual machine's VM extension. This only works on * an Azure VM with MSI extension * enabled. * * @param authEndpoint * the OAuth 2.0 token endpoint associated * with the user's directory (obtain from * Active Directory configuration) * @param tenantGuid * (optional) The guid of the AAD tenant. Can be {@code null}. * @param clientId * (optional) The clientId guid of the MSI service * principal to use. Can be {@code null}. * @param bypassCache * {@code boolean} specifying whether a cached token is acceptable or a fresh token * request should be made to AAD * @return {@link AzureADToken} obtained using the creds * @throws IOException * throws IOException if there is a failure in obtaining the token */ public static AzureADToken getTokenFromMsi(final String authEndpoint, final String tenantGuid, final String clientId, String authority, boolean bypassCache) throws IOException { QueryParams qp = new QueryParams();qp.add("api-version", "2018-02-01"); qp.add("resource", RESOURCE_NAME); if ((tenantGuid != null) && (tenantGuid.length() > 0)) { authority = authority + tenantGuid;LOG.debug("MSI authority : {}", authority); qp.add("authority", authority); } if ((clientId != null) && (clientId.length() > 0)) { qp.add("client_id", clientId); } if (bypassCache) { qp.add("bypass_cache", "true"); } Hashtable<String, String> headers = new Hashtable<>(); headers.put("Metadata", "true"); LOG.debug("AADToken: starting to fetch token using MSI"); return getTokenCall(authEndpoint, qp.serialize(), headers, "GET", true); }
3.26
hadoop_AzureADAuthenticator_getRequestId_rdh
/** * Gets http request id. * * @return http request id. */ public String getRequestId() { return this.requestId; }
3.26
hadoop_DataNodeFaultInjector_interceptBlockReader_rdh
/** * Used as a hook to inject an intercept when finishing reading from a block. */ public void interceptBlockReader() { }
3.26
hadoop_DataNodeFaultInjector_delayAckLastPacket_rdh
/** * Used as a hook to delay sending the response of the last packet. */ public void delayAckLastPacket() throws IOException { }
3.26
hadoop_DataNodeFaultInjector_logDelaySendingPacketDownstream_rdh
/** * Used as a hook to intercept the latency of sending packet. */ public void logDelaySendingPacketDownstream(final String mirrAddr, final long delayMs) throws IOException { }
3.26
hadoop_DataNodeFaultInjector_interceptFreeBlockReaderBuffer_rdh
/** * Used as a hook to inject an intercept when freeing the block reader buffer. */ public void interceptFreeBlockReaderBuffer() { }
3.26
hadoop_DataNodeFaultInjector_delayDeleteReplica_rdh
/** * Just delay replica deletion for a while. */ public void delayDeleteReplica() { }
3.26
hadoop_DataNodeFaultInjector_delayWriteToOsCache_rdh
/** * Used as a hook to delay writing a packet to os cache. */ public void delayWriteToOsCache() { }
3.26
hadoop_DataNodeFaultInjector_delayWriteToDisk_rdh
/** * Used as a hook to delay writing a packet to disk. */ public void delayWriteToDisk() { }
3.26
hadoop_DataNodeFaultInjector_delayWhenOfferServiceHoldLock_rdh
/** * Used as a hook to inject an intercept while BPOfferService holds its lock. */ public void delayWhenOfferServiceHoldLock() { }
3.26
hadoop_DataNodeFaultInjector_stripedBlockReconstruction_rdh
/** * Used as a hook to inject failure in erasure coding reconstruction * process. */ public void stripedBlockReconstruction() throws IOException { }
3.26
hadoop_DataNodeFaultInjector_badDecoding_rdh
/** * Used as a hook to inject data pollution * into an erasure coding reconstruction. */ public void badDecoding(ByteBuffer[] outputs) { }
3.26
hadoop_DataNodeFaultInjector_logDelaySendingAckToUpstream_rdh
/** * Used as a hook to intercept the latency of sending ack. */ public void logDelaySendingAckToUpstream(final String upstreamAddr, final long delayMs) throws IOException { }
3.26
hadoop_DataNodeFaultInjector_blockUtilSendFullBlockReport_rdh
/** * Used as a hook to inject an intercept when re-registering. */ public void blockUtilSendFullBlockReport() { }
3.26
hadoop_DataNodeFaultInjector_stripedBlockChecksumReconstruction_rdh
/** * Used as a hook to inject failure in erasure coding checksum reconstruction * process. */ public void stripedBlockChecksumReconstruction() throws IOException { }
3.26
hadoop_DataNodeFaultInjector_delay_rdh
/** * Just delay a while. */ public void delay() { }
3.26
hadoop_DataNodeFaultInjector_delayBlockReader_rdh
/** * Used as a hook to inject latency when read block * in erasure coding reconstruction process. */ public void delayBlockReader() { }
3.26
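The DataNodeFaultInjector rows above are no-op hooks that tests override. A sketch of how a test might plug one in, assuming the test-only static get()/set() accessors that the HDFS test suites use to swap the active injector; this is illustration, not a production pattern.

// Sketch of overriding one hook documented above in a test; assumes the static
// get()/set() accessors on DataNodeFaultInjector that the HDFS tests rely on.
import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;

public final class SlowDiskInjectionExample {
  public static void runWithSlowDisk(Runnable testBody) {
    DataNodeFaultInjector previous = DataNodeFaultInjector.get();
    DataNodeFaultInjector.set(new DataNodeFaultInjector() {
      @Override
      public void delayWriteToDisk() {
        try {
          Thread.sleep(200); // simulate a slow disk while the hook is installed
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
      }
    });
    try {
      testBody.run(); // the write path under test sees the injected delay
    } finally {
      DataNodeFaultInjector.set(previous); // always restore the default injector
    }
  }
}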
hadoop_WebPageUtils_appendToolSection_rdh
/** * Creates the tool section after a closed section. If it is not enabled, * the section is created without any links. * * @param section * a closed HTML div section * @param conf * configuration object * @return the tool section, if it is enabled, null otherwise */ public static Hamlet.UL<Hamlet.DIV<Hamlet>> appendToolSection(Hamlet.DIV<Hamlet> section, Configuration conf) { boolean isToolsEnabled = conf.getBoolean(YarnConfiguration.YARN_WEBAPP_UI1_ENABLE_TOOLS, true); Hamlet.DIV<Hamlet> tools = null; Hamlet.UL<Hamlet.DIV<Hamlet>> enabledTools = null; if (isToolsEnabled) { tools = section.h3("Tools"); enabledTools = tools.ul().li().a("/conf", "Configuration").__().li().a("/logs", "Local logs").__().li().a("/stacks", "Server stacks").__().li().a("/jmx?qry=Hadoop:*", "Server metrics").__(); } else { section.h4("Tools (DISABLED)").__(); } return enabledTools; }
3.26
hadoop_WorkRequest_getRetry_rdh
/** * * @return Number of previous attempts to process this work request. */ public int getRetry() { return retry; }
3.26
hadoop_WordMedian_map_rdh
/** * Emits a key-value pair for counting the word. Outputs are (IntWritable, * IntWritable). * * @param value * This will be a line of text coming in from our input file. */ public void map(Object key, Text value, Context context) throws IOException, InterruptedException { StringTokenizer v0 = new StringTokenizer(value.toString()); while (v0.hasMoreTokens()) { String string = v0.nextToken(); length.set(string.length()); context.write(length, ONE); } }
3.26