name (string, length 12–178) | code_snippet (string, length 8–36.5k) | score (float64, 3.26–3.68)
---|---|---|
hadoop_AbstractDelegationTokenBinding_getDescription_rdh | /**
* Return a description.
* This is logged after service start and binding:
* it should be as informative as possible.
*
* @return a description to log.
*/
public String getDescription() {
return "Token binding " + getKind().toString();
} | 3.26 |
hadoop_AbstractDelegationTokenBinding_createSecretMananger_rdh | /**
* Create a secret manager.
*
* @return a secret manager.
* @throws IOException
* on failure
*/
protected SecretManager<AbstractS3ATokenIdentifier> createSecretMananger() throws IOException {
return new TokenSecretManager();
} | 3.26 |
hadoop_AbstractDelegationTokenBinding_getSecretManagerPasssword_rdh | /**
* Get the password to use in secret managers.
* This is a constant; it's just recalculated every time to stop Findbugs
* from highlighting the security risks of shared mutable byte arrays.
*
* @return a password.
*/
protected static byte[] getSecretManagerPasssword() {
return "non-password".getBytes(StandardCharsets.UTF_8);} | 3.26 |
hadoop_AbstractDelegationTokenBinding_deploy_rdh | /**
* Deploy, returning the binding information.
* The base implementation deploys unbonded if no identifier was retrieved,
* otherwise it binds to the retrieved token identifier.
*
* @param retrievedIdentifier
* any identifier, or null if deploying unbonded.
* @return binding information
* @throws IOException
* any failure.
*/
public DelegationBindingInfo deploy(AbstractS3ATokenIdentifier retrievedIdentifier) throws IOException {
requireServiceStarted();
AWSCredentialProviderList credentialProviders = (retrievedIdentifier == null) ? deployUnbonded() : bindToTokenIdentifier(retrievedIdentifier);
return new DelegationBindingInfo().withCredentialProviders(credentialProviders);
} | 3.26 |
hadoop_AbstractDelegationTokenBinding_getUserAgentField_rdh | /**
* Return a string for use in building up the User-Agent field, so
* it gets into the S3 access logs. Useful for diagnostics.
*
* @return a string for the S3 logs or "" for "nothing to add"
*/
public String getUserAgentField() {
return "";
} | 3.26 |
hadoop_AbstractDelegationTokenBinding_serviceStart_rdh | /**
* Service startup: create the secret manager.
*
* @throws Exception
* failure.
*/
@Override
protected void serviceStart() throws Exception {
super.serviceStart();
secretManager = createSecretMananger();
} | 3.26 |
hadoop_AbstractDelegationTokenBinding_getTokenIssuingPolicy_rdh | /**
* Predicate: will this binding issue a DT?
* That is: should the filesystem declare that it is issuing
* delegation tokens? If so, the returned policy describes how they are issued.
*
* @return a declaration of what will happen when asked for a token.
*/
public TokenIssuingPolicy getTokenIssuingPolicy() {
return TokenIssuingPolicy.RequestNewToken;
} | 3.26 |
hadoop_AbstractDelegationTokenBinding_getKind_rdh | /**
* Get the kind of the tokens managed here.
*
* @return the token kind.
*/
public Text getKind() {
return kind;
} | 3.26 |
hadoop_AbstractDelegationTokenBinding_convertTokenIdentifier_rdh | /**
* Verify that a token identifier is of a specific class.
* This will reject subclasses (i.e. it is stricter than
* {@code instanceof}), then cast the identifier to that type.
*
* @param <T>
* type of S3A delegation token identifier.
* @param identifier
* identifier to validate
* @param expectedClass
* class of the expected token identifier.
* @return token identifier.
* @throws DelegationTokenIOException
* If the wrong class was found.
*/
protected <T extends AbstractS3ATokenIdentifier> T convertTokenIdentifier(final AbstractS3ATokenIdentifier identifier, final Class<T> expectedClass) throws DelegationTokenIOException {
if (!identifier.getClass().equals(expectedClass)) {
throw new DelegationTokenIOException((((((DelegationTokenIOException.TOKEN_WRONG_CLASS + "; expected a token identifier of type ") + expectedClass) + " but got ") + identifier.getClass()) + " and kind ") + identifier.getKind());
}
return ((T) (identifier));
} | 3.26 |
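The check above uses exact class equality rather than `instanceof`, so subclasses of the expected identifier are rejected. A minimal standalone sketch of the same idea, using hypothetical stand-in classes rather than the Hadoop token types:

```java
// Hypothetical stand-in types, only for illustration.
class BaseIdentifier {
}

class SessionIdentifier extends BaseIdentifier {
}

public class StrictCastDemo {

    // Exact class match only: subclasses of `expected` are rejected, unlike instanceof.
    static <T extends BaseIdentifier> T convert(BaseIdentifier id, Class<T> expected) {
        if (!id.getClass().equals(expected)) {
            throw new IllegalArgumentException(
                "Expected " + expected.getName() + " but got " + id.getClass().getName());
        }
        return expected.cast(id);
    }

    public static void main(String[] args) {
        BaseIdentifier id = new SessionIdentifier();
        System.out.println(convert(id, SessionIdentifier.class).getClass().getSimpleName()); // passes
        // convert(id, BaseIdentifier.class) would throw, because the runtime class differs.
    }
}
```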
hadoop_AbstractDelegationTokenBinding_createDelegationToken_rdh | /**
* Create a delegation token for the user.
* This will only be called if a new DT is needed, that is: the
* filesystem has been deployed unbonded.
*
* @param policy
* minimum policy to use, if known.
* @param encryptionSecrets
* encryption secrets for the token.
* @param renewer
* the principal permitted to renew the token.
* @return the token or null if the back end does not want to issue one.
* @throws IOException
* if one cannot be created
*/
public Token<AbstractS3ATokenIdentifier> createDelegationToken(final Optional<RoleModel.Policy> policy, final EncryptionSecrets encryptionSecrets, final Text renewer) throws IOException {
requireServiceStarted();
final AbstractS3ATokenIdentifier v0 = createTokenIdentifier(policy, encryptionSecrets, renewer);
if (v0 != null) {
Token<AbstractS3ATokenIdentifier> token = new Token<>(v0, secretManager);
token.setKind(getKind());
LOG.debug("Created token {} with token identifier {}", token, v0);
return token;
} else {
return null;
}
} | 3.26 |
hadoop_AbstractDelegationTokenBinding_getOwnerText_rdh | /**
* Return the name of the owner to be used in tokens.
* This may be that of the UGI owner, or it could be related to
* the AWS login.
*
* @return a text name of the owner.
*/
public Text getOwnerText() {
return new Text(getOwner().getUserName());
} | 3.26 |
hadoop_NMContainerTokenSecretManager_startContainerSuccessful_rdh | /**
* Container start has gone through. We need to store the containerId in order
* to block future container start requests with the same container token. This
* container token needs to be saved until it expires.
*/
public synchronized void startContainerSuccessful(ContainerTokenIdentifier tokenId) {
removeAnyContainerTokenIfExpired();
ContainerId containerId = tokenId.getContainerID();
Long v10 = tokenId.getExpiryTimeStamp();
// We might have multiple containers with same expiration time.
if (!recentlyStartedContainerTracker.containsKey(v10)) {
recentlyStartedContainerTracker.put(v10, new ArrayList<ContainerId>());
}
recentlyStartedContainerTracker.get(v10).add(containerId);
try {
stateStore.storeContainerToken(containerId, v10);
} catch (IOException e) {
LOG.error("Unable to store token for container " + containerId, e);
}
} | 3.26 |
hadoop_NMContainerTokenSecretManager_retrievePassword_rdh | /**
* This override validates ContainerTokens generated using
* different {@link MasterKey}s.
*/
@Override
public synchronized byte[] retrievePassword(ContainerTokenIdentifier identifier) throws InvalidToken {
int keyId = identifier.getMasterKeyId();
MasterKeyData masterKeyToUse = null;
if ((this.previousMasterKey != null) && (keyId == this.previousMasterKey.getMasterKey().getKeyId())) {
// A container-launch has come in with a token generated off the last
// master-key
masterKeyToUse = this.previousMasterKey;
} else if (keyId == super.currentMasterKey.getMasterKey().getKeyId()) {
// A container-launch has come in with a token generated off the current
// master-key
masterKeyToUse = super.currentMasterKey;
}
if ((nodeHostAddr != null) && (!identifier.getNmHostAddress().equals(nodeHostAddr))) {
// Valid container token used for incorrect node.
throw new SecretManager.InvalidToken((((("Given Container " + identifier.getContainerID().toString()) + " identifier is not valid for current Node manager. Expected : ") + nodeHostAddr) + " Found : ") + identifier.getNmHostAddress());
}
if (masterKeyToUse != null) {
return retrievePasswordInternal(identifier, masterKeyToUse);
}
// Invalid request. Like startContainer() with token generated off
// old-master-keys.
throw new SecretManager.InvalidToken(("Given Container " + identifier.getContainerID().toString()) + " seems to have an illegally generated token.");
} | 3.26 |
hadoop_NMContainerTokenSecretManager_isValidStartContainerRequest_rdh | /**
* Container will be remembered based on expiration time of the container
* token used for starting the container. It is safe to use expiration time
* as there is a one-to-many mapping between expiration time and containerId.
*
* @return true if the current token identifier is not present in cache.
*/
public synchronized boolean isValidStartContainerRequest(ContainerTokenIdentifier containerTokenIdentifier) {
removeAnyContainerTokenIfExpired();
Long expTime = containerTokenIdentifier.getExpiryTimeStamp();
List<ContainerId> containers = this.recentlyStartedContainerTracker.get(expTime);
if ((containers == null) || (!containers.contains(containerTokenIdentifier.getContainerID()))) {
return true;
} else {
return false;
}
} | 3.26 |
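The two methods above track started containers in a map keyed by token expiry time, so whole buckets can be dropped once they expire and replayed start requests can be rejected. A simplified sketch of that bookkeeping, using plain strings in place of `ContainerId` and without the NM state store:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;

// Simplified model of the expiry-time-keyed container tracker (not the NM implementation).
public class ExpiryBucketTracker {

    // Token expiry time (ms since epoch) -> containers whose tokens expire at that instant.
    private final TreeMap<Long, List<String>> recentlyStarted = new TreeMap<>();

    // Record a successfully started container under its token's expiry time.
    public synchronized void startContainerSuccessful(String containerId, long expiryMs) {
        removeExpired(System.currentTimeMillis());
        recentlyStarted.computeIfAbsent(expiryMs, k -> new ArrayList<>()).add(containerId);
    }

    // A start request is valid only if this container has not been started already.
    public synchronized boolean isValidStartContainerRequest(String containerId, long expiryMs) {
        removeExpired(System.currentTimeMillis());
        List<String> bucket = recentlyStarted.get(expiryMs);
        return bucket == null || !bucket.contains(containerId);
    }

    // Drop every bucket whose expiry time has already passed.
    private void removeExpired(long nowMs) {
        recentlyStarted.headMap(nowMs, true).clear();
    }
}
```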
hadoop_NMContainerTokenSecretManager_setMasterKey_rdh | /**
* Used by NodeManagers to create a token-secret-manager with the key obtained
* from the RM. This can happen during registration or when the RM rolls the
* master-key and signals the NM.
*
* @param masterKeyRecord
*/
@Private
public synchronized void setMasterKey(MasterKey masterKeyRecord) {
// Update keys only if the key has changed.
if ((super.currentMasterKey == null) || (super.currentMasterKey.getMasterKey().getKeyId() != masterKeyRecord.getKeyId())) {
LOG.info("Rolling master-key for container-tokens, got key with id " + masterKeyRecord.getKeyId());
if (super.currentMasterKey != null) {
updatePreviousMasterKey(super.currentMasterKey);
}
updateCurrentMasterKey(new MasterKeyData(masterKeyRecord, createSecretKey(masterKeyRecord.getBytes().array())));
}
} | 3.26 |
hadoop_DefaultLCEResourcesHandler_preExecute_rdh | /* LCE Resources Handler interface */
public void preExecute(ContainerId containerId, Resource containerResource) {
} | 3.26 |
hadoop_AbstractTracking_copy_rdh | /**
* Subclass instances may call this method during cloning to copy the values of
* all properties stored in this base class.
*
* @param dest
* AbstractTracking destination for copying properties
*/
protected void copy(AbstractTracking dest) {
dest.beginTime = beginTime;
dest.endTime = endTime;
} | 3.26 |
hadoop_KeyProviderCache_invalidateCache_rdh | /**
* Invalidate cache. KeyProviders in the cache will be closed by cache hook.
*/
@VisibleForTesting
synchronized void invalidateCache() {
LOG.debug("Invalidating all cached KeyProviders.");
if (cache != null) {
cache.invalidateAll();
}
} | 3.26 |
hadoop_DatanodeLocalInfo_getUptime_rdh | /**
* Get the uptime.
*/
public long getUptime() {
return this.uptime;
} | 3.26 |
hadoop_DatanodeLocalInfo_getConfigVersion_rdh | /**
* Get the config version.
*/
public String getConfigVersion() {
return this.configVersion;
} | 3.26 |
hadoop_DatanodeLocalInfo_getSoftwareVersion_rdh | /**
* Get the software version.
*/
public String getSoftwareVersion() {
return this.softwareVersion;
} | 3.26 |
hadoop_DatanodeLocalInfo_getDatanodeLocalReport_rdh | /**
* A formatted string for printing the status of the DataNode.
*/
public String getDatanodeLocalReport() {
return (((("Uptime: " + getUptime()) + ", Software version: ") + getSoftwareVersion()) + ", Config version: ") + getConfigVersion();
} | 3.26 |
hadoop_GetApplicationsRequest_newInstance_rdh | /**
* <p>
* The request from clients to get a report of Applications matching the
* given application types and application states in the cluster from the
* <code>ResourceManager</code>.
* </p>
*
* @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)
* @param applicationStates
* application states.
* @param applicationTypes
* application types.
* @return a report of Applications in <code>GetApplicationsRequest</code>
*/
@Public
@Stable
public static GetApplicationsRequest newInstance(Set<String> applicationTypes, EnumSet<YarnApplicationState> applicationStates) {
GetApplicationsRequest request = Records.newRecord(GetApplicationsRequest.class);
request.setApplicationTypes(applicationTypes);
request.setApplicationStates(applicationStates);
return request;
} | 3.26 |
hadoop_AccessTokenTimer_setExpiresIn_rdh | /**
* Set when the access token will expire as reported by the oauth server,
* i.e. in seconds from now.
*
* @param expiresIn
* Access time expiration as reported by OAuth server
*/
public void setExpiresIn(String expiresIn) {
this.nextRefreshMSSinceEpoch = convertExpiresIn(timer, expiresIn);
} | 3.26 |
hadoop_AccessTokenTimer_getNextRefreshMSSinceEpoch_rdh | /**
* Get next time we should refresh the token.
*
* @return Next time since epoch we'll need to refresh the token.
*/
public long getNextRefreshMSSinceEpoch() {
return nextRefreshMSSinceEpoch;
} | 3.26 |
hadoop_AccessTokenTimer_convertExpiresIn_rdh | /**
* The expires_in param from OAuth is in seconds-from-now. Convert to
* milliseconds-from-epoch
*/
static Long convertExpiresIn(Timer timer, String expiresInSecs) {
long expiresSecs = Long.parseLong(expiresInSecs);
long expiresMs = expiresSecs * 1000;
return timer.now() + expiresMs;
} | 3.26 |
hadoop_AccessTokenTimer_shouldRefresh_rdh | /**
* Return true if the current token has expired or will expire within the
* EXPIRE_BUFFER_MS (to give ample wiggle room for the call to be made to
* the server).
*/
public boolean shouldRefresh() {
long lowerLimit = nextRefreshMSSinceEpoch - EXPIRE_BUFFER_MS;
long currTime = timer.now();
return currTime > lowerLimit;
} | 3.26 |
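Taken together, these methods convert the OAuth `expires_in` value (seconds from now) into milliseconds since the epoch and refresh the token slightly before it expires. A self-contained sketch of that arithmetic, with an illustrative one-minute buffer (the actual `EXPIRE_BUFFER_MS` constant is defined in `AccessTokenTimer` and may differ):

```java
public class TokenRefreshSketch {

    // Illustrative refresh buffer; the real EXPIRE_BUFFER_MS lives in AccessTokenTimer.
    private static final long EXPIRE_BUFFER_MS = 60_000L;

    // expires_in arrives as seconds-from-now; store the expiry as ms-since-epoch.
    static long convertExpiresIn(long nowMs, String expiresInSecs) {
        return nowMs + Long.parseLong(expiresInSecs) * 1000L;
    }

    // Refresh once the current time is inside the buffer before the expiry time.
    static boolean shouldRefresh(long nowMs, long nextRefreshMsSinceEpoch) {
        return nowMs > nextRefreshMsSinceEpoch - EXPIRE_BUFFER_MS;
    }

    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        long expiry = convertExpiresIn(now, "3600");                 // token valid for an hour
        System.out.println(shouldRefresh(now, expiry));              // false: plenty of time left
        System.out.println(shouldRefresh(expiry - 30_000L, expiry)); // true: inside the buffer
    }
}
```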
hadoop_AccessTokenTimer_setExpiresInMSSinceEpoch_rdh | /**
* Set when the access token will expire in milliseconds from epoch,
* as required by the WebHDFS configuration. This is a bit hacky and lame.
*
* @param expiresInMSSinceEpoch
* Access time expiration in ms since epoch.
*/
public void setExpiresInMSSinceEpoch(String expiresInMSSinceEpoch) {
this.nextRefreshMSSinceEpoch = Long.parseLong(expiresInMSSinceEpoch);
} | 3.26 |
hadoop_SyncableDataOutputStream_getOutStream_rdh | /**
* Get a reference to the wrapped output stream.
*
* @return the underlying output stream
*/
@InterfaceAudience.LimitedPrivate({ "HDFS" })
public OutputStream getOutStream() {
return out;
} | 3.26 |
hadoop_Hash_getInstance_rdh | /**
* Get a singleton instance of hash function of a type
* defined in the configuration.
*
* @param conf
* current configuration
* @return defined hash type, or null if type is invalid
*/
public static Hash getInstance(Configuration conf) {
int type = getHashType(conf);
return getInstance(type);
} | 3.26 |
hadoop_Hash_hash_rdh | /**
* Calculate a hash using all bytes from the input argument,
* and a provided seed value.
*
* @param bytes
* input bytes
* @param initval
* seed value
* @return hash value
*/
public int hash(byte[] bytes, int initval) {
return hash(bytes, bytes.length, initval);
} | 3.26 |
hadoop_Hash_getHashType_rdh | /**
* This utility method converts the name of the configured
* hash type to a symbolic constant.
*
* @param conf
* configuration
* @return one of the predefined constants
*/
public static int getHashType(Configuration conf) {
String name = conf.get(HADOOP_UTIL_HASH_TYPE_KEY, HADOOP_UTIL_HASH_TYPE_DEFAULT);
return parseHashType(name);
} | 3.26 |
hadoop_Hash_parseHashType_rdh | /**
* This utility method converts String representation of hash function name
* to a symbolic constant. Currently two function types are supported,
* "jenkins" and "murmur".
*
* @param name
* hash function name
* @return one of the predefined constants
*/
public static int parseHashType(String name) {
if ("jenkins".equalsIgnoreCase(name)) {return JENKINS_HASH;
} else if ("murmur".equalsIgnoreCase(name)) {
return MURMUR_HASH;
} else {
return INVALID_HASH;
}
} | 3.26 |
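As a usage sketch of the configuration-driven selection above (assuming the Hadoop `Configuration` and `Hash` classes are on the classpath, and that the key constant resolves to `hadoop.util.hash.type`, which should be treated as an assumption here):

```java
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Hash;

public class HashSelectionDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Select the murmur implementation via configuration; "jenkins" is the other option.
        conf.set("hadoop.util.hash.type", "murmur");
        Hash hash = Hash.getInstance(conf);                                    // configured singleton
        int h = hash.hash("example-key".getBytes(StandardCharsets.UTF_8), -1); // hash all bytes, seed -1
        System.out.println("hash = " + h);
    }
}
```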
hadoop_DateTimeUtils_m0_rdh | /**
* Tries to identify if an operation was recently executed based on the LMT of
* a file or folder. The LMT needs to be more recent than the original request
* start time. To allow for clock skew with the server, an LMT within
* DEFAULT_CLOCK_SKEW_WITH_SERVER_IN_MS of the request start time is also
* considered to qualify as a recent operation.
*
* @param lastModifiedTime
* File/Folder LMT
* @param expectedLMTUpdateTime
* original request timestamp which should
* have updated the LMT on target
* @return true if the LMT is within timespan for recent operation, else false
*/
public static boolean m0(final String lastModifiedTime, final Instant expectedLMTUpdateTime) {
long lmtEpochTime = DateTimeUtils.parseLastModifiedTime(lastModifiedTime);
long currentEpochTime = expectedLMTUpdateTime.toEpochMilli();
return (lmtEpochTime > currentEpochTime) || ((currentEpochTime - lmtEpochTime) <= DEFAULT_CLOCK_SKEW_WITH_SERVER_IN_MS);
} | 3.26 |
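A minimal standalone version of the same recency test, with an illustrative five-minute skew allowance (the real `DEFAULT_CLOCK_SKEW_WITH_SERVER_IN_MS` constant is defined elsewhere in the ABFS client and may differ):

```java
import java.time.Instant;

public class RecentOperationCheck {

    // Illustrative skew allowance; not the actual ABFS constant.
    private static final long CLOCK_SKEW_MS = 5 * 60 * 1000L;

    // "Recent" means the LMT is at or after the request time, or lags it
    // by no more than the allowed clock skew.
    static boolean isRecent(long lmtEpochMs, Instant expectedLmtUpdateTime) {
        long requestMs = expectedLmtUpdateTime.toEpochMilli();
        return lmtEpochMs > requestMs || (requestMs - lmtEpochMs) <= CLOCK_SKEW_MS;
    }

    public static void main(String[] args) {
        Instant request = Instant.now();
        System.out.println(isRecent(request.toEpochMilli() - 60_000L, request));  // true: within skew
        System.out.println(isRecent(request.toEpochMilli() - 600_000L, request)); // false: too old
    }
}
```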
hadoop_SimpleTcpServer_getBoundPort_rdh | // boundPort will be set only after server starts
public int getBoundPort() {
return this.boundPort;
} | 3.26 |
hadoop_FederationProtocolPBTranslator_readInstance_rdh | /**
* Read instance from base64 data.
*
* @param base64String
* String containing Base64 data.
* @throws IOException
* If the protobuf message build fails.
*/
@SuppressWarnings("unchecked")
public void readInstance(String base64String) throws IOException {
byte[] bytes = Base64.decodeBase64(base64String);
Message msg = getBuilder().mergeFrom(bytes).build();
this.proto = ((P) (msg));
} | 3.26 |
hadoop_FederationProtocolPBTranslator_getBuilder_rdh | /**
* Create or return the cached protobuf builder for this translator.
*
* @return cached Builder instance
*/
@SuppressWarnings("unchecked")
public B getBuilder() {
if (this.builder == null) {
try {
Method method = protoClass.getMethod("newBuilder");
this.builder = ((B) (method.invoke(null)));
if (this.proto != null) {
// Merge in existing immutable proto
this.builder.mergeFrom(this.proto);
}
} catch (ReflectiveOperationException e) {
this.builder = null;
}
}
return this.builder;
} | 3.26 |
hadoop_FederationProtocolPBTranslator_build_rdh | /**
* Get the serialized proto object. If the translator was created from a byte
* stream, returns the initial byte stream. Otherwise, creates a new byte
* stream from the cached builder.
*
* @return Protobuf message object
*/
@SuppressWarnings("unchecked")
public P build() {
if (this.builder != null) {
// serialize from builder (mutable) first
Message m = this.builder.build();
return ((P) (m));
} else if (this.proto != null) {
// Use immutable message source, message is unchanged
return this.proto;
}
return null;
} | 3.26 |
hadoop_FederationProtocolPBTranslator_getProtoOrBuilder_rdh | /**
* Returns an interface to access data stored within this object. The object
* may have been initialized either via a builder or by an existing protobuf
* byte stream.
*
* @return MessageOrBuilder protobuf interface for the requested class.
*/
@SuppressWarnings("unchecked")
public T getProtoOrBuilder() {
if (this.builder != null) {
// Use mutable builder if it exists
return ((T) (this.builder));
} else if (this.proto != null) {
// Use immutable message source
return ((T) (this.proto));
} else {
// Construct empty builder
return ((T) (this.getBuilder()));
}
} | 3.26 |
hadoop_ApplicationMaster_addAsLocalResourceFromEnv_rdh | /**
* Add the given resource into the map of resources, using information from
* the supplied environment variables.
*
* @param resource
* The resource to add.
* @param localResources
* Map of local resources to insert into.
* @param env
* Map of environment variables.
*/
public void addAsLocalResourceFromEnv(DynoResource resource, Map<String, LocalResource> localResources, Map<String, String> env) {
LOG.debug("Adding resource to localResources: " + resource);
String resourcePath = resource.getResourcePath();
if (resourcePath == null) {
// Default to using the file name in the path
resourcePath = resource.getPath(env).getName();
}
localResources.put(resourcePath, LocalResource.newInstance(URL.fromPath(resource.getPath(env)), resource.getType(), LocalResourceVisibility.APPLICATION, resource.getLength(env), resource.getTimestamp(env)));
} | 3.26 |
hadoop_ApplicationMaster_waitForCompletion_rdh | /**
* Wait until the application has finished and is ready for cleanup.
*/
private void waitForCompletion() throws InterruptedException {
synchronized(completionLock) {
while (!completed) {
completionLock.wait();
}
}
} | 3.26 |
hadoop_ApplicationMaster_isDataNode_rdh | /**
* Return true iff {@code containerId} represents a DataNode container.
*/
private boolean isDataNode(ContainerId containerId) {
return datanodeContainers.containsKey(containerId);
} | 3.26 |
hadoop_ApplicationMaster_printUsage_rdh | /**
* Helper function to print usage.
*
* @param opts
* parsed command line options
*/
private void printUsage(Options opts) {
new HelpFormatter().printHelp("ApplicationMaster", opts);
} | 3.26 |
hadoop_ApplicationMaster_getLocalResources_rdh | /**
* Get the map of local resources to be used for launching this container.
*/
private Map<String, LocalResource> getLocalResources() {
Map<String, LocalResource> localResources = new HashMap<>();
Map<String, String> envs = System.getenv();
addAsLocalResourceFromEnv(DynoConstants.CONF_ZIP, localResources, envs);
addAsLocalResourceFromEnv(DynoConstants.START_SCRIPT, localResources, envs);
addAsLocalResourceFromEnv(DynoConstants.HADOOP_BINARY, localResources, envs);
addAsLocalResourceFromEnv(DynoConstants.VERSION, localResources, envs);
addAsLocalResourceFromEnv(DynoConstants.DYNO_DEPENDENCIES, localResources, envs);
if (isNameNodeLauncher) {
addAsLocalResourceFromEnv(DynoConstants.FS_IMAGE, localResources, envs);
addAsLocalResourceFromEnv(DynoConstants.FS_IMAGE_MD5, localResources, envs);
} else {
int blockFilesToLocalize = Math.max(1, amOptions.getDataNodesPerCluster());
for (int i = 0; i < blockFilesToLocalize; i++) {
try {
localResources.put(DynoConstants.BLOCK_LIST_RESOURCE_PATH_PREFIX + i, blockListFiles.remove(0));
} catch (IndexOutOfBoundsException e) {
break;
}
}
}
return localResources;
} | 3.26 |
hadoop_ApplicationMaster_init_rdh | /**
* Parse command line options.
*
* @param args
* Command line args
* @return Whether init successful and run should be invoked
* @throws ParseException
* on error while parsing options
*/
public boolean init(String[] args) throws ParseException {
Options opts = new Options();
AMOptions.setOptions(opts);
CommandLine cliParser = new GnuParser().parse(opts, args);
if (args.length == 0) {
printUsage(opts);
throw new IllegalArgumentException("No args specified for application master to initialize");
}
if (cliParser.hasOption("help")) {
printUsage(opts);
return false;
}
Map<String, String> envs = System.getenv();
remoteStoragePath = new Path(envs.get(DynoConstants.REMOTE_STORAGE_PATH_ENV));
applicationAcls = new HashMap<>();
applicationAcls.put(ApplicationAccessType.VIEW_APP, envs.get(DynoConstants.JOB_ACL_VIEW_ENV));
launchingUser = envs.get(Environment.USER.name());
if (envs.containsKey(DynoConstants.REMOTE_NN_RPC_ADDR_ENV)) {
launchNameNode = false;
namenodeServiceRpcAddress = envs.get(DynoConstants.REMOTE_NN_RPC_ADDR_ENV);
} else {
launchNameNode = true;
// namenodeServiceRpcAddress will be set in run() once properties are
// available
}
ContainerId containerId = ContainerId.fromString(envs.get(Environment.CONTAINER_ID.name()));
ApplicationAttemptId appAttemptID = containerId.getApplicationAttemptId();
LOG.info("Application master for app: appId={}, clusterTimestamp={}, " + "attemptId={}", appAttemptID.getApplicationId().getId(), appAttemptID.getApplicationId().getClusterTimestamp(), appAttemptID.getAttemptId());
amOptions = AMOptions.initFromParser(cliParser);
return true;
} | 3.26 |
hadoop_ApplicationMaster_m0_rdh | /**
*
* @return True iff the application successfully completed
*/
private boolean m0() {
// Join all launched threads
// needed for when we time out
// and we need to release containers
for (Thread launchThread : f1) {
try {
launchThread.join(10000);
} catch (InterruptedException e) {
LOG.info("Exception thrown in thread join: " + e.getMessage());
e.printStackTrace();
}
}
// When the application completes, it should stop all running containers
LOG.info("Application completed. Stopping running containers");
nmClientAsync.stop();
// When the application completes, it should send a finish application
// signal to the RM
LOG.info("Application completed. Signalling finish to RM");
FinalApplicationStatus appStatus;
String appMessage = null;
boolean success;
if ((numFailedDataNodeContainers.get() == 0) && (numCompletedDataNodeContainers.get() == numTotalDataNodes)) {
appStatus = FinalApplicationStatus.SUCCEEDED;
success = true;
} else {
appStatus = FinalApplicationStatus.FAILED;
appMessage = (((((("Diagnostics: total=" + numTotalDataNodeContainers)
+ ", completed=") + numCompletedDataNodeContainers.get()) + ", allocated=") + numAllocatedDataNodeContainers.get()) + ", failed=") + numFailedDataNodeContainers.get();success = false;
}
try {
amRMClient.unregisterApplicationMaster(appStatus, appMessage, null);
} catch (YarnException | IOException ex) {
LOG.error("Failed to unregister application", ex);
}
amRMClient.stop();
return success;
} | 3.26 |
hadoop_ApplicationMaster_markCompleted_rdh | /**
* Mark that this application should begin cleaning up and exit.
*/
private void markCompleted() {
synchronized(completionLock) {
completed = true;
completionLock.notify();
}
} | 3.26 |
hadoop_ApplicationMaster_setupContainerAskForRM_rdh | /**
* Setup the request that will be sent to the RM for the container ask.
*
* @return the setup ResourceRequest to be sent to RM
*/
private ContainerRequest setupContainerAskForRM(int memory, int vcores, int priority, String nodeLabel) {
Priority v45 = Records.newRecord(Priority.class);
v45.setPriority(priority);
// Set up resource type requirements
// For now, memory and CPU are supported so we set memory and cpu
// requirements
Resource capability = Records.newRecord(Resource.class);
capability.setMemorySize(memory);
capability.setVirtualCores(vcores);
return new ContainerRequest(capability, null, null, v45, true, nodeLabel);
} | 3.26 |
hadoop_ApplicationMaster_run_rdh | /**
* Connects to CM, sets up container launch context for shell command and
* eventually dispatches the container start request to the CM.
*/
@Override
public void run() {
LOG.info((("Setting up container launch context for containerid=" + container.getId()) + ", isNameNode=") + isNameNodeLauncher);
ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
// Set the environment
ctx.setEnvironment(amOptions.getShellEnv());
ctx.setApplicationACLs(applicationAcls);
try {
ctx.setLocalResources(getLocalResources());
ctx.setCommands(getContainerStartCommand());
} catch (IOException e) {
LOG.error("Error while configuring container!", e);
return;
}
// Set up tokens for the container
ctx.setTokens(allTokens.duplicate());
nmClientAsync.startContainerAsync(container, ctx);
LOG.info("Starting {}; track at: http://{}/node/containerlogs/{}/{}/", isNameNodeLauncher ? "NAMENODE" : "DATANODE", container.getNodeHttpAddress(), container.getId(), launchingUser);
} | 3.26 |
hadoop_ApplicationMaster_isNameNode_rdh | /**
* Return true iff {@code containerId} represents the NameNode container.
*/
private boolean isNameNode(ContainerId containerId) {
return (namenodeContainer != null) && namenodeContainer.getId().equals(containerId);
} | 3.26 |
hadoop_ApplicationMaster_getContainerStartCommand_rdh | /**
* Return the command used to start this container.
*/
private List<String> getContainerStartCommand() throws IOException {
// Set the necessary command to execute on the allocated container
List<String> vargs = new ArrayList<>();
// Set executable command
vargs.add("./" +
DynoConstants.START_SCRIPT.getResourcePath());
String component = (isNameNodeLauncher) ? "namenode" : "datanode";
vargs.add(component);
if (isNameNodeLauncher) {
vargs.add(remoteStoragePath.getFileSystem(conf).makeQualified(remoteStoragePath).toString());
} else {
vargs.add(namenodeServiceRpcAddress);
vargs.add(String.valueOf(amOptions.getDataNodeLaunchDelaySec() < 1 ? 0 : RAND.nextInt(Ints.checkedCast(amOptions.getDataNodeLaunchDelaySec()))));
}
// Add log redirect params
vargs.add(("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR) + "/stdout");
vargs.add(("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR) + "/stderr");
LOG.info((("Completed setting up command for " + component) + ": ") + vargs);
return Lists.newArrayList(Joiner.on(" ").join(vargs));
} | 3.26 |
hadoop_ApplicationMaster_isComplete_rdh | /**
* Check completion status of the application.
*
* @return True iff it has completed.
*/
private boolean isComplete() {
synchronized(completionLock) {
return completed;
}
} | 3.26 |
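`waitForCompletion`, `markCompleted` and `isComplete` together form a standard guarded-wait pattern on a shared lock. A compact standalone version of the same idiom (not the ApplicationMaster code itself):

```java
// Minimal guarded-wait latch: one thread blocks until another marks completion.
public class CompletionLatch {

    private final Object completionLock = new Object();
    private boolean completed = false;

    // Block until some other thread marks the work complete.
    public void waitForCompletion() throws InterruptedException {
        synchronized (completionLock) {
            while (!completed) {      // loop guards against spurious wakeups
                completionLock.wait();
            }
        }
    }

    // Mark completion and wake any waiting threads.
    public void markCompleted() {
        synchronized (completionLock) {
            completed = true;
            completionLock.notifyAll();
        }
    }

    public boolean isComplete() {
        synchronized (completionLock) {
            return completed;
        }
    }
}
```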
hadoop_ApplicationMaster_main_rdh | /**
*
* @param args
* Command line args
*/
public static void main(String[] args) {
boolean result = false;
try {
ApplicationMaster appMaster = new ApplicationMaster();
LOG.info("Initializing ApplicationMaster");
boolean doRun = appMaster.init(args);
if (!doRun) {
System.exit(0);
}
result = appMaster.run();
} catch (Throwable t) {
LOG.error("Error running ApplicationMaster", t);
System.exit(1);
}
if (result) {
LOG.info("Application Master completed successfully. exiting");
System.exit(0);
} else {
LOG.info("Application Master failed. exiting");
System.exit(2);
}
} | 3.26 |
hadoop_FederationCache_buildSubClusterInfoMap_rdh | /**
* According to the subClusters, build SubClusterInfoMap.
*
* @param subClusters
* subCluster List.
* @return SubClusterInfoMap.
*/
private static Map<SubClusterId, SubClusterInfo> buildSubClusterInfoMap(List<SubClusterInfo> subClusters) {
Map<SubClusterId, SubClusterInfo> subClustersMap = new HashMap<>(subClusters.size());
for (SubClusterInfo subCluster : subClusters) {
subClustersMap.put(subCluster.getSubClusterId(), subCluster);
}
return subClustersMap;
} | 3.26 |
hadoop_FederationCache_buildSubClusterInfoResponse_rdh | /**
* Build SubClusterInfo Response.
*
* @param filterInactiveSubClusters
* whether to filter out inactive sub-clusters.
* @return SubClusterInfo Response.
* @throws YarnException
* exceptions from yarn servers.
*/
private CacheResponse<SubClusterInfo> buildSubClusterInfoResponse(final boolean filterInactiveSubClusters) throws YarnException {
GetSubClustersInfoRequest request = GetSubClustersInfoRequest.newInstance(filterInactiveSubClusters);
GetSubClustersInfoResponse subClusters = stateStore.getSubClusters(request);
CacheResponse<SubClusterInfo> response = new SubClusterInfoCacheResponse();
response.setList(subClusters.getSubClusters());
return response;
} | 3.26 |
hadoop_FederationCache_buildSubClusterPolicyConfigurationResponse_rdh | /**
* Build SubClusterPolicyConfiguration Response.
*
* @return SubClusterPolicyConfiguration Response.
* @throws YarnException
* exceptions from yarn servers.
*/
private CacheResponse<SubClusterPolicyConfiguration> buildSubClusterPolicyConfigurationResponse() throws YarnException {
GetSubClusterPoliciesConfigurationsRequest request = GetSubClusterPoliciesConfigurationsRequest.newInstance();
GetSubClusterPoliciesConfigurationsResponse response = stateStore.getPoliciesConfigurations(request);
List<SubClusterPolicyConfiguration> policyConfigs = response.getPoliciesConfigs();
CacheResponse<SubClusterPolicyConfiguration> cacheResponse = new SubClusterPolicyConfigurationCacheResponse();
cacheResponse.setList(policyConfigs);
return cacheResponse;
} | 3.26 |
hadoop_FederationCache_buildGetSubClustersCacheRequest_rdh | // ------------------------------------ SubClustersCache -------------------------
/**
* Build GetSubClusters CacheRequest.
*
* @param cacheKey
* cacheKey.
* @param filterInactiveSubClusters
* filter Inactive SubClusters.
* @return CacheRequest.
* @throws YarnException
* exceptions from yarn servers.
*/
protected CacheRequest<String, CacheResponse<SubClusterInfo>> buildGetSubClustersCacheRequest(String cacheKey, final boolean filterInactiveSubClusters) throws YarnException {
CacheResponse<SubClusterInfo> response = buildSubClusterInfoResponse(filterInactiveSubClusters);
CacheRequest<String, CacheResponse<SubClusterInfo>> cacheRequest = new CacheRequest<>(cacheKey, response);
return cacheRequest;
} | 3.26 |
hadoop_FederationCache_buildSubClusterIdResponse_rdh | /**
* Build SubClusterId Response.
*
* @param applicationId
* applicationId.
* @return subClusterId
* @throws YarnException
* exceptions from yarn servers.
*/
private CacheResponse<SubClusterId> buildSubClusterIdResponse(final ApplicationId applicationId) throws YarnException {
GetApplicationHomeSubClusterRequest request = GetApplicationHomeSubClusterRequest.newInstance(applicationId);
GetApplicationHomeSubClusterResponse response = stateStore.getApplicationHomeSubCluster(request);
ApplicationHomeSubCluster appHomeSubCluster = response.getApplicationHomeSubCluster();
SubClusterId subClusterId = appHomeSubCluster.getHomeSubCluster();
CacheResponse<SubClusterId> cacheResponse = new ApplicationHomeSubClusterCacheResponse();
cacheResponse.setItem(subClusterId);
return cacheResponse;
} | 3.26 |
hadoop_FederationCache_buildGetPoliciesConfigurationsCacheRequest_rdh | // ------------------------------ SubClusterPolicyConfigurationCache -------------------------
/**
* Build GetPoliciesConfigurations CacheRequest.
*
* @param cacheKey
* cacheKey.
* @return CacheRequest.
* @throws YarnException
* exceptions from yarn servers.
*/
protected CacheRequest<String, CacheResponse<SubClusterPolicyConfiguration>> buildGetPoliciesConfigurationsCacheRequest(String cacheKey) throws YarnException {
CacheResponse<SubClusterPolicyConfiguration> response = buildSubClusterPolicyConfigurationResponse();
return new CacheRequest<>(cacheKey, response);
} | 3.26 |
hadoop_FederationCache_buildPolicyConfigMap_rdh | /**
* According to the cacheRequest, build PolicyConfigMap.
*
* @param cacheRequest
* CacheRequest.
* @return PolicyConfigMap.
*/
public static Map<String, SubClusterPolicyConfiguration> buildPolicyConfigMap(CacheRequest<String, ?> cacheRequest) {
Object value = cacheRequest.value;
SubClusterPolicyConfigurationCacheResponse response = FederationCache.SubClusterPolicyConfigurationCacheResponse.class.cast(value);
List<SubClusterPolicyConfiguration> subClusters = response.getList();
return buildPolicyConfigMap(subClusters);
} | 3.26 |
hadoop_FederationCache_buildGetApplicationHomeSubClusterRequest_rdh | // ------------------------------------ ApplicationHomeSubClusterCache -------------------------
/**
* Build GetApplicationHomeSubCluster CacheRequest.
*
* @param cacheKey
* cacheKey.
* @param applicationId
* applicationId.
* @return CacheRequest.
* @throws YarnException
* exceptions from yarn servers.
*/
protected CacheRequest<String, CacheResponse<SubClusterId>> buildGetApplicationHomeSubClusterRequest(String cacheKey, ApplicationId applicationId) throws YarnException {
CacheResponse<SubClusterId> response = buildSubClusterIdResponse(applicationId);
return new CacheRequest<>(cacheKey, response);
} | 3.26 |
hadoop_SinglePendingCommit_serializer_rdh | /**
* Get a JSON serializer for this class.
*
* @return a serializer.
*/
public static JsonSerialization<SinglePendingCommit> serializer() {
return new JsonSerialization<>(SinglePendingCommit.class, false, false);
} | 3.26 |
hadoop_SinglePendingCommit_getCreated_rdh | /**
* When was the upload created?
*
* @return timestamp
*/
public long getCreated() {
return created;
} | 3.26 |
hadoop_SinglePendingCommit_getJobId_rdh | /**
*
* @return Job ID, if known.
*/
public String getJobId() {
return jobId;
} | 3.26 |
hadoop_SinglePendingCommit_getVersion_rdh | /**
*
* @return version marker.
*/
public int getVersion() {
return version;
} | 3.26 |
hadoop_SinglePendingCommit_getDestinationKey_rdh | /**
*
* @return destination key in the bucket.
*/
public String getDestinationKey() {
return destinationKey;
} | 3.26 |
hadoop_SinglePendingCommit_getUri_rdh | /**
*
* @return path URI of the destination.
*/
public String getUri() {
return uri;
} | 3.26 |
hadoop_SinglePendingCommit_getTaskId_rdh | /**
*
* @return Task ID, if known.
*/
public String getTaskId() {
return taskId;
} | 3.26 |
hadoop_SinglePendingCommit_getDate_rdh | /**
* Timestamp as date; no expectation of parseability.
*
* @return date string
*/
public String getDate() {
return date;
} | 3.26 |
hadoop_SinglePendingCommit_getEtags_rdh | /**
*
* @return ordered list of etags.
*/
public List<String> getEtags() {
return etags;
} | 3.26 |
hadoop_SinglePendingCommit_bindCommitData_rdh | /**
* Set the commit data.
*
* @param parts
* ordered list of etags.
* @throws ValidationFailure
* if the data is invalid
*/
public void bindCommitData(List<CompletedPart> parts) throws ValidationFailure {
etags = new ArrayList<>(parts.size());
int counter = 1;
for (CompletedPart part : parts) {
verify(part.partNumber() == counter, "Expected part number %s but got %s", counter, part.partNumber());
etags.add(part.eTag());
counter++;
}
} | 3.26 |
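The loop above checks that the completed parts arrive as a consecutive 1..n sequence before keeping their etags. A small standalone version of that validation, using a simple record in place of the AWS SDK's `CompletedPart`:

```java
import java.util.ArrayList;
import java.util.List;

public class PartListValidation {

    // Stand-in for the SDK's CompletedPart: a part number plus its etag.
    record Part(int partNumber, String eTag) {
    }

    // Keep the etags only if part numbers form the consecutive sequence 1..n.
    static List<String> collectEtags(List<Part> parts) {
        List<String> etags = new ArrayList<>(parts.size());
        int expected = 1;
        for (Part part : parts) {
            if (part.partNumber() != expected) {
                throw new IllegalStateException(
                    "Expected part number " + expected + " but got " + part.partNumber());
            }
            etags.add(part.eTag());
            expected++;
        }
        return etags;
    }

    public static void main(String[] args) {
        System.out.println(collectEtags(List.of(new Part(1, "etag-1"), new Part(2, "etag-2"))));
        // collectEtags(List.of(new Part(2, "etag-2"))) would throw: the sequence must start at 1.
    }
}
```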
hadoop_SinglePendingCommit_iterator_rdh | /**
* Iterate over the etags.
*
* @return an iterator.
*/
@Override
public Iterator<String> iterator() {
return etags.iterator();
} | 3.26 |
hadoop_SinglePendingCommit_putExtraData_rdh | /**
* Set/Update an extra data entry.
*
* @param key
* key
* @param value
* value
*/
public void putExtraData(String key, String value) {
extraData.put(key, value);
} | 3.26 |
hadoop_SinglePendingCommit_getPartCount_rdh | /**
* Get the number of etags.
*
* @return the size of the etag list.
*/
public int getPartCount() {
return etags.size();
} | 3.26 |
hadoop_SinglePendingCommit_touch_rdh | /**
* Set the various timestamp fields to the supplied value.
*
* @param millis
* time in milliseconds
*/
public void touch(long millis) {
created = millis;
saved = millis;
date = new Date(millis).toString();
} | 3.26 |
hadoop_SinglePendingCommit_destinationPath_rdh | /**
* Build the destination path of the object.
*
* @return the path
* @throws IllegalStateException
* if the URI is invalid
*/
public Path destinationPath() {
Preconditions.checkState(StringUtils.isNotEmpty(uri), "Empty uri");
try {
return new Path(new URI(uri));
} catch (URISyntaxException e) {
throw new IllegalStateException("Cannot parse URI " + uri);
}
} | 3.26 |
hadoop_SinglePendingCommit_readObject_rdh | /**
* Deserialize via java Serialization API: deserialize the instance
* and then call {@link #validate()} to verify that the deserialized
* data is valid.
*
* @param inStream
* input stream
* @throws IOException
* IO problem
* @throws ClassNotFoundException
* reflection problems
* @throws ValidationFailure
* validation failure
*/
private void readObject(ObjectInputStream inStream) throws IOException, ClassNotFoundException {
inStream.defaultReadObject();
m0();
} | 3.26 |
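The `readObject` hook above is the standard way to re-validate state after Java deserialization. A minimal sketch of the same pattern on a hypothetical class (not the SinglePendingCommit fields):

```java
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.Serializable;

public class ValidatedBean implements Serializable {

    private static final long serialVersionUID = 1L;
    private final int count;

    public ValidatedBean(int count) {
        this.count = count;
        validate();
    }

    // Invariant shared by construction and deserialization.
    private void validate() {
        if (count < 0) {
            throw new IllegalStateException("count must be non-negative: " + count);
        }
    }

    // Restore the default fields, then re-run validation on the deserialized state.
    private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
        in.defaultReadObject();
        validate();
    }
}
```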
hadoop_SinglePendingCommit_getSaved_rdh | /**
* When was the upload saved?
*
* @return timestamp
*/
public long getSaved() {
return saved;
} | 3.26 |
hadoop_SinglePendingCommit_getLength_rdh | /**
* Destination file size.
*
* @return size of destination object
*/
public long getLength() {
return length;
} | 3.26 |
hadoop_SinglePendingCommit_getUploadId_rdh | /**
*
* @return ID of the upload.
*/
public String getUploadId() {
return f0;
} | 3.26 |
hadoop_SinglePendingCommit_getFilename_rdh | /**
* This is the filename of the pending file itself.
* Used during processing; its persistent value, if any, is ignored.
*
* @return filename
*/
public String getFilename() {
return filename;
} | 3.26 |
hadoop_SinglePendingCommit_getBucket_rdh | /**
*
* @return destination bucket.
*/
public String getBucket() {
return bucket;
} | 3.26 |
hadoop_SinglePendingCommit_load_rdh | /**
* Load an instance from a file, then validate it.
*
* @param fs
* filesystem
* @param path
* path
* @param serDeser
* deserializer
* @param status
* status of file to load or null
* @return the loaded instance
* @throws IOException
* IO failure
* @throws ValidationFailure
* if the data is invalid
*/
public static SinglePendingCommit load(FileSystem fs, Path path, JsonSerialization<SinglePendingCommit> serDeser, @Nullable FileStatus status) throws IOException {
JsonSerialization<SinglePendingCommit> jsonSerialization = (serDeser != null) ? serDeser : serializer();
SinglePendingCommit instance = jsonSerialization.load(fs, path, status);
instance.filename = path.toString();
instance.m0();
return instance;
} | 3.26 |
hadoop_SinglePendingCommit_getExtraData_rdh | /**
* Any custom extra data committer subclasses may choose to add.
*
* @return custom data
*/
public Map<String, String> getExtraData() {
return extraData;
} | 3.26 |
hadoop_JobTokenSelector_selectToken_rdh | /**
* Look through tokens to find the first job token that matches the service
* and return it.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class JobTokenSelector implements TokenSelector<JobTokenIdentifier> {
@SuppressWarnings("unchecked")
@Override
public Token<JobTokenIdentifier> selectToken(Text service, Collection<Token<? extends TokenIdentifier>> tokens) {
if (service == null) {
return null;
}
for (Token<? extends TokenIdentifier> token : tokens) {
if (JobTokenIdentifier.KIND_NAME.equals(token.getKind()) && service.equals(token.getService())) {
return ((Token<JobTokenIdentifier>) (token));
}
}
return null;
} | 3.26 |
hadoop_S3A_finalize_rdh | /**
* Close the file system; the FileContext API doesn't have an explicit close.
*/
@Override
protected void finalize() throws Throwable {
fsImpl.close();
super.finalize();
} | 3.26 |
hadoop_StringValueMin_reset_rdh | /**
* Reset the aggregator.
*/
public void reset() {
minVal = null;
} | 3.26 |
hadoop_StringValueMin_m0_rdh | /**
*
* @return the string representation of the aggregated value
*/
public String m0() {
return minVal;
} | 3.26 |
hadoop_StringValueMin_getVal_rdh | /**
*
* @return the aggregated value
*/
public String getVal() {
return this.minVal;
} | 3.26 |
hadoop_StringValueMin_addNextValue_rdh | /**
* Add a value to the aggregator.
*
* @param val
* a string.
*/
public void addNextValue(Object val) {
String newVal = val.toString();
if ((this.minVal == null) || (this.minVal.compareTo(newVal) > 0)) {
this.minVal = newVal;
}
} | 3.26 |
hadoop_BaseResource_getUri_rdh | /**
* Resource location for a service, e.g.
* /app/v1/services/helloworld
*/
public String getUri() {
return uri;
} | 3.26 |
hadoop_StringValueMax_reset_rdh | /**
* Reset the aggregator.
*/
public void reset() {
maxVal = null;
} | 3.26 |
hadoop_StringValueMax_getVal_rdh | /**
*
* @return the aggregated value
*/
public String getVal() {
return this.maxVal;
} | 3.26 |
hadoop_StringValueMax_addNextValue_rdh | /**
* Add a value to the aggregator.
*
* @param val
* a string.
*/
public void addNextValue(Object val) {
String newVal = val.toString();
if ((this.maxVal == null) || (this.maxVal.compareTo(newVal) < 0)) {
this.maxVal = newVal;
}
} | 3.26 |
hadoop_StringValueMax_getReport_rdh | /**
*
* @return the string representation of the aggregated value
*/
public String getReport() {
return maxVal;
} | 3.26 |
hadoop_LogAggregationWebUtils_verifyAndGetNodeId_rdh | /**
* Verify and parse NodeId.
*
* @param html
* the html
* @param nodeIdStr
* the nodeId string
* @return the {@link NodeId}
*/
public static NodeId verifyAndGetNodeId(Block html, String nodeIdStr) {
if ((nodeIdStr == null) || nodeIdStr.isEmpty()) {
html.h1().__("Cannot get container logs without a NodeId").__();
return null;
}
NodeId nodeId = null;
try {
nodeId = NodeId.fromString(nodeIdStr);
} catch (IllegalArgumentException e) {
html.h1().__("Cannot get container logs. Invalid nodeId: " + nodeIdStr).__();
return null;
}
return nodeId;
} | 3.26 |
hadoop_LogAggregationWebUtils_getLogStartTime_rdh | /**
* Parse log start time from html.
*
* @param startStr
* the start time string
* @return the startIndex
*/
public static long getLogStartTime(String startStr) throws NumberFormatException {
long start = 0;
if ((startStr != null) && (!startStr.isEmpty())) {
start = Long.parseLong(startStr);
}
return start;
} | 3.26 |
hadoop_LogAggregationWebUtils_verifyAndGetContainerId_rdh | /**
* Verify and parse containerId.
*
* @param html
* the html
* @param containerIdStr
* the containerId string
* @return the {@link ContainerId}
*/
public static ContainerId verifyAndGetContainerId(Block html, String containerIdStr) {
if ((containerIdStr == null) || containerIdStr.isEmpty()) {
html.h1().__("Cannot get container logs without a ContainerId").__();
return null;
}
ContainerId containerId = null;
try {
containerId = ContainerId.fromString(containerIdStr);
} catch (IllegalArgumentException e) {
html.h1().__("Cannot get container logs for invalid containerId: " + containerIdStr).__();
return null;
}
return containerId;
} | 3.26 |
hadoop_LogAggregationWebUtils_getLogEndIndex_rdh | /**
* Parse end index from html.
*
* @param html
* the html
* @param endStr
* the end index string
* @return the endIndex
*/
public static long getLogEndIndex(Block html, String endStr) throws NumberFormatException {
long end = Long.MAX_VALUE;
if ((endStr != null) && (!endStr.isEmpty())) {
end = Long.parseLong(endStr);
}
return end;
} | 3.26 |
hadoop_LogAggregationWebUtils_verifyAndGetAppOwner_rdh | /**
* Verify and parse the application owner.
*
* @param html
* the html
* @param appOwner
* the Application owner
* @return the appOwner
*/
public static String verifyAndGetAppOwner(Block html, String appOwner) {
if ((appOwner == null) || appOwner.isEmpty()) {
html.h1().__("Cannot get container logs without an app owner").__();
}
return appOwner;
} | 3.26 |