name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_AuditingFunctions_withinAuditSpan_rdh | /**
* Given a function, return a new function which
* activates and deactivates the span around the inner one.
*
* @param auditSpan
* audit span
* @param operation
* operation
* @param <T>
* Generics Type T.
* @param <R>
* Generics Type R.
* @return a new invocation.
*/
public static <T, R> FunctionRaisingIOE<T, R> withinAuditSpan(
    @Nullable AuditSpan auditSpan, FunctionRaisingIOE<T, R> operation) {
  return auditSpan == null ? operation : x -> {
auditSpan.activate();
return operation.apply(x);
};
} | 3.26 |
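A minimal standalone sketch of the same wrap-or-pass-through pattern shown above; the `AuditSpan` and `FunctionRaisingIOE` types here are simplified stand-ins for illustration, not the Hadoop interfaces.

```java
import java.io.IOException;

public final class AuditWrapSketch {
  // Simplified stand-ins for the Hadoop types, for illustration only.
  interface AuditSpan { void activate(); }
  interface FunctionRaisingIOE<T, R> { R apply(T t) throws IOException; }

  // Null span: return the operation unchanged. Otherwise wrap it so the
  // span is activated before every invocation.
  static <T, R> FunctionRaisingIOE<T, R> withinSpan(
      AuditSpan span, FunctionRaisingIOE<T, R> operation) {
    return span == null ? operation : x -> {
      span.activate();
      return operation.apply(x);
    };
  }

  public static void main(String[] args) throws IOException {
    AuditSpan span = () -> System.out.println("span activated");
    FunctionRaisingIOE<String, Integer> length = String::length;
    System.out.println(withinSpan(span, length).apply("hello")); // prints 5 after activating the span
  }
}
```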
hadoop_RegistryOperationsFactory_createAnonymousInstance_rdh | /**
* Create and initialize an anonymous read/write registry operations instance.
* In a secure cluster, this instance will only have read access to the
* registry.
*
* @param conf
* configuration
* @return an anonymous registry operations instance
* @throws ServiceStateException
* on any failure to initialize
*/
public static RegistryOperations createAnonymousInstance(Configuration conf) {
Preconditions.checkArgument(conf != null, "Null configuration");
conf.set(KEY_REGISTRY_CLIENT_AUTH, REGISTRY_CLIENT_AUTH_ANONYMOUS);
return createInstance("AnonymousRegistryOperations", conf);
} | 3.26 |
hadoop_RegistryOperationsFactory_createKerberosInstance_rdh | /**
 * Create a Kerberos registry service client.
*
* @param conf
* configuration
* @param jaasClientEntry
* the name of the login config entry
* @param principal
* principal of the client.
* @param keytab
* location to the keytab file
* @return a registry service client instance
*/
public static RegistryOperations createKerberosInstance(Configuration conf, String jaasClientEntry, String principal, String keytab) {
Preconditions.checkArgument(conf != null, "Null configuration");
conf.set(KEY_REGISTRY_CLIENT_AUTH, REGISTRY_CLIENT_AUTH_KERBEROS);
conf.set(KEY_REGISTRY_CLIENT_JAAS_CONTEXT, jaasClientEntry);
RegistryOperationsClient operations = new RegistryOperationsClient("KerberosRegistryOperations");
operations.setKerberosPrincipalAndKeytab(principal, keytab);
operations.init(conf);
return operations;
} | 3.26 |
hadoop_RegistryOperationsFactory_createAuthenticatedInstance_rdh | /**
* Create and initialize an operations instance authenticated with write
* access via an <code>id:password</code> pair.
*
 * The instance will have read access across the registry, but write access
 * only to those parts of the registry for which it has been given the
 * relevant permissions.
*
* @param conf
* configuration
* @param id
* user ID
* @param password
* password
* @return a registry operations instance
* @throws ServiceStateException
* on any failure to initialize
* @throws IllegalArgumentException
* if an argument is invalid
*/
  public static RegistryOperations createAuthenticatedInstance(Configuration conf, String id, String password) {
Preconditions.checkArgument(!StringUtils.isEmpty(id), "empty Id");
Preconditions.checkArgument(!StringUtils.isEmpty(password), "empty Password");
Preconditions.checkArgument(conf != null, "Null configuration");
conf.set(KEY_REGISTRY_CLIENT_AUTH, REGISTRY_CLIENT_AUTH_DIGEST);
conf.set(KEY_REGISTRY_CLIENT_AUTHENTICATION_ID, id);
conf.set(KEY_REGISTRY_CLIENT_AUTHENTICATION_PASSWORD, password);
return createInstance("DigestRegistryOperations", conf);
} | 3.26 |
hadoop_RegistryOperationsFactory_createInstance_rdh | /**
* Create and initialize a registry operations instance.
 * Access rights will be determined from the configuration.
*
* @param conf
* configuration
* @return a registry operations instance
* @throws ServiceStateException
* on any failure to initialize
*/
public static RegistryOperations createInstance(Configuration conf) {
return createInstance("RegistryOperations", conf);
} | 3.26 |
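A hedged usage sketch of the factory methods above. Class and package names are inferred from the snippet names (`org.apache.hadoop.registry.client.api.RegistryOperationsFactory`); a real client would also need a running registry service behind these instances.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.registry.client.api.RegistryOperations;
import org.apache.hadoop.registry.client.api.RegistryOperationsFactory;

public class RegistryClientSketch {
  public static void main(String[] args) {
    // Anonymous instance: read-only against a secure registry.
    RegistryOperations anonymous =
        RegistryOperationsFactory.createAnonymousInstance(new Configuration());

    // Digest (id:password) instance: write access limited to paths the id may modify.
    RegistryOperations authenticated =
        RegistryOperationsFactory.createAuthenticatedInstance(new Configuration(), "user1", "secret");

    // The factories initialize the service; the caller starts it before use.
    anonymous.start();
    authenticated.start();
  }
}
```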
hadoop_AbstractConfigurableFederationPolicy_setPolicyInfo_rdh | /**
* Setter method for the configuration weights.
*
* @param policyInfo
* the {@link WeightedPolicyInfo} representing the policy
* configuration.
*/
public void setPolicyInfo(WeightedPolicyInfo policyInfo) {
this.policyInfo = policyInfo;
} | 3.26 |
hadoop_AbstractConfigurableFederationPolicy_getActiveSubclusters_rdh | /**
 * This method gets the map of active subclusters from the {@code FederationStateStoreFacade} and validates that it is not null/empty.
*
* @return the map of ids to info for all active subclusters.
* @throws YarnException
* if we can't get the list.
*/
protected Map<SubClusterId, SubClusterInfo> getActiveSubclusters() throws YarnException {
Map<SubClusterId, SubClusterInfo> activeSubclusters = getPolicyContext().getFederationStateStoreFacade().getSubClusters(true);
if ((activeSubclusters == null) || (activeSubclusters.size() < 1)) {
throw new NoActiveSubclustersException("Zero active subclusters, cannot pick where to send job.");
}
return activeSubclusters;
} | 3.26 |
hadoop_AbstractConfigurableFederationPolicy_getPolicyContext_rdh | /**
* Getter method for the {@link FederationPolicyInitializationContext}.
*
* @return the context for this policy.
*/
public FederationPolicyInitializationContext getPolicyContext() {
return policyContext;
} | 3.26 |
hadoop_AbstractConfigurableFederationPolicy_getIsDirty_rdh | /**
 * Returns whether the last reinitialization required actual changes or
 * was "free" because the weights had not changed. This is used by subclasses
 * that override reinitialize and call super.reinitialize() to decide whether
 * to quit early.
*
* @return whether more work is needed to initialize.
 */
public boolean getIsDirty() {
return isDirty;
}
/**
* Getter method for the configuration weights.
*
* @return the {@link WeightedPolicyInfo} | 3.26 |
hadoop_AbstractConfigurableFederationPolicy_validate_rdh | /**
* Overridable validation step for the policy configuration.
*
* @param newPolicyInfo
* the configuration to test.
* @throws FederationPolicyInitializationException
* if the configuration is not
* valid.
*/
public void validate(WeightedPolicyInfo newPolicyInfo) throws FederationPolicyInitializationException {
if (newPolicyInfo == null) {
throw new FederationPolicyInitializationException("The policy to " + "validate should not be null.");
}
} | 3.26 |
hadoop_AbstractConfigurableFederationPolicy_setPolicyContext_rdh | /**
* Setter method for the {@link FederationPolicyInitializationContext}.
*
* @param policyContext
* the context to assign to this policy.
*/
public void setPolicyContext(FederationPolicyInitializationContext policyContext) {
this.policyContext = policyContext;
} | 3.26 |
hadoop_MarshalledCredentialProvider_createCredentials_rdh | /**
* Perform the binding, looking up the DT and parsing it.
*
* @return true if there were some credentials
* @throws CredentialInitializationException
* validation failure
* @throws IOException
* on a failure
*/
@Override
protected AwsCredentials createCredentials(final Configuration config) throws IOException {
return toAWSCredentials(credentials, typeRequired, component);
} | 3.26 |
hadoop_NamenodePriorityComparator_compareModDates_rdh | /**
* Compare the modification dates.
*
* @param o1
* Context 1.
* @param o2
* Context 2.
* @return Comparison between dates.
*/
private int compareModDates(FederationNamenodeContext o1, FederationNamenodeContext o2) {
// Reverse sort, lowest position is highest priority.
    return ((int) (o2.getDateModified() - o1.getDateModified()));
} | 3.26 |
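One caveat with the comparator above: casting the difference of two long timestamps to int can overflow for widely separated dates. A hedged sketch of the overflow-safe form using `Long.compare`, with a stand-in class instead of `FederationNamenodeContext`:

```java
import java.util.Comparator;

public final class ModDateCompareSketch {
  // Stand-in for FederationNamenodeContext, illustration only.
  static final class Ctx {
    final long dateModified;
    Ctx(long dateModified) { this.dateModified = dateModified; }
  }

  public static void main(String[] args) {
    // Reverse sort by modification date (newest first), without the (int) cast.
    Comparator<Ctx> newestFirst =
        (o1, o2) -> Long.compare(o2.dateModified, o1.dateModified);
    System.out.println(newestFirst.compare(new Ctx(100L), new Ctx(200L))); // 1: the newer context sorts first
  }
}
```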
hadoop_PathFinder_getAbsolutePath_rdh | /**
* Returns the full path name of this file if it is listed in the path
*/
public File getAbsolutePath(String filename) {
  if ((pathenv == null) || (pathSep == null) || (fileSep == null)) {
return null;
}
int val = -1;
String classvalue = pathenv + pathSep;
while (((val = classvalue.indexOf(pathSep)) >= 0) && (classvalue.length() > 0)) {
// Extract each entry from the pathenv
String entry = classvalue.substring(0, val).trim();
File f = new File(entry);
if (f.isDirectory()) {
// this entry in the pathenv is a directory.
// see if the required file is in this directory
f = new File((entry + fileSep) + filename);
}
// see if the filename matches and we can read it
if (f.isFile() && FileUtil.canRead(f)) {
return f;
}
classvalue = classvalue.substring(val + 1).trim();
}
return null;
} | 3.26 |
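The loop above walks the PATH-style string by slicing at each separator index; a standalone sketch of the same search using `split`, purely to illustrate the behaviour:

```java
import java.io.File;

final class PathSearchSketch {
  /** Return the first readable file named {@code filename} on the given PATH-style list, or null. */
  static File findOnPath(String pathEnv, String filename) {
    if (pathEnv == null || filename == null) {
      return null;
    }
    for (String entry : pathEnv.split(File.pathSeparator)) {
      entry = entry.trim();
      if (entry.isEmpty()) {
        continue;
      }
      File candidate = new File(entry, filename);
      if (candidate.isFile() && candidate.canRead()) {
        return candidate;
      }
    }
    return null;
  }

  public static void main(String[] args) {
    System.out.println(findOnPath(System.getenv("PATH"), "java"));
  }
}
```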
hadoop_PathFinder_prependPathComponent_rdh | /**
 * Prepends the specified component to the path list.
*/
public void prependPathComponent(String str) {
  pathenv = (str + pathSep) + pathenv;
} | 3.26 |
hadoop_UnmanagedApplicationManager_finishApplicationMaster_rdh | /**
* Unregisters from the resource manager and stops the request handler thread.
*
* @param request
* the finishApplicationMaster request
* @return the response
* @throws YarnException
* if finishAM call fails
* @throws IOException
* if finishAM call fails
*/
public FinishApplicationMasterResponse finishApplicationMaster(FinishApplicationMasterRequest request) throws YarnException, IOException {
  if (this.userUgi == null) {
if (this.connectionInitiated) {
// This is possible if the async launchUAM is still
// blocked and retrying. Return a dummy response in this case.
LOG.warn("Unmanaged AM still not successfully launched/registered yet." + " Stopping the UAM heartbeat thread anyways.");
return FinishApplicationMasterResponse.newInstance(false);
} else {
throw new YarnException("finishApplicationMaster should not " + "be called before createAndRegister");
}
}
FinishApplicationMasterResponse response = this.rmProxyRelayer.finishApplicationMaster(request);
    if (response.getIsUnregistered()) {
shutDownConnections();
}
return response;
} | 3.26 |
hadoop_UnmanagedApplicationManager_launchUAM_rdh | /**
* Launch a new UAM in the resource manager.
*
 * @return the AMRM token of the launched UAM
* @throws YarnException
* if fails
* @throws IOException
* if fails
*/
public Token<AMRMTokenIdentifier> launchUAM() throws YarnException, IOException {
this.connectionInitiated = true;
// Blocking call to RM
Token<AMRMTokenIdentifier> amrmToken = initializeUnmanagedAM(this.applicationId);
// Creates the UAM connection
createUAMProxy(amrmToken);
return amrmToken;
} | 3.26 |
hadoop_UnmanagedApplicationManager_monitorCurrentAppAttempt_rdh | /**
* Monitor the submitted application and attempt until it reaches certain
* states.
*
* @param appId
* Application Id of application to be monitored
* @param appStates
* acceptable application state
* @param attemptState
* acceptable application attempt state
* @return the application report
* @throws YarnException
* if getApplicationReport fails
* @throws IOException
* if getApplicationReport fails
*/
private ApplicationAttemptReport monitorCurrentAppAttempt(ApplicationId appId, Set<YarnApplicationState> appStates, YarnApplicationAttemptState attemptState) throws YarnException, IOException {
long startTime = System.currentTimeMillis();
ApplicationAttemptId appAttemptId = null;
while (true) {
if (appAttemptId == null) {
// Get application report for the appId we are interested in
ApplicationReport report = getApplicationReport(appId);
YarnApplicationState state = report.getYarnApplicationState();
if (appStates.contains(state)) {
if (state != YarnApplicationState.ACCEPTED) {
throw new YarnRuntimeException(((((("Received non-accepted application state: " + state) + " for ") + appId) + ". This is likely because this is not the first ") + "app attempt in home sub-cluster, and AMRMProxy HA ") + "(yarn.nodemanager.amrmproxy.ha.enable) is not enabled.");
}
        appAttemptId = getApplicationReport(appId).getCurrentApplicationAttemptId();
      } else {
LOG.info("Current application state of {} is {}, will retry later.", appId, state);
}
}
if (appAttemptId != null) {
GetApplicationAttemptReportRequest req = this.recordFactory.newRecordInstance(GetApplicationAttemptReportRequest.class);
      req.setApplicationAttemptId(appAttemptId);
      GetApplicationAttemptReportResponse appAttemptReport = this.rmClient.getApplicationAttemptReport(req);
      ApplicationAttemptReport attemptReport = appAttemptReport.getApplicationAttemptReport();
YarnApplicationAttemptState appAttemptState = attemptReport.getYarnApplicationAttemptState();
if (attemptState.equals(appAttemptState)) {
return attemptReport;
}
LOG.info("Current attempt state of {} is {}, waiting for current attempt to reach {}.", appAttemptId, appAttemptState, attemptState);
}
try {
Thread.sleep(this.asyncApiPollIntervalMillis);
} catch (InterruptedException e) {
LOG.warn("Interrupted while waiting for current attempt of {} to reach {}.", appId, attemptState);
}
      if ((System.currentTimeMillis() - startTime) > AM_STATE_WAIT_TIMEOUT_MS) {
        throw new RuntimeException((("Timeout for waiting current attempt of " + appId) + " to reach ") + attemptState);
      }
    }
} | 3.26 |
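monitorCurrentAppAttempt above is a poll-until-state-or-timeout loop. A generic hedged sketch of that shape, independent of the YARN client APIs:

```java
import java.util.function.Supplier;

final class PollUntilSketch {
  /** Poll {@code probe} every {@code intervalMs} until it returns non-null or {@code timeoutMs} elapses. */
  static <T> T pollUntil(Supplier<T> probe, long intervalMs, long timeoutMs) {
    long start = System.currentTimeMillis();
    while (true) {
      T result = probe.get();
      if (result != null) {
        return result;
      }
      if (System.currentTimeMillis() - start > timeoutMs) {
        throw new RuntimeException("Timed out waiting for condition");
      }
      try {
        Thread.sleep(intervalMs);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException("Interrupted while polling", e);
      }
    }
  }

  public static void main(String[] args) {
    long deadline = System.currentTimeMillis() + 200;
    String state = pollUntil(
        () -> System.currentTimeMillis() >= deadline ? "LAUNCHED" : null, 50, 5_000);
    System.out.println(state); // prints LAUNCHED once the probe succeeds
  }
}
```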
hadoop_UnmanagedApplicationManager_getUAMToken_rdh | /**
* Gets the amrmToken of the unmanaged AM.
*
* @return the amrmToken of the unmanaged AM.
* @throws IOException
* if getApplicationReport fails
* @throws YarnException
* if getApplicationReport fails
*/
protected Token<AMRMTokenIdentifier> getUAMToken() throws IOException, YarnException {
Token<AMRMTokenIdentifier> token = null;
    Token amrmToken = getApplicationReport(this.applicationId).getAMRMToken();
    if (amrmToken != null) {
token = ConverterUtils.convertFromYarn(amrmToken, ((Text) (null)));
} else {
LOG.warn("AMRMToken not found in the application report for application: {}", this.applicationId);
}
return token;
} | 3.26 |
hadoop_UnmanagedApplicationManager_forceKillApplication_rdh | /**
* Force kill the UAM.
*
* @return kill response
* @throws IOException
* if fails to create rmProxy
* @throws YarnException
* if force kill fails
*/
public KillApplicationResponse forceKillApplication() throws IOException, YarnException {
shutDownConnections();
KillApplicationRequest request = KillApplicationRequest.newInstance(this.applicationId);
    if (this.rmClient == null) {
      this.rmClient = m0(ApplicationClientProtocol.class, this.conf, UserGroupInformation.createRemoteUser(this.submitter), null);
}
return this.rmClient.forceKillApplication(request);
} | 3.26 |
hadoop_UnmanagedApplicationManager_shutDownConnections_rdh | /**
* Shutdown this UAM client, without killing the UAM in the YarnRM side.
*/
public void shutDownConnections() {
this.heartbeatHandler.shutdown();
this.rmProxyRelayer.shutdown();
} | 3.26 |
hadoop_UnmanagedApplicationManager_getAppId_rdh | /**
* Returns the application id of the UAM.
*
* @return application id of the UAM
*/
public ApplicationId getAppId() {
return this.applicationId;
} | 3.26 |
hadoop_UnmanagedApplicationManager_allocateAsync_rdh | /**
* Sends the specified heart beat request to the resource manager and invokes
* the callback asynchronously with the response.
*
* @param request
* the allocate request
* @param callback
* the callback method for the request
* @throws YarnException
* if registerAM is not called yet
*/
public void allocateAsync(AllocateRequest request, AsyncCallback<AllocateResponse> callback) throws YarnException {
this.heartbeatHandler.allocateAsync(request, callback);
// Two possible cases why the UAM is not successfully registered yet:
// 1. launchUAM is not called at all. Should throw here.
// 2. launchUAM is called but hasn't successfully returned.
//
// In case 2, we have already saved the allocate request above, so if the
// registration succeeds later, no request is lost.
if (this.userUgi == null) {
if (this.connectionInitiated) {
LOG.info("Unmanaged AM still not successfully launched/registered yet." + " Saving the allocate request and send later.");
} else {
throw new YarnException("AllocateAsync should not be called before launchUAM");
}
}
} | 3.26 |
hadoop_UnmanagedApplicationManager_registerApplicationMaster_rdh | /**
* Registers this {@link UnmanagedApplicationManager} with the resource
* manager.
*
* @param request
* RegisterApplicationMasterRequest
* @return register response
* @throws YarnException
* if register fails
* @throws IOException
* if register fails
*/
public RegisterApplicationMasterResponse registerApplicationMaster(RegisterApplicationMasterRequest request) throws YarnException, IOException {
// Save the register request for re-register later
this.registerRequest = request;
LOG.info("Registering the Unmanaged application master {}", this.applicationId);
RegisterApplicationMasterResponse response = this.rmProxyRelayer.registerApplicationMaster(this.registerRequest);
this.heartbeatHandler.resetLastResponseId();
if (LOG.isDebugEnabled()) {
for (Container container : response.getContainersFromPreviousAttempts()) {
LOG.debug("RegisterUAM returned existing running container {}", container.getId());
}
for (NMToken nmToken : response.getNMTokensFromPreviousAttempts()) {
LOG.debug("RegisterUAM returned existing NM token for node {}", nmToken.getNodeId());
}
}
LOG.info("RegisterUAM returned {} existing running container and {} NM tokens", response.getContainersFromPreviousAttempts().size(), response.getNMTokensFromPreviousAttempts().size());
// Only when register succeed that we start the heartbeat thread
this.heartbeatHandler.setDaemon(true);
this.heartbeatHandler.start();
return response;
} | 3.26 |
hadoop_UnmanagedApplicationManager_initializeUnmanagedAM_rdh | /**
* Launch and initialize an unmanaged AM. First, it creates a new application
* on the RM and negotiates a new attempt id. Then it waits for the RM
* application attempt state to reach YarnApplicationAttemptState.LAUNCHED
* after which it returns the AM-RM token.
*
* @param appId
* application id
* @return the UAM token
* @throws IOException
* if initialize fails
* @throws YarnException
* if initialize fails
*/
protected Token<AMRMTokenIdentifier> initializeUnmanagedAM(ApplicationId appId) throws IOException, YarnException {
try {
UserGroupInformation appSubmitter;
if (UserGroupInformation.isSecurityEnabled()) {
appSubmitter = UserGroupInformation.createProxyUser(this.submitter, UserGroupInformation.getLoginUser());
    } else {
      appSubmitter = UserGroupInformation.createRemoteUser(this.submitter);
}
this.rmClient = m0(ApplicationClientProtocol.class, this.conf, appSubmitter, null);
// Submit the application
submitUnmanagedApp(appId);
// Monitor the application attempt to wait for launch state
monitorCurrentAppAttempt(appId, EnumSet.of(YarnApplicationState.ACCEPTED, YarnApplicationState.RUNNING, YarnApplicationState.KILLED, YarnApplicationState.FAILED, YarnApplicationState.FINISHED), YarnApplicationAttemptState.LAUNCHED);
return getUAMToken();
} finally {
this.rmClient = null;
}
} | 3.26 |
hadoop_UnmanagedApplicationManager_m0_rdh | /**
* Returns RM proxy for the specified protocol type. Unit test cases can
* override this method and return mock proxy instances.
*
* @param protocol
* protocol of the proxy
* @param config
* configuration
* @param user
* ugi for the proxy connection
* @param token
* token for the connection
* @param <T>
* type of the proxy
* @return the proxy instance
* @throws IOException
* if fails to create the proxy
*/
protected <T> T m0(Class<T> protocol, Configuration config, UserGroupInformation user, Token<AMRMTokenIdentifier> token) throws IOException {
    return AMRMClientUtils.createRMProxy(config, protocol, user, token);
} | 3.26 |
hadoop_UnmanagedApplicationManager_getAMRMClientRelayer_rdh | /**
* Returns the rmProxy relayer of this UAM.
*
* @return rmProxy relayer of the UAM
*/
public AMRMClientRelayer getAMRMClientRelayer() {
return this.rmProxyRelayer;
} | 3.26 |
hadoop_CacheStats_reserve_rdh | /**
* Try to reserve more bytes.
*
* @param count
* The number of bytes to add. We will round this up to the page
* size.
* @return The new number of usedBytes if we succeeded; -1 if we failed.
*/
long reserve(long count) {
return usedBytesCount.reserve(count);
} | 3.26 |
hadoop_CacheStats_getCacheCapacity_rdh | /**
* Get the maximum amount of bytes we can cache. This is a constant.
*/
public long getCacheCapacity() {
return maxBytes;
} | 3.26 |
hadoop_CacheStats_roundUpPageSize_rdh | /**
* Round up to the OS page size.
 */
long roundUpPageSize(long count) {
return usedBytesCount.f0.roundUp(count);
} | 3.26 |
hadoop_CacheStats_getPageSize_rdh | /**
* Get the OS page size.
*
* @return the OS page size.
*/
long getPageSize() {
return usedBytesCount.f0.osPageSize;
} | 3.26 |
hadoop_CacheStats_roundUp_rdh | /**
* Round up a number to the operating system page size.
*/
public long roundUp(long count) {
return ((count + osPageSize) - 1) & (~(osPageSize - 1));
} | 3.26 |
hadoop_CacheStats_release_rdh | /**
* Release some bytes that we're using.
*
* @param count
* The number of bytes to release. We will round this up to the
* page size.
* @return The new number of usedBytes.
*/
long release(long count) {
return usedBytesCount.release(count);
} | 3.26 |
hadoop_CacheStats_releaseRoundDown_rdh | /**
* Release some bytes that we're using rounded down to the page size.
*
* @param count
* The number of bytes to release. We will round this down to the
* page size.
* @return The new number of usedBytes.
*/
long releaseRoundDown(long count) {
return usedBytesCount.releaseRoundDown(count);
} | 3.26 |
hadoop_CacheStats_roundDown_rdh | /**
* Round down a number to the operating system page size.
*/
public long roundDown(long count) {
return count & (~(osPageSize - 1));
} | 3.26 |
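The `roundUp`/`roundDown` expressions above rely on the page size being a power of two: adding `pageSize - 1` and masking off the low bits rounds up without a division. A small self-contained check, assuming a typical 4096-byte page:

```java
public class PageRoundingSketch {
  static long roundUp(long count, long pageSize) {
    return (count + pageSize - 1) & ~(pageSize - 1);
  }

  static long roundDown(long count, long pageSize) {
    return count & ~(pageSize - 1);
  }

  public static void main(String[] args) {
    long page = 4096;                          // assumed page size
    System.out.println(roundUp(1, page));      // 4096
    System.out.println(roundUp(4096, page));   // 4096 (already aligned)
    System.out.println(roundUp(4097, page));   // 8192
    System.out.println(roundDown(4097, page)); // 4096
  }
}
```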
hadoop_CacheStats_getCacheUsed_rdh | // Stats related methods for FSDatasetMBean
/**
* Get the approximate amount of cache space used.
*/
public long getCacheUsed() {
  return usedBytesCount.get();
} | 3.26 |
hadoop_HsController_index_rdh | /* (non-Javadoc)
@see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#index()
*/
@Override
public void index() {
setTitle("JobHistory");} | 3.26 |
hadoop_HsController_logs_rdh | /**
* Render the logs page.
 */
public void logs() {
String logEntity = $(ENTITY_STRING);
JobID jid = null;
try {
jid = JobID.forName(logEntity);
set(JOB_ID, logEntity);
requireJob();
} catch (Exception e) {
// fall below
}
if (jid == null) {
try {
TaskAttemptID taskAttemptId = TaskAttemptID.forName(logEntity);
    set(TASK_ID, taskAttemptId.getTaskID().toString());
    set(JOB_ID, taskAttemptId.getJobID().toString());
requireTask();
requireJob();
} catch (Exception e) {
// fall below
}
}
render(HsLogsPage.class); } | 3.26 |
hadoop_HsController_taskPage_rdh | /* (non-Javadoc)
@see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#taskPage()
*/
@Override
protected Class<? extends View> taskPage() {
return HsTaskPage.class;
} | 3.26 |
hadoop_HsController_nmlogs_rdh | /**
* Render the nm logs page.
*/
public void nmlogs() {
render(AggregatedLogsPage.class);
} | 3.26 |
hadoop_HsController_singleTaskCounter_rdh | /* (non-Javadoc)
@see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#singleTaskCounter()
*/
@Override
public void singleTaskCounter() throws IOException {
super.singleTaskCounter();
} | 3.26 |
hadoop_HsController_singleJobCounter_rdh | /* (non-Javadoc)
@see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#singleJobCounter()
*/
@Override
public void singleJobCounter() throws IOException {
super.singleJobCounter();
} | 3.26 |
hadoop_HsController_countersPage_rdh | /* (non-Javadoc)
@see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#countersPage()
*/
@Override
  public Class<? extends View> countersPage() {
    return HsCountersPage.class;
} | 3.26 |
hadoop_HsController_confPage_rdh | /**
*
* @return the page that will be used to render the /conf page
 */
@Override
protected Class<? extends View> confPage() {
return HsConfPage.class;
} | 3.26 |
hadoop_HsController_task_rdh | /* (non-Javadoc)
@see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#task()
*/
@Override
public void task() {
super.task();
} | 3.26 |
hadoop_HsController_tasks_rdh | /* (non-Javadoc)
@see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#tasks()
*/
@Override
public void tasks() {
super.tasks();
} | 3.26 |
hadoop_HsController_singleCounterPage_rdh | /* (non-Javadoc)
@see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#singleCounterPage()
*/
@Override
protected Class<? extends View> singleCounterPage() {
return HsSingleCounterPage.class;
} | 3.26 |
hadoop_HsController_jobCounters_rdh | /* (non-Javadoc)
@see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#jobCounters()
*/
@Override
public void jobCounters() {
super.jobCounters();
} | 3.26 |
hadoop_HsController_aboutPage_rdh | /**
*
* @return the page about the current server.
*/
protected Class<? extends View> aboutPage() {
return HsAboutPage.class;
} | 3.26 |
hadoop_HsController_attempts_rdh | /* (non-Javadoc)
@see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#attempts()
*/
@Override
public void attempts() {
super.attempts();
} | 3.26 |
hadoop_HsController_about_rdh | /**
* Render a page about the current server.
*/
public void about() {
render(aboutPage());
} | 3.26 |
hadoop_HsController_attemptsPage_rdh | /* (non-Javadoc)
@see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#attemptsPage()
*/
@Override
protected Class<? extends View> attemptsPage() {
return HsAttemptsPage.class;
} | 3.26 |
hadoop_HsController_job_rdh | // Need all of these methods here also as Guice doesn't look into parent
// classes.
/* (non-Javadoc)
@see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#job()
*/
@Override
public void job() {
super.job();
} | 3.26 |
hadoop_HsController_tasksPage_rdh | /* (non-Javadoc)
@see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#tasksPage()
*/
@Override
protected Class<? extends View> tasksPage() {
return HsTasksPage.class;
} | 3.26 |
hadoop_HsController_jobPage_rdh | /* (non-Javadoc)
@see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#jobPage()
*/
@Override
protected Class<? extends View> jobPage() {
return HsJobPage.class;
} | 3.26 |
hadoop_DockerKillCommand_setSignal_rdh | /**
* Set the signal for the {@link DockerKillCommand}.
*
* @param signal
* the signal to send to the container.
* @return the {@link DockerKillCommand} with the signal set.
*/
  public DockerKillCommand setSignal(String signal) {
super.addCommandArguments("signal", signal);
return this;
} | 3.26 |
hadoop_AbstractRESTRequestInterceptor_setConf_rdh | /**
* Sets the {@link Configuration}.
*/
@Override
public void setConf(Configuration conf) {
this.conf = conf;
    if (this.nextInterceptor != null) {
      this.nextInterceptor.setConf(conf);
}
} | 3.26 |
hadoop_AbstractRESTRequestInterceptor_getConf_rdh | /**
* Gets the {@link Configuration}.
*/
@Override
public Configuration getConf() {
return this.conf;
}
/**
* Initializes the {@link RESTRequestInterceptor} | 3.26 |
hadoop_AbstractRESTRequestInterceptor_shutdown_rdh | /**
* Disposes the {@link RESTRequestInterceptor}.
*/
@Override
public void shutdown() {
if (this.nextInterceptor != null) {
this.nextInterceptor.shutdown();
}
} | 3.26 |
hadoop_AbstractRESTRequestInterceptor_getNextInterceptor_rdh | /**
* Gets the next {@link RESTRequestInterceptor} in the chain.
*/
@Override
public RESTRequestInterceptor getNextInterceptor() {
return this.nextInterceptor;
} | 3.26 |
hadoop_AbstractRESTRequestInterceptor_setNextInterceptor_rdh | /**
* Sets the {@link RESTRequestInterceptor} in the chain.
*/
@Override
public void setNextInterceptor(RESTRequestInterceptor nextInterceptor) {
this.nextInterceptor = nextInterceptor;
} | 3.26 |
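The setConf/shutdown methods above propagate down a chain of interceptors. A minimal hedged sketch of that chain-of-responsibility shape with stand-in interfaces (not the Hadoop REST interceptor types):

```java
final class InterceptorChainSketch {
  interface Interceptor {
    void setNext(Interceptor next);
    void shutdown();
  }

  static abstract class AbstractInterceptor implements Interceptor {
    private Interceptor next;
    @Override public void setNext(Interceptor next) { this.next = next; }
    @Override public void shutdown() {
      System.out.println(getClass().getSimpleName() + " shutting down");
      if (next != null) {
        next.shutdown(); // propagate down the chain, as the Hadoop class does
      }
    }
  }

  static final class First extends AbstractInterceptor { }
  static final class Second extends AbstractInterceptor { }

  public static void main(String[] args) {
    Interceptor first = new First();
    first.setNext(new Second());
    first.shutdown(); // prints First then Second
  }
}
```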
hadoop_FlowRunCoprocessor_getCellTimestamp_rdh | /**
* Determines if the current cell's timestamp is to be used or a new unique
 * cell timestamp is to be used. The reason this is done is to avoid inadvertently
 * overwriting cells when writes come in very fast. But for metric cells, the
* cell timestamp signifies the metric timestamp. Hence we don't want to
* overwrite it.
*
* @param timestamp
* @param tags
* @return cell timestamp
 */
private long getCellTimestamp(long timestamp, List<Tag> tags) {
// if ts not set (hbase sets to HConstants.LATEST_TIMESTAMP by default)
// then use the generator
if (timestamp == HConstants.LATEST_TIMESTAMP) {
      return timestampGenerator.getUniqueTimestamp();
    } else {
return timestamp;
}
}
/* (non-Javadoc)
Creates a {@link FlowScanner} Scan so that it can correctly process the
contents of {@link FlowRunTable} | 3.26 |
hadoop_CMgrUpdateContainersEvent_getContainersToUpdate_rdh | /**
* Get containers to update.
*
* @return List of containers to update.
*/
public List<Container> getContainersToUpdate() {
return this.containersToUpdate;
} | 3.26 |
hadoop_GroupsService_getGroups_rdh | /**
*
* @deprecated use {@link #getGroupsSet(String user)}
*/
@Deprecated
@Override
public List<String> getGroups(String user) throws IOException {
    return hGroups.getGroups(user);
  } | 3.26 |
hadoop_XMLParserMapper_map_rdh | /**
* Read the input XML file line by line, and generate list of blocks. The
* actual parsing logic is handled by {@link XMLParser}. This mapper just
* delegates to that class and then writes the blocks to the corresponding
* index to be processed by reducers.
*/
@Override
public void map(LongWritable lineNum, Text line, Mapper<LongWritable, Text, IntWritable, BlockInfo>.Context context) throws IOException, InterruptedException {
List<BlockInfo> blockInfos = parser.parseLine(line.toString());
for (BlockInfo blockInfo : blockInfos) {
for (short i = 0; i < blockInfo.getReplication(); i++) {
context.write(new IntWritable((blockIndex + i) % numDataNodes), blockInfo);
}
blockIndex++;
if ((blockIndex % 1000000) == 0) {
LOG.info(("Processed " + blockIndex)
+ " blocks");
}
}
} | 3.26 |
hadoop_FutureIO_awaitFuture_rdh | /**
* Given a future, evaluate it.
* <p>
* Any exception generated in the future is
* extracted and rethrown.
* </p>
*
* @param future
* future to evaluate
* @param timeout
* timeout to wait
* @param unit
* time unit.
* @param <T>
* type of the result.
* @return the result, if all went well.
* @throws InterruptedIOException
* future was interrupted
* @throws IOException
* if something went wrong
* @throws RuntimeException
* any nested RTE thrown
* @throws TimeoutException
* the future timed out.
*/
  public static <T> T awaitFuture(final Future<T> future, final long timeout, final TimeUnit unit)
      throws InterruptedIOException, IOException, RuntimeException, TimeoutException {
try {
return future.get(timeout, unit);
} catch (InterruptedException e) {
throw ((InterruptedIOException) (new InterruptedIOException(e.toString()).initCause(e)));
} catch (ExecutionException e) {
return raiseInnerCause(e);
}
} | 3.26 |
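A hedged usage sketch of `awaitFuture`: evaluate a `CompletableFuture` with a timeout and let the helper rethrow the inner cause rather than a wrapped `ExecutionException`. The `org.apache.hadoop.util.functional.FutureIO` package location is assumed here.

```java
import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import org.apache.hadoop.util.functional.FutureIO;

public class AwaitFutureSketch {
  public static void main(String[] args) throws IOException, TimeoutException {
    CompletableFuture<String> future = CompletableFuture.supplyAsync(() -> "done");
    // Blocks for up to 5 seconds; ExecutionException causes are unwrapped for us.
    String result = FutureIO.awaitFuture(future, 5, TimeUnit.SECONDS);
    System.out.println(result);
  }
}
```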
hadoop_FutureIO_eval_rdh | /**
* Evaluate a CallableRaisingIOE in the current thread,
* converting IOEs to RTEs and propagating.
*
* @param callable
* callable to invoke
* @param <T>
* Return type.
* @return the evaluated result.
* @throws UnsupportedOperationException
* fail fast if unsupported
* @throws IllegalArgumentException
* invalid argument
*/
public static <T> CompletableFuture<T> eval(CallableRaisingIOE<T> callable) {
CompletableFuture<T> result = new CompletableFuture<>();
try {
result.complete(callable.apply());
    } catch (UnsupportedOperationException | IllegalArgumentException tx) {
// fail fast here
throw tx;
} catch (Throwable tx) {
// fail lazily here to ensure callers expect all File IO operations to
// surface later
result.completeExceptionally(tx);
}
return result;
} | 3.26 |
hadoop_FutureIO_unwrapInnerException_rdh | /**
* From the inner cause of an execution exception, extract the inner cause
* to an IOException, raising RuntimeExceptions and Errors immediately.
* <ol>
* <li> If it is an IOE: Return.</li>
* <li> If it is a {@link UncheckedIOException}: return the cause</li>
* <li> Completion/Execution Exceptions: extract and repeat</li>
* <li> If it is an RTE or Error: throw.</li>
* <li> Any other type: wrap in an IOE</li>
* </ol>
*
* Recursively handles wrapped Execution and Completion Exceptions in
* case something very complicated has happened.
*
* @param e
* exception.
* @return an IOException extracted or built from the cause.
* @throws RuntimeException
* if that is the inner cause.
* @throws Error
* if that is the inner cause.
*/
@SuppressWarnings("ChainOfInstanceofChecks")
public static IOException unwrapInnerException(final Throwable e) {
Throwable cause = e.getCause();
if (cause instanceof IOException) {
return ((IOException) (cause));
} else if (cause instanceof UncheckedIOException) {
// this is always an IOException
      return ((UncheckedIOException) (cause)).getCause();
    } else if (cause instanceof CompletionException) {
return unwrapInnerException(cause);
} else if (cause instanceof ExecutionException) {
return unwrapInnerException(cause);
} else if (cause instanceof RuntimeException) {
throw ((RuntimeException) (cause));
} else if (cause instanceof Error) {
throw ((Error) (cause));
} else if (cause != null) {
// other type: wrap with a new IOE
return new IOException(cause);
} else {
// this only happens if there was no cause.
return new IOException(e);
}
} | 3.26 |
hadoop_FutureIO_propagateOptions_rdh | /**
* Propagate options to any builder, converting everything with the
* prefix to an option where, if there were 2+ dot-separated elements,
* it is converted to a schema.
* <pre>
* fs.example.s3a.option becomes "s3a.option"
* fs.example.fs.io.policy becomes "fs.io.policy"
* fs.example.something becomes "something"
* </pre>
*
* @param builder
* builder to modify
* @param conf
* configuration to read
* @param prefix
* prefix to scan/strip
* @param mandatory
* are the options to be mandatory or optional?
*/
public static void propagateOptions(final FSBuilder<?, ?> builder, final Configuration conf, final String prefix, final boolean mandatory) {
final String p = (prefix.endsWith(".")) ? prefix : prefix + ".";
final Map<String, String> propsWithPrefix = conf.getPropsWithPrefix(p);
for (Map.Entry<String, String> entry : propsWithPrefix.entrySet()) {
// change the schema off each entry
String key = entry.getKey();
String val = entry.getValue();
if (mandatory) {
builder.must(key, val);
} else {
builder.opt(key, val);
}
}
} | 3.26 |
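The prefix handling can be illustrated without a Hadoop `Configuration`: keep only keys under the prefix, strip it, and route each remaining key (which may still carry a scheme such as `s3a.`) to `must`/`opt`. A hedged standalone sketch:

```java
import java.util.LinkedHashMap;
import java.util.Map;

final class PropagateOptionsSketch {
  public static void main(String[] args) {
    Map<String, String> conf = new LinkedHashMap<>();
    conf.put("fs.example.s3a.option", "a");
    conf.put("fs.example.fs.io.policy", "b");
    conf.put("fs.example.something", "c");
    conf.put("other.key", "ignored");

    String prefix = "fs.example.";
    for (Map.Entry<String, String> e : conf.entrySet()) {
      if (e.getKey().startsWith(prefix)) {
        String stripped = e.getKey().substring(prefix.length());
        // With a real FSBuilder this would be builder.must(stripped, value)
        // or builder.opt(stripped, value) depending on the mandatory flag.
        System.out.println(stripped + " -> " + e.getValue());
      }
    }
    // prints: s3a.option -> a, fs.io.policy -> b, something -> c
  }
}
```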
hadoop_FutureIO_raiseInnerCause_rdh | /**
* Extract the cause of a completion failure and rethrow it if an IOE
* or RTE.
*
* @param e
* exception.
* @param <T>
* type of return value.
* @return nothing, ever.
* @throws IOException
* either the inner IOException, or a wrapper around
* any non-Runtime-Exception
* @throws RuntimeException
* if that is the inner cause.
*/
  public static <T> T raiseInnerCause(final CompletionException e) throws IOException {
throw unwrapInnerException(e);
} | 3.26 |
hadoop_IOStatisticsSupport_retrieveIOStatistics_rdh | /**
* Get the IOStatistics of the source, casting it
* if it is of the relevant type, otherwise,
* if it implements {@link IOStatisticsSource}
* extracting the value.
*
 * Returns null if the source isn't of the right type
* or the return value of
* {@link IOStatisticsSource#getIOStatistics()} was null.
*
* @param source
* source.
* @return an IOStatistics instance or null
*/
public static IOStatistics retrieveIOStatistics(final Object source) {
if (source instanceof IOStatistics) {
return ((IOStatistics) (source));
} else if (source instanceof IOStatisticsSource) {
return ((IOStatisticsSource) (source)).getIOStatistics();
} else {
// null source or interface not implemented
return null;
}
} | 3.26 |
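The cast-or-extract pattern above generalizes to any statistics-bearing object; a minimal hedged sketch with stand-in interfaces rather than the Hadoop IOStatistics types:

```java
final class StatsExtractSketch {
  interface Stats { long bytesRead(); }
  interface StatsSource { Stats getStats(); }

  /** Return the statistics if the object is, or exposes, a Stats instance; otherwise null. */
  static Stats retrieve(Object source) {
    if (source instanceof Stats) {
      return (Stats) source;
    } else if (source instanceof StatsSource) {
      return ((StatsSource) source).getStats();
    }
    return null;
  }

  public static void main(String[] args) {
    StatsSource stream = () -> () -> 42L;
    System.out.println(retrieve(stream).bytesRead()); // 42
    System.out.println(retrieve("not a source"));     // null
  }
}
```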
hadoop_IOStatisticsSupport_snapshotIOStatistics_rdh | /**
* Create a snapshot statistics instance ready to aggregate data.
*
* The instance can be serialized, and its
* {@code toString()} method lists all the values.
*
* @return an empty snapshot
*/
  public static IOStatisticsSnapshot snapshotIOStatistics() {
return new IOStatisticsSnapshot();
} | 3.26 |
hadoop_IOStatisticsSupport_m0_rdh | /**
* Get a stub duration tracker.
*
* @return a stub tracker.
*/
public static DurationTracker m0() {
return StubDurationTracker.STUB_DURATION_TRACKER;
} | 3.26 |
hadoop_IOStatisticsSupport_stubDurationTrackerFactory_rdh | /**
* Return a stub duration tracker factory whose returned trackers
* are always no-ops.
*
* As singletons are returned, this is very low-cost to use.
*
* @return a duration tracker factory.
*/
public static DurationTrackerFactory stubDurationTrackerFactory() {
return StubDurationTrackerFactory.STUB_DURATION_TRACKER_FACTORY;
} | 3.26 |
hadoop_AppPlacementAllocator_getPreferredNodeIterator_rdh | /**
* Get iterator of preferred node depends on requirement and/or availability.
*
* @param candidateNodeSet
* input CandidateNodeSet
* @return iterator of preferred node
*/
public Iterator<N> getPreferredNodeIterator(CandidateNodeSet<N> candidateNodeSet) {
// Now only handle the case that single node in the candidateNodeSet
// TODO, Add support to multi-hosts inside candidateNodeSet which is passed
// in.
N singleNode = CandidateNodeSetUtils.getSingleNode(candidateNodeSet);
if (singleNode != null) {
return IteratorUtils.singletonIterator(singleNode);
}
// singleNode will be null if Multi-node placement lookup is enabled, and
// hence could consider sorting policies.
return multiNodeSortingManager.getMultiNodeSortIterator(candidateNodeSet.getAllNodes().values(), candidateNodeSet.getPartition(), multiNodeSortPolicyName);
} | 3.26 |
hadoop_AppPlacementAllocator_initialize_rdh | /**
* Initialize this allocator, this will be called by Factory automatically.
*
* @param appSchedulingInfo
* appSchedulingInfo
* @param schedulerRequestKey
* schedulerRequestKey
* @param rmContext
* rmContext
*/
public void initialize(AppSchedulingInfo appSchedulingInfo, SchedulerRequestKey schedulerRequestKey, RMContext rmContext) {
this.appSchedulingInfo = appSchedulingInfo;
this.rmContext = rmContext;
this.schedulerRequestKey = schedulerRequestKey;
multiNodeSortPolicyName = appSchedulingInfo.getApplicationSchedulingEnvs().get(ApplicationSchedulingConfig.ENV_MULTI_NODE_SORTING_POLICY_CLASS);
multiNodeSortingManager = ((MultiNodeSortingManager<N>) (rmContext.getMultiNodeSortingManager()));
if (LOG.isDebugEnabled()) {
LOG.debug((("nodeLookupPolicy used for " + appSchedulingInfo.getApplicationId()) + " is ") + (multiNodeSortPolicyName != null ? multiNodeSortPolicyName : ""));
}
} | 3.26 |
hadoop_WrappedFailoverProxyProvider_useLogicalURI_rdh | /**
* Assume logical URI is used for old proxy provider implementations.
*/
@Override
public boolean useLogicalURI() {
return true;
} | 3.26 |
hadoop_WrappedFailoverProxyProvider_close_rdh | /**
 * Close the proxy.
*/
@Override
public synchronized void close() throws IOException {
proxyProvider.close();
} | 3.26 |
hadoop_EncryptionSecrets_getEncryptionMethod_rdh | /**
* Get the encryption method.
*
* @return the encryption method
*/
public S3AEncryptionMethods getEncryptionMethod() {
return encryptionMethod;
} | 3.26 |
hadoop_EncryptionSecrets_m0_rdh | /**
* Init all state, including after any read.
*
* @throws IOException
* error rebuilding state.
*/
private void m0() throws IOException {
  encryptionMethod = S3AEncryptionMethods.getMethod(encryptionAlgorithm);
} | 3.26 |
hadoop_EncryptionSecrets_toString_rdh | /**
* String function returns the encryption mode but not any other
* secrets.
*
* @return a string safe for logging.
*/
@Override
public String toString() {
    return S3AEncryptionMethods.NONE.equals(encryptionMethod) ? "(no encryption)" : encryptionMethod.getMethod();
  } | 3.26 |
hadoop_EncryptionSecrets_hasEncryptionAlgorithm_rdh | /**
* Does this instance have encryption options?
* That is: is the algorithm non-null.
*
* @return true if there's an encryption algorithm.
*/
public boolean hasEncryptionAlgorithm() {
return StringUtils.isNotEmpty(encryptionAlgorithm);
} | 3.26 |
hadoop_EncryptionSecrets_hasEncryptionKey_rdh | /**
* Does this instance have an encryption key?
*
* @return true if there's an encryption key.
*/
public boolean hasEncryptionKey() {
return StringUtils.isNotEmpty(encryptionKey);
} | 3.26 |
hadoop_EncryptionSecrets_write_rdh | /**
* Write out the encryption secrets.
*
* @param out
* {@code DataOutput} to serialize this object into.
* @throws IOException
* IO failure
*/
@Override
public void write(final DataOutput out) throws IOException {
new LongWritable(serialVersionUID).write(out);
Text.writeString(out, encryptionAlgorithm);
Text.writeString(out, encryptionKey);
} | 3.26 |
hadoop_EncryptionSecrets_readFields_rdh | /**
* Read in from the writable stream.
* After reading, call {@link #init()}.
*
* @param in
* {@code DataInput} to deserialize this object from.
* @throws IOException
* failure to read/validate data.
*/
@Override
public void readFields(final DataInput in) throws IOException {
    final LongWritable version = new LongWritable();
    version.readFields(in);
if (version.get() != serialVersionUID) {
throw new DelegationTokenIOException("Incompatible EncryptionSecrets version");
}
encryptionAlgorithm = Text.readString(in, MAX_SECRET_LENGTH);
encryptionKey = Text.readString(in, MAX_SECRET_LENGTH);
m0();
} | 3.26 |
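A hedged sketch of the same versioned write/read round trip, using plain `DataOutputStream`/`DataInputStream` and `writeUTF` in place of Hadoop's `LongWritable` and `Text.writeString`; the field values are made up for the example.

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

final class WritableRoundTripSketch {
  private static final long VERSION = 1L;

  static byte[] write(String algorithm, String key) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    try (DataOutputStream out = new DataOutputStream(bytes)) {
      out.writeLong(VERSION); // version marker, checked on read
      out.writeUTF(algorithm);
      out.writeUTF(key);
    }
    return bytes.toByteArray();
  }

  static String[] read(byte[] data) throws IOException {
    try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(data))) {
      if (in.readLong() != VERSION) {
        throw new IOException("Incompatible version");
      }
      return new String[] { in.readUTF(), in.readUTF() };
    }
  }

  public static void main(String[] args) throws IOException {
    String[] back = read(write("SSE-KMS", "key-id"));
    System.out.println(back[0] + " / " + back[1]); // SSE-KMS / key-id
  }
}
```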
hadoop_EncryptionSecrets_readObject_rdh | /**
* For java serialization: read and then call {@link #init()}.
*
* @param in
* input
* @throws IOException
* IO problem
* @throws ClassNotFoundException
* problem loading inner class.
*/
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
in.defaultReadObject();
m0();
} | 3.26 |
hadoop_S3APrefetchingInputStream_available_rdh | /**
* Returns the number of bytes available for reading without blocking.
*
* @return the number of bytes available for reading without blocking.
* @throws IOException
* if there is an IO error during this operation.
*/
@Override
  public synchronized int available() throws IOException {
throwIfClosed();
return f0.available();
} | 3.26 |
hadoop_S3APrefetchingInputStream_getS3AStreamStatistics_rdh | /**
* Access the input stream statistics.
* This is for internal testing and may be removed without warning.
*
* @return the statistics for this input stream
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
@VisibleForTesting
public S3AInputStreamStatistics getS3AStreamStatistics() {
    if (!isClosed()) {
      inputStreamStatistics = f0.getS3AStreamStatistics();
}
return inputStreamStatistics;
} | 3.26 |
hadoop_S3APrefetchingInputStream_setReadahead_rdh | /**
* Sets the number of bytes to read ahead each time.
*
* @param readahead
* the number of bytes to read ahead each time..
*/
@Override
public synchronized void setReadahead(Long readahead) {
if (!isClosed()) {
f0.setReadahead(readahead);
}
} | 3.26 |
hadoop_S3APrefetchingInputStream_close_rdh | /**
* Closes this stream and releases all acquired resources.
*
* @throws IOException
* if there is an IO error during this operation.
*/
@Override
public synchronized void close() throws IOException {
    if (f0 != null) {
      f0.close();
      f0 = null;
      super.close();
    }
  } | 3.26 |
hadoop_S3APrefetchingInputStream_read_rdh | /**
* Reads up to {@code len} bytes from this stream and copies them into
* the given {@code buffer} starting at the given {@code offset}.
* Returns the number of bytes actually copied in to the given buffer.
*
* @param buffer
* the buffer to copy data into.
* @param offset
* data is copied starting at this offset.
* @param len
* max number of bytes to copy.
* @return the number of bytes actually copied in to the given buffer.
* @throws IOException
* if there is an IO error during this operation.
 */
@Override
public synchronized int read(byte[] buffer, int offset, int len) throws IOException {
throwIfClosed();
return f0.read(buffer, offset, len);
} | 3.26 |
hadoop_S3APrefetchingInputStream_getIOStatistics_rdh | /**
* Gets the internal IO statistics.
*
* @return the internal IO statistics.
*/
@Override
public IOStatistics getIOStatistics() {
if (!isClosed()) {
ioStatistics = f0.getIOStatistics();
}
return ioStatistics;
} | 3.26 |
hadoop_S3APrefetchingInputStream_seek_rdh | /**
* Updates internal data such that the next read will take place at the given {@code pos}.
*
* @param pos
* new read position.
* @throws IOException
* if there is an IO error during this operation.
*/
@Override
public synchronized void seek(long pos) throws IOException {
throwIfClosed();
f0.seek(pos);
} | 3.26 |
hadoop_S3APrefetchingInputStream_hasCapability_rdh | /**
* Indicates whether the given {@code capability} is supported by this stream.
*
* @param capability
* the capability to check.
* @return true if the given {@code capability} is supported by this stream, false otherwise.
*/
@Override
public boolean hasCapability(String capability) {
if (!isClosed()) {
return f0.hasCapability(capability);
}
return false;
} | 3.26 |
hadoop_S3APrefetchingInputStream_getPos_rdh | /**
* Gets the current position. If the underlying S3 input stream is closed,
 * it returns the last read position from the underlying stream. If the
* current position was never read and the underlying input stream is closed,
* this would return 0.
*
* @return the current position.
* @throws IOException
* if there is an IO error during this operation.
*/
@Override
public synchronized long getPos() throws IOException {
if (!isClosed()) {
lastReadCurrentPos = f0.getPos();
}
return lastReadCurrentPos;
} | 3.26 |
hadoop_NodeType_getIndex_rdh | /**
*
* @return the index of the node type
*/
public int getIndex() {
  return index;
} | 3.26 |
hadoop_FSEditLogAsync_logEdit_rdh | // return whether edit log wants to sync.
boolean logEdit() {
  return log.doEditTransaction(op);
} | 3.26 |
hadoop_FSEditLogAsync_tryRelease_rdh | // while draining, count the releases until release(int)
private void tryRelease(int permits) {
pendingReleases.getAndAdd(permits);
if (!draining.get()) {
super.release(pendingReleases.getAndSet(0));
}
} | 3.26 |
hadoop_AbfsInputStream_getBytesFromRemoteRead_rdh | /**
* Getter for bytes read remotely from the data store.
*
* @return value of the counter in long.
*/@VisibleForTesting
public long getBytesFromRemoteRead() {
return bytesFromRemoteRead;
} | 3.26 |
hadoop_AbfsInputStream_markSupported_rdh | /**
* gets whether mark and reset are supported by {@code ADLFileInputStream}. Always returns false.
*
* @return always {@code false}
*/
@Override
public boolean markSupported() {
return false;
} | 3.26 |