name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_AddMountTableEntriesResponse_newInstance_rdh | /**
* API response for adding multiple mount table entries to the state store.
 */
public abstract class AddMountTableEntriesResponse {
public static AddMountTableEntriesResponse newInstance() throws IOException {
return StateStoreSerializer.newRecord(AddMountTableEntriesResponse.class);
} | 3.26 |
hadoop_HostnameFilter_doFilter_rdh | /**
* Resolves the requester hostname and delegates the request to the chain.
* <p>
* The requester hostname is available via the {@link #get} method.
*
* @param request
* servlet request.
* @param response
* servlet response.
* @param chain
* filter chain.
* @throws IOException
* thrown if an IO error occurs.
* @throws ServletException
* thrown if a servlet error occurs.
*/
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException {
try {
String hostname;
try {
String address = request.getRemoteAddr();
if (address != null) {
hostname = InetAddress.getByName(address).getCanonicalHostName();
} else {
log.warn("Request remote address is NULL");
hostname = "???";
}
} catch (UnknownHostException ex) {
log.warn("Request remote address could not be resolved, {0}", ex.toString(),
ex);
hostname = "???";
}
HOSTNAME_TL.set(hostname);
chain.doFilter(request, response);
} finally {
HOSTNAME_TL.remove();
}
} | 3.26 |
hadoop_HostnameFilter_get_rdh | /**
* Returns the requester hostname.
*
* @return the requester hostname.
*/
public static String get() {
return HOSTNAME_TL.get();
} | 3.26 |
hadoop_HostnameFilter_init_rdh | /**
* Initializes the filter.
* <p>
* This implementation is a NOP.
*
* @param config
* filter configuration.
* @throws ServletException
* thrown if the filter could not be initialized.
*/
@Override
public void init(FilterConfig config) throws ServletException {
} | 3.26 |
hadoop_FederationStateStoreFacade_addReservationHomeSubCluster_rdh | /**
* Save Reservation And HomeSubCluster Mapping.
*
* @param reservationId
* reservationId
* @param homeSubCluster
* homeSubCluster
* @throws YarnException
* on failure
*/
public void addReservationHomeSubCluster(ReservationId reservationId, ReservationHomeSubCluster homeSubCluster) throws YarnException {
try {
// persist the mapping of reservationId and the subClusterId which has
// been selected as its home
addReservationHomeSubCluster(homeSubCluster);
} catch (YarnException e) {
String msg = String.format("Unable to insert the ReservationId %s into the FederationStateStore.", reservationId);
throw new YarnException(msg,
e);
}
} | 3.26 |
hadoop_FederationStateStoreFacade_getRetryNumbers_rdh | /**
* Get the number of retries.
*
* @param configRetries
* User-configured number of retries.
* @return number of retries.
* @throws YarnException
* yarn exception.
*/
public int getRetryNumbers(int configRetries) throws YarnException {
int activeSubClustersCount = getActiveSubClustersCount();
int actualRetryNums = Math.min(activeSubClustersCount, configRetries);
// Normally, we don't set a negative number for the number of retries,
// but if the user sets a negative number for the number of retries,
// we will return 0
if (actualRetryNums < 0) {
return 0;
}
return actualRetryNums;
} | 3.26 |
hadoop_FederationStateStoreFacade_existsReservationHomeSubCluster_rdh | /**
* Check whether a ReservationHomeSubCluster mapping exists.
*
* @param reservationId
* reservationId
* @return true - exist, false - not exist
*/
public boolean existsReservationHomeSubCluster(ReservationId reservationId) {
try {
SubClusterId subClusterId = getReservationHomeSubCluster(reservationId);
if (subClusterId != null) {
return true;
}
} catch (YarnException e) {
LOG.debug("get homeSubCluster by reservationId = {} error.", reservationId, e);
}
return false;
} | 3.26 |
hadoop_FederationStateStoreFacade_reinitialize_rdh | /**
* Delete and re-initialize the cache, to force it to use the given
* configuration.
*
* @param store
* the {@link FederationStateStore} instance to reinitialize with
* @param config
* the updated configuration to reinitialize with
*/
@VisibleForTesting
public synchronized void reinitialize(FederationStateStore store, Configuration config) {
this.conf = config;
this.stateStore = store;
federationCache.clearCache();
federationCache.initCache(config, stateStore);
} | 3.26 |
hadoop_FederationStateStoreFacade_m2_rdh | /**
* Removes a stored master key ({@link RouterMasterKey}) via the Router.
*
* @param newKey
* Key used for generating and verifying delegation tokens
* @throws YarnException
* if the call to the state store is unsuccessful
* @throws IOException
* An IO Error occurred
*/
public void m2(DelegationKey newKey) throws YarnException, IOException {
LOG.info("Removing master key with keyID {}.", newKey.getKeyId());
ByteBuffer keyBytes = ByteBuffer.wrap(newKey.getEncodedKey());
RouterMasterKey masterKey = RouterMasterKey.newInstance(newKey.getKeyId(), keyBytes, newKey.getExpiryDate());
RouterMasterKeyRequest keyRequest = RouterMasterKeyRequest.newInstance(masterKey);
stateStore.removeStoredMasterKey(keyRequest);
} | 3.26 |
hadoop_FederationStateStoreFacade_createRetryInstance_rdh | /**
* Helper method to create instances of Object using the class name defined in
* the configuration object. The instances creates {@link RetryProxy} using
* the specific {@link RetryPolicy}.
*
* @param conf
* the yarn configuration
* @param configuredClassName
* the configuration provider key
* @param defaultValue
* the default implementation for fallback
* @param type
* the class for which a retry proxy is required
* @param retryPolicy
* the policy for retrying method call failures
* @param <T>
* The type of the instance.
* @return a retry proxy for the specified interface
*/
public static <T> Object createRetryInstance(Configuration conf, String configuredClassName, String defaultValue, Class<T> type, RetryPolicy retryPolicy) {
return RetryProxy.create(type, createInstance(conf, configuredClassName, defaultValue, type), retryPolicy);
} | 3.26 |
hadoop_FederationStateStoreFacade_getRandomActiveSubCluster_rdh | /**
* Randomly pick ActiveSubCluster.
* During the selection process, we will exclude SubClusters from the blacklist.
*
* @param activeSubClusters
* List of active subClusters.
* @param blackList
* blacklist.
* @return Active SubClusterId.
* @throws YarnException
* When there is no Active SubCluster,
* an exception will be thrown (No active SubCluster available to submit the request.)
*/
public static SubClusterId getRandomActiveSubCluster(Map<SubClusterId, SubClusterInfo> activeSubClusters, List<SubClusterId> blackList) throws YarnException {
// Check if activeSubClusters is empty, if it is empty, we need to throw an exception
if (MapUtils.isEmpty(activeSubClusters)) {
throw new FederationPolicyException(FederationPolicyUtils.NO_ACTIVE_SUBCLUSTER_AVAILABLE);
}
// Change activeSubClusters to List
List<SubClusterId> subClusterIds = new ArrayList<>(activeSubClusters.keySet());
// If the blacklist is not empty, we need to remove all the subClusters in the blacklist
if (CollectionUtils.isNotEmpty(blackList)) {
subClusterIds.removeAll(blackList);
}
// Check there are still active subcluster after removing the blacklist
if (CollectionUtils.isEmpty(subClusterIds)) {
throw new FederationPolicyException(FederationPolicyUtils.NO_ACTIVE_SUBCLUSTER_AVAILABLE);
}
// Randomly choose a SubCluster
return subClusterIds.get(rand.nextInt(subClusterIds.size()));
} | 3.26 |
hadoop_FederationStateStoreFacade_getSubClusterResolver_rdh | /**
* Get the singleton instance of SubClusterResolver.
*
* @return SubClusterResolver instance
*/
public SubClusterResolver getSubClusterResolver() {
return this.subclusterResolver;
} | 3.26 |
hadoop_FederationStateStoreFacade_getTokenByRouterStoreToken_rdh | /**
* Returns the RouterStoreToken for the given {@link RMDelegationTokenIdentifier} via the Router.
*
* @param identifier
* delegation tokens from the RM
* @return RouterStoreToken
* @throws YarnException
* if the call to the state store is unsuccessful
* @throws IOException
* An IO Error occurred
*/
public RouterRMTokenResponse getTokenByRouterStoreToken(RMDelegationTokenIdentifier identifier) throws YarnException, IOException {
LOG.info("get RouterStoreToken token with sequence number: {}.", identifier.getSequenceNumber());
RouterStoreToken storeToken = RouterStoreToken.newInstance(identifier, 0L);
RouterRMTokenRequest v37 = RouterRMTokenRequest.newInstance(storeToken);
return stateStore.getTokenByRouterStoreToken(v37);
} | 3.26 |
hadoop_FederationStateStoreFacade_updateApplicationHomeSubCluster_rdh | /**
* Updates the home {@link SubClusterId} for the specified
* {@link ApplicationId}.
*
* @param appHomeSubCluster
* the mapping of the application to its home
* sub-cluster
* @throws YarnException
* if the call to the state store is unsuccessful
*/
public void updateApplicationHomeSubCluster(ApplicationHomeSubCluster appHomeSubCluster) throws YarnException {
stateStore.updateApplicationHomeSubCluster(UpdateApplicationHomeSubClusterRequest.newInstance(appHomeSubCluster));
} | 3.26 |
hadoop_FederationStateStoreFacade_setPolicyConfiguration_rdh | /**
* Set a policy configuration into the state store.
*
* @param policyConf
* the policy configuration to set
* @throws YarnException
* if the request is invalid/fails
*/
public void setPolicyConfiguration(SubClusterPolicyConfiguration policyConf) throws YarnException {
stateStore.setPolicyConfiguration(SetSubClusterPolicyConfigurationRequest.newInstance(policyConf));
} | 3.26 |
hadoop_FederationStateStoreFacade_removeStoredToken_rdh | /**
* Removes a stored {@link RMDelegationTokenIdentifier} via the Router.
*
* @param identifier
* delegation tokens from the RM
* @throws YarnException
* if the call to the state store is unsuccessful
* @throws IOException
* An IO Error occurred
*/
public void removeStoredToken(RMDelegationTokenIdentifier identifier) throws YarnException, IOException {
LOG.info("removing RMDelegation token with sequence number: {}.", identifier.getSequenceNumber());
RouterStoreToken storeToken = RouterStoreToken.newInstance(identifier, 0L);
RouterRMTokenRequest request = RouterRMTokenRequest.newInstance(storeToken);
stateStore.removeStoredToken(request);
} | 3.26 |
hadoop_FederationStateStoreFacade_getInstance_rdh | /**
* Returns the singleton instance of the FederationStateStoreFacade object.
*
* @param conf
* configuration.
* @return the singleton {@link FederationStateStoreFacade} instance
*/
public static FederationStateStoreFacade getInstance(Configuration conf) {
return getInstanceInternal(conf);
} | 3.26 |
hadoop_FederationStateStoreFacade_m3_rdh | /**
* Update ApplicationHomeSubCluster to FederationStateStore.
*
* @param subClusterId
* homeSubClusterId
* @param applicationId
* applicationId.
* @param homeSubCluster
* homeSubCluster, homeSubCluster selected according to policy.
* @throws YarnException
* yarn exception.
*/
public void m3(SubClusterId subClusterId, ApplicationId applicationId, ApplicationHomeSubCluster homeSubCluster) throws YarnException {
try {
updateApplicationHomeSubCluster(homeSubCluster);
} catch (YarnException e) {
SubClusterId subClusterIdInStateStore = getApplicationHomeSubCluster(applicationId);
if (subClusterId == subClusterIdInStateStore) {
LOG.info("Application {} already submitted on SubCluster {}.", applicationId, subClusterId);
} else {
String msg = String.format("Unable to update the ApplicationId %s into the FederationStateStore.", applicationId);
throw new YarnException(msg, e);
}
}
} | 3.26 |
hadoop_FederationStateStoreFacade_deleteReservationHomeSubCluster_rdh | /**
* Delete the home {@link SubClusterId} for the specified
* {@link ReservationId}.
*
* @param reservationId
* the identifier of the reservation
* @throws YarnException
* if the call to the state store is unsuccessful
*/
public void deleteReservationHomeSubCluster(ReservationId reservationId)
throws YarnException {
DeleteReservationHomeSubClusterRequest request = DeleteReservationHomeSubClusterRequest.newInstance(reservationId);
stateStore.deleteReservationHomeSubCluster(request);
} | 3.26 |
hadoop_FederationStateStoreFacade_deregisterSubCluster_rdh | /**
* Deregister a subCluster, updating its state to
* SC_LOST, SC_DECOMMISSIONED, etc.
*
* @param subClusterId
* subClusterId.
* @param subClusterState
* The state of the subCluster to be updated.
* @throws YarnException
* yarn exception.
* @return If Deregister subCluster is successful, return true, otherwise, return false.
*/
public boolean deregisterSubCluster(SubClusterId subClusterId, SubClusterState subClusterState) throws YarnException {
SubClusterDeregisterRequest deregisterRequest =
SubClusterDeregisterRequest.newInstance(subClusterId, subClusterState);
SubClusterDeregisterResponse response = stateStore.deregisterSubCluster(deregisterRequest);
// If the response is not empty, deregisterSubCluster is successful.
if (response != null) {
return true;
}
return false;
} | 3.26 |
hadoop_FederationStateStoreFacade_storeNewToken_rdh | /**
* Stores a new {@link RMDelegationTokenIdentifier} via the Router.
*
* @param identifier
* delegation tokens from the RM.
* @param renewDate
* renewDate.
* @param tokenInfo
* tokenInfo.
* @throws YarnException
* if the call to the state store is unsuccessful.
* @throws IOException
* An IO Error occurred.
*/
public void storeNewToken(RMDelegationTokenIdentifier identifier, long renewDate, String tokenInfo) throws YarnException, IOException {
LOG.info("storing RMDelegation token with sequence number: {}.", identifier.getSequenceNumber());
RouterStoreToken storeToken = RouterStoreToken.newInstance(identifier, renewDate, tokenInfo);
RouterRMTokenRequest request = RouterRMTokenRequest.newInstance(storeToken);
stateStore.storeNewToken(request);
} | 3.26 |
hadoop_FederationStateStoreFacade_getApplicationHomeSubCluster_rdh | /**
* Returns the home {@link SubClusterId} for the specified
* {@link ApplicationId}.
*
* @param appId
* the identifier of the application
* @return the home sub cluster identifier
* @throws YarnException
* if the call to the state store is unsuccessful
*/
public SubClusterId getApplicationHomeSubCluster(ApplicationId appId) throws YarnException {
try {
if (federationCache.isCachingEnabled()) {
return federationCache.getApplicationHomeSubCluster(appId);
} else {
GetApplicationHomeSubClusterResponse response = stateStore.getApplicationHomeSubCluster(GetApplicationHomeSubClusterRequest.newInstance(appId));
return response.getApplicationHomeSubCluster().getHomeSubCluster();
}
} catch (Throwable ex) {
throw new YarnException(ex);
}
} | 3.26 |
hadoop_FederationStateStoreFacade_incrementCurrentKeyId_rdh | /**
* Increments the current key id in the stateStore.
*
* @return currentKeyId.
*/
public int incrementCurrentKeyId() {
return stateStore.incrementCurrentKeyId();
} | 3.26 |
hadoop_FederationStateStoreFacade_getDelegationTokenSeqNum_rdh | /**
* Get SeqNum from stateStore.
*
* @return delegationTokenSequenceNumber.
*/
public int getDelegationTokenSeqNum() {
return stateStore.getDelegationTokenSeqNum();
} | 3.26 |
hadoop_FederationStateStoreFacade_getActiveSubClustersCount_rdh | /**
* Get the number of active cluster nodes.
*
* @return number of active cluster nodes.
* @throws YarnException
* if the call to the state store is unsuccessful.
*/
public int getActiveSubClustersCount() throws YarnException {
Map<SubClusterId, SubClusterInfo> v38 = getSubClusters(true);
if ((v38 == null) || v38.isEmpty()) {
return 0;
} else {
return v38.size();
}
} | 3.26 |
hadoop_FederationStateStoreFacade_getApplicationsHomeSubCluster_rdh | /**
* Get the {@code ApplicationHomeSubCluster} list representing the mapping of
* all submitted applications to their home sub-clusters.
*
* @return the mapping of all submitted applications to their home sub-clusters
* @throws YarnException
* if the request is invalid/fails
*/
public List<ApplicationHomeSubCluster> getApplicationsHomeSubCluster() throws YarnException {
GetApplicationsHomeSubClusterResponse response = stateStore.getApplicationsHomeSubCluster(GetApplicationsHomeSubClusterRequest.newInstance());
return response.getAppsHomeSubClusters();
} | 3.26 |
hadoop_FederationStateStoreFacade_updateReservationHomeSubCluster_rdh | /**
* Update Reservation And HomeSubCluster Mapping.
*
* @param subClusterId
* subClusterId
* @param reservationId
* reservationId
* @param homeSubCluster
* homeSubCluster
* @throws YarnException
* on failure
*/
public void updateReservationHomeSubCluster(SubClusterId subClusterId, ReservationId reservationId, ReservationHomeSubCluster homeSubCluster) throws YarnException {
try {
// update the mapping of reservationId and the home subClusterId to
// the new subClusterId we have selected
updateReservationHomeSubCluster(homeSubCluster);
} catch (YarnException e) {
SubClusterId subClusterIdInStateStore = getReservationHomeSubCluster(reservationId);
if (subClusterId == subClusterIdInStateStore) {
LOG.info("Reservation {} already submitted on SubCluster {}.", reservationId, subClusterId);
} else {
String msg = String.format("Unable to update the ReservationId %s into the FederationStateStore.", reservationId);
throw new YarnException(msg, e);
}
}
} | 3.26 |
hadoop_FederationStateStoreFacade_m1_rdh | /**
* Get the configuration.
*
* @return configuration object
*/
public Configuration m1() {
return this.conf;
} | 3.26 |
hadoop_FederationStateStoreFacade_createRetryPolicy_rdh | /**
* Create a RetryPolicy for {@code FederationStateStoreFacade}. In case of
* failure, it retries for:
* <ul>
* <li>{@code FederationStateStoreRetriableException}</li>
* <li>{@code CacheLoaderException}</li>
* </ul>
*
* @param conf
* the updated configuration
* @return the RetryPolicy for FederationStateStoreFacade
*/
public static RetryPolicy createRetryPolicy(Configuration conf) {
// Retry settings for StateStore
RetryPolicy basePolicy = RetryPolicies.exponentialBackoffRetry(conf.getInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES, Integer.SIZE),
conf.getLong(YarnConfiguration.CLIENT_FAILOVER_SLEEPTIME_BASE_MS, YarnConfiguration.DEFAULT_RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS), TimeUnit.MILLISECONDS);
Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap = new HashMap<>();
exceptionToPolicyMap.put(FederationStateStoreRetriableException.class, basePolicy);
exceptionToPolicyMap.put(CacheLoaderException.class, basePolicy);
exceptionToPolicyMap.put(PoolInitializationException.class, basePolicy);
RetryPolicy retryPolicy = RetryPolicies.retryByException(RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
return retryPolicy;
} | 3.26 |
hadoop_FederationStateStoreFacade_getApplicationSubmissionContext_rdh | /**
* Get ApplicationSubmissionContext according to ApplicationId.
* We don't throw exceptions. If the application cannot be found, we return null.
*
* @param appId
* ApplicationId
* @return ApplicationSubmissionContext of ApplicationId
*/
public ApplicationSubmissionContext getApplicationSubmissionContext(ApplicationId appId) {
try {
GetApplicationHomeSubClusterResponse response = stateStore.getApplicationHomeSubCluster(GetApplicationHomeSubClusterRequest.newInstance(appId));
ApplicationHomeSubCluster v59 = response.getApplicationHomeSubCluster();
return v59.getApplicationSubmissionContext();
} catch (Exception e) {
LOG.error("getApplicationSubmissionContext error, applicationId = {}.", appId, e);
return null;
}
} | 3.26 |
hadoop_FederationStateStoreFacade_getSubCluster_rdh | /**
* Updates the cache with the central {@link FederationStateStore} and returns
* the {@link SubClusterInfo} for the specified {@link SubClusterId}.
*
* @param subClusterId
* the identifier of the sub-cluster
* @param flushCache
* flag to indicate if the cache should be flushed or not
* @return the sub cluster information
* @throws YarnException
* if the call to the state store is unsuccessful
*/
public SubClusterInfo getSubCluster(final SubClusterId subClusterId, final boolean flushCache) throws YarnException {
if (flushCache && federationCache.isCachingEnabled()) {
LOG.info("Flushing subClusters from cache and rehydrating from store," + " most likely on account of RM failover.");
federationCache.removeSubCluster(false);
}
return getSubCluster(subClusterId);
} | 3.26 |
hadoop_FederationStateStoreFacade_getInstanceInternal_rdh | /**
* Returns the singleton instance of the FederationStateStoreFacade object.
*
* @param conf
* configuration.
* @return the singleton {@link FederationStateStoreFacade} instance
*/
private static FederationStateStoreFacade getInstanceInternal(Configuration conf) {
if (facade != null) {
return facade;
}
m0(conf);
return facade;
} | 3.26 |
hadoop_FederationStateStoreFacade_storeNewMasterKey_rdh | /**
* Stores a new master key ({@link RouterMasterKey}) via the Router.
*
* @param newKey
* Key used for generating and verifying delegation tokens
* @throws YarnException
* if the call to the state store is unsuccessful
* @throws IOException
* An IO Error occurred
* @return RouterMasterKeyResponse
*/
public RouterMasterKeyResponse storeNewMasterKey(DelegationKey newKey) throws YarnException, IOException {
LOG.info("Storing master key with keyID {}.", newKey.getKeyId());
ByteBuffer keyBytes = ByteBuffer.wrap(newKey.getEncodedKey());
RouterMasterKey masterKey = RouterMasterKey.newInstance(newKey.getKeyId(), keyBytes, newKey.getExpiryDate());
RouterMasterKeyRequest keyRequest = RouterMasterKeyRequest.newInstance(masterKey);
return stateStore.storeNewMasterKey(keyRequest);
} | 3.26 |
hadoop_FederationStateStoreFacade_getPoliciesConfigurations_rdh | /**
* Get the policies that are represented as
* {@link SubClusterPolicyConfiguration} for all currently active queues in
* the system.
*
* @return the policies for all currently active queues in the system
* @throws YarnException
* if the call to the state store is unsuccessful
*/
public Map<String, SubClusterPolicyConfiguration> getPoliciesConfigurations() throws YarnException {
try {
if (federationCache.isCachingEnabled()) {
return federationCache.getPoliciesConfigurations();
} else {
GetSubClusterPoliciesConfigurationsRequest request = GetSubClusterPoliciesConfigurationsRequest.newInstance();
return buildPolicyConfigMap(stateStore.getPoliciesConfigurations(request));
}
} catch (Throwable ex) {
throw new YarnException(ex);
}
} | 3.26 |
hadoop_FederationStateStoreFacade_incrementDelegationTokenSeqNum_rdh | /**
* Increments the delegation token sequence number in the stateStore.
*
* @return delegationTokenSequenceNumber.
*/
public int incrementDelegationTokenSeqNum() {
return stateStore.incrementDelegationTokenSeqNum();
} | 3.26 |
hadoop_FederationStateStoreFacade_getCurrentKeyId_rdh | /**
* Get CurrentKeyId from stateStore.
*
* @return currentKeyId.
*/
public int getCurrentKeyId() {
return stateStore.getCurrentKeyId();
} | 3.26 |
hadoop_FederationStateStoreFacade_addApplicationHomeSubCluster_rdh | /**
* Add ApplicationHomeSubCluster to FederationStateStore.
*
* @param applicationId
* applicationId.
* @param homeSubCluster
* homeSubCluster, homeSubCluster selected according to policy.
* @throws YarnException
* yarn exception.
*/
public void addApplicationHomeSubCluster(ApplicationId applicationId, ApplicationHomeSubCluster homeSubCluster) throws YarnException {
try {
addApplicationHomeSubCluster(homeSubCluster);
} catch (YarnException e) {
String msg = String.format("Unable to insert the ApplicationId %s into the FederationStateStore.", applicationId);
throw new YarnException(msg, e);
}
} | 3.26 |
hadoop_FederationStateStoreFacade_existsApplicationHomeSubCluster_rdh | /**
* Query SubClusterId By applicationId.
*
* If SubClusterId is not empty, it means it exists and returns true;
* if SubClusterId is empty, it means it does not exist and returns false.
*
* @param applicationId
* applicationId
* @return true, SubClusterId exists; false, SubClusterId not exists.
*/
public boolean existsApplicationHomeSubCluster(ApplicationId applicationId) {
try {
SubClusterId subClusterId = getApplicationHomeSubCluster(applicationId);
if (subClusterId != null) {
return true;
}
} catch (YarnException e) {
LOG.debug("get homeSubCluster by applicationId = {} error.", applicationId, e);
}
return false;
} | 3.26 |
hadoop_FederationStateStoreFacade_createInstance_rdh | /**
* Helper method to create instances of Object using the class name specified
* in the configuration object.
*
* @param conf
* the yarn configuration
* @param configuredClassName
* the configuration provider key
* @param defaultValue
* the default implementation class
* @param type
* the required interface/base class
* @param <T>
* The type of the instance to create
* @return the instances created
*/
@SuppressWarnings("unchecked")
public static <T> T createInstance(Configuration conf, String configuredClassName, String defaultValue, Class<T> type) {
String className = conf.get(configuredClassName, defaultValue);
try {
Class<?> clusterResolverClass = conf.getClassByName(className);
if (type.isAssignableFrom(clusterResolverClass)) {
return ((T) (ReflectionUtils.newInstance(clusterResolverClass, conf)));
} else {
throw new YarnRuntimeException((("Class: " + className) + " not instance of ") + type.getCanonicalName());
}
} catch (ClassNotFoundException e) {
throw new YarnRuntimeException("Could not instantiate : " + className, e);
}
} | 3.26 |
hadoop_FederationStateStoreFacade_updateStoredToken_rdh | /**
* Updates a stored {@link RMDelegationTokenIdentifier} via the Router.
*
* @param identifier
* delegation tokens from the RM
* @param renewDate
* renewDate
* @param tokenInfo
* tokenInfo.
* @throws YarnException
* if the call to the state store is unsuccessful.
* @throws IOException
* An IO Error occurred.
 */
public void updateStoredToken(RMDelegationTokenIdentifier identifier, long renewDate, String tokenInfo) throws YarnException, IOException {
LOG.info("updating RMDelegation token with sequence number: {}.", identifier.getSequenceNumber());
RouterStoreToken storeToken = RouterStoreToken.newInstance(identifier, renewDate, tokenInfo);
RouterRMTokenRequest request = RouterRMTokenRequest.newInstance(storeToken);
stateStore.updateStoredToken(request);
} | 3.26 |
hadoop_FederationStateStoreFacade_m0_rdh | /**
* Generate the singleton instance of the FederationStateStoreFacade object.
*
* @param conf
* configuration.
*/
private static void m0(Configuration conf) {
if (facade == null) {
synchronized(FederationStateStoreFacade.class) {
if (facade == null) {
Configuration yarnConf = new Configuration();
if (conf != null) {
yarnConf = conf;
}
facade = new FederationStateStoreFacade(yarnConf);
}
}
}
}
/**
* Returns the {@link SubClusterInfo} for the specified {@link SubClusterId}.
*
* @param subClusterId
* the identifier of the sub-cluster
* @return the sub cluster information, or
{@code null} | 3.26 |
hadoop_FederationStateStoreFacade_getReservationHomeSubCluster_rdh | /**
* Returns the home {@link SubClusterId} for the specified {@link ReservationId}.
*
* @param reservationId
* the identifier of the reservation
* @return the home subCluster identifier
* @throws YarnException
* if the call to the state store is unsuccessful
*/
public SubClusterId getReservationHomeSubCluster(ReservationId reservationId) throws YarnException {
GetReservationHomeSubClusterResponse v12 = stateStore.getReservationHomeSubCluster(GetReservationHomeSubClusterRequest.newInstance(reservationId));
return v12.getReservationHomeSubCluster().getHomeSubCluster();
} | 3.26 |
hadoop_FederationStateStoreFacade_getMasterKeyByDelegationKey_rdh | /**
* The Router Supports GetMasterKeyByDelegationKey.
*
* @param newKey
* Key used for generating and verifying delegation tokens
* @throws YarnException
* if the call to the state store is unsuccessful
* @throws IOException
* An IO Error occurred
* @return RouterMasterKeyResponse
*/
public RouterMasterKeyResponse getMasterKeyByDelegationKey(DelegationKey newKey) throws YarnException, IOException {
LOG.info("Storing master key with keyID {}.", newKey.getKeyId());
ByteBuffer v23 = ByteBuffer.wrap(newKey.getEncodedKey());
RouterMasterKey masterKey = RouterMasterKey.newInstance(newKey.getKeyId(),
v23, newKey.getExpiryDate());
RouterMasterKeyRequest keyRequest = RouterMasterKeyRequest.newInstance(masterKey);
return stateStore.getMasterKeyByDelegationKey(keyRequest);
} | 3.26 |
hadoop_FederationStateStoreFacade_m4_rdh | /**
* Add or Update ReservationHomeSubCluster.
*
* @param reservationId
* reservationId.
* @param subClusterId
* homeSubClusterId, this is selected by strategy.
* @param retryCount
* number of retries.
* @throws YarnException
* yarn exception.
*/
public void m4(ReservationId reservationId, SubClusterId subClusterId, int retryCount) throws YarnException {
Boolean exists = existsReservationHomeSubCluster(reservationId);
ReservationHomeSubCluster reservationHomeSubCluster = ReservationHomeSubCluster.newInstance(reservationId, subClusterId);
if ((!exists) || (retryCount == 0)) {
// persist the mapping of reservationId and the subClusterId which has
// been selected as its home.
addReservationHomeSubCluster(reservationId,
reservationHomeSubCluster);
} else {
// update the mapping of reservationId and the home subClusterId to
// the new subClusterId we have selected.
updateReservationHomeSubCluster(subClusterId, reservationId, reservationHomeSubCluster);
}
} | 3.26 |
hadoop_FederationStateStoreFacade_getActiveSubClusters_rdh | /**
* Get active subclusters.
*
* @return a Collection of {@link SubClusterInfo} for the active subclusters.
*/
public Collection<SubClusterInfo> getActiveSubClusters() throws NotFoundException {
try {
Map<SubClusterId, SubClusterInfo> subClusterMap = getSubClusters(true);
if (MapUtils.isEmpty(subClusterMap)) {
throw new NotFoundException("Not Found SubClusters.");
}
return subClusterMap.values();
} catch (Exception e) {
LOG.error("getActiveSubClusters failed.", e);
return null;
}
} | 3.26 |
hadoop_FederationStateStoreFacade_deleteApplicationHomeSubCluster_rdh | /**
* Delete the mapping of home {@code SubClusterId} of a previously submitted
* {@code ApplicationId}. Currently the response is empty if the operation is
* successful; if not, an exception reports the reason for the failure.
*
* @param applicationId
* the application to delete the home sub-cluster of
* @throws YarnException
* if the request is invalid/fails
*/
public void deleteApplicationHomeSubCluster(ApplicationId applicationId) throws YarnException {
stateStore.deleteApplicationHomeSubCluster(DeleteApplicationHomeSubClusterRequest.newInstance(applicationId));
} | 3.26 |
hadoop_FederationStateStoreFacade_getSubClusters_rdh | /**
* Updates the cache with the central {@link FederationStateStore} and returns
* the {@link SubClusterInfo} of all active sub cluster(s).
*
* @param filterInactiveSubClusters
* whether to filter out inactive
* sub-clusters
* @param flushCache
* flag to indicate if the cache should be flushed or not
* @return the sub cluster information
* @throws YarnException
* if the call to the state store is unsuccessful
*/
public Map<SubClusterId, SubClusterInfo> getSubClusters(final boolean filterInactiveSubClusters, final boolean flushCache) throws YarnException {
if (flushCache && federationCache.isCachingEnabled()) {
LOG.info("Flushing subClusters from cache and rehydrating from store.");
federationCache.removeSubCluster(flushCache);
}
return getSubClusters(filterInactiveSubClusters);
}
/**
* Returns the {@link SubClusterPolicyConfiguration} for the specified queue.
*
* @param queue
* the queue whose policy is required
* @return the corresponding configured policy, or {@code null} | 3.26 |
hadoop_GetAllResourceTypeInfoResponsePBImpl_initResourceTypeInfosList_rdh | // Once this is called. containerList will never be null - until a getProto
// is called.
private void initResourceTypeInfosList() {
if (this.resourceTypeInfo != null) {
return;
}
GetAllResourceTypeInfoResponseProtoOrBuilder p = (viaProto) ? proto : builder;
List<ResourceTypeInfoProto> list = p.getResourceTypeInfoList();
resourceTypeInfo = new ArrayList<ResourceTypeInfo>();
for (ResourceTypeInfoProto a : list) {
resourceTypeInfo.add(convertFromProtoFormat(a));
}
} | 3.26 |
hadoop_OBSDataBlocks_getOutstandingBufferCount_rdh | /**
* Get count of outstanding buffers.
*
* @return the current buffer count
*/
public int getOutstandingBufferCount() {
return BUFFERS_OUTSTANDING.get();
} | 3.26 |
hadoop_OBSDataBlocks_flush_rdh | /**
* Flush operation will flush to disk.
*
* @throws IOException
* IOE raised on FileOutputStream
*/
@Override
void flush() throws IOException {
super.flush();
out.flush();
} | 3.26 |
hadoop_OBSDataBlocks_write_rdh | /**
* Write a series of bytes from the buffer, from the offset. Returns the
* number of bytes written. Only valid in the state {@code Writing}. Base
* class verifies the state but does no writing.
*
* @param buffer
* buffer
* @param offset
* offset
* @param length
* length of write
* @return number of bytes written
* @throws IOException
* trouble
*/
int write(final byte[] buffer, final int offset, final int length) throws IOException {
verifyState(DestState.Writing);
Preconditions.checkArgument(buffer != null, "Null buffer");
Preconditions.checkArgument(length >= 0, "length is negative");
Preconditions.checkArgument(offset >= 0, "offset is negative");
Preconditions.checkArgument(!((buffer.length - offset) < length), "buffer shorter than amount of data to write");
return 0;
} | 3.26 |
hadoop_OBSDataBlocks_getInputStream_rdh | /**
* InputStream backed by the internal byte array.
*
* @return input stream
*/
ByteArrayInputStream getInputStream() {
ByteArrayInputStream bin = new ByteArrayInputStream(this.buf, 0, count);
this.reset();
this.buf = null;
return bin;
} | 3.26 |
hadoop_OBSDataBlocks_firstBlockSize_rdh | /**
* Returns the size of the first block.
*
* @return the block first block size
*/
@VisibleForTesting
public int firstBlockSize() {
return this.firstBlockSize;
} | 3.26 |
hadoop_OBSDataBlocks_innerClose_rdh | /**
* The close operation will delete the destination file if it still exists.
*/
@Override
protected void innerClose() {
final DestState state = getState();
LOG.debug("Closing {}", this);
switch (state) {
case Writing :
if (bufferFile.exists()) {
// file was not uploaded
LOG.debug("Block[{}]: Deleting buffer file as upload " + "did not start", getIndex());
closeBlock();
}
break;
case Upload :
LOG.debug("Block[{}]: Buffer file {} exists close upload stream", getIndex(), bufferFile);break;
case Closed :
closeBlock();
break;
default :
// this state can never be reached, but checkstyle
// complains, so it is here.
}
} | 3.26 |
hadoop_OBSDataBlocks_getOwner_rdh | /**
* Owner.
*
* @return obsFileSystem instance
*/
protected OBSFileSystem getOwner() {
return owner;
} | 3.26 |
hadoop_OBSDataBlocks_hasRemaining_rdh | /**
* Check if there is data left.
*
* @return true if there is data remaining in the buffer.
*/
public synchronized boolean hasRemaining() {
return f1.hasRemaining();
} | 3.26 |
hadoop_OBSDataBlocks_dataSize_rdh | /**
* Get the amount of data; if there is no buffer then the size is 0.
*
* @return the amount of data available to upload.
*/
@Override
int dataSize() {
return dataSize != null ? dataSize : bufferCapacityUsed();
} | 3.26 |
hadoop_OBSDataBlocks_verifyState_rdh | /**
* Verify that the block is in the declared state.
*
* @param expected
* expected state.
* @throws IllegalStateException
* if the DataBlock is in the wrong state
*/
protected final void verifyState(final DestState expected) throws IllegalStateException {
if ((expected != null) && (f0 != expected)) {
throw new IllegalStateException((((("Expected stream state " + expected) + " -but actual state is ") + f0) + " in ") + this);
}
} | 3.26 |
hadoop_OBSDataBlocks_createTmpFileForWrite_rdh | /**
* Demand create the directory allocator, then create a temporary file.
* {@link LocalDirAllocator#createTmpFileForWrite(String, long,
* Configuration)}.
*
* @param pathStr
* prefix for the temporary file
* @param size
* the size of the file that is going to be written
* @param conf
* the Configuration object
* @return a unique temporary file
* @throws IOException
* IO problems
*/
static synchronized File createTmpFileForWrite(final String pathStr, final long size, final Configuration conf) throws IOException {
if (directoryAllocator == null) {
String bufferDir = (conf.get(OBSConstants.BUFFER_DIR) != null) ? OBSConstants.BUFFER_DIR : "hadoop.tmp.dir";
directoryAllocator = new LocalDirAllocator(bufferDir);
}
return directoryAllocator.createTmpFileForWrite(pathStr, size, conf);
} | 3.26 |
hadoop_OBSDataBlocks_createFactory_rdh | /**
* Create a factory.
*
* @param owner
* factory owner
* @param name
* factory name -the option from {@link OBSConstants}.
* @return the factory, ready to be initialized.
* @throws IllegalArgumentException
* if the name is unknown.
*/
static BlockFactory createFactory(final OBSFileSystem owner, final String name) {
switch (name) {
case OBSConstants.FAST_UPLOAD_BUFFER_ARRAY :
return new ByteArrayBlockFactory(owner);
case OBSConstants.FAST_UPLOAD_BUFFER_DISK :
return new DiskBlockFactory(owner);
case OBSConstants.FAST_UPLOAD_BYTEBUFFER :
return new ByteBufferBlockFactory(owner);
default :
throw new IllegalArgumentException((("Unsupported block buffer" + " \"") + name) + '"');
}
} | 3.26 |
hadoop_OBSDataBlocks_closeBlock_rdh | /**
* Close the block. This will delete the block's buffer file if the block
* has not previously been closed.
*/
void closeBlock() {
LOG.debug("block[{}]: closeBlock()", getIndex());
if (!closed.getAndSet(true)) {
if ((!bufferFile.delete()) && bufferFile.exists()) {
LOG.warn("delete({}) returned false", bufferFile.getAbsoluteFile());
}
} else {
LOG.debug("block[{}]: skipping re-entrant closeBlock()", getIndex());
}
} | 3.26 |
hadoop_OBSDataBlocks_getState_rdh | /**
* Current state.
*
* @return the current state.
*/
protected final DestState getState() {
return f0;
} | 3.26 |
hadoop_OBSDataBlocks_startUpload_rdh | /**
* Switch to the upload state and return a stream for uploading. Base class
* calls {@link #enterState(DestState, DestState)} to manage the state
* machine.
*
* @return the stream
* @throws IOException
* trouble
*/
Object startUpload() throws IOException {
LOG.debug("Start datablock[{}] upload", index);
enterState(DestState.Writing, DestState.Upload);
return null;
} | 3.26 |
hadoop_OBSDataBlocks_read_rdh | /**
* Read in data.
*
* @param b
* destination buffer
* @param offset
* offset within the buffer
* @param length
* length of bytes to read
* @return read size
* @throws EOFException
* if the position is negative
* @throws IndexOutOfBoundsException
* if there isn't space for the amount
* of data requested.
* @throws IllegalArgumentException
* other arguments are invalid.
*/
public synchronized int read(final byte[] b, final int offset, final int length) throws IOException {
Preconditions.checkArgument(length >= 0, "length is negative");
Preconditions.checkArgument(b != null, "Null buffer");
if ((b.length - offset) < length) {
throw new IndexOutOfBoundsException((((((FSExceptionMessages.TOO_MANY_BYTES_FOR_DEST_BUFFER + ": request length =") + length) + ", with offset =") + offset) + "; buffer capacity =") + (b.length - offset));
}
verifyOpen();
if (!hasRemaining()) {
return -1;
}
int toRead = Math.min(length, available());
f1.get(b, offset, toRead);
return toRead;
} | 3.26 |
hadoop_OBSDataBlocks_validateWriteArgs_rdh | /**
* Validate args to a write command. These are the same validation checks
* expected for any implementation of {@code OutputStream.write()}.
*
* @param b
* byte array containing data
* @param off
* offset in array where to start
* @param len
* number of bytes to be written
* @throws NullPointerException
* for a null buffer
* @throws IndexOutOfBoundsException
* if indices are out of range
*/
static void validateWriteArgs(final byte[] b, final int off, final int len) {
Preconditions.checkNotNull(b);
if (((((off < 0) || (off > b.length)) || (len < 0)) || ((off + len) > b.length)) || ((off + len) < 0)) {
throw new IndexOutOfBoundsException(((((("write (b[" +
b.length) + "], ") + off) + ", ") + len) + ')');
}
} | 3.26 |
hadoop_OBSDataBlocks_position_rdh | /**
* Get the current buffer position.
*
* @return the buffer position
*/
public synchronized int position() {
return f1.position();
} | 3.26 |
hadoop_OBSDataBlocks_create_rdh | /**
* Create a temp file and a {@link DiskBlock} instance to manage it.
*
* @param index
* block index
* @param limit
* limit of the block.
* @return the new block
* @throws IOException
* IO problems
*/
@Override
DataBlock create(final long index, final int limit) throws IOException {
File destFile = createTmpFileForWrite(String.format("obs-block-%04d-", index), limit, getOwner().getConf());
return new DiskBlock(destFile, limit, index);
} | 3.26 |
hadoop_OBSDataBlocks_hasData_rdh | /**
* Predicate to check if there is data in the block.
*
* @return true if there is
*/
boolean hasData() {
return dataSize() > 0;
} | 3.26 |
hadoop_ApplicationFinishEvent_getDiagnostic_rdh | /**
* Why the app was aborted
*
* @return diagnostic message
*/
public String getDiagnostic() {
return diagnostic;
} | 3.26 |
hadoop_DocumentStoreFactory_createDocumentStoreWriter_rdh | /**
* Creates a DocumentStoreWriter for a {@link DocumentStoreVendor}.
*
* @param conf
* for creating client connection
* @param <Document>
* type of Document for which the writer has to be created,
* i.e TimelineEntityDocument, FlowActivityDocument etc
* @return document store writer
* @throws DocumentStoreNotSupportedException
* if there is no implementation
* for a configured {@link DocumentStoreVendor} or unknown
* {@link DocumentStoreVendor} is configured.
* @throws YarnException
* if the required configs for DocumentStore is missing.
*/
public static <Document extends TimelineDocument> DocumentStoreWriter<Document> createDocumentStoreWriter(Configuration conf) throws YarnException {
final DocumentStoreVendor storeType = getStoreVendor(conf);
switch (storeType) {
case COSMOS_DB :
DocumentStoreUtils.validateCosmosDBConf(conf);
return new CosmosDBDocumentStoreWriter<>(conf);
default :
throw new DocumentStoreNotSupportedException("Unable to create DocumentStoreWriter for type : " + storeType);
}
}
/**
* Creates a DocumentStoreReader for a {@link DocumentStoreVendor}.
*
* @param conf
* for creating client connection
* @param <Document>
* type of Document for which the writer has to be created,
* i.e TimelineEntityDocument, FlowActivityDocument etc
* @return document store reader
* @throws DocumentStoreNotSupportedException
* if there is no implementation
* for a configured {@link DocumentStoreVendor} or unknown
* {@link DocumentStoreVendor} | 3.26 |
hadoop_DatanodeCacheManager_getLiveDatanodeStorageReport_rdh | /**
* Returns the live datanodes and its storage details, which has available
* space (> 0) to schedule block moves. This will return array of datanodes
* from its local cache. It has a configurable refresh interval in millis and
* periodically refresh the datanode cache by fetching latest
* {@link Context#getLiveDatanodeStorageReport()} once it elapsed refresh
* interval.
*
* @throws IOException
*/
public DatanodeMap getLiveDatanodeStorageReport(Context spsContext) throws IOException {
long now = Time.monotonicNow();
long elapsedTimeMs = now - lastAccessedTime;
boolean v2 = elapsedTimeMs >= refreshIntervalMs;
lastAccessedTime = now;
if (v2) {
if (LOG.isDebugEnabled()) {
LOG.debug("elapsedTimeMs > refreshIntervalMs : {} > {}," + " so refreshing cache", elapsedTimeMs, refreshIntervalMs);
}
datanodeMap.reset();// clear all previously cached items.
// Fetch live datanodes from namenode and prepare DatanodeMap.
DatanodeStorageReport[] liveDns = spsContext.getLiveDatanodeStorageReport();
for (DatanodeStorageReport storage : liveDns) {
StorageReport[] storageReports = storage.getStorageReports();
List<StorageType> storageTypes = new ArrayList<>();
List<Long> remainingSizeList = new ArrayList<>();
for (StorageReport t : storageReports) {
if (t.getRemaining() > 0) {
storageTypes.add(t.getStorage().getStorageType());
remainingSizeList.add(t.getRemaining());
}
}
datanodeMap.addTarget(storage.getDatanodeInfo(), storageTypes, remainingSizeList);
}
if (LOG.isDebugEnabled()) {
LOG.debug("LIVE datanodes: {}", datanodeMap);
}
// get network topology
cluster = spsContext.getNetworkTopology(datanodeMap);
}
return datanodeMap;
} | 3.26 |
hadoop_MawoConfiguration_getZKAcl_rdh | /**
* Get ZooKeeper Acls.
*
* @return value of ZooKeeper.acl
*/
public String getZKAcl() {
return configsMap.get(ZK_ACL);
} | 3.26 |
hadoop_MawoConfiguration_getRpcServerPort_rdh | /**
* Get MaWo RPC server Port.
*
* @return value of rpc.server.port
*/
public int getRpcServerPort() {
return Integer.parseInt(configsMap.get(RPC_SERVER_PORT));
} | 3.26 |
hadoop_MawoConfiguration_getJobQueueStorageEnabled_rdh | /**
* Check if Job Queue Storage is Enabled.
*
* @return True if Job queue storage is enabled otherwise False
*/
public boolean getJobQueueStorageEnabled() {
return Boolean.parseBoolean(configsMap.get(JOB_QUEUE_STORAGE_ENABLED));
} | 3.26 |
hadoop_MawoConfiguration_getZKSessionTimeoutMS_rdh | /**
* Get ZooKeeper session timeout in milli seconds.
*
* @return value of ZooKeeper.session.timeout.ms
*/
public int getZKSessionTimeoutMS() {
return Integer.parseInt(configsMap.get(ZK_SESSION_TIMEOUT_MS));
} | 3.26 |
hadoop_MawoConfiguration_getJobBuilderClass_rdh | /**
* Get job builder class.
*
* @return value of mawo.job-builder.class
*/
public String getJobBuilderClass() {
return configsMap.get(JOB_BUILDER_CLASS);
} | 3.26 |
hadoop_MawoConfiguration_getZKRetryIntervalMS_rdh | /**
* Get ZooKeeper retry interval value in milli seconds.
*
* @return value of ZooKeeper.retry.interval.ms
*/
public int getZKRetryIntervalMS() {
return Integer.parseInt(configsMap.get(ZK_RETRY_INTERVAL_MS));
} | 3.26 |
hadoop_MawoConfiguration_readConfigFile_rdh | /**
* Find, read, and parse the configuration file.
*
* @return the properties that were found or empty if no file was found
*/
private static Properties readConfigFile() {
Properties properties = new Properties();
// Get property file stream from classpath
LOG.info((("Configuration file being loaded: " + CONFIG_FILE) + ". Found in classpath at ") + MawoConfiguration.class.getClassLoader().getResource(CONFIG_FILE));
InputStream inputStream = MawoConfiguration.class.getClassLoader().getResourceAsStream(CONFIG_FILE);
if (inputStream == null) {
throw new RuntimeException(CONFIG_FILE + " not found in classpath");
}
// load the properties
try {
properties.load(inputStream);
inputStream.close();
} catch (FileNotFoundException fnf) {
LOG.error(("No configuration file " + CONFIG_FILE) + " found in classpath.");
} catch (IOException ie) {
throw new IllegalArgumentException("Can't read configuration file " + CONFIG_FILE, ie);
}
return properties;
} | 3.26 |
hadoop_MawoConfiguration_getConfigsMap_rdh | /**
* Get MaWo config map.
*
* @return the config map for MaWo properties
*/
public Map<String, String> getConfigsMap() {
return configsMap;
} | 3.26 |
hadoop_MawoConfiguration_getWorkerConcurrentTasksLimit_rdh | /**
* Get number of tasks a worker can run in parallel.
*
* @return value of worker.num.tasks
 */
public int getWorkerConcurrentTasksLimit() {
return Integer.parseInt(configsMap.get(WORKER_NUM_TASKS));
} | 3.26 |
hadoop_MawoConfiguration_getClusterManagerURL_rdh | /**
* Get cluster manager URL.
*
* @return value of ycloud.url
*/
public String getClusterManagerURL() {
return configsMap.get(CLUSTER_MANAGER_URL);
} | 3.26 |
hadoop_MawoConfiguration_getZKRetriesNum_rdh | /**
* Get ZooKeeper retries number.
*
* @return value of ZooKeeper.retries.num
*/
public int getZKRetriesNum() {
return Integer.parseInt(configsMap.get(ZK_RETRIES_NUM));
} | 3.26 |
hadoop_MawoConfiguration_getWorkerWorkSpace_rdh | /**
* Get worker work space.
*
* @return value of worker.workspace
*/
public String getWorkerWorkSpace() {
return configsMap.get(f1);
} | 3.26 |
hadoop_MawoConfiguration_getAutoShutdownWorkers_rdh | /**
* Check if worker auto shutdown feature is enabled.
*
* @return value of mawo.master.auto-shutdown-workers
*/
public boolean getAutoShutdownWorkers() {
return Boolean.parseBoolean(configsMap.get(AUTO_SHUTDOWN_WORKERS));
} | 3.26 |
hadoop_MawoConfiguration_getZKAddress_rdh | /**
* Get ZooKeeper Address.
*
* @return value of ZooKeeper.address
*/
public String getZKAddress() {
return configsMap.get(ZK_ADDRESS); } | 3.26 |
hadoop_MawoConfiguration_getMasterTasksStatusLogPath_rdh | /**
* Get Task status log file path on master host.
*
* @return value of master.tasks-status.log.path
*/
public String getMasterTasksStatusLogPath() {
return configsMap.get(MASTER_TASKS_STATUS_LOG_PATH);
} | 3.26 |
hadoop_MawoConfiguration_getZKParentPath_rdh | /**
* Get ZooKeeper parent Path.
*
* @return value of ZooKeeper.parent.path
*/
public String getZKParentPath() {
return configsMap.get(f0);
} | 3.26 |
hadoop_MawoConfiguration_getMasterDrainEventsTimeout_rdh | /**
* Get Master drain event timeout.
*
* @return value of master.drain-events.timeout
*/
public long getMasterDrainEventsTimeout() {
return Long.parseLong(configsMap.get(f2));
} | 3.26 |
hadoop_MawoConfiguration_getRpcHostName_rdh | /**
* Get RPC Host map.
*
* @return value of rpc.server.hostname
*/
public String getRpcHostName() {
return configsMap.get(RPC_SERVER_HOSTNAME);
} | 3.26 |
hadoop_MawoConfiguration_getWorkerWhiteListEnv_rdh | /**
* Get Worker whitelist env params.
* These params will be set in all tasks.
*
* @return list of white list environment
*/
public List<String> getWorkerWhiteListEnv() {
List<String> whiteList = new ArrayList<String>();
String v3 = configsMap.get(WORKER_WHITELIST_ENV);
if ((v3 != null) && (!v3.isEmpty())) {
String[] variables = v3.split(COMMA_SPLITTER);
for (String variable : variables) {
variable = variable.trim();
if (variable.startsWith("$")) {
variable = variable.substring(1);
}
if (!variable.isEmpty()) {
whiteList.add(variable);
}
}
}
return whiteList;
} | 3.26 |
hadoop_MawoConfiguration_getTeardownWorkerValidityInterval_rdh | /**
* Get Teardown worker validity interval.
*
* @return value of master.teardown-worker.validity-interval.ms
*/
public long getTeardownWorkerValidityInterval() {
return Long.parseLong(configsMap.get(MASTER_TEARDOWN_WORKER_VALIDITY_INTERVAL_MS));
} | 3.26 |
hadoop_ClusterTopologyReader_get_rdh | /**
* Get the {@link LoggedNetworkTopology} object.
*
* @return The {@link LoggedNetworkTopology} object parsed from the input.
*/
public LoggedNetworkTopology get() {
return topology;
} | 3.26 |
hadoop_ConfigurationWithLogging_get_rdh | /**
* See {@link Configuration#get(String, String)}.
*/
@Override
public String get(String name, String defaultValue) {
String value = super.get(name, defaultValue);
log.info("Got {} = '{}' (default '{}')", name, redactor.redact(name, value), redactor.redact(name, defaultValue));
return value;
} | 3.26 |
hadoop_ConfigurationWithLogging_getFloat_rdh | /**
* See {@link Configuration#getFloat(String, float)}.
*/
@Override
public float getFloat(String name, float defaultValue) {
float value = super.getFloat(name, defaultValue);
log.info("Got {} = '{}' (default '{}')", name, value, defaultValue);
return value;
} | 3.26 |
hadoop_ConfigurationWithLogging_getInt_rdh | /**
* See {@link Configuration#getInt(String, int)}.
*/
@Override
public int getInt(String name, int defaultValue) {
int value = super.getInt(name, defaultValue);
log.info("Got {} = '{}' (default '{}')", name, value, defaultValue);
return value;
} | 3.26 |
hadoop_ConfigurationWithLogging_getLong_rdh | /**
* See {@link Configuration#getLong(String, long)}.
 */
@Override
public long getLong(String name, long defaultValue) {
long value = super.getLong(name, defaultValue);
log.info("Got {} = '{}' (default '{}')", name, value, defaultValue);
return value;
} | 3.26 |
hadoop_ConfigurationWithLogging_getBoolean_rdh | /**
* See {@link Configuration#getBoolean(String, boolean)}.
*/
@Override
public boolean getBoolean(String name, boolean defaultValue) {
boolean value = super.getBoolean(name, defaultValue);
log.info("Got {} = '{}' (default '{}')", name, value, defaultValue);
return value;
} | 3.26 |
hadoop_ConfigurationWithLogging_set_rdh | /**
* See {@link Configuration#set(String, String, String)}.
*/
@Override
public void set(String name, String value, String source) {
log.info("Set {} to '{}'{}", name, redactor.redact(name, value), source == null ? "" : " from " + source);
super.set(name, value, source);
} | 3.26 |
hadoop_MountTableRefresherService_refresh_rdh | /**
* Refresh mount table cache of this router as well as all other routers.
*
* @throws StateStoreUnavailableException
* if the state store is not available.
*/
public void refresh() throws StateStoreUnavailableException {
RouterStore routerStore = router.getRouterStateManager();
try {
routerStore.loadCache(true);
} catch (IOException e) {
LOG.warn("RouterStore load cache failed,", e);
}
List<RouterState> cachedRecords = routerStore.getCachedRecords();
List<MountTableRefresherThread> v6 = new ArrayList<>();
for (RouterState routerState : cachedRecords) {
String adminAddress = routerState.getAdminAddress();
if ((adminAddress == null) || (adminAddress.length() == 0)) {
// this router has not enabled router admin.
continue;
}
// No use of calling refresh on router which is not running state
if (routerState.getStatus() != RouterServiceState.RUNNING) {
LOG.info("Router {} is not running. Mount table cache will not refresh.", routerState.getAddress());
// remove if RouterClient is cached.
removeFromCache(adminAddress);
} else if (isLocalAdmin(adminAddress)) {
/* Local router's cache update does not require RPC call, so no need for
RouterClient
*/
v6.add(getLocalRefresher(adminAddress));
} else {
try {
RouterClient client = routerClientsCache.get(adminAddress);
v6.add(new MountTableRefresherThread(client.getMountTableManager(), adminAddress));
} catch (ExecutionException execExcep) {
// Can not connect, seems router is stopped now.
LOG.warn(ROUTER_CONNECT_ERROR_MSG, adminAddress, execExcep);
}
}
}
if (!v6.isEmpty()) {
invokeRefresh(v6);
}
} | 3.26 |
hadoop_MountTableRefresherService_getClientCreator_rdh | /**
* Creates RouterClient and caches it.
*/
private CacheLoader<String, RouterClient> getClientCreator() {
return new CacheLoader<String, RouterClient>() {
public RouterClient m0(String adminAddress) throws IOException {
InetSocketAddress v1 = NetUtils.createSocketAddr(adminAddress);
Configuration config = getConfig();
return createRouterClient(v1, config);
}};
} | 3.26 |
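Several rows above come from Hadoop's `FederationStateStoreFacade`. As a hedged illustration only (it is not part of the dataset), the sketch below strings together a few of the signatures shown in those rows, `getInstance(Configuration)`, `existsApplicationHomeSubCluster(ApplicationId)`, and `getApplicationHomeSubCluster(ApplicationId)`, to show how a Router-side caller might look up an application's home sub-cluster. The wrapper class name is hypothetical, and the package imports reflect current Hadoop source layout; treat them as assumptions if the dataset targets a different version.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;

/** Hypothetical caller for illustration; not taken from the dataset rows above. */
public final class HomeSubClusterLookup {

  private HomeSubClusterLookup() {
  }

  /**
   * Looks up the home sub-cluster of an application via the facade,
   * returning null when no mapping exists.
   */
  public static SubClusterId lookup(Configuration conf, ApplicationId appId)
      throws YarnException {
    // Singleton facade, per the getInstance(Configuration) row above.
    FederationStateStoreFacade facade = FederationStateStoreFacade.getInstance(conf);
    // existsApplicationHomeSubCluster swallows YarnException and returns false on error.
    if (!facade.existsApplicationHomeSubCluster(appId)) {
      return null;
    }
    // Throws YarnException if the state store call fails.
    return facade.getApplicationHomeSubCluster(appId);
  }
}
```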