name | code_snippet | score |
---|---|---|
hadoop_ManifestStoreOperations_getEtag_rdh | /**
* Extract an etag from a status if the conditions are met.
* If the conditions are not met, return null or ""; they will
* both be treated as "no etags available"
* <pre>
* 1. The status is of a type which the implementation recognizes
* as containing an etag.
* 2. After casting the etag field can be retrieved
* 3. and that value is non-null/non-empty.
* </pre>
*
* @param status
status, which may be null or of any subclass of FileStatus.
* @return either a valid etag, or null or "".
*/
public String getEtag(FileStatus status) {
return ManifestCommitterSupport.getEtag(status);
} | 3.26 |
hadoop_ManifestStoreOperations_bindToFileSystem_rdh | /**
* Bind to the filesystem.
* This is called by the manifest committer after the operations
* have been instantiated.
*
* @param fileSystem
* target FS
* @param path
* actual path under FS.
* @throws IOException
* if there are binding problems.
*/
public void bindToFileSystem(FileSystem fileSystem, Path path) throws IOException {
}
/**
* Forward to {@link FileSystem#getFileStatus(Path)} | 3.26 |
hadoop_ManifestStoreOperations_isFile_rdh | /**
* Is a path a file? Used during directory creation.
* This is a copy & paste of FileSystem.isFile();
* {@code StoreOperationsThroughFileSystem} calls into
* the FS direct so that stores which optimize their probes
* can save on IO.
*
* @param path
* path to probe
* @return true if the path exists and resolves to a file
* @throws IOException
* failure other than FileNotFoundException
*/
public boolean isFile(Path path) throws IOException {
try {
return getFileStatus(path).isFile();
} catch (FileNotFoundException e) {
return false;
}
} | 3.26 |
hadoop_ManifestStoreOperations_fromResilientCommit_rdh | /**
* Full commit result.
*
* @param recovered
* Did recovery take place?
* @param waitTime
* any time spent waiting for IO capacity.
*/
public static CommitFileResult fromResilientCommit(final boolean recovered, final Duration waitTime) {
return new CommitFileResult(recovered, waitTime);
} | 3.26 |
hadoop_ManifestStoreOperations_storeSupportsResilientCommit_rdh | /**
* Does the store provide rename resilience through an
* implementation of {@link #commitFile(FileEntry)}?
* If true, then that method will be invoked to commit work.
*
* @return true if resilient commit support is available.
*/
public boolean storeSupportsResilientCommit() {
return false;
} | 3.26 |
hadoop_ManifestStoreOperations_storePreservesEtagsThroughRenames_rdh | /**
* Does the store preserve etags through renames.
* If true, and if the source listing entry has an etag,
* it will be used to attempt to validate a failed rename.
*
* @param path
* path to probe.
* @return true if etag comparison is a valid strategy.
*/
public boolean storePreservesEtagsThroughRenames(Path path) {
return false;
} | 3.26 |
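The two methods above are capability probes that concrete store bindings override. A hedged caller-side sketch of how a committer might branch on them; `storeOperations`, `entry`, `source` and `dest` are assumed to be in scope, and this is not the actual committer code:

```java
// Branch on the capability probe: use the store's own resilient commit if it
// advertises one, otherwise fall back to a plain rename.
if (storeOperations.storeSupportsResilientCommit()) {
  // delegate the commit to the store's commitFile(FileEntry) implementation
  storeOperations.commitFile(entry);
} else {
  // plain rename path
  storeOperations.renameFile(source, dest);
}
```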
hadoop_ManifestStoreOperations_renameDir_rdh | /**
* Rename a dir; the default implementation forwards to
* {@link #renameFile(Path, Path)}.
* Usual "what does 'false' mean?" ambiguity.
*
* @param source
* source file
* @param dest
* destination path -which must not exist.
* @return true if the directory was created.
* @throws IOException
* failure.
*/
public boolean renameDir(Path source, Path dest) throws IOException {
return renameFile(source, dest);
} | 3.26 |
hadoop_FederationBlock_initLocalClusterPage_rdh | /**
* Initialize the Federation page of the local-cluster.
*
* @param tbody
* HTML tbody.
* @param lists
* subCluster page data list.
*/
private void initLocalClusterPage(TBODY<TABLE<Hamlet>> tbody, List<Map<String, String>> lists) {
Configuration config = this.router.getConfig();
SubClusterInfo localCluster = getSubClusterInfoByLocalCluster(config);
if (localCluster != null) {
try {
initSubClusterPageItem(tbody, localCluster, lists);
} catch (Exception e) {
LOG.error("init LocalCluster = {} page data error.", localCluster, e);
}
}
} | 3.26 |
hadoop_FederationBlock_initSubClusterPageItem_rdh | /**
* We will initialize the specific SubCluster's data within this method.
*
* @param tbody
* HTML TBody.
* @param subClusterInfo
* Sub-cluster information.
* @param lists
* Used to record data that needs to be displayed in JS.
*/
private void initSubClusterPageItem(TBODY<TABLE<Hamlet>> tbody, SubClusterInfo subClusterInfo, List<Map<String, String>> lists) {
Map<String, String> subClusterMap = new HashMap<>();
// Prepare subCluster
SubClusterId subClusterId = subClusterInfo.getSubClusterId();
String subClusterIdText = subClusterId.getId();
// Prepare WebAppAddress
String webAppAddress = subClusterInfo.getRMWebServiceAddress();
String herfWebAppAddress = "";
if ((webAppAddress != null) && (!webAppAddress.isEmpty())) {
herfWebAppAddress = WebAppUtils.getHttpSchemePrefix(this.router.getConfig()) + webAppAddress;
}
// Prepare Capability
String capability = subClusterInfo.getCapability();
ClusterMetricsInfo subClusterMetricsInfo = getClusterMetricsInfo(capability);
if (subClusterMetricsInfo == null) {
return;
}
// Prepare LastStartTime & LastHeartBeat
Date lastStartTime = new Date(subClusterInfo.getLastStartTime());
Date lastHeartBeat = new Date(subClusterInfo.getLastHeartBeat());
// Prepare Resource
long totalMB = subClusterMetricsInfo.getTotalMB();
String totalMBDesc = StringUtils.byteDesc(totalMB * BYTES_IN_MB);
long totalVirtualCores = subClusterMetricsInfo.getTotalVirtualCores();
String resources = String.format("<memory:%s, vCores:%s>", totalMBDesc, totalVirtualCores);
// Prepare Node
long totalNodes = subClusterMetricsInfo.getTotalNodes();
long activeNodes = subClusterMetricsInfo.getActiveNodes();
String nodes = String.format("<totalNodes:%s, activeNodes:%s>", totalNodes, activeNodes);
// Prepare HTML Table
String stateStyle = "color:#dc3545;font-weight:bolder";
SubClusterState state = subClusterInfo.getState();
if (SubClusterState.SC_RUNNING == state) {
stateStyle = "color:#28a745;font-weight:bolder";
}
tbody.tr().$id(subClusterIdText).td().$class("details-control").a(herfWebAppAddress, subClusterIdText).__().td().$style(stateStyle).__(state.name()).__().td().__(lastStartTime).__().td().__(lastHeartBeat).__().td(resources).td(nodes).__();
// Formatted memory information
long allocatedMB = subClusterMetricsInfo.getAllocatedMB();
String allocatedMBDesc = StringUtils.byteDesc(allocatedMB * BYTES_IN_MB);
long availableMB = subClusterMetricsInfo.getAvailableMB();
String availableMBDesc = StringUtils.byteDesc(availableMB * BYTES_IN_MB);
long pendingMB = subClusterMetricsInfo.getPendingMB();
String pendingMBDesc = StringUtils.byteDesc(pendingMB * BYTES_IN_MB);
long reservedMB = subClusterMetricsInfo.getReservedMB();
String reservedMBDesc = StringUtils.byteDesc(reservedMB * BYTES_IN_MB);
subClusterMap.put("totalmemory", totalMBDesc);
subClusterMap.put("allocatedmemory", allocatedMBDesc);
subClusterMap.put("availablememory", availableMBDesc);
subClusterMap.put("pendingmemory", pendingMBDesc);
subClusterMap.put("reservedmemory", reservedMBDesc);
subClusterMap.put("subcluster", subClusterId.getId());
subClusterMap.put("capability", capability);
lists.add(subClusterMap);
} | 3.26 |
hadoop_FederationBlock_getClusterMetricsInfo_rdh | /**
* Parse the capability and obtain the metric information of the cluster.
*
* @param capability
* metric json obtained from RM.
* @return ClusterMetricsInfo Object
*/
protected ClusterMetricsInfo getClusterMetricsInfo(String capability) {
try {
if ((capability != null) && (!capability.isEmpty())) {
JSONJAXBContext jc = new JSONJAXBContext(JSONConfiguration.mapped().rootUnwrapping(false).build(), ClusterMetricsInfo.class);
JSONUnmarshaller unmarShaller = jc.createJSONUnmarshaller();
StringReader stringReader = new StringReader(capability);
ClusterMetricsInfo clusterMetrics = unmarShaller.unmarshalFromJSON(stringReader, ClusterMetricsInfo.class);
return clusterMetrics;
}
} catch (Exception e) {
LOG.error("Cannot parse SubCluster info", e);
}
return null;
} | 3.26 |
hadoop_FederationBlock_initFederationSubClusterDetailTableJs_rdh | /**
* Initialize the subCluster details JavaScript of the Federation page.
*
* This part of the js script controls whether to display or hide the detailed
* information of the subCluster when the user clicks on the subClusterId.
*
* We will obtain the specific information of a SubCluster,
* including the information of Applications, Resources, and Nodes.
*
* @param html
* html object
* @param subClusterDetailMap
* subCluster Detail Map
*/
private void initFederationSubClusterDetailTableJs(Block html, List<Map<String, String>> subClusterDetailMap) {
Gson gson = new Gson();
html.script().$type("text/javascript").__((" var scTableData = " + gson.toJson(subClusterDetailMap)) + "; ").__();
html.script(root_url("static/federation/federation.js"));
} | 3.26 |
hadoop_FederationBlock_initHtmlPageFederation_rdh | /**
* Initialize the Html page.
*
* @param html
* html object
* @param isEnabled
* true if federation is enabled; if false, only the local cluster is rendered.
*/
private void initHtmlPageFederation(Block html, boolean isEnabled) {
List<Map<String, String>> lists = new ArrayList<>();
// Table header
TBODY<TABLE<Hamlet>> tbody = html.table("#rms").$class("cell-border").$style("width:100%").thead().tr().th(".id", "SubCluster").th(".state", "State").th(".lastStartTime", "LastStartTime").th(".lastHeartBeat", "LastHeartBeat").th(".resources", "Resources").th(".nodes", "Nodes").__().__().tbody();
try {
if (isEnabled) {
initSubClusterPage(tbody, lists);
} else {
initLocalClusterPage(tbody, lists);
}
} catch (Exception e) {
LOG.error("Cannot render Router Federation.", e);
}
// Init FederationBlockTableJs
initFederationSubClusterDetailTableJs(html, lists);
// Tips
tbody.__().__().div().p().$style("color:red").__("*The application counts are local per subcluster").__().__();
} | 3.26 |
hadoop_FederationBlock_initSubClusterPage_rdh | /**
* Initialize the Federation page of the sub-cluster.
*
* @param tbody
* HTML tbody.
* @param lists
* subCluster page data list.
*/
private void initSubClusterPage(TBODY<TABLE<Hamlet>> tbody, List<Map<String, String>> lists) {
// Sort the SubClusters
List<SubClusterInfo> subClusters = getSubClusterInfoList();
// Iterate through the sub-clusters and display data for each sub-cluster.
// If a sub-cluster cannot display data, skip it.
for (SubClusterInfo subCluster : subClusters) {
try {
initSubClusterPageItem(tbody, subCluster, lists);
} catch (Exception e) {
LOG.error("init subCluster = {} page data error.", subCluster, e);
}
}
} | 3.26 |
hadoop_ErrorTranslation_wrapWithInnerIOE_rdh | /**
* Given an outer and an inner exception, create a new IOE
* of the inner type, with the outer exception as the cause.
* The message is derived from both.
* This only works if the inner exception has a constructor which
* takes a string; if not, a PathIOException is created.
* <p>
* See {@code NetUtils}.
*
* @param <T>
* type of inner exception.
* @param path
* path of the failure.
* @param outer
* outermost exception.
* @param inner
* inner exception.
* @return the new exception.
*/
@SuppressWarnings("unchecked")
private static <T extends IOException> IOException wrapWithInnerIOE(String path, Throwable outer, T inner) {
String msg = (outer.toString() + ": ") + inner.getMessage();
Class<? extends Throwable> clazz = inner.getClass();
try {
Constructor<? extends Throwable> ctor = clazz.getConstructor(String.class);
Throwable t = ctor.newInstance(msg);
return ((T) (t.initCause(outer)));
} catch (Throwable e) {
return new PathIOException(path, msg, outer);
}
} | 3.26 |
hadoop_ErrorTranslation_maybeExtractIOException_rdh | /**
* Translate an exception if it or its inner exception is an
* IOException.
* If this condition is not met, null is returned.
*
* @param path
* path of operation.
* @param thrown
* exception
* @return a translated exception or null.
*/
public static IOException maybeExtractIOException(String path, Throwable thrown) {
if (thrown == null) {
return null;
}
// look inside
Throwable cause = thrown.getCause();
while ((cause != null) && (cause.getCause() != null)) {
cause = cause.getCause();
}
if (!(cause instanceof IOException)) {
return null;
}
// the cause can be extracted to an IOE.
// rather than just return it, we try to preserve the stack trace
// of the outer exception.
// as a new instance is created through reflection, the
// class of the returned instance will be that of the innermost,
// unless no suitable constructor is available.
final IOException ioe = ((IOException) (cause));
return wrapWithInnerIOE(path, thrown, ioe);
} | 3.26 |
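A hedged sketch of the intended call pattern: a caller that has caught a wrapped failure can try to surface the inner IOException and fall back to a generic one. The `doStoreCall()` helper and the path string are hypothetical.

```java
// Hypothetical usage of maybeExtractIOException; doStoreCall() stands in for
// whatever operation threw the wrapped exception.
try {
  doStoreCall();
} catch (RuntimeException e) {
  IOException translated = ErrorTranslation.maybeExtractIOException("s3a://bucket/path", e);
  throw translated != null ? translated : new IOException(e);
}
```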
hadoop_Quota_setQuotaInternal_rdh | /**
* Set quota for the federation path.
*
* @param path
* Federation path.
* @param locations
* Locations of the Federation path.
* @param namespaceQuota
* Name space quota.
* @param storagespaceQuota
* Storage space quota.
* @param type
* StorageType that the space quota is intended to be set on.
* @throws IOException
* If the quota system is disabled.
*/
void setQuotaInternal(String path, List<RemoteLocation> locations, long namespaceQuota, long storagespaceQuota, StorageType type) throws IOException {
rpcServer.checkOperation(OperationCategory.WRITE);
// Set quota for current path and its children mount table path.
if (locations == null) {
locations = getQuotaRemoteLocations(path);
}
if (LOG.isDebugEnabled()) {
for (RemoteLocation loc : locations) {
LOG.debug("Set quota for path: nsId: {}, dest: {}.", loc.getNameserviceId(), loc.getDest());
}
}
RemoteMethod method = new RemoteMethod("setQuota", new Class<?>[]{ String.class, long.class, long.class, StorageType.class }, new RemoteParam(), namespaceQuota, storagespaceQuota, type);
rpcClient.invokeConcurrent(locations, method, false, false);
} | 3.26 |
hadoop_Quota_eachByStorageType_rdh | /**
* Invoke consumer by each storage type.
*
* @param consumer
* the function consuming the storage type.
*/
public static void eachByStorageType(Consumer<StorageType> consumer) {
for (StorageType type : StorageType.values()) {
consumer.accept(type);
}
} | 3.26 |
hadoop_Quota_isMountEntry_rdh | /**
* Is the path a mount entry.
*
* @param path
* the path to be checked.
* @return {@code true} if path is a mount entry; {@code false} otherwise.
*/
private boolean isMountEntry(String path) {
return router.getQuotaManager().isMountEntry(path);
}
/**
* Get valid quota remote locations used in {@link #getQuotaUsage(String)}.
* Differentiate the method {@link #getQuotaRemoteLocations(String)} | 3.26 |
hadoop_Quota_andByStorageType_rdh | /**
* Invoke predicate by each storage type and bitwise AND the results.
*
* @param predicate
* the predicate to test against each storage type.
* @return true if the predicate holds for every storage type; false otherwise.
*/
public static boolean andByStorageType(Predicate<StorageType> predicate) {
boolean res = true;
for (StorageType type : StorageType.values()) {
res &= predicate.test(type);
}
return res;
} | 3.26 |
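A brief usage sketch of the storage-type helpers above; a `QuotaUsage` instance named `usage` is assumed to be in scope.

```java
// Assumed: "usage" is an existing QuotaUsage. Check whether every per-type
// quota is still unset, and reset a local per-type array in one pass.
boolean allTypeQuotasUnset = Quota.andByStorageType(
    t -> usage.getTypeQuota(t) == HdfsConstants.QUOTA_RESET);

long[] typeQuota = new long[StorageType.values().length];
Quota.eachByStorageType(t -> typeQuota[t.ordinal()] = HdfsConstants.QUOTA_RESET);
```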
hadoop_Quota_getGlobalQuota_rdh | /**
* Get global quota for the federation path.
*
* @param path
* Federation path.
* @return global quota for path.
* @throws IOException
* If the quota system is disabled.
*/
QuotaUsage getGlobalQuota(String path) throws IOException {
if (!router.isQuotaEnabled()) {
throw new IOException("The quota system is disabled in Router.");
}
long nQuota = HdfsConstants.QUOTA_RESET;
long v6 = HdfsConstants.QUOTA_RESET;
long[] typeQuota = new long[StorageType.values().length];
eachByStorageType(t -> typeQuota[t.ordinal()] = HdfsConstants.QUOTA_RESET);
RouterQuotaManager manager = this.router.getQuotaManager();
TreeMap<String, RouterQuotaUsage> pts = manager.getParentsContainingQuota(path);
Entry<String, RouterQuotaUsage> entry = pts.lastEntry();
while ((entry != null) && (((nQuota == HdfsConstants.QUOTA_RESET) || (v6 == HdfsConstants.QUOTA_RESET)) || orByStorageType(t -> typeQuota[t.ordinal()] == HdfsConstants.QUOTA_RESET))) {
String ppath = entry.getKey();
QuotaUsage quota = entry.getValue();
if (nQuota == HdfsConstants.QUOTA_RESET) {
nQuota = quota.getQuota();
}
if (v6 == HdfsConstants.QUOTA_RESET) {
v6 = quota.getSpaceQuota();
}
eachByStorageType(t -> {
if (typeQuota[t.ordinal()] == HdfsConstants.QUOTA_RESET) {
typeQuota[t.ordinal()] = quota.getTypeQuota(t);
}
});
entry = pts.lowerEntry(ppath);
}
return new QuotaUsage.Builder().quota(nQuota).spaceQuota(v6).typeQuota(typeQuota).build();
} | 3.26 |
hadoop_Quota_getQuotaUsage_rdh | /**
* Get aggregated quota usage for the federation path.
*
* @param path
* Federation path.
* @return Aggregated quota.
* @throws IOException
* If the quota system is disabled.
*/
public QuotaUsage getQuotaUsage(String path) throws IOException {
return aggregateQuota(path, getEachQuotaUsage(path));
} | 3.26 |
hadoop_Quota_aggregateQuota_rdh | /**
* Aggregate quota that queried from sub-clusters.
*
* @param path
* Federation path of the results.
* @param results
* Quota query result.
* @return Aggregated Quota.
*/
QuotaUsage aggregateQuota(String path, Map<RemoteLocation, QuotaUsage> results) throws IOException {
long nsCount = 0;
long ssCount = 0;
long[] typeCount = new long[StorageType.values().length];
long nsQuota = HdfsConstants.QUOTA_RESET;
long ssQuota = HdfsConstants.QUOTA_RESET;
long[] typeQuota = new long[StorageType.values().length];
eachByStorageType(t -> typeQuota[t.ordinal()] = HdfsConstants.QUOTA_RESET);
boolean v26 = false;
boolean isMountEntry = isMountEntry(path);
for (Map.Entry<RemoteLocation, QuotaUsage> entry : results.entrySet()) {
RemoteLocation loc = entry.getKey();
QuotaUsage usage = entry.getValue();
if (isMountEntry) {
nsCount += usage.getFileAndDirectoryCount();
ssCount += usage.getSpaceConsumed();
eachByStorageType(t -> typeCount[t.ordinal()] += usage.getTypeConsumed(t));
} else if (usage != null) {
// If quota is not set in real FileSystem, the usage
// value will return -1.
if (!RouterQuotaManager.isQuotaSet(usage)) {
v26 = true;
}
nsQuota = usage.getQuota();
ssQuota = usage.getSpaceQuota();
eachByStorageType(t -> typeQuota[t.ordinal()] = usage.getTypeQuota(t));
nsCount += usage.getFileAndDirectoryCount();
ssCount += usage.getSpaceConsumed();
eachByStorageType(t -> typeCount[t.ordinal()] += usage.getTypeConsumed(t));
LOG.debug("Get quota usage for path: nsId: {}, dest: {}," + " nsCount: {}, ssCount: {}, typeCount: {}.", loc.getNameserviceId(), loc.getDest(), usage.getFileAndDirectoryCount(), usage.getSpaceConsumed(), usage.toString(false, true, Arrays.asList(StorageType.values())));
}
}
if (isMountEntry) {
QuotaUsage quota = getGlobalQuota(path);
nsQuota = quota.getQuota();
ssQuota = quota.getSpaceQuota();
eachByStorageType(t -> typeQuota[t.ordinal()] = quota.getTypeQuota(t));
}
QuotaUsage.Builder builder = new QuotaUsage.Builder().fileAndDirectoryCount(nsCount).spaceConsumed(ssCount).typeConsumed(typeCount);
if (v26) {
builder.quota(HdfsConstants.QUOTA_RESET).spaceQuota(HdfsConstants.QUOTA_RESET);
eachByStorageType(t -> builder.typeQuota(t, HdfsConstants.QUOTA_RESET));
} else {
builder.quota(nsQuota).spaceQuota(ssQuota);
eachByStorageType(t -> builder.typeQuota(t, typeQuota[t.ordinal()]));
}
return builder.build();
} | 3.26 |
hadoop_Quota_setQuota_rdh | /**
* Set quota for the federation path.
*
* @param path
* Federation path.
* @param namespaceQuota
* Name space quota.
* @param storagespaceQuota
* Storage space quota.
* @param type
* StorageType that the space quota is intended to be set on.
* @param checkMountEntry
* whether to check the path is a mount entry.
* @throws AccessControlException
* If the quota system is disabled or if
* checkMountEntry is true and the path is a mount entry.
*/
public void setQuota(String path, long namespaceQuota, long storagespaceQuota, StorageType type, boolean checkMountEntry) throws IOException {
if (!router.isQuotaEnabled()) {
throw new IOException("The quota system is disabled in Router.");
}
if (checkMountEntry && isMountEntry(path)) {
throw new AccessControlException((("Permission denied: " + RouterRpcServer.getRemoteUser()) + " is not allowed to change quota of ") + path);
}
setQuotaInternal(path, null, namespaceQuota, storagespaceQuota, type);
} | 3.26 |
hadoop_Quota_getQuotaRemoteLocations_rdh | /**
* Get all quota remote locations across subclusters under given
* federation path.
*
* @param path
* Federation path.
* @return List of quota remote locations.
* @throws IOException
*/
private List<RemoteLocation> getQuotaRemoteLocations(String path) throws IOException {
List<RemoteLocation> locations = new ArrayList<>();
RouterQuotaManager manager = this.router.getQuotaManager();
if (manager != null) {
Set<String> childrenPaths = manager.getPaths(path);
for (String childPath : childrenPaths) {
locations.addAll(rpcServer.getLocationsForPath(childPath, false, false));
}
}
if (locations.size() >= 1) {
return locations;
} else {
locations.addAll(rpcServer.getLocationsForPath(path, false, false));
return locations;
}
} | 3.26 |
hadoop_Quota_orByStorageType_rdh | /**
* Invoke predicate by each storage type and bitwise inclusive OR the results.
*
* @param predicate
* the predicate to test against each storage type.
* @return true if the predicate holds for at least one storage type; false otherwise.
*/
public static boolean orByStorageType(Predicate<StorageType> predicate) {
boolean res = false;
for (StorageType type : StorageType.values()) {
res |= predicate.test(type);
}
return res;
} | 3.26 |
hadoop_Quota_getEachQuotaUsage_rdh | /**
* Get quota usage for the federation path.
*
* @param path
* Federation path.
* @return quota usage for each remote location.
* @throws IOException
* If the quota system is disabled.
*/
Map<RemoteLocation, QuotaUsage> getEachQuotaUsage(String path) throws IOException {
rpcServer.checkOperation(OperationCategory.READ);
if (!router.isQuotaEnabled()) {
throw new IOException("The quota system is disabled in Router.");
}
final List<RemoteLocation> quotaLocs = getValidQuotaLocations(path);
RemoteMethod method = new RemoteMethod("getQuotaUsage", new Class<?>[]{ String.class }, new RemoteParam());
Map<RemoteLocation, QuotaUsage> results = rpcClient.invokeConcurrent(quotaLocs, method, true, false, QuotaUsage.class);
return results;
} | 3.26 |
hadoop_BlockResolver_resolve_rdh | /**
*
* @param s
* the external reference.
* @return sequence of blocks that make up the reference.
*/
public Iterable<BlockProto> resolve(FileStatus s) {
List<Long> lengths = blockLengths(s);
ArrayList<BlockProto> ret = new ArrayList<>(lengths.size());
long tot = 0;
for (long l : lengths) {
tot += l;
ret.add(buildBlock(nextId(), l));
}
if (tot != s.getLen()) {
// log a warning?
throw new IllegalStateException((("Expected " + s.getLen()) + " found ") + tot);
}
return ret;
} | 3.26 |
hadoop_BlockResolver_preferredBlockSize_rdh | /**
*
* @param status
* the external reference.
* @return the block size to assign to this external reference.
*/
public long preferredBlockSize(FileStatus status) {
return status.getBlockSize();
} | 3.26 |
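resolve() above requires blockLengths() to sum exactly to the file length. A minimal illustrative sketch of how a concrete resolver might derive those lengths from preferredBlockSize(); actual implementations and the exact signature may differ.

```java
// Illustrative only: split the file into blocks of preferredBlockSize(),
// with a final short block, so that the lengths sum to s.getLen().
protected List<Long> blockLengths(FileStatus s) {
  List<Long> lengths = new ArrayList<>();
  long remaining = s.getLen();
  long blockSize = preferredBlockSize(s);
  while (remaining > 0) {
    long len = Math.min(blockSize, remaining);
    lengths.add(len);
    remaining -= len;
  }
  return lengths;
}
```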
hadoop_MapReduceJobPropertiesParser_accept_rdh | // Accepts a key if there is a corresponding key in the current mapreduce
// configuration
private boolean accept(String key) {
return getLatestKeyName(key) != null;
} | 3.26 |
hadoop_MapReduceJobPropertiesParser_extractMinHeapOpts_rdh | /**
* Extracts the -Xms heap option from the specified string.
*/
public static void extractMinHeapOpts(String javaOptions, List<String> heapOpts, List<String> others) {
for (String opt : javaOptions.split(" ")) {
Matcher matcher = MIN_HEAP_PATTERN.matcher(opt);
if (matcher.find()) {
heapOpts.add(opt);
} else {
others.add(opt);
}
}
} | 3.26 |
hadoop_MapReduceJobPropertiesParser_fromString_rdh | // Maps the value of the specified key.
private DataType<?> fromString(String key, String value) {
DefaultDataType defaultValue = new DefaultDataType(value);
if (value != null) {
// check known configs
// job-name
String latestKey = getLatestKeyName(key);
if (MRJobConfig.JOB_NAME.equals(latestKey)) {
return new JobName(value);
}
// user-name
if (MRJobConfig.USER_NAME.equals(latestKey)) {
return new UserName(value);
}
// queue-name
if (MRJobConfig.QUEUE_NAME.equals(latestKey)) {
return new QueueName(value);
}
if (MRJobConfig.MAP_JAVA_OPTS.equals(latestKey)
|| MRJobConfig.REDUCE_JAVA_OPTS.equals(latestKey)) {
List<String> heapOptions = new ArrayList<String>();
extractMaxHeapOpts(value, heapOptions, new ArrayList<String>());
extractMinHeapOpts(value, heapOptions, new ArrayList<String>());
return new DefaultDataType(StringUtils.join(heapOptions, ' '));
}
// TODO compression?
// TODO Other job configs like FileOutputFormat/FileInputFormat etc
// check if the config parameter represents a number
try {
format.parse(value);
return defaultValue;
} catch (ParseException pe) {
}
// check if the config parameters represents a boolean
// avoiding exceptions
if ("true".equals(value) || "false".equals(value)) {
return defaultValue;
}
// check if the config parameter represents a class
if (latestKey.endsWith(".class") || latestKey.endsWith(".codec")) {
return new ClassName(value);
}
// handle distributed cache sizes and timestamps
if (latestKey.endsWith("sizes") || latestKey.endsWith(".timestamps")) {
return defaultValue;
}
// check if the config parameter represents a file-system path
// TODO: Make this concrete .location .path .dir .jar?
if (((((((latestKey.endsWith(".dir") || latestKey.endsWith(".location")) || latestKey.endsWith(".jar")) || latestKey.endsWith(".path")) || latestKey.endsWith(".logfile")) || latestKey.endsWith(".file")) || latestKey.endsWith(".files")) || latestKey.endsWith(".archives")) {
try {
return new FileName(value);
} catch (Exception ioe) {
}
}
}
return null;
} | 3.26 |
hadoop_MapReduceJobPropertiesParser_extractMaxHeapOpts_rdh | /**
* Extracts the -Xmx heap option from the specified string.
*/
public static void extractMaxHeapOpts(final String javaOptions, List<String> heapOpts, List<String> others) {
for (String opt : javaOptions.split(" ")) {
Matcher matcher = MAX_HEAP_PATTERN.matcher(opt);
if (matcher.find()) {
heapOpts.add(opt);
} else {
others.add(opt);
}
}
} | 3.26 |
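A short usage sketch of the two extractors above, mirroring how fromString() combines them; the non-heap options are discarded here, exactly as in that method.

```java
// Collect the -Xmx and -Xms options from a java.opts string; other options
// are thrown away, as in fromString() above.
String javaOpts = "-Xms1g -Xmx2g -Dfoo=bar";
List<String> heapOpts = new ArrayList<>();
MapReduceJobPropertiesParser.extractMaxHeapOpts(javaOpts, heapOpts, new ArrayList<>());
MapReduceJobPropertiesParser.extractMinHeapOpts(javaOpts, heapOpts, new ArrayList<>());
// heapOpts should now contain ["-Xmx2g", "-Xms1g"], assuming the patterns
// match the standard -Xmx/-Xms forms.
```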
hadoop_MapReduceJobPropertiesParser_getLatestKeyName_rdh | // Finds a corresponding key for the specified key in the current mapreduce
// setup.
// Note that this API uses a cached copy of the Configuration object. This is
// purely for performance reasons.
private String getLatestKeyName(String key) {
// set the specified key
configuration.set(key, key);
try {
// check if keys in MRConfig maps to the specified key.
for (Field f : mrFields) {
String mrKey = f.get(f.getName()).toString();
if (configuration.get(mrKey) != null) {
return mrKey;
}
}
// unset the key
return null;
} catch (IllegalAccessException iae) {
throw new RuntimeException(iae);
} finally {
// clean up!
configuration.clear();
}
} | 3.26 |
hadoop_ResourceRequestSetKey_m0_rdh | /**
* Extract the corresponding ResourceRequestSetKey for an allocated container
* from a given set. Return null if not found.
*
* @param container
* the allocated container
* @param keys
* the set of keys to look from
* @return ResourceRequestSetKey
*/
public static ResourceRequestSetKey m0(Container container, Set<ResourceRequestSetKey> keys) {
ResourceRequestSetKey resourceRequestSetKey = new ResourceRequestSetKey(container.getAllocationRequestId(), container.getPriority(), container.getResource(), container.getExecutionType());
if (keys.contains(resourceRequestSetKey)) {
return resourceRequestSetKey;
}
if (container.getAllocationRequestId() > 0) {
// If no exact match, look for the one with the same (non-zero)
// allocationRequestId
for (ResourceRequestSetKey candidate : keys) {
if (candidate.getAllocationRequestId() == container.getAllocationRequestId()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Using possible match for {} : {}", resourceRequestSetKey, candidate);
}
return candidate;
}
}
}
if (LOG.isDebugEnabled()) {
LOG.debug("not match found for container {}.", container.getId());
for (ResourceRequestSetKey candidate : keys) {
LOG.debug("candidate set keys: {}.", candidate.toString());
}
}
return null;
} | 3.26 |
hadoop_SpillCallBackPathsFinder_getInvalidSpillEntries_rdh | /**
* Gets the set of path:pos of the entries that were accessed incorrectly.
*
* @return a set of string in the format of {@literal Path[Pos]}
*/
public Set<String> getInvalidSpillEntries() {
Set<String> result = new LinkedHashSet<>();
for (Entry<Path, Set<Long>> spillMapEntry : invalidAccessMap.entrySet()) {
for (Long singleEntry : spillMapEntry.getValue()) {
result.add(String.format("%s[%d]", spillMapEntry.getKey(), singleEntry));
}
}
return result;
} | 3.26 |
hadoop_PlacementConstraint_type_rdh | /**
* The type of placement.
*/
public PlacementConstraint type(PlacementType type) {
this.type = type;
return this;
} | 3.26 |
hadoop_PlacementConstraint_scope_rdh | /**
* The scope of placement.
*/
public PlacementConstraint scope(PlacementScope scope) {
this.scope = scope;
return this;
} | 3.26 |
hadoop_PlacementConstraint_maxCardinality_rdh | /**
* When placement type is cardinality, the maximum number of containers of the
* depending component that a host should have, where containers of this
* component can be allocated on.
*/
public PlacementConstraint maxCardinality(Long maxCardinality) {
this.maxCardinality = maxCardinality;
return this;
} | 3.26 |
hadoop_PlacementConstraint_toIndentedString_rdh | /**
* Convert the given object to string with each line indented by 4 spaces
* (except the first line).
*/
private String toIndentedString(Object o) {
if (o == null) {
return "null";
}
return o.toString().replace("\n", "\n ");
} | 3.26 |
hadoop_PlacementConstraint_name_rdh | /**
* An optional name associated to this constraint.
*/
public PlacementConstraint name(String name) {
this.name = name;
return this;
} | 3.26 |
hadoop_PlacementConstraint_minCardinality_rdh | /**
* When placement type is cardinality, the minimum number of containers of the
* depending component that a host should have, where containers of this
* component can be allocated on.
*/
public PlacementConstraint minCardinality(Long minCardinality) {
this.minCardinality = minCardinality;
return this;
} | 3.26 |
hadoop_PlacementConstraint_nodePartitions_rdh | /**
* Node partitions where the containers of this component can run.
*/
public PlacementConstraint nodePartitions(List<String> nodePartitions) {
this.nodePartitions = nodePartitions;
return this;
} | 3.26 |
hadoop_PlacementConstraint_nodeAttributes_rdh | /**
* Node attributes are a set of key:value(s) pairs associated with nodes.
*/
public PlacementConstraint nodeAttributes(Map<String, List<String>> nodeAttributes) {
this.nodeAttributes = nodeAttributes;
return this;
} | 3.26 |
hadoop_PlacementConstraint_targetTags_rdh | /**
* The name of the components that this component's placement policy is
* depending upon are added as target tags. So for affinity say, this
* component's containers are requesting to be placed on hosts where
* containers of the target tag component(s) are running on. Target tags can
* also contain the name of this component, in which case it implies that for
* anti-affinity say, no more than one container of this component can be
* placed on a host. Similarly, for cardinality, it would mean that containers
* of this component is requesting to be placed on hosts where at least
* minCardinality but no more than maxCardinality containers of the target tag
* component(s) are running.
*/
public PlacementConstraint targetTags(List<String> targetTags) {
this.targetTags = targetTags;
return this;
} | 3.26 |
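Since all of the setters above return `this`, they can be chained. A hedged example of building a constraint; the `PlacementType.ANTI_AFFINITY` and `PlacementScope.NODE` constants are assumed to exist in the corresponding enums.

```java
// Anti-affinity at node scope against the "web" component's own containers:
// no more than one "web" container per host.
PlacementConstraint constraint = new PlacementConstraint()
    .name("web-anti-affinity")
    .type(PlacementType.ANTI_AFFINITY)   // assumed enum constant
    .scope(PlacementScope.NODE)          // assumed enum constant
    .targetTags(Collections.singletonList("web"));
```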
hadoop_HttpExceptionUtils_createServletExceptionResponse_rdh | /**
* Creates a HTTP servlet response serializing the exception in it as JSON.
*
* @param response
* the servlet response
* @param status
* the error code to set in the response
* @param ex
* the exception to serialize in the response
* @throws IOException
* thrown if there was an error while creating the
* response
*/
public static void createServletExceptionResponse(HttpServletResponse response, int status, Throwable ex) throws IOException {
response.setStatus(status);
response.setContentType(APPLICATION_JSON_MIME);
Map<String, Object> json = new LinkedHashMap<String, Object>();
json.put(ERROR_MESSAGE_JSON, getOneLineMessage(ex));
json.put(ERROR_EXCEPTION_JSON, ex.getClass().getSimpleName());
json.put(ERROR_CLASSNAME_JSON, ex.getClass().getName());
Map<String, Object> jsonResponse = Collections.singletonMap(ERROR_JSON, json);
Writer writer = response.getWriter();
JsonSerialization.writer().writeValue(writer, jsonResponse);
writer.flush();
} | 3.26 |
hadoop_HttpExceptionUtils_throwEx_rdh | // trick, riding on generics to throw an undeclared exception
private static void throwEx(Throwable ex) {
HttpExceptionUtils.<RuntimeException>throwException(ex);
} | 3.26 |
hadoop_HttpExceptionUtils_createJerseyExceptionResponse_rdh | /**
* Creates a HTTP JAX-RPC response serializing the exception in it as JSON.
*
* @param status
* the error code to set in the response
* @param ex
* the exception to serialize in the response
* @return the JAX-RPC response with the set error and JSON encoded exception
*/
public static Response createJerseyExceptionResponse(Response.Status status, Throwable ex) {
Map<String, Object> json = new LinkedHashMap<String, Object>();
json.put(ERROR_MESSAGE_JSON, getOneLineMessage(ex));
json.put(ERROR_EXCEPTION_JSON, ex.getClass().getSimpleName());
json.put(ERROR_CLASSNAME_JSON, ex.getClass().getName());
Map<String, Object> response = Collections.singletonMap(ERROR_JSON, json);
return Response.status(status).type(MediaType.APPLICATION_JSON).entity(response).build();
} | 3.26 |
hadoop_HttpExceptionUtils_validateResponse_rdh | /**
* Validates the status of an <code>HttpURLConnection</code> against an
* expected HTTP status code. If the current status code is not the expected
* one it throws an exception with a detail message using Server side error
* messages if available.
* <p>
* <b>NOTE:</b> this method will throw the deserialized exception even if not
* declared in the <code>throws</code> of the method signature.
*
* @param conn
* the <code>HttpURLConnection</code>.
* @param expectedStatus
* the expected HTTP status code.
* @throws IOException
* thrown if the current status code does not match the
* expected one.
*/
@SuppressWarnings("unchecked")
public static void validateResponse(HttpURLConnection conn, int expectedStatus) throws IOException {
if (conn.getResponseCode() != expectedStatus) {
Exception toThrow;
InputStream es = null;
try {
es = conn.getErrorStream();
Map json = JsonSerialization.mapReader().readValue(es);
json = ((Map) (json.get(ERROR_JSON)));
String exClass = ((String) (json.get(ERROR_CLASSNAME_JSON)));
String exMsg = ((String) (json.get(ERROR_MESSAGE_JSON)));
if (exClass != null) {
try {
ClassLoader cl = HttpExceptionUtils.class.getClassLoader();
Class klass = cl.loadClass(exClass);
Constructor constr = klass.getConstructor(String.class);
toThrow = ((Exception) (constr.newInstance(exMsg)));
} catch (Exception ex) {
toThrow = new IOException(String.format("HTTP status [%d], exception [%s], message [%s], URL [%s]", conn.getResponseCode(), exClass, exMsg, conn.getURL()));
}
} else {
String msg = (exMsg != null) ? exMsg : conn.getResponseMessage();
toThrow = new IOException(String.format("HTTP status [%d], message [%s], URL [%s]", conn.getResponseCode(), msg, conn.getURL()));
}
} catch (Exception ex) {
toThrow = new IOException(String.format("HTTP status [%d], message [%s], URL [%s], exception [%s]", conn.getResponseCode(), conn.getResponseMessage(), conn.getURL(), ex.toString()), ex);
} finally {
if (es != null) {
try {
es.close();
} catch (IOException ex) {
// ignore
}
}
}
throwEx(toThrow);
}
} | 3.26 |
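A hedged client-side sketch of validateResponse(): open a connection, then let the helper re-throw the server-side exception if the status code is unexpected. The URL is illustrative only.

```java
// Illustrative caller; any non-200 response is turned into the server-side
// exception (if it can be deserialized) or an IOException.
HttpURLConnection conn =
    (HttpURLConnection) new URL("http://example.host:9870/some/endpoint").openConnection();
conn.setRequestMethod("GET");
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
```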
hadoop_ClientGSIContext_receiveResponseState_rdh | /**
* Client side implementation for receiving state alignment info
* in responses.
*/
@Override
public synchronized void receiveResponseState(RpcResponseHeaderProto header) {
if (header.hasRouterFederatedState()) {
routerFederatedState = mergeRouterFederatedState(this.routerFederatedState, header.getRouterFederatedState());
} else {
lastSeenStateId.accumulate(header.getStateId());
}
} | 3.26 |
hadoop_ClientGSIContext_updateRequestState_rdh | /**
* Client side implementation for providing state alignment info in requests.
*/
@Override
public synchronized void updateRequestState(RpcRequestHeaderProto.Builder header) {
if (lastSeenStateId.get() != Long.MIN_VALUE) {
header.setStateId(lastSeenStateId.get());
}
if (routerFederatedState != null) {
header.setRouterFederatedState(routerFederatedState);
}
} | 3.26 |
hadoop_ClientGSIContext_getRouterFederatedStateMap_rdh | /**
* Utility function to parse routerFederatedState field in RPC headers.
*/
public static Map<String, Long> getRouterFederatedStateMap(ByteString byteString) {
if (byteString != null) {
try {
RouterFederatedStateProto federatedState = RouterFederatedStateProto.parseFrom(byteString);
return federatedState.getNamespaceStateIdsMap();
} catch (InvalidProtocolBufferException e) {
// Ignore this exception and will return an empty map
}
}
return Collections.emptyMap();
} | 3.26 |
hadoop_ClientGSIContext_mergeRouterFederatedState_rdh | /**
* Merge state1 and state2 to get the max value for each namespace.
*
* @param state1
* input ByteString.
* @param state2
* input ByteString.
* @return one ByteString object which contains the max value of each namespace.
*/
public static ByteString mergeRouterFederatedState(ByteString state1, ByteString state2) {
Map<String, Long> v1 = new HashMap<>(getRouterFederatedStateMap(state1));
Map<String, Long> mapping2 = getRouterFederatedStateMap(state2);
mapping2.forEach((k, v) -> {
long localValue = v1.getOrDefault(k, 0L);
v1.put(k, Math.max(v, localValue));
});
RouterFederatedStateProto.Builder v4 = RouterFederatedStateProto.newBuilder();
v1.forEach(v4::putNamespaceStateIds);
return v4.build().toByteString();
} | 3.26 |
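A small worked example of the merge: for each namespace the larger state id wins, and namespaces present in only one input are kept.

```java
// {ns0=5, ns1=7} merged with {ns0=9} yields {ns0=9, ns1=7}.
ByteString s1 = RouterFederatedStateProto.newBuilder()
    .putNamespaceStateIds("ns0", 5L)
    .putNamespaceStateIds("ns1", 7L)
    .build().toByteString();
ByteString s2 = RouterFederatedStateProto.newBuilder()
    .putNamespaceStateIds("ns0", 9L)
    .build().toByteString();
ByteString merged = ClientGSIContext.mergeRouterFederatedState(s1, s2);
// ClientGSIContext.getRouterFederatedStateMap(merged) -> {ns0=9, ns1=7}
```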
hadoop_ClientGSIContext_updateResponseState_rdh | /**
* Client side implementation only receives state alignment info.
* It does not provide state alignment info therefore this does nothing.
*/
@Override
public void updateResponseState(RpcResponseHeaderProto.Builder header) {
// Do nothing.
} | 3.26 |
hadoop_RouterDistCpProcedure_enableWrite_rdh | /**
* Enable write.
*/
@Override
protected void enableWrite() throws IOException {
// do nothing.
} | 3.26 |
hadoop_StreamXmlRecordReader_nextState_rdh | /* also updates firstMatchStart_; */
int nextState(int state, int input, int bufPos) {
switch (state) {
case CDATA_UNK :
case CDATA_OUT :
switch (input) {
case CDATA_BEGIN :
return CDATA_IN;
case CDATA_END :
if (state == CDATA_OUT) {
// System.out.println("buggy XML " + bufPos);
}
return CDATA_OUT;
case RECORD_MAYBE :
return state == CDATA_UNK ? CDATA_UNK : f0;
}
break;
case CDATA_IN :
return input == CDATA_END ? CDATA_OUT : CDATA_IN;
}
throw new IllegalStateException((((((state + " ") + input) + " ") + bufPos) + " ") + splitName_);
} | 3.26 |
hadoop_GetClusterNodeAttributesResponse_newInstance_rdh | /**
* Create instance of GetClusterNodeAttributesResponse.
*
* @param attributes
* Map of Node attributeKey to Type.
* @return GetClusterNodeAttributesResponse.
*/
public static GetClusterNodeAttributesResponse newInstance(Set<NodeAttributeInfo> attributes) {
GetClusterNodeAttributesResponse response = Records.newRecord(GetClusterNodeAttributesResponse.class);
response.setNodeAttributes(attributes);
return response;
} | 3.26 |
hadoop_S3ACachingBlockManager_read_rdh | /**
* Reads into the given {@code buffer} {@code size} bytes from the underlying file
* starting at {@code startOffset}.
*
* @param buffer
* the buffer to read data in to.
* @param startOffset
* the offset at which reading starts.
* @param size
* the number of bytes to read.
* @return number of bytes read.
*/
@Override
public int read(ByteBuffer buffer, long startOffset, int size) throws IOException {
return this.reader.read(buffer, startOffset, size);
} | 3.26 |
hadoop_AbfsStatistic_getStatName_rdh | /**
* Getter for statistic name.
*
* @return Name of statistic.
*/
public String getStatName() {
return statName;
} | 3.26 |
hadoop_AbfsStatistic_getStatNameFromHttpCall_rdh | /**
* Get the statistic name using the http call name.
*
* @param httpCall
* The HTTP call used to get the statistic name.
* @return Statistic name.
*/
public static String getStatNameFromHttpCall(String httpCall) {
return HTTP_CALL_TO_NAME_MAP.get(httpCall);
} | 3.26 |
hadoop_AbfsStatistic_getHttpCall_rdh | /**
* Getter for http call for HTTP duration trackers.
*
* @return http call of a statistic.
*/
public String getHttpCall() {
return httpCall;
} | 3.26 |
hadoop_AbfsStatistic_getStatDescription_rdh | /**
* Getter for statistic description.
*
* @return Description of statistic.
*/
public String getStatDescription() {
return statDescription;
} | 3.26 |
hadoop_NMClient_getLocalizationStatuses_rdh | /**
* Get the localization statuses of a container.
*
* @param containerId
* the Id of the container
* @param nodeId
* node Id of the container
* @return the localization statuses of the container.
* @throws YarnException
* YarnException.
* @throws IOException
* IOException.
*/
@InterfaceStability.Unstable
public List<LocalizationStatus> getLocalizationStatuses(ContainerId containerId, NodeId nodeId) throws YarnException, IOException {
return null;
} | 3.26 |
hadoop_NMClient_localize_rdh | /**
* Localize resources for a container.
*
* @param containerId
* the ID of the container
* @param nodeId
* node Id of the container
* @param localResources
* resources to localize
*/
@InterfaceStability.Unstable
public void localize(ContainerId containerId, NodeId nodeId, Map<String, LocalResource> localResources) throws YarnException, IOException {
// do nothing.
} | 3.26 |
hadoop_NMClient_getNMTokenCache_rdh | /**
* Get the NM token cache of the <code>NMClient</code>. This cache must be
* shared with the {@link AMRMClient} that requested the containers managed
* by this <code>NMClient</code>
* <p>
* If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
* singleton instance will be used.
*
* @return the NM token cache
*/
public NMTokenCache getNMTokenCache() {
return nmTokenCache;
} | 3.26 |
hadoop_NMClient_createNMClient_rdh | /**
* Create a new instance of NMClient.
*
* @param name
* name of the client.
* @return the new NMClient instance.
*/
@Public
public static NMClient createNMClient(String name) {
NMClient client = new NMClientImpl(name);
return client;
} | 3.26 |
hadoop_NMClient_setNMTokenCache_rdh | /**
* Set the NM Token cache of the <code>NMClient</code>. This cache must be
* shared with the {@link AMRMClient} that requested the containers managed
* by this <code>NMClient</code>
* <p>
* If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
* singleton instance will be used.
*
* @param nmTokenCache
* the NM token cache to use.
*/
public void setNMTokenCache(NMTokenCache nmTokenCache) {
this.nmTokenCache = nmTokenCache;
} | 3.26 |
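A minimal setup sketch combining createNMClient() and setNMTokenCache() from the rows above; `conf` is assumed to be an existing YarnConfiguration, and init()/start() come from the service lifecycle NMClient inherits.

```java
// Share the default NM token cache with the AMRMClient that requested the
// containers, as the javadoc above requires.
NMClient nmClient = NMClient.createNMClient("my-nm-client");
nmClient.setNMTokenCache(NMTokenCache.getSingleton());
nmClient.init(conf);   // conf: an existing YarnConfiguration (assumed)
nmClient.start();
```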
hadoop_NMClient_getNodeIdOfStartedContainer_rdh | /**
* Get the NodeId of the node on which the container is running. It returns
* null if the container is not found or if it is not running.
*
* @param containerId
* Container Id of the container.
* @return NodeId of the container on which it is running.
*/
public NodeId getNodeIdOfStartedContainer(ContainerId containerId) {
return null;
} | 3.26 |
hadoop_WebHdfs_createWebHdfsFileSystem_rdh | /**
* Returns a new {@link WebHdfsFileSystem}, with the given configuration.
*
* @param conf
* configuration
* @return new WebHdfsFileSystem
*/
private static WebHdfsFileSystem createWebHdfsFileSystem(Configuration conf) {
WebHdfsFileSystem fs = new WebHdfsFileSystem();
fs.setConf(conf);
return fs;
} | 3.26 |
hadoop_MountVolumeMap_getCapacityRatioByMountAndStorageType_rdh | /**
* Return the capacity ratio.
* If none exists, return 1 to use the full capacity.
*/
double getCapacityRatioByMountAndStorageType(String mount, StorageType storageType) {
if (mountVolumeMapping.containsKey(mount)) {
return mountVolumeMapping.get(mount).getCapacityRatio(storageType);
}
return 1;
} | 3.26 |
hadoop_FlowActivityDocument_merge_rdh | /**
* Merge the {@link FlowActivityDocument} that is passed with the current
* document for upsert.
*
* @param flowActivityDocument
* that has to be merged
*/
@Override
public void merge(FlowActivityDocument flowActivityDocument) {
if (flowActivityDocument.getDayTimestamp() > 0) {
this.dayTimestamp = flowActivityDocument.getDayTimestamp();
}
this.flowName = flowActivityDocument.getFlowName();
this.user = flowActivityDocument.getUser();
this.id = flowActivityDocument.getId();
this.flowActivities.addAll(flowActivityDocument.getFlowActivities());
} | 3.26 |
hadoop_CsiGrpcClient_createNodeBlockingStub_rdh | /**
* Creates a blocking stub for CSI node plugin on the given channel.
*
* @return the blocking stub
*/
public NodeBlockingStub createNodeBlockingStub() {
return NodeGrpc.newBlockingStub(channel);
} | 3.26 |
hadoop_CsiGrpcClient_close_rdh | /**
* Shutdown the communication channel gracefully,
* wait for 5 seconds before it is enforced.
*/
@Override
public void close() {
try {
this.channel.shutdown().awaitTermination(5, TimeUnit.SECONDS);
} catch (InterruptedException e) {
LOG.error("Failed to gracefully shutdown" + " gRPC communication channel in 5 seconds", e);
}
} | 3.26 |
hadoop_CsiGrpcClient_createControllerBlockingStub_rdh | /**
* Creates a blocking stub for CSI controller plugin on the given channel.
*
* @return the blocking stub
*/
public ControllerBlockingStub createControllerBlockingStub() {
return ControllerGrpc.newBlockingStub(channel);
} | 3.26 |
hadoop_CsiGrpcClient_createIdentityBlockingStub_rdh | /**
* Creates a blocking stub for CSI identity plugin on the given channel.
*
* @return the blocking stub
*/
public IdentityBlockingStub createIdentityBlockingStub() {
return IdentityGrpc.newBlockingStub(channel);
} | 3.26 |
hadoop_FsGetter_get_rdh | /**
* Gets file system instance of given uri.
*
* @param uri
* uri.
* @param conf
* configuration.
* @throws IOException
* raised on errors performing I/O.
* @return FileSystem.
*/
public FileSystem get(URI uri, Configuration conf) throws IOException {
return FileSystem.get(uri, conf);
} | 3.26 |
hadoop_ConfiguredNodeLabels_setLabelsByQueue_rdh | /**
* Set node labels for a specific queue.
*
* @param queuePath
* path of the queue
* @param nodeLabels
* configured node labels to set
*/
public void setLabelsByQueue(String queuePath, Collection<String> nodeLabels) {
f0.put(queuePath, new HashSet<>(nodeLabels));
} | 3.26 |
hadoop_ConfiguredNodeLabels_getAllConfiguredLabels_rdh | /**
* Get all configured node labels aggregated from each queue.
*
* @return all node labels
*/
public Set<String> getAllConfiguredLabels() {
Set<String> nodeLabels = f0.values().stream().flatMap(Set::stream).collect(Collectors.toSet());
if (nodeLabels.size() == 0) {
nodeLabels = NO_LABEL;
}
return nodeLabels;
} | 3.26 |
hadoop_DeletionTaskRecoveryInfo_getDeletionTimestamp_rdh | /**
* Return the deletion timestamp.
*
* @return the deletion timestamp.
*/
public long getDeletionTimestamp() {
return deletionTimestamp;
} | 3.26 |
hadoop_DeletionTaskRecoveryInfo_getTask_rdh | /**
* Return the recovered DeletionTask.
*
* @return the recovered DeletionTask.
*/
public DeletionTask getTask() {
return task;
} | 3.26 |
hadoop_DeletionTaskRecoveryInfo_getSuccessorTaskIds_rdh | /**
* Return all of the dependent DeletionTasks.
*
* @return the dependent DeletionTasks.
*/
public List<Integer> getSuccessorTaskIds() {
return successorTaskIds;
} | 3.26 |
hadoop_DeSelectFields_toString_rdh | /**
* use literals as toString.
*
* @return the literals of this type.
*/
@Override
public String toString() {
return literals;
} | 3.26 |
hadoop_DeSelectFields_obtainType_rdh | /**
* Obtain the <code>DeSelectType</code> by the literals given behind
* <code>deSelects</code> in URL.
* <br> e.g: deSelects="resourceRequests"
*
* @param literals
* e.g: resourceRequests
* @return <code>DeSelectType</code> e.g: DeSelectType.RESOURCE_REQUESTS
*/
public static DeSelectType obtainType(String literals) {
for (DeSelectType type : values()) {
if (type.literals.equalsIgnoreCase(literals)) {
return type;
}
}
return null;
} | 3.26 |
hadoop_DeSelectFields_initFields_rdh | /**
* Initial DeSelectFields with unselected fields.
*
* @param unselectedFields
* a set of unselected field.
*/
public void initFields(Set<String> unselectedFields) {
if (unselectedFields == null) {
return;
}
for (String field : unselectedFields) {
if (!field.trim().isEmpty()) {
String[] literalsArray = field.split(",");
for (String literals : literalsArray) {
if ((literals != null) && (!literals.trim().isEmpty())) {
DeSelectType type = DeSelectType.obtainType(literals);
if (type == null) {
LOG.warn("Invalid deSelects string " + literals.trim());
DeSelectType[] typeArray = DeSelectType.values();
String allSupportLiterals = Arrays.toString(typeArray);
throw new BadRequestException((("Invalid deSelects string " + literals.trim()) +
" specified. It should be one of ") + allSupportLiterals);
} else {
this.types.add(type);
}
}
}
}
}
} | 3.26 |
hadoop_DeSelectFields_contains_rdh | /**
* Determine to deselect type should be handled or not.
*
* @param type
* deselected type
* @return true if the deselect type should be handled
*/
public boolean contains(DeSelectType type) {
return types.contains(type);
} | 3.26 |
hadoop_AMRMProxyService_authorizeAndGetInterceptorChain_rdh | /**
* Authorizes the request and returns the application specific request
* processing pipeline.
*
* @return the interceptor wrapper instance
* @throws YarnException
* if fails
*/
private RequestInterceptorChainWrapper authorizeAndGetInterceptorChain() throws YarnException {
AMRMTokenIdentifier tokenIdentifier = YarnServerSecurityUtils.authorizeRequest();
return getInterceptorChain(tokenIdentifier);
} | 3.26 |
hadoop_AMRMProxyService_initializePipeline_rdh | /**
* Initializes the request interceptor pipeline for the specified application.
*
* @param applicationAttemptId
* attempt id
* @param user
* user name
* @param amrmToken
* amrmToken issued by RM
* @param localToken
* amrmToken issued by AMRMProxy
* @param recoveredDataMap
* the recovered states for AMRMProxy from NMSS
* @param isRecovery
* whether this is to recover a previously existing pipeline
*/
protected void initializePipeline(ApplicationAttemptId applicationAttemptId, String user, Token<AMRMTokenIdentifier> amrmToken, Token<AMRMTokenIdentifier> localToken, Map<String, byte[]> recoveredDataMap, boolean isRecovery, Credentials credentials) {
RequestInterceptorChainWrapper chainWrapper = null;
synchronized(applPipelineMap) {
if (applPipelineMap.containsKey(applicationAttemptId.getApplicationId())) {
LOG.warn(("Request to start an already existing appId was received. " + " This can happen if an application failed and a new attempt ") + "was created on this machine. ApplicationId: {}.", applicationAttemptId);
RequestInterceptorChainWrapper chainWrapperBackup = this.applPipelineMap.get(applicationAttemptId.getApplicationId());
if (((chainWrapperBackup != null) &&
(chainWrapperBackup.getApplicationAttemptId() != null)) && (!chainWrapperBackup.getApplicationAttemptId().equals(applicationAttemptId))) {
// TODO: revisit in AMRMProxy HA in YARN-6128
// Remove the existing pipeline
LOG.info("Remove the previous pipeline for ApplicationId: {}.", applicationAttemptId);
RequestInterceptorChainWrapper pipeline = applPipelineMap.remove(applicationAttemptId.getApplicationId());
if ((!isRecovery) && (this.nmContext.getNMStateStore() != null)) {
try {
this.nmContext.getNMStateStore().removeAMRMProxyAppContext(applicationAttemptId);
} catch (IOException ioe) {
LOG.error("Error removing AMRMProxy application context for {}.", applicationAttemptId, ioe);
}
}
try {
pipeline.getRootInterceptor().shutdown();
} catch (Throwable ex) {
LOG.warn("Failed to shutdown the request processing pipeline for app: {}.", applicationAttemptId.getApplicationId(), ex);
}
} else {
return;
}
}
chainWrapper = new RequestInterceptorChainWrapper();
this.applPipelineMap.put(applicationAttemptId.getApplicationId(), chainWrapper);
}
// We register the pipeline instance in the map first and then initialize it
// later because chain initialization can be expensive, and we would like to
// release the lock as soon as possible to prevent other applications from
// blocking when one application's chain is initializing
LOG.info("Initializing request processing pipeline for application. " + " ApplicationId: {} for the user: {}.", applicationAttemptId, user);
try {
RequestInterceptor interceptorChain = this.createRequestInterceptorChain();
interceptorChain.init(createApplicationMasterContext(this.nmContext, applicationAttemptId, user, amrmToken, localToken, credentials, this.registry));
if (isRecovery) {
if (recoveredDataMap == null) {
throw new YarnRuntimeException("null recoveredDataMap received for recover");
}
interceptorChain.recover(recoveredDataMap);
}
chainWrapper.init(interceptorChain, applicationAttemptId);
if ((!isRecovery) && (this.nmContext.getNMStateStore() != null)) {
try {
this.nmContext.getNMStateStore().storeAMRMProxyAppContextEntry(applicationAttemptId, NMSS_USER_KEY, user.getBytes(StandardCharsets.UTF_8));
this.nmContext.getNMStateStore().storeAMRMProxyAppContextEntry(applicationAttemptId, NMSS_AMRMTOKEN_KEY, amrmToken.encodeToUrlString().getBytes(StandardCharsets.UTF_8));
} catch (IOException e) {
LOG.error("Error storing AMRMProxy application context entry for {}.",
applicationAttemptId, e);
}
}
} catch (Exception e) {
this.applPipelineMap.remove(applicationAttemptId.getApplicationId());
throw e;
}
} | 3.26 |
hadoop_AMRMProxyService_processApplicationStartRequest_rdh | /**
* Callback from the ContainerManager implementation for initializing the
* application request processing pipeline.
*
* @param request
* - encapsulates information for starting an AM
* @throws IOException
* if fails
* @throws YarnException
* if fails
*/
public void processApplicationStartRequest(StartContainerRequest request) throws IOException, YarnException {
this.metrics.incrRequestCount();
long v28 = clock.getTime();
try {
ContainerTokenIdentifier containerTokenIdentifierForKey = BuilderUtils.newContainerTokenIdentifier(request.getContainerToken());
ApplicationAttemptId appAttemptId = containerTokenIdentifierForKey.getContainerID().getApplicationAttemptId();
ApplicationId applicationID = appAttemptId.getApplicationId();
// Check whether the application is in the federation state store; this is
// only done if federation is enabled. If the application was submitted
// through the router, the router adds it to the state store. If the
// application is not found in the state store, it was submitted directly
// to the RM.
if (!checkIfAppExistsInStateStore(applicationID)) {
return;
}
LOG.info("Callback received for initializing request processing pipeline for an AM.");
Credentials credentials = YarnServerSecurityUtils.parseCredentials(request.getContainerLaunchContext());
Token<AMRMTokenIdentifier> amrmToken = getFirstAMRMToken(credentials.getAllTokens());
if (amrmToken == null) {
throw new YarnRuntimeException("AMRMToken not found in the start container request for application:" + appAttemptId);
}
// Substitute the existing AMRM Token with a local one. Keep the rest of
// the tokens in the credentials intact.
Token<AMRMTokenIdentifier> localToken = this.secretManager.createAndGetAMRMToken(appAttemptId);
credentials.addToken(localToken.getService(), localToken);
DataOutputBuffer dob = new DataOutputBuffer();
credentials.writeTokenStorageToStream(dob);
request.getContainerLaunchContext().setTokens(ByteBuffer.wrap(dob.getData(), 0, dob.getLength()));
initializePipeline(appAttemptId, containerTokenIdentifierForKey.getApplicationSubmitter(), amrmToken, localToken, null, false, credentials);
long endTime = clock.getTime();
this.metrics.succeededAppStartRequests(endTime - v28);
} catch (Throwable t) {
this.metrics.incrFailedAppStartRequests();
throw t;
}
} | 3.26 |
hadoop_AMRMProxyService_createRequestInterceptorChain_rdh | /**
* This method creates and returns reference of the first interceptor in the
* chain of request interceptor instances.
*
* @return the reference of the first interceptor in the chain
*/
protected RequestInterceptor createRequestInterceptorChain() {
Configuration conf = getConfig();
List<String> interceptorClassNames = getInterceptorClassNames(conf);
RequestInterceptor pipeline = null;
RequestInterceptor current = null;
for (String interceptorClassName : interceptorClassNames) {
try {
Class<?> interceptorClass = conf.getClassByName(interceptorClassName);
if (RequestInterceptor.class.isAssignableFrom(interceptorClass)) {
RequestInterceptor interceptorInstance = ((RequestInterceptor) (ReflectionUtils.newInstance(interceptorClass, conf)));
if (pipeline == null) {
pipeline = interceptorInstance;
current = interceptorInstance;
continue;
} else {
current.setNextInterceptor(interceptorInstance);
current = interceptorInstance;
}
} else {
throw new YarnRuntimeException((("Class: " + interceptorClassName) + " not instance of ") + RequestInterceptor.class.getCanonicalName());
}
} catch (ClassNotFoundException e) {
throw new YarnRuntimeException("Could not instantiate ApplicationMasterRequestInterceptor: " + interceptorClassName, e);
}
}
if (pipeline == null) {
throw new YarnRuntimeException("RequestInterceptor pipeline is not configured in the system");
}
return pipeline;
} | 3.26 |
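
As an illustration of how the chain above is driven by configuration, a hedged sketch that prepends a custom interceptor to the default pipeline. The class com.example.AuditInterceptor is a made-up placeholder; the two YarnConfiguration constants are the ones referenced in the snippet.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class PipelineConfigExample {
  public static Configuration customPipeline() {
    Configuration conf = new YarnConfiguration();
    // Ordered, comma-separated list; the last entry should still forward
    // requests to the RM, so the default pipeline is appended here.
    conf.set(YarnConfiguration.AMRM_PROXY_INTERCEPTOR_CLASS_PIPELINE,
        "com.example.AuditInterceptor,"
            + YarnConfiguration.DEFAULT_AMRM_PROXY_INTERCEPTOR_CLASS_PIPELINE);
    return conf;
  }
}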
hadoop_AMRMProxyService_getInterceptorClassNames_rdh | /**
* Returns the comma separated interceptor class names from the configuration.
*
* @param conf
* configuration
* @return the interceptor class names as an instance of ArrayList
*/
private List<String> getInterceptorClassNames(Configuration conf) {
String configuredInterceptorClassNames = conf.get(YarnConfiguration.AMRM_PROXY_INTERCEPTOR_CLASS_PIPELINE, YarnConfiguration.DEFAULT_AMRM_PROXY_INTERCEPTOR_CLASS_PIPELINE);
List<String> interceptorClassNames = new ArrayList<>();
Collection<String> tempList = StringUtils.getStringCollection(configuredInterceptorClassNames);
for (String item : tempList) {
interceptorClassNames.add(item.trim());
}
// Make sure DistributedScheduler is present at the beginning of the chain.
if (this.nmContext.isDistributedSchedulingEnabled()) {
interceptorClassNames.add(0, DistributedScheduler.class.getName());
}
return interceptorClassNames;
} | 3.26 |
hadoop_AMRMProxyService_m1_rdh | /**
* This is called by the AMs started on this node to register with the RM.
* This method does the initial authorization and then forwards the request to
* the application instance specific interceptor chain.
*/
@Override
public RegisterApplicationMasterResponse m1(RegisterApplicationMasterRequest request) throws YarnException, IOException {
this.metrics.incrRequestCount();
long startTime = clock.getTime();
try {
RequestInterceptorChainWrapper pipeline = authorizeAndGetInterceptorChain();
LOG.info("Registering AM. Host: {}, Port: {}, Tracking Url: {} for application {}.", request.getHost(), request.getRpcPort(), request.getTrackingUrl(), pipeline.getApplicationAttemptId());
RegisterApplicationMasterResponse response = pipeline.getRootInterceptor().registerApplicationMaster(request);
long endTime = clock.getTime();
this.metrics.succeededRegisterAMRequests(endTime - startTime);
LOG.info("RegisterAM processing finished in {} ms for application {}.", endTime - startTime, v16.getApplicationAttemptId());
return response;
} catch (Throwable t) { this.metrics.incrFailedRegisterAMRequests();
throw t;
}
} | 3.26 |
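
For context, a hedged sketch of the matching call from the AM side: the proxy is transparent to the AM, which registers through the standard AMRMClient API. The host, port, and tracking URL values are placeholders.

import java.io.IOException;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;

static RegisterApplicationMasterResponse registerAm() throws YarnException, IOException {
  AMRMClient<AMRMClient.ContainerRequest> client = AMRMClient.createAMRMClient();
  client.init(new YarnConfiguration());
  client.start();
  // This call is what arrives at the proxy's registerApplicationMaster above.
  return client.registerApplicationMaster("am-host.example", 0, "http://am-host.example:8080/");
}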
hadoop_AMRMProxyService_getApplicationAttemptId_rdh | /**
* Gets the application attempt identifier.
*
* @return the application attempt identifier
*/
public synchronized ApplicationAttemptId getApplicationAttemptId() {
return f0;
} | 3.26 |
hadoop_AMRMProxyService_recover_rdh | /**
* Recover from NM state store. Called after serviceInit before serviceStart.
*
* @throws IOException
* if recover fails
*/
public void recover() throws IOException {
LOG.info("Recovering AMRMProxyService.");
RecoveredAMRMProxyState state = this.nmContext.getNMStateStore().loadAMRMProxyState();
this.secretManager.recover(state);
LOG.info("Recovering {} running applications for AMRMProxy.", state.getAppContexts().size());
for (Map.Entry<ApplicationAttemptId, Map<String, byte[]>> entry : state.getAppContexts().entrySet()) {
ApplicationAttemptId attemptId = entry.getKey();
LOG.info("Recovering app attempt {}.", attemptId);
long startTime = clock.getTime();
// Try recover for the running application attempt
try {
String user = null;
Token<AMRMTokenIdentifier> amrmToken = null;
for (Map.Entry<String, byte[]> contextEntry : entry.getValue().entrySet()) {
if (contextEntry.getKey().equals(NMSS_USER_KEY)) {
user = new String(contextEntry.getValue(), StandardCharsets.UTF_8);
} else if (contextEntry.getKey().equals(NMSS_AMRMTOKEN_KEY)) {
amrmToken = new Token<>();
amrmToken.decodeFromUrlString(new String(contextEntry.getValue(), StandardCharsets.UTF_8));
// Clear the service field, as if RM just issued the token
amrmToken.setService(new Text());
}
}
if (amrmToken == null) {
throw new IOException("No amrmToken found for app attempt " + attemptId);}
if (user == null) { throw new IOException("No user found for app attempt " + attemptId);
}
// Regenerate the local AMRMToken for the AM
Token<AMRMTokenIdentifier> localToken = this.secretManager.createAndGetAMRMToken(attemptId);
// Retrieve the AM container credentials from NM context
Credentials amCred = null;
for (Container container : this.nmContext.getContainers().values()) {
LOG.debug("From NM Context container {}.", container.getContainerId());
if (container.getContainerId().getApplicationAttemptId().equals(attemptId) && (container.getContainerTokenIdentifier() != null)) {
LOG.debug("Container type {}.", container.getContainerTokenIdentifier().getContainerType());
if (container.getContainerTokenIdentifier().getContainerType() == ContainerType.APPLICATION_MASTER) {
LOG.info("AM container {} found in context, has credentials: {}.", container.getContainerId(), container.getCredentials() != null);
amCred = container.getCredentials();
}
}
}
if (amCred == null) {
LOG.error("No credentials found for AM container of {}. " + "Yarn registry access might not work.", attemptId);}
// Create the interceptor pipeline for the AM
initializePipeline(attemptId, user, amrmToken, localToken, entry.getValue(), true, amCred);
long endTime = clock.getTime();
this.metrics.succeededRecoverRequests(endTime - startTime);
} catch (Throwable e) {
LOG.error("Exception when recovering {}, removing it from NMStateStore and move on.", attemptId, e);this.metrics.incrFailedAppRecoveryCount();
this.nmContext.getNMStateStore().removeAMRMProxyAppContext(attemptId);
}
}
} | 3.26 |
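
A minimal sketch of the token round trip performed during recovery, assuming the stored bytes came from Token#encodeToUrlString. The helper name is hypothetical; the Token calls are the same ones used in the loop above.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;

static Token<AMRMTokenIdentifier> decodeStoredAmrmToken(byte[] stored) throws IOException {
  Token<AMRMTokenIdentifier> token = new Token<>();
  token.decodeFromUrlString(new String(stored, StandardCharsets.UTF_8));
  // Clear the service field, as if the RM had just issued the token.
  token.setService(new Text());
  return token;
}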
hadoop_AMRMProxyService_stopApplication_rdh | /**
* Shuts down the request processing pipeline for the specified application
* attempt id.
*
* @param applicationId
* application id
*/
protected void stopApplication(ApplicationId applicationId) {
this.metrics.incrRequestCount();
Preconditions.checkArgument(applicationId != null, "applicationId is null");
RequestInterceptorChainWrapper pipeline = this.applPipelineMap.remove(applicationId);
boolean isStopSuccess = true;
long startTime = clock.getTime();
if (pipeline == null) {
LOG.info("No interceptor pipeline for application {}," + " likely because its AM is not run in this node.", applicationId);
isStopSuccess = false;
} else {
// Remove the appAttempt in AMRMTokenSecretManager
this.secretManager.applicationMasterFinished(pipeline.getApplicationAttemptId());
LOG.info("Stopping the request processing pipeline for application: {}.", applicationId);
try {
pipeline.getRootInterceptor().shutdown();
} catch (Throwable ex) {
LOG.warn("Failed to shutdown the request processing pipeline for app: {}.", applicationId, ex);
isStopSuccess = false;
}
// Remove the app context from NMSS after the interceptors are shutdown
if (this.nmContext.getNMStateStore() != null) {
try {
this.nmContext.getNMStateStore().removeAMRMProxyAppContext(pipeline.getApplicationAttemptId());
} catch (IOException e) {
LOG.error("Error removing AMRMProxy application context for {}.", applicationId, e);
isStopSuccess = false;
}
}
}
if (isStopSuccess) {
long endTime = clock.getTime();
this.metrics.succeededAppStopRequests(endTime - startTime);
} else {
this.metrics.incrFailedAppStopRequests();
}
} | 3.26 |
hadoop_AMRMProxyService_init_rdh | /**
* Initializes the wrapper with the specified parameters.
*
* @param interceptor
* the root request interceptor
* @param appAttemptId
* attempt id
*/
public synchronized void init(RequestInterceptor interceptor, ApplicationAttemptId appAttemptId) {
rootInterceptor = interceptor;
f0 = appAttemptId;
} | 3.26 |
hadoop_AMRMProxyService_getPipelines_rdh | /**
* Gets the Request interceptor chains for all the applications.
*
* @return the request interceptor chains.
*/
protected Map<ApplicationId, RequestInterceptorChainWrapper> getPipelines() {
return this.applPipelineMap;
} | 3.26 |
hadoop_AMRMProxyService_getRootInterceptor_rdh | /**
* Gets the root request interceptor.
*
* @return the root request interceptor
*/
public synchronized RequestInterceptor getRootInterceptor() {
return rootInterceptor;
} | 3.26
hadoop_AMRMProxyService_m2_rdh | /**
* This is called by the AMs started on this node to send heart beat to RM.
* This method does the initial authorization and then forwards the request to
* the application instance specific pipeline, which is a chain of request
* interceptor objects. One application request processing pipeline is created
* per AM instance.
*/
@Override
public AllocateResponse m2(AllocateRequest request) throws YarnException, IOException {
this.metrics.incrAllocateCount();
long startTime = clock.getTime();
try {
AMRMTokenIdentifier amrmTokenIdentifier = YarnServerSecurityUtils.authorizeRequest();
RequestInterceptorChainWrapper pipeline = getInterceptorChain(amrmTokenIdentifier);
AllocateResponse allocateResponse = pipeline.getRootInterceptor().allocate(request);
updateAMRMTokens(amrmTokenIdentifier, pipeline, allocateResponse);
long endTime = clock.getTime();
this.metrics.succeededAllocateRequests(endTime - startTime);
LOG.info("Allocate processing finished in {} ms for application {}.", endTime - startTime, pipeline.getApplicationAttemptId());
return allocateResponse;
} catch (Throwable t) {
this.metrics.incrFailedAllocateRequests();
throw t;
}
} | 3.26 |
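
A hedged sketch of a single heartbeat from the AM side, together with the token check that the proxy's updateAMRMTokens step cares about. The helper name and progress value are illustrative only.

import java.io.IOException;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.exceptions.YarnException;

// One heartbeat; every such call passes through the per-attempt interceptor chain.
static AllocateResponse heartbeatOnce(AMRMClient<AMRMClient.ContainerRequest> client)
    throws YarnException, IOException {
  AllocateResponse response = client.allocate(0.1f); // progress value is a placeholder
  if (response.getAMRMToken() != null) {
    // The RM rolled its master key: the response carries a fresh AMRMToken
    // that the proxy persists before the AM sees the response.
  }
  return response;
}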
hadoop_AMRMProxyService_finishApplicationMaster_rdh | /**
* This is called by the AMs started on this node to unregister from the RM.
* This method does the initial authorization and then forwards the request to
* the application instance specific interceptor chain.
*/
@Override
public FinishApplicationMasterResponse finishApplicationMaster(FinishApplicationMasterRequest request) throws YarnException, IOException {
this.metrics.incrRequestCount();
long startTime = clock.getTime();
try {
RequestInterceptorChainWrapper pipeline = authorizeAndGetInterceptorChain();
LOG.info("Finishing application master for {}. Tracking Url: {}.", pipeline.getApplicationAttemptId(), request.getTrackingUrl());
FinishApplicationMasterResponse response = pipeline.getRootInterceptor().finishApplicationMaster(request);
long endTime = clock.getTime();
this.metrics.succeededFinishAMRequests(endTime - startTime);
LOG.info("FinishAM finished with isUnregistered = {} in {} ms for {}.", response.getIsUnregistered(), endTime -
startTime, pipeline.getApplicationAttemptId());
return response;
} catch (Throwable t) {
this.metrics.incrFailedFinishAMRequests();
throw t;
}
} | 3.26 |
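
And the corresponding unregister call from the AM side, sketched with placeholder status, message, and history-URL values.

import java.io.IOException;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.exceptions.YarnException;

static void unregisterAm(AMRMClient<AMRMClient.ContainerRequest> client)
    throws YarnException, IOException {
  // Routed through the proxy's finishApplicationMaster above before reaching the RM.
  client.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,
      "done", "http://history.example/app");
}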
hadoop_FSTreeTraverser_traverseDir_rdh | /**
* Iterate through all files directly inside parent, and recurse down
* directories. The listing is done in batch, and can optionally start after
* a position. The iteration of the inode tree is done in a depth-first
* fashion. But instead of holding all {@link INodeDirectory}'s in memory
* on the fly, only the path components to the current inode are held. This
* reduces memory consumption.
*
* @param parent
* The parent directory to traverse.
* @param startId
* Id of the start inode.
* @param startAfter
* Full path of a file the traverse should start after.
* @param traverseInfo
* info which may be required for processing the children.
* @throws IOException
* @throws InterruptedException
*/
protected void traverseDir(final INodeDirectory parent, final long startId, byte[] startAfter, final TraverseInfo traverseInfo) throws IOException, InterruptedException {
List<byte[]> startAfters = new ArrayList<>();
if (parent == null) {
return;
}
INode curr = parent;
// construct startAfters all the way up to the zone inode.
startAfters.add(startAfter);
while (curr.getId() != startId) {
startAfters.add(0, curr.getLocalNameBytes());
curr = curr.getParent();
}
curr = traverseDirInt(startId, parent, startAfters, traverseInfo);
while (!startAfters.isEmpty()) {
if (curr == null) {
// lock was reacquired, re-resolve path.
curr = m0(startId, startAfters);
}
curr = traverseDirInt(startId, curr, startAfters, traverseInfo);
}
} | 3.26 |
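
A simplified model of the "walk up to the start inode" step, using a stand-in Node interface instead of the internal INode types. It only illustrates how the resume position is captured as name bytes rather than as pinned inodes; all names here are hypothetical.

import java.util.ArrayList;
import java.util.List;

public class StartAfterExample {
  // Stand-in for the internal INode type; only the accessors the loop needs.
  interface Node {
    long getId();
    byte[] getLocalNameBytes();
    Node getParent();
  }

  // Prepend each ancestor's name until the start inode is reached, so the
  // traversal can later re-resolve its position without holding the inodes.
  static List<byte[]> buildStartAfters(Node current, long startId, byte[] startAfter) {
    List<byte[]> startAfters = new ArrayList<>();
    startAfters.add(startAfter);
    while (current.getId() != startId) {
      startAfters.add(0, current.getLocalNameBytes());
      current = current.getParent();
    }
    return startAfters;
  }
}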
hadoop_Result_isPass_rdh | /**
* Should processing continue.
*
* @return true if processing should continue; false otherwise.
*/
public boolean isPass() {
return this.success;
} | 3.26 |