name (string, 12–178 chars) | code_snippet (string, 8–36.5k chars) | score (float64, 3.26–3.68)
---|---|---|
hadoop_ParsedTaskAttempt_putDiagnosticInfo_rdh | /**
* Set the task attempt diagnostic-info
*/
public void putDiagnosticInfo(String msg) {
diagnosticInfo = msg;
} | 3.26 |
hadoop_ParsedTaskAttempt_obtainCounters_rdh | /**
*
* @return the task attempt counters
*/
public Map<String, Long> obtainCounters() {
return countersMap;
} | 3.26 |
hadoop_ParsedTaskAttempt_m0_rdh | /**
* Set the task attempt counters
*/
public void m0(Map<String, Long> counters) {
this.countersMap = counters;
} | 3.26 |
hadoop_ParsedTaskAttempt_dumpParsedTaskAttempt_rdh | /**
* Dump the extra info of ParsedTaskAttempt
*/
void dumpParsedTaskAttempt() {
LOG.info((((((((((((("ParsedTaskAttempt details:" + obtainCounters()) + ";DiagnosticInfo=") + obtainDiagnosticInfo()) + "\n") + obtainTrackerName()) + ";") + obtainHttpPort()) + ";") + obtainShufflePort()) + ";rack=") + getHostName().getRackName()) + ";host=") + getHostName().getHostName());
} | 3.26 |
hadoop_ParsedTaskAttempt_obtainHttpPort_rdh | /**
*
* @return http port if set. Returns null otherwise.
*/
public Integer obtainHttpPort() {
return httpPort;
} | 3.26 |
hadoop_ParsedTaskAttempt_incorporateCounters_rdh | /**
* incorporate event counters
*/
public void incorporateCounters(JhCounters counters) {
Map<String, Long> countersMap = JobHistoryUtils.extractCounters(counters);
m0(countersMap);
super.incorporateCounters(counters);
} | 3.26 |
hadoop_ParsedTaskAttempt_obtainShufflePort_rdh | /**
*
* @return shuffle port if set. Returns null otherwise.
*/
public Integer obtainShufflePort() {
return shufflePort;
} | 3.26 |
hadoop_AMOptions_verify_rdh | /**
* Same as {@link #verify(long, int)} but does not set a max.
*/
void verify() throws IllegalArgumentException {
verify(Integer.MAX_VALUE, Integer.MAX_VALUE);
} | 3.26 |
hadoop_AMOptions_initFromParser_rdh | /**
* Initialize an {@code AMOptions} from a command line parser.
*
* @param cliParser
* Where to initialize from.
* @return A new {@code AMOptions} filled out with options from the parser.
*/
static AMOptions initFromParser(CommandLine cliParser) {
Map<String, String> originalShellEnv =
new HashMap<>();
if (cliParser.hasOption(SHELL_ENV_ARG)) {
for (String env : cliParser.getOptionValues(SHELL_ENV_ARG)) {
String trimmed = env.trim();
int index = trimmed.indexOf('=');
if (index == (-1)) {
originalShellEnv.put(trimmed, "");
continue;
}
String key = trimmed.substring(0, index);
String val = "";
if (index < (trimmed.length() - 1)) {
val = trimmed.substring(index + 1);
}
originalShellEnv.put(key, val);
}
}
return new AMOptions(Integer.parseInt(cliParser.getOptionValue(DATANODE_MEMORY_MB_ARG, DATANODE_MEMORY_MB_DEFAULT)), Integer.parseInt(cliParser.getOptionValue(DATANODE_VCORES_ARG, DATANODE_VCORES_DEFAULT)), cliParser.getOptionValue(DATANODE_ARGS_ARG, ""), cliParser.getOptionValue(DATANODE_NODELABEL_ARG, ""), Integer.parseInt(cliParser.getOptionValue(DATANODES_PER_CLUSTER_ARG, DATANODES_PER_CLUSTER_DEFAULT)), cliParser.getOptionValue(DATANODE_LAUNCH_DELAY_ARG, DATANODE_LAUNCH_DELAY_DEFAULT), Integer.parseInt(cliParser.getOptionValue(NAMENODE_MEMORY_MB_ARG, f0)), Integer.parseInt(cliParser.getOptionValue(NAMENODE_VCORES_ARG, NAMENODE_VCORES_DEFAULT)), cliParser.getOptionValue(f1, ""), cliParser.getOptionValue(NAMENODE_NODELABEL_ARG,
""), Integer.parseInt(cliParser.getOptionValue(NAMENODE_METRICS_PERIOD_ARG, NAMENODE_METRICS_PERIOD_DEFAULT)), cliParser.getOptionValue(NAMENODE_NAME_DIR_ARG,
""), cliParser.getOptionValue(NAMENODE_EDITS_DIR_ARG, ""), originalShellEnv);
} | 3.26 |
hadoop_AMOptions_setOptions_rdh | /**
* Set all of the command line options relevant to this class into the passed
* {@link Options}.
*
* @param opts
* Where to set the command line options.
*/
static void setOptions(Options opts) {
opts.addOption(SHELL_ENV_ARG, true, "Environment for shell script. Specified as env_key=env_val pairs");
opts.addOption(NAMENODE_MEMORY_MB_ARG,
true, (("Amount of memory in MB to be requested to run the NN (default " + f0) + "). ") + "Ignored unless the NameNode is run within YARN.");
opts.addOption(NAMENODE_VCORES_ARG, true, (("Amount of virtual cores to be requested to run the NN (default " + NAMENODE_VCORES_DEFAULT) + "). ") + "Ignored unless the NameNode is run within YARN.");
opts.addOption(f1, true, "Additional arguments to add when starting the NameNode. " + "Ignored unless the NameNode is run within YARN.");
opts.addOption(NAMENODE_NODELABEL_ARG,
true, "The node label to specify for the container to use to " + "run the NameNode.");
opts.addOption(NAMENODE_METRICS_PERIOD_ARG, true, (((("The period in seconds for the NameNode's metrics to be emitted to " + "file; if <=0, disables this functionality. Otherwise, a ") + "metrics file will be stored in the container logs for the ") + "NameNode (default ") + NAMENODE_METRICS_PERIOD_DEFAULT) + ").");
opts.addOption(NAMENODE_NAME_DIR_ARG, true, ("The directory to use for the NameNode's name data directory. " + "If not specified, a location within the container's working ") + "directory will be used.");
opts.addOption(NAMENODE_EDITS_DIR_ARG, true, ("The directory to use for the NameNode's edits directory. " + "If not specified, a location within the container's working ") + "directory will be used.");
opts.addOption(DATANODE_MEMORY_MB_ARG, true, ("Amount of memory in MB to be requested to run the DNs (default " + DATANODE_MEMORY_MB_DEFAULT) + ")");
opts.addOption(DATANODE_VCORES_ARG, true, ("Amount of virtual cores to be requested to run the DNs (default " + DATANODE_VCORES_DEFAULT) + ")");
opts.addOption(DATANODE_ARGS_ARG, true, "Additional arguments to add when starting the DataNodes.");
opts.addOption(DATANODE_NODELABEL_ARG, true, "The node label to specify " + "for the container to use to run the DataNode.");
opts.addOption(DATANODES_PER_CLUSTER_ARG, true, (("How many simulated DataNodes to run within each YARN container " + "(default ") + DATANODES_PER_CLUSTER_DEFAULT) + ")");
opts.addOption(DATANODE_LAUNCH_DELAY_ARG, true, (((("The period over which to launch the DataNodes; this will " + "be used as the maximum delay and each DataNode container will ") + "be launched with some random delay less than this value. ") + "Accepts human-readable time durations (e.g. 10s, 1m) (default ") + DATANODE_LAUNCH_DELAY_DEFAULT) + ")");
opts.addOption("help", false, "Print usage");
} | 3.26 |
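The two AMOptions helpers above follow the standard Apache Commons CLI pattern: register options, parse the command line, then pull values out of the `CommandLine`. A minimal standalone sketch of that round trip; the internal `SHELL_ENV_ARG` constant is replaced by a hypothetical literal `"shell_env"` and the argument values are illustrative:

```java
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Options;

public class ShellEnvParseSketch {
  public static void main(String[] args) throws Exception {
    Options opts = new Options();
    // Same shape as setOptions(): a value-taking, repeatable option.
    opts.addOption("shell_env", true,
        "Environment for shell script. Specified as env_key=env_val pairs");

    CommandLine cli = new DefaultParser().parse(opts, new String[] {
        "-shell_env", "JAVA_HOME=/usr/lib/jvm", "-shell_env", "DEBUG"});

    // Mirrors the key=value split in initFromParser(): no '=' means empty value.
    Map<String, String> env = new HashMap<>();
    for (String pair : cli.getOptionValues("shell_env")) {
      String trimmed = pair.trim();
      int idx = trimmed.indexOf('=');
      String key = idx == -1 ? trimmed : trimmed.substring(0, idx);
      String val = idx == -1 || idx == trimmed.length() - 1
          ? "" : trimmed.substring(idx + 1);
      env.put(key, val);
    }
    System.out.println(env); // two entries: JAVA_HOME -> /usr/lib/jvm, DEBUG -> ""
  }
}
```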
hadoop_FlowRunEntityReader_updateFixedColumns_rdh | /**
* Add {@link QualifierFilter} filters to filter list for each column of flow
* run table.
*
* @return filter list to which qualifier filters have been added.
*/
private FilterList updateFixedColumns() {
FilterList columnsList = new FilterList(Operator.MUST_PASS_ONE);
for (FlowRunColumn column : FlowRunColumn.values()) {
columnsList.addFilter(new QualifierFilter(CompareOp.EQUAL, new BinaryComparator(column.getColumnQualifierBytes())));
}
return columnsList;
} | 3.26 |
hadoop_FlowRunEntityReader_getTable_rdh | /**
* Uses the {@link FlowRunTableRW}.
*/
@Override
protected BaseTableRW<?> getTable() {
return FLOW_RUN_TABLE;
} | 3.26 |
hadoop_QueueACLsManager_getQueueACLsManager_rdh | /**
* Get queue acl manager corresponding to the scheduler.
*
* @param scheduler
* the scheduler for which the queue acl manager is required
* @param conf
* Configuration.
* @return {@link QueueACLsManager}
*/
public static QueueACLsManager getQueueACLsManager(ResourceScheduler scheduler, Configuration conf) {
if (scheduler instanceof CapacityScheduler) {
return new CapacityQueueACLsManager(scheduler, conf);
} else if (scheduler instanceof FairScheduler) {
return new FairQueueACLsManager(scheduler, conf);
} else {
return new GenericQueueACLsManager(scheduler, conf);
}
} | 3.26 |
hadoop_RouterPermissionChecker_checkPermission_rdh | /**
* Whether a mount table entry can be accessed by the current context.
*
* @param mountTable
* MountTable being accessed
* @param access
* type of action being performed on the mount table entry
* @throws AccessControlException
* if mount table cannot be accessed
*/
public void checkPermission(MountTable mountTable, FsAction access) throws AccessControlException {
if (isSuperUser()) {
return;
}
FsPermission mode = mountTable.getMode();
if (getUser().equals(mountTable.getOwnerName()) && mode.getUserAction().implies(access)) {
return;
}
if (isMemberOfGroup(mountTable.getGroupName()) && mode.getGroupAction().implies(access)) {
return;
}
if (((!getUser().equals(mountTable.getOwnerName())) && (!isMemberOfGroup(mountTable.getGroupName()))) && mode.getOtherAction().implies(access)) {
return;
}
throw new AccessControlException(((((("Permission denied while accessing mount table " + mountTable.getSourcePath()) + ": user ") + getUser()) + " does not have ") + access.toString()) + " permissions.");
} | 3.26 |
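The owner/group/other evaluation in checkPermission() is plain FsPermission/FsAction arithmetic. A minimal standalone sketch of the same decision outside the checker; the method and parameter names here are illustrative and not part of the router code:

```java
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

final class MountPermissionSketch {
  /** Same three-way check as checkPermission(), minus the superuser shortcut. */
  static boolean mayAccess(FsPermission mode, String user, String owner,
      boolean userInGroup, FsAction access) {
    if (user.equals(owner) && mode.getUserAction().implies(access)) {
      return true;                     // owner bits grant the action
    }
    if (userInGroup && mode.getGroupAction().implies(access)) {
      return true;                     // group bits grant the action
    }
    // everyone else falls back to the "other" bits
    return !user.equals(owner) && !userInGroup
        && mode.getOtherAction().implies(access);
  }
}
```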
hadoop_RouterPermissionChecker_checkSuperuserPrivilege_rdh | /**
* Check the superuser privileges of the current RPC caller. This method is
* based on Datanode#checkSuperuserPrivilege().
*
* @throws AccessControlException
* If the user is not authorized.
*/
@Override
public void checkSuperuserPrivilege() throws AccessControlException {
// Try to get the ugi in the RPC call.
UserGroupInformation ugi = null;
try {
ugi = NameNode.getRemoteUser();
} catch (IOException e) {
// Ignore as we catch it afterwards
}
if (ugi == null) {
LOG.error("Cannot get the remote user name");
throw new AccessControlException("Cannot get the remote user name");
}
// Is this by the Router user itself?
if (ugi.getShortUserName().equals(superUser)) {
return;
}
// Is the user a member of the super group?
if (ugi.getGroupsSet().contains(superGroup)) {
return;
}
// Not a superuser
throw new AccessControlException(ugi.getUserName() + " is not a super user");
} | 3.26 |
hadoop_JobInfo_countTasksAndAttempts_rdh | /**
* Go through a job and update the member variables with counts for
* information to output in the page.
*
* @param job
* the job to get counts for.
*/
private void countTasksAndAttempts(Job job) {
numReduces = 0;
numMaps = 0;
final Map<TaskId, Task> tasks = job.getTasks();
if (tasks == null) {
return;
}
for (Task task : tasks.values()) {
// Attempts counts
Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
int successful;
int v12;
int killed;
for (TaskAttempt attempt : attempts.values()) {
successful = 0;
v12 = 0;
killed = 0;
if (TaskAttemptStateUI.NEW.correspondsTo(attempt.getState())) {
// Do Nothing
} else if (TaskAttemptStateUI.RUNNING.correspondsTo(attempt.getState())) {
// Do Nothing
} else if (TaskAttemptStateUI.SUCCESSFUL.correspondsTo(attempt.getState())) {
++successful;
} else if (TaskAttemptStateUI.FAILED.correspondsTo(attempt.getState())) {
++v12;
} else if (TaskAttemptStateUI.KILLED.correspondsTo(attempt.getState())) {
++killed;
}
switch (task.getType()) {
case MAP :
successfulMapAttempts += successful;
failedMapAttempts += v12;
killedMapAttempts += killed;
if (attempt.getState() == TaskAttemptState.SUCCEEDED) {
numMaps++;
avgMapTime += attempt.getFinishTime() - attempt.getLaunchTime();
}
break;
case REDUCE :
successfulReduceAttempts += successful;
failedReduceAttempts += v12;
killedReduceAttempts += killed;
if (attempt.getState() == TaskAttemptState.SUCCEEDED) {
numReduces++;
avgShuffleTime += attempt.getShuffleFinishTime() - attempt.getLaunchTime();
avgMergeTime += attempt.getSortFinishTime() - attempt.getShuffleFinishTime();
avgReduceTime += attempt.getFinishTime() - attempt.getSortFinishTime();
}
break;
}
}
}
if (numMaps > 0) {
avgMapTime = avgMapTime / numMaps;
}
if (numReduces > 0) {
avgReduceTime = avgReduceTime / numReduces;
avgShuffleTime = avgShuffleTime / numReduces;
avgMergeTime = avgMergeTime / numReduces;
}
} | 3.26 |
hadoop_ConsistentHashRing_getLocation_rdh | /**
* Return location (owner) of specified item. Owner is the next
* entry on the hash ring (with a hash value > hash value of item).
*
* @param item
* Item to look for.
* @return The location of the item.
*/
public String getLocation(String item) {
readLock.lock();
try {
if (ring.isEmpty()) {
return null;
}
String hash = getHash(item);
if (!ring.containsKey(hash)) {
SortedMap<String, String> tailMap = ring.tailMap(hash);
hash = (tailMap.isEmpty()) ? ring.firstKey() : tailMap.firstKey();
}
String virtualNode = ring.get(hash);
int index = virtualNode.lastIndexOf(SEPARATOR);
if (index >= 0) {
return virtualNode.substring(0, index);
} else {
return virtualNode;
}
} finally {
readLock.unlock();
}
} | 3.26 |
hadoop_ConsistentHashRing_addLocation_rdh | /**
* Add entry to consistent hash ring.
*
* @param location
* Node to add to the ring.
* @param numVirtualNodes
* Number of virtual nodes to add.
*/
public void addLocation(String location, int numVirtualNodes) {
writeLock.lock();
try {
entryToVirtualNodes.put(location, numVirtualNodes);
for (int i = 0; i < numVirtualNodes; i++) {
String key = String.format(VIRTUAL_NODE_FORMAT, location, i);
String hash = getHash(key);
ring.put(hash, key);
}
} finally {
writeLock.unlock();
}
} | 3.26 |
hadoop_ConsistentHashRing_removeLocation_rdh | /**
* Remove specified entry from hash ring.
*
* @param location
* Node to remove from the ring.
*/
public void removeLocation(String location) {
writeLock.lock();
try {
Integer numVirtualNodes = entryToVirtualNodes.remove(location);
for (int i = 0; i < numVirtualNodes; i++) {
String key = String.format(VIRTUAL_NODE_FORMAT, location, i);
String hash = getHash(key);
ring.remove(hash);
}
} finally {
writeLock.unlock();
}
} | 3.26 |
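Taken together, addLocation, getLocation, and removeLocation support a simple membership workflow. A hedged usage sketch that calls only the methods shown above; the ring instance is passed in and the import is omitted because the class's package is not shown in these snippets, and the node names are illustrative:

```java
// Usage sketch only: exercises just the three ConsistentHashRing methods above;
// constructing the ring itself is out of scope here, so the instance is passed in.
static String demoRing(ConsistentHashRing ring, String item) {
  ring.addLocation("router-1", 100);      // 100 virtual nodes per entry
  ring.addLocation("router-2", 100);

  String before = ring.getLocation(item); // next virtual node clockwise from hash(item)

  // Removing an entry only remaps items that hashed to its virtual nodes;
  // everything else keeps its previous owner.
  ring.removeLocation("router-2");
  String after = ring.getLocation(item);
  return before + " -> " + after;
}
```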
hadoop_NMTokenCache_getNMToken_rdh | /**
* Returns NMToken, null if absent. Only the singleton obtained from
* {@link #getSingleton()} is looked at for the tokens. If you are using your
* own NMTokenCache that is different from the singleton, use
* {@link #getToken(String)}
*
* @param nodeAddr
* @return {@link Token} NMToken required for communicating with node manager
*/
@Public
public static Token getNMToken(String nodeAddr) {
return NM_TOKEN_CACHE.getToken(nodeAddr);
}
/**
* Sets the NMToken for node address only in the singleton obtained from
* {@link #getSingleton()}. If you are using your own NMTokenCache that is
* different from the singleton, use {@link #setToken(String, Token)} | 3.26 |
hadoop_NMTokenCache_removeToken_rdh | /**
* Removes NMToken for specified node manager
*
* @param nodeAddr
* node address (host:port)
*/
@Private
@VisibleForTesting
public void removeToken(String nodeAddr) {
nmTokens.remove(nodeAddr);
} | 3.26 |
hadoop_NMTokenCache_containsToken_rdh | /**
* Returns true if NMToken is present in cache.
*/
@Private
@VisibleForTesting
public boolean containsToken(String nodeAddr) {
return nmTokens.containsKey(nodeAddr);
} | 3.26 |
hadoop_NMTokenCache_getSingleton_rdh | /**
* Returns the singleton NM token cache.
*
* @return the singleton NM token cache.
*/
public static NMTokenCache getSingleton() {
return NM_TOKEN_CACHE;
} | 3.26 |
hadoop_NMTokenCache_setToken_rdh | /**
* Sets the NMToken for node address
*
* @param nodeAddr
* node address (host:port)
* @param token
* NMToken
*/
@Public
@Evolving
public void setToken(String nodeAddr, Token token) {
nmTokens.put(nodeAddr, token);
} | 3.26 |
hadoop_NMTokenCache_numberOfTokensInCache_rdh | /**
* Returns the number of NMTokens present in cache.
*/
@Private
@VisibleForTesting
public int numberOfTokensInCache() {
return nmTokens.size();
} | 3.26 |
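The getNMToken Javadoc above distinguishes the process-wide singleton from a caller-owned cache. A hedged sketch of both paths using only methods named in these snippets; the import paths and the public no-arg constructor are assumptions based on common YARN client usage, and the Token value is assumed to come from an AllocateResponse elsewhere:

```java
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.client.api.NMTokenCache;

final class NMTokenCacheSketch {
  /** Singleton path: pairs with the static getNMToken(String) shown above. */
  static Token cacheInSingleton(String nodeAddr, Token nmToken) {
    NMTokenCache.getSingleton().setToken(nodeAddr, nmToken);
    return NMTokenCache.getNMToken(nodeAddr);   // same entry, read back statically
  }

  /** Private-cache path: stick to the instance setToken/getToken methods. */
  static Token cacheInOwnInstance(String nodeAddr, Token nmToken) {
    NMTokenCache myCache = new NMTokenCache(); // assumes the public no-arg constructor
    myCache.setToken(nodeAddr, nmToken);
    return myCache.getToken(nodeAddr);
  }
}
```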
hadoop_JobQueueChangeEvent_getJobQueueName_rdh | /**
* Get the new Job queue name
*/
public String getJobQueueName() {
if (datum.jobQueueName != null) {
return datum.jobQueueName.toString();
}
return null;
} | 3.26 |
hadoop_JobQueueChangeEvent_getJobId_rdh | /**
* Get the Job ID
*/
public JobID getJobId() {
return JobID.forName(datum.jobid.toString());
} | 3.26 |
hadoop_StepType_getDescription_rdh | /**
* Returns step type description.
*
* @return String step type description
*/
public String getDescription() {
return description;
} | 3.26 |
hadoop_StepType_getName_rdh | /**
* Returns step type name.
*
* @return String step type name
*/
public String getName() {
return name;
} | 3.26 |
hadoop_BlockGrouper_getRequiredNumParityBlocks_rdh | /**
* Get required parity blocks count in a BlockGroup.
*
* @return count of required parity blocks
*/
public int getRequiredNumParityBlocks() {
return schema.getNumParityUnits();
} | 3.26 |
hadoop_BlockGrouper_setSchema_rdh | /**
* Set EC schema.
*
* @param schema
* schema.
*/
public void setSchema(ECSchema schema) {
this.schema = schema;
} | 3.26 |
hadoop_BlockGrouper_makeBlockGroup_rdh | /**
* Calculating and organizing BlockGroup, to be called by ECManager
*
* @param dataBlocks
* Data blocks to compute parity blocks against
* @param parityBlocks
* To be computed parity blocks
* @return ECBlockGroup.
*/
public ECBlockGroup makeBlockGroup(ECBlock[] dataBlocks, ECBlock[] parityBlocks) {
ECBlockGroup blockGroup = new ECBlockGroup(dataBlocks, parityBlocks);
return blockGroup;
} | 3.26 |
hadoop_BlockGrouper_anyRecoverable_rdh | /**
* Given a BlockGroup, tell if any of the missing blocks can be recovered,
* to be called by ECManager
*
* @param blockGroup
* a blockGroup that may contain erased blocks whose recoverability
* is not yet known
* @return true if any erased block is recoverable, false otherwise
*/
public boolean anyRecoverable(ECBlockGroup blockGroup) {
int erasedCount = blockGroup.getErasedCount();
return (erasedCount > 0) && (erasedCount <= getRequiredNumParityBlocks());
} | 3.26 |
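As a concrete reading of anyRecoverable(): with a Reed-Solomon 6+3 schema, a block group stays recoverable while at most three blocks are erased. A sketch under the assumption that ECSchema exposes a (codec, dataUnits, parityUnits) constructor, BlockGrouper a no-arg constructor, and that the import paths below are the right ones; only setSchema() and getRequiredNumParityBlocks() are taken from the snippets above:

```java
import org.apache.hadoop.io.erasurecode.ECSchema;
import org.apache.hadoop.io.erasurecode.grouper.BlockGrouper;

final class RecoverabilitySketch {
  static void demo() {
    BlockGrouper grouper = new BlockGrouper();   // assumed no-arg constructor
    grouper.setSchema(new ECSchema("rs", 6, 3)); // assumed (codec, data, parity) ctor

    // For a 6+3 schema, getRequiredNumParityBlocks() is 3, so anyRecoverable()
    // holds only while 0 < erasedCount <= 3 in a block group.
    System.out.println(grouper.getRequiredNumParityBlocks()); // expected: 3
  }
}
```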
hadoop_Abfss_finalize_rdh | /**
* Close the file system; the FileContext API doesn't have an explicit close.
*/
@Override
protected void finalize() throws Throwable {
fsImpl.close();
super.finalize();
} | 3.26 |
hadoop_YarnServiceConf_getLong_rdh | /**
* Get long value for the property. First get from the userConf, if not
* present, get from systemConf.
*
* @param name
* name of the property
* @param defaultValue
* default value of the property, if it is not defined in
* userConf and systemConf.
* @param userConf
* Configuration provided by client in the JSON definition
* @param systemConf
* The YarnConfiguration in the system.
* @return long value for the property
*/
public static long getLong(String name, long defaultValue, Configuration userConf, Configuration systemConf) {
return userConf.getPropertyLong(name, systemConf.getLong(name, defaultValue));
} | 3.26 |
hadoop_CustomResourceMetrics_m0_rdh | /**
* Get a map of all custom resource metrics.
*
* @return map of custom resources, each initialized to 0
*/
public Map<String, Long> m0() {
Map<String, Long> customResources = new HashMap<String, Long>();
ResourceInformation[] resources = ResourceUtils.getResourceTypesArray();
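// Indices 0 and 1 are the mandatory memory and vcores entries, so the
// custom resource types start at index 2.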
for (int i = 2; i < resources.length; i++) {
ResourceInformation resource = resources[i];
customResources.put(resource.getName(), Long.valueOf(0));
}
return customResources;
} | 3.26 |
hadoop_CustomResourceMetrics_registerCustomResources_rdh | /**
* Whenever this metrics object is constructed for a queue, all custom
* resource metric values are initialized to 0, just like the mandatory
* resource metrics.
*
* @param customResources
* Map containing all custom resource types
* @param registry
* of the metric type
* @param metricPrefix
* prefix in metric name
* @param metricDesc
* suffix for metric name
*/
public void registerCustomResources(Map<String, Long> customResources, MetricsRegistry registry, String metricPrefix, String metricDesc) {
for (Map.Entry<String, Long> entry : customResources.entrySet()) {
String resourceName = entry.getKey();
Long resourceValue = entry.getValue();
MutableGaugeLong resourceMetric = ((MutableGaugeLong) (registry.get(metricPrefix + resourceName)));
if (resourceMetric == null) {
resourceMetric = registry.newGauge(metricPrefix + resourceName, metricDesc.replace("NAME", resourceName), 0L);
}
resourceMetric.set(resourceValue);
}
} | 3.26 |
hadoop_MetricsLoggerTask_run_rdh | /**
* Write metrics to the metrics appender when invoked.
*/
@Override
public void run() {
// Skip querying metrics if there are no known appenders.
if (((!metricsLog.isInfoEnabled()) || (!hasAppenders(metricsLog))) || (f0 == null)) {
return;
}
metricsLog.info((" >> Begin " + nodeName) + " metrics dump");
final MBeanServer server = ManagementFactory.getPlatformMBeanServer();
// Iterate over each MBean.
for (final ObjectName mbeanName : server.queryNames(f0, null)) {
try {
MBeanInfo mBeanInfo = server.getMBeanInfo(mbeanName);
final String mBeanNameName = MBeans.getMbeanNameName(mbeanName);
final Set<String> attributeNames = getFilteredAttributes(mBeanInfo);
final AttributeList attributes = server.getAttributes(mbeanName, attributeNames.toArray(new String[attributeNames.size()]));
for (Object o : attributes) {
final Attribute v7 = ((Attribute) (o));
final Object value = v7.getValue();
final String valueStr = (value != null) ? value.toString() : "null";
// Truncate the value if it is too long
metricsLog.info((((mBeanNameName + ":") + v7.getName()) + "=") + trimLine(valueStr));
}
} catch (Exception e) {
metricsLog.error((("Failed to get " + nodeName) + " metrics for mbean ") + mbeanName.toString(), e);
}
}
metricsLog.info((" << End " + nodeName) + " metrics dump");
} | 3.26 |
hadoop_MetricsLoggerTask_hasAppenders_rdh | // TODO : hadoop-logging module to hide log4j implementation details, this method
// can directly call utility from hadoop-logging.
private static boolean hasAppenders(Logger logger) {
return log4j.Logger.getLogger(logger.getName()).getAllAppenders().hasMoreElements();
} | 3.26 |
hadoop_MetricsLoggerTask_getFilteredAttributes_rdh | /**
* Get the list of attributes for the MBean, filtering out a few attribute
* types.
*/
private static Set<String> getFilteredAttributes(MBeanInfo mBeanInfo) {
Set<String> attributeNames = new HashSet<>();
for (MBeanAttributeInfo attributeInfo : mBeanInfo.getAttributes()) {
if (((!attributeInfo.getType().equals("javax.management.openmbean.TabularData")) && (!attributeInfo.getType().equals("javax.management.openmbean.CompositeData"))) && (!attributeInfo.getType().equals("[Ljavax.management.openmbean.CompositeData;"))) {
attributeNames.add(attributeInfo.getName());
}
}
return attributeNames;
} | 3.26 |
hadoop_HistoryServerStateStoreServiceFactory_getStore_rdh | /**
* Constructs an instance of the configured storage class
*
* @param conf
* the configuration
* @return the state storage instance
*/
public static HistoryServerStateStoreService getStore(Configuration conf) {
Class<? extends HistoryServerStateStoreService> storeClass = HistoryServerNullStateStoreService.class;
boolean recoveryEnabled = conf.getBoolean(JHAdminConfig.MR_HS_RECOVERY_ENABLE,
JHAdminConfig.DEFAULT_MR_HS_RECOVERY_ENABLE);
if (recoveryEnabled) {
storeClass = conf.getClass(JHAdminConfig.MR_HS_STATE_STORE, null, HistoryServerStateStoreService.class);
if (storeClass == null) {
throw new RuntimeException("Unable to locate storage class, check " + JHAdminConfig.MR_HS_STATE_STORE);
}
}
return ReflectionUtils.newInstance(storeClass, conf);
} | 3.26 |
hadoop_FilterFileSystem_open_rdh | /**
* Opens an FSDataInputStream at the indicated Path.
*
* @param f
* the file name to open
* @param bufferSize
* the size of the buffer to be used.
*/
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
return fs.open(f, bufferSize);
} | 3.26 |
hadoop_FilterFileSystem_getWorkingDirectory_rdh | /**
* Get the current working directory for the given file system
*
* @return the directory pathname
*/
@Override
public Path getWorkingDirectory() {
return fs.getWorkingDirectory();
} | 3.26 |
hadoop_FilterFileSystem_m0_rdh | /**
* List files and their block locations in a directory.
*/
@Override
public RemoteIterator<LocatedFileStatus> m0(Path f) throws IOException {
return fs.listLocatedStatus(f);
} | 3.26 |
hadoop_FilterFileSystem_getRawFileSystem_rdh | /**
* Get the raw file system
*
* @return FileSystem being filtered
*/
public FileSystem getRawFileSystem() {
return fs;
} | 3.26 |
hadoop_FilterFileSystem_m4_rdh | // FileSystem
@Override
public Path m4(Path path, String snapshotName) throws IOException {
return fs.createSnapshot(path, snapshotName);
} | 3.26 |
hadoop_FilterFileSystem_checkPath_rdh | // /////////////////////////////////////////////////////////////
// FileSystem
// /////////////////////////////////////////////////////////////
/**
* Check that a Path belongs to this FileSystem.
*/
@Override
protected void checkPath(Path path) {
fs.checkPath(path);
} | 3.26 |
hadoop_FilterFileSystem_getFileStatus_rdh | /**
* Get file status.
*/
@Override
public FileStatus getFileStatus(Path f) throws IOException {
return fs.getFileStatus(f);
} | 3.26 |
hadoop_FilterFileSystem_initialize_rdh | /**
* Called after a new FileSystem instance is constructed.
*
* @param name
* a uri whose authority section names the host, port, etc.
* for this FileSystem
* @param conf
* the configuration
*/
@Override
public void initialize(URI name, Configuration conf) throws IOException {
super.initialize(name, conf);
// this is less than ideal, but existing filesystems sometimes neglect
// to initialize the embedded filesystem
if (fs.getConf() == null) {
fs.initialize(name, conf);
}
String scheme = name.getScheme();
if (!scheme.equals(fs.getUri().getScheme())) {
swapScheme = scheme;
}
} | 3.26 |
hadoop_FilterFileSystem_completeLocalOutput_rdh | /**
* Called when we're all done writing to the target. A local FS will
* do nothing, because we've written to exactly the right place. A remote
* FS will copy the contents of tmpLocalFile to the correct target at
* fsOutputFile.
*/
@Override
public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile) throws IOException {
fs.completeLocalOutput(fsOutputFile, tmpLocalFile);
} | 3.26 |
hadoop_FilterFileSystem_renameSnapshot_rdh | // FileSystem
@Override
public void renameSnapshot(Path path, String snapshotOldName, String snapshotNewName)
throws IOException {
fs.renameSnapshot(path, snapshotOldName, snapshotNewName);
} | 3.26 |
hadoop_FilterFileSystem_delete_rdh | /**
* Delete a file
*/
@Override
public boolean delete(Path f, boolean recursive) throws IOException {
return fs.delete(f, recursive);
} | 3.26 |
hadoop_FilterFileSystem_copyFromLocalFile_rdh | /**
* The src file is on the local disk. Add it to FS at
* the given dst name.
* delSrc indicates if the source should be removed
*/
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, Path dst) throws IOException {
fs.copyFromLocalFile(delSrc, overwrite, src, dst);
} | 3.26 |
hadoop_FilterFileSystem_deleteSnapshot_rdh | // FileSystem
@Override
public void deleteSnapshot(Path path, String snapshotName)
throws IOException {
fs.deleteSnapshot(path, snapshotName);
} | 3.26 |
hadoop_FilterFileSystem_listStatus_rdh | /**
* List files in a directory.
*/
@Override
public FileStatus[] listStatus(Path f) throws IOException {
return fs.listStatus(f);
} | 3.26 |
hadoop_FilterFileSystem_getDefaultBlockSize_rdh | // path variants delegate to underlying filesystem
@Override
public long getDefaultBlockSize(Path f) {
return fs.getDefaultBlockSize(f);
} | 3.26 |
hadoop_FilterFileSystem_getChildFileSystems_rdh | // FileSystem
@Override
public FileSystem[] getChildFileSystems() {
return new FileSystem[]{ fs };
} | 3.26 |
hadoop_FilterFileSystem_getUsed_rdh | /**
* Return the total size of all files from a specified path.
*/
@Override
public long getUsed(Path path) throws IOException {
return fs.getUsed(path);
} | 3.26 |
hadoop_FilterFileSystem_getUri_rdh | /**
* Returns a URI whose scheme and authority identify this FileSystem.
*/
@Override
public URI getUri() {
return fs.getUri();
} | 3.26 |
hadoop_FilterFileSystem_makeQualified_rdh | /**
* Make sure that a path specifies a FileSystem.
*/
@Override
public Path makeQualified(Path path) {
Path fqPath = fs.makeQualified(path);
// swap in our scheme if the filtered fs is using a different scheme
if (swapScheme != null) {
try {
// NOTE: should deal with authority, but too much other stuff is broken
fqPath = new Path(new URI(swapScheme, fqPath.toUri().getSchemeSpecificPart(), null));
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e);
}
}
return fqPath;
} | 3.26 |
hadoop_FilterFileSystem_copyToLocalFile_rdh | /**
* The src file is under FS, and the dst is on the local disk.
* Copy it from FS control to the local dst name.
* delSrc indicates if the src will be removed or not.
*/
@Override
public void copyToLocalFile(boolean delSrc, Path src, Path dst) throws IOException {
fs.copyToLocalFile(delSrc, src, dst);
} | 3.26 |
hadoop_NMClientAsync_onContainerReInitialize_rdh | /**
* Callback for container re-initialization request.
*
* @param containerId
* the Id of the container to be Re-Initialized.
*/
public void onContainerReInitialize(ContainerId containerId) {
} | 3.26 |
hadoop_NMClientAsync_onCommitLastReInitialization_rdh | /**
* Callback for commit of last re-initialization.
*
* @param containerId
* the Id of the container to commit reInitialize.
*/
public void onCommitLastReInitialization(ContainerId containerId) {
} | 3.26 |
hadoop_NMClientAsync_onContainerRestart_rdh | /**
* Callback for container restart.
*
* @param containerId
* the Id of the container to restart.
*/
public void onContainerRestart(ContainerId containerId) {
} | 3.26 |
hadoop_NMClientAsync_onCommitLastReInitializationError_rdh | /**
* Error Callback for commit of last re-initialization.
*
* @param containerId
* the Id of the container to commit reInitialize.
* @param t
* a Throwable.
*/
public void onCommitLastReInitializationError(ContainerId containerId, Throwable t) {
} | 3.26 |
hadoop_NMClientAsync_onContainerReInitializeError_rdh | /**
* Error Callback for container re-initialization request.
*
* @param containerId
* the Id of the container to be Re-Initialized.
* @param t
* a Throwable.
*/
public void onContainerReInitializeError(ContainerId containerId, Throwable t) {
} | 3.26 |
hadoop_NMClientAsync_onRollbackLastReInitialization_rdh | /**
* Callback for rollback of last re-initialization.
*
* @param containerId
* the Id of the container to restart.
*/
public void onRollbackLastReInitialization(ContainerId containerId) {
} | 3.26 |
hadoop_NMClientAsync_onContainerRestartError_rdh | /**
* Error Callback for container restart.
*
* @param containerId
* the Id of the container to restart.
* @param t
* a Throwable.
*/
public void onContainerRestartError(ContainerId containerId, Throwable t) {
} | 3.26 |
hadoop_InstrumentedWriteLock_startLockTiming_rdh | /**
* Starts timing for the instrumented write lock.
*/
@Override
protected void startLockTiming() {
if (readWriteLock.getWriteHoldCount() == 1) {
writeLockHeldTimeStamp = getTimer().monotonicNow();
}
} | 3.26 |
hadoop_StartupProgress_setTotal_rdh | /**
* Sets the total associated with the specified phase and step. For example,
* this can be used while loading edits to indicate the number of operations to
* be applied.
*
* @param phase
* Phase to set
* @param step
* Step to set
* @param total
* long to set
*/
public void setTotal(Phase phase, Step step, long total) {
if (!isComplete(phase)) {
lazyInitStep(phase, step).total = total;
}
} | 3.26 |
hadoop_StartupProgress_setSize_rdh | /**
* Sets the optional size in bytes associated with the specified phase. For
* example, this can be used while loading fsimage to indicate the size of the
* fsimage file.
*
* @param phase
* Phase to set
* @param size
* long to set
*/
public void setSize(Phase phase, long size) {
if (!isComplete()) {
phases.get(phase).size = size;
}
} | 3.26 |
hadoop_StartupProgress_getCounter_rdh | /**
* Returns a counter associated with the specified phase and step. Typical
* usage is to increment a counter within a tight loop. Callers may use this
* method to obtain a counter once and then increment that instance repeatedly
* within a loop. This prevents redundant lookup operations and object
* creation within the tight loop. Incrementing the counter is an atomic
* operation, so there is no risk of lost updates even if multiple threads
* increment the same counter.
*
* @param phase
* Phase to get
* @param step
* Step to get
* @return Counter associated with phase and step
*/
public Counter getCounter(Phase phase, Step step) {
if (!isComplete(phase)) {
final StepTracking tracking = lazyInitStep(phase, step);
return new Counter() {
@Override
public void increment() {
tracking.count.incrementAndGet();
}
};
} else {
return new Counter() {
@Override
public void increment() {
// no-op, because startup has completed
}
};
}
} | 3.26 |
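The getCounter Javadoc above spells out the intended pattern: fetch the Counter once, then increment it inside the tight loop. A minimal sketch of that pattern using only methods shown in this section; the Phase and Step values are placeholders supplied by the caller, and the import paths are assumed from the usual startupprogress package layout:

```java
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;

final class CounterUsageSketch {
  static void applyOps(StartupProgress prog, Phase phase, Step step, long numOps) {
    prog.beginStep(phase, step);
    prog.setTotal(phase, step, numOps);             // declared up front for progress %
    Counter counter = prog.getCounter(phase, step); // one lookup, reused in the loop
    for (long i = 0; i < numOps; i++) {
      // ... apply one operation ...
      counter.increment();                          // atomic, safe across threads
    }
    prog.endStep(phase, step);
  }
}
```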
hadoop_StartupProgress_lazyInitStep_rdh | /**
* Lazily initializes the internal data structure for tracking the specified
* phase and step. Returns either the newly initialized data structure or the
* existing one. Initialization is atomic, so there is no risk of lost updates
* even if multiple threads attempt to initialize the same step simultaneously.
*
* @param phase
* Phase to initialize
* @param step
* Step to initialize
* @return StepTracking newly initialized, or existing if found
*/
private StepTracking lazyInitStep(Phase phase, Step step) {
ConcurrentMap<Step, StepTracking> steps = phases.get(phase).steps;
if (!steps.containsKey(step)) {
steps.putIfAbsent(step, new StepTracking());
}
return steps.get(step);
} | 3.26 |
hadoop_StartupProgress_isComplete_rdh | /**
* Returns true if the given startup phase has been completed.
*
* @param phase
* Which phase to check for completion
* @return boolean true if the given startup phase has completed.
*/
private boolean isComplete(Phase phase) {
return m0(phase) == Status.COMPLETE;
} | 3.26 |
hadoop_StartupProgress_createView_rdh | /**
* Creates a {@link StartupProgressView} containing data cloned from this
* StartupProgress. Subsequent updates to this StartupProgress will not be
* shown in the view. This gives a consistent, unchanging view for callers
* that need to perform multiple related read operations. Calculations that
* require aggregation, such as overall percent complete, will not be impacted
* by mutations performed in other threads mid-way through the calculation.
*
* @return StartupProgressView containing cloned data
*/
public StartupProgressView createView() {
return new StartupProgressView(this);
} | 3.26 |
hadoop_StartupProgress_endStep_rdh | /**
* Ends execution of the specified step within the specified phase. This is
* a no-op if the phase is already completed.
*
* @param phase
* Phase within which the step should be ended
* @param step
* Step to end
*/
public void endStep(Phase phase, Step step) {
if (!isComplete(phase)) {
lazyInitStep(phase, step).endTime = monotonicNow();
}
LOG.debug("End of the step. Phase: {}, Step: {}", phase, step);
} | 3.26 |
hadoop_StartupProgress_beginStep_rdh | /**
* Begins execution of the specified step within the specified phase. This is
* a no-op if the phase is already completed.
*
* @param phase
* Phase within which the step should be started
* @param step
* Step to begin
*/
public void beginStep(Phase phase, Step step) {
if (!isComplete(phase)) {
lazyInitStep(phase, step).beginTime = monotonicNow();
}
LOG.debug("Beginning of the step. Phase: {}, Step: {}", phase, step);
} | 3.26 |
hadoop_StartupProgress_m1_rdh | /**
* Sets the optional file name associated with the specified phase. For
* example, this can be used while loading fsimage to indicate the full path to
* the fsimage file.
*
* @param phase
* Phase to set
* @param file
* String file name to set
*/
public void m1(Phase phase, String file) {
if (!isComplete()) {
phases.get(phase).file = file;
}
} | 3.26 |
hadoop_StartupProgress_setCount_rdh | /**
* Sets counter to the specified value.
*
* @param phase
* Phase to set
* @param step
* Step to set
* @param count
* long to set
*/
public void setCount(Phase phase, Step step, long count) {
lazyInitStep(phase, step).count.set(count);
} | 3.26 |
hadoop_StartupProgress_beginPhase_rdh | /**
* Begins execution of the specified phase.
*
* @param phase
* Phase to begin
*/
public void beginPhase(Phase phase) {
if (!isComplete()) {
phases.get(phase).beginTime = monotonicNow();
}
LOG.debug("Beginning of the phase: {}", phase);
} | 3.26 |
hadoop_StartupProgress_m0_rdh | /**
* Returns the current run status of the specified phase.
*
* @param phase
* Phase to get
* @return Status run status of phase
*/
public Status m0(Phase phase) {
PhaseTracking tracking = phases.get(phase);
if (tracking.beginTime == Long.MIN_VALUE) {
return Status.PENDING;
} else if (tracking.endTime == Long.MIN_VALUE) {
return Status.RUNNING;
} else {
return Status.COMPLETE;
}
} | 3.26 |
hadoop_StartupProgress_endPhase_rdh | /**
* Ends execution of the specified phase.
*
* @param phase
* Phase to end
*/
public void endPhase(Phase phase) {
if (!isComplete()) {
phases.get(phase).endTime = monotonicNow();
}
LOG.debug("End of the phase: {}", phase);
} | 3.26 |
hadoop_StripedBlockChecksumReconstructor_clearBuffers_rdh | /**
* Clear all associated buffers.
*/
private void clearBuffers() {
getStripedReader().clearBuffers();
targetBuffer.clear();
} | 3.26 |
hadoop_StripedBlockChecksumReconstructor_getBufferArray_rdh | /**
* Gets an array corresponding the buffer.
*
* @param buffer
* the input buffer.
* @return the array with content of the buffer.
*/
private static byte[] getBufferArray(ByteBuffer buffer) {
byte[] buff = new byte[buffer.remaining()];
if (buffer.hasArray()) {
buff = buffer.array();
} else {
buffer.slice().get(buff);
}
return buff;
} | 3.26 |
hadoop_FixedLengthInputFormat_getRecordLength_rdh | /**
* Get record length value
*
* @param conf
* configuration
* @return the record length, zero means none was set
*/
public static int getRecordLength(Configuration conf) {
return conf.getInt(FIXED_RECORD_LENGTH, 0);
} | 3.26 |
hadoop_FixedLengthInputFormat_setRecordLength_rdh | /**
* Set the length of each record
*
* @param conf
* configuration
* @param recordLength
* the length of a record
*/
public static void setRecordLength(Configuration conf, int recordLength) {
conf.setInt(FIXED_RECORD_LENGTH, recordLength);
} | 3.26 |
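Together, setRecordLength() and getRecordLength() round-trip the fixed record length through the job configuration. A hedged sketch of a typical job setup; the 100-byte length and job name are illustrative, and the mapreduce-API import path is assumed:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FixedLengthInputFormat;

final class FixedLengthSetupSketch {
  static Job configure() throws Exception {
    Configuration conf = new Configuration();
    FixedLengthInputFormat.setRecordLength(conf, 100);    // every record is 100 bytes

    Job job = Job.getInstance(conf, "fixed-length-read"); // job name is illustrative
    job.setInputFormatClass(FixedLengthInputFormat.class);

    // Reading it back returns 0 if it was never set, per getRecordLength() above.
    assert FixedLengthInputFormat.getRecordLength(job.getConfiguration()) == 100;
    return job;
  }
}
```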
hadoop_IncrementalBlockReportManager_sendIBRs_rdh | /**
* Send IBRs to namenode.
*/
void sendIBRs(DatanodeProtocol namenode, DatanodeRegistration registration, String bpid, String nnRpcLatencySuffix) throws IOException {
// Generate a list of the pending reports for each storage under the lock
final StorageReceivedDeletedBlocks[] reports = generateIBRs();
if (reports.length == 0) {
// Nothing new to report.
return;
}
// Send incremental block reports to the Namenode outside the lock
if (LOG.isDebugEnabled()) {
LOG.debug("call blockReceivedAndDeleted: " + Arrays.toString(reports));
}
boolean success = false;
final long startTime = monotonicNow();
try {
namenode.blockReceivedAndDeleted(registration, bpid, reports);
success = true;
} finally {
if (success) {
dnMetrics.addIncrementalBlockReport(monotonicNow() - startTime, nnRpcLatencySuffix);
lastIBR = startTime;
} else {
// If we didn't succeed in sending the report, put all of the
// blocks back onto our queue, but only in the case where we
// didn't put something newer in the meantime.
putMissing(reports);
LOG.warn("Failed to call blockReceivedAndDeleted: {}, nnId: {}" + ", duration(ms): {}", Arrays.toString(reports), nnRpcLatencySuffix, monotonicNow() - startTime);
}
}
} | 3.26 |
hadoop_IncrementalBlockReportManager_addRDBI_rdh | /**
* Add a block for notification to NameNode.
* If another entry exists for the same block it is removed.
*/
@VisibleForTesting
synchronized void addRDBI(ReceivedDeletedBlockInfo rdbi, DatanodeStorage storage) {
// Make sure another entry for the same block is first removed.
// There may only be one such entry.
for (PerStorageIBR perStorage : pendingIBRs.values()) {
if (perStorage.remove(rdbi.getBlock()) != null) {
break;
}
}
getPerStorageIBR(storage).put(rdbi);
} | 3.26 |
hadoop_IncrementalBlockReportManager_getPerStorageIBR_rdh | /**
*
* @return the pending IBR for the given {@code storage}
*/
private PerStorageIBR getPerStorageIBR(DatanodeStorage storage) {
PerStorageIBR perStorage = pendingIBRs.get(storage);
if (perStorage == null) {
// This is the first time we are adding incremental BR state for
// this storage so create a new map. This is required once per
// storage, per service actor.
perStorage = new PerStorageIBR(dnMetrics);
pendingIBRs.put(storage, perStorage);
}
return perStorage;
} | 3.26 |
hadoop_IncrementalBlockReportManager_remove_rdh | /**
* Remove the given block from this IBR
*
* @return the removed entry if the block was present; null otherwise.
*/
ReceivedDeletedBlockInfo remove(Block block) {
return blocks.remove(block);
} | 3.26 |
hadoop_IncrementalBlockReportManager_putMissing_rdh | /**
* Put the all blocks to this IBR unless the block already exists.
*
* @param rdbis
* list of blocks to add.
* @return the number of missing blocks added.
*/
int putMissing(ReceivedDeletedBlockInfo[] rdbis) {
int count = 0;
for (ReceivedDeletedBlockInfo rdbi : rdbis) {
if (!blocks.containsKey(rdbi.getBlock())) {
put(rdbi);
count++;
}
}
return count;
} | 3.26 |
hadoop_IncrementalBlockReportManager_put_rdh | /**
* Put the block to this IBR.
*/
void put(ReceivedDeletedBlockInfo rdbi) {
blocks.put(rdbi.getBlock(), rdbi);
increaseBlocksCounter(rdbi);
} | 3.26 |
hadoop_FederationStateStoreClientMetrics_getNumFailedCallsForMethod_rdh | // Getters for unit testing
@VisibleForTesting
static long getNumFailedCallsForMethod(String methodName) {
return API_TO_FAILED_CALLS.get(methodName).value();
} | 3.26 |
hadoop_Container_setVersion_rdh | /**
* Set the version of this container.
*
* @param version
* of this container.
*/
@Private
@Unstable
public void setVersion(int version) {
throw new UnsupportedOperationException();
} | 3.26 |
hadoop_Container_getVersion_rdh | /**
* Get the version of this container. The version will be incremented when
* a container is updated.
*
* @return version of this container.
*/
@Private
@Unstable
public int getVersion() {
return 0;
} | 3.26 |
hadoop_Container_setAllocationRequestId_rdh | /**
* Set the optional <em>ID</em> corresponding to the original
* {@code ResourceRequest}s which is satisfied by this allocated {@code Container}.
* <p>
* The scheduler may return multiple {@code AllocateResponse}s corresponding
* to the same ID as and when scheduler allocates {@code Container}s.
* <b>Applications</b> can continue to completely ignore the returned ID in
* the response and use the allocation for any of their outstanding requests.
* If the ID is not set, scheduler will continue to work as previously and all
* allocated {@code Container}(s) will have the default ID, -1.
* <p>
*
* @param allocationRequestID
* the <em>ID</em> corresponding to the original
* allocation request which is satisfied by this
* allocation.
*/
@Private
@Unstable
public void setAllocationRequestId(long allocationRequestID) {
throw new UnsupportedOperationException();
} | 3.26 |
hadoop_RpcScheduler_addResponseTime_rdh | /**
* Store a processing time value for an RPC call into this scheduler.
*
* @param callName
* The name of the call.
* @param schedulable
* The schedulable representing the incoming call.
* @param details
* The details of processing time.
*/
@SuppressWarnings("deprecation")
default void addResponseTime(String callName, Schedulable schedulable, ProcessingDetails details) {
// For the sake of backwards compatibility with old implementations of
// this interface, a default implementation is supplied which uses the old
// method. All new implementations MUST override this interface and should
// NOT use the other addResponseTime method.
int queueTime = ((int) (details.get(Timing.QUEUE, RpcMetrics.DEFAULT_METRIC_TIME_UNIT)));
int processingTime = ((int) (details.get(Timing.PROCESSING, RpcMetrics.DEFAULT_METRIC_TIME_UNIT)));
addResponseTime(callName, schedulable.getPriorityLevel(), queueTime, processingTime);
} | 3.26 |
hadoop_UnresolvedPathException_getResolvedPath_rdh | /**
* Return a path with the link resolved with the target.
*/
public Path getResolvedPath() {
// If the path is absolute we can throw out the preceding part and
// just append the remainder to the target, otherwise append each
// piece to resolve the link in path.
boolean noRemainder = (remainder == null) || "".equals(remainder);
Path target = new Path(linkTarget);
if (target.isUriPathAbsolute()) {
return noRemainder ? target : new Path(target, remainder);
} else {
return noRemainder ? new Path(preceding, target) : new Path(new Path(preceding, linkTarget), remainder);
}
} | 3.26 |
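The two branches of getResolvedPath() boil down to Path composition. A small worked sketch of both cases with illustrative paths, none taken from the exception class itself:

```java
import org.apache.hadoop.fs.Path;

final class ResolveSketch {
  static void demo() {
    // Absolute link target: the preceding part is discarded entirely.
    Path absTarget = new Path("/data/real");
    Path resolvedAbs = new Path(absTarget, "sub/file");       // /data/real/sub/file

    // Relative link target: appended under the preceding path, then the remainder.
    Path preceding = new Path("/user/alice");
    Path resolvedRel = new Path(new Path(preceding, "realdir"), "sub/file");
    System.out.println(resolvedAbs + " then " + resolvedRel); // .../realdir/sub/file
  }
}
```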
hadoop_CacheDirectiveStats_hasExpired_rdh | /**
*
* @return Whether this directive has expired.
*/
public boolean hasExpired() {
return hasExpired;
} | 3.26 |
hadoop_CacheDirectiveStats_build_rdh | /**
* Builds a new CacheDirectiveStats populated with the set properties.
*
* @return New CacheDirectiveStats.
*/
public CacheDirectiveStats build() {
return new CacheDirectiveStats(bytesNeeded, bytesCached, filesNeeded, filesCached, hasExpired);
} | 3.26 |