name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68)
---|---|---|
hadoop_MountTableRefresherService_getClientRemover_rdh | /**
* Create cache entry remove listener.
*/
private RemovalListener<String, RouterClient> getClientRemover() {
return new RemovalListener<String, RouterClient>() {
@Override
public void onRemoval(RemovalNotification<String, RouterClient> notification) {
closeRouterClient(notification.getValue());
}
};
} | 3.26 |
hadoop_PlacementRule_getName_rdh | /**
* Return the name of the rule.
*
* @return The name of the rule, the fully qualified class name.
*/
public String getName() {
return this.getClass().getName();
} | 3.26 |
hadoop_PlacementRule_setConfig_rdh | /**
* Set the config based on the passed in argument. This construct is used to
* not pollute this abstract class with implementation specific references.
*
* @param initArg
* initialization arguments.
*/
public void setConfig(Object initArg) {
// Default is a noop
} | 3.26 |
hadoop_IOStatisticsLogging_mapToSortedString_rdh | /**
* Given a map, produce a string with all the values, sorted.
* Needs to create a treemap and insert all the entries.
*
* @param sb
* string buffer to append to
* @param type
* type (for output)
* @param map
* map to evaluate
* @param <E>
* type of values of the map
*/
private static <E> void mapToSortedString(StringBuilder sb, final String type, final Map<String, E> map, final Predicate<E> isEmpty) {
mapToString(sb, type, m0(map, isEmpty), "\n");
} | 3.26 |
hadoop_IOStatisticsLogging_m1_rdh | /**
* On demand stringifier of an IOStatisticsSource instance.
* <p>
* Whenever this object's toString() method is called, it evaluates the
* statistics.
* <p>
 * This is designed to be affordable to use in log statements.
*
* @param source
* source of statistics -may be null.
* @return an object whose toString() operation returns the current values.
 */
public static Object m1(@Nullable IOStatisticsSource source) {
return new SourceToString(source);
} | 3.26 |
hadoop_IOStatisticsLogging_toString_rdh | /**
* Evaluate and stringify the statistics.
*
* @return a string value.
*/
@Override
public String toString() {
return f0 != null ? ioStatisticsToString(f0) : IOStatisticsBinding.NULL_SOURCE;
} | 3.26 |
hadoop_IOStatisticsLogging_logIOStatisticsAtLevel_rdh | /**
* A method to log IOStatistics from a source at different levels.
*
* @param log
* Logger for logging.
* @param level
* LOG level.
* @param source
* Source to LOG.
*/
public static void logIOStatisticsAtLevel(Logger log, String level, Object source) {
IOStatistics stats = retrieveIOStatistics(source);
if (stats != null) {
switch (level.toLowerCase(Locale.US)) {
case IOSTATISTICS_LOGGING_LEVEL_INFO :
LOG.info("IOStatistics: {}", ioStatisticsToPrettyString(stats));
break;
case IOSTATISTICS_LOGGING_LEVEL_ERROR :
LOG.error("IOStatistics: {}", ioStatisticsToPrettyString(stats));
break;
case IOSTATISTICS_LOGGING_LEVEL_WARN :
LOG.warn("IOStatistics: {}", ioStatisticsToPrettyString(stats));
break;
default :
logIOStatisticsAtDebug(log, "IOStatistics: {}", source);
}
}
} | 3.26 |
hadoop_IOStatisticsLogging_mapToString_rdh | /**
* Given a map, add its entryset to the string.
* The entries are only sorted if the source entryset
* iterator is sorted, such as from a TreeMap.
*
* @param sb
* string buffer to append to
* @param type
* type (for output)
* @param map
* map to evaluate
* @param separator
* separator
* @param <E>
* type of values of the map
*/
private static <E> void mapToString(StringBuilder sb, final String type, final Map<String, E> map, final String separator) {
int count = 0;
sb.append(type);
sb.append("=(");
for (Map.Entry<String, E> entry : map.entrySet()) {
if (count > 0) {
sb.append(separator);
}
count++;
sb.append(IOStatisticsBinding.entryToString(entry.getKey(), entry.getValue()));
}
sb.append(");\n");
} | 3.26 |
hadoop_IOStatisticsLogging_ioStatisticsToPrettyString_rdh | /**
* Convert IOStatistics to a string form, with all the metrics sorted
* and empty value stripped.
* This is more expensive than the simple conversion, so should only
* be used for logging/output where it's known/highly likely that the
* caller wants to see the values. Not for debug logging.
*
* @param statistics
* A statistics instance.
* @return string value or the empty string if null
*/
public static String ioStatisticsToPrettyString(@Nullable final IOStatistics statistics) {
if (statistics != null) {
StringBuilder sb = new StringBuilder();
mapToSortedString(sb, "counters", statistics.counters(), p -> p == 0);
mapToSortedString(sb, "\ngauges", statistics.gauges(), p -> p == 0);
mapToSortedString(sb, "\nminimums", statistics.minimums(), p -> p < 0);
mapToSortedString(sb, "\nmaximums", statistics.maximums(), p -> p < 0);
mapToSortedString(sb, "\nmeans", statistics.meanStatistics(), MeanStatistic::isEmpty);
return sb.toString();
} else {
return "";
}
} | 3.26 |
hadoop_IOStatisticsLogging_demandStringifyIOStatistics_rdh | /**
* On demand stringifier of an IOStatistics instance.
* <p>
* Whenever this object's toString() method is called, it evaluates the
* statistics.
* <p>
 * This is for use in log statements where the cost of creating
 * this entry is low; it is affordable to use in log statements.
*
* @param statistics
* statistics to stringify -may be null.
* @return an object whose toString() operation returns the current values.
*/
public static Object demandStringifyIOStatistics(@Nullable IOStatistics statistics) {
return new StatisticsToString(statistics);
} | 3.26 |
hadoop_IOStatisticsLogging_ioStatisticsSourceToString_rdh | /**
* Extract the statistics from a source object -or ""
* if it is not an instance of {@link IOStatistics},
* {@link IOStatisticsSource} or the retrieved
* statistics are null.
* <p>
* Exceptions are caught and downgraded to debug logging.
*
* @param source
* source of statistics.
* @return a string for logging.
*/
public static String ioStatisticsSourceToString(@Nullable Object source) {
try {
return ioStatisticsToString(retrieveIOStatistics(source));
} catch (RuntimeException e) {
LOG.debug("Ignoring", e);
return "";
}
} | 3.26 |
hadoop_IOStatisticsLogging_logIOStatisticsAtDebug_rdh | /**
* Extract any statistics from the source and log to
* this class's log at debug, if
* the log is set to log at debug.
* No-op if logging is not at debug or the source is null/of
* the wrong type/doesn't provide statistics.
*
* @param message
* message for log -this must contain "{}" for the
* statistics report to actually get logged.
* @param source
* source object
*/
public static void logIOStatisticsAtDebug(String message, Object source) {
logIOStatisticsAtDebug(LOG, message, source);
} | 3.26 |
hadoop_AccessTokenProvider_getConf_rdh | /**
* Return the conf.
*
* @return the conf.
*/
@Override
public Configuration getConf() {
return conf;
} | 3.26 |
hadoop_AccessTokenProvider_setConf_rdh | /**
* Set the conf.
*
* @param configuration
* New configuration.
*/
@Override
public void setConf(Configuration configuration) {
this.conf = configuration;
} | 3.26 |
hadoop_PageBlobFormatHelpers_toShort_rdh | /**
* Retrieves a short from the given two bytes.
*/
public static short toShort(byte firstByte, byte secondByte) {
return ByteBuffer.wrap(new byte[]{ firstByte, secondByte }).getShort();
} | 3.26 |
hadoop_PageBlobFormatHelpers_fromShort_rdh | /**
* Stores the given short as a two-byte array.
*/
public static byte[] fromShort(short s) {
return ByteBuffer.allocate(2).putShort(s).array();
} | 3.26 |
hadoop_SetupJobStage_m0_rdh | /**
* Execute the job setup stage.
*
 * @param deleteMarker
* should any success marker be deleted.
* @return the job attempted directory.
* @throws IOException
* failure.
*/
@Override
protected Path m0(final Boolean deleteMarker) throws IOException {
final Path path = getJobAttemptDir();
LOG.info("{}: Creating Job Attempt directory {}", getName(), path);
createNewDirectory("Job setup", path);
createNewDirectory("Creating task manifest dir", getTaskManifestDir());
// delete any success marker if so instructed.
if (deleteMarker) {
delete(getStageConfig().getJobSuccessMarkerPath(), false);
}
return path;
} | 3.26 |
hadoop_PathOutputCommitter_hasOutputPath_rdh | /**
* Predicate: is there an output path?
*
* @return true if we have an output path set, else false.
*/
public boolean hasOutputPath() {
return getOutputPath() != null;
} | 3.26 |
hadoop_GetRouterRegistrationResponse_newInstance_rdh | /**
* API response for retrieving a single router registration present in the state
* store.
*/
| 3.26 |
hadoop_OracleDataDrivenDBInputFormat_getSplitter_rdh | /**
*
* @return the DBSplitter implementation to use to divide the table/query into InputSplits.
*/
@Override
protected DBSplitter getSplitter(int sqlDataType) {
switch (sqlDataType) {
case Types.DATE :
case Types.TIME :
case Types.TIMESTAMP :
return new OracleDateSplitter();
default :
return super.getSplitter(sqlDataType);
}
} | 3.26 |
hadoop_FSSchedulerConfigurationStore_logMutation_rdh | /**
* Update and persist latest configuration in temp file.
*
* @param logMutation
* configuration change to be persisted in write ahead log
* @throws IOException
* throw IOE when write temp configuration file fail
*/
@Override
public void logMutation(LogMutation logMutation) throws IOException {
LOG.info(new GsonBuilder().serializeNulls().create().toJson(logMutation));
oldConf = new Configuration(schedConf);
Map<String, String> mutations = logMutation.getUpdates();
for (Map.Entry<String, String> kv : mutations.entrySet()) {
if (kv.getValue() == null) {
this.schedConf.unset(kv.getKey());
} else {
this.schedConf.set(kv.getKey(), kv.getValue());
}
}
tempConfigPath = m0(schedConf);
} | 3.26 |
hadoop_FSSchedulerConfigurationStore_confirmMutation_rdh | /**
*
* @param pendingMutation
* the log mutation to apply
* @param isValid
* if true, finalize temp configuration file
* if false, remove temp configuration file and rollback
* @throws Exception
* throw IOE when write temp configuration file fail
*/
@Override
public void confirmMutation(LogMutation pendingMutation, boolean isValid) throws Exception {
if ((pendingMutation == null) || (tempConfigPath == null)) {
LOG.warn("pendingMutation or tempConfigPath is null, do nothing");
return;
}
if (isValid) {
finalizeFileSystemFile();
long configVersion = getConfigVersion() + 1L;
writeConfigVersion(configVersion);
} else {
schedConf = oldConf;
removeTmpConfigFile();
}
tempConfigPath = null;
} | 3.26 |
hadoop_WordMean_readAndCalcMean_rdh | /**
* Reads the output file and parses the summation of lengths, and the word
* count, to perform a quick calculation of the mean.
*
* @param path
* The path to find the output file in. Set in main to the output
* directory.
* @throws IOException
* If it cannot access the output directory, we throw an exception.
*/
private double readAndCalcMean(Path path, Configuration conf) throws IOException {
FileSystem fs = FileSystem.get(conf);
Path file = new Path(path, "part-r-00000");
if (!fs.exists(file))
throw new IOException("Output not found!");
BufferedReader br = null;
// average = total sum / number of elements;
try {
br = new BufferedReader(new InputStreamReader(fs.open(file),
StandardCharsets.UTF_8));
long count = 0;
long length = 0;
String line;
while ((line = br.readLine()) != null) {
StringTokenizer st = new StringTokenizer(line);
// grab type
String type = st.nextToken();
// differentiate
if (type.equals(COUNT.toString())) {
String countLit = st.nextToken();
count = Long.parseLong(countLit);
} else if (type.equals(LENGTH.toString())) {
String lengthLit = st.nextToken();
length = Long.parseLong(lengthLit);
}
}
double theMean = ((double) (length)) / ((double) (count));
System.out.println("The mean is: " + theMean);
return theMean;
} finally {
if (br != null) {
br.close();
}
}
} | 3.26 |
hadoop_WordMean_map_rdh | /**
* Emits 2 key-value pairs for counting the word and its length. Outputs are
* (Text, LongWritable).
*
* @param value
* This will be a line of text coming in from our input file.
*/
public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
StringTokenizer itr = new StringTokenizer(value.toString());
while (itr.hasMoreTokens()) {
String string = itr.nextToken();
this.wordLen.set(string.length());
context.write(LENGTH, this.wordLen);
context.write(COUNT, ONE);
}
} | 3.26 |
hadoop_WordMean_reduce_rdh | /**
* Sums all the individual values within the iterator and writes them to the
* same key.
*
* @param key
* This will be one of 2 constants: LENGTH_STR or COUNT_STR.
* @param values
* This will be an iterator of all the values associated with that
* key.
*/
public void reduce(Text key, Iterable<LongWritable> values, Context context) throws IOException, InterruptedException {
long theSum = 0;
for (LongWritable val : values) {
theSum += val.get();
}
sum.set(theSum);
context.write(key, sum);
} | 3.26 |
hadoop_YarnVersionInfo_getUser_rdh | /**
* The user that compiled Yarn.
*
* @return the username of the user
*/
public static String getUser() {
return YARN_VERSION_INFO._getUser();
} | 3.26 |
hadoop_YarnVersionInfo_getUrl_rdh | /**
* Get the subversion URL for the root YARN directory.
*
* @return URL for the root YARN directory.
*/
public static String getUrl() {
return YARN_VERSION_INFO._getUrl();
} | 3.26 |
hadoop_YarnVersionInfo_getSrcChecksum_rdh | /**
* Get the checksum of the source files from which YARN was
* built.
*
* @return srcChecksum.
*/
public static String getSrcChecksum() {
return YARN_VERSION_INFO._getSrcChecksum();
} | 3.26 |
hadoop_YarnVersionInfo_getVersion_rdh | /**
* Get the YARN version.
*
* @return the YARN version string, eg. "0.6.3-dev"
*/
public static String getVersion() {
return YARN_VERSION_INFO._getVersion();
} | 3.26 |
hadoop_YarnVersionInfo_getRevision_rdh | /**
* Get the subversion revision number for the root directory
*
* @return the revision number, eg. "451451"
*/
public static String getRevision() {
return YARN_VERSION_INFO._getRevision();
} | 3.26 |
hadoop_YarnVersionInfo_getBuildVersion_rdh | /**
* Returns the buildVersion which includes version,
* revision, user and date.
*
* @return buildVersion.
*/
public static String getBuildVersion() {
return YARN_VERSION_INFO._getBuildVersion();
} | 3.26 |
hadoop_YarnVersionInfo_getDate_rdh | /**
* The date that YARN was compiled.
*
* @return the compilation date in unix date format
*/
public static String getDate() {
return YARN_VERSION_INFO._getDate();
} | 3.26 |
hadoop_UnitsConversionUtil_convert_rdh | /**
* Converts a value from one unit to another. Supported units can be obtained
* by inspecting the KNOWN_UNITS set.
*
* @param fromUnit
* the unit of the from value
* @param toUnit
* the target unit
* @param fromValue
* the value you wish to convert
* @return the value in toUnit
*/
public static long convert(String fromUnit, String toUnit, long fromValue) {
if ((toUnit == null) || (fromUnit == null)) {
throw new IllegalArgumentException("One or more arguments are null");
}
if (fromUnit.equals(toUnit)) {
return fromValue;
}
Converter fc = getConverter(fromUnit);
Converter tc = getConverter(toUnit);
long numerator = fc.numerator * tc.denominator;
long denominator = fc.denominator * tc.numerator;
long numeratorMultiplierLimit = Long.MAX_VALUE / numerator;
if (numerator < denominator) {
if (numeratorMultiplierLimit < fromValue) {
String overflowMsg = ((((("Converting " + fromValue) + " from '") + fromUnit) + "' to '") + toUnit) + "' will result in an overflow of Long";
throw new IllegalArgumentException(overflowMsg);
}
return (fromValue * numerator) / denominator;
}
if (numeratorMultiplierLimit > fromValue) {
return (numerator * fromValue) / denominator;
}
long tmp = numerator / denominator;
if ((Long.MAX_VALUE / tmp) < fromValue) {
String overflowMsg = ((((("Converting " + fromValue) + " from '") + fromUnit) + "' to '") + toUnit) + "' will result in an overflow of Long";
throw new IllegalArgumentException(overflowMsg);
}
return fromValue * tmp;
} | 3.26 |
hadoop_CombinedFileRange_merge_rdh | /**
* Merge this input range into the current one, if it is compatible.
* It is assumed that otherOffset is greater or equal the current offset,
* which typically happens by sorting the input ranges on offset.
*
* @param otherOffset
* the offset to consider merging
* @param otherEnd
* the end to consider merging
* @param other
* the underlying FileRange to add if we merge
* @param minSeek
* the minimum distance that we'll seek without merging the
* ranges together
* @param maxSize
* the maximum size that we'll merge into a single range
* @return true if we have merged the range into this one
*/
public boolean merge(long otherOffset, long otherEnd, FileRange other, int minSeek, int maxSize) {
long v0 = this.getOffset() + this.getLength();
long newEnd = Math.max(v0, otherEnd);
if (((otherOffset - v0) >= minSeek) || ((newEnd - this.getOffset()) > maxSize)) {
return false;
}
this.setLength(((int) (newEnd - this.getOffset())));
underlying.add(other);
return true;
} | 3.26 |
hadoop_CombinedFileRange_getUnderlying_rdh | /**
* Get the list of ranges that were merged together to form this one.
*
* @return the list of input ranges
*/
public List<FileRange> getUnderlying() {
return underlying;
} | 3.26 |
hadoop_FederationApplicationHomeSubClusterStoreInputValidator_validate_rdh | /**
* Quick validation on the input to check some obvious fail conditions (fail
* fast). Check if the provided {@link DeleteApplicationHomeSubClusterRequest}
* for deleting an application is valid or not.
*
* @param request
* the {@link DeleteApplicationHomeSubClusterRequest} to
* validate against
* @throws FederationStateStoreInvalidInputException
* if the request is invalid
*/
public static void validate(DeleteApplicationHomeSubClusterRequest request) throws FederationStateStoreInvalidInputException {
if (request == null) {
String message = ("Missing DeleteApplicationHomeSubCluster Request." + " Please try again by specifying") + " an ApplicationHomeSubCluster information.";
LOG.warn(message);
throw new FederationStateStoreInvalidInputException(message);
}
// validate application Id
checkApplicationId(request.getApplicationId());
} | 3.26 |
hadoop_FederationApplicationHomeSubClusterStoreInputValidator_checkApplicationHomeSubCluster_rdh | /**
* Validate if the ApplicationHomeSubCluster info are present or not.
*
* @param applicationHomeSubCluster
* the information of the application to be
* verified
* @throws FederationStateStoreInvalidInputException
* if the SubCluster Info
* are invalid
*/
private static void checkApplicationHomeSubCluster(ApplicationHomeSubCluster applicationHomeSubCluster) throws FederationStateStoreInvalidInputException {
if (applicationHomeSubCluster == null) {
String message = ("Missing ApplicationHomeSubCluster Info." + " Please try again by specifying") + " an ApplicationHomeSubCluster information.";
LOG.warn(message);
throw new FederationStateStoreInvalidInputException(message);
}
// validate application Id
checkApplicationId(applicationHomeSubCluster.getApplicationId());
// validate subcluster Id
FederationMembershipStateStoreInputValidator.checkSubClusterId(applicationHomeSubCluster.getHomeSubCluster());
} | 3.26 |
hadoop_MDCFilter_init_rdh | /**
* Initializes the filter.
* <p>
* This implementation is a NOP.
*
* @param config
* filter configuration.
* @throws ServletException
* thrown if the filter could not be initialized.
*/
@Override
public void init(FilterConfig config) throws ServletException {
} | 3.26 |
hadoop_MDCFilter_doFilter_rdh | /**
* Sets the slf4j <code>MDC</code> and delegates the request to the chain.
*
* @param request
* servlet request.
* @param response
* servlet response.
* @param chain
* filter chain.
* @throws IOException
* thrown if an IO error occurs.
* @throws ServletException
* thrown if a servlet error occurs.
*/
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException {
try {
MDC.clear();
String hostname = HostnameFilter.get();
if (hostname != null) {
MDC.put("hostname", HostnameFilter.get());
}
Principal principal = ((HttpServletRequest) (request)).getUserPrincipal();
String user = (principal != null) ? principal.getName() : null;
if (user != null) {
MDC.put("user", user);
}
MDC.put("method", ((HttpServletRequest) (request)).getMethod());
if (((HttpServletRequest) (request)).getPathInfo() != null) {
MDC.put("path", ((HttpServletRequest) (request)).getPathInfo());
}
chain.doFilter(request, response);
} finally {
MDC.clear();
}
} | 3.26 |
hadoop_DeregisterSubClusters_newInstance_rdh | /**
* Initialize DeregisterSubClusters.
*
* @param subClusterId
* subCluster Id.
* @param deregisterState
* deregister state,
* SUCCESS means deregister is successful, Failed means deregister was unsuccessful.
* @param lastHeartBeatTime
* last heartbeat time.
* @param info
* offline information.
* @param subClusterState
* subCluster State.
* @return DeregisterSubClusters.
*/
public static DeregisterSubClusters newInstance(String subClusterId, String deregisterState, String lastHeartBeatTime, String info, String subClusterState) {
DeregisterSubClusters deregisterSubClusters = Records.newRecord(DeregisterSubClusters.class);
deregisterSubClusters.setSubClusterId(subClusterId);
deregisterSubClusters.setDeregisterState(deregisterState);
deregisterSubClusters.setLastHeartBeatTime(lastHeartBeatTime);
deregisterSubClusters.setInformation(info);
deregisterSubClusters.setSubClusterState(subClusterState);
return deregisterSubClusters;
} | 3.26 |
hadoop_CompositeService_addIfService_rdh | /**
* If the passed object is an instance of {@link Service},
* add it to the list of services managed by this {@link CompositeService}
*
* @param object
* object.
* @return true if a service is added, false otherwise.
*/
protected boolean addIfService(Object object) {
if (object instanceof Service) {
addService(((Service) (object)));
return true;
} else {
return false;
}
} | 3.26 |
hadoop_CompositeService_addService_rdh | /**
* Add the passed {@link Service} to the list of services managed by this
* {@link CompositeService}
*
* @param service
* the {@link Service} to be added
*/
protected void addService(Service service) {
if (f0.isDebugEnabled()) {
f0.debug("Adding service " + service.getName());
}
synchronized(serviceList) {
serviceList.add(service);
}
} | 3.26 |
hadoop_EncryptionSecretOperations_getSSECustomerKey_rdh | /**
 * Gets the SSE-C client side key if present.
*
* @param secrets
* source of the encryption secrets.
* @return an optional key to attach to a request.
*/
public static Optional<String> getSSECustomerKey(final EncryptionSecrets secrets) {
if (secrets.hasEncryptionKey() && (secrets.getEncryptionMethod() == S3AEncryptionMethods.SSE_C)) {
return Optional.of(secrets.getEncryptionKey());
} else {
return Optional.empty();
}
} | 3.26 |
hadoop_EncryptionSecretOperations_getSSEAwsKMSKey_rdh | /**
* Gets the SSE-KMS key if present, else let S3 use AWS managed key.
*
* @param secrets
* source of the encryption secrets.
* @return an optional key to attach to a request.
*/
public static Optional<String> getSSEAwsKMSKey(final EncryptionSecrets secrets) {
if (((secrets.getEncryptionMethod() == S3AEncryptionMethods.SSE_KMS) || (secrets.getEncryptionMethod() == S3AEncryptionMethods.DSSE_KMS)) && secrets.hasEncryptionKey()) {
return Optional.of(secrets.getEncryptionKey());
} else {
return Optional.empty();
}
} | 3.26 |
hadoop_DFSRouter_main_rdh | /**
* Main run loop for the router.
*
* @param argv
* parameters.
*/
public static void main(String[] argv) {
if (DFSUtil.parseHelpArgument(argv, USAGE, System.out, true)) {
System.exit(0);
}
try {
StringUtils.startupShutdownMessage(Router.class, argv, LOG);
Router router = new Router();
ShutdownHookManager.get().addShutdownHook(new CompositeServiceShutdownHook(router), SHUTDOWN_HOOK_PRIORITY);
Configuration conf = getConfiguration();
router.init(conf);
router.start();
} catch (Throwable e) {
LOG.error("Failed to start router", e);
terminate(1, e);
}
} | 3.26 |
hadoop_AbstractTask_getTimeout_rdh | /**
* Get Timeout for a Task.
*
* @return timeout in seconds
 */
@Override
public final long getTimeout() {
return this.timeout;
} | 3.26 |
hadoop_AbstractTask_setTimeout_rdh | /**
* Set Task Timeout in seconds.
*
* @param taskTimeout
* : Timeout in seconds
*/
@Override
public final void setTimeout(final long taskTimeout) {
this.timeout = taskTimeout;
} | 3.26 |
hadoop_AbstractTask_getEnvironment_rdh | /**
* Get environment for a Task.
*
* @return environment of a Task
 */
@Override
public final Map<String, String> getEnvironment() {
return environment;
} | 3.26 |
hadoop_AbstractTask_setTaskType_rdh | /**
* Set TaskType for a Task.
*
* @param type
* Simple or Composite Task
*/
public final void setTaskType(final TaskType type) {
this.taskType = type;
} | 3.26 |
hadoop_AbstractTask_toString_rdh | /**
* ToString.
*
* @return String representation of Task
*/
@Override
public final String toString() {
return ((((("TaskId: " + this.taskID.toString()) + ", TaskType: ") + this.taskType) + ", cmd: '") + taskCmd) + "'";
} | 3.26 |
hadoop_AbstractTask_readFields_rdh | /**
* Read Fields from file.
*
* @param in
* : datainput object.
* @throws IOException
* : Throws IOException in case of error.
*/
@Override
public final void readFields(final DataInput in) throws IOException {
this.taskID = new TaskId();
taskID.readFields(in);
IntWritable envSize = new IntWritable(0);
envSize.readFields(in);
for (int i = 0; i < envSize.get(); i++) {
Text key = new Text();
Text v6 = new Text();
key.readFields(in);
v6.readFields(in);
environment.put(key.toString(), v6.toString());
}
Text taskCmdText = new Text();
taskCmdText.readFields(in);
taskCmd = taskCmdText.toString();
taskType = WritableUtils.readEnum(in, TaskType.class);
timeout = WritableUtils.readVLong(in);
} | 3.26 |
hadoop_AbstractTask_write_rdh | /**
* Write Task.
*
* @param out
* : dataoutout object.
* @throws IOException
* : Throws IO exception if any error occurs.
*/
@Override
public final void write(final DataOutput out) throws IOException {
taskID.write(out);
int environmentSize = 0;
if (environment == null) {
environmentSize = 0;
} else {
environmentSize = environment.size();
}
new IntWritable(environmentSize).write(out);
if (environmentSize != 0) {
for (Entry<String, String> envEntry : environment.entrySet()) {
new Text(envEntry.getKey()).write(out);
new Text(envEntry.getValue()).write(out);
}
}
Text taskCmdText;
if (taskCmd == null) {
taskCmdText = new Text("");
} else {
taskCmdText = new Text(taskCmd);
}
taskCmdText.write(out);
WritableUtils.writeEnum(out, taskType);
WritableUtils.writeVLong(out, timeout);
} | 3.26 |
hadoop_AbstractTask_getTaskCmd_rdh | /**
* Get TaskCmd for a Task.
*
 * @return TaskCMD: It's a task command line such as sleep 10
*/
@Override
public final String getTaskCmd() {
return taskCmd;
} | 3.26 |
hadoop_AbstractTask_setEnvironment_rdh | /**
* Set environment for a Task.
*
* @param localenvironment
* : Map of environment vars
*/
@Override
public final void setEnvironment(final Map<String, String> localenvironment) {
this.environment = localenvironment;
} | 3.26 |
hadoop_AbstractTask_getTaskType_rdh | /**
* Get TaskType for a Task.
*
* @return TaskType: Type of Task
*/
@Override
public final TaskType getTaskType() {
return taskType;
} | 3.26 |
hadoop_S3ListRequest_v2_rdh | /**
* Restricted constructors to ensure v1 or v2, not both.
*
* @param request
* v2 request
* @return new list request container
*/
public static S3ListRequest v2(ListObjectsV2Request request) {
return new S3ListRequest(null, request);
} | 3.26 |
hadoop_S3ListRequest_isV1_rdh | /**
* Is this a v1 API request or v2?
*
* @return true if v1, false if v2
*/
public boolean isV1() {
return v1Request != null;
} | 3.26 |
hadoop_S3ListRequest_v1_rdh | /**
* Restricted constructors to ensure v1 or v2, not both.
*
* @param request
* v1 request
* @return new list request container
*/
public static S3ListRequest v1(ListObjectsRequest request) {
return new S3ListRequest(request, null);
} | 3.26 |
hadoop_ServiceLauncher_getServiceName_rdh | /**
* Get the service name via {@link Service#getName()}.
*
* If the service is not instantiated, the classname is returned instead.
*
* @return the service name
*/
public String getServiceName() {
Service s = service;
String name = null;
if (s != null) {
try {
name = s.getName();
} catch (Exception ignored) {
// ignored
}
}
if (name != null) {
return "service " + name;
} else {
return "service " + serviceName;
}
} | 3.26 |
hadoop_ServiceLauncher_launchServiceAndExit_rdh | /**
* Launch the service and exit.
*
* <ol>
* <li>Parse the command line.</li>
* <li>Build the service configuration from it.</li>
* <li>Start the service.</li>
* <li>If it is a {@link LaunchableService}: execute it</li>
* <li>Otherwise: wait for it to finish.</li>
* <li>Exit passing the status code to the {@link #exit(int, String)}
* method.</li>
* </ol>
*
* @param args
* arguments to the service. {@code arg[0]} is
* assumed to be the service classname.
*/
public void launchServiceAndExit(List<String> args) {
StringBuilder builder = new StringBuilder();
for (String arg : args) {
builder.append('"').append(arg).append("\" ");
}
String argumentString = builder.toString();
if (LOG.isDebugEnabled()) {
LOG.debug(startupShutdownMessage(serviceName, args));
LOG.debug(argumentString);
}
registerFailureHandling();
// set up the configs, using reflection to push in the -site.xml files
loadConfigurationClasses();
Configuration conf = createConfiguration();
for (URL resourceUrl : confResourceUrls) {
conf.addResource(resourceUrl);
}
bindCommandOptions();
ExitUtil.ExitException exitException;
try {
List<String> processedArgs = extractCommandOptions(conf, args);
exitException = launchService(conf, processedArgs, true, true);
} catch (ExitUtil.ExitException e) {
exitException = e;
noteException(exitException);
}
if (exitException.getExitCode() == LauncherExitCodes.EXIT_USAGE) {
// something went wrong. Print the usage and commands
System.err.println(getUsageMessage());
System.err.println("Command: " + argumentString);
}
System.out.flush();
System.err.flush();
exit(exitException);
} | 3.26 |
hadoop_ServiceLauncher_launchService_rdh | /**
* Launch a service catching all exceptions and downgrading them to exit codes
* after logging.
*
* Sets {@link #serviceException} to this value.
*
* @param conf
* configuration to use
* @param instance
* optional instance of the service.
* @param processedArgs
* command line after the launcher-specific arguments
* have been stripped out.
* @param addShutdownHook
* should a shutdown hook be added to terminate
* this service on shutdown. Tests should set this to false.
* @param execute
* execute/wait for the service to stop.
* @return an exit exception, which will have a status code of 0 if it worked
*/
public ExitException launchService(Configuration conf, S instance, List<String> processedArgs, boolean addShutdownHook, boolean execute) {
ExitUtil.ExitException exitException;
try {
int exitCode = coreServiceLaunch(conf, instance, processedArgs, addShutdownHook, execute);
if (service != null) {
// check to see if the service failed
Throwable failure = service.getFailureCause();
if (failure != null) {
// the service exited with a failure.
// check what state it is in
Service.STATE failureState = service.getFailureState();
if (failureState == STATE.STOPPED) {
// the failure occurred during shutdown, not important enough
// to bother the user as it may just scare them
LOG.debug("Failure during shutdown: {} ", failure, failure);
} else {
// throw it for the catch handlers to deal with
throw failure;
}
}
}
String name = getServiceName();
if (exitCode == 0) {
exitException = new ServiceLaunchException(exitCode, "%s succeeded", name);
} else {
exitException = new ServiceLaunchException(exitCode, "%s failed ", name);
}
// either the service succeeded, or an error raised during shutdown,
// which we don't worry that much about
} catch (ExitUtil.ExitException ee) {
// exit exceptions are passed through unchanged
exitException = ee;
} catch (Throwable thrown) {
// other errors need a full log.
LOG.error("Exception raised {}", service != null ? (service.toString() + " in state ") + service.getServiceState() : "during service instantiation", thrown);
exitException = convertToExitException(thrown);
}
noteException(exitException);
return exitException;
} | 3.26 |
hadoop_ServiceLauncher_warn_rdh | /**
* Print a warning message.
* <p>
* This tries to log to the log's warn() operation.
* If the log at that level is disabled it logs to system error
*
* @param text
* warning text
*/
protected void warn(String text) {
if (LOG.isWarnEnabled()) {
LOG.warn(text);
} else {
System.err.println(text);
}
} | 3.26 |
hadoop_ServiceLauncher_coreServiceLaunch_rdh | /**
* Launch the service.
*
* All exceptions that occur are propagated upwards.
*
* If the method returns a status code, it means that it got as far starting
* the service, and if it implements {@link LaunchableService}, that the
* method {@link LaunchableService#execute()} has completed.
*
* After this method returns, the service can be retrieved returned by
* {@link #getService()}.
*
* @param conf
* configuration
* @param instance
* optional instance of the service.
* @param processedArgs
* arguments after the configuration parameters
* have been stripped out.
* @param addShutdownHook
* should a shutdown hook be added to terminate
* this service on shutdown. Tests should set this to false.
* @param execute
* execute/wait for the service to stop
* @throws ClassNotFoundException
* classname not on the classpath
* @throws IllegalAccessException
* not allowed at the class
* @throws InstantiationException
* not allowed to instantiate it
* @throws InterruptedException
* thread interrupted
* @throws ExitUtil.ExitException
* any exception defining the status code.
* @throws Exception
* any other failure -if it implements
* {@link ExitCodeProvider} then it defines the exit code for any
* containing exception
* @return status code.
*/
protected int coreServiceLaunch(Configuration conf, S instance,
List<String> processedArgs, boolean addShutdownHook, boolean execute) throws Exception {
// create the service instance
if (instance == null) {
instantiateService(conf);
} else {
// service already exists, so instantiate
configuration = conf;
service = instance;
}
ServiceShutdownHook shutdownHook = null;
// and the shutdown hook if requested
if (addShutdownHook) {
shutdownHook = new ServiceShutdownHook(service);
shutdownHook.register(SHUTDOWN_PRIORITY);
}
String name = getServiceName();
LOG.debug("Launched service {}", name);
CommonAuditContext.noteEntryPoint(service);
LaunchableService launchableService = null;
if (service instanceof LaunchableService) {
// it's a LaunchableService, pass in the conf and arguments before init)
LOG.debug("Service {} implements LaunchableService", name);
launchableService = ((LaunchableService) (service));
if (launchableService.isInState(STATE.INITED)) {
LOG.warn("LaunchableService {}" + " initialized in constructor before CLI arguments passed in", name);
}
Configuration newconf = launchableService.bindArgs(configuration, processedArgs);
if (newconf != null) {
configuration = newconf;
}
}
// some class constructors init; here this is picked up on.
if (!service.isInState(STATE.INITED)) {
service.init(configuration);
}
int exitCode;
try {
// start the service
service.start();
exitCode = EXIT_SUCCESS;
if (execute && service.isInState(STATE.STARTED)) {
if (launchableService != null) {
// assume that runnable services are meant to run from here
try {
exitCode = launchableService.execute();
LOG.debug("Service {} execution returned exit code {}", name, exitCode);
} finally {
// then stop the service
service.stop();
}
} else {
// run the service until it stops or an interrupt happens
// on a different thread.
LOG.debug("waiting for service threads to terminate");
service.waitForServiceToStop(0);
}
}
} finally {
if (shutdownHook != null) {
shutdownHook.unregister();
}
}
return exitCode;
}
/**
*
* @return Instantiate the service defined in {@code serviceClassName}.
Sets the {@code configuration} field
to the value of {@code conf},
and the {@code service} | 3.26 |
hadoop_ServiceLauncher_extractCommandOptions_rdh | /**
* Extract the command options and apply them to the configuration,
* building an array of processed arguments to hand down to the service.
*
* @param conf
* configuration to update.
* @param args
* main arguments. {@code args[0]}is assumed to be
* the service classname and is skipped.
* @return the remaining arguments
* @throws ExitUtil.ExitException
* if JVM exiting is disabled.
*/
public List<String> extractCommandOptions(Configuration conf, List<String> args) {
int size = args.size();
if (size <= 1) {
return Collections.emptyList();
}
List<String> coreArgs = args.subList(1, size);
return parseCommandArgs(conf, coreArgs);
} | 3.26 |
hadoop_ServiceLauncher_exitWithMessage_rdh | /**
* Exit with a printed message.
*
* @param status
* status code
* @param message
* message message to print before exiting
* @throws ExitUtil.ExitException
* if exceptions are disabled
*/
protected static void exitWithMessage(int status, String message) {
ExitUtil.terminate(new ServiceLaunchException(status, message));
}
/**
* Exit with the usage exit code {@link #EXIT_USAGE}
* and message {@link #USAGE_MESSAGE} | 3.26 |
hadoop_ServiceLauncher_parseCommandArgs_rdh | /**
* Parse the command arguments, extracting the service class as the last
* element of the list (after extracting all the rest).
*
* The field {@link #commandOptions} field must already have been set.
*
* @param conf
* configuration to use
* @param args
* command line argument list
* @return the remaining arguments
* @throws ServiceLaunchException
* if processing of arguments failed
*/
protected List<String> parseCommandArgs(Configuration conf, List<String> args) {
Preconditions.checkNotNull(commandOptions, "Command options have not been created");
StringBuilder argString = new StringBuilder(args.size() * 32);
for (String arg : args) {
argString.append("\"").append(arg).append("\" ");
}
LOG.debug("Command line: {}", argString);
try {
String[] argArray = args.toArray(new String[args.size()]);
// parse this the standard way. This will
// update the configuration in the parser, and potentially
// patch the user credentials
GenericOptionsParser parser = createGenericOptionsParser(conf, argArray);
if (!parser.isParseSuccessful()) {
throw new ServiceLaunchException(EXIT_COMMAND_ARGUMENT_ERROR, E_PARSE_FAILED + " %s", argString);
}
CommandLine line = parser.getCommandLine();
List<String> remainingArgs = Arrays.asList(parser.getRemainingArgs());
LOG.debug("Remaining arguments {}", remainingArgs);
// Scan the list of configuration files
// and bail out if they don't exist
if (line.hasOption(ARG_CONF)) {
String[] filenames = line.getOptionValues(ARG_CONF);
m0(filenames);
// Add URLs of files as list of URLs to load
for (String filename : filenames) {
File file = new File(filename);
LOG.debug("Configuration files {}", file);
confResourceUrls.add(file.toURI().toURL());
}
}
if (line.hasOption(ARG_CONFCLASS)) {
// new resources to instantiate as configurations
List<String> classnameList = Arrays.asList(line.getOptionValues(ARG_CONFCLASS));
LOG.debug("Configuration classes {}", classnameList);
confClassnames.addAll(classnameList);
}
// return the remainder
return remainingArgs;
} catch (IOException e) {
// parsing problem: convert to a command argument error with
// the original text
throw new ServiceLaunchException(EXIT_COMMAND_ARGUMENT_ERROR, e);
} catch (RuntimeException e) {
// lower level issue such as XML parse failure
throw new ServiceLaunchException(EXIT_COMMAND_ARGUMENT_ERROR, e, E_PARSE_FAILED + " %s : %s", argString, e);
}
} | 3.26 |
hadoop_ServiceLauncher_createOptions_rdh | /**
* Override point: create an options instance to combine with the
* standard options set.
* <i>Important. Synchronize uses of {@link Option}</i>
* with {@code Option.class}
*
* @return the new options
 */
@SuppressWarnings("static-access")
protected Options createOptions() {
synchronized(Option.class) {
Options options = new Options();
Option oconf = Option.builder(ARG_CONF_SHORT).argName("configuration file").hasArg().desc("specify an application configuration file").longOpt(ARG_CONF).build();
Option confclass = Option.builder(ARG_CONFCLASS_SHORT).argName("configuration classname").hasArg().desc("Classname of a Hadoop Configuration subclass to load").longOpt(ARG_CONFCLASS).build();
Option property = Option.builder("D").argName("property=value").hasArg().desc("use value for given property").build();
options.addOption(oconf);
options.addOption(property);
options.addOption(confclass);
return options;
}
} | 3.26 |
hadoop_ServiceLauncher_isClassnameDefined_rdh | /**
* Probe for service classname being defined.
*
* @return true if the classname is set
*/
private boolean isClassnameDefined() {
return (serviceClassName != null) && (!serviceClassName.isEmpty());
} | 3.26 |
hadoop_ServiceLauncher_m0_rdh | /**
* Verify that all the specified filenames exist.
*
* @param filenames
* a list of files
* @throws ServiceLaunchException
* if a file is not found
*/
protected void m0(String[] filenames) {
if (filenames == null) {
return;
}
for (String filename : filenames) {
File file = new File(filename);
LOG.debug("Conf file {}", file.getAbsolutePath());
if (!file.exists()) {
// no configuration file
throw new ServiceLaunchException(EXIT_NOT_FOUND, ARG_CONF_PREFIXED + ": configuration file not found: %s", file.getAbsolutePath());
}
}
} | 3.26 |
hadoop_ServiceLauncher_getUsageMessage_rdh | /**
* Get the usage message, ideally dynamically.
*
* @return the usage message
*/
protected String getUsageMessage() {
String message = USAGE_MESSAGE;
if (commandOptions != null) {
message = (((USAGE_NAME + " ") + commandOptions.toString()) + " ") + USAGE_SERVICE_ARGUMENTS;
}
return message;
} | 3.26 |
hadoop_ServiceLauncher_getServiceExitCode_rdh | /**
* The exit code from a successful service execution.
*
* @return the exit code.
*/
public final int getServiceExitCode() {
return serviceExitCode;
}
/**
* Get the exit exception used to end this service.
*
* @return an exception, which will be null until the service
has exited (and {@code System.exit} | 3.26 |
hadoop_ServiceLauncher_main_rdh | /**
* This is the JVM entry point for the service launcher.
*
* Converts the arguments to a list, then invokes {@link #serviceMain(List)}
*
* @param args
* command line arguments.
*/
public static void main(String[] args) {
serviceMain(Arrays.asList(args));
} | 3.26 |
hadoop_ServiceLauncher_startupShutdownMessage_rdh | /**
*
* @return Build a log message for starting up and shutting down.
* @param classname
* the class of the server
* @param args
* arguments
*/
protected static String startupShutdownMessage(String classname, List<String> args) {
final String hostname = NetUtils.getHostname();
return StringUtils.createStartupShutdownMessage(classname, hostname, args.toArray(new String[args.size()]));
} | 3.26 |
hadoop_ServiceLauncher_error_rdh | /**
* Report an error.
* <p>
* This tries to log to {@code LOG.error()}.
* <p>
 * If that log level is disabled, the message
* is logged to system error along with {@code thrown.toString()}
*
* @param message
* message for the user
* @param thrown
* the exception thrown
*/
protected void error(String message, Throwable thrown) {
String text = "Exception: " + message;
if (LOG.isErrorEnabled()) {
LOG.error(text, thrown);
} else {
System.err.println(text);
if (thrown != null) {
System.err.println(thrown.toString());
}
}
} | 3.26 |
hadoop_ServiceLauncher_convertToExitException_rdh | /**
* Convert an exception to an {@code ExitException}.
*
* This process may just be a simple pass through, otherwise a new
* exception is created with an exit code, the text of the supplied
* exception, and the supplied exception as an inner cause.
*
* <ol>
* <li>If is already the right type, pass it through.</li>
* <li>If it implements {@link ExitCodeProvider#getExitCode()},
* the exit code is extracted and used in the new exception.</li>
* <li>Otherwise, the exit code
* {@link LauncherExitCodes#EXIT_EXCEPTION_THROWN} is used.</li>
* </ol>
*
* @param thrown
* the exception thrown
* @return an {@code ExitException} with a status code
*/
protected static ExitException convertToExitException(Throwable thrown) {
ExitUtil.ExitException exitException;
// get the exception message
String message = thrown.toString();
int exitCode;
if (thrown instanceof ExitCodeProvider) {
// the exception provides a status code -extract it
exitCode = ((ExitCodeProvider) (thrown)).getExitCode();
message = thrown.getMessage();
if (message == null) {
// some exceptions do not have a message; fall back
// to the string value.
message = thrown.toString();
}
} else { // no exception code: use the default
exitCode = EXIT_EXCEPTION_THROWN;
}
// construct the new exception with the original message and
// an exit code
exitException = new ServiceLaunchException(exitCode, thrown, message);
return exitException;
}
/**
* Generate an exception announcing a failure to create the service.
*
* @param exception
* inner exception.
* @return a new exception, with the exit code
{@link LauncherExitCodes#EXIT_SERVICE_CREATION_FAILURE} | 3.26 |
hadoop_ServiceLauncher_setService_rdh | /**
* Setter is to give subclasses the ability to manipulate the service.
*
* @param s
* the new service
*/
protected void setService(S s) {
this.service = s;
} | 3.26 |
hadoop_ServiceLauncher_getConfiguration_rdh | /**
* Get the configuration constructed from the command line arguments.
*
* @return the configuration used to create the service
*/
public final Configuration getConfiguration() {
return configuration;
} | 3.26 |
hadoop_ServiceLauncher_noteException_rdh | /**
* Record that an Exit Exception has been raised.
* Save it to {@link #serviceException}, with its exit code in
* {@link #serviceExitCode}
*
* @param exitException
* exception
*/
void noteException(ExitUtil.ExitException exitException) {
int exitCode = exitException.getExitCode();
if (exitCode != 0) {
LOG.debug("Exception raised with exit code {}", exitCode, exitException);
Throwable cause = exitException.getCause();
if (cause != null) {
// log the nested exception in more detail
LOG.warn("{}", cause.toString(), cause);
}
}
serviceExitCode = exitCode;
serviceException = exitException;
} | 3.26 |
hadoop_ServiceLauncher_createGenericOptionsParser_rdh | /**
* Override point: create a generic options parser or subclass thereof.
*
* @param conf
* Hadoop configuration
* @param argArray
* array of arguments
* @return a generic options parser to parse the arguments
* @throws IOException
* on any failure
*/
protected GenericOptionsParser createGenericOptionsParser(Configuration conf, String[] argArray) throws IOException {
return new MinimalGenericOptionsParser(conf, commandOptions, argArray);
} | 3.26 |
hadoop_ServiceLauncher_registerFailureHandling_rdh | /**
* Override point: register this class as the handler for the control-C
* and SIGINT interrupts.
*
* Subclasses can extend this with extra operations, such as
* an exception handler:
* <pre>
* Thread.setDefaultUncaughtExceptionHandler(
* new YarnUncaughtExceptionHandler());
* </pre>
*/
protected void registerFailureHandling() {
try {
interruptEscalator = new InterruptEscalator(this, SHUTDOWN_TIME_ON_INTERRUPT);
interruptEscalator.register(IrqHandler.CONTROL_C);
interruptEscalator.register(IrqHandler.SIGTERM);
} catch (IllegalArgumentException e) {
// downgrade interrupt registration to warnings
LOG.warn("{}", e, e);
}
Thread.setDefaultUncaughtExceptionHandler(new HadoopUncaughtExceptionHandler(this));
} | 3.26 |
hadoop_ServiceLauncher_createConfiguration_rdh | /**
* Override point: create the base configuration for the service.
*
* Subclasses can override to create HDFS/YARN configurations etc.
*
* @return the configuration to use as the service initializer.
*/
protected Configuration createConfiguration() {
return new Configuration();
} | 3.26 |
hadoop_ServiceLauncher_getService_rdh | /**
* Get the service.
*
* Null until
* {@link #coreServiceLaunch(Configuration, Service, List, boolean, boolean)}
* has completed.
*
* @return the service
*/
public final S getService() {
return service;
} | 3.26 |
hadoop_ServiceLauncher_serviceMain_rdh | /* ====================================================================== */
public static void serviceMain(List<String> argsList) {
if (argsList.isEmpty()) {
// no arguments: usage message
exitWithUsageMessage();
} else {
ServiceLauncher<Service> serviceLauncher = new ServiceLauncher<>(argsList.get(0));
serviceLauncher.launchServiceAndExit(argsList);
}
} | 3.26 |
hadoop_ServiceLauncher_exit_rdh | /**
* Exit the JVM using an exception for the exit code and message,
* invoking {@link ExitUtil#terminate(ExitUtil.ExitException)}.
*
* This is the standard way a launched service exits.
* An error code of 0 means success -nothing is printed.
*
* If {@link ExitUtil#disableSystemExit()} has been called, this
* method will throw the exception.
*
* The method <i>may</i> be subclassed for testing
*
* @param ee
* exit exception
* @throws ExitUtil.ExitException
* if ExitUtil exceptions are disabled
*/
protected void exit(ExitUtil.ExitException ee) {
ExitUtil.terminate(ee);
} | 3.26 |
hadoop_ServiceLauncher_bindCommandOptions_rdh | /**
* Set the {@link #commandOptions} field to the result of
* {@link #createOptions()}; protected for subclasses and test access.
*/
protected void bindCommandOptions() {
commandOptions = createOptions();
} | 3.26 |
hadoop_ServiceLauncher_getClassLoader_rdh | /**
* Override point: get the classloader to use.
*
* @return the classloader for loading a service class.
*/
protected ClassLoader getClassLoader() {
return this.getClass().getClassLoader();
} | 3.26 |
hadoop_XMLUtils_bestEffortSetAttribute_rdh | /**
* Set an attribute value on a {@link TransformerFactory}. If the TransformerFactory
* does not support the attribute, the method just returns <code>false</code> and
* logs the issue at debug level.
*
* @param transformerFactory
* to update
* @param flag
* that indicates whether to do the update and the flag can be set to
* <code>false</code> if an update fails
* @param name
* of the attribute to set
* @param value
* to set on the attribute
*/
static void bestEffortSetAttribute(TransformerFactory transformerFactory, AtomicBoolean flag, String name, Object value) {
if (flag.get()) {
try {
transformerFactory.setAttribute(name, value);
} catch (Throwable t) {
flag.set(false);
LOG.debug("Issue setting TransformerFactory attribute {}: {}", name, t.toString());
}
}
} | 3.26 |
hadoop_XMLUtils_transform_rdh | /**
* Transform input xml given a stylesheet.
*
* @param styleSheet
* the style-sheet
* @param xml
* input xml data
* @param out
* output
* @throws TransformerConfigurationException
* synopsis signals a problem
* creating a transformer object.
* @throws TransformerException
* this is used for throwing processor
* exceptions before the processing has started.
*/
public static void transform(InputStream styleSheet, InputStream xml, Writer out) throws TransformerConfigurationException, TransformerException {
// Instantiate a TransformerFactory
TransformerFactory tFactory = newSecureTransformerFactory();
// Use the TransformerFactory to process the
// stylesheet and generate a Transformer
Transformer transformer = tFactory.newTransformer(new StreamSource(styleSheet));
// Use the Transformer to transform an XML Source
// and send the output to a Result object.
transformer.transform(new StreamSource(xml), new StreamResult(out));
} | 3.26 |
hadoop_XMLUtils_setOptionalSecureTransformerAttributes_rdh | /**
* These attributes are recommended for maximum security but some JAXP transformers do
* not support them. If at any stage, we fail to set these attributes, then we won't try again
* for subsequent transformers.
*
* @param transformerFactory
* to update
*/
private static void setOptionalSecureTransformerAttributes(TransformerFactory transformerFactory) {
bestEffortSetAttribute(transformerFactory, CAN_SET_TRANSFORMER_ACCESS_EXTERNAL_DTD, XMLConstants.ACCESS_EXTERNAL_DTD, "");
bestEffortSetAttribute(transformerFactory, CAN_SET_TRANSFORMER_ACCESS_EXTERNAL_STYLESHEET, XMLConstants.ACCESS_EXTERNAL_STYLESHEET, "");
} | 3.26 |
hadoop_XMLUtils_newSecureSAXParserFactory_rdh | /**
* This method should be used if you need a {@link SAXParserFactory}. Use this method
* instead of {@link SAXParserFactory#newInstance()}. The factory that is returned has
* secure configuration enabled.
*
* @return a {@link SAXParserFactory} with secure configuration enabled
* @throws ParserConfigurationException
* if the {@code JAXP} parser does not support the
* secure configuration
* @throws SAXException
* if there are another issues when creating the factory
*/
public static SAXParserFactory newSecureSAXParserFactory() throws SAXException, ParserConfigurationException {
SAXParserFactory spf = SAXParserFactory.newInstance();
spf.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
spf.setFeature(DISALLOW_DOCTYPE_DECL, true);
spf.setFeature(LOAD_EXTERNAL_DECL, false);
spf.setFeature(EXTERNAL_GENERAL_ENTITIES, false);
spf.setFeature(EXTERNAL_PARAMETER_ENTITIES, false);
return spf;
} | 3.26 |
hadoop_XMLUtils_newSecureDocumentBuilderFactory_rdh | /**
* This method should be used if you need a {@link DocumentBuilderFactory}. Use this method
* instead of {@link DocumentBuilderFactory#newInstance()}. The factory that is returned has
* secure configuration enabled.
*
* @return a {@link DocumentBuilderFactory} with secure configuration enabled
* @throws ParserConfigurationException
* if the {@code JAXP} parser does not support the
* secure configuration
*/
public static DocumentBuilderFactory newSecureDocumentBuilderFactory() throws ParserConfigurationException {
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
dbf.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
dbf.setFeature(DISALLOW_DOCTYPE_DECL, true);
dbf.setFeature(LOAD_EXTERNAL_DECL, false);
dbf.setFeature(EXTERNAL_GENERAL_ENTITIES, false);
dbf.setFeature(EXTERNAL_PARAMETER_ENTITIES, false);
dbf.setFeature(CREATE_ENTITY_REF_NODES, false);
return dbf;
} | 3.26 |
hadoop_XMLUtils_newSecureTransformerFactory_rdh | /**
* This method should be used if you need a {@link TransformerFactory}. Use this method
* instead of {@link TransformerFactory#newInstance()}. The factory that is returned has
* secure configuration enabled.
*
* @return a {@link TransformerFactory} with secure configuration enabled
* @throws TransformerConfigurationException
* if the {@code JAXP} transformer does not
* support the secure configuration
*/
public static TransformerFactory newSecureTransformerFactory() throws TransformerConfigurationException {
TransformerFactory trfactory = TransformerFactory.newInstance();
trfactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
setOptionalSecureTransformerAttributes(trfactory);
return trfactory;
} | 3.26 |
hadoop_XMLUtils_newSecureSAXTransformerFactory_rdh | /**
* This method should be used if you need a {@link SAXTransformerFactory}. Use this method
* instead of {@link SAXTransformerFactory#newInstance()}. The factory that is returned has
* secure configuration enabled.
*
* @return a {@link SAXTransformerFactory} with secure configuration enabled
* @throws TransformerConfigurationException
* if the {@code JAXP} transformer does not
* support the secure configuration
 */
public static SAXTransformerFactory newSecureSAXTransformerFactory() throws TransformerConfigurationException {
SAXTransformerFactory trfactory = ((SAXTransformerFactory) (SAXTransformerFactory.newInstance()));
trfactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
setOptionalSecureTransformerAttributes(trfactory);
return trfactory;
} | 3.26 |
hadoop_NameValuePair_getName_rdh | /**
* Get the name.
*
* @return The name.
*/
public String getName() {
return name;
} | 3.26 |
hadoop_NameValuePair_getValue_rdh | /**
* Get the value.
*
* @return The value.
 */
public Object getValue() {
return value;
} | 3.26 |
hadoop_ClientMethod_getTypes_rdh | /**
* Get the calling types for this method.
*
* @return An array of calling types.
*/
public Class<?>[] getTypes() {
return Arrays.copyOf(this.types, this.types.length);
} | 3.26 |
hadoop_IFileWrappedMapOutput_getMerger_rdh | /**
*
* @return the merger
*/
protected MergeManagerImpl<K, V> getMerger() {
return merger;
} | 3.26 |
hadoop_ActiveOperationContext_newOperationId_rdh | /**
* Create an operation ID. The nature of it should be opaque.
*
* @return an ID for the constructor.
*/
protected static long newOperationId() {
return NEXT_OPERATION_ID.incrementAndGet();
} | 3.26 |
hadoop_AuditingFunctions_callableWithinAuditSpan_rdh | /**
* Given a callable, return a new callable which
* activates and deactivates the span around the inner invocation.
*
* @param auditSpan
* audit span
* @param operation
* operation
* @param <T>
* type of result
* @return a new invocation.
*/
public static <T> Callable<T> callableWithinAuditSpan(@Nullable AuditSpan auditSpan, Callable<T> operation) {
return auditSpan == null ? operation : () -> {
auditSpan.activate();
return operation.call();
};
} | 3.26 |
hadoop_AuditingFunctions_m0_rdh | /**
* Given an invocation, return a new invocation which
* activates and deactivates the span around the inner invocation.
*
* @param auditSpan
* audit span
* @param operation
* operation
* @return a new invocation.
*/
public static InvocationRaisingIOE m0(@Nullable AuditSpan auditSpan, InvocationRaisingIOE operation) {
return auditSpan == null ? operation : () -> {
auditSpan.activate();
operation.apply();
};
} | 3.26 |