name (stringlengths 12-178) | code_snippet (stringlengths 8-36.5k) | score (float64 3.26-3.68) |
---|---|---|
hadoop_WordMedian_reduce_rdh | /**
* Sums all the individual values within the iterator and writes them to the
* same key.
*
* @param key
* This will be a length of a word that was read.
* @param values
* This will be an iterator of all the values associated with that
* key.
*/
public void reduce(IntWritable key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
int sum = 0;
for (IntWritable value : values) {
sum += value.get();
}
f0.set(sum);
context.write(key, f0);
} | 3.26 |
hadoop_WordMedian_readAndFindMedian_rdh | /**
* This is a standard program to read and find a median value based on a file
* of word counts such as: 1 456, 2 132, 3 56... Where the first values are
* the word lengths and the following values are the number of times that
* words of that length appear.
*
* @param path
* The path to read the HDFS file from (part-r-00000...00001...etc).
* @param medianIndex1
* The first length value to look for.
* @param medianIndex2
* The second length value to look for (will be the same as the first
* if there are an even number of words total).
* @throws IOException
* If file cannot be found, we throw an exception.
*/
private double readAndFindMedian(String path, int medianIndex1, int medianIndex2, Configuration conf) throws IOException {
FileSystem fs = FileSystem.get(conf);
Path file = new Path(path, "part-r-00000");
if (!fs.exists(file))
throw new IOException("Output not found!");
BufferedReader br = null;
try {
br = new BufferedReader(new InputStreamReader(fs.open(file), StandardCharsets.UTF_8));
int num = 0;
String line;
while ((line = br.readLine()) != null) {
StringTokenizer st = new StringTokenizer(line);
// grab length
String currLen = st.nextToken();
// grab count
String lengthFreq = st.nextToken();
int prevNum = num;
num += Integer.parseInt(lengthFreq);
if ((medianIndex2 >= prevNum) && (medianIndex1 <= num)) {
System.out.println("The median is: " + currLen);
br.close();
return Double.parseDouble(currLen);
} else if ((medianIndex2 >= prevNum) && (medianIndex1 < num)) {
String nextCurrLen = st.nextToken();
double theMedian = (Integer.parseInt(currLen) + Integer.parseInt(nextCurrLen)) / 2.0;
System.out.println("The median is: " + theMedian);
br.close();
return theMedian;
}
}
} finally {
if (br != null) {
br.close();
}
}
// error, no median found
return -1;
} | 3.26 |
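The Javadoc above spells out the part-r-00000 layout and the meaning of the two median indices, but not how a caller obtains them. As a purely illustrative sketch (the `totalWords` value and the ceil/floor split are assumptions, not taken from the snippet), a driver might derive the two positions like this:

```java
// Hypothetical sketch: deriving the two median positions expected by
// readAndFindMedian from a known total word count. All names and the
// ceil/floor choice are illustrative assumptions.
public class MedianIndexSketch {
  public static void main(String[] args) {
    long totalWords = 77; // assumed total, e.g. taken from a job counter
    int medianIndex1 = (int) Math.ceil(totalWords / 2.0);  // upper middle position
    int medianIndex2 = (int) Math.floor(totalWords / 2.0); // lower middle position
    // For an even total the two indices coincide, matching the Javadoc above;
    // for an odd total they differ and the two word lengths are averaged.
    System.out.println(medianIndex1 + " " + medianIndex2);
  }
}
```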
hadoop_AbstractQueueCapacityCalculator_getResourceNames_rdh | /**
* Returns all resource names that are defined for a capacity type.
*
* @param queue
* queue for which the capacity vector is defined
* @param label
* node label
* @param capacityType
* capacity type for which the resource names are defined
* @return resource names
*/
protected Set<String> getResourceNames(CSQueue queue, String label, ResourceUnitCapacityType capacityType) {
return queue.getConfiguredCapacityVector(label).getResourceNamesByCapacityType(capacityType);
} | 3.26 |
hadoop_SnappyCompressor_setDictionary_rdh | /**
* Does nothing.
*/
@Override
public void setDictionary(byte[] b, int off, int len) {
// do nothing
} | 3.26 |
hadoop_SnappyCompressor_end_rdh | /**
* Closes the compressor and discards any unprocessed input.
 */
@Override
public void end() {
} | 3.26 |
hadoop_SnappyCompressor_getBytesRead_rdh | /**
* Return number of bytes given to this compressor since last reset.
*/
@Override
public long getBytesRead() {
return bytesRead;
} | 3.26 |
hadoop_SnappyCompressor_reset_rdh | /**
* Resets compressor so that a new set of input data can be processed.
*/
@Override
public void reset() {
finish = false;
finished = false;
uncompressedDirectBuf.clear();
uncompressedDirectBufLen = 0;
compressedDirectBuf.clear();
compressedDirectBuf.limit(0);
f1 = userBufLen = 0;
bytesRead = bytesWritten = 0L;
} | 3.26 |
hadoop_SnappyCompressor_reinit_rdh | /**
* Prepare the compressor to be used in a new stream with settings defined in
* the given Configuration
*
* @param conf
* Configuration from which new setting are fetched
*/
@Override
public void reinit(Configuration conf) {
reset();
} | 3.26 |
hadoop_SnappyCompressor_m0_rdh | /**
* Sets input data for compression.
* This should be called whenever #needsInput() returns
* <code>true</code> indicating that more input data is required.
*
* @param b
* Input data
* @param off
* Start offset
* @param len
* Length
*/
@Override
public void m0(byte[] b, int off, int len) {
if (b == null) {
throw new NullPointerException();
}
if (((off < 0) || (len < 0)) || (off > (b.length - len))) {
throw new ArrayIndexOutOfBoundsException();
}
finished = false;
if (len > uncompressedDirectBuf.remaining()) {
// save data; now !needsInput
this.userBuf = b;
this.f1 = off;
this.userBufLen = len;
} else {
((ByteBuffer) (uncompressedDirectBuf)).put(b, off, len);
uncompressedDirectBufLen = uncompressedDirectBuf.position();
}
bytesRead += len;
} | 3.26 |
hadoop_SnappyCompressor_finish_rdh | /**
* When called, indicates that compression should end
* with the current contents of the input buffer.
*/
@Override
public void finish() {
finish = true;
} | 3.26 |
hadoop_SnappyCompressor_compress_rdh | /**
* Fills specified buffer with compressed data. Returns actual number
* of bytes of compressed data. A return value of 0 indicates that
* needsInput() should be called in order to determine if more input
* data is required.
*
* @param b
* Buffer for the compressed data
* @param off
* Start offset of the data
* @param len
* Size of the buffer
* @return The actual number of bytes of compressed data.
*/
@Override
public int compress(byte[] b, int off, int len) throws IOException {
if (b == null) {
throw new NullPointerException();
}
if (((off < 0) || (len < 0)) || (off > (b.length - len))) {
throw new ArrayIndexOutOfBoundsException();
}
// Check if there is compressed data
int n = compressedDirectBuf.remaining();
if (n > 0) {
n = Math.min(n, len);
((ByteBuffer) (compressedDirectBuf)).get(b, off, n);
bytesWritten += n;
return n;
}
// Re-initialize the snappy's output direct-buffer
compressedDirectBuf.clear();
compressedDirectBuf.limit(0);
if (0 == uncompressedDirectBuf.position()) {
// No compressed data, so we should have !needsInput or !finished
setInputFromSavedData();
if (0 == uncompressedDirectBuf.position()) {
// Called without data; write nothing
finished = true;
return 0;
}
}
// Compress data
n = compressDirectBuf();
compressedDirectBuf.limit(n);
uncompressedDirectBuf.clear();// snappy consumes all buffer input
// Set 'finished' if snappy has consumed all user-data
if (0 == userBufLen) {
finished = true;
}
// Get at most 'len' bytes
n = Math.min(n, len);
bytesWritten += n;
((ByteBuffer) (compressedDirectBuf)).get(b, off, n);
return n;
} | 3.26 |
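Taken together, the Javadoc for setInput (exposed above under the obfuscated name m0), finish() and compress() describes the standard Compressor drive loop: feed input, signal finish, then drain compress() until finished() turns true. Below is a minimal sketch of that loop against Hadoop's generic Compressor interface; the standard method names are assumed here rather than the obfuscated ones, and error handling is kept minimal.

```java
import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.compress.Compressor;

// Illustrative drive loop for any Hadoop Compressor (e.g. SnappyCompressor).
public final class CompressorLoopSketch {
  static byte[] compressAll(Compressor compressor, byte[] input) throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    byte[] buffer = new byte[4096];
    compressor.setInput(input, 0, input.length); // the snippet's m0(...)
    compressor.finish();                         // no more input will follow
    while (!compressor.finished()) {
      int n = compressor.compress(buffer, 0, buffer.length);
      out.write(buffer, 0, n); // n == 0 would mean more input is needed
    }
    return out.toByteArray();
  }
}
```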
hadoop_SnappyCompressor_setInputFromSavedData_rdh | /**
* If a write would exceed the capacity of the direct buffers, it is set
* aside to be loaded by this function while the compressed data are
* consumed.
*/
void setInputFromSavedData() {
if (0 >= userBufLen) {
return;
}
finished = false;
uncompressedDirectBufLen = Math.min(userBufLen, f0);
((ByteBuffer) (uncompressedDirectBuf)).put(userBuf, f1, uncompressedDirectBufLen);
// Note how much data is being fed to snappy
f1 += uncompressedDirectBufLen;
userBufLen -= uncompressedDirectBufLen;
} | 3.26 |
hadoop_SnappyCompressor_getBytesWritten_rdh | /**
* Return number of bytes consumed by callers of compress since last reset.
*/
@Override
public long getBytesWritten() {
return bytesWritten;
} | 3.26 |
hadoop_SchedulerNodeReport_getNumContainers_rdh | /**
*
* @return the number of containers currently running on this node.
*/
public int getNumContainers() {
return num;
} | 3.26 |
hadoop_SchedulerNodeReport_getUtilization_rdh | /**
*
* @return utilization of this node
*/
public ResourceUtilization getUtilization() {
return utilization;
} | 3.26 |
hadoop_SchedulerNodeReport_m0_rdh | /**
*
* @return the amount of resources currently available on the node
*/
public Resource m0() {
return avail;
} | 3.26 |
hadoop_SchedulerNodeReport_getUsedResource_rdh | /**
*
* @return the amount of resources currently used by the node.
*/
public Resource getUsedResource() {
return used;
} | 3.26 |
hadoop_Time_formatTime_rdh | /**
* Convert time in millisecond to human readable format.
*
* @param millis
* millisecond.
* @return a human readable string for the input time
*/
public static String formatTime(long millis) {
return DATE_FORMAT.get().format(millis);
} | 3.26 |
hadoop_Time_now_rdh | /**
* Current system time. Do not use this to calculate a duration or interval
* to sleep, because it will be broken by settimeofday. Instead, use
* monotonicNow.
*
* @return current time in msec.
*/
public static long now() {
return System.currentTimeMillis();
} | 3.26 |
hadoop_Time_getUtcTime_rdh | /**
* Get the current UTC time in milliseconds.
*
* @return the current UTC time in milliseconds.
*/
public static long getUtcTime() {
return Calendar.getInstance(UTC_ZONE).getTimeInMillis();
} | 3.26 |
hadoop_Time_monotonicNowNanos_rdh | /**
* Same as {@link #monotonicNow()} but returns its result in nanoseconds.
* Note that this is subject to the same resolution constraints as
* {@link System#nanoTime()}.
*
* @return a monotonic clock that counts in nanoseconds.
*/
public static long monotonicNowNanos() {
return System.nanoTime();
} | 3.26 |
hadoop_Time_m0_rdh | /**
* Current time from some arbitrary time base in the past, counting in
* milliseconds, and not affected by settimeofday or similar system clock
* changes. This is appropriate to use when computing how much longer to
* wait for an interval to expire.
* This function can return a negative value and it must be handled correctly
* by callers. See the documentation of System#nanoTime for caveats.
*
* @return a monotonic clock that counts in milliseconds.
*/
public static long m0() {
return System.nanoTime() / NANOSECONDS_PER_MILLISECOND;
} | 3.26 |
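The Javadoc above recommends the monotonic clock (exposed here under the obfuscated name m0; monotonicNow in upstream Hadoop) for deciding how much longer to wait. A small self-contained sketch of that pattern, using System.nanoTime() directly so it runs without the Time class:

```java
// Illustrative sketch: timing an interval with a monotonic clock instead of
// now()/currentTimeMillis(), which can jump if the wall clock is reset.
public class MonotonicTimingSketch {
  private static final long NANOSECONDS_PER_MILLISECOND = 1_000_000L;

  public static void main(String[] args) throws InterruptedException {
    long start = System.nanoTime() / NANOSECONDS_PER_MILLISECOND;
    Thread.sleep(50); // stand-in for the work being timed
    long elapsedMs = (System.nanoTime() / NANOSECONDS_PER_MILLISECOND) - start;
    System.out.println("elapsed ~" + elapsedMs + " ms");
  }
}
```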
hadoop_FutureIOSupport_propagateOptions_rdh | /**
* Propagate options to any builder.
* {@link FutureIO#propagateOptions(FSBuilder, Configuration, String, boolean)}
*
* @param builder
* builder to modify
* @param conf
* configuration to read
* @param prefix
* prefix to scan/strip
* @param mandatory
* are the options to be mandatory or optional?
*/
@Deprecated
public static void propagateOptions(final FSBuilder<?, ?> builder, final Configuration conf, final String prefix, final boolean mandatory) {
FutureIO.propagateOptions(builder, conf, prefix, mandatory);
} | 3.26 |
hadoop_FutureIOSupport_eval_rdh | /**
* Evaluate a CallableRaisingIOE in the current thread,
* converting IOEs to RTEs and propagating.
* See {@link FutureIO#eval(CallableRaisingIOE)}.
*
* @param callable
* callable to invoke
* @param <T>
* Return type.
* @return the evaluated result.
* @throws UnsupportedOperationException
* fail fast if unsupported
* @throws IllegalArgumentException
* invalid argument
*/
public static <T> CompletableFuture<T> eval(CallableRaisingIOE<T> callable) {
return FutureIO.eval(callable);
} | 3.26 |
hadoop_FutureIOSupport_awaitFuture_rdh | /**
* Given a future, evaluate it. Raised exceptions are
* extracted and handled.
* See {@link FutureIO#awaitFuture(Future, long, TimeUnit)}.
*
* @param future
* future to evaluate
* @param <T>
* type of the result.
* @param timeout
* timeout.
* @param unit
* unit.
* @return the result, if all went well.
* @throws InterruptedIOException
* future was interrupted
* @throws IOException
* if something went wrong
* @throws RuntimeException
* any nested RTE thrown
* @throws TimeoutException
* the future timed out.
*/
@Deprecated
public static <T> T awaitFuture(final Future<T> future, final long timeout, final TimeUnit unit) throws InterruptedIOException, IOException, RuntimeException, TimeoutException {
return FutureIO.awaitFuture(future, timeout, unit);
} | 3.26 |
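Since these FutureIOSupport wrappers are deprecated and simply delegate to FutureIO, a caller would typically use FutureIO directly. A hedged sketch of awaiting an asynchronous openFile() with a timeout follows; the path and timeout are placeholders, and it assumes a Hadoop 3.3+ client where FileSystem.openFile and FutureIO are available.

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.functional.FutureIO;

// Illustrative sketch: await an asynchronous open with a timeout via FutureIO.
public class AwaitFutureSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path path = new Path("/tmp/example.txt"); // placeholder path
    FileSystem fs = FileSystem.get(conf);
    CompletableFuture<FSDataInputStream> future = fs.openFile(path).build();
    try (FSDataInputStream in = FutureIO.awaitFuture(future, 30, TimeUnit.SECONDS)) {
      System.out.println("first byte: " + in.read());
    }
  }
}
```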
hadoop_FutureIOSupport_raiseInnerCause_rdh | /**
* Extract the cause of a completion failure and rethrow it if an IOE
* or RTE.
* See {@link FutureIO#raiseInnerCause(CompletionException)}.
*
* @param e
* exception.
* @param <T>
* type of return value.
* @return nothing, ever.
* @throws IOException
* either the inner IOException, or a wrapper around
* any non-Runtime-Exception
* @throws RuntimeException
* if that is the inner cause.
*/
@Deprecated
public static <T> T raiseInnerCause(final CompletionException e) throws IOException {
return FutureIO.raiseInnerCause(e);
} | 3.26 |
hadoop_RemoteParam_getParameterForContext_rdh | /**
* Determine the appropriate value for this parameter based on the location.
*
* @param context
* Context identifying the location.
* @return A parameter specific to this location.
*/
public Object getParameterForContext(RemoteLocationContext context) {
if (context == null) {
return null;
} else if (this.paramMap != null) {
return this.paramMap.get(context);
} else {
// Default case
return context.getDest();
}
} | 3.26 |
hadoop_ClientMmap_m0_rdh | /**
* Close the ClientMmap object.
*/
@Override
public void m0() {
if (replica != null) {
if (anchored) {
replica.removeNoChecksumAnchor();
}
replica.unref();
}
replica = null;
} | 3.26 |
hadoop_Server_getName_rdh | /**
* Returns the name of the server.
*
* @return the server name.
*/
public String getName() {
return name;
} | 3.26 |
hadoop_Server_getLogDir_rdh | /**
* Returns the server log dir.
*
* @return the server log dir.
*/
public String getLogDir() {
return logDir;
} | 3.26 |
hadoop_Server_destroy_rdh | /**
* Destroys the server.
* <p>
* All services are destroyed in reverse order of initialization, then the
* Log4j framework is shutdown.
*/
public void destroy() {
ensureOperational();
destroyServices();
log.info("Server [{}] shutdown!", name);
log.info("======================================================");
if (!Boolean.getBoolean("test.circus")) {
LogManager.shutdown();
}
status = Status.SHUTDOWN;
} | 3.26 |
hadoop_Server_getConfigDir_rdh | /**
* Returns the server config dir.
*
* @return the server config dir.
*/
public String getConfigDir() {
return configDir;
} | 3.26 |
hadoop_Server_init_rdh | /**
* Initializes the Server.
* <p>
* The initialization steps are:
* <ul>
* <li>It verifies the service home and temp directories exist</li>
* <li>Loads the Server <code>#SERVER#-default.xml</code>
* configuration file from the classpath</li>
* <li>Initializes log4j logging. If the
* <code>#SERVER#-log4j.properties</code> file does not exist in the config
* directory it load <code>default-log4j.properties</code> from the classpath
* </li>
* <li>Loads the <code>#SERVER#-site.xml</code> file from the server config
* directory and merges it with the default configuration.</li>
* <li>Loads the services</li>
* <li>Initializes the services</li>
* <li>Post-initializes the services</li>
* <li>Sets the server startup status</li>
* </ul>
*
* @throws ServerException
* thrown if the server could not be initialized.
*/
public void init() throws ServerException {
if (status != Status.UNDEF) {
throw new IllegalStateException("Server already initialized");
}
status = Status.BOOTING;
verifyDir(homeDir);
verifyDir(tempDir);
Properties serverInfo = new Properties();
try {
InputStream v4 = getResource(name + ".properties");
serverInfo.load(v4);
v4.close();
} catch (IOException ex) {
throw new RuntimeException(("Could not load server information file: " + name) + ".properties");
}
initLog();
log.info("++++++++++++++++++++++++++++++++++++++++++++++++++++++");
log.info("Server [{}] starting", name);
log.info(" Built information:");
log.info(" Version : {}", serverInfo.getProperty(name + ".version", "undef"));
log.info(" Source Repository : {}", serverInfo.getProperty(name + ".source.repository", "undef"));
log.info(" Source Revision : {}", serverInfo.getProperty(name + ".source.revision", "undef"));
log.info(" Built by : {}", serverInfo.getProperty(name + ".build.username", "undef"));
log.info(" Built timestamp : {}", serverInfo.getProperty(name + ".build.timestamp", "undef"));
log.info(" Runtime information:");
log.info(" Home dir: {}", homeDir);
log.info(" Config dir: {}", config == null ? configDir : "-");
log.info(" Log dir: {}", logDir);
log.info(" Temp dir: {}", tempDir);
initConfig();
log.debug("Loading services");
List<Service> list = loadServices();
try {
log.debug("Initializing services");
initServices(list);
log.info("Services initialized");
} catch (ServerException ex) {
log.error("Services initialization failure, destroying initialized services");
destroyServices();
throw ex;
}
Status status = Status.valueOf(getConfig().get(getPrefixedName(CONF_STARTUP_STATUS), Status.NORMAL.toString()));
setStatus(status);
log.info("Server [{}] started!, status [{}]", name, status);
} | 3.26 |
hadoop_Server_verifyDir_rdh | /**
* Verifies the specified directory exists.
*
* @param dir
* directory to verify it exists.
* @throws ServerException
* thrown if the directory does not exist or it the
* path it is not a directory.
*/
private void verifyDir(String dir) throws ServerException {
File file = new File(dir);
if (!file.exists()) {
throw new ServerException(ERROR.S01, dir);
}
if (!file.isDirectory()) {
throw new ServerException(ERROR.S02, dir);
}
} | 3.26 |
hadoop_Server_destroyServices_rdh | /**
* Destroys the server services.
*/
protected void destroyServices() {
List<Service> list = new ArrayList<Service>(services.values());
Collections.reverse(list);
for (Service service : list) {
try {
log.debug("Destroying service [{}]", service.getInterface());
service.destroy();
} catch (Throwable ex) {
log.error("Could not destroy service [{}], {}", new Object[]{ service.getInterface(), ex.getMessage(), ex });}
}
log.info("Services destroyed");
} | 3.26 |
hadoop_Server_getStatus_rdh | /**
* Returns the current server status.
*
* @return the current server status.
*/
public Status getStatus() {
return status;
} | 3.26 |
hadoop_Server_ensureOperational_rdh | /**
* Verifies the server is operational.
*
* @throws IllegalStateException
* thrown if the server is not operational.
*/
protected void ensureOperational() {
if (!getStatus().isOperational()) {
throw new IllegalStateException("Server is not running");
}
} | 3.26 |
hadoop_Server_initServices_rdh | /**
* Initializes the list of services.
*
* @param services
* services to initialized, it must be a de-dupped list of
* services.
* @throws ServerException
* thrown if the services could not be initialized.
*/
protected void initServices(List<Service> services) throws ServerException {
for (Service service : services) {
log.debug("Initializing service [{}]", service.getInterface());
checkServiceDependencies(service);
service.init(this);
this.services.put(service.getInterface(), service);
}
for (Service service : services) {
service.postInit();
}
} | 3.26 |
hadoop_Server_getHomeDir_rdh | /**
* Returns the server home dir.
*
* @return the server home dir.
*/
public String getHomeDir() {
return homeDir;
} | 3.26 |
hadoop_Server_setService_rdh | /**
* Adds a service programmatically.
* <p>
* If a service with the same interface exists, it will be destroyed and
* removed before the given one is initialized and added.
* <p>
* If an exception is thrown the server is destroyed.
*
* @param klass
* service class to add.
* @throws ServerException
* throw if the service could not initialized/added
* to the server.
*/
public void setService(Class<? extends Service> klass) throws ServerException {
ensureOperational();
Check.notNull(klass, "serviceKlass");
if (getStatus() == Status.SHUTTING_DOWN) {
throw new IllegalStateException("Server shutting down");
}
try {
Service newService = klass.newInstance();
Service oldService = services.get(newService.getInterface());
if (oldService != null) {
try {
oldService.destroy();
} catch (Throwable ex) {
log.error("Could not destroy service [{}], {}", new Object[]{ oldService.getInterface(), ex.getMessage(), ex });
}
}
newService.init(this);
services.put(newService.getInterface(), newService);
} catch (Exception ex) {
log.error("Could not set service [{}] programmatically -server shutting down-, {}", klass, ex);
destroy();
throw new ServerException(ERROR.S09, klass, ex.getMessage(), ex);
}
} | 3.26 |
hadoop_Server_getTempDir_rdh | /**
* Returns the server temp dir.
*
* @return the server temp dir.
*/
public String getTempDir() {
return tempDir;
} | 3.26 |
hadoop_Server_checkAbsolutePath_rdh | /**
* Validates that the specified value is an absolute path (starts with '/').
*
* @param value
* value to verify it is an absolute path.
* @param name
* name to use in the exception if the value is not an absolute
* path.
* @return the value.
* @throws IllegalArgumentException
* thrown if the value is not an absolute
* path.
*/
private String checkAbsolutePath(String value, String name) {
if (!new File(value).isAbsolute()) {
throw new IllegalArgumentException(MessageFormat.format("[{0}] must be an absolute path [{1}]", name, value));
}
return value;
} | 3.26 |
hadoop_Server_get_rdh | /**
* Returns the {@link Service} associated to the specified interface.
*
* @param serviceKlass
* service interface.
* @return the service implementation.
*/
@SuppressWarnings("unchecked")
public <T> T get(Class<T> serviceKlass) {
ensureOperational();
Check.notNull(serviceKlass, "serviceKlass");
return ((T) (services.get(serviceKlass)));
} | 3.26 |
hadoop_Server_loadServices_rdh | /**
* Loads services defined in <code>services</code> and
* <code>services.ext</code> and de-dups them.
*
* @return List of final services to initialize.
* @throws ServerException
* throw if the services could not be loaded.
*/
protected List<Service> loadServices() throws ServerException {
try {
Map<Class, Service> map = new LinkedHashMap<Class, Service>();
Class[] classes = getConfig().getClasses(getPrefixedName(CONF_SERVICES));
Class[] classesExt = getConfig().getClasses(getPrefixedName(f0));
List<Service> list = new ArrayList<Service>();
loadServices(classes, list);
loadServices(classesExt, list);
// removing duplicate services, strategy: last one wins
for (Service service : list) {
if (map.containsKey(service.getInterface())) {
log.debug("Replacing service [{}] implementation [{}]", service.getInterface(), service.getClass());
}
map.put(service.getInterface(), service);
}
list = new ArrayList<Service>();
for (Map.Entry<Class, Service> entry : map.entrySet()) {
list.add(entry.getValue());
}
return list;
} catch (RuntimeException ex) {
throw new ServerException(ERROR.S08, ex.getMessage(), ex);
}
} | 3.26 |
hadoop_Server_getPrefixedName_rdh | /**
* Returns the prefixed name of a server property.
*
* @param name
* of the property.
* @return prefixed name of the property.
 */
public String getPrefixedName(String name) {
return (getPrefix() + ".") + Check.notEmpty(name, "name");
} | 3.26 |
hadoop_Server_initConfig_rdh | /**
 * Loads and initializes the server configuration.
*
* @throws ServerException
* thrown if the configuration could not be loaded/initialized.
*/
protected void initConfig() throws ServerException {
verifyDir(configDir);
File file = new File(configDir);
Configuration defaultConf;
String defaultConfig = name + "-default.xml";
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
InputStream inputStream = classLoader.getResourceAsStream(defaultConfig);
if (inputStream == null) {
log.warn("Default configuration file not available in classpath [{}]", defaultConfig);
defaultConf = new Configuration(false);
} else {
try {
defaultConf = new Configuration(false);
ConfigurationUtils.load(defaultConf, inputStream);
} catch (Exception ex) {
throw new ServerException(ERROR.S03, defaultConfig, ex.getMessage(), ex);
}
}
if (config == null) {
Configuration siteConf;
File v17 = new File(file, name + "-site.xml");
if (!v17.exists()) {
log.warn("Site configuration file [{}] not found in config directory", v17);
siteConf = new Configuration(false);
} else {
if (!v17.isFile()) {
throw new ServerException(ERROR.S05, v17.getAbsolutePath());
}
try {
log.debug("Loading site configuration from [{}]", v17);
inputStream = Files.newInputStream(v17.toPath());
siteConf = new Configuration(false);
ConfigurationUtils.load(siteConf, inputStream);
} catch (IOException ex) {
throw new ServerException(ERROR.S06, v17, ex.getMessage(), ex);
}
}
config = new Configuration(false);
ConfigurationUtils.copy(siteConf, config);
}
ConfigurationUtils.injectDefaults(defaultConf, config);
ConfigRedactor redactor = new ConfigRedactor(config);
for (String name : System.getProperties().stringPropertyNames()) {
String value = System.getProperty(name);
if (name.startsWith(getPrefix() + ".")) {
config.set(name, value);
String redacted = redactor.redact(name, value);
log.info("System property sets {}: {}", name, redacted);
}
}
log.debug("Loaded Configuration:");
log.debug("------------------------------------------------------");
for (Map.Entry<String, String> entry : config) {
String name = entry.getKey();
String value = config.get(entry.getKey());
String redacted = redactor.redact(name, value);
log.debug(" {}: {}", entry.getKey(), redacted);
}
log.debug("------------------------------------------------------");
} | 3.26 |
hadoop_Server_getPrefix_rdh | /**
* Returns the server prefix for server configuration properties.
* <p>
* By default it is the server name.
*
* @return the prefix for server configuration properties.
*/
public String getPrefix() {
return getName();
} | 3.26 |
hadoop_Server_getConfig_rdh | /**
* Returns the server configuration.
*
* @return the server configuration.
*/
public Configuration getConfig() {
return config;
} | 3.26 |
hadoop_Server_setStatus_rdh | /**
* Sets a new server status.
* <p>
* The status must be settable.
* <p>
 * All services will be notified of the status change via the
* {@link Service#serverStatusChange(Server.Status, Server.Status)} method. If a service
* throws an exception during the notification, the server will be destroyed.
*
* @param status
* status to set.
* @throws ServerException
* thrown if the service has been destroy because of
* a failed notification to a service.
*/
public void setStatus(Status status) throws ServerException {
Check.notNull(status, "status");
if (status.settable) {
if (status != this.status) {
Status oldStatus = this.status;
this.status = status;
for (Service service : services.values()) {
try {
service.serverStatusChange(oldStatus, status);
} catch (Exception ex) {
log.error("Service [{}] exception during status change to [{}] -server shutting down-, {}", new Object[]{ service.getInterface().getSimpleName(), status, ex.getMessage(), ex });
destroy();
throw new ServerException(ERROR.S11, service.getInterface().getSimpleName(), status, ex.getMessage(), ex);
}
}
}
} else {
throw new IllegalArgumentException(("Status [" + status) + " is not settable");
}
} | 3.26 |
hadoop_Server_isOperational_rdh | /**
* Returns if this server status is operational.
*
* @return if this server status is operational.
*/
public boolean isOperational() {
return operational;
} | 3.26 |
hadoop_Event_getReplication_rdh | /**
* Replication is zero if the CreateEvent iNodeType is directory or symlink.
*/
public int getReplication() {
return f0;
} | 3.26 |
hadoop_Event_m0_rdh | /**
* The size of the closed file in bytes. May be -1 if the size is not
* available (e.g. in the case of a close generated by a concat operation).
*/
public long m0() {
return fileSize;
} | 3.26 |
hadoop_Event_getCtime_rdh | /**
* Creation time of the file, directory, or symlink.
*/
public long getCtime() {
return ctime;
} | 3.26 |
hadoop_Event_getSymlinkTarget_rdh | /**
* Symlink target is null if the CreateEvent iNodeType is not symlink.
*/
public String getSymlinkTarget() {
return symlinkTarget;
} | 3.26 |
hadoop_Event_isxAttrsRemoved_rdh | /**
* Whether the xAttrs returned by getxAttrs() were removed (as opposed to
* added).
*/
public boolean isxAttrsRemoved() {
return xAttrsRemoved;
} | 3.26 |
hadoop_Event_getTimestamp_rdh | /**
* The time when this event occurred, in milliseconds since the epoch.
*/
public long getTimestamp() {
return timestamp;
} | 3.26 |
hadoop_Event_getAcls_rdh | /**
* The full set of ACLs currently associated with this file or directory.
* May be null if all ACLs were removed.
*/
public List<AclEntry> getAcls() {
return acls;
} | 3.26 |
hadoop_AbfsDelegationTokenManager_close_rdh | /**
* Close.
* If the token manager is closeable, it has its {@link Closeable#close()}
* method (quietly) invoked.
*/
@Override
public void close() {
if (tokenManager instanceof Closeable) {
IOUtils.cleanupWithLogger(LOG, ((Closeable) (tokenManager)));
}
} | 3.26 |
hadoop_AbfsDelegationTokenManager_getDelegationToken_rdh | /**
* Get a delegation token by invoking
* {@link CustomDelegationTokenManager#getDelegationToken(String)}.
* If the token returned already has a Kind; that is used.
* If not, then the token kind is set to
* {@link AbfsDelegationTokenIdentifier#TOKEN_KIND}, which implicitly
* resets any token renewer class.
*
* @param renewer
* the principal permitted to renew the token.
* @return a token for the filesystem.
* @throws IOException
* failure.
*/
public Token<DelegationTokenIdentifier> getDelegationToken(String renewer) throws IOException {
LOG.debug("Requesting Delegation token for {}", renewer);Token<DelegationTokenIdentifier> token = tokenManager.getDelegationToken(renewer);
if (token.getKind() == null) {
// if a token type is not set, use the default.
// note: this also sets the renewer to null.
token.setKind(AbfsDelegationTokenIdentifier.TOKEN_KIND);
}
return token;
} | 3.26 |
hadoop_AbfsDelegationTokenManager_bind_rdh | /**
* Bind to a filesystem instance by passing the binding information down
* to any token manager which implements {@link BoundDTExtension}.
*
* This is not invoked before renew or cancel operations, but is guaranteed
* to be invoked before calls to {@link #getDelegationToken(String)}.
*
* @param fsURI
* URI of the filesystem.
* @param conf
* configuration of this extension.
* @throws IOException
* bind failure.
*/
@Override
public void bind(final URI fsURI, final Configuration conf) throws IOException {
Preconditions.checkNotNull(fsURI, "Np Filesystem URI");
ExtensionHelper.bind(tokenManager, fsURI, conf);
} | 3.26 |
hadoop_DirectoryStagingCommitter_preCommitJob_rdh | /**
* Pre-commit actions for a job.
* Here: look at the conflict resolution mode and choose
* an action based on the current policy.
*
* @param commitContext
* commit context
* @param pending
* pending commits
* @throws IOException
* any failure
*/
@Override
public void preCommitJob(final CommitContext commitContext, final ActiveCommit pending) throws IOException {
final JobContext context = commitContext.getJobContext();
// see if the files can be loaded.
super.preCommitJob(commitContext, pending);
Path outputPath = getOutputPath();
FileSystem fs = getDestFS();
Configuration fsConf = fs.getConf();
switch (getConflictResolutionMode(context, fsConf)) {
case FAIL :
// this was checked in setupJob; temporary files may have been
// created, so do not check again.
break;
case APPEND :
// do nothing
break;
case REPLACE :
if (fs.delete(outputPath, true /* recursive */)) {
LOG.info("{}: removed output path to be replaced: {}", getRole(), outputPath);
}
break;
default :
throw new IOException((getRole() + ": unknown conflict resolution mode: ") + getConflictResolutionMode(context, fsConf));
}
} | 3.26 |
hadoop_RegistryTypeUtils_validateServiceRecord_rdh | /**
* Validate the record by checking for null fields and other invalid
* conditions
*
* @param path
* path for exceptions
* @param record
* record to validate. May be null
* @throws InvalidRecordException
* on invalid entries
*/
public static void validateServiceRecord(String path, ServiceRecord record) throws InvalidRecordException {
if (record == null) {
throw new InvalidRecordException(path, "Null record");
}
if (!ServiceRecord.RECORD_TYPE.equals(record.type)) {
throw new InvalidRecordException(path, ("invalid record type field: \"" + record.type) + "\"");
}
if (record.external != null) {
for (Endpoint endpoint : record.external) {
validateEndpoint(path, endpoint);
}
}
if (record.internal != null) {
for (Endpoint endpoint : record.internal) {
validateEndpoint(path, endpoint);
}
}
} | 3.26 |
hadoop_RegistryTypeUtils_getAddressField_rdh | /**
* Get a specific field from an address -raising an exception if
* the field is not present
*
* @param address
* address to query
* @param field
* field to resolve
* @return the resolved value. Guaranteed to be non-null.
* @throws InvalidRecordException
* if the field did not resolve
*/
public static String getAddressField(Map<String, String> address, String field) throws InvalidRecordException {
String val = address.get(field);
if (val == null) {
throw new InvalidRecordException("", "Missing address field: " + field);
}
return val;
} | 3.26 |
hadoop_RegistryTypeUtils_ipcEndpoint_rdh | /**
* Create an IPC endpoint
*
* @param api
* API
* @param address
* the address as a tuple of (hostname, port)
* @return the new endpoint
*/
public static Endpoint ipcEndpoint(String api, InetSocketAddress address) {
return new Endpoint(api, ADDRESS_HOSTNAME_AND_PORT, ProtocolTypes.PROTOCOL_HADOOP_IPC, address == null ? null : hostnamePortPair(address));
} | 3.26 |
hadoop_RegistryTypeUtils_validateEndpoint_rdh | /**
* Validate the endpoint by checking for null fields and other invalid
* conditions
*
* @param path
* path for exceptions
* @param endpoint
* endpoint to validate. May be null
* @throws InvalidRecordException
* on invalid entries
*/
public static void validateEndpoint(String path, Endpoint endpoint) throws InvalidRecordException {
if (endpoint == null) {
throw new InvalidRecordException(path, "Null endpoint");
}
try {
endpoint.validate();
} catch (RuntimeException e) {
throw new InvalidRecordException(path, e.toString());
}
} | 3.26 |
hadoop_RegistryTypeUtils_requireAddressType_rdh | /**
* Require a specific address type on an endpoint
*
* @param required
* required type
* @param epr
* endpoint
* @throws InvalidRecordException
* if the type is wrong
*/
public static void requireAddressType(String required, Endpoint epr) throws InvalidRecordException {
if (!required.equals(epr.addressType)) {
throw new InvalidRecordException(epr.toString(), (("Address type of " + epr.addressType) + " does not match required type of ") + required);
}
} | 3.26 |
hadoop_RegistryTypeUtils_uri_rdh | /**
* Create a URI
*
* @param uri
* value
* @return a 1 entry map.
*/
public static Map<String, String> uri(String uri) {
return map(ADDRESS_URI, uri);
} | 3.26 |
hadoop_RegistryTypeUtils_hostnamePortPair_rdh | /**
* Create a (hostname, port) address pair
*
* @param address
* socket address whose hostname and port are used for the
* generated address.
* @return a 1 entry map.
*/
public static Map<String, String> hostnamePortPair(InetSocketAddress address) {
return hostnamePortPair(address.getHostName(), address.getPort());
} | 3.26 |
hadoop_RegistryTypeUtils_restEndpoint_rdh | /**
* Create a REST endpoint from a list of URIs
*
* @param api
* implemented API
* @param uris
* URIs
* @return a new endpoint
*/
public static Endpoint restEndpoint(String api, URI... uris) {
return urlEndpoint(api, ProtocolTypes.PROTOCOL_REST, uris);
} | 3.26 |
hadoop_RegistryTypeUtils_webEndpoint_rdh | /**
* Create a Web UI endpoint from a list of URIs
*
* @param api
* implemented API
* @param uris
* URIs
* @return a new endpoint
*/
public static Endpoint webEndpoint(String api, URI... uris) {
return urlEndpoint(api, ProtocolTypes.PROTOCOL_WEBUI, uris);
} | 3.26 |
hadoop_RegistryTypeUtils_map_rdh | /**
* Create a single entry map
*
* @param key
* map entry key
* @param val
* map entry value
* @return a 1 entry map.
*/
public static Map<String, String> map(String key, String val) {
Map<String, String> map = new HashMap<String, String>(1);
map.put(key, val);
return map;
} | 3.26 |
hadoop_RegistryTypeUtils_retrieveAddressURLs_rdh | /**
 * Get the address URLs. Guaranteed to return at least one address.
*
* @param epr
* endpoint
* @return the address as a URL
* @throws InvalidRecordException
* if the type is wrong, there are no addresses
* or the payload ill-formatted
* @throws MalformedURLException
* address can't be turned into a URL
*/
public static List<URL> retrieveAddressURLs(Endpoint epr) throws InvalidRecordException, MalformedURLException {
if (epr == null) {
throw new InvalidRecordException("", "Null endpoint");
}
List<String> addresses = retrieveAddressesUriType(epr);
List<URL> results = new ArrayList<URL>(addresses.size());
for (String address : addresses) {
results.add(new URL(address));
}
return results;
} | 3.26 |
hadoop_RecoverPausedContainerLaunch_m0_rdh | /**
* Cleanup the paused container by issuing a kill on it.
*/
@SuppressWarnings("unchecked")
@Override
public Integer m0() {
int retCode = ExitCode.LOST.getExitCode();
ContainerId containerId = container.getContainerId();
String appIdStr = containerId.getApplicationAttemptId().getApplicationId().toString();
String containerIdStr = containerId.toString();
dispatcher.getEventHandler().handle(new ContainerEvent(containerId, ContainerEventType.RECOVER_PAUSED_CONTAINER));
boolean interrupted = false;
try {
File pidFile = locatePidFile(appIdStr, containerIdStr);
if (pidFile != null) {
String pidPathStr = pidFile.getPath();
pidFilePath = new Path(pidPathStr);
exec.activateContainer(containerId, pidFilePath);
retCode = exec.reacquireContainer(new ContainerReacquisitionContext.Builder().setContainer(container).setUser(container.getUser()).setContainerId(containerId).build());
} else {
LOG.warn("Unable to locate pid file for container " + containerIdStr);
}
} catch (InterruptedException | InterruptedIOException e) {
LOG.warn("Interrupted while waiting for exit code from " + containerId);
interrupted = true;
} catch (IOException e) {
LOG.error("Unable to kill the paused container " + containerIdStr, e);
} finally {
if (!interrupted) {
this.completed.set(true);
exec.deactivateContainer(containerId);
try {
getContext().getNMStateStore().storeContainerCompleted(containerId, retCode);
} catch (IOException e) {
LOG.error("Unable to set exit code for container " + containerId);
}
}
}
if (retCode != 0) {
LOG.warn("Recovered container exited with a non-zero exit code " + retCode);
this.dispatcher.getEventHandler().handle(new ContainerExitEvent(containerId, ContainerEventType.CONTAINER_EXITED_WITH_FAILURE, retCode, "Container exited with a non-zero exit code " + retCode));
return retCode;
}
LOG.info(("Recovered container " + containerId) + " succeeded");
dispatcher.getEventHandler().handle(new ContainerEvent(containerId, ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS));
return 0;
} | 3.26 |
hadoop_LoggedTask_incorporateCounters_rdh | // incorporate event counters
// LoggedTask MUST KNOW ITS TYPE BEFORE THIS CALL
public void incorporateCounters(JhCounters counters) {
switch (taskType) {
case MAP :
incorporateMapCounters(counters);
return;
case REDUCE :
incorporateReduceCounters(counters);
return;
// NOT exhaustive
}
} | 3.26 |
hadoop_LoggedTask_setUnknownAttribute_rdh | // for input parameter ignored.
@JsonAnySetter
public void setUnknownAttribute(String attributeName, Object ignored) {
if (!alreadySeenAnySetterAttributes.contains(attributeName)) {
alreadySeenAnySetterAttributes.add(attributeName);
System.err.println(("In LoggedJob, we saw the unknown attribute " + attributeName) + ".");
}
} | 3.26 |
hadoop_SnappyCodec_getDefaultExtension_rdh | /**
* Get the default filename extension for this kind of compression.
*
* @return <code>.snappy</code>.
*/
@Override
public String getDefaultExtension() {
return CodecConstants.SNAPPY_CODEC_EXTENSION;
} | 3.26 |
hadoop_SnappyCodec_setConf_rdh | /**
* Set the configuration to be used by this object.
*
* @param conf
* the configuration object.
*/
@Override
public void setConf(Configuration conf) {
this.conf = conf;
} | 3.26 |
hadoop_SnappyCodec_createCompressor_rdh | /**
* Create a new {@link Compressor} for use by this {@link CompressionCodec}.
*
* @return a new compressor for use by this codec
*/
@Override
public Compressor createCompressor() {
int bufferSize = conf.getInt(CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY, CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT);
return new SnappyCompressor(bufferSize);
} | 3.26 |
hadoop_SnappyCodec_createDecompressor_rdh | /**
* Create a new {@link Decompressor} for use by this {@link CompressionCodec}.
*
* @return a new decompressor for use by this codec
*/
@Override
public Decompressor createDecompressor() {
int bufferSize = conf.getInt(CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY, CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT);
return new SnappyDecompressor(bufferSize);
}
/**
* {@inheritDoc } | 3.26 |
hadoop_SnappyCodec_getConf_rdh | /**
* Return the configuration used by this object.
*
* @return the configuration object used by this objec.
*/
@Override
public Configuration getConf() {
return conf;
} | 3.26 |
hadoop_SnappyCodec_createInputStream_rdh | /**
* Create a {@link CompressionInputStream} that will read from the given
* {@link InputStream} with the given {@link Decompressor}.
*
* @param in
* the stream to read compressed bytes from
* @param decompressor
* decompressor to use
* @return a stream to read uncompressed bytes from
* @throws IOException
* raised on errors performing I/O.
*/
@Override
public CompressionInputStream createInputStream(InputStream in, Decompressor decompressor) throws IOException {
return new BlockDecompressorStream(in, decompressor, conf.getInt(CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY, CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT));
} | 3.26 |
hadoop_SnappyCodec_getDecompressorType_rdh | /**
* Get the type of {@link Decompressor} needed by this {@link CompressionCodec}.
*
* @return the type of decompressor needed by this codec.
*/
@Override
public Class<? extends Decompressor> getDecompressorType() {
return SnappyDecompressor.class;
} | 3.26 |
hadoop_SnappyCodec_m0_rdh | /**
* Get the type of {@link Compressor} needed by this {@link CompressionCodec}.
*
* @return the type of compressor needed by this codec.
 */
@Override
public Class<? extends Compressor> m0() {
return SnappyCompressor.class;
} | 3.26 |
hadoop_SnappyCodec_createOutputStream_rdh | /**
* Create a {@link CompressionOutputStream} that will write to the given
* {@link OutputStream} with the given {@link Compressor}.
*
* @param out
* the location for the final output stream
* @param compressor
* compressor to use
* @return a stream the user can write uncompressed data to have it compressed
* @throws IOException
* raised on errors performing I/O.
*/
@Override
public CompressionOutputStream createOutputStream(OutputStream out, Compressor compressor) throws IOException {
int bufferSize = conf.getInt(CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY, CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT);
int compressionOverhead = (bufferSize / 6) + 32;
return new BlockCompressorStream(out, compressor, bufferSize, compressionOverhead);
} | 3.26 |
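The factory methods above are normally used together for a stream round trip: createOutputStream to compress, createInputStream to read the data back. A small sketch of that round trip, under the assumption that a Snappy implementation is available to the running JVM:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.SnappyCodec;

// Illustrative round trip through the codec's stream factories shown above.
public class SnappyCodecSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    SnappyCodec codec = new SnappyCodec();
    codec.setConf(conf);

    byte[] data = "hello snappy hello snappy".getBytes(StandardCharsets.UTF_8);

    ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    try (CompressionOutputStream out = codec.createOutputStream(compressed)) {
      out.write(data); // block-compressed on the way out
    }

    try (CompressionInputStream in =
             codec.createInputStream(new ByteArrayInputStream(compressed.toByteArray()))) {
      ByteArrayOutputStream restored = new ByteArrayOutputStream();
      IOUtils.copyBytes(in, restored, 4096, false);
      System.out.println(restored.toString("UTF-8"));
    }
  }
}
```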
hadoop_FutureDataInputStreamBuilderImpl_getStatus_rdh | /**
* Get any status set in {@link #withFileStatus(FileStatus)}.
*
* @return a status value or null.
*/
protected FileStatus getStatus() {
return status;
} | 3.26 |
hadoop_FutureDataInputStreamBuilderImpl_builder_rdh | /**
* Get the builder.
* This must be used after the constructor has been invoked to create
* the actual builder: it allows for subclasses to do things after
* construction.
*
* @return FutureDataInputStreamBuilder.
*/
public FutureDataInputStreamBuilder builder() {
return getThisBuilder();
} | 3.26 |
hadoop_FutureDataInputStreamBuilderImpl_initFromFS_rdh | /**
* Initialize from a filesystem.
*/
private void initFromFS() {
bufferSize = fileSystem.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT);
} | 3.26 |
hadoop_FutureDataInputStreamBuilderImpl_bufferSize_rdh | /**
* Set the size of the buffer to be used.
*
* @param bufSize
* buffer size.
* @return FutureDataInputStreamBuilder.
*/
public FutureDataInputStreamBuilder bufferSize(int bufSize) {
bufferSize = bufSize;
return getThisBuilder();
} | 3.26 |
hadoop_HdfsDataOutputStream_getCurrentBlockReplication_rdh | /**
* Get the actual number of replicas of the current block.
*
* This can be different from the designated replication factor of the file
* because the namenode does not maintain replication for the blocks which are
* currently being written to. Depending on the configuration, the client may
* continue to write to a block even if a few datanodes in the write pipeline
* have failed, or the client may add a new datanodes once a datanode has
* failed.
*
* @return the number of valid replicas of the current block
 */
public synchronized int getCurrentBlockReplication() throws IOException {
OutputStream wrappedStream = getWrappedStream();
if (wrappedStream instanceof CryptoOutputStream) {
wrappedStream = ((CryptoOutputStream) (wrappedStream)).getWrappedStream();
}
return ((DFSOutputStream) (wrappedStream)).getCurrentBlockReplication();
} | 3.26 |
hadoop_HdfsDataOutputStream_hsync_rdh | /**
* Sync buffered data to DataNodes (flush to disk devices).
*
* @param syncFlags
* Indicate the detailed semantic and actions of the hsync.
* @throws IOException
* @see FSDataOutputStream#hsync()
*/
public void hsync(EnumSet<SyncFlag> syncFlags) throws IOException {
OutputStream wrappedStream = getWrappedStream();
if (wrappedStream instanceof CryptoOutputStream) {
wrappedStream.flush();
wrappedStream = ((CryptoOutputStream) (wrappedStream)).getWrappedStream();
}
((DFSOutputStream) (wrappedStream)).hsync(syncFlags);
} | 3.26 |
hadoop_JWTRedirectAuthenticationHandler_validateToken_rdh | /**
* This method provides a single method for validating the JWT for use in
* request processing. It provides for the override of specific aspects of
* this implementation through submethods used within but also allows for the
* override of the entire token validation algorithm.
*
* @param jwtToken
* the token to validate
* @return true if valid
*/
protected boolean validateToken(SignedJWT jwtToken) {
boolean sigValid = validateSignature(jwtToken);
if (!sigValid) {
LOG.warn("Signature could not be verified");
}
boolean audValid = validateAudiences(jwtToken);
if (!audValid) {
LOG.warn("Audience validation failed.");
}
boolean v21 = validateExpiration(jwtToken);
if (!v21) {
LOG.info("Expiration validation failed.");
}
return (sigValid && audValid) && v21;
} | 3.26 |
hadoop_JWTRedirectAuthenticationHandler_validateSignature_rdh | /**
* Verify the signature of the JWT token in this method. This method depends
* on the public key that was established during init based upon the
* provisioned public key. Override this method in subclasses in order to
* customize the signature verification behavior.
*
* @param jwtToken
* the token that contains the signature to be validated
* @return valid true if signature verifies successfully; false otherwise
*/
protected boolean validateSignature(SignedJWT jwtToken) {
boolean v22 = false;
if (State.SIGNED == jwtToken.getState()) {
LOG.debug("JWT token is in a SIGNED state");
if (jwtToken.getSignature() != null) {
LOG.debug("JWT token signature is not null");
try {
JWSVerifier verifier = new RSASSAVerifier(publicKey);
if (jwtToken.verify(verifier)) {
v22 = true;
LOG.debug("JWT token has been successfully verified");
} else {
LOG.warn("JWT signature verification failed.");
}
} catch (JOSEException je) {
LOG.warn("Error while validating signature", je);
}
}
}
return v22;
} | 3.26 |
hadoop_JWTRedirectAuthenticationHandler_validateAudiences_rdh | /**
* Validate whether any of the accepted audience claims is present in the
* issued token claims list for audience. Override this method in subclasses
* in order to customize the audience validation behavior.
*
* @param jwtToken
* the JWT token where the allowed audiences will be found
* @return true if an expected audience is present, otherwise false
*/
protected boolean validateAudiences(SignedJWT jwtToken) {
boolean valid = false;
try {
List<String> tokenAudienceList = jwtToken.getJWTClaimsSet().getAudience();
// if there were no expected audiences configured then just
// consider any audience acceptable
if (audiences == null) {
valid = true;
} else {
// if any of the configured audiences is found then consider it
// acceptable
boolean v26 = false;
for (String aud : tokenAudienceList) {
if (audiences.contains(aud)) {
LOG.debug("JWT token audience has been successfully validated");
valid = true;
break;
}
}
if (!valid) {
LOG.warn("JWT audience validation failed.");
}
}
} catch (ParseException pe) {
LOG.warn("Unable to parse the JWT token.", pe);
}
return valid;
} | 3.26 |
hadoop_JWTRedirectAuthenticationHandler_constructLoginURL_rdh | /**
* Create the URL to be used for authentication of the user in the absence of
* a JWT token within the incoming request.
*
* @param request
* for getting the original request URL
* @return url to use as login url for redirect
*/
@VisibleForTesting
String constructLoginURL(HttpServletRequest request) {
String delimiter = "?";
if (authenticationProviderUrl.contains("?")) {
delimiter = "&";
}
String loginURL = (((authenticationProviderUrl + delimiter) + ORIGINAL_URL_QUERY_PARAM) + request.getRequestURL().toString()) + getOriginalQueryString(request);
return loginURL;
} | 3.26 |
hadoop_JWTRedirectAuthenticationHandler_setPublicKey_rdh | /**
* Primarily for testing, this provides a way to set the publicKey for
* signature verification without needing to get a PEM encoded value.
*
* @param pk
 * publicKey for the token signature verification
*/
public void setPublicKey(RSAPublicKey pk) {
publicKey = pk;
} | 3.26 |
hadoop_JWTRedirectAuthenticationHandler_getJWTFromCookie_rdh | /**
* Encapsulate the acquisition of the JWT token from HTTP cookies within the
* request.
*
* @param req
* servlet request to get the JWT token from
* @return serialized JWT token
 */
protected String getJWTFromCookie(HttpServletRequest req) {
String serializedJWT = null;
Cookie[] cookies = req.getCookies();
if (cookies != null) {
for (Cookie cookie : cookies) {
if (cookieName.equals(cookie.getName())) {
LOG.info(cookieName + " cookie has been found and is being processed");
serializedJWT = cookie.getValue();
break;
}
}
}
return serializedJWT;
} | 3.26 |
hadoop_JWTRedirectAuthenticationHandler_init_rdh | /**
* Initializes the authentication handler instance.
* <p>
* This method is invoked by the {@link AuthenticationFilter#init} method.
* </p>
*
* @param config
* configuration properties to initialize the handler.
* @throws ServletException
* thrown if the handler could not be initialized.
*/
@Override
public void init(Properties config) throws ServletException {
super.init(config);
// setup the URL to redirect to for authentication
authenticationProviderUrl = config.getProperty(AUTHENTICATION_PROVIDER_URL);
if (authenticationProviderUrl == null) {
throw new ServletException("Authentication provider URL must not be null - configure: " + AUTHENTICATION_PROVIDER_URL);
}
// setup the public key of the token issuer for verification
if (publicKey == null) {
String pemPublicKey = config.getProperty(PUBLIC_KEY_PEM);
if (pemPublicKey == null) {
throw new ServletException("Public key for signature validation must be provisioned.");
}
publicKey = CertificateUtil.parseRSAPublicKey(pemPublicKey);
}
// setup the list of valid audiences for token validation
String auds = config.getProperty(EXPECTED_JWT_AUDIENCES);
if (auds != null) {
// parse into the list
String[] audArray = auds.split(",");
audiences = new ArrayList<String>();
for (String a : audArray) {
audiences.add(a);
}
}
// setup custom cookie name if configured
String customCookieName = config.getProperty(JWT_COOKIE_NAME);
if (customCookieName != null) {
cookieName = customCookieName;
}
} | 3.26 |
hadoop_JWTRedirectAuthenticationHandler_validateExpiration_rdh | /**
* Validate that the expiration time of the JWT token has not been violated.
* If it has then throw an AuthenticationException. Override this method in
* subclasses in order to customize the expiration validation behavior.
*
* @param jwtToken
* the token that contains the expiration date to validate
* @return valid true if the token has not expired; false otherwise
*/
protected boolean validateExpiration(SignedJWT jwtToken) {
boolean valid = false;
try {
Date expires = jwtToken.getJWTClaimsSet().getExpirationTime();
if ((expires == null) || new Date().before(expires)) {
LOG.debug("JWT token expiration date has been " + "successfully validated");
valid = true;
} else {
LOG.warn("JWT expiration date validation failed.");
}
} catch (ParseException pe) {
LOG.warn("JWT expiration date validation failed.", pe);
}
return valid;
} | 3.26 |
hadoop_PathCapabilitiesSupport_validatePathCapabilityArgs_rdh | /**
* Validate the arguments to
* {@link PathCapabilities#hasPathCapability(Path, String)}.
*
* @param path
* path to query the capability of.
* @param capability
* non-null, non-empty string to query the path for support.
* @return the string to use in a switch statement.
* @throws IllegalArgumentException
* if a an argument is invalid.
*/
public static String validatePathCapabilityArgs(final Path path, final String capability) {
checkArgument(path != null, "null path");
checkArgument(capability != null, "capability parameter is null");
checkArgument(!capability.isEmpty(), "capability parameter is empty string");
return capability.toLowerCase(Locale.ENGLISH);
} | 3.26 |
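The Javadoc notes that the returned lower-cased string is meant to feed a switch statement inside a FileSystem's hasPathCapability implementation. A hypothetical sketch of that dispatch pattern follows; the capability names are illustrative placeholders, not a real FileSystem's supported set.

```java
import java.util.Locale;

// Illustrative sketch only: normalise the capability string, then dispatch on
// it, which is the switch-statement pattern the Javadoc above refers to.
public class PathCapabilitySketch {
  public static boolean hasCapability(String capability) {
    if (capability == null || capability.isEmpty()) {
      throw new IllegalArgumentException("capability is null or empty");
    }
    switch (capability.toLowerCase(Locale.ENGLISH)) {
      case "fs.capability.paths.append": // placeholder capability name
      case "fs.capability.paths.concat": // placeholder capability name
        return true;
      default:
        return false;
    }
  }

  public static void main(String[] args) {
    System.out.println(hasCapability("FS.Capability.Paths.Append")); // true
  }
}
```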
hadoop_DataJoinReducerBase_joinAndCollect_rdh | /**
* Perform the actual join recursively.
*
* @param tags
* a list of input tags
* @param values
* a list of value lists, each corresponding to one input source
* @param pos
* indicating the next value list to be joined
* @param partialList
* a list of values, each from one value list considered so far.
* @param key
* @param output
* @throws IOException
 */
private void joinAndCollect(Object[] tags, ResetableIterator[] values, int pos, Object[] partialList, Object key, OutputCollector output, Reporter reporter) throws IOException {
if (values.length == pos) {
// get a value from each source. Combine them
TaggedMapOutput v10 = combine(tags, partialList);
collect(key, v10, output, reporter);
return;
}
ResetableIterator nextValues = values[pos];
nextValues.reset();
while (nextValues.hasNext()) {
Object v = nextValues.next();
partialList[pos] = v;
joinAndCollect(tags, values, pos + 1, partialList, key, output, reporter);
}
} | 3.26 |
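joinAndCollect builds every combination of one value per tagged source by recursing over the value lists. The same cross-product recursion, stripped of the ResetableIterator/OutputCollector machinery and shown over plain lists as a self-contained sketch:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Illustrative sketch of the recursive cross-product pattern that
// joinAndCollect implements: pick one value per source, recurse on the next
// source, and emit each completed combination.
public class CrossProductSketch {
  static void join(List<List<String>> sources, int pos, String[] partial) {
    if (pos == sources.size()) {
      System.out.println(Arrays.toString(partial)); // one joined combination
      return;
    }
    for (String value : sources.get(pos)) {
      partial[pos] = value;
      join(sources, pos + 1, partial);
    }
  }

  public static void main(String[] args) {
    List<List<String>> sources = new ArrayList<>();
    sources.add(Arrays.asList("a1", "a2"));
    sources.add(Arrays.asList("b1", "b2"));
    join(sources, 0, new String[sources.size()]);
  }
}
```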