name (string, lengths 12 to 178) | code_snippet (string, lengths 8 to 36.5k) | score (float64, 3.26 to 3.68)
---|---|---|
hadoop_BondedS3AStatisticsContext_newDelegationTokenStatistics_rdh | /**
* Create a delegation token statistics instance.
*
* @return an instance of delegation token statistics
*/
@Override
public DelegationTokenStatistics newDelegationTokenStatistics() {
return getInstrumentation().newDelegationTokenStatistics();
} | 3.26 |
hadoop_BondedS3AStatisticsContext_newOutputStreamStatistics_rdh | /**
* Create a stream output statistics instance.
*
* @return the new instance
*/
@Override
public BlockOutputStreamStatistics newOutputStreamStatistics() {
return getInstrumentation().newOutputStreamStatistics(getInstanceStatistics());
} | 3.26 |
hadoop_BondedS3AStatisticsContext_addValueToQuantiles_rdh | /**
* Add a value to a quantiles statistic. No-op if the quantile
* isn't found.
*
* @param op
* operation to look up.
* @param value
* value to add.
* @throws ClassCastException
* if the metric is not a Quantiles.
*/
@Override
public void addValueToQuantiles(Statistic op, long value) {
getInstrumentation().addValueToQuantiles(op, value);
} | 3.26 |
hadoop_BondedS3AStatisticsContext_incrementGauge_rdh | /**
* Increment a specific gauge.
* <p>
* No-op if not defined.
*
* @param op
* operation
* @param count
* increment value
* @throws ClassCastException
* if the metric is of the wrong type
*/
@Override
public void incrementGauge(Statistic op, long count) {
getInstrumentation().incrementGauge(op, count);
} | 3.26 |
hadoop_BondedS3AStatisticsContext_incrementCounter_rdh | /**
* Increment a specific counter.
* <p>
* No-op if not defined.
*
* @param op
* operation
* @param count
* increment value
*/
@Override
public void incrementCounter(Statistic op, long count) {
getInstrumentation().incrementCounter(op, count);
} | 3.26 |
hadoop_BondedS3AStatisticsContext_getInstanceStatistics_rdh | /**
* The filesystem statistics: know this is thread-local.
*
* @return FS statistics.
*/
private Statistics getInstanceStatistics() {
return statisticsSource.getInstanceStatistics();
} | 3.26 |
hadoop_BondedS3AStatisticsContext_getInstrumentation_rdh | /**
* Get the instrumentation from the FS integration.
*
* @return instrumentation instance.
*/
private S3AInstrumentation getInstrumentation() {
return statisticsSource.getInstrumentation();
} | 3.26 |
hadoop_BondedS3AStatisticsContext_newInputStreamStatistics_rdh | /**
* Create a stream input statistics instance.
* The FileSystem.Statistics instance of the {@link #statisticsSource}
* is used as the reference to FileSystem statistics to update
*
* @return the new instance
*/
@Override
public S3AInputStreamStatistics newInputStreamStatistics() {
return getInstrumentation().newInputStreamStatistics(statisticsSource.getInstanceStatistics());
} | 3.26 |
hadoop_OperationAuditorOptions_withConfiguration_rdh | /**
* Set builder value.
*
* @param value
* new value
* @return the builder
*/
public OperationAuditorOptions withConfiguration(final Configuration value) {
configuration = value;
return this;
} | 3.26 |
hadoop_OperationAuditorOptions_m0_rdh | /**
* Create one.
*
* @return a new option instance
*/
public static OperationAuditorOptions m0() {
return new OperationAuditorOptions();
} | 3.26 |
hadoop_OperationAuditorOptions_withIoStatisticsStore_rdh | /**
* Set builder value.
*
* @param value
* new value
* @return the builder
*/
public OperationAuditorOptions withIoStatisticsStore(final IOStatisticsStore value) {
ioStatisticsStore = value;
return this;
} | 3.26 |
hadoop_AuditContextUpdater_resetCurrentAuditContext_rdh | /**
* Remove job/task info from the current audit context.
*/
public void resetCurrentAuditContext() {
currentAuditContext().remove(AuditConstants.PARAM_JOB_ID);
currentAuditContext().remove(CommitConstants.PARAM_TASK_ATTEMPT_ID);
} | 3.26 |
hadoop_AuditContextUpdater_updateCurrentAuditContext_rdh | /**
* Add job/task info to current audit context.
*/
public void updateCurrentAuditContext() {
final CommonAuditContext auditCtx = currentAuditContext();
if (jobId != null) {
auditCtx.put(AuditConstants.PARAM_JOB_ID, jobId);
} else {
currentAuditContext().remove(AuditConstants.PARAM_JOB_ID);
}
if (taskAttemptId != null) {
auditCtx.put(AuditConstants.PARAM_TASK_ATTEMPT_ID, taskAttemptId);
} else {
currentAuditContext().remove(CommitConstants.PARAM_TASK_ATTEMPT_ID);
}
} | 3.26 |
hadoop_DockerClientConfigHandler_getCredentialsFromTokensByteBuffer_rdh | /**
* Convert the Token ByteBuffer to the appropriate Credentials object.
*
* @param tokens
* the Tokens from the ContainerLaunchContext.
* @return the Credentials object populated from the Tokens.
* @throws IOException
* io error occur.
*/
public static Credentials getCredentialsFromTokensByteBuffer(ByteBuffer tokens) throws IOException {
Credentials credentials = new Credentials();
DataInputByteBuffer dibb = new DataInputByteBuffer();
tokens.rewind();
dibb.reset(tokens);
credentials.readTokenStorageStream(dibb);
tokens.rewind();
if (LOG.isDebugEnabled()) {
for (Token token : credentials.getAllTokens()) {
LOG.debug("Token read from token storage: {}", token);
}
}
return credentials;
} | 3.26 |
hadoop_DockerClientConfigHandler_readCredentialsFromConfigFile_rdh | /**
* Read the Docker client configuration and extract the auth tokens into
* Credentials.
*
* @param configFile
* the Path to the Docker client configuration.
* @param conf
* the Configuration object, needed by the FileSystem.
* @param applicationId
* the application ID to associate the Credentials with.
* @return the populated Credential object with the Docker Tokens.
* @throws IOException
* if the file can not be read.
*/
public static Credentials readCredentialsFromConfigFile(Path configFile, Configuration conf, String applicationId) throws IOException {
// Read the config file
String contents = null;
configFile = new Path(configFile.toUri());
FileSystem fs = configFile.getFileSystem(conf);
if (fs != null) {
FSDataInputStream fileHandle = fs.open(configFile);
if (fileHandle != null) {
contents = IOUtils.toString(fileHandle, StandardCharsets.UTF_8);
}
}
if (contents == null) {
throw new IOException("Failed to read Docker client configuration: " + configFile);
}
// Parse the JSON and create the Tokens/Credentials.
ObjectMapper mapper = new ObjectMapper();
JsonFactory factory = mapper.getFactory();
JsonParser parser = factory.createParser(contents);
JsonNode rootNode = mapper.readTree(parser);
Credentials credentials = new Credentials();
if (rootNode.has(CONFIG_AUTHS_KEY)) {
Iterator<String> iter = rootNode.get(CONFIG_AUTHS_KEY).fieldNames();
for (; iter.hasNext();) {
String registryUrl = iter.next();
String registryCred = rootNode.get(CONFIG_AUTHS_KEY).get(registryUrl).get(CONFIG_AUTH_KEY).asText();
TokenIdentifier tokenId = new DockerCredentialTokenIdentifier(registryUrl, applicationId);
Token<DockerCredentialTokenIdentifier> token = new Token<>(tokenId.getBytes(), registryCred.getBytes(StandardCharsets.UTF_8), tokenId.getKind(), new Text(registryUrl));
credentials.addToken(new Text((registryUrl + "-") + applicationId), token);
LOG.info("Token read from Docker client configuration file: " + token.toString());
}
}
return credentials;
} | 3.26 |
hadoop_DockerClientConfigHandler_writeDockerCredentialsToPath_rdh | /**
* Extract the Docker related tokens from the Credentials and write the Docker
* client configuration to the supplied File.
*
* @param outConfigFile
* the File to write the Docker client configuration to.
* @param credentials
* the populated Credentials object.
* @throws IOException
* if the write fails.
* @return true if a Docker credential is found in the supplied credentials.
*/
public static boolean writeDockerCredentialsToPath(File outConfigFile, Credentials credentials) throws IOException {
boolean foundDockerCred = false;
if (credentials.numberOfTokens() > 0) {
ObjectMapper mapper = new ObjectMapper();
ObjectNode rootNode = mapper.createObjectNode();
ObjectNode registryUrlNode = mapper.createObjectNode();
for (Token<? extends TokenIdentifier> tk : credentials.getAllTokens()) {
if (tk.getKind().equals(DockerCredentialTokenIdentifier.KIND)) {
foundDockerCred = true;
DockerCredentialTokenIdentifier ti = ((DockerCredentialTokenIdentifier) (tk.decodeIdentifier()));
ObjectNode registryCredNode = mapper.createObjectNode();
registryUrlNode.set(ti.getRegistryUrl(), registryCredNode);
registryCredNode.put(CONFIG_AUTH_KEY, new String(tk.getPassword(), StandardCharsets.UTF_8));
LOG.debug("Prepared token for write: {}", tk);
}
}
if (foundDockerCred) {
rootNode.set(CONFIG_AUTHS_KEY, registryUrlNode);
String json = mapper.writerWithDefaultPrettyPrinter().writeValueAsString(rootNode);
FileUtils.writeStringToFile(outConfigFile, json, StandardCharsets.UTF_8);
}
}
return foundDockerCred;
} | 3.26 |
hadoop_AllocationTags_getNamespace_rdh | /**
*
* @return the namespace of these tags.
*/
public TargetApplicationsNamespace getNamespace() {
return this.ns;
} | 3.26 |
hadoop_AllocationTags_getTags_rdh | /**
*
* @return the allocation tags.
*/
public Set<String> getTags() {
return this.tags;
} | 3.26 |
hadoop_AdlFsInputStream_available_rdh | /**
* This method returns the remaining bytes in the stream, rather than the
 * expected Java interpretation of {@link java.io.InputStream#available()},
 * which expects the number of remaining bytes in the local buffer. Moreover,
 * it caps the value returned to a maximum of Integer.MAX_VALUE.
 * These changed behaviors are to ensure compatibility with the expectations
 * of the HBase WAL reader, which depends on available() returning the number
 * of bytes in the stream.
 *
 * Given all other FileSystems in the hadoop ecosystem (especially HDFS) do
 * this, it is possible apps other than HBase would also pick up the
 * expectation of this behavior based on the HDFS implementation.
 * Therefore this quirky behavior is kept here, to ensure compatibility.
*
* @return remaining bytes in the stream, with maximum of Integer.MAX_VALUE.
* @throws IOException
* If fails to get the position or file length from SDK.
*/
@Override
public synchronized int available() throws IOException {
return ((int) (Math.min(f0.length() - f0.getPos(), Integer.MAX_VALUE)));
} | 3.26 |
hadoop_AdlFsInputStream_getPos_rdh | /**
* Return the current offset from the start of the file.
*/
@Override
public synchronized long getPos() throws IOException {
return f0.getPos();
} | 3.26 |
hadoop_RollingFileSystemSink_rollLogDirIfNeeded_rdh | /**
* Check the current directory against the time stamp. If they're not
* the same, create a new directory and a new log file in that directory.
*
* @throws MetricsException
* thrown if an error occurs while creating the
* new directory or new log file
*/
private void rollLogDirIfNeeded() throws MetricsException {
// Because we're working relative to the clock, we use a Date instead
// of Time.monotonicNow().
Date now = new Date();
// We check whether currentOutStream is null instead of currentDirPath,
// because if currentDirPath is null, then currentOutStream is null, but
// currentOutStream can be null for other reasons. Same for nextFlush.
if ((currentOutStream == null) || now.after(f1.getTime())) {
// If we're not yet connected to HDFS, create the connection
if (!initialized) {
initialized = initFs();
}
if (initialized) {
// Close the stream. This step could have been handled already by the
// flusher thread, but if it has, the PrintStream will just swallow the
// exception, which is fine.
if (currentOutStream != null) {
currentOutStream.close();
}
currentDirPath = findCurrentDirectory(now);
try {
rollLogDir();
} catch (IOException ex) {
throwMetricsException("Failed to create new log file", ex);
}
// Update the time of the next flush
updateFlushTime(now);
// Schedule the next flush at that time
scheduleFlush(f1.getTime());
}
} else if (forceFlush) {
scheduleFlush(new Date());
}
} | 3.26 |
hadoop_RollingFileSystemSink_m0_rdh | /**
* Set the {@link #nextFlush} variable to the initial flush time. The initial
* flush will be an integer number of flush intervals past the beginning of
* the current hour and will have a random offset added, up to
 * {@link #rollOffsetIntervalMillis}. The initial flush will be a time in the
 * past from which future flush times can be calculated.
*
* @param now
* the current time
*/
@VisibleForTesting
protected void m0(Date now) {
// Start with the beginning of the current hour
f1 = Calendar.getInstance();
f1.setTime(now);
f1.set(Calendar.MILLISECOND, 0);
f1.set(Calendar.SECOND, 0);
f1.set(Calendar.MINUTE, 0);
// In the first round, calculate the first flush as the largest number of
// intervals from the beginning of the current hour that's not in the
// future by:
// 1. Subtract the beginning of the hour from the current time
// 2. Divide by the roll interval and round down to get the number of whole
// intervals that have passed since the beginning of the hour
// 3. Multiply by the roll interval to get the number of millis between
// the beginning of the current hour and the beginning of the current
// interval.
int millis = ((int) (((now.getTime() - f1.getTimeInMillis()) / rollIntervalMillis) * rollIntervalMillis));
// Then add some noise to help prevent all the nodes from
// closing their files at the same time.
if (rollOffsetIntervalMillis > 0) {
millis += ThreadLocalRandom.current().nextLong(rollOffsetIntervalMillis);
// If the added time puts us into the future, step back one roll interval
// because the code to increment nextFlush to the next flush expects that
// nextFlush is the next flush from the previous interval. There wasn't
// a previous interval, so we just fake it with the time in the past that
// would have been the previous interval if there had been one.
//
// It's OK if millis comes out negative.
while ((f1.getTimeInMillis() + millis) > now.getTime()) {
millis -= rollIntervalMillis;
}
}
// Adjust the next flush time by millis to get the time of our fictitious
// previous next flush
f1.add(Calendar.MILLISECOND, millis);
} | 3.26 |
hadoop_RollingFileSystemSink_createOrAppendLogFile_rdh | /**
* Create a new log file and return the {@link FSDataOutputStream}. If a
* file with the specified path already exists, open the file for append
* instead.
*
* Once the file is open, update {@link #currentFSOutStream},
 * {@link #currentOutStream}, and {@link #currentFilePath}.
 *
 * @param targetFile
 * the target path
 * @throws IOException
 * thrown if the create or append operation fails.
*/
private void createOrAppendLogFile(Path targetFile) throws IOException {
// First try blindly creating the file. If we fail, it either means
// the file exists, or the operation actually failed. We do it this way
// because if we check whether the file exists, it might still be created
// by the time we try to create it. Creating first works like a
// test-and-set.
try {
currentFSOutStream = fileSystem.create(targetFile, false);
currentOutStream = new PrintStream(currentFSOutStream, true, StandardCharsets.UTF_8.name());
} catch (IOException ex) {
// Try appending instead. If we fail, it means the file doesn't
// actually exist yet or the operation actually failed.
try {
currentFSOutStream = fileSystem.append(targetFile);
currentOutStream = new PrintStream(currentFSOutStream, true, StandardCharsets.UTF_8.name());
} catch (IOException ex2) {
// If the original create failed for a legit but transitory
// reason, the append will fail because the file now doesn't exist,
// resulting in a confusing stack trace. To avoid that, we set
// the cause of the second exception to be the first exception.
// It's still a tiny bit confusing, but it's enough
// information that someone should be able to figure it out.
ex2.initCause(ex);
throw ex2;
}
}
currentFilePath = targetFile;
} | 3.26 |
hadoop_RollingFileSystemSink_getNonNegative_rdh | /**
* Return the property value if it's non-negative and throw an exception if
* it's not.
*
* @param key
* the property key
* @param defaultValue
* the default value
 */
private long getNonNegative(String key, int defaultValue) {
int flushOffsetIntervalMillis = properties.getInt(key, defaultValue);
if (flushOffsetIntervalMillis < 0) {
throw new MetricsException(((("The " + key) + " property must be ") + "non-negative. Value was ") + flushOffsetIntervalMillis);
}
return flushOffsetIntervalMillis;
} | 3.26 |
hadoop_RollingFileSystemSink_rollLogDir_rdh | /**
* Create a new directory based on the current interval and a new log file in
* that directory.
*
* @throws IOException
* thrown if an error occurs while creating the
* new directory or new log file
 */
private void rollLogDir() throws IOException {
String fileName = ((source + "-") + InetAddress.getLocalHost().getHostName()) + ".log";
Path targetFile = new Path(currentDirPath, fileName);
fileSystem.mkdirs(currentDirPath);
if (allowAppend) {
createOrAppendLogFile(targetFile);
} else {
createLogFile(targetFile);
}
} | 3.26 |
hadoop_RollingFileSystemSink_stringifySecurityProperty_rdh | /**
* Turn a security property into a nicely formatted set of <i>name=value</i>
* strings, allowing for either the property or the configuration not to be
* set.
*
* @param property
* the property to stringify
* @return the stringified property
*/
private String stringifySecurityProperty(String property) {
String securityProperty;
if (properties.containsKey(property)) {
String propertyValue = properties.getString(property);
String confValue = conf.get(properties.getString(property));
if (confValue != null) {
securityProperty = (((((property + "=") + propertyValue) + ", ") + properties.getString(property)) + "=") + confValue;
} else {
securityProperty = ((((property + "=") + propertyValue) + ", ") + properties.getString(property)) + "=<NOT SET>";
}
} else {
securityProperty = property + "=<NOT SET>";
}
return securityProperty;
} | 3.26 |
hadoop_RollingFileSystemSink_m1_rdh | /**
* If the sink isn't set to ignore errors, throw a {@link MetricsException}
* if the stream encountered an exception. The message parameter will be used
* as the new exception's message with the current file name
* ({@link #currentFilePath}) appended to it.
*
* @param message
* the exception message. The message will have a colon and
* the current file name ({@link #currentFilePath}) appended to it.
* @throws MetricsException
* thrown if there was an error and the sink isn't
* ignoring errors
*/
private void m1(String message) throws MetricsException {
if ((!ignoreError) && currentOutStream.checkError()) {
throw new MetricsException((message + ": ") + currentFilePath);
}
} | 3.26 |
hadoop_RollingFileSystemSink_getRollInterval_rdh | /**
* Extract the roll interval from the configuration and return it in
* milliseconds.
*
* @return the roll interval in millis
*/
@VisibleForTesting
protected long getRollInterval() {
String rollInterval = properties.getString(ROLL_INTERVAL_KEY, DEFAULT_ROLL_INTERVAL);
Pattern pattern = Pattern.compile("^\\s*(\\d+)\\s*([A-Za-z]*)\\s*$");
Matcher match = pattern.matcher(rollInterval);
long millis;
if (match.matches()) {
String flushUnit = match.group(2);
int rollIntervalInt;
try {
rollIntervalInt = Integer.parseInt(match.group(1));
} catch (NumberFormatException ex) {
throw new MetricsException((("Unrecognized flush interval: " + rollInterval) + ". Must be a number followed by an optional ") + "unit. The unit must be one of: minute, hour, day", ex);
}
if ("".equals(flushUnit)) {
millis = TimeUnit.HOURS.toMillis(rollIntervalInt);
} else {
switch (flushUnit.toLowerCase()) {
case "m" :
case "min" :
case "minute" :
case "minutes" :
millis = TimeUnit.MINUTES.toMillis(rollIntervalInt);
break;
case "h" :
case "hr" :
case "hour" :
case "hours" :millis = TimeUnit.HOURS.toMillis(rollIntervalInt);
break;
case "d" :
case "day" :
case "days" :
millis = TimeUnit.DAYS.toMillis(rollIntervalInt);
break;
default :
throw new MetricsException(("Unrecognized unit for flush interval: " + flushUnit) + ". Must be one of: minute, hour, day");
}
}
} else {
throw new MetricsException((("Unrecognized flush interval: " + rollInterval) + ". Must be a number followed by an optional unit.") + " The unit must be one of: minute, hour, day");
}
if (millis < 60000) {
throw new MetricsException(("The flush interval property must be " + "at least 1 minute. Value was ") + rollInterval);
}
return millis;
} | 3.26 |
hadoop_RollingFileSystemSink_checkAppend_rdh | /**
* Test whether the file system supports append and return the answer.
*
* @param fs
* the target file system
*/
private boolean checkAppend(FileSystem fs) {
boolean canAppend = true;
try {
fs.append(basePath);
} catch (UnsupportedOperationException ex) {
canAppend = false;
} catch (IOException ex) {
// Ignore. The operation is supported.
}
return canAppend;
} | 3.26 |
hadoop_RollingFileSystemSink_scheduleFlush_rdh | /**
* Schedule the current interval's directory to be flushed. If this ends up
* running after the top of the next interval, it will execute immediately.
*
* @param when
* the time the thread should run
*/
private void scheduleFlush(Date when) {
// Store the current currentDirPath to close later
final PrintStream toClose = currentOutStream;
flushTimer.schedule(new TimerTask() {
@Override
public void run() {
synchronized(lock) {
// This close may have already been done by a putMetrics() call. If it
// has, the PrintStream will swallow the exception, which is fine.
toClose.close();
}
hasFlushed = true;
}
}, when);
} | 3.26 |
hadoop_RollingFileSystemSink_createLogFile_rdh | /**
* Create a new log file and return the {@link FSDataOutputStream}. If a
* file with the specified path already exists, add a suffix, starting with 1
* and try again. Keep incrementing the suffix until a nonexistent target
* path is found.
*
 * Once the file is open, {@link #currentFSOutStream},
 * {@link #currentOutStream}, and {@link #currentFilePath} are set
 * appropriately.
*
* @param initial
* the target path
* @throws IOException
 * thrown if the call to see if the file exists fails
*/
private void createLogFile(Path initial) throws IOException {
Path currentAttempt = initial;
// Start at 0 so that if the base filename exists, we start with the suffix
// ".1".
int id = 0;
while (true) {
// First try blindly creating the file. If we fail, it either means
// the file exists, or the operation actually failed. We do it this way
// because if we check whether the file exists, it might still be created
// by the time we try to create it. Creating first works like a
// test-and-set.
try {
currentFSOutStream = fileSystem.create(currentAttempt, false);
currentOutStream = new PrintStream(currentFSOutStream, true, StandardCharsets.UTF_8.name());
currentFilePath = currentAttempt;
break;
} catch (IOException ex) {
// Now we can check to see if the file exists to know why we failed
if (fileSystem.exists(currentAttempt)) {
id = getNextIdToTry(initial, id);
currentAttempt = new Path((initial.toString() + ".") + id);
} else {
throw ex;
}
}
}
} | 3.26 |
hadoop_RollingFileSystemSink_checkIfPropertyExists_rdh | /**
* Throw a {@link MetricsException} if the given property is not set.
*
* @param key
* the key to validate
*/
private void checkIfPropertyExists(String key) {
if (!properties.containsKey(key)) {
throw new MetricsException(("Metrics2 configuration is missing " + key) + " property");
}
} | 3.26 |
hadoop_RollingFileSystemSink_updateFlushTime_rdh | /**
* Update the {@link #nextFlush} variable to the next flush time. Add
* an integer number of flush intervals, preserving the initial random offset.
*
* @param now
* the current time
*/
@VisibleForTesting
protected void updateFlushTime(Date now) {
// In non-initial rounds, add an integer number of intervals to the last
// flush until a time in the future is achieved, thus preserving the
// original random offset.
int millis = ((int) ((((now.getTime() - f1.getTimeInMillis()) / rollIntervalMillis) + 1) * rollIntervalMillis));
f1.add(Calendar.MILLISECOND, millis);
} | 3.26 |
hadoop_RollingFileSystemSink_throwMetricsException_rdh | /**
* If the sink isn't set to ignore errors, throw a new
* {@link MetricsException}. The message parameter will be used as the
* new exception's message with the current file name
* ({@link #currentFilePath}) appended to it.
*
* @param message
* the exception message. The message will have a colon and
* the current file name ({@link #currentFilePath}) appended to it.
*/
private void throwMetricsException(String message) {
if (!ignoreError) {
throw new MetricsException((message + ": ") + currentFilePath);
}
} | 3.26 |
hadoop_RollingFileSystemSink_getNextIdToTry_rdh | /**
* Return the next ID suffix to use when creating the log file. This method
* will look at the files in the directory, find the one with the highest
 * ID suffix, add 1 to that suffix, and return it. This approach saves a full
* linear probe, which matters in the case where there are a large number of
* log files.
*
* @param initial
* the base file path
* @param lastId
* the last ID value that was used
* @return the next ID to try
* @throws IOException
* thrown if there's an issue querying the files in the
* directory
*/
private int getNextIdToTry(Path initial, int lastId) throws IOException {
RemoteIterator<LocatedFileStatus> files = fileSystem.listFiles(currentDirPath, true);
String base = initial.toString();
int id = lastId;
while (files.hasNext()) {
String file = files.next().getPath().getName();
if (file.startsWith(base)) {
int fileId = extractId(file);
if (fileId > id) {
id = fileId;
}
}
}
// Return either 1 more than the highest we found or 1 more than the last
// ID used (if no ID was found).
return id + 1;
} | 3.26 |
hadoop_RollingFileSystemSink_findCurrentDirectory_rdh | /**
* Use the given time to determine the current directory. The current
* directory will be based on the {@link #rollIntervalMinutes}.
*
* @param now
* the current time
* @return the current directory
*/
private Path findCurrentDirectory(Date now) {
long offset = ((now.getTime() - f1.getTimeInMillis()) / rollIntervalMillis) * rollIntervalMillis;
String currentDir = DATE_FORMAT.format(new Date(f1.getTimeInMillis() + offset));
return new Path(basePath, currentDir);
} | 3.26 |
hadoop_RollingFileSystemSink_getFileSystem_rdh | /**
* Return the supplied file system for testing or otherwise get a new file
* system.
*
* @return the file system to use
* @throws MetricsException
* thrown if the file system could not be retrieved
*/
private FileSystem getFileSystem() throws MetricsException {
FileSystem fs = null;
if (suppliedFilesystem != null) {
fs = suppliedFilesystem;
} else {
try {
fs = FileSystem.get(new URI(basePath.toString()), conf);
} catch (URISyntaxException ex) {
throw new MetricsException(("The supplied filesystem base path URI" + " is not a valid URI: ") + basePath.toString(), ex);
} catch (IOException ex) {
throw new MetricsException(((("Error connecting to file system: " + basePath) + " [") + ex.toString()) + "]", ex);
}
}
return fs;
} | 3.26 |
hadoop_RollingFileSystemSink_extractId_rdh | /**
* Extract the ID from the suffix of the given file name.
*
* @param file
* the file name
* @return the ID or -1 if no ID could be extracted
*/
private int extractId(String file) {
int index = file.lastIndexOf(".");
int id = -1;
// A hostname has to have at least 1 character
if (index > 0) {
try {
id = Integer.parseInt(file.substring(index + 1));
} catch (NumberFormatException ex) {
// This can happen if there's no suffix, but there is a dot in the
// hostname. Just ignore it.
}
}
return id;
} | 3.26 |
hadoop_RollingFileSystemSink_initFs_rdh | /**
* Initialize the connection to HDFS and create the base directory. Also
* launch the flush thread.
*/
private boolean initFs() {
boolean success = false;
fileSystem = getFileSystem();
// This step isn't strictly necessary, but it makes debugging issues much
// easier. We try to create the base directory eagerly and fail with
// copious debug info if it fails.
try {
fileSystem.mkdirs(basePath);
success = true;
} catch (Exception ex) {
if (!ignoreError) {
throw new MetricsException((((((((((((((("Failed to create " + basePath) + "[") + SOURCE_KEY) + "=") + source) + ", ")
+ ALLOW_APPEND_KEY) + "=") + allowAppend) + ", ") + stringifySecurityProperty(KEYTAB_PROPERTY_KEY)) + ", ")
+ stringifySecurityProperty(USERNAME_PROPERTY_KEY)) + "] -- ") + ex.toString(), ex);
}
}
if (success) {
// If we're permitted to append, check if we actually can
if (allowAppend) {
allowAppend = checkAppend(fileSystem);
}
flushTimer = new Timer("RollingFileSystemSink Flusher", true);
m0(new Date());
}
return success;
} | 3.26 |
hadoop_RollingFileSystemSink_loadConf_rdh | /**
* Return the supplied configuration for testing or otherwise load a new
* configuration.
*
* @return the configuration to use
*/
private Configuration loadConf() {
Configuration c;
if (suppliedConf != null) {
c = suppliedConf;
} else {
// The config we're handed in init() isn't the one we want here, so we
// create a new one to pick up the full settings.
c = new Configuration();
}
return c;
} | 3.26 |
hadoop_ContainerLogContext_getExitCode_rdh | /**
* Get the exit code of the container.
*
* @return the exit code
*/
public int getExitCode() {
return exitCode;
} | 3.26 |
hadoop_ContainerLogContext_getContainerType_rdh | /**
* Get {@link ContainerType} the type of the container.
*
* @return the type of the container
*/
public ContainerType getContainerType() {
return f0;
} | 3.26 |
hadoop_ContainerLogContext_m0_rdh | /**
* Get {@link ContainerId} of the container.
*
* @return the container ID
*/
public ContainerId m0() {
return containerId;
} | 3.26 |
hadoop_NamenodeHeartbeatResponse_newInstance_rdh | /**
* API response for registering a namenode with the state store.
 */
public abstract class NamenodeHeartbeatResponse {
public static NamenodeHeartbeatResponse newInstance() throws IOException {
return StateStoreSerializer.newRecord(NamenodeHeartbeatResponse.class);
} | 3.26 |
hadoop_CallableSupplier_submit_rdh | /**
* Submit a callable into a completable future.
* RTEs are rethrown.
* Non RTEs are caught and wrapped; IOExceptions to
* {@code RuntimeIOException} instances.
*
* @param executor
* executor.
* @param auditSpan
* audit span (or null)
* @param call
* call to invoke
* @param <T>
* type
* @return the future to wait for
*/
@SuppressWarnings("unchecked")
public static <T> CompletableFuture<T> submit(final Executor executor, final AuditSpan auditSpan, final Callable<T> call) {
return CompletableFuture.supplyAsync(new CallableSupplier<T>(auditSpan, call), executor);
} | 3.26 |
hadoop_CallableSupplier_waitForCompletionIgnoringExceptions_rdh | /**
 * Wait for a single future to complete, ignoring exceptions raised.
*
* @param future
* future to wait for.
* @param <T>
* type
* @return the outcome if successfully retrieved.
*/
public static <T> Optional<T> waitForCompletionIgnoringExceptions(@Nullable final CompletableFuture<T> future) {
try {
return maybeAwaitCompletion(future);
} catch (Exception e) {
LOG.debug("Ignoring exception raised in task completion: ", e);return Optional.empty();
}
} | 3.26 |
hadoop_CallableSupplier_get_rdh | /**
 * Activate any span and then call the supplied callable.
*
* @return the result.
*/
@Override
public T get() {
try {
if (auditSpan != null) {
auditSpan.activate();
}
return call.call();
} catch (RuntimeException e) {
throw e;
} catch (IOException e) {
throw new UncheckedIOException(e);
} catch (Exception e) {
throw new UncheckedIOException(new IOException(e));
}
} | 3.26 |
hadoop_CallableSupplier_waitForCompletion_rdh | /**
 * Wait for a single future to complete, extracting IOEs afterwards.
*
* @param future
* future to wait for.
* @param <T>
* type
* @return the result
* @throws IOException
* if one of the called futures raised an IOE.
* @throws RuntimeException
* if one of the futures raised one.
*/
public static <T> T waitForCompletion(final CompletableFuture<T> future) throws IOException {
try (DurationInfo v0 = new DurationInfo(LOG, false, "Waiting for task completion")) {
return future.join();
} catch (CancellationException e) {
throw new IOException(e);
} catch (CompletionException e) {
raiseInnerCause(e);
return null;
}
} | 3.26 |
hadoop_XAttrStorage_readINodeXAttrs_rdh | /**
* Reads the existing extended attributes of an inode.
* <p>
* Must be called while holding the FSDirectory read lock.
*
* @param inodeAttr
* INodeAttributes to read.
* @return {@code XAttr} list.
*/
public static List<XAttr> readINodeXAttrs(INodeAttributes inodeAttr) {
XAttrFeature f = inodeAttr.getXAttrFeature();
return f == null ? new ArrayList<XAttr>(0) : f.getXAttrs();
} | 3.26 |
hadoop_XAttrStorage_updateINodeXAttrs_rdh | /**
* Update xattrs of inode.
* <p>
* Must be called while holding the FSDirectory write lock.
*
* @param inode
* INode to update
* @param xAttrs
* to update xAttrs.
* @param snapshotId
* id of the latest snapshot of the inode
*/
public static void updateINodeXAttrs(INode inode, List<XAttr> xAttrs, int snapshotId) throws QuotaExceededException {
if (inode.getXAttrFeature() != null) {
inode.removeXAttrFeature(snapshotId);
}
if ((xAttrs == null) || xAttrs.isEmpty()) {
return;
}
inode.addXAttrFeature(new XAttrFeature(xAttrs), snapshotId);
} | 3.26 |
hadoop_XAttrStorage_readINodeXAttrByPrefixedName_rdh | /**
* Reads the extended attribute of an inode by name with prefix.
* <p>
*
* @param inode
* INode to read
* @param snapshotId
* the snapshotId of the requested path
* @param prefixedName
* xAttr name with prefix
* @return the xAttr
 */
public static XAttr readINodeXAttrByPrefixedName(INode inode, int snapshotId, String prefixedName) {
XAttrFeature f = inode.getXAttrFeature(snapshotId);
return f == null ? null : f.getXAttr(prefixedName);
} | 3.26 |
hadoop_S3ARemoteObject_getPath_rdh | /**
* Gets the path corresponding to the given s3Attributes.
*
* @param s3Attributes
* attributes of an S3 object.
* @return the path corresponding to the given s3Attributes.
*/
public static String getPath(S3ObjectAttributes s3Attributes) {
return String.format("s3a://%s/%s", s3Attributes.getBucket(), s3Attributes.getKey());
} | 3.26 |
hadoop_S3ARemoteObject_getStatistics_rdh | /**
* Gets an instance of {@code S3AInputStreamStatistics} used for reporting access metrics.
*
* @return an instance of {@code S3AInputStreamStatistics} used for reporting access metrics.
*/
public S3AInputStreamStatistics getStatistics() {
return streamStatistics;
} | 3.26 |
hadoop_S3ARemoteObject_openForRead_rdh | /**
* Opens a section of the file for reading.
*
* @param offset
* Start offset (0 based) of the section to read.
* @param size
* Size of the section to read.
* @return an {@code InputStream} corresponding to the given section of this file.
* @throws IOException
* if there is an error opening this file section for reading.
* @throws IllegalArgumentException
* if offset is negative.
* @throws IllegalArgumentException
* if offset is greater than or equal to file size.
* @throws IllegalArgumentException
* if size is greater than the remaining bytes.
*/
public ResponseInputStream<GetObjectResponse> openForRead(long offset, int size) throws IOException {
Validate.checkNotNegative(offset, "offset");
Validate.checkLessOrEqual(offset, "offset", size(), "size()");
Validate.checkLessOrEqual(size, "size", size() - offset, "size() - offset");
streamStatistics.streamOpened();
final GetObjectRequest request = client.newGetRequestBuilder(s3Attributes.getKey()).range(S3AUtils.formatRange(offset, (offset + size) - 1)).applyMutation(changeTracker::maybeApplyConstraint).build();
String operation = String.format("%s %s at %d", S3AInputStream.OPERATION_OPEN, uri, offset);
DurationTracker tracker = streamStatistics.initiateGetRequest();
ResponseInputStream<GetObjectResponse> object = null;
try {
object = Invoker.once(operation, uri, () -> client.getObject(request));
} catch (IOException e) {
tracker.failed();
throw e;
} finally {
tracker.close();
}
changeTracker.processResponse(object.response(), operation, offset);
return object;
} | 3.26 |
hadoop_S3ARemoteObject_getReadInvoker_rdh | /**
* Gets an instance of {@code Invoker} for interacting with S3 API.
*
* @return an instance of {@code Invoker} for interacting with S3 API.
*/
public Invoker getReadInvoker() {
return context.getReadInvoker();
} | 3.26 |
hadoop_S3ARemoteObject_size_rdh | /**
* Gets the size of this file.
* Its value is cached once obtained from AWS.
*
* @return the size of this file.
*/
public long size() {
return s3Attributes.getLen();
} | 3.26 |
hadoop_HdfsDataInputStream_getCurrentDatanode_rdh | /**
* Get the datanode from which the stream is currently reading.
*/
public DatanodeInfo getCurrentDatanode() {
return m0().getCurrentDatanode();
} | 3.26 |
hadoop_HdfsDataInputStream_m1_rdh | /**
* Get the collection of blocks that has already been located.
*/
public List<LocatedBlock> m1() throws IOException {
return m0().getAllBlocks();
} | 3.26 |
hadoop_HdfsDataInputStream_getWrappedStream_rdh | /**
* Get a reference to the wrapped output stream. We always want to return the
* actual underlying InputStream, even when we're using a CryptoStream. e.g.
* in the delegated methods below.
*
* @return the underlying output stream
*/
public InputStream getWrappedStream() {
return in;
} | 3.26 |
hadoop_HdfsDataInputStream_getCurrentBlock_rdh | /**
* Get the block containing the target position.
*/
public ExtendedBlock getCurrentBlock() {
return m0().getCurrentBlock();
} | 3.26 |
hadoop_HdfsDataInputStream_getReadStatistics_rdh | /**
* Get statistics about the reads which this DFSInputStream has done.
* Note that because HdfsDataInputStream is buffered, these stats may
* be higher than you would expect just by adding up the number of
* bytes read through HdfsDataInputStream.
*/
public ReadStatistics getReadStatistics() {
return m0().getReadStatistics();
} | 3.26 |
hadoop_HdfsDataInputStream_getVisibleLength_rdh | /**
* Get the visible length of the file. It will include the length of the last
* block even if that is in UnderConstruction state.
*
* @return The visible length of the file.
 */
public long getVisibleLength() {
return m0().getFileLength();
} | 3.26 |
hadoop_RawErasureEncoder_encode_rdh | /**
 * Encode with inputs and generate outputs. See above for more details.
*
* @param inputs
* input buffers to read data from
* @param outputs
 * output buffers to put the encoded data into, ready to read
 * after the call
* @throws IOException
* if the encoder is closed.
*/
public void encode(ECChunk[] inputs, ECChunk[] outputs) throws IOException {
ByteBuffer[] newInputs = ECChunk.toBuffers(inputs);
ByteBuffer[] newOutputs = ECChunk.toBuffers(outputs);
encode(newInputs, newOutputs);
} | 3.26 |
hadoop_RawErasureEncoder_release_rdh | /**
* Should be called when release this coder. Good chance to release encoding
* or decoding buffers
*/
public void release() {
// Nothing to do here.
} | 3.26 |
hadoop_RawErasureEncoder_allowVerboseDump_rdh | /**
* Allow to dump verbose info during encoding/decoding.
*
* @return true if it's allowed to do verbose dump, false otherwise.
*/
public boolean allowVerboseDump() {
return coderOptions.allowVerboseDump();
} | 3.26 |
hadoop_RawErasureEncoder_allowChangeInputs_rdh | /**
* Allow change into input buffers or not while perform encoding/decoding.
*
* @return true if it's allowed to change inputs, false otherwise
 */
public boolean allowChangeInputs() {
return coderOptions.allowChangeInputs();
} | 3.26 |
hadoop_HadoopExecutors_shutdown_rdh | /**
* Helper routine to shutdown a {@link ExecutorService}. Will wait up to a
* certain timeout for the ExecutorService to gracefully shutdown. If the
* ExecutorService did not shutdown and there are still tasks unfinished after
* the timeout period, the ExecutorService will be notified to forcibly shut
* down. Another timeout period will be waited before giving up. So, at most,
* a shutdown will be allowed to wait up to twice the timeout value before
* giving up.
*
* @param executorService
* ExecutorService to shutdown
* @param logger
* Logger
* @param timeout
* the maximum time to wait
* @param unit
* the time unit of the timeout argument
*/
public static void shutdown(ExecutorService executorService, Logger logger, long timeout, TimeUnit unit) {
if (executorService == null) {
return;
}
try {
executorService.shutdown();
logger.debug("Gracefully shutting down executor service {}. Waiting max {} {}", executorService, timeout, unit);
if (!executorService.awaitTermination(timeout, unit)) {
logger.debug("Executor service has not shutdown yet. Forcing. " + "Will wait up to an additional {} {} for shutdown", timeout, unit);
executorService.shutdownNow();
}
if (executorService.awaitTermination(timeout, unit)) {
logger.debug("Succesfully shutdown executor service");
} else {
logger.error("Unable to shutdown executor service after timeout {} {}", 2 *
timeout, unit);
}
} catch (InterruptedException e) {
logger.error("Interrupted while attempting to shutdown", e);
executorService.shutdownNow();
} catch (Exception e) {
logger.warn("Exception closing executor service {}", e.getMessage());
logger.debug("Exception closing executor service", e);
throw e;
}
} | 3.26 |
hadoop_HadoopExecutors_newSingleThreadExecutor_rdh | // Executors.newSingleThreadExecutor has special semantics - for the
// moment we'll delegate to it rather than implement the semantics here.
public static ExecutorService newSingleThreadExecutor(ThreadFactory threadFactory) {
return Executors.newSingleThreadExecutor(threadFactory);
} | 3.26 |
hadoop_HadoopExecutors_m0_rdh | // Executors.newSingleThreadScheduledExecutor has special semantics - for the
// moment we'll delegate to it rather than implement the semantics here
public static ScheduledExecutorService m0() {
return Executors.newSingleThreadScheduledExecutor();
} | 3.26 |
hadoop_InterruptEscalator_register_rdh | /**
* Register an interrupt handler.
*
* @param signalName
* signal name
* @throws IllegalArgumentException
* if the registration failed
*/
public synchronized void register(String signalName) {
IrqHandler handler = new IrqHandler(signalName, this);
handler.bind();
interruptHandlers.add(handler);
} | 3.26 |
hadoop_InterruptEscalator_lookup_rdh | /**
* Look up the handler for a signal.
*
* @param signalName
* signal name
* @return a handler if found
*/
public synchronized IrqHandler lookup(String signalName) {
for (IrqHandler irqHandler : interruptHandlers) {
if (irqHandler.getName().equals(signalName)) {
return irqHandler;
}
}
return null;
} | 3.26 |
hadoop_InterruptEscalator_run_rdh | /**
* Shutdown callback: stop the service and set an atomic boolean
* if it stopped within the shutdown time.
*/
@Override
public void run() {
if (service != null) {
service.stop();
f0.set(service.waitForServiceToStop(shutdownTimeMillis));
} else {
f0.set(true);
}
} | 3.26 |
hadoop_InterruptEscalator_isSignalAlreadyReceived_rdh | /**
* Flag set if a signal has been received.
*
* @return true if there has been one interrupt already.
 */
public boolean isSignalAlreadyReceived() {
return signalAlreadyReceived.get();
} | 3.26 |
hadoop_InterruptEscalator_getServiceWasShutdown_rdh | /**
* Probe for the service being shutdown.
*
* @return true if the service has been shutdown in the runnable
*/
private boolean getServiceWasShutdown() {
return f0.get();
} | 3.26 |
hadoop_InterruptEscalator_isForcedShutdownTimedOut_rdh | /**
* Flag set if forced shut down timed out.
*
* @return true if a shutdown was attempted and it timed out
*/
public boolean isForcedShutdownTimedOut() {
return forcedShutdownTimedOut;
} | 3.26 |
hadoop_AssumedRoleCredentialProvider_close_rdh | /**
* Propagate the close() call to the inner stsProvider.
*/
@Override
public void close() {
S3AUtils.closeAutocloseables(LOG, stsProvider, credentialsToSTS, stsClient);
} | 3.26 |
hadoop_AssumedRoleCredentialProvider_sanitize_rdh | /**
* Build a session name from the string, sanitizing it for the permitted
* characters.
*
* @param session
* source session
* @return a string for use in role requests.
*/
@VisibleForTesting
static String sanitize(String session) {
StringBuilder r = new StringBuilder(session.length());
for (char c : session.toCharArray()) {
if ("abcdefghijklmnopqrstuvwxyz0123456789,.@-".contains(Character.toString(c).toLowerCase(Locale.ENGLISH))) {
r.append(c);
} else {
r.append('-');
}
}
return r.toString();
} | 3.26 |
hadoop_AssumedRoleCredentialProvider_resolveCredentials_rdh | /**
* Get credentials.
*
* @return the credentials
* @throws StsException
* if none could be obtained.
*/
@Override
@Retries.RetryRaw
public AwsCredentials resolveCredentials() {
try {
return invoker.retryUntranslated("resolveCredentials", true, stsProvider::resolveCredentials);
} catch (IOException e) {
// this is in the signature of retryUntranslated;
// its hard to see how this could be raised, but for
// completeness, it is wrapped as an Amazon Client Exception
// and rethrown.
throw new CredentialInitializationException("getCredentials failed: " + e, e);
} catch (SdkClientException e) {
LOG.error("Failed to resolve credentials for role {}", arn, e);
throw e;
}
} | 3.26 |
hadoop_AssumedRoleCredentialProvider_operationRetried_rdh | /**
* Callback from {@link Invoker} when an operation is retried.
*
* @param text
* text of the operation
* @param ex
* exception
* @param retries
* number of retries
* @param idempotent
* is the method idempotent
*/
public void operationRetried(String text, Exception ex, int retries,
boolean idempotent) {
if (retries == 0) {
// log on the first retry attempt of the credential access.
// At worst, this means one log entry every intermittent renewal
// time.
LOG.info("Retried {}", text);
}
} | 3.26 |
hadoop_AssumedRoleCredentialProvider_buildSessionName_rdh | /**
* Build the session name from the current user's shortname.
*
* @return a string for the session name.
* @throws IOException
* failure to get the current user
*/
static String buildSessionName() throws IOException {
return sanitize(UserGroupInformation.getCurrentUser().getShortUserName());
} | 3.26 |
hadoop_FileSetUtils_getCommaSeparatedList_rdh | /**
* Returns a string containing every element of the given list, with each
* element separated by a comma.
*
* @param list
* List of all elements
* @return String containing every element, comma-separated
*/
private static String getCommaSeparatedList(List<String> list) {
StringBuilder buffer = new StringBuilder();
String separator = "";
for (Object e : list) {
buffer.append(separator).append(e);
separator = ",";
}
return buffer.toString();
} | 3.26 |
hadoop_FileSetUtils_convertFileSetToFiles_rdh | /**
* Converts a Maven FileSet to a list of File objects.
*
* @param source
* FileSet to convert
* @return List containing every element of the FileSet as a File
* @throws IOException
* if an I/O error occurs while trying to find the files
 */
@SuppressWarnings("unchecked")
public static List<File> convertFileSetToFiles(FileSet source) throws IOException {
String includes = getCommaSeparatedList(source.getIncludes());
String excludes = getCommaSeparatedList(source.getExcludes());
return FileUtils.getFiles(new File(source.getDirectory()), includes, excludes);
} | 3.26 |
hadoop_RouterAuditLogger_createStringBuilderForSuccessEvent_rdh | /**
* A helper function for creating the common portion of a successful
* log message.
*/
private static StringBuilder createStringBuilderForSuccessEvent(String user, String operation, String target) {
StringBuilder b = new StringBuilder();
start(Keys.USER, user, b);
addRemoteIP(b);
add(Keys.OPERATION, operation, b);
add(Keys.TARGET, target, b);
add(Keys.RESULT, AuditConstants.SUCCESS, b);
return b;
} | 3.26 |
hadoop_RouterAuditLogger_addRemoteIP_rdh | /**
* A helper api to add remote IP address.
*/
static void addRemoteIP(StringBuilder b) {
InetAddress ip = Server.getRemoteIp();
// ip address can be null for testcases
if (ip != null) {
add(Keys.IP, ip.getHostAddress(), b);
}
} | 3.26 |
hadoop_RouterAuditLogger_createSuccessLog_rdh | /**
* A helper api for creating an audit log for a successful event.
*/
static String createSuccessLog(String user, String operation, String target, ApplicationId appId, SubClusterId subClusterID) {
StringBuilder b = createStringBuilderForSuccessEvent(user, operation, target);
if (appId != null) {
add(Keys.APPID, appId.toString(), b);
}
if (subClusterID != null) {
add(Keys.f0, subClusterID.toString(), b);
}
return b.toString();
} | 3.26 |
hadoop_RouterAuditLogger_logFailure_rdh | /**
* Create a readable and parsable audit log string for a failed event.
*
* @param user
* User who made the service request.
* @param operation
* Operation requested by the user.
* @param perm
* Target permissions.
* @param target
* The target on which the operation is being performed.
* @param description
* Some additional information as to why the operation failed.
* @param subClusterId
* SubCluster Id in which operation was performed.
*/
public static void logFailure(String user, String operation, String perm, String target, String description, SubClusterId subClusterId) {
if (LOG.isInfoEnabled()) {
LOG.info(createFailureLog(user, operation, perm, target, description, null, subClusterId));
}
} | 3.26 |
hadoop_RouterAuditLogger_m0_rdh | /**
* A helper function for creating the common portion of a failure
* log message.
*/
private static StringBuilder m0(String user, String operation, String target, String description, String perm) {
StringBuilder b = new StringBuilder();
start(Keys.USER, user, b);
addRemoteIP(b);
add(Keys.OPERATION, operation, b);
add(Keys.TARGET, target, b);
add(Keys.RESULT, AuditConstants.FAILURE, b);
add(Keys.DESCRIPTION, description, b);
add(Keys.PERMISSIONS, perm, b);
return b;
} | 3.26 |
hadoop_RouterAuditLogger_logSuccess_rdh | /**
* Create a readable and parseable audit log string for a successful event.
*
* @param user
* User who made the service request to the Router
* @param operation
* Operation requested by the user.
* @param target
* The target on which the operation is being performed.
* @param appId
* Application Id in which operation was performed.
* @param subClusterId
* Subcluster Id in which operation is performed.
*
* <br><br>
* Note that the {@link RouterAuditLogger} uses tabs ('\t') as a key-val
* delimiter and hence the value fields should not contains tabs ('\t').
*/
public static void logSuccess(String user, String operation, String target, ApplicationId appId, SubClusterId subClusterId) {
if (LOG.isInfoEnabled()) {
LOG.info(createSuccessLog(user, operation, target, appId, subClusterId));
}
} | 3.26 |
hadoop_RouterAuditLogger_start_rdh | /**
* Adds the first key-val pair to the passed builder in the following format
* key=value.
 */
static void start(Keys key, String value, StringBuilder b) {
b.append(key.name()).append(AuditConstants.KEY_VAL_SEPARATOR).append(value);
} | 3.26 |
hadoop_RouterAuditLogger_createFailureLog_rdh | /**
* A helper api for creating an audit log for a failure event.
*/
static String createFailureLog(String user, String operation, String perm, String target, String description, ApplicationId appId, SubClusterId subClusterId) {
StringBuilder b = m0(user, operation, target, description, perm);
if (appId != null) {
add(Keys.APPID, appId.toString(), b);
}
if (subClusterId != null) {
add(Keys.f0, subClusterId.toString(), b);
}
return b.toString();
} | 3.26 |
hadoop_RouterAuditLogger_add_rdh | /**
* Appends the key-val pair to the passed builder in the following format
* <pair-delim>key=value.
*/
static void add(Keys key, String value, StringBuilder b) {
b.append(AuditConstants.f1).append(key.name()).append(AuditConstants.KEY_VAL_SEPARATOR).append(value);
} | 3.26 |
hadoop_AbstractReservationSystem_getPlanFollowerTimeStep_rdh | /**
*
* @return the planStepSize
*/
@Override
public long getPlanFollowerTimeStep() {
readLock.lock();
try {
return planStepSize;
} finally {
readLock.unlock();
}
} | 3.26 |
hadoop_AbstractReservationSystem_getDefaultReservationSystem_rdh | /**
* Get the default reservation system corresponding to the scheduler
*
* @param scheduler
* the scheduler for which the reservation system is required
* @return the {@link ReservationSystem} based on the configured scheduler
*/
public static String getDefaultReservationSystem(ResourceScheduler scheduler) {
if (scheduler instanceof CapacityScheduler) {
return CapacityReservationSystem.class.getName();
} else if (scheduler instanceof FairScheduler) {
return FairReservationSystem.class.getName();
}
return null;
} | 3.26 |
hadoop_RpcNoSuchMethodException_getRpcErrorCodeProto_rdh | /**
* get the detailed rpc status corresponding to this exception
*/
public RpcErrorCodeProto getRpcErrorCodeProto() {
return RpcErrorCodeProto.ERROR_NO_SUCH_METHOD;
} | 3.26 |
hadoop_RpcNoSuchMethodException_getRpcStatusProto_rdh | /**
* get the rpc status corresponding to this exception
*/
public RpcStatusProto getRpcStatusProto() {
return RpcStatusProto.ERROR;
} | 3.26 |
hadoop_ByteArrayEncodingState_convertToByteBufferState_rdh | /**
* Convert to a ByteBufferEncodingState when it's backed by on-heap arrays.
*/
ByteBufferEncodingState convertToByteBufferState() {
ByteBuffer[] newInputs = new ByteBuffer[f0.length];
ByteBuffer[] newOutputs = new ByteBuffer[outputs.length];
for (int i = 0; i < f0.length; i++) {
newInputs[i] = CoderUtil.cloneAsDirectByteBuffer(f0[i], inputOffsets[i], encodeLength);
}
for (int i = 0; i < outputs.length; i++) {
newOutputs[i] = ByteBuffer.allocateDirect(encodeLength);
}
ByteBufferEncodingState bbeState = new ByteBufferEncodingState(encoder, encodeLength, newInputs, newOutputs);
return bbeState;
} | 3.26 |
hadoop_ByteArrayEncodingState_checkBuffers_rdh | /**
* Check and ensure the buffers are of the desired length.
*
* @param buffers
* the buffers to check
*/
void checkBuffers(byte[][] buffers) {
for (byte[] buffer : buffers) {
if (buffer == null) {
throw new HadoopIllegalArgumentException("Invalid buffer found, not allowing null");
}
if (buffer.length != encodeLength) {
throw new HadoopIllegalArgumentException("Invalid buffer not of length " + encodeLength);
}
}
} | 3.26 |
hadoop_And_apply_rdh | /**
* Applies child expressions to the {@link PathData} item. If all pass then
* returns {@link Result#PASS} else returns the result of the first
* non-passing expression.
*/
@Override
public Result apply(PathData item, int depth) throws IOException {
Result result = Result.PASS;
for (Expression child : getChildren()) {
Result childResult = child.apply(item, -1);
result = result.combine(childResult);
if (!result.isPass()) {
return result;
}
}
return result;
} | 3.26 |
hadoop_And_registerExpression_rdh | /**
* Registers this expression with the specified factory.
*/
public static void registerExpression(ExpressionFactory factory) throws IOException {
factory.addClass(And.class, "-a");
factory.addClass(And.class, "-and");
} | 3.26 |
hadoop_AbstractDTService_requireServiceStarted_rdh | /**
* Require the service to be started.
*
* @throws IllegalStateException
* if it is not.
*/
protected void requireServiceStarted() throws IllegalStateException {
requireServiceState(STATE.STARTED);
} | 3.26 |