name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, 3.26–3.68) |
---|---|---|
hadoop_Adl_getUriDefaultPort_rdh | /**
*
* @return Default port used by the ADL file system to communicate.
*/
@Override
public final int getUriDefaultPort() {
return AdlFileSystem.DEFAULT_PORT;
} | 3.26 |
hadoop_Configured_setConf_rdh | // inherit javadoc
@Override
public void setConf(Configuration conf) {
this.conf = conf;
} | 3.26 |
hadoop_Configured_getConf_rdh | // inherit javadoc
@Override
public Configuration getConf() {
return conf;
} | 3.26 |
hadoop_AbstractAutoCreatedLeafQueue_setEntitlement_rdh | /**
* This method changes the capacity for a queue and adjusts its
* absoluteCapacity.
*
* @param nodeLabel
* nodeLabel.
* @param entitlement
* the new entitlement for the queue (capacity,
* maxCapacity, etc..)
* @throws SchedulerDynamicEditException
* when setEntitlement fails.
*/
public void setEntitlement(String nodeLabel, QueueEntitlement entitlement)
throws SchedulerDynamicEditException {
writeLock.lock();
try {
float capacity = entitlement.getCapacity();
if ((capacity < 0) || (capacity > 1.0F)) {
throw new SchedulerDynamicEditException("Capacity demand is not in the [0,1] range: " + capacity);
}
setCapacity(nodeLabel, capacity);
setAbsoluteCapacity(nodeLabel, this.getParent().getQueueCapacities().getAbsoluteCapacity(nodeLabel) * getQueueCapacities().getCapacity(nodeLabel));
// note: we currently set maxCapacity to capacity
// this might be revised later
setMaxCapacity(nodeLabel, entitlement.getMaxCapacity());
setConfiguredMinCapacityVector(nodeLabel, QueueCapacityVector.of(queueCapacities.getCapacity(nodeLabel) * 100, ResourceUnitCapacityType.PERCENTAGE));
setConfiguredMaxCapacityVector(nodeLabel, QueueCapacityVector.of(queueCapacities.getMaximumCapacity(nodeLabel) * 100, ResourceUnitCapacityType.PERCENTAGE));
LOG.debug("successfully changed to {} for queue {}", capacity, this.getQueuePath());
// update queue used capacity etc
CSQueueUtils.updateQueueStatistics(resourceCalculator, queueContext.getClusterResource(), this, labelManager, nodeLabel);
} finally {
writeLock.unlock();
}
} | 3.26 |
hadoop_ECBlock_isErased_rdh | /**
*
* @return true if it's erased due to erasure, otherwise false
*/
public boolean isErased() {
return isErased;
} | 3.26 |
hadoop_ECBlock_isParity_rdh | /**
*
* @return true if it's parity block, otherwise false
*/
public boolean isParity() {
return isParity;
} | 3.26 |
hadoop_ECBlock_setParity_rdh | /**
* Set true if it's for a parity block.
*
* @param isParity
* is parity or not
*/
public void setParity(boolean isParity) {
this.isParity = isParity;
} | 3.26 |
hadoop_ECBlock_setErased_rdh | /**
* Set true if the block is missing.
*
* @param isErased
* is erased or not
*/
public void setErased(boolean isErased) {
this.isErased = isErased;
} | 3.26 |
hadoop_DirectoryPolicyImpl_availablePolicies_rdh | /**
* Enumerate all available policies.
*
* @return set of the policies.
*/
public static Set<MarkerPolicy> availablePolicies() {
return AVAILABLE_POLICIES;
} | 3.26 |
hadoop_DirectoryPolicyImpl_m0_rdh | /**
* Return path policy for store and paths.
*
* @param path
* path
* @param capability
* capability
* @return true if a capability is active
*/
@Override
public boolean m0(final Path path, final String capability) {
switch (capability) {
/* Marker policy is dynamically determined for the given path. */
case STORE_CAPABILITY_DIRECTORY_MARKER_AWARE :
return true;
case STORE_CAPABILITY_DIRECTORY_MARKER_POLICY_KEEP :
return markerPolicy == MarkerPolicy.Keep;
case STORE_CAPABILITY_DIRECTORY_MARKER_POLICY_DELETE :
return markerPolicy == MarkerPolicy.Delete;
case STORE_CAPABILITY_DIRECTORY_MARKER_POLICY_AUTHORITATIVE :
return markerPolicy == MarkerPolicy.Authoritative;
case STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_KEEP :
return keepDirectoryMarkers(path);
case STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_DELETE :
return !keepDirectoryMarkers(path);
default :
throw new IllegalArgumentException("Unknown capability " + capability);
}
} | 3.26 |
hadoop_DirectoryPolicyImpl_getDirectoryPolicy_rdh | /**
* Create/Get the policy for this configuration.
*
* @param conf
* config
* @param authoritativeness
* Callback to evaluate authoritativeness of a
* path.
* @return a policy
*/
public static DirectoryPolicy getDirectoryPolicy(final Configuration conf, final Predicate<Path> authoritativeness) {
DirectoryPolicy policy;
String option = conf.getTrimmed(DIRECTORY_MARKER_POLICY, DEFAULT_DIRECTORY_MARKER_POLICY);
switch (option.toLowerCase(Locale.ENGLISH)) {
case DIRECTORY_MARKER_POLICY_DELETE :
// backwards compatible.
LOG.debug("Directory markers will be deleted");
policy = DELETE;
break;
case DIRECTORY_MARKER_POLICY_KEEP :
LOG.debug("Directory markers will be kept");
policy = KEEP;
break;
case DIRECTORY_MARKER_POLICY_AUTHORITATIVE :
LOG.debug("Directory markers will be kept on authoritative" + " paths");
policy = new DirectoryPolicyImpl(MarkerPolicy.Authoritative, authoritativeness);
break;
default :
throw new IllegalArgumentException(UNKNOWN_MARKER_POLICY + option);
}
return policy;
} | 3.26 |
hadoop_AbfsHttpOperation_getConnOutputStream_rdh | /**
* Gets the connection output stream.
*
* @return output stream.
* @throws IOException
*/
OutputStream getConnOutputStream() throws IOException {
return connection.getOutputStream();
} | 3.26 |
hadoop_AbfsHttpOperation_sendRequest_rdh | /**
* Sends the HTTP request. Note that HttpUrlConnection requires that an
* empty buffer be sent in order to set the "Content-Length: 0" header, which
* is required by our endpoint.
*
* @param buffer
* the request entity body.
* @param offset
* an offset into the buffer where the data begins.
* @param length
* the length of the data in the buffer.
* @throws IOException
* if an error occurs.
*/
public void sendRequest(byte[] buffer, int offset, int length) throws IOException {
this.connection.setDoOutput(true);
this.connection.setFixedLengthStreamingMode(length);
if (buffer == null) {
// An empty buffer is sent to set the "Content-Length: 0" header, which
// is required by our endpoint.
buffer = new byte[]{ };
offset = 0;
length = 0;
}
// send the request body
long startTime = 0;
startTime = System.nanoTime();
OutputStream outputStream = null;
// Updates the expected bytes to be sent based on length.
this.expectedBytesToBeSent = length;
try {
try {
/* Without expect header enabled, if getOutputStream() throws
an exception, it gets caught by the restOperation. But with
expect header enabled we return back without throwing an exception
for the correct response code processing.
*/
outputStream = getConnOutputStream();
} catch (IOException e) {
/* If getOutputStream fails with an exception and expect header
is enabled, we return back without throwing an exception to
the caller. The caller is responsible for setting the correct status code.
If expect header is not enabled, we throw back the exception.
*/
String expectHeader = getConnProperty(EXPECT);
if ((expectHeader != null) && expectHeader.equals(HUNDRED_CONTINUE)) {
LOG.debug("Getting output stream failed with expect header enabled, returning back ", e);
return;
} else {
LOG.debug("Getting output stream failed without expect header enabled, throwing exception ", e);
throw e;
}
}
// update bytes sent for successful as well as failed attempts via the
// accompanying statusCode.
this.bytesSent = length;
// If this fails with or without expect header enabled,
// it throws an IOException.
outputStream.write(buffer, offset, length);
} finally {
// Closing the opened output stream
if (outputStream != null) {
outputStream.close();
}
this.f1 = elapsedTimeMs(startTime);
}
} | 3.26 |
hadoop_AbfsHttpOperation_isNullInputStream_rdh | /**
* Check null stream, this is to pass findbugs's redundant check for NULL
*
* @param stream
* InputStream
*/
private boolean isNullInputStream(InputStream stream) {
return stream == null ? true : false;
} | 3.26 |
hadoop_AbfsHttpOperation_toString_rdh | // Returns a trace message for the request
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append(statusCode);
sb.append(",");
sb.append(storageErrorCode);
sb.append(",");
sb.append(expectedAppendPos);
sb.append(",cid=");sb.append(getClientRequestId());
sb.append(",rid=");
sb.append(requestId);
sb.append(",connMs=");
sb.append(connectionTimeMs);
sb.append(",sendMs=");
sb.append(f1);
sb.append(",recvMs=");
sb.append(recvResponseTimeMs);
sb.append(",sent=");
sb.append(bytesSent);
sb.append(",recv=");
sb.append(bytesReceived);
sb.append(",");
sb.append(method);
sb.append(",");
sb.append(getMaskedUrl());
return sb.toString();
} | 3.26 |
hadoop_AbfsHttpOperation_getConnRequestMethod_rdh | /**
* Gets the connection request method.
*
* @return request method.
*/
String getConnRequestMethod() {
return connection.getRequestMethod();
} | 3.26 |
hadoop_AbfsHttpOperation_getConnUrl_rdh | /**
* Gets the connection url.
*
* @return url.
*/
URL getConnUrl() {
return connection.getURL();
} | 3.26 |
hadoop_AbfsHttpOperation_processResponse_rdh | /**
* Gets and processes the HTTP response.
*
* @param buffer
* a buffer to hold the response entity body
* @param offset
* an offset in the buffer where the data will begin.
* @param length
* the number of bytes to be written to the buffer.
* @throws IOException
* if an error occurs.
*/
public void processResponse(final byte[] buffer, final int offset, final int length) throws IOException {
// get the response
long startTime = 0;
startTime = System.nanoTime();
this.statusCode = getConnResponseCode();
this.recvResponseTimeMs = elapsedTimeMs(startTime);
this.statusDescription = getConnResponseMessage();
this.requestId = this.connection.getHeaderField(HttpHeaderConfigurations.X_MS_REQUEST_ID);
if (this.requestId == null) {
this.requestId = AbfsHttpConstants.EMPTY_STRING;
}
// dump the headers
AbfsIoUtils.dumpHeadersToDebugLog("Response Headers", connection.getHeaderFields());
if (AbfsHttpConstants.HTTP_METHOD_HEAD.equals(this.method)) {
// If it is HEAD, and it is ERROR
return;
}
startTime = System.nanoTime();
if (statusCode >= HttpURLConnection.HTTP_BAD_REQUEST) {
processStorageErrorResponse();
this.recvResponseTimeMs += elapsedTimeMs(startTime);
this.bytesReceived = this.connection.getHeaderFieldLong(HttpHeaderConfigurations.CONTENT_LENGTH, 0);
} else {
// consume the input stream to release resources
int totalBytesRead = 0;
try (InputStream stream = this.connection.getInputStream()) {
if (isNullInputStream(stream)) {
return;
}
boolean endOfStream = false;
// this is a list operation and need to retrieve the data
// need a better solution
if (AbfsHttpConstants.HTTP_METHOD_GET.equals(this.method) && (buffer == null)) {
parseListFilesResponse(stream);
} else {
if (buffer != null) {
while (totalBytesRead < length) {
int bytesRead = stream.read(buffer, offset + totalBytesRead, length - totalBytesRead);
if (bytesRead == (-1)) {
endOfStream = true;
break;
}
totalBytesRead += bytesRead;
}
}
if ((!endOfStream) && (stream.read() != (-1))) {
// read and discard
int bytesRead = 0;
byte[] b = new byte[CLEAN_UP_BUFFER_SIZE];
while ((bytesRead = stream.read(b)) >= 0) {
totalBytesRead += bytesRead;
}
}
}
} catch (IOException ex) {
LOG.warn("IO/Network error: {} {}: {}", method, getMaskedUrl(), ex.getMessage());
LOG.debug("IO Error: ", ex);
throw ex;
} finally {
this.recvResponseTimeMs += elapsedTimeMs(startTime);
this.bytesReceived = totalBytesRead;
}
}
} | 3.26 |
hadoop_AbfsHttpOperation_openConnection_rdh | /**
* Open the HTTP connection.
*
* @throws IOException
* if an error occurs.
*/
private HttpURLConnection openConnection() throws IOException {
long start = System.nanoTime();
try {
return ((HttpURLConnection) (url.openConnection()));
} finally {
connectionTimeMs = elapsedTimeMs(start);
}
} | 3.26 |
hadoop_AbfsHttpOperation_getConnResponseCode_rdh | /**
* Gets the connection response code.
*
* @return response code.
* @throws IOException
*/
Integer getConnResponseCode() throws IOException {
return connection.getResponseCode();
} | 3.26 |
hadoop_AbfsHttpOperation_getLogString_rdh | // Returns a trace message for the ABFS API logging service to consume
public String getLogString() {
final StringBuilder sb = new StringBuilder();
sb.append("s=").append(statusCode).append(" e=").append(storageErrorCode).append(" ci=").append(getClientRequestId()).append(" ri=").append(requestId).append(" ct=").append(connectionTimeMs).append(" st=").append(f1).append(" rt=").append(recvResponseTimeMs).append(" bs=").append(bytesSent).append(" br=").append(bytesReceived).append(" m=").append(method).append(" u=").append(getMaskedEncodedUrl());
return sb.toString();
} | 3.26 |
hadoop_AbfsHttpOperation_getConnResponseMessage_rdh | /**
* Gets the connection response message.
*
* @return response message.
* @throws IOException
*/
String getConnResponseMessage() throws IOException {
return connection.getResponseMessage();
} | 3.26 |
hadoop_AbfsHttpOperation_processStorageErrorResponse_rdh | /**
* When the request fails, this function is used to parse the response
* and extract the storageErrorCode and storageErrorMessage. Any errors
* encountered while attempting to process the error response are logged,
* but otherwise ignored.
*
* For storage errors, the response body *usually* has the following format:
*
* {
* "error":
* {
* "code": "string",
* "message": "string"
* }
* }
*/
private void processStorageErrorResponse() {
try (InputStream stream = connection.getErrorStream()) {
if (stream == null) {
return;
}
JsonFactory jf = new JsonFactory();
try (JsonParser jp = jf.createParser(stream)) {
String fieldName;
String fieldValue;
jp.nextToken();// START_OBJECT - {
jp.nextToken();// FIELD_NAME - "error":
jp.nextToken();// START_OBJECT - {
jp.nextToken();
while (jp.hasCurrentToken()) {
if (jp.getCurrentToken() == JsonToken.FIELD_NAME) {
fieldName = jp.getCurrentName();
jp.nextToken();
fieldValue = jp.getText();
switch (fieldName) {
case "code" :
storageErrorCode = fieldValue;
break;
case "message" :
storageErrorMessage = fieldValue;
break;
case "ExpectedAppendPos" :
expectedAppendPos = fieldValue;
break;
default :
break;
}
}
jp.nextToken();
}
}
} catch (IOException ex) {
// Per the javadoc above, errors hit while parsing the error response are logged and otherwise ignored.
LOG.debug("ExpectedError: ", ex);
}
} | 3.26 |
hadoop_AbfsHttpOperation_elapsedTimeMs_rdh | /**
* Returns the elapsed time in milliseconds.
*/
private long elapsedTimeMs(final long startTime) {
return (System.nanoTime() - startTime) / ONE_MILLION;
} | 3.26 |
hadoop_AbfsHttpOperation_getConnProperty_rdh | /**
* Gets the connection request property for a key.
*
* @param key
* The request property key.
* @return request property value.
*/
String getConnProperty(String key) {
return connection.getRequestProperty(key);
} | 3.26 |
hadoop_AbfsHttpOperation_parseListFilesResponse_rdh | /**
* Parse the list file response
*
* @param stream
* InputStream contains the list results.
* @throws IOException
*/
private void parseListFilesResponse(final InputStream stream) throws IOException {
if (stream == null) {
return;
}
if (listResultSchema != null) {
// already parsed the response
return;
}
try {
final ObjectMapper objectMapper = new ObjectMapper();
this.listResultSchema = objectMapper.readValue(stream, ListResultSchema.class);
} catch (IOException ex) {
LOG.error("Unable to deserialize list results", ex);
throw ex;
}
} | 3.26 |
hadoop_CleanerMetrics_reportCleaningStart_rdh | /**
* Report the start of a new run of the cleaner.
*/
public void reportCleaningStart() {
processedFiles.set(0);
deletedFiles.set(0);
fileErrors.set(0);
} | 3.26 |
hadoop_CleanerMetrics_reportAFileProcess_rdh | /**
* Report a process operation at the current system time
*/
public void reportAFileProcess() {
totalProcessedFiles.incr();
processedFiles.incr();
} | 3.26 |
hadoop_CleanerMetrics_reportAFileDelete_rdh | /**
* Report a delete operation at the current system time
*/
public void reportAFileDelete() {
totalProcessedFiles.incr();
processedFiles.incr();
totalDeletedFiles.incr();
deletedFiles.incr();
} | 3.26 |
hadoop_CleanerMetrics_reportAFileError_rdh | /**
* Report a process operation error at the current system time
*/
public void reportAFileError() {
totalProcessedFiles.incr();
processedFiles.incr();
totalFileErrors.incr();
fileErrors.incr();
} | 3.26 |
hadoop_Tracer_getCurrentSpan_rdh | /**
*
* Return active span.
*
* @return org.apache.hadoop.tracing.Span
*/
public static Span getCurrentSpan() {
return null;
} | 3.26 |
hadoop_Tracer_curThreadTracer_rdh | // Keeping this function at the moment for HTrace compatibility,
// in fact all threads share a single global tracer for OpenTracing.
public static Tracer curThreadTracer() {
return globalTracer;
} | 3.26 |
hadoop_QueueAclsInfo_getOperations_rdh | /**
* Get operations allowed on the queue.
*
* @return array of String
*/
public String[] getOperations() {
return operations;
} | 3.26 |
hadoop_QueueAclsInfo_getQueueName_rdh | /**
* Get queue name.
*
* @return name
*/
public String getQueueName() {
return queueName;
} | 3.26 |
hadoop_IdentityMapper_map_rdh | /**
* The identity function. Input key/value pair is written directly to
* output.
*/
public void map(K key, V val, OutputCollector<K, V> output,
Reporter reporter) throws IOException {
output.collect(key, val);
} | 3.26 |
hadoop_AclUtil_getAclFromPermAndEntries_rdh | /**
* Given permissions and extended ACL entries, returns the full logical ACL.
*
* @param perm
* FsPermission containing permissions
* @param entries
* List<AclEntry> containing extended ACL entries
* @return List<AclEntry> containing full logical ACL
*/
public static List<AclEntry> getAclFromPermAndEntries(FsPermission perm, List<AclEntry> entries)
{
List<AclEntry> acl = Lists.newArrayListWithCapacity(entries.size() + 3);
// Owner entry implied by owner permission bits.
acl.add(new AclEntry.Builder().setScope(AclEntryScope.ACCESS).setType(AclEntryType.USER).setPermission(perm.getUserAction()).build());
// All extended access ACL entries.
boolean hasAccessAcl = false;
Iterator<AclEntry> entryIter = entries.iterator();
AclEntry curEntry = null;
while (entryIter.hasNext()) {
curEntry = entryIter.next();
if (curEntry.getScope() == AclEntryScope.DEFAULT)
{
break;
}
hasAccessAcl = true;
acl.add(curEntry);
}
// Mask entry implied by group permission bits, or group entry if there is
// no access ACL (only default ACL).
acl.add(new AclEntry.Builder().setScope(AclEntryScope.ACCESS).setType(hasAccessAcl ? AclEntryType.MASK : AclEntryType.GROUP).setPermission(perm.getGroupAction()).build());
// Other entry implied by other bits.
acl.add(new AclEntry.Builder().setScope(AclEntryScope.ACCESS).setType(AclEntryType.OTHER).setPermission(perm.getOtherAction()).build());
// Default ACL entries.
if ((curEntry != null) && (curEntry.getScope() == AclEntryScope.DEFAULT)) {
acl.add(curEntry);
while (entryIter.hasNext()) {
acl.add(entryIter.next());
}
}
return acl;
} | 3.26 |
hadoop_AclUtil_isMinimalAcl_rdh | /**
* Checks if the given entries represent a minimal ACL (contains exactly 3
* entries).
*
* @param entries
* List<AclEntry> entries to check
* @return boolean true if the entries represent a minimal ACL
*/
public static boolean isMinimalAcl(List<AclEntry> entries) {
return entries.size() == 3;
} | 3.26 |
hadoop_FsCommand_getCommandName_rdh | // historical abstract method in Command
@Override
public String getCommandName() {
return getName();
} | 3.26 |
hadoop_FsCommand_registerCommands_rdh | /**
* Register the command classes used by the fs subcommand
*
* @param factory
* where to register the class
*/
public static void registerCommands(CommandFactory factory) {
factory.registerCommands(AclCommands.class);
factory.registerCommands(CopyCommands.class);
factory.registerCommands(Count.class);
factory.registerCommands(Delete.class);
factory.registerCommands(Display.class);
factory.registerCommands(Find.class);
factory.registerCommands(FsShellPermissions.class);
factory.registerCommands(FsUsage.class);
factory.registerCommands(Ls.class);
factory.registerCommands(Mkdir.class);
factory.registerCommands(MoveCommands.class);
factory.registerCommands(SetReplication.class);
factory.registerCommands(Stat.class);
factory.registerCommands(Tail.class);
factory.registerCommands(Head.class);
factory.registerCommands(Test.class);
factory.registerCommands(TouchCommands.class);
factory.registerCommands(Truncate.class);
factory.registerCommands(SnapshotCommands.class);
factory.registerCommands(XAttrCommands.class);
factory.registerCommands(Concat.class);
} | 3.26 |
hadoop_FsCommand_runAll_rdh | /**
*
* @deprecated use {@link Command#run(String...argv)}
*/
@Deprecated
@Override
public int runAll() {
return run(args);
} | 3.26 |
hadoop_WebServlet_m0_rdh | /**
* Get method is modified to support impersonation and Kerberos
* SPNEGO token by forcing client side redirect when accessing
* "/" (root) of the web application context.
*/
@Override
protected void m0(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
if (request.getRequestURI().equals("/")) {
StringBuilder location = new StringBuilder();
location.append("index.html");
if (request.getQueryString() != null) {
// echo query string but prevent HTTP response splitting
location.append("?");
location.append(request.getQueryString().replaceAll("\n", "").replaceAll("\r", ""));
}
response.sendRedirect(location.toString());
} else {
super.doGet(request, response);
}
} | 3.26 |
hadoop_AzureFileSystemInstrumentation_getBlockDownloadLatency_rdh | /**
* Get the current rolling average of the download latency.
*
* @return rolling average of download latency in milliseconds.
*/
public long getBlockDownloadLatency() {
return currentBlockDownloadLatency.getCurrentAverage();
} | 3.26 |
hadoop_AzureFileSystemInstrumentation_updateBytesWrittenInLastSecond_rdh | /**
* Sets the current gauge value for how many bytes were written in the last
* second.
*
* @param currentBytesWritten
* The number of bytes.
*/
public void updateBytesWrittenInLastSecond(long currentBytesWritten) {
bytesWrittenInLastSecond.set(currentBytesWritten);
} | 3.26 |
hadoop_AzureFileSystemInstrumentation_serverErrorEncountered_rdh | /**
* Indicate that we just encountered a server-caused error.
*/
public void serverErrorEncountered() {
serverErrors.incr();
} | 3.26 |
hadoop_AzureFileSystemInstrumentation_setContainerName_rdh | /**
* Sets the container name to tag all the metrics with.
*
* @param containerName
* The container name.
*/
public void setContainerName(String containerName) {
registry.tag("containerName", "Name of the Azure Storage container that these metrics are going against", containerName);
} | 3.26 |
hadoop_AzureFileSystemInstrumentation_setAccountName_rdh | /**
* Sets the account name to tag all the metrics with.
*
* @param accountName
* The account name.
*/
public void setAccountName(String accountName) {
registry.tag("accountName", "Name of the Azure Storage account that these metrics are going against", accountName);
} | 3.26 |
hadoop_AzureFileSystemInstrumentation_getBlockUploadLatency_rdh | /**
* Get the current rolling average of the upload latency.
*
* @return rolling average of upload latency in milliseconds.
*/
public long getBlockUploadLatency() {
return currentBlockUploadLatency.getCurrentAverage();
} | 3.26 |
hadoop_AzureFileSystemInstrumentation_getCurrentMaximumUploadBandwidth_rdh | /**
* Get the current maximum upload bandwidth.
*
* @return maximum upload bandwidth in bytes per second.
*/
public long getCurrentMaximumUploadBandwidth() {
return currentMaximumUploadBytesPerSecond;
} | 3.26 |
hadoop_AzureFileSystemInstrumentation_getMetricsRegistryInfo_rdh | /**
* Get the metrics registry information.
*
* @return The metrics registry information.
*/
public MetricsInfo getMetricsRegistryInfo() {
return registry.info();
} | 3.26 |
hadoop_AzureFileSystemInstrumentation_currentUploadBytesPerSecond_rdh | /**
* Record the current bytes-per-second upload rate seen.
*
* @param bytesPerSecond
* The bytes per second.
*/
public synchronized void currentUploadBytesPerSecond(long bytesPerSecond) {
if (bytesPerSecond > currentMaximumUploadBytesPerSecond) {
currentMaximumUploadBytesPerSecond = bytesPerSecond;
maximumUploadBytesPerSecond.set(bytesPerSecond);
}
} | 3.26 |
hadoop_AzureFileSystemInstrumentation_clientErrorEncountered_rdh | /**
* Indicate that we just encountered a client-side error.
*/
public void clientErrorEncountered() {
clientErrors.incr();
} | 3.26 |
hadoop_AzureFileSystemInstrumentation_directoryDeleted_rdh | /**
* Indicate that we just deleted a directory through WASB.
*/
public void directoryDeleted() {
numberOfDirectoriesDeleted.incr();
} | 3.26 |
hadoop_AzureFileSystemInstrumentation_fileDeleted_rdh | /**
* Indicate that we just deleted a file through WASB.
*/
public void fileDeleted() {
numberOfFilesDeleted.incr();
} | 3.26 |
hadoop_AzureFileSystemInstrumentation_blockDownloaded_rdh | /**
* Indicate that we just downloaded a block and record its latency.
*
* @param latency
* The latency in milliseconds.
*/
public void blockDownloaded(long latency) {
currentBlockDownloadLatency.addPoint(latency);
} | 3.26 |
hadoop_AzureFileSystemInstrumentation_updateBytesReadInLastSecond_rdh | /**
* Sets the current gauge value for how many bytes were read in the last
* second.
*
* @param currentBytesRead
* The number of bytes.
*/
public void updateBytesReadInLastSecond(long currentBytesRead) {
bytesReadInLastSecond.set(currentBytesRead);
} | 3.26 |
hadoop_AzureFileSystemInstrumentation_getFileSystemInstanceId_rdh | /**
* The unique identifier for this file system in the metrics.
*
* @return The unique identifier.
*/
public UUID getFileSystemInstanceId() {
return fileSystemInstanceId;
} | 3.26 |
hadoop_AzureFileSystemInstrumentation_getCurrentMaximumDownloadBandwidth_rdh | /**
* Get the current maximum download bandwidth.
*
* @return maximum download bandwidth in bytes per second.
*/
public long getCurrentMaximumDownloadBandwidth() {
return f0;
} | 3.26 |
hadoop_AzureFileSystemInstrumentation_webResponse_rdh | /**
* Indicate that we just got a web response from Azure Storage. This should
* be called for every web request/response we do (to get accurate metrics
* of how we're hitting the storage service).
*/
public void webResponse() {
numberOfWebResponses.incr();
inMemoryNumberOfWebResponses.incrementAndGet();
} | 3.26 |
hadoop_AzureFileSystemInstrumentation_rawBytesDownloaded_rdh | /**
* Indicate that we just downloaded some data from Azure storage.
*
* @param numberOfBytes
* The raw number of bytes downloaded (including overhead).
*/
public void rawBytesDownloaded(long numberOfBytes) {
rawBytesDownloaded.incr(numberOfBytes);
} | 3.26 |
hadoop_AzureFileSystemInstrumentation_getCurrentWebResponses_rdh | /**
* Gets the current number of web responses obtained from Azure Storage.
*
* @return The number of web responses.
*/
public long getCurrentWebResponses() {
return inMemoryNumberOfWebResponses.get();
} | 3.26 |
hadoop_AzureFileSystemInstrumentation_blockUploaded_rdh | /**
* Indicate that we just uploaded a block and record its latency.
*
* @param latency
* The latency in milliseconds.
*/
public void blockUploaded(long latency) {
currentBlockUploadLatency.addPoint(latency);
} | 3.26 |
hadoop_AzureFileSystemInstrumentation_rawBytesUploaded_rdh | /**
* Indicate that we just uploaded some data to Azure storage.
*
* @param numberOfBytes
* The raw number of bytes uploaded (including overhead).
*/
public void rawBytesUploaded(long numberOfBytes) {
rawBytesUploaded.incr(numberOfBytes);
} | 3.26 |
hadoop_MRJobConfUtil_redact_rdh | /**
* Redact job configuration properties.
*
* @param conf
* the job configuration to redact
*/
public static void redact(final Configuration conf) {
for (String prop : conf.getTrimmedStringCollection(MRJobConfig.MR_JOB_REDACTED_PROPERTIES)) {
conf.set(prop, REDACTION_REPLACEMENT_VAL);
}
} | 3.26 |
hadoop_MRJobConfUtil_m0_rdh | /**
* Load the values defined from a configuration file, including the delta
* progress and the maximum time between each log message.
*
* @param conf
*/
public static void m0(final Configuration conf) {
if (progressMinDeltaThreshold == null) {
progressMinDeltaThreshold = new Double(PROGRESS_MIN_DELTA_FACTOR * conf.getDouble(MRJobConfig.TASK_LOG_PROGRESS_DELTA_THRESHOLD, MRJobConfig.TASK_LOG_PROGRESS_DELTA_THRESHOLD_DEFAULT));
}
if (progressMaxWaitDeltaTimeThreshold == null) {
progressMaxWaitDeltaTimeThreshold = TimeUnit.SECONDS.toMillis(conf.getLong(MRJobConfig.TASK_LOG_PROGRESS_WAIT_INTERVAL_SECONDS, MRJobConfig.TASK_LOG_PROGRESS_WAIT_INTERVAL_SECONDS_DEFAULT));
}
}
/**
* Retrieves the min delta progress required to log the task attempt current
* progress.
*
* @return the defined threshold in the conf.
* returns the default value if
* {@link #setTaskLogProgressDeltaThresholds} | 3.26 |
hadoop_MRJobConfUtil_setLocalDirectoriesConfigForTesting_rdh | /**
* Set local directories so that the generated folders are subdirectories of the
* test directories.
*
* @param conf
* @param testRootDir
* @return the configuration with the test-local directories set.
*/
public static Configuration setLocalDirectoriesConfigForTesting(Configuration conf, File testRootDir) {
Configuration config = (conf == null) ? new Configuration() : conf;
final File hadoopLocalDir = new File(testRootDir, "hadoop-dir");
// create the directory
if (!hadoopLocalDir.getAbsoluteFile().mkdirs()) {
f0.info("{} directory already exists", hadoopLocalDir.getPath());
}
Path mapredHadoopTempDir = new Path(hadoopLocalDir.getPath());
Path mapredSystemDir = new Path(mapredHadoopTempDir, "system");
Path stagingDir = new Path(mapredHadoopTempDir, "tmp/staging");
// Set the temp directories a subdir of the test directory.
config.set("mapreduce.jobtracker.staging.root.dir", stagingDir.toString());
config.set("mapreduce.jobtracker.system.dir", mapredSystemDir.toString());
config.set("mapreduce.cluster.temp.dir", mapredHadoopTempDir.toString());
config.set("mapreduce.cluster.local.dir", new Path(mapredHadoopTempDir, "local").toString());
return config;
} | 3.26 |
hadoop_MRJobConfUtil_getTaskProgressReportInterval_rdh | /**
* Get the progress heartbeat interval configuration for mapreduce tasks.
* By default, the value of progress heartbeat interval is a proportion of
* that of task timeout.
*
* @param conf
* the job configuration to read from
* @return the value of task progress report interval
*/
public static long getTaskProgressReportInterval(final Configuration conf) {
long taskHeartbeatTimeOut = conf.getLong(MRJobConfig.TASK_TIMEOUT, MRJobConfig.DEFAULT_TASK_TIMEOUT_MILLIS);
return conf.getLong(MRJobConfig.TASK_PROGRESS_REPORT_INTERVAL, ((long) (TASK_REPORT_INTERVAL_TO_TIMEOUT_RATIO * taskHeartbeatTimeOut)));
} | 3.26 |
hadoop_S3ACommitterFactory_chooseCommitterFactory_rdh | /**
* Choose a committer from the FS and task configurations. Task Configuration
* takes priority, allowing execution engines to dynamically change
* committer on a query-by-query basis.
*
* @param fileSystem
* FS
* @param outputPath
* destination path
* @param taskConf
* configuration from the task
* @return An S3A committer if chosen, or "null" for the classic value
* @throws PathCommitException
* on a failure to identify the committer
*/
private AbstractS3ACommitterFactory chooseCommitterFactory(S3AFileSystem fileSystem, Path outputPath, Configuration taskConf) throws PathCommitException {
AbstractS3ACommitterFactory factory;
// the FS conf will have had its per-bucket values resolved, unlike
// job/task configurations.
Configuration fsConf = fileSystem.getConf();
String name = fsConf.getTrimmed(FS_S3A_COMMITTER_NAME, COMMITTER_NAME_FILE);
name = taskConf.getTrimmed(FS_S3A_COMMITTER_NAME, name);
LOG.debug("Committer option is {}", name);
switch (name) {
case COMMITTER_NAME_FILE :
factory = null;
break;
case COMMITTER_NAME_DIRECTORY :
factory = new DirectoryStagingCommitterFactory();
break;
case COMMITTER_NAME_PARTITIONED :
factory = new PartitionedStagingCommitterFactory();
break;
case COMMITTER_NAME_MAGIC :
factory = new MagicS3GuardCommitterFactory();
break;
case InternalCommitterConstants.COMMITTER_NAME_STAGING :
factory = new StagingCommitterFactory();
break;
default :
throw new PathCommitException(outputPath, ("Unknown committer: \""
+ name) + "\"");
}
return factory;
} | 3.26 |
hadoop_S3ACommitterFactory_createTaskCommitter_rdh | /**
* Create a task committer.
*
* @param fileSystem
* destination FS.
* @param outputPath
* final output path for work
* @param context
* job context
* @return a committer
* @throws IOException
* instantiation failure
*/
@Override
public PathOutputCommitter createTaskCommitter(S3AFileSystem fileSystem, Path outputPath, TaskAttemptContext context) throws IOException {
AbstractS3ACommitterFactory factory = chooseCommitterFactory(fileSystem, outputPath, context.getConfiguration());
if (factory != null) {
PathOutputCommitter committer = factory.createTaskCommitter(fileSystem, outputPath, context);
LOG.info("Using committer {} to output data to {}", committer
instanceof AbstractS3ACommitter ? ((AbstractS3ACommitter) (committer)).getName() : committer.toString(), outputPath);
return committer;
} else {
LOG.warn("Using standard FileOutputCommitter to commit work." + " This is slow and potentially unsafe.");
return createFileOutputCommitter(outputPath, context);
}
} | 3.26 |
hadoop_BlockStorageMovementCommand_getBlockPoolId_rdh | /**
* Returns block pool ID.
*/
public String getBlockPoolId() {
return blockPoolId;
} | 3.26 |
hadoop_BlockStorageMovementCommand_getBlockMovingTasks_rdh | /**
* Returns the list of blocks to be moved.
*/
public Collection<BlockMovingInfo> getBlockMovingTasks() {
return blockMovingTasks;
} | 3.26 |
hadoop_BinaryRecordOutput_get_rdh | /**
* Get a thread-local record output for the supplied DataOutput.
*
* @param out
* data output stream
* @return binary record output corresponding to the supplied DataOutput.
*/
public static BinaryRecordOutput get(DataOutput out) {
BinaryRecordOutput bout = B_OUT.get();
bout.m0(out);
return bout;
} | 3.26 |
hadoop_RateLimitingFactory_create_rdh | /**
* Create an instance.
* If the rate is 0, return the unlimited rate.
*
* @param capacity
* capacity in permits/second.
* @return limiter restricted to the given capacity.
*/
public static RateLimiting create(int capacity) {
return capacity == 0 ? unlimitedRate() : new RestrictedRateLimiting(capacity);
} | 3.26 |
hadoop_RateLimitingFactory_unlimitedRate_rdh | /**
* Get the unlimited rate.
*
* @return a rate limiter which always has capacity.
*/
public static RateLimiting unlimitedRate() {
return UNLIMITED;
} | 3.26 |
hadoop_CommitUtils_getS3AFileSystem_rdh | /**
* Get the S3A FS of a path.
*
* @param path
* path to examine
* @param conf
* config
* @param magicCommitRequired
* is magic complete required in the FS?
* @return the filesystem
* @throws PathCommitException
* output path isn't to an S3A FS instance, or
* if {@code magicCommitRequired} is set, if doesn't support these commits.
* @throws IOException
* failure to instantiate the FS
*/
public static S3AFileSystem getS3AFileSystem(Path path, Configuration conf, boolean magicCommitRequired) throws PathCommitException, IOException {
S3AFileSystem s3AFS = verifyIsS3AFS(path.getFileSystem(conf), path);
if (magicCommitRequired) {
verifyIsMagicCommitFS(s3AFS);
}
return s3AFS;
} | 3.26 |
hadoop_CommitUtils_extractJobID_rdh | /**
* Extract the job ID from a configuration.
*
* @param conf
* configuration
* @return a job ID or null.
*/
public static String extractJobID(Configuration conf) {
String jobUUID = conf.getTrimmed(FS_S3A_COMMITTER_UUID, "");
if (!jobUUID.isEmpty()) {
return jobUUID;
}
// there is no job UUID.
// look for one from spark
jobUUID = conf.getTrimmed(SPARK_WRITE_UUID, "");
if (!jobUUID.isEmpty()) {
return jobUUID;
}
jobUUID = conf.getTrimmed(MR_JOB_ID, "");
if (!jobUUID.isEmpty()) {
return jobUUID;
}
return null;
} | 3.26 |
hadoop_CommitUtils_verifyIsMagicCommitPath_rdh | /**
* Verify that the path is a magic one.
*
* @param fs
* filesystem
* @param path
* path
* @throws PathCommitException
* if the path isn't a magic commit path
*/
public static void verifyIsMagicCommitPath(S3AFileSystem fs, Path path) throws PathCommitException {
verifyIsMagicCommitFS(fs);
if (!fs.isMagicCommitPath(path)) {
throw new PathCommitException(path, E_BAD_PATH);
}
} | 3.26 |
hadoop_CommitUtils_verifyIsMagicCommitFS_rdh | /**
* Verify that an S3A FS instance is a magic commit FS.
*
* @param fs
* filesystem
* @throws PathCommitException
* if the FS isn't a magic commit FS.
*/
public static void verifyIsMagicCommitFS(S3AFileSystem fs) throws PathCommitException {
if (!fs.isMagicCommitEnabled()) {
// dump out details to console for support diagnostics
String fsUri = fs.getUri().toString();
LOG.error("{}: {}:\n{}", E_NORMAL_FS, fsUri, fs);
// then fail
throw new PathCommitException(fsUri, E_NORMAL_FS);
}
} | 3.26 |
hadoop_CommitUtils_validateCollectionClass_rdh | /**
* Verify that all instances in a collection are of the given class.
*
* @param it
* iterator
* @param classname
* classname to require
* @throws ValidationFailure
* on a failure
*/
public static void validateCollectionClass(Iterable it, Class classname) throws ValidationFailure {
for (Object o : it) {
verify(o.getClass().equals(classname), "Collection element is not a %s: %s", classname, o.getClass());
}
} | 3.26 |
hadoop_CommitUtils_verifyIsS3AFS_rdh | /**
* Verify that an FS is an S3A FS.
*
* @param fs
* filesystem
* @param path
* path to use in the exception
* @return the typecast FS.
* @throws PathCommitException
* if the FS is not an S3A FS.
*/
public static S3AFileSystem verifyIsS3AFS(FileSystem fs, Path path) throws PathCommitException {
if (!(fs instanceof S3AFileSystem)) {
throw new PathCommitException(path, E_WRONG_FS);
}
return ((S3AFileSystem) (fs));
} | 3.26 |
hadoop_FSStarvedApps_take_rdh | /**
* Blocking call to fetch the next app to process. The returned app is
* tracked until the next call to this method. This tracking assumes a
* single reader.
*
* @return starved application to process
* @throws InterruptedException
* if interrupted while waiting
*/
FSAppAttempt take() throws InterruptedException {
// Reset appBeingProcessed before the blocking call
appBeingProcessed = null;
// Blocking call to fetch the next starved application
FSAppAttempt app = appsToProcess.take();
appBeingProcessed = app;
return app;
} | 3.26 |
hadoop_FSStarvedApps_addStarvedApp_rdh | /**
* Add a starved application if it is not already added.
*
* @param app
* application to add
*/
void addStarvedApp(FSAppAttempt app) {
if ((!app.equals(appBeingProcessed)) && (!appsToProcess.contains(app))) {
appsToProcess.add(app);
}
} | 3.26 |
hadoop_TextSplitter_bigDecimalToString_rdh | /**
* Return the string encoded in a BigDecimal.
* Repeatedly multiply the input value by 65536; the integer portion after such a multiplication
* represents a single character in base 65536. Convert that back into a char and create a
* string out of these until we have no data left.
*/
String bigDecimalToString(BigDecimal bd) {
BigDecimal cur = bd.stripTrailingZeros();
StringBuilder sb = new StringBuilder();
for (int numConverted = 0; numConverted < MAX_CHARS; numConverted++) {
cur = cur.multiply(ONE_PLACE);
int curCodePoint = cur.intValue();
if (0 == curCodePoint) {
break;
}
cur = cur.subtract(new BigDecimal(curCodePoint));
sb.append(Character.toChars(curCodePoint));
}
return sb.toString();
} | 3.26 |
hadoop_TextSplitter_stringToBigDecimal_rdh | /**
* Return a BigDecimal representation of string 'str' suitable for use
* in a numerically-sorting order.
*/
BigDecimal stringToBigDecimal(String str) {
BigDecimal result = BigDecimal.ZERO;
BigDecimal curPlace = ONE_PLACE;// start with 1/65536 to compute the first digit.
int len = Math.min(str.length(), MAX_CHARS);
for (int i = 0; i < len; i++) {
int codePoint = str.codePointAt(i);
result = result.add(tryDivide(new BigDecimal(codePoint), curPlace));
// advance to the next less significant place. e.g., 1/(65536^2) for the second char.
curPlace = curPlace.multiply(ONE_PLACE);
}
return result;
} | 3.26 |
hadoop_TextSplitter_split_rdh | /**
* This method needs to determine the splits between two user-provided strings.
* In the case where the user's strings are 'A' and 'Z', this is not hard; we
* could create two splits from ['A', 'M') and ['M', 'Z'], 26 splits for strings
* beginning with each letter, etc.
*
* If a user has provided us with the strings "Ham" and "Haze", however, we need
* to create splits that differ in the third letter.
*
* The algorithm used is as follows:
* Since there are 2**16 unicode characters, we interpret characters as digits in
* base 65536. Given a string 's' containing characters s_0, s_1 .. s_n, we interpret
* the string as the number: 0.s_0 s_1 s_2.. s_n in base 65536. Having mapped the
* low and high strings into floating-point values, we then use the BigDecimalSplitter
* to establish the even split points, then map the resulting floating point values
* back into strings.
*/
public List<InputSplit> split(Configuration conf, ResultSet results, String colName) throws SQLException {
LOG.warn("Generating splits for a textual index column.");
LOG.warn("If your database sorts in a case-insensitive order, " + "this may result in a partial import or duplicate records.");
LOG.warn("You are strongly encouraged to choose an integral split column.");
String minString = results.getString(1);
String maxString = results.getString(2);
boolean minIsNull = false;
// If the min value is null, switch it to an empty string instead for purposes
// of interpolation. Then add [null, null] as a special case split.
if (null == minString) {
minString = "";
minIsNull = true;
}
if (null == maxString) {
// If the max string is null, then the min string has to be null too.
// Just return a special split for this case.
List<InputSplit> splits = new ArrayList<InputSplit>();
splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(colName + " IS NULL", colName + " IS NULL"));
return splits;
}
// Use this as a hint. May need an extra task if the size doesn't
// divide cleanly.
int numSplits = conf.getInt(MRJobConfig.NUM_MAPS, 1);
String lowClausePrefix = colName + " >= '";
String highClausePrefix = colName + " < '";
// If there is a common prefix between minString and maxString, establish it
// and pull it out of minString and maxString.
int maxPrefixLen = Math.min(minString.length(), maxString.length());
int sharedLen;
for (sharedLen = 0; sharedLen < maxPrefixLen; sharedLen++) {
char c1 = minString.charAt(sharedLen);
char c2 = maxString.charAt(sharedLen);
if (c1 != c2) {
break;
}
}
// The common prefix has length 'sharedLen'. Extract it from both.
String commonPrefix = minString.substring(0, sharedLen);
minString = minString.substring(sharedLen);
maxString = maxString.substring(sharedLen);
List<String> splitStrings = split(numSplits, minString, maxString, commonPrefix);
List<InputSplit> splits = new ArrayList<InputSplit>();
// Convert the list of split point strings into an actual set of InputSplits.
String start = splitStrings.get(0);
for (int i = 1; i < splitStrings.size(); i++) {
String end = splitStrings.get(i);
if (i == (splitStrings.size() - 1)) {
// This is the last one; use a closed interval.
splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit((lowClausePrefix + start) + "'", ((colName + " <= '") + end) + "'"));
} else {
// Normal open-interval case.
splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit((lowClausePrefix + start) + "'", (highClausePrefix + end) + "'"));
}
}
if (minIsNull) {
// Add the special null split at the end.
splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(colName + " IS NULL", colName + " IS NULL"));
}
return splits;
} | 3.26 |
hadoop_ReplicaAccessor_getNetworkDistance_rdh | /**
* Return the network distance between local machine and the remote machine.
*/
public int getNetworkDistance() {
return isLocal() ? 0 : Integer.MAX_VALUE;
} | 3.26 |
hadoop_DockerContainerDeletionTask_getContainerId_rdh | /**
* Get the id of the container to delete.
*
* @return the id of the container to delete.
*/
public String getContainerId() {
return containerId;
} | 3.26 |
hadoop_DockerContainerDeletionTask_run_rdh | /**
* Delete the specified Docker container.
*/
@Override
public void run() {
LOG.debug("Running DeletionTask : {}", this);
LinuxContainerExecutor exec = ((LinuxContainerExecutor) (getDeletionService().getContainerExecutor()));
exec.removeDockerContainer(containerId);
} | 3.26 |
hadoop_DockerContainerDeletionTask_toString_rdh | /**
* Convert the DockerContainerDeletionTask to a String representation.
*
* @return String representation of the DockerContainerDeletionTask.
*/
@Override
public String toString() {
StringBuffer sb = new StringBuffer("DockerContainerDeletionTask : ");
sb.append(" id : ").append(this.getTaskId());
sb.append(" containerId : ").append(this.containerId);
return sb.toString().trim();
} | 3.26 |
hadoop_MappableBlockLoader_verifyChecksum_rdh | /**
* Verifies the block's checksum. This is an I/O intensive operation.
*/
protected void verifyChecksum(long length, FileInputStream metaIn, FileChannel blockChannel, String blockFileName) throws IOException {
// Verify the checksum from the block's meta file
// Get the DataChecksum from the meta file header
BlockMetadataHeader header = BlockMetadataHeader.readHeader(new DataInputStream(new BufferedInputStream(metaIn, BlockMetadataHeader.getHeaderSize())));
try (FileChannel metaChannel = metaIn.getChannel()) {
if (metaChannel == null) {
throw new IOException("Block InputStream meta file has no FileChannel.");
}
DataChecksum checksum = header.getChecksum();
final int bytesPerChecksum = checksum.getBytesPerChecksum();
final int checksumSize = checksum.getChecksumSize();
final int numChunks = ((8 * 1024) * 1024) / bytesPerChecksum;
ByteBuffer blockBuf = ByteBuffer.allocate(numChunks * bytesPerChecksum);
ByteBuffer checksumBuf = ByteBuffer.allocate(numChunks * checksumSize);
// Verify the checksum
int bytesVerified = 0;
while (bytesVerified < length) {
Preconditions.checkState((bytesVerified % bytesPerChecksum) == 0, "Unexpected partial chunk before EOF");
assert (bytesVerified % bytesPerChecksum) == 0;
int bytesRead = fillBuffer(blockChannel, blockBuf);
if (bytesRead == (-1)) {
throw new IOException("checksum verification failed: premature EOF");
}
blockBuf.flip();
// Number of read chunks, including partial chunk at end
int chunks = ((bytesRead + bytesPerChecksum) - 1) / bytesPerChecksum;
checksumBuf.limit(chunks * checksumSize);
fillBuffer(metaChannel, checksumBuf);
checksumBuf.flip();
checksum.verifyChunkedSums(blockBuf, checksumBuf, blockFileName, bytesVerified);
// Success
bytesVerified += bytesRead;
blockBuf.clear();
checksumBuf.clear();
}
}
} | 3.26 |
hadoop_MappableBlockLoader_fillBuffer_rdh | /**
* Reads bytes into a buffer until EOF or the buffer's limit is reached.
*/
protected int fillBuffer(FileChannel channel, ByteBuffer buf) throws IOException {
int bytesRead = channel.read(buf);
if (bytesRead < 0) {
// EOF
return bytesRead;
}
while (buf.remaining() > 0) {
int n = channel.read(buf);
if (n < 0) {
// EOF
return bytesRead;
}
bytesRead += n;
}
return bytesRead;
} | 3.26 |
hadoop_MappableBlockLoader_shutdown_rdh | /**
* Clean up cache, can be used during DataNode shutdown.
*/
void shutdown() {
// Do nothing.
} | 3.26 |
hadoop_StreamUtil_m0_rdh | /**
* It may seem strange to silently switch behaviour when a String
* is not a classname; the reason is simplified Usage:<pre>
* -mapper [classname | program ]
* instead of the explicit Usage:
* [-mapper program | -javamapper classname], -mapper and -javamapper are mutually exclusive.
* (repeat for -reducer, -combiner) </pre>
*/
public static Class m0(Configuration conf, String className, String defaultPackage) {
Class clazz = null;
try {
clazz = conf.getClassByName(className);
} catch (ClassNotFoundException cnf) {
}
if (clazz == null) {
if ((className.indexOf('.') == (-1)) && (defaultPackage != null)) {
className = (defaultPackage + ".") + className;
try {
clazz = conf.getClassByName(className);
} catch (ClassNotFoundException cnf) {
}
}
}
return clazz;
} | 3.26 |
hadoop_StreamUtil_findInClasspath_rdh | /**
*
* @return a jar file path or a base directory or null if not found.
*/
public static String findInClasspath(String className, ClassLoader loader) {
String relPath = className;
relPath = relPath.replace('.', '/');
relPath += ".class";
URL classUrl = loader.getResource(relPath);
String codePath;
if (classUrl != null) {
boolean inJar = classUrl.getProtocol().equals("jar");
codePath = classUrl.toString();
if (codePath.startsWith("jar:")) {
codePath = codePath.substring("jar:".length());
}
if (codePath.startsWith("file:")) {
// can have both
codePath = codePath.substring("file:".length());
}
if (inJar) {
// A jar spec: remove class suffix in /path/my.jar!/package/Class
int bang = codePath.lastIndexOf('!');
codePath = codePath.substring(0, bang);
} else {
// A class spec: remove the /my/package/Class.class portion
int pos = codePath.lastIndexOf(relPath);
if (pos == (-1)) {
throw new IllegalArgumentException((("invalid codePath: className=" + className) + " codePath=") + codePath);
}
codePath = codePath.substring(0, pos);
}
} else {
codePath = null;
}
return codePath;
} | 3.26 |
hadoop_Service_getValue_rdh | /**
* Get the integer value of a state
*
* @return the numeric value of the state
*/
public int getValue() {
return value;
} | 3.26 |
hadoop_Service_toString_rdh | /**
* Get the name of a state
*
* @return the state's name
*/
@Override
public String toString() {
return statename;
} | 3.26 |
hadoop_AbfsClientThrottlingAnalyzer_addBytesTransferred_rdh | /**
* Updates metrics with results from the current storage operation.
*
* @param count
* The count of bytes transferred.
* @param isFailedOperation
* True if the operation failed; otherwise false.
*/
public void addBytesTransferred(long count, boolean isFailedOperation) {
AbfsOperationMetrics metrics = blobMetrics.get();
if (isFailedOperation) {
metrics.addBytesFailed(count);
metrics.incrementOperationsFailed();
} else {
metrics.addBytesSuccessful(count);
metrics.incrementOperationsSuccessful();
}
blobMetrics.set(metrics);
} | 3.26 |
hadoop_AbfsClientThrottlingAnalyzer_timerOrchestrator_rdh | /**
* Synchronized method to suspend or resume timer.
*
* @param timerFunctionality
* resume or suspend.
* @param timerTask
* The timertask object.
* @return true or false.
*/
private synchronized boolean timerOrchestrator(TimerFunctionality timerFunctionality, TimerTask timerTask) {
switch (timerFunctionality) {
case RESUME :
if (isOperationOnAccountIdle.get()) {
resumeTimer();
}
break;
case SUSPEND :
if (accountLevelThrottlingEnabled && ((System.currentTimeMillis() - lastExecutionTime.get()) >= getOperationIdleTimeout())) {
isOperationOnAccountIdle.set(true);
timerTask.cancel();
timer.purge();
return true;
}
break;
default :
break;
}
return false;
} | 3.26 |
hadoop_AbfsClientThrottlingAnalyzer_resumeTimer_rdh | /**
* Resumes the timer if it was stopped.
*/
private void resumeTimer() {
blobMetrics = new AtomicReference<AbfsOperationMetrics>(new AbfsOperationMetrics(System.currentTimeMillis()));
timer.schedule(new TimerTaskImpl(), analysisPeriodMs, analysisPeriodMs);
isOperationOnAccountIdle.set(false);
} | 3.26 |
hadoop_AbfsClientThrottlingAnalyzer_run_rdh | /**
* Periodically analyzes a snapshot of the blob storage metrics and updates
* the sleepDuration in order to appropriately throttle storage operations.
*/
@Override
public void run() {
boolean doWork = false;
try {
doWork = doingWork.compareAndSet(0, 1);
// prevent concurrent execution of this task
if (!doWork) {
return;
}
long now = System.currentTimeMillis();
if (timerOrchestrator(TimerFunctionality.SUSPEND, this)) {
return;
}
if ((now - blobMetrics.get().getStartTime()) >= analysisPeriodMs) {
AbfsOperationMetrics oldMetrics = blobMetrics.getAndSet(new AbfsOperationMetrics(now));
oldMetrics.setEndTime(now);
sleepDuration = analyzeMetricsAndUpdateSleepDuration(oldMetrics, sleepDuration);
}
} finally {
if (doWork) {
doingWork.set(0);
}
}
} | 3.26 |
hadoop_JobACLsManager_constructJobACLs_rdh | /**
* Construct the jobACLs from the configuration so that they can be kept in
* the memory. If authorization is disabled on the JT, nothing is constructed
* and an empty map is returned.
*
* @return JobACL to AccessControlList map.
*/
public Map<JobACL, AccessControlList> constructJobACLs(Configuration conf) {
Map<JobACL, AccessControlList> acls = new HashMap<JobACL, AccessControlList>();
// Don't construct anything if authorization is disabled.
if (!areACLsEnabled()) {
return acls;
}
for (JobACL aclName : JobACL.values()) {
String aclConfigName = aclName.getAclName();
String aclConfigured = conf.get(aclConfigName);
if (aclConfigured == null) {
// If ACLs are not configured at all, we grant no access to anyone. So
// jobOwner and cluster administrator _only_ can do 'stuff'
aclConfigured = " ";
}
acls.put(aclName, new AccessControlList(aclConfigured));
}
return acls;
} | 3.26 |