name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_AbfsInputStream_getStreamStatistics_rdh | /**
* Getter for AbfsInputStreamStatistics.
*
* @return an instance of AbfsInputStreamStatistics.
*/
@VisibleForTesting
public AbfsInputStreamStatistics getStreamStatistics() {
return streamStatistics;
} | 3.26 |
hadoop_AbfsInputStream_seek_rdh | /**
* Seek to given position in stream.
*
* @param n
* position to seek to
* @throws IOException
* if there is an error
* @throws EOFException
* if attempting to seek past end of file
*/
@Override
public synchronized void seek(long n) throws IOException {
LOG.debug("requested seek to position {}", n);
if (closed) {
throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
}
if (n < 0) {
throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK);
}
if (n > contentLength) {
throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF);
}
if (streamStatistics != null) {
streamStatistics.seek(n, fCursor);
}
// next read will read from here
nextReadPos = n;
LOG.debug("set nextReadPos to {}", nextReadPos);
} | 3.26 |
hadoop_AbfsInputStream_mark_rdh | /**
* Not supported by this stream. Throws {@link UnsupportedOperationException}
*
* @param readlimit
* ignored
*/
@Override
public synchronized void mark(int readlimit) {
throw new UnsupportedOperationException("mark()/reset() not supported on this stream");
} | 3.26 |
hadoop_AbfsInputStream_reset_rdh | /**
* Not supported by this stream. Throws {@link UnsupportedOperationException}
*/
@Override
public synchronized void reset() throws IOException {
throw new UnsupportedOperationException("mark()/reset() not supported on this stream");
} | 3.26 |
hadoop_AbfsInputStream_incrementReadOps_rdh | /**
* Increment Read Operations.
*/
private void incrementReadOps() {
if (statistics != null) {
statistics.incrementReadOps(1);
}
} | 3.26 |
hadoop_AbfsInputStream_toString_rdh | /**
 * Get a string representation of the stream, including its statistics.
*
* @return a string value.
 */
@Override
public String toString() {
final StringBuilder sb = new StringBuilder(super.toString());
sb.append("AbfsInputStream@(").append(this.hashCode()).append("){");
sb.append(("[" + CAPABILITY_SAFE_READAHEAD) + "]");
if (streamStatistics != null) {
sb.append(", ").append(streamStatistics);
}
sb.append("}");
return sb.toString();
} | 3.26 |
hadoop_AbfsInputStream_getBytesFromReadAhead_rdh | /**
* Getter for bytes read from readAhead buffer that fills asynchronously.
*
* @return value of the counter in long.
*/
@VisibleForTesting
public long getBytesFromReadAhead() {
return bytesFromReadAhead;
} | 3.26 |
hadoop_AbfsInputStream_available_rdh | /**
* Return the size of the remaining available bytes
* if the size is less than or equal to {@link Integer#MAX_VALUE},
* otherwise, return {@link Integer#MAX_VALUE}.
*
* This is to match the behavior of DFSInputStream.available(),
* which some clients may rely on (HBase write-ahead log reading in
* particular).
*/
@Override
public synchronized int available() throws IOException {
if (closed) {
throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
}
final long remaining =
this.contentLength - this.getPos();
return remaining <= Integer.MAX_VALUE ? ((int) (remaining)) : Integer.MAX_VALUE;
} | 3.26 |
hadoop_AbfsInputStream_getPos_rdh | /**
* Return the current offset from the start of the file
*
* @throws IOException
* throws {@link IOException} if there is an error
*/
@Override
public synchronized long getPos() throws IOException {
if (closed) {
      throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
    }
    return nextReadPos < 0 ? 0 : nextReadPos;
} | 3.26 |
hadoop_AbfsInputStream_seekToNewSource_rdh | /**
* Seeks a different copy of the data. Returns true if
* found a new source, false otherwise.
*
* @throws IOException
* throws {@link IOException} if there is an error
*/
@Override
public boolean seekToNewSource(long l) throws IOException {
return false;
} | 3.26 |
hadoop_AbstractDNSToSwitchMapping_isSingleSwitch_rdh | /**
* Predicate that indicates that the switch mapping is known to be
* single-switch. The base class returns false: it assumes all mappings are
* multi-rack. Subclasses may override this with methods that are more aware
* of their topologies.
*
* <p>
*
 * This method is used when parts of Hadoop need to know whether to apply
* single rack vs multi-rack policies, such as during block placement.
* Such algorithms behave differently if they are on multi-switch systems.
* </p>
*
* @return true if the mapping thinks that it is on a single switch
*/
public boolean isSingleSwitch() {
return false;
} | 3.26 |
hadoop_AbstractDNSToSwitchMapping_getSwitchMap_rdh | /**
* Get a copy of the map (for diagnostics)
*
* @return a clone of the map or null for none known
*/
public Map<String, String> getSwitchMap() {
return null;
} | 3.26 |
hadoop_PeriodicRLESparseResourceAllocation_removeInterval_rdh | /**
* Removes a resource for the specified interval.
*
* @param interval
* the {@link ReservationInterval} for which the resource is
* to be removed.
* @param resource
* the {@link Resource} to be removed.
* @return true if removal is successful, false otherwise
*/
public boolean removeInterval(ReservationInterval interval,
Resource resource) {
long startTime = interval.getStartTime();
long endTime = interval.getEndTime();
    // If the resource to be subtracted exceeds the minimum capacity available
    // in the range, abort the removal to avoid negative capacity.
    // TODO revisit decrementing endTime
if (!Resources.fitsIn(resource, getMinimumCapacityInInterval(new ReservationInterval(startTime, endTime - 1)))) {
LOG.info("Request to remove more resources than what is available");
return false;
}
if (((startTime >= 0) && (endTime > startTime)) && (endTime <= timePeriod)) {
return super.removeInterval(interval, resource);
} else {
LOG.info("Interval extends beyond the end time " + timePeriod);
return false;
}
} | 3.26 |
hadoop_PeriodicRLESparseResourceAllocation_addInterval_rdh | /**
* Add resource for the specified interval. This function will be used by
* {@link InMemoryPlan} while placing reservations between 0 and timePeriod.
* The interval may include 0, but the end time must be strictly less than
* timePeriod.
*
* @param interval
* {@link ReservationInterval} to which the specified resource
* is to be added.
* @param resource
* {@link Resource} to be added to the interval specified.
* @return true if addition is successful, false otherwise
 */
public boolean addInterval(ReservationInterval interval, Resource resource) {
    long startTime = interval.getStartTime();
    long endTime = interval.getEndTime();
    if (((startTime >= 0) && (endTime > startTime)) && (endTime <= timePeriod)) {
return super.addInterval(interval, resource);
} else {
LOG.info(((("Cannot set capacity beyond end time: " + timePeriod) + " was (") + interval.toString()) + ")");
return false;
}
} | 3.26 |
hadoop_PeriodicRLESparseResourceAllocation_getMaximumPeriodicCapacity_rdh | /**
* Get maximum capacity at periodic offsets from the specified time.
*
* @param tick
* UTC time base from which offsets are specified for finding the
* maximum capacity.
* @param period
* periodic offset at which capacities are evaluated.
* @return the maximum {@link Resource} across the specified time instants.
*/
public Resource getMaximumPeriodicCapacity(long tick, long period) {
Resource maxResource;
if (period < timePeriod) {
maxResource = super.getMaximumPeriodicCapacity(tick % timePeriod, period);
} else {
// if period is greater than the length of PeriodicRLESparseAllocation,
// only a single value exists in this interval.
maxResource = super.getCapacityAtTime(tick % timePeriod);
}
return maxResource;
} | 3.26 |
hadoop_PeriodicRLESparseResourceAllocation_getCapacityAtTime_rdh | /**
* Get capacity at time based on periodic repetition.
*
* @param tick
* UTC time for which the allocated {@link Resource} is queried.
* @return {@link Resource} allocated at specified time
*/
public Resource getCapacityAtTime(long tick) {
long convertedTime = tick % timePeriod;
return super.getCapacityAtTime(convertedTime);
} | 3.26 |
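
The periodic lookup above boils down to wrapping the tick into the base period and then reading a run-length encoded map. A minimal standalone sketch of that idea, using a plain `TreeMap` with integer capacities instead of the Hadoop `Resource`/RLE classes (class name and values are hypothetical):

```java
import java.util.TreeMap;

/** Minimal sketch of a periodic, run-length encoded allocation over one period. */
public class PeriodicCapacitySketch {
  private final long timePeriod;
  // start time within [0, timePeriod) -> capacity that holds until the next key
  private final TreeMap<Long, Integer> allocation = new TreeMap<>();

  PeriodicCapacitySketch(long timePeriod) {
    this.timePeriod = timePeriod;
    allocation.put(0L, 0); // default capacity
  }

  void set(long start, int capacity) {
    allocation.put(start, capacity);
  }

  /** Capacity at any UTC tick, found by wrapping the tick into the base period. */
  int getCapacityAtTime(long tick) {
    long convertedTime = tick % timePeriod;
    return allocation.floorEntry(convertedTime).getValue();
  }

  public static void main(String[] args) {
    PeriodicCapacitySketch alloc = new PeriodicCapacitySketch(1000L);
    alloc.set(200L, 5);   // capacity 5 from t=200 until t=700
    alloc.set(700L, 0);
    System.out.println(alloc.getCapacityAtTime(250));    // 5
    System.out.println(alloc.getCapacityAtTime(3250));   // 5: 3250 % 1000 = 250
    System.out.println(alloc.getCapacityAtTime(4800));   // 0: 4800 % 1000 = 800
  }
}
```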
hadoop_PeriodicRLESparseResourceAllocation_m0_rdh | /**
* Get time period of PeriodicRLESparseResourceAllocation.
*
* @return timePeriod time period represented in ms.
*/
public long m0() {
return this.timePeriod;
} | 3.26 |
hadoop_ExecutionSummarizer_getTraceSignature_rdh | // Generates a signature for the trace file based on
// - filename
// - modification time
// - file length
// - owner
protected static String getTraceSignature(String input) throws IOException {
Path inputPath = new Path(input);
    FileSystem fs = inputPath.getFileSystem(new Configuration());
    FileStatus status = fs.getFileStatus(inputPath);
Path qPath = fs.makeQualified(status.getPath());
String traceID = ((status.getModificationTime() + qPath.toString()) + status.getOwner()) + status.getLen();
return MD5Hash.digest(traceID).toString();
} | 3.26 |
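
The signature is simply an MD5 hex digest over the concatenated file metadata. `MD5Hash` is a Hadoop helper; a standalone sketch of the same idea with the JDK's `MessageDigest` (class name and sample values are made up):

```java
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public class TraceSignatureSketch {
  /** Build an MD5 hex digest over concatenated file metadata, mirroring the idea above. */
  static String signature(long modificationTime, String qualifiedPath, String owner, long length)
      throws NoSuchAlgorithmException {
    String traceID = (modificationTime + qualifiedPath) + owner + length;
    byte[] digest = MessageDigest.getInstance("MD5")
        .digest(traceID.getBytes(StandardCharsets.UTF_8));
    StringBuilder hex = new StringBuilder();
    for (byte b : digest) {
      hex.append(String.format("%02x", b & 0xff));
    }
    return hex.toString();
  }

  public static void main(String[] args) throws NoSuchAlgorithmException {
    // Same metadata always yields the same signature; any change produces a new one.
    System.out.println(signature(1700000000000L, "hdfs://nn/traces/job.json", "alice", 4096L));
  }
}
```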
hadoop_ExecutionSummarizer_stringifyDataStatistics_rdh | // Gets the stringified version of DataStatistics
static String stringifyDataStatistics(DataStatistics stats) {
if (stats != null) {
      StringBuffer buffer = new StringBuffer();
      String compressionStatus = (stats.isDataCompressed()) ? "Compressed" : "Uncompressed";
      buffer.append(compressionStatus).append(" input data size: ");
      buffer.append(StringUtils.humanReadableInt(stats.getDataSize()));
      buffer.append(", ");
      buffer.append("Number of files: ").append(stats.getNumFiles());
      return buffer.toString();
} else {
return Summarizer.NA;
}
} | 3.26 |
hadoop_ExecutionSummarizer_m0_rdh | // Getters
protected String m0() {
return expectedDataSize;
} | 3.26 |
hadoop_ExecutionSummarizer_toString_rdh | /**
* Summarizes the current {@link Gridmix} run.
*/
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("Execution Summary:-");
builder.append("\nInput trace: ").append(getInputTraceLocation());
builder.append("\nInput trace signature: ").append(getInputTraceSignature());
builder.append("\nTotal number of jobs in trace: ").append(getNumJobsInTrace());
builder.append("\nExpected input data size: ").append(m0());builder.append("\nInput data statistics: ").append(getInputDataStatistics());
builder.append("\nTotal number of jobs processed: ").append(getNumSubmittedJobs());
builder.append("\nTotal number of successful jobs: ").append(getNumSuccessfulJobs());
builder.append("\nTotal number of failed jobs: ").append(getNumFailedJobs());
builder.append("\nTotal number of lost jobs: ").append(getNumLostJobs());
builder.append("\nTotal number of map tasks launched: ").append(getNumMapTasksLaunched());
builder.append("\nTotal number of reduce task launched: ").append(getNumReduceTasksLaunched());
builder.append("\nGridmix start time: ").append(UTIL.format(getStartTime()));
builder.append("\nGridmix end time: ").append(UTIL.format(getEndTime()));
builder.append("\nGridmix simulation start time: ").append(UTIL.format(getStartTime()));
builder.append("\nGridmix runtime: ").append(StringUtils.formatTime(getRuntime()));
builder.append("\nTime spent in initialization (data-gen etc): ").append(StringUtils.formatTime(getInitTime()));
builder.append("\nTime spent in simulation: ").append(StringUtils.formatTime(getSimulationTime()));
builder.append("\nGridmix configuration parameters: ").append(getCommandLineArgsString());
builder.append("\nGridmix job submission policy: ").append(getJobSubmissionPolicy());
builder.append("\nGridmix resolver: ").append(getUserResolver()); builder.append("\n\n");
return builder.toString();
} | 3.26 |
hadoop_SocksSocketFactory_setProxy_rdh | /**
* Set the proxy of this socket factory as described in the string
* parameter
*
* @param proxyStr
* the proxy address using the format "host:port"
*/
private void setProxy(String proxyStr) {
    String[] strs = proxyStr.split(":", 2);
    if (strs.length != 2)
      throw new RuntimeException("Bad SOCKS proxy parameter: " + proxyStr);
String host = strs[0];
int port = Integer.parseInt(strs[1]);
this.proxy = new Proxy(Proxy.Type.SOCKS, InetSocketAddress.createUnresolved(host, port));
} | 3.26 |
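
The parsing here uses only standard JDK API, so it can be exercised in isolation. A small sketch with a hypothetical class name and example host:

```java
import java.net.InetSocketAddress;
import java.net.Proxy;

public class SocksProxyParseSketch {
  static Proxy parseProxy(String proxyStr) {
    String[] strs = proxyStr.split(":", 2);
    if (strs.length != 2) {
      throw new RuntimeException("Bad SOCKS proxy parameter: " + proxyStr);
    }
    String host = strs[0];
    int port = Integer.parseInt(strs[1]);
    // createUnresolved defers DNS resolution to the SOCKS proxy itself.
    return new Proxy(Proxy.Type.SOCKS, InetSocketAddress.createUnresolved(host, port));
  }

  public static void main(String[] args) {
    System.out.println(parseProxy("socks-gw.example.com:1080"));
    // prints something like: SOCKS @ socks-gw.example.com:1080
  }
}
```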
hadoop_RouterSafemodeService_enter_rdh | /**
* Enter safe mode.
*/
private void enter() {
LOG.info("Entering safe mode");
    enterSafeModeTime = monotonicNow();
    safeMode = true;
router.updateRouterState(RouterServiceState.SAFEMODE);
} | 3.26 |
hadoop_RouterSafemodeService_leave_rdh | /**
* Leave safe mode.
*/
private void leave() {
// Cache recently updated, leave safemode
long timeInSafemode = monotonicNow() - enterSafeModeTime;
LOG.info("Leaving safe mode after {} milliseconds", timeInSafemode);
RouterMetrics routerMetrics = router.getRouterMetrics();
if (routerMetrics == null) {
LOG.error("The Router metrics are not enabled");
} else {
routerMetrics.setSafeModeTime(timeInSafemode);
}
safeMode = false;
router.updateRouterState(RouterServiceState.RUNNING);
} | 3.26 |
hadoop_RouterSafemodeService_isInSafeMode_rdh | /**
* Return whether the current Router is in safe mode.
*/
boolean isInSafeMode() {
return this.safeMode;
} | 3.26 |
hadoop_RouterSafemodeService_setManualSafeMode_rdh | /**
* Set the flag to indicate that the safe mode for this Router is set manually
* via the Router admin command.
*/
void setManualSafeMode(boolean mode) {
this.safeMode = mode;
this.isSafeModeSetManually = mode;
} | 3.26 |
hadoop_PutTracker_initialize_rdh | /**
* Startup event.
*
* @return true if the multipart should start immediately.
* @throws IOException
* any IO problem.
*/
public boolean initialize() throws IOException {
return false;
} | 3.26 |
hadoop_PutTracker_getDestKey_rdh | /**
* get the destination key. The default implementation returns the
* key passed in: there is no adjustment of the destination.
*
* @return the destination to use in PUT requests.
*/
public String getDestKey() {
return destKey;
} | 3.26 |
hadoop_PutTracker_m0_rdh | /**
* Flag to indicate that output is not immediately visible after the stream
* is closed. Default: false.
*
* @return true if the output's visibility will be delayed.
*/
public boolean m0() {
return true;
} | 3.26 |
hadoop_ClientRegistryBinder_homePathForUser_rdh | /**
 * Build the user path - switches to the system path if the user is "".
* It also cross-converts the username to ascii via punycode
*
* @param username
* username or ""
* @return the path to the user
*/
public static String homePathForUser(String username) {
Preconditions.checkArgument(username != null, "null user");
// catch recursion
if (username.startsWith(RegistryConstants.PATH_USERS)) {
return username;
}
if (username.isEmpty()) {
return RegistryConstants.PATH_SYSTEM_SERVICES;
    }
    // convert username to registry name
String convertedName = convertUsername(username);
return RegistryPathUtils.join(RegistryConstants.PATH_USERS, encodeForRegistry(convertedName));
} | 3.26 |
hadoop_ClientRegistryBinder_getEndpoint_rdh | /**
 * Get an endpoint by API.
*
* @param record
* service record
* @param api
* API
* @param external
* flag to indicate this is an external record
* @return the endpoint or null
*/
public static Endpoint getEndpoint(ServiceRecord record, String api, boolean external) {
return external ? record.getExternalEndpoint(api) : record.getInternalEndpoint(api);
} | 3.26 |
hadoop_ClientRegistryBinder_qualifyUser_rdh | /**
* Qualify a user.
* <ol>
* <li> <code>"~"</code> maps to user home path home</li>
* <li> <code>"~user"</code> maps to <code>/users/$user</code></li>
* <li> <code>"/"</code> maps to <code>/services/</code></li>
* </ol>
*
* @param user
* the username
* @return the base path
*/
public static String qualifyUser(String user) {
// qualify the user
String t = user.trim();
if (t.startsWith("/")) {
// already resolved
return t;
} else if (t.equals("~")) {
// self
return currentUsernameUnencoded();
} else if (t.startsWith("~")) {
// another user
// convert username to registry name
String convertedName = convertUsername(t.substring(1));
return RegistryPathUtils.join(RegistryConstants.PATH_USERS, encodeForRegistry(convertedName));
    } else {
      return "/" + t;
    }
} | 3.26 |
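
A simplified stand-in for the mapping rules above, which skips the punycode conversion done by `convertUsername()`/`encodeForRegistry()` and hard-codes the `/users` prefix (class name and sample users are hypothetical):

```java
public class QualifyUserSketch {
  // Simplified stand-in; the real code punycode-encodes the name and uses registry path helpers.
  static String qualify(String user, String currentUser) {
    String t = user.trim();
    if (t.startsWith("/")) {
      return t;                        // already an absolute registry path
    } else if (t.equals("~")) {
      return "/users/" + currentUser;  // current user's home path
    } else if (t.startsWith("~")) {
      return "/users/" + t.substring(1);
    } else {
      return "/" + t;
    }
  }

  public static void main(String[] args) {
    System.out.println(qualify("~", "alice"));             // /users/alice
    System.out.println(qualify("~bob", "alice"));          // /users/bob
    System.out.println(qualify("services/web", "alice"));  // /services/web
    System.out.println(qualify("/custom/path", "alice"));  // /custom/path
  }
}
```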
hadoop_ClientRegistryBinder_m0_rdh | /**
* Look up an external REST API
*
* @param user
* user which will be qualified as per {@link #qualifyUser(String)}
* @param serviceClass
* service class
* @param instance
* instance name
* @param api
* API
* @return the API, or an exception is raised.
* @throws IOException
*/
public String m0(String user, String serviceClass, String instance, String api) throws IOException {
String qualified = qualifyUser(user);
String path = servicePath(qualified, serviceClass, instance);
String restAPI = resolveExternalRestAPI(api, path);
if (restAPI == null) {
throw new PathNotFoundException((path + " API ") + api);
}
return restAPI;
} | 3.26 |
hadoop_IdentityTransformer_transformAclEntriesForSetRequest_rdh | /**
* Perform Identity transformation when calling setAcl(),removeAclEntries() and modifyAclEntries()
* If the AclEntry type is a user or group, and its name is one of the following:
* 1.short name; 2.$superuser; 3.Fully qualified name; 4. principal id.
* <pre>
* Short name could be transformed to:
 * - A service principal id or $superuser, if the short name belongs to a daemon service
 * stated in the substitution list AND "fs.azure.identity.transformer.service.principal.id"
* is set with $superuser or a principal id.
* - A fully qualified name, if the AclEntry type is User AND if "fs.azure.identity.transformer.domain.name"
* is set in configuration. This is to make the behavior consistent with HDI.
*
* $superuser, fully qualified name and principal id should not be transformed.
* </pre>
*
* @param aclEntries
* list of AclEntry
*/
public void transformAclEntriesForSetRequest(final List<AclEntry> aclEntries) {
if (skipUserIdentityReplacement) {
return;
}
    for (int i = 0; i < aclEntries.size(); i++) {
      AclEntry aclEntry = aclEntries.get(i);
String name = aclEntry.getName();
String transformedName = name;
if ((((name == null) || name.isEmpty()) || aclEntry.getType().equals(AclEntryType.OTHER)) || aclEntry.getType().equals(AclEntryType.MASK)) {
continue;
}
// case 1: when the user or group name to be set is stated in substitution list.
if (isInSubstitutionList(name)) {
transformedName = servicePrincipalId;
      } else if (aclEntry.getType().equals(AclEntryType.USER)
          && shouldUseFullyQualifiedUserName(name)) {
        // case 2: when the owner is a short name of the user principal name (UPN).
        // Notice: for a group type ACL entry, a short name won't be converted
        // to a fully qualified name. This is to make the behavior consistent with HDI.
transformedName = getFullyQualifiedName(name);
}
// Avoid unnecessary new AclEntry allocation
if (transformedName.equals(name)) {
continue;
}
AclEntry.Builder aclEntryBuilder = new AclEntry.Builder();
aclEntryBuilder.setType(aclEntry.getType());
aclEntryBuilder.setName(transformedName);
aclEntryBuilder.setScope(aclEntry.getScope());
aclEntryBuilder.setPermission(aclEntry.getPermission());
// Replace the original AclEntry
aclEntries.set(i, aclEntryBuilder.build());
}
} | 3.26 |
hadoop_IdentityTransformer_isShortUserName_rdh | /**
* Internal method to identify if owner name returned by the ADLS backend is short name or not.
* If name contains "@", this code assumes that whatever comes after '@' is domain name and ignores it.
*
 * @param owner owner name returned by the backend.
 * @return true if the owner name contains no domain part.
 */
private boolean isShortUserName(String owner) {
return (owner != null) && (!owner.contains(AT));
} | 3.26 |
hadoop_IdentityTransformer_transformAclEntriesForGetRequest_rdh | /**
* Perform Identity transformation when calling GetAclStatus()
* If the AclEntry type is a user or group, and its name is one of the following:
* <pre>
* 1. $superuser:
* by default it will be transformed to local user/group, this can be disabled by setting
* "fs.azure.identity.transformer.skip.superuser.replacement" to true.
*
* 2. User principal id:
* can be transformed to localUser/localGroup, if this principal id matches the principal id set in
* "fs.azure.identity.transformer.service.principal.id" and localIdentity is stated in
* "fs.azure.identity.transformer.service.principal.substitution.list"
*
* 3. User principal name (UPN):
* can be transformed to a short name(local identity) if originalIdentity is owner name, and
* "fs.azure.identity.transformer.enable.short.name" is enabled.
* </pre>
*
* @param aclEntries
* list of AclEntry
* @param localUser
* local user name
* @param localGroup
* local primary group
*/
public void transformAclEntriesForGetRequest(final List<AclEntry> aclEntries, String localUser, String localGroup) throws IOException {
if (skipUserIdentityReplacement) {
return;
    }
    for (int i = 0; i < aclEntries.size(); i++) {
AclEntry aclEntry = aclEntries.get(i);
String name = aclEntry.getName();
String transformedName = name;
if ((((name == null) || name.isEmpty()) || aclEntry.getType().equals(AclEntryType.OTHER)) || aclEntry.getType().equals(AclEntryType.MASK)) {
continue;
}
// when type of aclEntry is user or group
if (aclEntry.getType().equals(AclEntryType.USER)) {
transformedName = transformIdentityForGetRequest(name, true, localUser);
} else if (aclEntry.getType().equals(AclEntryType.GROUP)) {
transformedName = transformIdentityForGetRequest(name, false, localGroup);
}
// Avoid unnecessary new AclEntry allocation
if (transformedName.equals(name)) {
continue;
      }
      AclEntry.Builder aclEntryBuilder = new AclEntry.Builder();
aclEntryBuilder.setType(aclEntry.getType());
aclEntryBuilder.setName(transformedName);
aclEntryBuilder.setScope(aclEntry.getScope());
aclEntryBuilder.setPermission(aclEntry.getPermission());
// Replace the original AclEntry
aclEntries.set(i, aclEntryBuilder.build());
}
} | 3.26 |
hadoop_IdentityTransformer_transformIdentityForGetRequest_rdh | /**
* Perform identity transformation for the Get request results in AzureBlobFileSystemStore:
* getFileStatus(), listStatus(), getAclStatus().
* Input originalIdentity can be one of the following:
* <pre>
* 1. $superuser:
* by default it will be transformed to local user/group, this can be disabled by setting
* "fs.azure.identity.transformer.skip.superuser.replacement" to true.
*
* 2. User principal id:
* can be transformed to localIdentity, if this principal id matches the principal id set in
* "fs.azure.identity.transformer.service.principal.id" and localIdentity is stated in
* "fs.azure.identity.transformer.service.principal.substitution.list"
*
* 3. User principal name (UPN):
* can be transformed to a short name(localIdentity) if originalIdentity is owner name, and
* "fs.azure.identity.transformer.enable.short.name" is enabled.
* </pre>
*
* @param originalIdentity
* the original user or group in the get request results: FileStatus, AclStatus.
* @param isUserName
* indicate whether the input originalIdentity is an owner name or owning group name.
* @param localIdentity
* the local user or group, should be parsed from UserGroupInformation.
* @return owner or group after transformation.
*/
public String transformIdentityForGetRequest(String originalIdentity, boolean isUserName, String localIdentity) throws IOException {
if (originalIdentity == null) {
originalIdentity = localIdentity;
// localIdentity might be a full name, so continue the transformation.
}
// case 1: it is $superuser and replace $superuser config is enabled
if ((!skipSuperUserReplacement) && SUPER_USER.equals(originalIdentity)) {
return localIdentity;
}
if (skipUserIdentityReplacement) {
return originalIdentity;
}
// case 2: original owner is principalId set in config, and localUser
// is a daemon service specified in substitution list,
// To avoid ownership check failure in job task, replace it
// to local daemon user/group
if (originalIdentity.equals(servicePrincipalId) && isInSubstitutionList(localIdentity)) {
return localIdentity;
}
// case 3: If original owner is a fully qualified name, and
// short name is enabled, replace with shortName.
if (isUserName && shouldUseShortUserName(originalIdentity)) {
return getShortName(originalIdentity);
}
return originalIdentity;
} | 3.26 |
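
A condensed sketch of the three cases, using plain strings and hard-coded stand-ins for the configuration values; it ignores the substitution list and the skip flags, so it is an illustration rather than the real transformer:

```java
public class IdentityTransformSketch {
  // Hypothetical values standing in for the Hadoop configuration settings.
  static final String SUPER_USER = "$superuser";
  static final String SERVICE_PRINCIPAL_ID = "11111111-2222-3333-4444-555555555555";

  static String transformForGet(String original, boolean isUserName, String localIdentity) {
    if (original == null) {
      original = localIdentity;
    }
    if (SUPER_USER.equals(original)) {
      return localIdentity;                                  // case 1: $superuser -> local identity
    }
    if (original.equals(SERVICE_PRINCIPAL_ID)) {
      return localIdentity;                                  // case 2: configured principal id
    }
    if (isUserName && original.contains("@")) {
      return original.substring(0, original.indexOf('@'));   // case 3: UPN -> short name
    }
    return original;
  }

  public static void main(String[] args) {
    System.out.println(transformForGet("$superuser", true, "hdfs"));          // hdfs
    System.out.println(transformForGet("alice@example.com", true, "alice"));  // alice
    System.out.println(transformForGet("datagroup", false, "hadoop"));        // datagroup
  }
}
```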
hadoop_HsSingleCounterPage_m0_rdh | /**
* The content of this page is the CountersBlock now.
*
* @return CountersBlock.class
*/
@Override
protected Class<? extends SubView> m0() {
    return SingleCounterBlock.class;
} | 3.26 |
hadoop_HsSingleCounterPage_preHead_rdh | /* (non-Javadoc)
@see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
*/
@Override
protected void preHead(Page.HTML<__> html) {
commonPreHead(html);
setActiveNavColumnForTask();
set(DATATABLES_ID, "singleCounter");
set(initID(DATATABLES, "singleCounter"), counterTableInit());
setTableStyles(html, "singleCounter");
}
/**
*
* @return The end of a javascript map that is the jquery datatable
configuration for the jobs table. the Jobs table is assumed to be
rendered by the class returned from {@link #content()} | 3.26 |
hadoop_StorageStatisticsFromIOStatistics_toLongStatistic_rdh | /**
* Convert a counter/gauge entry to a long statistics.
*
* @param e
* entry
* @return statistic
*/
  private LongStatistic toLongStatistic(final Map.Entry<String, Long> e) {
return new LongStatistic(e.getKey(), e.getValue());
} | 3.26 |
hadoop_ErrorMetricUpdater_hook_rdh | /**
* Hooks a new listener to the given operationContext that will update the
* error metrics for the WASB file system appropriately in response to
* ResponseReceived events.
*
* @param operationContext
* The operationContext to hook.
* @param instrumentation
* The metrics source to update.
*/
public static void hook(OperationContext operationContext, AzureFileSystemInstrumentation instrumentation) {
ErrorMetricUpdater listener = new ErrorMetricUpdater(operationContext, instrumentation);
operationContext.getResponseReceivedEventHandler().addListener(listener);
} | 3.26 |
hadoop_KMSExceptionsProvider_toResponse_rdh | /**
* Maps different exceptions thrown by KMS to HTTP status codes.
*/
@Override
public Response toResponse(Exception exception) {
Response.Status status;
    boolean doAudit = true;
Throwable throwable = exception;
if (exception instanceof ContainerException) {
throwable = exception.getCause();
}
if (throwable instanceof SecurityException) {
status = Status.FORBIDDEN;
} else if (throwable instanceof AuthenticationException) {
status = Status.FORBIDDEN;
// we don't audit here because we did it already when checking access
doAudit = false;
} else if (throwable instanceof AuthorizationException) {
status = Status.FORBIDDEN;
// we don't audit here because we did it already when checking access
doAudit = false;
} else if (throwable instanceof AccessControlException) {
status = Status.FORBIDDEN;
    } else if (exception instanceof IOException) {
      status = Status.INTERNAL_SERVER_ERROR;
log(status, throwable);
} else if (exception instanceof UnsupportedOperationException) {
status = Status.BAD_REQUEST;
} else if (exception instanceof IllegalArgumentException) {
status = Status.BAD_REQUEST;
} else {
status = Status.INTERNAL_SERVER_ERROR;
log(status, throwable);
}
if (doAudit)
{
KMSWebApp.getKMSAudit().error(KMSMDCFilter.getUgi(), KMSMDCFilter.getMethod(), KMSMDCFilter.getURL(), getOneLineMessage(exception));
}
EXCEPTION_LOG.warn("User {} request {} {} caused exception.", KMSMDCFilter.getUgi(), KMSMDCFilter.getMethod(), KMSMDCFilter.getURL(), exception);
return createResponse(status, throwable);
} | 3.26 |
hadoop_TaskInfo_getInputRecords_rdh | /**
*
* @return Number of records input to this task.
*/
public int getInputRecords() {
return recsIn;
} | 3.26 |
hadoop_TaskInfo_getTaskVCores_rdh | /**
*
* @return Vcores used by the task.
*/
public long getTaskVCores() {
return maxVcores;
} | 3.26 |
hadoop_TaskInfo_getResourceUsageMetrics_rdh | /**
*
* @return Resource usage metrics
*/
public ResourceUsageMetrics getResourceUsageMetrics() {
return metrics;
} | 3.26 |
hadoop_TaskInfo_getTaskMemory_rdh | /**
*
 * @return Memory used by the task, less than or equal to the heap size.
*/
public long getTaskMemory() {
return maxMemory;
} | 3.26 |
hadoop_TimelineReaderClient_createTimelineReaderClient_rdh | /**
* Create a new instance of Timeline Reader Client.
*
* @return instance of Timeline Reader Client.
*/
@InterfaceAudience.Public
public static TimelineReaderClient createTimelineReaderClient() {
return new TimelineReaderClientImpl();
} | 3.26 |
hadoop_RegexMountPointResolvedDstPathReplaceInterceptor_interceptRemainingPath_rdh | /**
* The interceptRemainingPath will just return the remainingPath passed in.
*/
@Override
public Path interceptRemainingPath(Path remainingPath) {
return remainingPath;
} | 3.26 |
hadoop_RegexMountPointResolvedDstPathReplaceInterceptor_interceptSource_rdh | /**
* Source won't be changed in the interceptor.
*
* @return source param string passed in.
*/
@Override
public String interceptSource(String source) {
return source;
} | 3.26 |
hadoop_RegexMountPointResolvedDstPathReplaceInterceptor_deserializeFromString_rdh | /**
* Create interceptor from config string. The string should be in
* replaceresolvedpath:wordToReplace:replaceString
* Note that we'll assume there's no ':' in the regex for the moment.
*
* @return Interceptor instance or null on bad config.
*/
public static RegexMountPointResolvedDstPathReplaceInterceptor deserializeFromString(String serializedString) {
String[] strings = serializedString.split(Character.toString(RegexMountPoint.INTERCEPTOR_INTERNAL_SEP));
// We'll assume there's no ':' in the regex for the moment.
if (strings.length != 3) {
return null;
}
// The format should be like replaceresolvedpath:wordToReplace:replaceString
return new RegexMountPointResolvedDstPathReplaceInterceptor(strings[1], strings[2]);
} | 3.26 |
hadoop_RegexMountPointResolvedDstPathReplaceInterceptor_m1_rdh | /**
* Intercept resolved path, e.g.
* Mount point /^(\\w+)/, ${1}.hadoop.net
* If incoming path is /user1/home/tmp/job1,
* then the resolved path str will be user1.
*
* @return intercepted string
*/
@Override
public String m1(String parsedDestPathStr) {
Matcher matcher = srcRegexPattern.matcher(parsedDestPathStr);
return matcher.replaceAll(replaceString);
} | 3.26 |
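
The interception is just `Matcher.replaceAll` with a configured regex and replacement. A standalone sketch with a hypothetical source regex and replacement string (using Java's `$1` back-reference syntax):

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class DstPathReplaceSketch {
  public static void main(String[] args) {
    // Hypothetical source regex and replacement, in the spirit of the javadoc above.
    Pattern srcRegexPattern = Pattern.compile("^/user/(\\w+)");
    String replaceString = "/remote/$1";
    Matcher matcher = srcRegexPattern.matcher("/user/user1/home/tmp/job1");
    System.out.println(matcher.replaceAll(replaceString));
    // prints: /remote/user1/home/tmp/job1
  }
}
```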
hadoop_MountTableProcedure_updateMountTableDestination_rdh | /**
* Update the destination of the mount point to target namespace and target
* path.
*
* @param mount
* the mount point.
* @param dstNs
* the target namespace.
* @param dstPath
* the target path
* @param conf
* the configuration of the router.
*/
private static void updateMountTableDestination(String mount, String dstNs, String dstPath, Configuration conf) throws IOException {
String address = conf.getTrimmed(RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY, RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_DEFAULT);
InetSocketAddress routerSocket = NetUtils.createSocketAddr(address);
RouterClient rClient = new RouterClient(routerSocket, conf);
try {
MountTableManager mountTable = rClient.getMountTableManager();
MountTable originalEntry = getMountEntry(mount, mountTable);
if (originalEntry == null) {
throw new IOException(("Mount table " + mount) + " doesn't exist");
} else {
RemoteLocation remoteLocation = new RemoteLocation(dstNs, dstPath, mount);
originalEntry.setDestinations(Arrays.asList(remoteLocation));
UpdateMountTableEntryRequest v6 = UpdateMountTableEntryRequest.newInstance(originalEntry);
UpdateMountTableEntryResponse response = mountTable.updateMountTableEntry(v6);
if (!response.getStatus()) {
throw new IOException("Failed update mount table " + mount);
}
rClient.getMountTableManager().refreshMountTableEntries(RefreshMountTableEntriesRequest.newInstance());
}
} finally {
rClient.close();
}
} | 3.26 |
hadoop_MountTableProcedure_enableWrite_rdh | /**
* Enable write by cancelling the mount point readonly.
*
* @param mount
* the mount point to cancel readonly.
* @param conf
* the configuration of the router.
*/
static void enableWrite(String mount, Configuration conf) throws IOException {
setMountReadOnly(mount, false, conf);
} | 3.26 |
hadoop_MountTableProcedure_setMountReadOnly_rdh | /**
* Enable or disable readonly of the mount point.
*
* @param mount
* the mount point.
* @param readOnly
* enable or disable readonly.
* @param conf
* the configuration of the router.
*/
private static void setMountReadOnly(String mount, boolean readOnly, Configuration conf) throws IOException {
    String address = conf.getTrimmed(RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY, RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_DEFAULT);
    InetSocketAddress routerSocket = NetUtils.createSocketAddr(address);
RouterClient rClient = new RouterClient(routerSocket, conf);
try {
MountTableManager mountTable = rClient.getMountTableManager();
MountTable originalEntry = getMountEntry(mount, mountTable);
if (originalEntry == null) {
throw new IOException(("Mount table " + mount) + " doesn't exist");
} else {
originalEntry.setReadOnly(readOnly);
UpdateMountTableEntryRequest updateRequest = UpdateMountTableEntryRequest.newInstance(originalEntry);
UpdateMountTableEntryResponse response = mountTable.updateMountTableEntry(updateRequest);
if (!response.getStatus()) {
throw new IOException((("Failed update mount table " + mount) + " with readonly=") + readOnly);
}
rClient.getMountTableManager().refreshMountTableEntries(RefreshMountTableEntriesRequest.newInstance());
}
} finally {
rClient.close();
}
} | 3.26 |
hadoop_MountTableProcedure_getMountEntry_rdh | /**
* Gets the mount table entry.
*
* @param mount
* name of the mount entry.
* @param mountTable
* the mount table.
* @return corresponding mount entry.
* @throws IOException
* in case of failure to retrieve mount entry.
*/
public static MountTable getMountEntry(String mount, MountTableManager mountTable) throws IOException {
GetMountTableEntriesRequest getRequest = GetMountTableEntriesRequest.newInstance(mount);
GetMountTableEntriesResponse getResponse = mountTable.getMountTableEntries(getRequest);
List<MountTable> results = getResponse.getEntries();
MountTable existingEntry = null;
for (MountTable result : results) {
if (mount.equals(result.getSourcePath())) {
existingEntry = result;
        break;
      }
    }
    return existingEntry;
} | 3.26 |
hadoop_MountTableProcedure_disableWrite_rdh | /**
* Disable write by making the mount point readonly.
*
* @param mount
* the mount point to set readonly.
* @param conf
* the configuration of the router.
*/
static void disableWrite(String mount, Configuration conf) throws IOException {
setMountReadOnly(mount, true, conf);
} | 3.26 |
hadoop_MutableCounterInt_incr_rdh | /**
* Increment the value by a delta
*
* @param delta
* of the increment
*/
public synchronized void incr(int delta) {
value.addAndGet(delta);
setChanged();
} | 3.26 |
hadoop_LongLong_set_rdh | /**
* Set the values.
*/
LongLong set(long d0, long d1) {
this.d0 = d0;
this.d1 = d1;
return this;
} | 3.26 |
hadoop_LongLong_and_rdh | /**
* And operation (&).
*/
long and(long mask) {
return d0 & mask;
} | 3.26 |
hadoop_LongLong_multiplication_rdh | /**
* Compute a*b and store the result to r.
*
* @return r
*/
  static LongLong multiplication(final LongLong r, final long a, final long b) {
/* final long x0 = a & LOWER_MASK;
final long x1 = (a & UPPER_MASK) >> MID;
final long y0 = b & LOWER_MASK;
final long y1 = (b & UPPER_MASK) >> MID;
final long t = (x0 + x1)*(y0 + y1);
final long u = (x0 - x1)*(y0 - y1);
final long v = x1*y1;
final long tmp = (t - u)>>>1;
result.d0 = ((t + u)>>>1) - v + ((tmp << MID) & FULL_MASK);
result.d1 = v + (tmp >> MID);
return result;
*/
final long a_lower = a & LOWER_MASK;
final long a_upper = (a & UPPER_MASK) >> MID;
final long b_lower = b & LOWER_MASK;
final long b_upper = (b & UPPER_MASK) >> MID;
final long tmp = (a_lower * b_upper) + (a_upper * b_lower);
    r.d0 = (a_lower * b_lower) + ((tmp << MID) & FULL_MASK);
r.d1 = (a_upper * b_upper) + (tmp >> MID);
return r;
} | 3.26 |
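
The split-and-recombine above can be checked against `BigInteger`. A standalone sketch, assuming the usual `LongLong` constants (62 significant bits per long, 31-bit halves), which are not shown in this snippet:

```java
import java.math.BigInteger;

public class LongLongMultiplyCheck {
  // Constants assumed from the wider LongLong class (not shown in the snippet above):
  // values are stored base 2^62, each long holding 62 significant bits.
  static final int BITS_PER_LONG = 62;
  static final int MID = BITS_PER_LONG >> 1;               // 31
  static final long FULL_MASK = (1L << BITS_PER_LONG) - 1;
  static final long LOWER_MASK = FULL_MASK >>> MID;
  static final long UPPER_MASK = LOWER_MASK << MID;

  /** Multiply two non-negative 62-bit longs into {d0, d1} with value = d1*2^62 + d0. */
  static long[] multiply(long a, long b) {
    final long aLower = a & LOWER_MASK;
    final long aUpper = (a & UPPER_MASK) >> MID;
    final long bLower = b & LOWER_MASK;
    final long bUpper = (b & UPPER_MASK) >> MID;
    final long tmp = aLower * bUpper + aUpper * bLower;     // cross terms
    final long d0 = aLower * bLower + ((tmp << MID) & FULL_MASK);
    final long d1 = aUpper * bUpper + (tmp >> MID);
    return new long[] {d0, d1};
  }

  public static void main(String[] args) {
    long a = (1L << 45) + 12345L;
    long b = (1L << 40) + 67890L;
    long[] r = multiply(a, b);
    BigInteger expected = BigInteger.valueOf(a).multiply(BigInteger.valueOf(b));
    BigInteger actual = BigInteger.valueOf(r[1]).shiftLeft(BITS_PER_LONG)
        .add(BigInteger.valueOf(r[0]));
    System.out.println(expected.equals(actual));            // true
  }
}
```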
hadoop_LongLong_plusEqual_rdh | /**
* Plus equal operation (+=).
*/
LongLong plusEqual(LongLong that) {
this.d0 += that.d0;
this.d1 += that.d1;
return this;
} | 3.26 |
hadoop_LongLong_toBigInteger_rdh | /**
* Convert this to a BigInteger.
*/
BigInteger toBigInteger() {
return BigInteger.valueOf(d1).shiftLeft(BITS_PER_LONG).add(BigInteger.valueOf(d0));
} | 3.26 |
hadoop_LongLong_shiftRight_rdh | /**
* Shift right operation (>>).
*/
long shiftRight(int n) {
return (d1 << (BITS_PER_LONG - n)) + (d0 >>> n);
} | 3.26 |
hadoop_LongLong_toString_rdh | /**
* {@inheritDoc }
*/
@Override
public String toString() {
final int remainder = BITS_PER_LONG % 4;
return String.format("%x*2^%d + %016x", d1 << remainder, BITS_PER_LONG - remainder, d0);
} | 3.26 |
hadoop_RemoteEditLogManifest_checkState_rdh | /**
* Check that the logs are non-overlapping sequences of transactions,
* in sorted order. They do not need to be contiguous.
*
* @throws IllegalStateException
* if incorrect
 */
private void checkState() {
Preconditions.checkNotNull(logs);
RemoteEditLog prev = null;
    for (RemoteEditLog log : logs) {
if (prev != null) {
if (log.getStartTxId() <= prev.getEndTxId()) {
throw new IllegalStateException((((("Invalid log manifest (log " +
log) + " overlaps ") + prev) + ")\n") + this);}
}
prev = log;
}
} | 3.26 |
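
The invariant is easy to see with plain ranges. A standalone sketch using `long[]` pairs in place of `RemoteEditLog` (overlap means `start <= previous end`; gaps are allowed):

```java
import java.util.Arrays;
import java.util.List;

public class EditLogManifestCheckSketch {
  /** Each log is a [startTxId, endTxId] range; ranges must be sorted and non-overlapping. */
  static void checkState(List<long[]> logs) {
    long[] prev = null;
    for (long[] log : logs) {
      if (prev != null && log[0] <= prev[1]) {
        throw new IllegalStateException("Invalid log manifest (log "
            + Arrays.toString(log) + " overlaps " + Arrays.toString(prev) + ")");
      }
      prev = log;
    }
  }

  public static void main(String[] args) {
    checkState(Arrays.asList(new long[]{1, 10}, new long[]{15, 20}));  // ok: gaps are allowed
    try {
      checkState(Arrays.asList(new long[]{1, 10}, new long[]{10, 20}));
    } catch (IllegalStateException e) {
      System.out.println(e.getMessage());  // overlap: 10 <= 10
    }
  }
}
```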
hadoop_ZKSignerSecretProvider_createCuratorClient_rdh | /**
* This method creates the Curator client and connects to ZooKeeper.
*
* @param config
* configuration properties
* @return A Curator client
*/
protected CuratorFramework createCuratorClient(Properties config) {
String connectionString = config.getProperty(ZOOKEEPER_CONNECTION_STRING, "localhost:2181");
String authType = config.getProperty(ZOOKEEPER_AUTH_TYPE, "none");
String keytab = config.getProperty(f0, "").trim();
    String principal = config.getProperty(ZOOKEEPER_KERBEROS_PRINCIPAL, "").trim();
    boolean sslEnabled = Boolean.parseBoolean(config.getProperty(ZOOKEEPER_SSL_ENABLED, "false"));
String keystoreLocation = config.getProperty(ZOOKEEPER_SSL_KEYSTORE_LOCATION, "");
String keystorePassword = config.getProperty(ZOOKEEPER_SSL_KEYSTORE_PASSWORD, "");
String truststoreLocation = config.getProperty(ZOOKEEPER_SSL_TRUSTSTORE_LOCATION, "");
String truststorePassword = config.getProperty(ZOOKEEPER_SSL_TRUSTSTORE_PASSWORD, "");
CuratorFramework zkClient = ZookeeperClient.configure().withConnectionString(connectionString).withAuthType(authType).withKeytab(keytab).withPrincipal(principal).withJaasLoginEntryName(f2).enableSSL(sslEnabled).withKeystore(keystoreLocation).withKeystorePassword(keystorePassword).withTruststore(truststoreLocation).withTruststorePassword(truststorePassword).create();
zkClient.start();
    return zkClient;
} | 3.26 |
hadoop_ZKSignerSecretProvider_generateZKData_rdh | /**
* Serialize the data to attempt to push into ZooKeeper. The format is this:
* <p>
* [DATA_VERSION, newSecretLength, newSecret, currentSecretLength, currentSecret, previousSecretLength, previousSecret, nextRolloverDate]
* <p>
* Only previousSecret can be null, in which case the format looks like this:
* <p>
* [DATA_VERSION, newSecretLength, newSecret, currentSecretLength, currentSecret, 0, nextRolloverDate]
* <p>
*
* @param newSecret
* The new secret to use
* @param currentSecret
* The current secret
* @param previousSecret
* The previous secret
* @return The serialized data for ZooKeeper
*/
  private synchronized byte[] generateZKData(byte[] newSecret, byte[] currentSecret, byte[] previousSecret) {
int newSecretLength = newSecret.length;
int currentSecretLength = currentSecret.length;
int previousSecretLength = 0;
if (previousSecret != null) {
previousSecretLength = previousSecret.length;
}
ByteBuffer bb =
ByteBuffer.allocate(((((((INT_BYTES + INT_BYTES) + newSecretLength) + INT_BYTES) + currentSecretLength) + INT_BYTES) + previousSecretLength) + LONG_BYTES);
bb.putInt(DATA_VERSION);
bb.putInt(newSecretLength);
bb.put(newSecret);
bb.putInt(currentSecretLength);
bb.put(currentSecret);
    bb.putInt(previousSecretLength);
    if (previousSecretLength > 0) {
      bb.put(previousSecret);
    }
bb.putLong(nextRolloverDate);
return bb.array();
} | 3.26 |
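
The layout documented above is a straightforward length-prefixed `ByteBuffer` encoding. A standalone round-trip sketch, assuming a `DATA_VERSION` of 1 and 4/8-byte int/long sizes, with no previous secret:

```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class ZkSecretPayloadSketch {
  public static void main(String[] args) {
    byte[] newSecret = "next-secret".getBytes(StandardCharsets.UTF_8);
    byte[] currentSecret = "current-secret".getBytes(StandardCharsets.UTF_8);
    int dataVersion = 1;               // assumed DATA_VERSION
    long nextRolloverDate = 1700003600000L;

    // Serialize: [version, newLen, new, currentLen, current, 0 (no previous), rolloverDate]
    ByteBuffer out = ByteBuffer.allocate(4 + 4 + newSecret.length
        + 4 + currentSecret.length + 4 + 8);
    out.putInt(dataVersion);
    out.putInt(newSecret.length).put(newSecret);
    out.putInt(currentSecret.length).put(currentSecret);
    out.putInt(0);                     // previousSecretLength == 0 means "no previous secret"
    out.putLong(nextRolloverDate);

    // Parse it back the same way pullFromZK() walks the buffer.
    ByteBuffer in = ByteBuffer.wrap(out.array());
    System.out.println("version=" + in.getInt());
    byte[] next = new byte[in.getInt()];
    in.get(next);
    byte[] current = new byte[in.getInt()];
    in.get(current);
    int previousLen = in.getInt();
    System.out.println("next=" + new String(next, StandardCharsets.UTF_8)
        + " current=" + new String(current, StandardCharsets.UTF_8)
        + " previousLen=" + previousLen
        + " rollover=" + in.getLong());
  }
}
```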
hadoop_ZKSignerSecretProvider_m0_rdh | /**
* Pushes proposed data to ZooKeeper. If a different server pushes its data
* first, it gives up.
*
* @param newSecret
* The new secret to use
* @param currentSecret
* The current secret
* @param previousSecret
* The previous secret
*/
  private synchronized void m0(byte[] newSecret, byte[] currentSecret, byte[] previousSecret) {
    byte[] bytes = generateZKData(newSecret, currentSecret, previousSecret);
try {
client.setData().withVersion(zkVersion).forPath(path, bytes);
} catch (KeeperException.BadVersionException bve) {
LOG.debug("Unable to push to znode; another server already did it");
} catch (Exception ex) {
LOG.error("An unexpected exception occurred pushing data to ZooKeeper", ex);
    }
  } | 3.26 |
hadoop_ZKSignerSecretProvider_pullFromZK_rdh | /**
* Pulls data from ZooKeeper. If isInit is false, it will only parse the
* next secret and version. If isInit is true, it will also parse the current
* and previous secrets, and the next rollover date; it will also init the
* secrets. Hence, isInit should only be true on startup.
*
* @param isInit
* see description above
*/
private synchronized void pullFromZK(boolean isInit) {
try {
Stat stat = new Stat();
byte[] bytes = client.getData().storingStatIn(stat).forPath(path);
ByteBuffer bb = ByteBuffer.wrap(bytes);
      int dataVersion = bb.getInt();
      if (dataVersion > DATA_VERSION) {
        throw new IllegalStateException("Cannot load data from ZooKeeper; it"
            + " was written with a newer version");
      }
int nextSecretLength = bb.getInt();
byte[] nextSecret = new byte[nextSecretLength];
bb.get(nextSecret);
this.f3 = nextSecret;
zkVersion = stat.getVersion();
if (isInit) {
int currentSecretLength = bb.getInt();
byte[] currentSecret = new byte[currentSecretLength];
bb.get(currentSecret);
        int previousSecretLength = bb.getInt();
        byte[] previousSecret = null;
if (previousSecretLength > 0) {
previousSecret = new byte[previousSecretLength];
bb.get(previousSecret);
}
super.initSecrets(currentSecret, previousSecret);
        nextRolloverDate = bb.getLong();
      }
} catch (Exception ex) {
LOG.error("An unexpected exception occurred while pulling data from" + "ZooKeeper", ex);
}
} | 3.26 |
hadoop_ZKSignerSecretProvider_destroy_rdh | /**
* Disconnects from ZooKeeper unless told not to.
*/
@Override
public void destroy() {
    if (shouldDisconnect && (client != null)) {
client.close();
}
super.destroy();
} | 3.26 |
hadoop_TaskContainerDefinition_withDurationLegacy_rdh | /**
* Also support "duration.ms" for backward compatibility.
*
* @param jsonTask
* the json representation of the task.
* @param key
* The json key.
* @return the builder
*/
public Builder withDurationLegacy(Map<String, String> jsonTask, String key) {
if (jsonTask.containsKey(key)) {
this.durationLegacy = Integer.parseInt(jsonTask.get(key));
}
return this;
} | 3.26 |
hadoop_HHUtil_findFirstValidInput_rdh | /**
* Find the valid input from all the inputs.
*
* @param <T>
* Generics Type T.
* @param inputs
* input buffers to look for valid input
* @return the first valid input
*/
public static <T> T findFirstValidInput(T[] inputs) {
for (T input : inputs) {
if (input != null) {
return input;
}
}
throw new HadoopIllegalArgumentException("Invalid inputs are found, all being null");
} | 3.26 |
hadoop_Validate_checkRequired_rdh | /**
* Validates that the expression (that checks a required field is present) is true.
*
* @param isPresent
* indicates whether the given argument is present.
* @param argName
* the name of the argument being validated.
*/
public static void checkRequired(boolean isPresent, String argName) {
checkArgument(isPresent, "'%s' is required.", argName);
} | 3.26 |
hadoop_Validate_checkNotNullAndNumberOfElements_rdh | /**
* Validates that the given set is not null and has an exact number of items.
*
* @param <T>
* the type of collection's elements.
* @param collection
* the argument reference to validate.
* @param numElements
* the expected number of elements in the collection.
* @param argName
* the name of the argument being validated.
*/
public static <T> void checkNotNullAndNumberOfElements(Collection<T> collection, int numElements, String argName) {
checkNotNull(collection, argName);
checkArgument(collection.size() == numElements, "Number of elements in '%s' must be exactly %s, %s given.", argName, numElements, collection.size());
} | 3.26 |
hadoop_Validate_checkGreater_rdh | /**
* Validates that the first value is greater than the second value.
*
* @param value1
* the first value to check.
* @param value1Name
* the name of the first argument.
* @param value2
* the second value to check.
* @param value2Name
* the name of the second argument.
*/
public static void checkGreater(long value1, String value1Name, long value2, String value2Name) {
checkArgument(value1 > value2, "'%s' (%s) must be greater than '%s' (%s).", value1Name, value1, value2Name, value2);
} | 3.26 |
hadoop_Validate_checkNotNegative_rdh | /**
* Validates that the given integer argument is not negative.
*
* @param value
* the argument value to validate
* @param argName
* the name of the argument being validated.
*/
public static void checkNotNegative(long value, String argName) {
checkArgument(value >= 0, "'%s' must not be negative.", argName);
} | 3.26 |
hadoop_Validate_checkIntegerMultiple_rdh | /**
* Validates that the first value is an integer multiple of the second value.
*
* @param value1
* the first value to check.
* @param value1Name
* the name of the first argument.
* @param value2
* the second value to check.
* @param value2Name
* the name of the second argument.
*/
public static void checkIntegerMultiple(long value1, String value1Name, long value2, String value2Name) {
checkArgument((value1 % value2) == 0, "'%s' (%s) must be an integer multiple of '%s' (%s).", value1Name, value1, value2Name, value2);
} | 3.26 |
hadoop_Validate_checkPositiveInteger_rdh | /**
* Validates that the given integer argument is not zero or negative.
*
* @param value
* the argument value to validate
* @param argName
* the name of the argument being validated.
*/
public static void checkPositiveInteger(long value, String argName) {
    checkArgument(value > 0, "'%s' must be a positive integer.", argName);
  } | 3.26 |
hadoop_Validate_checkGreaterOrEqual_rdh | /**
* Validates that the first value is greater than or equal to the second value.
*
* @param value1
* the first value to check.
* @param value1Name
* the name of the first argument.
* @param value2
* the second value to check.
* @param value2Name
* the name of the second argument.
*/
public static void checkGreaterOrEqual(long value1, String value1Name, long value2, String value2Name) {
checkArgument(value1 >=
value2, "'%s' (%s) must be greater than or equal to '%s' (%s).", value1Name, value1, value2Name, value2);
} | 3.26 |
hadoop_Validate_checkWithinRange_rdh | /**
* Validates that the given value is within the given range of values.
*
* @param value
* the value to check.
* @param valueName
* the name of the argument.
* @param minValueInclusive
* inclusive lower limit for the value.
* @param maxValueInclusive
* inclusive upper limit for the value.
 */
public static void checkWithinRange(double value, String valueName, double minValueInclusive, double maxValueInclusive) {
checkArgument((value >= minValueInclusive) && (value <= maxValueInclusive), "'%s' (%s) must be within the range [%s, %s].", valueName, value, minValueInclusive, maxValueInclusive);
} | 3.26 |
hadoop_Validate_checkValuesEqual_rdh | /**
* Validates that the given two values are equal.
*
* @param value1
* the first value to check.
* @param value1Name
* the name of the first argument.
* @param value2
* the second value to check.
* @param value2Name
* the name of the second argument.
*/
public static void checkValuesEqual(long value1, String value1Name, long value2, String value2Name) {
checkArgument(value1 == value2, "'%s' (%s) must equal '%s' (%s).", value1Name, value1, value2Name, value2);
} | 3.26 |
hadoop_Validate_checkLessOrEqual_rdh | /**
* Validates that the first value is less than or equal to the second value.
*
* @param value1
* the first value to check.
* @param value1Name
* the name of the first argument.
* @param value2
* the second value to check.
* @param value2Name
* the name of the second argument.
*/
public static void checkLessOrEqual(long value1, String value1Name, long value2, String value2Name) {
checkArgument(value1 <= value2, "'%s' (%s) must be less than or equal to '%s' (%s).", value1Name, value1, value2Name, value2);
} | 3.26 |
hadoop_Validate_checkNotNull_rdh | /**
* Validates that the given reference argument is not null.
*
* @param obj
* the argument reference to validate.
* @param argName
* the name of the argument being validated.
*/
public static void checkNotNull(Object obj, String argName) {
checkArgument(obj != null, "'%s' must not be null.", argName);
} | 3.26 |
hadoop_Validate_checkPathExists_rdh | /**
* Validates that the given path exists.
*
* @param path
* the path to check.
* @param argName
* the name of the argument being validated.
*/
public static void checkPathExists(Path path, String argName) {
checkNotNull(path, argName);
checkArgument(Files.exists(path), "Path %s (%s) does not exist.", argName, path);
} | 3.26 |
hadoop_Validate_checkPathExistsAsDir_rdh | /**
* Validates that the given path exists and is a directory.
*
* @param path
* the path to check.
* @param argName
* the name of the argument being validated.
*/
public static void checkPathExistsAsDir(Path path, String argName) {
checkPathExists(path, argName);
checkArgument(Files.isDirectory(path), "Path %s (%s) must point to a directory.", argName, path);
} | 3.26 |
hadoop_Validate_checkPathExistsAsFile_rdh | /**
* Validates that the given path exists and is a file.
*
* @param path
* the path to check.
* @param argName
* the name of the argument being validated.
 */
public static void checkPathExistsAsFile(Path path, String argName) {
checkPathExists(path, argName);
checkArgument(Files.isRegularFile(path), "Path %s (%s) must point to a file.", argName, path);
} | 3.26 |
hadoop_Validate_checkValid_rdh | /**
* Validates that the expression (that checks a field is valid) is true.
*
* @param isValid
* indicates whether the given argument is valid.
* @param argName
* the name of the argument being validated.
*/
public static void checkValid(boolean isValid, String argName) {
checkArgument(isValid, "'%s' is invalid.", argName);
} | 3.26 |
hadoop_Validate_checkNotNullAndNotEmpty_rdh | /**
* Validates that the given buffer is not null and has non-zero capacity.
*
* @param <T>
* the type of iterable's elements.
* @param iter
* the argument reference to validate.
* @param argName
* the name of the argument being validated.
*/
public static <T> void checkNotNullAndNotEmpty(Iterable<T> iter, String argName) {
checkNotNull(iter, argName);
int minNumElements = (iter.iterator().hasNext())
? 1 : 0;
checkNotEmpty(minNumElements, argName);
} | 3.26 |
hadoop_StagingCommitter_getConflictResolutionMode_rdh | /**
* Returns the {@link ConflictResolution} mode for this commit.
*
* @param context
* the JobContext for this commit
* @param fsConf
* filesystem config
* @return the ConflictResolution mode
*/
public final ConflictResolution getConflictResolutionMode(JobContext context, Configuration fsConf) {
    if (conflictResolution == null) {
      this.conflictResolution = ConflictResolution.valueOf(getConfictModeOption(context, fsConf, DEFAULT_CONFLICT_MODE));
}
return conflictResolution;
} | 3.26 |
hadoop_StagingCommitter_createWrappedCommitter_rdh | /**
* Create the wrapped committer.
* This includes customizing its options, and setting up the destination
* directory.
*
* @param context
* job/task context.
* @param conf
* config
* @return the inner committer
* @throws IOException
* on a failure
*/
protected FileOutputCommitter createWrappedCommitter(JobContext context, Configuration conf) throws IOException {
    // explicitly choose commit algorithm
    m0(context);
commitsDirectory = Paths.getMultipartUploadCommitsDirectory(conf, getUUID());
return new FileOutputCommitter(commitsDirectory, context);
} | 3.26 |
hadoop_StagingCommitter_getJobPath_rdh | /**
* Compute the path under which all job attempts will be placed.
*
* @return the path to store job attempt data.
*/
protected Path getJobPath() {
    return getPendingJobAttemptsPath(commitsDirectory);
} | 3.26 |
hadoop_StagingCommitter_getFinalPath_rdh | /**
* Returns the final S3 location for a relative path as a Hadoop {@link Path}.
* This is a final method that calls {@link #getFinalKey(String, JobContext)}
* to determine the final location.
*
* @param relative
* the path of a file relative to the task attempt path
* @param context
* the JobContext or TaskAttemptContext for this job
* @return the S3 Path where the file will be uploaded
* @throws IOException
* IO problem
*/
protected final Path getFinalPath(String relative, JobContext context) throws IOException {
return getDestS3AFS().keyToQualifiedPath(getFinalKey(relative, context));
} | 3.26 |
hadoop_StagingCommitter_deleteTaskWorkingPathQuietly_rdh | /**
* Delete the working path of a task; no-op if there is none, that
* is: this is a job.
*
* @param context
* job/task context
*/
protected void deleteTaskWorkingPathQuietly(JobContext context) {
ignoreIOExceptions(LOG, "Delete working path", "", () -> {
      Path path = buildWorkPath(context, getUUID());
      if (path != null) {
deleteQuietly(path.getFileSystem(getConf()), path, true);
}
});
} | 3.26 |
hadoop_StagingCommitter_m3_rdh | /**
* Generate a {@link PathExistsException} because the destination exists.
* Lists some of the child entries first, to help diagnose the problem.
*
* @param path
* path which exists
* @param description
* description (usually task/job ID)
* @return an exception to throw
*/
protected PathExistsException m3(final Path path, final String description) {
LOG.error("{}: Failing commit by job {} to write" + " to existing output path {}.", description, getJobContext().getJobID(), path);
// List the first 10 descendants, to give some details
// on what is wrong but not overload things if there are many files.
try {
int limit = 10;
RemoteIterator<LocatedFileStatus> lf = getDestFS().listFiles(path, true);
LOG.info("Partial Directory listing");
while ((limit > 0) && lf.hasNext()) {
limit--;
LocatedFileStatus status = lf.next();
LOG.info("{}: {}", status.getPath(), status.isDirectory() ? " dir" : ("file size " + status.getLen()) + " bytes");
}
cleanupRemoteIterator(lf);
} catch (IOException e) {
LOG.info("Discarding exception raised when listing {}: " + e, path);
LOG.debug("stack trace ", e);
}
return new PathExistsException(path.toString(), (description +
": ") + InternalCommitterConstants.E_DEST_EXISTS);
} | 3.26 |
hadoop_StagingCommitter_listPendingUploadsToCommit_rdh | /**
* Get the list of pending uploads for this job attempt.
*
* @param commitContext
* job context
* @return a list of pending uploads.
* @throws IOException
* Any IO failure
*/
@Override
protected ActiveCommit listPendingUploadsToCommit(CommitContext commitContext) throws IOException {
return listPendingUploads(commitContext, false);
} | 3.26 |
hadoop_StagingCommitter_getConfictModeOption_rdh | /**
* Get the conflict mode option string.
*
* @param context
* context with the config
* @param fsConf
* filesystem config
* @param defVal
* default value.
* @return the trimmed configuration option, upper case.
*/
public static String getConfictModeOption(JobContext context, Configuration fsConf, String defVal) {
return getConfigurationOption(context, fsConf, FS_S3A_COMMITTER_STAGING_CONFLICT_MODE, defVal).toUpperCase(Locale.ENGLISH);
} | 3.26 |
hadoop_StagingCommitter_m2_rdh | /**
* Compute the path where the output of a committed task is stored until the
* entire job is committed for a specific application attempt.
*
* @param appAttemptId
* the ID of the application attempt to use
* @param context
* the context of any task.
* @return the path where the output of a committed task is stored.
*/
protected Path m2(int appAttemptId, TaskAttemptContext context) {
m1(context);
return new Path(getJobAttemptPath(appAttemptId), String.valueOf(context.getTaskAttemptID().getTaskID()));
} | 3.26 |
hadoop_StagingCommitter_setupJob_rdh | /**
* Set up the job, including calling the same method on the
* wrapped committer.
*
* @param context
* job context
* @throws IOException
* IO failure.
*/
@Override
public void setupJob(JobContext context) throws IOException {
super.setupJob(context);
wrappedCommitter.setupJob(context);
} | 3.26 |
hadoop_StagingCommitter_deleteDestinationPaths_rdh | /**
* Delete the working paths of a job.
* <ol>
* <li>{@code $dest/__temporary}</li>
* <li>the local working directory for staged files</li>
* </ol>
* Does not attempt to clean up the work of the wrapped committer.
*
* @param context
* job context
* @throws IOException
* IO failure
 */
protected void deleteDestinationPaths(JobContext context) throws IOException {
// delete the __temporary directory. This will cause problems
// if there is >1 task targeting the same dest dir
deleteWithWarning(getDestFS(),
new Path(getOutputPath(), TEMPORARY), true);
// and the working path
deleteTaskWorkingPathQuietly(context);
} | 3.26 |
hadoop_StagingCommitter_buildWorkPath_rdh | /**
* Get the work path for a task.
*
* @param context
* job/task complex
* @param uuid
* UUID
* @return a path or null if the context is not of a task
* @throws IOException
* failure to build the path
*/
private static Path buildWorkPath(JobContext context, String uuid) throws IOException {
if (context instanceof TaskAttemptContext) {
return taskAttemptWorkingPath(((TaskAttemptContext) (context)), uuid);
} else {
return null;
}
} | 3.26 |