name (stringlengths 12–178) | code_snippet (stringlengths 8–36.5k) | score (float64 3.26–3.68) |
---|---|---|
hadoop_TokenIdentifier_getBytes_rdh | /**
* Get the bytes for the token identifier
*
* @return the bytes of the identifier
*/
public byte[] getBytes() {
DataOutputBuffer buf = new DataOutputBuffer(4096);
try {
this.write(buf);
} catch (IOException ie) {
throw new RuntimeException("i/o error in getBytes", ie);
}
return Arrays.copyOf(buf.getData(), buf.getLength());
} | 3.26 |
hadoop_BinaryRecordInput_get_rdh | /**
* Get a thread-local record input for the supplied DataInput.
*
* @param inp
* data input stream
* @return binary record input corresponding to the supplied DataInput.
*/
public static BinaryRecordInput get(DataInput inp) {
BinaryRecordInput v0 = B_IN.get();
v0.setDataInput(inp);
return v0;
} | 3.26 |
hadoop_WeakReferenceThreadMap_removeForCurrentThread_rdh | /**
* Remove the reference for the current thread.
*
* @return any reference value which existed.
*/
public V removeForCurrentThread() {
return remove(currentThreadId());
} | 3.26 |
hadoop_WeakReferenceThreadMap_setForCurrentThread_rdh | /**
* Set the new value for the current thread.
*
* @param newVal
* new reference to set for the active thread.
* @return the previously set value, possibly null
*/
public V setForCurrentThread(V newVal) {
requireNonNull(newVal);
long id = currentThreadId();
// if the same object is already in the map, just return it.
WeakReference<V> existingWeakRef = lookup(id);
// The looked up reference could be one of
// 1. null: nothing there
// 2. valid but get() == null : reference lost by GC.
// 3. different from the new value
// 4. the same as the old value
if (resolve(existingWeakRef) == newVal) {
// case 4: do nothing, return the new value
return newVal;
} else {// cases 1, 2, 3: update the map and return the old value
return put(id, newVal);
}
} | 3.26 |
hadoop_WeakReferenceThreadMap_currentThreadId_rdh | /**
* Get the current thread ID.
*
* @return thread ID.
*/
public long currentThreadId() {
return Thread.currentThread().getId();
} | 3.26 |
hadoop_WeakReferenceThreadMap_getForCurrentThread_rdh | /**
* Get the value for the current thread, creating if needed.
*
* @return an instance.
*/
public V getForCurrentThread() {
return get(currentThreadId());
} | 3.26 |
hadoop_HsJobPage_content_rdh | /**
* The content of this page is the JobBlock
*
* @return HsJobBlock.class
*/
@Override
protected Class<? extends SubView> content() {
return HsJobBlock.class;
} | 3.26 |
hadoop_HsJobPage_preHead_rdh | /* (non-Javadoc)
@see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
*/
@Override
protected void preHead(Page.HTML<__> html) {
String jobID = $(JOB_ID);
set(TITLE, jobID.isEmpty() ? "Bad request: missing job ID" : join("MapReduce Job ", $(JOB_ID)));
commonPreHead(html);
// Override the nav config from the commonPreHead
set(initID(ACCORDION, "nav"), "{autoHeight:false, active:1}");
} | 3.26 |
hadoop_RecurrenceId_getPipelineId_rdh | /**
* Return the pipelineId for the pipeline jobs.
*
* @return the pipelineId.
*/
public final String getPipelineId() {
return pipelineId;
} | 3.26 |
hadoop_RecurrenceId_getRunId_rdh | /**
* Return the runId for the pipeline job in one run.
*
* @return the runId.
*/
public final String getRunId() {
return runId;
} | 3.26 |
hadoop_CachingAuthorizer_get_rdh | /**
*
* @param key
* - Cache key
* @return null on cache-miss. true/false on cache-hit
*/
public V get(K key) {
if (!isEnabled) {
return null;
}
V result = cache.getIfPresent(key);
if (result == null) {
LOG.debug("{}: CACHE MISS: {}", label, key.toString());
} else {
LOG.debug("{}: CACHE HIT: {}, {}", label, key.toString(), result.toString());
}
return result;
} | 3.26 |
hadoop_FederationRegistryClient_writeRegistry_rdh | /**
* Write registry entry, override if exists.
*/
private void writeRegistry(final RegistryOperations registryImpl, UserGroupInformation ugi, final String key, final String value, final boolean throwIfFails) throws YarnException {
final ServiceRecord recordValue = new ServiceRecord();
recordValue.description = value;
// Use the ugi loaded with app credentials to access registry
boolean success = ugi.doAs(new PrivilegedAction<Boolean>() {
@Override
public Boolean run() {
try {
registryImpl.bind(key, recordValue, BindFlags.OVERWRITE);
return true;
} catch (Throwable e) {
if (throwIfFails) {
LOG.error("Registry write key {} failed.", key, e);
}
}
return false;
}
});
if ((!success) && throwIfFails) {
throw new YarnException(("Registry write key " + key) + " failed");
}
} | 3.26 |
hadoop_FederationRegistryClient_listDirRegistry_rdh | /**
* List the sub directories in the given directory.
*/
private List<String> listDirRegistry(final RegistryOperations registryImpl, UserGroupInformation ugi, final String key, final boolean throwIfFails) throws YarnException {
List<String> result = ugi.doAs(((PrivilegedAction<List<String>>) (() -> {
try {
return registryImpl.list(key);
} catch (Throwable e) {
if (throwIfFails) {
LOG.error("Registry list key {} failed.", key, e);
}
}
return null;
})));
if ((result == null) && throwIfFails) {
throw new YarnException(("Registry list key " + key) + " failed");
}
return result;
} | 3.26 |
hadoop_FederationRegistryClient_writeAMRMTokenForUAM_rdh | /**
* Write/update the UAM token for an application and a sub-cluster.
*
* @param appId
* ApplicationId.
* @param subClusterId
* sub-cluster id of the token
* @param token
* the UAM of the application
* @return whether the amrmToken is added or updated to a new value
*/
public synchronized boolean writeAMRMTokenForUAM(ApplicationId appId, String subClusterId, Token<AMRMTokenIdentifier> token) {
Map<String, Token<AMRMTokenIdentifier>> subClusterTokenMap = this.appSubClusterTokenMap.get(appId);
if (subClusterTokenMap == null) {
subClusterTokenMap = new ConcurrentHashMap<>();
this.appSubClusterTokenMap.put(appId, subClusterTokenMap);
}
boolean update = !token.equals(subClusterTokenMap.get(subClusterId));
if (!update) {
LOG.debug("Same amrmToken received from {}, skip writing registry for {}",
subClusterId, appId);
return update;
}
LOG.info("Writing/Updating amrmToken for {} to registry for {}", subClusterId, appId);
try {
// First, write the token entry
writeRegistry(this.registry, this.user, getRegistryKey(appId,
subClusterId), token.encodeToUrlString(), true);
// Then update the subClusterTokenMap
subClusterTokenMap.put(subClusterId, token);
} catch (YarnException | IOException e) {
LOG.error("Failed writing AMRMToken to registry for subcluster {}.", subClusterId, e);
}
return update;
} | 3.26 |
hadoop_FederationRegistryClient_removeAppFromRegistry_rdh | /**
* Remove an application from registry.
*
* @param appId
* application id.
*/
public synchronized void removeAppFromRegistry(ApplicationId appId) {
m0(appId, false);
} | 3.26 |
hadoop_FederationRegistryClient_cleanAllApplications_rdh | /**
* For testing, delete all application records in registry.
*/
@VisibleForTesting
public synchronized void cleanAllApplications() {
try {
removeKeyRegistry(this.registry, this.user, getRegistryKey(null, null), true, false);
} catch (YarnException e) {
LOG.warn("Unexpected exception from removeKeyRegistry", e);
}
} | 3.26 |
hadoop_FederationRegistryClient_getAllApplications_rdh | /**
* Get the list of known applications in the registry.
*
* @return the list of known applications
*/
public synchronized List<String> getAllApplications() {
// Suppress the exception here because it is valid that the entry does not
// exist
List<String> applications = null;
try {
applications = listDirRegistry(this.registry, this.user, getRegistryKey(null, null), false);
} catch (YarnException e) {
LOG.warn("Unexpected exception from listDirRegistry", e);
}
if (applications == null) {
// It is valid for listDirRegistry to return null
return new ArrayList<>();
}
return applications;
} | 3.26 |
hadoop_FederationRegistryClient_loadStateFromRegistry_rdh | /**
* Load the information of one application from registry.
*
* @param appId
* application id
* @return the sub-cluster to UAM token mapping
*/
public synchronized Map<String, Token<AMRMTokenIdentifier>> loadStateFromRegistry(ApplicationId appId) {
Map<String, Token<AMRMTokenIdentifier>> retMap = new HashMap<>();
// Suppress the exception here because it is valid that the entry does not
// exist
List<String> subclusters = null;
try {
subclusters = listDirRegistry(this.registry, this.user, getRegistryKey(appId, null), false);
} catch (YarnException e) {
LOG.warn("Unexpected exception from listDirRegistry", e);
}
if (subclusters == null) {
LOG.info("Application {} does not exist in registry", appId);
return retMap;
}
// Read the amrmToken for each sub-cluster with an existing UAM
for (String scId : subclusters) {
LOG.info("Reading amrmToken for subcluster {} for {}", scId, appId);
String key = getRegistryKey(appId, scId);
try {
String tokenString = readRegistry(this.registry, this.user, key, true);
if (tokenString == null) {
throw new YarnException("Null string from readRegistry key " + key);
}
Token<AMRMTokenIdentifier> amrmToken = new Token<>();
amrmToken.decodeFromUrlString(tokenString);
// Clear the service field, as if RM just issued the token
amrmToken.setService(new Text());
retMap.put(scId, amrmToken);
} catch (Exception e) {
LOG.error("Failed reading registry key {}, skipping subcluster {}.", key, scId, e);
}
}
// Override existing map if there
this.appSubClusterTokenMap.put(appId, new ConcurrentHashMap<>(retMap));
return retMap;
} | 3.26 |
hadoop_RecordStore_getDriver_rdh | /**
* Get the State Store driver.
*
* @return State Store driver.
*/
public StateStoreDriver getDriver() {
return this.driver;
}
/**
* Build a state store API implementation interface.
*
* @param clazz
* The specific interface implementation to create
* @param driver
* The {@link StateStoreDriver} | 3.26 |
hadoop_ActiveUsersManager_activateApplication_rdh | /**
* An application has new outstanding requests.
*
* @param user
* application user
* @param applicationId
* activated application
*/
@Lock({ Queue.class, SchedulerApplicationAttempt.class })
@Override
public synchronized void activateApplication(String user, ApplicationId applicationId) {
Set<ApplicationId> userApps = usersApplications.get(user);
if (userApps == null) {
userApps = new HashSet<ApplicationId>();
usersApplications.put(user, userApps);
++activeUsers;
metrics.incrActiveUsers();
LOG.debug("User {} added to activeUsers, currently: {}", user, activeUsers);
}
if (userApps.add(applicationId)) {
metrics.activateApp(user);
}
} | 3.26 |
hadoop_ActiveUsersManager_deactivateApplication_rdh | /**
* An application has no more outstanding requests.
*
* @param user
* application user
* @param applicationId
* deactivated application
*/
@Lock({ Queue.class, SchedulerApplicationAttempt.class })
@Override
public synchronized void deactivateApplication(String user, ApplicationId applicationId) {
Set<ApplicationId> userApps = usersApplications.get(user);
if (userApps != null) {
if (userApps.remove(applicationId)) {
metrics.deactivateApp(user);
}
if (userApps.isEmpty()) {
usersApplications.remove(user);
--activeUsers;
metrics.decrActiveUsers();
LOG.debug("User {} removed from activeUsers, currently: {}", user, activeUsers);
}
}
} | 3.26 |
hadoop_ActiveUsersManager_getNumActiveUsers_rdh | /**
* Get number of active users i.e. users with applications which have pending
* resource requests.
*
* @return number of active users
*/
@Lock({ Queue.class, SchedulerApplicationAttempt.class })
@Override
public synchronized int getNumActiveUsers() {
return activeUsers;
} | 3.26 |
hadoop_AdlPermission_getAclBit_rdh | /**
* Returns true if "adl.feature.support.acl.bit" configuration is set to
* true.
*
* If configuration is not set then default value is true.
*
 * @return the ACL bit value; defaults to true if the configuration is not set.
*/
public boolean getAclBit() {
return aclBit;
} | 3.26 |
hadoop_PlacementConstraintTransformations_transform_rdh | /**
* This method performs the transformation of the
* {@link #placementConstraint}.
*
* @return the transformed placement constraint.
*/
public PlacementConstraint transform() {
AbstractConstraint constraintExpr = placementConstraint.getConstraintExpr();
// Visit the constraint tree to perform the transformation.
constraintExpr = constraintExpr.accept(this);
return new PlacementConstraint(constraintExpr);
} | 3.26 |
hadoop_ReduceTaskAttemptInfo_getReduceRuntime_rdh | /**
* Get the runtime for the <b>reduce</b> phase of the reduce task-attempt.
*
* @return the runtime for the <b>reduce</b> phase of the reduce task-attempt
*/
public long getReduceRuntime() {
return reduceTime;
} | 3.26 |
hadoop_ReduceTaskAttemptInfo_getShuffleRuntime_rdh | /**
* Get the runtime for the <b>shuffle</b> phase of the reduce task-attempt.
*
* @return the runtime for the <b>shuffle</b> phase of the reduce task-attempt
*/
public long getShuffleRuntime() {
return shuffleTime;
} | 3.26 |
hadoop_ReduceTaskAttemptInfo_getMergeRuntime_rdh | /**
* Get the runtime for the <b>merge</b> phase of the reduce task-attempt
*
* @return the runtime for the <b>merge</b> phase of the reduce task-attempt
*/
public long getMergeRuntime() {
return mergeTime;
} | 3.26 |
hadoop_TypedBytesWritable_toString_rdh | /**
* Generate a suitable string representation.
*/
public String toString() {
return getValue().toString();
} | 3.26 |
hadoop_TypedBytesWritable_getType_rdh | /**
* Get the type code embedded in the first byte.
*/
public Type getType() {
byte[] bytes = getBytes();
if ((bytes == null) || (bytes.length == 0)) {
return null;
}
for (Type type : Type.values()) {
if (type.code == ((int) (bytes[0]))) {
return type;
}
}
return null;
} | 3.26 |
hadoop_TypedBytesWritable_setValue_rdh | /**
* Set the typed bytes from a given Java object.
*/
public void setValue(Object obj) {
try {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
TypedBytesOutput tbo = TypedBytesOutput.get(new DataOutputStream(baos));
tbo.write(obj);
byte[] bytes = baos.toByteArray();
set(bytes, 0, bytes.length);
} catch (IOException e) {
throw new RuntimeException(e);
}
} | 3.26 |
hadoop_TypedBytesWritable_getValue_rdh | /**
* Get the typed bytes as a Java object.
*/
public Object getValue() {
try {
ByteArrayInputStream bais = new ByteArrayInputStream(getBytes());
TypedBytesInput tbi = TypedBytesInput.get(new DataInputStream(bais));
Object obj = tbi.read();
return obj;
} catch (IOException e) {
throw new RuntimeException(e);
}
} | 3.26 |
hadoop_WrappedMapper_getInputSplit_rdh | /**
* Get the input split for this map.
*/
public InputSplit getInputSplit() {
return mapContext.getInputSplit();
} | 3.26 |
hadoop_WrappedMapper_getMapContext_rdh | /**
* Get a wrapped {@link Mapper.Context} for custom implementations.
*
* @param mapContext
* <code>MapContext</code> to be wrapped
* @return a wrapped <code>Mapper.Context</code> for custom implementations
*/
public Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context getMapContext(MapContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> mapContext) {
return new Context(mapContext);
} | 3.26 |
hadoop_RehashPartitioner_getPartition_rdh | /**
* Rehash {@link Object#hashCode()} to partition.
*/
public int getPartition(K key, V value, int numReduceTasks) {
int h = SEED ^ key.hashCode();
h ^= (h >>> 20) ^ (h >>> 12);
h = (h ^ (h >>> 7)) ^ (h >>> 4);
return (h & Integer.MAX_VALUE) % numReduceTasks;
} | 3.26 |
hadoop_ApplicationRowKey_getRowKeyAsString_rdh | /**
* Constructs a row key for the application table as follows:
* {@code clusterId!userName!flowName!flowRunId!AppId}.
*
* @return String representation of row key.
*/
public String getRowKeyAsString() {
return appRowKeyConverter.encodeAsString(this);
} | 3.26 |
hadoop_ApplicationRowKey_parseRowKey_rdh | /**
* Given the raw row key as bytes, returns the row key as an object.
*
* @param rowKey
* Byte representation of row key.
* @return An <cite>ApplicationRowKey</cite> object.
*/
public static ApplicationRowKey parseRowKey(byte[] rowKey) {
return new ApplicationRowKeyConverter().decode(rowKey);
} | 3.26 |
hadoop_ApplicationRowKey_parseRowKeyFromString_rdh | /**
* Given the encoded row key as string, returns the row key as an object.
*
* @param encodedRowKey
* String representation of row key.
* @return A <cite>ApplicationRowKey</cite> object.
*/
public static ApplicationRowKey parseRowKeyFromString(String encodedRowKey) {
return new ApplicationRowKeyConverter().decodeFromString(encodedRowKey);
} | 3.26 |
hadoop_PersistentLongFile_writeFile_rdh | /**
* Atomically write the given value to the given file, including fsyncing.
*
* @param file
* destination file
* @param val
* value to write
* @throws IOException
* if the file cannot be written
*/
public static void writeFile(File file, long val) throws IOException {
AtomicFileOutputStream fos = new AtomicFileOutputStream(file);
try {
fos.write(String.valueOf(val).getBytes(StandardCharsets.UTF_8));
fos.write('\n');
fos.close();
fos = null;
} finally {
if (fos != null) {
fos.abort();
}
}
} | 3.26 |
hadoop_WritableFactories_getFactory_rdh | /**
 * Get the factory for a class.
*
* @param c
* input c.
* @return a factory for a class.
*/
public static WritableFactory getFactory(Class c) {
return CLASS_TO_FACTORY.get(c);
} | 3.26 |
hadoop_WritableFactories_setFactory_rdh | /**
* Define a factory for a class.
*
* @param c
* input c.
* @param factory
* input factory.
*/
public static void setFactory(Class c, WritableFactory factory) {
CLASS_TO_FACTORY.put(c, factory);
} | 3.26 |
hadoop_WritableFactories_newInstance_rdh | /**
* Create a new instance of a class with a defined factory.
*
* @param c
* input c.
* @return a new instance of a class with a defined factory.
*/
public static Writable newInstance(Class<? extends Writable> c) {
return newInstance(c, null);
} | 3.26 |
hadoop_AbfsPermission_valueOf_rdh | /**
 * Create an AbfsPermission from an ABFS symbolic permission string.
*
* @param abfsSymbolicPermission
* e.g. "rw-rw-rw-+" / "rw-rw-rw-"
* @return a permission object for the provided string representation
*/
public static AbfsPermission valueOf(final String abfsSymbolicPermission) {
if (abfsSymbolicPermission == null) {
return null;
}
final boolean isExtendedAcl = abfsSymbolicPermission.charAt(abfsSymbolicPermission.length() - 1) == '+';
final String abfsRawSymbolicPermission = (isExtendedAcl) ? abfsSymbolicPermission.substring(0, abfsSymbolicPermission.length() - 1) : abfsSymbolicPermission;
int n = 0;
for (int i = 0; i < abfsRawSymbolicPermission.length(); i++) {
n = n << 1;
char c = abfsRawSymbolicPermission.charAt(i);
n += (((c == '-') || (c == 'T')) || (c == 'S')) ? 0 : 1;
}
// Add sticky bit value if set
if ((abfsRawSymbolicPermission.charAt(abfsRawSymbolicPermission.length() - 1) == 't') ||
(abfsRawSymbolicPermission.charAt(abfsRawSymbolicPermission.length() - 1) == 'T')) {
n += f0;
}
return new AbfsPermission(((short) (n)), isExtendedAcl);
} | 3.26 |
hadoop_RouterQuotaUpdateService_getMountTableStore_rdh | /**
* Get mount table store management interface.
*
* @return MountTableStore instance.
* @throws IOException
*/
private MountTableStore getMountTableStore() throws IOException {
if (this.mountTableStore == null) {
this.mountTableStore = router.getStateStore().getRegisteredRecordStore(MountTableStore.class);
if (this.mountTableStore == null) {
throw new IOException("Mount table state store is not available.");
}
}
return this.mountTableStore;
} | 3.26 |
hadoop_RouterQuotaUpdateService_getQuotaSetMountTables_rdh | /**
* Get mount tables which quota was set.
* During this time, the quota usage cache will also be updated by
* quota manager:
* 1. Stale paths (entries) will be removed.
* 2. Existing entries will be overridden and updated.
*
* @return List of mount tables which quota was set.
* @throws IOException
*/
private List<MountTable> getQuotaSetMountTables() throws IOException {
List<MountTable> mountTables = getMountTableEntries();
Set<String> allPaths = this.quotaManager.getAll();
Set<String> stalePaths = new HashSet<>(allPaths);
List<MountTable> neededMountTables = new LinkedList<>();
for (MountTable entry : mountTables) {
// select mount tables which is quota set
if (isQuotaSet(entry)) {
neededMountTables.add(entry);
}
// update mount table entries info in quota cache
String src = entry.getSourcePath();
this.quotaManager.updateQuota(src, entry.getQuota());
stalePaths.remove(src);
}
// remove stale paths that currently cached
for (String stalePath : stalePaths) {
this.quotaManager.remove(stalePath);
}
return neededMountTables;
} | 3.26 |
hadoop_RouterQuotaUpdateService_isQuotaSet_rdh | /**
* Check if the quota was set in given MountTable.
*
* @param mountTable
* Mount table entry.
*/
private boolean isQuotaSet(MountTable mountTable) {
if (mountTable != null) {
return this.quotaManager.isQuotaSet(mountTable.getQuota());
}
return false;
} | 3.26 |
hadoop_RouterQuotaUpdateService_generateNewQuota_rdh | /**
* Generate a new quota based on old quota and current quota usage value.
*
* @param oldQuota
* Old quota stored in State Store.
* @param currentQuotaUsage
* Current quota usage value queried from
* subcluster.
* @return A new RouterQuotaUsage.
*/
private RouterQuotaUsage generateNewQuota(RouterQuotaUsage oldQuota, QuotaUsage currentQuotaUsage) {
RouterQuotaUsage.Builder newQuotaBuilder = new RouterQuotaUsage.Builder().fileAndDirectoryCount(currentQuotaUsage.getFileAndDirectoryCount()).quota(oldQuota.getQuota()).spaceConsumed(currentQuotaUsage.getSpaceConsumed()).spaceQuota(oldQuota.getSpaceQuota());
Quota.eachByStorageType(t -> {
newQuotaBuilder.typeQuota(t, oldQuota.getTypeQuota(t));
newQuotaBuilder.typeConsumed(t, currentQuotaUsage.getTypeConsumed(t));
});
return newQuotaBuilder.build();
} | 3.26 |
hadoop_RouterQuotaUpdateService_getMountTableEntries_rdh | /**
* Get all the existing mount tables.
*
* @return List of mount tables.
* @throws IOException
*/
private List<MountTable> getMountTableEntries() throws IOException {
// scan mount tables from root path
GetMountTableEntriesRequest v19 = GetMountTableEntriesRequest.newInstance("/");
GetMountTableEntriesResponse getResponse = getMountTableStore().getMountTableEntries(v19);
return getResponse.getEntries();
} | 3.26 |
hadoop_TextView_echo_rdh | /**
* Print strings escaping html.
*
* @param args
* the strings to print
*/
public void echo(Object... args) {
PrintWriter out = writer();
for (Object v1 : args) {
String escapedString = StringEscapeUtils.escapeEcmaScript(StringEscapeUtils.escapeHtml4(v1.toString()));
out.print(escapedString);
}
} | 3.26 |
hadoop_TextView_echoWithoutEscapeHtml_rdh | /**
* Print strings as is (no newline, a la php echo).
*
* @param args
* the strings to print
*/
public void echoWithoutEscapeHtml(Object... args) {
PrintWriter out = writer();
for (Object s : args) {
out.print(s);
}
} | 3.26 |
hadoop_TextView_puts_rdh | /**
* Print strings as a line (new line appended at the end, a la C/Tcl puts).
*
* @param args
* the strings to print
*/
public void puts(Object... args) {
echo(args);
writer().println();
} | 3.26 |
hadoop_TextView_putWithoutEscapeHtml_rdh | /**
 * Print a string as a line. This does not escape the string for HTML.
*
* @param args
* the strings to print
*/
public void putWithoutEscapeHtml(Object args) {
echoWithoutEscapeHtml(args);
writer().println();
} | 3.26 |
hadoop_MountTableStore_updateCacheAllRouters_rdh | /**
* Update mount table cache of this router as well as all other routers.
*/
protected void updateCacheAllRouters() {
if (f0 != null) {
try {
f0.refresh();
} catch (StateStoreUnavailableException e) {
LOG.error("Cannot refresh mount table: state store not available", e);
}
}
} | 3.26 |
hadoop_EntityIdentifier_getType_rdh | /**
* Get the entity type.
*
* @return The entity type.
*/
public String getType() {
return type;
} | 3.26 |
hadoop_PlanningAlgorithm_allocateUser_rdh | /**
* Performs the actual allocation for a ReservationDefinition within a Plan.
*
* @param reservationId
* the identifier of the reservation
* @param user
* the user who owns the reservation
* @param plan
* the Plan to which the reservation must be fitted
* @param contract
* encapsulates the resources required by the user for his
* session
* @param oldReservation
* the existing reservation (null if none)
* @return whether the allocateUser function was successful or not
* @throws PlanningException
* if the session cannot be fitted into the plan
* @throws ContractValidationException
* if validation fails
*/
protected boolean allocateUser(ReservationId reservationId, String user, Plan plan, ReservationDefinition contract, ReservationAllocation oldReservation) throws PlanningException, ContractValidationException {
// Adjust the ResourceDefinition to account for system "imperfections"
// (e.g., scheduling delays for large containers).
ReservationDefinition v0 = adjustContract(plan, contract);
// Compute the job allocation
RLESparseResourceAllocation allocation = computeJobAllocation(plan, reservationId, v0, user);
long period = Long.parseLong(contract.getRecurrenceExpression());
// Make allocation periodic if request is periodic
if (contract.getRecurrenceExpression() != null) {
if (period > 0) {
allocation = new PeriodicRLESparseResourceAllocation(allocation, period);
}
}
// If no job allocation was found, fail
if (allocation == null) {
throw new PlanningException("The planning algorithm could not find a valid allocation" + " for your request");
}
// Translate the allocation to a map (with zero paddings)
long step = plan.getStep();
long jobArrival = stepRoundUp(v0.getArrival(), step);
long v5 = stepRoundUp(v0.getDeadline(), step);
Map<ReservationInterval, Resource> mapAllocations = allocationsToPaddedMap(allocation, jobArrival, v5, period);
// Create the reservation
ReservationAllocation capReservation = new InMemoryReservationAllocation(
reservationId, // ID
v0, // Contract
user, // User name
plan.getQueueName(), // Queue name
v0.getArrival(), v0.getDeadline(),
mapAllocations, // Allocations
plan.getResourceCalculator(), // Resource calculator
plan.getMinimumAllocation()); // Minimum allocation
// Add (or update) the reservation allocation
if (oldReservation != null) {
return plan.updateReservation(capReservation);
} else {
return plan.addReservation(capReservation, false);
}
} | 3.26 |
hadoop_JobID_getJobIDsPattern_rdh | /**
 * Returns a regex pattern which matches job IDs. Arguments can
* be given null, in which case that part of the regex will be generic.
* For example to obtain a regex matching <i>any job</i>
* run on the jobtracker started at <i>200707121733</i>, we would use :
* <pre>
 * JobID.getJobIDsPattern("200707121733", null);
* </pre>
* which will return :
* <pre> "job_200707121733_[0-9]*" </pre>
*
* @param jtIdentifier
* jobTracker identifier, or null
* @param jobId
* job number, or null
* @return a regex pattern matching JobIDs
*/
@Deprecated
public static String getJobIDsPattern(String jtIdentifier, Integer jobId) {
StringBuilder builder = new StringBuilder(JOB).append(SEPARATOR);
builder.append(getJobIDsPatternWOPrefix(jtIdentifier, jobId));
return builder.toString();
} | 3.26 |
hadoop_JobID_m0_rdh | /**
* Construct a JobId object from given string
*
* @return constructed JobId object or null if the given String is null
* @throws IllegalArgumentException
* if the given string is malformed
*/
public static JobID m0(String str) throws IllegalArgumentException {
return ((JobID) (JobID.forName(str)));
} | 3.26 |
hadoop_JobID_downgrade_rdh | /**
* Downgrade a new JobID to an old one
*
* @param old
* a new or old JobID
* @return either old or a new JobID build to match old
*/
public static JobID downgrade(JobID old) {
if (old instanceof JobID) {
return ((JobID) (old));
} else {
return new JobID(old.getJtIdentifier(), old.getId());
}
} | 3.26 |
hadoop_ExcludePrivateAnnotationsStandardDoclet_languageVersion_rdh | /**
* A <a href="http://java.sun.com/javase/6/docs/jdk/api/javadoc/doclet/">Doclet</a>
* for excluding elements that are annotated with
* {@link org.apache.hadoop.classification.InterfaceAudience.Private} or
* {@link org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate}.
* It delegates to the Standard Doclet, and takes the same options.
*/
public class ExcludePrivateAnnotationsStandardDoclet {
public static LanguageVersion languageVersion() {
return LanguageVersion.JAVA_1_5;
} | 3.26 |
hadoop_BoundedByteArrayOutputStream_getBuffer_rdh | /**
* Returns the underlying buffer.
* Data is only valid to {@link #size()}.
*
* @return the underlying buffer.
*/
public byte[] getBuffer() {
return buffer;
} | 3.26 |
hadoop_BoundedByteArrayOutputStream_getLimit_rdh | /**
* Return the current limit.
*
* @return limit.
*/
public int getLimit() {
return limit; } | 3.26 |
hadoop_BoundedByteArrayOutputStream_size_rdh | /**
* Returns the length of the valid data
* currently in the buffer.
*
* @return the length of the valid data.
*/
public int size() {
return currentPointer - startOffset;
} | 3.26 |
hadoop_BoundedByteArrayOutputStream_reset_rdh | /**
* Reset the buffer
*/
public void reset() {
this.limit = buffer.length - startOffset;
this.currentPointer = startOffset;
} | 3.26 |
hadoop_BlockPoolTokenSecretManager_generateToken_rdh | /**
* See {@link BlockTokenSecretManager#generateToken(ExtendedBlock, EnumSet,
* StorageType[], String[])}.
*/
public Token<BlockTokenIdentifier> generateToken(ExtendedBlock b, EnumSet<AccessMode> of, StorageType[] storageTypes, String[] storageIds) throws IOException {
return get(b.getBlockPoolId()).generateToken(b, of, storageTypes, storageIds);
} | 3.26 |
hadoop_BlockPoolTokenSecretManager_createIdentifier_rdh | /**
 * Return an empty BlockTokenIdentifier.
*/
@Override
public BlockTokenIdentifier createIdentifier() {
return new BlockTokenIdentifier();
} | 3.26 |
hadoop_BlockPoolTokenSecretManager_checkAccess_rdh | /**
* See {@link BlockTokenSecretManager#checkAccess(Token, String,
* ExtendedBlock, BlockTokenIdentifier.AccessMode,
* StorageType[], String[])}
*/
public void checkAccess(Token<BlockTokenIdentifier> token, String userId, ExtendedBlock block, AccessMode mode, StorageType[] storageTypes, String[] storageIds) throws InvalidToken {
get(block.getBlockPoolId()).checkAccess(token, userId, block, mode, storageTypes, storageIds);
} | 3.26 |
hadoop_BlockPoolTokenSecretManager_addKeys_rdh | /**
* See {@link BlockTokenSecretManager#addKeys(ExportedBlockKeys)}.
*/
public void addKeys(String bpid, ExportedBlockKeys exportedKeys) throws IOException {
get(bpid).addKeys(exportedKeys);
} | 3.26 |
hadoop_LeaveSafeModeRequest_newInstance_rdh | /**
* API request for the Router leaving safe mode state and updating
* its state in State Store.
*/
public abstract class LeaveSafeModeRequest {
public static LeaveSafeModeRequest newInstance() throws IOException {
return StateStoreSerializer.newRecord(LeaveSafeModeRequest.class);
} | 3.26 |
hadoop_DockerCommand_getCommandOption_rdh | /**
* Returns the docker sub-command string being used
* e.g 'run'.
*/
public final String getCommandOption() {
return this.command;
} | 3.26 |
hadoop_DockerCommand_preparePrivilegedOperation_rdh | /**
* Prepare the privileged operation object that will be used to invoke
* the container-executor.
*
* @param dockerCommand
* Specific command to be run by docker.
* @param containerName
* @param env
* @param nmContext
* @return Returns the PrivilegedOperation object to be used.
* @throws ContainerExecutionException
*/
public PrivilegedOperation preparePrivilegedOperation(DockerCommand dockerCommand, String containerName, Map<String, String> env, Context nmContext) throws ContainerExecutionException {
DockerClient dockerClient = new DockerClient();
String commandFile = dockerClient.writeCommandToTempFile(dockerCommand, ContainerId.fromString(containerName), nmContext);
PrivilegedOperation dockerOp = new PrivilegedOperation(OperationType.RUN_DOCKER_CMD);
dockerOp.appendArgs(commandFile);
return dockerOp;
} | 3.26 |
hadoop_DockerCommand_addCommandArguments_rdh | /**
* Add command commandWithArguments - this method is only meant for use by
* sub-classes.
*
* @param key
* name of the key to be added
* @param value
* value of the key
*/
protected final void addCommandArguments(String key, String value) {
List<String> list = commandArguments.get(key);
if (list != null) {
list.add(value);
return;
}
list = new ArrayList<>();
list.add(value);
this.commandArguments.put(key, list);
} | 3.26 |
hadoop_DockerCommand_setClientConfigDir_rdh | /**
* Add the client configuration directory to the docker command.
*
 * The client configuration option precedes any of the docker subcommands
* (such as run, load, pull, etc). Ordering will be handled by
* container-executor. Docker expects the value to be a directory containing
* the file config.json. This file is typically generated via docker login.
*
* @param clientConfigDir
* - directory containing the docker client config.
*/
public void setClientConfigDir(String clientConfigDir) {
if (clientConfigDir != null) {
addCommandArguments("docker-config", clientConfigDir);
}
} | 3.26 |
hadoop_GetGroupsBase_getUgmProtocol_rdh | /**
* Get a client of the {@link GetUserMappingsProtocol}.
*
* @return A {@link GetUserMappingsProtocol} client proxy.
* @throws IOException
* raised on errors performing I/O.
*/
protected GetUserMappingsProtocol getUgmProtocol() throws IOException {
GetUserMappingsProtocol userGroupMappingProtocol = RPC.getProxy(GetUserMappingsProtocol.class, GetUserMappingsProtocol.versionID, getProtocolAddress(getConf()), UserGroupInformation.getCurrentUser(), getConf(), NetUtils.getSocketFactory(getConf(), GetUserMappingsProtocol.class));
return userGroupMappingProtocol;
} | 3.26 |
hadoop_PathOutputCommitterFactory_createFileOutputCommitter_rdh | /**
* Create an instance of the default committer, a {@link FileOutputCommitter}
* for a task.
*
* @param outputPath
the task's output path, or null if no output path
* has been defined.
* @param context
* the task attempt context
* @return the committer to use
* @throws IOException
* problems instantiating the committer
*/
protected final PathOutputCommitter createFileOutputCommitter(Path outputPath, TaskAttemptContext context) throws IOException {
LOG.debug("Creating FileOutputCommitter for path {} and context {}", outputPath, context);
return new FileOutputCommitter(outputPath, context);
} | 3.26 |
hadoop_PathOutputCommitterFactory_createOutputCommitter_rdh | /**
* Create an output committer for a task attempt.
*
* @param outputPath
* output path. This may be null.
* @param context
* context
* @return a new committer
* @throws IOException
* problems instantiating the committer
*/
public PathOutputCommitter createOutputCommitter(Path outputPath, TaskAttemptContext context) throws IOException {
return createFileOutputCommitter(outputPath, context);
} | 3.26 |
hadoop_PathOutputCommitterFactory_getCommitterFactory_rdh | /**
* Get the committer factory for a configuration.
*
* @param outputPath
* the job's output path. If null, it means that the
* schema is unknown and a per-schema factory cannot be determined.
* @param conf
* configuration
* @return an instantiated committer factory
*/
public static PathOutputCommitterFactory getCommitterFactory(Path outputPath, Configuration conf) {
// determine which key to look up the overall one or a schema-specific
// key
LOG.debug("Looking for committer factory for path {}", outputPath);
String key = COMMITTER_FACTORY_CLASS;
if (StringUtils.isEmpty(conf.getTrimmed(key)) && (outputPath != null)) {
// there is no explicit factory and there's an output path
// Get the scheme of the destination
String scheme = outputPath.toUri().getScheme();
// and see if it has a key
String schemeKey = String.format(COMMITTER_FACTORY_SCHEME_PATTERN, scheme);
if (StringUtils.isNotEmpty(conf.getTrimmed(schemeKey))) {
// it does, so use that key in the classname lookup
LOG.info("Using schema-specific factory for {}", outputPath);
key = schemeKey;
} else {
LOG.debug("No scheme-specific factory defined in {}", schemeKey);
}
}
// create the factory. Before using Configuration.getClass, check
// for an empty configuration value, as that raises ClassNotFoundException.
Class<? extends PathOutputCommitterFactory> factory;
String trimmedValue = conf.getTrimmed(key, "");
if (StringUtils.isEmpty(trimmedValue)) {
// empty/null value, use default
LOG.info("No output committer factory defined," + " defaulting to FileOutputCommitterFactory");
factory = FileOutputCommitterFactory.class;
} else {
// key is set, get the class
factory = conf.getClass(key, FileOutputCommitterFactory.class, PathOutputCommitterFactory.class);
LOG.info("Using OutputCommitter factory class {} from key {}", factory, key);
}
return ReflectionUtils.newInstance(factory, conf);
} | 3.26 |
hadoop_Endpoint_clone_rdh | /**
* Shallow clone: the lists of addresses are shared
*
* @return a cloned instance
* @throws CloneNotSupportedException
*/
@Override
public Object clone() throws CloneNotSupportedException {
return super.clone();
} | 3.26 |
hadoop_Endpoint_m0_rdh | /**
* Create a new address structure of the requested size
*
* @param size
* size to create
* @return the new list
*/
private List<Map<String, String>> m0(int size) {
return new ArrayList<Map<String, String>>(size);
} | 3.26 |
hadoop_Endpoint_validate_rdh | /**
* Validate the record by checking for null fields and other invalid
* conditions
*
* @throws NullPointerException
* if a field is null when it
* MUST be set.
* @throws RuntimeException
* on invalid entries
*/
public void validate() {
Preconditions.checkNotNull(api, "null API field");
Preconditions.checkNotNull(addressType, "null addressType field");
Preconditions.checkNotNull(protocolType, "null protocolType field");
Preconditions.checkNotNull(addresses, "null addresses field");
for (Map<String, String> address : addresses) {
Preconditions.checkNotNull(address, "null element in address");
}
} | 3.26 |
hadoop_StopContainersRequest_m0_rdh | /**
* <p>The request sent by the <code>ApplicationMaster</code> to the
* <code>NodeManager</code> to <em>stop</em> containers.</p>
*
* @see ContainerManagementProtocol#stopContainers(StopContainersRequest)
*/
@Public
@Stable
public abstract class StopContainersRequest {
@Public
@Stable
public static StopContainersRequest m0(List<ContainerId> containerIds) {
StopContainersRequest request = Records.newRecord(StopContainersRequest.class);
request.setContainerIds(containerIds);
return request;
} | 3.26 |
hadoop_SaveTaskManifestStage_executeStage_rdh | /**
* Save the manifest to a temp file and rename to the final
* manifest destination.
*
* @param manifest
* manifest
* @return the path to the final entry
* @throws IOException
* IO failure.
*/
@Override
protected Path executeStage(final TaskManifest manifest) throws IOException {
final Path manifestDir = getTaskManifestDir();
// final manifest file is by task ID
Path manifestFile = manifestPathForTask(manifestDir, getRequiredTaskId());
Path manifestTempFile = manifestTempPathForTaskAttempt(manifestDir, getRequiredTaskAttemptId());
LOG.info("{}: Saving manifest file to {}", getName(), manifestFile);
save(manifest, manifestTempFile, manifestFile);
return manifestFile;
} | 3.26 |
hadoop_SaveSuccessFileStage_getStageName_rdh | /**
* Stage name is always job commit.
*
* @param arguments
* args to the invocation.
* @return stage name
*/
@Override
protected String getStageName(ManifestSuccessData arguments) {
// set it to the job commit stage, always.
return OP_STAGE_JOB_COMMIT;
} | 3.26 |
hadoop_OBSCommonUtils_blockRootDelete_rdh | /**
* Reject any request to delete an object where the key is root.
*
* @param bucket
* bucket name
* @param key
* key to validate
* @throws InvalidRequestException
* if the request was rejected due to a
* mistaken attempt to delete the root
* directory.
*/
static void blockRootDelete(final String bucket, final String key) throws InvalidRequestException {
if (key.isEmpty() || "/".equals(key)) {
throw new InvalidRequestException(("Bucket " + bucket) + " cannot be deleted");
}
} | 3.26 |
hadoop_OBSCommonUtils_translateException_rdh | /**
* Translate an exception raised in an operation into an IOException. The
* specific type of IOException depends on the class of {@link ObsException}
* passed in, and any status codes included in the operation. That is: HTTP
* error codes are examined and can be used to build a more specific
* response.
*
* @param operation
* operation
* @param path
* path operated on (must not be null)
* @param exception
* obs exception raised
* @return an IOE which wraps the caught exception.
*/
static IOException translateException(final String operation, final Path path, final ObsException exception) {
return translateException(operation, path.toString(), exception);
} | 3.26 |
hadoop_OBSCommonUtils_getBucketFsStatus_rdh | /**
* Get the fs status of the bucket.
*
* @param obs
* OBS client instance
* @param bucketName
* bucket name
* @return boolean value indicating if this bucket is a posix bucket
* @throws FileNotFoundException
* the bucket is absent
* @throws IOException
* any other problem talking to OBS
*/
static boolean getBucketFsStatus(final ObsClient obs, final String bucketName) throws FileNotFoundException, IOException {
try {
GetBucketFSStatusRequest getBucketFsStatusRequest = new GetBucketFSStatusRequest();
getBucketFsStatusRequest.setBucketName(bucketName);
GetBucketFSStatusResult getBucketFsStatusResult = obs.getBucketFSStatus(getBucketFsStatusRequest);
FSStatusEnum fsStatus = getBucketFsStatusResult.getStatus();
return fsStatus == FSStatusEnum.ENABLED;
} catch (ObsException e) {
LOG.error(e.toString());
throw translateException("getBucketFsStatus", bucketName, e);
}
} | 3.26 |
hadoop_OBSCommonUtils_longOption_rdh | /**
* Get a long option not smaller than the minimum allowed value.
*
* @param conf
* configuration
* @param key
* key to look up
* @param defVal
* default value
* @param min
* minimum value
* @return the value
* @throws IllegalArgumentException
* if the value is below the minimum
*/
static long longOption(final Configuration conf, final String key, final long defVal, final long min) {
long v = conf.getLong(key, defVal);
Preconditions.checkArgument(v >= min, String.format("Value of %s: %d is below the minimum value %d", key, v, min));
LOG.debug("Value of {} is {}", key, v);
return v;
} | 3.26 |
hadoop_OBSCommonUtils_toLocatedFileStatus_rdh | /**
* Build a {@link LocatedFileStatus} from a {@link FileStatus} instance.
*
* @param owner
* the owner OBSFileSystem instance
* @param status
* file status
* @return a located status with block locations set up from this FS.
* @throws IOException
* IO Problems.
*/
static LocatedFileStatus toLocatedFileStatus(final OBSFileSystem owner, final FileStatus status) throws IOException {
return new LocatedFileStatus(status, status.isFile() ? owner.getFileBlockLocations(status, 0, status.getLen()) : null);
} | 3.26 |
hadoop_OBSCommonUtils_maybeAddTrailingSlash_rdh | /**
* Turns a path (relative or otherwise) into an OBS key, adding a trailing "/"
* if the path is not the root <i>and</i> does not already have a "/" at the
* end.
*
* @param key
* obs key or ""
 * @return the key with a trailing "/", or, if it is the root key, "".
*/
static String maybeAddTrailingSlash(final String key) {
if ((!StringUtils.isEmpty(key)) && (!key.endsWith("/"))) {
return key + '/';
} else {
return key;
}
} | 3.26 |
hadoop_OBSCommonUtils_innerListStatus_rdh | /**
* List the statuses of the files/directories in the given path if the path is
* a directory.
*
* @param owner
* the owner OBSFileSystem instance
* @param f
* given path
* @param recursive
* flag indicating if list is recursive
* @return the statuses of the files/directories in the given patch
* @throws FileNotFoundException
* when the path does not exist;
* @throws IOException
* due to an IO problem.
* @throws ObsException
* on failures inside the OBS SDK
*/
static FileStatus[] innerListStatus(final OBSFileSystem owner, final Path f, final boolean recursive) throws FileNotFoundException, IOException, ObsException {
Path path = qualify(owner, f);
String key = pathToKey(owner, path);
List<FileStatus> result;
final FileStatus fileStatus = owner.getFileStatus(path);
if (fileStatus.isDirectory()) {
key = maybeAddTrailingSlash(key);
String delimiter = (recursive) ? null : "/";
ListObjectsRequest request = createListObjectsRequest(owner, key, delimiter);
LOG.debug("listStatus: doing listObjects for directory {} - recursive {}", f, recursive);
OBSListing.FileStatusListingIterator v30 = owner.getObsListing().createFileStatusListingIterator(path, request, OBSListing.ACCEPT_ALL, new OBSListing.AcceptAllButSelfAndS3nDirs(path));
result = new ArrayList<>(v30.getBatchSize());
while (v30.hasNext()) {
result.add(v30.next());
}
return result.toArray(new FileStatus[0]);
} else {
LOG.debug("Adding: rd (not a dir): {}", path);
FileStatus[] stats = new FileStatus[1];
stats[0] = fileStatus;
return stats;
}
} | 3.26 |
hadoop_OBSCommonUtils_verifyBucketExists_rdh | /**
* Verify that the bucket exists. This does not check permissions, not even
* read access.
*
* @param owner
* the owner OBSFileSystem instance
* @throws FileNotFoundException
* the bucket is absent
* @throws IOException
* any other problem talking to OBS
*/
static void verifyBucketExists(final OBSFileSystem owner) throws FileNotFoundException, IOException {
int retryTime = 1;
while (true) {
try {
if (!owner.getObsClient().headBucket(owner.getBucket())) {
throw new FileNotFoundException(("Bucket " + owner.getBucket()) + " does not exist");
}
return;
} catch (ObsException e) {
LOG.warn("Failed to head bucket for [{}], retry time [{}], " + "exception [{}]", owner.getBucket(), retryTime, translateException("doesBucketExist", owner.getBucket(), e));
if (MAX_RETRY_TIME == retryTime) {
throw translateException("doesBucketExist", owner.getBucket(), e);
}
try {
Thread.sleep(DELAY_TIME);
} catch (InterruptedException ie) {
throw e;
}
}
retryTime++;
}
} | 3.26 |
hadoop_OBSCommonUtils_deleteObjects_rdh | /**
* Perform a bulk object delete operation. Increments the {@code OBJECT_DELETE_REQUESTS} and write operation statistics.
*
* @param owner
* the owner OBSFileSystem instance
* @param deleteRequest
* keys to delete on the obs-backend
* @throws IOException
* on any failure to delete objects
*/
static void deleteObjects(final OBSFileSystem owner, final DeleteObjectsRequest deleteRequest) throws IOException {
DeleteObjectsResult result;
deleteRequest.setQuiet(true);
try {
result = owner.getObsClient().deleteObjects(deleteRequest);
owner.getSchemeStatistics().incrementWriteOps(1);
} catch (ObsException e) {
LOG.warn("delete objects failed, request [{}], request id [{}] - " + "error code [{}] - error message [{}]", deleteRequest, e.getErrorRequestId(), e.getErrorCode(), e.getErrorMessage());
for (KeyAndVersion keyAndVersion : deleteRequest.getKeyAndVersionsList()) {
deleteObject(owner, keyAndVersion.getKey());
}
return;
}
// delete one by one if there are errors
if (result != null) {
List<DeleteObjectsResult.ErrorResult> errorResults = result.getErrorResults();
if (!errorResults.isEmpty()) {
LOG.warn("bulk delete {} objects, {} failed, begin to delete " + "one by one.", deleteRequest.getKeyAndVersionsList().size(), errorResults.size());
for (DeleteObjectsResult.ErrorResult errorResult : errorResults) {
deleteObject(owner, errorResult.getObjectKey());
}
}
}
} | 3.26 |
hadoop_OBSCommonUtils_getMultipartSizeProperty_rdh | /**
* Get a size property from the configuration: this property must be at least
* equal to {@link OBSConstants#MULTIPART_MIN_SIZE}. If it is too small, it is
* rounded up to that minimum, and a warning printed.
*
* @param conf
* configuration
* @param property
* property name
* @param defVal
* default value
* @return the value, guaranteed to be above the minimum size
*/
public static long getMultipartSizeProperty(final Configuration conf, final String property, final long defVal) {
long partSize = conf.getLongBytes(property, defVal);
if (partSize < OBSConstants.MULTIPART_MIN_SIZE) {
LOG.warn("{} must be at least 5 MB; configured value is {}", property, partSize);
partSize = OBSConstants.MULTIPART_MIN_SIZE;
}
return partSize;
} | 3.26 |
hadoop_OBSCommonUtils_newAppendFileRequest_rdh | /**
* Create a appendFile request. Adds the ACL and metadata
*
* @param owner
* the owner OBSFileSystem instance
* @param key
* key of object
* @param tmpFile
* temp file or input stream
* @param recordPosition
* client record next append position
* @return the request
* @throws IOException
* any problem
*/
static WriteFileRequest newAppendFileRequest(final OBSFileSystem owner, final String key, final long recordPosition, final File tmpFile) throws IOException {
Preconditions.checkNotNull(key);
Preconditions.checkNotNull(tmpFile);
ObsFSAttribute obsFsAttribute;
try {
GetAttributeRequest getAttributeReq = new GetAttributeRequest(owner.getBucket(), key);
obsFsAttribute = owner.getObsClient().getAttribute(getAttributeReq);
} catch (ObsException e) {
throw translateException("GetAttributeRequest", key, e);
}
long appendPosition = Math.max(recordPosition, obsFsAttribute.getContentLength());
if (recordPosition != obsFsAttribute.getContentLength()) {
LOG.warn("append url[{}] position[{}], file contentLength[{}] not" + " equal to recordPosition[{}].", key, appendPosition, obsFsAttribute.getContentLength(), recordPosition);
}
WriteFileRequest writeFileReq = new WriteFileRequest(owner.getBucket(), key, tmpFile, appendPosition);
writeFileReq.setAcl(owner.getCannedACL());
return writeFileReq;
} | 3.26 |
hadoop_OBSCommonUtils_extractException_rdh | /**
* Extract an exception from a failed future, and convert to an IOE.
*
* @param operation
* operation which failed
* @param path
* path operated on (may be null)
* @param ee
* execution exception
* @return an IOE which can be thrown
*/
static IOException extractException(final String operation, final String path, final ExecutionException ee) {
IOException ioe;
Throwable cause = ee.getCause();
if (cause instanceof ObsException) {
ioe = translateException(operation, path, ((ObsException) (cause)));
} else if (cause instanceof IOException) {
ioe = ((IOException) (cause));
} else {
ioe = new IOException((operation + " failed: ") + cause, cause);
}
return ioe;
} | 3.26 |
hadoop_OBSCommonUtils_putObjectDirect_rdh | /**
* PUT an object directly (i.e. not via the transfer manager). Byte length is
* calculated from the file length, or, if there is no file, from the content
* length of the header. <i>Important: this call will close any input stream
* in the request.</i>
*
* @param owner
* the owner OBSFileSystem instance
* @param putObjectRequest
* the request
* @return the upload initiated
* @throws ObsException
* on problems
*/
static PutObjectResult putObjectDirect(final OBSFileSystem owner, final PutObjectRequest putObjectRequest) throws ObsException {
long len;
if (putObjectRequest.getFile() != null) {
len = putObjectRequest.getFile().length();
} else {
len = putObjectRequest.getMetadata().getContentLength();
}
PutObjectResult result = owner.getObsClient().putObject(putObjectRequest);
owner.getSchemeStatistics().incrementWriteOps(1);
owner.getSchemeStatistics().incrementBytesWritten(len);
return result;
} | 3.26 |
hadoop_OBSCommonUtils_m3_rdh | /**
* Propagates bucket-specific settings into generic OBS configuration keys.
* This is done by propagating the values of the form {@code fs.obs.bucket.${bucket}.key} to {@code fs.obs.key}, for all values of "key"
* other than a small set of unmodifiable values.
*
* <p>The source of the updated property is set to the key name of the
* bucket property, to aid in diagnostics of where things came from.
*
* <p>Returns a new configuration. Why the clone? You can use the same conf
* for different filesystems, and the original values are not updated.
*
* <p>The {@code fs.obs.impl} property cannot be set, nor can any with the
* prefix {@code fs.obs.bucket}.
*
* <p>This method does not propagate security provider path information
* from the OBS property into the Hadoop common provider: callers must call
* {@link #patchSecurityCredentialProviders(Configuration)} explicitly.
*
* @param source
* Source Configuration object.
* @param bucket
* bucket name. Must not be empty.
* @return a (potentially) patched clone of the original.
*/
static Configuration m3(final Configuration source, final String bucket) {
Preconditions.checkArgument(StringUtils.isNotEmpty(bucket), "bucket");
final String bucketPrefix = (OBSConstants.FS_OBS_BUCKET_PREFIX + bucket) + '.';
LOG.debug("Propagating entries under {}", bucketPrefix);
final Configuration dest = new Configuration(source);
for (Map.Entry<String, String> entry : source) {
final String key = entry.getKey();
// get the (unexpanded) value.
final String value = entry.getValue();
if ((!key.startsWith(bucketPrefix)) || bucketPrefix.equals(key)) {
continue;
}
// there's a bucket prefix, so strip it
final String stripped = key.substring(bucketPrefix.length());
if (stripped.startsWith("bucket.") || "impl".equals(stripped)) {
// tell user off
LOG.debug("Ignoring bucket option {}", key);
} else {
// propagate the value, building a new origin field.
// to track overwrites, the generic key is overwritten even if
// already matches the new one.
final String generic = OBSConstants.FS_OBS_PREFIX + stripped;
LOG.debug("Updating {}", generic);
dest.set(generic, value, key);
}
}
return dest;
} | 3.26 |
hadoop_OBSCommonUtils_innerIsFolderEmpty_rdh | // Used to check if a folder is empty or not.
static boolean innerIsFolderEmpty(final OBSFileSystem owner, final String key) throws FileNotFoundException, ObsException {
String obsKey = maybeAddTrailingSlash(key);
ListObjectsRequest request = new ListObjectsRequest();
request.setBucketName(owner.getBucket());
request.setPrefix(obsKey);
request.setDelimiter("/");
request.setMaxKeys(MAX_KEYS_FOR_CHECK_FOLDER_EMPTY);
owner.getSchemeStatistics().incrementReadOps(1);
ObjectListing objects = owner.getObsClient().listObjects(request);
if ((!objects.getCommonPrefixes().isEmpty()) || (!objects.getObjects().isEmpty())) {
if (isFolderEmpty(obsKey, objects)) {
LOG.debug("Found empty directory {}", obsKey);
return true;
}
if (LOG.isDebugEnabled()) {
LOG.debug("Found path as directory (with /): {}/{}", objects.getCommonPrefixes().size(), objects.getObjects().size());
for (ObsObject summary : objects.getObjects()) {
LOG.debug("Summary: {} {}", summary.getObjectKey(), summary.getMetadata().getContentLength());
}
for (String prefix : objects.getCommonPrefixes()) {
LOG.debug("Prefix: {}", prefix);
}
}
LOG.debug("Found non-empty directory {}", obsKey);
return false;
} else if (obsKey.isEmpty()) {
LOG.debug("Found root directory");
return true;
} else if (owner.isFsBucket()) {
LOG.debug("Found empty directory {}", obsKey);
return true;
}
LOG.debug("Not Found: {}", obsKey);
throw new FileNotFoundException("No such file or directory: " + obsKey);
} | 3.26 |
hadoop_OBSCommonUtils_qualify_rdh | /**
* Qualify a path.
*
* @param owner
* the owner OBSFileSystem instance
* @param path
* path to qualify
* @return a qualified path.
*/
static Path qualify(final OBSFileSystem owner, final Path path) {
return path.makeQualified(owner.getUri(), owner.getWorkingDirectory());
} | 3.26 |
hadoop_OBSCommonUtils_maybeAddBeginningSlash_rdh | /**
* Add obs key started '/'.
*
* @param key
* object key
* @return new key
*/
static String maybeAddBeginningSlash(final String key) {
return (!StringUtils.isEmpty(key)) && (!key.startsWith("/")) ? "/" + key : key;
} | 3.26 |
hadoop_OBSCommonUtils_patchSecurityCredentialProviders_rdh | /**
* Patch the security credential provider information in {@link #CREDENTIAL_PROVIDER_PATH} with the providers listed in {@link OBSConstants#OBS_SECURITY_CREDENTIAL_PROVIDER_PATH}.
*
* <p>This allows different buckets to use different credential files.
*
* @param conf
* configuration to patch
*/
static void patchSecurityCredentialProviders(final Configuration conf) {
Collection<String> customCredentials = conf.getStringCollection(OBSConstants.OBS_SECURITY_CREDENTIAL_PROVIDER_PATH);
Collection<String> hadoopCredentials = conf.getStringCollection(CREDENTIAL_PROVIDER_PATH);
if (!customCredentials.isEmpty()) {
List<String> all = Lists.newArrayList(customCredentials);
all.addAll(hadoopCredentials);
String joined = StringUtils.join(all, ',');
LOG.debug("Setting {} to {}", CREDENTIAL_PROVIDER_PATH, joined);
conf.set(CREDENTIAL_PROVIDER_PATH, joined, "patch of " + OBSConstants.OBS_SECURITY_CREDENTIAL_PROVIDER_PATH);
}
} | 3.26 |
hadoop_OBSCommonUtils_lookupPassword_rdh | /**
* Get a password from a configuration/configured credential providers.
*
* @param conf
* configuration
* @param key
* key to look up
* @return a password or the value in {@code defVal}
* @throws IOException
* on any problem
*/
private static String lookupPassword(final Configuration conf, final String key) throws IOException {
try {
final char[] pass = conf.getPassword(key);
return pass != null ? new String(pass).trim() : "";
} catch (IOException ioe) {
throw new IOException("Cannot find password option " + key, ioe);
}
} | 3.26 |