name (stringlengths 12-178) | code_snippet (stringlengths 8-36.5k) | score (float64 3.26-3.68) |
---|---|---|
hadoop_CheckpointCommand_getSignature_rdh | /**
* Checkpoint signature is used to ensure
* that nodes are talking about the same checkpoint.
*/
public CheckpointSignature getSignature() {
return cSig;
} | 3.26 |
hadoop_StorageStatistics_getValue_rdh | /**
*
* @return The value of this statistic.
*/
public long getValue() {
return f1;
} | 3.26 |
hadoop_StorageStatistics_getName_rdh | /**
* Get the name of this StorageStatistics object.
*
* @return name of this StorageStatistics object
*/
public String getName() {
return name;
} | 3.26 |
hadoop_AbfsThrottlingInterceptFactory_getInstance_rdh | /**
* Returns an instance of AbfsThrottlingIntercept.
*
* @param accountName
* The account for which we need instance of throttling intercept.
* @param abfsConfiguration
* The object of abfsconfiguration class.
* @return Instance of AbfsThrottlingIntercept.
*/
static synchronized AbfsThrottlingIntercept getInstance(String accountName, AbfsConfiguration abfsConfiguration) {
abfsConfig = abfsConfiguration;
AbfsThrottlingIntercept intercept;
if (!abfsConfiguration.isAutoThrottlingEnabled()) {
return AbfsNoOpThrottlingIntercept.INSTANCE;
}
// If singleton is enabled use a static instance of the intercept class for all accounts
if (!abfsConfiguration.accountThrottlingEnabled()) {
intercept = AbfsClientThrottlingIntercept.initializeSingleton(abfsConfiguration);
} else {
// Return the instance from the map
intercept = interceptMap.get(accountName);
if (intercept == null) {
intercept = new AbfsClientThrottlingIntercept(accountName, abfsConfiguration);
interceptMap.put(accountName, intercept);
}
}
return intercept;
} | 3.26 |
hadoop_AbfsThrottlingInterceptFactory_factory_rdh | /**
* Returns instance of throttling intercept.
*
* @param accountName
* Account name.
* @return instance of throttling intercept.
*/
private static AbfsClientThrottlingIntercept factory(final String accountName) {
return new AbfsClientThrottlingIntercept(accountName, abfsConfig);
} | 3.26 |
hadoop_AbfsThrottlingInterceptFactory_referenceLost_rdh | /**
* Reference lost callback.
*
* @param accountName
* key lost.
*/
private static void referenceLost(String accountName) {
lostReferences.add(accountName);
} | 3.26 |
hadoop_TimelineFilterList_getOperator_rdh | /**
* Get the operator.
*
* @return operator
*/
public Operator getOperator() {
return operator;
} | 3.26 |
hadoop_TimelineFilterList_m0_rdh | /**
* Get the filter list.
*
* @return filterList
*/
public List<TimelineFilter> m0() {
return filterList;
} | 3.26 |
hadoop_InMemoryConfigurationStore_getCurrentVersion_rdh | /**
* Configuration mutations are not logged (i.e. not persisted), and are
* therefore not versioned. Hence, a current version is not applicable.
*
* @return null A current version not applicable for this store.
*/
@Override
public Version getCurrentVersion() {
// Does nothing.
return null;
} | 3.26 |
hadoop_InMemoryConfigurationStore_checkVersion_rdh | /**
* Configuration mutations are not logged (i.e. not persisted), and are
* therefore not versioned. Hence, the version is always compatible,
* since it is in-memory.
*/
@Override
public void checkVersion() {
// Does nothing. (Version is always compatible since it's in memory)
} | 3.26 |
hadoop_InMemoryConfigurationStore_storeVersion_rdh | /**
* Configuration mutations are not logged (i.e. not persisted), and are
* therefore not versioned. Hence, there is no version information to store.
*
* @throws Exception
* if any exception occurs during store Version.
*/
@Override
public void storeVersion() throws Exception {
// Does nothing.
} | 3.26 |
hadoop_InMemoryConfigurationStore_getConfStoreVersion_rdh | /**
* Configuration mutations are applied directly in-memory. As such, there is no
* persistent configuration store.
* As there is no configuration store for versioning purposes,
* a conf store version is not applicable.
*
* @return null Conf store version not applicable for this store.
* @throws Exception
* if any exception occurs during getConfStoreVersion.
*/
@Override
public Version getConfStoreVersion() throws Exception {
// Does nothing.
return null;
} | 3.26 |
hadoop_InMemoryConfigurationStore_getLogs_rdh | /**
* Configuration mutations are not logged (i.e. not persisted) but directly
* confirmed. As such, a list of persisted configuration mutations does not
* exist.
*
* @return null Configuration mutation list not applicable for this store.
*/
@Override
protected LinkedList<LogMutation> getLogs() {
// Unimplemented.
return null;
} | 3.26 |
hadoop_InMemoryConfigurationStore_logMutation_rdh | /**
* This method does not log as it does not support backing store.
* The mutation to be applied on top of schedConf will be directly passed
* in confirmMutation.
*/
@Override
public void logMutation(LogMutation logMutation) {
} | 3.26 |
hadoop_InMemoryConfigurationStore_getConfirmedConfHistory_rdh | /**
* Configuration mutations are not logged (i.e. not persisted) but directly
* confirmed. As such, a list of persisted configuration mutations does not
* exist.
*
* @return null Configuration mutation list not applicable for this store.
*/
@Override
public List<LogMutation> getConfirmedConfHistory(long fromId) {
// Unimplemented.
return null;
} | 3.26 |
hadoop_MappingRuleActionBase_setFallbackSkip_rdh | /**
* Sets the fallback method to skip. If the action cannot be executed,
* we move on to the next rule, ignoring this one.
*
* @return MappingRuleAction The same object for method chaining.
*/
public MappingRuleAction setFallbackSkip() {
fallback = MappingRuleResult.createSkipResult();
return this;
} | 3.26 |
hadoop_MappingRuleActionBase_setFallbackDefaultPlacement_rdh | /**
* Sets the fallback method to place to default. If the action cannot be
* executed, the application will be placed into the default queue; if the
* default queue does not exist, the application will get rejected.
*
* @return MappingRuleAction The same object for method chaining.
*/
public MappingRuleAction setFallbackDefaultPlacement() {
fallback = MappingRuleResult.createDefaultPlacementResult();
return this;
} | 3.26 |
hadoop_MappingRuleActionBase_getFallback_rdh | /**
* Returns the fallback action to be taken if the main action (result returned
* by the execute method) fails.
* e.g. Target queue does not exist, or reference is ambiguous
*
* @return The fallback action to be taken if the main action fails
*/
public MappingRuleResult getFallback() {
return fallback;
} | 3.26 |
hadoop_MappingRuleActionBase_setFallbackReject_rdh | /**
* Sets the fallback method to reject. If the action cannot be executed, the
* application will get rejected.
*
* @return MappingRuleAction The same object for method chaining.
*/
public MappingRuleAction setFallbackReject() {
fallback = MappingRuleResult.createRejectResult();
return this;
} | 3.26 |
hadoop_TimestampGenerator_getTruncatedTimestamp_rdh | /**
* Truncates the last few digits of the timestamp which were supplemented by
* the TimestampGenerator#getSupplementedTimestamp function.
*
* @param incomingTS
* Timestamp to be truncated.
* @return a truncated timestamp value
*/
public static long getTruncatedTimestamp(long incomingTS) {
return incomingTS / TS_MULTIPLIER;
} | 3.26 |
hadoop_TimestampGenerator_getUniqueTimestamp_rdh | /**
* Returns a timestamp value unique within the scope of this
* {@code TimestampGenerator} instance. For usage by HBase
* {@code RegionObserver} coprocessors, this normally means unique within a
* given region.
*
* Unlikely scenario of generating a non-unique timestamp: if there is a
* sustained rate of more than 1M hbase writes per second AND if region fails
* over within that time range of timestamps being generated then there may be
* collisions writing to a cell version of the same column.
*
* @return unique timestamp.
*/
public long getUniqueTimestamp() {
long lastTs;
long nextTs;
do {
lastTs = lastTimestamp.get();
nextTs = Math.max(lastTs + 1, currentTime());
} while (!lastTimestamp.compareAndSet(lastTs, nextTs) );
return nextTs;
} | 3.26 |
hadoop_MutableStat_setUpdateTimeStamp_rdh | /**
* Set whether to update the snapshot time or not.
*
* @param updateTimeStamp
* enable update stats snapshot timestamp
*/
public synchronized void setUpdateTimeStamp(boolean updateTimeStamp) {
this.updateTimeStamp = updateTimeStamp;
} | 3.26 |
hadoop_MutableStat_lastStat_rdh | /**
* Return a SampleStat object that supports
* calls like StdDev and Mean.
*
* @return SampleStat
*/
public SampleStat lastStat() {
return changed() ? intervalStat : prevStat;
} | 3.26 |
hadoop_MutableStat_resetMinMax_rdh | /**
* Reset the all time min max of the metric
*/
public void resetMinMax() {
minMax.reset();
} | 3.26 |
hadoop_MutableStat_add_rdh | /**
* Add a snapshot to the metric.
*
* @param value
* of the metric
*/
public synchronized void add(long value) {
intervalStat.add(value);
minMax.add(value);
setChanged();
} | 3.26 |
hadoop_MutableStat_getSnapshotTimeStamp_rdh | /**
*
* @return the SampleStat snapshot timestamp.
*/
public long getSnapshotTimeStamp() {
return snapshotTimeStamp;
} | 3.26 |
hadoop_EntityCacheItem_forceRelease_rdh | /**
* Force releasing the cache item for the given group id, even though there
* may be active references.
*/
public synchronized void forceRelease() {
try {
if (store != null) {
store.close();
}
} catch (IOException e) {
LOG.warn("Error closing timeline store", e);
}
store = null;
// reset offsets so next time logs are re-parsed
for (LogInfo log : appLogs.getDetailLogs()) {
if (log.getFilename().contains(groupId.toString())) {
log.setOffset(0);
}
}
LOG.debug("Cache for group {} released. ", groupId);
} | 3.26 |
hadoop_EntityCacheItem_getAppLogs_rdh | /**
*
* @return The application logs associated with this cache item; may be null.
*/
public synchronized AppLogs getAppLogs() {
return this.appLogs;
} | 3.26 |
hadoop_EntityCacheItem_setAppLogs_rdh | /**
* Set the application logs to this cache item. The entity group should be
* associated with this application.
*
* @param incomingAppLogs
* Application logs this cache item mapped to
*/
public synchronized void setAppLogs(EntityGroupFSTimelineStore.AppLogs incomingAppLogs) {
this.appLogs = incomingAppLogs;
} | 3.26 |
hadoop_Trash_expunge_rdh | /**
* Delete old checkpoint(s).
*
* @throws IOException
* raised on errors performing I/O.
*/
public void expunge() throws IOException {
trashPolicy.deleteCheckpoint();
} | 3.26 |
hadoop_Trash_getCurrentTrashDir_rdh | /**
* Get the current trash directory.
*
* @throws IOException
* raised on errors performing I/O.
* @return the current trash directory.
*/
Path getCurrentTrashDir() throws IOException {
return trashPolicy.getCurrentTrashDir();
} | 3.26 |
hadoop_Trash_moveToAppropriateTrash_rdh | /**
* In case of symlinks or mount points, one has to move to the appropriate
* trash bin in the actual volume of the path p being deleted.
*
* Hence we get the file system of the fully-qualified resolved path and
* then move the path p to the trash bin in that volume.
*
* @param fs
* - the filesystem of path p
* @param p
* - the path being deleted - to be moved to trash
* @param conf
* - configuration
* @return false if the item is already in the trash or trash is disabled
* @throws IOException
* on error
*/
public static boolean moveToAppropriateTrash(FileSystem fs, Path p, Configuration conf) throws IOException {
Path fullyResolvedPath = fs.resolvePath(p);
FileSystem fullyResolvedFs = FileSystem.get(fullyResolvedPath.toUri(), conf);
// If the trash interval is configured server side then clobber this
// configuration so that we always respect the server configuration.
try {
long trashInterval = fullyResolvedFs.getServerDefaults(fullyResolvedPath).getTrashInterval();
if (0 != trashInterval) {
Configuration confCopy = new Configuration(conf);
confCopy.setLong(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, trashInterval);
conf = confCopy;
}
} catch (Exception e) {
// If we can not determine that trash is enabled server side then
// bail rather than potentially deleting a file when trash is enabled.
LOG.warn("Failed to get server trash configuration", e);
throw new IOException("Failed to get server trash configuration", e);
}
/* In HADOOP-18144, we changed getTrashRoot() in ViewFileSystem to return a
viewFS path, instead of a targetFS path. moveToTrash works for
ViewFileSystem now. ViewFileSystem will do path resolution internally by
itself.
When localized trash flag is enabled:
1). if fs is a ViewFileSystem, we can initialize Trash() with a
ViewFileSystem object;
2). When fs is not a ViewFileSystem, the only place we would need to
resolve a path is for symbolic links. However, symlink is not
enabled in Hadoop due to the complexity to support it
(HADOOP-10019).
*/
if (conf.getBoolean(CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT, CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT_DEFAULT)) {
Trash trash = new Trash(fs, conf);
return trash.moveToTrash(p);
}
Trash trash = new Trash(fullyResolvedFs, conf);
return trash.moveToTrash(fullyResolvedPath);
} | 3.26 |
hadoop_Trash_checkpoint_rdh | /**
* Create a trash checkpoint.
*
* @throws IOException
* raised on errors performing I/O.
*/
public void checkpoint() throws IOException {
trashPolicy.createCheckpoint();
} | 3.26 |
hadoop_Trash_moveToTrash_rdh | /**
* Move a file or directory to the current trash directory.
*
* @param path
* the path.
* @return false if the item is already in the trash or trash is disabled
* @throws IOException
* raised on errors performing I/O.
*/
public boolean moveToTrash(Path path) throws IOException {
return trashPolicy.moveToTrash(path);
} | 3.26 |
hadoop_Trash_getTrashPolicy_rdh | /**
* get the configured trash policy.
*
* @return TrashPolicy.
*/
TrashPolicy getTrashPolicy() {
return trashPolicy;
} | 3.26 |
hadoop_Trash_expungeImmediately_rdh | /**
* Delete all trash immediately.
*
* @throws IOException
* raised on errors performing I/O.
*/
public void expungeImmediately() throws IOException {
trashPolicy.createCheckpoint();
trashPolicy.deleteCheckpointsImmediately();
} | 3.26 |
hadoop_Trash_getEmptier_rdh | /**
* Return a {@link Runnable} that periodically empties the trash of all
* users, intended to be run by the superuser.
*
* @throws IOException
* raised on errors performing I/O.
* @return Runnable.
*/
public Runnable getEmptier() throws IOException {
return trashPolicy.getEmptier();
} | 3.26 |
hadoop_Trash_isEnabled_rdh | /**
* Returns whether the trash is enabled for this filesystem.
*
* @return true if trash is enabled, false otherwise.
*/
public boolean isEnabled() {
return trashPolicy.isEnabled();
} | 3.26 |
hadoop_DataStatistics_meanCI_rdh | /**
* Calculates the mean value within the 95% confidence interval.
* 1.96 is the standard factor for 95%.
*
* @return the mean value plus the 95% confidence interval
*/
public synchronized double meanCI() {
if (count <= 1) {
return 0.0;
}
double currMean = mean();
double currStd = std();
return currMean + ((DEFAULT_CI_FACTOR * currStd) / Math.sqrt(count));
} | 3.26 |
hadoop_PositionedReadable_minSeekForVectorReads_rdh | /**
* What is the smallest reasonable seek?
*
* @return the minimum number of bytes
*/
default int minSeekForVectorReads() {
return 4 * 1024;
} | 3.26 |
hadoop_PositionedReadable_maxReadSizeForVectorReads_rdh | /**
* What is the largest size that we should group ranges together as?
*
* @return the number of bytes to read at once
*/
default int maxReadSizeForVectorReads() {
return 1024 * 1024;
} | 3.26 |
hadoop_PositionedReadable_readVectored_rdh | /**
* Read fully a list of file ranges asynchronously from this file.
* The default iterates through the ranges to read each synchronously, but
* the intent is that FSDataInputStream subclasses can make more efficient
* readers.
* As a result of the call, each range will have FileRange.setData(CompletableFuture)
* called with a future that when complete will have a ByteBuffer with the
* data from the file's range.
* <p>
* The position returned by getPos() after readVectored() is undefined.
* </p>
* <p>
* If a file is changed while the readVectored() operation is in progress, the output is
* undefined. Some ranges may have old data, some may have new and some may have both.
* </p>
* <p>
* While a readVectored() operation is in progress, normal read api calls may block.
* </p>
*
* @param ranges
* the byte ranges to read
* @param allocate
* the function to allocate ByteBuffer
* @throws IOException
* any IOE.
*/
default void readVectored(List<? extends FileRange> ranges, IntFunction<ByteBuffer> allocate) throws IOException {
VectoredReadUtils.readVectored(this, ranges, allocate);
} | 3.26 |
hadoop_FieldSelectionHelper_extractFields_rdh | /**
* Extract the actual field numbers from the given field specs.
* If a field spec is in the form of "n-" (like 3-), then n will be the
* return value. Otherwise, -1 will be returned.
*
* @param fieldListSpec
* an array of field specs
* @param fieldList
* an array of field numbers extracted from the specs.
* @return number n if some field spec is in the form of "n-", -1 otherwise.
*/
private static int extractFields(String[] fieldListSpec, List<Integer> fieldList) {
int allFieldsFrom = -1;
int i = 0;
int j = 0;
int pos = -1;
String fieldSpec = null;
for (i = 0; i < fieldListSpec.length; i++) {
fieldSpec = fieldListSpec[i];
if (fieldSpec.length() == 0) {
continue;
}
pos = fieldSpec.indexOf('-');
if (pos < 0) {
Integer fn = Integer.valueOf(fieldSpec);
fieldList.add(fn);
} else {
String start = fieldSpec.substring(0, pos);
String end = fieldSpec.substring(pos + 1);
if (start.length() == 0) {
start = "0";
}
if (end.length() == 0) {
allFieldsFrom = Integer.parseInt(start);
continue;
}
int startPos = Integer.parseInt(start);
int endPos = Integer.parseInt(end);
for (j = startPos; j <= endPos; j++) {
fieldList.add(j);
}
}
}
return allFieldsFrom;
} | 3.26 |
hadoop_ApplicationTableRW_setMetricsTTL_rdh | /**
*
* @param metricsTTL
* time to live parameter for the metrics in this table.
* @param hbaseConf
* configuration in which to set the metrics TTL config
* variable.
*/
public void setMetricsTTL(int metricsTTL, Configuration hbaseConf) {
hbaseConf.setInt(METRICS_TTL_CONF_NAME, metricsTTL);
} | 3.26 |
hadoop_JournalNodeRpcServer_getRpcServer_rdh | /**
* Allow access to the RPC server for testing.
*/
@VisibleForTesting
Server getRpcServer() {
return server;
} | 3.26 |
hadoop_HttpReferrerAuditHeader_set_rdh | /**
* Set an attribute. If the value is non-null/empty,
* it will be used as a query parameter.
*
* @param key
* key to set
* @param value
* value.
*/
public void set(final String key, final String value) {
addAttribute(requireNonNull(key), value);
} | 3.26 |
hadoop_HttpReferrerAuditHeader_withSpanId_rdh | /**
* Set ID.
*
* @param value
* new value
* @return the builder
*/
public Builder withSpanId(final String value) {
spanId = value;
return this;
} | 3.26 |
hadoop_HttpReferrerAuditHeader_withOperationName_rdh | /**
* Set Operation name.
*
* @param value
* new value
* @return the builder
*/
public Builder withOperationName(final String value) {
operationName = value;
return this;
} | 3.26 |
hadoop_HttpReferrerAuditHeader_withGlobalContextValues_rdh | /**
* Set the global context values (replaces the default binding
* to {@link CommonAuditContext#getGlobalContextEntries()}).
*
* @param value
* new value
* @return the builder
*/
public Builder withGlobalContextValues(final Iterable<Map.Entry<String, String>> value) {
globalContextValues = value;
return this;
} | 3.26 |
hadoop_HttpReferrerAuditHeader_withPath2_rdh | /**
* Set Path2 of operation.
*
* @param value
* new value
* @return the builder
*/
public Builder withPath2(final String value) {
path2 = value;
return this;
} | 3.26 |
hadoop_HttpReferrerAuditHeader_addAttribute_rdh | /**
* Add a query parameter if not null/empty
* There's no need to escape here as it is done in the URI
* constructor.
*
* @param key
* query key
* @param value
* query value
*/
private void addAttribute(String key, String value) {
if (StringUtils.isNotEmpty(value)) {
attributes.put(key, value);
}
} | 3.26 |
hadoop_HttpReferrerAuditHeader_withFilter_rdh | /**
* Declare the fields to filter.
*
* @param fields
* iterable of field names.
* @return the builder
*/
public Builder withFilter(final Collection<String> fields) {
this.filter = new HashSet<>(fields);
return this;
} | 3.26 |
hadoop_HttpReferrerAuditHeader_withContextId_rdh | /**
* Set context ID.
*
* @param value
* context
* @return the builder
*/
public Builder withContextId(final String value) {
contextId = value;
return this;
} | 3.26 |
hadoop_HttpReferrerAuditHeader_builder_rdh | /**
* Get a builder.
*
* @return a new builder.
*/
public static Builder builder() {
return new Builder();
} | 3.26 |
hadoop_HttpReferrerAuditHeader_build_rdh | /**
* Build.
*
* @return an HttpReferrerAuditHeader
*/
public HttpReferrerAuditHeader build() {
return new HttpReferrerAuditHeader(this);
} | 3.26 |
hadoop_HttpReferrerAuditHeader_withAttribute_rdh | /**
* Add an attribute to the current map.
* Replaces any with the existing key.
*
* @param key
* key to set/update
* @param value
* new value
* @return the builder
*/
public Builder withAttribute(String key, String value) {
attributes.put(key, value);
return this;
} | 3.26 |
hadoop_HttpReferrerAuditHeader_extractQueryParameters_rdh | /**
* Split up the string. Uses httpClient: make sure it is on the classpath.
* Any query param with a name but no value, e.g ?something is
* returned in the map with an empty string as the value.
*
* @param header
* URI to parse
* @return a map of parameters.
* @throws URISyntaxException
* failure to build URI from header.
*/
public static Map<String, String> extractQueryParameters(String header) throws URISyntaxException {
URI uri = new URI(maybeStripWrappedQuotes(header));
// get the decoded query
List<NameValuePair> params = URLEncodedUtils.parse(uri, StandardCharsets.UTF_8);
Map<String, String> result = new HashMap<>(params.size());
for (NameValuePair param : params) {
String name = param.getName();
String value = param.getValue();
if (value == null) {
value = "";
}
result.put(name, value);
}
return result;
} | 3.26 |
hadoop_HttpReferrerAuditHeader_withPath1_rdh | /**
* Set Path1 of operation.
*
* @param value
* new value
* @return the builder
*/
public Builder withPath1(final String value) {
path1 = value;
return this;
} | 3.26 |
hadoop_HttpReferrerAuditHeader_withEvaluated_rdh | /**
* Add an evaluated attribute to the current map.
* Replaces any with the existing key.
* Set evaluated methods.
*
* @param key
* key
* @param value
* new value
* @return the builder
*/
public Builder withEvaluated(String key, Supplier<String> value) {
evaluated.put(key, value);
return this;
} | 3.26 |
hadoop_HttpReferrerAuditHeader_buildHttpReferrer_rdh | /**
* Build the referrer string.
* This includes dynamically evaluating all of the evaluated
* attributes.
* If there is an error creating the string it will be logged once
* per entry, and "" returned.
*
* @return a referrer string or ""
*/
public String buildHttpReferrer() {
String header;
try {
String queries;
// Update any params which are dynamically evaluated
evaluated.forEach((key, eval) -> addAttribute(key, eval.get()));
// now build the query parameters from all attributes, static and
// evaluated, stripping out any from the filter
queries = attributes.entrySet().stream().filter(e -> !filter.contains(e.getKey())).map(e -> (e.getKey() + "=") + e.getValue()).collect(Collectors.joining("&"));
final URI uri = new URI("https", REFERRER_ORIGIN_HOST, String.format(Locale.ENGLISH, REFERRER_PATH_FORMAT, contextId, spanId, operationName), queries, null);
header = uri.toASCIIString();
} catch (URISyntaxException e) {
WARN_OF_URL_CREATION.warn("Failed to build URI for auditor: " + e, e);
header = "";
}
return header;
} | 3.26 |
hadoop_HttpReferrerAuditHeader_escapeToPathElement_rdh | /**
* Perform any escaping to valid path elements in advance of
* new URI() doing this itself. Only path separators need to
* be escaped/converted at this point.
*
* @param source
* source string
* @return an escaped path element.
*/
public static String escapeToPathElement(CharSequence source) {
int len = source.length();
StringBuilder r = new StringBuilder(len);
for (int i = 0; i < len; i++) {
char c = source.charAt(i);
String s = Character.toString(c);
switch (c) {
case '/' :
case '@' :
s = "+";
break;
default :
break;
}
r.append(s);
}
return r.toString();
} | 3.26 |
hadoop_HttpReferrerAuditHeader_withAttributes_rdh | /**
* Add all attributes to the current map.
*
* @param value
* new value
* @return the builder
*/
public Builder withAttributes(final Map<String, String> value) {
attributes.putAll(value);
return this;
} | 3.26 |
hadoop_VolumeAMSProcessor_checkAndGetVolume_rdh | /**
* If the given volume ID already exists in the volume manager,
* it returns the existing volume. Otherwise, it creates a new
* volume and adds it to the volume manager.
*
* @param metaData the volume meta data
* @return volume
*/
private Volume checkAndGetVolume(VolumeMetaData metaData) throws InvalidVolumeException {
Volume toAdd = new VolumeImpl(metaData);
CsiAdaptorProtocol adaptor = volumeManager.getAdaptorByDriverName(metaData.getDriverName());
if (adaptor == null) {
throw new InvalidVolumeException((((((("It seems for the driver name" + " specified in the volume ") + metaData.getDriverName()) + " ,there is no matched driver-adaptor can be found. ") + "Is the driver probably registered? Please check if") + " adaptors service addresses defined in ") + YarnConfiguration.NM_CSI_ADAPTOR_ADDRESSES) + " are correct and services are started.");
}
toAdd.setClient(adaptor);
return this.volumeManager.addOrGetVolume(toAdd);
} | 3.26 |
hadoop_VolumeAMSProcessor_aggregateVolumesFrom_rdh | // Currently only scheduling request is supported.
private List<Volume> aggregateVolumesFrom(AllocateRequest request) throws VolumeException {
List<Volume> volumeList = new ArrayList<>();
List<SchedulingRequest> requests = request.getSchedulingRequests();
if (requests != null) {
for (SchedulingRequest req : requests) {
Resource totalResource = req.getResourceSizing().getResources();
List<ResourceInformation> resourceList = totalResource.getAllResourcesListCopy();
for (ResourceInformation resourceInformation : resourceList) {
List<VolumeMetaData> volumes = VolumeMetaData.fromResource(resourceInformation);
for (VolumeMetaData vs : volumes) {
if (vs.getVolumeCapabilityRange().getMinCapacity() <= 0) {
// capacity not specified, ignore
continue;
} else if (vs.isProvisionedVolume()) {
volumeList.add(checkAndGetVolume(vs));
} else {
throw new InvalidVolumeException("Only pre-provisioned volume" + " is supported now, volumeID must exist.");
}
}
}
}
}
return volumeList;
} | 3.26 |
hadoop_GetContentSummaryOperation_getDirSummary_rdh | /**
* Return the {@link ContentSummary} of a given directory.
*
* @param dir
* dir to scan
* @return the content summary
* @throws FileNotFoundException
* if the path does not resolve
* @throws IOException
* IO failure
*/
public ContentSummary getDirSummary(Path dir) throws IOException {
long totalLength = 0;
long fileCount = 0;
long dirCount = 1;
RemoteIterator<S3ALocatedFileStatus> it = callbacks.listFilesIterator(dir, true);
Set<Path> dirSet = new HashSet<>();
Set<Path> pathsTraversed = new HashSet<>();
while (it.hasNext()) {
S3ALocatedFileStatus fileStatus = it.next();
Path filePath = fileStatus.getPath();
if (fileStatus.isDirectory() && (!filePath.equals(dir))) {
dirSet.add(filePath);
buildDirectorySet(dirSet, pathsTraversed, dir, filePath.getParent());
} else if (!fileStatus.isDirectory()) {
fileCount += 1;
totalLength += fileStatus.getLen();
buildDirectorySet(dirSet, pathsTraversed, dir, filePath.getParent());
}
}
// Add the list's IOStatistics
iostatistics.aggregate(retrieveIOStatistics(it));
return new ContentSummary.Builder().length(totalLength).fileCount(fileCount).directoryCount(dirCount + dirSet.size()).spaceConsumed(totalLength).build();
} | 3.26 |
hadoop_GetContentSummaryOperation_execute_rdh | /**
* Return the {@link ContentSummary} of a given path.
*
* @return the summary.
* @throws FileNotFoundException
* if the path does not resolve
* @throws IOException
* failure
*/
@Override
@Retries.RetryTranslated
public ContentSummary execute() throws IOException {
FileStatus status = probePathStatusOrNull(path, StatusProbeEnum.FILE);
if ((status != null) && status.isFile()) {
// f is a file
long length = status.getLen();
return new ContentSummary.Builder().length(length).fileCount(1).directoryCount(0).spaceConsumed(length).build();
}
final ContentSummary summary = getDirSummary(path);
// Log the IOStatistics at debug so the cost of the operation
// can be made visible.
LOG.debug("IOStatistics of getContentSummary({}):\n{}", path, iostatistics);return summary;
} | 3.26 |
hadoop_GetContentSummaryOperation_buildDirectorySet_rdh | /**
* *
* This method builds the set of all directories found under the base path. We need to do this
* because if the directory structure /a/b/c was created with a single mkdirs() call, it is
* stored as 1 object in S3 and the list files iterator will only return a single entry /a/b/c.
*
* We keep track of paths traversed so far to prevent duplication of work. For example, if we had
* a/b/c/file-1.txt and /a/b/c/file-2.txt, we will only recurse over the complete path once
* and won't have to do anything for file-2.txt.
*
* @param dirSet
* Set of all directories found in the path
* @param pathsTraversed
* Set of all paths traversed so far
* @param basePath
* Path of directory to scan
* @param parentPath
* Parent path of the current file/directory in the iterator
*/
private void buildDirectorySet(Set<Path> dirSet, Set<Path> pathsTraversed, Path basePath, Path parentPath) {
if (((parentPath == null) || pathsTraversed.contains(parentPath)) || parentPath.equals(basePath)) {
return;
}
dirSet.add(parentPath);
buildDirectorySet(dirSet, pathsTraversed, basePath, parentPath.getParent());
pathsTraversed.add(parentPath);
} | 3.26 |
hadoop_GetContentSummaryOperation_probePathStatusOrNull_rdh | /**
* Get the status of a path, downgrading FNFE to null result.
*
* @param p
* path to probe.
* @param probes
* probes to exec
* @return the status or null
* @throws IOException
* failure other than FileNotFound
*/
private S3AFileStatus probePathStatusOrNull(final Path p, final Set<StatusProbeEnum> probes) throws IOException {
try {
return callbacks.probePathStatus(p, probes);
} catch (FileNotFoundException fnfe) {
return null;
}
} | 3.26 |
hadoop_RenameOperation_removeSourceObjects_rdh | /**
* Remove source objects.
*
* @param keys
* list of keys to delete
* @throws IOException
* failure
*/
@Retries.RetryTranslated
private void removeSourceObjects(final List<ObjectIdentifier> keys) throws IOException {
// remove the keys
// list what is being deleted for the interest of anyone
// who is trying to debug why objects are no longer there.
if (LOG.isDebugEnabled()) {
LOG.debug("Initiating delete operation for {} objects", keys.size());
for (ObjectIdentifier objectIdentifier : keys) {
LOG.debug(" {} {}", objectIdentifier.key(), objectIdentifier.versionId() != null ? objectIdentifier.versionId() : "");
}
}
Invoker.once((("rename " +
sourcePath) + " to ") + destPath, sourcePath.toString(), () -> callbacks.removeKeys(keys, false));} | 3.26 |
hadoop_RenameOperation_completeActiveCopies_rdh | /**
* Wait for the active copies to complete then reset the list.
*
* @param reason
* for messages
* @throws IOException
* if one of the called futures raised an IOE.
* @throws RuntimeException
* if one of the futures raised one.
*/
@Retries.OnceTranslated
private void completeActiveCopies(String reason) throws IOException {
LOG.debug("Waiting for {} active copies to complete: {}", activeCopies.size(), reason);
waitForCompletion(activeCopies);
activeCopies.clear();
} | 3.26 |
hadoop_RenameOperation_endOfLoopActions_rdh | /**
* Operations to perform at the end of every loop iteration.
* <p>
* This may block the thread waiting for copies to complete
* and/or delete a page of data.
*/
private void endOfLoopActions() throws IOException {
if (keysToDelete.size() == pageSize) {
// finish ongoing copies then delete all queued keys.
completeActiveCopiesAndDeleteSources("paged delete");
} else if (activeCopies.size() == RENAME_PARALLEL_LIMIT) {
// the limit of active copies has been reached;
// wait for completion or errors to surface.
LOG.debug("Waiting for active copies to complete");
completeActiveCopies("batch threshold reached");
}
} | 3.26 |
hadoop_RenameOperation_queueToDelete_rdh | /**
* Queue a single marker for deletion.
* <p>
* See {@link #queueToDelete(Path, String)} for
* details on safe use of this method.
*
* @param marker
* markers
*/
private void queueToDelete(final DirMarkerTracker.Marker marker) {
queueToDelete(marker.getPath(), marker.getKey());
} | 3.26 |
hadoop_RenameOperation_renameFileToDest_rdh | /**
* The source is a file: rename it to the destination, which
* will be under the current destination path if that is a directory.
*
* @return the path of the object created.
* @throws IOException
* failure
*/
protected Path renameFileToDest() throws IOException {
final StoreContext storeContext = getStoreContext();
// the source is a file.
Path copyDestinationPath = destPath;
String copyDestinationKey = destKey;
S3ObjectAttributes sourceAttributes = callbacks.createObjectAttributes(f0);
S3AReadOpContext readContext = callbacks.createReadContext(f0);
if ((destStatus != null) && destStatus.isDirectory()) {
// destination is a directory: build the final destination underneath
String newDestKey = maybeAddTrailingSlash(destKey);
String filename = sourceKey.substring(storeContext.pathToKey(sourcePath.getParent()).length() + 1);
newDestKey = newDestKey + filename;
copyDestinationKey = newDestKey;
copyDestinationPath = storeContext.keyToPath(newDestKey);
}
// destination either does not exist or is a file to overwrite.
LOG.debug("rename: renaming file {} to {}", sourcePath, copyDestinationPath);
copySource(sourceKey, sourceAttributes, readContext, copyDestinationPath, copyDestinationKey);
bytesCopied.addAndGet(f0.getLen());
// delete the source
callbacks.deleteObjectAtPath(sourcePath, sourceKey, true);
return copyDestinationPath;
} | 3.26 |
hadoop_RenameOperation_convertToIOException_rdh | /**
* Convert a passed in exception (expected to be an IOE or AWS exception)
* into an IOException.
*
* @param ex
* exception caught
* @return the exception to throw in the failure handler.
*/
protected IOException convertToIOException(final Exception ex) {
if (ex instanceof IOException) {
return ((IOException) (ex));
} else if (ex instanceof SdkException) {
return translateException((("rename " + sourcePath) + " to ") + destPath, sourcePath.toString(), ((SdkException) (ex)));
} else {
// should never happen, but for completeness
return new IOException(ex);
}
} | 3.26 |
hadoop_RenameOperation_getUploadsAborted_rdh | /**
* Get the count of uploads aborted.
* Non-empty iff enabled, and the operations completed without errors.
*
* @return count of aborted uploads.
*/
public Optional<Long> getUploadsAborted() {
return uploadsAborted;
} | 3.26 |
hadoop_RenameOperation_copySource_rdh | /**
* This is invoked to copy a file or directory marker.
* It may be called in its own thread.
*
* @param srcKey
* source key
* @param srcAttributes
* status of the source object
* @param destination
* destination as a qualified path.
* @param destinationKey
* destination key
* @return the destination path.
* @throws IOException
* failure
*/
@Retries.RetryTranslated
private Path copySource(final String srcKey, final S3ObjectAttributes srcAttributes, final S3AReadOpContext readContext, final Path destination, final String destinationKey) throws IOException {
long len = srcAttributes.getLen();
try (DurationInfo v35 = new DurationInfo(LOG, false, "Copy file from %s to %s (length=%d)", srcKey, destinationKey, len)) {
callbacks.copyFile(srcKey, destinationKey, srcAttributes, readContext);
}
return destination;
} | 3.26 |
hadoop_RenameOperation_initiateCopy_rdh | /**
* Initiate a copy operation in the executor.
*
* @param source
* status of the source object.
* @param key
* source key
* @param newDestKey
* destination key
* @param childDestPath
* destination path.
* @return the future.
*/
protected CompletableFuture<Path> initiateCopy(final S3ALocatedFileStatus source, final String key, final String newDestKey, final Path childDestPath) {
S3ObjectAttributes sourceAttributes = callbacks.createObjectAttributes(source.getPath(), source.getEtag(), source.getVersionId(), source.getLen());
// queue the copy operation for execution in the thread pool
return submit(getStoreContext().getExecutor(), callableWithinAuditSpan(getAuditSpan(), () -> copySource(key, sourceAttributes, callbacks.createReadContext(source), childDestPath, newDestKey)));
} | 3.26 |
hadoop_RenameOperation_recursiveDirectoryRename_rdh | /**
* Execute a full recursive rename.
* There is special handling of directory markers here - only leaf markers
* are copied. This reduces incompatibility "regions" across versions.
*
* @throws IOException
* failure
*/
protected void recursiveDirectoryRename() throws IOException {
final StoreContext storeContext = getStoreContext();
LOG.debug("rename: renaming directory {} to {}", sourcePath, destPath);// This is a directory-to-directory copy
String dstKey = maybeAddTrailingSlash(destKey);
String srcKey = maybeAddTrailingSlash(sourceKey);
// Verify dest is not a child of the source directory
if (dstKey.startsWith(srcKey)) {
throw new RenameFailedException(srcKey, dstKey, "cannot rename a directory to a subdirectory of itself ");
}
// start the async dir cleanup
final CompletableFuture<Long> abortUploads;
if (dirOperationsPurgeUploads) {
final String key = srcKey;
LOG.debug("All uploads under {} will be deleted", key);
abortUploads = submit(getStoreContext().getExecutor(), () -> callbacks.abortMultipartUploadsUnderPrefix(key));
} else {
abortUploads = null;
}
if ((destStatus != null) && (destStatus.isEmptyDirectory() == Tristate.TRUE)) {
// delete unnecessary fake directory at the destination.
LOG.debug("Deleting fake directory marker at destination {}", destStatus.getPath());
// Although the dir marker policy doesn't always need to do this,
// it's simplest just to be consistent here.
callbacks.deleteObjectAtPath(destStatus.getPath(), dstKey, false);
}
Path parentPath = storeContext.keyToPath(srcKey);
// Track directory markers so that we know which leaf directories need to be
// recreated
DirMarkerTracker dirMarkerTracker = new DirMarkerTracker(parentPath, false);
final RemoteIterator<S3ALocatedFileStatus> iterator = callbacks.listFilesAndDirectoryMarkers(parentPath, f0, true);
while (iterator.hasNext()) {
// get the next entry in the listing.
S3ALocatedFileStatus child = iterator.next();
LOG.debug("To rename {}", child);
// convert it to an S3 key.
String k = storeContext.pathToKey(child.getPath());
// possibly adding a "/" if it represents directory and it does
// not have a trailing slash already.
String key = (child.isDirectory() && (!k.endsWith("/"))) ? k + "/" : k;
// the source object to copy as a path.
Path childSourcePath = storeContext.keyToPath(key);
List<DirMarkerTracker.Marker> markersToDelete;
boolean isMarker = key.endsWith("/");
if (isMarker) {
// add the marker to the tracker.
// it will not be deleted _yet_ but it may find a list of parent
// markers which may now be deleted.
markersToDelete = dirMarkerTracker.markerFound(childSourcePath, key, child);
} else {
// it is a file.
// note that it has been found -this may find a list of parent
// markers which may now be deleted.
markersToDelete = dirMarkerTracker.fileFound(childSourcePath, key, child);
// the destination key is that of the key under the source tree,
// remapped under the new destination path.
String newDestKey = dstKey + key.substring(srcKey.length());
Path childDestPath = storeContext.keyToPath(newDestKey);
// mark the source file for deletion on a successful copy.
queueToDelete(childSourcePath, key);
// now begin the single copy
CompletableFuture<Path> copy = initiateCopy(child, key, newDestKey, childDestPath);
activeCopies.add(copy);
bytesCopied.addAndGet(f0.getLen());
}
// add any markers to delete to the operation so they get cleaned
// incrementally
queueToDelete(markersToDelete);
// and trigger any end of loop operations
endOfLoopActions();
} // end of iteration through the list
// finally process remaining directory markers
copyEmptyDirectoryMarkers(srcKey, dstKey, dirMarkerTracker);
// await the final set of copies and their deletion
// This will notify the renameTracker that these objects
// have been deleted.
completeActiveCopiesAndDeleteSources("final copy and delete");
// and if uploads were being aborted, wait for that to finish
uploadsAborted = waitForCompletionIgnoringExceptions(abortUploads);
} | 3.26 |
hadoop_RenameOperation_completeActiveCopiesAndDeleteSources_rdh | /**
* Block waiting for any active copies to finish
* then delete all queued keys + paths to delete.
*
* @param reason
* reason for logs
* @throws IOException
* failure.
*/
@Retries.RetryTranslated
private void completeActiveCopiesAndDeleteSources(String reason) throws IOException {
completeActiveCopies(reason);
removeSourceObjects(keysToDelete);
// now reset the lists.
keysToDelete.clear();
} | 3.26 |
hadoop_JobHistoryServer_getBindAddress_rdh | /**
* Retrieve JHS bind address from configuration
*
* @param conf
* @return InetSocketAddress
*/
public static InetSocketAddress getBindAddress(Configuration conf) {
return conf.getSocketAddr(JHAdminConfig.MR_HISTORY_ADDRESS, JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS, JHAdminConfig.DEFAULT_MR_HISTORY_PORT);
} | 3.26 |
hadoop_EditLogInputStream_getCurrentStreamName_rdh | /**
* Returns the name of the currently active underlying stream. The default
* implementation returns the same value as getName unless overridden by the
* subclass.
*
* @return String name of the currently active underlying stream
*/
public String getCurrentStreamName() {
return m0();
} | 3.26 |
hadoop_EditLogInputStream_scanNextOp_rdh | /**
* Go through the next operation from the stream storage.
*
* @return the txid of the next operation.
*/
protected long scanNextOp() throws IOException {
FSEditLogOp next = readOp();
return next != null ? next.txid : HdfsServerConstants.INVALID_TXID;
} | 3.26 |
hadoop_EditLogInputStream_nextValidOp_rdh | /**
* Get the next valid operation from the stream storage.
*
* This is exactly like nextOp, except that we attempt to skip over damaged
* parts of the edit log
*
* @return an operation from the stream or null if at end of stream
*/
protected FSEditLogOp nextValidOp() {
// This is a trivial implementation which just assumes that any errors mean
// that there is nothing more of value in the log. Subclasses that support
// error recovery will want to override this.
try {
return nextOp();
} catch (Throwable e) {
return null;
}
} | 3.26 |
hadoop_EditLogInputStream_resync_rdh | /**
* Position the stream so that a valid operation can be read from it with
* readOp().
*
* This method can be used to skip over corrupted sections of edit logs.
*/
public void resync() {
if (cachedOp != null) {
return;
}
cachedOp = nextValidOp();
} | 3.26 |
hadoop_EditLogInputStream_readOp_rdh | /**
* Read an operation from the stream
*
* @return an operation from the stream or null if at end of stream
* @throws IOException
* if there is an error reading from the stream
*/
public FSEditLogOp readOp() throws IOException {
FSEditLogOp ret;
if (cachedOp != null) {
ret = cachedOp;
cachedOp = null;
return ret;
}
return nextOp();
} | 3.26 |
hadoop_EditLogInputStream_getCachedOp_rdh | /**
* return the cachedOp, and reset it to null.
*/
FSEditLogOp getCachedOp() {
FSEditLogOp op = this.cachedOp;
cachedOp = null;
return op;
} | 3.26 |
hadoop_AMRMTokenSecretManager_retrievePassword_rdh | /**
* Retrieve the password for the given {@link AMRMTokenIdentifier}.
* Used by RPC layer to validate a remote {@link AMRMTokenIdentifier}.
*/
@Override
public byte[] retrievePassword(AMRMTokenIdentifier identifier) throws InvalidToken {
this.readLock.lock();
try {
ApplicationAttemptId applicationAttemptId = identifier.getApplicationAttemptId();
LOG.debug("Trying to retrieve password for {}", applicationAttemptId);
if (!appAttemptSet.contains(applicationAttemptId)) {
throw new InvalidToken(applicationAttemptId + " not found in AMRMTokenSecretManager.");
}
if (identifier.getKeyId() == this.currentMasterKey.getMasterKey().getKeyId()) {
return createPassword(identifier.getBytes(), this.currentMasterKey.getSecretKey());
} else if ((nextMasterKey != null) && (identifier.getKeyId() == this.nextMasterKey.getMasterKey().getKeyId())) {
return createPassword(identifier.getBytes(), this.nextMasterKey.getSecretKey());
}
throw new InvalidToken("Invalid AMRMToken from " + applicationAttemptId);
} finally {
this.readLock.unlock();
}
} | 3.26 |
hadoop_AMRMTokenSecretManager_addPersistedPassword_rdh | /**
* Populate persisted password of AMRMToken back to AMRMTokenSecretManager.
*
* @param token
* AMRMTokenIdentifier.
* @throws IOException
* an I/O exception has occurred.
*/
public void addPersistedPassword(Token<AMRMTokenIdentifier> token) throws IOException {
this.writeLock.lock();
try {
AMRMTokenIdentifier identifier = token.decodeIdentifier();
LOG.debug("Adding password for " + identifier.getApplicationAttemptId());
appAttemptSet.add(identifier.getApplicationAttemptId());
} finally {
this.writeLock.unlock();
}
} | 3.26 |
hadoop_AMRMTokenSecretManager_getMasterKey_rdh | // If nextMasterKey is not Null, then return nextMasterKey
// otherwise return currentMasterKey
@VisibleForTesting
public MasterKeyData getMasterKey() {
this.readLock.lock();
try {
return nextMasterKey == null ? currentMasterKey : nextMasterKey;
} finally {
this.readLock.unlock();
}
} | 3.26 |
hadoop_AMRMTokenSecretManager_createIdentifier_rdh | /**
* Creates an empty TokenId to be used for de-serializing an
* {@link AMRMTokenIdentifier} by the RPC layer.
*/
@Override
public AMRMTokenIdentifier createIdentifier() {
return new AMRMTokenIdentifier();
} | 3.26 |
hadoop_TimedHealthReporterService_setHealthy_rdh | /**
* Sets if the node is healthy or not.
*
* @param healthy
* whether the node is healthy
*/
protected synchronized void setHealthy(boolean healthy) {
this.isHealthy = healthy;
} | 3.26 |
hadoop_TimedHealthReporterService_serviceStop_rdh | /**
* Method used to terminate the health monitoring service.
*/
@Override
protected void serviceStop() throws Exception {
if (timer != null) {
timer.cancel();
}
super.serviceStop();
} | 3.26 |
hadoop_TimedHealthReporterService_setHealthReport_rdh | /**
* Sets the health report from the node health check. Also set the disks'
* health info obtained from DiskHealthCheckerService.
*
* @param report
* report String
*/
private synchronized void setHealthReport(String report) {
this.healthReport = report;
} | 3.26 |
hadoop_TimedHealthReporterService_setLastReportedTime_rdh | /**
* Sets the last run time of the node health check.
*
* @param lastReportedTime
* last reported time in long
*/
private synchronized void setLastReportedTime(long lastReportedTime) {
this.lastReportedTime = lastReportedTime;
} | 3.26 |
hadoop_SolverPreprocessor_validate_rdh | /**
* Check if Solver's input parameters are valid.
*
* @param jobHistory
* the history {@link ResourceSkyline}s of the recurring
* pipeline job.
* @param timeInterval
* the time interval which is used to discretize the
* history {@link ResourceSkyline}s.
* @throws InvalidInputException
* if: (1) jobHistory is <em>null</em>;
* (2) jobHistory is empty; (3) timeInterval is non-positive;
*/
public final void validate(final Map<RecurrenceId, List<ResourceSkyline>> jobHistory, final int timeInterval) throws InvalidInputException {
if ((jobHistory == null) || (jobHistory.size() == 0)) {
LOGGER.error("Job resource skyline history is invalid, please try again with" + " valid resource skyline history.");
throw new InvalidInputException("Job ResourceSkyline history", "invalid");
}
if (timeInterval <= 0) {
LOGGER.error("Solver timeInterval {} is invalid, please specify a positive value.", timeInterval);
throw new InvalidInputException("Solver timeInterval", "non-positive");
}
} | 3.26 |
hadoop_SolverPreprocessor_getResourceVector_rdh | /**
* Return the multi-dimension resource vector consumed by the job at specified
* time.
*
* @param skyList
* the list of {@link Resource}s used by the job.
* @param index
* the discretized time index.
* @param containerMemAlloc
* the multi-dimension resource vector allocated to
* one container.
* @return the multi-dimension resource vector consumed by the job.
*/
public final long getResourceVector(final RLESparseResourceAllocation skyList, final int index, final long containerMemAlloc) {
return skyList.getCapacityAtTime(index).getMemorySize() / containerMemAlloc;
}
/**
* Discretize job's lifespan into intervals, and return the number of
* containers used by the job within each interval.
* <p> Note that here we assume all containers allocated to the job have the
* same {@link Resource}. This is due to the limit of
* {@link RLESparseResourceAllocation}.
*
* @param skyList
* the list of {@link Resource} | 3.26 |
hadoop_SolverPreprocessor_mergeSkyline_rdh | /**
* Merge different jobs' resource skylines into one within the same pipeline.
*
* @param resourceSkylines
* different jobs' resource skylines within the same
* pipeline.
* @return an aggregated resource skyline for the pipeline.
*/
public final ResourceSkyline mergeSkyline(final List<ResourceSkyline> resourceSkylines) {
// TODO:
// rewrite this function with shift and merge once YARN-5328 is committed
/**
* First, getHistory the pipeline submission time.
*/
long pipelineSubmission = Long.MAX_VALUE;
for (int i = 0; i < resourceSkylines.size(); i++) {
long jobSubmission = resourceSkylines.get(i).getJobSubmissionTime();
if (pipelineSubmission > jobSubmission) {
pipelineSubmission = jobSubmission;
}
}
final TreeMap<Long, Resource> resourceOverTime = new TreeMap<>();
final RLESparseResourceAllocation skylineListAgg = new RLESparseResourceAllocation(resourceOverTime, new DefaultResourceCalculator());
/**
* Second, adjust different jobs' ResourceSkyline starting time based on
* pipeline submission time, and merge them into one ResourceSkyline.
*/
for (int i = 0; i < resourceSkylines.size(); i++) {
long jobSubmission = resourceSkylines.get(i).getJobSubmissionTime();
long diff = (jobSubmission - pipelineSubmission) / 1000;
RLESparseResourceAllocation tmp = resourceSkylines.get(i).getSkylineList();
Object[] timePoints = tmp.getCumulative().keySet().toArray();
for (int j = 0; j < (timePoints.length - 2); j++) {
ReservationInterval riAdd = new ReservationInterval(toIntExact(((long) (timePoints[j]))) + diff, toIntExact(((long) (timePoints[j + 1])) + diff));
skylineListAgg.addInterval(riAdd, tmp.getCapacityAtTime(toIntExact(((long) (timePoints[j])))));
}
}
ResourceSkyline skylineAgg = new ResourceSkyline(resourceSkylines.get(0).getJobId(), resourceSkylines.get(0).getJobInputDataSize(), resourceSkylines.get(0).getJobSubmissionTime(), resourceSkylines.get(0).getJobFinishTime(), resourceSkylines.get(0).getContainerSpec(), skylineListAgg);
return skylineAgg;
}
/**
* Aggregate all job's {@link ResourceSkyline}s in the one run of recurring
* pipeline, and return the aggregated {@link ResourceSkyline}s in different
* runs.
*
* @param jobHistory
* the history {@link ResourceSkyline} of the recurring
* pipeline job.
* @param minJobRuns
* the minimum number of job runs required to run the
* solver.
* @return the aggregated {@link ResourceSkyline} | 3.26 |
hadoop_AppStoreController_get_rdh | /**
* Find yarn application from solr.
*
* @param id
* Application ID
* @return AppEntry
*/
@GET
@Path("get/{id}")
@Produces(MediaType.APPLICATION_JSON)
public AppStoreEntry get(@PathParam("id")
String id) {
AppCatalogSolrClient sc = new AppCatalogSolrClient();
return sc.findAppStoreEntry(id);
}
/**
* Register an application.
*
* @apiGroup AppStoreController
* @apiName register
* @api {post} /app_store/register Register an application in appstore.
* @apiParam {Object} app Application definition.
* @apiParamExample {json} Request-Example:
{
"name": "Jenkins",
"organization": "Jenkins-ci.org",
"description": "The leading open source automation server",
"icon": "/css/img/jenkins.png",
"lifetime": "3600",
"components": [
{
"name": "jenkins",
"number_of_containers": 1,
"artifact": {
"id": "eyang-1.openstacklocal:5000/jenkins:latest",
"type": "DOCKER"
},
"launch_command": "",
"resource": {
"cpus": 1,
"memory": "2048"
},
"configuration": {
"env": {
},
"files": [
]
}
}
],
"quicklinks": {
"Jenkins UI": "http://jenkins.${SERVICE_NAME}.${USER}.${DOMAIN}:8080/"
}
} | 3.26 |
hadoop_LocatedBlockBuilder_newLocatedBlock_rdh | // return new block so tokens can be set
LocatedBlock newLocatedBlock(ExtendedBlock eb, DatanodeStorageInfo[] storage, long pos, boolean isCorrupt) {
LocatedBlock blk = BlockManager.newLocatedBlock(eb, storage, pos, isCorrupt);
return blk;
} | 3.26 |
hadoop_RMAuthenticationFilter_doFilter_rdh | /**
* {@inheritDoc }
*/
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain filterChain) throws IOException, ServletException {
HttpServletRequest req = ((HttpServletRequest) (request));
String newHeader = req.getHeader(DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER);
if ((newHeader == null) || newHeader.isEmpty()) {
// For backward compatibility, allow use of the old header field
// only when the new header doesn't exist
final String oldHeader = req.getHeader(OLD_HEADER);
if ((oldHeader != null) && (!oldHeader.isEmpty())) {
request = new HttpServletRequestWrapper(req) {
@Override
public String getHeader(String name) {
if (name.equals(DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER)) {
return oldHeader;
}
return super.getHeader(name);
}
};
}
}
super.doFilter(request, response, filterChain);
} | 3.26 |