name | code_snippet | score |
---|---|---|
hadoop_SuccessData_getDate_rdh | /**
*
* @return timestamp as date; no expectation of parseability.
*/
public String getDate() {
return date;
} | 3.26 |
hadoop_SuccessData_load_rdh | /**
* Load an instance from a file, then validate it.
*
* @param fs
* filesystem
* @param path
* path
* @return the loaded instance
* @throws IOException
* IO failure
* @throws ValidationFailure
* if the data is invalid
*/
public static SuccessData load(FileSystem fs, Path path) throws IOException {
LOG.debug("Reading success data from {}", path);SuccessData instance = serializer().load(fs, path);
instance.validate();
return instance;
} | 3.26 |
hadoop_SuccessData_recordJobFailure_rdh | /**
* Note a failure by setting success flag to false,
* then add the exception to the diagnostics.
*
* @param thrown
* throwable
*/
public void recordJobFailure(Throwable thrown) {
setSuccess(false);
String stacktrace = ExceptionUtils.getStackTrace(thrown);
diagnostics.put("exception", thrown.toString());
diagnostics.put("stacktrace", stacktrace);} | 3.26 |
hadoop_SuccessData_getTimestamp_rdh | /**
*
* @return timestamp of creation.
*/
public long getTimestamp() {
return timestamp;
} | 3.26 |
hadoop_SuccessData_getDescription_rdh | /**
*
* @return any description text.
*/
public String getDescription() {
return description;
} | 3.26 |
hadoop_SuccessData_getMetrics_rdh | /**
*
* @return any metrics.
*/
public Map<String, Long> getMetrics() {
return metrics;
} | 3.26 |
hadoop_SuccessData_putDiagnostic_rdh | /**
* Add a diagnostics entry.
*
* @param key
* name
* @param value
* value
*/
public void putDiagnostic(String key, String value) {
diagnostics.put(key, value);
} | 3.26 |
hadoop_SuccessData_setSuccess_rdh | /**
* Set the success flag.
*
* @param success
* did the job succeed?
*/
public void setSuccess(boolean success) {
this.success = success;
} | 3.26 |
hadoop_SuccessData_getCommitter_rdh | /**
*
* @return committer name.
*/
public String getCommitter() {
return committer;
} | 3.26 |
hadoop_SuccessData_getFilenames_rdh | /**
*
* @return a list of filenames in the commit.
*/
public List<String> getFilenames() {
return filenames;
} | 3.26 |
hadoop_AsyncGet_wait_rdh | /**
* Use {@link #get(long, TimeUnit)} timeout parameters to wait.
*
* @param obj
* object.
* @param timeout
* timeout.
* @param unit
* unit.
* @throws InterruptedException
* if the thread is interrupted.
*/
public static void wait(Object obj, long timeout, TimeUnit unit)
throws InterruptedException {
if (timeout < 0) {
obj.wait();
} else if (timeout > 0) {
obj.wait(unit.toMillis(timeout));
}
} | 3.26 |
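A minimal usage sketch for the helper above (hypothetical, not from the source): since it delegates to `Object.wait()`, the caller must hold the monitor on `obj` and should re-check its completion condition in a loop. The waiter class and `done` flag are illustrative, and the helper is invoked via `AsyncGet` simply to match the row name; in the actual codebase it may live on a nested utility class.

```java
import java.util.concurrent.TimeUnit;

// Hypothetical sketch of waiting for an asynchronous result.
class AsyncResultWaiter {
  private final Object obj = new Object();   // monitor guarding `done`
  private volatile boolean done;

  void awaitResult() throws InterruptedException {
    synchronized (obj) {
      while (!done) {
        // obj.wait(500) under the hood, per the helper shown above
        AsyncGet.wait(obj, 500, TimeUnit.MILLISECONDS);
      }
    }
  }

  void complete() {
    synchronized (obj) {
      done = true;
      obj.notifyAll();
    }
  }
}
```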
hadoop_ProxyCombiner_combine_rdh | /**
* Combine two or more proxies which together comprise a single proxy
* interface. This can be used for a protocol interface which {@code extends}
* multiple other protocol interfaces. The returned proxy will implement
 * all of the methods of the combined proxy interface, delegating each call
 * to whichever proxy implements that method. If multiple proxies implement the
 * same method, the first one in the list is used for delegation.
* <p>
* This will check that every method on the combined interface is
* implemented by at least one of the supplied proxy objects.
*
* @param combinedProxyInterface
* The interface of the combined proxy.
* @param proxies
* The proxies which should be used as delegates.
* @param <T>
* The type of the proxy that will be returned.
* @return The combined proxy.
*/
@SuppressWarnings("unchecked")
public static <T> T combine(Class<T> combinedProxyInterface, Object... proxies) {
methodLoop:
for (Method m : combinedProxyInterface.getMethods()) {
for (Object proxy : proxies) {
try {
proxy.getClass().getMethod(m.getName(), m.getParameterTypes());
continue methodLoop; // go to the next method
} catch (NoSuchMethodException nsme) {
// Continue to try the next proxy
}
}
throw new IllegalStateException("The proxies specified for " + combinedProxyInterface
+ " do not cover method " + m);
}
InvocationHandler handler =
new CombinedProxyInvocationHandler(combinedProxyInterface, proxies);
return (T) Proxy.newProxyInstance(combinedProxyInterface.getClassLoader(),
new Class[]{ combinedProxyInterface }, handler);
} | 3.26 |
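A hedged demo of the `combine()` call shown above. The protocol interfaces and in-memory implementations below are illustrative only; in Hadoop the delegates would be RPC proxies. Only the call shape follows the signature in the snippet.

```java
import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch: a combined interface extending two "protocols",
// backed by two separate delegate objects.
public class ProxyCombinerDemo {
  interface ReadProtocol { String read(String key); }
  interface WriteProtocol { void write(String key, String value); }
  interface ReadWriteProtocol extends ReadProtocol, WriteProtocol { }

  public static void main(String[] args) {
    Map<String, String> store = new HashMap<>();
    ReadProtocol reader = store::get;
    WriteProtocol writer = store::put;

    // One proxy implementing the combined interface; each call is delegated
    // to whichever supplied object declares the invoked method.
    ReadWriteProtocol combined =
        ProxyCombiner.combine(ReadWriteProtocol.class, reader, writer);

    combined.write("k", "v");               // routed to `writer`
    System.out.println(combined.read("k")); // routed to `reader`, prints "v"
  }
}
```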
hadoop_ProxyCombiner_getConnectionId_rdh | /**
* Since this is incapable of returning multiple connection IDs, simply
* return the first one. In most cases, the connection ID should be the same
* for all proxies.
*/
@Override
public ConnectionId getConnectionId() {
return RPC.getConnectionIdForProxy(proxies[0]);
} | 3.26 |
hadoop_StartupProgressMetrics_register_rdh | /**
* Registers StartupProgressMetrics linked to the given StartupProgress.
*
* @param prog
* StartupProgress to link
*/
public static void register(StartupProgress prog) {
new StartupProgressMetrics(prog);
} | 3.26 |
hadoop_StartupProgressMetrics_addGauge_rdh | /**
* Adds a gauge with a name built by using the specified phase's name as prefix
* and then appending the specified suffix.
*
* @param builder
* MetricsRecordBuilder to receive counter
* @param phase
* Phase to add
* @param nameSuffix
* String suffix of metric name
* @param descSuffix
* String suffix of metric description
* @param value
* float gauge value
*/
private static void addGauge(MetricsRecordBuilder builder, Phase phase, String nameSuffix, String descSuffix, float value) {
MetricsInfo metricsInfo = info(phase.getName() + nameSuffix, phase.getDescription() + descSuffix);
builder.addGauge(metricsInfo, value);
} | 3.26 |
hadoop_StartupProgressMetrics_addCounter_rdh | /**
* Adds a counter with a name built by using the specified phase's name as
* prefix and then appending the specified suffix.
*
* @param builder
* MetricsRecordBuilder to receive counter
* @param phase
* Phase to add
* @param nameSuffix
* String suffix of metric name
* @param descSuffix
* String suffix of metric description
* @param value
* long counter value
*/
private static void addCounter(MetricsRecordBuilder builder, Phase phase, String nameSuffix, String descSuffix, long value) {
MetricsInfo metricsInfo = info(phase.getName() + nameSuffix, phase.getDescription() + descSuffix);
builder.addCounter(metricsInfo, value);
} | 3.26 |
hadoop_LocalAllocationTagsManager_cleanTempContainers_rdh | /**
 * Removes temporary containers associated with an application.
 * Used by the placement algorithm to clean temporary tags at the end of
 * a placement cycle.
*
* @param applicationId
* Application Id.
*/
public void cleanTempContainers(ApplicationId applicationId) {
if (!appTempMappings.get(applicationId).isEmpty()) {
appTempMappings.get(applicationId).entrySet().stream().forEach(nodeE -> {
nodeE.getValue().entrySet().stream().forEach(tagE -> {
for (int i = 0; i < tagE.getValue().get(); i++) {
removeTags(nodeE.getKey(), applicationId, Collections.singleton(tagE.getKey()));
}
});
});
appTempMappings.remove(applicationId);
LOG.debug("Removed TEMP containers of app={}", applicationId);
}
} | 3.26 |
hadoop_LocatedFileStatus_getBlockLocations_rdh | /**
 * Get the file's block locations.
 *
 * In HDFS, the returned BlockLocation will have different formats for
 * replicated and erasure-coded files.
* Please refer to
* {@link FileSystem#getFileBlockLocations(FileStatus, long, long)}
* for more details.
*
* @return the file's block locations
 */
public BlockLocation[] getBlockLocations() {
return locations;
} | 3.26 |
hadoop_LocatedFileStatus_setBlockLocations_rdh | /**
* Hook for subclasses to lazily set block locations. The {@link #locations}
* field should be null before this is called.
*
* @param locations
* Block locations for this instance.
*/
protected void setBlockLocations(BlockLocation[] locations) {
this.locations = locations;
} | 3.26 |
hadoop_KMSMDCFilter_m0_rdh | /**
* Sets the context with the given parameters.
*
* @param ugi
* the {@link UserGroupInformation} for the current request.
* @param method
* the http method
* @param requestURL
* the requested URL.
* @param remoteAddr
* the remote address of the client.
*/
@VisibleForTesting
public static void m0(UserGroupInformation ugi, String method, String requestURL, String remoteAddr) {
DATA_TL.set(new Data(ugi, method, requestURL, remoteAddr));
} | 3.26 |
hadoop_PartitionResourcesInfo_setUserAmLimit_rdh | /**
*
* @param userAmLimit
* the userAmLimit to set
*/
public void setUserAmLimit(ResourceInfo userAmLimit) {
this.userAmLimit = userAmLimit;
} | 3.26 |
hadoop_PartitionResourcesInfo_getUserAmLimit_rdh | /**
*
* @return the userAmLimit
*/
public ResourceInfo getUserAmLimit() {
return userAmLimit;
} | 3.26 |
hadoop_ConfigurationProviderFactory_getConfigurationProvider_rdh | /**
* Creates an instance of {@link ConfigurationProvider} using given
* configuration.
*
* @param bootstrapConf
* bootstrap configuration.
* @return configurationProvider configuration provider.
*/
@SuppressWarnings("unchecked")
public static ConfigurationProvider getConfigurationProvider(Configuration bootstrapConf) {
Class<? extends ConfigurationProvider> v0;
try {
v0 = (Class<? extends ConfigurationProvider>)
Class.forName(YarnConfiguration.DEFAULT_RM_CONFIGURATION_PROVIDER_CLASS);
} catch (Exception e) {
throw new YarnRuntimeException("Invalid default configuration provider class" + YarnConfiguration.DEFAULT_RM_CONFIGURATION_PROVIDER_CLASS, e);
}
ConfigurationProvider configurationProvider = ReflectionUtils.newInstance(bootstrapConf.getClass(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS, v0, ConfigurationProvider.class), bootstrapConf);
return configurationProvider;
} | 3.26 |
hadoop_AWSClientIOException_retryable_rdh | /**
* Query inner cause for retryability.
*
* @return what the cause says.
*/
public boolean retryable() {
return getCause().retryable();
} | 3.26 |
hadoop_GangliaConf_getTmax_rdh | /**
*
* @return the tmax
*/
int getTmax() {
return tmax;
} | 3.26 |
hadoop_GangliaConf_getDmax_rdh | /**
*
* @return the dmax
*/
int getDmax() {
return dmax;
} | 3.26 |
hadoop_GangliaConf_setUnits_rdh | /**
*
* @param units
* the units to set
*/
void setUnits(String units) {
this.units = units;
} | 3.26 |
hadoop_GangliaConf_setTmax_rdh | /**
*
* @param tmax
* the tmax to set
*/
void setTmax(int tmax) {
this.tmax = tmax;
} | 3.26 |
hadoop_GangliaConf_getSlope_rdh | /**
*
* @return the slope
*/
GangliaSlope getSlope() {
return slope;
} | 3.26 |
hadoop_GangliaConf_setDmax_rdh | /**
*
* @param dmax
* the dmax to set
*/
void setDmax(int dmax) {
this.dmax = dmax;
} | 3.26 |
hadoop_GangliaConf_getUnits_rdh | /**
*
* @return the units
*/
String getUnits() {
return units;
} | 3.26 |
hadoop_GangliaConf_setSlope_rdh | /**
*
* @param slope
* the slope to set
*/
void setSlope(GangliaSlope slope) {
this.slope = slope;
} | 3.26 |
hadoop_CopyCommandWithMultiThread_setThreadPoolQueueSize_rdh | /**
 * Set the thread pool queue size from the option value; if the value is
 * less than 1, use DEFAULT_QUEUE_SIZE instead.
 *
 * @param optValue
 * option value
 */
protected void setThreadPoolQueueSize(String optValue) {
if (optValue != null) {
int size = Integer.parseInt(optValue);
threadPoolQueueSize = (size < 1) ? DEFAULT_QUEUE_SIZE : size;
}
} | 3.26 |
hadoop_CopyCommandWithMultiThread_hasMoreThanOneSourcePaths_rdh | // check if source is only one single file.
private boolean hasMoreThanOneSourcePaths(LinkedList<PathData> args) throws IOException {
if (args.size() > 1) {
return true;
}
if (args.size() == 1) {
PathData src = args.get(0);
if (src.stat == null) {
src.refreshStatus();
}
return isPathRecursable(src);
}
return false;
} | 3.26 |
hadoop_CopyCommandWithMultiThread_m0_rdh | /**
 * Set the thread count from the option value; if the value is less than 1,
 * use 1 instead.
*
* @param optValue
* option value
*/
protected void m0(String optValue) {
if (optValue != null) {
threadCount = Math.max(Integer.parseInt(optValue), 1);
}
} | 3.26 |
hadoop_CopyCommandWithMultiThread_isMultiThreadNecessary_rdh | // if thread count is 1 or the source is only one single file,
// don't init executor to avoid threading overhead.
@VisibleForTesting
protected boolean isMultiThreadNecessary(LinkedList<PathData> args)
throws IOException {
return (this.threadCount > 1) && hasMoreThanOneSourcePaths(args);
} | 3.26 |
hadoop_AHSLogsPage_content_rdh | /**
 * The content of this page is the AggregatedLogsBlock.
*
* @return AggregatedLogsBlock.class
*/
@Override
protected Class<? extends SubView> content() {
return AggregatedLogsBlock.class;
} | 3.26 |
hadoop_AbfssDtFetcher_getScheme_rdh | /**
* Get the scheme for this specific fetcher.
*
* @return a scheme.
*/
protected String getScheme() {
return FileSystemUriSchemes.ABFS_SECURE_SCHEME;
} | 3.26 |
hadoop_YarnWebServiceUtils_getNodeInfoFromRMWebService_rdh | /**
* Utility function to get NodeInfo by calling RM WebService.
*
* @param conf
* the configuration
* @param nodeId
* the nodeId
* @return a JSONObject which contains the NodeInfo
* @throws ClientHandlerException
* if there is an error
* processing the response.
* @throws UniformInterfaceException
* if the response status
* is 204 (No Content).
*/
public static JSONObject getNodeInfoFromRMWebService(Configuration conf, String nodeId) throws ClientHandlerException, UniformInterfaceException {
try {
return WebAppUtils.execOnActiveRM(conf, YarnWebServiceUtils::getNodeInfoFromRM, nodeId);
} catch (Exception e) {
if (e instanceof ClientHandlerException) {
throw (ClientHandlerException) e;
} else if (e instanceof UniformInterfaceException) {
throw (UniformInterfaceException) e;
} else {
throw new RuntimeException(e);
}
}
} | 3.26 |
hadoop_TopologyBuilder_build_rdh | /**
* Request the builder to build the final object. Once called, the
* {@link TopologyBuilder} would accept no more events or job-conf properties.
*
* @return Parsed {@link LoggedNetworkTopology} object.
*/
public LoggedNetworkTopology build() {
return new LoggedNetworkTopology(allHosts);
} | 3.26 |
hadoop_TopologyBuilder_process_rdh | /**
* Process a collection of JobConf {@link Properties}. We do not restrict it
* to be called once.
*
* @param conf
* The job conf properties to be added.
*/
public void process(Properties conf) {
// no code
} | 3.26 |
hadoop_SingleFilePerBlockCache_deleteCacheFiles_rdh | /**
* Delete cache files as part of the close call.
*/
private void deleteCacheFiles() {
int numFilesDeleted = 0;
for (Entry entry : blocks.values()) {
boolean v19 = entry.takeLock(Entry.LockType.WRITE, PrefetchConstants.PREFETCH_WRITE_LOCK_TIMEOUT, PrefetchConstants.PREFETCH_WRITE_LOCK_TIMEOUT_UNIT);
if (!v19) {
LOG.error("Cache file {} deletion would not be attempted as write lock could not" + " be acquired within {} {}", entry.path, PrefetchConstants.PREFETCH_WRITE_LOCK_TIMEOUT, PrefetchConstants.PREFETCH_WRITE_LOCK_TIMEOUT_UNIT);
continue;
}
try {
if (Files.deleteIfExists(entry.path)) {
prefetchingStatistics.blockRemovedFromFileCache();
numFilesDeleted++;
}
} catch (IOException e) {
LOG.warn("Failed to delete cache file {}", entry.path, e);
} finally {
entry.releaseLock(Entry.LockType.WRITE);
}
}
LOG.debug("Prefetch cache close: Deleted {} cache files", numFilesDeleted);
} | 3.26 |
hadoop_SingleFilePerBlockCache_put_rdh | /**
* Puts the given block in this cache.
*
* @param blockNumber
* the block number, used as a key for blocks map.
* @param buffer
* buffer contents of the given block to be added to this cache.
* @param conf
* the configuration.
* @param localDirAllocator
* the local dir allocator instance.
* @throws IOException
* if either local dir allocator fails to allocate file or if IO error
* occurs while writing the buffer content to the file.
* @throws IllegalArgumentException
* if buffer is null, or if buffer.limit() is zero or negative.
*/
@Override
public void put(int blockNumber, ByteBuffer buffer, Configuration conf,
LocalDirAllocator localDirAllocator) throws IOException {
if (closed.get()) {
return;
}
checkNotNull(buffer, "buffer");
if (blocks.containsKey(blockNumber)) {
Entry v7 = blocks.get(blockNumber);
v7.m0(Entry.LockType.READ);
try {
m1(v7, buffer);
} finally {
v7.releaseLock(Entry.LockType.READ);
}
addToLinkedListHead(v7);
return;
}
Validate.checkPositiveInteger(buffer.limit(), "buffer.limit()");
Path blockFilePath = getCacheFilePath(conf, localDirAllocator);
long size = Files.size(blockFilePath);
if (size != 0) {
String message = String.format("[%d] temp file already has data. %s (%d)", blockNumber, blockFilePath, size);
throw new IllegalStateException(message);
}
writeFile(blockFilePath, buffer);
long checksum = BufferData.getChecksum(buffer);
Entry entry = new Entry(blockNumber, blockFilePath, buffer.limit(), checksum);
blocks.put(blockNumber, entry);
// Update stream_read_blocks_in_cache stats only after blocks map is updated with new file
// entry to avoid any discrepancy related to the value of stream_read_blocks_in_cache.
// If stream_read_blocks_in_cache is updated before updating the blocks map here, closing of
// the input stream can lead to the removal of the cache file even before blocks is added
// with the new cache file, leading to incorrect value of stream_read_blocks_in_cache.
prefetchingStatistics.blockAddedToFileCache();
addToLinkedListAndEvictIfRequired(entry);
} | 3.26 |
hadoop_SingleFilePerBlockCache_addToLinkedListHead_rdh | /**
* Helper method to add the given entry to the head of the linked list.
*
* @param entry
* Block entry to add.
*/
private void addToLinkedListHead(Entry entry) {
blocksLock.writeLock().lock();
try {
addToHeadOfLinkedList(entry);
} finally {
blocksLock.writeLock().unlock();
}
} | 3.26 |
hadoop_SingleFilePerBlockCache_getIntList_rdh | /**
* Produces a human readable list of blocks for the purpose of logging.
* This method minimizes the length of returned list by converting
* a contiguous list of blocks into a range.
* for example,
* 1, 3, 4, 5, 6, 8 becomes 1, 3~6, 8
*/
private String getIntList(Iterable<Integer> nums) {
List<String> numList = new ArrayList<>();
List<Integer> numbers = new ArrayList<Integer>();
for (Integer n : nums) {
numbers.add(n);
}
Collections.sort(numbers);
int index = 0;
while (index < numbers.size()) {
int start = numbers.get(index);
int v29 = start;
int end = start;
while (((++index) < numbers.size()) && ((end = numbers.get(index)) == (v29 + 1))) {
v29 = end;
}
if (start == v29) {
numList.add(Integer.toString(start));
} else {
numList.add(String.format("%d~%d", start, v29));
}
}
return String.join(", ", numList);
} | 3.26 |
hadoop_SingleFilePerBlockCache_addToHeadOfLinkedList_rdh | /**
* Add the given entry to the head of the linked list.
*
* @param entry
* Block entry to add.
*/
private void addToHeadOfLinkedList(Entry entry) {
if (f0 == null) {
f0 = entry;
tail = entry;
}
LOG.debug("Block num {} to be added to the head. Current head block num: {} and tail block num: {}", entry.blockNumber, f0.blockNumber, tail.blockNumber);
if (entry != f0) {
Entry prev = entry.getPrevious();
Entry nxt = entry.getNext();
// no-op if the block is already evicted
if (!blocks.containsKey(entry.blockNumber)) {
return;
}
if (prev != null) {
prev.setNext(nxt);
}
if (nxt != null) {
nxt.setPrevious(prev);
}
entry.setPrevious(null);
entry.setNext(f0);
f0.setPrevious(entry);
f0 = entry;
if ((prev != null) && (prev.getNext() == null)) {
tail = prev;
}
}
} | 3.26 |
hadoop_SingleFilePerBlockCache_m2_rdh | /**
* Create temporary file based on the file path retrieved from local dir allocator
 * instance. The file is created with a .bin suffix and is granted the POSIX
 * file permissions defined in TEMP_FILE_ATTRS.
*
* @param conf
* the configuration.
* @param localDirAllocator
* the local dir allocator instance.
* @return path of the file created.
* @throws IOException
* if IO error occurs while local dir allocator tries to retrieve path
* from local FS or file creation fails or permission set fails.
*/
private static Path m2(final Configuration conf, final LocalDirAllocator localDirAllocator) throws IOException {
Path path = localDirAllocator.getLocalPathForWrite(CACHE_FILE_PREFIX, conf);
File dir = new File(path.getParent().toUri().getPath());
String prefix = path.getName();
File tmpFile = File.createTempFile(prefix, BINARY_FILE_SUFFIX, dir);
Path v38 = Paths.get(tmpFile.toURI());
return Files.setPosixFilePermissions(v38, TEMP_FILE_ATTRS);
} | 3.26 |
hadoop_SingleFilePerBlockCache_addToLinkedListAndEvictIfRequired_rdh | /**
* Add the given entry to the head of the linked list and if the LRU cache size
* exceeds the max limit, evict tail of the LRU linked list.
*
* @param entry
* Block entry to add.
*/
private void addToLinkedListAndEvictIfRequired(Entry entry) {
blocksLock.writeLock().lock();
try {
addToHeadOfLinkedList(entry);
entryListSize++;
if ((entryListSize > maxBlocksCount) && (!closed.get())) {
Entry elementToPurge = tail;
tail = tail.getPrevious();
if (tail == null) {
tail = f0;
}
tail.setNext(null);
elementToPurge.setPrevious(null);
deleteBlockFileAndEvictCache(elementToPurge);
}
} finally {
blocksLock.writeLock().unlock();
}
} | 3.26 |
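The policy described above (newest entry at the head, eviction from the tail once the block count exceeds the maximum) is the standard LRU discipline. As a rough, hypothetical analogy outside of Hadoop, the same behaviour can be sketched with an access-ordered `LinkedHashMap`; the limit and the `Path` payload are assumed values, not the real implementation.

```java
import java.nio.file.Path;
import java.util.LinkedHashMap;
import java.util.Map;

// Minimal LRU sketch (not the Hadoop implementation).
public class LruSketch {
  private static final int MAX_BLOCKS_COUNT = 16; // assumed limit

  private final Map<Integer, Path> blocks =
      new LinkedHashMap<Integer, Path>(16, 0.75f, /* accessOrder= */ true) {
        @Override
        protected boolean removeEldestEntry(Map.Entry<Integer, Path> eldest) {
          // Equivalent of purging the tail entry of the linked list.
          return size() > MAX_BLOCKS_COUNT;
        }
      };

  public void addBlock(int blockNumber, Path cacheFile) {
    blocks.put(blockNumber, cacheFile); // inserts/moves the block to the "head"
  }
}
```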
hadoop_SingleFilePerBlockCache_m0_rdh | /**
* Take the read or write lock.
*
* @param lockType
* type of the lock.
*/
private void m0(LockType lockType) {
if (LockType.READ == lockType) {
f1.readLock().lock();
} else if (LockType.WRITE == lockType) {
f1.writeLock().lock();
}
} | 3.26 |
hadoop_SingleFilePerBlockCache_getCacheFilePath_rdh | /**
* Return temporary file created based on the file path retrieved from local dir allocator.
*
* @param conf
* The configuration object.
* @param localDirAllocator
* Local dir allocator instance.
* @return Path of the temporary file created.
* @throws IOException
* if IO error occurs while local dir allocator tries to retrieve path
* from local FS or file creation fails or permission set fails.
*/
protected Path getCacheFilePath(final Configuration conf, final LocalDirAllocator localDirAllocator) throws IOException {
return m2(conf, localDirAllocator);
} | 3.26 |
hadoop_SingleFilePerBlockCache_size_rdh | /**
* Gets the number of blocks in this cache.
*/
@Override
public int size() {
return blocks.size();
} | 3.26 |
hadoop_SingleFilePerBlockCache_containsBlock_rdh | /**
* Indicates whether the given block is in this cache.
*/
@Override
public boolean containsBlock(int blockNumber) {
return blocks.containsKey(blockNumber);
} | 3.26 |
hadoop_SingleFilePerBlockCache_get_rdh | /**
* Gets the block having the given {@code blockNumber}.
*
* @throws IllegalArgumentException
* if buffer is null.
*/
@Override
public void get(int blockNumber, ByteBuffer buffer)
throws IOException {
if (closed.get()) {
return;
}
checkNotNull(buffer, "buffer");
Entry entry = getEntry(blockNumber);
entry.m0(Entry.LockType.READ);
try {
buffer.clear();
readFile(entry.path, buffer);
buffer.rewind();
m1(entry, buffer);
} finally {
entry.releaseLock(Entry.LockType.READ);
}
} | 3.26 |
hadoop_SingleFilePerBlockCache_blocks_rdh | /**
* Gets the blocks in this cache.
*/
@Override
public Iterable<Integer> blocks() {
return Collections.unmodifiableList(new ArrayList<>(blocks.keySet()));
} | 3.26 |
hadoop_SingleFilePerBlockCache_takeLock_rdh | /**
* Try to take the read or write lock within the given timeout.
*
* @param lockType
* type of the lock.
* @param timeout
* the time to wait for the given lock.
* @param unit
* the time unit of the timeout argument.
* @return true if the lock of the given lock type was acquired.
*/
private boolean takeLock(LockType lockType, long timeout, TimeUnit unit) {
try {
if (LockType.READ == lockType) {
return f1.readLock().tryLock(timeout, unit);
} else if (LockType.WRITE == lockType) {
return f1.writeLock().tryLock(timeout, unit);
}
} catch (InterruptedException e) {
LOG.warn("Thread interrupted while trying to acquire {} lock", lockType, e);
Thread.currentThread().interrupt();
}
return false;
} | 3.26 |
hadoop_SingleFilePerBlockCache_releaseLock_rdh | /**
* Release the read or write lock.
*
* @param lockType
* type of the lock.
*/
private void releaseLock(LockType lockType) {
if (LockType.READ == lockType) {
f1.readLock().unlock();
} else if (LockType.WRITE == lockType) {
f1.writeLock().unlock();
}
} | 3.26 |
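A generic, self-contained sketch of the pattern the two lock helpers above wrap: a timed write-lock attempt followed by a guaranteed release in a finally block. The lock, timeout and the "work" are all illustrative; the real code pairs `takeLock(Entry.LockType.WRITE, ...)` with `releaseLock(Entry.LockType.WRITE)` using the prefetch timeout constants.

```java
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Hypothetical timed-lock pattern, not the Hadoop class itself.
public class TimedLockSketch {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  public boolean updateWithTimeout() throws InterruptedException {
    if (!lock.writeLock().tryLock(5, TimeUnit.SECONDS)) {
      return false; // could not acquire within the timeout; caller skips the work
    }
    try {
      // ... mutate the guarded state ...
      return true;
    } finally {
      lock.writeLock().unlock(); // always released, mirroring releaseLock()
    }
  }
}
```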
hadoop_SingleFilePerBlockCache_deleteBlockFileAndEvictCache_rdh | /**
* Delete cache file as part of the block cache LRU eviction.
*
* @param elementToPurge
* Block entry to evict.
*/
private void deleteBlockFileAndEvictCache(Entry elementToPurge) {
try (DurationTracker ignored = trackerFactory.trackDuration(STREAM_FILE_CACHE_EVICTION)) {
boolean lockAcquired = elementToPurge.takeLock(Entry.LockType.WRITE, PrefetchConstants.PREFETCH_WRITE_LOCK_TIMEOUT, PrefetchConstants.PREFETCH_WRITE_LOCK_TIMEOUT_UNIT);
if (!lockAcquired) {
LOG.error("Cache file {} deletion would not be attempted as write lock could not" + " be acquired within {} {}", elementToPurge.path, PrefetchConstants.PREFETCH_WRITE_LOCK_TIMEOUT, PrefetchConstants.PREFETCH_WRITE_LOCK_TIMEOUT_UNIT);
} else {
try {
if (Files.deleteIfExists(elementToPurge.path)) {
entryListSize--;
prefetchingStatistics.blockRemovedFromFileCache();
blocks.remove(elementToPurge.blockNumber);
prefetchingStatistics.blockEvictedFromFileCache();
}
} catch (IOException e) {
LOG.warn("Failed to delete cache file {}", elementToPurge.path, e);
} finally {
elementToPurge.releaseLock(Entry.LockType.WRITE);
}
}
}
} | 3.26 |
hadoop_ClientId_getClientId_rdh | /**
*
 * @return the clientId as byte[].
*/
public static byte[] getClientId() {
UUID uuid = UUID.randomUUID();
ByteBuffer buf = ByteBuffer.wrap(new byte[BYTE_LENGTH]);
buf.putLong(uuid.getMostSignificantBits());
buf.putLong(uuid.getLeastSignificantBits());
return buf.array();
} | 3.26 |
hadoop_ClientId_toBytes_rdh | /**
 * Convert a clientId string to its byte[] representation.
 *
 * @param id
 * input id.
 * @return the byte[] representation of the clientId.
*/
public static byte[] toBytes(String id) {
if ((id == null) || "".equals(id)) {
return new byte[0];
}
UUID uuid = UUID.fromString(id);
ByteBuffer buf = ByteBuffer.wrap(new byte[BYTE_LENGTH]);
buf.putLong(uuid.getMostSignificantBits());
buf.putLong(uuid.getLeastSignificantBits());
return buf.array();
} | 3.26 |
hadoop_ClientId_m0_rdh | /**
 * Convert a clientId byte[] to its string form.
 *
 * @param clientId
 * input clientId.
 * @return the string form of the clientId.
*/
public static String m0(byte[] clientId) {
// clientId can be null or an empty array
if ((clientId == null) || (clientId.length == 0)) {
return "";
}
// otherwise should be 16 bytes
Preconditions.checkArgument(clientId.length == BYTE_LENGTH);
long msb = getMsb(clientId);
long lsb = getLsb(clientId);
return new UUID(msb, lsb).toString();
} | 3.26 |
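A hypothetical round-trip of the three helpers above, assuming the enclosing class is named `ClientId` as the row names suggest (the byte[]-to-string conversion appears under the obfuscated name `m0` in this dump).

```java
import java.util.Arrays;

// Sketch only: generate a clientId, convert to its UUID string, and back.
public class ClientIdRoundTrip {
  public static void main(String[] args) {
    byte[] id = ClientId.getClientId();   // 16 bytes of a random UUID
    String text = ClientId.m0(id);        // canonical UUID string
    byte[] back = ClientId.toBytes(text); // same 16 bytes again
    System.out.println(Arrays.equals(id, back)); // prints "true"
  }
}
```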
hadoop_DurationTrackerFactory_trackDuration_rdh | /**
* Initiate a duration tracking operation by creating/returning
* an object whose {@code close()} call will
* update the statistics.
* The expected use is within a try-with-resources clause.
*
* @param key
* statistic key
* @return an object to close after an operation completes.
*/
default DurationTracker trackDuration(String key) {
return trackDuration(key, 1);
} | 3.26 |
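A sketch of the try-with-resources pattern the javadoc above describes, assuming `DurationTracker` is `AutoCloseable` with a non-throwing `close()` as that guidance implies. The statistic key "op_example" is an illustrative name, not a real Hadoop statistic.

```java
// Hypothetical caller of a DurationTrackerFactory.
public class TrackDurationSketch {
  public static void timedOperation(DurationTrackerFactory factory) {
    try (DurationTracker ignored = factory.trackDuration("op_example")) {
      // ... perform the operation being timed; closing the tracker
      // records its duration against the "op_example" statistic.
    }
  }
}
```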
hadoop_FsCreateModes_create_rdh | /**
* Create from masked and unmasked modes.
*
* @param masked
* masked.
* @param unmasked
* unmasked.
* @return FsCreateModes.
*/
public static FsCreateModes create(FsPermission masked, FsPermission unmasked) {
assert masked.getUnmasked() == null;
assert unmasked.getUnmasked() == null;
return new FsCreateModes(masked, unmasked);
} | 3.26 |
hadoop_NFS3Response_serialize_rdh | /**
* Write the response, along with the rpc header (including verifier), to the
* XDR.
*
* @param out
* XDR output message
* @param xid
* transaction id
* @param verifier
* verifies reply
* @return XDR response
*/
public XDR serialize(XDR out, int xid, Verifier verifier) {
RpcAcceptedReply reply = RpcAcceptedReply.getAcceptInstance(xid, verifier);
reply.write(out);
out.writeInt(this.getStatus());
return out;
} | 3.26 |
hadoop_AoclDiagnosticOutputParser_parseDiagnosticOutput_rdh | /**
* One real sample output of Intel FPGA SDK 17.0's "aocl diagnose" is as below:
* "
* aocl diagnose: Running diagnose from /home/fpga/intelFPGA_pro/17.0/hld/board/nalla_pcie/linux64/libexec
*
* ------------------------- acl0 -------------------------
* Vendor: Nallatech ltd
*
* Phys Dev Name Status Information
*
* aclnalla_pcie0Passed nalla_pcie (aclnalla_pcie0)
* PCIe dev_id = 2494, bus:slot.func = 02:00.00, Gen3 x8
* FPGA temperature = 54.4 degrees C.
* Total Card Power Usage = 31.7 Watts.
* Device Power Usage = 0.0 Watts.
*
* DIAGNOSTIC_PASSED
* ---------------------------------------------------------
* "
*
 * Per Intel's guide, the output (presumably from an outdated or earlier SDK version) is as below:
*
* "
* aocl diagnose: Running diagnostic from ALTERAOCLSDKROOT/board/<board_name>/
* <platform>/libexec
* Verified that the kernel mode driver is installed on the host machine.
* Using board package from vendor: <board_vendor_name>
* Querying information for all supported devices that are installed on the host
* machine ...
*
* device_name Status Information
*
* acl0 Passed <descriptive_board_name>
* PCIe dev_id = <device_ID>, bus:slot.func = 02:00.00,
* at Gen 2 with 8 lanes.
* FPGA temperature=43.0 degrees C.
* acl1 Passed <descriptive_board_name>
* PCIe dev_id = <device_ID>, bus:slot.func = 03:00.00,
* at Gen 2 with 8 lanes.
* FPGA temperature = 35.0 degrees C.
*
* Found 2 active device(s) installed on the host machine, to perform a full
* diagnostic on a specific device, please run aocl diagnose <device_name>
*
* DIAGNOSTIC_PASSED
* "
 * But this method only supports the first output format.
*/
public static List<FpgaDevice> parseDiagnosticOutput(String output, InnerShellExecutor shellExecutor, String fpgaType) {
if (output.contains("DIAGNOSTIC_PASSED")) {
List<FpgaDevice> devices = new ArrayList<>();
Matcher headerStartMatcher = Pattern.compile("acl[0-31]").matcher(output);
Matcher headerEndMatcher = Pattern.compile("(?i)DIAGNOSTIC_PASSED").matcher(output);
int sectionStartIndex;
int sectionEndIndex;
String aliasName;
while (headerStartMatcher.find()) {
sectionStartIndex = headerStartMatcher.end();
String section = null;
aliasName = headerStartMatcher.group();
while (headerEndMatcher.find(sectionStartIndex)) {
sectionEndIndex = headerEndMatcher.start();
section = output.substring(sectionStartIndex, sectionEndIndex);
break;
}
if (section == null) {
LOG.warn("Unsupported diagnose output");
LOG.warn("aocl output is: " + output);
return Collections.emptyList();
}
// devName, \(.*\)
// busNum, bus:slot.func\s=\s.*,
// FPGA temperature\s=\s.*
// Total\sCard\sPower\sUsage\s=\s.*
String[] fieldRegexes = new String[]{ "\\(.*\\)\n", "(?i)bus:slot.func\\s=\\s.*,", "(?i)FPGA temperature\\s=\\s.*", "(?i)Total\\sCard\\sPower\\sUsage\\s=\\s.*" };
String[] fields = new String[4];
String tempFieldValue;
for (int i = 0; i < fieldRegexes.length; i++) {
Matcher fieldMatcher = Pattern.compile(fieldRegexes[i]).matcher(section);
if (!fieldMatcher.find()) {
LOG.warn(("Couldn't find " + fieldRegexes[i]) + " pattern");
fields[i] = "";
continue;
}
tempFieldValue = fieldMatcher.group().trim();
if (i == 0) {
// special case for Device name
fields[i] = tempFieldValue.substring(1, tempFieldValue.length() - 1);
} else {
String ss = tempFieldValue.split("=")[1].trim();
fields[i] = ss.substring(0, ss.length() - 1);
}
}
String majorMinorNumber = shellExecutor.getMajorAndMinorNumber(fields[0]);
if (null != majorMinorNumber) {
String[] v14 = majorMinorNumber.split(":");
devices.add(new FpgaDevice(fpgaType, Integer.parseInt(v14[0]), Integer.parseInt(v14[1]), aliasName));
} else {
LOG.warn("Failed to retrieve major/minor number for device");
}
}
return devices;
} else {
LOG.warn("The diagnostic has failed");
LOG.warn("Output of aocl is: " + output);
return Collections.emptyList();
}
} | 3.26 |
hadoop_RegistryOperationsService_validatePath_rdh | /**
* Validate a path
*
* @param path
* path to validate
* @throws InvalidPathnameException
* if a path is considered invalid
*/
protected void validatePath(String path) throws InvalidPathnameException {
// currently no checks are performed
} | 3.26 |
hadoop_RegistryOperationsService_getClientAcls_rdh | /**
* Get the aggregate set of ACLs the client should use
* to create directories
*
* @return the ACL list
*/
public List<ACL> getClientAcls() {
return getRegistrySecurity().getClientACLs();
} | 3.26 |
hadoop_S3AMultipartUploader_buildPartHandlePayload_rdh | /**
* Build the payload for marshalling.
*
 * @param path
 * destination path
 * @param uploadId
 * ID of the multipart upload
 * @param partNumber
* part number from response
* @param etag
* upload etag
* @param len
* length
* @return a byte array to marshall.
* @throws IOException
* error writing the payload
*/
@VisibleForTesting
static byte[] buildPartHandlePayload(final String path, final String uploadId, final int partNumber, final String etag, final long len) throws IOException {
return new PartHandlePayload(path, uploadId, partNumber, len, etag).toBytes();
} | 3.26 |
hadoop_S3AMultipartUploader_parsePartHandlePayload_rdh | /**
* Parse the payload marshalled as a part handle.
*
* @param data
* handle data
* @return the length and etag
* @throws IOException
* error reading the payload
*/
@VisibleForTesting
static PartHandlePayload parsePartHandlePayload(final byte[] data) throws IOException {
try (DataInputStream input = new DataInputStream(new ByteArrayInputStream(data))) {
final String header = input.readUTF();
if (!HEADER.equals(header)) {
throw new IOException(("Wrong header string: \"" + header) + "\"");
}
final String path = input.readUTF();
final String uploadId = input.readUTF();
final int partNumber = input.readInt();
final long len = input.readLong();
final String etag = input.readUTF();
if (len < 0) {
throw new IOException("Negative length");
}
return new PartHandlePayload(path, uploadId, partNumber, len, etag);
}
} | 3.26 |
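A hypothetical round-trip of the two `@VisibleForTesting` helpers above. The path, upload id, etag and length are illustrative values, the class and nested-type names are taken from the row names, and since both helpers are package-private a real caller would sit in the same package (for example a test).

```java
import java.io.IOException;

// Sketch only: marshal a part handle and parse it back.
public class PartHandlePayloadRoundTrip {
  public static void main(String[] args) throws IOException {
    byte[] payload = S3AMultipartUploader.buildPartHandlePayload(
        "s3a://bucket/dest/file", "upload-0001", 1, "etag-abc", 1024L);
    PartHandlePayload parsed =
        S3AMultipartUploader.parsePartHandlePayload(payload);
    // parsed now carries the same path, upload id, part number, length and etag.
  }
}
```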
hadoop_S3AMultipartUploader_abortUploadsUnderPath_rdh | /**
 * Abort all MPUs under the path.
*
* @param path
* path to abort uploads under.
* @return a future which eventually returns the number of entries found
* @throws IOException
* submission failure
*/
@Override
public CompletableFuture<Integer> abortUploadsUnderPath(final Path path) throws IOException {
statistics.abortUploadsUnderPathInvoked();
return context.submit(new CompletableFuture<>(), () -> writeOperations.abortMultipartUploadsUnderPath(context.pathToKey(path)));
} | 3.26 |
hadoop_EchoUserResolver_needsTargetUsersList_rdh | /**
* {@inheritDoc }
* <br><br>
* Since {@link EchoUserResolver} simply returns the user's name passed as
* the argument, it doesn't need a target list of users.
*/
public boolean needsTargetUsersList() {
return false;
} | 3.26 |
hadoop_WorkloadDriver_getMapperClass_rdh | // The cast is actually checked via isAssignableFrom but the compiler doesn't
// recognize this
@SuppressWarnings("unchecked")
private Class<? extends WorkloadMapper<?, ?, ?, ?>> getMapperClass(String className) {
String[] potentialQualifiedClassNames = new String[]{
WorkloadDriver.class.getPackage().getName() + "." + className,
AuditReplayMapper.class.getPackage().getName() + "." + className,
className
};
for (String qualifiedClassName : potentialQualifiedClassNames) {
Class<?> mapperClass;
try {
mapperClass = getConf().getClassByName(qualifiedClassName);
} catch (ClassNotFoundException cnfe) {
continue;
}
if (!WorkloadMapper.class.isAssignableFrom(mapperClass)) {
throw new IllegalArgumentException((className + " is not a subclass of ") + WorkloadMapper.class.getCanonicalName());
}
return ((Class<? extends WorkloadMapper<?, ?, ?, ?>>) (mapperClass));
}
throw new IllegalArgumentException("Unable to find workload mapper class: " + className);
} | 3.26 |
hadoop_AHSClient_createAHSClient_rdh | /**
* Create a new instance of AHSClient.
*/
@Public
public static AHSClient createAHSClient() {
return new AHSClientImpl();
} | 3.26 |
hadoop_AbfsOperationMetrics_m0_rdh | /**
*
* @return no of operations failed.
*/
AtomicLong m0() {
return operationsFailed;
} | 3.26 |
hadoop_AbfsOperationMetrics_setEndTime_rdh | /**
*
* @param endTime
* sets the end time.
*/
void setEndTime(final long endTime) {
this.endTime = endTime;
} | 3.26 |
hadoop_AbfsOperationMetrics_getOperationsSuccessful_rdh | /**
*
* @return no of successful operations.
*/
AtomicLong getOperationsSuccessful() {
return operationsSuccessful;
} | 3.26 |
hadoop_AbfsOperationMetrics_getEndTime_rdh | /**
*
* @return end time of metric collection.
 */
long getEndTime() {
return endTime;
} | 3.26 |
hadoop_AbfsOperationMetrics_getBytesSuccessful_rdh | /**
*
* @return bytes successfully transferred.
*/
AtomicLong getBytesSuccessful() {
return bytesSuccessful;
} | 3.26 |
hadoop_AbfsOperationMetrics_getStartTime_rdh | /**
*
* @return start time of metric collection.
*/
long getStartTime() {
return startTime;
} | 3.26 |
hadoop_AbfsOperationMetrics_getBytesFailed_rdh | /**
*
* @return bytes failed to transfer.
*/
AtomicLong getBytesFailed() {
return bytesFailed;
} | 3.26 |
hadoop_MoveStep_toString_rdh | /**
* Returns a string representation of the object.
*
* @return a string representation of the object.
 */
@Override
public String toString() {
return String.format("%s\t %s\t %s\t %s%n", this.getSourceVolume().getPath(), this.getDestinationVolume().getPath(), getSizeString(this.getBytesToMove()), this.getDestinationVolume().getStorageType());
} | 3.26 |
hadoop_MoveStep_getSizeString_rdh | /**
* Returns human readable move sizes.
*
* @param size
* - bytes being moved.
* @return String
*/
@Override
public String getSizeString(long size) {
return StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1);
} | 3.26 |
hadoop_MoveStep_getVolumeSetID_rdh | /**
* Gets a volume Set ID.
*
* @return String
*/
@Override
public String getVolumeSetID() {
return volumeSetID;
} | 3.26 |
hadoop_MoveStep_getSourceVolume_rdh | /**
* Gets Source Volume.
*
* @return -- Source Volume
*/
@Override
public DiskBalancerVolume getSourceVolume() {
return sourceVolume;
} | 3.26 |
hadoop_MoveStep_getBytesToMove_rdh | /**
* Returns number of bytes to move.
*
* @return - long
*/
@Override
public long getBytesToMove() {
return f0;
} | 3.26 |
hadoop_MoveStep_setMaxDiskErrors_rdh | /**
* Sets the maximum numbers of Errors to be tolerated before this
* step is aborted.
*
* @param maxDiskErrors
* - long
*/
@Override
public void setMaxDiskErrors(long maxDiskErrors) {
this.maxDiskErrors = maxDiskErrors;
} | 3.26 |
hadoop_MoveStep_setSourceVolume_rdh | /**
* Set source volume.
*
* @param sourceVolume
* - volume
 */
public void setSourceVolume(DiskBalancerVolume sourceVolume) {
this.sourceVolume = sourceVolume;
} | 3.26 |
hadoop_MoveStep_getBandwidth_rdh | /**
 * Gets the disk bandwidth, that is, the MB/sec to be copied. We will max out
 * at this amount of throughput. This is useful to prevent too much I/O on
 * the datanode while the datanode is in use.
*
* @return long.
*/
@Override
public long getBandwidth() {
return bandwidth;
} | 3.26 |
hadoop_MoveStep_setIdealStorage_rdh | /**
* Sets Ideal Storage.
*
* @param idealStorage
* - ideal Storage
*/
public void setIdealStorage(double idealStorage) {
this.idealStorage = idealStorage;
} | 3.26 |
hadoop_MoveStep_setTolerancePercent_rdh | /**
* Sets the tolerance percentage.
*
* @param tolerancePercent
* - long
*/
@Override
public void setTolerancePercent(long tolerancePercent) {
this.tolerancePercent = tolerancePercent;
} | 3.26 |
hadoop_MoveStep_setDestinationVolume_rdh | /**
* Sets destination volume.
*
* @param destinationVolume
* - volume
*/
public void setDestinationVolume(DiskBalancerVolume destinationVolume) {
this.destinationVolume = destinationVolume;
} | 3.26 |
hadoop_MoveStep_getIdealStorage_rdh | /**
* Gets the IdealStorage.
*
 * @return double
*/
@Override
public double getIdealStorage() {
return idealStorage;
} | 3.26 |
hadoop_MoveStep_getDestinationVolume_rdh | /**
* Gets the destination volume.
*
* @return - volume
*/
@Override
public DiskBalancerVolume getDestinationVolume() {
return destinationVolume;
} | 3.26 |
hadoop_MoveStep_setVolumeSetID_rdh | /**
* Sets volume id.
*
* @param volumeSetID
* - volume ID
 */
public void setVolumeSetID(String volumeSetID) {
this.volumeSetID = volumeSetID;
} | 3.26 |
hadoop_MoveStep_getTolerancePercent_rdh | /**
* Tolerance Percentage indicates when a move operation is considered good
* enough. This is a percentage of deviation from ideal that is considered
* fine.
*
 * For example: if the ideal amount on each disk was 1 TB and the
 * tolerance was 10%, then getting to 900 GB on the destination disk is
 * considered good enough.
*
* @return tolerance percentage.
*/
@Override
public long getTolerancePercent() {
return tolerancePercent;
} | 3.26 |
hadoop_MoveStep_setBytesToMove_rdh | /**
* Sets bytes to move.
*
* @param bytesToMove
* - number of bytes
*/
public void setBytesToMove(long bytesToMove) {
this.f0 = bytesToMove;
} | 3.26 |
hadoop_ApplicationInitializationContext_getApplicationId_rdh | /**
* Get {@link ApplicationId} of the application
*
 * @return the application's ID
*/
public ApplicationId getApplicationId() {
return this.applicationId;
} | 3.26 |
hadoop_ApplicationInitializationContext_getApplicationDataForService_rdh | /**
* Get the data sent to the NodeManager via
* {@link ContainerManagementProtocol#startContainers(StartContainersRequest)}
* as part of {@link ContainerLaunchContext#getServiceData()}
*
* @return the servicesData for this application.
*/
public ByteBuffer getApplicationDataForService() {
return this.appDataForService;
} | 3.26 |
hadoop_ApplicationInitializationContext_getUser_rdh | /**
* Get the user-name of the application-submitter
*
* @return user-name
*/
public String getUser() {
return this.user;
} | 3.26 |
hadoop_BondedS3AStatisticsContext_decrementGauge_rdh | /**
* Decrement a specific gauge.
* <p>
* No-op if not defined.
*
* @param op
* operation
* @param count
* increment value
* @throws ClassCastException
* if the metric is of the wrong type
*/
@Override
public void decrementGauge(Statistic op, long count) {
getInstrumentation().decrementGauge(op, count);
} | 3.26 |
hadoop_BondedS3AStatisticsContext_newCommitterStatistics_rdh | /**
* Create a new instance of the committer statistics.
*
* @return a new committer statistics instance
*/
@Override
public CommitterStatistics newCommitterStatistics() {
return getInstrumentation().newCommitterStatistics();
} | 3.26 |