name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_DataJoinReducerBase_regroup_rdh | /**
* This is the function that re-groups values for a key into sub-groups based
* on a secondary key (input tag).
*
* @param arg1 the iterator over the values seen for the key
* @return a sorted map from input tag to an iterator over the values carrying that tag */
private SortedMap<Object, ResetableIterator>
regroup(Object key, Iterator arg1, Reporter reporter) throws IOException {
this.numOfValues = 0;
SortedMap<Object, ResetableIterator> retv = new TreeMap<Object, ResetableIterator>();
TaggedMapOutput aRecord = null;
while (arg1.hasNext()) {
this.numOfValues += 1;
if ((this.numOfValues % 100) == 0) {
reporter.setStatus((("key: " + key.toString()) + " numOfValues: ") + this.numOfValues);
}
if (this.numOfValues > this.maxNumOfValuesPerGroup) {
continue;
}
aRecord = ((TaggedMapOutput) (arg1.next())).clone(job);
Text tag = aRecord.getTag();
ResetableIterator data = retv.get(tag);
if (data == null) {
data = createResetableIterator();
retv.put(tag, data);
}
data.add(aRecord);
}
if (this.numOfValues > this.largestNumOfValues) {
this.largestNumOfValues = numOfValues;
LOG.info((("key: " + key.toString()) + " this.largestNumOfValues: ") + this.largestNumOfValues);
}
return retv;
} | 3.26 |
hadoop_DataJoinReducerBase_createResetableIterator_rdh | /**
* The subclass can provide a different implementation on ResetableIterator.
* This is necessary if the number of values in a reduce call is very high.
*
* The default provided here uses ArrayListBackedIterator
*
* @return an Object of ResetableIterator.
*/
protected ResetableIterator createResetableIterator() {
return new ArrayListBackedIterator();
} | 3.26 |
hadoop_DataJoinReducerBase_collect_rdh | /**
* The subclass can override this method to perform additional filtering
* and/or other processing logic before a value is collected.
*
* @param key
* @param aRecord
* @param output
* @param reporter
* @throws IOException
*/
protected void collect(Object key, TaggedMapOutput aRecord, OutputCollector output, Reporter reporter) throws IOException {
this.collected += 1;
addLongValue("collectedCount", 1);
if (aRecord != null) {
output.collect(key, aRecord.getData());
reporter.setStatus((("key: " + key.toString()) + " collected: ") + collected);
addLongValue("actuallyCollectedCount", 1);
}
} | 3.26 |
hadoop_EmptyIOStatistics_getInstance_rdh | /**
* Get the single instance of this class.
*
* @return a shared, empty instance.
*/
public static IOStatistics getInstance() {
return INSTANCE;
} | 3.26 |
hadoop_Print_registerExpression_rdh | /**
* Implements the -print expression for the
* {@link org.apache.hadoop.fs.shell.find.Find} command.
*/
final class Print extends BaseExpression {
/**
* Registers this expression with the specified factory.
*/
public static void registerExpression(ExpressionFactory factory) throws IOException {
factory.addClass(Print.class, "-print");
factory.addClass(Print.Print0.class, "-print0");
} | 3.26 |
hadoop_SubClusterIdInfo_toId_rdh | /**
* Get the sub-cluster identifier as {@link SubClusterId}.
*
* @return the sub-cluster id.
*/
public SubClusterId toId() {
return SubClusterId.newInstance(id);
} | 3.26 |
hadoop_Platform_registerKey_rdh | /**
* Associate a key class with its serializer and platform.
*
* @param keyClassName
* map out key class name
* @param key
* key serializer class
*/
protected void registerKey(String keyClassName, Class<?> key) throws IOException {
serialization.register(keyClassName, key);
keyClassNames.add(keyClassName);
}
/**
* Whether a platform supports a specific key. A supported key should at least satisfy two conditions:
*
* 1. the key belongs to the platform
* 2. the associated serializer must implement {@link INativeComparable} | 3.26 |
hadoop_RegistryDNSServer_processServiceRecord_rdh | /**
* Process the service record, parsing the information and creating the
* required DNS records.
*
* @param path
* the service record path.
* @param record
* the record.
* @param command
* the registry command to execute.
* @throws IOException
*/
private void processServiceRecord(String path, ServiceRecord record, ManagementCommand command) throws IOException {
command.exec(path, record);
} | 3.26 |
hadoop_RegistryDNSServer_manageRegistryDNS_rdh | /**
* Performs operations required to setup the DNS registry instance (e.g. sets
* up a path listener to react to service record creation/deletion and invoke
* the appropriate registry method).
*/
private void manageRegistryDNS() {
try {
registryOperations.instantiateCacheForRegistry();
registryOperations.registerPathListener(new PathListener() {
private String registryRoot = getConfig().get(RegistryConstants.KEY_REGISTRY_ZK_ROOT, RegistryConstants.DEFAULT_ZK_REGISTRY_ROOT);
@Override
public void nodeAdded(String path) throws IOException {
// get a listing of service records
String relativePath = getPathRelativeToRegistryRoot(path);
String child = RegistryPathUtils.lastPathEntry(path);
Map<String, RegistryPathStatus> map = new HashMap<>();
map.put(child, registryOperations.stat(relativePath));
Map<String, ServiceRecord> records = RegistryUtils.extractServiceRecords(registryOperations, getAdjustedParentPath(path), map);
processServiceRecords(records, register);
pathToRecordMap.putAll(records);
}
private String getAdjustedParentPath(String path) {
Preconditions.checkNotNull(path);
String adjustedPath = null;
adjustedPath = getPathRelativeToRegistryRoot(path);
try {
return RegistryPathUtils.parentOf(adjustedPath);
} catch (PathNotFoundException e) {
// attempt to use passed in path
return path;
}
}
private String getPathRelativeToRegistryRoot(String path) {
String adjustedPath;
if (path.equals(registryRoot)) {
adjustedPath = "/";
} else {
adjustedPath = path.substring(registryRoot.length());
}
return adjustedPath;
}
@Override
public void nodeRemoved(String path) throws IOException {
ServiceRecord record = pathToRecordMap.remove(path.substring(registryRoot.length()));
processServiceRecord(path, record, delete);
}});
registryOperations.startCache();
// create listener for record deletions
} catch (Exception e) {
LOG.warn("Unable to monitor the registry. DNS support disabled.", e);
}
} | 3.26 |
hadoop_RegistryDNSServer_serviceStart_rdh | /**
* Starts the server.
*
* @throws Exception
* if service start fails.
*/
@Override
protected void serviceStart() throws Exception {
super.serviceStart();
manageRegistryDNS();
} | 3.26 |
hadoop_RegistryDNSServer_main_rdh | /**
* Launches the server instance.
*
* @param args
* the command line args.
* @throws IOException
* if command line options can't be parsed
*/
public static void main(String[] args) throws IOException {
StringUtils.startupShutdownMessage(RegistryDNSServer.class, args, LOG);
Configuration conf = new RegistryConfiguration();
new GenericOptionsParser(conf, args);
launchDNSServer(conf, null);
} | 3.26 |
hadoop_RegistryDNSServer_processServiceRecords_rdh | /**
* Iterates through the supplied service records, executing the provided registry
* command.
*
* @param records
* the service records.
* @param command
* the registry command.
* @throws IOException
*/
private void processServiceRecords(Map<String, ServiceRecord> records, ManagementCommand command) throws IOException {
for (Map.Entry<String, ServiceRecord> entry : records.entrySet()) {
processServiceRecord(entry.getKey(), entry.getValue(), command);
}
} | 3.26 |
hadoop_RegistryDNSServer_launchDNSServer_rdh | /**
* Launch the server.
*
* @param conf
* configuration
* @param rdns
* registry dns instance
* @return the launched server instance. */
static RegistryDNSServer launchDNSServer(Configuration conf, RegistryDNS rdns) {
RegistryDNSServer dnsServer = null;
Thread.setDefaultUncaughtExceptionHandler(new HadoopUncaughtExceptionHandler());
try {
dnsServer = new RegistryDNSServer("RegistryDNSServer", rdns);
ShutdownHookManager.get().addShutdownHook(new CompositeService.CompositeServiceShutdownHook(dnsServer), SHUTDOWN_HOOK_PRIORITY);
dnsServer.init(conf);
dnsServer.start();
} catch (Throwable t) {
LOG.error("Error starting Registry DNS Server", t);
ExitUtil.terminate(-1, "Error starting Registry DNS Server");
}
return dnsServer;
} | 3.26 |
hadoop_RegistryDNSServer_serviceInit_rdh | /**
* Initializes the DNS server.
*
* @param conf
* the hadoop configuration instance.
* @throws Exception
* if service initialization fails.
*/
@Override
protected void serviceInit(Configuration conf) throws Exception {
pathToRecordMap = new ConcurrentHashMap<>();
registryOperations = new RegistryOperationsService("RegistryDNSOperations");
addService(registryOperations);
if (registryDNS == null) {
registryDNS = ((RegistryDNS) (DNSOperationsFactory.createInstance(conf)));
}
addService(registryDNS);
super.serviceInit(conf);
} | 3.26 |
hadoop_WasbTokenRenewer_isManaged_rdh | /**
* Checks if passed token is managed.
*
* @param token
* the token being checked
* @return true if it is managed.
* @throws IOException
* thrown when evaluating if token is managed.
*/
@Override
public boolean isManaged(Token<?> token) throws IOException {
return true;
} | 3.26 |
hadoop_WasbTokenRenewer_renew_rdh | /**
* Renew the delegation token.
*
* @param token
* token to renew.
* @param conf
* configuration object.
* @return extended expiry time of the token.
* @throws IOException
* thrown when trying get current user.
* @throws InterruptedException
* thrown when thread is interrupted
*/
@Override
public long renew(final Token<?> token, Configuration conf) throws IOException, InterruptedException {
f0.debug("Renewing the delegation token");
return getInstance(conf).renewDelegationToken(token);
} | 3.26 |
hadoop_WasbTokenRenewer_handleKind_rdh | /**
* Checks if this particular object handles the Kind of token passed.
*
* @param kind
* the kind of the token
* @return true if it handles passed token kind false otherwise.
*/
@Override
public boolean handleKind(Text kind) {
return WasbDelegationTokenIdentifier.TOKEN_KIND.equals(kind);
} | 3.26 |
hadoop_TimelineStateStore_serviceStart_rdh | /**
* Start the state storage for use
*
* @throws IOException
*/
@Override
public void serviceStart() throws IOException {
startStorage();
} | 3.26 |
hadoop_TimelineStateStore_serviceStop_rdh | /**
* Shutdown the state storage.
*
* @throws IOException
*/
@Override
public void serviceStop() throws IOException {
closeStorage();
} | 3.26 |
hadoop_TimelineStateStore_serviceInit_rdh | /**
* Initialize the state storage
*
* @param conf
* the configuration
* @throws IOException
*/
@Override
public void serviceInit(Configuration conf) throws IOException {
m0(conf);
} | 3.26 |
hadoop_IOStatisticsBinding_iostatisticsStore_rdh | /**
* Create a builder for an {@link IOStatisticsStore}.
*
* @return a builder instance.
*/
public static IOStatisticsStoreBuilder iostatisticsStore() {
return new IOStatisticsStoreBuilderImpl();
} | 3.26 |
hadoop_IOStatisticsBinding_trackJavaFunctionDuration_rdh | /**
* Given a java function/lambda expression,
* return a new one which wraps the inner and tracks
* the duration of the operation, including whether
* it passes/fails.
*
* @param factory
* factory of duration trackers
* @param statistic
* statistic key
* @param inputFn
* input function
* @param <A>
* type of argument to the input function.
* @param <B>
* return type.
* @return a new function which tracks duration and failure.
*/
public static <A, B> Function<A, B> trackJavaFunctionDuration(@Nullable DurationTrackerFactory factory, String statistic, Function<A, B> inputFn) {
return x -> {
// create the tracker outside try-with-resources so
// that failures can be set in the catcher.
DurationTracker tracker = createTracker(factory, statistic);
try {
// exec the input function and return its value
return inputFn.apply(x);
} catch (RuntimeException e) {
// input function failed: note it
tracker.failed();
// and rethrow
throw e;
} finally {
// update the tracker.
// this is called after the catch() call would have
// set the failed flag.
tracker.close();
}
};
} | 3.26 |
hadoop_IOStatisticsBinding_passthroughFn_rdh | /**
* A passthrough copy operation suitable for immutable
* types, including numbers.
*
* @param <E>
* type of values.
* @param src
* source object
* @return the source object
*/
public static <E extends Serializable> E passthroughFn(E src) {
return src;
} | 3.26 |
hadoop_IOStatisticsBinding_maybeUpdateMinimum_rdh | /**
* Update a minimum value tracked in an atomic long.
* This is thread safe -it uses compareAndSet to ensure
* that Thread T1 whose sample is less than the current
* value never overwrites an update from thread T2 whose
* sample was also lower -and which completed first.
*
* @param dest
* destination for all changes.
* @param sample
* sample to update.
*/
public static void maybeUpdateMinimum(AtomicLong dest, long sample) {
boolean done;
do {
long current = dest.get();
if ((current == MIN_UNSET_VALUE) || (sample < current)) {
done = dest.compareAndSet(current, sample);
} else {
done = true;
}
} while (!done );
} | 3.26 |
hadoop_IOStatisticsBinding_dynamicIOStatistics_rdh | /**
* Create a builder for dynamic IO Statistics.
*
* @return a builder to be completed.
*/
public static DynamicIOStatisticsBuilder dynamicIOStatistics() {
return new DynamicIOStatisticsBuilder();
} | 3.26 |
hadoop_IOStatisticsBinding_aggregateMeanStatistics_rdh | /**
* Aggregate the mean statistics.
* This returns a new instance.
*
* @param l
* left value
* @param r
* right value
* @return aggregate value
*/
public static MeanStatistic aggregateMeanStatistics(MeanStatistic l, MeanStatistic r) {
MeanStatistic res = l.copy();
res.add(r);
return res;
} | 3.26 |
hadoop_IOStatisticsBinding_emptyStatistics_rdh | /**
* Get the shared instance of the immutable empty statistics
* object.
*
* @return an empty statistics object.
*/
public static IOStatistics emptyStatistics() {
return EmptyIOStatistics.getInstance();
} | 3.26 |
hadoop_IOStatisticsBinding_aggregateMaps_rdh | /**
* Aggregate two maps so that the destination contains the aggregated values of both.
*
* @param <E>
* type of values
* @param dest
* destination map.
* @param other
* other map
* @param aggregateFn
* function to aggregate the values.
* @param copyFn
* function to copy the value
*/
public static <E> void aggregateMaps(Map<String, E> dest, Map<String, E> other, BiFunction<E, E, E> aggregateFn, Function<E, E> copyFn) {
// scan through the other hand map; copy
// any values not in the left map,
// aggregate those for which there is already
// an entry
other.entrySet().forEach(entry -> {
String key = entry.getKey();
E rVal = entry.getValue();
E lVal = dest.get(key);
if (lVal == null) {
dest.put(key, copyFn.apply(rVal));
} else {
dest.put(key, aggregateFn.apply(lVal, rVal));
}
});
} | 3.26 |
hadoop_IOStatisticsBinding_measureDurationOfInvocation_rdh | /**
* Given an IOException raising callable/lambda expression,
* execute it and update the relevant statistic,
* returning the measured duration.
*
* This is {@link #trackDurationOfInvocation(DurationTrackerFactory, String, InvocationRaisingIOE)}
* with the duration returned for logging etc.; added as a new
* method to avoid linking problems with any code calling the existing
* method.
*
* @param factory
* factory of duration trackers
* @param statistic
* statistic key
* @param input
* input callable.
* @return the duration of the operation, as measured by the duration tracker.
* @throws IOException
* IO failure.
*/
public static Duration measureDurationOfInvocation(DurationTrackerFactory factory, String statistic, InvocationRaisingIOE input) throws IOException {
// create the tracker outside try-with-resources so
// that failures can be set in the catcher.
DurationTracker tracker = createTracker(factory, statistic);
try {
// exec the input function and return its value
input.apply();
} catch (IOException | RuntimeException e) {
// input function failed: note it
tracker.failed();
// and rethrow
throw e;
} finally {
// update the tracker.
// this is called after the catch() call would have
// set the failed flag.
tracker.close();
}
return tracker.asDuration();
} | 3.26 |
hadoop_IOStatisticsBinding_aggregateMinimums_rdh | /**
* Aggregate two minimum values.
*
* @param l
* left
* @param r
* right
* @return the new minimum.
*/
public static Long aggregateMinimums(Long l, Long r) {
if (l == MIN_UNSET_VALUE) {
return r;
} else if (r == MIN_UNSET_VALUE) {
return l;
} else {
return Math.min(l, r);
}
} | 3.26 |
hadoop_IOStatisticsBinding_m2_rdh | /**
* Given an IOException raising callable/lambda expression,
* execute it and update the relevant statistic.
*
* @param factory
* factory of duration trackers
* @param statistic
* statistic key
* @param input
* input callable.
* @throws IOException
* IO failure.
*/
public static void m2(DurationTrackerFactory factory, String statistic, InvocationRaisingIOE input) throws IOException {
measureDurationOfInvocation(factory, statistic, input);
} | 3.26 |
hadoop_IOStatisticsBinding_aggregateMaximums_rdh | /**
* Aggregate two maximum values.
*
* @param l
* left
* @param r
* right
* @return the new maximum.
*/
public static Long aggregateMaximums(Long l, Long r) {
if (l == MIN_UNSET_VALUE) {
return r;
} else if (r == MIN_UNSET_VALUE) {
return l;
} else {
return Math.max(l, r);
}
} | 3.26 |
hadoop_IOStatisticsBinding_invokeTrackingDuration_rdh | /**
* Given an IOException raising callable/lambda expression,
* execute it, updating the tracker on success/failure.
*
* @param tracker
* duration tracker.
* @param input
* input callable.
* @param <B>
* return type.
* @return the result of the invocation
* @throws IOException
* on failure.
*/
public static <B> B invokeTrackingDuration(final DurationTracker tracker, final CallableRaisingIOE<B> input) throws IOException {
try {
// exec the input function and return its value
return input.apply();
} catch (IOException | RuntimeException e) {
// input function failed: note it
tracker.failed();
// and rethrow
throw e;
} finally {
// update the tracker.
// this is called after the catch() call would have
// set the failed flag.
tracker.close();
}
} | 3.26 |
hadoop_IOStatisticsBinding_trackDurationOfSupplier_rdh | /**
* Given a Java supplier, evaluate it while
* tracking the duration of the operation and success/failure.
*
* @param factory
* factory of duration trackers
* @param statistic
* statistic key
* @param input
* input callable.
* @param <B>
* return type.
* @return the output of the supplier.
*/
public static <B> B trackDurationOfSupplier(@Nullable DurationTrackerFactory factory, String statistic, Supplier<B> input) {
// create the tracker outside try-with-resources so
// that failures can be set in the catcher.
DurationTracker tracker = createTracker(factory, statistic);
try {
// exec the input function and return its value
return input.get();
} catch (RuntimeException e) {
// input function failed: note it
tracker.failed();
// and rethrow
throw e;
} finally {
// update the tracker.
// this is called after any catch() call will have
// set the failed flag.
tracker.close();
}
} | 3.26 |
hadoop_IOStatisticsBinding_maybeUpdateMaximum_rdh | /**
* Update a maximum value tracked in an atomic long.
* This is thread safe -it uses compareAndSet to ensure
* that Thread T1 whose sample is greater than the current
* value never overwrites an update from thread T2 whose
* sample was also higher -and which completed first.
*
* @param dest
* destination for all changes.
* @param sample
* sample to update.
*/
public static void maybeUpdateMaximum(AtomicLong dest, long sample) {
boolean done;
do {
long current = dest.get();
if (sample > current) {
done = dest.compareAndSet(current, sample);
} else {
done = true;
}
} while (!done );
} | 3.26 |
hadoop_IOStatisticsBinding_aggregateGauges_rdh | /**
* Add two gauges.
*
* @param l
* left value
* @param r
* right value
* @return aggregate value
*/
public static Long aggregateGauges(Long l, Long r) {
return l + r;
} | 3.26 |
hadoop_IOStatisticsBinding_trackDurationOfOperation_rdh | /**
* Given an IOException raising callable/lambda expression,
* return a new one which wraps the inner and tracks
* the duration of the operation, including whether
* it passes/fails.
*
* @param factory
* factory of duration trackers
* @param statistic
* statistic key
* @param input
* input callable.
* @param <B>
* return type.
* @return a new callable which tracks duration and failure.
*/
public static <B> CallableRaisingIOE<B> trackDurationOfOperation(@Nullable DurationTrackerFactory factory, String statistic, CallableRaisingIOE<B> input) {
return () -> {
// create the tracker outside try-with-resources so
// that failures can be set in the catcher.
DurationTracker tracker = createTracker(factory, statistic);
return invokeTrackingDuration(tracker, input);
};
} | 3.26 |
hadoop_IOStatisticsBinding_wrap_rdh | /**
* Take an IOStatistics instance and wrap it in a source.
*
* @param statistics
* statistics.
* @return a source which will return the values
*/
public static IOStatisticsSource wrap(IOStatistics statistics) {
return new SourceWrappedStatistics(statistics);
} | 3.26 |
hadoop_IOStatisticsBinding_aggregateCounters_rdh | /**
* Aggregate two counters.
*
* @param l
* left value
* @param r
* right value
* @return the aggregate value
*/
public static Long aggregateCounters(Long l, Long r) {
return Math.max(l, 0) + Math.max(r, 0);
} | 3.26 |
hadoop_IOStatisticsBinding_publishAsStorageStatistics_rdh | /**
* Publish the IOStatistics as a set of storage statistics.
* This is dynamic.
*
* @param name
* storage statistics name.
* @param scheme
* FS scheme; may be null.
* @param source
* IOStatistics source.
* @return a dynamic storage statistics object.
*/
public static StorageStatistics publishAsStorageStatistics(String name, String scheme, IOStatistics source) {
return new StorageStatisticsFromIOStatistics(name, scheme, source);
} | 3.26 |
hadoop_IOStatisticsBinding_trackDurationOfCallable_rdh | /**
* Given a callable/lambda expression,
* return a new one which wraps the inner and tracks
* the duration of the operation, including whether
* it passes/fails.
*
* @param factory
* factory of duration trackers
* @param statistic
* statistic key
* @param input
* input callable.
* @param <B>
* return type.
* @return a new callable which tracks duration and failure.
*/
public static <B> Callable<B> trackDurationOfCallable(@Nullable DurationTrackerFactory factory, String statistic, Callable<B> input) {
return () -> {
// create the tracker outside try-with-resources so
// that failures can be set in the catcher.
DurationTracker tracker = createTracker(factory, statistic);
try {
// exec the input function and return its value
return input.call();
} catch (RuntimeException e) {
// input function failed: note it
tracker.failed();
// and rethrow
throw e;
} finally {
// update the tracker.
// this is called after any catch() call will have
// set the failed flag.
tracker.close();
}
};
} | 3.26 |
hadoop_IOStatisticsBinding_snapshotMap_rdh | /**
* Take a snapshot of a supplied map, using the copy function
* to replicate the source values.
*
* @param source
* source map
* @param copyFn
* function to copy the value
* @param <E>
* type of values.
* @return a concurrent hash map referencing the same values.
*/
public static <E extends Serializable> ConcurrentHashMap<String, E> snapshotMap(Map<String, E> source, Function<E, E> copyFn) {
ConcurrentHashMap<String, E> dest = new ConcurrentHashMap<>();
copyMap(dest, source, copyFn);
return dest;
} | 3.26 |
hadoop_IOStatisticsBinding_trackDuration_rdh | /**
* Given an IOException raising callable/lambda expression,
* execute it and update the relevant statistic.
*
* @param factory
* factory of duration trackers
* @param statistic
* statistic key
* @param input
* input callable.
* @param <B>
* return type.
* @return the result of the operation.
* @throws IOException
* raised on errors performing I/O.
*/
public static <B> B trackDuration(DurationTrackerFactory factory, String statistic, CallableRaisingIOE<B> input) throws IOException {
return trackDurationOfOperation(factory, statistic, input).apply();
} | 3.26 |
hadoop_IOStatisticsBinding_entryToString_rdh | /**
* Convert entry values to the string format used in logging.
*
* @param <E>
* type of values.
* @param name
* statistic name
* @param value
* stat value
* @return formatted string
*/
public static <E> String entryToString(final String name, final E value) {
return String.format(f0, name, value);
} | 3.26 |
hadoop_IOStatisticsBinding_copyMap_rdh | /**
* Copy into the dest map all the source entries.
* The destination is cleared first.
*
* @param <E>
* entry type
* @param dest
* destination of the copy
* @param source
* source
* @param copyFn
* function to copy entries
* @return the destination.
*/
private static <E> Map<String, E> copyMap(Map<String, E> dest, Map<String, E> source, Function<E, E> copyFn) {
// we have to clone the values so that they aren't
// bound to the original values
dest.clear();
source.entrySet().forEach(entry -> dest.put(entry.getKey(), copyFn.apply(entry.getValue())));
return dest;
} | 3.26 |
hadoop_IOStatisticsBinding_m1_rdh | /**
* Given an IOException raising function/lambda expression,
* return a new one which wraps the inner and tracks
* the duration of the operation, including whether
* it passes/fails.
*
* @param factory
* factory of duration trackers
* @param statistic
* statistic key
* @param inputFn
* input function
* @param <A>
* type of argument to the input function.
* @param <B>
* return type.
* @return a new function which tracks duration and failure.
*/
public static <A, B> FunctionRaisingIOE<A, B> m1(@Nullable DurationTrackerFactory factory, String statistic, FunctionRaisingIOE<A, B> inputFn) {
return x -> {
// create the tracker outside try-with-resources so
// that failures can be set in the catcher.
DurationTracker tracker = createTracker(factory, statistic);
try {
// exec the input function and return its value
return inputFn.apply(x);
} catch (IOException | RuntimeException e) {
// input function failed: note it
tracker.failed();
// and rethrow
throw e;
} finally {
// update the tracker.
// this is called after the catch() call would have
// set the failed flag.
tracker.close();
}
};
} | 3.26 |
hadoop_IOStatisticsBinding_pairedTrackerFactory_rdh | /**
* Create a DurationTrackerFactory which aggregates the tracking
* of two other factories.
*
* @param first
* first tracker factory
* @param second
* second tracker factory
* @return a factory
*/
public static DurationTrackerFactory pairedTrackerFactory(final DurationTrackerFactory first, final DurationTrackerFactory second) {
return new PairedDurationTrackerFactory(first, second);
} | 3.26 |
hadoop_IOStatisticsBinding_fromStorageStatistics_rdh | /**
* Create IOStatistics from a storage statistics instance.
*
* This will be updated as the storage statistics change.
*
* @param storageStatistics
* source data.
* @return an IO statistics source.
*/
public static IOStatistics fromStorageStatistics(StorageStatistics storageStatistics) {
DynamicIOStatisticsBuilder builder = dynamicIOStatistics();
Iterator<StorageStatistics.LongStatistic> it = storageStatistics.getLongStatistics();
while (it.hasNext()) {
StorageStatistics.LongStatistic next = it.next();
builder.withLongFunctionCounter(next.getName(), k -> storageStatistics.getLong(k));
}
return builder.build();
} | 3.26 |
hadoop_UserDefinedValueAggregatorDescriptor_createInstance_rdh | /**
* Create an instance of the given class
*
* @param className
* the name of the class
* @return a dynamically created instance of the given class
*/
public static Object createInstance(String className) {
Object v0 = null;
try {
ClassLoader v1 = Thread.currentThread().getContextClassLoader();
Class<?> theFilterClass = Class.forName(className, true, v1);
Constructor<?> meth = theFilterClass.getDeclaredConstructor(argArray);
meth.setAccessible(true);
v0 = meth.newInstance();
} catch (Exception e) {
throw new RuntimeException(e);
}
return v0;
} | 3.26 |
hadoop_UserDefinedValueAggregatorDescriptor_toString_rdh | /**
*
* @return the string representation of this object.
*/
public String toString() {
return ("UserDefinedValueAggregatorDescriptor with class name:" + "\t") + this.className;
} | 3.26 |
hadoop_UserDefinedValueAggregatorDescriptor_configure_rdh | /**
* Do nothing.
*/
public void configure(Configuration conf) {
} | 3.26 |
hadoop_AsyncDataService_shutdown_rdh | /**
* Gracefully shut down the ThreadPool. Will wait for all data tasks to
* finish.
*/
synchronized void shutdown() {
if (executor == null) {
LOG.warn("AsyncDataService has already shut down.");
} else {
LOG.info("Shutting down all async data service threads...");
executor.shutdown();
// clear the executor so that calling execute again will fail.
executor = null;
LOG.info("All async data service threads have been shut down");
}
} | 3.26 |
hadoop_AsyncDataService_execute_rdh | /**
* Execute the task sometime in the future.
*/
synchronized void execute(Runnable task) {
if (executor == null) {
throw new RuntimeException("AsyncDataService is already shutdown");
}
if (LOG.isDebugEnabled()) {
LOG.debug((((("Current active thread number: " + executor.getActiveCount()) + " queue size: ") + executor.getQueue().size()) + " scheduled task number: ") + executor.getTaskCount());
}
executor.execute(task);
} | 3.26 |
hadoop_AsyncDataService_writeAsync_rdh | /**
* Write the data to HDFS asynchronously
*/
void writeAsync(OpenFileCtx openFileCtx) {
if (LOG.isDebugEnabled()) {
LOG.debug("Scheduling write back task for fileId: " + openFileCtx.getLatestAttr().getFileId());
}
WriteBackTask wbTask = new WriteBackTask(openFileCtx);
execute(wbTask);
} | 3.26 |
hadoop_TaskId_equals_rdh | /**
* Equal method override for TaskId.
*/
@Override
public final boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
TaskId other = ((TaskId) (obj));
if (jobId == null) {
if (other.jobId != null) {
return false;
}
} else if (!jobId.equals(other.jobId)) {
return false;
}
if (taskId != other.taskId) {
return false;
}
return true;
} | 3.26 |
hadoop_TaskId_readFields_rdh | /**
* {@inheritDoc }
*/
public final void readFields(final DataInput in) throws IOException {
jobId = new JobId();
jobId.readFields(in);
this.taskId = WritableUtils.readVLong(in);
} | 3.26 |
hadoop_TaskId_write_rdh | /**
* {@inheritDoc }
*/
public final void write(final DataOutput out) throws IOException {
jobId.write(out);
WritableUtils.writeVLong(out, taskId);
} | 3.26 |
hadoop_TaskId_m0_rdh | /**
* Getter method for TaskID.
*
* @return TaskId: Task identifier
*/
public final long m0() {
return taskId;
} | 3.26 |
hadoop_TaskId_m1_rdh | /**
* Hashcode method for TaskId.
*/
@Override
public final int m1() {
final int prime = 31;
final int bits = 32;
int result = 1;
int jobHash = 0;
if (jobId == null) {
jobHash = 0;
} else {
jobHash = jobId.hashCode();
}
result = (prime * result) + jobHash;
result = (prime * result) + ((int) (taskId ^ (taskId >>> bits)));
return result;
} | 3.26 |
hadoop_TaskId_getJobId_rdh | /**
* Getter method for jobId.
*
* @return JobID: Job identifier
*/
public final int getJobId() {
return jobId.getID();
} | 3.26 |
hadoop_TaskId_toString_rdh | /**
* Print method for TaskId.
*
* @return : Full TaskId which is TaskId_prefix + jobId + _ + TaskId
*/
public final String toString() {
return ((TASK_ID_PREFIX + jobId.getID()) + "_") + taskId;
} | 3.26 |
hadoop_ClientCache_stopClient_rdh | /**
* Stop a RPC client connection
* A RPC client is closed only when its reference count becomes zero.
*
* @param client
* input client.
*/
public void stopClient(Client client) {
if (Client.LOG.isDebugEnabled()) {
Client.LOG.debug("stopping client from cache: " + client);
}
final int count;
synchronized(this) {
count = client.decAndGetCount();
if (count == 0) {
if (Client.LOG.isDebugEnabled()) {
Client.LOG.debug("removing client from cache: " +
client);
}
clients.remove(client.getSocketFactory());
}
}
if (count == 0) {
if (Client.LOG.isDebugEnabled()) {
Client.LOG.debug("stopping actual client because no more references remain: " + client);
}
client.stop();
}
} | 3.26 |
hadoop_ClientCache_getClient_rdh | /**
* Construct & cache an IPC client with the user-provided SocketFactory
* if no cached client exists. Default response type is ObjectWritable.
*
* @param conf
* Configuration
* @param factory
* SocketFactory for client socket
* @return an IPC client
*/
public synchronized Client getClient(Configuration conf, SocketFactory factory) {
return this.m0(conf, factory, ObjectWritable.class);
} | 3.26 |
hadoop_ClientCache_m0_rdh | /**
* Construct & cache an IPC client with the user-provided SocketFactory
* if no cached client exists.
*
* @param conf
* Configuration
* @param factory
* SocketFactory for client socket
* @param valueClass
* Class of the expected response
* @return an IPC client
*/
public synchronized Client m0(Configuration conf, SocketFactory factory, Class<? extends Writable> valueClass) {
// Construct & cache client. The configuration is only used for timeout,
// and Clients have connection pools. So we can either (a) lose some
// connection pooling and leak sockets, or (b) use the same timeout for all
// configurations. Since the IPC is usually intended globally, not
// per-job, we choose (a).
Client client = clients.get(factory);
if (client == null) {
client = new Client(valueClass, conf, factory);
clients.put(factory, client);
} else {
client.incCount();
}
if (Client.LOG.isDebugEnabled()) {
Client.LOG.debug("getting client out of cache: "
+ client);
}
return client;
} | 3.26 |
hadoop_HamletImpl_setSelector_rdh | /**
* Set id and/or class attributes for an element.
*
* @param <E>
* type of the element
* @param e
* the element
* @param selector
* Haml form of "(#id)?(.class)*"
* @return the element
*/
public static <E extends CoreAttrs> E setSelector(E e, String selector) {
String[] res = parseSelector(selector);
if (res[S_ID] != null) {
e.$id(res[S_ID]);
}
if (res[S_CLASS] != null) {
e.$class(res[S_CLASS]);
}
return e;
} | 3.26 |
hadoop_HamletImpl_subView_rdh | /**
* Sub-classes should override this to do something interesting.
*
* @param cls
* the sub-view class
*/
protected void subView(Class<? extends SubView> cls) {
indent(of(ENDTAG));// not an inline view
sb.setLength(0);
out.print(sb.append('[').append(cls.getName()).append(']').toString());
out.println();
} | 3.26 |
hadoop_OBSListing_createLocatedFileStatusIterator_rdh | /**
* Create a located status iterator over a file status iterator.
*
* @param statusIterator
* an iterator over the remote status entries
* @return a new remote iterator
*/
LocatedFileStatusIterator createLocatedFileStatusIterator(final RemoteIterator<FileStatus> statusIterator) {
return new LocatedFileStatusIterator(statusIterator);
} | 3.26 |
hadoop_OBSListing_buildNextStatusBatch_rdh | /**
* Build the next status batch from a listing.
*
* @param objects
* the next object listing
* @return true if this added any entries after filtering
*/
private boolean buildNextStatusBatch(final ObjectListing objects) {
// counters for debug logs
int added = 0;
int v2 = 0;
// list to fill in with results. Initial size will be list maximum.
List<FileStatus> stats = new ArrayList<>(objects.getObjects().size() + objects.getCommonPrefixes().size());
// objects
for (ObsObject summary : objects.getObjects()) {
String key = summary.getObjectKey();
Path keyPath = OBSCommonUtils.keyToQualifiedPath(owner, key);
if (LOG.isDebugEnabled()) {
LOG.debug("{}: {}", keyPath, OBSCommonUtils.stringify(summary));
}
// Skip over keys that are ourselves and old OBS _$folder$ files
if (acceptor.accept(keyPath, summary) && filter.accept(keyPath)) {
FileStatus status = OBSCommonUtils.createFileStatus(keyPath, summary, owner.getDefaultBlockSize(keyPath), owner.getUsername());
LOG.debug("Adding: {}", status);
stats.add(status);
added++;
} else {
LOG.debug("Ignoring: {}", keyPath);
v2++;
}
}
// prefixes: always directories
for (ObsObject prefix : objects.getExtenedCommonPrefixes()) {
String key = prefix.getObjectKey();
Path keyPath = OBSCommonUtils.keyToQualifiedPath(owner, key);
if (acceptor.accept(keyPath, key) && filter.accept(keyPath)) {
long lastModified = (prefix.getMetadata().getLastModified() == null) ? System.currentTimeMillis() : OBSCommonUtils.dateToLong(prefix.getMetadata().getLastModified());
FileStatus status = new OBSFileStatus(keyPath, lastModified, lastModified, owner.getUsername());
LOG.debug("Adding directory: {}", status);added++;
stats.add(status);
} else {
LOG.debug("Ignoring directory: {}", keyPath);
v2++;
}
}
// finish up
batchSize = stats.size();
statusBatchIterator = stats.listIterator();
boolean hasNext = statusBatchIterator.hasNext();
LOG.debug("Added {} entries; ignored {}; hasNext={}; hasMoreObjects={}", added, v2, hasNext, objects.isTruncated());
return hasNext;
} | 3.26 |
hadoop_OBSListing_next_rdh | /**
* Ask for the next listing. For the first invocation, this returns the
* initial set, with no remote IO. For later requests, OBS will be queried,
* hence the calls may block or fail.
*
* @return the next object listing.
* @throws IOException
* if a query made of OBS fails.
* @throws NoSuchElementException
* if there is no more data to list.
*/
@Override
public ObjectListing next() throws IOException {
if (firstListing) {
// on the first listing, don't request more data.
// Instead just clear the firstListing flag so that it future
// calls will request new data.
firstListing = false;
} else {
try {
if (!objects.isTruncated()) {
// nothing more to request: fail.
throw new NoSuchElementException("No more results in listing of " + listPath);
}
// need to request a new set of objects.
LOG.debug("[{}], Requesting next {} objects under {}", listingCount, maxKeys, listPath);
objects = OBSCommonUtils.continueListObjects(owner, objects);
listingCount++;
LOG.debug("New listing status: {}", this);
} catch (ObsException e) {
throw OBSCommonUtils.translateException("listObjects()", listPath, e);
}
}
return objects;
} | 3.26 |
hadoop_OBSListing_getBatchSize_rdh | /**
* Get the number of entries in the current batch.
*
* @return a number, possibly zero.
*/
public int getBatchSize() {
return batchSize;
} | 3.26 |
hadoop_OBSListing_m0_rdh | /**
* Create a FileStatus iterator against a path, with a given list object
* request.
*
* @param listPath
* path of the listing
* @param request
* initial request to make
* @param filter
* the filter on which paths to accept
* @param acceptor
* the class/predicate to decide which entries to accept in
* the listing based on the full file status.
* @return the iterator
* @throws IOException
* IO Problems
*/
FileStatusListingIterator m0(final Path listPath, final ListObjectsRequest request, final PathFilter filter, final FileStatusAcceptor acceptor) throws IOException {
return new FileStatusListingIterator(new ObjectListingIterator(listPath, request), filter, acceptor);
} | 3.26 |
hadoop_OBSListing_accept_rdh | /**
* Accept no directory paths.
*
* @param keyPath
* qualified path to the entry
* @param prefix
* common prefix in listing.
* @return false, always.
*/
@Override
public boolean accept(final Path keyPath, final String prefix) {
return false;
} | 3.26 |
hadoop_OBSListing_hasNext_rdh | /**
* Declare that the iterator has data if it is either is the initial
* iteration or it is a later one and the last listing obtained was
* incomplete.
*/
@Override
public boolean hasNext() {
return firstListing || objects.isTruncated();
} | 3.26 |
hadoop_HsAboutPage_content_rdh | /**
* The content of this page is the attempts block
*
* @return AttemptsBlock.class
*/
@Override
protected Class<? extends SubView> content() {
HistoryInfo info = new HistoryInfo();
info("History Server").__("BuildVersion", (info.getHadoopBuildVersion() + " on ") + info.getHadoopVersionBuiltOn()).__("History Server started on", Times.format(info.getStartedOn()));
return InfoBlock.class;
} | 3.26 |
hadoop_HsAboutPage_preHead_rdh | /* (non-Javadoc)
@see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
*/
@Override
protected void preHead(Page.HTML<__> html) {
commonPreHead(html);
// override the nav config from commonPReHead
set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}");
setTitle("About History Server");
} | 3.26 |
hadoop_ExitUtil_addSuppressed_rdh | /**
* Suppresses if legit and returns the first non-null of the two. Legit means
* <code>suppressor</code> is neither <code>null</code> nor <code>suppressed</code> itself.
*
* @param suppressor
* <code>Throwable</code> that suppresses <code>suppressed</code>
* @param suppressed
* <code>Throwable</code> that is suppressed by <code>suppressor</code>
* @return <code>suppressor</code> if not <code>null</code>, <code>suppressed</code> otherwise
*/
private static <T extends Throwable> T addSuppressed(T suppressor, T suppressed) {
if (suppressor == null) {
return suppressed;
}
if (suppressor != suppressed) {
suppressor.addSuppressed(suppressed);
}
return suppressor;
} | 3.26 |
hadoop_ExitUtil_halt_rdh | /**
* Forcibly terminates the currently running Java virtual machine.
*
* @param status
* status code
* @param message
* message
* @throws HaltException
* if {@link Runtime#halt(int)} is disabled.
*/
public static void halt(int status, String message) throws HaltException {
halt(new HaltException(status, message));
} | 3.26 |
hadoop_ExitUtil_getFirstHaltException_rdh | /**
*
* @return the first {@code HaltException} thrown, null if none thrown yet.
*/
public static HaltException getFirstHaltException() {
return FIRST_HALT_EXCEPTION.get();
} | 3.26 |
hadoop_ExitUtil_haltCalled_rdh | /**
*
* @return true if halt has been called.
*/
public static boolean haltCalled() {
// Either we set this member or we actually called Runtime#halt
return FIRST_HALT_EXCEPTION.get() != null;
} | 3.26 |
hadoop_ExitUtil_getFirstExitException_rdh | /**
*
* @return the first {@code ExitException} thrown, null if none thrown yet.
*/
public static ExitException getFirstExitException() {
return FIRST_EXIT_EXCEPTION.get();
} | 3.26 |
hadoop_ExitUtil_resetFirstHaltException_rdh | /**
* Reset the tracking of process termination. This is for use in unit tests
* where one test in the suite expects a halt but others do not.
*/
public static void resetFirstHaltException() {
FIRST_HALT_EXCEPTION.set(null);
} | 3.26 |
hadoop_ExitUtil_toString_rdh | /**
* String value does not include exception type, just exit code and message.
*
* @return the exit code and any message
*/
@Override
public String toString() {
String message = getMessage();
if (message == null) {
message = super.toString();
}
return (Integer.toString(status) + ": ") + message;
} | 3.26 |
hadoop_ExitUtil_terminate_rdh | /**
* Terminate the current process. Note that terminate is the *only* method
* that should be used to terminate the daemon processes.
*
* @param status
* exit code
* @param msg
* message used to create the {@code ExitException}
* @throws ExitException
* if {@link System#exit(int)} is disabled.
*/
public static void terminate(int status, String msg) throws ExitException {
terminate(new ExitException(status, msg));
} | 3.26 |
hadoop_ExitUtil_resetFirstExitException_rdh | /**
* Reset the tracking of process termination. This is for use in unit tests
* where one test in the suite expects an exit but others do not.
*/
public static void resetFirstExitException() {
FIRST_EXIT_EXCEPTION.set(null);
} | 3.26 |
hadoop_ExitUtil_haltOnOutOfMemory_rdh | /**
* Handler for out of memory events -no attempt is made here
* to cleanly shutdown or support halt blocking; a robust
* printing of the event to stderr is all that can be done.
*
* @param oome
* out of memory event
*/
public static void haltOnOutOfMemory(OutOfMemoryError oome) {
// After catching an OOM java says it is undefined behavior, so don't
// even try to clean up or we can get stuck on shutdown.
try {
System.err.println("Halting due to Out Of Memory Error...");
} catch (Throwable err) {
// Again we don't want to exit because of logging issues.
}
Runtime.getRuntime().halt(-1);
} | 3.26 |
hadoop_ExitUtil_terminateCalled_rdh | /**
*
* @return true if terminate has been called.
*/
public static boolean terminateCalled() {
// Either we set this member or we actually called System#exit
return FIRST_EXIT_EXCEPTION.get() != null;
} | 3.26 |
hadoop_ExitUtil_disableSystemHalt_rdh | /**
* Disable the use of {@code Runtime.getRuntime().halt()} for testing.
*/
public static void disableSystemHalt() {
systemHaltDisabled = true;
} | 3.26 |
hadoop_AllowAllImpersonationProvider_authorize_rdh | // Although this API was removed from the interface by HADOOP-17367, we need
// to keep it here because TestDynamometerInfra uses an old hadoop binary.
public void authorize(UserGroupInformation user, String remoteAddress) {
// Do nothing
} | 3.26 |
hadoop_FileStatusAcceptor_accept_rdh | /**
* Accept no directory paths.
*
* @param keyPath
* qualified path to the entry
* @param prefix
* common prefix in listing.
* @return false, always.
*/
@Override
public boolean accept(Path keyPath, String prefix) {
return false;
} | 3.26 |
hadoop_OperationDuration_humanTime_rdh | /**
* Convert to a human time of minutes:seconds.millis.
*
* @param time
* time to humanize.
* @return a printable value.
*/
public static String humanTime(long time) {
long seconds = time / 1000;
long minutes = seconds / 60;
return String.format("%d:%02d.%03ds", minutes, seconds % 60, time % 1000);
} | 3.26 |
hadoop_OperationDuration_asDuration_rdh | /**
* Get the duration of an operation as a java Duration
* instance.
*
* @return a duration.
*/
public Duration asDuration() {
return Duration.ofMillis(value());
} | 3.26 |
hadoop_OperationDuration_toString_rdh | /**
* Return the duration as {@link #humanTime(long)}.
*
* @return a printable duration.
*/
@Override
public String toString() {
return getDurationString();
} | 3.26 |
hadoop_OperationDuration_finished_rdh | /**
* Update the finished time with the current system time.
*/
public void finished() {
finished = time();
} | 3.26 |
hadoop_OperationDuration_getDurationString_rdh | /**
* Return the duration as {@link #humanTime(long)}.
*
* @return a printable duration.
*/
public String getDurationString() {
return humanTime(value());
} | 3.26 |
hadoop_OperationDuration_time_rdh | /**
* Evaluate the system time.
*
* @return the current clock time.
*/
protected long time() {
return System.currentTimeMillis();
} | 3.26 |
hadoop_OperationDuration_value_rdh | /**
* Get the duration in milliseconds.
*
* <p>
* This will be 0 until a call
* to {@link #finished()} has been made.
* </p>
*
* @return the currently recorded duration.
*/
public long value() {
return finished - started;
} | 3.26 |
hadoop_FlowRunRowKey_getRowKey_rdh | /**
* Constructs a row key for the entity table as follows: {
* clusterId!userId!flowName!Inverted Flow Run Id}.
*
* @return byte array with the row key
*/
public byte[] getRowKey() {
return flowRunRowKeyConverter.encode(this);
} | 3.26 |
hadoop_FlowRunRowKey_toString_rdh | /**
* returns the Flow Key as a verbose String output.
*
* @return String
*/
@Override
public String toString() {
StringBuilder flowKeyStr = new StringBuilder();
flowKeyStr.append("{clusterId="
+ clusterId).append(" userId=" + userId).append(" flowName=" + flowName).append(" flowRunId=").append(flowRunId).append("}");
return flowKeyStr.toString();
} | 3.26 |
hadoop_FlowRunRowKey_parseRowKeyFromString_rdh | /**
* Given the encoded row key as string, returns the row key as an object.
*
* @param encodedRowKey
* String representation of row key.
* @return A <cite>FlowRunRowKey</cite> object.
*/
public static FlowRunRowKey parseRowKeyFromString(String encodedRowKey) {
return new FlowRunRowKeyConverter().decodeFromString(encodedRowKey);
} | 3.26 |
hadoop_FlowRunRowKey_parseRowKey_rdh | /**
* Given the raw row key as bytes, returns the row key as an object.
*
* @param rowKey
* Byte representation of row key.
* @return A <cite>FlowRunRowKey</cite> object.
*/
public static FlowRunRowKey parseRowKey(byte[] rowKey) {
return new FlowRunRowKeyConverter().decode(rowKey);
}
/**
* Constructs a row key for the flow run table as follows:
* {@code clusterId!userId!flowName!Flow Run Id} | 3.26 |