name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_ExcessRedundancyMap_getSize4Testing_rdh | /**
*
* @return the number of redundancies corresponding to the given datanode.
*/
@VisibleForTesting
synchronized int getSize4Testing(String dnUuid) {
final LightWeightHashSet<BlockInfo> set = map.get(dnUuid);
return set == null ? 0 : set.size();
} | 3.26 |
hadoop_ExcessRedundancyMap_add_rdh | /**
* Add the redundancy of the given block stored in the given datanode to the
* map.
*
* @return true if the block is added.
*/
synchronized boolean add(DatanodeDescriptor dn, BlockInfo blk) {
LightWeightHashSet<BlockInfo> v2 = map.get(dn.getDatanodeUuid());
if (v2 == null) {
v2 = new LightWeightHashSet<>();
map.put(dn.getDatanodeUuid(), v2);
}
final boolean added = v2.add(blk);
if (added) {
      size.incrementAndGet();
      blockLog.debug("BLOCK* ExcessRedundancyMap.add({}, {})", dn, blk);
}
return added;
} | 3.26 |
hadoop_NullGroupsMapping_getGroupsSet_rdh | /**
 * Get all group memberships of the given user.
 * Returns an empty set for a non-existent user.
*
* @param user
* User's name
* @return set of group memberships of user
* @throws IOException
* raised on errors performing I/O.
*/
@Override
public Set<String> getGroupsSet(String user)
throws IOException {
return Collections.emptySet();
} | 3.26 |
hadoop_NullGroupsMapping_getGroups_rdh | /**
* Returns an empty list.
*
* @param user
* ignored
* @return an empty list
*/
@Override
public List<String> getGroups(String user)
{
return Collections.emptyList();
} | 3.26 |
hadoop_RolePolicies_bucketObjectsToArn_rdh | /**
* From an S3 bucket name, build an ARN to refer to all objects in
* it.
*
* @param bucket
* bucket name.
 * @return the ARN to use in statements.
*/
public static String bucketObjectsToArn(String bucket) {
return String.format("arn:aws:s3:::%s/*", bucket);} | 3.26 |
hadoop_RolePolicies_bucketToArn_rdh | /**
* From an S3 bucket name, build an ARN to refer to it.
*
* @param bucket
* bucket name.
 * @return the ARN to use in statements.
*/
public static String bucketToArn(String bucket) {
return String.format("arn:aws:s3:::%s", bucket);
} | 3.26 |
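The two ARN helpers above differ only in the `/*` suffix, which widens the resource from the bucket itself to every object in it. A minimal, self-contained usage sketch (the `ArnExample` class and the `my-bucket` name are illustrative, not part of the snippet):

```java
// Standalone sketch of the same ARN formatting shown above,
// not the Hadoop RolePolicies class itself.
public class ArnExample {
  static String bucketToArn(String bucket) {
    return String.format("arn:aws:s3:::%s", bucket);
  }

  static String bucketObjectsToArn(String bucket) {
    return String.format("arn:aws:s3:::%s/*", bucket);
  }

  public static void main(String[] args) {
    // Prints "arn:aws:s3:::my-bucket" and "arn:aws:s3:::my-bucket/*"
    System.out.println(bucketToArn("my-bucket"));
    System.out.println(bucketObjectsToArn("my-bucket"));
  }
}
```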
hadoop_Find_getRootExpression_rdh | /**
* Return the root expression for this find.
*
* @return the root expression
*/
@InterfaceAudience.Private
Expression getRootExpression() {
return this.rootExpression;
} | 3.26 |
hadoop_Find_parseExpression_rdh | /**
 * Parse a list of arguments to extract the {@link Expression} elements.
* The input Deque will be modified to remove the used elements.
*
* @param args
* arguments to be parsed
* @return list of {@link Expression} elements applicable to this command
* @throws IOException
* if list can not be parsed
 */
  private Expression parseExpression(Deque<String> args) throws IOException {
Deque<Expression> primaries = new LinkedList<Expression>();
Deque<Expression> operators = new LinkedList<Expression>();
Expression prevExpr = getExpression(And.class);
while (!args.isEmpty()) {
String arg = args.pop();
if ("(".equals(arg)) {
Expression expr = parseExpression(args);
primaries.add(expr);
prevExpr = new BaseExpression() {
@Override
public Result apply(PathData item, int depth) throws IOException {
return Result.PASS;
}
};// stub the previous expression to be a non-op
} else if (")".equals(arg)) {
break;
} else if (isExpression(arg)) {
Expression expr = getExpression(arg);
expr.addArguments(args);
if (expr.isOperator()) {
while (!operators.isEmpty()) {
if (operators.peek().getPrecedence() >= expr.getPrecedence()) {
Expression op = operators.pop();
op.addChildren(primaries);
primaries.push(op);
} else {
break;
}
}
operators.push(expr);
} else {
if (!prevExpr.isOperator()) {
Expression and = getExpression(And.class);
while (!operators.isEmpty()) {
if (operators.peek().getPrecedence() >= and.getPrecedence()) {
            Expression op = operators.pop();
op.addChildren(primaries);
primaries.push(op);
} else {
break;
}
}
operators.push(and);
}
primaries.push(expr);
}
      prevExpr = expr;
    } else {
throw new IOException("Unexpected argument: " + arg);
}
}
while (!operators.isEmpty()) {
Expression operator = operators.pop();
operator.addChildren(primaries);
primaries.push(operator);
}
return primaries.isEmpty() ? getExpression(Print.class) : primaries.pop();
} | 3.26 |
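parseExpression above is a shunting-yard-style parser: primaries collect on one deque, operators on another, and an incoming operator is pushed only after every stacked operator of equal or higher precedence has been reduced (with an implicit And inserted between adjacent primaries). A self-contained sketch of that reduction loop, using a toy arithmetic grammar purely for illustration (the PrecedenceSketch class, its tokens, and the whitespace-separated input format are assumptions, not Hadoop code):

```java
import java.util.ArrayDeque;
import java.util.Deque;

// Illustrative two-stack, precedence-driven reduction, mirroring the
// getPrecedence() comparison loop in parseExpression above.
public class PrecedenceSketch {
  static int precedence(char op) {
    return op == '*' ? 2 : 1; // '*' binds tighter than '+'
  }

  static void reduce(Deque<Integer> operands, char op) {
    int right = operands.pop();
    int left = operands.pop();
    operands.push(op == '*' ? left * right : left + right);
  }

  static int eval(String expr) {
    Deque<Integer> operands = new ArrayDeque<>();    // the "primaries"
    Deque<Character> operators = new ArrayDeque<>(); // the "operators"
    for (String tok : expr.trim().split("\\s+")) {
      if (tok.equals("+") || tok.equals("*")) {
        char op = tok.charAt(0);
        // Reduce while the stacked operator binds at least as tightly.
        while (!operators.isEmpty()
            && precedence(operators.peek()) >= precedence(op)) {
          reduce(operands, operators.pop());
        }
        operators.push(op);
      } else {
        operands.push(Integer.parseInt(tok)); // a "primary"
      }
    }
    while (!operators.isEmpty()) {
      reduce(operands, operators.pop());
    }
    return operands.pop();
  }

  public static void main(String[] args) {
    System.out.println(eval("2 + 3 * 4")); // prints 14
  }
}
```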
hadoop_Find_isStop_rdh | /**
* Returns true if the {@link PathData} item is in the stop set.
*/
private boolean isStop(PathData item) {
return stopPaths.contains(item.path);
} | 3.26 |
hadoop_Find_addStop_rdh | /**
* Add the {@link PathData} item to the stop set.
*/
private void addStop(PathData item) {
stopPaths.add(item.path);
} | 3.26 |
hadoop_Find_setRootExpression_rdh | /**
* Set the root expression for this find.
*
 * @param expression
 * 		the root expression to set
*/
@InterfaceAudience.Private
void setRootExpression(Expression expression) {
this.rootExpression = expression;
} | 3.26 |
hadoop_Find_createOptions_rdh | /**
* Create a new set of find options.
*/
private FindOptions createOptions() {
FindOptions options = new FindOptions();
options.setOut(out);
    options.setErr(err);
    options.setIn(System.in);
    options.setCommandFactory(getCommandFactory());
    options.setConfiguration(getConf());
    return options;
} | 3.26 |
hadoop_Find_isExpression_rdh | /**
* Asks the factory whether an expression is recognized.
*/
private boolean isExpression(String expressionName) {
return ExpressionFactory.getExpressionFactory().isExpression(expressionName);
} | 3.26 |
hadoop_Find_getOptions_rdh | /**
* Returns the current find options, creating them if necessary.
   */
  @InterfaceAudience.Private
FindOptions getOptions() {
if (options == null) {
options = createOptions();
}
    return options;
} | 3.26 |
hadoop_Find_getExpression_rdh | /**
* Gets an instance of an expression from the factory.
*/
private Expression getExpression(Class<? extends Expression> expressionClass) {
return ExpressionFactory.getExpressionFactory().createExpression(expressionClass, getConf());
} | 3.26 |
hadoop_Find_registerExpressions_rdh | /**
* Register the expressions with the expression factory.
*/
private static void registerExpressions(ExpressionFactory factory) {
for (Class<? extends Expression> exprClass : f1) {
factory.registerExpression(exprClass);
}
} | 3.26 |
hadoop_Find_isAncestor_rdh | /**
* Returns true if the target is an ancestor of the source.
*/
private boolean isAncestor(PathData source, PathData target) {
for (Path parent = source.path; (parent != null) && (!parent.isRoot()); parent = parent.getParent()) {
      if (parent.equals(target.path)) {
        return true;
}
}
return false;
} | 3.26 |
hadoop_Find_buildDescription_rdh | /**
* Build the description used by the help command.
*/
private static String buildDescription(ExpressionFactory factory) {
ArrayList<Expression> operators = new ArrayList<Expression>();
ArrayList<Expression> primaries = new ArrayList<Expression>();
for (Class<? extends Expression> exprClass : f1) {
Expression expr = factory.createExpression(exprClass, null);
if (expr.isOperator()) {
operators.add(expr);
} else {
primaries.add(expr);
}
}
Collections.sort(operators, new Comparator<Expression>() {
@Override
      public int compare(Expression arg0, Expression arg1) {
return arg0.getClass().getName().compareTo(arg1.getClass().getName());
}
});
Collections.sort(primaries, new Comparator<Expression>() {
@Override
      public int compare(Expression arg0, Expression arg1) {
return arg0.getClass().getName().compareTo(arg1.getClass().getName());
}
});
StringBuilder sb = new StringBuilder();
for (String line : HELP) {
sb.append(line).append("\n");
}
sb.append("\n").append("The following primary expressions are recognised:\n");
for (Expression expr : primaries) {
for (String line : expr.getUsage()) {
sb.append(" ").append(line).append("\n");
}
      for (String line : expr.getHelp()) {
sb.append(" ").append(line).append("\n");
}
sb.append("\n");
}
sb.append("The following operators are recognised:\n");
for (Expression expr : operators) {
for (String line : expr.getUsage()) {
sb.append(" ").append(line).append("\n");
}
for (String line : expr.getHelp()) {
sb.append(" ").append(line).append("\n");
}
sb.append("\n");
}
return sb.toString();
} | 3.26 |
hadoop_ListResultSchema_paths_rdh | /**
* * Get the paths value.
*
* @return the paths value
*/
public List<ListResultEntrySchema> paths() {
return this.paths;
} | 3.26 |
hadoop_ListResultSchema_withPaths_rdh | /**
* Set the paths value.
*
* @param paths
* the paths value to set
* @return the ListSchema object itself.
*/
public ListResultSchema withPaths(final List<ListResultEntrySchema> paths) {
this.paths = paths;
return this;
} | 3.26 |
hadoop_SessionTokenIdentifier_getMarshalledCredentials_rdh | /**
* Get the marshalled credentials.
*
* @return marshalled AWS credentials.
*/
public MarshalledCredentials getMarshalledCredentials() {
return marshalledCredentials;
} | 3.26 |
hadoop_SessionTokenIdentifier_toString_rdh | /**
* Add the (sanitized) marshalled credentials to the string value.
*
* @return a string value for test assertions and debugging.
*/
@Override
public String toString() {
    return super.toString() + "; " + marshalledCredentials.toString();
} | 3.26 |
hadoop_SessionTokenIdentifier_getExpiryTime_rdh | /**
* Return the expiry time in seconds since 1970-01-01.
*
* @return the time when the AWS credentials expire.
*/
@Override
public long getExpiryTime() {
return marshalledCredentials.getExpiration();
} | 3.26 |
hadoop_TimelineHBaseSchemaConstants_getUsernameSplits_rdh | /**
*
 * @return the splits where a user is a prefix.
*/
public static byte[][] getUsernameSplits() {
byte[][] kloon = f0.clone();
// Deep copy.
for (int row = 0; row < f0.length; row++) {
kloon[row] = Bytes.copy(f0[row]);
}
return kloon;
} | 3.26 |
hadoop_FSDataOutputStreamBuilder_permission_rdh | /**
* Set permission for the file.
*
* @param perm
* permission.
* @return B Generics Type.
*/
  public B permission(@Nonnull final FsPermission perm) {
checkNotNull(perm);
permission = perm;
return getThisBuilder();
} | 3.26 |
hadoop_FSDataOutputStreamBuilder_bufferSize_rdh | /**
* Set the size of the buffer to be used.
*
* @param bufSize
* buffer size.
* @return Generics Type B.
*/
public B bufferSize(int bufSize) {
f0 = bufSize;
return getThisBuilder();
} | 3.26 |
hadoop_FSDataOutputStreamBuilder_append_rdh | /**
* Append to an existing file (optional operation).
*
* @return Generics Type B.
*/
public B append() {
flags.add(CreateFlag.APPEND);
return getThisBuilder();
} | 3.26 |
hadoop_FSDataOutputStreamBuilder_replication_rdh | /**
* Set replication factor.
*
* @param replica
* replica.
* @return Generics Type B.
*/
public B replication(short replica) {
replication = replica;
return getThisBuilder();
} | 3.26 |
hadoop_FSDataOutputStreamBuilder_blockSize_rdh | /**
* Set block size.
*
* @param blkSize
* block size.
* @return B Generics Type.
*/
public B blockSize(long blkSize) {
blockSize = blkSize;
return getThisBuilder();
} | 3.26 |
hadoop_FSDataOutputStreamBuilder_recursive_rdh | /**
 * Create the parent directories if they do not exist.
*
* @return B Generics Type.
*/
public B recursive() {
recursive = true;
return getThisBuilder();
} | 3.26 |
hadoop_FSDataOutputStreamBuilder_create_rdh | /**
* Create an FSDataOutputStream at the specified path.
*
   * @return Generics Type B.
   */
  public B create() {
flags.add(CreateFlag.CREATE);
return getThisBuilder();
} | 3.26 |
hadoop_FSDataOutputStreamBuilder_isRecursive_rdh | /**
* Return true to create the parent directories if they do not exist.
*
 * @return true if the parent directories will be created when they do not exist, false otherwise.
*/
protected boolean isRecursive() {
return recursive;
} | 3.26 |
hadoop_FSDataOutputStreamBuilder_progress_rdh | /**
* Set the facility of reporting progress.
*
* @param prog
* progress.
* @return B Generics Type.
*/
  public B progress(@Nonnull final Progressable prog) {
checkNotNull(prog);
progress = prog;
return getThisBuilder();
} | 3.26 |
hadoop_FSDataOutputStreamBuilder_overwrite_rdh | /**
* Set to true to overwrite the existing file.
 * If set to false, an exception will be thrown when calling {@link #build()}
 * if the file exists.
 *
 * @param overwrite
 * 		whether to overwrite the existing file.
* @return Generics Type B.
*/
public B overwrite(boolean overwrite) {
if (overwrite) {
flags.add(CreateFlag.OVERWRITE);
} else {
flags.remove(CreateFlag.OVERWRITE);
}
    return getThisBuilder();
  } | 3.26 |
hadoop_FSDataOutputStreamBuilder_checksumOpt_rdh | /**
* Set checksum opt.
*
* @param chksumOpt
* check sum opt.
* @return Generics Type B.
*/
  public B checksumOpt(@Nonnull final ChecksumOpt chksumOpt) {
checkNotNull(chksumOpt);
checksumOpt = chksumOpt;
return getThisBuilder();
} | 3.26 |
hadoop_AppCatalog_addRestResourceClasses_rdh | /**
* Add your own resources here.
*/
private void addRestResourceClasses(final Set<Class<?>> resources) {
resources.add(AppDetailsController.class);
} | 3.26 |
hadoop_HdfsLocatedFileStatus_getStoragePolicy_rdh | /**
*
* @return the storage policy id
*/
@Override
public byte getStoragePolicy() {
return storagePolicy;
} | 3.26 |
hadoop_HdfsLocatedFileStatus_makeQualifiedLocated_rdh | /**
* This function is used to transform the underlying HDFS LocatedBlocks to
* BlockLocations. This method must be invoked before
* {@link #getBlockLocations()}.
*
* The returned BlockLocation will have different formats for replicated
* and erasure coded file.
* Please refer to
* {@link org.apache.hadoop.fs.FileSystem#getFileBlockLocations
* (FileStatus, long, long)}
* for examples.
*/
public LocatedFileStatus makeQualifiedLocated(URI defaultUri, Path path) {
makeQualified(defaultUri, path);
setBlockLocations(DFSUtilClient.locatedBlocks2Locations(getLocatedBlocks()));
return this;
} | 3.26 |
hadoop_HdfsLocatedFileStatus_getLocalNameInBytes_rdh | /**
* Get the Java UTF8 representation of the local name.
*
* @return the local name in java UTF8
*/
@Override
public byte[] getLocalNameInBytes() {
return uPath;
} | 3.26 |
hadoop_HdfsLocatedFileStatus_getErasureCodingPolicy_rdh | /**
* Get the erasure coding policy if it's set.
*
* @return the erasure coding policy
*/
@Override
public ErasureCodingPolicy getErasureCodingPolicy() {
return ecPolicy;
} | 3.26 |
hadoop_HdfsLocatedFileStatus_setGroup_rdh | // visibility
@Override
public void setGroup(String group) {
    super.setGroup(group);
} | 3.26 |
hadoop_HdfsLocatedFileStatus_getLocatedBlocks_rdh | /**
* Get block locations for this entity, in HDFS format.
* See {@link #makeQualifiedLocated(URI, Path)}.
* See {@link DFSUtilClient#locatedBlocks2Locations(LocatedBlocks)}.
*
* @return block locations
*/
public LocatedBlocks getLocatedBlocks() {
return hdfsloc;
} | 3.26 |
hadoop_HdfsLocatedFileStatus_setPermission_rdh | // visibility
@Override
public void setPermission(FsPermission permission) {
super.setPermission(permission);
} | 3.26 |
hadoop_HdfsLocatedFileStatus_m0_rdh | /**
 * Opaque referent for the symlink, to be resolved at the client.
*/
@Override
public byte[] m0() {
return uSymlink;
} | 3.26 |
hadoop_HdfsLocatedFileStatus_setOwner_rdh | // visibility
@Override
public void setOwner(String owner) {
super.setOwner(owner);
} | 3.26 |
hadoop_RequestHedgingRMFailoverProxyProvider_invoke_rdh | /**
* Creates a Executor and invokes all proxies concurrently.
*/
@Override
  public Object invoke(Object proxy, final Method method, final Object[] args) throws Throwable {
if (successfulProxy != null) {
return invokeMethod(nonRetriableProxy.get(successfulProxy), method, args);
}
LOG.info(("Looking for the active RM in " + Arrays.toString(rmServiceIds)) + "..."); ExecutorService executor = null;
CompletionService<Object> completionService;try {
Map<Future<Object>, ProxyInfo<T>> proxyMap = new HashMap<>();
executor = HadoopExecutors.newFixedThreadPool(allProxies.size());
      completionService = new ExecutorCompletionService<>(executor);
for (final ProxyInfo<T> pInfo : allProxies.values()) {
        Callable<Object> c = new Callable<Object>() {
          @Override
          public Object call() throws Exception {
            return method.invoke(pInfo.proxy, args);
}
};
proxyMap.put(completionService.submit(c), pInfo);
}
Future<Object> callResultFuture = completionService.take();
String pInfo = proxyMap.get(callResultFuture).proxyInfo;
successfulProxy = pInfo;
Object retVal;
try {
retVal = callResultFuture.get();
LOG.info(("Found active RM [" + pInfo)
+ "]");
return retVal;
} catch (Exception ex) {
// Throw exception from first responding RM so that clients can handle
// appropriately
Throwable rootCause = extraRootException(ex);
LOG.warn((((("Invocation returned exception: " + rootCause.toString()) + " on ") + "[") + pInfo) + "], so propagating back to caller.");
throw rootCause;
}
} finally {
if (executor != null) {
executor.shutdownNow();
}
}
} | 3.26 |
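invoke() above hedges the call: the same method invocation is submitted against every configured RM proxy through an ExecutorCompletionService, the first future to complete wins, the winning proxy is remembered for later calls, and the executor is then shut down. A generic, self-contained sketch of that pattern (the HedgedCall class, the endpoint strings, and the sleep times are illustrative only, not YARN code):

```java
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.function.Function;

// Fire the same call at every endpoint and keep whichever answers first.
public final class HedgedCall {
  public static <E, R> R callFirstResponder(List<E> endpoints,
      Function<E, R> call) throws Exception {
    ExecutorService executor = Executors.newFixedThreadPool(endpoints.size());
    try {
      CompletionService<R> completion =
          new ExecutorCompletionService<>(executor);
      for (E endpoint : endpoints) {
        Callable<R> task = () -> call.apply(endpoint);
        completion.submit(task);
      }
      // take() blocks until the first submitted task completes;
      // invoke() above additionally caches which proxy won.
      return completion.take().get();
    } finally {
      executor.shutdownNow(); // abandon the slower requests
    }
  }

  public static void main(String[] args) throws Exception {
    String winner = callFirstResponder(List.of("rm1", "rm2"), id -> {
      try {
        Thread.sleep(id.equals("rm1") ? 200 : 10);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
      return "answer from " + id;
    });
    System.out.println(winner); // almost always "answer from rm2"
  }
}
```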
hadoop_EvaluatingStatisticsMap_entrySet_rdh | /**
* Creating the entry set forces an evaluation of the functions.
*
* This is not a snapshot, so if the evaluators actually return
* references to mutable objects (e.g. a MeanStatistic instance)
* then that value may still change.
*
* The evaluation may be parallelized.
*
* @return an evaluated set of values
*/
  @Override
  public synchronized Set<Entry<String, E>> entrySet() {
Set<Entry<String, Function<String, E>>> evalEntries = evaluators.entrySet();
Set<Entry<String, E>> r = evalEntries.parallelStream().map(e -> new EntryImpl<>(e.getKey(), e.getValue().apply(e.getKey()))).collect(Collectors.toSet());
return r;
} | 3.26 |
hadoop_EvaluatingStatisticsMap_values_rdh | /**
* Evaluate all the entries and provide a list of the results.
*
* This is not a snapshot, so if the evaluators actually return
* references to mutable objects (e.g. a MeanStatistic instance)
* then that value may still change.
*
* @return the current list of evaluated results.
   */
  @Override
public Collection<E> values() {
Set<Entry<String, Function<String, E>>> evalEntries = evaluators.entrySet();
return evalEntries.parallelStream().map(e -> e.getValue().apply(e.getKey())).collect(Collectors.toList());
} | 3.26 |
hadoop_EvaluatingStatisticsMap_addFunction_rdh | /**
* add a mapping of a key to a function.
*
* @param key
* the key
* @param eval
* the evaluator
*/
void addFunction(String key, Function<String, E> eval) {
evaluators.put(key, eval);
} | 3.26 |
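The three snippets above describe a map whose values are computed on demand: addFunction registers an evaluator per key, and entrySet()/values() apply the evaluators (possibly in parallel) at read time, so results are a live view rather than a snapshot. A stripped-down sketch of that idea (the LazyStatsMap class, evaluate() method, and the "uptime.ms" key are assumptions for illustration):

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;
import java.util.stream.Collectors;

// Keys map to evaluator functions; values exist only when the map is read.
public class LazyStatsMap<E> {
  private final Map<String, Function<String, E>> evaluators =
      new ConcurrentHashMap<>();

  void addFunction(String key, Function<String, E> eval) {
    evaluators.put(key, eval);
  }

  // Each read re-runs the evaluators, so the result is a live view,
  // not a snapshot -- matching the caveat in the javadoc above.
  Map<String, E> evaluate() {
    return evaluators.entrySet().parallelStream()
        .collect(Collectors.toMap(Map.Entry::getKey,
            e -> e.getValue().apply(e.getKey())));
  }

  public static void main(String[] args) {
    LazyStatsMap<Long> stats = new LazyStatsMap<>();
    stats.addFunction("uptime.ms", k -> System.currentTimeMillis());
    System.out.println(stats.evaluate()); // evaluated at read time
  }
}
```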
hadoop_WasbFsck_setMockFileSystemForTesting_rdh | /**
* For testing purposes, set the file system to use here instead of relying on
* getting it from the FileSystem class based on the URI.
*
* @param fileSystem
* The file system to use.
*/
@VisibleForTesting
public void setMockFileSystemForTesting(FileSystem fileSystem) {
this.mockFileSystemForTesting = fileSystem;
} | 3.26 |
hadoop_WasbFsck_recursiveCheckChildPathName_rdh | /**
* Recursively check if a given path and its child paths have colons in their
* names. It returns true if none of them has a colon or this path does not
* exist, and false otherwise.
*/
private boolean recursiveCheckChildPathName(FileSystem fs, Path p) throws IOException {
    if (p == null) {
      return true;
    }
FileStatus status;
try {
status = fs.getFileStatus(p);
} catch (FileNotFoundException e) {
System.out.println(("Path " + p) + " does not exist!");
return true;
}
    if (status.isFile()) {
      if (containsColon(p)) {
        System.out.println("Warning: file " + p + " has a colon in its name.");
        return false;
      } else {
        return true;
}
} else {
boolean flag;
if (containsColon(p)) {
System.out.println(("Warning: directory " + p) + " has a colon in its name.");
flag = false;
} else {
flag = true;
}
FileStatus[] listed = fs.listStatus(p);
      for (FileStatus l : listed) {
if (!recursiveCheckChildPathName(fs, l.getPath())) {
flag = false;
}
}
return flag;
}
} | 3.26 |
hadoop_EntryStatus_toEntryStatus_rdh | /**
* Go from the result of a getFileStatus call or
* listing entry to a status.
* A null argument is mapped to {@link #not_found}
*
* @param st
* file status
* @return the status enum.
*/
  public static EntryStatus toEntryStatus(@Nullable FileStatus st) {
    if (st == null) {
      return not_found;
}
if (st.isDirectory()) {
      return dir;
    }
    if (st.isFile()) {
      return file;
}
return unknown;
} | 3.26 |
hadoop_ExecutorServiceFuturePool_executeRunnable_rdh | /**
*
* @param r
* runnable to run in future on executor pool
* @return future
* @throws java.util.concurrent.RejectedExecutionException
* can be thrown
* @throws NullPointerException
* if r param is null
*/
@SuppressWarnings("unchecked")
public Future<Void> executeRunnable(final Runnable r) {
return ((Future<Void>) (executor.submit(r::run)));
} | 3.26 |
hadoop_ExecutorServiceFuturePool_shutdown_rdh | /**
* Utility to shutdown the {@link ExecutorService} used by this class. Will wait up to a
* certain timeout for the ExecutorService to gracefully shutdown.
*
* @param logger
* Logger
* @param timeout
* the maximum time to wait
* @param unit
* the time unit of the timeout argument
*/
public void shutdown(Logger logger, long timeout, TimeUnit unit) {
HadoopExecutors.shutdown(executor, logger, timeout, unit);
} | 3.26 |
hadoop_ExecutorServiceFuturePool_executeFunction_rdh | /**
*
* @param f
* function to run in future on executor pool
* @return future
* @throws java.util.concurrent.RejectedExecutionException
* can be thrown
* @throws NullPointerException
* if f param is null
*/
public Future<Void> executeFunction(final Supplier<Void> f) {
return executor.submit(f::get);
} | 3.26 |
hadoop_DefaultOBSClientFactory_initConnectionSettings_rdh | /**
* Initializes all OBS SDK settings related to connection management.
*
* @param conf
* Hadoop configuration
* @param obsConf
* OBS SDK configuration
*/
@SuppressWarnings("deprecation")
private static void initConnectionSettings(final Configuration conf, final ExtObsConfiguration obsConf) {
obsConf.setMaxConnections(OBSCommonUtils.intOption(conf, OBSConstants.MAXIMUM_CONNECTIONS, OBSConstants.DEFAULT_MAXIMUM_CONNECTIONS, 1));
boolean secureConnections = conf.getBoolean(OBSConstants.SECURE_CONNECTIONS, OBSConstants.DEFAULT_SECURE_CONNECTIONS);
obsConf.setHttpsOnly(secureConnections);
obsConf.setMaxErrorRetry(OBSCommonUtils.intOption(conf, OBSConstants.MAX_ERROR_RETRIES, OBSConstants.DEFAULT_MAX_ERROR_RETRIES, 0));
obsConf.setConnectionTimeout(OBSCommonUtils.intOption(conf, OBSConstants.ESTABLISH_TIMEOUT, OBSConstants.DEFAULT_ESTABLISH_TIMEOUT, 0));
obsConf.setSocketTimeout(OBSCommonUtils.intOption(conf, OBSConstants.SOCKET_TIMEOUT, OBSConstants.DEFAULT_SOCKET_TIMEOUT, 0));
obsConf.setIdleConnectionTime(OBSCommonUtils.intOption(conf, OBSConstants.IDLE_CONNECTION_TIME, OBSConstants.DEFAULT_IDLE_CONNECTION_TIME, 1));
obsConf.setMaxIdleConnections(OBSCommonUtils.intOption(conf, OBSConstants.MAX_IDLE_CONNECTIONS, OBSConstants.DEFAULT_MAX_IDLE_CONNECTIONS, 1));
obsConf.setReadBufferSize(OBSCommonUtils.intOption(conf, OBSConstants.READ_BUFFER_SIZE, OBSConstants.DEFAULT_READ_BUFFER_SIZE, -1));// to be
// modified
obsConf.setWriteBufferSize(OBSCommonUtils.intOption(conf, OBSConstants.WRITE_BUFFER_SIZE, OBSConstants.DEFAULT_WRITE_BUFFER_SIZE, -1));// to be
// modified
obsConf.setUploadStreamRetryBufferSize(OBSCommonUtils.intOption(conf, OBSConstants.UPLOAD_STREAM_RETRY_SIZE, OBSConstants.DEFAULT_UPLOAD_STREAM_RETRY_SIZE, 1));
obsConf.setSocketReadBufferSize(OBSCommonUtils.intOption(conf, OBSConstants.SOCKET_RECV_BUFFER, OBSConstants.DEFAULT_SOCKET_RECV_BUFFER, -1));
obsConf.setSocketWriteBufferSize(OBSCommonUtils.intOption(conf, OBSConstants.SOCKET_SEND_BUFFER, OBSConstants.DEFAULT_SOCKET_SEND_BUFFER, -1));
obsConf.setKeepAlive(conf.getBoolean(OBSConstants.KEEP_ALIVE, OBSConstants.DEFAULT_KEEP_ALIVE));
obsConf.setValidateCertificate(conf.getBoolean(OBSConstants.VALIDATE_CERTIFICATE, OBSConstants.DEFAULT_VALIDATE_CERTIFICATE));
obsConf.setVerifyResponseContentType(conf.getBoolean(OBSConstants.VERIFY_RESPONSE_CONTENT_TYPE, OBSConstants.DEFAULT_VERIFY_RESPONSE_CONTENT_TYPE));
obsConf.setCname(conf.getBoolean(OBSConstants.CNAME, OBSConstants.DEFAULT_CNAME));
obsConf.setIsStrictHostnameVerification(conf.getBoolean(OBSConstants.STRICT_HOSTNAME_VERIFICATION, OBSConstants.DEFAULT_STRICT_HOSTNAME_VERIFICATION));
// sdk auth type negotiation enable
obsConf.setAuthTypeNegotiation(conf.getBoolean(OBSConstants.SDK_AUTH_TYPE_NEGOTIATION_ENABLE, OBSConstants.DEFAULT_SDK_AUTH_TYPE_NEGOTIATION_ENABLE));
// set SDK AUTH TYPE to OBS when auth type negotiation unenabled
if (!obsConf.isAuthTypeNegotiation()) {
obsConf.setAuthType(AuthTypeEnum.OBS);
}
// okhttp retryOnConnectionFailure switch, default set to true
obsConf.retryOnConnectionFailureInOkhttp(conf.getBoolean(OBSConstants.SDK_RETRY_ON_CONNECTION_FAILURE_ENABLE, OBSConstants.DEFAULT_SDK_RETRY_ON_CONNECTION_FAILURE_ENABLE));
// sdk max retry times on unexpected end of stream exception,
// default: -1 don't retry
int retryTime = conf.getInt(OBSConstants.SDK_RETRY_TIMES_ON_UNEXPECTED_END_EXCEPTION, OBSConstants.DEFAULT_SDK_RETRY_TIMES_ON_UNEXPECTED_END_EXCEPTION);
if (((retryTime > 0) && (retryTime < OBSConstants.DEFAULT_MAX_SDK_CONNECTION_RETRY_TIMES)) || ((!obsConf.isRetryOnConnectionFailureInOkhttp())
&& (retryTime < 0))) {
retryTime = OBSConstants.DEFAULT_MAX_SDK_CONNECTION_RETRY_TIMES;
}
obsConf.setMaxRetryOnUnexpectedEndException(retryTime);
} | 3.26 |
hadoop_DefaultOBSClientFactory_createHuaweiObsClient_rdh | /**
* Creates an {@link ObsClient} from the established configuration.
*
* @param conf
* Hadoop configuration
* @param obsConf
* ObsConfiguration
* @param name
* URL
* @return ObsClient client
* @throws IOException
* on any failure to create Huawei OBS client
*/
private static ObsClient createHuaweiObsClient(final Configuration conf, final ObsConfiguration obsConf, final URI name) throws IOException {
Class<?> credentialsProviderClass;
BasicSessionCredential credentialsProvider;
    ObsClient obsClient;
    try {
      credentialsProviderClass = conf.getClass(OBSConstants.OBS_CREDENTIALS_PROVIDER, null);
} catch (RuntimeException e) {
Throwable c = (e.getCause() != null) ? e.getCause() : e;
throw new IOException((("From option " + OBSConstants.OBS_CREDENTIALS_PROVIDER) + ' ') + c, c);
}
if (credentialsProviderClass == null) {
return createObsClientWithoutCredentialsProvider(conf, obsConf, name);
}
try {
Constructor<?> cons = credentialsProviderClass.getDeclaredConstructor(URI.class, Configuration.class);
credentialsProvider = ((BasicSessionCredential) (cons.newInstance(name, conf)));
} catch (NoSuchMethodException | SecurityException | IllegalAccessException | InstantiationException | InvocationTargetException e) {
Throwable c = (e.getCause() != null) ? e.getCause() : e;
throw new IOException((("From option " + OBSConstants.OBS_CREDENTIALS_PROVIDER) + ' ') + c, c);
    }
    String v14 = credentialsProvider.getSessionToken();
String ak = credentialsProvider.getOBSAccessKeyId();
    String sk = credentialsProvider.getOBSSecretKey();
    String endPoint = conf.getTrimmed(OBSConstants.ENDPOINT, "");
obsConf.setEndPoint(endPoint);
if ((v14 != null) && (v14.length() != 0)) {
obsClient = new ObsClient(ak, sk, v14, obsConf);
} else {
obsClient = new ObsClient(ak, sk, obsConf);
}
return obsClient;
} | 3.26 |
hadoop_DefaultOBSClientFactory_initProxySupport_rdh | /**
* Initializes OBS SDK proxy support if configured.
*
* @param conf
* Hadoop configuration
* @param obsConf
* OBS SDK configuration
* @throws IllegalArgumentException
* if misconfigured
* @throws IOException
* on any failure to initialize proxy
*/
private static void initProxySupport(final Configuration conf, final ExtObsConfiguration obsConf) throws IllegalArgumentException, IOException {
String proxyHost = conf.getTrimmed(OBSConstants.PROXY_HOST, "");
int proxyPort = conf.getInt(OBSConstants.PROXY_PORT, -1);
if ((!proxyHost.isEmpty()) && (proxyPort < 0)) {
if (conf.getBoolean(OBSConstants.SECURE_CONNECTIONS, OBSConstants.DEFAULT_SECURE_CONNECTIONS)) {
LOG.warn("Proxy host set without port. Using HTTPS default " +
OBSConstants.DEFAULT_HTTPS_PORT);
obsConf.getHttpProxy().setProxyPort(OBSConstants.DEFAULT_HTTPS_PORT);
} else {
LOG.warn("Proxy host set without port. Using HTTP default " + OBSConstants.DEFAULT_HTTP_PORT);
obsConf.getHttpProxy().setProxyPort(OBSConstants.DEFAULT_HTTP_PORT);
}
}
String proxyUsername = conf.getTrimmed(OBSConstants.PROXY_USERNAME);
String proxyPassword = null;
    char[] proxyPass = conf.getPassword(OBSConstants.PROXY_PASSWORD);
if (proxyPass != null) {
proxyPassword = new String(proxyPass).trim();
}
if ((proxyUsername == null) != (proxyPassword == null)) {
String msg = ((("Proxy error: " + OBSConstants.PROXY_USERNAME) + " or ") + OBSConstants.PROXY_PASSWORD) + " set without the other.";
LOG.error(msg);
throw new IllegalArgumentException(msg);
}
obsConf.setHttpProxy(proxyHost, proxyPort,
proxyUsername, proxyPassword);
if (LOG.isDebugEnabled()) {
LOG.debug("Using proxy server {}:{} as user {} on "
+ "domain {} as workstation {}", obsConf.getHttpProxy().getProxyAddr(), obsConf.getHttpProxy().getProxyPort(), obsConf.getHttpProxy().getProxyUName(), obsConf.getHttpProxy().getDomain(), obsConf.getHttpProxy().getWorkstation());
}
} | 3.26 |
hadoop_ReconfigurableBase_startReconfigurationTask_rdh | /**
* Start a reconfiguration task to reload configuration in background.
*
* @throws IOException
* raised on errors performing I/O.
*/
public void startReconfigurationTask() throws IOException {
synchronized(reconfigLock) {
if (!shouldRun) {
String errorMessage = "The server is stopped.";
        LOG.warn(errorMessage);
        throw new IOException(errorMessage);
}
if (reconfigThread != null) {
String errorMessage = "Another reconfiguration task is running.";
LOG.warn(errorMessage);
throw new IOException(errorMessage);
}
      reconfigThread = new ReconfigurationThread(this);
reconfigThread.setDaemon(true);
reconfigThread.setName("Reconfiguration Task");
reconfigThread.start();
startTime = Time.now();
}
} | 3.26 |
hadoop_ReconfigurableBase_isPropertyReconfigurable_rdh | /**
* {@inheritDoc }
*
* Subclasses may wish to override this with a more efficient implementation.
*/
@Override
public boolean isPropertyReconfigurable(String property) {
return getReconfigurableProperties().contains(property);
} | 3.26 |
hadoop_ReconfigurableBase_run_rdh | // See {@link ReconfigurationServlet#applyChanges}
public void run() {
LOG.info("Starting reconfiguration task.");
final Configuration v0 = parent.getConf();
final Configuration newConf = parent.getNewConf();
final Collection<PropertyChange> changes = parent.getChangedProperties(newConf, v0);
Map<PropertyChange, Optional<String>> results = Maps.newHashMap();
ConfigRedactor oldRedactor = new ConfigRedactor(v0);
    ConfigRedactor newRedactor = new ConfigRedactor(newConf);
    for (PropertyChange v6 : changes) {
String errorMessage = null;
String oldValRedacted = oldRedactor.redact(v6.prop, v6.oldVal);
String newValRedacted = newRedactor.redact(v6.prop, v6.newVal);
if (!parent.isPropertyReconfigurable(v6.prop)) {
LOG.info(String.format("Property %s is not configurable: old value: %s, new value: %s", v6.prop, oldValRedacted, newValRedacted));
continue;
}
LOG.info(((((("Change property: " + v6.prop) + " from \"") + (v6.oldVal == null ? "<default>" : oldValRedacted)) + "\" to \"") + (v6.newVal == null ? "<default>" : newValRedacted)) + "\".");
try {
String effectiveValue = parent.reconfigurePropertyImpl(v6.prop, v6.newVal);
if (v6.newVal != null) {
v0.set(v6.prop, effectiveValue);
} else {
v0.unset(v6.prop);
}
} catch (ReconfigurationException e) {
        Throwable cause = e.getCause();
        errorMessage = (cause == null) ? e.getMessage() : cause.getMessage();
}
results.put(v6, Optional.ofNullable(errorMessage));
}
synchronized(parent.reconfigLock) {
parent.endTime = Time.now();
parent.status = Collections.unmodifiableMap(results);
parent.reconfigThread = null;
}
} | 3.26 |
hadoop_EditLogOutputStream_getNumSync_rdh | /**
* Return number of calls to {@link #flushAndSync(boolean)}
*/
protected long getNumSync() {
    return numSync;
  } | 3.26 |
hadoop_EditLogOutputStream_flush_rdh | /**
* Flush data to persistent store.
* Collect sync metrics.
*/
public void flush() throws IOException {
flush(true);
} | 3.26 |
hadoop_EditLogOutputStream_shouldForceSync_rdh | /**
* Implement the policy when to automatically sync the buffered edits log
 * The buffered edits can be flushed when the buffer becomes full or
 * a certain period of time has elapsed.
*
* @return true if the buffered data should be automatically synced to disk
*/
public boolean shouldForceSync() {
return false;
} | 3.26 |
hadoop_EditLogOutputStream_getTotalSyncTime_rdh | /**
* Return total time spent in {@link #flushAndSync(boolean)}
*/
long getTotalSyncTime() {
return f0;
} | 3.26 |
hadoop_EditLogOutputStream_getLastJournalledTxId_rdh | /**
* Get the last txId journalled in the stream.
* The txId is recorded when FSEditLogOp is written to the stream.
* The default implementation is dummy.
* JournalSet tracks the txId uniformly for all underlying streams.
*/
public long getLastJournalledTxId() {
return HdfsServerConstants.INVALID_TXID;
} | 3.26 |
hadoop_EditLogOutputStream_setCurrentLogVersion_rdh | /**
*
* @param logVersion
* The version of the current edit log
*/
public void setCurrentLogVersion(int logVersion) {
this.currentLogVersion = logVersion;
} | 3.26 |
hadoop_EditLogOutputStream_getCurrentLogVersion_rdh | /**
*
* @return The version of the current edit log
*/
public int getCurrentLogVersion() {
return currentLogVersion;
} | 3.26 |
hadoop_MultiObjectDeleteException_translateException_rdh | /**
* A {@code MultiObjectDeleteException} is raised if one or more
* paths listed in a bulk DELETE operation failed.
* The top-level exception is therefore just "something wasn't deleted",
* but doesn't include the what or the why.
* This translation will extract an AccessDeniedException if that's one of
* the causes, otherwise grabs the status code and uses it in the
* returned exception.
*
* @param message
* text for the exception
* @return an IOE with more detail.
*/
public IOException translateException(final String message) {
LOG.info("Bulk delete operation failed to delete all objects;" +
" failure count = {}", errors().size());
final StringBuilder result = new StringBuilder(errors().size() * 256);
result.append(message).append(": ");
String exitCode = "";
for (S3Error error : errors()) {
String code = error.code();
String item = String.format("%s: %s%s: %s%n", code, error.key(), error.versionId() != null
? (" (" + error.versionId()) + ")" : "", error.message());
LOG.info(item);
result.append(item);
if (((exitCode == null) || exitCode.isEmpty()) || ACCESS_DENIED.equals(code)) {
exitCode = code;
}
}
if (ACCESS_DENIED.equals(exitCode)) {
return ((IOException) (new AccessDeniedException(result.toString()).initCause(this)));
} else {
return new AWSS3IOException(result.toString(), this);
}
} | 3.26 |
hadoop_FileDeletionTask_getBaseDirs_rdh | /**
* Get the base directories containing the subdirectory.
*
* @return the base directories for the FileDeletionTask.
*/
public List<Path> getBaseDirs() {
return this.baseDirs;
} | 3.26 |
hadoop_FileDeletionTask_getSubDir_rdh | /**
* Get the subdirectory to delete.
*
* @return the subDir for the FileDeletionTask.
*/
public Path getSubDir() {
return this.subDir;
} | 3.26 |
hadoop_FileDeletionTask_run_rdh | /**
* Delete the specified file/directory as the specified user.
*/
@Override
public void run() {
LOG.debug("Running DeletionTask : {}",
this);
boolean error = false;
if (null == getUser()) {
if ((baseDirs == null) || (baseDirs.size() == 0)) {
LOG.debug("NM deleting absolute path : {}", subDir);
try {
lfs.delete(subDir, true);
} catch (IOException e) {
error = true;
LOG.warn("Failed to delete " + subDir);
}
} else {
for (Path baseDir : baseDirs) {
Path del = (subDir == null) ? baseDir : new Path(baseDir, subDir);
LOG.debug("NM deleting path : {}", del);
try {
lfs.delete(del, true);
} catch (IOException e) {
error = true;
LOG.warn("Failed to delete " + subDir);
}
}
}
} else {
try {
LOG.debug("Deleting path: [{}] as user [{}]", subDir, getUser());
if ((baseDirs == null) ||
(baseDirs.size() == 0)) {
getDeletionService().getContainerExecutor().deleteAsUser(new DeletionAsUserContext.Builder().setUser(getUser()).setSubDir(subDir).build());
} else {
getDeletionService().getContainerExecutor().deleteAsUser(new DeletionAsUserContext.Builder().setUser(getUser()).setSubDir(subDir).setBasedirs(baseDirs.toArray(new Path[0])).build());
}
} catch (IOException | InterruptedException e) {
error = true;
LOG.warn("Failed to delete as user " + getUser(), e);
}
}
if (error) {
setSuccess(!error);
}
deletionTaskFinished();
} | 3.26 |
hadoop_FileDeletionTask_convertDeletionTaskToProto_rdh | /**
* Convert the FileDeletionTask to the Protobuf representation for storing
* in the state store and recovery.
*
* @return the protobuf representation of the FileDeletionTask.
*/
public DeletionServiceDeleteTaskProto convertDeletionTaskToProto() {
DeletionServiceDeleteTaskProto.Builder builder = getBaseDeletionTaskProtoBuilder();
builder.setTaskType(DeletionTaskType.FILE.name());
if (getSubDir() != null) {
builder.setSubdir(getSubDir().toString());
}
if (getBaseDirs() != null) {
for (Path dir : getBaseDirs()) {
builder.addBasedirs(dir.toString());
      }
    }
return builder.build();
} | 3.26 |
hadoop_FileDeletionTask_toString_rdh | /**
* Convert the FileDeletionTask to a String representation.
*
* @return String representation of the FileDeletionTask.
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder("FileDeletionTask :");
sb.append(" id : ").append(getTaskId());
sb.append(" user : ").append(getUser());sb.append(" subDir : ").append(subDir == null ? "null"
: subDir.toString());
sb.append(" baseDir : ");
if ((baseDirs == null) || (baseDirs.size() == 0)) {
sb.append("null");
} else {
for (Path baseDir : baseDirs) {
sb.append(baseDir.toString()).append(',');
}
}
return sb.toString().trim();
} | 3.26 |
hadoop_SubApplicationColumnPrefix_getColumnPrefix_rdh | /**
*
* @return the column name value
*/
public String getColumnPrefix() {
return columnPrefix;
} | 3.26 |
hadoop_RMDelegatedNodeLabelsUpdater_updateNodeLabels_rdh | /**
* Update node labels for a specified node.
*
* @param node
* the node to update node labels
*/
  public void updateNodeLabels(NodeId node) {
    synchronized (lock) {
newlyRegisteredNodes.add(node);
}
} | 3.26 |
hadoop_RMDelegatedNodeLabelsUpdater_serviceStop_rdh | /**
* Terminate the timer.
*
* @throws Exception
* exception occurs.
*/
@Override
protected void serviceStop() throws Exception {
if (nodeLabelsScheduler != null) {
nodeLabelsScheduler.cancel();
}
super.serviceStop();
} | 3.26 |
hadoop_RMDelegatedNodeLabelsUpdater_createRMNodeLabelsMappingProvider_rdh | /**
* Get the RMNodeLabelsMappingProvider which is used to provide node labels.
*/
private RMNodeLabelsMappingProvider createRMNodeLabelsMappingProvider(Configuration conf) throws IOException {
RMNodeLabelsMappingProvider nodeLabelsMappingProvider = null;
try {
Class<? extends RMNodeLabelsMappingProvider> labelsProviderClass = conf.getClass(YarnConfiguration.RM_NODE_LABELS_PROVIDER_CONFIG, null, RMNodeLabelsMappingProvider.class);
      if (labelsProviderClass != null) {
nodeLabelsMappingProvider = labelsProviderClass.newInstance();
}
} catch (InstantiationException | IllegalAccessException | RuntimeException e) {
LOG.error("Failed to create RMNodeLabelsMappingProvider based on" + " Configuration", e);
throw new IOException("Failed to create RMNodeLabelsMappingProvider : " + e.getMessage(), e);
}
if (nodeLabelsMappingProvider == null) {
String msg = "RMNodeLabelsMappingProvider should be configured when "
+ "delegated-centralized node label configuration is enabled";
LOG.error(msg);
      throw new IOException(msg);
} else {
LOG.debug("RM Node labels mapping provider class is : {}", nodeLabelsMappingProvider.getClass());}
return nodeLabelsMappingProvider; } | 3.26 |
hadoop_TypedBytesInput_read_rdh | /**
* Reads a typed bytes sequence and converts it to a Java object. The first
* byte is interpreted as a type code, and then the right number of
* subsequent bytes are read depending on the obtained type.
*
* @return the obtained object or null when the end of the file is reached
* @throws IOException
*/
public Object read() throws IOException {
int code = 1;
    try {
      code = in.readUnsignedByte();
} catch (EOFException eof) {
return null;
}
if (code == BYTES.code) {
return new Buffer(readBytes());
} else if (code == BYTE.code) {
return readByte();
} else if (code == BOOL.code) {
return readBool();
} else if (code == INT.code) {
return m1();
} else if (code == LONG.code) {
return m2();
} else if (code == FLOAT.code) {
return readFloat();
} else if (code == DOUBLE.code) {
      return readDouble();
} else if (code == STRING.code) {
return readString();
    } else if (code == VECTOR.code) {
      return m3();
} else if (code == LIST.code) {
return readList();
} else if (code == MAP.code) {
return readMap();
    } else if (code == MARKER.code) {
      return null;
} else if ((50 <= code) && (code <= 200)) {
// application-specific typecodes
return new Buffer(readBytes());
} else {
throw new RuntimeException("unknown type");
}
} | 3.26 |
hadoop_TypedBytesInput_readRawList_rdh | /**
* Reads the raw bytes following a <code>Type.LIST</code> code.
*
* @return the obtained bytes sequence
* @throws IOException
*/
public byte[] readRawList() throws IOException {
Buffer buffer = new Buffer(new byte[]{ ((byte) (LIST.code)) });
byte[] bytes = readRaw();
while (bytes != null) {
buffer.append(bytes);
bytes = readRaw();
}
buffer.append(new byte[]{ ((byte) (MARKER.code)) });
return buffer.get();
} | 3.26 |
hadoop_TypedBytesInput_m2_rdh | /**
* Reads the long following a <code>Type.LONG</code> code.
*
* @return the obtained long
* @throws IOException
*/
  public long m2() throws IOException {
    return in.readLong();
} | 3.26 |
hadoop_TypedBytesInput_readBool_rdh | /**
* Reads the boolean following a <code>Type.BOOL</code> code.
*
* @return the obtained boolean
* @throws IOException
*/
public boolean readBool() throws IOException {
    return in.readBoolean();
} | 3.26 |
hadoop_TypedBytesInput_readVectorHeader_rdh | /**
* Reads the header following a <code>Type.VECTOR</code> code.
*
* @return the number of elements in the vector
* @throws IOException
*/
public int readVectorHeader() throws IOException {
return in.readInt();
} | 3.26 |
hadoop_TypedBytesInput_get_rdh | /**
* Get a thread-local typed bytes input for the supplied {@link DataInput}.
*
* @param in
* data input object
* @return typed bytes input corresponding to the supplied {@link DataInput}.
*/
public static TypedBytesInput get(DataInput in) {
TypedBytesInput bin = TB_IN.get();
bin.setDataInput(in);
return bin;
} | 3.26 |
hadoop_TypedBytesInput_readRawLong_rdh | /**
* Reads the raw bytes following a <code>Type.LONG</code> code.
*
* @return the obtained bytes sequence
* @throws IOException
*/
public byte[] readRawLong() throws IOException {
byte[] bytes = new byte[9];
bytes[0] = ((byte) (LONG.code));
in.readFully(bytes, 1, 8);
return bytes;
} | 3.26 |
hadoop_TypedBytesInput_readRawInt_rdh | /**
* Reads the raw bytes following a <code>Type.INT</code> code.
*
* @return the obtained bytes sequence
* @throws IOException
*/
public byte[] readRawInt() throws IOException {
byte[] bytes = new byte[5];
    bytes[0] = ((byte) (INT.code));
    in.readFully(bytes, 1, 4);
    return bytes;
  } | 3.26 |
hadoop_TypedBytesInput_readList_rdh | /**
* Reads the list following a <code>Type.LIST</code> code.
*
* @return the obtained list
* @throws IOException
*/
@SuppressWarnings("unchecked")
public List readList() throws IOException {
List list = new ArrayList();
Object v24 = read();
while (v24 != null) {
list.add(v24);
      v24 = read();
}
return list;
} | 3.26 |
hadoop_TypedBytesInput_readRawVector_rdh | /**
* Reads the raw bytes following a <code>Type.VECTOR</code> code.
*
* @return the obtained bytes sequence
* @throws IOException
*/
public byte[] readRawVector() throws IOException {
Buffer buffer = new Buffer();
int length = readVectorHeader();
buffer.append(new byte[]{ ((byte) (VECTOR.code)), ((byte) (0xff & (length >> 24))), ((byte) (0xff & (length >> 16))), ((byte) (0xff & (length >> 8))), ((byte) (0xff & length)) });
for (int i = 0; i < length; i++) {
buffer.append(readRaw());
}
return buffer.get();
} | 3.26 |
hadoop_TypedBytesInput_m3_rdh | /**
* Reads the vector following a <code>Type.VECTOR</code> code.
*
* @return the obtained vector
* @throws IOException
*/
@SuppressWarnings("unchecked")
  public ArrayList m3() throws IOException {
int length = readVectorHeader();
ArrayList result = new ArrayList(length);
for (int i = 0; i < length; i++) {
result.add(read());
}
return result;
} | 3.26 |
hadoop_TypedBytesInput_readMap_rdh | /**
* Reads the map following a <code>Type.MAP</code> code.
*
* @return the obtained map
* @throws IOException
*/
@SuppressWarnings("unchecked")
public TreeMap readMap() throws IOException {
    int length = readMapHeader();
    TreeMap result = new TreeMap();
    for (int i = 0; i < length; i++) {
      Object key = read();
      Object value = read();
result.put(key, value);
}
return result;
} | 3.26 |
hadoop_TypedBytesInput_readRawMap_rdh | /**
* Reads the raw bytes following a <code>Type.MAP</code> code.
*
* @return the obtained bytes sequence
* @throws IOException
*/
  public byte[] readRawMap() throws IOException {
    Buffer buffer = new Buffer();
int length = readMapHeader();
    buffer.append(new byte[]{ ((byte) (MAP.code)), ((byte) (0xff & (length >> 24))), ((byte) (0xff & (length >> 16))), ((byte) (0xff & (length >> 8))), ((byte) (0xff & length)) });
for (int i = 0; i < length; i++) {
buffer.append(readRaw());
buffer.append(readRaw());
}
return buffer.get();
} | 3.26 |
hadoop_TypedBytesInput_readBytes_rdh | /**
* Reads the bytes following a <code>Type.BYTES</code> code.
*
* @return the obtained bytes sequence
* @throws IOException
*/
public byte[] readBytes() throws IOException {
int length = in.readInt();
byte[] bytes = new byte[length];
in.readFully(bytes);
return bytes;
} | 3.26 |
hadoop_TypedBytesInput_readRawFloat_rdh | /**
* Reads the raw bytes following a <code>Type.FLOAT</code> code.
*
* @return the obtained bytes sequence
* @throws IOException
*/
  public byte[] readRawFloat() throws IOException {
    byte[] bytes = new byte[5];
    bytes[0] = ((byte) (FLOAT.code));
    in.readFully(bytes, 1, 4);
return bytes;
} | 3.26 |
hadoop_TypedBytesInput_readDouble_rdh | /**
* Reads the double following a <code>Type.DOUBLE</code> code.
*
* @return the obtained double
* @throws IOException
*/
public double readDouble() throws IOException {
return in.readDouble();
} | 3.26 |
hadoop_TypedBytesInput_readRawByte_rdh | /**
* Reads the raw byte following a <code>Type.BYTE</code> code.
*
* @return the obtained byte
* @throws IOException
*/
  public byte[] readRawByte() throws IOException {
byte[] bytes = new byte[2];
bytes[0] = ((byte) (BYTE.code));
in.readFully(bytes, 1, 1);
return bytes;
} | 3.26 |
hadoop_TypedBytesInput_m1_rdh | /**
* Reads the integer following a <code>Type.INT</code> code.
*
* @return the obtained integer
* @throws IOException
*/
public int m1() throws IOException {
return in.readInt();
} | 3.26 |
hadoop_TypedBytesInput_readRawString_rdh | /**
* Reads the raw bytes following a <code>Type.STRING</code> code.
*
* @return the obtained bytes sequence
* @throws IOException
*/
public byte[] readRawString() throws IOException {
int length = in.readInt();
byte[] bytes = new byte[5 + length];
bytes[0] = ((byte) (STRING.code));
bytes[1] = ((byte) (0xff & (length >> 24)));
bytes[2] = ((byte) (0xff & (length >> 16)));
bytes[3] = ((byte) (0xff & (length >> 8)));
bytes[4] = ((byte) (0xff & length));
in.readFully(bytes, 5, length);
return bytes;
} | 3.26 |
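readRawString above copies one complete typed-bytes record verbatim: a single type-code byte, a 4-byte big-endian length, then the UTF-8 payload. A standalone sketch of that framing (the TypedBytesStringFraming class and the numeric STRING code are assumed illustrative values, not taken from the snippet):

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

// Write and read one "type code + big-endian length + payload" record,
// the same layout the readRaw* helpers above copy byte for byte.
public class TypedBytesStringFraming {
  static final int STRING_CODE = 7; // assumed value of Type.STRING.code

  static byte[] writeString(String s) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(baos);
    byte[] utf8 = s.getBytes(StandardCharsets.UTF_8);
    out.writeByte(STRING_CODE);   // type code
    out.writeInt(utf8.length);    // big-endian length, like bytes[1..4] above
    out.write(utf8);              // payload
    return baos.toByteArray();
  }

  static String readString(byte[] record) throws IOException {
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(record));
    int code = in.readUnsignedByte();   // consume the type code
    int length = in.readInt();
    byte[] payload = new byte[length];
    in.readFully(payload);
    return new String(payload, StandardCharsets.UTF_8);
  }

  public static void main(String[] args) throws IOException {
    byte[] record = writeString("hello");
    System.out.println(record.length);      // 1 + 4 + 5 = 10 bytes
    System.out.println(readString(record)); // hello
  }
}
```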
hadoop_TypedBytesInput_readMapHeader_rdh | /**
* Reads the header following a <code>Type.MAP</code> code.
*
* @return the number of key-value pairs in the map
* @throws IOException
   */
  public int readMapHeader() throws IOException {
return in.readInt();
} | 3.26 |
hadoop_TypedBytesInput_readString_rdh | /**
* Reads the string following a <code>Type.STRING</code> code.
*
* @return the obtained string
* @throws IOException
*/
public String readString() throws IOException {
return WritableUtils.readString(in);
} | 3.26 |
hadoop_TypedBytesInput_readFloat_rdh | /**
* Reads the float following a <code>Type.FLOAT</code> code.
*
* @return the obtained float
* @throws IOException
*/
  public float readFloat() throws IOException {
    return in.readFloat();
} | 3.26 |