name (string) | code_snippet (string) | score (float64)
---|---|---|
hadoop_StagingCommitter_deleteStagingUploadsParentDirectory_rdh | /**
* Delete the multipart upload staging directory.
*
* @param context
* job context
* @throws IOException
* IO failure
*/
protected void deleteStagingUploadsParentDirectory(JobContext context) throws IOException {
Path stagingUploadsPath = Paths.getStagingUploadsParentDirectory(context.getConfiguration(), getUUID());
ignoreIOExceptions(LOG, "Deleting staging uploads path", stagingUploadsPath.toString(), () -> deleteWithWarning(stagingUploadsPath.getFileSystem(getConf()), stagingUploadsPath, true));
} | 3.26 |
hadoop_StagingCommitter_getPendingTaskAttemptsPath_rdh | /**
* Compute the path where the output of pending task attempts are stored.
*
* @param context
* the context of the job with pending tasks.
* @return the path where the output of pending task attempts are stored.
*/
private static Path getPendingTaskAttemptsPath(JobContext context, Path out) {
    return new Path(getJobAttemptPath(context, out), TEMPORARY);
} | 3.26 |
hadoop_StagingCommitter_getTaskOutput_rdh | /**
* Lists the output of a task under the task attempt path. Subclasses can
* override this method to change how output files are identified.
* <p>
* This implementation lists the files that are direct children of the output
* path and filters hidden files (file names starting with '.' or '_').
* <p>
* The task attempt path is provided by
* {@link #getTaskAttemptPath(TaskAttemptContext)}
*
* @param context
* this task's {@link TaskAttemptContext}
* @return the output files produced by this task in the task attempt path
* @throws IOException
* on a failure
*/
protected List<LocatedFileStatus> getTaskOutput(TaskAttemptContext context) throws IOException {
// get files on the local FS in the attempt path
Path attemptPath = requireNonNull(getTaskAttemptPath(context), "No attemptPath path");
LOG.debug("Scanning {} for files to commit", attemptPath);
return toList(listAndFilter(getTaskAttemptFilesystem(context), attemptPath, true, HIDDEN_FILE_FILTER));
} | 3.26 |
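The filter referenced above is the committer's HIDDEN_FILE_FILTER constant; a minimal stand-in with the same '.'/'_' rule might look like the sketch below (the class name is illustrative, not the actual constant).

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;

/** Illustrative stand-in for a hidden-file filter: rejects names starting with '.' or '_'. */
public final class HiddenFileFilterSketch implements PathFilter {
  @Override
  public boolean accept(Path path) {
    String name = path.getName();
    return !name.startsWith(".") && !name.startsWith("_");
  }
}
```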
hadoop_StagingCommitter_getJobAttemptPath_rdh | /**
* Compute the path where the output of a given job attempt will be placed.
*
* @param context
* the context of the job. This is used to get the
* application attempt ID.
* @param out
* the output path to place these in.
* @return the path to store job attempt data.
*/
public static Path getJobAttemptPath(JobContext context, Path out) {
    return getJobAttemptPath(getAppAttemptId(context), out);
  } | 3.26 |
hadoop_StagingCommitter_m0_rdh | /**
* Init the context config with everything needed for the file output
* committer. In particular, this code currently only works with
* commit algorithm 1.
*
* @param context
* context to configure.
*/
protected void m0(JobContext context) {
    context.getConfiguration().setInt(FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION, 1);
  } | 3.26 |
hadoop_StagingCommitter_cleanup_rdh | /**
* Staging committer cleanup includes calling wrapped committer's
* cleanup method, and removing staging uploads path and all
* destination paths in the final filesystem.
*
* @param commitContext
* commit context
* @param suppressExceptions
* should exceptions be suppressed?
* @throws IOException
* IO failures if exceptions are not suppressed.
*/
@Override
@SuppressWarnings("deprecation")
  protected void cleanup(CommitContext commitContext, boolean suppressExceptions) throws IOException {
maybeIgnore(suppressExceptions, "Cleanup wrapped committer", () -> wrappedCommitter.cleanupJob(commitContext.getJobContext()));
maybeIgnore(suppressExceptions, "Delete staging uploads path", () -> deleteStagingUploadsParentDirectory(commitContext.getJobContext()));
maybeIgnore(suppressExceptions, "Delete destination paths", () -> deleteDestinationPaths(commitContext.getJobContext()));
super.cleanup(commitContext, suppressExceptions);
} | 3.26 |
hadoop_StagingCommitter_commitTaskInternal_rdh | /**
* Commit the task by uploading all created files and then
* writing a pending entry for them.
*
* @param context
* task context
* @param taskOutput
* list of files from the output
* @param commitContext
* commit context
* @return number of uploads committed.
* @throws IOException
* IO Failures.
*/
protected int commitTaskInternal(final TaskAttemptContext context, List<? extends FileStatus> taskOutput, CommitContext commitContext) throws IOException {
LOG.debug("{}: commitTaskInternal", getRole());
Configuration conf = context.getConfiguration();
final Path attemptPath = getTaskAttemptPath(context);
FileSystem attemptFS = getTaskAttemptFilesystem(context);
LOG.debug("{}: attempt path is {}", getRole(), attemptPath);
// add the commits file to the wrapped committer's task attempt location.
// of this method.
Path commitsAttemptPath = wrappedCommitter.getTaskAttemptPath(context);
FileSystem commitsFS = commitsAttemptPath.getFileSystem(conf);
// keep track of unfinished commits in case one fails. if something fails,
// we will try to abort the ones that had already succeeded.
int commitCount = taskOutput.size();
final Queue<SinglePendingCommit> commits = new ConcurrentLinkedQueue<>();
LOG.info("{}: uploading from staging directory to S3 {}", getRole(), attemptPath);
LOG.info("{}: Saving pending data information to {}", getRole(), commitsAttemptPath);
if (taskOutput.isEmpty()) {
// there is nothing to write. needsTaskCommit() should have caught
// this, so warn that there is some kind of problem in the protocol.
LOG.warn("{}: No files to commit", getRole());
} else {
boolean threw = true;
// before the uploads, report some progress
context.progress();
PendingSet pendingCommits = new PendingSet(commitCount);
pendingCommits.putExtraData(TASK_ATTEMPT_ID, context.getTaskAttemptID().toString());
try {
TaskPool.foreach(taskOutput).stopOnFailure().suppressExceptions(false).executeWith(commitContext.getOuterSubmitter()).run(stat -> {
Path path = stat.getPath();
File localFile = new File(path.toUri().getPath());
String relative = Paths.getRelativePath(attemptPath, path);
String partition = Paths.getPartition(relative);
String key = getFinalKey(relative, context);
Path destPath = getDestS3AFS().keyToQualifiedPath(key);
SinglePendingCommit commit = getCommitOperations().uploadFileToPendingCommit(localFile, destPath, partition, uploadPartSize, context);
LOG.debug("{}: adding pending commit {}", getRole(), commit);
commits.add(commit);
});
for (SinglePendingCommit commit : commits) {
pendingCommits.add(commit);
}
      // maybe add in the IOStatistics of this thread
      if (commitContext.isCollectIOStatistics()) {
        pendingCommits.getIOStatistics().aggregate(commitContext.getIOStatisticsContext().getIOStatistics());
      }
// save the data
// overwrite any existing file, so whichever task attempt
// committed last wins.
LOG.debug("Saving {} pending commit(s)) to file {}", pendingCommits.size(), commitsAttemptPath);
pendingCommits.save(commitsFS, commitsAttemptPath, commitContext.getPendingSetSerializer());
threw = false;
} finally {
if (threw) {
LOG.error("{}: Exception during commit process, aborting {} commit(s)", getRole(), commits.size());
try (DurationInfo ignored = new DurationInfo(LOG, "Aborting %s uploads", commits.size())) {
TaskPool.foreach(commits).suppressExceptions().executeWith(commitContext.getOuterSubmitter()).run(commitContext::abortSingleCommit);
}
deleteTaskAttemptPathQuietly(context);
}
}
// always purge attempt information at this point.
Paths.clearTempFolderInfo(context.getTaskAttemptID());
}
LOG.debug("Committing wrapped task");
    wrappedCommitter.commitTask(context);
    LOG.debug("Cleaning up attempt dir {}", attemptPath);
attemptFS.delete(attemptPath, true);
return commits.size();
} | 3.26 |
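The try/finally structure above follows a common abort-on-failure pattern: a "threw" flag is cleared only after the happy path completes, so the finally block knows whether partially staged uploads must be rolled back. A minimal, self-contained sketch of that pattern; the Runnable parameters are hypothetical stand-ins for the committer's upload, save, and abort steps, not its API.

```java
/** Illustrative only: the "threw" flag pattern used in commitTaskInternal above. */
final class AbortOnFailureSketch {
  void commitWork(Runnable upload, Runnable savePending, Runnable abortQuietly) {
    boolean threw = true;
    try {
      upload.run();       // stage the data
      savePending.run();  // persist the pending-commit metadata
      threw = false;      // only reached when both steps succeeded
    } finally {
      if (threw) {
        abortQuietly.run();  // best-effort rollback of partially staged work
      }
    }
  }
}
```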
hadoop_StagingCommitter_getJobAttemptFileSystem_rdh | /**
* Get the filesystem for the job attempt.
*
* @param context
* the context of the job. This is used to get the
* application attempt ID.
* @return the FS to store job attempt data.
* @throws IOException
* failure to create the FS.
*/
public FileSystem getJobAttemptFileSystem(JobContext context) throws IOException {
Path p = getJobAttemptPath(context);
return p.getFileSystem(context.getConfiguration());
} | 3.26 |
hadoop_StagingCommitter_getFinalKey_rdh | /**
* Returns the final S3 key for a relative path. Subclasses can override this
* method to upload files to a different S3 location.
* <p>
* This implementation concatenates the relative path with the key prefix
* from the output path.
* If {@link CommitConstants#FS_S3A_COMMITTER_STAGING_UNIQUE_FILENAMES} is
* set, then the task UUID is also included in the calculation
*
* @param relative
* the path of a file relative to the task attempt path
* @param context
* the JobContext or TaskAttemptContext for this job
* @return the S3 key where the file will be uploaded
*/
protected String getFinalKey(String relative, JobContext context) {
if (uniqueFilenames) {
return (getS3KeyPrefix(context) + "/") + Paths.addUUID(relative, getUUID());} else {
return (getS3KeyPrefix(context) + "/") + relative;
}
} | 3.26 |
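The two branches above produce either a plain prefix/relative key or one with the job UUID woven into the file name. A hedged sketch of that distinction follows; the exact UUID placement is simplified and does not reproduce Paths.addUUID.

```java
/** Illustrative only: the two key layouts produced by getFinalKey above. */
final class FinalKeySketch {
  static String finalKey(String prefix, String relative, String uuid, boolean uniqueFilenames) {
    if (uniqueFilenames) {
      // unique-filename mode: the job UUID is added to the name
      // (simplified here to a suffix placed before the extension)
      int dot = relative.lastIndexOf('.');
      String withUuid = dot < 0
          ? relative + "-" + uuid
          : relative.substring(0, dot) + "-" + uuid + relative.substring(dot);
      return prefix + "/" + withUuid;
    }
    return prefix + "/" + relative;
  }

  public static void main(String[] args) {
    // layout shown is illustrative
    System.out.println(finalKey("tables/out", "part-0000.parquet", "job-1234", true));
    System.out.println(finalKey("tables/out", "part-0000.parquet", "job-1234", false));
  }
}
```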
hadoop_StagingCommitter_useUniqueFilenames_rdh | /**
* Is this committer using unique filenames?
*
* @return true if unique filenames are used.
*/
  public Boolean useUniqueFilenames() {
return uniqueFilenames;
} | 3.26 |
hadoop_StagingCommitter_getS3KeyPrefix_rdh | /**
* Get the key of the destination "directory" of the job/task.
*
* @param context
* job context
* @return key to write to
*/
private String getS3KeyPrefix(JobContext context) {
return s3KeyPrefix;
} | 3.26 |
hadoop_StagingCommitter_m1_rdh | /**
* Validate the task attempt context; makes sure
* that the task attempt ID data is valid.
*
* @param context
* task context
*/
private static void m1(TaskAttemptContext context) {
requireNonNull(context, "null context");
requireNonNull(context.getTaskAttemptID(), "null task attempt ID");
requireNonNull(context.getTaskAttemptID().getTaskID(), "null task ID");
requireNonNull(context.getTaskAttemptID().getJobID(), "null job ID");
} | 3.26 |
hadoop_StagingCommitter_taskAttemptWorkingPath_rdh | /**
* Get the work path for a task.
*
* @param context
* job/task complex
* @param uuid
* UUID
* @return a path
* @throws IOException
* failure to build the path
*/
  private static Path taskAttemptWorkingPath(TaskAttemptContext context, String uuid) throws IOException {
return getTaskAttemptPath(context, Paths.getLocalTaskAttemptTempDir(context.getConfiguration(), uuid, context.getTaskAttemptID()));
} | 3.26 |
hadoop_StagingCommitter_preCommitJob_rdh | /**
* Pre-commit actions for a job.
* Loads all the pending files to verify they can be loaded
* and parsed.
*
* @param commitContext
* commit context
* @param pending
* pending commits
* @throws IOException
* any failure
*/
@Override
public void preCommitJob(CommitContext commitContext, final ActiveCommit pending) throws IOException {
// see if the files can be loaded.
precommitCheckPendingFiles(commitContext, pending);
} | 3.26 |
hadoop_StagingCommitter_getBaseTaskAttemptPath_rdh | /**
* Return the local work path as the destination for writing work.
*
* @param context
* the context of the task attempt.
* @return a path in the local filesystem.
*/
@Override
public Path getBaseTaskAttemptPath(TaskAttemptContext context) {
// a path on the local FS for files that will be uploaded
return getWorkPath();
} | 3.26 |
hadoop_CompressedWritable_ensureInflated_rdh | /**
* Must be called by all methods which access fields to ensure that the data
* has been uncompressed.
*/
protected void ensureInflated() {
if (compressed != null) {
try {
ByteArrayInputStream deflated = new ByteArrayInputStream(compressed);
DataInput inflater = new DataInputStream(new InflaterInputStream(deflated));
readFieldsCompressed(inflater);
compressed = null;
} catch (IOException e) {
throw new RuntimeException(e);
}
}
} | 3.26 |
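Subclasses of CompressedWritable implement only the compressed read/write hooks and call ensureInflated() from every accessor, as the Javadoc above requires. A minimal sketch with an illustrative single-field record (the field layout is an assumption, not from the snippet):

```java
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.CompressedWritable;

/** Sketch of a CompressedWritable subclass; the payload field is illustrative. */
public class CompressedRecord extends CompressedWritable {
  private String payload = "";

  @Override
  protected void writeCompressed(DataOutput out) throws IOException {
    out.writeUTF(payload);                 // fields are deflated by the base class on write()
  }

  @Override
  protected void readFieldsCompressed(DataInput in) throws IOException {
    payload = in.readUTF();                // called lazily from ensureInflated()
  }

  public String getPayload() {
    ensureInflated();                      // every accessor must inflate first
    return payload;
  }

  public void setPayload(String payload) {
    ensureInflated();
    this.payload = payload;
  }
}
```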
hadoop_YarnClientUtils_generateToken_rdh | /**
* Generate SPNEGO challenge request token.
*
* @param server
* - hostname to contact
* @throws IOException
* thrown if doAs failed
* @throws InterruptedException
* thrown if doAs is interrupted
* @return SPNEGO token challenge
*/
public static String generateToken(String server) throws IOException, InterruptedException {
    UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
    LOG.debug("The user credential is {}", currentUser);
    String challenge = currentUser.doAs(new PrivilegedExceptionAction<String>() {
@Override
public String run() throws Exception {
try {
GSSManager manager = GSSManager.getInstance();
// GSS name for server
GSSName serverName = manager.createName("HTTP@" + server, GSSName.NT_HOSTBASED_SERVICE);
// Create a GSSContext for authentication with the service.
// We're passing client credentials as null since we want them to
// be read from the Subject.
// We're passing Oid as null to use the default.
GSSContext gssContext = manager.createContext(serverName.canonicalize(null), null, null, GSSContext.DEFAULT_LIFETIME);
gssContext.requestMutualAuth(true);
gssContext.requestCredDeleg(true);
// Establish context
byte[] inToken = new byte[0];
byte[] outToken = gssContext.initSecContext(inToken, 0, inToken.length);
gssContext.dispose();
// Base64 encoded and stringified token for server
LOG.debug("Got valid challenge for host {}", serverName);
          return new String(f0.encode(outToken), StandardCharsets.US_ASCII);
        } catch (GSSException e) {
LOG.error("Error: ", e);
throw new AuthenticationException(e);
}
}
});
    return challenge;
  } | 3.26 |
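The returned challenge is typically sent in an HTTP Authorization header using the Negotiate scheme. A minimal sketch of how a caller might attach it; the class and method names here are illustrative, not part of YarnClientUtils.

```java
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;

/** Illustrative only: attaching a SPNEGO challenge token to an HTTP request. */
final class SpnegoHeaderSketch {
  static HttpURLConnection open(URL url, String spnegoToken) throws IOException {
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    // SPNEGO over HTTP carries the base64 token in the Negotiate scheme
    conn.setRequestProperty("Authorization", "Negotiate " + spnegoToken);
    return conn;
  }
}
```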
hadoop_YarnClientUtils_getRmPrincipal_rdh | /**
 * Perform the <code>_HOST</code> replacement in the {@code principal},
 * returning the result. Correctly handles HA resource manager configurations.
*
* @param rmPrincipal
* the principal string to prepare
* @param conf
* the configuration
* @return the prepared principal string
* @throws IOException
* thrown if there's an error replacing the host name
*/
public static String getRmPrincipal(String rmPrincipal, Configuration conf) throws IOException {
if (rmPrincipal == null) {
throw new IllegalArgumentException("RM principal string is null");
}
if (HAUtil.isHAEnabled(conf)) {
conf = getYarnConfWithRmHaId(conf);
    }
    String hostname = conf.getSocketAddr(YarnConfiguration.RM_ADDRESS, YarnConfiguration.DEFAULT_RM_ADDRESS, YarnConfiguration.DEFAULT_RM_PORT).getHostName();
    return SecurityUtil.getServerPrincipal(rmPrincipal, hostname);
} | 3.26 |
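A short usage sketch of the _HOST replacement shown above; the principal string and realm are placeholders, and the YarnClientUtils package path is an assumption.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.client.util.YarnClientUtils;  // package path assumed
import org.apache.hadoop.yarn.conf.YarnConfiguration;

/** Illustrative only: preparing an RM principal for a Kerberized cluster. */
final class RmPrincipalSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new YarnConfiguration();
    // "_HOST" is replaced with the host from yarn.resourcemanager.address;
    // with RM HA enabled, the address of the resolved RM_HA_ID is used instead.
    String principal = YarnClientUtils.getRmPrincipal("rm/_HOST@EXAMPLE.COM", conf);
    System.out.println(principal);
  }
}
```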
hadoop_YarnClientUtils_buildNodeLabelsFromStr_rdh | /**
 * Creates node labels from a string.
*
* @param args
* nodelabels string to be parsed
* @return list of node labels
*/
public static List<NodeLabel> buildNodeLabelsFromStr(String args) {
List<NodeLabel> nodeLabels = new ArrayList<>();
for (String p : args.split(",")) {
if (!p.trim().isEmpty()) {
String labelName = p;
// Try to parse exclusive
boolean exclusive = NodeLabel.DEFAULT_NODE_LABEL_EXCLUSIVITY;
int leftParenthesisIdx = p.indexOf("(");
int rightParenthesisIdx = p.indexOf(")");
if (((leftParenthesisIdx == (-1)) && (rightParenthesisIdx != (-1)))
|| ((leftParenthesisIdx != (-1)) && (rightParenthesisIdx == (-1)))) {
// Parentheses not match
throw new IllegalArgumentException(ADD_LABEL_FORMAT_ERR_MSG);
}
if ((leftParenthesisIdx > 0) && (rightParenthesisIdx > 0)) {
if (leftParenthesisIdx > rightParenthesisIdx) {
// Parentheses not match
throw new IllegalArgumentException(ADD_LABEL_FORMAT_ERR_MSG);
}
          String property = p.substring(p.indexOf("(") + 1, p.indexOf(")"));
if (property.contains("=")) {
String key = property.substring(0, property.indexOf("=")).trim();
            String value = property.substring(property.indexOf("=") + 1, property.length()).trim();
            // Now we only support one property, which is exclusive, so check if
// key = exclusive and value = {true/false}
if ("exclusive".equals(key) && ImmutableSet.of("true", "false").contains(value)) {
exclusive = Boolean.parseBoolean(value);
} else {
              throw new IllegalArgumentException(ADD_LABEL_FORMAT_ERR_MSG);
            }
} else if (!property.trim().isEmpty()) {
throw new IllegalArgumentException(ADD_LABEL_FORMAT_ERR_MSG);
}
}
// Try to get labelName if there's "(..)"
if (labelName.contains("(")) {
labelName = labelName.substring(0, labelName.indexOf("(")).trim();
}
nodeLabels.add(NodeLabel.newInstance(labelName, exclusive));
}
}
if (nodeLabels.isEmpty()) {
throw new IllegalArgumentException(NO_LABEL_ERR_MSG);
}
return nodeLabels;
}
/**
* Returns a {@link YarnConfiguration} built from the {@code conf} parameter
* that is guaranteed to have the {@link YarnConfiguration#RM_HA_ID}
* property set.
*
* @param conf
* the base configuration
* @return a {@link YarnConfiguration} built from the base
{@link Configuration} | 3.26 |
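buildNodeLabelsFromStr above accepts a comma-separated list of entries of the form "label" or "label(exclusive=true|false)". A usage sketch; the label names are placeholders and the YarnClientUtils package path is an assumption.

```java
import java.util.List;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.client.util.YarnClientUtils;  // package path assumed

/** Illustrative only: parsing a node-label argument string. */
final class NodeLabelParseSketch {
  public static void main(String[] args) {
    // "gpu" keeps the default exclusivity; "spot(exclusive=false)" overrides it
    List<NodeLabel> labels = YarnClientUtils.buildNodeLabelsFromStr("gpu,spot(exclusive=false)");
    for (NodeLabel label : labels) {
      System.out.println(label.getName() + " exclusive=" + label.isExclusive());
    }
  }
}
```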
hadoop_BlockDispatcher_receiveResponse_rdh | /**
* Receive a reportedBlock copy response from the input stream.
*/
private static void receiveResponse(DataInputStream in) throws IOException {
BlockOpResponseProto response = BlockOpResponseProto.parseFrom(vintPrefixed(in));
while (response.getStatus() == Status.IN_PROGRESS) {
// read intermediate responses
response = BlockOpResponseProto.parseFrom(vintPrefixed(in));
}
    String logInfo = "reportedBlock move is failed";
DataTransferProtoUtil.checkBlockOpStatus(response, logInfo);
} | 3.26 |
hadoop_BlockDispatcher_sendRequest_rdh | /**
* Send a reportedBlock replace request to the output stream.
*/
private static void sendRequest(DataOutputStream out, ExtendedBlock eb, Token<BlockTokenIdentifier> accessToken, DatanodeInfo source, StorageType targetStorageType) throws IOException {
new Sender(out).replaceBlock(eb, targetStorageType, accessToken, source.getDatanodeUuid(), source, null);
} | 3.26 |
hadoop_BlockDispatcher_moveBlock_rdh | /**
* Moves the given block replica to the given target node and wait for the
* response.
*
* @param blkMovingInfo
* block to storage info
* @param saslClient
* SASL for DataTransferProtocol on behalf of a client
* @param eb
* extended block info
* @param sock
* target node's socket
* @param km
* for creation of an encryption key
* @param accessToken
* connection block access token
* @return status of the block movement
*/
public BlockMovementStatus moveBlock(BlockMovingInfo blkMovingInfo, SaslDataTransferClient saslClient, ExtendedBlock eb, Socket sock, DataEncryptionKeyFactory km, Token<BlockTokenIdentifier> accessToken) throws IOException {
LOG.info("Start moving block:{} from src:{} to destin:{} to satisfy " + "storageType, sourceStoragetype:{} and destinStoragetype:{}", blkMovingInfo.getBlock(), blkMovingInfo.getSource(), blkMovingInfo.getTarget(), blkMovingInfo.getSourceStorageType(), blkMovingInfo.getTargetStorageType());
DataOutputStream out = null;
DataInputStream in = null;
try {
NetUtils.connect(sock, NetUtils.createSocketAddr(blkMovingInfo.getTarget().getXferAddr(connectToDnViaHostname)), socketTimeout);
// Set read timeout so that it doesn't hang forever against
// unresponsive nodes. Datanode normally sends IN_PROGRESS response
// twice within the client read timeout period (every 30 seconds by
// default). Here, we make it give up after "socketTimeout * 5" period
// of no response.
sock.setSoTimeout(socketTimeout * 5);
sock.setKeepAlive(true);
OutputStream unbufOut = sock.getOutputStream();
InputStream unbufIn = sock.getInputStream();
LOG.debug("Connecting to datanode {}", blkMovingInfo.getTarget());IOStreamPair saslStreams = saslClient.socketSend(sock, unbufOut, unbufIn, km, accessToken, blkMovingInfo.getTarget());
unbufOut = saslStreams.out;
unbufIn = saslStreams.in;
out = new DataOutputStream(new BufferedOutputStream(unbufOut, ioFileBufferSize));
in = new DataInputStream(new BufferedInputStream(unbufIn, ioFileBufferSize));
sendRequest(out, eb, accessToken, blkMovingInfo.getSource(), blkMovingInfo.getTargetStorageType());
receiveResponse(in);
LOG.info("Successfully moved block:{} from src:{} to destin:{} for" + " satisfying storageType:{}", blkMovingInfo.getBlock(), blkMovingInfo.getSource(), blkMovingInfo.getTarget(), blkMovingInfo.getTargetStorageType());
return BlockMovementStatus.DN_BLK_STORAGE_MOVEMENT_SUCCESS;
} catch (BlockPinningException e) {
      // Pinned block won't be able to move to a different node. So, it's not
      // required to do retries, just marked as SUCCESS.
      LOG.debug("Pinned block can't be moved, so skipping block:{}", blkMovingInfo.getBlock(), e);
return BlockMovementStatus.DN_BLK_STORAGE_MOVEMENT_SUCCESS;
} finally {
      IOUtils.closeStream(out);
      IOUtils.closeStream(in);
IOUtils.closeSocket(sock);
}
} | 3.26 |
hadoop_ReadWriteDiskValidatorMetrics_sourceName_rdh | /**
* Get a source name by given directory name.
*
* @param dirName
* directory name
* @return the source name
*/
protected static String sourceName(String dirName) {
    StringBuilder v8 = new StringBuilder(RECORD_INFO.name());
    v8.append(",dir=").append(dirName);
    return v8.toString();
  } | 3.26 |
hadoop_ReadWriteDiskValidatorMetrics_addWriteFileLatency_rdh | /**
* Add the file write latency to {@link MutableQuantiles} metrics.
*
* @param writeLatency
* file write latency in microseconds
*/
public void addWriteFileLatency(long writeLatency) {
if (fileWriteQuantiles != null) {
      for (MutableQuantiles q : fileWriteQuantiles) {
q.add(writeLatency);
}
}
}
/**
* Add the file read latency to {@link MutableQuantiles} | 3.26 |
hadoop_ReadWriteDiskValidatorMetrics_diskCheckFailed_rdh | /**
* Increase the failure count and update the last failure timestamp.
*/
public void diskCheckFailed() {
failureCount.incr();
lastFailureTime.set(System.nanoTime());
} | 3.26 |
hadoop_ReadWriteDiskValidatorMetrics_getFileReadQuantiles_rdh | /**
* Get {@link MutableQuantiles} metrics for the file read time.
*
* @return {@link MutableQuantiles} metrics for the file read time
*/
@VisibleForTesting
protected MutableQuantiles[] getFileReadQuantiles() {
return fileReadQuantiles;
} | 3.26 |
hadoop_ReadWriteDiskValidatorMetrics_getFileWriteQuantiles_rdh | /**
* Get {@link MutableQuantiles} metrics for the file write time.
*
* @return {@link MutableQuantiles} metrics for the file write time
*/
@VisibleForTesting
protected MutableQuantiles[] getFileWriteQuantiles() {
return fileWriteQuantiles;
} | 3.26 |
hadoop_ApplicationConstants_$_rdh | /**
* Expand the environment variable based on client OS environment variable
* expansion syntax (e.g. $VAR for Linux and %VAR% for Windows).
* <p>
* Note: Use $$() method for cross-platform practice i.e. submit an
* application from a Windows client to a Linux/Unix server or vice versa.
* </p>
*
* @return expanded environment variable.
*/
  public String $() {
    if (Shell.WINDOWS) {
return ("%" + variable) + "%";
} else {
return "$" + variable;
}
} | 3.26 |
hadoop_ApplicationConstants_$$_rdh | /**
* Expand the environment variable in platform-agnostic syntax. The
* parameter expansion marker "{{VAR}}" will be replaced with real parameter
* expansion marker ('%' for Windows and '$' for Linux) by NodeManager on
* container launch. For example: {{VAR}} will be replaced as $VAR on Linux,
* and %VAR% on Windows.
*
* @return expanded environment variable.
*/
@Public
@Unstable
public String $$() {
return (PARAMETER_EXPANSION_LEFT + variable) + PARAMETER_EXPANSION_RIGHT;
} | 3.26 |
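A short sketch contrasting the two expansion styles from $() and $$() above; the classpath string built here is illustrative.

```java
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;

/** Illustrative only: the two expansion styles for container environment variables. */
final class EnvExpansionSketch {
  public static void main(String[] args) {
    // $():  client-side syntax -> "$JAVA_HOME" on Linux, "%JAVA_HOME%" on Windows
    String clientSide = Environment.JAVA_HOME.$();
    // $$(): platform-agnostic marker -> "{{JAVA_HOME}}", expanded by the NodeManager at launch
    String crossPlatform = Environment.JAVA_HOME.$$();
    String classpath = crossPlatform + "/lib/*" + ApplicationConstants.CLASS_PATH_SEPARATOR + "./*";
    System.out.println(clientSide + " | " + classpath);
  }
}
```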
hadoop_InnerJoinRecordReader_combine_rdh | /**
* Return true iff the tuple is full (all data sources contain this key).
*/
protected boolean combine(Object[] srcs, TupleWritable dst) {
assert srcs.length == dst.size();
for (int v0 = 0; v0 < srcs.length; ++v0) {
if (!dst.has(v0)) {
return false;
}
}
return true;
} | 3.26 |
hadoop_CosNFileSystem_validatePath_rdh | /**
* Validate the path from the bottom up.
*
* @param path
* The path to be validated
* @throws FileAlreadyExistsException
* The specified path is an existing file
* @throws IOException
 * IO failure while getting the file status
 * of the specified path.
*/
private void validatePath(Path path) throws IOException {
Path parent = path.getParent();
do {
try {
FileStatus fileStatus = getFileStatus(parent);
if (fileStatus.isDirectory()) {
break;
} else {
throw new FileAlreadyExistsException(String.format("Can't make directory for path '%s', it is a file.", parent));
}
} catch (FileNotFoundException e) {
f0.debug("The Path: [{}] does not exist.", path);
}
parent = parent.getParent();
} while (parent != null );
} | 3.26 |
hadoop_CosNFileSystem_listStatus_rdh | /**
* <p>
* If <code>f</code> is a file, this method will make a single call to COS.
* If <code>f</code> is a directory,
* this method will make a maximum of ( <i>n</i> / 199) + 2 calls to cos,
* where <i>n</i> is the total number of files
* and directories contained directly in <code>f</code>.
* </p>
*/
  @Override
  public FileStatus[] listStatus(Path f) throws IOException {
Path absolutePath = makeAbsolute(f);
String key = pathToKey(absolutePath);
    if (key.length() > 0) {
FileStatus fileStatus = this.getFileStatus(f);
if (fileStatus.isFile()) {
return new FileStatus[]{ fileStatus };
}
}
if (!key.endsWith(PATH_DELIMITER)) {
      key += PATH_DELIMITER;
}
URI pathUri = absolutePath.toUri();
Set<FileStatus> status = new TreeSet<>();
String priorLastKey = null;
do {
PartialListing v41 = store.list(key, Constants.COS_MAX_LISTING_LENGTH, priorLastKey, false);
for (FileMetadata fileMetadata : v41.getFiles()) {
Path subPath = keyToPath(fileMetadata.getKey());
if (fileMetadata.getKey().equals(key)) {
// this is just the directory we have been asked to list.
f0.debug("The file list contains the COS key [{}] to be listed.", key);
} else {
status.add(newFile(fileMetadata, subPath));
}
}
for (FileMetadata commonPrefix : v41.getCommonPrefixes()) {
Path subPath = keyToPath(commonPrefix.getKey());
String relativePath = pathUri.relativize(subPath.toUri()).getPath();
status.add(newDirectory(commonPrefix, new Path(absolutePath, relativePath)));
}
priorLastKey = v41.getPriorLastKey();
} while (priorLastKey != null );
return status.toArray(new FileStatus[status.size()]);
} | 3.26 |
hadoop_CosNFileSystem_append_rdh | /**
* This optional operation is not yet supported.
*/
@Override
public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) throws IOException {
throw new IOException("Not supported");
} | 3.26 |
hadoop_CosNFileSystem_mkDirRecursively_rdh | /**
* Recursively create a directory.
*
* @param f
* Absolute path to the directory.
* @param permission
* Directory permissions. Permission does not work for
* the CosN filesystem currently.
* @return Return true if the creation was successful, throw a IOException.
* @throws IOException
* The specified path already exists or an error
* creating the path.
*/
  public boolean mkDirRecursively(Path f, FsPermission permission) throws IOException {
Path v50 = makeAbsolute(f);
List<Path> paths = new ArrayList<>();
do {
paths.add(v50);
v50 = v50.getParent();
} while (v50 != null );
for (Path path : paths) {
if (path.equals(new Path(CosNFileSystem.PATH_DELIMITER))) {
break;
}
try {
FileStatus fileStatus = getFileStatus(path);
if (fileStatus.isFile()) {
throw new FileAlreadyExistsException(String.format("Can't make directory for path: %s, " + "since it is a file.", f));
}
if (fileStatus.isDirectory()) {
break;
}
} catch (FileNotFoundException e) {
f0.debug("Making dir: [{}] in COS", f);
String folderPath = pathToKey(makeAbsolute(f));
if (!folderPath.endsWith(PATH_DELIMITER)) {
          folderPath += PATH_DELIMITER;
        }
store.storeEmptyFile(folderPath);
}
}
return true;
} | 3.26 |
hadoop_CosNFileSystem_setWorkingDirectory_rdh | /**
* Set the working directory to the given directory.
*/
@Override
  public void setWorkingDirectory(Path newDir) {
    workingDir = newDir;
  } | 3.26 |
hadoop_NamenodeStatusReport_getState_rdh | /**
* Get the state of the Namenode being monitored.
*
* @return State of the Namenode.
*/
public FederationNamenodeServiceState getState() {
if (!registrationValid) {
return FederationNamenodeServiceState.UNAVAILABLE;
} else if (haStateValid) {
return FederationNamenodeServiceState.getState(f1);
} else {
return FederationNamenodeServiceState.ACTIVE;
}
} | 3.26 |
hadoop_NamenodeStatusReport_getNumDecommissioningDatanodes_rdh | /**
 * Get the number of decommissioning nodes.
 *
 * @return The number of decommissioning nodes.
*/
public int getNumDecommissioningDatanodes() {
return this.decomDatanodes;
} | 3.26 |
hadoop_NamenodeStatusReport_getNumDeadDatanodes_rdh | /**
* Get the number of dead nodes.
*
* @return The number of dead nodes.
*/
public int getNumDeadDatanodes() {
return this.deadDatanodes;
} | 3.26 |
hadoop_NamenodeStatusReport_getNumInMaintenanceDeadDataNodes_rdh | /**
* Get the number of dead in maintenance nodes.
*
* @return The number of dead in maintenance nodes.
*/
public int getNumInMaintenanceDeadDataNodes() {
return this.f2;
} | 3.26 |
hadoop_NamenodeStatusReport_getPendingSPSPaths_rdh | /**
* Returns the number of paths to be processed by storage policy satisfier.
*
* @return The number of paths to be processed by sps.
*/
public int getPendingSPSPaths() {
return this.pendingSPSPaths;
} | 3.26 |
hadoop_NamenodeStatusReport_getScheduledReplicationBlocks_rdh | /**
* Blocks scheduled for replication.
*
* @return - num of blocks scheduled for replication
*/
public long getScheduledReplicationBlocks() {
return this.scheduledReplicationBlocks;
} | 3.26 |
hadoop_NamenodeStatusReport_getLifelineAddress_rdh | /**
* Get the Lifeline RPC address.
*
* @return The Lifeline RPC address.
*/
public String getLifelineAddress() {
return this.lifelineAddress;
} | 3.26 |
hadoop_NamenodeStatusReport_getNumInMaintenanceLiveDataNodes_rdh | /**
* Get the number of live in maintenance nodes.
*
* @return The number of live in maintenance nodes.
*/
public int getNumInMaintenanceLiveDataNodes() {
return this.inMaintenanceLiveDataNodes;
} | 3.26 |
hadoop_NamenodeStatusReport_getNumLiveDatanodes_rdh | /**
 * Get the number of live nodes.
 *
 * @return The number of live nodes.
*/
public int getNumLiveDatanodes() {
return this.liveDatanodes;
} | 3.26 |
hadoop_NamenodeStatusReport_getClusterId_rdh | /**
* Get the cluster identifier.
*
* @return The cluster identifier.
*/
public String getClusterId() {
return this.clusterId;
} | 3.26 |
hadoop_NamenodeStatusReport_getServiceAddress_rdh | /**
* Get the Service RPC address.
*
* @return The Service RPC address.
*/
public String getServiceAddress() {
return this.serviceAddress;
} | 3.26 |
hadoop_NamenodeStatusReport_statsValid_rdh | /**
* If the statistics are valid.
*
* @return If the statistics are valid.
*/
public boolean statsValid() {
return this.statsValid;
} | 3.26 |
hadoop_NamenodeStatusReport_getNameserviceId_rdh | /**
* Get the name service identifier.
*
* @return The name service identifier.
*/
public String getNameserviceId() {
return this.nameserviceId;
} | 3.26 |
hadoop_NamenodeStatusReport_getNumStaleDatanodes_rdh | /**
* Get the number of stale nodes.
*
* @return The number of stale nodes.
 */
public int getNumStaleDatanodes() {
return this.staleDatanodes;
} | 3.26 |
hadoop_NamenodeStatusReport_getNamenodeId_rdh | /**
* Get the namenode identifier.
*
* @return The namenode identifier.
*/
public String getNamenodeId() {
return this.namenodeId;
} | 3.26 |
hadoop_NamenodeStatusReport_setNamespaceInfo_rdh | /**
* Set the namespace information.
*
* @param info
* Namespace information.
*/
public void setNamespaceInfo(NamespaceInfo info) {
this.clusterId = info.getClusterID();
this.f0 = info.getBlockPoolID();
this.registrationValid = true;
} | 3.26 |
hadoop_NamenodeStatusReport_getNumBlocks_rdh | /**
* Get the number of blocks.
*
* @return The number of blocks.
*/
public long getNumBlocks() {
return this.numOfBlocks;
} | 3.26 |
hadoop_NamenodeStatusReport_getNumOfBlocksPendingReplication_rdh | /**
* Get the number of pending replication blocks.
*
* @return Number of pending replication blocks.
*/
public long getNumOfBlocksPendingReplication() {
    return this.numOfBlocksPendingReplication;
} | 3.26 |
hadoop_NamenodeStatusReport_getBlockPoolId_rdh | /**
* Get the block pool identifier.
*
* @return The block pool identifier.
*/
public String getBlockPoolId() {
return this.f0;
} | 3.26 |
hadoop_NamenodeStatusReport_registrationValid_rdh | /**
* If the registration is valid.
*
* @return If the registration is valid.
*/
public boolean registrationValid() {
return this.registrationValid;
} | 3.26 |
hadoop_NamenodeStatusReport_haStateValid_rdh | /**
* If the HA state is valid.
*
* @return If the HA state is valid.
*/
public boolean haStateValid() {
return this.haStateValid;
} | 3.26 |
hadoop_NamenodeStatusReport_getNumBlocksMissing_rdh | /**
* Get the number of missing blocks.
*
* @return Number of missing blocks.
*/
public long getNumBlocksMissing() {
return this.numOfBlocksMissing;
} | 3.26 |
hadoop_NamenodeStatusReport_getNumDecomLiveDatanodes_rdh | /**
* Get the number of live decommissioned nodes.
*
* @return The number of live decommissioned nodes.
*/
public int getNumDecomLiveDatanodes() {
return this.liveDecomDatanodes;
} | 3.26 |
hadoop_NamenodeStatusReport_getAvailableSpace_rdh | /**
* Get the available space.
*
* @return The available space.
*/
public long getAvailableSpace() {
    return this.availableSpace;
} | 3.26 |
hadoop_NamenodeStatusReport_setHAServiceState_rdh | /**
* Set the HA service state.
*
* @param state
* The HA service state to set.
*/
public void setHAServiceState(HAServiceState state) {
this.f1 = state;
this.haStateValid = true;
} | 3.26 |
hadoop_NamenodeStatusReport_getProvidedSpace_rdh | /**
* Get the space occupied by provided storage.
*
* @return the provided capacity.
*/
public long getProvidedSpace() {
return this.providedSpace;
} | 3.26 |
hadoop_NamenodeStatusReport_setRegistrationValid_rdh | /**
* Set the validity of registration.
*
* @param isValid
* The desired value to be set.
*/
public void setRegistrationValid(boolean isValid) {
    this.registrationValid = isValid;
  } | 3.26 |
hadoop_NamenodeStatusReport_getWebScheme_rdh | /**
* Get the scheme of web address.
*
* @return The scheme of web address.
*/
public String getWebScheme() {
return this.webScheme;
} | 3.26 |
hadoop_NamenodeStatusReport_m0_rdh | /**
* Set the namenode blocks information.
*
* @param numCorruptFiles
* number of corrupt files.
* @param numOfMissingBlocksWithReplicationFactorOne
* number of missing
* blocks with rep one.
* @param highestPriorityLowRedundancyRepBlocks
* number of high priority low
* redundancy rep blocks.
* @param highPriorityLowRedundancyECBlocks
* number of high priority low
* redundancy EC blocks.
*/
public void m0(int numCorruptFiles, long numOfMissingBlocksWithReplicationFactorOne, long highestPriorityLowRedundancyRepBlocks, long highPriorityLowRedundancyECBlocks) {
this.corruptFilesCount = numCorruptFiles;
this.numberOfMissingBlocksWithReplicationFactorOne = numOfMissingBlocksWithReplicationFactorOne;
this.highestPriorityLowRedundancyReplicatedBlocks = highestPriorityLowRedundancyRepBlocks;
this.highestPriorityLowRedundancyECBlocks = highPriorityLowRedundancyECBlocks;
} | 3.26 |
hadoop_NamenodeStatusReport_getNumDecomDeadDatanodes_rdh | /**
* Get the number of dead decommissioned nodes.
*
* @return The number of dead decommissioned nodes.
*/
public int getNumDecomDeadDatanodes() {
return this.deadDecomDatanodes;
} | 3.26 |
hadoop_NamenodeStatusReport_getNumOfBlocksPendingDeletion_rdh | /**
* Get the number of pending deletion blocks.
*
* @return Number of pending deletion blocks.
 */
public long getNumOfBlocksPendingDeletion() {
return this.numOfBlocksPendingDeletion;
} | 3.26 |
hadoop_NamenodeStatusReport_getRpcAddress_rdh | /**
* Get the RPC address.
*
* @return The RPC address.
*/
public String getRpcAddress() {
return this.rpcAddress;
} | 3.26 |
hadoop_NamenodeStatusReport_getNumEnteringMaintenanceDataNodes_rdh | /**
* Get the number of entering maintenance nodes.
*
* @return The number of entering maintenance nodes.
*/
public int getNumEnteringMaintenanceDataNodes() {
return this.enteringMaintenanceDataNodes;
} | 3.26 |
hadoop_NamenodeStatusReport_getCorruptFilesCount_rdh | /**
* Get the number of corrupt files.
*
* @return the total number of corrupt files
*/
public int getCorruptFilesCount() {
return this.corruptFilesCount;
} | 3.26 |
hadoop_NamenodeStatusReport_getNumFiles_rdh | /**
* Get the number of files.
*
* @return The number of files.
*/
public long getNumFiles() {
return this.numOfFiles;
} | 3.26 |
hadoop_NamenodeStatusReport_getTotalSpace_rdh | /**
* Get the total space.
*
* @return The total space.
 */
public long getTotalSpace() {
return this.totalSpace;
} | 3.26 |
hadoop_NamenodeStatusReport_getWebAddress_rdh | /**
* Get the web address.
*
* @return The web address.
*/
public String getWebAddress() {
return this.webAddress;
} | 3.26 |
hadoop_NamenodeStatusReport_getNumOfBlocksUnderReplicated_rdh | /**
* Get the number of under replicated blocks.
*
* @return Number of under replicated blocks.
*/
public long getNumOfBlocksUnderReplicated() {
return this.numOfBlocksUnderReplicated;
} | 3.26 |
hadoop_NamenodeStatusReport_setNamesystemInfo_rdh | /**
* Set the filesystem information.
*
* @param available
* Available capacity.
* @param total
* Total capacity.
* @param numFiles
* Number of files.
* @param numBlocks
* Total number of blocks.
* @param numBlocksMissing
* Number of missing blocks.
* @param numBlocksPendingReplication
* Number of blocks pending replication.
* @param numBlocksUnderReplicated
* Number of blocks under replication.
* @param numBlocksPendingDeletion
* Number of blocks pending deletion.
* @param providedStorageSpace
* Space in provided storage.
* @param numPendingSPSPaths
* The number of paths to be processed by storage policy satisfier.
*/
public void setNamesystemInfo(long available, long total, long numFiles, long numBlocks, long numBlocksMissing, long numBlocksPendingReplication, long numBlocksUnderReplicated, long numBlocksPendingDeletion, long providedStorageSpace, int numPendingSPSPaths) {
this.totalSpace = total;
this.availableSpace = available;
this.numOfBlocks = numBlocks;
this.numOfBlocksMissing = numBlocksMissing;
this.numOfBlocksPendingReplication = numBlocksPendingReplication;
this.numOfBlocksUnderReplicated = numBlocksUnderReplicated;
this.numOfBlocksPendingDeletion = numBlocksPendingDeletion;
this.numOfFiles = numFiles;
this.statsValid = true;
this.providedSpace = providedStorageSpace;
this.pendingSPSPaths = numPendingSPSPaths;
} | 3.26 |
hadoop_NamenodeStatusReport_setDatanodeInfo_rdh | /**
* Set the datanode information.
*
* @param numLive
* Number of live nodes.
* @param numDead
* Number of dead nodes.
* @param numStale
* Number of stale nodes.
* @param numDecom
* Number of decommissioning nodes.
* @param numLiveDecom
* Number of decommissioned live nodes.
* @param numDeadDecom
* Number of decommissioned dead nodes.
* @param numInMaintenanceLive
* Number of in maintenance live nodes.
* @param numInMaintenanceDead
* Number of in maintenance dead nodes.
* @param numEnteringMaintenance
* Number of entering maintenance nodes.
* @param numScheduledReplicationBlocks
* Number of scheduled rep. blocks.
*/
public void setDatanodeInfo(int numLive, int numDead, int numStale, int numDecom, int numLiveDecom, int numDeadDecom,
int numInMaintenanceLive, int numInMaintenanceDead, int numEnteringMaintenance,
long numScheduledReplicationBlocks) {
this.liveDatanodes = numLive;
this.deadDatanodes = numDead;
this.staleDatanodes = numStale;
    this.decomDatanodes = numDecom;
    this.liveDecomDatanodes = numLiveDecom;
    this.deadDecomDatanodes = numDeadDecom;
this.inMaintenanceLiveDataNodes = numInMaintenanceLive;
this.f2 = numInMaintenanceDead;
this.enteringMaintenanceDataNodes = numEnteringMaintenance;
this.statsValid = true;
this.scheduledReplicationBlocks = numScheduledReplicationBlocks;
} | 3.26 |
hadoop_NodePlan_setNodeUUID_rdh | /**
* Sets the Node UUID.
*
* @param nodeUUID
* - UUID of the node.
*/
public void setNodeUUID(String nodeUUID) {
this.nodeUUID = nodeUUID;
} | 3.26 |
hadoop_NodePlan_getPort_rdh | /**
* Gets the DataNode RPC Port.
*
* @return port
*/
public int getPort() {
    return port;
} | 3.26 |
hadoop_NodePlan_setPort_rdh | /**
* Sets the DataNode RPC Port.
*
* @param port
* - int
*/
public void setPort(int port) {
this.port = port;
} | 3.26 |
hadoop_NodePlan_getTimeStamp_rdh | /**
* returns timestamp when this plan was created.
*
* @return long
*/
public long getTimeStamp() {
return timeStamp;
} | 3.26 |
hadoop_NodePlan_getVolumeSetPlans_rdh | /**
* Returns a Map of VolumeSetIDs and volumeSetPlans.
*
* @return Map
*/
public List<Step> getVolumeSetPlans() {
return volumeSetPlans;
} | 3.26 |
hadoop_NodePlan_setNodeName_rdh | /**
* Sets Node Name.
*
* @param nodeName
* - Name
*/
public void setNodeName(String nodeName) {
this.f0 = nodeName;
} | 3.26 |
hadoop_NodePlan_toJson_rdh | /**
* Returns a Json representation of NodePlan.
*
* @return - json String
* @throws IOException
*/
public String toJson() throws IOException {
return WRITER.writeValueAsString(this);
} | 3.26 |
hadoop_NodePlan_getNodeUUID_rdh | /**
* gets the Node UUID.
*
* @return Node UUID.
*/
public String getNodeUUID() {
return nodeUUID;
} | 3.26 |
hadoop_NodePlan_setTimeStamp_rdh | /**
* Sets the timestamp when this plan was created.
*
* @param timeStamp
*/
public void setTimeStamp(long timeStamp) {
this.timeStamp = timeStamp;
} | 3.26 |
hadoop_NodePlan_addStep_rdh | /**
* Adds a step to the existing Plan.
*
* @param nextStep
* - nextStep
*/
void addStep(Step nextStep) {
Preconditions.checkNotNull(nextStep);
volumeSetPlans.add(nextStep);
} | 3.26 |
hadoop_NodePlan_setURI_rdh | /**
* Sets the DataNodeURI.
*
* @param dataNodeName
* - String
*/
public void setURI(String dataNodeName) {
this.f0 = dataNodeName;
} | 3.26 |
hadoop_NodePlan_getNodeName_rdh | /**
* Returns the DataNode URI.
*
* @return URI
*/
public String getNodeName() {
return f0;
} | 3.26 |
hadoop_ChainMapper_addMapper_rdh | /**
* Adds a {@link Mapper} class to the chain mapper.
*
* <p>
* The key and values are passed from one element of the chain to the next, by
* value. For the added Mapper the configuration given for it,
 * <code>mapperConf</code>, has precedence over the job's Configuration. This
* precedence is in effect when the task is running.
* </p>
* <p>
* IMPORTANT: There is no need to specify the output key/value classes for the
* ChainMapper, this is done by the addMapper for the last mapper in the chain
* </p>
*
* @param job
* The job.
* @param klass
* the Mapper class to add.
* @param inputKeyClass
* mapper input key class.
* @param inputValueClass
* mapper input value class.
* @param outputKeyClass
* mapper output key class.
* @param outputValueClass
* mapper output value class.
* @param mapperConf
* a configuration for the Mapper class. It is recommended to use a
* Configuration without default values using the
* <code>Configuration(boolean loadDefaults)</code> constructor with
* FALSE.
*/
public static void addMapper(Job job, Class<? extends Mapper> klass, Class<?> inputKeyClass, Class<?> inputValueClass, Class<?> outputKeyClass, Class<?> outputValueClass, Configuration mapperConf) throws IOException {
job.setMapperClass(ChainMapper.class);
job.setMapOutputKeyClass(outputKeyClass);
job.setMapOutputValueClass(outputValueClass);
Chain.addMapper(true, job, klass, inputKeyClass, inputValueClass, outputKeyClass, outputValueClass, mapperConf);
} | 3.26 |
hadoop_MountdBase_startTCPServer_rdh | /* Start TCP server */
private void startTCPServer() {
tcpServer = new SimpleTcpServer(rpcProgram.getPort(), rpcProgram, 1);
rpcProgram.startDaemons();
try {
tcpServer.run();
} catch (Throwable e) {
LOG.error("Failed to start the TCP server.", e);
if (tcpServer.getBoundPort() > 0) {
rpcProgram.unregister(PortmapMapping.TRANSPORT_TCP, tcpServer.getBoundPort());
}
tcpServer.shutdown();
terminate(1, e);
}
tcpBoundPort = tcpServer.getBoundPort();
} | 3.26 |
hadoop_LeveldbIterator_close_rdh | /**
* Closes the iterator.
*/
@Override
public void close() throws IOException {
try {
iter.close();
} catch (RuntimeException e) {
throw new IOException(e.getMessage(), e);
    }
  } | 3.26 |
hadoop_LeveldbIterator_hasNext_rdh | /**
* Returns <tt>true</tt> if the iteration has more elements.
*/
public boolean hasNext() throws DBException {
try {
return iter.hasNext();
} catch (DBException e) {
throw e;
} catch (RuntimeException e) {
throw new DBException(e.getMessage(), e);
}
} | 3.26 |
hadoop_LeveldbIterator_next_rdh | /**
* Returns the next element in the iteration.
*
* @return the next element in the iteration.
* @throws DBException
* DB Exception.
*/
@Override
public Map.Entry<byte[], byte[]> next() throws DBException {
try {
return iter.next();
} catch (DBException e) {
throw e;
} catch (RuntimeException e) {
throw new DBException(e.getMessage(), e);
}
} | 3.26 |
hadoop_LeveldbIterator_hasPrev_rdh | /**
*
* @return true if there is a previous entry in the iteration.
*/
public boolean hasPrev() throws DBException {
try {
return iter.hasPrev();
} catch (DBException e) {
throw e;
} catch (RuntimeException e) {
throw new DBException(e.getMessage(), e);
}
} | 3.26 |
hadoop_LeveldbIterator_peekNext_rdh | /**
* Returns the next element in the iteration, without advancing the
* iteration.
*
* @return the next element in the iteration.
* @throws DBException
* db Exception.
*/
public Map.Entry<byte[], byte[]> peekNext() throws DBException {
try {
return iter.peekNext();
} catch (DBException e) {
throw e;
} catch (RuntimeException e) {
throw new DBException(e.getMessage(), e);
}
} | 3.26 |
hadoop_LeveldbIterator_remove_rdh | /**
* Removes from the database the last element returned by the iterator.
*/
@Override
public void remove() throws DBException {
    try {
      iter.remove();
} catch (DBException e) {
throw e;
} catch (RuntimeException e) {
throw new DBException(e.getMessage(), e);
}
} | 3.26 |
hadoop_LeveldbIterator_seekToLast_rdh | /**
 * Repositions the iterator so it is at the end of the Database.
*/
public void seekToLast() throws DBException {
try {
      iter.seekToLast();
    } catch (DBException e) {
throw e;
} catch (RuntimeException e) {
throw new DBException(e.getMessage(), e);
}
} | 3.26 |
hadoop_LeveldbIterator_seekToFirst_rdh | /**
 * Repositions the iterator so it is at the beginning of the Database.
 */
public void seekToFirst() throws DBException {
try {
iter.seekToFirst();
} catch (DBException e) {
throw e;
    } catch (RuntimeException e) {
      throw new DBException(e.getMessage(), e);
}
} | 3.26 |
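A usage sketch of a full scan with this wrapper, combining the seekToFirst(), hasNext(), next() and close() methods from the rows above. The import path and the LeveldbIterator(DB) constructor are assumptions, as they are not shown in these rows.

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import org.apache.hadoop.yarn.server.utils.LeveldbIterator;  // package path assumed
import org.iq80.leveldb.DB;

/** Illustrative only: scanning every key with the LeveldbIterator wrapper. */
final class LeveldbScanSketch {
  static void dump(DB db) throws IOException {
    LeveldbIterator iter = new LeveldbIterator(db);  // LeveldbIterator(DB) constructor assumed
    try {
      iter.seekToFirst();                       // start at the beginning of the database
      while (iter.hasNext()) {                  // failures surface as DBException, not raw RuntimeException
        Map.Entry<byte[], byte[]> entry = iter.next();
        System.out.println(new String(entry.getKey(), StandardCharsets.UTF_8));
      }
    } finally {
      iter.close();                             // close() rethrows failures as IOException
    }
  }
}
```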
hadoop_LeveldbIterator_m0_rdh | /**
 * Repositions the iterator so the key of the next BlockElement
 * returned is greater than or equal to the specified targetKey.
*
* @param key
* key of the next BlockElement.
* @throws DBException
* db Exception.
*/
public void m0(byte[] key) throws DBException {
try {
iter.seek(key);
} catch (DBException e) {
throw e;
} catch (RuntimeException e) {
throw new DBException(e.getMessage(), e);
}
} | 3.26 |
hadoop_LeveldbIterator_prev_rdh | /**
*
* @return the previous element in the iteration and rewinds the iteration.
*/
public Map.Entry<byte[], byte[]> prev() throws DBException {
try {
return iter.prev();
} catch (DBException e) {
throw e;
} catch (RuntimeException e) {
throw new DBException(e.getMessage(), e);
}
} | 3.26 |
hadoop_ManifestStoreOperations_recovered_rdh | /**
* Did some form of recovery take place?
*
* @return true if the commit succeeded through some form of (etag-based) recovery
*/
public boolean recovered() {
return recovered;
} | 3.26 |