name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68)
---|---|---|
hadoop_JobACLsManager_isMRAdmin_rdh | /**
* Is the calling user an admin for the mapreduce cluster,
* i.e. a member of mapreduce.cluster.administrators?
*
* @return true, if user is an admin
*/
boolean isMRAdmin(UserGroupInformation callerUGI) {
if (adminAcl.isUserAllowed(callerUGI)) {
return true;
}
return false;
} | 3.26 |
hadoop_ResourceCalculator_compare_rdh | /**
* On a cluster with capacity {@code clusterResource}, compare {@code lhs}
* and {@code rhs} considering all resources.
*
* @param clusterResource
* cluster capacity
* @param lhs
* First {@link Resource} to compare
* @param rhs
* Second {@link Resource} to compare
* @return -1 if {@code lhs} is smaller, 0 if equal and 1 if it is larger
*/
public int compare(Resource clusterResource, Resource lhs,
Resource rhs) {
return compare(clusterResource, lhs, rhs, false);
} | 3.26 |
hadoop_AggregateAppResourceUsage_getVcoreSeconds_rdh | /**
*
* @return the vcoreSeconds
*/
public long getVcoreSeconds() {
return RMServerUtils.getOrDefault(resourceSecondsMap, ResourceInformation.VCORES.getName(), 0L);
} | 3.26 |
hadoop_AggregateAppResourceUsage_getMemorySeconds_rdh | /**
*
* @return the memorySeconds
*/
public long getMemorySeconds() {
return RMServerUtils.getOrDefault(resourceSecondsMap, ResourceInformation.MEMORY_MB.getName(), 0L);
} | 3.26 |
hadoop_WritableName_getClass_rdh | /**
* Return the class for a name.
* Default is {@link Class#forName(String)}.
*
* @param name
* input name.
* @param conf
* input configuration.
* @return class for a name.
* @throws IOException
* raised on errors performing I/O.
*/
public static synchronized Class<?> getClass(String name, Configuration conf) throws IOException {
Class<?> writableClass = NAME_TO_CLASS.get(name);
if (writableClass != null)
return writableClass;
try {
return conf.getClassByName(name);
} catch (ClassNotFoundException e) {
IOException newE = new IOException("WritableName can't load class: " + name);
newE.initCause(e);
throw newE;
}
} | 3.26 |
hadoop_WritableName_getName_rdh | /**
* Return the name for a class.
* Default is {@link Class#getName()}.
*
* @param writableClass
* input writableClass.
* @return name for a class.
*/
public static synchronized String getName(Class<?> writableClass) {
String name = CLASS_TO_NAME.get(writableClass);
if (name != null)
return name;
return writableClass.getName();
} | 3.26 |
hadoop_WritableName_setName_rdh | /**
* Set the name that a class should be known as to something other than the
* class name.
*
* @param writableClass
* input writableClass.
* @param name
* input name.
*/
public static synchronized void setName(Class<?> writableClass, String name) {
CLASS_TO_NAME.put(writableClass, name);
NAME_TO_CLASS.put(name, writableClass);
} | 3.26 |
hadoop_WritableName_addName_rdh | /**
* Add an alternate name for a class.
*
* @param writableClass
* input writableClass.
* @param name
* input name.
*/
public static synchronized void addName(Class<?> writableClass, String name) {
NAME_TO_CLASS.put(name, writableClass);
} | 3.26 |
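The four WritableName entries above (getClass, getName, setName, addName) form a small alias registry for Writable class names. A minimal usage sketch, assuming a vanilla Configuration and using org.apache.hadoop.io.Text as the registered class; the alias string "legacy.io.Text" is made up purely for illustration:

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableName;

public class WritableNameAliasExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();

    // Register an alternate (legacy) name for Text, e.g. to keep old
    // serialized data readable after a package rename.
    WritableName.setName(Text.class, "legacy.io.Text");

    // getName now returns the registered alias rather than Text.class.getName().
    System.out.println(WritableName.getName(Text.class)); // legacy.io.Text

    // getClass resolves the alias back to the class; names that were never
    // registered fall through to Configuration#getClassByName.
    Class<?> resolved = WritableName.getClass("legacy.io.Text", conf);
    System.out.println(resolved.getName()); // org.apache.hadoop.io.Text
  }
}
```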
hadoop_AbstractS3ACommitter_getOutputPath_rdh | /**
* Final path of output, in the destination FS.
*
* @return the path
*/
@Override
public final Path getOutputPath() {
return outputPath;
} | 3.26 |
hadoop_AbstractS3ACommitter_initiateTaskOperation_rdh | /**
* Start a task commit/abort commit operation.
* This may have a different thread count.
* If configured to collect statistics,
* the IOStatisticsContext is reset.
*
* @param context
* job or task context
* @return a commit context through which the operations can be invoked.
* @throws IOException
* failure.
*/
protected CommitContext initiateTaskOperation(final JobContext context) throws IOException {
CommitContext commitContext = getCommitOperations().createCommitContext(context, getOutputPath(), getTaskCommitThreadCount(context), IOStatisticsContext.getCurrentIOStatisticsContext());
commitContext.maybeResetIOStatisticsContext();
return commitContext;
} | 3.26 |
hadoop_AbstractS3ACommitter_setDestFS_rdh | /**
* Set the destination FS: the FS of the final output.
*
* @param destFS
* destination FS.
*/
protected void setDestFS(FileSystem destFS) {
this.destFS = destFS;
} | 3.26 |
hadoop_AbstractS3ACommitter_updateCommonContext_rdh | /**
* Add jobID to current context.
*/
protected final void updateCommonContext() {
currentAuditContext().put(AuditConstants.PARAM_JOB_ID, f0);
} | 3.26 |
hadoop_AbstractS3ACommitter_abortPendingUploads_rdh | /**
* Abort all pending uploads in the list.
*
* @param commitContext
* commit context
* @param pending
* pending uploads
* @param suppressExceptions
* should exceptions be suppressed?
* @param deleteRemoteFiles
* should remote files be deleted?
* @throws IOException
* any exception raised
*/
protected void abortPendingUploads(final CommitContext commitContext, final ActiveCommit pending, final boolean suppressExceptions, final boolean deleteRemoteFiles) throws IOException {
if (pending.isEmpty()) {
LOG.info("{}: no pending commits to abort", getRole());
} else {
try (DurationInfo d = new DurationInfo(LOG, "Aborting %s uploads", pending.size())) {
TaskPool.foreach(pending.getSourceFiles()).executeWith(commitContext.getOuterSubmitter()).suppressExceptions(suppressExceptions).run(path -> loadAndAbort(commitContext, pending, path, suppressExceptions, deleteRemoteFiles));
}
}
} | 3.26 |
hadoop_AbstractS3ACommitter_getUUID_rdh | /**
* The Job UUID, as passed in or generated.
*
* @return the UUID for the job.
*/
@VisibleForTesting
public final String getUUID() {
return f0;
} | 3.26 |
hadoop_AbstractS3ACommitter_precommitCheckPendingFiles_rdh | /**
* Run a precommit check that all files are loadable.
* This check avoids the situation where the inability to read
* a file only surfaces partway through the job commit, so
* results in the destination being tainted.
*
* @param commitContext
* commit context
* @param pending
* the pending operations
* @throws IOException
* any failure
*/
protected void precommitCheckPendingFiles(final CommitContext commitContext, final ActiveCommit pending) throws IOException {
FileSystem sourceFS = pending.getSourceFS();
try (DurationInfo ignored = new DurationInfo(LOG, "Preflight Load of pending files")) {
TaskPool.foreach(pending.getSourceFiles()).stopOnFailure().suppressExceptions(false).executeWith(commitContext.getOuterSubmitter()).run(status -> PersistentCommitData.load(sourceFS, status, commitContext.getPendingSetSerializer()));
}
} | 3.26 |
hadoop_AbstractS3ACommitter_getJobAttemptPath_rdh | /**
* Compute the path where the output of a given job attempt will be placed.
*
* @param context
* the context of the job. This is used to get the
* application attempt ID.
* @return the path to store job attempt data.
*/
public Path getJobAttemptPath(JobContext context) {
return getJobAttemptPath(getAppAttemptId(context));
} | 3.26 |
hadoop_AbstractS3ACommitter_maybeCreateSuccessMarkerFromCommits_rdh | /**
* if the job requires a success marker on a successful job,
* create the file {@link CommitConstants#_SUCCESS}.
*
* While the classic committers create a 0-byte file, the S3A committers
* PUT up the contents of a {@link SuccessData} file.
*
* @param commitContext
* commit context
* @param pending
* the pending commits
* @return the success data, even if the marker wasn't created
* @throws IOException
* IO failure
*/
protected SuccessData maybeCreateSuccessMarkerFromCommits(final CommitContext commitContext, ActiveCommit pending) throws IOException {
List<String> filenames = new ArrayList<>(pending.size());
// The list of committed objects in pending is size limited in
// ActiveCommit.uploadCommitted.
filenames.addAll(pending.committedObjects);
// load in all the pending statistics
IOStatisticsSnapshot snapshot = new IOStatisticsSnapshot(pending.getIOStatistics());
// and the current statistics
snapshot.aggregate(getIOStatistics());
// and include the context statistics if enabled
if (commitContext.isCollectIOStatistics()) {
snapshot.aggregate(commitContext.getIOStatisticsContext().getIOStatistics());
}
return maybeCreateSuccessMarker(commitContext.getJobContext(), filenames, snapshot);
} | 3.26 |
hadoop_AbstractS3ACommitter_loadAndAbort_rdh | /**
* Load a pendingset file and abort all of its contents.
* Invoked within a parallel run; the commitContext thread
* pool is already busy/possibly full, so do not
* execute work through the same submitter.
*
* @param commitContext
* context to commit through
* @param activeCommit
* commit state
* @param status
* status of file to load
* @param deleteRemoteFiles
* should remote files be deleted?
* @throws IOException
* failure
*/
private void loadAndAbort(final CommitContext commitContext, final ActiveCommit activeCommit, final FileStatus status, final boolean suppressExceptions, final boolean deleteRemoteFiles) throws IOException {
final Path path = status.getPath();
commitContext.switchToIOStatisticsContext();
try (DurationInfo v28 = new DurationInfo(LOG, false, "Aborting %s", path)) {
PendingSet pendingSet = PersistentCommitData.load(activeCommit.getSourceFS(), status, commitContext.getPendingSetSerializer());
FileSystem fs = getDestFS();
TaskPool.foreach(pendingSet.getCommits()).executeWith(commitContext.getInnerSubmitter()).suppressExceptions(suppressExceptions).run(commit -> {
try {
commitContext.abortSingleCommit(commit);
} catch (FileNotFoundException e) {
// Commit ID was not known; file may exist.
// delete it if instructed to do so.
if (deleteRemoteFiles) {
fs.delete(commit.destinationPath(), false);
}
}
});
}
} | 3.26 |
hadoop_AbstractS3ACommitter_loadAndRevert_rdh | /**
* Load a pendingset file and revert all of its contents.
* Invoked within a parallel run; the commitContext thread
* pool is already busy/possibly full, so do not
* execute work through the same submitter.
*
* @param commitContext
* context to commit through
* @param activeCommit
* commit state
* @param status
* status of file to load
* @throws IOException
* failure
*/
private void loadAndRevert(final CommitContext commitContext, final ActiveCommit activeCommit, final FileStatus status) throws IOException {
final Path path = status.getPath();
commitContext.switchToIOStatisticsContext();
try (DurationInfo v25 = new DurationInfo(LOG, false, "Committing %s", path)) {
PendingSet pendingSet = PersistentCommitData.load(activeCommit.getSourceFS(), status, commitContext.getPendingSetSerializer());
TaskPool.foreach(pendingSet.getCommits()).suppressExceptions(true).run(commitContext::revertCommit);
}
} | 3.26 |
hadoop_AbstractS3ACommitter_setupTask_rdh | /**
* Task setup. Fails if the UUID was generated locally, and
* the same committer wasn't used for job setup.
* {@inheritDoc }
*
* @throws PathCommitException
* if the task UUID options are unsatisfied.
*/
@Override
public void setupTask(TaskAttemptContext context) throws IOException {
TaskAttemptID attemptID = context.getTaskAttemptID();
// update the context so that task IO in the same thread has
// the relevant values.
new AuditContextUpdater(context).updateCurrentAuditContext();
try (DurationInfo d = new DurationInfo(LOG, "Setup Task %s", attemptID)) {
// reject attempts to set up the task where the output won't be
// picked up
if ((!jobSetup) && (getUUIDSource() == JobUUIDSource.GeneratedLocally)) {
// on anything other than a test run, the context must not have been
// generated locally.
throw new PathCommitException(getOutputPath().toString(), (("Task attempt " + attemptID) + " ") + E_SELF_GENERATED_JOB_UUID);
}
Path taskAttemptPath = getTaskAttemptPath(context);
FileSystem fs = taskAttemptPath.getFileSystem(getConf());
// delete that task attempt path if somehow it was there
fs.delete(taskAttemptPath, true);
// create an empty directory
fs.mkdirs(taskAttemptPath);
}
} | 3.26 |
hadoop_AbstractS3ACommitter_cleanup_rdh | /**
* Cleanup the job context, including aborting anything pending
* and destroying the thread pool.
*
* @param commitContext
* commit context
* @param suppressExceptions
* should exceptions be suppressed?
* @throws IOException
* any failure if exceptions were not suppressed.
*/
protected void cleanup(CommitContext commitContext, boolean suppressExceptions) throws IOException {
try (DurationInfo d = new DurationInfo(LOG, "Cleanup job %s", jobIdString(commitContext.getJobContext()))) {
abortPendingUploadsInCleanup(suppressExceptions, commitContext);
} finally {
cleanupStagingDirs();
}
} | 3.26 |
hadoop_AbstractS3ACommitter_recoverTask_rdh | /**
* Task recovery is considered unsupported: warn and fail.
*
* @param taskContext
* Context of the task whose output is being recovered
* @throws IOException
* always.
*/
@Override
public void recoverTask(TaskAttemptContext taskContext) throws IOException {
LOG.warn("Cannot recover task {}", taskContext.getTaskAttemptID());
throw new PathCommitException(outputPath, String.format("Unable to recover task %s", taskContext.getTaskAttemptID()));
} | 3.26 |
hadoop_AbstractS3ACommitter_commitJob_rdh | /**
* Commit work.
* This consists of two stages: precommit and commit.
* <p>
* Precommit: identify pending uploads, then allow subclasses
* to validate the state of the destination and the pending uploads.
* Any failure here triggers an abort of all pending uploads.
* <p>
* Commit internal: do the final commit sequence.
* <p>
* The final commit action is to build the {@code _SUCCESS} file entry.
* </p>
*
* @param context
* job context
* @throws IOException
* any failure
*/
@Override
public void commitJob(JobContext context) throws IOException {
String id = jobIdString(context);
// the commit context is created outside a try-with-resources block
// so it can be used in exception handling.
CommitContext commitContext = null;
SuccessData successData = null;
IOException v42 = null;
String stage = "preparing";
try (DurationInfo d = new DurationInfo(LOG, "%s: commitJob(%s)", getRole(), id)) {
commitContext = initiateJobOperation(context);
ActiveCommit pending = listPendingUploadsToCommit(commitContext);
stage = "precommit";
preCommitJob(commitContext, pending);
stage = "commit";
commitJobInternal(commitContext, pending);
stage = "completed";
jobCompleted(true);
stage = "marker";
successData = maybeCreateSuccessMarkerFromCommits(commitContext, pending);
stage = "cleanup";
cleanup(commitContext, false);
} catch (IOException e) {
// failure. record it for the summary
v42 = e;
LOG.warn("Commit failure for job {}", id, e);
jobCompleted(false);
abortJobInternal(commitContext, true);
throw e;
} finally {
// save the report summary, even on failure
if (commitContext != null) {
if (successData == null) {
// if the commit did not get as far as creating success data, create one.
successData = m0(context, null, null, getDestFS().getConf());
}
// save quietly, so no exceptions are raised
maybeSaveSummary(stage, commitContext, successData, v42, true, true);
// and close that commit context
commitContext.close();
}
}
} | 3.26 |
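The commitJob entry above describes a two-stage protocol: precommit validation of the pending uploads, then the commit itself, ending with the _SUCCESS marker and a summary report. A minimal sketch of how a job driver exercises that sequence through the generic OutputCommitter API; the driver class and method are illustrative only, and in a real MapReduce/Spark job the framework issues these calls itself:

```java
import java.io.IOException;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.OutputCommitter;

public class CommitterDriverSketch {
  public static void driveJob(OutputCommitter committer, JobContext jobContext) throws IOException {
    committer.setupJob(jobContext);       // create dest dir, patch job UUID config
    try {
      // ... tasks run here, each calling setupTask()/commitTask() on their own contexts ...
      committer.commitJob(jobContext);    // precommit checks, commit uploads, write _SUCCESS
    } catch (IOException e) {
      // on failure the committer aborts pending uploads and cleans up
      committer.abortJob(jobContext, JobStatus.State.FAILED);
      throw e;
    }
  }
}
```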
hadoop_AbstractS3ACommitter_getDestS3AFS_rdh | /**
* Get the destination as an S3A Filesystem; casting it.
*
* @return the dest S3A FS.
* @throws IOException
* if the FS cannot be instantiated.
*/
public S3AFileSystem getDestS3AFS() throws IOException {
return ((S3AFileSystem) (getDestFS()));
} | 3.26 |
hadoop_AbstractS3ACommitter_abortPendingUploadsInCleanup_rdh | /**
* Abort all pending uploads to the destination directory during
* job cleanup operations.
* Note: this instantiates the thread pool if required.
*
* @param suppressExceptions
* should exceptions be suppressed
* @param commitContext
* commit context
* @throws IOException
* IO problem
*/
protected void abortPendingUploadsInCleanup(boolean suppressExceptions, CommitContext commitContext) throws IOException {
// return early if aborting is disabled.
if (!shouldAbortUploadsInCleanup()) {
LOG.debug("Not cleanup up pending uploads to {} as {} is false ", getOutputPath(), FS_S3A_COMMITTER_ABORT_PENDING_UPLOADS);
return;
}
Path dest = getOutputPath();
try (DurationInfo ignored = new DurationInfo(LOG, "Aborting all pending commits under %s", dest)) {
CommitOperations ops = getCommitOperations();
List<MultipartUpload> pending;
try {
pending = ops.listPendingUploadsUnderPath(dest);
} catch (IOException e) {
// Swallow any errors given this is best effort
LOG.debug("Failed to list pending uploads under {}", dest, e);
return;
}
if (!pending.isEmpty()) {
LOG.warn("{} pending uploads were found -aborting", pending.size());
LOG.warn("If other tasks/jobs are writing to {}," + "this action may cause them to fail", dest);
TaskPool.foreach(pending).executeWith(commitContext.getOuterSubmitter()).suppressExceptions(suppressExceptions).run(u -> commitContext.abortMultipartCommit(u.key(), u.uploadId()));
} else {
LOG.info("No pending uploads were found");
}
}
} | 3.26 |
hadoop_AbstractS3ACommitter_getUUIDSource_rdh | /**
* Source of the UUID.
*
* @return how the job UUID was retrieved/generated.
*/
@VisibleForTesting
public final JobUUIDSource getUUIDSource() {
return uuidSource;
} | 3.26 |
hadoop_AbstractS3ACommitter_getTaskAttemptPath_rdh | /**
* Compute the path where the output of a task attempt is stored until
* that task is committed. This may be the normal Task attempt path
* or it may be a subdirectory.
* The default implementation returns the value of
* {@link #getBaseTaskAttemptPath(TaskAttemptContext)};
* subclasses may return different values.
*
* @param context
* the context of the task attempt.
* @return the path where a task attempt should be stored.
*/
public Path getTaskAttemptPath(TaskAttemptContext context) {
return getBaseTaskAttemptPath(context);
} | 3.26 |
hadoop_AbstractS3ACommitter_loadAndCommit_rdh | /**
* Load a pendingset file and commit all of its contents.
* Invoked within a parallel run; the commitContext thread
* pool is already busy/possibly full, so do not
* execute work through the same submitter.
*
* @param commitContext
* context to commit through
* @param activeCommit
* commit state
* @param status
* file to load
* @throws IOException
* failure
*/
private void loadAndCommit(final CommitContext commitContext, final ActiveCommit activeCommit, final FileStatus status) throws IOException {
final Path path = status.getPath();
commitContext.switchToIOStatisticsContext();
try (DurationInfo ignored = new DurationInfo(LOG, "Loading and committing files in pendingset %s", path)) {
PendingSet pendingSet = PersistentCommitData.load(activeCommit.getSourceFS(), status, commitContext.getPendingSetSerializer());
String jobId = pendingSet.getJobId();
if ((!StringUtils.isEmpty(jobId)) && (!getUUID().equals(jobId))) {
throw new PathCommitException(path, String.format("Mismatch in Job ID (%s) and commit job ID (%s)", getUUID(), jobId));
}
TaskPool.foreach(pendingSet.getCommits()).stopOnFailure().suppressExceptions(false).executeWith(commitContext.getInnerSubmitter()).onFailure((commit, exception) -> commitContext.abortSingleCommit(commit)).abortWith(commitContext::abortSingleCommit).revertWith(commitContext::revertCommit).run(commit -> {
commitContext.commitOrFail(commit);
activeCommit.uploadCommitted(commit.getDestinationKey(), commit.getLength());
});
activeCommit.pendingsetCommitted(pendingSet.getIOStatistics());
}
} | 3.26 |
hadoop_AbstractS3ACommitter_maybeCreateSuccessMarker_rdh | /**
* if the job requires a success marker on a successful job,
* create the {@code _SUCCESS} file.
*
* While the classic committers create a 0-byte file, the S3A committers
* PUT up the contents of a {@link SuccessData} file.
* The file is returned, even if no marker is created.
* This is so it can be saved to a report directory.
*
* @param context
* job context
* @param filenames
* list of filenames.
* @param ioStatistics
* any IO Statistics to include
* @throws IOException
* IO failure
* @return the success data.
*/
protected SuccessData maybeCreateSuccessMarker(final JobContext context, final List<String> filenames, final IOStatisticsSnapshot ioStatistics) throws IOException {
SuccessData successData = m0(context, filenames, ioStatistics, getDestFS().getConf());
if (createJobMarker) {
// save it to the job dest dir
commitOperations.createSuccessMarker(getOutputPath(), successData, true);
}
return successData;
} | 3.26 |
hadoop_AbstractS3ACommitter_maybeIgnore_rdh | /**
* Log or rethrow a caught IOException.
*
* @param suppress
* should raised IOEs be suppressed?
* @param action
* action (for logging when the IOE is suppressed).
* @param ex
* exception
* @throws IOException
* if suppress == false
*/
protected void maybeIgnore(boolean suppress, String action, IOException ex) throws IOException {
if (suppress) {
LOG.debug(action, ex);
} else {
throw ex;
}
} | 3.26 |
hadoop_AbstractS3ACommitter_initiateJobOperation_rdh | /**
* Start the final job commit/abort commit operations.
* If configured to collect statistics,
* The IO StatisticsContext is reset.
*
* @param context
* job context
* @return a commit context through which the operations can be invoked.
* @throws IOException
* failure.
*/
protected CommitContext initiateJobOperation(final JobContext context) throws IOException {
IOStatisticsContext ioStatisticsContext = IOStatisticsContext.getCurrentIOStatisticsContext();
CommitContext commitContext = getCommitOperations().createCommitContext(context, getOutputPath(), getJobCommitThreadCount(context), ioStatisticsContext);
commitContext.maybeResetIOStatisticsContext();
return commitContext;
} | 3.26 |
hadoop_AbstractS3ACommitter_preCommitJob_rdh | /**
* Subclass-specific pre-Job-commit actions.
* The staging committers all load the pending files to verify that
* they can be loaded.
* The Magic committer does not, because of the overhead of reading files
* from S3 makes it too expensive.
*
* @param commitContext
* commit context
* @param pending
* the pending operations
* @throws IOException
* any failure
*/
@VisibleForTesting
public void preCommitJob(CommitContext commitContext, ActiveCommit pending) throws IOException {
} | 3.26 |
hadoop_AbstractS3ACommitter_setWorkPath_rdh | /**
* Set the work path for this committer.
*
* @param workPath
* the work path to use.
*/
protected final void setWorkPath(Path workPath) {
LOG.debug("Setting work path to {}", workPath);
this.workPath = workPath;
} | 3.26 |
hadoop_AbstractS3ACommitter_getTaskCommitThreadCount_rdh | /**
* Get the thread count for this task's commit operations.
*
* @param context
* the JobContext for this commit
* @return a possibly zero thread count.
*/
private int getTaskCommitThreadCount(final JobContext context) {
return context.getConfiguration().getInt(FS_S3A_COMMITTER_THREADS, DEFAULT_COMMITTER_THREADS);
} | 3.26 |
hadoop_AbstractS3ACommitter_getDestinationFS_rdh | /**
* Get the destination filesystem from the output path and the configuration.
*
* @param out
* output path
* @param config
* job/task config
* @return the associated FS
* @throws PathCommitException
* output path isn't to an S3A FS instance.
* @throws IOException
* failure to instantiate the FS.
*/
protected FileSystem getDestinationFS(Path out, Configuration config) throws IOException {
return getS3AFileSystem(out, config, requiresDelayedCommitOutputInFileSystem());
} | 3.26 |
hadoop_AbstractS3ACommitter_uploadCommitted_rdh | /**
* Note that a file was committed.
* Increase the counter of files and total size.
* If there is room in the committedFiles list, the file
* will be added to the list and so end up in the _SUCCESS file.
*
* @param key
* key of the committed object.
* @param size
* size in bytes.
*/
public synchronized void uploadCommitted(String key, long size) {
if (committedObjects.size() < SUCCESS_MARKER_FILE_LIMIT) {
committedObjects.add(key.startsWith("/") ? key : "/" + key);
}
committedObjectCount++;
committedBytes += size;
} | 3.26 |
hadoop_AbstractS3ACommitter_fromStatusIterator_rdh | /**
* Create an active commit of the given pending files.
*
* @param pendingFS
* source filesystem.
* @param statuses
* iterator of file status or subclass to use.
* @return the commit
* @throws IOException
* if the iterator raises one.
*/
public static ActiveCommit fromStatusIterator(final FileSystem pendingFS, final RemoteIterator<? extends FileStatus> statuses) throws IOException {
return new ActiveCommit(pendingFS, toList(statuses));
} | 3.26 |
hadoop_AbstractS3ACommitter_getRole_rdh | /**
* Used in logging and reporting to help disentangle messages.
*
* @return the committer's role.
*/
protected String getRole() {
return role;
} | 3.26 |
hadoop_AbstractS3ACommitter_warnOnActiveUploads_rdh | /**
* Scan for active uploads and list them along with a warning message.
* Errors are ignored.
*
* @param path
* output path of job.
*/
protected void warnOnActiveUploads(final Path path) {
List<MultipartUpload> pending;
try {
pending = getCommitOperations().listPendingUploadsUnderPath(path);
} catch (IOException e) {
LOG.debug("Failed to list uploads under {}", path, e);
return;
}
if (!pending.isEmpty()) {
// log a warning
LOG.warn("{} active upload(s) in progress under {}", pending.size(),
path);
LOG.warn("Either jobs are running concurrently" + " or failed jobs are not being cleaned up");
// and the paths + timestamps
DateFormat df = DateFormat.getDateTimeInstance();
pending.forEach(u -> LOG.info("[{}] {}", df.format(Date.from(u.initiated())), u.key()));
if (shouldAbortUploadsInCleanup()) {
LOG.warn("This committer will abort these uploads in job cleanup");
}
}
} | 3.26 |
hadoop_AbstractS3ACommitter_requiresDelayedCommitOutputInFileSystem_rdh | /**
* Flag to indicate whether or not the destination filesystem needs
* to be configured to support magic paths where the output isn't immediately
* visible. If the committer returns true, then committer setup will
* fail if the FS doesn't have the capability.
* Base implementation returns false.
*
* @return whether the committer requires the filesystem to support delayed-visibility (magic path) output.
*/
protected boolean requiresDelayedCommitOutputInFileSystem() {
return false;
} | 3.26 |
hadoop_AbstractS3ACommitter_setOutputPath_rdh | /**
* Set the output path.
*
* @param outputPath
* new value
*/
protected final void setOutputPath(Path outputPath) {
this.outputPath = requireNonNull(outputPath, "Null output path");
} | 3.26 |
hadoop_AbstractS3ACommitter_setupJob_rdh | /**
* Base job setup (optionally) deletes the success marker and
* always creates the destination directory.
* When objects are committed, that dest dir marker will inevitably
* be deleted; creating it now ensures there is something at the end
* while the job is in progress - and if nothing is created, that
* it is still there.
* <p>
* The option {@link InternalCommitterConstants#FS_S3A_COMMITTER_UUID}
* is set to the job UUID; if generated locally
* {@link InternalCommitterConstants#SPARK_WRITE_UUID} is also patched.
* The field {@link #jobSetup} is set to true to note that
* this specific committer instance was used to set up a job.
* </p>
*
* @param context
* context
* @throws IOException
* IO failure
*/
@Override
public void setupJob(JobContext context) throws IOException {
try (DurationInfo d = new DurationInfo(LOG, "Job %s setting up", getUUID())) {
// record that the job has been set up
jobSetup = true;
// patch job conf with the job UUID.
Configuration c = context.getConfiguration();
c.set(FS_S3A_COMMITTER_UUID, getUUID());
c.set(FS_S3A_COMMITTER_UUID_SOURCE, getUUIDSource().getText());
Path dest = getOutputPath();
if (createJobMarker) {
commitOperations.deleteSuccessMarker(dest);
}
getDestFS().mkdirs(dest);
// do a scan for surplus markers
warnOnActiveUploads(dest);
}
} | 3.26 |
hadoop_AbstractS3ACommitter_m0_rdh | /**
* Create the success data structure from a job context.
*
* @param context
* job context.
* @param filenames
* short list of filenames; nullable
* @param ioStatistics
* IOStatistics snapshot
* @param destConf
* config of the dest fs, can be null
* @return the structure
*/
private SuccessData m0(final JobContext context, final List<String> filenames, final IOStatisticsSnapshot ioStatistics,
final Configuration destConf) {
// create a success data structure
SuccessData successData = new SuccessData();
successData.setCommitter(getName());
successData.setJobId(f0);
successData.setJobIdSource(uuidSource.getText());
successData.setDescription(getRole());
successData.setHostname(NetUtils.getLocalHostname());
Date now = new Date();
successData.setTimestamp(now.getTime());
successData.setDate(now.toString());
if (filenames != null) {
successData.setFilenames(filenames);
}
successData.getIOStatistics().aggregate(ioStatistics);
// attach some config options as diagnostics to assist
// in debugging performance issues.
// commit thread pool size
successData.addDiagnostic(FS_S3A_COMMITTER_THREADS, Integer.toString(getJobCommitThreadCount(context)));
// and filesystem http connection and thread pool sizes
if (destConf != null) {
successData.addDiagnostic(MAXIMUM_CONNECTIONS, destConf.get(MAXIMUM_CONNECTIONS, Integer.toString(DEFAULT_MAXIMUM_CONNECTIONS)));
successData.addDiagnostic(MAX_TOTAL_TASKS, destConf.get(MAX_TOTAL_TASKS, Integer.toString(DEFAULT_MAX_TOTAL_TASKS)));
}
return successData;
} | 3.26 |
hadoop_AbstractS3ACommitter_getCommitOperations_rdh | /**
* Get the commit actions instance.
* Subclasses may provide a mock version of this.
*
* @return the commit actions instance to use for operations.
*/
protected CommitOperations getCommitOperations() {
return commitOperations;
} | 3.26 |
hadoop_AbstractS3ACommitter_commitJobInternal_rdh | /**
* Internal Job commit operation: where the S3 requests are made
* (potentially in parallel).
*
* @param commitContext
* commit context
* @param pending
* pending commits
* @throws IOException
* any failure
*/
protected void commitJobInternal(final CommitContext commitContext, final ActiveCommit pending) throws IOException {
trackDurationOfInvocation(committerStatistics, COMMITTER_COMMIT_JOB.getSymbol(), () -> commitPendingUploads(commitContext, pending));
} | 3.26 |
hadoop_AbstractS3ACommitter_getText_rdh | /**
* Source for messages.
*
* @return text
*/
public String getText() {
return text;
} | 3.26 |
hadoop_AbstractS3ACommitter_m1_rdh | /**
* Build the job UUID.
*
* <p>
* In MapReduce jobs, the application ID is issued by YARN, and
* unique across all jobs.
* </p>
* <p>
* Spark will use a fake app ID based on the current time.
* This can lead to collisions on busy clusters unless
* the specific spark release has SPARK-33402 applied.
* This appends a random long value to the timestamp, so
* is unique enough that the risk of collision is almost
* nonexistent.
* </p>
* <p>
* The order of selection of a uuid is
* </p>
* <ol>
* <li>Value of
* {@link InternalCommitterConstants#FS_S3A_COMMITTER_UUID}.</li>
* <li>Value of
* {@link InternalCommitterConstants#SPARK_WRITE_UUID}.</li>
* <li>If enabled through
* {@link CommitConstants#FS_S3A_COMMITTER_GENERATE_UUID}:
* Self-generated uuid.</li>
* <li>If {@link CommitConstants#FS_S3A_COMMITTER_REQUIRE_UUID}
* is not set: Application ID</li>
* </ol>
* The UUID bonding takes place during construction;
* the staging committers use it to set up their wrapped
* committer to a path in the cluster FS which is unique to the
* job.
* <p>
* In MapReduce jobs, the application ID is issued by YARN, and
* unique across all jobs.
* </p>
* In {@link #setupJob(JobContext)} the job context's configuration
* will be patched to be valid in all sequences where the job has
* been set up for the configuration passed in.
* <p>
* If the option {@link CommitConstants#FS_S3A_COMMITTER_REQUIRE_UUID}
* is set, then an external UUID MUST be passed in.
* This can be used to verify that the spark engine is reliably setting
* unique IDs for staging.
* </p>
*
* @param conf
* job/task configuration
* @param jobId
* job ID from YARN or spark.
* @return Job UUID and source of it.
* @throws PathCommitException
* no UUID was found and it was required
*/
public static Pair<String, JobUUIDSource> m1(Configuration conf, JobID jobId) throws PathCommitException {
String jobUUID = conf.getTrimmed(FS_S3A_COMMITTER_UUID, "");
if (!jobUUID.isEmpty()) {
return Pair.of(jobUUID, JobUUIDSource.CommitterUUIDProperty);
}
// there is no job UUID.
// look for one from spark
jobUUID = conf.getTrimmed(SPARK_WRITE_UUID, "");
if (!jobUUID.isEmpty()) {
return Pair.of(jobUUID, JobUUIDSource.SparkWriteUUID);
}
// there is no UUID configuration in the job/task config
// Check the job hasn't declared a requirement for the UUID.
// This allows for fail-fast validation of Spark behavior.
if (conf.getBoolean(FS_S3A_COMMITTER_REQUIRE_UUID, DEFAULT_S3A_COMMITTER_REQUIRE_UUID)) {
throw new PathCommitException("", E_NO_SPARK_UUID);
}
// see if the job can generate a random UUID
if (conf.getBoolean(FS_S3A_COMMITTER_GENERATE_UUID, DEFAULT_S3A_COMMITTER_GENERATE_UUID)) {
// generate a random UUID. This is OK for a job, for a task
// it means that the data may not get picked up.
String newId = UUID.randomUUID().toString();
LOG.warn("No job ID in configuration; generating a random ID: {}", newId);
return Pair.of(newId, JobUUIDSource.GeneratedLocally);
}
// if no other option was supplied, return the job ID.
// This is exactly what MR jobs expect, but is not what
// Spark jobs can do as there is a risk of jobID collision.
return Pair.of(jobId.toString(), JobUUIDSource.f1);
} | 3.26 |
hadoop_AbstractS3ACommitter_abortJobInternal_rdh | /**
* The internal job abort operation; can be overridden in tests.
* This must clean up operations; it is called when a commit fails, as
* well as in an {@link #abortJob(JobContext, JobStatus.State)} call.
* The base implementation calls {@link #cleanup(CommitContext, boolean)}
* so cleans up the filesystems and destroys the thread pool.
* Subclasses must always invoke this superclass method after their
* own operations.
* Creates and closes its own commit context.
*
* @param commitContext
* commit context
* @param suppressExceptions
* should exceptions be suppressed?
* @throws IOException
* any IO problem raised when suppressExceptions is false.
*/
protected void abortJobInternal(CommitContext commitContext, boolean suppressExceptions) throws IOException {
cleanup(commitContext, suppressExceptions);
} | 3.26 |
hadoop_AbstractS3ACommitter_empty_rdh | /**
* Get the empty entry.
*
* @return an active commit with no pending files.
*/
public static ActiveCommit empty() {
return EMPTY;
} | 3.26 |
hadoop_AbstractS3ACommitter_getJobCommitThreadCount_rdh | /**
* Get the thread count for this job's commit operations.
*
* @param context
* the JobContext for this commit
* @return a possibly zero thread count.
*/
private int getJobCommitThreadCount(final JobContext context) {
return context.getConfiguration().getInt(FS_S3A_COMMITTER_THREADS, DEFAULT_COMMITTER_THREADS);
} | 3.26 |
hadoop_AbstractS3ACommitter_pendingsetCommitted_rdh | /**
* Callback when a pendingset has been committed,
* including any source statistics.
*
* @param sourceStatistics
* any source statistics
*/
public void pendingsetCommitted(final IOStatistics sourceStatistics) {
ioStatistics.aggregate(sourceStatistics);
} | 3.26 |
hadoop_AbstractS3ACommitter_getTaskAttemptFilesystem_rdh | /**
* Get the task attempt path filesystem. This may not be the same as the
* final destination FS, and so may not be an S3A FS.
*
* @param context
* task attempt
* @return the filesystem
* @throws IOException
* failure to instantiate
*/
protected FileSystem getTaskAttemptFilesystem(TaskAttemptContext context) throws IOException {
return getTaskAttemptPath(context).getFileSystem(getConf());
} | 3.26 |
hadoop_AbstractS3ACommitter_maybeSaveSummary_rdh | /**
* Save a summary to the report dir if the config option
* is set.
* The report will be updated with the current active stage,
* and if {@code thrown} is non-null, it will be added to the
* diagnostics (and the job tagged as a failure).
* Static for testability.
*
* @param activeStage
* active stage
* @param context
* commit context.
* @param report
* summary file.
* @param thrown
* any exception indicting failure.
* @param quiet
* should exceptions be swallowed.
* @param overwrite
* should the existing file be overwritten
* @return the path of a file, if successfully saved
* @throws IOException
* if a failure occurred and quiet==false
*/
private static Path maybeSaveSummary(String activeStage, CommitContext context, SuccessData report, Throwable thrown, boolean quiet, boolean overwrite) throws IOException {
Configuration conf = context.getConf();
String reportDir = conf.getTrimmed(OPT_SUMMARY_REPORT_DIR, "");
if (reportDir.isEmpty()) {
LOG.debug("No summary directory set in " + OPT_SUMMARY_REPORT_DIR);
return null;
}
LOG.debug("Summary directory set to {}", reportDir);
Path reportDirPath = new Path(reportDir);
Path path = new Path(reportDirPath, createJobSummaryFilename(context.getJobId()));
if (thrown != null) {
report.recordJobFailure(thrown);
}
report.putDiagnostic(STAGE, activeStage);
// the store operations here is explicitly created for the FS where
// the reports go, which may not be the target FS of the job.
final FileSystem fs = path.getFileSystem(conf);
try (ManifestStoreOperations operations = new ManifestStoreOperationsThroughFileSystem(fs)) {
if (!overwrite) {
// check for file existence so there is no need to worry about
// precisely what exception is raised when overwrite=false and dest file
// exists
try {
FileStatus v65 = operations.getFileStatus(path);
// get here and the file exists
LOG.debug("Report already exists: {}", v65);
return null;
} catch (FileNotFoundException ignored) {
}
}
report.save(fs, path, SuccessData.serializer());
LOG.info("Job summary saved to {}", path);
return path;
} catch (IOException e) {
LOG.debug("Failed to save summary to {}", path, e);
if (quiet) {
return null;
} else {
throw e;
}
}
} | 3.26 |
hadoop_AbstractS3ACommitter_commitPendingUploads_rdh | /**
* Commit all the pending uploads.
* Each file listed in the ActiveCommit instance is queued for processing
* in a separate thread; its contents are loaded and then (sequentially)
* committed.
* On a failure or abort of a single file's commit, all its uploads are
* aborted.
* The revert operation lists the files already committed and deletes them.
*
* @param commitContext
* commit context
* @param pending
* pending uploads
* @throws IOException
* on any failure
*/
protected void commitPendingUploads(final CommitContext commitContext, final ActiveCommit pending) throws IOException {
if (pending.isEmpty()) {
LOG.warn("{}: No pending uploads to commit", getRole());
}
try (DurationInfo ignored = new DurationInfo(LOG, "committing the output of %s task(s)", pending.size())) {
TaskPool.foreach(pending.getSourceFiles()).stopOnFailure().suppressExceptions(false).executeWith(commitContext.getOuterSubmitter()).abortWith(status -> loadAndAbort(commitContext, pending, status, true, false)).revertWith(status -> loadAndRevert(commitContext, pending, status)).run(status -> loadAndCommit(commitContext, pending, status));
}
} | 3.26 |
hadoop_AbstractS3ACommitter_startOperation_rdh | /**
* Start an operation; retrieve an audit span.
*
* All operation names <i>SHOULD</i> come from
* {@code StoreStatisticNames} or
* {@code StreamStatisticNames}.
*
* @param name
* operation name.
* @param path1
* first path of operation
* @param path2
* second path of operation
* @return a span for the audit
* @throws IOException
* failure
*/
protected AuditSpan startOperation(String name, @Nullable String path1, @Nullable String path2) throws IOException {
return getAuditSpanSource().createSpan(name, path1, path2);
} | 3.26 |
hadoop_AbstractS3ACommitter_getDestFS_rdh | /**
* Get the destination FS, creating it on demand if needed.
*
* @return the filesystem; requires the output path to be set up
* @throws IOException
* if the FS cannot be instantiated.
*/
public FileSystem getDestFS() throws IOException {
if (destFS == null) {
FileSystem fs = getDestinationFS(outputPath, getConf());
setDestFS(fs);
}
return destFS;
} | 3.26 |
hadoop_AbstractS3ACommitter_initOutput_rdh | /**
* Init the output filesystem and path.
* TESTING ONLY; allows mock FS to cheat.
*
* @param out
* output path
* @throws IOException
* failure to create the FS.
*/
@VisibleForTesting
protected void initOutput(Path out) throws IOException {
FileSystem fs = getDestinationFS(out, getConf());
setDestFS(fs);
setOutputPath(fs.makeQualified(out));
} | 3.26 |
hadoop_AbstractS3ACommitter_getWorkPath_rdh | /**
* This is the critical method for {@code FileOutputFormat}; it declares
* the path for work.
*
* @return the working path.
*/
@Override
public final Path getWorkPath() {
return workPath;
} | 3.26 |
hadoop_AbstractS3ACommitter_jobCompleted_rdh | /**
* Job completion outcome; this may be subclassed in tests.
*
* @param success
* did the job succeed.
*/
protected void jobCompleted(boolean success) {
getCommitOperations().jobCompleted(success);
} | 3.26 |
hadoop_HashPartitioner_getPartition_rdh | /**
* Use {@link Object#hashCode()} to partition.
*/
public int getPartition(K2 key, V2 value, int numReduceTasks) {
return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
} | 3.26 |
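The getPartition snippet above masks the hash with Integer.MAX_VALUE because Java's % operator preserves the sign of a negative hashCode, which would otherwise yield an invalid negative partition index; clearing the sign bit keeps the result in [0, numReduceTasks). A small self-contained sketch of that behaviour, with class and variable names that are illustrative only:

```java
public class HashPartitionSketch {
  // Same masking trick as HashPartitioner#getPartition above.
  static int partition(int hash, int numReduceTasks) {
    return (hash & Integer.MAX_VALUE) % numReduceTasks;
  }

  public static void main(String[] args) {
    int negativeHash = -7;   // hashCode() may legitimately be negative
    int reducers = 10;
    System.out.println(negativeHash % reducers);            // -7: not a valid partition index
    System.out.println(partition(negativeHash, reducers));  // 2147483641 % 10 = 1
  }
}
```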
hadoop_ShellWrapper_getDeviceFileType_rdh | /**
* A shell Wrapper to ease testing.
*/
public class ShellWrapper {
public String getDeviceFileType(String devName) throws IOException {
Shell.ShellCommandExecutor shexec = new Shell.ShellCommandExecutor(new String[]{ "stat", "-c", "%F", devName } | 3.26 |
hadoop_WorkloadMapper_configureJob_rdh | /**
* Setup input and output formats and optional reducer.
*/
public void configureJob(Job job) {
job.setInputFormatClass(VirtualInputFormat.class);
job.setNumReduceTasks(0);
job.setOutputKeyClass(NullWritable.class);
job.setOutputValueClass(NullWritable.class);
job.setOutputFormatClass(NullOutputFormat.class);
} | 3.26 |
hadoop_StateStoreSerializer_newRecord_rdh | /**
* Create a new record.
*
* @param clazz
* Class of the new record.
* @param <T>
* Type of the record.
* @return New record.
*/
public static <T> T newRecord(Class<T> clazz) {
return getSerializer(null).newRecordInstance(clazz);
} | 3.26 |
hadoop_StateStoreSerializer_getSerializer_rdh | /**
* Get a serializer based on the provided configuration.
*
* @param conf
* Configuration. Default if null.
* @return Singleton serializer.
*/
public static StateStoreSerializer getSerializer(Configuration conf) {
if (conf == null) {
synchronized(StateStoreSerializer.class) {
if (f0 == null) {
conf = new Configuration();
f0 = newSerializer(conf);
}
}
return f0;
} else {
return newSerializer(conf);
}} | 3.26 |
hadoop_ErasureCodec_getCoderOptions_rdh | /**
* Get a {@link ErasureCoderOptions}.
*
* @return erasure coder options
*/
public ErasureCoderOptions getCoderOptions() {
return coderOptions;
} | 3.26 |
hadoop_Chunk_readLength_rdh | /**
* Reading the length of next chunk.
*
* @throws java.io.IOException
* when no more data is available.
*/
private void readLength() throws IOException {
remain = Utils.readVInt(in);
if (remain >= 0) {
lastChunk = true;
} else {
remain = -remain;
}
} | 3.26 |
hadoop_Chunk_writeChunk_rdh | /**
* Write out a chunk.
*
* @param chunk
* The chunk buffer.
* @param offset
* Offset to chunk buffer for the beginning of chunk.
* @param len
* Length of the chunk.
* @param last
* Is this the last call to flushBuffer?
*/
private void writeChunk(byte[] chunk, int offset, int len, boolean last) throws IOException {
if (last) {
// always write out the length for the last chunk.
Utils.writeVInt(f0, len);
if (len > 0) {
f0.write(chunk, offset, len);
} } else if (len > 0) {
Utils.writeVInt(f0, -len);
f0.write(chunk, offset, len);
}
} | 3.26 |
hadoop_Chunk_writeBufData_rdh | /**
* Write out a chunk that is a concatenation of the internal buffer plus
* user supplied data. This will never be the last block.
*
* @param data
* User supplied data buffer.
* @param offset
* Offset to user data buffer.
* @param len
* User data buffer size.
*/
private void writeBufData(byte[] data, int offset,
int len) throws IOException {
if ((count + len) > 0) {
Utils.writeVInt(f0, -(count + len));
f0.write(buf, 0, count);
count = 0;
f0.write(data, offset, len);
}
} | 3.26 |
hadoop_Chunk_getRemain_rdh | /**
* How many bytes remain in the current chunk?
*
* @return remaining bytes left in the current chunk.
* @throws java.io.IOException
*/
public int getRemain() throws IOException {
checkEOF();
return remain;
} | 3.26 |
hadoop_Chunk_isLastChunk_rdh | /**
* Have we reached the last chunk.
*
* @return true if we have reached the last chunk.
* @throws java.io.IOException
*/
public boolean isLastChunk() throws IOException {
checkEOF();
return lastChunk;
} | 3.26 |
hadoop_Chunk_flushBuffer_rdh | /**
* Flush the internal buffer.
*
* Is this the last call to flushBuffer?
*
* @throws java.io.IOException
*/ private void flushBuffer() throws IOException {
if (count > 0) {
writeChunk(buf, 0, count, false);
count = 0;
}
} | 3.26 |
hadoop_RpcAcceptedReply_fromValue_rdh | /* e.g. memory allocation failure */
public static AcceptState fromValue(int value) {
return values()[value];
} | 3.26 |
hadoop_HttpFSReleaseFilter_getFileSystemAccess_rdh | /**
* Returns the {@link FileSystemAccess} service to return the FileSystemAccess filesystem
* instance to.
*
* @return the FileSystemAccess service.
*/
@Override
protected FileSystemAccess getFileSystemAccess() {
return HttpFSServerWebApp.get().get(FileSystemAccess.class);
} | 3.26 |
hadoop_SampleQuantiles_insert_rdh | /**
* Add a new value from the stream.
*
* @param v
* v.
*/
public synchronized void insert(long v) {
buffer[bufferCount] = v;
bufferCount++;
count++;
if (bufferCount == buffer.length) {
insertBatch();
compress();
}
} | 3.26 |
hadoop_SampleQuantiles_compress_rdh | /**
* Try to remove extraneous items from the set of sampled items. This checks
* if an item is unnecessary based on the desired error bounds, and merges it
* with the adjacent item if it is.
*/
private void compress() {
if (samples.size() < 2) {
return;
}
ListIterator<SampleItem> it = samples.listIterator();
SampleItem prev = null;
SampleItem next = it.next();
while (it.hasNext()) {
prev = next;
next = it.next();
if (((prev.g + next.g) + next.f1) <= allowableError(it.previousIndex())) {
next.g += prev.g;
// Remove prev. it.remove() kills the last thing returned.
it.previous();
it.previous();
it.remove();
// it.next() is now equal to next, skip it back forward again
it.next();
}
}
} | 3.26 |
hadoop_SampleQuantiles_allowableError_rdh | /**
* Specifies the allowable error for this rank, depending on which quantiles
* are being targeted.
*
* This is the f(r_i, n) function from the CKMS paper. It's basically how wide
* the range of this rank can be.
*
* @param rank
* the index in the list of samples
*/
private double allowableError(int rank) {
int size = samples.size();
double minError = size + 1;
for (Quantile q : quantiles) {
double error;
if (rank <= (q.quantile * size)) {
error = ((2.0 * q.error) * (size - rank)) / (1.0 - q.quantile);
} else {
error = ((2.0 * q.error) * rank) / q.quantile;
}
if (error < minError) {
minError = error;
}
}
return minError;
} | 3.26 |
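Restating what allowableError computes above: for a sample at rank r in a summary of s items, with each target quantile q carrying an allowed error ε, the method returns the minimum over the configured quantiles of

$$
f(r, s) \;=\; \min_{(q,\,\varepsilon)}
\begin{cases}
\dfrac{2\,\varepsilon\,(s - r)}{1 - q}, & r \le q\,s,\\[6pt]
\dfrac{2\,\varepsilon\,r}{q}, & r > q\,s,
\end{cases}
$$

which is the f(r_i, n) width function from the CKMS paper that the javadoc references, written to match the two branches in the code.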
hadoop_SampleQuantiles_query_rdh | /**
* Get the estimated value at the specified quantile.
*
* @param quantile
* Queried quantile, e.g. 0.50 or 0.99.
* @return Estimated value at that quantile.
*/
private long query(double quantile) {
Preconditions.checkState(!samples.isEmpty(), "no data in estimator");
int rankMin = 0;
int desired = ((int) (quantile * count));
ListIterator<SampleItem> it = samples.listIterator();
SampleItem prev = null;
SampleItem cur = it.next();
for (int i = 1; i < samples.size(); i++) {
prev = cur;
cur = it.next();
rankMin += prev.g;
if (((rankMin + cur.g) + cur.f1) > (desired + (allowableError(i) / 2))) {
return prev.f0;
}
}
// edge case of wanting max value
return samples.get(samples.size() - 1).f0;
} | 3.26 |
hadoop_SampleQuantiles_clear_rdh | /**
* Resets the estimator, clearing out all previously inserted items
*/
public synchronized void clear() {
count = 0;
bufferCount = 0;
samples.clear();
} | 3.26 |
hadoop_SampleQuantiles_getSampleCount_rdh | /**
* Returns the number of samples kept by the estimator
*
* @return count current number of samples
*/
@VisibleForTesting
public synchronized int getSampleCount() {
return samples.size();
} | 3.26 |
hadoop_SampleQuantiles_getCount_rdh | /**
* Returns the number of items that the estimator has processed
*
* @return count total number of items processed
*/
public synchronized long getCount() {
return count;
} | 3.26 |
hadoop_SampleQuantiles_insertBatch_rdh | /**
* Merges items from buffer into the samples array in one pass.
* This is more efficient than doing an insert on every item.
*/
private void insertBatch() {
if (bufferCount == 0) {
return;
}
Arrays.sort(buffer, 0, bufferCount);
// Base case: no samples
int start = 0;
if (samples.size() == 0) {
SampleItem newItem = new SampleItem(buffer[0], 1, 0);
samples.add(newItem);
start++;
}
ListIterator<SampleItem> it = samples.listIterator();
SampleItem item = it.next();
for (int i = start; i < bufferCount; i++) {
long v = buffer[i];
while ((it.nextIndex() < samples.size()) && (item.f0 < v)) {
item = it.next();
}
// If we found that bigger item, back up so we insert ourselves before it
if (item.f0 > v) {
it.previous();
}
// We use different indexes for the edge comparisons, because of the above
// if statement that adjusts the iterator
int delta;
if ((it.previousIndex() == 0) || (it.nextIndex() == samples.size())) {
delta = 0;
} else {
delta = ((int) (Math.floor(allowableError(it.nextIndex())))) - 1;
}
SampleItem newItem = new SampleItem(v, 1, delta);
it.add(newItem);
item = newItem;
}
bufferCount = 0;
} | 3.26 |
hadoop_KeyFieldBasedComparator_configure_rdh | /**
* This comparator implementation provides a subset of the features provided
* by the Unix/GNU Sort. In particular, the supported features are:
* -n, (Sort numerically)
* -r, (Reverse the result of comparison)
* -k pos1[,pos2], where pos is of the form f[.c][opts], where f is the number
* of the field to use, and c is the number of the first character from the
* beginning of the field. Fields and character posns are numbered starting
* with 1; a character position of zero in pos2 indicates the field's last
* character. If '.c' is omitted from pos1, it defaults to 1 (the beginning
* of the field); if omitted from pos2, it defaults to 0 (the end of the
* field). opts are ordering options (any of 'nr' as described above).
* We assume that the fields in the key are separated by
* {@link JobContext#MAP_OUTPUT_KEY_FIELD_SEPARATOR}
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class KeyFieldBasedComparator<K, V> extends KeyFieldBasedComparator<K, V> implements JobConfigurable {
public void configure(JobConf job) {
super.setConf(job);
} | 3.26 |
hadoop_MapHost_markAvailable_rdh | /**
* Called when the node is done with its penalty or done copying.
*
* @return the host's new state
*/
public synchronized State markAvailable() {
if (maps.isEmpty()) {
state = State.IDLE;
} else {
state = State.PENDING;
}
return state;
} | 3.26 |
hadoop_MapHost_penalize_rdh | /**
* Mark the host as penalized
*/
public synchronized void penalize() {
state = State.PENALIZED;
} | 3.26 |
hadoop_DelegationBindingInfo_withCredentialProviders_rdh | /**
* Set builder value.
*
* @param value
* non null value
* @return the builder
*/
public DelegationBindingInfo withCredentialProviders(final AWSCredentialProviderList value) {
credentialProviders = requireNonNull(value);
return this;
} | 3.26 |
hadoop_DelegationBindingInfo_getCredentialProviders_rdh | /**
* Get list of credential providers.
*
* @return list of credential providers
*/
public AWSCredentialProviderList getCredentialProviders() {
return credentialProviders;
} | 3.26 |
hadoop_RMWebAppUtil_createAppSubmissionContext_rdh | /**
* Create the actual ApplicationSubmissionContext to be submitted to the RM
* from the information provided by the user.
*
* @param newApp
* the information provided by the user
* @param conf
* RM configuration
* @return returns the constructed ApplicationSubmissionContext
* @throws IOException
* in case of Error
*/
public static ApplicationSubmissionContext createAppSubmissionContext(ApplicationSubmissionContextInfo newApp, Configuration conf) throws IOException {
// create local resources and app submission context
ApplicationId appid;
String error = "Could not parse application id " + newApp.getApplicationId();
try {
appid = ApplicationId.fromString(newApp.getApplicationId());
} catch (Exception e) {
throw new BadRequestException(error);
}
ApplicationSubmissionContext appContext = ApplicationSubmissionContext.newInstance(appid, newApp.getApplicationName(), newApp.getQueue(), Priority.newInstance(newApp.getPriority()), m0(newApp), newApp.getUnmanagedAM(), newApp.getCancelTokensWhenComplete(), newApp.getMaxAppAttempts(), createAppSubmissionContextResource(newApp, conf),
newApp.getApplicationType(), newApp.getKeepContainersAcrossApplicationAttempts(), newApp.getAppNodeLabelExpression(), newApp.getAMContainerNodeLabelExpression());
appContext.setApplicationTags(newApp.getApplicationTags());
appContext.setAttemptFailuresValidityInterval(newApp.getAttemptFailuresValidityInterval());
if (newApp.getLogAggregationContextInfo() != null) {
appContext.setLogAggregationContext(createLogAggregationContext(newApp.getLogAggregationContextInfo()));
}
String reservationIdStr = newApp.getReservationId();
if ((reservationIdStr != null) && (!reservationIdStr.isEmpty())) {
ReservationId v17 = ReservationId.parseReservationId(reservationIdStr);
appContext.setReservationID(v17);
}
return appContext;
} | 3.26 |
hadoop_RMWebAppUtil_m0_rdh | /**
* Create the ContainerLaunchContext required for the
* ApplicationSubmissionContext. This function takes the user information and
* generates the ByteBuffer structures required by the ContainerLaunchContext
*
* @param newApp
* the information provided by the user
* @return created context
* @throws BadRequestException
* @throws IOException
*/
private static ContainerLaunchContext m0(ApplicationSubmissionContextInfo newApp) throws BadRequestException, IOException {
// create container launch context
HashMap<String, ByteBuffer> hmap = new HashMap<String, ByteBuffer>();
for (Map.Entry<String, String> entry : newApp.getContainerLaunchContextInfo().getAuxillaryServiceData().entrySet()) {
if (!entry.getValue().isEmpty()) {
Base64 decoder = new Base64(0, null, true);
byte[] data = decoder.decode(entry.getValue());
hmap.put(entry.getKey(), ByteBuffer.wrap(data));
}
}
HashMap<String, LocalResource> hlr = new HashMap<String, LocalResource>();
for (Map.Entry<String, LocalResourceInfo> entry : newApp.getContainerLaunchContextInfo().getResources().entrySet()) {
LocalResourceInfo l = entry.getValue();
LocalResource lr = LocalResource.newInstance(URL.fromURI(l.getUrl()), l.getType(), l.getVisibility(),
l.getSize(), l.getTimestamp());
hlr.put(entry.getKey(), lr);
}
DataOutputBuffer v29 = new DataOutputBuffer();
Credentials cs = createCredentials(newApp.getContainerLaunchContextInfo().getCredentials());
cs.writeTokenStorageToStream(v29);
ByteBuffer tokens = ByteBuffer.wrap(v29.getData());
ContainerLaunchContext ctx = ContainerLaunchContext.newInstance(hlr, newApp.getContainerLaunchContextInfo().getEnvironment(), newApp.getContainerLaunchContextInfo().getCommands(), hmap, tokens, newApp.getContainerLaunchContextInfo().getAcls());
return ctx;
} | 3.26 |
hadoop_RMWebAppUtil_getCallerUserGroupInformation_rdh | /**
* Helper method to retrieve the UserGroupInformation from the
* HttpServletRequest.
*
* @param hsr
* the servlet request
* @param usePrincipal
* true if we need to use the principal user, remote
* otherwise.
* @return the user group information of the caller.
*/
public static UserGroupInformation getCallerUserGroupInformation(HttpServletRequest hsr, boolean usePrincipal) {
String remoteUser = hsr.getRemoteUser();
if (usePrincipal) {
Principal v42 = hsr.getUserPrincipal();
remoteUser = (v42 == null) ? null : v42.getName();
}
UserGroupInformation callerUGI = null;
if (remoteUser != null) {
callerUGI = UserGroupInformation.createRemoteUser(remoteUser);
}
return callerUGI;
} | 3.26 |
hadoop_RMWebAppUtil_createCredentials_rdh | /**
* Generate a Credentials object from the information in the CredentialsInfo
* object.
*
* @param credentials
* the CredentialsInfo provided by the user.
* @return the Credentials built from the provided info.
*/
private static Credentials createCredentials(CredentialsInfo credentials) {
Credentials ret = new Credentials();
try {
for (Map.Entry<String, String> entry : credentials.getTokens().entrySet()) {
Text alias = new Text(entry.getKey());
Token<TokenIdentifier> token = new Token<TokenIdentifier>();
token.decodeFromUrlString(entry.getValue());
ret.addToken(alias, token);
}
for (Map.Entry<String, String> entry : credentials.getSecrets().entrySet()) {
Text alias = new Text(entry.getKey());
Base64 decoder = new Base64(0, null, true);
byte[] secret = decoder.decode(entry.getValue());
ret.addSecretKey(alias, secret);
}
} catch (IOException ie) {
throw new BadRequestException("Could not parse credentials data; exception message = " + ie.getMessage());
}
return ret;
} | 3.26 |
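To make the token handling above concrete, a small sketch of the URL-string round trip that CredentialsInfo token values go through; the empty Token and the alias are placeholders rather than values taken from the row.

import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class CredentialsRoundTripSketch {
  public static void main(String[] args) throws Exception {
    // A real token would come from the RM or NameNode; an empty one still
    // exercises the same encode/decode path used above.
    Token<TokenIdentifier> original = new Token<>();
    String urlForm = original.encodeToUrlString();   // the string a CredentialsInfo map would carry
    Token<TokenIdentifier> decoded = new Token<>();
    decoded.decodeFromUrlString(urlForm);
    Credentials creds = new Credentials();
    creds.addToken(new Text("my-token-alias"), decoded);
    System.out.println("Stored tokens: " + creds.numberOfTokens());
  }
}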
hadoop_RMWebAppUtil_setupSecurityAndFilters_rdh | /**
* Helper method to setup filters and authentication for ResourceManager
* WebServices.
*
 * Use the customized yarn filter instead of the standard kerberos filter to
 * allow users to authenticate using delegation tokens. Four conditions need
 * to be satisfied:
*
* 1. security is enabled.
*
* 2. http auth type is set to kerberos.
*
* 3. "yarn.resourcemanager.webapp.use-yarn-filter" override is set to true.
*
 * 4. hadoop.http.filter.initializers contains
 * AuthenticationFilterInitializer.
*
* @param conf
* RM configuration.
* @param rmDTSecretManager
* RM specific delegation token secret manager.
*/
public static void setupSecurityAndFilters(Configuration conf, RMDelegationTokenSecretManager rmDTSecretManager) {
boolean enableCorsFilter = conf.getBoolean(YarnConfiguration.RM_WEBAPP_ENABLE_CORS_FILTER, YarnConfiguration.DEFAULT_RM_WEBAPP_ENABLE_CORS_FILTER);
boolean useYarnAuthenticationFilter = conf.getBoolean(YarnConfiguration.RM_WEBAPP_DELEGATION_TOKEN_AUTH_FILTER, YarnConfiguration.DEFAULT_RM_WEBAPP_DELEGATION_TOKEN_AUTH_FILTER);
String authPrefix = "hadoop.http.authentication.";
String authTypeKey = authPrefix + "type";
String filterInitializerConfKey = "hadoop.http.filter.initializers";
String actualInitializers = "";
Class<?>[] initializersClasses = conf.getClasses(filterInitializerConfKey);
// setup CORS
if (enableCorsFilter) {
conf.setBoolean(HttpCrossOriginFilterInitializer.PREFIX + HttpCrossOriginFilterInitializer.ENABLED_SUFFIX, true);
}
boolean hasHadoopAuthFilterInitializer = false;
boolean hasRMAuthFilterInitializer = false;
if (initializersClasses != null) {
for (Class<?> initializer : initializersClasses) {
if (initializer.getName().equals(AuthenticationFilterInitializer.class.getName())) {
hasHadoopAuthFilterInitializer = true;
}
if (initializer.getName().equals(RMAuthenticationFilterInitializer.class.getName())) {
hasRMAuthFilterInitializer = true;
}
}
if (((UserGroupInformation.isSecurityEnabled() && useYarnAuthenticationFilter) && hasHadoopAuthFilterInitializer) && conf.get(authTypeKey, "").equals(KerberosAuthenticationHandler.TYPE)) {
ArrayList<String> target = new ArrayList<String>();
for (Class<?> filterInitializer : initializersClasses) {
if (filterInitializer.getName().equals(AuthenticationFilterInitializer.class.getName())) {
if (!hasRMAuthFilterInitializer) {
target.add(RMAuthenticationFilterInitializer.class.getName());
}
continue;
}
target.add(filterInitializer.getName());
}
target.remove(ProxyUserAuthenticationFilterInitializer.class.getName());
actualInitializers = StringUtils.join(",", target);
LOG.info("Using RM authentication filter(kerberos/delegation-token)" + " for RM webapp authentication");
RMAuthenticationFilter.setDelegationTokenSecretManager(rmDTSecretManager);
conf.set(filterInitializerConfKey, actualInitializers);
}
}
// if security is not enabled and the default filter initializer has not
// been set, set the initializer to include the
// RMAuthenticationFilterInitializer which in turn will set up the simple
// auth filter.
String initializers = conf.get(filterInitializerConfKey);
if (!UserGroupInformation.isSecurityEnabled()) {
if ((initializersClasses == null) || (initializersClasses.length == 0)) {
conf.set(filterInitializerConfKey, RMAuthenticationFilterInitializer.class.getName());
conf.set(authTypeKey, "simple");
} else if (initializers.equals(StaticUserWebFilter.class.getName())) {
conf.set(filterInitializerConfKey, (RMAuthenticationFilterInitializer.class.getName() + ",") + initializers);
conf.set(authTypeKey, "simple");
}
}
} | 3.26 |
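The method above keys off a handful of plain configuration properties; a minimal sketch of setting them is below. The filter-initializer class listed is one common choice and is used here only as an example.

import org.apache.hadoop.conf.Configuration;

public class RmWebFilterConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // The three knobs inspected above: filter initializers, HTTP auth type,
    // and the YARN override for the delegation-token filter.
    conf.set("hadoop.http.filter.initializers",
        "org.apache.hadoop.security.AuthenticationFilterInitializer");
    conf.set("hadoop.http.authentication.type", "kerberos");
    conf.setBoolean("yarn.resourcemanager.webapp.use-yarn-filter", true);
    System.out.println(conf.get("hadoop.http.filter.initializers"));
  }
}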
hadoop_CDFPiecewiseLinearRandomGenerator_valueAt_rdh | /**
* TODO This code assumes that the empirical minimum resp. maximum is the
 * epistemological minimum resp. maximum. This is probably okay for the
* minimum, because that likely represents a task where everything went well,
* but for the maximum we may want to develop a way of extrapolating past the
* maximum.
*/
@Override
public long valueAt(double probability) {
int v0 = floorIndex(probability);
double segmentProbMin = getRankingAt(v0);
double segmentProbMax = getRankingAt(v0 + 1);
long segmentMinValue = getDatumAt(v0);
long segmentMaxValue = getDatumAt(v0 + 1);
// If this is zero, this object is based on an ill-formed cdf
double v5 = segmentProbMax - segmentProbMin;
long segmentDatumRange = segmentMaxValue - segmentMinValue;
long result = ((long) (((probability - segmentProbMin) / v5) * segmentDatumRange)) + segmentMinValue;
return result;
} | 3.26
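A quick arithmetic check of the interpolation step above, using made-up segment boundaries (probabilities 0.25 to 0.50 mapping to values 100 to 200):

public class CdfInterpolationSketch {
  public static void main(String[] args) {
    double probability = 0.40;
    double segmentProbMin = 0.25, segmentProbMax = 0.50;
    long segmentMinValue = 100, segmentMaxValue = 200;
    double probRange = segmentProbMax - segmentProbMin;   // 0.25
    long datumRange = segmentMaxValue - segmentMinValue;  // 100
    long result = ((long) (((probability - segmentProbMin) / probRange) * datumRange)) + segmentMinValue;
    System.out.println(result);                           // prints 160
  }
}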
hadoop_BlobOperationDescriptor_getContentLengthIfKnown_rdh | /**
* Gets the content length for the Azure Storage operation, or returns zero if
* unknown.
*
* @param conn
* the connection object for the Azure Storage operation.
* @param operationType
* the Azure Storage operation type.
* @return the content length, or zero if unknown.
*/
static long getContentLengthIfKnown(HttpURLConnection conn, OperationType operationType) {
long contentLength = 0;
switch (operationType) {
case AppendBlock :
case PutBlock :
String lengthString = conn.getRequestProperty(HeaderConstants.CONTENT_LENGTH);
contentLength = (lengthString != null) ? Long.parseLong(lengthString) : 0;
break;
case PutPage :
case GetBlob :
contentLength = BlobOperationDescriptor.getContentLengthIfKnown(conn.getRequestProperty("x-ms-range"));
break;
default :
break;
}
return contentLength;
} | 3.26 |
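The String overload of getContentLengthIfKnown that parses "x-ms-range" is referenced above but not shown in this row; a hedged sketch of how such a header could be turned into a byte count, assuming the standard "bytes=<start>-<end>" inclusive format:

public class RangeHeaderSketch {
  static long lengthFromRange(String rangeHeader) {
    if (rangeHeader == null || !rangeHeader.startsWith("bytes=")) {
      return 0;
    }
    String[] bounds = rangeHeader.substring("bytes=".length()).split("-");
    long start = Long.parseLong(bounds[0]);
    long end = Long.parseLong(bounds[1]);
    return end - start + 1;                               // inclusive range
  }

  public static void main(String[] args) {
    System.out.println(lengthFromRange("bytes=0-1048575")); // 1048576
  }
}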
hadoop_BlobOperationDescriptor_getOperationType_rdh | /**
* Gets the operation type of an Azure Storage operation.
*
* @param conn
* the connection object for the Azure Storage operation.
* @return the operation type.
 */
static OperationType getOperationType(HttpURLConnection conn) {
OperationType operationType = OperationType.Unknown;
String method = conn.getRequestMethod();
String compValue = getQueryParameter(conn.getURL(), "comp");
if (method.equalsIgnoreCase("PUT")) {
if (compValue != null) {
switch (compValue) {
case "metadata" :
operationType = OperationType.SetMetadata;
break;
case "properties" :
operationType = OperationType.SetProperties;
break;
case "block" :
operationType = OperationType.PutBlock;
break;
case "page" :
String pageWrite = conn.getRequestProperty("x-ms-page-write");
if ((pageWrite != null) && pageWrite.equalsIgnoreCase("UPDATE")) {
operationType = OperationType.PutPage;
}
break;
case "appendblock" :
operationType = OperationType.AppendBlock;
break;
case "blocklist" :
operationType = OperationType.PutBlockList;
break;
default :
break;
}
} else {
String blobType = conn.getRequestProperty("x-ms-blob-type");
if ((blobType != null) && ((blobType.equalsIgnoreCase("PageBlob") || blobType.equalsIgnoreCase("BlockBlob")) || blobType.equalsIgnoreCase("AppendBlob"))) {
operationType = OperationType.CreateBlob;
} else if (blobType == null) {
String resType = getQueryParameter(conn.getURL(), "restype");
if ((resType != null) && resType.equalsIgnoreCase("container")) {
operationType = OperationType.CreateContainer;
}
}
}
} else if (method.equalsIgnoreCase("GET")) {
if (compValue != null) {
switch (compValue) {
case "list" :
operationType = OperationType.ListBlobs;
break;
case "metadata" :
operationType = OperationType.GetMetadata;
break;
case "blocklist" :
operationType = OperationType.GetBlockList;
break;
case "pagelist" :
operationType = OperationType.GetPageList;
break;
default :
break;
}
} else if (conn.getRequestProperty("x-ms-range") != null) {
operationType = OperationType.GetBlob;
}
} else if (method.equalsIgnoreCase("HEAD")) {
operationType = OperationType.GetProperties;
} else if (method.equalsIgnoreCase("DELETE")) {
String resType = getQueryParameter(conn.getURL(), "restype");
if ((resType != null) && resType.equalsIgnoreCase("container")) {
operationType = OperationType.DeleteContainer;
} else {
operationType = OperationType.DeleteBlob;
}
}
return operationType;
} | 3.26 |
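getQueryParameter is used throughout the method above but its body is not part of this row; a minimal hypothetical version, written only to illustrate the comp/restype lookups, might look like this.

import java.net.MalformedURLException;
import java.net.URL;

public class QueryParameterSketch {
  static String getQueryParameter(URL url, String name) {
    String query = url.getQuery();
    if (query == null) {
      return null;
    }
    for (String pair : query.split("&")) {
      String[] kv = pair.split("=", 2);
      if (kv[0].equalsIgnoreCase(name)) {
        return kv.length > 1 ? kv[1] : "";
      }
    }
    return null;
  }

  public static void main(String[] args) throws MalformedURLException {
    URL url = new URL("https://account.blob.core.windows.net/c/b?comp=block&blockid=abc");
    System.out.println(getQueryParameter(url, "comp"));   // block
  }
}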
hadoop_CredentialProviderListFactory_createAWSV2CredentialProvider_rdh | /**
* Create an AWS v2 credential provider from its class by using reflection.
*
* @param conf
* configuration
* @param className
* credential class name
* @param uri
* URI of the FS
* @param key
* configuration key to use
* @return the instantiated class
* @throws IOException
* on any instantiation failure.
* @see S3AUtils#getInstanceFromReflection
 */
private static AwsCredentialsProvider createAWSV2CredentialProvider(Configuration conf,
    String className, @Nullable URI uri, final String key) throws IOException {
LOG.debug("Credential provider class is {}", className);
return S3AUtils.getInstanceFromReflection(className, conf, uri, AwsCredentialsProvider.class, "create", key);
} | 3.26 |
hadoop_CredentialProviderListFactory_initCredentialProvidersMap_rdh | /**
* Maps V1 credential providers to either their equivalent SDK V2 class or hadoop provider.
*/
private static Map<String, String> initCredentialProvidersMap() {
Map<String, String> v1v2CredentialProviderMap = new HashMap<>();
v1v2CredentialProviderMap.put(ANONYMOUS_CREDENTIALS_V1, AnonymousAWSCredentialsProvider.NAME);
v1v2CredentialProviderMap.put(EC2_CONTAINER_CREDENTIALS_V1, EC2_IAM_CREDENTIALS_V2);
v1v2CredentialProviderMap.put(EC2_IAM_CREDENTIALS_V1, EC2_IAM_CREDENTIALS_V2);
v1v2CredentialProviderMap.put(ENVIRONMENT_CREDENTIALS_V1, ENVIRONMENT_CREDENTIALS_V2);
v1v2CredentialProviderMap.put(PROFILE_CREDENTIALS_V1, PROFILE_CREDENTIALS_V2);
return v1v2CredentialProviderMap;
} | 3.26 |
hadoop_CredentialProviderListFactory_buildAWSProviderList_rdh | /**
* Load list of AWS credential provider/credential provider factory classes;
* support a forbidden list to prevent loops, mandate full secrets, etc.
*
* @param binding
* Binding URI -may be null
* @param conf
* configuration
* @param key
* configuration key to use
* @param forbidden
* a possibly empty set of forbidden classes.
* @param defaultValues
* list of default providers.
* @return the list of classes, possibly empty
* @throws IOException
* on a failure to load the list.
*/
public static AWSCredentialProviderList buildAWSProviderList(@Nullable final URI binding, final Configuration conf, final String key, final List<Class<?>> defaultValues, final Set<Class<?>> forbidden) throws IOException {
// build up the base provider
Collection<String> awsClasses = loadAWSProviderClasses(conf, key, defaultValues.toArray(new Class[defaultValues.size()]));
Map<String, String> v1v2CredentialProviderMap = f0;
final Set<String> forbiddenClassnames = forbidden.stream().map(c -> c.getName()).collect(Collectors.toSet());
// iterate through, checking for forbidden values and then instantiating
// each provider
AWSCredentialProviderList providers = new AWSCredentialProviderList();
for (String className : awsClasses) {
if (v1v2CredentialProviderMap.containsKey(className)) {
// mapping
final String mapped = v1v2CredentialProviderMap.get(className);
LOG_REMAPPED_ENTRY.warn("Credentials option {} contains AWS v1 SDK entry {}; mapping to {}", key, className, mapped);
className = mapped;
}
// now scan the forbidden list. doing this after any mappings ensures the v1 names
// are also blocked
if (forbiddenClassnames.contains(className)) {
throw new InstantiationIOException(Kind.Forbidden, binding, className, key, E_FORBIDDEN_AWS_PROVIDER, null);
}
AwsCredentialsProvider provider;
try {
provider = createAWSV2CredentialProvider(conf, className, binding, key);
} catch (InstantiationIOException e) {
// failed to create a v2; try to see if it is a v1
if (e.getKind() == Kind.IsNotImplementation) {
if (isAwsV1SdkAvailable()) {
// try to create v1
LOG.debug("Failed to create {} as v2 credentials, trying to instantiate as v1", className);
try {
provider = AwsV1BindingSupport.createAWSV1CredentialProvider(conf, className, binding, key);
LOG_REMAPPED_ENTRY.warn("Credentials option {} contains AWS v1 SDK entry {}", key, className);} catch (InstantiationIOException ex) {
// if it is something other than non-implementation, throw.
// that way, non-impl messages are about v2 not v1 in the error
if (ex.getKind() != Kind.IsNotImplementation) {
throw ex;
} else {
throw e;
}
}
} else {
LOG.warn(("Failed to instantiate {} as AWS v2 SDK credential provider;" + " AWS V1 SDK is not on the classpth so unable to attempt to") + " instantiate as a v1 provider", className, e);
throw e;
}
} else {
// any other problem
throw e;
}
LOG.debug("From provider class {} created Aws provider {}", className, provider);
}
providers.add(provider);
}
return providers;
} | 3.26 |
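For context on the provider list being assembled above, a sketch of how that list is usually supplied through configuration. The key fs.s3a.aws.credentials.provider is the standard S3A option; the two class names are examples of a v2-SDK provider and a Hadoop-shipped provider, not values taken from the row, and a v1 name such as com.amazonaws.auth.EnvironmentVariableCredentialsProvider in the same list would be remapped with a warning by the logic above.

import org.apache.hadoop.conf.Configuration;

public class S3ACredentialProviderConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("fs.s3a.aws.credentials.provider",
        "software.amazon.awssdk.auth.credentials.EnvironmentVariableCredentialsProvider,"
            + "org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider");
    System.out.println(conf.get("fs.s3a.aws.credentials.provider"));
  }
}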
hadoop_FederationStateStoreHeartbeat_updateClusterState_rdh | /**
* Get the current cluster state as a JSON string representation of the
* {@link ClusterMetricsInfo}.
*/
private void updateClusterState() {
try {
// get the current state
currentClusterState.getBuffer().setLength(0);
ClusterMetricsInfo clusterMetricsInfo = new ClusterMetricsInfo(rs);
marshaller.marshallToJSON(clusterMetricsInfo, currentClusterState);
capability = currentClusterState.toString();
} catch (Exception e) {
LOG.warn("Exception while trying to generate cluster state," + " so reverting to last know state.", e);
}
} | 3.26 |
hadoop_TimelineMetricOperation_m0_rdh | /**
* Replace the base metric with the incoming value. Stateless operation.
*
* @param incoming
* Metric a
* @param base
* Metric b
* @param state
* Operation state (not used)
* @return Metric a
*/
@Override
public TimelineMetric m0(TimelineMetric incoming, TimelineMetric base, Map<Object, Object> state) {
return incoming;
} | 3.26 |
hadoop_TimelineMetricOperation_exec_rdh | /**
* Return the average value of the incoming metric and the base metric,
* with a given state. Not supported yet.
*
* @param incoming
* Metric a
* @param base
* Metric b
* @param state
* Operation state
* @return Not finished yet
*/
@Override
public TimelineMetric exec(TimelineMetric incoming, TimelineMetric base, Map<Object, Object> state) {
// Not supported yet
throw new UnsupportedOperationException("Unsupported aggregation operation: AVERAGE");
} | 3.26 |