name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_BCFile_close_rdh | /**
* Finish reading the BCFile. Release all resources.
*/
@Override
public void close() {
// nothing to be done now
} | 3.26 |
hadoop_BCFile_getOutputStream_rdh | /**
* Get the output stream for BlockAppender's consumption.
*
* @return the output stream suitable for writing block data.
*/
OutputStream getOutputStream() {
return out;
} | 3.26 |
hadoop_BCFile_getCompressionName_rdh | /**
* Get the name of the compression algorithm used to compress the block.
*
* @return name of the compression algorithm.
*/
public String getCompressionName() {
return rBlkState.getCompressionName();
} | 3.26 |
hadoop_BCFile_getDefaultCompressionName_rdh | /**
* Get the name of the default compression algorithm.
*
* @return the name of the default compression algorithm.
*/
public String getDefaultCompressionName() {
return dataIndex.getDefaultCompressionAlgorithm().getName();
} | 3.26 |
hadoop_BCFile_getCompressedSize_rdh | /**
* Get the compressed size of the block.
*
* @return compressed size of the block.
*/
public long getCompressedSize() {
return rBlkState.getBlockRegion().getCompressedSize();
} | 3.26 |
hadoop_BCFile_getBCFileVersion_rdh | /**
* Get version of BCFile file being read.
*
* @return version of BCFile file being read.
*/
public Version getBCFileVersion() {
return version;
} | 3.26 |
hadoop_BalanceJournalInfoHDFS_saveJob_rdh | /**
* Save job journal to HDFS.
*
* All the journals are saved in the path base-dir. Each job has an individual
* directory named after the job id.
* When a job is saved, a new journal file is created. The file's name
* consists of a prefix 'JOB-' and an incremental sequential id. The file with
* the largest id is the latest journal of this job.
*
* Layout:
* base-dir/
* /job-3f1da5e5-2a60-48de-8736-418d134edbe9/
* /JOB-0
* /JOB-3
* /JOB-5
* /job-ebc19478-2324-46c2-8d1a-2f8c4391dc09/
* /JOB-1
* /JOB-2
* /JOB-4
*/
public void saveJob(BalanceJob job) throws IOException {
Path jobFile = getNewStateJobPath(job);
Path tmpJobFile = new Path(jobFile + TMP_TAIL);
FSDataOutputStream out = null;
try {
FileSystem fs = FileSystem.get(f0, conf);
out = fs.create(tmpJobFile);
job.write(new DataOutputStream(out));
out.close();
out = null;
fs.rename(tmpJobFile, jobFile);
} finally {
IOUtils.closeStream(out);
}
LOG.debug("Save journal of job={}", job);
} | 3.26 |
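The Javadoc above describes the journal layout (one directory per job, `JOB-<n>` files where the largest sequence number is the latest) and a write-to-temp-then-rename save. Below is a minimal standalone sketch of that pattern using `java.nio` on a local directory rather than an HDFS `FileSystem`; the class and constant names (`JournalSketch`, `JOB_PREFIX`) are illustrative assumptions, not Hadoop code.

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class JournalSketch {
  private static final String JOB_PREFIX = "JOB-";

  // Write the next journal entry for a job: write a temp file first, then
  // rename it over the final name so readers only ever see complete files.
  static Path saveJournal(Path jobDir, long nextId, byte[] payload) throws IOException {
    Files.createDirectories(jobDir);
    Path target = jobDir.resolve(JOB_PREFIX + nextId);
    Path tmp = jobDir.resolve(JOB_PREFIX + nextId + ".tmp");
    Files.write(tmp, payload);
    return Files.move(tmp, target, StandardCopyOption.ATOMIC_MOVE);
  }

  public static void main(String[] args) throws IOException {
    Path jobDir = Files.createTempDirectory("journal").resolve("job-0001");
    saveJournal(jobDir, 0, "state-0".getBytes());
    saveJournal(jobDir, 3, "state-3".getBytes());
    // the file with the largest id is the latest journal of this job
    try (Stream<Path> entries = Files.list(jobDir)) {
      System.out.println(entries.sorted().collect(Collectors.toList()));
    }
  }
}
```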
hadoop_BalanceJournalInfoHDFS_recoverJob_rdh | /**
* Recover job from journal on HDFS.
*/
public void recoverJob(BalanceJob job) throws IOException {
FSDataInputStream in = null;
try {
Path logPath = getLatestStateJobPath(job);
FileSystem fs = FileSystem.get(f0, conf);
in = fs.open(logPath);
job.readFields(in);
LOG.debug("Recover job={} from journal.", job);
} finally {
if (in != null) {
in.close();
}
}
} | 3.26 |
hadoop_RMFatalEvent_getExplanation_rdh | /**
* Get a text description of the reason for the event. If a cause was given, that
* {@link Exception} will be converted to a {@link String} and included in
* the result.
*
* @return a text description of the reason for the event
*/
public String getExplanation() {
StringBuilder sb = new StringBuilder();
if (message != null) {
sb.append(message);
if (cause != null) {
sb.append(": ");
}
}
if (cause != null) {
sb.append(StringUtils.stringifyException(cause));
}
return sb.toString();
} | 3.26 |
hadoop_ManifestCommitter_maybeSaveSummary_rdh | /**
* Save a summary to the report dir if the config option
* is set.
* The IOStatistics of the summary will be updated to the latest
* snapshot of the committer's statistics, so the report is up
* to date.
* The report will be updated with the current active stage,
* and if {@code thrown} is non-null, it will be added to the
* diagnostics (and the job tagged as a failure).
* Static for testability.
*
* @param activeStage
* active stage
* @param config
* configuration to use.
* @param report
* summary file.
* @param thrown
* any exception indicating failure.
* @param quiet
* should exceptions be swallowed.
* @param overwrite
* should the existing file be overwritten
* @return the path of a file, if successfully saved
* @throws IOException
* if a failure occurred and quiet==false
*/
private static Path maybeSaveSummary(String activeStage, ManifestCommitterConfig config, ManifestSuccessData report,
Throwable thrown, boolean quiet, boolean overwrite) throws IOException {
Configuration conf = config.getConf();
String reportDir = conf.getTrimmed(OPT_SUMMARY_REPORT_DIR, "");
if (reportDir.isEmpty()) {
LOG.debug("No summary directory set in " + OPT_SUMMARY_REPORT_DIR);
return null;
}
LOG.debug("Summary directory set in to {}" + OPT_SUMMARY_REPORT_DIR, reportDir);
// update to the latest statistics
report.snapshotIOStatistics(config.getIOStatistics());
Path reportDirPath = new Path(reportDir);
Path path = new Path(reportDirPath, createJobSummaryFilename(config.getJobUniqueId()));
if (thrown != null) {
report.recordJobFailure(thrown);
}
report.putDiagnostic(STAGE, activeStage);
// the store operations here is explicitly created for the FS where
// the reports go, which may not be the target FS of the job.
final FileSystem fs = path.getFileSystem(conf);
try (ManifestStoreOperations operations = new ManifestStoreOperationsThroughFileSystem(fs)) {
if (!overwrite) {
// check for file existence so there is no need to worry about
// precisely what exception is raised when overwrite=false and dest file
// exists
try {
FileStatus v30 = operations.getFileStatus(path);
// get here and the file exists
LOG.debug("Report already exists: {}", v30);
return null;
} catch (FileNotFoundException ignored) {
}
}
operations.save(report, path, overwrite);
LOG.info("Job summary saved to {}", path); return path;
} catch (IOException e) {
LOG.debug("Failed to save summary to {}", path, e);
if (quiet) {
return null;
} else {
throw e;
}
}
} | 3.26 |
hadoop_ManifestCommitter_getSuccessReport_rdh | /**
* Get the manifest Success data; only valid after a job.
*
* @return the job _SUCCESS data, or null.
*/
public ManifestSuccessData getSuccessReport() {
return successReport;
} | 3.26 |
hadoop_ManifestCommitter_resolveDestinationDirectory_rdh | /**
* Get the final output path, including resolving any relative path.
*
* @param outputPath
* output path
* @param conf
* configuration to create any FS with
* @return a resolved path.
* @throws IOException
* failure.
*/
private Path resolveDestinationDirectory(Path outputPath, Configuration conf) throws IOException {
return FileSystem.get(outputPath.toUri(), conf).makeQualified(outputPath);
} | 3.26 |
hadoop_ManifestCommitter_m1_rdh | /**
* Create manifest store operations for the destination store.
* This MUST NOT be used for the success report operations, as
* they may be to a different filesystem.
* This is a point which can be overridden during testing.
*
* @return a new store operations instance bonded to the destination fs.
* @throws IOException
* failure to instantiate.
*/
protected ManifestStoreOperations m1() throws IOException {
return ManifestCommitterSupport.createManifestStoreOperations(baseConfig.getConf(), baseConfig.getDestinationFileSystem(), baseConfig.getDestinationDir());
} | 3.26 |
hadoop_ManifestCommitter_isRecoverySupported_rdh | /**
* Declare that task recovery is not supported.
* It would be, if someone added the code *and tests*.
*
* @param jobContext
* Context of the job whose output is being written.
* @return false, always
* @throws IOException
* never
*/
@Override
public boolean isRecoverySupported(final JobContext jobContext) throws IOException {
LOG.info("Probe for isRecoverySupported({}): returning false", jobContext.getJobID());
return false;
} | 3.26 |
hadoop_ManifestCommitter_getJobUniqueId_rdh | /**
* Get the unique ID of this job.
*
* @return job ID (yarn, spark)
*/
public String getJobUniqueId() {
return baseConfig.getJobUniqueId();
} | 3.26 |
hadoop_ManifestCommitter_commitTask_rdh | /**
* Commit the task.
* This is where the listing of the task attempt directory tree takes place.
*
* @param context
* task context.
* @throws IOException
* IO Failure.
*/
@Override
public void commitTask(final TaskAttemptContext context) throws IOException {
ManifestCommitterConfig committerConfig = enterCommitter(true, context);
try {
StageConfig stageConfig = committerConfig.createStageConfig().withOperations(m1()).build();
taskAttemptCommittedManifest = new CommitTaskStage(stageConfig).apply(null).getTaskManifest();
iostatistics.incrementCounter(COMMITTER_TASKS_COMPLETED_COUNT, 1);
} catch (IOException e) {
iostatistics.incrementCounter(COMMITTER_TASKS_FAILED_COUNT, 1);
throw e;
} finally {
logCommitterStatisticsAtDebug();
updateCommonContextOnCommitterExit();
}
} | 3.26 |
hadoop_ManifestCommitter_exitStage_rdh | /**
* Remove stage from common audit context.
*
* @param stage
* stage exited.
*/
@Override
public void exitStage(String stage) {
AuditingIntegration.exitStage();
} | 3.26 |
hadoop_ManifestCommitter_hasCapability_rdh | /**
* The committer is compatible with spark's dynamic partitioning
* algorithm.
*
* @param capability
* string to query the stream support for.
* @return true if the requested capability is supported.
*/
@Override
public boolean hasCapability(final String capability) {
return CAPABILITY_DYNAMIC_PARTITIONING.equals(capability);
} | 3.26 |
hadoop_ManifestCommitter_recoverTask_rdh | /**
* Task recovery is not supported: this method always fails.
*
* @param taskContext
* Context of the task whose output is being recovered
* @throws IOException
* always
*/
@Override
public void recoverTask(final TaskAttemptContext taskContext) throws IOException {
LOG.warn("Rejecting recoverTask({}) call", taskContext.getTaskAttemptID());
throw new IOException("Cannot recover task " + taskContext.getTaskAttemptID());
} | 3.26 |
hadoop_ManifestCommitter_getOrCreateSuccessData_rdh | /**
* Get the manifest success data for this job; creating on demand if needed.
*
* @param committerConfig
* source config.
* @return the current {@link #successReport} value; never null.
*/
private ManifestSuccessData getOrCreateSuccessData(ManifestCommitterConfig committerConfig) {
if (successReport == null) {
successReport = createManifestOutcome(committerConfig.createStageConfig(), activeStage);
}
return successReport;
} | 3.26 |
hadoop_ManifestCommitter_logCommitterStatisticsAtDebug_rdh | /**
* Log IO Statistics at debug.
*/
private void logCommitterStatisticsAtDebug() {
logIOStatisticsAtDebug(LOG, "Committer Statistics", this);
} | 3.26 |
hadoop_ManifestCommitter_executeCleanup_rdh | /**
* Perform the cleanup operation for job cleanup or abort.
*
* @param statisticName
* statistic/stage name
* @param jobContext
* job context
* @param committerConfig
* committer config
* @throws IOException
* failure
* @return the outcome
*/
private Result executeCleanup(final String statisticName, final JobContext jobContext, final ManifestCommitterConfig committerConfig) throws IOException {
try (CloseableTaskPoolSubmitter ioProcs = committerConfig.createSubmitter()) {
return new CleanupJobStage(committerConfig.createStageConfig().withOperations(m1()).withIOProcessors(ioProcs).build()).apply(cleanupStageOptionsFromConfig(statisticName, jobContext.getConfiguration()));
}
} | 3.26 |
hadoop_ManifestCommitter_isCommitJobRepeatable_rdh | /**
* A failure during job commit cannot be recovered from.
*
* @param jobContext
* Context of the job whose output is being written.
* @return false, always
* @throws IOException
* never
*/
@Override
public boolean isCommitJobRepeatable(final JobContext jobContext) throws IOException {
LOG.info("Probe for isCommitJobRepeatable({}): returning false", jobContext.getJobID());
return false;
} | 3.26 |
hadoop_ManifestCommitter_getJobAttemptPath_rdh | /**
* Compute the path where the output of a task attempt is stored until
* that task is committed.
*
* @param context
* the context of the task attempt.
* @return the path where a task attempt should be stored.
*/
@VisibleForTesting
public Path getJobAttemptPath(JobContext context) {
return enterCommitter(false, context).getJobAttemptDir();
} | 3.26 |
hadoop_ManifestCommitter_abortJob_rdh | /**
* Abort the job.
* Invokes
* {@link #executeCleanup(String, JobContext, ManifestCommitterConfig)}
* then saves the (ongoing) job report data if reporting is enabled.
*
* @param jobContext
* Context of the job whose output is being written.
* @param state
* final runstate of the job
* @throws IOException
* failure during cleanup; report failures are swallowed
*/
@Override
public void abortJob(final JobContext jobContext, final JobStatus.State state) throws IOException {
LOG.info("Aborting Job {} in state {}", jobContext.getJobID(), state);
ManifestCommitterConfig committerConfig = enterCommitter(false, jobContext);
ManifestSuccessData report = getOrCreateSuccessData(committerConfig);
IOException failure = null;
try {
executeCleanup(OP_STAGE_JOB_ABORT, jobContext, committerConfig);
} catch (IOException e) {
// failure.
failure = e;
}
report.setSuccess(false);
// job abort does not overwrite any existing report, so a job commit
// failure cause will be preserved.
maybeSaveSummary(activeStage, committerConfig, report, failure, true, false);
// print job stats
LOG.info("Job Abort statistics {}", ioStatisticsToPrettyString(iostatistics));
updateCommonContextOnCommitterExit();
} | 3.26 |
hadoop_ManifestCommitter_getConf_rdh | /**
* Get the config of the task attempt this instance was constructed
* with.
*
* @return a configuration.
*/
public Configuration getConf() {
return baseConfig.getConf();
} | 3.26 |
hadoop_ManifestCommitter_getTaskAttemptDir_rdh | /**
* Get the task attempt dir.
* May be null.
*
* @return a path or null.
*/
private Path getTaskAttemptDir() {
return taskAttemptDir;
}
/**
* Callback on stage entry.
* Sets {@link #activeStage} | 3.26 |
hadoop_ManifestCommitter_commitJob_rdh | /**
* This is the big job commit stage.
* Load the manifests, prepare the destination, rename
* the files then cleanup the job directory.
*
* @param jobContext
* Context of the job whose output is being written.
* @throws IOException
* failure.
*/
@Override
public void commitJob(final JobContext jobContext) throws IOException {
ManifestCommitterConfig committerConfig = enterCommitter(false, jobContext);
// create the initial success data.
// this is overwritten by that created during the operation sequence,
// but if the sequence fails before that happens, it
// will be saved to the report directory.
ManifestSuccessData marker = getOrCreateSuccessData(committerConfig);
IOException failure = null;
try (CloseableTaskPoolSubmitter v11 = committerConfig.createSubmitter();
ManifestStoreOperations storeOperations = m1()) {
// the stage config will be shared across all stages.
StageConfig stageConfig = committerConfig.createStageConfig().withOperations(storeOperations).withIOProcessors(v11).build();
// commit the job, including any cleanup and validation.
final Configuration conf = jobContext.getConfiguration();
CommitJobStage.Result result = new CommitJobStage(stageConfig).apply(new CommitJobStage.Arguments(committerConfig.getCreateJobMarker(), committerConfig.getValidateOutput(), conf.getTrimmed(OPT_DIAGNOSTICS_MANIFEST_DIR, ""), cleanupStageOptionsFromConfig(OP_STAGE_JOB_CLEANUP, conf)));
marker = result.getJobSuccessData();
// update the cached success with the new report.
setSuccessReport(marker);
// patch in the #of threads as it is useful
marker.putDiagnostic(OPT_IO_PROCESSORS, conf.get(OPT_IO_PROCESSORS, Long.toString(OPT_IO_PROCESSORS_DEFAULT)));
} catch (IOException e) {
// failure. record it for the summary
failure = e;
// rethrow
throw e;
} finally {
// save the report summary, even on failure
maybeSaveSummary(activeStage, committerConfig, marker, failure, true, true);
// print job commit stats
LOG.info("{}: Job Commit statistics {}", committerConfig.getName(), ioStatisticsToPrettyString(iostatistics));
// and warn of rename problems
final Long v16 = iostatistics.counters().get(OP_COMMIT_FILE_RENAME_RECOVERED);
if ((v16 != null) && (v16 > 0)) {
LOG.warn("{}: rename failures were recovered from. Number of recoveries: {}", committerConfig.getName(), v16);
}
updateCommonContextOnCommitterExit();
}
} | 3.26 |
hadoop_ManifestCommitter_enterCommitter_rdh | /**
* Committer method invoked; generates a config for it.
* Calls {@code #updateCommonContextOnCommitterEntry()}
* to update the audit context.
*
* @param isTask
* is this a task entry point?
* @param context
* context
* @return committer config
*/
private ManifestCommitterConfig enterCommitter(boolean isTask, JobContext context) {
ManifestCommitterConfig committerConfig = new ManifestCommitterConfig(getOutputPath(), isTask ? TASK_COMMITTER : JOB_COMMITTER, context, iostatistics, this);
updateCommonContextOnCommitterEntry(committerConfig);
return committerConfig;
}
/**
* Set up a job through a {@link SetupJobStage} | 3.26 |
hadoop_ManifestCommitter_abortTask_rdh | /**
* Abort a task.
*
* @param context
* task context
* @throws IOException
* failure during the delete
*/
@Override
public void abortTask(final TaskAttemptContext context) throws IOException {
ManifestCommitterConfig committerConfig = enterCommitter(true, context);
try {
new AbortTaskStage(committerConfig.createStageConfig().withOperations(m1()).build()).apply(false);
} finally {
logCommitterStatisticsAtDebug();
updateCommonContextOnCommitterExit();
}
} | 3.26 |
hadoop_ManifestCommitter_setupTask_rdh | /**
* Set up a task through a {@link SetupTaskStage}.
* Classic FileOutputCommitter is a no-op here, relying
* on RecordWriters to create the dir implicitly on file
* create().
* FileOutputCommitter also uses the existence of that
* file as a flag to indicate task commit is needed.
*
* @param context
* task context.
* @throws IOException
* IO Failure.
*/
@Override
public void setupTask(final TaskAttemptContext context) throws IOException {
ManifestCommitterConfig committerConfig = enterCommitter(true, context);
StageConfig stageConfig = committerConfig.createStageConfig().withOperations(m1()).build();
// create task attempt dir; delete if present. Or fail?
new SetupTaskStage(stageConfig).apply("");
logCommitterStatisticsAtDebug();
} | 3.26 |
hadoop_ManifestCommitter_getWorkPath_rdh | /**
* Work path of the current task attempt.
* This is null if the task does not have one.
*
* @return a path.
*/
@Override
public Path getWorkPath() {
return getTaskAttemptDir();
} | 3.26 |
hadoop_ManifestCommitter_getOutputPath_rdh | /**
* Output path: destination directory of the job.
*
* @return the overall job destination directory.
*/
@Override
public Path getOutputPath() {
return getDestinationDir();
} | 3.26 |
hadoop_ManifestCommitter_cleanupJob_rdh | /**
* Execute the {@code CleanupJobStage} to remove the job attempt dir.
*
* @param jobContext
* Context of the job whose output is being written.
* @throws IOException
* failure during cleanup
*/
@SuppressWarnings("deprecation")
@Override
public void cleanupJob(final JobContext jobContext) throws IOException {
ManifestCommitterConfig committerConfig = enterCommitter(false, jobContext);
try {
executeCleanup(OP_STAGE_JOB_CLEANUP, jobContext, committerConfig);
} finally {
logCommitterStatisticsAtDebug();
updateCommonContextOnCommitterExit();
}
} | 3.26 |
hadoop_ManifestCommitter_getDestinationDir_rdh | /**
* Get the job destination dir.
*
* @return dest dir.
*/
private Path getDestinationDir() {
return destinationDir;
} | 3.26 |
hadoop_ManifestCommitter_needsTaskCommit_rdh | /**
* Always return true.
* This way, even if there is no output, stats are collected.
*
* @param context
* task context.
* @return true
* @throws IOException
* IO Failure.
*/
@Override
public boolean needsTaskCommit(final TaskAttemptContext context) throws IOException {
LOG.info("Probe for needsTaskCommit({})", context.getTaskAttemptID());
return true;
} | 3.26 |
hadoop_ManifestCommitter_getTaskAttemptPath_rdh | /**
* Compute the path where the output of a task attempt is stored until
* that task is committed.
*
* @param context
* the context of the task attempt.
* @return the path where a task attempt should be stored.
*/
@VisibleForTesting
public Path getTaskAttemptPath(TaskAttemptContext context) {
return enterCommitter(false, context).getTaskAttemptDir();
} | 3.26 |
hadoop_ExponentialRetryPolicy_getRetryInterval_rdh | /**
* Returns a backoff interval between 80% and 120% of the desired backoff,
* multiplied by 2^(n-1) for exponential growth.
*
* @param retryCount
* The current retry attempt count.
* @return backoff Interval time
*/
public long getRetryInterval(final int retryCount) {
final long boundedRandDelta = ((int) (this.f0 * MIN_RANDOM_RATIO)) + this.randRef.nextInt(((int) (this.f0 * MAX_RANDOM_RATIO)) - ((int) (this.f0 * MIN_RANDOM_RATIO)));
final double incrementDelta = Math.pow(2, retryCount - 1) * boundedRandDelta;
final long retryInterval = ((int) (Math.round(Math.min(this.minBackoff + incrementDelta, maxBackoff))));
return retryInterval;
} | 3.26 |
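A standalone sketch of the documented backoff formula: a random delta between 80% and 120% of the configured backoff step, scaled by 2^(retryCount-1), added to the minimum backoff and capped at the maximum. The field names and default values below are assumptions for illustration, not the Hadoop `ExponentialRetryPolicy` itself.

```java
import java.util.Random;

public class BackoffSketch {
  private static final double MIN_RANDOM_RATIO = 0.8;
  private static final double MAX_RANDOM_RATIO = 1.2;
  private final Random rand = new Random();
  private final long minBackoff = 500;      // ms (assumed default)
  private final long maxBackoff = 30_000;   // ms (assumed default)
  private final long deltaBackoff = 500;    // ms, the "desired" backoff step

  long retryInterval(int retryCount) {
    long low = (long) (deltaBackoff * MIN_RANDOM_RATIO);
    long high = (long) (deltaBackoff * MAX_RANDOM_RATIO);
    // random delta in [80%, 120%) of the backoff step
    long boundedRandDelta = low + rand.nextInt((int) (high - low));
    // exponential growth with the retry count, capped at maxBackoff
    double incrementDelta = Math.pow(2, retryCount - 1) * boundedRandDelta;
    return Math.round(Math.min(minBackoff + incrementDelta, maxBackoff));
  }

  public static void main(String[] args) {
    BackoffSketch b = new BackoffSketch();
    for (int i = 1; i <= 6; i++) {
      System.out.println("retry " + i + " -> " + b.retryInterval(i) + " ms");
    }
  }
}
```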
hadoop_ExponentialRetryPolicy_shouldRetry_rdh | /**
* Returns whether a request should be retried based on the retry count, the current
* response, and the current strategy. Valid HTTP status codes lie in the range 1xx-5xx,
* but an invalid status code may be set due to network or timeout issues; such invalid
* status codes also qualify for retry.
*
* @param retryCount
* The current retry attempt count.
* @param statusCode
* The status code of the response, or -1 for socket error.
* @return true if the request should be retried; false otherwise.
*/
public boolean shouldRetry(final int retryCount, final int statusCode) {
return (retryCount < this.retryCount)
&& (((statusCode < HTTP_CONTINUE) || (statusCode == HttpURLConnection.HTTP_CLIENT_TIMEOUT))
|| (((statusCode >= HttpURLConnection.HTTP_INTERNAL_ERROR) && (statusCode != HttpURLConnection.HTTP_NOT_IMPLEMENTED)) && (statusCode != HttpURLConnection.HTTP_VERSION)));
} | 3.26 |
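A minimal sketch of the retry rule described above, written as a free-standing method: retry while attempts remain and the status code is a socket-level failure (below 100, including -1), 408 Request Timeout, or a 5xx other than 501 and 505. This mirrors the documented behaviour but is not the Hadoop class.

```java
public class RetryRuleSketch {
  static boolean shouldRetry(int retryCount, int maxRetries, int statusCode) {
    boolean retryable = statusCode < 100                       // invalid / socket-level failure
        || statusCode == 408                                   // request timeout
        || (statusCode >= 500 && statusCode != 501 && statusCode != 505);
    return retryCount < maxRetries && retryable;
  }

  public static void main(String[] args) {
    int[] codes = {-1, 200, 404, 408, 500, 501, 503, 505};
    for (int c : codes) {
      System.out.println(c + " -> " + shouldRetry(1, 3, c));
    }
  }
}
```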
hadoop_InverseMapper_map_rdh | /**
* The inverse function. Input keys and values are swapped.
*/
public void map(K key, V value, OutputCollector<V, K> output, Reporter reporter) throws IOException {
output.collect(value, key);
} | 3.26 |
hadoop_IteratorSelector_getPartition_rdh | /**
* The partition for this iterator selector.
*
* @return partition
*/
public String getPartition() {
return this.partition;
} | 3.26 |
hadoop_IteratorSelector_setPartition_rdh | /**
* Set partition for this iterator selector.
*
* @param p
* partition
*/
public void setPartition(String p) {
this.partition = p;
} | 3.26 |
hadoop_DelegationTokenIdentifier_stringifyToken_rdh | /**
*
* @return a string representation of the token
*/
public static String stringifyToken(final Token<?> token) throws IOException {
DelegationTokenIdentifier ident = new DelegationTokenIdentifier();
ByteArrayInputStream v4 = new ByteArrayInputStream(token.getIdentifier());
DataInputStream in = new DataInputStream(v4);
ident.readFields(in);
if (token.getService().getLength() > 0) {
return (ident + " on ") + token.getService();
} else {
return ident.toString();
}
} | 3.26 |
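The snippet above relies on the Writable pattern: wrap the token's raw identifier bytes in a `ByteArrayInputStream`/`DataInputStream` and read the fields back. A generic sketch of that round trip, with made-up fields rather than Hadoop's `DelegationTokenIdentifier`:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class IdentifierRoundTrip {
  // Serialize a simple record the same way a Writable would: field by field.
  static byte[] write(String owner, long issueDate) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);
    out.writeUTF(owner);
    out.writeLong(issueDate);
    out.flush();
    return buf.toByteArray();
  }

  public static void main(String[] args) throws IOException {
    byte[] raw = write("alice", 1_700_000_000_000L);
    // reconstruct by wrapping the raw bytes, mirroring stringifyToken's approach
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(raw));
    System.out.println("owner=" + in.readUTF() + " issued=" + in.readLong());
  }
}
```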
hadoop_HostsFileReader_getHostDetails_rdh | /**
* Retrieve an atomic view of the included and excluded hosts.
*
* @return the included and excluded hosts
*/
public HostDetails getHostDetails() {
return current.get();
} | 3.26 |
hadoop_DatanodeVolumeInfo_getFreeSpace_rdh | /**
* get free space.
*/
public long getFreeSpace() {
return freeSpace;
} | 3.26 |
hadoop_DatanodeVolumeInfo_getDatanodeVolumeReport_rdh | /**
* get volume report.
*/
public String getDatanodeVolumeReport() {
StringBuilder report = new StringBuilder();
report.append("Directory: " + path).append("\nStorageType: " + storageType).append(((("\nCapacity Used: " + usedSpace) + "(") + StringUtils.byteDesc(usedSpace)) + ")").append(((("\nCapacity Left: " + freeSpace) + "(") + StringUtils.byteDesc(freeSpace)) + ")").append(((("\nCapacity Reserved: " + reservedSpace) + "(") + StringUtils.byteDesc(reservedSpace)) + ")").append(((("\nReserved Space for Replicas: " + reservedSpaceForReplicas) + "(") + StringUtils.byteDesc(reservedSpaceForReplicas)) + ")").append("\nBlocks: " + numBlocks);
return report.toString();
} | 3.26 |
hadoop_DatanodeVolumeInfo_getUsedSpace_rdh | /**
* get used space.
*/
public long getUsedSpace() {
return usedSpace;
} | 3.26 |
hadoop_DatanodeVolumeInfo_m0_rdh | /**
* get reserved space for replicas.
*/
public long m0() {
return reservedSpaceForReplicas;
} | 3.26 |
hadoop_DatanodeVolumeInfo_getNumBlocks_rdh | /**
* get number of blocks.
*/
public long getNumBlocks() {
return numBlocks;
} | 3.26 |
hadoop_DatanodeVolumeInfo_getPath_rdh | /**
* get volume path.
*/
public String getPath() {
return path;
} | 3.26 |
hadoop_DatanodeVolumeInfo_getReservedSpace_rdh | /**
* get reserved space.
*/
public long getReservedSpace() {
return reservedSpace;
} | 3.26 |
hadoop_DatanodeVolumeInfo_getStorageType_rdh | /**
* get storage type.
*/
public StorageType getStorageType() {
return storageType;
} | 3.26 |
hadoop_AuxServiceConfiguration_properties_rdh | /**
* A blob of key-value pairs of common service properties.
*/
public AuxServiceConfiguration properties(Map<String, String> props) {
this.properties = props;
return this;
} | 3.26 |
hadoop_AuxServiceConfiguration_toIndentedString_rdh | /**
* Convert the given object to string with each line indented by 4 spaces
* (except the first line).
*/
private String toIndentedString(Object o) {
if (o == null) {
return "null";
}
return o.toString().replace("\n", "\n ");
} | 3.26 |
hadoop_AuxServiceConfiguration_files_rdh | /**
* A list of files that need to be created and made available as
* volumes in the service component containers.
*/
public AuxServiceConfiguration files(List<AuxServiceFile> fileList) {
this.files = fileList;
return this;
} | 3.26 |
hadoop_RLESparseResourceAllocation_addInterval_rdh | /**
* Add a resource for the specified interval.
*
* @param reservationInterval
* the interval for which the resource is to be
* added
* @param totCap
* the resource to be added
* @return true if addition is successful, false otherwise
*/
public boolean addInterval(ReservationInterval reservationInterval, Resource totCap) {
if (totCap.equals(ZERO_RESOURCE)) {
return true;
}
writeLock.lock();
try {
NavigableMap<Long, Resource> addInt = new TreeMap<Long, Resource>();
addInt.put(reservationInterval.getStartTime(), totCap);
addInt.put(reservationInterval.getEndTime(), ZERO_RESOURCE);
try {
cumulativeCapacity = merge(resourceCalculator, totCap, cumulativeCapacity, addInt, Long.MIN_VALUE,
Long.MAX_VALUE, RLEOperator.add);
} catch (PlanningException e) {
// never happens for add
}
return true;
} finally {
writeLock.unlock();
}
} | 3.26 |
hadoop_RLESparseResourceAllocation_getRangeOverlapping_rdh | /**
* Get a {@link RLESparseResourceAllocation} view of the {@link Resource}
* allocations between the specified start and end times.
*
* @param start
* the time from which the {@link Resource} allocations are
* required
* @param end
* the time upto which the {@link Resource} allocations are
* required
* @return the overlapping allocations
*/
public RLESparseResourceAllocation getRangeOverlapping(long start, long end) {
readLock.lock();
try {
NavigableMap<Long, Resource> a = this.getCumulative();
if ((a != null) && (!a.isEmpty())) {
// include the portion of previous entry that overlaps start
if (start > a.firstKey()) {
long previous = a.floorKey(start);
a = a.tailMap(previous, true);
}
if (end < a.lastKey()) {
a = a.headMap(end, true);
}
}
RLESparseResourceAllocation ret = new RLESparseResourceAllocation(a, resourceCalculator);
return ret;
} finally {
readLock.unlock();
}
} | 3.26 |
hadoop_RLESparseResourceAllocation_getCapacityAtTime_rdh | /**
* Returns the capacity, i.e. total resources allocated at the specified point
* of time.
*
* @param tick
* timestamp at which the resource needs to be known
* @return the resources allocated at the specified time
*/
public Resource getCapacityAtTime(long tick) {
readLock.lock();
try {
Entry<Long, Resource> closestStep = cumulativeCapacity.floorEntry(tick);
if (closestStep != null) {
return Resources.clone(closestStep.getValue());
}
return Resources.clone(ZERO_RESOURCE);
} finally {
readLock.unlock();
}
} | 3.26 |
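Several of the `RLESparseResourceAllocation` methods above lean on the same idea: capacity changes stored as a step function in a `TreeMap`, with `floorEntry` giving the value in effect at any timestamp. A simplified sketch with integer capacities instead of YARN `Resource` objects; the class and method names are illustrative, and the add is naive (non-overlapping intervals only):

```java
import java.util.Map;
import java.util.TreeMap;

public class StepFunctionSketch {
  // timestamp -> capacity in effect from that timestamp onwards
  private final TreeMap<Long, Integer> cumulative = new TreeMap<>();

  void addInterval(long start, long end, int capacity) {
    // naive add for illustration only: assumes intervals do not overlap
    cumulative.put(start, capacity);
    cumulative.put(end, 0);
  }

  int capacityAt(long tick) {
    // the value at tick is the closest change point at or before it
    Map.Entry<Long, Integer> closest = cumulative.floorEntry(tick);
    return closest != null ? closest.getValue() : 0;
  }

  public static void main(String[] args) {
    StepFunctionSketch f = new StepFunctionSketch();
    f.addInterval(10, 20, 4);
    System.out.println(f.capacityAt(5));   // 0  (before the interval)
    System.out.println(f.capacityAt(15));  // 4  (inside the interval)
    System.out.println(f.capacityAt(25));  // 0  (after the interval)
  }
}
```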
hadoop_RLESparseResourceAllocation_shift_rdh | /**
* This method shifts all the timestamps of the {@link Resource} entries by the
* specified "delta".
*
* @param delta
* the time by which to shift the {@link Resource} allocations
*/
public void shift(long delta) {
writeLock.lock();
try {
TreeMap<Long, Resource> newCum = new TreeMap<>();
long start;
for (Map.Entry<Long, Resource> entry : cumulativeCapacity.entrySet()) {
if (delta > 0) {
start = (entry.getKey() == Long.MAX_VALUE) ? Long.MAX_VALUE : entry.getKey() + delta;
} else {
start = (entry.getKey() == Long.MIN_VALUE) ? Long.MIN_VALUE : entry.getKey() + delta;
}
newCum.put(start, entry.getValue());
}
cumulativeCapacity = newCum;
} finally {
writeLock.unlock();
}
} | 3.26 |
hadoop_RLESparseResourceAllocation_getMaximumPeriodicCapacity_rdh | /**
* Get the maximum capacity across specified time instances. The search-space
* is specified using the starting value, tick, and the periodic interval for
* search. Maximum resource allocation across tick, tick + period, tick + 2 *
* period,..., tick + n * period .. is returned.
*
* @param tick
* the starting time instance
* @param period
* interval at which capacity is evaluated
* @return maximum resource allocation
*/
public Resource getMaximumPeriodicCapacity(long tick, long period) {
Resource maxCapacity = ZERO_RESOURCE;
readLock.lock();
try {
if (!cumulativeCapacity.isEmpty()) {
Long lastKey = cumulativeCapacity.lastKey();
for (long t = tick; t <= lastKey; t = t + period) {
maxCapacity = Resources.componentwiseMax(maxCapacity, cumulativeCapacity.floorEntry(t).getValue());
}
}
return maxCapacity;
} finally {
readLock.unlock();
}
} | 3.26 |
hadoop_RLESparseResourceAllocation_getLatestNonNullTime_rdh | /**
* Get the timestamp of the latest non-null resource allocation.
*
* @return the timestamp of the last resource allocation
*/
public long getLatestNonNullTime() {
readLock.lock();
try {
if (cumulativeCapacity.isEmpty()) {
return -1;
} else {
// the last entry might contain null (to terminate
// the sequence)... return previous one.
Entry<Long, Resource> last = cumulativeCapacity.lastEntry();
if (last.getValue() == null) {
return cumulativeCapacity.floorKey(last.getKey() - 1);
} else {
return last.getKey();
}
}
} finally {
readLock.unlock();
}
} | 3.26 |
hadoop_RLESparseResourceAllocation_isEmpty_rdh | /**
* Returns true if there are no non-zero entries.
*
* @return true if there are no allocations or false otherwise
*/
public boolean isEmpty() {
readLock.lock();
try {
if (cumulativeCapacity.isEmpty()) {
return true;
}
// Deletion leaves a single zero entry with a null at the end so check for
// that
if (cumulativeCapacity.size() == 2) {
return cumulativeCapacity.firstEntry().getValue().equals(ZERO_RESOURCE) && (cumulativeCapacity.lastEntry().getValue() == null);
}
return false;
} finally {
readLock.unlock();
}
} | 3.26 |
hadoop_RLESparseResourceAllocation_getMinimumCapacityInInterval_rdh | /**
* Get the minimum capacity in the specified time range.
*
* @param interval
* the {@link ReservationInterval} to be searched
* @return minimum resource allocation
*/
public Resource getMinimumCapacityInInterval(ReservationInterval interval) {
Resource minCapacity = Resource.newInstance(Integer.MAX_VALUE, Integer.MAX_VALUE);
long start = interval.getStartTime();
long end = interval.getEndTime();
NavigableMap<Long, Resource> capacityRange = getRangeOverlapping(start, end).getCumulative();
if (!capacityRange.isEmpty()) {
for (Map.Entry<Long, Resource> entry : capacityRange.entrySet()) {
if (entry.getValue() != null) {
minCapacity = Resources.componentwiseMin(minCapacity, entry.getValue());
}
}
}
return minCapacity;
} | 3.26 |
hadoop_RLESparseResourceAllocation_getEarliestStartTime_rdh | /**
* Get the timestamp of the earliest resource allocation.
*
* @return the timestamp of the first resource allocation
*/
public long getEarliestStartTime() {
readLock.lock();
try {
if (cumulativeCapacity.isEmpty()) {
return -1;
} else {
return cumulativeCapacity.firstKey();
}
} finally {
readLock.unlock();
}
} | 3.26 |
hadoop_RLESparseResourceAllocation_removeInterval_rdh | /**
* Removes a resource for the specified interval.
*
* @param reservationInterval
* the interval for which the resource is to be
* removed
* @param totCap
* the resource to be removed
* @return true if removal is successful, false otherwise
*/
public boolean removeInterval(ReservationInterval reservationInterval, Resource totCap) {
if (totCap.equals(ZERO_RESOURCE)) {
return true;
}
writeLock.lock();
try {
NavigableMap<Long, Resource> removeInt = new TreeMap<Long, Resource>();
removeInt.put(reservationInterval.getStartTime(), totCap);
removeInt.put(reservationInterval.getEndTime(),
ZERO_RESOURCE);
try {
cumulativeCapacity = merge(resourceCalculator, totCap, cumulativeCapacity, removeInt, Long.MIN_VALUE, Long.MAX_VALUE, RLEOperator.subtract);
} catch (PlanningException e) {
// never happens for subtract
}
return true;
} finally {
writeLock.unlock();
}
} | 3.26 |
hadoop_ZookeeperUtils_buildQuorum_rdh | /**
* Build a quorum list, injecting a ":defaultPort" ref if needed on
* any entry without one
*
* @param hostAndPorts
* list of host/port entries
* @param defaultPort
* default port to inject when an entry has none
* @return the quorum string
*/
public static String buildQuorum(List<HostAndPort> hostAndPorts, int defaultPort) {
List<String> entries = new ArrayList<String>(hostAndPorts.size());
for (HostAndPort hostAndPort : hostAndPorts) {
entries.add(buildQuorumEntry(hostAndPort, defaultPort));
}
return ServiceUtils.join(entries, ",", false);
} | 3.26 |
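A small sketch of the quorum-building idea: append `:defaultPort` to any entry without a port, then join with commas. It uses plain strings instead of the `HostAndPort` type and ignores IPv6 literals, so it is an illustration of the technique rather than the utility above.

```java
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class QuorumSketch {
  static String buildQuorum(List<String> hosts, int defaultPort) {
    // note: a real implementation would also need to handle IPv6 literals
    return hosts.stream()
        .map(h -> h.contains(":") ? h : h + ":" + defaultPort)
        .collect(Collectors.joining(","));
  }

  public static void main(String[] args) {
    System.out.println(buildQuorum(Arrays.asList("zk1", "zk2:2182", "zk3"), 2181));
    // -> zk1:2181,zk2:2182,zk3:2181
  }
}
```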
hadoop_ZookeeperUtils_buildHostsOnlyList_rdh | /**
* Build a comma-separated list of the hosts only
*
* @param hostAndPorts
* @return a list of the hosts only
*/
public static String buildHostsOnlyList(List<HostAndPort> hostAndPorts) {
StringBuilder sb = new StringBuilder();
for (HostAndPort hostAndPort : hostAndPorts) {
sb.append(hostAndPort.getHost()).append(",");
}
if (sb.length() > 0) {
sb.delete(sb.length() - 1, sb.length());
}
return sb.toString();
} | 3.26 |
hadoop_ZookeeperUtils_splitToHostsAndPorts_rdh | /**
* Split a quorum list into a list of hostnames and ports
*
* @param hostPortQuorumList
* split to a list of hosts and ports
* @return a list of values
*/
public static List<HostAndPort> splitToHostsAndPorts(String hostPortQuorumList) {
// split the quorum string into individual host:port entries
String[] strings = StringUtils.getStrings(hostPortQuorumList);
int len = 0;
if (strings != null) {
len = strings.length;
}
List<HostAndPort> list = new ArrayList<HostAndPort>(len);
if (strings != null) {
for (String s : strings) {
list.add(HostAndPort.fromString(s.trim()).withDefaultPort(DEFAULT_PORT));
}
}
return list;
} | 3.26 |
hadoop_GangliaSink31_emitMetric_rdh | /**
* The method sends metrics to Ganglia servers. The method has been taken from
* org.apache.hadoop.metrics.ganglia.GangliaContext31 with minimal changes in
* order to keep it in sync.
*
* @param groupName
* The group name of the metric
* @param name
* The metric name
* @param type
* The type of the metric
* @param value
* The value of the metric
* @param gConf
* The GangliaConf for this metric
* @param gSlope
* The slope for this metric
* @throws IOException
* raised on errors performing I/O.
*/
@Override
protected void emitMetric(String groupName, String name, String type, String value, GangliaConf gConf, GangliaSlope gSlope) throws IOException {
if (name == null) {
LOG.warn("Metric was emitted with no name.");
return;
} else if (value == null) {
LOG.warn(("Metric name " + name) + " was emitted with a null value.");
return;
} else if (type == null) {
LOG.warn(((("Metric name " + name) + ", value ") + value) + " has no type.");
return;
}
if (LOG.isDebugEnabled()) {
LOG.debug((((((((("Emitting metric " + name) + ", type ") + type) + ", value ") + value) + ", slope ") + gSlope.name()) + " from hostname ") + getHostName());
}
// The following XDR recipe was done through a careful reading of
// gm_protocol.x in Ganglia 3.1 and carefully examining the output of
// the gmetric utility with strace.
// First we send out a metadata message
xdr_int(128);
// metric_id = metadata_msg
xdr_string(getHostName());
// hostname
xdr_string(name);
// metric name
xdr_int(0);
// spoof = False
xdr_string(type);
// metric type
xdr_string(name);
// metric name
xdr_string(gConf.getUnits());// units
xdr_int(gSlope.ordinal());// slope
xdr_int(gConf.getTmax());
// tmax, the maximum time between metrics
xdr_int(gConf.getDmax());
// dmax, the maximum data value
xdr_int(1);
/* Num of the entries in extra_value field for
Ganglia 3.1.x
*/
xdr_string("GROUP");
/* Group attribute */
xdr_string(groupName);/* Group value */
// send the metric to Ganglia hosts
emitToGangliaHosts();
// Now we send out a message with the actual value.
// Technically, we only need to send out the metadata message once for
// each metric, but I don't want to have to record which metrics we did and
// did not send.
xdr_int(133);
// we are sending a string value
xdr_string(getHostName());// hostName
xdr_string(name);// metric name
xdr_int(0);
// spoof = False
xdr_string("%s");// format field
xdr_string(value);// metric value
// send the metric to Ganglia hosts
emitToGangliaHosts();
} | 3.26 |
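The `xdr_int`/`xdr_string` calls above follow XDR encoding: 4-byte big-endian integers, and strings as a length prefix followed by the bytes padded with zeros to a 4-byte boundary. A self-contained sketch of that encoding (not the Hadoop sink, which also handles the UDP send):

```java
import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;

public class XdrSketch {
  private final ByteArrayOutputStream buf = new ByteArrayOutputStream();

  // XDR int: 4 bytes, big-endian.
  void xdrInt(int v) {
    buf.write((v >> 24) & 0xff);
    buf.write((v >> 16) & 0xff);
    buf.write((v >> 8) & 0xff);
    buf.write(v & 0xff);
  }

  // XDR string: length prefix, then the bytes, zero-padded to a 4-byte boundary.
  void xdrString(String s) {
    byte[] bytes = s.getBytes(StandardCharsets.UTF_8);
    xdrInt(bytes.length);
    buf.write(bytes, 0, bytes.length);
    int pad = (4 - (bytes.length % 4)) % 4;
    for (int i = 0; i < pad; i++) {
      buf.write(0);
    }
  }

  public static void main(String[] args) {
    XdrSketch x = new XdrSketch();
    x.xdrInt(128);            // metadata message id, as in the snippet above
    x.xdrString("host-1");    // hostname
    x.xdrString("bytes_in");  // metric name
    System.out.println("encoded " + x.buf.size() + " bytes");
  }
}
```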
hadoop_HdfsConfiguration_init_rdh | /**
* This method is here so that when invoked, HdfsConfiguration is class-loaded
* if it hasn't already been previously loaded. Upon loading the class, the
* static initializer block above will be executed to add the deprecated keys
* and to add the default resources. It is safe for this method to be called
* multiple times as the static initializer block will only get invoked once.
*
* This replaces the previous, dangerous practice of other classes calling
* Configuration.addDefaultResource("hdfs-default.xml") directly without
* loading this class first, thereby skipping the key deprecation.
*/
public static void init() {} | 3.26 |
hadoop_OBSObjectBucketUtils_createEmptyObject_rdh | // Used to create an empty file that represents an empty directory
private static void createEmptyObject(final OBSFileSystem owner, final String objectName) throws ObsException, IOException {
for (int retryTime = 1; retryTime < OBSCommonUtils.MAX_RETRY_TIME; retryTime++) {
try {
innerCreateEmptyObject(owner, objectName);
return;
} catch (ObsException e) {
LOG.warn("Failed to create empty object [{}], retry time [{}], " + "exception [{}]", objectName, retryTime, e);
try {
Thread.sleep(OBSCommonUtils.DELAY_TIME);
} catch (InterruptedException ie) {
throw e;
}
}
}
innerCreateEmptyObject(owner, objectName);
} | 3.26 |
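The method above shows a common bounded-retry shape: try up to a fixed number of times, sleep between attempts, stop early if interrupted, and give the operation one final chance outside the loop. A generic sketch of that pattern; the constants and names are assumptions, not the OBS connector's values.

```java
import java.util.concurrent.Callable;

public class RetryLoopSketch {
  private static final int MAX_RETRIES = 3;   // assumed limit
  private static final long DELAY_MS = 100;   // assumed delay between attempts

  static <T> T withRetries(Callable<T> action) throws Exception {
    for (int attempt = 1; attempt < MAX_RETRIES; attempt++) {
      try {
        return action.call();
      } catch (Exception e) {
        System.err.println("attempt " + attempt + " failed: " + e.getMessage());
        try {
          Thread.sleep(DELAY_MS);
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
          throw e;                     // give up immediately if interrupted
        }
      }
    }
    return action.call();              // last attempt: let any failure propagate
  }

  public static void main(String[] args) throws Exception {
    final int[] calls = {0};
    String result = withRetries(() -> {
      if (++calls[0] < 3) {
        throw new RuntimeException("transient failure");
      }
      return "ok after " + calls[0] + " calls";
    });
    System.out.println(result);
  }
}
```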
hadoop_OBSObjectBucketUtils_cloneObjectMetadata_rdh | /**
* Creates a copy of the passed {@link ObjectMetadata}. Does so without using
* the {@link ObjectMetadata#clone()} method, to avoid copying unnecessary
* headers.
*
* @param source
* the {@link ObjectMetadata} to copy
* @return a copy of {@link ObjectMetadata} with only relevant attributes
*/
private static ObjectMetadata cloneObjectMetadata(final ObjectMetadata source) {
// This approach may be too brittle, especially if
// in future there are new attributes added to ObjectMetadata
// that we do not explicitly call to set here
ObjectMetadata ret = newObjectMetadata(source.getContentLength());
if (source.getContentEncoding() != null) {
ret.setContentEncoding(source.getContentEncoding());
}
return ret;
} | 3.26 |
hadoop_OBSObjectBucketUtils_renameFolder_rdh | /**
* Implement rename folder.
*
* @param owner
* OBS File System instance
* @param srcKey
* source folder key
* @param dstKey
* destination folder key
* @throws IOException
* any problem with rename folder
*/
static void renameFolder(final OBSFileSystem owner, final String srcKey, final String dstKey) throws IOException {
long startTime = System.nanoTime();
List<KeyAndVersion> keysToDelete = new ArrayList<>();
createFakeDirectory(owner, dstKey);
ListObjectsRequest request = new ListObjectsRequest();
request.setBucketName(owner.getBucket());
request.setPrefix(srcKey);
request.setMaxKeys(owner.getMaxKeys());
ObjectListing objects = OBSCommonUtils.listObjects(owner, request);
List<Future<CopyObjectResult>> copyfutures =
new LinkedList<>();
while (true) {
for (ObsObject summary : objects.getObjects()) {
if (summary.getObjectKey().equals(srcKey)) {
// skip prefix itself
continue;
}
keysToDelete.add(new KeyAndVersion(summary.getObjectKey()));
String newDstKey = dstKey + summary.getObjectKey().substring(srcKey.length());
// copyFile(summary.getObjectKey(), newDstKey,
// summary.getMetadata().getContentLength());
copyfutures.add(copyFileAsync(owner, summary.getObjectKey(), newDstKey, summary.getMetadata().getContentLength()));
if (keysToDelete.size() == owner.getMaxEntriesToDelete()) {
waitAllCopyFinished(copyfutures);
copyfutures.clear();
}
}
if (!objects.isTruncated()) {
if (!keysToDelete.isEmpty()) {
waitAllCopyFinished(copyfutures);
copyfutures.clear();
}
break;
}
objects = OBSCommonUtils.continueListObjects(owner, objects);
}
keysToDelete.add(new KeyAndVersion(srcKey));
DeleteObjectsRequest deleteObjectsRequest = new DeleteObjectsRequest(owner.getBucket());
deleteObjectsRequest.setKeyAndVersions(keysToDelete.toArray(new KeyAndVersion[0]));
OBSCommonUtils.deleteObjects(owner, deleteObjectsRequest);
if (LOG.isDebugEnabled()) {
long delay = System.nanoTime() - startTime;
LOG.debug((((((("OBSFileSystem rename: "
+ ", {src=") + srcKey) +
", dst=") + dstKey) + ", delay=") + delay) + "}");
}
} | 3.26 |
hadoop_OBSObjectBucketUtils_copyFile_rdh | /**
* Copy a single object in the bucket via a COPY operation.
*
* @param owner
* OBS File System instance
* @param srcKey
* source object path
* @param dstKey
* destination object path
* @param size
* object size
* @throws InterruptedIOException
* the operation was interrupted
* @throws IOException
* Other IO problems
*/
private static void copyFile(final OBSFileSystem owner, final String srcKey, final String dstKey, final long size) throws IOException, InterruptedIOException {
for (int retryTime = 1; retryTime < OBSCommonUtils.MAX_RETRY_TIME; retryTime++) {
try {
innerCopyFile(owner, srcKey, dstKey, size);
return;
} catch (InterruptedIOException e) {
throw e;
} catch (IOException e) {
LOG.warn("Failed to copy file from [{}] to [{}] with size [{}], " + "retry time [{}], exception [{}]", srcKey, dstKey, size, retryTime, e);
try {
Thread.sleep(OBSCommonUtils.DELAY_TIME);
} catch (InterruptedException ie) {
throw e;
}
}
}
innerCopyFile(owner, srcKey, dstKey, size);
} | 3.26 |
hadoop_OBSObjectBucketUtils_renameBasedOnObject_rdh | /**
* The inner rename operation.
*
* @param owner
* OBS File System instance
* @param src
* path to be renamed
* @param dst
* new path after rename
* @return boolean
* @throws RenameFailedException
* if some criteria for a state changing rename
* was not met. This means work didn't happen;
* it's not something which is reported upstream
* to the FileSystem APIs, for which the
* semantics of "false" are pretty vague.
* @throws FileNotFoundException
* there's no source file.
* @throws IOException
* on IO failure.
* @throws ObsException
* on failures inside the OBS SDK
*/
static boolean renameBasedOnObject(final OBSFileSystem owner, final Path src, final Path dst) throws RenameFailedException, FileNotFoundException, IOException, ObsException {
String srcKey = OBSCommonUtils.pathToKey(owner, src);
String dstKey = OBSCommonUtils.pathToKey(owner, dst);
if (srcKey.isEmpty()) {
LOG.error("rename: src [{}] is root directory", src);
throw new IOException(src + " is root directory");
}
// get the source file status; this raises a FNFE if there is no source
// file.
FileStatus srcStatus = owner.getFileStatus(src);
FileStatus dstStatus;
try {
dstStatus = owner.getFileStatus(dst);
// if there is no destination entry, an exception is raised.
// hence this code sequence can assume that there is something
// at the end of the path; the only detail being what it is and
// whether or not it can be the destination of the rename.
if (dstStatus.isDirectory()) {
String newDstKey = OBSCommonUtils.maybeAddTrailingSlash(dstKey);
String filename = srcKey.substring(OBSCommonUtils.pathToKey(owner, src.getParent()).length() + 1);
newDstKey = newDstKey + filename;
dstKey = newDstKey;
dstStatus = owner.getFileStatus(OBSCommonUtils.keyToPath(dstKey));
if (dstStatus.isDirectory()) {
throw new RenameFailedException(src, dst, "new destination is an existed directory").withExitCode(false);
} else {
throw new RenameFailedException(src, dst, "new destination is an existed file").withExitCode(false);}
} else if (srcKey.equals(dstKey)) {
LOG.warn("rename: src and dest refer to the same file or" + " directory: {}", dst);
return true;
} else {
throw new RenameFailedException(src, dst, "destination is an existed file").withExitCode(false);}
} catch (FileNotFoundException e) {
LOG.debug("rename: destination path {} not found", dst);
// Parent must exist
checkDestinationParent(owner, src, dst);
}
if (dstKey.startsWith(srcKey) && (dstKey.charAt(srcKey.length()) == Path.SEPARATOR_CHAR)) {
LOG.error("rename: dest [{}] cannot be a descendant of src [{}]", dst, src);
return false;
}
// Ok! Time to start
if (srcStatus.isFile()) {
LOG.debug("rename: renaming file {} to {}", src, dst);
renameFile(owner, srcKey, dstKey, srcStatus);
} else {
LOG.debug("rename: renaming directory {} to {}", src, dst);
// This is a directory to directory copy
dstKey = OBSCommonUtils.maybeAddTrailingSlash(dstKey);
srcKey = OBSCommonUtils.maybeAddTrailingSlash(srcKey);
renameFolder(owner, srcKey, dstKey);
}
if (src.getParent() != dst.getParent()) {
// deleteUnnecessaryFakeDirectories(dst.getParent());
createFakeDirectoryIfNecessary(owner, src.getParent());
}
return true;
} | 3.26 |
hadoop_OBSObjectBucketUtils_renameFile_rdh | /**
* Implement rename file.
*
* @param owner
* OBS File System instance
* @param srcKey
* source object key
* @param dstKey
* destination object key
* @param srcStatus
* source object status
* @throws IOException
* any problem with rename operation
*/
private static void renameFile(final OBSFileSystem owner, final String srcKey, final String dstKey, final FileStatus srcStatus) throws IOException {
long startTime = System.nanoTime();
copyFile(owner, srcKey, dstKey, srcStatus.getLen());
objectDelete(owner, srcStatus, false);
if (LOG.isDebugEnabled()) {
long delay = System.nanoTime() - startTime;
LOG.debug((((((("OBSFileSystem rename: " + ", {src=") + srcKey) + ", dst=") + dstKey) + ", delay=") +
delay) + "}");
}
} | 3.26 |
hadoop_OBSObjectBucketUtils_newObjectMetadata_rdh | /**
* Create a new object metadata instance. Any standard metadata headers are
* added here, for example: encryption.
*
* @param length
* length of data to set in header.
* @return a new metadata instance
*/
static ObjectMetadata newObjectMetadata(final long length) {
final ObjectMetadata om = new ObjectMetadata();
if (length >= 0) {
om.setContentLength(length);
}
return om;
} | 3.26 |
hadoop_OBSObjectBucketUtils_getObjectMetadata_rdh | /**
* Request object metadata; increments counters in the process.
*
* @param owner
* OBS File System instance
* @param key
* key
* @return the metadata
*/
protected static ObjectMetadata getObjectMetadata(final OBSFileSystem owner, final String key) {
GetObjectMetadataRequest request = new GetObjectMetadataRequest();
request.setBucketName(owner.getBucket());
request.setObjectKey(key);
if (owner.getSse().isSseCEnable()) {
request.setSseCHeader(owner.getSse().getSseCHeader());
}
ObjectMetadata meta = owner.getObsClient().getObjectMetadata(request);
owner.getSchemeStatistics().incrementReadOps(1);
return meta;
} | 3.26 |
hadoop_OBSObjectBucketUtils_innerCreateEmptyObject_rdh | // Used to create an empty file that represents an empty directory
private static void innerCreateEmptyObject(final OBSFileSystem owner, final String objectName) throws ObsException, IOException {
final InputStream im = new InputStream() {
@Override
public int read() {
return -1;
}
};
PutObjectRequest putObjectRequest = OBSCommonUtils.newPutObjectRequest(owner, objectName, newObjectMetadata(0L), im);
long len;
if (putObjectRequest.getFile() != null) {
len = putObjectRequest.getFile().length();
} else {
len = putObjectRequest.getMetadata().getContentLength();
}
try {
owner.getObsClient().putObject(putObjectRequest);
owner.getSchemeStatistics().incrementWriteOps(1);
owner.getSchemeStatistics().incrementBytesWritten(len);
} finally {
im.close();
}
} | 3.26 |
hadoop_StoreContext_incrementStatistic_rdh | /**
* Increment a statistic by a specific value.
* This increments both the instrumentation and storage statistics.
*
* @param statistic
* The operation to increment
* @param count
* the count to increment
*/
public void incrementStatistic(Statistic statistic, long count) {
instrumentation.incrementCounter(statistic, count);
} | 3.26 |
hadoop_StoreContext_isCSEEnabled_rdh | /**
* Return whether the store context has client-side encryption enabled.
*
* @return boolean indicating if CSE is enabled or not.
*/
public boolean isCSEEnabled() {
return isCSEEnabled;
} | 3.26 |
hadoop_StoreContext_submit_rdh | /**
* Submit a closure for execution in the executor
* returned by {@link #getExecutor()}.
*
* @param <T>
* type of future
* @param future
* future for the result.
* @param call
* callable to invoke.
* @return the future passed in
*/
public <T> CompletableFuture<T> submit(final CompletableFuture<T> future, final Callable<T> call) {
getExecutor().submit(() -> LambdaUtils.eval(future, call));
return future;
} | 3.26 |
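The `submit` helper above routes a `Callable`'s outcome into a caller-supplied `CompletableFuture` via `LambdaUtils.eval`. A sketch of the same pattern written out directly, without the Hadoop helper:

```java
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class SubmitSketch {
  // Run the callable on the executor and complete the supplied future with
  // either its result or its exception.
  static <T> CompletableFuture<T> submit(ExecutorService executor,
      CompletableFuture<T> future, Callable<T> call) {
    executor.submit(() -> {
      try {
        future.complete(call.call());
      } catch (Throwable t) {
        future.completeExceptionally(t);
      }
    });
    return future;
  }

  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newSingleThreadExecutor();
    CompletableFuture<String> f = new CompletableFuture<>();
    submit(pool, f, () -> "done");
    System.out.println(f.get());
    pool.shutdown();
  }
}
```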
hadoop_StoreContext_keyToPath_rdh | /**
* Convert a key to a fully qualified path.
*
* @param key
* input key
* @return the fully qualified path including URI scheme and bucket name.
*/
public Path keyToPath(String key) {
return f1.keyToPath(key);
} | 3.26 |
hadoop_StoreContext_pathToKey_rdh | /**
* Turns a path (relative or otherwise) into an S3 key.
*
* @param path
* input path, may be relative to the working dir
* @return a key excluding the leading "/", or, if it is the root path, ""
*/
public String pathToKey(Path path) {
return f1.pathToKey(path);
} | 3.26 |
hadoop_StoreContext_fullKey_rdh | /**
* Build the full S3 key for a request from the status entry,
* possibly adding a "/" if it represents a directory and does
* not have a trailing slash already.
*
* @param stat
* status to build the key from
* @return a key for a delete request
*/
public String fullKey(final S3AFileStatus stat) {
String k = pathToKey(stat.getPath());
return stat.isDirectory() && (!k.endsWith("/")) ? k + "/" : k;
} | 3.26 |
hadoop_StoreContext_makeQualified_rdh | /**
* Qualify a path.
*
* @param path
* path to qualify/normalize
* @return possibly new path.
*/
public Path makeQualified(Path path) {
return f1.makeQualified(path);
} | 3.26 |
hadoop_StoreContext_m1_rdh | /**
* Increment a statistic by 1.
* This increments both the instrumentation and storage statistics.
*
* @param statistic
* The operation to increment
*/
public void m1(Statistic statistic) {
incrementStatistic(statistic, 1);
} | 3.26 |
hadoop_StoreContext_decrementGauge_rdh | /**
* Decrement a gauge by a specific value.
*
* @param statistic
* The operation to decrement
* @param count
* the count to decrement
*/
public void decrementGauge(Statistic statistic, long count) {
instrumentation.decrementGauge(statistic, count);
} | 3.26 |
hadoop_StoreContext_getStorageStatistics_rdh | /**
* Get the storage statistics of this filesystem.
*
* @return the storage statistics
*/
public S3AStorageStatistics getStorageStatistics() {
return storageStatistics;
} | 3.26 |
hadoop_StoreContext_getAuditor_rdh | /**
* Get the auditor.
*
* @return auditor.
*/
public AuditSpanSource<AuditSpanS3A> getAuditor() {
return auditor;
} | 3.26 |
hadoop_StoreContext_createThrottledExecutor_rdh | /**
* Create a new executor with the capacity defined in
* {@link #executorCapacity}.
*
* @return a new executor for exclusive use by the caller.
*/
public ExecutorService createThrottledExecutor() {
return createThrottledExecutor(f0);
} | 3.26 |
hadoop_StoreContext_getOwner_rdh | /**
* Get the owner of the filesystem.
*
* @return the user who created this filesystem.
*/
public UserGroupInformation getOwner() {
return owner;
} | 3.26 |
hadoop_StoreContext_getBucketLocation_rdh | /**
* Get the location of the bucket.
*
* @return the bucket location.
* @throws IOException
* failure.
*/
public String getBucketLocation() throws IOException {
return f1.getBucketLocation();
} | 3.26 |
hadoop_StoreContext_incrementGauge_rdh | /**
* Increment a gauge by a specific value.
*
* @param statistic
* The operation to increment
* @param count
* the count to increment
*/
public void incrementGauge(Statistic statistic, long count) {
instrumentation.incrementGauge(statistic, count);
} | 3.26 |
hadoop_StoreContext_getActiveAuditSpan_rdh | /**
* Return the active audit span.
* This is thread local - it MUST be passed into workers.
* To ensure the correct span is used, it SHOULD be
* collected as early as possible, ideally during construction
* or service init/start.
*
* @return active audit span.
*/
@Override
public AuditSpan getActiveAuditSpan() {
return f1.getActiveAuditSpan();
} | 3.26 |
hadoop_StoreContext_getRequestFactory_rdh | /**
* Get the request factory.
*
* @return the factory for requests.
*/
public RequestFactory getRequestFactory() {
return f1.getRequestFactory();
} | 3.26 |
hadoop_StoreContext_createTempFile_rdh | /**
* Create a temporary file somewhere.
*
* @param prefix
* prefix for the temporary file
* @param size
* expected size.
* @return a file reference.
* @throws IOException
* failure.
*/
public File createTempFile(String prefix, long size) throws IOException {
return f1.createTempFile(prefix, size);
} | 3.26 |
hadoop_DNSOperationsFactory_createInstance_rdh | /**
* Create and initialize a registry operations instance.
* Access rights will be determined from the configuration.
*
* @param name
* name of the instance
* @param impl
* the DNS implementation.
* @param conf
* configuration
* @return a registry operations instance
*/
public static DNSOperations createInstance(String name, DNSImplementation impl, Configuration conf) {
Preconditions.checkArgument(conf != null, "Null configuration");
DNSOperations operations = null;
switch (impl) {
case DNSJAVA :
operations = new RegistryDNS(name);
break;
default :
throw new IllegalArgumentException(String.format("%s is not available", impl.toString()));
}
// operations.init(conf);
return operations;
} | 3.26 |
hadoop_AppToFlowRowKey_getRowKey_rdh | /**
* Constructs a row key prefix for the app_flow table.
*
* @return byte array with the row key
*/
public byte[] getRowKey() {
return appIdKeyConverter.encode(appId);
} | 3.26 |
hadoop_AppToFlowRowKey_parseRowKey_rdh | /**
* Given the raw row key as bytes, returns the row key as an object.
*
* @param rowKey
* a rowkey represented as a byte array.
* @return an <cite>AppToFlowRowKey</cite> object.
*/
public static AppToFlowRowKey parseRowKey(byte[] rowKey) {
String appId = new AppIdKeyConverter().decode(rowKey);
return new AppToFlowRowKey(appId);
} | 3.26 |