name (string, 12–178 chars) | code_snippet (string, 8–36.5k chars) | score (float64, 3.26–3.68)
---|---|---|
hadoop_WriteOperationHelper_select_rdh | /**
* Execute an S3 Select operation.
* On a failure, the request is only logged at debug to avoid the
* select exception being printed.
*
* @param source
* source for selection
* @param request
* Select request to issue.
* @param action
* the action for use in exception creation
* @return response
* @throws IOException
* failure
*/
@Retries.RetryTranslated
public SelectEventStreamPublisher select(final Path source, final SelectObjectContentRequest request, final String action) throws IOException {
// no setting of span here as the select binding is (statically) created
// without any span.
String bucketName = request.bucket();
Preconditions.checkArgument(bucket.equals(bucketName), "wrong bucket: %s", bucketName);
if (LOG.isDebugEnabled()) {
LOG.debug("Initiating select call {} {}", source, request.expression());
LOG.debug(SelectBinding.toString(request));
    }
    return invoker.retry(action, source.toString(), true, withinAuditSpan(getAuditSpan(), () -> {
try (DurationInfo ignored =
new DurationInfo(LOG, "S3 Select operation")) {
try {
return SelectObjectContentHelper.select(writeOperationHelperCallbacks, source, request, action);
} catch (Throwable e) {
LOG.error("Failure of S3 Select request against {}", source);
LOG.debug("S3 Select request against {}:\n{}", source, SelectBinding.toString(request), e); throw e;
}
}
    }));
  } | 3.26 |
hadoop_WriteOperationHelper_retry_rdh | /**
* Execute a function with retry processing.
* Also activates the current span.
*
* @param <T>
* type of return value
* @param action
* action to execute (used in error messages)
* @param path
* path of work (used in error messages)
* @param idempotent
* does the operation have semantics
 * which mean that it can be retried even if it was already executed?
* @param operation
* operation to execute
* @return the result of the call
* @throws IOException
* any IOE raised, or translated exception
*/
public <T> T retry(String action, String path, boolean idempotent, CallableRaisingIOE<T> operation) throws IOException {
activateAuditSpan();
    return invoker.retry(action, path, idempotent, operation);
} | 3.26 |
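A hedged usage sketch of the `retry()` entry point above: the helper instance, action label, and key are illustrative only, and the sketch assumes the lambda is a `CallableRaisingIOE` whose body may throw `IOException`; it is not the library's own usage.

```java
// Minimal sketch: wrap an idempotent, read-only operation with retry().
// Only the retry(String, String, boolean, CallableRaisingIOE) signature
// comes from the snippet above; everything else is hypothetical.
import java.io.IOException;
import org.apache.hadoop.fs.s3a.WriteOperationHelper;

public final class RetryUsageSketch {
  static String probeWithRetry(WriteOperationHelper helper, String key)
      throws IOException {
    // idempotent = true: a read-only probe can safely be re-executed after
    // a transient failure; the invoker handles retries and translation.
    return helper.retry("probe object", key, true, () -> {
      // ... perform the read-only call here and return its result ...
      return "etag-placeholder";
    });
  }
}
```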
hadoop_WriteOperationHelper_putObject_rdh | /**
* PUT an object directly (i.e. not via the transfer manager).
* Byte length is calculated from the file length, or, if there is no
* file, from the content length of the header.
*
* @param putObjectRequest
* the request
* @param putOptions
* put object options
* @param durationTrackerFactory
* factory for duration tracking
* @param uploadData
* data to be uploaded
* @param isFile
* is data to be uploaded a file
* @return the upload initiated
* @throws IOException
* on problems
*/
@Retries.RetryTranslated
public PutObjectResponse putObject(PutObjectRequest putObjectRequest, PutObjectOptions putOptions, S3ADataBlocks.BlockUploadData uploadData, boolean isFile, DurationTrackerFactory durationTrackerFactory) throws IOException {
return retry("Writing Object", putObjectRequest.key(), true, withinAuditSpan(getAuditSpan(), () -> owner.putObjectDirect(putObjectRequest, putOptions, uploadData, isFile, durationTrackerFactory)));
} | 3.26 |
hadoop_WriteOperationHelper_deactivateAuditSpan_rdh | /**
* Deactivate the audit span.
*/
private void deactivateAuditSpan() {
auditSpan.deactivate();
} | 3.26 |
hadoop_WriteOperationHelper_m0_rdh | /**
* This completes a multipart upload to the destination key via
* {@code finalizeMultipartUpload()}.
* Retry policy: retrying, translated.
* Retries increment the {@code errorCount} counter.
*
* @param destKey
* destination
* @param uploadId
* multipart operation Id
* @param partETags
* list of partial uploads
* @param length
* length of the upload
* @param errorCount
* a counter incremented by 1 on every error; for
* use in statistics
* @param putOptions
* put object options
* @return the result of the operation.
* @throws IOException
* if problems arose which could not be retried, or
* the retry count was exceeded
*/
@Retries.RetryTranslated
public CompleteMultipartUploadResponse m0(String destKey, String uploadId, List<CompletedPart> partETags, long length, AtomicInteger errorCount, PutObjectOptions putOptions) throws IOException {
checkNotNull(uploadId);
checkNotNull(partETags);
LOG.debug("Completing multipart upload {} with {} parts", uploadId, partETags.size());
return finalizeMultipartUpload(destKey, uploadId, partETags, length, putOptions, (text, e, r, i) -> errorCount.incrementAndGet());
} | 3.26 |
hadoop_WriteOperationHelper_initiateMultiPartUpload_rdh | /**
 * {@inheritDoc}
*/
@Retries.RetryTranslated
public String initiateMultiPartUpload(final String destKey, final PutObjectOptions options) throws IOException {
LOG.debug("Initiating Multipart upload to {}", destKey);
try (AuditSpan span = activateAuditSpan()) {
return retry("initiate MultiPartUpload", destKey, true, () -> {
final CreateMultipartUploadRequest.Builder initiateMPURequestBuilder = getRequestFactory().newMultipartUploadRequestBuilder(destKey, options);
return owner.initiateMultipartUpload(initiateMPURequestBuilder.build()).uploadId();
});
}
} | 3.26 |
hadoop_WriteOperationHelper_toString_rdh | /**
* The toString method is intended to be used in logging/toString calls.
*
* @return a string description.
*/
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("WriteOperationHelper {bucket=").append(bucket);
sb.append('}');
return sb.toString();
} | 3.26 |
hadoop_WriteOperationHelper_uploadPart_rdh | /**
* Upload part of a multi-partition file.
*
 * @param request
 * the upload part request.
 * @param body
 * the request body.
 * @param durationTrackerFactory
 * duration tracker factory for operation
* @return the result of the operation.
* @throws IOException
* on problems
*/
@Retries.RetryTranslated
public UploadPartResponse uploadPart(UploadPartRequest request, RequestBody body, final DurationTrackerFactory durationTrackerFactory) throws IOException {
return retry((("upload part #" + request.partNumber()) + " upload ID ") + request.uploadId(), request.key(), true, withinAuditSpan(getAuditSpan(), () -> owner.uploadPart(request, body, durationTrackerFactory)));
} | 3.26 |
hadoop_WriteOperationHelper_writeSuccessful_rdh | /**
* Callback on a successful write.
*
* @param length
* length of the write
*/
public void writeSuccessful(long length) {
} | 3.26 |
hadoop_WriteOperationHelper_activateAuditSpan_rdh | /**
* Activate the audit span.
*
* @return the span
*/
private AuditSpan activateAuditSpan() {
return auditSpan.activate();
} | 3.26 |
hadoop_WriteOperationHelper_getAuditSpan_rdh | /**
* Get the audit span this object was created with.
*
* @return the audit span
*/
public AuditSpan getAuditSpan() {
    return auditSpan;
} | 3.26 |
hadoop_WriteOperationHelper_abortMultipartUploadsUnderPath_rdh | /**
* Abort multipart uploads under a path: limited to the first
* few hundred.
*
* @param prefix
* prefix for uploads to abort
* @return a count of aborts
* @throws IOException
* trouble; FileNotFoundExceptions are swallowed.
*/
@Retries.RetryTranslated
public int abortMultipartUploadsUnderPath(String prefix) throws IOException {
LOG.debug("Aborting multipart uploads under {}", prefix);
    int count = 0;
    List<MultipartUpload> uploads = listMultipartUploads(prefix);
    LOG.debug("Number of outstanding uploads: {}", uploads.size());
    for (MultipartUpload upload : uploads) {
      try {
        abortMultipartUpload(upload);
        count++;
      } catch (FileNotFoundException e) {
        LOG.debug("Already aborted: {}", upload.key(), e);
      }
}
return count;
} | 3.26 |
hadoop_WriteOperationHelper_revertCommit_rdh | /**
* Revert a commit by deleting the file.
* Relies on retry code in filesystem.
* Does not attempt to recreate the parent directory
*
* @throws IOException
* on problems
* @param destKey
* destination key
*/
@Retries.OnceTranslated
public void revertCommit(String destKey) throws IOException {
once("revert commit", destKey, withinAuditSpan(getAuditSpan(), ()
-> {
Path destPath = owner.keyToQualifiedPath(destKey);
owner.deleteObjectAtPath(destPath, destKey, true);
}));
} | 3.26 |
hadoop_WriteOperationHelper_createPutObjectRequest_rdh | /**
* Create a {@link PutObjectRequest} request against the specific key.
*
* @param destKey
* destination key
* @param length
* size, if known. Use -1 for not known
* @param options
* options for the request
* @param isFile
* is data to be uploaded a file
* @return the request
*/
@Retries.OnceRaw
public PutObjectRequest createPutObjectRequest(String destKey, long length, final PutObjectOptions options, boolean isFile) {
activateAuditSpan();
return getRequestFactory().newPutObjectRequestBuilder(destKey, options, length, false).build();
} | 3.26 |
hadoop_WriteOperationHelper_abortMultipartUpload_rdh | /**
* Abort a multipart commit operation.
*
* @param upload
* upload to abort.
* @throws IOException
* on problems.
*/
@Retries.RetryTranslated
public void abortMultipartUpload(MultipartUpload upload) throws IOException {
invoker.retry("Aborting multipart commit", upload.key(), true, withinAuditSpan(getAuditSpan(), () -> owner.abortMultipartUpload(upload)));
} | 3.26 |
hadoop_WriteOperationHelper_getConf_rdh | /**
* Get the configuration of this instance; essentially the owning
* filesystem configuration.
*
* @return the configuration.
 */
public Configuration getConf() {
return conf;
} | 3.26 |
hadoop_WriteOperationHelper_finalizeMultipartUpload_rdh | /**
* Finalize a multipart PUT operation.
* This completes the upload, and, if that works, calls
* {@link S3AFileSystem#finishedWrite(String, long, String, String, org.apache.hadoop.fs.s3a.impl.PutObjectOptions)}
* to update the filesystem.
* Retry policy: retrying, translated.
*
* @param destKey
* destination of the commit
* @param uploadId
* multipart operation Id
* @param partETags
* list of partial uploads
* @param length
* length of the upload
* @param putOptions
* put object options
* @param retrying
* retrying callback
* @return the result of the operation.
* @throws IOException
* on problems.
*/
@Retries.RetryTranslated
private CompleteMultipartUploadResponse finalizeMultipartUpload(String destKey, String uploadId, List<CompletedPart> partETags, long length, PutObjectOptions putOptions, Retried retrying) throws IOException {
    if (partETags.isEmpty()) {
throw new PathIOException(destKey, "No upload parts in multipart upload");
}
try (AuditSpan span = activateAuditSpan()) {
CompleteMultipartUploadResponse uploadResult;
uploadResult = invoker.retry("Completing multipart upload", destKey, true, retrying, () -> {
final CompleteMultipartUploadRequest.Builder requestBuilder = getRequestFactory().newCompleteMultipartUploadRequestBuilder(destKey, uploadId, partETags);
return writeOperationHelperCallbacks.completeMultipartUpload(requestBuilder.build());
});
owner.finishedWrite(destKey, length, uploadResult.eTag(), uploadResult.versionId(), putOptions);
return uploadResult;
}
} | 3.26 |
hadoop_WriteOperationHelper_commitUpload_rdh | /**
* This completes a multipart upload to the destination key via
* {@code finalizeMultipartUpload()}.
* Markers are never deleted on commit; this avoids having to
* issue many duplicate deletions.
* Retry policy: retrying, translated.
* Retries increment the {@code errorCount} counter.
*
* @param destKey
* destination
* @param uploadId
* multipart operation Id
* @param partETags
* list of partial uploads
* @param length
* length of the upload
* @return the result of the operation.
* @throws IOException
* if problems arose which could not be retried, or
* the retry count was exceeded
*/
@Retries.RetryTranslated
public CompleteMultipartUploadResponse commitUpload(String destKey, String uploadId, List<CompletedPart> partETags, long length) throws IOException {
checkNotNull(uploadId);
checkNotNull(partETags);
LOG.debug("Completing multipart upload {} with {} parts", uploadId, partETags.size());
    return finalizeMultipartUpload(destKey, uploadId, partETags, length, PutObjectOptions.keepingDirs(), Invoker.NO_OP);
} | 3.26 |
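A hedged end-to-end sketch of the multipart sequence implied by the snippets above (initiate, upload one part, commit). The AWS SDK v2 builder calls and the null duration-tracker factory are assumptions; only the helper methods and `PutObjectOptions.keepingDirs()` appear in these snippets.

```java
// Hedged sketch: upload `data` as a single-part multipart upload.
import java.io.IOException;
import java.util.Collections;
import org.apache.hadoop.fs.s3a.WriteOperationHelper;
import org.apache.hadoop.fs.s3a.impl.PutObjectOptions;
import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.services.s3.model.CompletedPart;
import software.amazon.awssdk.services.s3.model.UploadPartRequest;
import software.amazon.awssdk.services.s3.model.UploadPartResponse;

public final class MultipartUploadSketch {
  static void uploadSinglePart(WriteOperationHelper helper, String destKey,
      byte[] data) throws IOException {
    // 1. Initiate; keepingDirs() mirrors the commitUpload() snippet above.
    String uploadId =
        helper.initiateMultiPartUpload(destKey, PutObjectOptions.keepingDirs());
    // 2. Upload one part (part numbers start at 1).
    UploadPartRequest part = UploadPartRequest.builder()
        .key(destKey)
        .uploadId(uploadId)
        .partNumber(1)
        .build();
    UploadPartResponse resp =
        helper.uploadPart(part, RequestBody.fromBytes(data), null);
    // 3. Commit with the ETag returned for the part.
    CompletedPart completed = CompletedPart.builder()
        .partNumber(1)
        .eTag(resp.eTag())
        .build();
    helper.commitUpload(destKey, uploadId,
        Collections.singletonList(completed), data.length);
  }
}
```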
hadoop_WriteOperationHelper_abortMultipartCommit_rdh | /**
* Abort a multipart commit operation.
*
* @param destKey
* destination key of ongoing operation
* @param uploadId
* multipart operation Id
* @throws IOException
* on problems.
* @throws FileNotFoundException
* if the abort ID is unknown
*/
@Override
@Retries.RetryTranslated
public void abortMultipartCommit(String destKey, String uploadId) throws IOException {
abortMultipartUpload(destKey, uploadId, true, invoker.getRetryCallback());
} | 3.26 |
hadoop_WriteOperationHelper_getRequestFactory_rdh | /**
* Get the request factory which uses this store's audit span.
*
* @return the request factory.
*/
public RequestFactory getRequestFactory() {
return requestFactory;
} | 3.26 |
hadoop_WriteOperationHelper_writeFailed_rdh | /**
* Callback on a write failure.
*
* @param ex
* Any exception raised which triggered the failure.
*/
public void writeFailed(Exception ex) {
LOG.debug("Write to {} failed", this, ex);
} | 3.26 |
hadoop_WriteOperationHelper_close_rdh | /**
* Deactivate the audit span.
*/
@Override
public void close() throws IOException {deactivateAuditSpan();
} | 3.26 |
hadoop_SchedulerAppReport_getReservedContainers_rdh | /**
* Get the list of reserved containers
*
* @return All of the reserved containers.
*/
public Collection<RMContainer> getReservedContainers() {
return reserved;
} | 3.26 |
hadoop_SchedulerAppReport_isPending_rdh | /**
* Is this application pending?
*
* @return true if it is else false.
*/
public boolean isPending() {
return pending;
} | 3.26 |
hadoop_FedBalance_setForceCloseOpen_rdh | /**
 * Whether to force-close all open files when there is no diff.
*
* @param value
 * true to force-close all open files.
*/
public Builder setForceCloseOpen(boolean value) {
this.forceCloseOpen = value;
return this;
} | 3.26 |
hadoop_FedBalance_m0_rdh | /**
* Build the balance job.
*/
public BalanceJob m0() throws IOException {
// Construct job context.
FedBalanceContext context;
Path dst = new Path(inputDst);
if (dst.toUri().getAuthority() == null) {
throw new IOException("The destination cluster must be specified.");
}
Path src = new Path(inputSrc);
if (src.toUri().getAuthority() == null) {
throw new IOException("The source cluster must be specified.");
}
context = new FedBalanceContext.Builder(src, dst, NO_MOUNT, getConf()).setForceCloseOpenFiles(forceCloseOpen).setUseMountReadOnly(false).setMapNum(map).setBandwidthLimit(bandwidth).setTrash(trashOpt).setDiffThreshold(diffThreshold).build();
LOG.info(context.toString());
// Construct the balance job.
BalanceJob.Builder<BalanceProcedure> builder = new BalanceJob.Builder<>();
DistCpProcedure dcp = new DistCpProcedure(DISTCP_PROCEDURE, null, delayDuration, context);
builder.nextProcedure(dcp);
TrashProcedure tp = new TrashProcedure(TRASH_PROCEDURE, null, delayDuration, context);
builder.nextProcedure(tp);
return builder.build();
} | 3.26 |
hadoop_FedBalance_main_rdh | /**
* Main function of the FedBalance program. Parses the input arguments and
* invokes the FedBalance::run() method, via the ToolRunner.
*
* @param argv
* Command-line arguments sent to FedBalance.
*/
public static void main(String[] argv) {
Configuration conf = getDefaultConf();
FedBalance fedBalance = new FedBalance();
fedBalance.setConf(conf);
int exitCode;
try {
      exitCode = ToolRunner.run(fedBalance, argv);
    } catch (Exception e) {
LOG.warn("Couldn't complete FedBalance operation.", e);
exitCode = -1;
}
System.exit(exitCode);
} | 3.26 |
hadoop_FedBalance_setDiffThreshold_rdh | /**
* Specify the threshold of diff entries.
*
* @param value
* the threshold of a fast distcp.
*/
public Builder setDiffThreshold(int value) {
this.diffThreshold = value;
return this;
} | 3.26 |
hadoop_FedBalance_setBandWidth_rdh | /**
* Specify bandwidth per map in MB.
*
* @param value
* the bandwidth.
*/
public Builder setBandWidth(int value) {
this.bandwidth = value;
return this;
} | 3.26 |
hadoop_FedBalance_setTrashOpt_rdh | /**
* Specify the trash behaviour of the source path.
*
* @param value
* the trash option.
*/
public Builder setTrashOpt(TrashOption value) {
this.trashOpt = value;
return this;
} | 3.26 |
hadoop_FedBalance_continueJob_rdh | /**
* Recover and continue the unfinished jobs.
*/
private int continueJob() throws InterruptedException {
BalanceProcedureScheduler scheduler = new BalanceProcedureScheduler(getConf());
try {
scheduler.init(true);
while (true) {
Collection<BalanceJob> jobs = scheduler.getAllJobs();
int unfinished = 0;
        for (BalanceJob job : jobs) {
          if (!job.isJobDone()) {
            unfinished++;
          }
          LOG.info(job.toString());
        }
        if (unfinished == 0) {
          break;
        }
Thread.sleep(TimeUnit.SECONDS.toMillis(10));
}
} catch (IOException e) {
LOG.error("Continue balance job failed.", e);
return -1;
} finally {
scheduler.shutDown();
}
return 0;
} | 3.26 |
hadoop_FedBalance_setDelayDuration_rdh | /**
 * Specify the delay duration (in milliseconds) used when the procedure needs to retry.
*
* @param value
* the delay duration of the job.
*/
public Builder setDelayDuration(long value) {
this.delayDuration = value;
    return this;
  } | 3.26 |
hadoop_FedBalance_submit_rdh | /**
* Start a ProcedureScheduler and submit the job.
*
* @param command
* the command options.
* @param inputSrc
* the source input. This specifies the source path.
* @param inputDst
* the dst input. This specifies the dst path.
*/
  private int submit(CommandLine command, String inputSrc, String inputDst) throws IOException {
Builder builder = new Builder(inputSrc, inputDst);
// parse options.
builder.setForceCloseOpen(command.hasOption(FORCE_CLOSE_OPEN.getOpt()));
if (command.hasOption(MAP.getOpt())) {
builder.setMap(Integer.parseInt(command.getOptionValue(MAP.getOpt())));
}
if (command.hasOption(BANDWIDTH.getOpt())) {
builder.setBandWidth(Integer.parseInt(command.getOptionValue(BANDWIDTH.getOpt())));
}
if (command.hasOption(DELAY_DURATION.getOpt())) {
builder.setDelayDuration(Long.parseLong(command.getOptionValue(DELAY_DURATION.getOpt())));
}
if (command.hasOption(DIFF_THRESHOLD.getOpt())) {
builder.setDiffThreshold(Integer.parseInt(command.getOptionValue(DIFF_THRESHOLD.getOpt())));
}
if (command.hasOption(TRASH.getOpt())) {
String val = command.getOptionValue(TRASH.getOpt());
if (val.equalsIgnoreCase("skip")) {
builder.setTrashOpt(FedBalanceConfigs.TrashOption.SKIP);
} else if (val.equalsIgnoreCase("trash")) {
builder.setTrashOpt(FedBalanceConfigs.TrashOption.TRASH);
} else if (val.equalsIgnoreCase("delete")) {
builder.setTrashOpt(FedBalanceConfigs.TrashOption.DELETE);
} else {
printUsage();
return -1;
}
}
// Submit the job.
BalanceProcedureScheduler scheduler = new BalanceProcedureScheduler(getConf());
scheduler.init(false);
try {
      BalanceJob balanceJob = builder.m0();
      // Submit and wait until the job is done.
      scheduler.submit(balanceJob);
      scheduler.waitUntilDone(balanceJob);
    } catch (IOException e) {
      LOG.error("Submit balance job failed.", e);
      return -1;
    } finally {
      scheduler.shutDown();
    }
    return 0;
} | 3.26 |
hadoop_SubApplicationRowKey_getRowKeyAsString_rdh | /**
* Constructs a row key for the sub app table as follows:
* <p>
* {@code subAppUserId!clusterId!
* entityType!entityIdPrefix!entityId!userId}.
*
* subAppUserId is usually the doAsUser.
 * userId is the yarn user that the AM runs as.
*
* </p>
*
* @return String representation of row key.
*/
public String getRowKeyAsString() {
return subAppRowKeyConverter.encodeAsString(this);
} | 3.26 |
hadoop_SubApplicationRowKey_getRowKey_rdh | /**
* Constructs a row key for the sub app table as follows:
* {@code subAppUserId!clusterId!entityType
* !entityPrefix!entityId!userId}.
* Typically used while querying a specific sub app.
*
* subAppUserId is usually the doAsUser.
* userId is the yarn user that the AM runs as.
*
* @return byte array with the row key.
*/
public byte[] getRowKey() {
return subAppRowKeyConverter.encode(this);
} | 3.26 |
hadoop_SubApplicationRowKey_parseRowKeyFromString_rdh | /**
* Given the encoded row key as string, returns the row key as an object.
*
* @param encodedRowKey
* String representation of row key.
* @return A <cite>SubApplicationRowKey</cite> object.
*/
public static SubApplicationRowKey parseRowKeyFromString(String encodedRowKey) {
return new SubApplicationRowKeyConverter().decodeFromString(encodedRowKey);
} | 3.26 |
hadoop_SubApplicationRowKey_parseRowKey_rdh | /**
* Given the raw row key as bytes, returns the row key as an object.
*
* @param rowKey
* byte representation of row key.
 * @return A <cite>SubApplicationRowKey</cite> object.
*/
public static SubApplicationRowKey parseRowKey(byte[] rowKey) {
    return new SubApplicationRowKeyConverter().decode(rowKey);
} | 3.26 |
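A minimal, hedged round-trip sketch of the encode/decode pair above. How the `rowKey` instance is constructed is not shown in these snippets and is left as a parameter; the import path is an assumption.

```java
// Hedged sketch: encode a sub-application row key and parse it back.
// Only getRowKeyAsString() and parseRowKeyFromString() come from the
// snippets above; the package in the import is assumed.
import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKey;

public final class RowKeyRoundTripSketch {
  static SubApplicationRowKey roundTrip(SubApplicationRowKey rowKey) {
    String encoded = rowKey.getRowKeyAsString();
    return SubApplicationRowKey.parseRowKeyFromString(encoded);
  }
}
```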
hadoop_SplitCompressionInputStream_getAdjustedEnd_rdh | /**
* After calling createInputStream, the values of start or end
* might change. So this method can be used to get the new value of end.
*
* @return The changed value of end
*/
public long getAdjustedEnd() {
return end;
} | 3.26 |
hadoop_SplitCompressionInputStream_getAdjustedStart_rdh | /**
* After calling createInputStream, the values of start or end
* might change. So this method can be used to get the new value of start.
*
* @return The changed value of start
*/
public long getAdjustedStart() {
  return start;
} | 3.26 |
hadoop_ContainerShellWebSocket_checkAuthorization_rdh | /**
* Check if user is authorized to access container.
*
* @param session
* websocket session
* @param container
* instance of container to access
* @return true if user is allowed to access container.
* @throws IOException
*/
protected boolean checkAuthorization(Session session, Container container) throws IOException {
boolean authorized = true;
String user = "";
if (UserGroupInformation.isSecurityEnabled()) {
      user = new HadoopKerberosName(session.getUpgradeRequest().getUserPrincipal().getName()).getShortName();
    } else {
Map<String, List<String>> parameters = session.getUpgradeRequest().getParameterMap();
if (parameters.containsKey("user.name")) {
List<String> users = parameters.get("user.name");
user = users.get(0);
}
}
boolean isAdmin = false;
if (nmContext.getApplicationACLsManager().areACLsEnabled()) {
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
isAdmin = nmContext.getApplicationACLsManager().isAdmin(ugi);
    }
    String containerUser = container.getUser();
if ((!user.equals(containerUser)) && (!isAdmin)) {
authorized = false;
}
return authorized;
} | 3.26 |
hadoop_BlockRecoveryCommand_getNewBlock_rdh | /**
* Return the new block.
*/
public Block getNewBlock() {
return recoveryBlock;
} | 3.26 |
hadoop_BlockRecoveryCommand_getNewGenerationStamp_rdh | /**
* Return the new generation stamp of the block,
 * which also plays the role of the recovery id.
*/
public long getNewGenerationStamp() {
return newGenerationStamp;
} | 3.26 |
hadoop_BlockRecoveryCommand_add_rdh | /**
* Add recovering block to the command.
*/
public void add(RecoveringBlock block) {
recoveringBlocks.add(block);
} | 3.26 |
hadoop_BlockRecoveryCommand_getRecoveringBlocks_rdh | /**
* Return the list of recovering blocks.
*/
public Collection<RecoveringBlock> getRecoveringBlocks() {
return recoveringBlocks;
} | 3.26 |
hadoop_FileSystemReleaseFilter_setFileSystem_rdh | /**
* Static method that sets the <code>FileSystem</code> to release back to
* the {@link FileSystemAccess} service on servlet request completion.
*
* @param fs
* a filesystem instance.
*/
public static void setFileSystem(FileSystem fs) {
FILE_SYSTEM_TL.set(fs);
} | 3.26 |
hadoop_FileSystemReleaseFilter_destroy_rdh | /**
* Destroys the filter.
* <p>
* This implementation is a NOP.
*/
@Override
public void destroy() {
} | 3.26 |
hadoop_FileSystemReleaseFilter_init_rdh | /**
* Initializes the filter.
* <p>
* This implementation is a NOP.
*
* @param filterConfig
* filter configuration.
* @throws ServletException
* thrown if the filter could not be initialized.
*/
@Override
public void init(FilterConfig filterConfig) throws ServletException {
}
/**
* It delegates the incoming request to the <code>FilterChain</code>, and
* at its completion (in a finally block) releases the filesystem instance
* back to the {@link FileSystemAccess} | 3.26 |
hadoop_TFile_end_rdh | /**
* Get the end location of the TFile.
*
* @return The location right after the last key-value pair in TFile.
*/
Location end() {
return end;
} | 3.26 |
hadoop_TFile_getComparator_rdh | /**
* Get an instance of the RawComparator that is constructed based on the
* string comparator representation.
*
* @return a Comparator that can compare RawComparable's.
*/
public Comparator<RawComparable> getComparator() {
return comparator;
} | 3.26 |
hadoop_TFile_getLastKey_rdh | /**
* Get the last key in the TFile.
*
* @return The last key in the TFile.
* @throws IOException
* raised on errors performing I/O.
*/
public RawComparable getLastKey() throws IOException {
checkTFileDataIndex();
return tfileIndex.getLastKey();
} | 3.26 |
hadoop_TFile_prepareAppendKey_rdh | /**
* Obtain an output stream for writing a key into TFile. This may only be
* called when there is no active Key appending stream or value appending
* stream.
*
* @param length
* The expected length of the key. If length of the key is not
* known, set length = -1. Otherwise, the application must write
* exactly as many bytes as specified here before calling close on
* the returned output stream.
* @return The key appending output stream.
* @throws IOException
* raised on errors performing I/O.
*/
public DataOutputStream prepareAppendKey(int length) throws IOException {
  if (state != State.READY) {
throw new IllegalStateException("Incorrect state to start a new key: " + state.name());
}
initDataBlock();
  DataOutputStream ret = new KeyRegister(length);
  state = State.IN_KEY;
  return ret;
} | 3.26 |
hadoop_TFile_seekTo_rdh | /**
* Move the cursor to the new location. The entry returned by the previous
* entry() call will be invalid.
*
* @param l
* new cursor location. It must fall between the begin and end
* location of the scanner.
* @throws IOException
*/
private void seekTo(Location l) throws IOException {
if (l.compareTo(beginLocation) < 0) {
throw new IllegalArgumentException("Attempt to seek before the begin location.");
}
if (l.compareTo(endLocation) > 0) {
throw new IllegalArgumentException("Attempt to seek after the end location.");
}
if (l.compareTo(endLocation) == 0) {
parkCursorAtEnd();
return;
}
if (l.getBlockIndex() != currentLocation.getBlockIndex()) {
// going to a totally different block
initBlock(l.getBlockIndex());
} else {
if (valueChecked) {
// may temporarily go beyond the last record in the block (in which
// case the next if loop will always be true).
inBlockAdvance(1);
}
if (l.getRecordIndex() < currentLocation.getRecordIndex()) {
initBlock(l.getBlockIndex());
}
}
inBlockAdvance(l.getRecordIndex() - currentLocation.getRecordIndex());
return;
} | 3.26 |
hadoop_TFile_makeComparator_rdh | /**
* Make a raw comparator from a string name.
*
* @param name
* Comparator name
* @return A RawComparable comparator.
*/
public static Comparator<RawComparable> makeComparator(String name) {
return TFileMeta.makeComparator(name);
} | 3.26 |
hadoop_TFile_m2_rdh | /**
* Move the cursor to the first entry whose key is greater than or equal
* to the input key. Synonymous to lowerBound(key, 0, key.length). The
* entry returned by the previous entry() call will be invalid.
*
* @param key
* The input key
* @throws IOException
* raised on errors performing I/O.
*/
public void m2(byte[] key) throws IOException {
lowerBound(key, 0, key.length);
} | 3.26 |
hadoop_TFile_getKeyStream_rdh | /**
 * Streaming access to the key. Useful for deserializing the key into
* user objects.
*
* @return The input stream.
*/
public DataInputStream getKeyStream() {
keyDataInputStream.reset(keyBuffer, klen);
return keyDataInputStream;
} | 3.26 |
hadoop_TFile_m3_rdh | /**
* Copy the key and value in one shot into BytesWritables. This is
* equivalent to getKey(key); getValue(value);
*
* @param key
* BytesWritable to hold key.
* @param value
* BytesWritable to hold value
* @throws IOException
* raised on errors performing I/O.
*/
public void m3(BytesWritable key, BytesWritable value) throws IOException {
getKey(key);
getValue(value);
} | 3.26 |
hadoop_TFile_close_rdh | /**
* Close the scanner. Release all resources. The behavior of using the
* scanner after calling close is not defined. The entry returned by the
* previous entry() call will be invalid.
*/
@Override
public void close() throws IOException {
parkCursorAtEnd();
} | 3.26 |
hadoop_TFile_atEnd_rdh | /**
* Is cursor at the end location?
*
* @return true if the cursor is at the end location.
*/
public boolean atEnd() {
return currentLocation.compareTo(endLocation) >= 0;
} | 3.26 |
hadoop_TFile_inBlockAdvance_rdh | /**
* Advance cursor in block until we find a key that is greater than or
* equal to the input key.
*
* @param key
* Key to compare.
* @param greater
* advance until we find a key greater than the input key.
* @return true if we find a equal key.
* @throws IOException
*/
private boolean inBlockAdvance(RawComparable key, boolean greater) throws IOException {
int curBid = currentLocation.getBlockIndex();
long entryInBlock = reader.getBlockEntryCount(curBid);
      if (curBid == endLocation.getBlockIndex()) {
        entryInBlock = endLocation.getRecordIndex();
      }
      while (currentLocation.getRecordIndex() < entryInBlock) {
int cmp = compareCursorKeyTo(key);
if (cmp > 0)
return false;
if ((cmp == 0) && (!greater))
return true;
if (!valueBufferInputStream.isClosed()) {
valueBufferInputStream.close();
}
klen = -1;
currentLocation.incRecordIndex();
}
throw new RuntimeException("Cannot find matching key in block.");
} | 3.26 |
hadoop_TFile_hashCode_rdh | /**
*
* @see java.lang.Object#hashCode()
*/
@Override
public int hashCode() {
  final int prime = 31;
  int result = prime + blockIndex;
result = ((int) ((prime * result) + recordIndex));
return result;
} | 3.26 |
hadoop_TFile_isSorted_rdh | /**
* Is the TFile sorted?
*
* @return true if TFile is sorted.
*/
public boolean isSorted() {
return tfileMeta.isSorted();
} | 3.26 |
hadoop_TFile_entry_rdh | /**
* Get an entry to access the key and value.
*
* @return The Entry object to access the key and value.
* @throws IOException
* raised on errors performing I/O.
*/
public Entry entry() throws IOException {
checkKey();
  return new Entry();
} | 3.26 |
hadoop_TFile_getFirstKey_rdh | /**
* Get the first key in the TFile.
*
* @return The first key in the TFile.
* @throws IOException
* raised on errors performing I/O.
*/
public RawComparable getFirstKey() throws IOException {
checkTFileDataIndex();
return tfileIndex.getFirstKey();
} | 3.26 |
hadoop_TFile_compareTo_rdh | /**
* Compare an entry with a RawComparable object. This is useful when
* Entries are stored in a collection, and we want to compare a user
* supplied key.
*/
@Override
public int compareTo(RawComparable key) {
return reader.compareKeys(keyBuffer, 0, getKeyLength(), key.buffer(), key.offset(), key.size());
} | 3.26 |
hadoop_TFile_equals_rdh | /**
*
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
Location other = ((Location) (obj));
if (blockIndex != other.blockIndex)
return false;
if (recordIndex != other.recordIndex)
return false;
  return true;
} | 3.26 |
hadoop_TFile_writeValue_rdh | /**
* Writing the value to the output stream. This method avoids copying
* value data from Scanner into user buffer, then writing to the output
* stream. It does not require the value length to be known.
*
* @param out
* The output stream
* @return the length of the value
* @throws IOException
* raised on errors performing I/O.
*/
public long writeValue(OutputStream out) throws IOException {
DataInputStream dis = getValueStream();
long size = 0;
try {
int chunkSize;
while ((chunkSize = valueBufferInputStream.getRemain()) > 0) {
        chunkSize = Math.min(chunkSize, MAX_VAL_TRANSFER_BUF_SIZE);
f1.setSize(chunkSize);
dis.readFully(f1.getBytes(), 0, chunkSize);
out.write(f1.getBytes(), 0, chunkSize);
size += chunkSize;
}
return size;
    } finally {
dis.close();
}
} | 3.26 |
hadoop_TFile_getBlockContainsKey_rdh | /**
* if greater is true then returns the beginning location of the block
* containing the key strictly greater than input key. if greater is false
* then returns the beginning location of the block greater than equal to
* the input key
*
* @param key
* the input key
* @param greater
* boolean flag
 * @return the begin location of the matching block, or the end location if no block matches.
 * @throws IOException raised on errors performing I/O.
*/
Location getBlockContainsKey(RawComparable key, boolean greater) throws IOException {
if (!isSorted()) {
throw new RuntimeException("Seeking in unsorted TFile");
}
checkTFileDataIndex();
int blkIndex = (greater) ? tfileIndex.upperBound(key)
: tfileIndex.lowerBound(key);
if (blkIndex < 0)
return end;
return new Location(blkIndex, 0);
} | 3.26 |
hadoop_TFile_lowerBound_rdh | /**
* Move the cursor to the first entry whose key is greater than or equal
* to the input key. The entry returned by the previous entry() call will
* be invalid.
*
* @param key
* The input key
* @param keyOffset
* offset in the key buffer.
* @param keyLen
* key buffer length.
* @throws IOException
* raised on errors performing I/O.
*/
public void lowerBound(byte[] key, int keyOffset, int keyLen) throws IOException {
seekTo(new ByteArray(key, keyOffset, keyLen), false);
} | 3.26 |
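A hedged read-side sketch built around the `lowerBound()` shown above: seek to the first entry at or after a key and copy it out. The reader/scanner construction and the entry copy method name are assumptions about the surrounding TFile API; only `lowerBound(byte[], int, int)`, `atEnd()`, and `entry()` appear in these snippets.

```java
// Hedged sketch: seek a TFile scanner to the first entry whose key is
// >= searchKey. The file path and Reader construction are assumed.
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.file.tfile.TFile;

public final class TFileSeekSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    Path path = new Path("/tmp/example.tfile"); // assumed to exist
    byte[] searchKey = "key-0001".getBytes(StandardCharsets.UTF_8);
    try (FSDataInputStream in = fs.open(path);
         TFile.Reader reader =
             new TFile.Reader(in, fs.getFileStatus(path).getLen(), conf);
         TFile.Reader.Scanner scanner = reader.createScanner()) {
      // Position at the first entry whose key is >= searchKey.
      scanner.lowerBound(searchKey, 0, searchKey.length);
      if (!scanner.atEnd()) {
        BytesWritable key = new BytesWritable();
        BytesWritable value = new BytesWritable();
        scanner.entry().get(key, value); // copy the pair out (name assumed)
      }
    }
  }
}
```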
hadoop_TFile_writeKey_rdh | /**
* Writing the key to the output stream. This method avoids copying key
* buffer from Scanner into user buffer, then writing to the output
* stream.
*
* @param out
* The output stream
* @return the length of the key.
* @throws IOException
* raised on errors performing I/O.
*/
public int writeKey(OutputStream out) throws IOException {
out.write(keyBuffer, 0, klen);
return klen;
} | 3.26 |
hadoop_TFile_getEntryComparator_rdh | /**
* Get a Comparator object to compare Entries. It is useful when you want
* stores the entries in a collection (such as PriorityQueue) and perform
* sorting or comparison among entries based on the keys without copying out
* the key.
*
* @return An Entry Comparator..
*/
public Comparator<Scanner.Entry> getEntryComparator() {
  if (!isSorted()) {
throw new RuntimeException("Entries are not comparable for unsorted TFiles");
}
return new Comparator<Scanner.Entry>() {
/**
* Provide a customized comparator for Entries. This is useful if we
* have a collection of Entry objects. However, if the Entry objects
* come from different TFiles, users must ensure that those TFiles share
* the same RawComparator.
 */
@Override
public int compare(Scanner.Entry o1, Scanner.Entry o2) {
return comparator.compare(o1.getKeyBuffer(), 0, o1.getKeyLength(), o2.getKeyBuffer(), 0, o2.getKeyLength());
}
};
} | 3.26 |
hadoop_TFile_append_rdh | /**
* Adding a new key-value pair to TFile.
*
* @param key
* buffer for key.
* @param koff
* offset in key buffer.
* @param klen
* length of key.
* @param value
* buffer for value.
* @param voff
* offset in value buffer.
* @param vlen
* length of value.
* @throws IOException
* Upon IO errors.
* <p>
* If an exception is thrown, the TFile will be in an inconsistent
* state. The only legitimate call after that would be close
*/
public void append(byte[] key, int koff, int klen, byte[] value, int voff, int vlen) throws IOException {
if ((((koff | klen) | (koff + klen)) | (key.length - (koff + klen))) < 0) {
throw new IndexOutOfBoundsException("Bad key buffer offset-length combination.");
}
    if ((((voff | vlen) | (voff + vlen)) | (value.length - (voff + vlen))) < 0) {
      throw new IndexOutOfBoundsException("Bad value buffer offset-length combination.");
}
try {
DataOutputStream dosKey = prepareAppendKey(klen);
try {
++errorCount;
dosKey.write(key, koff, klen);
--errorCount;
      } finally {
dosKey.close();
}
DataOutputStream dosValue = prepareAppendValue(vlen);
try {
++errorCount;
dosValue.write(value, voff, vlen);
--errorCount;
} finally {
dosValue.close();
}
} finally {
      state = State.READY;
}
} | 3.26 |
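A hedged write-side sketch using the six-argument `append()` shown above. The `TFile.Writer` constructor arguments ("gz" compression, "memcmp" comparator, block size) and the file path are assumptions based on common TFile usage, not taken from these snippets.

```java
// Hedged sketch: write one sorted key/value pair into a TFile.
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.file.tfile.TFile;

public final class TFileWriteSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    Path path = new Path("/tmp/example.tfile");
    try (FSDataOutputStream out = fs.create(path);
         TFile.Writer writer =
             new TFile.Writer(out, 64 * 1024, "gz", "memcmp", conf)) {
      byte[] key = "key-0001".getBytes(StandardCharsets.UTF_8);
      byte[] value = "value-0001".getBytes(StandardCharsets.UTF_8);
      // Keys must be appended in sorted order for a "memcmp" TFile.
      writer.append(key, 0, key.length, value, 0, value.length);
    }
  }
}
```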
hadoop_TFile_flush_rdh | // Avoiding flushing call to down stream.
@Override
public void flush() {
// do nothing
} | 3.26 |
hadoop_TFile_m1_rdh | /**
* Create a scanner that covers a range of records.
*
* @param beginRecNum
* The RecordNum for the first record (inclusive).
* @param endRecNum
* The RecordNum for the last record (exclusive). To scan the whole
* file, either specify endRecNum==-1 or endRecNum==getEntryCount().
* @return The TFile scanner that covers the specified range of records.
* @throws IOException
* raised on errors performing I/O.
*/
  public Scanner m1(long beginRecNum, long endRecNum) throws IOException {
    if (beginRecNum < 0)
      beginRecNum = 0;
    if ((endRecNum < 0) || (endRecNum > getEntryCount())) {
      endRecNum = getEntryCount();
    }
return new Scanner(this, getLocationByRecordNum(beginRecNum), getLocationByRecordNum(endRecNum));
} | 3.26 |
hadoop_TFile_initBlock_rdh | /**
* Load a compressed block for reading. Expecting blockIndex is valid.
*
* @throws IOException
*/
private void initBlock(int blockIndex) throws IOException {
klen = -1;
if (blkReader != null) {
try {
blkReader.close();
} finally {
blkReader = null;
}
}
blkReader = reader.getBlockReader(blockIndex);
currentLocation.set(blockIndex, 0);
} | 3.26 |
hadoop_TFile_checkTFileDataIndex_rdh | /**
* Lazily loading the TFile index.
*
* @throws IOException
*/
synchronized void checkTFileDataIndex() throws IOException {
if (tfileIndex == null) {
BlockReader brIndex = readerBCF.getMetaBlock(TFileIndex.BLOCK_NAME);
try {
tfileIndex = new TFileIndex(readerBCF.getBlockCount(), brIndex, tfileMeta.getComparator());
} finally {
brIndex.close();
}
}
} | 3.26 |
hadoop_TFile_finishDataBlock_rdh | /**
* Close the current data block if necessary.
*
* @param bForceFinish
* Force the closure regardless of the block size.
* @throws IOException
*/
void finishDataBlock(boolean bForceFinish) throws IOException {
if (blkAppender == null) {
return;
}
// exceeded the size limit, do the compression and finish the block
if (bForceFinish || (blkAppender.getCompressedSize() >= sizeMinBlock)) {
// keep tracks of the last key of each data block, no padding
// for now
TFileIndexEntry keyLast = new TFileIndexEntry(lastKeyBufferOS.getBuffer(), 0, lastKeyBufferOS.size(), blkRecordCount);
tfileIndex.addEntry(keyLast);
// close the appender
blkAppender.close();
blkAppender = null;
blkRecordCount = 0;
}
} | 3.26 |
hadoop_TFile_initDataBlock_rdh | /**
* Check if we need to start a new data block.
*
* @throws IOException
*/
private void initDataBlock() throws IOException {
// for each new block, get a new appender
if (blkAppender == null) {
blkAppender = writerBCF.prepareDataBlock();
}
} | 3.26 |
hadoop_TFile_getKey_rdh | /**
* Copy the key into user supplied buffer.
*
* @param buf
* The buffer supplied by user.
* @param offset
* The starting offset of the user buffer where we should copy
* the key into. Requiring the key-length + offset no greater
* than the buffer length.
* @return The length of the key.
* @throws IOException
* raised on errors performing I/O.
 */
public int getKey(byte[] buf, int offset) throws IOException {
  if ((offset | ((buf.length - offset) - klen)) < 0) {
    throw new IndexOutOfBoundsException("Buffer not enough to store the key");
  }
System.arraycopy(keyBuffer, 0, buf, offset, klen);
return klen;
} | 3.26 |
hadoop_TFile_upperBound_rdh | /**
* Move the cursor to the first entry whose key is strictly greater than
* the input key. The entry returned by the previous entry() call will be
* invalid.
*
* @param key
* The input key
* @param keyOffset
* offset in the key buffer.
* @param keyLen
* key buffer length.
* @throws IOException
* raised on errors performing I/O.
*/
public void upperBound(byte[] key, int keyOffset, int keyLen) throws IOException {
seekTo(new ByteArray(key, keyOffset, keyLen), true);
} | 3.26 |
hadoop_TFile_rewind_rdh | /**
* Rewind to the first entry in the scanner. The entry returned by the
* previous entry() call will be invalid.
*
* @throws IOException
* raised on errors performing I/O.
*/
public void rewind() throws IOException {
  seekTo(beginLocation);
} | 3.26 |
hadoop_TFile_getRecordNum_rdh | /**
* Get the RecordNum corresponding to the entry pointed by the cursor.
*
* @return The RecordNum corresponding to the entry pointed by the cursor.
* @throws IOException
* raised on errors performing I/O.
*/
public long getRecordNum() throws IOException {
return reader.getRecordNumByLocation(currentLocation);
} | 3.26 |
hadoop_TFile_checkKey_rdh | /**
 * Check whether we have already successfully obtained the key. It also
* initializes the valueInputStream.
*/
void checkKey() throws IOException {
if (klen >= 0)
return;
if (atEnd()) {
throw new EOFException("No key-value to read");
}
klen = -1;
vlen = -1;
valueChecked = false;
klen = Utils.readVInt(blkReader);
blkReader.readFully(keyBuffer, 0, klen);
valueBufferInputStream.reset(blkReader);
if (valueBufferInputStream.isLastChunk()) {
vlen = valueBufferInputStream.getRemain();
}
} | 3.26 |
hadoop_TFile_getKeyLength_rdh | /**
* Get the length of the key.
*
* @return the length of the key.
*/
public int getKeyLength() {
return klen;
} | 3.26 |
hadoop_TFile_getValueStream_rdh | /**
* Stream access to value. The value part of the key-value pair pointed
* by the current cursor is not cached and can only be examined once.
* Calling any of the following functions more than once without moving
* the cursor will result in exception: {@link #getValue(byte[])},
* {@link #getValue(byte[], int)}, {@link #getValueStream}.
*
* @return The input stream for reading the value.
* @throws IOException
* raised on errors performing I/O.
 */
public DataInputStream getValueStream() throws IOException {
  if (valueChecked) {
throw new IllegalStateException("Attempt to examine value multiple times.");
}
valueChecked = true;
return valueDataInputStream;
} | 3.26 |
hadoop_TFile_main_rdh | /**
* Dumping the TFile information.
*
* @param args
* A list of TFile paths.
*/
public static void main(String[] args) {
System.out.printf("TFile Dumper (TFile %s, BCFile %s)%n", TFile.API_VERSION.toString(), BCFile.API_VERSION.toString());
if (args.length == 0) {
System.out.println("Usage: java ... org.apache.hadoop.io.file.tfile.TFile tfile-path [tfile-path ...]");
System.exit(0);
}
Configuration conf = new Configuration();
for (String file : args) {
System.out.println(("===" + file) + "===");
try {
TFileDumper.dumpInfo(file, System.out, conf);
} catch (IOException e) {
e.printStackTrace(System.err);
}
}
} | 3.26 |
hadoop_TFile_m4_rdh | /**
* Compare whether this and other points to the same key value.
*/
@Override
public boolean m4(Object other) {
if (this == other)
return true;
if (!(other instanceof Entry))
return false;
return ((Entry) (other)).compareTo(keyBuffer, 0, getKeyLength()) == 0;
} | 3.26 |
hadoop_TFile_clone_rdh | /**
*
* @see java.lang.Object#clone()
*/
@Override
protected Location clone() {
return new Location(blockIndex, recordIndex);
} | 3.26 |
hadoop_TFile_seekToEnd_rdh | /**
* Seek to the end of the scanner. The entry returned by the previous
* entry() call will be invalid.
*
* @throws IOException
* raised on errors performing I/O.
*/
public void seekToEnd() throws IOException {
parkCursorAtEnd();
} | 3.26 |
hadoop_TFile_compare_rdh | /**
* Provide a customized comparator for Entries. This is useful if we
* have a collection of Entry objects. However, if the Entry objects
* come from different TFiles, users must ensure that those TFiles share
* the same RawComparator.
 */
@Override
public int compare(Scanner.Entry o1, Scanner.Entry o2) {
return comparator.compare(o1.getKeyBuffer(), 0, o1.getKeyLength(), o2.getKeyBuffer(), 0, o2.getKeyLength());
} | 3.26 |
hadoop_TFile_getMetaBlock_rdh | /**
 * Stream access to a meta block.
*
* @param name
* The name of the meta block.
* @return The input stream.
* @throws IOException
* on I/O error.
* @throws MetaBlockDoesNotExist
* If the meta block with the name does not exist.
*/
public DataInputStream getMetaBlock(String name) throws IOException, MetaBlockDoesNotExist {
return readerBCF.getMetaBlock(name);
} | 3.26 |
hadoop_TFile_getValue_rdh | /**
* Copy the value into BytesWritable. The input BytesWritable will be
* automatically resized to the actual value size. The implementation
* directly uses the buffer inside BytesWritable for storing the value.
* The call does not require the value length to be known.
*
* @param value
* value.
* @throws IOException
* raised on errors performing I/O.
* @return long value.
*/
public long getValue(BytesWritable value) throws IOException {
DataInputStream dis = getValueStream();
int size = 0;
try {
      int remain;
      while ((remain = valueBufferInputStream.getRemain()) > 0) {
value.setSize(size + remain);
dis.readFully(value.getBytes(), size, remain);
size += remain;
}
      return value.getLength();
    } finally {
dis.close();
}
} | 3.26 |
hadoop_TFile_getEntryCount_rdh | /**
* Get the number of key-value pair entries in TFile.
*
* @return the number of key-value pairs in TFile
*/
public long getEntryCount() {
return tfileMeta.getRecordCount();
} | 3.26 |
hadoop_BaseService_serverStatusChange_rdh | /**
* Notification callback when the server changes its status.
* <p>
* This method returns an empty array (size 0)
*
* @param oldStatus
* old server status.
* @param newStatus
* new server status.
* @throws ServiceException
* thrown if the service could not process the status change.
*/
@Override
public void serverStatusChange(Server.Status oldStatus, Server.Status newStatus) throws ServiceException {
} | 3.26 |
hadoop_BaseService_destroy_rdh | /**
* Destroy the services. This method is called once, when the
* {@link Server} owning the service is being destroyed.
* <p>
* This method does a NOP.
*/
@Override
public void destroy() {} | 3.26 |
hadoop_BaseService_getPrefix_rdh | /**
* Returns the service prefix.
*
* @return the service prefix.
*/
protected String getPrefix() {
return prefix;
} | 3.26 |
hadoop_BaseService_m0_rdh | /**
* Initializes the service.
* <p>
* It collects all service properties (properties having the
* <code>#SERVER#.#SERVICE#.</code> prefix). The property names are then
* trimmed from the <code>#SERVER#.#SERVICE#.</code> prefix.
* <p>
* After collecting the service properties it delegates to the
* {@link #init()} method.
*
* @param server
* the server initializing the service, give access to the
* server context.
* @throws ServiceException
* thrown if the service could not be initialized.
*/
@Override
public final void m0(Server server) throws ServiceException {
this.server = server;
String servicePrefix = getPrefixedName("");
serviceConfig = new Configuration(false);
for (Map.Entry<String, String> entry : ConfigurationUtils.resolve(server.getConfig())) {
String key = entry.getKey();
if (key.startsWith(servicePrefix)) {
serviceConfig.set(key.substring(servicePrefix.length()), entry.getValue());
}
}
init();
} | 3.26 |
hadoop_BaseService_getServiceDependencies_rdh | /**
* Returns the service dependencies of this service. The service will be
* instantiated only if all the service dependencies are already initialized.
* <p>
* This method returns an empty array (size 0)
*
* @return an empty array (size 0).
*/
@Override
public Class[] getServiceDependencies() {
return new Class[0];
} | 3.26 |
hadoop_BaseService_m1_rdh | /**
* Post initializes the service. This method is called by the
* {@link Server} after all services of the server have been initialized.
* <p>
* This method does a NOP.
*
* @throws ServiceException
* thrown if the service could not be
* post-initialized.
*/
@Override
public void m1() throws ServiceException {} | 3.26 |
hadoop_BaseService_getPrefixedName_rdh | /**
* Returns the full prefixed name of a service property.
*
* @param name
* of the property.
* @return prefixed name of the property.
*/
protected String getPrefixedName(String name) {
return server.getPrefixedName((prefix + ".") + name);
} | 3.26 |
hadoop_BaseService_getServer_rdh | /**
* Returns the server owning the service.
*
* @return the server owning the service.
 */
protected Server getServer() {
return server;
} | 3.26 |