name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, 3.26–3.68)
---|---|---|
hadoop_Paths_getParent_rdh | /**
* Get the parent path of a string path: everything up to but excluding
* the last "/" in the path.
*
* @param pathStr
* path as a string
* @return the parent or null if there is no parent.
*/
public static String getParent(String pathStr) {
int lastSlash = pathStr.lastIndexOf('/');
if (lastSlash >= 0) {
return pathStr.substring(0, lastSlash);
}
return null;
} | 3.26 |
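A minimal usage sketch for the `getParent` snippet above; the sample paths are illustrative assumptions, not part of the dataset:

```java
// Standalone sketch reproducing the string-based parent-path logic shown above.
public class GetParentDemo {
  // Everything up to, but excluding, the last '/'; null when there is no '/'.
  static String getParent(String pathStr) {
    int lastSlash = pathStr.lastIndexOf('/');
    return lastSlash >= 0 ? pathStr.substring(0, lastSlash) : null;
  }

  public static void main(String[] args) {
    System.out.println(getParent("/user/alice/data.csv")); // "/user/alice"
    System.out.println(getParent("/root"));                // "" (empty string)
    System.out.println(getParent("no-slash"));             // null
  }
}
```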
hadoop_Paths_getMultipartUploadCommitsDirectory_rdh | /**
* Build a qualified temporary path for the multipart upload commit
* information in the supplied filesystem
* (which is expected to be the cluster FS).
* Currently {@code $tempDir/$user/$uuid/staging-uploads} where
* {@code tempDir} is from
* {@link #tempDirForStaging(FileSystem, Configuration)}.
*
* @param fs
* target FS
* @param conf
* configuration
* @param uuid
* uuid of job
* @return a path which can be used for temporary work
* @throws IOException
* on an IO failure.
*/
@VisibleForTesting
static Path getMultipartUploadCommitsDirectory(FileSystem fs, Configuration conf, String uuid) throws IOException {
return path(tempDirForStaging(fs, conf), UserGroupInformation.getCurrentUser().getShortUserName(), uuid, STAGING_UPLOADS);
} | 3.26 |
hadoop_Paths_clearTempFolderInfo_rdh | /**
* Remove all information held about task attempts.
*
* @param attemptID
* attempt ID.
*/
public static void clearTempFolderInfo(final TaskAttemptID attemptID) {
tempFolders.invalidate(attemptID);
} | 3.26 |
hadoop_Paths_getAppAttemptId_rdh | /**
* Get the Application Attempt ID for this job.
*
* @param conf
* the config to look in
* @return the Application Attempt ID for a given job.
*/
private static int getAppAttemptId(Configuration conf) {
return conf.getInt(MRJobConfig.APPLICATION_ATTEMPT_ID, 0);
} | 3.26 |
hadoop_Paths_resetTempFolderCache_rdh | /**
* Reset the temp folder cache; useful in tests.
*/
@VisibleForTesting
public static void resetTempFolderCache() {
tempFolders.invalidateAll();
} | 3.26 |
hadoop_Paths_tempDirForStaging_rdh | /**
* Try to come up with a good temp directory for different filesystems.
*
* @param fs
* filesystem
* @param conf
* configuration
* @return a qualified path under which temporary work can go.
*/
public static Path tempDirForStaging(FileSystem fs, Configuration conf) {
String fallbackPath = (fs.getScheme().equals("file")) ? System.getProperty(JAVA_IO_TMPDIR) : FILESYSTEM_TEMP_PATH;
return fs.makeQualified(new Path(conf.getTrimmed(FS_S3A_COMMITTER_STAGING_TMP_PATH, fallbackPath)));
} | 3.26 |
hadoop_DatanodeLifelineProtocolClientSideTranslatorPB_isMethodSupported_rdh | // ProtocolMetaInterface
@Override
public boolean isMethodSupported(String methodName) throws IOException {
return RpcClientUtil.isMethodSupported(rpcProxy, DatanodeLifelineProtocolPB.class, RpcKind.RPC_PROTOCOL_BUFFER,
RPC.getProtocolVersion(DatanodeLifelineProtocolPB.class), methodName);
} | 3.26 |
hadoop_FederationNamespaceInfo_m0_rdh | /**
* The HDFS block pool id for this namespace.
*
* @return Block pool identifier.
*/
public String m0() {
return this.blockPoolId;
} | 3.26 |
hadoop_FederationNamespaceInfo_getClusterId_rdh | /**
* The HDFS cluster id for this namespace.
*
* @return Cluster identifier.
*/
public String getClusterId() {
return this.clusterId;
} | 3.26 |
hadoop_TreeWalk_getPendingQueue_rdh | /**
*
* @return the Deque containing the pending paths.
*/
protected Deque<TreePath> getPendingQueue() {
return pending;
} | 3.26 |
hadoop_FindOptions_setDepthFirst_rdh | /**
* Sets flag indicating whether the expression should be applied to the
* directory tree depth first.
*
* @param depthFirst
* true indicates depth first traversal
*/
public void setDepthFirst(boolean depthFirst) {
this.depthFirst = depthFirst;
} | 3.26 |
hadoop_FindOptions_setFollowArgLink_rdh | /**
* Sets flag indicating whether command line symbolic links should be
* followed.
*
* @param followArgLink
* true indicates follow links
*/
public void setFollowArgLink(boolean followArgLink) {
this.followArgLink = followArgLink;
} | 3.26 |
hadoop_FindOptions_setFollowLink_rdh | /**
* Sets flag indicating whether symbolic links should be followed.
*
* @param followLink
* true indicates follow links
*/
public void setFollowLink(boolean followLink) {
this.followLink = followLink;
} | 3.26 |
hadoop_FindOptions_getCommandFactory_rdh | /**
* Return the command factory.
*
* @return {@link CommandFactory}
*/
public CommandFactory getCommandFactory() {
return this.commandFactory;
} | 3.26 |
hadoop_FindOptions_setIn_rdh | /**
* Sets the input stream to be used.
*
* @param in
* input stream to be used
*/
public void setIn(InputStream in) {
this.in = in;
} | 3.26 |
hadoop_FindOptions_getConfiguration_rdh | /**
* Return the {@link Configuration}.
*
* @return configuration.
*/
public Configuration getConfiguration() {
return this.configuration;
} | 3.26 |
hadoop_FindOptions_getOut_rdh | /**
* Returns the output stream to be used.
*
* @return output stream to be used
*/
public PrintStream getOut() {
return this.out;
} | 3.26 |
hadoop_FindOptions_setErr_rdh | /**
* Sets the error stream to be used.
*
* @param err
* error stream to be used
*/
public void setErr(PrintStream err) {
this.err = err;
} | 3.26 |
hadoop_FindOptions_getMaxDepth_rdh | /**
* Returns the maximum depth for applying expressions.
*
* @return maximum depth
*/
public int getMaxDepth() {
return this.maxDepth;
} | 3.26 |
hadoop_FindOptions_getMinDepth_rdh | /**
* Returns the minimum depth for applying expressions.
*
* @return min depth
*/
public int getMinDepth() {
return this.minDepth;
} | 3.26 |
hadoop_FindOptions_isFollowArgLink_rdh | /**
* Should command line symbolic links be followed?
*
* @return true indicates links should be followed
*/
public boolean isFollowArgLink() {
return this.followArgLink;
} | 3.26 |
hadoop_FindOptions_isDepthFirst_rdh | /**
* Should directory tree be traversed depth first?
*
* @return true indicates depth first traversal
*/
public boolean isDepthFirst() {
return this.depthFirst;
} | 3.26 |
hadoop_FindOptions_m1_rdh | /**
* Set the start time of this {@link Find} command.
*
* @param time
* start time (in milliseconds since epoch)
*/
public void m1(long time) {
this.startTime = time;
} | 3.26 |
hadoop_FindOptions_setCommandFactory_rdh | /**
* Set the command factory.
*
* @param factory
* {@link CommandFactory}
*/
public void setCommandFactory(CommandFactory factory) {
this.commandFactory = factory;
} | 3.26 |
hadoop_FindOptions_getStartTime_rdh | /**
* Returns the start time of this {@link Find} command.
*
* @return start time (in milliseconds since epoch)
*/
public long getStartTime() {
return this.startTime;
} | 3.26 |
hadoop_FindOptions_setConfiguration_rdh | /**
* Set the {@link Configuration}
*
* @param configuration
* {@link Configuration}
*/
public void setConfiguration(Configuration configuration) {
this.configuration = configuration;
} | 3.26 |
hadoop_FindOptions_getIn_rdh | /**
* Returns the input stream to be used.
*
* @return input stream to be used
*/
public InputStream getIn() {
return this.in;
} | 3.26 |
hadoop_FindOptions_getErr_rdh | /**
* Returns the error stream to be used.
*
* @return error stream to be used
*/
public PrintStream getErr() {
return this.err;
} | 3.26 |
hadoop_FindOptions_setOut_rdh | /**
* Sets the output stream to be used.
*
* @param out
* output stream to be used
*/
public void setOut(PrintStream out) {
this.out = out;
} | 3.26 |
hadoop_FindOptions_setMaxDepth_rdh | /**
* Sets the maximum depth for applying expressions.
*
* @param maxDepth
* maximum depth
*/
public void setMaxDepth(int maxDepth) {
this.maxDepth = maxDepth;
} | 3.26 |
hadoop_FindOptions_setMinDepth_rdh | /**
* Sets the minimum depth for applying expressions.
*
* @param minDepth
* minimum depth
*/
public void setMinDepth(int minDepth) {
this.minDepth = minDepth;
} | 3.26 |
hadoop_BinaryEditsVisitor_start_rdh | /**
* Start the visitor (initialization)
*/
@Override
public void start(int version) throws IOException {
} | 3.26 |
hadoop_BinaryEditsVisitor_close_rdh | /**
* Finish the visitor
*/
@Override
public void close(Throwable error) throws IOException {
elfos.setReadyToFlush();
elfos.flushAndSync(true);
elfos.close();
} | 3.26 |
hadoop_TimelineReaderWebServicesUtils_parseIntStr_rdh | /**
* Interpret passed string as an integer.
*
* @param str
* Passed string.
* @return integer representation if string is not null, null otherwise.
*/
static Integer parseIntStr(String str) {
return str == null ? null : Integer.parseInt(str.trim());
} | 3.26 |
hadoop_TimelineReaderWebServicesUtils_createTimelineEntityFilters_rdh | /**
* Parse the passed filters represented as strings and convert them into a
* {@link TimelineEntityFilters} object.
*
* @param limit
* Limit to number of entities to return.
* @param createdTimeStart
* Created time start for the entities to return.
* @param createdTimeEnd
* Created time end for the entities to return.
* @param relatesTo
* Entities to return must match relatesTo.
* @param isRelatedTo
* Entities to return must match isRelatedTo.
* @param infofilters
* Entities to return must match these info filters.
* @param conffilters
* Entities to return must match these config filters.
* @param metricfilters
* Entities to return must match these metric filters.
* @param eventfilters
* Entities to return must match these event filters.
* @param fromid
* Entity id from which the listing of entities should start.
* @return a {@link TimelineEntityFilters} object.
* @throws TimelineParseException
* if any problem occurs during parsing.
*/
static TimelineEntityFilters createTimelineEntityFilters(String limit, Long createdTimeStart, Long createdTimeEnd, String relatesTo, String isRelatedTo, String infofilters, String conffilters, String metricfilters, String eventfilters, String fromid) throws TimelineParseException {
return new TimelineEntityFilters.Builder().entityLimit(parseLongStr(limit)).createdTimeBegin(createdTimeStart).createTimeEnd(createdTimeEnd).relatesTo(parseRelationFilters(relatesTo)).isRelatedTo(parseRelationFilters(isRelatedTo)).infoFilters(parseKVFilters(infofilters, false)).configFilters(parseKVFilters(conffilters, true)).metricFilters(parseMetricFilters(metricfilters)).eventFilters(parseEventFilters(eventfilters)).fromId(parseStr(fromid)).build();
} | 3.26 |
hadoop_TimelineReaderWebServicesUtils_parseMetricFilters_rdh | /**
* Parses metric filters.
*
* @param expr
* Metric filter expression to be parsed.
* @return a {@link TimelineFilterList} object.
* @throws TimelineParseException
* if any problem occurs during parsing.
*/
static TimelineFilterList parseMetricFilters(String expr) throws TimelineParseException {
return parseFilters(new TimelineParserForNumericFilters(expr));
} | 3.26 |
hadoop_TimelineReaderWebServicesUtils_createTimelineReaderContext_rdh | /**
* Parse the passed context information represented as strings and convert
* into a {@link TimelineReaderContext} object.
*
* @param clusterId
* Cluster Id.
* @param userId
* User Id.
* @param flowName
* Flow Name.
* @param flowRunId
* Run id for the flow.
* @param appId
* App Id.
* @param entityType
* Entity Type.
* @param entityIdPrefix
* Entity Id Prefix.
* @param entityId
* Entity Id.
* @return a {@link TimelineReaderContext} object.
*/
static TimelineReaderContext createTimelineReaderContext(String clusterId, String userId, String flowName, String flowRunId, String appId, String entityType, String entityIdPrefix, String entityId) {
return new TimelineReaderContext(parseStr(clusterId), parseStr(userId), parseStr(flowName), parseLongStr(flowRunId), parseStr(appId), parseStr(entityType), parseLongStr(entityIdPrefix), parseStr(entityId));
} | 3.26 |
hadoop_TimelineReaderWebServicesUtils_parseDataToRetrieve_rdh | /**
* Parses confstoretrieve and metricstoretrieve.
*
* @param expr
* String representing confs/metrics to retrieve expression.
* @return a {@link TimelineFilterList} object.
* @throws TimelineParseException
* if any problem occurs during parsing.
*/
static TimelineFilterList parseDataToRetrieve(String expr) throws TimelineParseException {
return parseFilters(new TimelineParserForDataToRetrieve(expr));
} | 3.26 |
hadoop_TimelineReaderWebServicesUtils_parseStr_rdh | /**
* Trims the passed string if it is not null.
*
* @param str
* Passed string.
* @return trimmed string if string is not null, null otherwise.
*/
static String parseStr(String str) {
return StringUtils.trimToNull(str);
} | 3.26 |
hadoop_TimelineReaderWebServicesUtils_getUser_rdh | /**
* Get UGI based on the remote user in the HTTP request.
*
* @param req
* HTTP request.
* @return UGI.
*/
public static UserGroupInformation getUser(HttpServletRequest req) {
Principal princ = req.getUserPrincipal();
String remoteUser = (princ == null) ? null : princ.getName();
UserGroupInformation callerUGI = null;
if (remoteUser != null) {
callerUGI = UserGroupInformation.createRemoteUser(remoteUser);
}
return callerUGI;
} | 3.26 |
hadoop_TimelineReaderWebServicesUtils_parseLongStr_rdh | /**
* Interpret passed string as a long.
*
* @param str
* Passed string.
* @return long representation if string is not null, null otherwise.
*/
static Long parseLongStr(String str) {
return str == null ? null : Long.parseLong(str.trim());
} | 3.26 |
hadoop_TimelineReaderWebServicesUtils_parseEventFilters_rdh | /**
* Parses event filters. The expression is a comma-delimited string; for
* instance, "event1,event2,event3".
*
* @param expr
* Event filter expression to be parsed.
* @return a {@link TimelineFilterList} object.
* @throws TimelineParseException
* if any problem occurs during parsing.
*/
static TimelineFilterList parseEventFilters(String expr) throws TimelineParseException {
return parseFilters(new TimelineParserForExistFilters(expr, TimelineParseConstants.COMMA_CHAR));
} | 3.26 |
hadoop_TimelineReaderWebServicesUtils_createTimelineDataToRetrieve_rdh | /**
* Parse the passed fields represented as strings and convert them into a
* {@link TimelineDataToRetrieve} object.
*
* @param confs
* confs to retrieve.
* @param metrics
* metrics to retrieve.
* @param fields
* fields to retrieve.
* @param metricsLimit
* upper limit on number of metrics to return.
* @param metricsTimeBegin
* start of the time window for the metrics to return.
* @param metricsTimeEnd
* end of the time window for the metrics to return.
* @return a {@link TimelineDataToRetrieve} object.
* @throws TimelineParseException
* if any problem occurs during parsing.
*/
static TimelineDataToRetrieve createTimelineDataToRetrieve(String confs, String metrics, String fields, String metricsLimit, String metricsTimeBegin, String metricsTimeEnd) throws TimelineParseException {
return new TimelineDataToRetrieve(parseDataToRetrieve(confs), parseDataToRetrieve(metrics), parseFieldsStr(fields, TimelineParseConstants.COMMA_DELIMITER), parseIntStr(metricsLimit), parseLongStr(metricsTimeBegin), parseLongStr(metricsTimeEnd));
} | 3.26 |
hadoop_TimelineReaderWebServicesUtils_parseFieldsStr_rdh | /**
* Interprets passed string as set of fields delimited by passed delimiter.
* For instance, if delimiter is ",", then the passed string should be
* represented as "METRICS,CONFIGS", where each delimited part of the string
* is a constant in {@link Field}.
*
* @param str
* passed string.
* @param delimiter
* string delimiter.
* @return a set of {@link Field}.
*/
static EnumSet<Field> parseFieldsStr(String str, String delimiter) {
if (str == null) {
return null;
}
String[] strs = str.split(delimiter);
EnumSet<Field> fieldList = EnumSet.noneOf(Field.class);
for (String s : strs) {
try {
fieldList.add(Field.valueOf(s.trim().toUpperCase()));
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException(s + " is not a valid field.");
}
}
return fieldList;
} | 3.26 |
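The `parseFieldsStr` snippet above maps a delimited string onto enum constants; below is a self-contained sketch of the same pattern, using a stand-in enum rather than the timeline service's real `Field` type (an assumption for illustration):

```java
import java.util.EnumSet;

public class ParseFieldsDemo {
  // Stand-in enum; the real code resolves against the timeline service's Field enum.
  enum Field { ALL, EVENTS, INFO, METRICS, CONFIGS }

  // Same pattern as parseFieldsStr: split on the delimiter, trim, upper-case,
  // and map each token onto an enum constant, rejecting unknown tokens.
  static EnumSet<Field> parseFieldsStr(String str, String delimiter) {
    if (str == null) {
      return null;
    }
    EnumSet<Field> fields = EnumSet.noneOf(Field.class);
    for (String s : str.split(delimiter)) {
      try {
        fields.add(Field.valueOf(s.trim().toUpperCase()));
      } catch (IllegalArgumentException e) {
        throw new IllegalArgumentException(s + " is not a valid field.");
      }
    }
    return fields;
  }

  public static void main(String[] args) {
    System.out.println(parseFieldsStr("METRICS, CONFIGS", ",")); // [METRICS, CONFIGS]
  }
}
```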
hadoop_TimelineReaderWebServicesUtils_getUserName_rdh | /**
* Get username from caller UGI.
*
* @param callerUGI
* caller UGI.
* @return username.
*/
static String getUserName(UserGroupInformation callerUGI) {
return callerUGI != null ? callerUGI.getUserName().trim() : "";
} | 3.26 |
hadoop_TimelineReaderWebServicesUtils_parseRelationFilters_rdh | /**
* Parse relation filters.
*
* @param expr
* Relation filter expression
* @return a {@link TimelineFilterList} object.
* @throws TimelineParseException
* if any problem occurs during parsing.
*/
static TimelineFilterList parseRelationFilters(String expr) throws TimelineParseException {
return parseFilters(new TimelineParserForRelationFilters(expr, TimelineParseConstants.COMMA_CHAR, TimelineParseConstants.COLON_DELIMITER));
} | 3.26 |
hadoop_TimelineReaderWebServicesUtils_parseKVFilters_rdh | /**
* Parses config and info filters.
*
* @param expr
* Expression to be parsed.
* @param valueAsString
* true, if value has to be interpreted as string, false
* otherwise. It is true for config filters and false for info filters.
* @return a {@link TimelineFilterList} object.
* @throws TimelineParseException
* if any problem occurs during parsing.
*/
static TimelineFilterList parseKVFilters(String expr, boolean valueAsString) throws TimelineParseException {
return parseFilters(new TimelineParserForKVFilters(expr, valueAsString));
} | 3.26 |
hadoop_BoundDTExtension_getCanonicalServiceName_rdh | /**
* Get the canonical service name, which will be
* returned by {@code FileSystem.getCanonicalServiceName()} and so used to
* map the issued DT in credentials, including credential files collected
* for job submission.
*
* If null is returned: fall back to the default filesystem logic.
*
* Only invoked on {@link CustomDelegationTokenManager} instances.
*
* @return the service name to be returned by the filesystem.
*/
default String getCanonicalServiceName() {
return null;
} | 3.26 |
hadoop_AuxServiceFile_srcFile_rdh | /**
* This provides the source location of the configuration file, the content
* of which is dumped to dest_file post property substitutions, in the format
* as specified in type. Typically the src_file would point to a source
* controlled network accessible file maintained by tools like puppet, chef,
* or hdfs etc. Currently, only hdfs is supported.
*/
public AuxServiceFile srcFile(String file) {
this.srcFile = file;
return this;
} | 3.26 |
hadoop_AuxServiceFile_toIndentedString_rdh | /**
* Convert the given object to string with each line indented by 4 spaces
* (except the first line).
*/
private String toIndentedString(Object o) {
if (o == null) {
return "null";
}
return o.toString().replace("\n", "\n ");
} | 3.26 |
hadoop_AuxServiceFile_type_rdh | /**
* Config file in the standard format like xml, properties, json, yaml,
* template.
*/
public AuxServiceFile type(TypeEnum t) {
this.type = t;
return this;
} | 3.26 |
hadoop_ReplicatedBlockStats_merge_rdh | /**
* Merge the multiple ReplicatedBlockStats.
*
* @param stats
* Collection of stats to merge.
* @return A new ReplicatedBlockStats merging all the input ones
*/
public static ReplicatedBlockStats merge(Collection<ReplicatedBlockStats> stats) {
long lowRedundancyBlocks = 0;
long corruptBlocks = 0;
long missingBlocks = 0;
long missingReplicationOneBlocks = 0;
long bytesInFutureBlocks = 0;
long pendingDeletionBlocks = 0;
long highestPriorityLowRedundancyBlocks = 0;
boolean hasHighestPriorityLowRedundancyBlocks = false;
// long's range is large enough that we don't need to consider overflow
for (ReplicatedBlockStats stat : stats) {
lowRedundancyBlocks += stat.getLowRedundancyBlocks();
corruptBlocks += stat.getCorruptBlocks();
missingBlocks += stat.getMissingReplicaBlocks();
missingReplicationOneBlocks += stat.getMissingReplicationOneBlocks();
bytesInFutureBlocks += stat.getBytesInFutureBlocks();
pendingDeletionBlocks += stat.getPendingDeletionBlocks();
if (stat.hasHighestPriorityLowRedundancyBlocks()) {
hasHighestPriorityLowRedundancyBlocks = true;
highestPriorityLowRedundancyBlocks += stat.getHighestPriorityLowRedundancyBlocks();
}
}
if (hasHighestPriorityLowRedundancyBlocks) {
return new ReplicatedBlockStats(lowRedundancyBlocks, corruptBlocks, missingBlocks, missingReplicationOneBlocks, bytesInFutureBlocks, pendingDeletionBlocks, highestPriorityLowRedundancyBlocks);
}
return new ReplicatedBlockStats(lowRedundancyBlocks, corruptBlocks, missingBlocks, missingReplicationOneBlocks, bytesInFutureBlocks, pendingDeletionBlocks);
} | 3.26 |
hadoop_TimelineDomains_addDomains_rdh | /**
* Add a list of domains to the existing domain list
*
* @param domains
* a list of domains
*/
public void addDomains(List<TimelineDomain> domains) {
this.f0.addAll(domains);
} | 3.26 |
hadoop_TimelineDomains_setDomains_rdh | /**
* Set the domain list to the given list of domains
*
* @param domains
* a list of domains
*/
public void setDomains(List<TimelineDomain> domains) {
this.f0 = domains;
} | 3.26 |
hadoop_TimelineDomains_addDomain_rdh | /**
* Add a single domain into the existing domain list
*
* @param domain
* a single domain
*/
public void addDomain(TimelineDomain domain) {
f0.add(domain);
} | 3.26 |
hadoop_TimelineDomains_getDomains_rdh | /**
* Get a list of domains
*
* @return a list of domains
*/
@XmlElement(name = "domains")
public List<TimelineDomain> getDomains() {
return f0;
} | 3.26 |
hadoop_BalanceJob_isJobDone_rdh | /**
* Return true if the job has finished.
*/
public boolean isJobDone() {
return jobDone;
} | 3.26 |
hadoop_BalanceJob_execute_rdh | /**
* Run the state machine.
*/
public void execute() {
boolean quit = false;
try {
while (((!jobDone) && (!quit)) && scheduler.isRunning()) {
if (f0 == null) {
// Job done.
finish(null);
quit = true;
} else {
if ((f0 == firstProcedure) || (lastProcedure != f0)) {
LOG.info("Start procedure {}, last procedure is {}", f0.name(), lastProcedure == null ? null : lastProcedure.name());
}
if (f0.execute()) {
lastProcedure = f0;
f0 = next();
}
if (!scheduler.writeJournal(this)) {
// Write journal failed. Simply quit because this job
// has already been added to the recoverQueue.
quit = true;
LOG.debug("Write journal failed. Quit and wait for recovery.");
}
}
}
} catch (BalanceProcedure.RetryException tre) {
scheduler.delay(this, f0.delayMillisBeforeRetry());
} catch (Exception e) {
finish(e);
} catch (Throwable t) {
IOException err = new IOException("Got throwable error.", t);
finish(err);
}
} | 3.26 |
hadoop_BalanceJob_nextProcedure_rdh | /**
* Append a procedure to the tail.
*/
public Builder nextProcedure(T procedure) {
int size = procedures.size();
if (size > 0) {
procedures.get(size - 1).setNextProcedure(procedure.name());
}
procedure.setNextProcedure(NEXT_PROCEDURE_NONE);
procedures.add(procedure);
return this;
} | 3.26 |
hadoop_BalanceJob_waitJobDone_rdh | /**
* Wait until the job is done.
*/
public synchronized void waitJobDone() throws InterruptedException {
while (!jobDone) {
wait();
}
} | 3.26 |
hadoop_BalanceJob_finish_rdh | /**
* Job finishes. It could be either success or failure.
*
* @param exception
* the exception that causes the job to fail. null indicates
* the job is successful.
*/
private synchronized void finish(Exception exception) {
assert !jobDone;
if (scheduler.jobDone(this)) {
jobDone = true;
error = exception;
notifyAll();
}
} | 3.26 |
hadoop_BalanceJob_shouldRemoveAfterDone_rdh | /**
* Whether this job should be removed after it's done.
*/
@VisibleForTesting
public boolean shouldRemoveAfterDone() {
return removeAfterDone;
} | 3.26 |
hadoop_BalanceJob_getDetailMessage_rdh | /**
* Get the detail description of this job.
*/
public String getDetailMessage() {
StringBuilder builder = new StringBuilder();
builder.append("id=").append(id);
if (firstProcedure != null) {
builder.append(",firstProcedure=").append(firstProcedure);
}
if (f0 != null) {
builder.append(",currentProcedure=").append(f0);
}
builder.append(",jobDone=").append(jobDone);
if (error != null) {
builder.append(",error=").append(error.getMessage());
}
return builder.toString();
} | 3.26 |
hadoop_BalanceJob_getError_rdh | /**
* Return the error exception during the job execution. This should be called
* after the job finishes.
*/
public Exception getError() {
return error;
} | 3.26 |
hadoop_BalanceJob_removeAfterDone_rdh | /**
* Automatically remove this job from the scheduler cache when the job is
* done.
*/
public Builder removeAfterDone(boolean remove) {
removeAfterDone = remove;
return this;
} | 3.26 |
hadoop_BalanceJob_getId_rdh | /**
* Get the uid of the job.
*/
public String getId() {
return this.id;
} | 3.26 |
hadoop_RunnableCallable_call_rdh | /**
* Invokes the wrapped callable/runnable as a callable.
*
* @return void
* @throws Exception
* thrown by the wrapped callable/runnable invocation.
*/
@Override
public Void call() throws Exception {
if (runnable != null) {
runnable.run();
} else {
callable.call();
}
return null;
} | 3.26 |
hadoop_RunnableCallable_toString_rdh | /**
* Returns the class name of the wrapper callable/runnable.
*
* @return the class name of the wrapper callable/runnable.
*/
@Override
public String toString() {
return runnable != null ? runnable.getClass().getSimpleName() : callable.getClass().getSimpleName();
} | 3.26 |
hadoop_KerberosSecurityTestcase_createTestDir_rdh | /**
* Create a working directory, it should be the build directory. Under
* this directory an ApacheDS working directory will be created, this
* directory will be deleted when the MiniKdc stops.
*/
public void createTestDir() {
workDir = new File(System.getProperty("test.dir", "target"));
} | 3.26 |
hadoop_KerberosSecurityTestcase_createMiniKdcConf_rdh | /**
* Create a Kdc configuration
*/
public void createMiniKdcConf() {
f0 = MiniKdc.createConf();
} | 3.26 |
hadoop_MRProtoUtils_convertToProtoFormat_rdh | /* TaskType */
public static TaskTypeProto convertToProtoFormat(TaskType e) {
return TaskTypeProto.valueOf(e.name());
} | 3.26 |
hadoop_SharedCacheManager_getSCMStore_rdh | /**
* For testing purposes only.
*/
@VisibleForTesting
SCMStore getSCMStore() {
return this.store;
} | 3.26 |
hadoop_DateSplitter_longToDate_rdh | /**
* Parse the long-valued timestamp into the appropriate SQL date type.
*/
private Date longToDate(long val, int sqlDataType) {
switch (sqlDataType) {
case Types.DATE :
return new Date(val);
case Types.TIME :
return new Time(val);
case Types.TIMESTAMP :
return new Timestamp(val);
default :
// Shouldn't ever hit this case.
return null;
}
} | 3.26 |
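A runnable sketch of the `longToDate` dispatch above, using only the JDK's `java.sql` types; the epoch value is an arbitrary example:

```java
import java.sql.Date;
import java.sql.Time;
import java.sql.Timestamp;
import java.sql.Types;

public class LongToDateDemo {
  // Same dispatch as the snippet: wrap epoch milliseconds in the SQL type
  // matching the column's java.sql.Types code.
  static java.util.Date longToDate(long val, int sqlDataType) {
    switch (sqlDataType) {
      case Types.DATE:      return new Date(val);
      case Types.TIME:      return new Time(val);
      case Types.TIMESTAMP: return new Timestamp(val);
      default:              return null; // shouldn't happen for date-typed split columns
    }
  }

  public static void main(String[] args) {
    long epochMillis = 1_700_000_000_000L; // illustrative value
    System.out.println(longToDate(epochMillis, Types.DATE));      // e.g. 2023-11-14
    System.out.println(longToDate(epochMillis, Types.TIMESTAMP)); // e.g. 2023-11-14 22:13:20.0
  }
}
```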
hadoop_DateSplitter_m0_rdh | /**
* Retrieve the value from the column in a type-appropriate manner and return
* its timestamp since the epoch. If the column is null, then return Long.MIN_VALUE.
* This will cause a special split to be generated for the NULL case, but may also
* cause poorly-balanced splits if most of the actual dates are positive time
* since the epoch, etc.
*/
private long m0(ResultSet rs, int colNum, int sqlDataType) throws SQLException {
try {
switch (sqlDataType) {
case Types.DATE :
return rs.getDate(colNum).getTime();
case Types.TIME :
return rs.getTime(colNum).getTime();
case Types.TIMESTAMP :
return rs.getTimestamp(colNum).getTime();
default :
throw new SQLException("Not a date-type field");
}
} catch (NullPointerException npe) {
// null column. return minimum long value.
LOG.warn("Encountered a NULL date in the split column. Splits may be poorly balanced.");
return Long.MIN_VALUE;
}
} | 3.26 |
hadoop_Environment_toArray_rdh | // to be used with Runtime.exec(String[] cmdarray, String[] envp)
String[] toArray() {
String[] arr = new String[super.size()];
Enumeration<Object> it = super.keys();
int i = -1;
while (it.hasMoreElements()) {
String v12 = ((String) (it.nextElement()));
String val = ((String) (get(v12)));
i++;
arr[i] = (v12 + "=") + val;
}
return arr;
} | 3.26 |
hadoop_ReadaheadPool_submitReadahead_rdh | /**
* Submit a request to readahead on the given file descriptor.
*
* @param identifier
* a textual identifier used in error messages, etc.
* @param fd
* the file descriptor to readahead
* @param off
* the offset at which to start the readahead
* @param len
* the number of bytes to read
* @return an object representing this pending request
*/
public ReadaheadRequest submitReadahead(String identifier, FileDescriptor fd, long off, long len) {
ReadaheadRequestImpl req = new ReadaheadRequestImpl(identifier, fd, off, len);
pool.execute(req);
if (LOG.isTraceEnabled()) {
LOG.trace("submit readahead: " + req);
}
return req;
} | 3.26 |
hadoop_ReadaheadPool_getInstance_rdh | /**
*
* @return Return the singleton instance for the current process.
*/
public static ReadaheadPool getInstance() {
synchronized(ReadaheadPool.class) {
if ((instance == null) && NativeIO.isAvailable()) {
instance = new ReadaheadPool();
}
return instance;
}
} | 3.26 |
hadoop_FieldSelectionMapReduce_map_rdh | /**
* The identify function. Input key/value pair is written directly to output.
*/
public void map(K key, V val, OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
FieldSelectionHelper helper = new FieldSelectionHelper(FieldSelectionHelper.emptyText, FieldSelectionHelper.emptyText);
helper.extractOutputKeyValue(key.toString(),
val.toString(), fieldSeparator, mapOutputKeyFieldList, mapOutputValueFieldList, allMapValueFieldsFrom, ignoreInputKey, true);
output.collect(helper.getKey(), helper.getValue());
} | 3.26 |
hadoop_EntityTableRW_setMetricsTTL_rdh | /**
*
* @param metricsTTL
* time to live parameter for the metrics in this table.
* @param hbaseConf
* configuration in which to set the metrics TTL config
* variable.
*/
public void setMetricsTTL(int metricsTTL, Configuration hbaseConf) {
hbaseConf.setInt(METRICS_TTL_CONF_NAME, metricsTTL);
} | 3.26 |
hadoop_Nfs3Constant_getValue_rdh | /**
*
* @return the int value representing the procedure.
*/
public int getValue() {
return ordinal();
} | 3.26 |
hadoop_TimelineEntityReaderFactory_createEntityTypeReader_rdh | /**
* Creates a timeline entity type reader that will read all available entity
* types within the specified context.
*
* @param context
* Reader context which defines the scope in which query has to
* be made. Limited to application level only.
* @return an <cite>EntityTypeReader</cite> object
*/
public static EntityTypeReader createEntityTypeReader(TimelineReaderContext context) {
return new EntityTypeReader(context);
} | 3.26 |
hadoop_SharedKeyCredentials_canonicalizeHttpRequest_rdh | /**
* Constructs a canonicalized string from the request's headers that will be used to construct the signature string
* for signing a Blob or Queue service request under the Shared Key Full authentication scheme.
*
* @param address
* the request URI
* @param accountName
* the account name associated with the request
* @param method
* the verb to be used for the HTTP request.
* @param contentType
* the content type of the HTTP request.
* @param contentLength
* the length of the content written to the outputstream in bytes, -1 if unknown
* @param date
* the date/time specification for the HTTP request
* @param conn
* the HttpURLConnection for the operation.
* @return A canonicalized string.
*/
private static String canonicalizeHttpRequest(final URL address, final String accountName, final String method, final String contentType, final long contentLength, final String date, final HttpURLConnection conn) throws UnsupportedEncodingException {
// The first element should be the Method of the request.
// I.e. GET, POST, PUT, or HEAD.
final StringBuilder canonicalizedString = new StringBuilder(EXPECTED_BLOB_QUEUE_CANONICALIZED_STRING_LENGTH);
canonicalizedString.append(conn.getRequestMethod());
// The next elements are
// If any element is missing it may be empty.
appendCanonicalizedElement(canonicalizedString, getHeaderValue(conn, HttpHeaderConfigurations.CONTENT_ENCODING, AbfsHttpConstants.EMPTY_STRING));
appendCanonicalizedElement(canonicalizedString, getHeaderValue(conn, HttpHeaderConfigurations.CONTENT_LANGUAGE, AbfsHttpConstants.EMPTY_STRING));
appendCanonicalizedElement(canonicalizedString, contentLength <= 0 ? "" : String.valueOf(contentLength));
appendCanonicalizedElement(canonicalizedString, getHeaderValue(conn, HttpHeaderConfigurations.CONTENT_MD5, AbfsHttpConstants.EMPTY_STRING));
appendCanonicalizedElement(canonicalizedString, contentType != null ? contentType : AbfsHttpConstants.EMPTY_STRING);
final String dateString = getHeaderValue(conn, HttpHeaderConfigurations.X_MS_DATE, AbfsHttpConstants.EMPTY_STRING);
// If x-ms-date header exists, Date should be empty string
appendCanonicalizedElement(canonicalizedString, dateString.equals(AbfsHttpConstants.EMPTY_STRING) ? date : "");
appendCanonicalizedElement(canonicalizedString, getHeaderValue(conn, HttpHeaderConfigurations.IF_MODIFIED_SINCE, AbfsHttpConstants.EMPTY_STRING));
appendCanonicalizedElement(canonicalizedString, getHeaderValue(conn, HttpHeaderConfigurations.IF_MATCH, AbfsHttpConstants.EMPTY_STRING));
appendCanonicalizedElement(canonicalizedString, getHeaderValue(conn, HttpHeaderConfigurations.IF_NONE_MATCH, AbfsHttpConstants.EMPTY_STRING));
appendCanonicalizedElement(canonicalizedString, getHeaderValue(conn, HttpHeaderConfigurations.IF_UNMODIFIED_SINCE, AbfsHttpConstants.EMPTY_STRING));
appendCanonicalizedElement(canonicalizedString, getHeaderValue(conn, HttpHeaderConfigurations.RANGE, AbfsHttpConstants.EMPTY_STRING));
addCanonicalizedHeaders(conn, canonicalizedString);
appendCanonicalizedElement(canonicalizedString, getCanonicalizedResource(address, accountName));
return canonicalizedString.toString();
} | 3.26 |
hadoop_SharedKeyCredentials_getCanonicalizedResource_rdh | /**
* Gets the canonicalized resource string for a Blob or Queue service request under the Shared Key Lite
* authentication scheme.
*
* @param address
* the resource URI.
* @param accountName
* the account name for the request.
* @return the canonicalized resource string.
*/
private static String getCanonicalizedResource(final URL address, final String accountName) throws UnsupportedEncodingException {
// Resource path
final StringBuilder resourcepath = new StringBuilder(AbfsHttpConstants.FORWARD_SLASH);
resourcepath.append(accountName);
// Note that AbsolutePath starts with a '/'.
resourcepath.append(address.getPath());
final StringBuilder canonicalizedResource = new StringBuilder(resourcepath.toString());
// query parameters
if ((address.getQuery() == null) || (!address.getQuery().contains(AbfsHttpConstants.EQUAL))) {
// no query params.
return canonicalizedResource.toString();
}
final Map<String, String[]> queryVariables = parseQueryString(address.getQuery());
final Map<String, String> lowercasedKeyNameValue = new HashMap<>();
for (final Entry<String, String[]> entry : queryVariables.entrySet()) {
// sort the value and organize it as comma separated values
final List<String> v23 = Arrays.asList(entry.getValue());
Collections.sort(v23);
final StringBuilder stringValue = new StringBuilder();
for (final String value : v23) {
if (stringValue.length() > 0) {
stringValue.append(AbfsHttpConstants.COMMA);
}
stringValue.append(value);
}
// key turns out to be null for ?a&b&c&d
lowercasedKeyNameValue.put(entry.getKey() == null ? null : entry.getKey().toLowerCase(Locale.ROOT), stringValue.toString());
}
final ArrayList<String> sortedKeys = new ArrayList<String>(lowercasedKeyNameValue.keySet());
Collections.sort(sortedKeys);
for (final String key : sortedKeys) {
final StringBuilder queryParamString = new StringBuilder();
queryParamString.append(key);
queryParamString.append(":");
queryParamString.append(lowercasedKeyNameValue.get(key));
appendCanonicalizedElement(canonicalizedResource, queryParamString.toString());
}
return canonicalizedResource.toString();
} | 3.26 |
hadoop_SharedKeyCredentials_parseQueryString_rdh | /**
* Parses a query string into a one to many hashmap.
*
* @param parseString
* the string to parse
* @return a HashMap<String, String[]> of the key values.
*/
private static HashMap<String, String[]> parseQueryString(String parseString) throws UnsupportedEncodingException {
final HashMap<String, String[]> retVals = new HashMap<>();
if ((parseString == null) || parseString.isEmpty()) {
return retVals;
}
// 1. Remove ? if present
final int queryDex = parseString.indexOf(AbfsHttpConstants.QUESTION_MARK);
if ((queryDex >= 0) && (parseString.length() > 0)) {
parseString = parseString.substring(queryDex + 1);
}
// 2. split name value pairs by splitting on the 'c&' character
final String[] valuePairs = (parseString.contains(AbfsHttpConstants.AND_MARK)) ? parseString.split(AbfsHttpConstants.AND_MARK) : parseString.split(AbfsHttpConstants.SEMICOLON);
// 3. for each field value pair parse into appropriate map entries
for (int v36 = 0; v36 < valuePairs.length; v36++) {
final int equalDex = valuePairs[v36].indexOf(AbfsHttpConstants.EQUAL);
if ((equalDex < 0) || (equalDex == (valuePairs[v36].length() - 1))) {
continue;
}
String key = valuePairs[v36].substring(0, equalDex);
String value = valuePairs[v36].substring(equalDex + 1);
key = safeDecode(key);
value = safeDecode(value);
// 3.1 add to map
String[] values = retVals.get(key);
if (values == null) {
values = new String[]{ value };
if (!value.equals("")) {
retVals.put(key, values);
}
}
}
return retVals;
} | 3.26 |
hadoop_SharedKeyCredentials_canonicalize_rdh | /**
* Constructs a canonicalized string for signing a request.
*
* @param conn
* the HttpURLConnection to canonicalize
* @param accountName
* the account name associated with the request
* @param contentLength
* the length of the content written to the outputstream in bytes,
* -1 if unknown
* @return a canonicalized string.
*/
private String canonicalize(final HttpURLConnection conn, final String accountName, final Long contentLength) throws UnsupportedEncodingException {
if (contentLength < (-1)) {
throw new IllegalArgumentException("The Content-Length header must be greater than or equal to -1.");
}
String contentType = getHeaderValue(conn, HttpHeaderConfigurations.CONTENT_TYPE, "");
return canonicalizeHttpRequest(conn.getURL(), accountName, conn.getRequestMethod(), contentType, contentLength, null, conn);
} | 3.26 |
hadoop_SharedKeyCredentials_addCanonicalizedHeaders_rdh | /**
* Add x-ms- prefixed headers in a fixed order.
*
* @param conn
* the HttpURLConnection for the operation
* @param canonicalizedString
* the canonicalized string to add the canonicalized headers to.
*/
private static void addCanonicalizedHeaders(final HttpURLConnection conn, final StringBuilder canonicalizedString) {
// Look for header names that start with
// HeaderNames.PrefixForStorageHeader
// Then sort them in case-insensitive manner.
final Map<String, List<String>> headers = conn.getRequestProperties();
final ArrayList<String> httpStorageHeaderNameArray = new ArrayList<String>();
for (final String key : headers.keySet()) {
if (key.toLowerCase(Locale.ROOT).startsWith(AbfsHttpConstants.HTTP_HEADER_PREFIX)) {
httpStorageHeaderNameArray.add(key.toLowerCase(Locale.ROOT));
}
}
Collections.sort(httpStorageHeaderNameArray);
// Now go through each header's values in the sorted order and append
// them to the canonicalized string.
for (final String key : httpStorageHeaderNameArray) {
final StringBuilder canonicalizedElement = new StringBuilder(key);
String delimiter = ":";
final ArrayList<String> values = getHeaderValues(headers, key);
boolean appendCanonicalizedElement = false;
// Go through values, unfold them, and then append them to the
// canonicalized element string.
for (final String value : values) {
if (value != null) {
appendCanonicalizedElement = true;
}
// Unfolding is simply removal of CRLF.
final String unfoldedValue = CRLF.matcher(value).replaceAll(Matcher.quoteReplacement(""));
// Append it to the canonicalized element string.
canonicalizedElement.append(delimiter);
canonicalizedElement.append(unfoldedValue);
delimiter = ",";
}
// Now, add this canonicalized element to the canonicalized header
// string.
if (appendCanonicalizedElement) {
appendCanonicalizedElement(canonicalizedString, canonicalizedElement.toString());
}
}
} | 3.26 |
hadoop_SharedKeyCredentials_appendCanonicalizedElement_rdh | /**
* Append a string to a string builder with a newline constant.
*
* @param builder
* the StringBuilder object
* @param element
* the string to append.
*/
private static void appendCanonicalizedElement(final StringBuilder builder, final String element) {
builder.append("\n");
builder.append(element);
} | 3.26 |
hadoop_SharedKeyCredentials_initializeMac_rdh | /**
* Initialize the HmacSha256 associated with the account key.
*/
private void initializeMac() {
// Initializes the HMAC-SHA256 Mac and SecretKey.
try {
hmacSha256 = Mac.getInstance(HMAC_SHA256);
hmacSha256.init(new SecretKeySpec(accountKey, HMAC_SHA256));
} catch (final Exception e) {
throw new IllegalArgumentException(e);
}
} | 3.26 |
hadoop_SharedKeyCredentials_getGMTTime_rdh | /**
* Returns the current GMT date/time String using the RFC1123 pattern.
*
* @return A <code>String</code> that represents the current GMT date/time using the RFC1123 pattern.
*/
static String getGMTTime() {
return getGMTTime(new Date());
} | 3.26 |
hadoop_SharedKeyCredentials_initialValue_rdh | /**
* Thread local for storing GMT date format.
*/
private static ThreadLocal<DateFormat> rfc1123GmtDateTimeFormatter = new ThreadLocal<DateFormat>() {
@Override
protected DateFormat initialValue() {
final DateFormat formatter = new SimpleDateFormat(RFC1123_PATTERN, Locale.ROOT);
formatter.setTimeZone(GMT_ZONE);
return formatter;
} | 3.26 |
hadoop_ByteArray_buffer_rdh | /**
*
* @return the underlying buffer.
*/
@Override
public byte[] buffer() {
return buffer;
} | 3.26 |
hadoop_ByteArray_offset_rdh | /**
*
* @return the offset in the buffer.
*/
@Override
public int offset() {
return offset;
} | 3.26 |
hadoop_ByteArray_size_rdh | /**
*
* @return the size of the byte array.
*/
@Override
public int size() {
return len;
} | 3.26 |
hadoop_SignerManager_initCustomSigners_rdh | /**
* Initialize custom signers and register them with the AWS SDK.
*/
public void initCustomSigners() {
String[] customSigners = ownerConf.getTrimmedStrings(CUSTOM_SIGNERS);
if ((customSigners == null) || (customSigners.length == 0)) {
// No custom signers specified, nothing to do.
LOG.debug("No custom signers specified");
return;
}
for (String customSigner : customSigners) {
String[] parts = customSigner.split(":");
if (!(((parts.length == 1) || (parts.length == 2)) || (parts.length == 3))) {
String message = ((("Invalid format (Expected name, name:SignerClass," + " name:SignerClass:SignerInitializerClass)") + " for CustomSigner: [") + customSigner) + "]";
LOG.error(message);
throw new IllegalArgumentException(message);
}
if (parts.length == 1) {
// Nothing to do. Trying to use a pre-defined Signer
} else {
// Register any custom Signer
maybeRegisterSigner(parts[0], parts[1], ownerConf);
// If an initializer is specified, take care of instantiating it and
// setting it up
if (parts.length == 3) {
Class<? extends AwsSignerInitializer> clazz = null;
try {
clazz = ((Class<? extends AwsSignerInitializer>) (ownerConf.getClassByName(parts[2])));
} catch (ClassNotFoundException e) {
throw new RuntimeException(String.format("SignerInitializer class" +
" [%s] not found for signer [%s]", parts[2], parts[0]), e);
}
LOG.debug("Creating signer initializer: [{}] for signer: [{}]", parts[2], parts[0]);
AwsSignerInitializer signerInitializer = ReflectionUtils.newInstance(clazz, null);
initializers.add(signerInitializer);
signerInitializer.registerStore(bucketName, ownerConf, delegationTokenProvider, ownerUgi);
}
}
}
} | 3.26 |
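The `initCustomSigners` snippet above parses a comma-separated list where each entry is `name`, `name:SignerClass`, or `name:SignerClass:SignerInitializerClass`. A hedged configuration sketch follows; the property key string and the `com.example.*` class names are assumptions for illustration, not confirmed by this dataset:

```java
import org.apache.hadoop.conf.Configuration;

public class CustomSignerConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // One comma-separated entry per signer; three accepted shapes per the parsing above:
    //   name                                     -> use a pre-defined signer
    //   name:SignerClass                         -> register a custom signer class
    //   name:SignerClass:SignerInitializerClass  -> also instantiate an initializer
    // The property key and class names below are illustrative assumptions.
    conf.set("fs.s3a.custom.signers",
        "plainSigner:com.example.PlainSigner,"
            + "initSigner:com.example.InitSigner:com.example.InitSignerInitializer");
    System.out.println(conf.get("fs.s3a.custom.signers"));
  }
}
```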
hadoop_MemoryMappableBlockLoader_load_rdh | /**
* Load the block.
*
* mmap and mlock the block, and then verify its checksum.
*
* @param length
* The current length of the block.
* @param blockIn
* The block input stream. Should be positioned at the
* start. The caller must close this.
* @param metaIn
* The meta file input stream. Should be positioned at
* the start. The caller must close this.
* @param blockFileName
* The block file name, for logging purposes.
* @param key
* The extended block ID.
* @throws IOException
* If mapping block to memory fails or checksum fails.
* @return The Mappable block.
*/
@Override
MappableBlock load(long length, FileInputStream blockIn, FileInputStream metaIn, String blockFileName, ExtendedBlockId key)
throws IOException {
MemoryMappedBlock mappableBlock = null;
MappedByteBuffer mmap = null;
try (FileChannel blockChannel = blockIn.getChannel()) {
if (blockChannel == null) {
throw new IOException("Block InputStream has no FileChannel.");
}
mmap = blockChannel.map(FileChannel.MapMode.READ_ONLY, 0, length);
NativeIO.POSIX.getCacheManipulator().mlock(blockFileName, mmap, length);
verifyChecksum(length, metaIn, blockChannel, blockFileName);
mappableBlock = new MemoryMappedBlock(mmap, length);
} finally {
if (mappableBlock == null) {
if (mmap != null) {
NativeIO.POSIX.munmap(mmap);// unmapping also unlocks
}
}
}
return mappableBlock;
} | 3.26 |
hadoop_MapTaskImpl_getSplitsAsString_rdh | /**
*
* @return a String formatted as a comma-separated list of splits.
*/
@Override
protected String getSplitsAsString() {
String[] splits = getTaskSplitMetaInfo().getLocations();
if ((splits == null) || (splits.length == 0))
return "";
StringBuilder v1 = new StringBuilder();
for (int i = 0; i < splits.length; i++) {
if (i != 0)
v1.append(",");
v1.append(splits[i]);
}
return v1.toString();
} | 3.26 |
hadoop_LocalIdentityTransformer_transformIdentityForGetRequest_rdh | /**
* Perform identity transformation for the Get request results.
*
* @param originalIdentity
* the original user or group in the get request results: FileStatus, AclStatus.
* @param isUserName
* indicate whether the input originalIdentity is an owner name or owning group name.
* @param localIdentity
* the local user or group, should be parsed from UserGroupInformation.
* @return local identity.
*/
@Override
public String transformIdentityForGetRequest(String originalIdentity, boolean isUserName, String localIdentity) throws IOException {
String localIdentityForOrig = (isUserName) ? localToAadIdentityLookup.lookupForLocalUserIdentity(originalIdentity) : localToAadIdentityLookup.lookupForLocalGroupIdentity(originalIdentity);
if ((localIdentityForOrig == null) || localIdentityForOrig.isEmpty()) {
return super.transformIdentityForGetRequest(originalIdentity, isUserName, localIdentity);
}
return localIdentityForOrig;
} | 3.26 |
hadoop_ReadBufferWorker_run_rdh | /**
* Waits until a buffer becomes available in ReadAheadQueue.
* Once a buffer becomes available, reads the file specified in it and then posts results back to buffer manager.
* Rinse and repeat. Forever.
*/
public void run() {
try {
UNLEASH_WORKERS.await();
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();
}
ReadBufferManager bufferManager = ReadBufferManager.getBufferManager();
ReadBuffer buffer;
while (true) {
try {
buffer = bufferManager.getNextBlockToRead(); // blocks, until a buffer is available for this thread
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();
return;
}
if (buffer != null) {
try {
// do the actual read, from the file.
// If AbfsInputStream was created with bigger buffer size than
// read-ahead buffer size, make sure a valid length is passed
// for remote read
int bytesRead = buffer.getStream().readRemote(buffer.getOffset(), buffer.getBuffer(), 0, Math.min(buffer.getRequestedLength(), buffer.getBuffer().length), buffer.getTracingContext());
bufferManager.doneReading(buffer, ReadBufferStatus.AVAILABLE, bytesRead); // post result back to ReadBufferManager
} catch (IOException ex) {
buffer.setErrException(ex);
bufferManager.doneReading(buffer, ReadBufferStatus.READ_FAILED, 0);
} catch (Exception ex) {
buffer.setErrException(new PathIOException(buffer.getStream().getPath(), ex));
bufferManager.doneReading(buffer, ReadBufferStatus.READ_FAILED, 0);
}
}
}
} | 3.26 |
hadoop_ReadBufferWorker_getId_rdh | /**
* return the ID of ReadBufferWorker.
*/
public int getId() {
return this.id;
} | 3.26 |
hadoop_TokenIdentifier_getTrackingId_rdh | /**
* Returns a tracking identifier that can be used to associate usages of a
* token across multiple client sessions.
*
* Currently, this function just returns an MD5 of {@link #getBytes()}.
*
* @return tracking identifier
*/
public String getTrackingId() {
if (trackingId == null) {
trackingId = DigestUtils.md5Hex(getBytes());
}
return trackingId;
} | 3.26 |