name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_CacheDirectiveStats_getFilesNeeded_rdh | /**
*
* @return The number of files needed.
*/
public long getFilesNeeded() {
return filesNeeded;
} | 3.26 |
hadoop_CacheDirectiveStats_getBytesCached_rdh | /**
*
* @return The bytes cached.
*/
public long getBytesCached() {
return bytesCached;
} | 3.26 |
hadoop_CacheDirectiveStats_setFilesCached_rdh | /**
* Sets the files cached by this directive.
*
* @param filesCached
* The number of files cached.
* @return This builder, for call chaining.
*/
public Builder setFilesCached(long filesCached) {
this.filesCached = filesCached;
return this;
} | 3.26 |
hadoop_CacheDirectiveStats_setBytesNeeded_rdh | /**
* Sets the bytes needed by this directive.
*
* @param bytesNeeded
* The bytes needed.
* @return This builder, for call chaining.
*/
public Builder setBytesNeeded(long bytesNeeded) {
this.bytesNeeded = bytesNeeded;
return this;
} | 3.26 |
hadoop_CacheDirectiveStats_setBytesCached_rdh | /**
* Sets the bytes cached by this directive.
*
* @param bytesCached
* The bytes cached.
* @return This builder, for call chaining.
*/
public Builder setBytesCached(long bytesCached) {
this.bytesCached = bytesCached;
return this;
} | 3.26 |
hadoop_CacheDirectiveStats_setFilesNeeded_rdh | /**
* Sets the files needed by this directive.
*
* @param filesNeeded
* The number of files needed
* @return This builder, for call chaining.
*/
public Builder setFilesNeeded(long filesNeeded) {
this.filesNeeded = filesNeeded;
return this;
} | 3.26 |
hadoop_CacheDirectiveStats_setHasExpired_rdh | /**
* Sets whether this directive has expired.
*
* @param hasExpired
* if this directive has expired
* @return This builder, for call chaining.
*/
public Builder setHasExpired(boolean hasExpired) {
this.hasExpired = hasExpired;
return this;
} | 3.26 |
hadoop_CacheDirectiveStats_getBytesNeeded_rdh | /**
*
* @return The bytes needed.
*/
public long getBytesNeeded() {
return bytesNeeded;
} | 3.26 |
hadoop_CacheDirectiveStats_getFilesCached_rdh | /**
*
* @return The number of files cached.
*/
public long getFilesCached() {
return filesCached;
} | 3.26 |
hadoop_HdfsNamedFileStatus_getStoragePolicy_rdh | /**
*
* @return the storage policy id
*/
@Override
public byte getStoragePolicy() {
return storagePolicy;
} | 3.26 |
hadoop_HdfsNamedFileStatus_getSymlinkInBytes_rdh | /**
* Opaque referent for the symlink, to be resolved at the client.
*/
@Override
public byte[] getSymlinkInBytes() {
return uSymlink;
} | 3.26 |
hadoop_HdfsNamedFileStatus_getErasureCodingPolicy_rdh | /**
* Get the erasure coding policy if it's set.
*
* @return the erasure coding policy
*/
@Override
public ErasureCodingPolicy getErasureCodingPolicy() {
return ecPolicy;
} | 3.26 |
hadoop_HdfsNamedFileStatus_getLocalNameInBytes_rdh | /**
* Get the Java UTF8 representation of the local name.
*
* @return the local name in java UTF8
*/
@Override
public byte[] getLocalNameInBytes() {
return uPath;
} | 3.26 |
hadoop_DatanodeProtocolClientSideTranslatorPB_isMethodSupported_rdh | // ProtocolMetaInterface
@Override
public boolean isMethodSupported(String methodName) throws IOException {
return RpcClientUtil.isMethodSupported(rpcProxy, DatanodeProtocolPB.class, RpcKind.RPC_PROTOCOL_BUFFER, RPC.getProtocolVersion(DatanodeProtocolPB.class), methodName);
} | 3.26 |
hadoop_DeviceMappingManager_defaultScheduleAction_rdh | // Default scheduling logic
private void defaultScheduleAction(Set<Device> allowed, Map<Device, ContainerId> used, Set<Device> assigned, ContainerId containerId, int count) {
LOG.debug((((("Using default scheduler. Allowed:" + allowed) + ",Used:") + used) + ", containerId:") + containerId);
for (Device device : allowed) {
if (!used.containsKey(device)) {
used.put(device, containerId);
assigned.add(device);
if (assigned.size() == count) {
return;
}
}
} // end for
} | 3.26 |
hadoop_NativeSingleLineParser_aggregateSkyline_rdh | /**
* Aggregates different jobs' {@link ResourceSkyline}s within the same
* pipeline together.
*
* @param resourceSkyline
* newly extracted {@link ResourceSkyline}.
* @param recurrenceId
* the {@link RecurrenceId} which the resourceSkyline
* belongs to.
* @param skylineRecords
* a {@link Map} which stores the
* {@link ResourceSkyline}s for all pipelines during this parsing.
*/
private void aggregateSkyline(final ResourceSkyline resourceSkyline, final RecurrenceId recurrenceId, final Map<RecurrenceId, List<ResourceSkyline>> skylineRecords) {
List<ResourceSkyline> resourceSkylines = skylineRecords.get(recurrenceId);
if (resourceSkylines == null) {
resourceSkylines = new ArrayList<ResourceSkyline>();
skylineRecords.put(recurrenceId, resourceSkylines);
}
resourceSkylines.add(resourceSkyline);
} | 3.26 |
hadoop_QuorumException_create_rdh | /**
* Create a QuorumException instance with a descriptive message detailing
* the underlying exceptions, as well as any successful responses which
* were returned.
*
* @param <K>
* the keys for the quorum calls
* @param <V>
* the success response type
* @param successes
* any successful responses returned
* @param exceptions
* the exceptions returned
*/
public static <K, V> QuorumException create(String simpleMsg, Map<K, V> successes, Map<K, Throwable> exceptions) {
Preconditions.checkArgument(!exceptions.isEmpty(), "Must pass exceptions");
StringBuilder msg = new StringBuilder();
msg.append(simpleMsg).append(". ");
if (!successes.isEmpty()) {
msg.append(successes.size()).append(" successful responses:\n");
Joiner.on("\n").useForNull("null [success]").withKeyValueSeparator(": ").appendTo(msg, successes);
msg.append("\n");
}
msg.append(exceptions.size() + " exceptions thrown:\n");
boolean isFirst = true;
for (Map.Entry<K, Throwable> e : exceptions.entrySet()) {
if (!isFirst) {
msg.append("\n");
}
isFirst = false;
msg.append(e.getKey()).append(": ");
if (e.getValue() instanceof RuntimeException) {
msg.append(StringUtils.stringifyException(e.getValue()));
} else if (e.getValue().getLocalizedMessage() != null) {
msg.append(e.getValue().getLocalizedMessage());
} else {
msg.append(StringUtils.stringifyException(e.getValue()));
}
}
return new QuorumException(msg.toString());
} | 3.26 |
hadoop_DiskBalancerWorkStatus_getCurrentState_rdh | /**
* Gets current Status.
*
* @return - List of DiskBalancerWorkEntry
*/
public List<DiskBalancerWorkEntry> getCurrentState() {
return currentState;
} | 3.26 |
hadoop_DiskBalancerWorkStatus_setSourcePath_rdh | /**
* Sets the Source Path.
*
* @param sourcePath
* - Volume Path.
*/
public void setSourcePath(String sourcePath) {
this.f1 = sourcePath;
} | 3.26 |
hadoop_DiskBalancerWorkStatus_currentStateString_rdh | /**
* Return current state as a string.
*
* @throws IOException
*/
public String currentStateString() throws IOException {
return MAPPER_WITH_INDENT_OUTPUT.writeValueAsString(currentState);
} | 3.26 |
hadoop_DiskBalancerWorkStatus_setDestPath_rdh | /**
* Sets the destination path.
*
* @param destPath
* - Path
*/
public void setDestPath(String destPath) {
this.destPath = destPath;
} | 3.26 |
hadoop_DiskBalancerWorkStatus_getIntResult_rdh | /**
* Get int value of result.
*
* @return int
*/
public int getIntResult() {
return result;
} | 3.26 |
hadoop_DiskBalancerWorkStatus_setWorkItem_rdh | /**
* Sets the work item.
*
* @param workItem
* - sets the work item information
*/
public void setWorkItem(DiskBalancerWorkItem workItem) {
this.workItem = workItem;
} | 3.26 |
hadoop_DiskBalancerWorkStatus_addWorkEntry_rdh | /**
* Adds a new work entry to the list.
*
* @param entry
* - DiskBalancerWorkEntry
*/
public void addWorkEntry(DiskBalancerWorkEntry entry) {
Preconditions.checkNotNull(entry);
currentState.add(entry);
} | 3.26 |
hadoop_DiskBalancerWorkStatus_getSourcePath_rdh | /**
* Returns the source path.
*
* @return - Source path
*/
public String getSourcePath() {
return f1;
} | 3.26 |
hadoop_DiskBalancerWorkStatus_getDestPath_rdh | /**
* Gets the Destination path.
*
* @return - Path
*/
public String getDestPath() {
return destPath;
} | 3.26 |
hadoop_DiskBalancerWorkStatus_getWorkItem_rdh | /**
* Gets the current status of work for these volumes.
*
* @return - Work Item
*/
public DiskBalancerWorkItem getWorkItem() {
return workItem;
} | 3.26 |
hadoop_DiskBalancerWorkStatus_getResult_rdh | /**
* Returns result.
*
* @return Result
*/
public Result getResult() {
return result;
} | 3.26 |
hadoop_DiskBalancerWorkStatus_parseJson_rdh | /**
* Returns a DiskBalancerWorkStatus object from the Json string.
*
* @param json
* - json String
* @return DiskBalancerWorkStatus
* @throws IOException
*/
public static DiskBalancerWorkStatus parseJson(String json) throws IOException {
return READER_WORKSTATUS.readValue(json);
} | 3.26 |
hadoop_PlacementConstraintManagerService_getValidSourceTag_rdh | /**
* This method will return a single allocation tag. It should be called after
* validating the tags by calling {@link #validateSourceTags}.
*
* @param sourceTags
* the source allocation tags
* @return the single source tag
*/
protected String getValidSourceTag(Set<String> sourceTags) {
return sourceTags.iterator().next();
} | 3.26 |
hadoop_PlacementConstraintManagerService_validateSourceTags_rdh | /**
* Validates whether the allocation tags that will enable a constraint have
* the expected format. At the moment we support a single allocation tag per
* constraint.
*
* @param sourceTags
* the source allocation tags
* @return true if the tags have the expected format
*/
protected boolean validateSourceTags(Set<String> sourceTags) {
if (sourceTags.isEmpty()) {
LOG.warn("A placement constraint cannot be associated with an empty " + "set of tags.");
return false;
}
if (sourceTags.size() > 1) {
LOG.warn("Only a single tag can be associated with a placement " + "constraint currently.");
return false;
}
return true;
} | 3.26 |
hadoop_PatternValidator_validate_rdh | /**
* Validate the name, restricting it to the set defined by the configured pattern.
*
* @param name
* name to validate
* @throws IllegalArgumentException
* if not a valid name
*/
public void validate(String name) {
if (!matches(name)) {
throw new IllegalArgumentException(String.format(E_INVALID_NAME, name, pattern));
}
} | 3.26 |
hadoop_SchedulingRequest_executionType_rdh | /**
* Set the <code>executionType</code> of the request.
*
* @see SchedulingRequest#setExecutionType(ExecutionTypeRequest)
* @param executionType
* <code>executionType</code> of the request
* @return {@link SchedulingRequest.SchedulingRequestBuilder}
*/
@Public
@Unstable
public SchedulingRequestBuilder executionType(ExecutionTypeRequest executionType) {
f0.setExecutionType(executionType);
return this;
} | 3.26 |
hadoop_SchedulingRequest_priority_rdh | /**
* Set the <code>priority</code> of the request.
*
* @param priority
* <code>priority</code> of the request
* @return {@link SchedulingRequest.SchedulingRequestBuilder}
* @see SchedulingRequest#setPriority(Priority)
*/
@Public
@Unstable
public SchedulingRequestBuilder priority(Priority priority) {
f0.setPriority(priority);
return this;
} | 3.26 |
hadoop_SchedulingRequest_build_rdh | /**
* Return generated {@link SchedulingRequest} object.
*
* @return {@link SchedulingRequest}
*/
@Public
@Unstable
public SchedulingRequest build() {
return f0;
} | 3.26 |
hadoop_SchedulingRequest_resourceSizing_rdh | /**
* Set the <code>resourceSizing</code> of the request.
*
* @see SchedulingRequest#setResourceSizing(ResourceSizing)
* @param resourceSizing
* <code>resourceSizing</code> of the request
* @return {@link SchedulingRequest.SchedulingRequestBuilder}
*/
@Public
@Unstable
public SchedulingRequestBuilder resourceSizing(ResourceSizing resourceSizing) {
f0.setResourceSizing(resourceSizing);
return this;
} | 3.26 |
hadoop_SchedulingRequest_allocationRequestId_rdh | /**
* Set the <code>allocationRequestId</code> of the request.
*
* @see SchedulingRequest#setAllocationRequestId(long)
* @param allocationRequestId
* <code>allocationRequestId</code> of the
* request
* @return {@link SchedulingRequest.SchedulingRequestBuilder}
*/
@Public
@Unstable
public SchedulingRequestBuilder allocationRequestId(long allocationRequestId) {
f0.setAllocationRequestId(allocationRequestId);
return this;
} | 3.26 |
hadoop_SchedulingRequest_allocationTags_rdh | /**
* Set the <code>allocationTags</code> of the request.
*
* @see SchedulingRequest#setAllocationTags(Set)
* @param allocationTags
* <code>allocationsTags</code> of the request
* @return {@link SchedulingRequest.SchedulingRequestBuilder}
*/
@Public
@Unstable
public SchedulingRequestBuilder allocationTags(Set<String> allocationTags) {
f0.m1(allocationTags);
return this;
} | 3.26 |
hadoop_EncodingState_checkParameters_rdh | /**
* Check and validate encoding parameters, throw exception accordingly.
*
* @param inputs
* input buffers to check
* @param outputs
* output buffers to check
*/
<T> void checkParameters(T[] inputs, T[] outputs) {
if (inputs.length != encoder.getNumDataUnits()) {
throw new HadoopIllegalArgumentException("Invalid inputs length");
}
if (outputs.length != encoder.getNumParityUnits()) {
throw new HadoopIllegalArgumentException("Invalid outputs length");
}
} | 3.26 |
hadoop_S3AInputPolicy_getFirstSupportedPolicy_rdh | /**
* Scan the list of input policies, returning the first one supported.
*
* @param policies
* list of policies.
* @param defaultPolicy
* fallback
* @return a policy or the defaultPolicy, which may be null
*/
public static S3AInputPolicy getFirstSupportedPolicy(Collection<String> policies, @Nullable S3AInputPolicy defaultPolicy) {
for (String s : policies) {
S3AInputPolicy nextPolicy = S3AInputPolicy.getPolicy(s, null);
if (nextPolicy != null) {
return nextPolicy;
}
}
return defaultPolicy;
} | 3.26 |
hadoop_S3AInputPolicy_getPolicy_rdh | /**
* Choose an access policy.
*
* @param name
* strategy name from a configuration option, etc.
* @param defaultPolicy
* default policy to fall back to.
* @return the chosen strategy
*/
public static S3AInputPolicy getPolicy(String name, @Nullable S3AInputPolicy defaultPolicy) {
String trimmed = name.trim().toLowerCase(Locale.ENGLISH);
switch (trimmed) {
case FS_OPTION_OPENFILE_READ_POLICY_ADAPTIVE :
case FS_OPTION_OPENFILE_READ_POLICY_DEFAULT :
case Constants.INPUT_FADV_NORMAL :
return Normal;
// all these options currently map to random IO.
case FS_OPTION_OPENFILE_READ_POLICY_RANDOM :
case FS_OPTION_OPENFILE_READ_POLICY_VECTOR :
return Random;
case FS_OPTION_OPENFILE_READ_POLICY_SEQUENTIAL :
case FS_OPTION_OPENFILE_READ_POLICY_WHOLE_FILE :
return Sequential;
default :
return defaultPolicy;
}
} | 3.26 |
hadoop_BalanceProcedureScheduler_remove_rdh | /**
* Remove the job from scheduler if it finishes.
*/
public BalanceJob remove(BalanceJob job) {
BalanceJob inner = findJob(job);
if (inner == null) {
return null;
} else if (job.isJobDone()) {
synchronized(this) {
return jobSet.remove(inner);
}
}
return null;
} | 3.26 |
hadoop_BalanceProcedureScheduler_submit_rdh | /**
* Submit the job.
*/
public synchronized void submit(BalanceJob job) throws IOException {
if (!running.get()) {
throw new IOException("Scheduler is shutdown.");
}
String jobId = m0();
job.setId(jobId);
job.setScheduler(this);
journal.saveJob(job);
jobSet.put(job, job);
runningQueue.add(job);
f0.info("Add new job={}", job);
} | 3.26 |
hadoop_BalanceProcedureScheduler_waitUntilDone_rdh | /**
* Wait permanently until the job is done.
*/
public void waitUntilDone(BalanceJob job) {
BalanceJob found = findJob(job);
if ((found == null) || found.isJobDone()) {
return;
}
while (!found.isJobDone()) {
try {
found.waitJobDone();
} catch (InterruptedException e) {
}
}
} | 3.26 |
hadoop_BalanceProcedureScheduler_shutDown_rdh | /**
* Shutdown the scheduler.
*/
public synchronized void shutDown() {
if (!running.get()) {
return;
}
running.set(false);
readerThread.interrupt();
roosterThread.interrupt();
recoverThread.interrupt();
f1.shutdownNow();
} | 3.26 |
hadoop_BalanceProcedureScheduler_getAllJobs_rdh | /**
* Return all jobs in the scheduler.
*/
public Collection<BalanceJob> getAllJobs() {
return jobSet.values();
} | 3.26 |
hadoop_BalanceProcedureScheduler_writeJournal_rdh | /**
* Save current status to journal.
*/
boolean writeJournal(BalanceJob job) {
try {
journal.saveJob(job);
return true;
} catch (Exception e) {
f0.warn("Save procedure failed, add to recoverQueue. job=" + job, e);
recoverQueue.add(job);
return false;
}
} | 3.26 |
hadoop_BalanceProcedureScheduler_isRunning_rdh | /**
* The running state of the scheduler.
*/
public boolean isRunning() {
return running.get();
} | 3.26 |
hadoop_BalanceProcedureScheduler_init_rdh | /**
* Init the scheduler.
*
* @param recoverJobs
* whether to recover all the jobs from journal or not.
*/
public synchronized void init(boolean recoverJobs) throws IOException {
this.runningQueue = new LinkedBlockingQueue<>();
this.delayQueue = new DelayQueue<>();
this.recoverQueue = new LinkedBlockingQueue<>();
this.jobSet = new ConcurrentHashMap<>();
// start threads.
this.roosterThread = new Rooster();
this.roosterThread.setDaemon(true);
roosterThread.start();
this.recoverThread = new Recover();
this.recoverThread.setDaemon(true);
recoverThread.start();
int workerNum = conf.getInt(WORK_THREAD_NUM, WORK_THREAD_NUM_DEFAULT);
f1 = new ThreadPoolExecutor(workerNum, workerNum * 2, 1, TimeUnit.MILLISECONDS, new LinkedBlockingDeque<>());
this.readerThread = new Reader();
this.readerThread.start();
// init journal.
journal = new BalanceJournalInfoHDFS();
journal.setConf(conf);
if (recoverJobs) {
recoverAllJobs();
}
} | 3.26 |
hadoop_BalanceProcedureScheduler_recoverAllJobs_rdh | /**
* Search all jobs and add them to recoverQueue. It's called once after the
* scheduler starts.
*/
private void recoverAllJobs() throws IOException {
BalanceJob[] jobs = journal.listAllJobs();
for (BalanceJob job : jobs) {
recoverQueue.add(job);
jobSet.put(job, job);
f0.info("Recover federation balance job {}.", job);
}
} | 3.26 |
hadoop_BalanceProcedureScheduler_delay_rdh | /**
* Delay this job.
*/
void delay(BalanceJob job, long delayInMilliseconds) {
delayQueue.add(new DelayWrapper(job, delayInMilliseconds));
f0.info("Need delay {}ms. Add to delayQueue. job={}", delayInMilliseconds, job);
} | 3.26 |
hadoop_BalanceProcedureScheduler_shutDownAndWait_rdh | /**
* Shutdown scheduler and wait at most timeout seconds for procedures to
* finish.
*
* @param timeout
* Wait at most timeout seconds for procedures to finish.
*/
public synchronized void shutDownAndWait(int timeout) {
shutDown();
while (readerThread.isAlive()) {
try {
readerThread.join();
} catch (InterruptedException e) {
}
}
while (roosterThread.isAlive()) {
try {
roosterThread.join();
} catch (InterruptedException e) {
}
}
while (recoverThread.isAlive()) {
try {
recoverThread.join();
} catch (InterruptedException e) {
}
}
while (!f1.isTerminated()) {
try {
f1.awaitTermination(timeout, TimeUnit.SECONDS);
} catch (InterruptedException e) {
}
}
} | 3.26 |
hadoop_HAProxyFactory_setAlignmentContext_rdh | /**
* Set the alignment context to be used when creating new proxies using
* this factory. Not all implementations will use this alignment context.
*/
default void setAlignmentContext(AlignmentContext alignmentContext) {
// noop
} | 3.26 |
hadoop_XMLParser_transitionTo_rdh | /**
* Attempt to transition to another state.
*
* @param nextState
* The new state to transition to.
* @throws IOException
* If the transition from the current state to
* {@code nextState} is not allowed.
*/
private void transitionTo(State nextState) throws IOException {
if (currentState.m0(nextState)) {
currentState = nextState;
} else {
throw new IOException((("State transition not allowed; from " + currentState) + " to ") + nextState);
}
} | 3.26 |
hadoop_XMLParser_parseLine_rdh | /**
* Accept a single line of the XML file, and return a {@link BlockInfo} for
* any blocks contained within that line. Update internal state dependent on
* other XML values seen, e.g. the beginning of a file.
*
* @param line
* The XML line to parse.
* @return {@code BlockInfo}s for any blocks found.
*/
List<BlockInfo> parseLine(String line) throws IOException {
if (currentState == State.DEFAULT) {
if (line.contains("<INodeSection>")) {
transitionTo(State.INODE_SECTION);
} else {
return Collections.emptyList();
}
}
if (line.contains("<inode>")) {
transitionTo(State.INODE);
}
if (line.contains("<type>FILE</type>")) {
transitionTo(State.FILE);
}
List<String> replicationStrings = valuesFromXMLString(line, "replication");
if (!replicationStrings.isEmpty()) {
if (replicationStrings.size() > 1) {
throw new IOException(String.format("Found %s replication strings", replicationStrings.size()));
}
transitionTo(State.FILE_WITH_REPLICATION);
currentReplication = Short.parseShort(replicationStrings.get(0));
}
Matcher blockMatcher = BLOCK_PATTERN.matcher(line);
List<BlockInfo> blockInfos = new ArrayList<>();
while (blockMatcher.find()) {
if (currentState != State.FILE_WITH_REPLICATION) {
throw new IOException("Found a block string when in state: " +
currentState);
}
long id = Long.parseLong(blockMatcher.group(1));
long gs = Long.parseLong(blockMatcher.group(2));
long size = Long.parseLong(blockMatcher.group(3));
blockInfos.add(new BlockInfo(id, gs, size, currentReplication));
}
if (line.contains("</inode>")) {
transitionTo(State.INODE_SECTION);
}
if (line.contains("</INodeSection>")) {
transitionTo(State.DEFAULT);
}
return blockInfos;
} | 3.26 |
hadoop_XMLParser_valuesFromXMLString_rdh | /**
*
* @param xml
* An XML string
* @param field
* The field whose value(s) should be extracted
* @return List of the field's values.
*/
private static List<String> valuesFromXMLString(String xml, String field) {
Matcher m = Pattern.compile(((("<" + field) + ">(.+?)</") + field) + ">").matcher(xml);
List<String> found = new ArrayList<>();
while (m.find()) {
found.add(m.group(1));
}
return found;
} | 3.26 |
hadoop_ObserverReadProxyProviderWithIPFailover_cloneDelegationTokenForVirtualIP_rdh | /**
* Clone delegation token for the virtual IP. Specifically
* clone the dt that corresponds to the name service uri,
* to the configured corresponding virtual IP.
*
* @param conf
* configuration
* @param haURI
* the ha uri, a name service id in this case.
*/
private void cloneDelegationTokenForVirtualIP(Configuration conf, URI haURI) {
URI v0 = getFailoverVirtualIP(conf, haURI.getHost());
InetSocketAddress vipAddress = new InetSocketAddress(v0.getHost(), v0.getPort());
HAUtilClient.cloneDelegationTokenForLogicalUri(ugi, haURI, Collections.singleton(vipAddress));
} | 3.26 |
hadoop_PersistentCommitData_saveFile_rdh | /**
* Save to a file.
* This uses the createFile() API, which S3A supports for
* faster load and declaring sequential access, always
*
* @param <T>
* type of persistent format
* @param fs
* filesystem
* @param path
* path to save to
* @param instance
* data to save
* @param serializer
* serializer to use
* @param performance
* skip all safety check on the write
* @return any IOStatistics from the output stream, or null
* @throws IOException
* IO failure
*/
public static <T extends PersistentCommitData> IOStatistics saveFile(final FileSystem fs, final Path path, final T instance, final JsonSerialization<T> serializer, final boolean performance) throws IOException {
FSDataOutputStreamBuilder builder = fs.createFile(path).create().recursive().overwrite(true);
// switch to performance mode
builder.opt(FS_S3A_CREATE_PERFORMANCE, performance);
return saveToStream(path, instance, builder, serializer);
} | 3.26 |
hadoop_PersistentCommitData_saveToStream_rdh | /**
* Save to a file.
* This uses the createFile() API, which S3A supports for
* faster load and declaring sequential access, always
*
* @param <T>
* type of persistent format
* @param path
* path to save to (used for logging)
* @param instance
* data to save
* @param builder
* builder already prepared for the write
* @param serializer
* serializer to use
* @return any IOStatistics from the output stream, or null
* @throws IOException
* IO failure
*/
public static <T extends PersistentCommitData> IOStatistics saveToStream(final Path path, final T instance, final FSDataOutputStreamBuilder builder, final JsonSerialization<T> serializer) throws IOException {
LOG.debug("saving commit data to file {}", path);
FSDataOutputStream dataOutputStream = builder.build();
try {
dataOutputStream.write(serializer.toBytes(instance));
} finally {
dataOutputStream.close();
}
return dataOutputStream.getIOStatistics();
} | 3.26 |
hadoop_PersistentCommitData_load_rdh | /**
* Load an instance from a status, then validate it.
* This uses the openFile() API, which S3A supports for
* faster load and declaring sequential access, always
*
* @param <T>
* type of persistent format
* @param fs
* filesystem
* @param status
* status of file to load
* @param serializer
* serializer to use
* @return the loaded instance
* @throws IOException
* IO failure
* @throws ValidationFailure
* if the data is invalid
*/
public static <T extends PersistentCommitData> T load(FileSystem fs, FileStatus status, JsonSerialization<T> serializer) throws IOException {
Path path = status.getPath();
LOG.debug("Reading commit data from file {}", path);
T result = serializer.load(fs, path, status);
result.validate();
return result;
} | 3.26 |
hadoop_TimelineEvents_addEvent_rdh | /**
* Add a single event to the existing event list
*
* @param event
* a single event
*/
public void addEvent(TimelineEvent event) {
events.add(event);
} | 3.26 |
hadoop_TimelineEvents_setEntityType_rdh | /**
* Set the entity type
*
* @param entityType
* the entity type
*/
public void setEntityType(String entityType) {
this.entityType = entityType;
} | 3.26 |
hadoop_TimelineEvents_addEvents_rdh | /**
* Add a list of {@link EventsOfOneEntity} instances into the existing list
*
* @param allEvents
* a list of {@link EventsOfOneEntity} instances
*/
public void addEvents(List<EventsOfOneEntity> allEvents) {
this.allEvents.addAll(allEvents);
} | 3.26 |
hadoop_TimelineEvents_getEvents_rdh | /**
* Get a list of events
*
* @return a list of events
*/
@XmlElement(name = "events")
public List<TimelineEvent> getEvents() {
return events;
} | 3.26 |
hadoop_TimelineEvents_setEntityId_rdh | /**
* Set the entity Id
*
* @param entityId
* the entity Id
*/
public void setEntityId(String entityId) {
this.entityId = entityId;
} | 3.26 |
hadoop_TimelineEvents_getAllEvents_rdh | /**
* Get a list of {@link EventsOfOneEntity} instances
*
* @return a list of {@link EventsOfOneEntity} instances
*/
@XmlElement(name = "events")
public List<EventsOfOneEntity> getAllEvents() {
return allEvents;
} | 3.26 |
hadoop_TimelineEvents_getEntityType_rdh | /**
* Get the entity type
*
* @return the entity type
*/
@XmlElement(name = "entitytype")
public String getEntityType() {
return entityType;
} | 3.26 |
hadoop_TimelineEvents_setEvents_rdh | /**
* Set the event list to the given list of events
*
* @param events
* a list of events
*/
public void setEvents(List<TimelineEvent> events) {
this.events = events;
} | 3.26 |
hadoop_TimelineEvents_getEntityId_rdh | /**
* Get the entity Id
*
* @return the entity Id
*/
@XmlElement(name = "entity")
public String getEntityId() {
return entityId;
} | 3.26 |
hadoop_ErasureCodingPolicyState_write_rdh | /**
* Write to out.
*/
public void write(DataOutput out) throws IOException {
out.writeByte(ordinal());
} | 3.26 |
hadoop_ErasureCodingPolicyState_read_rdh | /**
* Read from in.
*/
public static ErasureCodingPolicyState read(DataInput in) throws IOException {
return fromValue(in.readByte());
} | 3.26 |
hadoop_FederationStateStoreUtils_convertMasterKeyToDelegationKey_rdh | /**
* Convert MasterKey to DelegationKey.
*
* @param masterKey
* masterKey.
* @return DelegationKey.
*/
private static DelegationKey convertMasterKeyToDelegationKey(RouterMasterKey masterKey) {
ByteBuffer keyByteBuf = masterKey.getKeyBytes();
byte[] keyBytes = new byte[keyByteBuf.remaining()];
keyByteBuf.get(keyBytes);
return new DelegationKey(masterKey.getKeyId(), masterKey.getExpiryDate(), keyBytes);
} | 3.26 |
hadoop_FederationStateStoreUtils_logAndThrowStoreException_rdh | /**
* Throws an <code>FederationStateStoreException</code> due to an error in
* <code>FederationStateStore</code>.
*
* @param t
* the throwable raised in the called class.
* @param log
* the logger interface.
* @param errMsgFormat
* the error message format string.
* @param args
* referenced by the format specifiers in the format string.
* @throws YarnException
* on failure
*/
public static void logAndThrowStoreException(Throwable t, Logger log, String errMsgFormat, Object... args) throws YarnException {
String errMsg = String.format(errMsgFormat, args);
if (t != null) {
log.error(errMsg, t);
throw new FederationStateStoreException(errMsg, t);
} else {
log.error(errMsg);
throw new FederationStateStoreException(errMsg);
}
} | 3.26 |
hadoop_FederationStateStoreUtils_setUsername_rdh | /**
* Sets a specific username for <code>HikariDataSource</code> SQL connections.
*
* @param dataSource
* the <code>HikariDataSource</code> connections
* @param userNameDB
* the value to set
*/
public static void setUsername(HikariDataSource dataSource, String userNameDB) {
if (userNameDB != null) {
dataSource.setUsername(userNameDB);
LOG.debug("Setting non NULL Username for Store connection");
} else {
LOG.debug("NULL Username specified for Store connection, so ignoring");
}
} | 3.26 |
hadoop_FederationStateStoreUtils_encodeWritable_rdh | /**
* Encode for Writable objects.
* This method will convert the writable object to a base64 string.
*
* @param key
* Writable Key.
* @return base64 string.
* @throws IOException
* raised on errors performing I/O.
*/
public static String encodeWritable(Writable key) throws IOException {
ByteArrayOutputStream bos = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(bos);
key.write(dos);
dos.flush();
return Base64.getUrlEncoder().encodeToString(bos.toByteArray());
} | 3.26 |
hadoop_FederationStateStoreUtils_setProperty_rdh | /**
* Sets a specific value for a specific property of
* <code>HikariDataSource</code> SQL connections.
*
* @param dataSource
* the <code>HikariDataSource</code> connections
* @param property
* the property to set
* @param value
* the value to set
*/
public static void setProperty(HikariDataSource dataSource, String property, String value) {
LOG.debug("Setting property {} with value {}", property, value);
if (((property != null) && (!property.isEmpty())) && (value != null)) {
dataSource.addDataSourceProperty(property, value);
}
} | 3.26 |
hadoop_FederationStateStoreUtils_setPassword_rdh | /**
* Sets a specific password for <code>HikariDataSource</code> SQL connections.
*
* @param dataSource
* the <code>HikariDataSource</code> connections
* @param password
* the value to set
*/
public static void setPassword(HikariDataSource dataSource, String password) {
if (password != null) {
dataSource.setPassword(password);
LOG.debug("Setting non NULL Credentials for Store connection");
} else {
LOG.debug("NULL Credentials specified for Store connection, so ignoring");
}
} | 3.26 |
hadoop_FederationStateStoreUtils_decodeWritable_rdh | /**
* Decode Base64 string to Writable object.
*
* @param w
* Writable Key.
* @param idStr
* base64 string.
* @throws IOException
* raised on errors performing I/O.
*/
public static void decodeWritable(Writable w, String idStr) throws IOException {
DataInputStream in = new DataInputStream(new ByteArrayInputStream(Base64.getUrlDecoder().decode(idStr)));
w.readFields(in);
} | 3.26 |
hadoop_FederationStateStoreUtils_logAndThrowException_rdh | /**
* Throws an exception due to an error in <code>FederationStateStore</code>.
*
* @param log
* the logger interface
* @param errMsg
* the error message
* @param t
* the throwable raised in the called class.
* @throws YarnException
* on failure
*/
public static void logAndThrowException(Logger log, String errMsg, Throwable t) throws YarnException {
if (t != null) {
log.error(errMsg, t);
throw new YarnException(errMsg, t);
} else {
log.error(errMsg);
throw new YarnException(errMsg);
}
} | 3.26 |
hadoop_FederationStateStoreUtils_returnToPool_rdh | /**
* Returns the SQL <code>FederationStateStore</code> connections to the pool.
*
* @param log
* the logger interface
* @param cstmt
* the interface used to execute SQL stored procedures
* @param conn
* the SQL connection
* @throws YarnException
* on failure
*/
public static void returnToPool(Logger log, CallableStatement cstmt, Connection conn) throws YarnException {
returnToPool(log, cstmt, conn, null);
} | 3.26 |
hadoop_FederationStateStoreUtils_logAndThrowInvalidInputException_rdh | /**
* Throws an <code>FederationStateStoreInvalidInputException</code> due to an
* error in <code>FederationStateStore</code>.
*
* @param log
* the logger interface
* @param errMsg
* the error message
* @throws YarnException
* on failure
*/
public static void logAndThrowInvalidInputException(Logger log, String errMsg) throws YarnException {
log.error(errMsg);
throw new FederationStateStoreInvalidInputException(errMsg);
} | 3.26 |
hadoop_FederationStateStoreUtils_logAndThrowRetriableException_rdh | /**
* Throws an <code>FederationStateStoreRetriableException</code> due to an
* error in <code>FederationStateStore</code>.
*
* @param log
* the logger interface.
* @param errMsgFormat
* the error message format string.
* @param args
* referenced by the format specifiers in the format string.
* @throws YarnException
* on failure
*/
public static void logAndThrowRetriableException(Logger log, String errMsgFormat, Object... args) throws YarnException {
String v3 = String.format(errMsgFormat, args);
log.error(v3);
throw new FederationStateStoreRetriableException(v3);
} | 3.26 |
hadoop_TaskAttemptScanDirectoryStage_scanDirectoryTree_rdh | /**
* Recursively scan a directory tree.
* The manifest will contain all files to rename
* (source and dest) and directories to create.
* All files are processed before any of the subdirs are.
* This helps in statistics gathering.
* There's some optimizations which could be done with async
* fetching of the iterators of those subdirs, but as this
* is generally off-critical path then that "enhancement"
* can be postponed until data suggests this needs improvement.
*
* @param manifest
* manifest to update
* @param srcDir
* dir to scan
* @param destDir
* destination directory
* @param depth
* depth from the task attempt dir.
* @param parentDirExists
* does the parent dir exist?
* @return the maximum depth of child directories
* @throws IOException
* IO failure.
*/
private int scanDirectoryTree(TaskManifest manifest, Path srcDir, Path destDir, int depth, boolean parentDirExists) throws IOException {
// generate some task progress in case directory scanning is very slow.
progress();
int maxDepth = 0;
int files = 0;
boolean dirExists = parentDirExists;
List<FileStatus> subdirs = new ArrayList<>();
try (DurationInfo ignored = new DurationInfo(LOG, false, "Task Attempt %s source dir %s, dest dir %s", getTaskAttemptId(), srcDir, destDir)) {
// list the directory. This may block until the listing is complete,
// or, if the FS does incremental or asynchronous fetching,
// then the next()/hasNext() call will block for the results
// unless turned off, ABFS does this asynchronously
final RemoteIterator<FileStatus> listing = listStatusIterator(srcDir);
// when the FS (especially ABFS) does an async fetch of the listing,
// we can probe for the status of the destination dir while that
// page is being fetched.
// probe for and add the dest dir entry for all but
// the base dir
if (depth > 0) {
final EntryStatus v15;
if (parentDirExists) {
final FileStatus destDirStatus = getFileStatusOrNull(destDir);
v15 = EntryStatus.toEntryStatus(destDirStatus);
dirExists = destDirStatus != null;
} else {
// if there is no parent dir, then there is no need to look
// for this directory -report it as missing automatically.
v15 = EntryStatus.not_found;
}
manifest.addDirectory(DirEntry.dirEntry(destDir, v15, depth));
}
// process the listing; this is where abfs will block
// to wait the result of the list call.
while (listing.hasNext()) {
final FileStatus st = listing.next();
if (st.isFile()) {
// this is a file, so add to the list of files to commit.
files++;
final FileEntry entry = fileEntry(st, destDir);
manifest.addFileToCommit(entry);
LOG.debug("To rename: {}", entry);
} else if (st.isDirectory()) {
// will need to scan this directory too.
subdirs.add(st);
} else {
// some other object. ignoring
LOG.info("Ignoring FS object {}", st);
}
}
// add any statistics provided by the listing.
maybeAddIOStatistics(getIOStatistics(), listing);
}
// now scan the subdirectories
LOG.debug("{}: Number of subdirectories under {} found: {}; file count {}", getName(), srcDir, subdirs.size(), files);
for (FileStatus st : subdirs) {
Path destSubDir = new Path(destDir, st.getPath().getName());
final int d = scanDirectoryTree(manifest, st.getPath(), destSubDir, depth + 1, dirExists);
maxDepth = Math.max(maxDepth, d);
}
return 1 + maxDepth;
} | 3.26 |
hadoop_TaskAttemptScanDirectoryStage_executeStage_rdh | /**
* Build the Manifest.
*
* @return the manifest
* @throws IOException
* failure.
*/
@Override
protected TaskManifest executeStage(final Void arguments) throws IOException {
final Path taskAttemptDir = getRequiredTaskAttemptDir();
final TaskManifest manifest = createTaskManifest(getStageConfig());
LOG.info("{}: scanning directory {}", getName(), taskAttemptDir);
final int depth = scanDirectoryTree(manifest, taskAttemptDir, getDestinationDir(), 0, true);
List<FileEntry> filesToCommit = manifest.getFilesToCommit();
LongSummaryStatistics fileSummary = filesToCommit.stream().mapToLong(FileEntry::getSize).summaryStatistics();
long fileDataSize = fileSummary.getSum();
long fileCount = fileSummary.getCount();
int dirCount = manifest.getDestDirectories().size();
LOG.info("{}: directory {} contained {} file(s); data size {}", getName(), taskAttemptDir, fileCount, fileDataSize);
LOG.info("{}: Directory count = {}; maximum depth {}", getName(), dirCount, depth);
// add statistics about the task output which, when aggregated, provides
// insight into structure of job, task skew, etc.
IOStatisticsStore iostats = getIOStatistics();
iostats.addSample(COMMITTER_TASK_DIRECTORY_COUNT_MEAN, dirCount);
iostats.addSample(COMMITTER_TASK_DIRECTORY_DEPTH_MEAN, depth);
iostats.addSample(COMMITTER_TASK_FILE_COUNT_MEAN, fileCount);
iostats.addSample(COMMITTER_TASK_FILE_SIZE_MEAN, fileDataSize);
return manifest;
} | 3.26 |
hadoop_VisualizeStateMachine_getGraphFromClasses_rdh | /**
* get Graph From Classes.
*
* @param graphName
* graphName.
* @param classes
* list of classes which have static field
* stateMachineFactory of type StateMachineFactory
* @return graph represent this StateMachine
* @throws Exception
* exception occurs.
*/
public static Graph getGraphFromClasses(String graphName, List<String> classes) throws Exception {
Graph ret = null;
if (classes.size() != 1) {
ret = new Graph(graphName);
}
for (String className : classes) {
Class clz = Class.forName(className);
Field factoryField = clz.getDeclaredField("stateMachineFactory");
factoryField.setAccessible(true);
StateMachineFactory factory = ((StateMachineFactory) (factoryField.get(null)));
if (classes.size() == 1) {
return factory.generateStateGraph(graphName);
}
String gname = clz.getSimpleName();
if (gname.endsWith("Impl"))
{
gname = gname.substring(0, gname.length() - 4);
}
if (ret != null) {
ret.addSubGraph(factory.generateStateGraph(gname));
}
}
return ret;
} | 3.26 |
hadoop_VirtualRecordReader_nextKeyValue_rdh | // The map function per split should be invoked only once.
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
if (numRows > 0) {
numRows--;
return true;
} else {
return false;
}
} | 3.26 |
hadoop_User_getName_rdh | /**
* Get the full name of the user.
*/
@Override
public String getName() {
return fullName;
} | 3.26 |
hadoop_User_getLastLogin_rdh | /**
* Get the time of the last login.
*
* @return the number of milliseconds since the beginning of time.
*/
public long getLastLogin() {
return lastLogin;
} | 3.26 |
hadoop_User_setLogin_rdh | /**
* Set the login object
*
* @param login
*/
public void setLogin(LoginContext login) {
this.login = login;
} | 3.26 |
hadoop_User_getLogin_rdh | /**
* Returns login object
*
* @return login
*/
public LoginContext getLogin() {
return login;
} | 3.26 |
hadoop_User_getShortName_rdh | /**
* Get the user name up to the first '/' or '@'
*
* @return the leading part of the user name
*/
public String getShortName() {
return shortName;
} | 3.26 |
hadoop_User_setLastLogin_rdh | /**
* Set the last login time.
*
* @param time
* the number of milliseconds since the beginning of time
*/
public void setLastLogin(long time) {
lastLogin = time;
} | 3.26 |
hadoop_NativeBatchProcessor_flushOutput_rdh | /**
* Called by native side, clean output buffer so native side can continue
* processing
*/
private void flushOutput(int length) throws IOException {
if (null != rawInputBuffer) {
rawInputBuffer.position(0);
rawInputBuffer.limit(length);
if (null != dataReceiver) {
try {
dataReceiver.receiveData();
} catch (IOException e) {
e.printStackTrace();
throw e;
}
}
}
} | 3.26 |
hadoop_MultipartUploaderBuilderImpl_permission_rdh | /**
* Set permission for the file.
*/
@Override
public B permission(@Nonnull final FsPermission perm) {
checkNotNull(perm);
permission = perm;
return getThisBuilder();
} | 3.26 |
hadoop_MultipartUploaderBuilderImpl_replication_rdh | /**
* Set replication factor.
*/
@Override
public B replication(short replica) {
replication = replica;
return getThisBuilder();
} | 3.26 |
hadoop_MultipartUploaderBuilderImpl_create_rdh | /**
* Create an FSDataOutputStream at the specified path.
*/
@Override
public B create() {
flags.add(CreateFlag.CREATE);
return getThisBuilder();
} | 3.26 |
hadoop_MultipartUploaderBuilderImpl_overwrite_rdh | /**
* Set to true to overwrite the existing file.
* Set it to false, an exception will be thrown when calling {@link #build()}
* if the file exists.
*/
@Override
public B overwrite(boolean overwrite) {
if (overwrite) {
flags.add(CreateFlag.OVERWRITE);
} else {
flags.remove(CreateFlag.OVERWRITE);
}
return getThisBuilder();
} | 3.26 |
hadoop_MultipartUploaderBuilderImpl_append_rdh | /**
* Append to an existing file (optional operation).
*/
@Override
public B append() {
flags.add(CreateFlag.APPEND);
return getThisBuilder();
} | 3.26 |
hadoop_MultipartUploaderBuilderImpl_checksumOpt_rdh | /**
* Set checksum opt.
*/
@Override
public B checksumOpt(@Nonnull final ChecksumOpt chksumOpt) {
checkNotNull(chksumOpt);
checksumOpt = chksumOpt;
return getThisBuilder();
} | 3.26 |
hadoop_AbfsOutputStreamStatisticsImpl_queueShrunk_rdh | /**
* {@inheritDoc }
*
* Records the number of times AbfsOutputStream try to remove the completed
* write operations from the beginning of write operation task queue.
*/
@Override
public void queueShrunk() {
ioStatisticsStore.incrementCounter(StreamStatisticNames.QUEUE_SHRUNK_OPS);
} | 3.26 |