name (stringlengths 12-178) | code_snippet (stringlengths 8-36.5k) | score (float64 3.26-3.68) |
---|---|---|
hadoop_StageConfig_getTaskAttemptDir_rdh | /**
* Task attempt directory.
*
* @return the task attempt dir.
*/
public Path getTaskAttemptDir() {
return taskAttemptDir;
} | 3.26 |
hadoop_StageConfig_getTaskManifestDir_rdh | /**
* Directory to put task manifests into.
*
* @return a path under the job attempt dir.
*/
public Path getTaskManifestDir() {
return taskManifestDir;
} | 3.26 |
hadoop_StageConfig_m0_rdh | /**
* Set the job directories from the attempt directories
* information. Does not set task attempt fields.
*
* @param dirs
* source of directories.
* @return this
*/
public StageConfig m0(final ManifestCommitterSupport.AttemptDirectories dirs) {
checkOpen();
withJobAttemptDir(dirs.getJobAttemptDir())
    .withJobAttemptTaskSubDir(dirs.getJobAttemptTaskSubDir())
    .withDestinationDir(dirs.getOutputPath())
    .withOutputTempSubDir(dirs.getOutputTempSubDir())
    .withTaskManifestDir(dirs.getTaskManifestDir());
return this;
} | 3.26 |
hadoop_StageConfig_getJobSuccessMarkerPath_rdh | /**
* Get the location of the success marker.
*
* @return a path under the destination directory.
*/
public Path getJobSuccessMarkerPath() {
return new Path(destinationDir, SUCCESS_MARKER);
} | 3.26 |
hadoop_StageConfig_getDestinationDir_rdh | /**
* Destination of job.
*/
public Path getDestinationDir() {
return destinationDir;
} | 3.26 |
hadoop_StageConfig_withTaskAttemptId_rdh | /**
* Set builder value.
*
* @param value
* new value
* @return this
*/
public StageConfig withTaskAttemptId(final String value) {
checkOpen();
taskAttemptId = value;
return this;
} | 3.26 |
hadoop_StageConfig_enterStage_rdh | /**
* Enter the stage; calls back to
* {@link #enterStageEventHandler} if non-null.
*
* @param stage
* stage entered
*/
public void enterStage(String stage) {
if (enterStageEventHandler != null) {
  enterStageEventHandler.enterStage(stage);
}
} | 3.26 |
hadoop_StageConfig_exitStage_rdh | /**
* Exit the stage; calls back to
* {@link #enterStageEventHandler} if non-null.
*
* @param stage
 * stage exited
*/
public void exitStage(String stage) {
if (enterStageEventHandler != null) {enterStageEventHandler.exitStage(stage);
}
} | 3.26 |
hadoop_StageConfig_getJobAttemptTaskSubDir_rdh | /**
* Get the path to the subdirectory under $jobID where task
* attempts are. List this dir to find all task attempt dirs.
*
* @return a path under the job attempt dir.
*/
public Path getJobAttemptTaskSubDir() {
return jobAttemptTaskSubDir;
} | 3.26 |
hadoop_StageConfig_withIOProcessors_rdh | /**
* Set builder value.
*
* @param value
* new value
* @return this
*/
public StageConfig withIOProcessors(final TaskPool.Submitter value) {
checkOpen();
ioProcessors = value;
return this;
} | 3.26 |
hadoop_StageConfig_withJobAttemptDir_rdh | /**
* Set Job attempt directory.
*
* @param dir
* new dir
* @return this
*/
public StageConfig withJobAttemptDir(final Path dir) {
checkOpen();
jobAttemptDir = dir;
return this;
} | 3.26 |
hadoop_StageConfig_withTaskManifestDir_rdh | /**
* Set builder value.
*
* @param value
* new value
* @return the builder
*/
public StageConfig withTaskManifestDir(Path value) {
checkOpen();
taskManifestDir = value;
return this;
} | 3.26 |
hadoop_SFTPConnectionPool_returnToPool_rdh | /**
* Add the channel into pool.
*
 * @param channel
 * channel to return to the pool.
 */
synchronized void returnToPool(ChannelSftp channel) {
ConnectionInfo info = con2infoMap.get(channel);
HashSet<ChannelSftp> cons = idleConnections.get(info);
if (cons == null) {
cons = new HashSet<>();
idleConnections.put(info, cons);
}
cons.add(channel);
} | 3.26 |
hadoop_RenameFilesStage_getTotalFileSize_rdh | /**
* Get the total file size of the committed task.
*
* @return a number greater than or equal to zero.
*/
public synchronized long getTotalFileSize() {
return totalFileSize;
} | 3.26 |
hadoop_RenameFilesStage_executeStage_rdh | /**
* Rename files in job commit.
*
* @param args
* tuple of (manifest data, set of created dirs)
* @return the job report.
* @throws IOException
* failure
*/
@Override
protected ManifestSuccessData executeStage(Triple<LoadedManifestData, Set<Path>, Integer> args) throws IOException {
final LoadedManifestData manifestData = args.getLeft();
f0 = args.getMiddle();
final EntryFileIO entryFileIO = new EntryFileIO(getStageConfig().getConf());
final ManifestSuccessData success = createManifestOutcome(getStageConfig(), OP_STAGE_JOB_COMMIT);
LOG.info("{}: Executing Manifest Job Commit with {} files", getName(), manifestData.getFileCount());
// iterate over the entries in the file.
try (SequenceFile.Reader reader = entryFileIO.createReader(manifestData.getEntrySequenceData())) {
TaskPool.foreach(entryFileIO.iterateOver(reader)).executeWith(getIOProcessors()).stopOnFailure().run(this::commitOneFile);
}
// synchronized block to keep spotbugs happy.
List<FileEntry> committed = getFilesCommitted();
LOG.info("{}: Files committed: {}. Total size {}", getName(), committed.size(), getTotalFileSize());
// Add a subset of the destination files to the success file;
// enough for simple testing
success.setFilenamePaths(committed.subList(0, Math.min(committed.size(), args.getRight())).stream().map(FileEntry::getDestPath).collect(Collectors.toList()));
success.setSuccess(true);
return success;
} | 3.26 |
hadoop_RenameFilesStage_commitOneFile_rdh | /**
* Commit one file by rename, then, if that doesn't fail,
* add to the files committed list.
*
* @param entry
* entry to commit.
* @throws IOException
 * failure.
*/
private void commitOneFile(FileEntry entry) throws IOException {
updateAuditContext(OP_STAGE_JOB_RENAME_FILES);
// report progress back
progress();
// if the dest dir is to be deleted,
// look to see if the parent dir was created.
// if it was, we know that the file doesn't exist.
final boolean deleteDest = getStageConfig().getDeleteTargetPaths() && (!f0.contains(entry.getDestPath().getParent()));
// do the rename
commitFile(entry, deleteDest);
// update the list and IOStats
synchronized(this) {
filesCommitted.add(entry);
totalFileSize += entry.getSize();
}
} | 3.26 |
hadoop_RenameFilesStage_getFilesCommitted_rdh | /**
* Get the list of files committed.
* Access is not synchronized.
*
* @return direct access to the list of files.
*/
public synchronized List<FileEntry> getFilesCommitted() {
return filesCommitted;
} | 3.26 |
hadoop_JobSummary_add_rdh | // A little optimization for a very common case
SummaryBuilder add(String key, long value) {
return _add(key, Long.toString(value));
} | 3.26 |
hadoop_ProbeStatus_succeed_rdh | /**
 * The probe has succeeded: capture the current timestamp, set
* success to true, and record any other data needed.
*
* @param probe
* probe
*/
public void succeed(Probe probe) {
  finish(probe, true, probe.getName(), null);
} | 3.26 |
hadoop_ProbeStatus_getOriginator_rdh | /**
* Get the probe that generated this result. May be null
*
* @return a possibly null reference to a probe
*/
public Probe getOriginator() {
return originator;
} | 3.26 |
hadoop_ProbeStatus_fail_rdh | /**
* A probe has failed either because the test returned false, or an exception
 * was thrown. The {@link #success} field is set to false, and any exception
* thrown is recorded.
*
* @param probe
* probe that failed
* @param thrown
* an exception that was thrown.
*/
public void fail(Probe probe, Throwable thrown) {
finish(probe, false, "Failure in " + probe, thrown);
} | 3.26 |
hadoop_ProbeStatus_setSuccess_rdh | /**
* Set both the success and the real outcome bits to the same value
*
* @param success
* the new value
*/
public void setSuccess(boolean success) {
this.success = success;
realOutcome = success;
} | 3.26 |
hadoop_ProbeStatus_markAsSuccessful_rdh | /**
* Flip the success bit on while the real outcome bit is kept false
*/
public void markAsSuccessful() {
success = true;
} | 3.26 |
hadoop_ClientThrottlingIntercept_sendingRequest_rdh | /**
* Called before the Azure Storage SDK sends a request. Client-side throttling
* uses this to suspend the request, if necessary, to minimize errors and
* maximize throughput.
*
* @param event
* The connection, operation, and request state.
*/
public static void sendingRequest(SendingRequestEvent event) {
BlobOperationDescriptor.OperationType operationType = BlobOperationDescriptor.getOperationType(((HttpURLConnection) (event.getConnectionObject())));
switch (operationType) {
case GetBlob :
singleton.f1.suspendIfNecessary();
break;
case AppendBlock :
case PutBlock :
case PutPage :
singleton.writeThrottler.suspendIfNecessary();
break;
default :
break;
}
} | 3.26 |
hadoop_ClientThrottlingIntercept_eventOccurred_rdh | /**
* Called after the Azure Storage SDK receives a response. Client-side
* throttling uses this
* to collect metrics.
*
* @param event
* The connection, operation, and request state.
*/
@Override
public void eventOccurred(ResponseReceivedEvent event) {
singleton.responseReceived(event);
} | 3.26 |
hadoop_ClientThrottlingIntercept_responseReceived_rdh | /**
* Called after the Azure Storage SDK receives a response. Client-side
* throttling uses this to collect metrics.
*
* @param event
* The connection, operation, and request state.
*/
public static void responseReceived(ResponseReceivedEvent event) {
updateMetrics(((HttpURLConnection) (event.getConnectionObject())), event.getRequestResult());
} | 3.26 |
hadoop_ClientThrottlingIntercept_errorReceivingResponse_rdh | /**
* Called when a network error occurs before the HTTP status and response
* headers are received. Client-side throttling uses this to collect metrics.
*
* @param event
* The connection, operation, and request state.
*/
public static void errorReceivingResponse(ErrorReceivingResponseEvent event) {
updateMetrics(((HttpURLConnection) (event.getConnectionObject())), event.getRequestResult());
} | 3.26 |
hadoop_InvalidMagicNumberException_isHandshake4Encryption_rdh | /**
 * Return true if it's a handshake for encryption.
 *
 * @return true if it's a handshake for encryption
*/
public boolean isHandshake4Encryption() {
return handshake4Encryption;
} | 3.26 |
hadoop_LoggedJob_getJobProperties_rdh | /**
* Get the configuration properties of the job.
*/
public JobProperties getJobProperties() {
return jobProperties;
} | 3.26 |
hadoop_LoggedJob_setUnknownAttribute_rdh | // for input parameter ignored.
@JsonAnySetter
public void setUnknownAttribute(String attributeName, Object ignored) {
if (!alreadySeenAnySetterAttributes.contains(attributeName)) {
alreadySeenAnySetterAttributes.add(attributeName);
System.err.println(("In LoggedJob, we saw the unknown attribute " + attributeName) + ".");
}
} | 3.26 |
hadoop_LoggedJob_compareStrings_rdh | // I'll treat this as an atomic object type
private void compareStrings(List<String> c1, List<String> c2, TreePath loc, String eltname)
    throws DeepInequalityException {
if ((c1 == null) && (c2 == null)) {
return;
}
TreePath recursePath = new TreePath(loc, eltname);
if (((c1 == null) || (c2 == null)) || (!c1.equals(c2))) {
throw new DeepInequalityException(eltname + " miscompared", recursePath);
}
} | 3.26 |
hadoop_LoggedJob_setJobProperties_rdh | /**
* Set the configuration properties of the job.
*/
void setJobProperties(Properties conf) {
this.jobProperties = new JobProperties(conf);
} | 3.26 |
hadoop_MountTableRefresherThread_isSuccess_rdh | /**
*
* @return true if cache was refreshed successfully.
*/
public boolean isSuccess() {
return success;
} | 3.26 |
hadoop_MountTableRefresherThread_m0_rdh | /**
 * Refresh the mount table cache of the local and remote routers. Local and
 * remote routers are refreshed differently; let's look at what the local and
 * remote routers are and how each is refreshed. Suppose there are three
 * routers R1, R2 and R3, and a user wants to add a new mount table entry. The
 * user connects to only one router, not all of them. Suppose the user connects
 * to R1 and adds the mount table entry through the API or CLI. In this context
 * R1 is the local router, while R2 and R3 are remote routers. Because the add
 * is invoked on R1, R1 updates its cache locally and need not make an RPC
 * call, but it will make RPC calls to update the cache on R2 and R3.
*/
@Override
public void m0() {
try {
SecurityUtil.doAsLoginUser(() -> {
if (UserGroupInformation.isSecurityEnabled()) {
UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
}
RefreshMountTableEntriesResponse refreshMountTableEntries = manager.refreshMountTableEntries(RefreshMountTableEntriesRequest.newInstance());
success = refreshMountTableEntries.getResult();
return true;
});
} catch (IOException e) {
LOG.error("Failed to refresh mount table entries cache at router {}", adminAddress, e);
} finally {
countDownLatch.countDown();
}
} | 3.26 |
hadoop_Trilean_toBoolean_rdh | /**
* Converts the Trilean enum to boolean.
*
* @return the corresponding boolean.
* @throws TrileanConversionException
* when tried to convert Trilean.UNKNOWN.
*/
public boolean toBoolean() throws TrileanConversionException {
if (this == Trilean.UNKNOWN) {
throw new TrileanConversionException();
}
return Boolean.valueOf(this.name());
} | 3.26 |
hadoop_Trilean_getTrilean_rdh | /**
* Converts String to Trilean.
*
* @param str
* the string to convert.
* @return the corresponding Trilean for the passed string str.
*/
public static Trilean getTrilean(String str) {
if (TRUE_STR.equalsIgnoreCase(str)) {
return Trilean.TRUE;
}
if (FALSE_STR.equalsIgnoreCase(str)) {
return Trilean.FALSE;
}
return Trilean.UNKNOWN;
} | 3.26 |
hadoop_BlockManager_get_rdh | /**
* Gets the block having the given {@code blockNumber}.
*
* The entire block is read into memory and returned as a {@code BufferData}.
* The blocks are treated as a limited resource and must be released when
* one is done reading them.
*
* @param blockNumber
* the number of the block to be read and returned.
* @return {@code BufferData} having data from the given block.
* @throws IOException
 * if there is an error reading the given block.
* @throws IllegalArgumentException
* if blockNumber is negative.
*/
public BufferData get(int blockNumber) throws IOException {
checkNotNegative(blockNumber, "blockNumber");
int size = blockData.getSize(blockNumber);
ByteBuffer buffer = ByteBuffer.allocate(size);
long startOffset = blockData.getStartOffset(blockNumber);
read(buffer, startOffset, size);
buffer.flip();
return new BufferData(blockNumber, buffer);
} | 3.26 |
hadoop_BlockManager_requestCaching_rdh | /**
* Requests that the given block should be copied to the cache. Optional operation.
*
* @param data
* the {@code BufferData} instance to optionally cache.
*/
public void requestCaching(BufferData data) {
// Do nothing because we do not support caching.
} | 3.26 |
hadoop_BlockManager_release_rdh | /**
* Releases resources allocated to the given block.
*
* @param data
* the {@code BufferData} to release.
* @throws IllegalArgumentException
* if data is null.
*/
public void release(BufferData data) {
checkNotNull(data, "data");
// Do nothing because we allocate a new buffer each time.
} | 3.26 |
hadoop_BlockManager_m0_rdh | /**
* Requests optional prefetching of the given block.
*
* @param blockNumber
* the id of the block to prefetch.
* @throws IllegalArgumentException
* if blockNumber is negative.
*/
public void m0(int blockNumber) {
checkNotNegative(blockNumber, "blockNumber");
// Do nothing because we do not support prefetches.
} | 3.26 |
hadoop_BlockManager_cancelPrefetches_rdh | /**
* Requests cancellation of any previously issued prefetch requests.
*/
public void cancelPrefetches() {
// Do nothing because we do not support prefetches.
} | 3.26 |
hadoop_BlockManager_getBlockData_rdh | /**
* Gets block data information.
*
* @return instance of {@code BlockData}.
*/
public BlockData getBlockData() {
return blockData;
} | 3.26 |
hadoop_TeraChecksum_main_rdh | /**
*
 * @param args
 * command-line arguments.
*/
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new TeraChecksum(), args);
System.exit(res);
} | 3.26 |
hadoop_ConverterUtils_convertFromYarn_rdh | /**
* Convert a protobuf token into a rpc token and set its service.
*
* @param protoToken
* the yarn token
* @param service
* the service for the token
* @param <T>
* Generic Type T.
* @return rpc token
*/
public static <T extends TokenIdentifier> Token<T> convertFromYarn(Token protoToken, Text service) {
Token<T> token = new Token<T>(protoToken.getIdentifier().array(), protoToken.getPassword().array(), new Text(protoToken.getKind()), new Text(protoToken.getService()));
if (service != null) {
token.setService(service);
}
return token;
} | 3.26 |
hadoop_ConverterUtils_getPathFromYarnURL_rdh | /**
* return a hadoop path from a given url
* This method is deprecated, use {@link URL#toPath()} instead.
*
* @param url
* url to convert
* @return path from {@link URL}
* @throws URISyntaxException
* exception thrown to indicate that a string could not be parsed as a
* URI reference.
*/
@Public
@Deprecated
public static Path getPathFromYarnURL(URL url) throws URISyntaxException {
return url.toPath();
} | 3.26 |
hadoop_ConverterUtils_getYarnUrlFromURI_rdh | /* This method is deprecated, use {@link URL#fromURI(URI)} instead. */
@Public
@Deprecated
public static URL getYarnUrlFromURI(URI uri) {
  return URL.fromURI(uri);
} | 3.26 |
hadoop_ConverterUtils_getYarnUrlFromPath_rdh | /* This method is deprecated, use {@link URL#fromPath(Path)} instead. */
@Public
@Deprecated
public static URL getYarnUrlFromPath(Path path) {
return URL.fromPath(path);
} | 3.26 |
hadoop_ConverterUtils_toString_rdh | /* This method is deprecated, use {@link ContainerId#toString()} instead. */
@Public
@Deprecated
public static String toString(ContainerId cId) {
return cId == null ? null : cId.toString();
} | 3.26 |
hadoop_PolicyProvider_getServices_rdh | /**
* A default {@link PolicyProvider} without any defined services.
 */
public static final PolicyProvider DEFAULT_POLICY_PROVIDER = new PolicyProvider() {
@Override
public Service[] getServices() {
return null;
} | 3.26 |
hadoop_MultiStateTransitionListener_addListener_rdh | /**
* Add a listener to the list of listeners.
*
* @param listener
* A listener.
*/
public void addListener(StateTransitionListener<OPERAND, EVENT, STATE> listener) {
listeners.add(listener);
} | 3.26 |
hadoop_QueuePriorityContainerCandidateSelector_preemptionAllowed_rdh | /**
 * Do we allow demandingQueue to preempt resources from toBePreemptedQueue?
*
* @param demandingQueue
* demandingQueue
* @param toBePreemptedQueue
* toBePreemptedQueue
* @return can/cannot
*/
private boolean preemptionAllowed(String demandingQueue, String toBePreemptedQueue) {
return priorityDigraph.contains(demandingQueue, toBePreemptedQueue);
} | 3.26 |
hadoop_QueuePriorityContainerCandidateSelector_canPreemptEnoughResourceForAsked_rdh | /**
 * Can we preempt enough resources for the given request:
*
* @param requiredResource
* askedResource
* @param demandingQueue
* demandingQueue
* @param schedulerNode
* node
* @param lookingForNewReservationPlacement
* Are we trying to look for move
* reservation to the node
* @param newlySelectedContainers
* newly selected containers, will be set when
* we can preempt enough resources from the node.
* @return can/cannot
*/
private boolean canPreemptEnoughResourceForAsked(Resource requiredResource, String demandingQueue,
    FiCaSchedulerNode schedulerNode, boolean lookingForNewReservationPlacement,
    List<RMContainer> newlySelectedContainers) {
// Do not check touched nodes again.
if (touchedNodes.contains(schedulerNode.getNodeID())) {
return false;
}
TempSchedulerNode node = f0.get(schedulerNode.getNodeID());
if (null == node) {
node = TempSchedulerNode.fromSchedulerNode(schedulerNode);
f0.put(schedulerNode.getNodeID(), node);
}
if ((null != schedulerNode.getReservedContainer()) && lookingForNewReservationPlacement) {
// Node reserved by the others, skip this node
// We will not try to move the reservation to node which reserved already.
return false;
}
// Needed preemption = asked - (node.total - node.allocated)
Resource lacking = Resources.subtract(requiredResource, Resources.subtract(node.getTotalResource(), node.getAllocatedResource()));
// On each host, simply check if we could preempt containers from
// lower-prioritized queues or not
List<RMContainer> runningContainers = node.getRunningContainers();
Collections.sort(runningContainers, CONTAINER_CREATION_TIME_COMPARATOR);
// First of all, consider already selected containers
for (RMContainer runningContainer : runningContainers) {
if (CapacitySchedulerPreemptionUtils.isContainerAlreadySelected(runningContainer, selectedCandidates)) {
Resources.subtractFrom(lacking, runningContainer.getAllocatedResource());
}
}
// If we already can allocate the reserved container after preemption,
// skip following steps
if (Resources.fitsIn(rc, lacking, Resources.none())) {
return true;
}
Resource allowed = Resources.clone(totalPreemptionAllowed);
Resource selected = Resources.createResource(0);
for (RMContainer runningContainer : runningContainers) {
if (CapacitySchedulerPreemptionUtils.isContainerAlreadySelected(runningContainer, selectedCandidates)) {
// ignore selected containers
continue;
}
// Only preempt resource from queue with lower priority
if (!preemptionAllowed(demandingQueue, runningContainer.getQueueName())) {
continue;
}
// Don't preempt AM container
if (runningContainer.isAMContainer()) {
continue;
}
// Not allow to preempt more than limit
if (Resources.greaterThanOrEqual(rc, clusterResource, allowed, runningContainer.getAllocatedResource())) {
Resources.subtractFrom(allowed, runningContainer.getAllocatedResource());
Resources.subtractFrom(lacking, runningContainer.getAllocatedResource());
Resources.addTo(selected, runningContainer.getAllocatedResource());
if (null != newlySelectedContainers) {
  newlySelectedContainers.add(runningContainer);
}
}
// Lacking <= 0 means we can allocate the reserved container
if (Resources.fitsIn(rc, lacking, Resources.none())) {
return true;
}
}
return false;
} | 3.26 |
hadoop_QueuePriorityContainerCandidateSelector_isQueueSatisfied_rdh | /**
* Do we allow the demanding queue preempt resource from other queues?
* A satisfied queue is not allowed to preempt resource from other queues.
*
* @param demandingQueue
* @return allowed/not
*/
private boolean isQueueSatisfied(String demandingQueue, String partition) {
TempQueuePerPartition tq = preemptionContext.getQueueByPartition(demandingQueue, partition);
if (null == tq) {
return false;
}
Resource guaranteed = tq.getGuaranteed();
Resource usedDeductReservd = Resources.subtract(tq.getUsed(), tq.getReserved());
Resource markedToPreemptFromOtherQueue = f1.get(demandingQueue, partition);
if (null == markedToPreemptFromOtherQueue) {
markedToPreemptFromOtherQueue = Resources.none();
}
// return Used - reserved + to-preempt-from-other-queue >= guaranteed
boolean flag = Resources.greaterThanOrEqual(rc, clusterResource, Resources.add(usedDeductReservd, markedToPreemptFromOtherQueue), guaranteed);
return flag;
} | 3.26 |
hadoop_ClientDatanodeProtocolServerSideTranslatorPB_getDiskBalancerSetting_rdh | /**
* Returns a run-time setting from diskbalancer like Bandwidth.
*/
@Override
public DiskBalancerSettingResponseProto getDiskBalancerSetting(RpcController controller, DiskBalancerSettingRequestProto request) throws ServiceException {
try {
String val = impl.getDiskBalancerSetting(request.getKey());
return DiskBalancerSettingResponseProto.newBuilder().setValue(val).build();
} catch (Exception e) {
throw new ServiceException(e);
}
} | 3.26 |
hadoop_ClientDatanodeProtocolServerSideTranslatorPB_queryDiskBalancerPlan_rdh | /**
* Gets the status of an executing Plan.
*/
@Override
public QueryPlanStatusResponseProto queryDiskBalancerPlan(RpcController controller, QueryPlanStatusRequestProto request) throws ServiceException {
try {
DiskBalancerWorkStatus result = impl.queryDiskBalancerPlan();
return QueryPlanStatusResponseProto.newBuilder().setResult(result.getResult().getIntResult()).setPlanID(result.getPlanID()).setPlanFile(result.getPlanFile()).setCurrentStatus(result.currentStateString()).build();
} catch (Exception e) {
throw new ServiceException(e);
}
} | 3.26 |
hadoop_ReservationListRequest_m0_rdh | /**
* The {@link ReservationListRequest} will use the reservationId to search for
* reservations to list if it is provided. Otherwise, it will select active
* reservations within the startTime and endTime (inclusive).
*
* @param queue
* Required. Cannot be null or empty. Refers to the reservable
* queue in the scheduler that was selected when creating a
* reservation submission {@link ReservationSubmissionRequest}.
* @param reservationId
* Optional. String representation of
* {@code ReservationId} If provided, other fields will
* be ignored.
* @return the list of reservations via {@link ReservationListRequest}
*/
@Public
@Unstable
public static ReservationListRequest m0(String queue, String reservationId) {
return newInstance(queue, reservationId, -1, -1, false);
} | 3.26 |
hadoop_ReservationListRequest_newInstance_rdh | /**
* The {@link ReservationListRequest} will use the reservationId to search for
* reservations to list if it is provided. Otherwise, it will select active
* reservations within the startTime and endTime (inclusive).
*
* @param queue
* Required. Cannot be null or empty. Refers to the reservable
* queue in the scheduler that was selected when creating a
* reservation submission {@link ReservationSubmissionRequest}.
* @param reservationId
* Optional. String representation of
* {@code ReservationId} If provided, other fields will
* be ignored.
* @param includeReservationAllocations
* Optional. Flag that
* determines whether the entire reservation allocations are
* to be returned. Reservation allocations are subject to
* change in the event of re-planning as described by
* {@code ReservationDefinition}.
* @return the list of reservations via {@link ReservationListRequest}
*/
@Public
@Unstable
public static ReservationListRequest newInstance(String queue, String reservationId, boolean includeReservationAllocations) {
return newInstance(queue, reservationId, -1, -1, includeReservationAllocations);
} | 3.26 |
hadoop_RouterFsckServlet_m0_rdh | /**
* Handle fsck request.
*/
@Override
public void m0(HttpServletRequest request, HttpServletResponse response) throws IOException {
  final Map<String, String[]> pmap = request.getParameterMap();
final PrintWriter out = response.getWriter();
final InetAddress remoteAddress = InetAddress.getByName(request.getRemoteAddr());
final ServletContext context = getServletContext();
final Configuration conf = RouterHttpServer.getConfFromContext(context);
final UserGroupInformation ugi = getUGI(request, conf);
try {
ugi.doAs(((PrivilegedExceptionAction<Object>) (() -> {
Router router = RouterHttpServer.getRouterFromContext(context);
new RouterFsck(router, pmap, out, remoteAddress).fsck();
return null;
})));
} catch (InterruptedException e) {
response.sendError(HttpURLConnection.HTTP_BAD_REQUEST, e.getMessage());
}
} | 3.26 |
hadoop_YarnClient_getApplications_rdh | /**
* <p>
* Get a list of ApplicationReports that match the given
* {@link GetApplicationsRequest}.
* </p>
*
* <p>
* If the user does not have <code>VIEW_APP</code> access for an application
* then the corresponding report will be filtered as described in
* {@link #getApplicationReport(ApplicationId)}.
* </p>
*
* @param request
* the request object to get the list of applications.
* @return The list of ApplicationReports that match the request
* @throws YarnException
* Exception specific to YARN.
* @throws IOException
* Exception mostly related to connection errors.
*/
public List<ApplicationReport> getApplications(GetApplicationsRequest request) throws YarnException, IOException {
throw new UnsupportedOperationException(("The sub-class extending " + YarnClient.class.getName()) + " is expected to implement this !");
} | 3.26 |
hadoop_YarnClient_createYarnClient_rdh | /**
* Create a new instance of YarnClient.
*/
@Public
public static YarnClient createYarnClient() {
YarnClient client = new YarnClientImpl();
return client;
} | 3.26 |
hadoop_IrqHandler_handle_rdh | /**
* Handler for the JVM API for signal handling.
*
* @param s
* signal raised
*/
@Override
public void handle(Signal s) {
signalCount.incrementAndGet();
InterruptData data = new InterruptData(s.getName(), s.getNumber());
LOG.info("Interrupted: {}", data);
handler.interrupted(data);
} | 3.26 |
hadoop_IrqHandler_getSignalCount_rdh | /**
* Get the count of how many times a signal has been raised.
*
* @return the count of signals
*/
public int getSignalCount() {
return signalCount.get();
} | 3.26 |
hadoop_IrqHandler_getName_rdh | /**
*
* @return the signal name.
*/
public String getName() {
return name;
} | 3.26 |
hadoop_IrqHandler_bind_rdh | /**
* Bind to the interrupt handler.
*
* @throws IllegalArgumentException
* if the exception could not be set
*/
public void bind() {
Preconditions.checkState(signal == null, "Handler already bound");
try {
signal = new Signal(name);
Signal.handle(signal, this);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException((("Could not set handler for signal \"" + name) + "\".")
    + "This can happen if the JVM has the -Xrs set.", e);
}
} | 3.26 |
hadoop_FederationUtil_getJmx_rdh | /**
* Get a JMX data from a web endpoint.
*
* @param beanQuery
* JMX bean.
* @param webAddress
* Web address of the JMX endpoint.
* @param connectionFactory
* to open http/https connection.
* @param scheme
* to use for URL connection.
* @return JSON with the JMX data
*/
public static JSONArray getJmx(String beanQuery, String webAddress,
    URLConnectionFactory connectionFactory, String scheme) {
JSONArray ret = null;
BufferedReader reader = null;
try {
String host = webAddress;
int port = -1;
if (webAddress.indexOf(":") >
0) {
String[] webAddressSplit = webAddress.split(":");
host = webAddressSplit[0];
port = Integer.parseInt(webAddressSplit[1]);
}
URL jmxURL = new URL(scheme, host, port, "/jmx?qry=" + beanQuery);
LOG.debug("JMX URL: {}", jmxURL);
// Create a URL connection
URLConnection conn = connectionFactory.openConnection(jmxURL, UserGroupInformation.isSecurityEnabled());
conn.setConnectTimeout(5 * 1000);
conn.setReadTimeout(5 * 1000);
InputStream in = conn.getInputStream();
InputStreamReader isr = new InputStreamReader(in, "UTF-8");
reader = new BufferedReader(isr);
StringBuilder sb = new StringBuilder();
String line = null;
while ((line = reader.readLine()) != null) {
sb.append(line);
}
String jmxOutput = sb.toString();
// Parse JSON
JSONObject json = new JSONObject(jmxOutput);
ret = json.getJSONArray("beans");
} catch (IOException e) {
LOG.error("Cannot read JMX bean {} from server {}", beanQuery, webAddress, e);} catch (JSONException e) {
// We shouldn't need more details if the JSON parsing fails.
LOG.error("Cannot parse JMX output for {} from server {}: {}", beanQuery, webAddress, e.getMessage());
} catch (Exception e) {
LOG.error("Cannot parse JMX output for {} from server {}", beanQuery, webAddress, e);
} finally {
if (reader != null) {
  try {
reader.close();
} catch (IOException e) {
LOG.error("Problem closing {}", webAddress, e);
}
}
}
return ret;
} | 3.26 |
hadoop_FederationUtil_getVersion_rdh | /**
* Fetch the Hadoop version string for this jar.
*
* @return Hadoop version string, e.g., 3.0.1.
*/
public static String getVersion() {
return VersionInfo.getVersion();
} | 3.26 |
hadoop_FederationUtil_updateMountPointStatus_rdh | /**
* Add the number of children for an existing HdfsFileStatus object.
*
* @param dirStatus
* HdfsfileStatus object.
* @param children
* number of children to be added.
* @return HdfsFileStatus with the number of children specified.
*/
public static HdfsFileStatus updateMountPointStatus(HdfsFileStatus dirStatus, int children) {
// Get flags to set in new FileStatus.
EnumSet<HdfsFileStatus.Flags> flags = DFSUtil.getFlags(dirStatus.isEncrypted(), dirStatus.isErasureCoded(), dirStatus.isSnapshotEnabled(), dirStatus.hasAcl());
EnumSet.noneOf(Flags.class);
return new HdfsFileStatus.Builder().atime(dirStatus.getAccessTime()).blocksize(dirStatus.getBlockSize()).children(children).ecPolicy(dirStatus.getErasureCodingPolicy()).feInfo(dirStatus.getFileEncryptionInfo()).fileId(dirStatus.getFileId()).group(dirStatus.getGroup()).isdir(dirStatus.isDir()).length(dirStatus.getLen()).mtime(dirStatus.getModificationTime()).owner(dirStatus.getOwner()).path(dirStatus.getLocalNameInBytes()).perm(dirStatus.getPermission()).replication(dirStatus.getReplication()).storagePolicy(dirStatus.getStoragePolicy()).symlink(dirStatus.getSymlinkInBytes()).flags(flags).build();
} | 3.26 |
hadoop_FederationUtil_newFairnessPolicyController_rdh | /**
* Creates an instance of an RouterRpcFairnessPolicyController
* from the configuration.
*
* @param conf
* Configuration that defines the fairness controller class.
* @return Fairness policy controller.
*/
public static RouterRpcFairnessPolicyController newFairnessPolicyController(Configuration conf) {
Class<? extends RouterRpcFairnessPolicyController> clazz = conf.getClass(RBFConfigKeys.DFS_ROUTER_FAIRNESS_POLICY_CONTROLLER_CLASS, RBFConfigKeys.DFS_ROUTER_FAIRNESS_POLICY_CONTROLLER_CLASS_DEFAULT, RouterRpcFairnessPolicyController.class);
return newInstance(conf, null, null, clazz);
} | 3.26 |
hadoop_FederationUtil_getCompileInfo_rdh | /**
* Fetch the build/compile information for this jar.
*
* @return String Compilation info.
*/
public static String getCompileInfo() {
return (((VersionInfo.getDate() + " by ") + VersionInfo.getUser()) + " from ") + VersionInfo.getBranch();
} | 3.26 |
hadoop_FederationUtil_newFileSubclusterResolver_rdh | /**
* Creates an instance of a FileSubclusterResolver from the configuration.
*
* @param conf
* Configuration that defines the file resolver class.
* @param router
* Router service.
* @return New file subcluster resolver.
*/
public static FileSubclusterResolver newFileSubclusterResolver(Configuration conf, Router router) {
Class<? extends FileSubclusterResolver> clazz = conf.getClass(RBFConfigKeys.FEDERATION_FILE_RESOLVER_CLIENT_CLASS,
RBFConfigKeys.FEDERATION_FILE_RESOLVER_CLIENT_CLASS_DEFAULT, FileSubclusterResolver.class);
return newInstance(conf, router, Router.class, clazz);
} | 3.26 |
hadoop_FederationUtil_newActiveNamenodeResolver_rdh | /**
* Creates an instance of an ActiveNamenodeResolver from the configuration.
*
* @param conf
* Configuration that defines the namenode resolver class.
* @param stateStore
* State store passed to class constructor.
* @return New active namenode resolver.
*/
public static ActiveNamenodeResolver newActiveNamenodeResolver(Configuration conf, StateStoreService stateStore) {
Class<? extends ActiveNamenodeResolver> clazz = conf.getClass(RBFConfigKeys.FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS, RBFConfigKeys.FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS_DEFAULT, ActiveNamenodeResolver.class);
return newInstance(conf, stateStore, StateStoreService.class, clazz);
} | 3.26 |
hadoop_DurationTracker_asDuration_rdh | /**
* Get the duration of an operation as a java Duration
* instance. If the duration tracker hasn't completed,
* or its duration tracking doesn't actually measure duration,
* returns Duration.ZERO.
*
* @return a duration, value of ZERO until close().
*/
default Duration asDuration() {
return Duration.ZERO;
} | 3.26 |
hadoop_CachingBlockManager_cancelPrefetches_rdh | /**
* Requests cancellation of any previously issued prefetch requests.
*/
@Override
public void cancelPrefetches() {
BlockOperations.Operation op = ops.cancelPrefetches();
for (BufferData data : bufferPool.getAll()) {
// We add blocks being prefetched to the local cache so that the prefetch is not wasted.
if (data.stateEqualsOneOf(State.PREFETCHING, State.READY)) {
requestCaching(data);
}
}
ops.end(op);
} | 3.26 |
hadoop_CachingBlockManager_numCached_rdh | /**
* Number of caching operations completed.
*
* @return the number of cached buffers.
*/
public int numCached() {
return f0.size();
} | 3.26 |
hadoop_CachingBlockManager_numAvailable_rdh | /**
* Number of ByteBuffers available to be acquired.
*
* @return the number of available buffers.
*/
public int numAvailable() {
return bufferPool.numAvailable();
} | 3.26 |
hadoop_CachingBlockManager_requestCaching_rdh | /**
* Requests that the given block should be copied to the local cache.
* The block must not be accessed by the caller after calling this method
* because it will released asynchronously relative to the caller.
*
* @throws IllegalArgumentException
* if data is null.
*/
@Override
public void requestCaching(BufferData data) {
if (closed) {
return;
}
if (cachingDisabled.get()) {
data.setDone();
return;
}
Validate.checkNotNull(data, "data");
// Opportunistic check without locking.
if (!data.stateEqualsOneOf(EXPECTED_STATE_AT_CACHING)) {
  return;
}
synchronized (data) {
// Reconfirm state after locking.
if (!data.stateEqualsOneOf(EXPECTED_STATE_AT_CACHING)) {
return;
}
if (f0.containsBlock(data.getBlockNumber())) {
data.setDone();
return;
}
BufferData.State state = data.getState();
BlockOperations.Operation op = ops.requestCaching(data.getBlockNumber());
Future<Void> blockFuture;
if (state == State.PREFETCHING) {
blockFuture = data.getActionFuture();
} else {
CompletableFuture<Void> cf = new CompletableFuture<>();
cf.complete(null);
blockFuture = cf;
}
CachePutTask task = new CachePutTask(data, blockFuture, this, Instant.now());
Future<Void> actionFuture = futurePool.executeFunction(task);
data.setCaching(actionFuture);
ops.end(op);
}
} | 3.26 |
hadoop_CachingBlockManager_numReadErrors_rdh | /**
* Number of errors encountered when reading.
*
* @return the number of errors encountered when reading.
*/
public int numReadErrors() {
return numReadErrors.get();
} | 3.26 |
hadoop_CachingBlockManager_numCachingErrors_rdh | /**
* Number of errors encountered when caching.
*
* @return the number of errors encountered when caching.
*/
public int numCachingErrors() {
return numCachingErrors.get();
} | 3.26 |
hadoop_CachingBlockManager_release_rdh | /**
* Releases resources allocated to the given block.
*
* @throws IllegalArgumentException
* if data is null.
*/
@Override
public void release(BufferData data) {
if (closed) {
return;
}
Validate.checkNotNull(data, "data");
BlockOperations.Operation op = ops.release(data.getBlockNumber());
bufferPool.release(data);
ops.end(op);
} | 3.26 |
hadoop_CachingBlockManager_get_rdh | /**
* Gets the block having the given {@code blockNumber}.
*
* @throws IllegalArgumentException
* if blockNumber is negative.
*/
@Override
public BufferData get(int blockNumber) throws IOException {
checkNotNegative(blockNumber, "blockNumber");
BufferData data;
final int maxRetryDelayMs = (bufferPoolSize * 120) * 1000;
final int statusUpdateDelayMs = 120 * 1000;
Retryer retryer = new Retryer(10, maxRetryDelayMs, statusUpdateDelayMs);
boolean v4;
do {
if (closed) {
throw new IOException("this stream is already closed");
}
data = bufferPool.acquire(blockNumber);
v4 = getInternal(data);
if (retryer.updateStatus()) {
LOG.warn("waiting to get block: {}", blockNumber);
LOG.info("state = {}", this.toString());
}
} while ((!v4) && retryer.continueRetry() );
if (v4) {
return data;
} else {
String message = String.format("Wait failed for get(%d)", blockNumber);
throw new IllegalStateException(message);
}
} | 3.26 |
hadoop_RPCUtil_getRemoteException_rdh | /**
* Returns an instance of {@link YarnException}.
*
* @param message
* yarn exception message.
* @return instance of YarnException.
 */
public static YarnException getRemoteException(String message) {
  return new YarnException(message);
} | 3.26 |
hadoop_TimelineEntityGroupId_getTimelineEntityGroupId_rdh | /**
* Get the <code>timelineEntityGroupId</code>.
*
* @return <code>timelineEntityGroupId</code>
*/
public String getTimelineEntityGroupId() {
return this.id;
} | 3.26 |
hadoop_DefaultCostProvider_getCost_rdh | /**
* Returns 1, regardless of the processing details.
*
* @param details
* Process details (ignored)
* @return 1
*/
@Override
public long getCost(ProcessingDetails details) {
return 1;
} | 3.26 |
hadoop_STSClientFactory_builder_rdh | /**
* Create the builder ready for any final configuration options.
* Picks up connection settings from the Hadoop configuration, including
* proxy secrets.
*
* @param conf
* AWS configuration.
* @param credentials
* AWS credential chain to use
* @param stsEndpoint
* optional endpoint "https://sns.us-west-1.amazonaws.com"
* @param stsRegion
* the region, e.g "us-west-1". Must be set if endpoint is.
* @param bucket
* bucket name
* @return the builder to call {@code build()}
* @throws IOException
* problem reading proxy secrets
*/
public static StsClientBuilder builder(final AwsCredentialsProvider credentials, final Configuration conf, final String stsEndpoint,
final String stsRegion, final String bucket) throws IOException {
final StsClientBuilder stsClientBuilder = StsClient.builder();
Preconditions.checkArgument(credentials != null, "No credentials");
final ClientOverrideConfiguration.Builder clientOverrideConfigBuilder = AWSClientConfig.createClientConfigBuilder(conf, AWS_SERVICE_IDENTIFIER_STS);
final ApacheHttpClient.Builder httpClientBuilder = AWSClientConfig.createHttpClientBuilder(conf);
final RetryPolicy.Builder retryPolicyBuilder = AWSClientConfig.createRetryPolicyBuilder(conf);
final ProxyConfiguration proxyConfig = AWSClientConfig.createProxyConfiguration(conf, bucket);
clientOverrideConfigBuilder.retryPolicy(retryPolicyBuilder.build());
httpClientBuilder.proxyConfiguration(proxyConfig);
stsClientBuilder.httpClientBuilder(httpClientBuilder).overrideConfiguration(clientOverrideConfigBuilder.build()).credentialsProvider(credentials);
boolean destIsStandardEndpoint = STS_STANDARD.equals(stsEndpoint);
if (isNotEmpty(stsEndpoint) && (!destIsStandardEndpoint)) {
Preconditions.checkArgument(isNotEmpty(stsRegion), "STS endpoint is set to %s but no signing region was provided", stsEndpoint);
LOG.debug("STS Endpoint={}; region='{}'", stsEndpoint, stsRegion);
stsClientBuilder.endpointOverride(getSTSEndpoint(stsEndpoint)).region(Region.of(stsRegion));
} else {
Preconditions.checkArgument(isEmpty(stsRegion), "STS signing region set set to %s but no STS endpoint specified", stsRegion);
}
return stsClientBuilder;
} | 3.26 |
hadoop_STSClientFactory_getSTSEndpoint_rdh | /**
* Given a endpoint string, create the endpoint URI.
*
* @param endpoint
* possibly null endpoint.
* @return an endpoint uri
*/
private static URI getSTSEndpoint(String endpoint) {
try {
return new URIBuilder().setScheme("https").setHost(endpoint).build();
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e);
}
} | 3.26 |
hadoop_STSClientFactory_requestRole_rdh | /**
* Request a set of role credentials.
*
* @param roleARN
* ARN to request
* @param sessionName
* name of the session
* @param policy
* optional policy; "" is treated as "none"
* @param duration
* duration of the credentials
* @param timeUnit
* time unit of duration
* @return the role result
* @throws IOException
* on a failure of the request
*/
@Retries.RetryTranslated
public Credentials requestRole(final String roleARN, final String sessionName, final String policy, final long duration, final TimeUnit timeUnit)
throws IOException {
LOG.debug("Requesting role {} with duration {}; policy = {}", roleARN, duration, policy);
AssumeRoleRequest.Builder requestBuilder = AssumeRoleRequest.builder().durationSeconds(((int) (timeUnit.toSeconds(duration)))).roleArn(roleARN).roleSessionName(sessionName);
if (isNotEmpty(policy)) {
requestBuilder.policy(policy);
}
return invoker.retry("request role credentials", "", true, () -> stsClient.assumeRole(requestBuilder.build()).credentials());
} | 3.26 |
hadoop_STSClientFactory_createClientConnection_rdh | /**
* Create an STS Client instance.
*
* @param stsClient
* STS instance
* @param invoker
* invoker to use
* @return an STS client bonded to that interface.
*/
public static STSClient createClientConnection(final StsClient stsClient, final Invoker invoker) {
return new STSClient(stsClient, invoker);
} | 3.26 |
hadoop_STSClientFactory_requestSessionCredentials_rdh | /**
* Request a set of session credentials.
*
* @param duration
* duration of the credentials
* @param timeUnit
* time unit of duration
* @return the role result
* @throws IOException
* on a failure of the request
*/
@Retries.RetryTranslated
public Credentials requestSessionCredentials(final long duration, final TimeUnit timeUnit) throws IOException {
int durationSeconds = ((int) (timeUnit.toSeconds(duration)));
LOG.debug("Requesting session token of duration {}", duration);
final GetSessionTokenRequest request =
    GetSessionTokenRequest.builder().durationSeconds(durationSeconds).build();
return invoker.retry("request session credentials", "", true, () -> {
  LOG.info("Requesting Amazon STS Session credentials");
return stsClient.getSessionToken(request).credentials();
});
} | 3.26 |
hadoop_SecureStorageInterfaceImpl_getLeaseCondition_rdh | /**
 * Return an access condition for this lease, or null if
 * there is no lease.
*/
private AccessCondition getLeaseCondition(SelfRenewingLease lease) {
AccessCondition leaseCondition = null;
if (lease != null) {
leaseCondition = AccessCondition.generateLeaseCondition(lease.getLeaseID());
}
return leaseCondition;
} | 3.26 |
hadoop_TaskManifest_createSerializer_rdh | /**
* Get a JSON serializer for this class.
*
* @return a serializer.
*/
@Override
public JsonSerialization<TaskManifest> createSerializer() {
return serializer();
} | 3.26 |
hadoop_TaskManifest_addDirectory_rdh | /**
* Add a directory to the list of directories to create.
*
* @param entry
* entry to add
*/
public void addDirectory(DirEntry entry) {
destDirectories.add(entry);
} | 3.26 |
hadoop_TaskManifest_getTotalFileSize_rdh | /**
* Calculate the total amount of data which will be committed.
*
* @return the sum of sizes of all files to commit.
*/
@JsonIgnore
public long getTotalFileSize() {
return filesToCommit.stream().mapToLong(FileEntry::getSize).sum();
} | 3.26 |
hadoop_TaskManifest_serializer_rdh | /**
* Create a JSON serializer for this class.
*
* @return a serializer.
*/
public static JsonSerialization<TaskManifest> serializer() {
return new JsonSerialization<>(TaskManifest.class, false, true);
} | 3.26 |
hadoop_TaskManifest_load_rdh | /**
* Load an instance from a file, then validate it.
* If loading through a listing; use this API so that filestatus
* hints can be used.
*
* @param serializer
* serializer.
* @param fs
* filesystem
* @param path
* path to load from
* @param status
* status of file to load
* @return the loaded instance
* @throws IOException
* IO failure/the data is invalid
*/
public static TaskManifest load(JsonSerialization<TaskManifest> serializer, FileSystem fs, Path path, FileStatus status) throws IOException {
LOG.debug("Reading Manifest in file {}", path);
return serializer.load(fs, path, status).validate();
} | 3.26 |
hadoop_TaskManifest_toJson_rdh | /**
* To JSON.
*
* @return json string value.
* @throws IOException
* failure
*/
public String toJson() throws IOException {
return serializer().toJson(this);
} | 3.26 |
hadoop_TaskManifest_validate_rdh | /**
* Validate the data: those fields which must be non empty, must be set.
*
* @throws IOException
* if the data is invalid
 * @return this instance.
 */
public TaskManifest validate() throws IOException {
verify(TYPE.equals(f0), "Wrong type: %s", f0);
verify(version == VERSION, "Wrong version: %s", version);
validateCollectionClass(extraData.keySet(), String.class);
validateCollectionClass(extraData.values(), String.class);
Set<String> destinations = new HashSet<>(filesToCommit.size());
validateCollectionClass(filesToCommit, FileEntry.class);
for (FileEntry c : filesToCommit) {
c.validate();
verify(!destinations.contains(c.getDest()), "Destination %s is written to by more than one pending commit", c.getDest());
destinations.add(c.getDest());
}
return this;
} | 3.26 |
hadoop_TaskManifest_getDestDirectories_rdh | /**
* All the destination directories.
*
* @return directory list.
*/
public List<DirEntry> getDestDirectories() {
return destDirectories;
} | 3.26 |
hadoop_RollingWindowAverage_addPoint_rdh | /**
* Add a new data point that just happened.
*
* @param value
* The value of the data point.
*/
public synchronized void addPoint(long value) {
currentPoints.offer(new DataPoint(new Date(), value));
cleanupOldPoints();
} | 3.26 |
hadoop_RollingWindowAverage_getCurrentAverage_rdh | /**
* Get the current average.
*
* @return The current average.
*/
public synchronized long getCurrentAverage() {
cleanupOldPoints();
if (currentPoints.isEmpty()) {
return 0;
}
long sum = 0;
for (DataPoint current : currentPoints) {
  sum += current.getValue();
}
return sum / currentPoints.size();
} | 3.26 |
hadoop_RollingWindowAverage_cleanupOldPoints_rdh | /**
* Clean up points that don't count any more (are before our
* rolling window) from our current queue of points.
*/
private void cleanupOldPoints() {
Date cutoffTime = new Date(new Date().getTime() - windowSizeMs);
while ((!currentPoints.isEmpty()) && currentPoints.peekFirst().getEventTime().before(cutoffTime)) {
currentPoints.removeFirst();
}
} | 3.26 |
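
For readers who want to work with these rows programmatically rather than skim the preview, the sketch below shows one way to load a dataset with this `name` / `code_snippet` / `score` schema using the Hugging Face `datasets` library. The repository id `your-org/hadoop-method-snippets` and the 3.47 score threshold are placeholders I introduce for illustration only; treat this as a minimal sketch, not the canonical loading code for this dataset.

```python
# Minimal sketch: load a dataset with columns name / code_snippet / score
# and keep only the higher-scored snippets. The repo id is a placeholder.
from datasets import load_dataset

ds = load_dataset("your-org/hadoop-method-snippets", split="train")  # hypothetical repo id

# Keep rows whose score falls in the upper half of the observed 3.26-3.68 range.
high_scoring = ds.filter(lambda row: row["score"] > 3.47)

# Print a few of the remaining rows: method name, score, and a preview of the code.
for row in high_scoring.select(range(min(3, len(high_scoring)))):
    print(row["name"], row["score"])
    print(row["code_snippet"][:200])  # first 200 characters of the snippet
```

The same filter-then-inspect pattern works for any numeric column; only the column names above are taken from the preview table itself.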