name | code_snippet | score |
---|---|---|
hadoop_ZStandardCompressor_reinit_rdh | /**
* Prepare the compressor to be used in a new stream with settings defined in
* the given Configuration. It will reset the compressor's compression level
* and compression strategy.
*
* @param conf
* Configuration storing new settings
*/
@Override
public void reinit(Configuration conf) {
if (conf == null) {
return;
}
level = ZStandardCodec.getCompressionLevel(conf);
reset();
LOG.debug("Reinit compressor with new compression configuration");
} | 3.26 |
hadoop_ZStandardCompressor_getBytesRead_rdh | /**
* <p>Returns the total number of uncompressed bytes input so far.</p>
*
* @return the total (non-negative) number of uncompressed bytes input so far
*/
@Override
public long getBytesRead() {
checkStream();
return bytesRead;
} | 3.26 |
hadoop_ZStandardCompressor_getBytesWritten_rdh | /**
* Returns the total number of compressed bytes output so far.
*
* @return the total (non-negative) number of compressed bytes output so far
*/
@Override
public long getBytesWritten() {
checkStream();
return bytesWritten;
} | 3.26 |
hadoop_PrintJarMainClass_main_rdh | /**
*
* @param args
* args.
*/
public static void main(String[] args) {
try (JarFile jar_file = new JarFile(args[0])) {
Manifest manifest = jar_file.getManifest();
if (manifest != null) {
String value = manifest.getMainAttributes().getValue("Main-Class");
if (value != null) {
System.out.println(value.replaceAll("/", "."));
return;
}
}
} catch (Throwable e) {
// ignore it
}
System.out.println("UNKNOWN");
System.exit(1);
} | 3.26 |
hadoop_NMTokenSecretManagerInRM_activateNextMasterKey_rdh | /**
* Activate the new master-key
*/
@Private
public void activateNextMasterKey() {
super.writeLock.lock();
try {
LOG.info("Activating next master key with id: " + this.nextMasterKey.getMasterKey().getKeyId());
this.currentMasterKey = this.nextMasterKey;
this.nextMasterKey = null;
clearApplicationNMTokenKeys();
} finally {
super.writeLock.unlock();
}
} | 3.26 |
hadoop_NMTokenSecretManagerInRM_rollMasterKey_rdh | /**
* Creates a new master-key and sets it as the primary.
*/
@Private
public void rollMasterKey() {
super.writeLock.lock();
try {
LOG.info("Rolling master-key for nm-tokens");
if (this.currentMasterKey == null) {
// Setting up for the first time.
this.currentMasterKey = createNewMasterKey();
} else {
this.nextMasterKey = createNewMasterKey();
LOG.info("Going to activate master-key with key-id " + this.nextMasterKey.getMasterKey().getKeyId() + " in " + this.activationDelay + "ms");
this.timer.schedule(new NextKeyActivator(), this.activationDelay);
}
} finally {
super.writeLock.unlock();
}
} | 3.26 |
hadoop_NMTokenSecretManagerInRM_removeNodeKey_rdh | /**
* This is to be called when a NodeManager reconnects or goes down. This will
* remove the NMTokens, if present, for any running application from the cache.
*
* @param nodeId
* Node Id.
*/
public void removeNodeKey(NodeId nodeId) {
this.writeLock.lock();
try {
Iterator<HashSet<NodeId>> appNodeKeySetIterator = this.appAttemptToNodeKeyMap.values().iterator();
while (appNodeKeySetIterator.hasNext()) {
appNodeKeySetIterator.next().remove(nodeId);
}
} finally {
this.writeLock.unlock();
}
} | 3.26 |
hadoop_TagAddProcessor_m0_rdh | /**
* This processor will add the tag to application submission context.
*/class TagAddProcessor implements ContextProcessor {
@Override
public void m0(String host, String value, ApplicationId applicationId, ApplicationSubmissionContext submissionContext) {
Set<String> applicationTags = submissionContext.getApplicationTags();
if (applicationTags == null) {
applicationTags = new HashSet<>();
} else {
applicationTags = new HashSet<>(applicationTags);
}
applicationTags.add(value);
submissionContext.setApplicationTags(applicationTags);
} | 3.26 |
hadoop_RouterFedBalance_setTrashOpt_rdh | /**
* Specify the trash behaviour of the source path.
*
* @param value
* the trash option.
*/
public Builder setTrashOpt(TrashOption value) {
this.trashOpt = value;
return this;
} | 3.26 |
hadoop_RouterFedBalance_setBandWidth_rdh | /**
* Specify bandwidth per map in MB.
*
* @param value
* the bandwidth.
*/
public Builder setBandWidth(int value) {
this.f0 = value;
return this;
} | 3.26 |
hadoop_RouterFedBalance_setForceCloseOpen_rdh | /**
* Whether to force close all open files when there is no diff.
*
* @param value
* true to force close all open files.
*/
public Builder setForceCloseOpen(boolean value) {
this.forceCloseOpen = value;
return this;
} | 3.26 |
hadoop_RouterFedBalance_setDiffThreshold_rdh | /**
* Specify the threshold of diff entries.
*
* @param value
* the threshold of a fast distcp.
*/
public Builder setDiffThreshold(int value) {
this.diffThreshold = value;
return this;
} | 3.26 |
hadoop_RouterFedBalance_build_rdh | /**
* Build the balance job.
*/
public BalanceJob build() throws IOException {
// Construct job context.
FedBalanceContext context;
Path dst = new Path(inputDst);
if (dst.toUri().getAuthority() == null) {
throw new IOException("The destination cluster must be specified.");
}
Path src = getSrcPath(inputSrc);
String mount = inputSrc;
context = new FedBalanceContext.Builder(src, dst, mount, getConf()).setForceCloseOpenFiles(forceCloseOpen).setUseMountReadOnly(true).setMapNum(map).setBandwidthLimit(f0).setTrash(trashOpt).setDelayDuration(delayDuration).setDiffThreshold(diffThreshold).build();
LOG.info(context.toString());
// Construct the balance job.
BalanceJob.Builder<BalanceProcedure> builder = new BalanceJob.Builder<>();
RouterDistCpProcedure dcp = new RouterDistCpProcedure(DISTCP_PROCEDURE, null, delayDuration, context);
builder.nextProcedure(dcp);
MountTableProcedure mtp = new MountTableProcedure(MOUNT_TABLE_PROCEDURE, null, delayDuration, inputSrc, dst.toUri().getPath(), dst.toUri().getAuthority(), getConf());
builder.nextProcedure(mtp);
TrashProcedure tp = new TrashProcedure(TRASH_PROCEDURE, null, delayDuration, context);
builder.nextProcedure(tp);
return builder.build();
} | 3.26 |
hadoop_RouterFedBalance_submit_rdh | /**
* Start a ProcedureScheduler and submit the job.
*
* @param command
* the command options.
* @param inputSrc
* the source input. This specifies the source path.
* @param inputDst
* the dst input. This specifies the dst path.
*/
private int submit(CommandLine command, String inputSrc, String inputDst) throws IOException {
Builder builder = new Builder(inputSrc, inputDst);
// parse options.
builder.setForceCloseOpen(command.hasOption(FORCE_CLOSE_OPEN.getOpt()));
if (command.hasOption(MAP.getOpt())) {
builder.setMap(Integer.parseInt(command.getOptionValue(MAP.getOpt())));
}
if (command.hasOption(BANDWIDTH.getOpt())) {
builder.setBandWidth(Integer.parseInt(command.getOptionValue(BANDWIDTH.getOpt())));
}
if (command.hasOption(DELAY_DURATION.getOpt())) {
builder.setDelayDuration(Long.parseLong(command.getOptionValue(DELAY_DURATION.getOpt())));
}
if (command.hasOption(DIFF_THRESHOLD.getOpt())) {
builder.setDiffThreshold(Integer.parseInt(command.getOptionValue(DIFF_THRESHOLD.getOpt())));
}
if (command.hasOption(TRASH.getOpt())) {
String val = command.getOptionValue(TRASH.getOpt());
if (val.equalsIgnoreCase("skip")) {
builder.setTrashOpt(TrashOption.SKIP);
} else if (val.equalsIgnoreCase("trash")) {
builder.setTrashOpt(TrashOption.TRASH);
} else if (val.equalsIgnoreCase("delete")) {
builder.setTrashOpt(TrashOption.DELETE);
} else {
printUsage();
return -1;
}
}
// Submit the job.
BalanceProcedureScheduler scheduler = new BalanceProcedureScheduler(getConf());
scheduler.init(false);
try {
BalanceJob balanceJob = builder.build();
// Submit and wait until the job is done.
scheduler.submit(balanceJob);
scheduler.waitUntilDone(balanceJob);
} catch (IOException e) {
LOG.error("Submit balance job failed.", e);
return -1;
} finally {
scheduler.shutDown();
}
return 0;
} | 3.26 |
hadoop_RouterFedBalance_getSrcPath_rdh | /**
* Get src uri from Router.
*/
private Path getSrcPath(String fedPath) throws IOException {
String address = getConf().getTrimmed(RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY, RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_DEFAULT);
InetSocketAddress routerSocket = NetUtils.createSocketAddr(address);
RouterClient rClient = new RouterClient(routerSocket, getConf());
try {
MountTableManager mountTable = rClient.getMountTableManager();
MountTable entry = MountTableProcedure.getMountEntry(fedPath, mountTable);
if (entry == null) {
throw new IllegalArgumentException("The mount point doesn't exist. path=" + fedPath);
} else if (entry.getDestinations().size() > 1) {
throw new IllegalArgumentException("The mount point has more than one destination. path=" + fedPath);
} else {
String ns = entry.getDestinations().get(0).getNameserviceId();
String path = entry.getDestinations().get(0).getDest();
return new Path(("hdfs://" + ns) + path);
}
} finally {
rClient.close();
}
} | 3.26 |
hadoop_RouterFedBalance_main_rdh | /**
* Main function of the RouterFedBalance program. Parses the input arguments
* and invokes the RouterFedBalance::run() method, via the ToolRunner.
*
* @param argv
* Command-line arguments sent to RouterFedBalance.
*/
public static void main(String[] argv) {
Configuration conf = getDefaultConf();
RouterFedBalance fedBalance = new RouterFedBalance();
fedBalance.setConf(conf);
int exitCode;
try {
exitCode = ToolRunner.run(fedBalance, argv);
} catch (Exception e) {
LOG.warn("Couldn't complete RouterFedBalance operation.", e);
exitCode = -1;
}
System.exit(exitCode);
} | 3.26 |
hadoop_RouterFedBalance_continueJob_rdh | /**
* Recover and continue the unfinished jobs.
*/
private int continueJob() throws InterruptedException {
BalanceProcedureScheduler scheduler = new BalanceProcedureScheduler(getConf());
try {
scheduler.init(true);
while (true) {
Collection<BalanceJob> jobs = scheduler.getAllJobs();
int unfinished = 0;
for (BalanceJob job : jobs) {
if (!job.isJobDone()) {
unfinished++;
}
LOG.info(job.toString());
}
if (unfinished == 0) {
break;
}
Thread.sleep(TimeUnit.SECONDS.toMillis(10));
}
} catch (IOException e) {
LOG.error("Continue balance job failed.", e); return -1;
} finally {
scheduler.shutDown();
}
return 0;
} | 3.26 |
hadoop_RouterFedBalance_setDelayDuration_rdh | /**
* Specify the duration (in milliseconds) to wait before the procedure retries.
*
* @param value
* the delay duration of the job.
*/
public Builder setDelayDuration(long value) {
this.delayDuration = value;
return this;
} | 3.26 |
hadoop_QueueCapacityConfigParser_parse_rdh | /**
* Creates a {@code QueueCapacityVector} parsed from the capacity configuration
* property set for a queue.
*
* @param capacityString
* capacity string to parse
* @param queuePath
* queue for which the capacity property is parsed
* @return a parsed capacity vector
*/
public QueueCapacityVector parse(String capacityString, String queuePath) {
if (queuePath.equals(CapacitySchedulerConfiguration.ROOT)) {
return QueueCapacityVector.of(100.0F, ResourceUnitCapacityType.PERCENTAGE);
}
if (capacityString == null) {
return new QueueCapacityVector();
}
// Trim all spaces from capacity string
capacityString = capacityString.replaceAll(" ", "");
for (Parser parser : parsers) {
Matcher matcher = parser.regex.matcher(capacityString);
if (matcher.find()) {
return parser.parser.apply(matcher);
}
}
return new QueueCapacityVector();
} | 3.26 |
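As a hedged illustration of the capacity syntaxes handled by the parsers above, the sketch below assumes QueueCapacityConfigParser has a no-argument constructor and that "w" is the weight suffix; the queue paths are placeholders, not confirmed by these snippets.

```java
// Illustrative only: constructor, weight suffix and queue paths are assumptions.
QueueCapacityConfigParser parser = new QueueCapacityConfigParser();

// Uniform percentage capacity (no suffix needed, per uniformParser).
QueueCapacityVector pct = parser.parse("50", "root.default");

// Uniform weight capacity, assuming the weight postfix is "w".
QueueCapacityVector weight = parser.parse("2w", "root.analytics");

// Bracketed resource capacity, per heterogeneousParser's "[key=value,...]" syntax.
QueueCapacityVector absolute = parser.parse("[memory=4096,vcores=2]", "root.batch");
```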
hadoop_QueueCapacityConfigParser_heterogeneousParser_rdh | /**
* A parser method that is usable on resource capacity values e.g. mixed or
* absolute resource.
*
* @param matcher
* a regex matcher that contains the matched resource string
* @return a parsed capacity vector
*/
private QueueCapacityVector heterogeneousParser(Matcher matcher) {
QueueCapacityVector capacityVector = QueueCapacityVector.newInstance();
/* Absolute resource configuration for a queue will be grouped by "[]".
Syntax of absolute resource config could be like below
"memory=4Gi vcores=2". Ideally this means "4GB of memory and 2 vcores".
*/
// Get the sub-group.
String bracketedGroup = matcher.group(0);
// Get the string inside starting and closing []
bracketedGroup = bracketedGroup.substring(1, bracketedGroup.length() - 1);
// Split by comma and equals delimiter eg. the string memory=1024,vcores=6
// is converted to an array of array as {{memory,1024}, {vcores, 6}}
for (String kvPair : bracketedGroup.trim().split(",")) {
String[] splits = kvPair.split("=");
// Ensure that each sub string is key value pair separated by '='.
if (splits.length > 1) {
setCapacityVector(capacityVector, splits[0], splits[1]);
}
}
// Memory always has to be defined
if (capacityVector.getMemory() == 0L) {
return new QueueCapacityVector();
}
return capacityVector;
} | 3.26 |
hadoop_QueueCapacityConfigParser_uniformParser_rdh | /**
* A parser method that is usable on uniform capacity values e.g. percentage or
* weight.
*
* @param matcher
* a regex matcher that contains parsed value and its possible
* suffix
* @return a parsed capacity vector
*/
private QueueCapacityVector uniformParser(Matcher matcher) {
ResourceUnitCapacityType capacityType = null;
String value = matcher.group(1);
if (matcher.groupCount() == 2) {
String matchedSuffix = matcher.group(2);
for (ResourceUnitCapacityType suffix : ResourceUnitCapacityType.values()) {
// Absolute uniform syntax is not supported
if (suffix.equals(ResourceUnitCapacityType.ABSOLUTE)) {
continue;
}
// when capacity is given in percentage, we do not need % symbol
String v6 = suffix.getPostfix().replaceAll("%", "");
if (v6.equals(matchedSuffix)) {
capacityType = suffix;
}
}
}
if (capacityType == null) {
return new QueueCapacityVector();
}
return QueueCapacityVector.of(Float.parseFloat(value), capacityType);
} | 3.26 |
hadoop_StripedDataStreamer_getFollowingBlock_rdh | /**
* The upper level DFSStripedOutputStream will allocate the new block group.
* Each striped data streamer only needs to fetch from the queue, which
* should already be ready.
*/
private LocatedBlock getFollowingBlock() throws IOException {
if (!this.isHealthy()) {
// No internal block for this streamer, maybe no enough healthy DN.
// Throw the exception which has been set by the StripedOutputStream.
this.getLastException().check(false);
}
return coordinator.getFollowingBlocks().poll(index);
} | 3.26 |
hadoop_RMActiveServiceContext_getTokenSequenceNo_rdh | /**
* Get token sequence no.
*
* @return the tokenSequenceNo
*/
public Long getTokenSequenceNo() {
return tokenSequenceNo.get();
} | 3.26 |
hadoop_RMActiveServiceContext_incrTokenSequenceNo_rdh | /**
* Increment token sequence no.
*/
public void incrTokenSequenceNo() {
this.tokenSequenceNo.incrementAndGet();
} | 3.26 |
hadoop_DecodingValidator_validate_rdh | /**
* Validate outputs decoded from inputs, by decoding an input back from
* those outputs and comparing it with the original one.
*
* @param inputs
* input buffers used for decoding
* @param erasedIndexes
* indexes of erased units used for decoding
* @param outputs
* decoded output buffers
* @throws IOException
* raised on errors performing I/O.
*/
public void validate(ECChunk[] inputs, int[] erasedIndexes, ECChunk[] outputs) throws IOException {
ByteBuffer[] newInputs = CoderUtil.toBuffers(inputs);
ByteBuffer[] newOutputs = CoderUtil.toBuffers(outputs);
validate(newInputs, erasedIndexes, newOutputs);
} | 3.26 |
hadoop_RMContainerTokenSecretManager_activateNextMasterKey_rdh | /**
* Activate the new master-key
*/
@Private
public void activateNextMasterKey() {
super.writeLock.lock();
try {
LOG.info("Activating next master key with id: " + this.f0.getMasterKey().getKeyId());
this.currentMasterKey = this.f0;
this.f0 = null;
} finally {
super.writeLock.unlock();
}
} | 3.26 |
hadoop_RMContainerTokenSecretManager_rollMasterKey_rdh | /**
* Creates a new master-key and sets it as the primary.
*/
@Private
public void rollMasterKey() {
super.writeLock.lock();
try {
LOG.info("Rolling master-key for container-tokens");
if (this.currentMasterKey == null) {
// Setting up for the first time.
this.currentMasterKey = createNewMasterKey();
} else {
this.f0 = createNewMasterKey();
LOG.info("Going to activate master-key with key-id " + this.f0.getMasterKey().getKeyId() + " in " + this.activationDelay + "ms");
this.timer.schedule(new NextKeyActivator(), this.activationDelay);
}
} finally {
super.writeLock.unlock();
}
} | 3.26 |
hadoop_RMContainerTokenSecretManager_createContainerToken_rdh | /**
* Helper function for creating ContainerTokens.
*
* @param containerId
* Container Id
* @param containerVersion
* Container version
* @param nodeId
* Node Id
* @param appSubmitter
* App Submitter
* @param capability
* Capability
* @param priority
* Priority
* @param createTime
* Create Time
* @param logAggregationContext
* Log Aggregation Context
* @param nodeLabelExpression
* Node Label Expression
* @param containerType
* Container Type
* @param execType
* Execution Type
* @param allocationRequestId
* allocationRequestId
* @param allocationTags
* allocation Tags
* @return the container-token
*/
public Token createContainerToken(ContainerId containerId, int containerVersion, NodeId nodeId, String appSubmitter, Resource capability, Priority priority, long createTime, LogAggregationContext logAggregationContext, String nodeLabelExpression, ContainerType containerType, ExecutionType execType, long allocationRequestId, Set<String> allocationTags) {
byte[] password;
ContainerTokenIdentifier tokenIdentifier;
long expiryTimeStamp = System.currentTimeMillis() + containerTokenExpiryInterval;
// Lock so that we use the same MasterKey's keyId and its bytes
this.readLock.lock();
try {
tokenIdentifier = new ContainerTokenIdentifier(containerId, containerVersion, nodeId.toString(), appSubmitter, capability, expiryTimeStamp, this.currentMasterKey.getMasterKey().getKeyId(), ResourceManager.getClusterTimeStamp(), priority, createTime, logAggregationContext,
nodeLabelExpression, containerType, execType, allocationRequestId, allocationTags);
password = this.createPassword(tokenIdentifier);
} finally {
this.readLock.unlock();
}
return BuilderUtils.newContainerToken(nodeId, password, tokenIdentifier);
} | 3.26 |
hadoop_ProtoBase_hashCode_rdh | // TODO Force a comparator?
@Override
public int hashCode() {
return getProto().hashCode();
} | 3.26 |
hadoop_DistributedSQLCounter_selectCounterValue_rdh | /**
* Obtains the value of the counter.
*
* @return counter value.
* @throws SQLException
* if querying the database fails.
*/
public int selectCounterValue() throws SQLException {
try (Connection connection = connectionFactory.getConnection()) {
return selectCounterValue(false, connection);
}
} | 3.26 |
hadoop_DistributedSQLCounter_updateCounterValue_rdh | /**
* Sets the counter to the given value.
*
* @param value
* Value to assign to counter.
* @param connection
* Connection to database hosting the counter table.
* @throws SQLException
* if querying the database fails.
*/
public void updateCounterValue(int value, Connection connection) throws SQLException {
String queryText = String.format("UPDATE %s SET %s = ?", table, field);
LOG.debug((("Update counter statement: " +
queryText) + ". Value: ") + value);
try (PreparedStatement statement = connection.prepareStatement(queryText)) {
statement.setInt(1, value);
statement.execute();
}
} | 3.26 |
hadoop_DistributedSQLCounter_incrementCounterValue_rdh | /**
* Increments the counter by the given amount and
* returns the previous counter value.
*
* @param amount
* Amount to increase the counter.
* @return Previous counter value.
* @throws SQLException
* if querying the database fails.
*/
public int incrementCounterValue(int amount) throws SQLException {
// Disabling auto-commit to ensure that all statements on this transaction
// are committed at once.
try (Connection connection = connectionFactory.getConnection(false)) {
// Preventing dirty reads and non-repeatable reads to ensure that the
// value read will not be updated by a different connection.
if (connection.getTransactionIsolation() < Connection.TRANSACTION_REPEATABLE_READ) {
connection.setTransactionIsolation(Connection.TRANSACTION_REPEATABLE_READ);
}
try {
// Reading the counter value "FOR UPDATE" to lock the value record,
// forcing other connections to wait until this transaction is committed.
int lastValue = selectCounterValue(true, connection);
// Calculate the new counter value and handling overflow by
// resetting the counter to 0.
int newValue = lastValue + amount;
if (newValue < 0) {
lastValue = 0;
newValue = amount;
}
updateCounterValue(newValue, connection);
connection.commit();
return lastValue;
} catch (Exception e) {
// Rollback transaction to release table locks
connection.rollback();
throw e;
}
}
} | 3.26 |
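The increment above is a SELECT ... FOR UPDATE pattern inside a single transaction. Below is a minimal, self-contained plain-JDBC sketch of that same pattern, not the Hadoop class itself; the table and column names are hypothetical placeholders.

```java
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

public final class CounterIncrementSketch {
  // Atomically increments a single-row counter table. Table/column names are illustrative.
  static int incrementBy(Connection conn, int amount) throws SQLException {
    conn.setAutoCommit(false);                       // commit the read and write together
    conn.setTransactionIsolation(Connection.TRANSACTION_REPEATABLE_READ);
    try {
      int last;
      // Lock the counter row so concurrent writers queue behind this transaction.
      try (PreparedStatement select = conn.prepareStatement(
               "SELECT counter_value FROM counters FOR UPDATE");
           ResultSet rs = select.executeQuery()) {
        rs.next();
        last = rs.getInt(1);
      }
      int next = last + amount;
      if (next < 0) {                                // handle overflow by wrapping to 0
        last = 0;
        next = amount;
      }
      try (PreparedStatement update = conn.prepareStatement(
               "UPDATE counters SET counter_value = ?")) {
        update.setInt(1, next);
        update.execute();
      }
      conn.commit();
      return last;                                   // previous value, as in the snippet above
    } catch (SQLException e) {
      conn.rollback();                               // release row locks on failure
      throw e;
    }
  }
}
```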
hadoop_PoolAlignmentContext_updateRequestState_rdh | /**
* Client side implementation for routers to provide state info in requests to
* namenodes.
*/
@Override
public void updateRequestState(RpcHeaderProtos.RpcRequestHeaderProto.Builder header) {
header.setStateId(poolLocalStateId.get());
} | 3.26 |
hadoop_PoolAlignmentContext_receiveResponseState_rdh | /**
* Router updates a globally shared value using response from
* namenodes.
*/
@Override
public void receiveResponseState(RpcHeaderProtos.RpcResponseHeaderProto header) {
sharedGlobalStateId.accumulate(header.getStateId());
} | 3.26 |
hadoop_Verifier_writeFlavorAndVerifier_rdh | /**
* Write AuthFlavor and the verifier to the XDR.
*
* @param verifier
* written to XDR
* @param xdr
* XDR message
*/
public static void writeFlavorAndVerifier(Verifier verifier, XDR xdr) {
if (verifier instanceof VerifierNone) {
xdr.writeInt(AuthFlavor.AUTH_NONE.getValue());
} else if (verifier instanceof VerifierGSS) {
xdr.writeInt(AuthFlavor.RPCSEC_GSS.getValue());
} else {
throw new UnsupportedOperationException("Cannot recognize the verifier");
}
verifier.write(xdr);
} | 3.26 |
hadoop_Verifier_readFlavorAndVerifier_rdh | /**
* Read both AuthFlavor and the verifier from the XDR.
*
* @param xdr
* XDR message
* @return verifier
*/
public static Verifier readFlavorAndVerifier(XDR xdr) {
AuthFlavor flavor = AuthFlavor.fromValue(xdr.readInt());
final Verifier verifer;
if (flavor == AuthFlavor.AUTH_NONE) {
verifer = new VerifierNone();
} else if (flavor == AuthFlavor.AUTH_SYS) {
// Added in HADOOP-15307 based on HDFS-5085:
// When the auth flavor is AUTH_SYS, the corresponding verifier is
// AUTH_NONE. I.e., it is impossible to have a verifier with auth
// flavor AUTH_SYS.
verifer = new VerifierNone();
} else if (flavor == AuthFlavor.RPCSEC_GSS) {
verifer = new VerifierGSS();
} else {
throw new UnsupportedOperationException("Unsupported verifier flavor: " + flavor);
}
verifer.read(xdr);
return verifer;
} | 3.26 |
hadoop_ReplicaInfo_getBytesReserved_rdh | /**
* Number of bytes reserved for this replica on disk.
*/
public long getBytesReserved() {
return 0;
} | 3.26 |
hadoop_ReplicaInfo_toString_rdh | // Object
@Override
public String toString() {
return getClass().getSimpleName() + ", " + super.toString() + ", " + getState() + "\n getNumBytes() = " + getNumBytes() + "\n getBytesOnDisk() = " + getBytesOnDisk() + "\n getVisibleLength()= " + getVisibleLength() + "\n getVolume() = " + getVolume() + "\n getBlockURI() = " + getBlockURI();
} | 3.26 |
hadoop_ReplicaInfo_getStorageUuid_rdh | /**
* Get the storageUuid of the volume that stores this replica.
*/
@Override
public String getStorageUuid() {
return volume.getStorageID();
} | 3.26 |
hadoop_ReplicaInfo_getFileIoProvider_rdh | /**
* Get the {@link FileIoProvider} for disk IO operations.
*/
public FileIoProvider getFileIoProvider() {
// In tests and when invoked via FsDatasetUtil#computeChecksum, the
// target volume for this replica may be unknown and hence null.
// Use the DEFAULT_FILE_IO_PROVIDER with no-op hooks.
return volume != null ? volume.getFileIoProvider() : DEFAULT_FILE_IO_PROVIDER;
} | 3.26 |
hadoop_ReplicaInfo_getVolume_rdh | /**
*
* @return the volume where this replica is located on disk
*/
public FsVolumeSpi getVolume() {
return volume;
} | 3.26 |
hadoop_ReplicaInfo_setVolume_rdh | /**
* Set the volume where this replica is located on disk.
*/
void setVolume(FsVolumeSpi vol) {
this.volume = vol;
} | 3.26 |
hadoop_ReplicaInfo_getOriginalBytesReserved_rdh | /**
* Number of bytes originally reserved for this replica. The actual
* reservation is adjusted as data is written to disk.
*
* @return the number of bytes originally reserved for this replica.
*/
public long getOriginalBytesReserved() {
return 0;
} | 3.26 |
hadoop_ArrayFile_next_rdh | /**
* Read and return the next value in the file.
*
* @param value
* value.
* @throws IOException
* raised on errors performing I/O.
* @return Writable.
*/
public synchronized Writable next(Writable value) throws IOException {
return next(f0, value) ? value : null;
} | 3.26 |
hadoop_ArrayFile_seek_rdh | /**
* Positions the reader before its <code>n</code>th value.
*
* @param n
* n key.
* @throws IOException
* raised on errors performing I/O.
*/
public synchronized void seek(long n) throws IOException {
f0.set(n);
seek(f0);
} | 3.26 |
hadoop_ArrayFile_append_rdh | /**
* Append a value to the file.
*
* @param value
* value.
* @throws IOException
* raised on errors performing I/O.
*/
public synchronized void append(Writable value) throws IOException {
super.append(count, value);
// add to map
count.set(count.get() + 1);// increment count
} | 3.26 |
hadoop_ArrayFile_key_rdh | /**
* Returns the key associated with the most recent call to {@link #seek(long)}, {@link #next(Writable)}, or {@link #get(long,Writable)}.
*
* @return key key.
* @throws IOException
* raised on errors performing I/O.
*/
public synchronized long key() throws IOException {
return f0.get();
} | 3.26 |
hadoop_ArrayFile_get_rdh | /**
* Return the <code>n</code>th value in the file.
*
* @param n
* n key.
* @param value
* value.
* @throws IOException
* raised on errors performing I/O.
* @return writable.
*/
public synchronized Writable get(long n, Writable value) throws IOException {
f0.set(n);
return get(f0, value);
} | 3.26 |
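A short usage sketch tying the ArrayFile reader and writer methods above together. Construction of the reader and writer is not shown in these snippets, so the open* helpers below are placeholders.

```java
// Hypothetical pre-opened handles; how they are constructed is outside these snippets.
ArrayFile.Writer writer = openWriterSomehow();   // assumption: obtained elsewhere
ArrayFile.Reader reader = openReaderSomehow();   // assumption: obtained elsewhere

// Append values; the writer assigns consecutive long keys (assumed to start at 0).
writer.append(new Text("first"));
writer.append(new Text("second"));
writer.close();

// Random access by index.
Text value = new Text();
reader.get(1, value);          // reads "second"

// Sequential access from a position.
reader.seek(0);
while (reader.next(value) != null) {
  System.out.println(reader.key() + " -> " + value);
}
reader.close();
```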
hadoop_WordStandardDeviation_reduce_rdh | /**
* Sums all the individual values within the iterator and writes them to the
* same key.
*
* @param key
This will be one of 3 constants: LENGTH_STR, COUNT_STR, or
SQUARE_STR.
* @param values
* This will be an iterator of all the values associated with that
* key.
*/
public void reduce(Text key, Iterable<LongWritable> values, Context context) throws IOException, InterruptedException {
int sum = 0;
for (LongWritable value : values) {
sum += value.get();
}
val.set(sum);
context.write(key, val);
} | 3.26 |
hadoop_WordStandardDeviation_map_rdh | /**
* Emits 3 key-value pairs for counting the word, its length, and the
* squares of its length. Outputs are (Text, LongWritable).
*
* @param value
* This will be a line of text coming in from our input file.
*/
public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
StringTokenizer itr = new StringTokenizer(value.toString());
while (itr.hasMoreTokens()) {
String string = itr.nextToken();
this.wordLen.set(string.length());
// the square of an integer is an integer...
this.wordLenSq.set(((long) (Math.pow(string.length(), 2.0))));
context.write(LENGTH, this.wordLen);
context.write(SQUARE, this.wordLenSq);
context.write(COUNT, ONE);
}
} | 3.26 |
hadoop_WordStandardDeviation_readAndCalcStdDev_rdh | /**
* Reads the output file and parses the summation of lengths, the word count,
* and the lengths squared, to perform a quick calculation of the standard
* deviation.
*
* @param path
* The path to find the output file in. Set in main to the output
* directory.
* @throws IOException
* If it cannot access the output directory, we throw an exception.
*/
private double readAndCalcStdDev(Path path, Configuration conf) throws IOException {
FileSystem fs = FileSystem.get(conf);
Path file = new Path(path, "part-r-00000");
if (!fs.exists(file))
throw new IOException("Output not found!");
double v6 = 0;
BufferedReader br = null;
try {
br = new BufferedReader(new InputStreamReader(fs.open(file), StandardCharsets.UTF_8));
long count = 0;
long length = 0;
long square = 0;
String line;
while ((line = br.readLine()) != null) {
StringTokenizer st = new StringTokenizer(line);
// grab type
String type = st.nextToken();
// differentiate
if (type.equals(COUNT.toString())) {
String countLit = st.nextToken();
count = Long.parseLong(countLit);
} else if (type.equals(LENGTH.toString())) {
String lengthLit = st.nextToken();
length = Long.parseLong(lengthLit);
} else if (type.equals(SQUARE.toString())) {
String squareLit = st.nextToken();
square = Long.parseLong(squareLit);
}
}
// average = total sum / number of elements;
double v17 = ((double) (length)) / ((double) (count));
// standard deviation = sqrt((sum(lengths ^ 2)/count) - (mean ^ 2))
v17 = Math.pow(v17, 2.0);
double term = ((double) (square)) / ((double) (count));
v6 = Math.sqrt(term - v17);
System.out.println("The standard deviation is: " + v6);
} finally {
if (br != null) {
br.close();
}
}
return v6;
} | 3.26 |
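The calculation above is the population standard deviation, sqrt(E[x^2] - (E[x])^2). A small worked example with made-up totals mirrors the same arithmetic as readAndCalcStdDev:

```java
// Hypothetical totals, as if read from the reducer output:
long count = 4;                 // number of words
long length = 12;               // sum of word lengths, e.g. 2 + 3 + 3 + 4
long square = 38;               // sum of squared lengths: 4 + 9 + 9 + 16

double mean = (double) length / (double) count;          // 3.0
double meanOfSquares = (double) square / (double) count; // 9.5
double stddev = Math.sqrt(meanOfSquares - mean * mean);  // sqrt(0.5) ~ 0.707

System.out.println("The standard deviation is: " + stddev);
```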
hadoop_DirectBufferPool_returnBuffer_rdh | /**
* Return a buffer into the pool. After being returned,
* the buffer may be recycled, so the user must not
* continue to use it in any way.
*
* @param buf
* the buffer to return
*/
public void returnBuffer(ByteBuffer buf) {
buf.clear();// reset mark, limit, etc
int size = buf.capacity();
Queue<WeakReference<ByteBuffer>> list = buffersBySize.get(size);
if (list == null) {
list = new ConcurrentLinkedQueue<WeakReference<ByteBuffer>>();
Queue<WeakReference<ByteBuffer>> prev = buffersBySize.putIfAbsent(size, list);
// someone else put a queue in the map before we did
if (prev != null) {
list = prev;
}
}
list.add(new WeakReference<ByteBuffer>(buf));
} | 3.26 |
hadoop_DirectBufferPool_getBuffer_rdh | /**
* Allocate a direct buffer of the specified size, in bytes.
* If a pooled buffer is available, returns that. Otherwise
* allocates a new one.
*
* @param size
* size.
* @return ByteBuffer.
*/
public ByteBuffer getBuffer(int size) {
Queue<WeakReference<ByteBuffer>> list = buffersBySize.get(size);
if (list == null) {
// no available buffers for this size
return ByteBuffer.allocateDirect(size);
}
WeakReference<ByteBuffer> ref;
while ((ref = list.poll()) != null) {
ByteBuffer b = ref.get();
if (b != null) {
return b;
}
}
return ByteBuffer.allocateDirect(size);
} | 3.26 |
hadoop_DirectBufferPool_countBuffersOfSize_rdh | /**
* Return the number of available buffers of a given size.
* This is used only for tests.
*/
@VisibleForTesting
int countBuffersOfSize(int size) {
Queue<WeakReference<ByteBuffer>> list = buffersBySize.get(size);
if (list == null) {
return 0;
}
return list.size();
} | 3.26 |
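A brief get/return cycle for the pool, assuming DirectBufferPool exposes a no-argument constructor; only getBuffer and returnBuffer are confirmed by the snippets above.

```java
// Assumption: the pool can be constructed directly.
DirectBufferPool pool = new DirectBufferPool();

ByteBuffer buf = pool.getBuffer(4096);   // reuses a pooled 4 KB buffer if one is free
try {
  buf.put(new byte[]{1, 2, 3});
  buf.flip();
  // ... read from buf ...
} finally {
  pool.returnBuffer(buf);                // buffer may be recycled; do not touch it afterwards
}
```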
hadoop_HsCountersPage_content_rdh | /**
* The content of this page is the CountersBlock now.
*
* @return CountersBlock.class
*/
@Override
protected Class<? extends SubView> content() {
return CountersBlock.class;
} | 3.26 |
hadoop_JobMetaData_getRecurrenceId_rdh | /**
* Get {@link RecurrenceId}.
*
* @return {@link RecurrenceId}.
*/
public final RecurrenceId getRecurrenceId() {
return recurrenceId;
} | 3.26 |
hadoop_JobMetaData_setContainerEnd_rdh | /**
* Add container release time.
*
* @param containerId
* id of the container.
* @param time
* container release time.
* @return the reference to current {@link JobMetaData}.
*/
public final JobMetaData setContainerEnd(final String containerId, final long time) {
if (rawEnd.put(containerId, time) != null) {
LOGGER.warn("find duplicate container release time for {}, so we replace" + " it with {}.", containerId, time);
}
return this;
} | 3.26 |
hadoop_JobMetaData_getResourceSkyline_rdh | /**
* Get {@link ResourceSkyline}.
*
* @return {@link ResourceSkyline}.
*/
public final ResourceSkyline getResourceSkyline() {
return f0;
} | 3.26 |
hadoop_JobMetaData_setRecurrenceId_rdh | /**
* Set {@link RecurrenceId}.
*
* @param recurrenceIdConfig
* the {@link RecurrenceId}.
* @return the reference to current {@link JobMetaData}.
*/
public final JobMetaData setRecurrenceId(final RecurrenceId recurrenceIdConfig) {
this.recurrenceId = recurrenceIdConfig;
return this;
} | 3.26 |
hadoop_JobMetaData_setContainerStart_rdh | /**
* Add container launch time.
*
* @param containerId
* id of the container.
* @param time
* container launch time.
* @return the reference to current {@link JobMetaData}.
*/
public final JobMetaData setContainerStart(final String containerId, final long time) {
if (rawStart.put(containerId, time) != null) {
LOGGER.warn("find duplicate container launch time for {}, so we replace" + " it with {}.", containerId, time);
}
return this;
} | 3.26 |
hadoop_JobMetaData_setJobFinishTime_rdh | /**
* Set job finish time.
*
* @param jobFinishTimeConfig
* job finish time.
* @return the reference to current {@link JobMetaData}.
*/
public final JobMetaData setJobFinishTime(final long jobFinishTimeConfig) {
f0.setJobFinishTime(jobFinishTimeConfig);
return this;
} | 3.26 |
hadoop_JobMetaData_createSkyline_rdh | /**
* Normalized container launch/release time, and generate the
* {@link ResourceSkyline}.
*/
public final void createSkyline() {
final long jobSubmissionTime = f0.getJobSubmissionTime();
Resource containerSpec = f0.getContainerSpec();
final TreeMap<Long, Resource> resourceOverTime = new TreeMap<>();
final RLESparseResourceAllocation skylineList = new RLESparseResourceAllocation(resourceOverTime, new DefaultResourceCalculator());
f0.setSkylineList(skylineList);
if (containerSpec == null) {
// if RmParser fails to extract container resource spec from logs, we will
// statically set it to be <1core, 1GB>
containerSpec = Resource.newInstance(1024, 1);
}
f0.setContainerSpec(containerSpec);
for (final Map.Entry<String, Long> entry : rawStart.entrySet()) {
final long timeStart = entry.getValue();
final Long timeEnd = rawEnd.get(entry.getKey());
if (timeEnd == null) {
LOGGER.warn("container release time not found for {}.", entry.getKey());
} else {
final ReservationInterval riAdd = new ReservationInterval((timeStart - jobSubmissionTime) / 1000, (timeEnd - jobSubmissionTime) / 1000);
f0.getSkylineList().addInterval(riAdd, containerSpec);
}
}
} | 3.26 |
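Putting the JobMetaData setters and createSkyline together, a hedged sketch of the intended call sequence; the JobMetaData instance, container id and timestamps below are hypothetical, and construction of the object is not shown in these snippets.

```java
// Sketch only: jobMeta is assumed to be an already-constructed JobMetaData.
void recordJob(JobMetaData jobMeta) {
  long launch = 1_000_000L;          // container launch time (ms), illustrative
  long release = 1_060_000L;         // container release time (ms), illustrative

  jobMeta.setContainerStart("container_01", launch)
         .setContainerEnd("container_01", release)
         .setJobFinishTime(release);

  // Normalize times relative to job submission and build the skyline.
  jobMeta.createSkyline();
  ResourceSkyline skyline = jobMeta.getResourceSkyline();
}
```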
hadoop_S3AReadOpContext_withInputPolicy_rdh | /**
* Set builder value.
*
* @param value
* new value
* @return the builder
*/
public S3AReadOpContext withInputPolicy(final S3AInputPolicy value) {
inputPolicy = value;
return this;
} | 3.26 |
hadoop_S3AReadOpContext_getReadahead_rdh | /**
* Get the readahead for this operation.
*
* @return a value {@literal >=} 0
*/
public long getReadahead() {
return readahead;
} | 3.26 |
hadoop_S3AReadOpContext_getPath_rdh | /**
* Get the path of this read.
*
* @return path.
*/
public Path getPath() {
return path;
} | 3.26 |
hadoop_S3AReadOpContext_getFuturePool_rdh | /**
* Gets the {@code ExecutorServiceFuturePool} used for asynchronous prefetches.
*
* @return the {@code ExecutorServiceFuturePool} used for asynchronous prefetches.
*/
public ExecutorServiceFuturePool getFuturePool() {
return this.futurePool;
} | 3.26 |
hadoop_S3AReadOpContext_getVectoredIOContext_rdh | /**
* Get the vectored IO context for this read op.
*
* @return vectored IO context.
*/
public VectoredIOContext getVectoredIOContext() {
return vectoredIOContext;
} | 3.26 |
hadoop_S3AReadOpContext_withReadahead_rdh | /**
* Set builder value.
*
* @param value
* new value
* @return the builder
*/
public S3AReadOpContext withReadahead(final long value) {
readahead = value;
return this;
} | 3.26 |
hadoop_S3AReadOpContext_getPrefetchBlockCount_rdh | /**
* Gets the size of prefetch queue (in number of blocks).
*
* @return the size of prefetch queue (in number of blocks).
*/
public int getPrefetchBlockCount() {
return this.prefetchBlockCount;
} | 3.26 |
hadoop_S3AReadOpContext_withChangeDetectionPolicy_rdh | /**
* Set builder value.
*
* @param value
* new value
* @return the builder
*/
public S3AReadOpContext withChangeDetectionPolicy(final ChangeDetectionPolicy value) {
changeDetectionPolicy = value;
return this;
} | 3.26 |
hadoop_S3AReadOpContext_getInputPolicy_rdh | /**
* Get the IO policy.
*
* @return the initial input policy.
*/
public S3AInputPolicy getInputPolicy() {
return inputPolicy;
} | 3.26 |
hadoop_S3AReadOpContext_getAuditSpan_rdh | /**
* Get the audit which was active when the file was opened.
*
* @return active span
*/
public AuditSpan getAuditSpan() {
return auditSpan;
} | 3.26 |
hadoop_S3AReadOpContext_getPrefetchBlockSize_rdh | /**
* Gets the size in bytes of a single prefetch block.
*
* @return the size in bytes of a single prefetch block.
*/
public int getPrefetchBlockSize() {
return this.prefetchBlockSize;
} | 3.26 |
hadoop_S3AReadOpContext_withAsyncDrainThreshold_rdh | /**
* Set builder value.
*
* @param value
* new value
* @return the builder
*/
public S3AReadOpContext withAsyncDrainThreshold(final long value) {
asyncDrainThreshold = value;
return this;
} | 3.26 |
hadoop_S3AReadOpContext_getReadInvoker_rdh | /**
* Get invoker to use for read operations.
*
* @return invoker to use for read codepaths
*/
public Invoker getReadInvoker() {
return invoker;
} | 3.26 |
hadoop_S3AReadOpContext_getIOStatisticsAggregator_rdh | /**
* Return the IOStatistics aggregator.
*
* @return instance of IOStatisticsAggregator.
*/
public IOStatisticsAggregator getIOStatisticsAggregator() {
return ioStatisticsAggregator;
} | 3.26 |
hadoop_S3AReadOpContext_withAuditSpan_rdh | /**
* Set builder value.
*
* @param value
* new value
* @return the builder
*/
public S3AReadOpContext withAuditSpan(final AuditSpan value) {
auditSpan = value;
return this;
} | 3.26 |
hadoop_NodeHealthStatus_newInstance_rdh | /**
* {@code NodeHealthStatus} is a summary of the health status of the node.
* <p>
* It includes information such as:
* <ul>
* <li>
* An indicator of whether the node is healthy, as determined by the
* health-check script.
* </li>
* <li>The previous time at which the health status was reported.</li>
* <li>A diagnostic report on the health status.</li>
* </ul>
*
* @see NodeReport
* @see ApplicationClientProtocol#getClusterNodes(org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest)
*/
@Public
@Stable
public abstract class NodeHealthStatus {
@Private
public static NodeHealthStatus newInstance(boolean isNodeHealthy, String healthReport, long lastHealthReport) {
NodeHealthStatus status = Records.newRecord(NodeHealthStatus.class);
status.setIsNodeHealthy(isNodeHealthy);
status.setHealthReport(healthReport);
status.setLastHealthReportTime(lastHealthReport);
return status;
} | 3.26 |
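A one-line use of the factory above; the report text and timestamp are placeholders.

```java
// Values are illustrative.
NodeHealthStatus status = NodeHealthStatus.newInstance(
    true,                          // node is healthy
    "disks ok; health script ok",  // diagnostic report
    System.currentTimeMillis());   // last health report time
```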
hadoop_ReadBufferManager_testMimicFullUseAndAddFailedBuffer_rdh | /**
* Test method that can mimic no free buffers scenario and also add a ReadBuffer
* into completedReadList. This readBuffer will get picked up by TryEvict()
* next time a new queue request comes in.
*
* @param buf
* that needs to be added to completedReadlist
*/
@VisibleForTesting
void testMimicFullUseAndAddFailedBuffer(ReadBuffer buf) {
freeList.clear();
completedReadList.add(buf);
} | 3.26 |
hadoop_ReadBufferManager_queueReadAhead_rdh | /* AbfsInputStream-facing methods */
/**
* {@link AbfsInputStream} calls this method to queue read-aheads.
*
* @param stream
* The {@link AbfsInputStream} for which to do the read-ahead
* @param requestedOffset
* The offset in the file which should be read
* @param requestedLength
* The length to read
*/
void queueReadAhead(final AbfsInputStream stream, final long requestedOffset, final int requestedLength, TracingContext tracingContext) {
if (LOGGER.isTraceEnabled()) {
LOGGER.trace("Start Queueing readAhead for {} offset {} length {}", stream.getPath(), requestedOffset, requestedLength);
}
ReadBuffer buffer;
synchronized(this) {
if (m0(stream, requestedOffset)) {
return;// already queued, do not queue again
}
if (freeList.isEmpty() && (!tryEvict())) {
return; // no buffers available, cannot queue anything
}
buffer = new ReadBuffer();
buffer.setStream(stream);
buffer.setOffset(requestedOffset);
buffer.setLength(0);
buffer.setRequestedLength(requestedLength);
buffer.setStatus(ReadBufferStatus.NOT_AVAILABLE);
buffer.setLatch(new CountDownLatch(1));
buffer.setTracingContext(tracingContext);
Integer bufferIndex = freeList.pop();// will return a value, since we have checked size > 0 already
buffer.setBuffer(buffers[bufferIndex]);
buffer.setBufferindex(bufferIndex);
readAheadQueue.add(buffer);
notifyAll();
if (LOGGER.isTraceEnabled()) {
LOGGER.trace("Done q-ing readAhead for file {} offset {} buffer idx {}", stream.getPath(),
requestedOffset, buffer.getBufferindex());
}
}
} | 3.26 |
hadoop_ReadBufferManager_tryEvict_rdh | /**
* If any buffer in the completedlist can be reclaimed then reclaim it and return the buffer to free list.
* The objective is to find just one buffer - there is no advantage to evicting more than one.
*
* @return whether the eviction succeeded - i.e., were we able to free up one buffer
*/
private synchronized boolean tryEvict() {
ReadBuffer nodeToEvict = null;
if (completedReadList.size() <= 0) {
return false;// there are no evict-able buffers
}
long currentTimeInMs = currentTimeMillis();
// first, try buffers where all bytes have been consumed (approximated as first and last bytes consumed)
for (ReadBuffer buf : completedReadList) {
if (buf.isFirstByteConsumed() && buf.isLastByteConsumed()) {
nodeToEvict = buf;
break;
}
}
if (nodeToEvict != null) {
return evict(nodeToEvict);
}
// next, try buffers where any bytes have been consumed (may be a bad idea? have to experiment and see)
for (ReadBuffer buf : completedReadList) {
if (buf.isAnyByteConsumed()) {
nodeToEvict = buf;
break;
}
}
if (nodeToEvict != null) {
return evict(nodeToEvict);
}
// next, try any old nodes that have not been consumed
// Failed read buffers (with buffer index=-1) that are older than
// thresholdAge should be cleaned up, but at the same time should not
// report successful eviction.
// Queue logic expects that a buffer is freed up for read ahead when
// eviction is successful, whereas a failed ReadBuffer would have released
// its buffer when its status was set to READ_FAILED.
long earliestBirthday = Long.MAX_VALUE;
ArrayList<ReadBuffer> oldFailedBuffers = new ArrayList<>();
for (ReadBuffer buf : completedReadList) {
if ((buf.getBufferindex() != (-1)) && (buf.getTimeStamp() < earliestBirthday)) {
nodeToEvict = buf;
earliestBirthday = buf.getTimeStamp();
} else if ((buf.getBufferindex() == (-1)) && ((currentTimeInMs - buf.getTimeStamp()) > thresholdAgeMilliseconds)) {
oldFailedBuffers.add(buf);
}
}
for (ReadBuffer buf : oldFailedBuffers) {
evict(buf);
}
if (((currentTimeInMs - earliestBirthday) > thresholdAgeMilliseconds) && (nodeToEvict != null)) {
return evict(nodeToEvict);
}
LOGGER.trace("No buffer eligible for eviction");
// nothing can be evicted
return false;
} | 3.26 |
hadoop_ReadBufferManager_getBufferFromCompletedQueue_rdh | /**
* Returns buffers that failed or passed from completed queue.
*
* @param stream
* @param requestedOffset
* @return */
private ReadBuffer getBufferFromCompletedQueue(final AbfsInputStream stream, final long requestedOffset) {
for (ReadBuffer buffer : completedReadList) {
// Buffer is returned if the requestedOffset is at or above buffer's
// offset but less than buffer's length or the actual requestedLength
if (((buffer.getStream() == stream) && (requestedOffset >= buffer.getOffset())) && ((requestedOffset < (buffer.getOffset() + buffer.getLength())) || (requestedOffset < (buffer.getOffset() + buffer.getRequestedLength())))) {
return buffer;
}
}
return null;
} | 3.26 |
hadoop_ReadBufferManager_waitForProcess_rdh | /* Internal methods */
private void waitForProcess(final AbfsInputStream stream, final long position) {
ReadBuffer readBuf;
synchronized(this) {
clearFromReadAheadQueue(stream, position);
readBuf = m1(inProgressList, stream, position);
}
if (readBuf != null) {
// if in in-progress queue, then block for it
try {
if (LOGGER.isTraceEnabled()) {
LOGGER.trace("got a relevant read buffer for file {} offset {} buffer idx {}", stream.getPath(), readBuf.getOffset(), readBuf.getBufferindex());
}
readBuf.getLatch().await();// blocking wait on the caller stream's thread
// Note on correctness: readBuf gets out of inProgressList only in 1 place: after worker thread
// is done processing it (in doneReading). There, the latch is set after removing the buffer from
// inProgressList. So this latch is safe to be outside the synchronized block.
// Putting it in synchronized would result in a deadlock, since this thread would be holding the lock
// while waiting, so no one will be able to change any state. If this becomes more complex in the future,
// then the latch can be removed and replaced with wait/notify whenever inProgressList is touched.
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();
}
if (LOGGER.isTraceEnabled()) {
LOGGER.trace("latch done for file {} buffer idx {} length {}", stream.getPath(), readBuf.getBufferindex(),
readBuf.getLength()); }
}
} | 3.26 |
hadoop_ReadBufferManager_getNextBlockToRead_rdh | /* ReadBufferWorker-thread-facing methods */
/**
* ReadBufferWorker thread calls this to get the next buffer that it should work on.
*
* @return {@link ReadBuffer}
* @throws InterruptedException
* if thread is interrupted
*/
ReadBuffer getNextBlockToRead() throws InterruptedException {
ReadBuffer buffer = null;
synchronized(this) {
// buffer = readAheadQueue.take(); // blocking method
while (readAheadQueue.size() == 0) {
wait();
}
buffer = readAheadQueue.remove();
notifyAll();
if (buffer == null) {
return null;// should never happen
}
buffer.setStatus(ReadBufferStatus.READING_IN_PROGRESS);
inProgressList.add(buffer);
}
if (LOGGER.isTraceEnabled()) {
LOGGER.trace("ReadBufferWorker picked file {} for offset {}", buffer.getStream().getPath(), buffer.getOffset()); }
return buffer;
} | 3.26 |
hadoop_ReadBufferManager_testResetReadBufferManager_rdh | /**
* Test method that can clean up the current state of readAhead buffers and
* the lists. Will also trigger a fresh init.
*/
@VisibleForTesting
void testResetReadBufferManager() {
synchronized(this) {
ArrayList<ReadBuffer> completedBuffers = new ArrayList<>();
for (ReadBuffer buf : completedReadList) {
if (buf != null) {
completedBuffers.add(buf);
}
}
for (ReadBuffer buf : completedBuffers) {
evict(buf);
}
readAheadQueue.clear();
inProgressList.clear();
completedReadList.clear();
freeList.clear();
for (int i = 0; i < f0; i++) {
buffers[i] = null;
}
buffers = null;
resetBufferManager();
}
} | 3.26 |
hadoop_ReadBufferManager_resetBufferManager_rdh | /**
* Reset buffer manager to null.
*/
@VisibleForTesting
static void resetBufferManager() {
bufferManager = null;
} | 3.26 |
hadoop_ReadBufferManager_doneReading_rdh | /**
* ReadBufferWorker thread calls this method to post completion.
*
* @param buffer
* the buffer whose read was completed
* @param result
* the {@link ReadBufferStatus} after the read operation in the worker thread
* @param bytesActuallyRead
* the number of bytes that the worker thread was actually able to read
*/
void doneReading(final ReadBuffer buffer, final ReadBufferStatus result, final int bytesActuallyRead) {
if (LOGGER.isTraceEnabled()) {
LOGGER.trace("ReadBufferWorker completed read file {} for offset {} outcome {} bytes {}", buffer.getStream().getPath(), buffer.getOffset(), result, bytesActuallyRead);
}
synchronized(this) {
// If this buffer has already been purged during
// close of InputStream then we don't update the lists.
if (inProgressList.contains(buffer)) {
inProgressList.remove(buffer);
if ((result == ReadBufferStatus.AVAILABLE) && (bytesActuallyRead > 0)) {
buffer.setStatus(ReadBufferStatus.AVAILABLE);
buffer.setLength(bytesActuallyRead);
} else {
freeList.push(buffer.getBufferindex()); // buffer will be deleted as per the eviction policy.
}
// completed list also contains FAILED read buffers
// for sending exception message to clients.
buffer.setStatus(result);
buffer.setTimeStamp(currentTimeMillis());
completedReadList.add(buffer);
}
}
// outside the synchronized, since anyone receiving a wake-up from the latch must see safe-published results
buffer.getLatch().countDown();// wake up waiting threads (if any)
} | 3.26 |
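getNextBlockToRead and doneReading form the two halves of the worker-thread contract. A hedged sketch of the loop a worker might run follows; the actual ReadBufferWorker is not shown in these snippets and readIntoBuffer is a placeholder.

```java
// Sketch of a worker loop against the two methods above; readIntoBuffer() is hypothetical.
void workerLoop(ReadBufferManager manager) throws InterruptedException {
  while (true) {
    ReadBuffer buffer = manager.getNextBlockToRead();   // blocks until a read-ahead is queued
    try {
      int bytesRead = readIntoBuffer(buffer);           // placeholder for the actual remote read
      manager.doneReading(buffer, ReadBufferStatus.AVAILABLE, bytesRead);
    } catch (Exception e) {
      // A failed read is reported so waiting readers are released and the buffer can be evicted.
      manager.doneReading(buffer, ReadBufferStatus.READ_FAILED, 0);
    }
  }
}
```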
hadoop_ReadBufferManager_purgeBuffersForStream_rdh | /**
* Purging the buffers associated with an {@link AbfsInputStream}
* from {@link ReadBufferManager} when stream is closed.
*
* @param stream
* input stream.
*/
public synchronized void purgeBuffersForStream(AbfsInputStream stream) {
LOGGER.debug("Purging stale buffers for AbfsInputStream {} ", stream);
readAheadQueue.removeIf(readBuffer -> readBuffer.getStream() == stream);
purgeList(stream, completedReadList);
} | 3.26 |
hadoop_ReadBufferManager_purgeList_rdh | /**
* Method to remove buffers associated with a {@link AbfsInputStream}
* when its close method is called.
* NOTE: This method is not threadsafe and must be called inside a
* synchronised block. See caller.
*
* @param stream
* associated input stream.
* @param list
* list of buffers like {@link this#completedReadList}
* or {@link this#inProgressList}.
*/
private void purgeList(AbfsInputStream stream, LinkedList<ReadBuffer> list) {
for (Iterator<ReadBuffer> it = list.iterator(); it.hasNext();) {
ReadBuffer readBuffer = it.next();
if (readBuffer.getStream() == stream) {
it.remove();
// As failed ReadBuffers (bufferIndex = -1) are already pushed to free
// list in doneReading method, we will skip adding those here again.
if (readBuffer.getBufferindex() != (-1)) {
freeList.push(readBuffer.getBufferindex());
}
}
}
} | 3.26 |
hadoop_ReadBufferManager_currentTimeMillis_rdh | /**
* Similar to System.currentTimeMillis, except implemented with System.nanoTime().
* System.currentTimeMillis can go backwards when system clock is changed (e.g., with NTP time synchronization),
* making it unsuitable for measuring time intervals. nanotime is strictly monotonically increasing per CPU core.
* Note: it is not monotonic across Sockets, and even within a CPU, its only the
* more recent parts which share a clock across all cores.
*
* @return current time in milliseconds
*/
private long currentTimeMillis() {
return (System.nanoTime() / 1000) / 1000;
} | 3.26 |
hadoop_ReadBufferManager_getBlock_rdh | /**
* {@link AbfsInputStream} calls this method read any bytes already available in a buffer (thereby saving a
* remote read). This returns the bytes if the data already exists in buffer. If there is a buffer that is reading
* the requested offset, then this method blocks until that read completes. If the data is queued in a read-ahead
* but not picked up by a worker thread yet, then it cancels that read-ahead and reports cache miss. This is because
* depending on worker thread availability, the read-ahead may take a while - the calling thread can do its own
* read to get the data faster (compared to the read waiting in queue for an indeterminate amount of time).
*
* @param stream
* the file to read bytes for
* @param position
* the offset in the file to do a read for
* @param length
* the length to read
* @param buffer
* the buffer to read data into. Note that the buffer will be written into from offset 0.
* @return the number of bytes read
*/
int getBlock(final AbfsInputStream stream, final long position, final int length, final byte[] buffer) throws IOException {
// not synchronized, so have to be careful with locking
if (LOGGER.isTraceEnabled()) {
LOGGER.trace("getBlock for file {} position {} thread {}", stream.getPath(), position, Thread.currentThread().getName());
}
waitForProcess(stream, position);
int v5 = 0;
synchronized(this) {
v5 = getBlockFromCompletedQueue(stream, position, length, buffer);
}
if (v5 > 0) {
if (LOGGER.isTraceEnabled()) {
LOGGER.trace("Done read from Cache for {} position {} length {}", stream.getPath(), position, v5);
}
return v5;
}
// otherwise, just say we got nothing - calling thread can do its own read
return 0;
} | 3.26 |
hadoop_ProtobufHelper_getFixedByteString_rdh | /**
* Get the ByteString for frequently used fixed and small set strings.
*
* @param key
* string
* @return ByteString for frequently used fixed and small set strings.
*/
public static ByteString getFixedByteString(String key) {
return ShadedProtobufHelper.getFixedByteString(key);
} | 3.26 |
hadoop_ProtobufHelper_getByteString_rdh | /**
* Get the byte string of a non-null byte array.
* If the array is 0 bytes long, return a singleton to reduce object allocation.
*
* @param bytes
* bytes to convert.
* @return a value
*/
public static ByteString getByteString(byte[] bytes) {
// return singleton to reduce object allocation
return ShadedProtobufHelper.getByteString(bytes);
} | 3.26 |
hadoop_ProtobufHelper_tokenFromProto_rdh | /**
* Get a token from a TokenProto payload.
*
* @param tokenProto
* marshalled token
* @return the token.
*/
public static Token<? extends TokenIdentifier> tokenFromProto(TokenProto tokenProto) {
return ShadedProtobufHelper.tokenFromProto(tokenProto);
} | 3.26 |
hadoop_ProtobufHelper_protoFromToken_rdh | /**
* Create a {@code TokenProto} instance
* from a hadoop token.
* This builds and caches the fields
* (identifier, password, kind, service) but not
* renewer or any payload.
*
* @param tok
* token
* @return a marshallable protobuf class.
*/
public static TokenProto protoFromToken(Token<?> tok) {
return ShadedProtobufHelper.protoFromToken(tok);
} | 3.26 |
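The two helpers above are inverses, so a round trip looks like this; obtaining the token itself is elided and the fetch helper is hypothetical.

```java
// Assumption: tok is an existing Hadoop token obtained elsewhere.
Token<? extends TokenIdentifier> tok = fetchTokenSomehow();   // hypothetical helper

// Marshal to protobuf (identifier, password, kind, service)...
TokenProto proto = ProtobufHelper.protoFromToken(tok);

// ...and back to a Token instance.
Token<? extends TokenIdentifier> roundTripped = ProtobufHelper.tokenFromProto(proto);
```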
hadoop_ManifestCommitterSupport_createJobSummaryFilename_rdh | /**
* Create the filename for a report from the jobID.
*
* @param jobId
* jobId
* @return filename for a report.
*/
public static String createJobSummaryFilename(String jobId) {
return String.format(SUMMARY_FILENAME_FORMAT, jobId);
} | 3.26 |
hadoop_ManifestCommitterSupport_addHeapInformation_rdh | /**
* Add heap information to IOStatisticSetters gauges, with a stage in front of every key.
*
* @param ioStatisticsSetters
* map to update
* @param stage
* stage
*/
public static void addHeapInformation(IOStatisticsSetters ioStatisticsSetters, String stage) {
final long totalMemory = Runtime.getRuntime().totalMemory();
final long v6 = Runtime.getRuntime().freeMemory();
final String prefix = "stage.";
ioStatisticsSetters.setGauge(prefix + stage + "." + TOTAL_MEMORY, totalMemory);
ioStatisticsSetters.setGauge(prefix + stage + "." + FREE_MEMORY, v6);
ioStatisticsSetters.setGauge(prefix + stage + "." + HEAP_MEMORY, totalMemory - v6);
} | 3.26 |
hadoop_ManifestCommitterSupport_createIOStatisticsStore_rdh | /**
* Create an IOStatistics Store with the standard statistics
* set up.
*
* @return a store builder preconfigured with the standard stats.
*/
public static IOStatisticsStoreBuilder createIOStatisticsStore() {
final IOStatisticsStoreBuilder store = iostatisticsStore();
store.withSampleTracking(COUNTER_STATISTICS);
store.withDurationTracking(DURATION_STATISTICS);
return store;
} | 3.26 |
hadoop_ManifestCommitterSupport_createManifestStoreOperations_rdh | /**
* Create the manifest store operations for the given FS.
* This supports binding to custom filesystem handlers.
*
* @param conf
* configuration.
* @param filesystem
* fs.
* @param path
* path under FS.
* @return a bonded store operations.
* @throws IOException
* on binding/init problems.
*/
public static ManifestStoreOperations createManifestStoreOperations(final Configuration conf, final FileSystem filesystem, final Path path) throws IOException {
try {
final Class<? extends ManifestStoreOperations> storeClass = conf.getClass(OPT_STORE_OPERATIONS_CLASS, ManifestStoreOperationsThroughFileSystem.class, ManifestStoreOperations.class);
final ManifestStoreOperations operations = storeClass.getDeclaredConstructor().newInstance();
operations.bindToFileSystem(filesystem, path);
return operations;
} catch (Exception e) {
throw new PathIOException(path.toString(), (("Failed to create Store Operations from configuration option " + OPT_STORE_OPERATIONS_CLASS) + ":") + e, e);
}
} | 3.26 |
hadoop_ManifestCommitterSupport_manifestTempPathForTaskAttempt_rdh | /**
* Get the path in the manifest subdir for the temp path to save a
* task attempt's manifest before renaming it to the
* path defined by {@link #manifestPathForTask(Path, String)}.
*
* @param manifestDir
* manifest directory
* @param taskAttemptId
* task attempt ID.
* @return the path to save/load the manifest.
*/
public static Path manifestTempPathForTaskAttempt(Path manifestDir, String taskAttemptId) {
return new Path(manifestDir, (taskAttemptId + MANIFEST_SUFFIX) + TMP_SUFFIX);
} | 3.26 |
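To relate the temp path to the final manifest path mentioned in the javadoc, a small sketch with placeholder values; the exact file suffixes depend on the MANIFEST_SUFFIX and TMP_SUFFIX constants, which are not shown here.

```java
// Illustrative values only.
Path manifestDir = new Path("/out/_temporary/manifests");
String taskAttemptId = "attempt_1700000000000_0001_m_000002_0";

// Temp location written first; the resulting name is <taskAttemptId> plus the
// manifest and tmp suffixes defined by the constants above.
Path temp = ManifestCommitterSupport.manifestTempPathForTaskAttempt(manifestDir, taskAttemptId);

// ...then renamed to the per-task manifest path referenced in the javadoc:
// Path finalPath = ManifestCommitterSupport.manifestPathForTask(manifestDir, taskId);
```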