name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_DiskBalancerWorkItem_setTolerancePercent_rdh | /**
* Sets the tolerance percentage.
*
* @param tolerancePercent
* - tolerance.
*/
public void setTolerancePercent(long tolerancePercent) {
this.tolerancePercent = tolerancePercent;
} | 3.26 |
hadoop_DiskBalancerWorkItem_getTolerancePercent_rdh | /**
* Allowed deviation from ideal storage in percentage.
*
* @return long
*/
public long getTolerancePercent() {
return tolerancePercent;
} | 3.26 |
hadoop_DiskBalancerWorkItem_setBandwidth_rdh | /**
* Sets max disk bandwidth to use, in MBs per second.
*
* @param bandwidth
* - long.
*/
public void setBandwidth(long bandwidth) {
this.bandwidth = bandwidth;
} | 3.26 |
hadoop_DiskBalancerWorkItem_incBlocksCopied_rdh | /**
* increments the number of blocks copied.
*/
public void incBlocksCopied() {
blocksCopied++;
} | 3.26 |
hadoop_DiskBalancerWorkItem_setErrMsg_rdh | /**
* Sets the error message.
*
* @param errMsg
* - Msg.
*/
public void setErrMsg(String errMsg) {
this.errMsg = errMsg;
} | 3.26 |
hadoop_DiskBalancerWorkItem_setBytesCopied_rdh | /**
* Sets bytes copied so far.
*
* @param bytesCopied
* - long
*/
public void setBytesCopied(long bytesCopied) {
this.bytesCopied = bytesCopied;
} | 3.26 |
hadoop_DiskBalancerWorkItem_setErrorCount_rdh | /**
* Sets the Error counts for this step.
*
* @param errorCount
* long.
*/
public void setErrorCount(long errorCount) {
this.errorCount = errorCount;
} | 3.26 |
hadoop_DiskBalancerWorkItem_incErrorCount_rdh | /**
* Increments the error count.
*/
public void incErrorCount() {
this.errorCount++;
} | 3.26 |
hadoop_DiskBalancerWorkItem_getMaxDiskErrors_rdh | /**
* Gets maximum disk errors to tolerate before we fail this copy step.
*
* @return long.
*/
public long getMaxDiskErrors() {
return maxDiskErrors;
} | 3.26 |
hadoop_DiskBalancerWorkItem_getErrMsg_rdh | /**
* Gets the error message.
*/
public String getErrMsg() {
return errMsg;
} | 3.26 |
hadoop_DiskBalancerWorkItem_setBlocksCopied_rdh | /**
* Sets the number of blocks copied so far.
*
* @param blocksCopied
* Blocks copied.
*/
public void setBlocksCopied(long blocksCopied) {
this.blocksCopied = blocksCopied;
} | 3.26 |
hadoop_DiskBalancerWorkItem_getBandwidth_rdh | /**
* Max disk bandwidth to use. MB per second.
*
* @return - long.
*/
public long getBandwidth() {
return bandwidth;
} | 3.26 |
hadoop_DiskBalancerWorkItem_getSecondsElapsed_rdh | /**
* Gets the number of seconds elapsed from the start time.
*
* The reason we have this is time skew: the client's current time
* may not match the server's timestamp, so the elapsed seconds
* cannot be computed from startTime alone.
*
* @return seconds elapsed from start time.
*/
public long getSecondsElapsed() {
return secondsElapsed;
} | 3.26 |
hadoop_DiskBalancerWorkItem_getBlocksCopied_rdh | /**
* Returns number of blocks copied for this DiskBalancerWorkItem.
*
* @return long count of blocks.
*/
public long getBlocksCopied() {
return blocksCopied;
} | 3.26 |
hadoop_DiskBalancerWorkItem_getBytesToCopy_rdh | /**
* Returns bytes to copy.
*
* @return - long
*/
public long getBytesToCopy() {
return bytesToCopy;
} | 3.26 |
hadoop_DiskBalancerWorkItem_getStartTime_rdh | /**
* Returns the start time of execution.
*
* @return startTime
*/
public long getStartTime() {
return startTime;
} | 3.26 |
hadoop_DiskBalancerWorkItem_incCopiedSoFar_rdh | /**
* Increments bytesCopied by delta.
*
* @param delta
* - long
*/
public void incCopiedSoFar(long delta) {
this.bytesCopied += delta;
} | 3.26 |
hadoop_DiskBalancerWorkItem_parseJson_rdh | /**
* Reads a DiskBalancerWorkItem Object from a Json String.
*
* @param json
* - Json String.
* @return DiskBalancerWorkItem Object
* @throws IOException
*/
public static DiskBalancerWorkItem parseJson(String json) throws IOException {
Preconditions.checkNotNull(json);
return READER.readValue(json);
} | 3.26 |
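A minimal usage sketch for parseJson (not from the source): the JSON field names below are assumptions inferred from the accessors shown in the other DiskBalancerWorkItem snippets, and the surrounding class/imports are omitted.

// Hedged sketch: field names in the JSON literal are assumed, not verified
// against the class's Jackson bindings.
static DiskBalancerWorkItem parseExample() throws IOException {
String json = "{\"bytesCopied\":0,\"blocksCopied\":0}";
DiskBalancerWorkItem item = DiskBalancerWorkItem.parseJson(json);
item.incBlocksCopied();    // blocksCopied -> 1
item.incCopiedSoFar(4096); // bytesCopied  -> 4096
return item;
}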
hadoop_DiskBalancerWorkItem_getErrorCount_rdh | /**
* Returns the number of errors encountered.
*
* @return long
*/
public long getErrorCount() {
return errorCount;
} | 3.26 |
hadoop_DiskBalancerWorkItem_getBytesCopied_rdh | /**
* Returns bytes copied so far.
*
* @return long
*/
public long getBytesCopied() {
return bytesCopied;
} | 3.26 |
hadoop_ReferenceCountMap_getEntries_rdh | /**
* Get entries in the reference Map.
*
* @return an immutable list of the entries (keys) in the reference map.
*/
@VisibleForTesting
public ImmutableList<E> getEntries() {
return new ImmutableList.Builder<E>().addAll(referenceMap.keySet()).build();
} | 3.26 |
hadoop_ReferenceCountMap_getUniqueElementsSize_rdh | /**
* Get the number of unique elements
*/
public int getUniqueElementsSize() {
return referenceMap.size();
} | 3.26 |
hadoop_ReferenceCountMap_put_rdh | /**
* Add the reference. If the instance is already present, just increase the
* reference count.
*
* @param key
* Key to put in reference map
* @return Referenced instance
*/
public E put(E key) {
E value = referenceMap.putIfAbsent(key, key);
if (value == null) {
value = key;
}
value.incrementAndGetRefCount();
return value;
} | 3.26 |
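A hedged sketch of the put/getReferenceCount semantics implied by the snippets above; the type bound and the nesting of ReferenceCounter inside ReferenceCountMap are assumptions based on the calls shown, and imports are omitted.

// Hedged sketch: a second put() of an equal key returns the canonical instance
// and bumps its reference count.
static <E extends ReferenceCountMap.ReferenceCounter> void demoPut(ReferenceCountMap<E> map, E key) {
E canonical = map.put(key); // first put: stored as its own value, count -> 1
E again = map.put(key);     // equal key: canonical instance returned, count -> 2
assert again == canonical;
assert map.getReferenceCount(key) == 2;
}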
hadoop_ReferenceCountMap_clear_rdh | /**
* Clear the contents
*/
@VisibleForTesting
public void clear() {
referenceMap.clear();
} | 3.26 |
hadoop_ReferenceCountMap_getReferenceCount_rdh | /**
* Get the reference count for the key
*/
public long getReferenceCount(E key) {
ReferenceCounter counter = referenceMap.get(key);
if (counter != null) {
return counter.getRefCount();
}
return 0;
} | 3.26 |
hadoop_DecayRpcSchedulerDetailedMetrics_addProcessingTime_rdh | /**
* Instrument a Call processing time based on its priority.
*
* @param priority
* of the RPC call
* @param processingTime
* of the RPC call in the queue of the priority
*/
public void addProcessingTime(int priority, long processingTime) {
rpcProcessingRates.add(processingNamesForLevels[priority], processingTime);
} | 3.26 |
hadoop_DecayRpcSchedulerDetailedMetrics_getProcessingName_rdh | /**
*
* @param priority
* input priority.
* @return the rate name inside the metric.
*/
public String getProcessingName(int priority) {
return ("DecayRPCSchedulerPriority." + priority) + ".RpcProcessingTime";
} | 3.26 |
hadoop_DecayRpcSchedulerDetailedMetrics_addQueueTime_rdh | /**
* Instrument a Call queue time based on its priority.
*
* @param priority
* of the RPC call
* @param queueTime
* of the RPC call in the queue of the priority
*/
public void addQueueTime(int priority, long queueTime) {
rpcQueueRates.add(queueNamesForLevels[priority], queueTime);
} | 3.26 |
hadoop_DecayRpcSchedulerDetailedMetrics_init_rdh | /**
* Initialize the metrics for JMX with priority levels.
*
* @param numLevels
* input numLevels.
*/
public void init(int numLevels) {
LOG.info("Initializing RPC stats for {} priority levels", numLevels);
queueNamesForLevels = new String[numLevels];
processingNamesForLevels = new String[numLevels];
for (int i = 0; i < numLevels; i++) {
queueNamesForLevels[i] = getQueueName(i + 1);
processingNamesForLevels[i] = getProcessingName(i + 1);
}
rpcQueueRates.init(queueNamesForLevels);
rpcProcessingRates.init(processingNamesForLevels);
} | 3.26 |
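A hedged sketch of how the level-indexed arrays line up with the 1-based names: init(n) registers names for priorities 1..n, while addQueueTime/addProcessingTime take a 0-based index into those arrays. Creating the metrics object itself is outside this excerpt, so it is passed in here.

// Hedged sketch: the metrics instance is assumed to be created/registered elsewhere.
static void record(DecayRpcSchedulerDetailedMetrics metrics) {
metrics.init(2);                 // registers names for priorities 1 and 2
metrics.addQueueTime(0, 5);      // -> "DecayRPCSchedulerPriority.1.RpcQueueTime"
metrics.addProcessingTime(1, 7); // -> "DecayRPCSchedulerPriority.2.RpcProcessingTime"
}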
hadoop_DecayRpcSchedulerDetailedMetrics_shutdown_rdh | /**
* Shutdown the instrumentation process.
*/
public void shutdown() {
DefaultMetricsSystem.instance().unregisterSource(name);
} | 3.26 |
hadoop_DecayRpcSchedulerDetailedMetrics_getQueueName_rdh | /**
*
* @param priority
* input priority.
* @return the rate name inside the metric.
*/
public String getQueueName(int priority) {
return ("DecayRPCSchedulerPriority." + priority) + ".RpcQueueTime";
} | 3.26 |
hadoop_ReconfigurationException_getProperty_rdh | /**
* Get property that cannot be changed.
*
* @return property info.
*/
public String getProperty() {
return property;
} | 3.26 |
hadoop_ReconfigurationException_getOldValue_rdh | /**
* Get old value of property that cannot be changed.
*
* @return old value.
*/
public String getOldValue() {
return oldVal;
} | 3.26 |
hadoop_ZKDelegationTokenSecretManagerImpl_rebuildTokenCache_rdh | /**
* This function will rebuild local token cache from zk storage.
* It is first called when the secret manager is initialized and
* then regularly at a configured interval.
*
* @param initial
* whether this is called during initialization
* @throws IOException
*/
private void rebuildTokenCache(boolean initial) throws IOException {
localTokenCache.clear();
// Use bare zookeeper client to get all children since curator will
// wrap the same API with a sorting process. This is time consuming given
// millions of tokens
List<String> zkTokens;
try {
zkTokens = zookeeper.getChildren(TOKEN_PATH, false);
} catch (KeeperException | InterruptedException e) {
throw new IOException("Tokens cannot be fetched from path " + TOKEN_PATH, e);
}
byte[] data;
for (String tokenPath : zkTokens) {
try {
data = zkClient.getData().forPath((ZK_DTSM_TOKENS_ROOT + "/") + tokenPath);
} catch (KeeperException.NoNodeException e) {
f0.debug(("No node in path [" + tokenPath) + "]");
continue;
} catch (Exception ex) {
throw new IOException(ex);
}
// Store data to currentTokenMap
AbstractDelegationTokenIdentifier ident = processTokenAddOrUpdate(data);
// Store data to localTokenCache for sync
localTokenCache.add(ident);
}
if (!initial) {
// Sync zkTokens with local cache, specifically
// 1) add/update tokens to local cache from zk, which is done through
// processTokenAddOrUpdate above
// 2) remove tokens in local cache but not in zk anymore
for (AbstractDelegationTokenIdentifier ident : currentTokens.keySet()) {
if (!localTokenCache.contains(ident)) {
currentTokens.remove(ident);
}
}
}
syncTokenOwnerStats();
} | 3.26 |
hadoop_OperationCallbacks_abortMultipartUploadsUnderPrefix_rdh | /**
* Abort multipart uploads under a path; paged.
*
* @param prefix
* prefix for uploads to abort
* @return a count of aborts
* @throws IOException
* trouble; FileNotFoundExceptions are swallowed.
*/
@Retries.RetryTranslated
default long abortMultipartUploadsUnderPrefix(String prefix) throws IOException {
return 0;
} | 3.26 |
hadoop_ParentQueue_addDynamicChildQueue_rdh | // New method to add child queue
private CSQueue addDynamicChildQueue(String childQueuePath, boolean isLeaf) throws SchedulerDynamicEditException {
writeLock.lock();
try {
// Check if queue exists, if queue exists, write a warning message (this
// should not happen, since it will be handled before calling this method)
// , but we will move on.
CSQueue queue = queueContext.getQueueManager().getQueueByFullName(childQueuePath);
if (queue != null) {
f0.warn(("This should not happen, trying to create queue=" + childQueuePath) + ", however the queue already exists");
return queue;
}
// Check if the max queue limit is exceeded.
int maxQueues = queueContext.getConfiguration().getAutoCreatedQueuesV2MaxChildQueuesLimit(getQueuePath());
if (childQueues.size() >= maxQueues) {
throw new SchedulerDynamicEditException(((((("Cannot auto create queue " + childQueuePath) + ". Max Child ") + "Queue limit exceeded which is configured as: ") + maxQueues) + " and number of child queues is: ") + childQueues.size());
}
// First, check if we allow creation or not
boolean weightsAreUsed = false;
try {
weightsAreUsed = getCapacityConfigurationTypeForQueues(childQueues) == QueueCapacityType.WEIGHT;
} catch (IOException e) {
f0.warn("Caught Exception during auto queue creation", e);
}
if ((!weightsAreUsed) && queueContext.getConfiguration().isLegacyQueueMode()) {
throw new SchedulerDynamicEditException(((("Trying to create new queue=" + childQueuePath) + " but not all the queues under parent=") + this.getQueuePath()) + " are using weight-based capacity. Failed to created queue");
}
CSQueue newQueue = createNewQueue(childQueuePath, isLeaf);
this.childQueues.add(newQueue);
updateLastSubmittedTimeStamp();
// Call updateClusterResource.
// Which will deal with all effectiveMin/MaxResource
// Calculation
this.updateClusterResource(queueContext.getClusterResource(), new ResourceLimits(queueContext.getClusterResource()));
return newQueue;
} finally {
writeLock.unlock();
}
} | 3.26 |
hadoop_BaseContainerTokenSecretManager_createNewMasterKey_rdh | // Need lock as we increment serialNo etc.
protected MasterKeyData createNewMasterKey() {
this.writeLock.lock();
try {
return new MasterKeyData(serialNo++, generateSecret());
} finally {
this.writeLock.unlock();
}
} | 3.26 |
hadoop_BaseContainerTokenSecretManager_m0_rdh | /**
* Used by the RPC layer.
*/
@Override
public ContainerTokenIdentifier m0() {
return new ContainerTokenIdentifier();
} | 3.26 |
hadoop_PerGpuUtilizations_getOverallGpuUtilization_rdh | /**
* Overall percent GPU utilization
*
* @return utilization
*/
@XmlJavaTypeAdapter(StrToFloatBeforeSpaceAdapter.class)
@XmlElement(name = "gpu_util")
public Float getOverallGpuUtilization() {
return overallGpuUtilization;
} | 3.26 |
hadoop_ZKPathDumper_append_rdh | /**
* Append the specified indentation to a builder
*
* @param builder
* string build to append to
* @param indent
* current indentation
* @param c
* character to use for indentation
*/
private void append(StringBuilder builder, int indent, char c) {
for (int i = 0; i < indent; i++) {
builder.append(c);
}
} | 3.26 |
hadoop_ZKPathDumper_toString_rdh | /**
* Trigger the recursive registry dump.
*
* @return a string view of the registry
*/
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("ZK tree for ").append(f0).append('\n');
expand(builder, f0, 1);
return builder.toString();
} | 3.26 |
hadoop_ZKPathDumper_expand_rdh | /**
* Recursively expand the path into the supplied string builder, increasing
* the indentation by {@link #INDENT} as it proceeds (depth first) down
* the tree
*
* @param builder
* string build to append to
* @param path
* path to examine
* @param indent
* current indentation
*/
private void expand(StringBuilder builder, String path, int indent) {
try {
GetChildrenBuilder childrenBuilder = curator.getChildren();
List<String> v2 = childrenBuilder.forPath(path);
for (String child : v2) {
String childPath = (path + "/") + child;
String body;
Stat stat = curator.checkExists().forPath(childPath);
StringBuilder v7 = new StringBuilder(256);
v7.append(" [").append(stat.getDataLength()).append("]");
if (stat.getEphemeralOwner() > 0) {
v7.append("*");
}
if (verbose) {
// verbose: extract ACLs
builder.append(" -- ");
List<ACL> acls = curator.getACL().forPath(childPath);
for (ACL acl : acls) {
builder.append(RegistrySecurity.aclToString(acl));
builder.append(" ");
}
}
body = v7.toString();
// print each child
append(builder, indent, ' ');
builder.append('/').append(child);
builder.append(body);
builder.append('\n');
// recurse
expand(builder, childPath, indent + INDENT);
}
} catch (Exception e) {
builder.append(e.toString()).append("\n");
}
} | 3.26 |
hadoop_TemporaryAWSCredentialsProvider_createCredentials_rdh | /**
* The credentials here must include a session token, else this operation
* will raise an exception.
*
* @param config
* the configuration
* @return temporary credentials.
* @throws IOException
* on any failure to load the credentials.
* @throws NoAuthWithAWSException
* validation failure
* @throws NoAwsCredentialsException
* the credentials are actually empty.
*/
@Override
protected AwsCredentials createCredentials(Configuration config) throws IOException {
MarshalledCredentials creds = MarshalledCredentialBinding.fromFileSystem(getUri(), config);
MarshalledCredentials.CredentialTypeRequired sessionOnly = CredentialTypeRequired.SessionOnly;
// treat only having non-session creds as empty.
if (!creds.isValid(sessionOnly)) {
throw new NoAwsCredentialsException(COMPONENT);
}
return MarshalledCredentialBinding.toAWSCredentials(creds, sessionOnly, COMPONENT);
} | 3.26 |
hadoop_FullCredentialsTokenBinding_loadAWSCredentials_rdh | /**
* Load the AWS credentials.
*
* @throws IOException
* failure
*/
private void loadAWSCredentials() throws IOException {
credentialOrigin = AbstractS3ATokenIdentifier.createDefaultOriginMessage();
Configuration conf = getConfig();
URI uri = getCanonicalUri();
// look for access keys to FS
S3xLoginHelper.Login secrets = S3AUtils.getAWSAccessKeys(uri, conf);
if (secrets.hasLogin()) {
awsCredentials = new MarshalledCredentials(secrets.getUser(), secrets.getPassword(), "");
credentialOrigin += "; source = Hadoop configuration data";
} else {
// if there are none, look for the environment variables.
awsCredentials = MarshalledCredentialBinding.fromEnvironment(System.getenv());
if (awsCredentials.isValid(CredentialTypeRequired.AnyNonEmpty)) {
// valid tokens, so mark as origin
credentialOrigin += "; source = Environment variables";
} else {
credentialOrigin = "no credentials in configuration or" + " environment variables";
}
}
awsCredentials.validate(credentialOrigin + ": ", CredentialTypeRequired.AnyNonEmpty);
} | 3.26 |
hadoop_FullCredentialsTokenBinding_deployUnbonded_rdh | /**
* Serve up the credentials retrieved from configuration/environment in
* {@link #loadAWSCredentials()}.
*
* @return a credential provider for the unbonded instance.
* @throws IOException
* failure to load
*/
@Override
public AWSCredentialProviderList deployUnbonded() throws IOException {
requireServiceStarted();
loadAWSCredentials();
return new AWSCredentialProviderList("Full Credentials Token Binding", new MarshalledCredentialProvider(FULL_TOKEN, getStoreContext().getFsURI(), getConfig(), awsCredentials, CredentialTypeRequired.AnyNonEmpty));
} | 3.26 |
hadoop_FullCredentialsTokenBinding_createTokenIdentifier_rdh | /**
* Create a new delegation token.
*
* It's slightly inefficient to create a new one every time, but
* it avoids concurrency problems with managing any singleton.
*
* @param policy
* minimum policy to use, if known.
* @param encryptionSecrets
* encryption secrets.
* @return a DT identifier
* @throws IOException
* failure
*/
@Override
public AbstractS3ATokenIdentifier createTokenIdentifier(final Optional<RoleModel.Policy> policy, final EncryptionSecrets encryptionSecrets, final Text renewer) throws IOException {
requireServiceStarted();
Preconditions.checkNotNull(awsCredentials, "No AWS credentials to use for a delegation token");
return new FullCredentialsTokenIdentifier(getCanonicalUri(), getOwnerText(), renewer, awsCredentials, encryptionSecrets, credentialOrigin);
} | 3.26 |
hadoop_AbfsManifestStoreOperations_bindToFileSystem_rdh | /**
* Bind to the store.
*
* @param filesystem
* FS.
* @param path
* path to work under
* @throws IOException
* binding problems.
*/
@Override
public void bindToFileSystem(FileSystem filesystem, Path path) throws IOException {
if (!(filesystem instanceof AzureBlobFileSystem)) {
throw new PathIOException(path.toString(), "Not an abfs filesystem: " + filesystem.getClass());
}
super.bindToFileSystem(filesystem, path);
try {
resilientCommitByRename = getFileSystem().createResilientCommitSupport(path);
// this also means that etags are preserved.
etagsPreserved = true;
LOG.debug("Bonded to filesystem with resilient commits under path {}", path);
} catch (UnsupportedOperationException e) {
LOG.debug("No resilient commit support under path {}", path);}
} | 3.26 |
hadoop_AbfsManifestStoreOperations_commitFile_rdh | /**
* Commit a file through an internal ABFS operation.
* If resilient commit is unavailable, invokes the superclass, which
* will raise an UnsupportedOperationException
*
* @param entry
* entry to commit
* @return the outcome
* @throws IOException
* any failure in resilient commit.
* @throws UnsupportedOperationException
* if not available.
*/
@Override
public CommitFileResult commitFile(final FileEntry entry) throws IOException {
if (resilientCommitByRename != null) {
final Pair<Boolean, Duration> result = resilientCommitByRename.commitSingleFileByRename(entry.getSourcePath(), entry.getDestPath(), entry.getEtag());
return CommitFileResult.fromResilientCommit(result.getLeft(), result.getRight());
} else {
return super.commitFile(entry);
}
} | 3.26 |
hadoop_AbfsManifestStoreOperations_storeSupportsResilientCommit_rdh | /**
* Resilient commits available on hierarchical stores.
*
* @return true if the FS can use etags on renames.
*/
@Override
public boolean storeSupportsResilientCommit() {
return resilientCommitByRename != null;
} | 3.26 |
hadoop_AbfsManifestStoreOperations_storePreservesEtagsThroughRenames_rdh | /**
* Etags are preserved through Gen2 stores, but not wasb stores.
*
* @param path
* path to probe.
* @return true if this store preserves etags.
*/
@Override
public boolean storePreservesEtagsThroughRenames(final Path path) {
return etagsPreserved;
} | 3.26 |
hadoop_MutableInverseQuantiles_setQuantiles_rdh | /**
* Sets quantileInfo.
*
* @param ucName
* capitalized name of the metric
* @param uvName
* capitalized type of the values
* @param desc
* uncapitalized long-form textual description of the metric
* @param lvName
* uncapitalized type of the values
* @param df
* Number formatter for inverse percentile value
*/
void setQuantiles(String ucName, String uvName, String desc, String lvName, DecimalFormat df) {
for (int i = 0; i < INVERSE_QUANTILES.length; i++) {
double inversePercentile = 100 * (1 - INVERSE_QUANTILES[i].quantile);
String v2 = ((ucName + df.format(inversePercentile)) + "thInversePercentile") + uvName;
String descTemplate = (((((df.format(inversePercentile) + " inverse percentile ") + lvName) + " with ") + getInterval()) + " second interval for ") + desc;
addQuantileInfo(i, info(v2, descTemplate));
}
} | 3.26 |
hadoop_MutableInverseQuantiles_getQuantiles_rdh | /**
* Returns the array of Inverse Quantiles declared in MutableInverseQuantiles.
*
* @return array of Inverse Quantiles
*/
public synchronized Quantile[] getQuantiles() {
return INVERSE_QUANTILES;
} | 3.26 |
hadoop_WriteManager_m0_rdh | // Do a possible commit before read request in case there is buffered data
// inside DFSClient which has been flushed but not synced.
int m0(DFSClient dfsClient, FileHandle fileHandle, long commitOffset) {
int status;
OpenFileCtx v17 = fileContextCache.get(fileHandle);
if (v17 == null) {
if (LOG.isDebugEnabled()) {
LOG.debug(((("No opened stream for fileId: " + fileHandle.dumpFileHandle()) + " commitOffset=") + commitOffset) + ". Return success in this case.");
}
status = Nfs3Status.NFS3_OK;
} else {
// commit request triggered by read won't create pending commit obj
COMMIT_STATUS ret = v17.checkCommit(dfsClient, commitOffset, null, 0, null, true);
switch (ret) {
case COMMIT_FINISHED :
case COMMIT_INACTIVE_CTX :
status = Nfs3Status.NFS3_OK;
break;
case COMMIT_INACTIVE_WITH_PENDING_WRITE :
case COMMIT_ERROR :
status = Nfs3Status.NFS3ERR_IO;
break;
case COMMIT_WAIT :
case COMMIT_SPECIAL_WAIT :
/**
* This should happen rarely in some possible cases, such as a read
* request arriving before DFSClient is able to quickly flush data to
* the DN, or prerequisite writes not being available. Won't wait,
* since we don't want to block the read.
*/
*/
status = Nfs3Status.NFS3ERR_JUKEBOX;
break;
case COMMIT_SPECIAL_SUCCESS :
// Read beyond eof could result in partial read
status = Nfs3Status.NFS3_OK;
break;
default :
LOG.error("Should not get commit return code: " + ret.name()); throw new RuntimeException("Should not get commit return code: " + ret.name());
}}
return status;
} | 3.26 |
hadoop_WriteManager_getFileAttr_rdh | /**
* If the file is in cache, update the size based on the cached data size
*/
Nfs3FileAttributes getFileAttr(DFSClient client, FileHandle fileHandle, IdMappingServiceProvider iug) throws IOException {
String fileIdPath = Nfs3Utils.getFileIdPath(fileHandle);
Nfs3FileAttributes attr = Nfs3Utils.getFileAttr(client, fileIdPath, iug);
if (attr != null) {
OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
if (openFileCtx != null) {
attr.setSize(openFileCtx.getNextOffset());
attr.setUsed(openFileCtx.getNextOffset());
}
}
return attr;
} | 3.26 |
hadoop_SliderFileSystem_deleteComponentDir_rdh | /**
* Deletes the component directory.
*
* @param serviceVersion
* service version
* @param compName
* component name
* @throws IOException
*/
public void deleteComponentDir(String serviceVersion, String compName) throws IOException {
Path path = getComponentDir(serviceVersion, compName);
if (fileSystem.exists(path)) {
fileSystem.delete(path, true);
LOG.debug("deleted dir {}", path);}
Path publicResourceDir = getComponentPublicResourceDir(serviceVersion, compName);
if (fileSystem.exists(publicResourceDir)) {
fileSystem.delete(publicResourceDir, true);
LOG.debug("deleted public resource dir {}", publicResourceDir);
}
} | 3.26 |
hadoop_SliderFileSystem_deleteComponentsVersionDirIfEmpty_rdh | /**
* Deletes the components version directory.
*
* @param serviceVersion
* service version
* @throws IOException
*/
public void deleteComponentsVersionDirIfEmpty(String serviceVersion) throws IOException {
Path path = new Path(new Path(getAppDir(), "components"), serviceVersion);
if (fileSystem.exists(path) && (fileSystem.listStatus(path).length == 0)) {
fileSystem.delete(path, true);
LOG.info("deleted dir {}", path);
}
Path publicResourceDir = new Path(new Path(getBasePath(), (getAppDir().getName() + "/") + "components"), serviceVersion);
if (fileSystem.exists(publicResourceDir) && (fileSystem.listStatus(publicResourceDir).length == 0)) {
fileSystem.delete(publicResourceDir, true);
LOG.info("deleted public resource dir {}", publicResourceDir);
}
} | 3.26 |
hadoop_SliderFileSystem_getComponentPublicResourceDir_rdh | /**
* Returns the component public resource directory path.
*
* @param serviceVersion
* service version
* @param compName
* component name
* @return component public resource directory
*/
public Path getComponentPublicResourceDir(String serviceVersion, String compName) {
return new Path(new Path(getBasePath(), (getAppDir().getName() + "/") + "components"), (serviceVersion + "/") + compName);
} | 3.26 |
hadoop_SliderFileSystem_getComponentDir_rdh | /**
* Returns the component directory path.
*
* @param serviceVersion
* service version
* @param compName
* component name
* @return component directory
*/
public Path getComponentDir(String serviceVersion, String compName) {
return new Path(new Path(getAppDir(), "components"), (serviceVersion + "/") + compName);
} | 3.26 |
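A hedged sketch of the paths produced by the two directory helpers above; the version and component names are placeholders, the SliderFileSystem instance is assumed to come from the service context, and imports (org.apache.hadoop.fs.Path) are omitted.

// Hedged sketch: placeholder version/component names.
static void showComponentPaths(SliderFileSystem sfs) {
Path compDir = sfs.getComponentDir("1.0.0", "web");
// -> <appDir>/components/1.0.0/web
Path publicDir = sfs.getComponentPublicResourceDir("1.0.0", "web");
// -> <basePath>/<appDirName>/components/1.0.0/web
System.out.println(compDir + " , " + publicDir);
}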
hadoop_BlockManagerParameters_withBufferPoolSize_rdh | /**
* Sets the in-memory cache size as number of blocks.
*
* @param poolSize
* The buffer pool size as number of blocks.
* @return The builder.
*/
public BlockManagerParameters withBufferPoolSize(final int poolSize) {
this.bufferPoolSize = poolSize;
return this;
} | 3.26 |
hadoop_BlockManagerParameters_withMaxBlocksCount_rdh | /**
* Sets the max blocks count to be kept in cache at any time.
*
* @param blocksCount
* The max blocks count.
* @return The builder.
*/
public BlockManagerParameters withMaxBlocksCount(final int blocksCount) {
this.maxBlocksCount = blocksCount;
return this;
} | 3.26 |
hadoop_BlockManagerParameters_getTrackerFactory_rdh | /**
*
* @return The duration tracker with statistics to update.
*/
public DurationTrackerFactory getTrackerFactory() {
return trackerFactory;
} | 3.26 |
hadoop_BlockManagerParameters_getBlockData_rdh | /**
*
* @return The object holding blocks data info for the underlying file.
*/
public BlockData getBlockData() {
return blockData;
} | 3.26 |
hadoop_BlockManagerParameters_getConf_rdh | /**
*
* @return The configuration object.
*/
public Configuration getConf() {
return conf;
} | 3.26 |
hadoop_BlockManagerParameters_getFuturePool_rdh | /**
*
* @return The Executor future pool to perform async prefetch tasks.
*/
public ExecutorServiceFuturePool getFuturePool() {
return futurePool;
} | 3.26 |
hadoop_BlockManagerParameters_withTrackerFactory_rdh | /**
* Sets the duration tracker with statistics to update.
*
* @param factory
* The tracker factory object.
* @return The builder.
*/
public BlockManagerParameters withTrackerFactory(final DurationTrackerFactory factory) {
this.trackerFactory = factory;
return this;
} | 3.26 |
hadoop_BlockManagerParameters_getMaxBlocksCount_rdh | /**
*
* @return The max blocks count to be kept in cache at any time.
*/
public int getMaxBlocksCount() {
return maxBlocksCount;
} | 3.26 |
hadoop_BlockManagerParameters_withBlockData_rdh | /**
* Sets the object holding blocks data info for the underlying file.
*
* @param data
* The block data object.
* @return The builder.
*/
public BlockManagerParameters withBlockData(final BlockData data) {
this.blockData = data;
return this;
} | 3.26 |
hadoop_BlockManagerParameters_getBufferPoolSize_rdh | /**
*
* @return The size of the in-memory cache.
*/
public int getBufferPoolSize() {
return bufferPoolSize;
} | 3.26 |
hadoop_BlockManagerParameters_withFuturePool_rdh | /**
* Sets the executor service future pool that is later used to perform
* async prefetch tasks.
*
* @param pool
* The future pool.
* @return The builder.
*/
public BlockManagerParameters withFuturePool(final ExecutorServiceFuturePool pool) {
this.futurePool = pool;
return this;
} | 3.26 |
hadoop_BlockManagerParameters_withConf_rdh | /**
* Sets the configuration object.
*
* @param configuration
* The configuration object.
* @return The builder.
*/
public BlockManagerParameters withConf(final Configuration configuration) {
this.conf = configuration;
return this;
} | 3.26 |
hadoop_BlockManagerParameters_withLocalDirAllocator_rdh | /**
* Sets the local dir allocator for round-robin disk allocation
* while creating files.
*
* @param dirAllocator
* The local dir allocator object.
* @return The builder.
*/
public BlockManagerParameters withLocalDirAllocator(final LocalDirAllocator dirAllocator) {
this.localDirAllocator = dirAllocator;
return this;
} | 3.26 |
hadoop_BlockManagerParameters_getPrefetchingStatistics_rdh | /**
*
* @return The prefetching statistics for the stream.
*/
public PrefetchingStatistics getPrefetchingStatistics() {
return prefetchingStatistics;
} | 3.26 |
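A hedged sketch chaining the builder setters shown in the BlockManagerParameters snippets above; the no-arg constructor and the concrete values are assumptions, only setters that appear in this excerpt are used, and imports are omitted.

// Hedged sketch: values are placeholders, constructor assumed.
static BlockManagerParameters buildParams(Configuration conf,
    ExecutorServiceFuturePool pool, BlockData data, LocalDirAllocator dirAllocator) {
return new BlockManagerParameters()
    .withConf(conf)
    .withFuturePool(pool)
    .withBlockData(data)
    .withLocalDirAllocator(dirAllocator)
    .withBufferPoolSize(4)   // in-memory cache size, in blocks
    .withMaxBlocksCount(96); // max blocks kept in cache at any time
}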
hadoop_SchedulerHealth_getReleaseCount_rdh | /**
* Get the count of release from the latest scheduler health report.
*
* @return release count
*/
public Long getReleaseCount() {
return getOperationCount(Operation.RELEASE);
} | 3.26 |
hadoop_SchedulerHealth_getAggregateFulFilledReservationsCount_rdh | /**
* Get the aggregate of all the fulfilled reservations count.
*
* @return aggregate fulfilled reservations count
*/
public Long getAggregateFulFilledReservationsCount() {
return getAggregateOperationCount(Operation.FULFILLED_RESERVATION);
} | 3.26 |
hadoop_SchedulerHealth_getResourcesReserved_rdh | /**
* Get the resources reserved in the last scheduler run.
*
* @return resources reserved
*/
public Resource getResourcesReserved() {
return getResourceDetails(Operation.RESERVATION);
} | 3.26 |
hadoop_SchedulerHealth_getAggregatePreemptionCount_rdh | /**
* Get the aggregate of all the preemption count.
*
* @return aggregate preemption count
*/
public Long getAggregatePreemptionCount() {
return getAggregateOperationCount(Operation.PREEMPTION);
} | 3.26 |
hadoop_SchedulerHealth_getLastReleaseDetails_rdh | /**
* Get the details of last release.
*
* @return last release details
*/
public DetailedInformation getLastReleaseDetails() {
return getDetailedInformation(Operation.RELEASE);
} | 3.26 |
hadoop_SchedulerHealth_getReservationCount_rdh | /**
* Get the count of reservation from the latest scheduler health report.
*
* @return reservation count
*/
public Long getReservationCount() {
return getOperationCount(Operation.RESERVATION);
} | 3.26 |
hadoop_SchedulerHealth_getPreemptionCount_rdh | /**
* Get the count of preemption from the latest scheduler health report.
*
* @return preemption count
*/
public Long getPreemptionCount() {
return getOperationCount(Operation.PREEMPTION);
} | 3.26 |
hadoop_SchedulerHealth_getLastSchedulerRunTime_rdh | /**
* Get the timestamp of the latest scheduler operation.
*
* @return the scheduler's latest timestamp
*/
public long getLastSchedulerRunTime() {
return lastSchedulerRunTime;
} | 3.26 |
hadoop_SchedulerHealth_getLastAllocationDetails_rdh | /**
* Get the details of last allocation.
*
* @return last allocation details
*/
public DetailedInformation getLastAllocationDetails() {
return getDetailedInformation(Operation.ALLOCATION);
} | 3.26 |
hadoop_SchedulerHealth_getLastPreemptionDetails_rdh | /**
* Get the details of last preemption.
*
* @return last preemption details
*/
public DetailedInformation getLastPreemptionDetails() {
return getDetailedInformation(Operation.PREEMPTION);
} | 3.26 |
hadoop_SchedulerHealth_getAllocationCount_rdh | /**
* Get the count of allocation from the latest scheduler health report.
*
* @return allocation count
*/
public Long getAllocationCount() {
return getOperationCount(Operation.ALLOCATION);
} | 3.26 |
hadoop_SchedulerHealth_getResourcesReleased_rdh | /**
* Get the resources released in the last scheduler run.
*
* @return resources released
*/
public Resource getResourcesReleased() {
return getResourceDetails(Operation.RELEASE);
} | 3.26 |
hadoop_SchedulerHealth_getAggregateAllocationCount_rdh | /**
* Get the aggregate of all the allocations count.
*
* @return aggregate allocation count
*/
public Long getAggregateAllocationCount() {
return getAggregateOperationCount(Operation.ALLOCATION);
} | 3.26 |
hadoop_SchedulerHealth_m0_rdh | /**
* Get the resources allocated in the last scheduler run.
*
* @return resources allocated
*/
public Resource m0() {
return getResourceDetails(Operation.ALLOCATION);
} | 3.26 |
hadoop_SchedulerHealth_getLastReservationDetails_rdh | /**
* Get the details of last reservation.
*
* @return last reservation details
*/
public DetailedInformation getLastReservationDetails() {
return getDetailedInformation(Operation.RESERVATION);
} | 3.26 |
hadoop_SchedulerHealth_getAggregateReleaseCount_rdh | /**
* Get the aggregate of all the release count.
*
* @return aggregate release count
*/
public Long getAggregateReleaseCount() {
return getAggregateOperationCount(Operation.RELEASE);
} | 3.26 |
hadoop_SchedulerHealth_getAggregateReservationCount_rdh | /**
* Get the aggregate of all the reservations count.
*
* @return aggregate reservation count
*/
public Long getAggregateReservationCount() {
return getAggregateOperationCount(Operation.RESERVATION);
} | 3.26 |
hadoop_SimpleNamingService_getNewName_rdh | /**
* Generate a new checkpoint Name
*
* @return the checkpoint name
*/
public String getNewName() {
return "checkpoint_" + name;
} | 3.26 |
hadoop_XFrameOptionsFilter_addIntHeader_rdh | // don't allow additional values to be added along
// with the configured options value
@Override
public void addIntHeader(String name, int value) {
if (!name.equals(X_FRAME_OPTIONS)) {
super.addIntHeader(name, value);
}
} | 3.26 |
hadoop_ResourceVector_increment_rdh | /**
* Increments the given resource by the specified value.
*
* @param resourceName
* name of the resource
* @param value
* value to be added to the resource's current value
*/
public void increment(String resourceName, double value) {
setValue(resourceName, getValue(resourceName) + value);
} | 3.26 |
hadoop_ResourceVector_decrement_rdh | /**
* Decrements the given resource by the specified value.
*
* @param resourceName
* name of the resource
* @param value
* value to be subtracted from the resource's current value
*/
public void decrement(String resourceName, double value) {
setValue(resourceName, getValue(resourceName) - value);
} | 3.26 |
hadoop_ResourceVector_m0_rdh | /**
* Creates a new {@code ResourceVector} with all pre-defined resources set to
* the same value.
*
* @param value
* the value to set all resources to
* @return uniform resource vector
*/
public static ResourceVector m0(double value) {
ResourceVector emptyResourceVector = new ResourceVector();
for (ResourceInformation resource : ResourceUtils.getResourceTypesArray()) {
emptyResourceVector.setValue(resource.getName(), value);
}
return emptyResourceVector;
} | 3.26 |
hadoop_ResourceVector_m1_rdh | /**
* Creates a new {@code ResourceVector} with the values set in a
* {@code Resource} object.
*
* @param resource
* resource object the resource vector will be based on
* @return resource vector with the values taken from the given resource
*/
public static ResourceVector m1(Resource resource) {
ResourceVector resourceVector = new ResourceVector();
for (ResourceInformation resourceInformation : resource.getResources()) {
resourceVector.setValue(resourceInformation.getName(), resourceInformation.getValue());
}
return resourceVector;
} | 3.26 |
hadoop_ResourceVector_newInstance_rdh | /**
* Creates a new {@code ResourceVector} with all pre-defined resources set to
* zero.
*
* @return zero resource vector
*/
public static ResourceVector newInstance() {
ResourceVector zeroResourceVector = new ResourceVector();
for (ResourceInformation resource : ResourceUtils.getResourceTypesArray()) {
zeroResourceVector.setValue(resource.getName(), 0);
}
return zeroResourceVector;
} | 3.26 |
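A hedged sketch of the factory and mutator methods shown above; m0/m1 are used under the obfuscated names as they appear here, the resource names assume the standard memory-mb/vcores types are defined, and imports are omitted.

// Hedged sketch: m0/m1 are the obfuscated factory names as shown above.
static ResourceVector demo(Resource clusterResource) {
ResourceVector zero = ResourceVector.newInstance();  // every resource type at 0
ResourceVector uniform = ResourceVector.m0(100.0);   // every resource type at 100
ResourceVector fromResource = ResourceVector.m1(clusterResource);
fromResource.increment("memory-mb", 1024);
fromResource.decrement("vcores", 1);
return fromResource;
}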
hadoop_OuterJoinRecordReader_combine_rdh | /**
* Emit everything from the collector.
*/
protected boolean combine(Object[] srcs, TupleWritable dst) {
assert srcs.length == dst.size();
return true;
} | 3.26 |
hadoop_Base64_decode_rdh | /**
* Decodes a given Base64 string into its corresponding byte array.
*
* @param data
* the Base64 string, as a <code>String</code> object, to decode
* @return the corresponding decoded byte array
* @throws IllegalArgumentException
* If the string is not a valid base64 encoded string
*/
public static byte[] decode(final String data) {
if (data == null) {
throw new IllegalArgumentException("The data parameter is not a valid base64-encoded string.");
}
int byteArrayLength = (3 * data.length()) / 4;
if (data.endsWith("==")) {
byteArrayLength -= 2;
} else if (data.endsWith("=")) {
byteArrayLength -= 1;
}
final byte[] retArray = new byte[byteArrayLength];
int byteDex = 0;
int charDex = 0;
for (; charDex < data.length(); charDex += 4) {
// get 4 chars, convert to 3 bytes
final int char1 = DECODE_64[((byte) (data.charAt(charDex)))];
final int char2 = DECODE_64[((byte) (data.charAt(charDex + 1)))];
final int char3 = DECODE_64[((byte) (data.charAt(charDex + 2)))];
final int char4 = DECODE_64[((byte) (data.charAt(charDex + 3)))];
if ((((char1 < 0) || (char2 < 0)) || (char3 == (-1))) || (char4 == (-1))) {
// invalid character(-1), or bad padding (-2)
throw new IllegalArgumentException("The data parameter is not a valid base64-encoded string.");
}
int tVal = char1 << 18;
tVal += char2 << 12;
tVal += (char3 & 0xff) << 6;
tVal += char4 & 0xff;
if (char3 == (-2)) {
// two "==" pad chars, check bits 12-24
tVal &= 0xfff000;
retArray[byteDex++] = ((byte) ((tVal >> 16) & 0xff));
} else if (char4 == (-2)) {
// one pad char "=" , check bits 6-24.
tVal &= 0xffffc0;
retArray[byteDex++] = ((byte) ((tVal >> 16) & 0xff));
retArray[byteDex++] = ((byte) ((tVal >> 8) & 0xff));
} else {
// No pads take all 3 bytes, bits 0-24
retArray[byteDex++] = ((byte) ((tVal >> 16) & 0xff));
retArray[byteDex++] = ((byte) ((tVal >> 8) & 0xff));
retArray[byteDex++] = ((byte) (tVal & 0xff));
}
}
return retArray;
} | 3.26 |
hadoop_Base64_validateIsBase64String_rdh | /**
* Determines whether the given string contains only Base64 characters.
*
* @param data
* the string, as a <code>String</code> object, to validate
* @return <code>true</code> if <code>data</code> is a valid Base64 string, otherwise <code>false</code>
*/
public static boolean validateIsBase64String(final String data) {
if ((data == null) || ((data.length() % 4) != 0)) {
return false;
}
for (int m = 0; m < data.length(); m++) {
final byte charByte = ((byte) (data.charAt(m)));
// pad char detected
if (DECODE_64[charByte] == (-2)) {
if (m < (data.length() - 2)) {
return false;
} else if ((m == (data.length() - 2)) && (DECODE_64[((byte) (data.charAt(m + 1)))] != (-2))) {
return false;
}
}
if ((charByte < 0) || (DECODE_64[charByte] == (-1))) {
return false;
}
}
return true;
} | 3.26 |
hadoop_Base64_encode_rdh | /**
* Encodes a byte array as a Base64 string.
*
* @param data
* the byte array to encode
* @return the Base64-encoded string, as a <code>String</code> object
*/
public static String encode(final Byte[] data) {
final StringBuilder builder = new StringBuilder();
final int dataRemainder = data.length % 3;
int j = 0;
int n = 0;
for (; j < data.length; j += 3) {
if (j < (data.length - dataRemainder)) {
n = (((data[j] & 0xff) << 16) + ((data[j + 1] & 0xff) << 8)) + (data[j + 2] & 0xff);
} else if (dataRemainder == 1) {
n = (data[j] & 0xff) << 16;
} else if (dataRemainder == 2) {
n = ((data[j] & 0xff) << 16) + ((data[j + 1] & 0xff) << 8);
}
// Left here for readability
// byte char1 = (byte) ((n >>> 18) & 0x3F);
// byte char2 = (byte) ((n >>> 12) & 0x3F);
// byte char3 = (byte) ((n >>> 6) & 0x3F);
// byte char4 = (byte) (n & 0x3F);
builder.append(BASE_64_CHARS.charAt(((byte) ((n >>> 18) & 0x3f))));
builder.append(BASE_64_CHARS.charAt(((byte) ((n >>> 12) & 0x3f))));
builder.append(BASE_64_CHARS.charAt(((byte) ((n >>> 6) & 0x3f))));
builder.append(BASE_64_CHARS.charAt(((byte) (n & 0x3f))));
}
final int bLength = builder.length();
// append '=' to pad
if ((data.length % 3) == 1) {
builder.replace(bLength - 2, bLength, "==");
} else if ((data.length % 3) == 2) {
builder.replace(bLength - 1, bLength, "=");
}
return builder.toString();
} | 3.26 |
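A hedged round-trip sketch for the encode/decode pair above. Note that encode() is declared to take a boxed Byte[], so a primitive byte[] has to be boxed first; the sample string and expected output are illustrative, and imports are omitted.

// Hedged sketch: boxes the bytes because encode() takes Byte[] as declared above.
static void roundTrip() {
byte[] raw = "hadoop".getBytes(java.nio.charset.StandardCharsets.UTF_8);
Byte[] boxed = new Byte[raw.length];
for (int i = 0; i < raw.length; i++) {
boxed[i] = raw[i];
}
String encoded = Base64.encode(boxed);   // "aGFkb29w" (6 bytes, so no padding)
byte[] decoded = Base64.decode(encoded); // back to the original bytes
assert java.util.Arrays.equals(raw, decoded);
}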