name (string, length 12–178) | code_snippet (string, length 8–36.5k) | score (float64, 3.26–3.68) |
---|---|---|
hadoop_OpenFileCtxCache_cleanAll_rdh | // Evict all entries
void cleanAll() {
ArrayList<OpenFileCtx> cleanedContext = new ArrayList<OpenFileCtx>();
synchronized(this) {
    Iterator<Entry<FileHandle, OpenFileCtx>> it = openFileMap.entrySet().iterator();
if (LOG.isTraceEnabled()) {
LOG.trace("openFileMap size:" + size());
}
while (it.hasNext()) {
Entry<FileHandle, OpenFileCtx> pairs = it.next();
OpenFileCtx ctx = pairs.getValue();
it.remove();
cleanedContext.add(ctx);
}
}
// Invoke the cleanup outside the lock
for (OpenFileCtx ofc : cleanedContext) {
ofc.cleanup();
}} | 3.26 |
hadoop_AMRMClientRelayerMetrics_getInstance_rdh | /**
 * Get the singleton instance, initializing and registering it on first use.
*
* @return the singleton
*/
public static AMRMClientRelayerMetrics getInstance() {
if (!isInitialized.get()) {
synchronized(AMRMClientRelayerMetrics.class) {
if (instance == null) {
instance = new AMRMClientRelayerMetrics();
DefaultMetricsSystem.instance().register(RECORD_INFO.name(), RECORD_INFO.description(), instance);
isInitialized.set(true);
}
}
}
return instance;
} | 3.26 |
hadoop_ECBlockGroup_getErasedCount_rdh | /**
 * Get the number of erased blocks in this group.
 *
 * @return count of erased blocks
*/
public int getErasedCount() {
int erasedCount = 0;
  for (ECBlock dataBlock : dataBlocks) {
    if (dataBlock.isErased()) {
      erasedCount++;
    }
  }
  for (ECBlock parityBlock : parityBlocks) {
    if (parityBlock.isErased()) {
      erasedCount++;
    }
  }
return erasedCount;
} | 3.26 |
hadoop_ECBlockGroup_getDataBlocks_rdh | /**
* Get data blocks
*
* @return data blocks
*/
public ECBlock[] getDataBlocks() {
  return dataBlocks;
} | 3.26 |
hadoop_ECBlockGroup_getParityBlocks_rdh | /**
* Get parity blocks
*
* @return parity blocks
*/
public ECBlock[] getParityBlocks() {
return parityBlocks;
} | 3.26 |
hadoop_TaskAttemptContainerLaunchedEvent_getShufflePort_rdh | /**
* Get the port that the shuffle handler is listening on. This is only
* valid if the type of the event is TA_CONTAINER_LAUNCHED
*
* @return the port the shuffle handler is listening on.
*/
public int getShufflePort() {
return shufflePort;
} | 3.26 |
hadoop_IOUtilsClient_cleanupWithLogger_rdh | /**
* Close the Closeable objects and <b>ignore</b> any {@link IOException} or
* null pointers. Must only be used for cleanup in exception handlers.
*
* @param log
* the log to record problems to at debug level. Can be null.
* @param closeables
* the objects to close
*/
public static void cleanupWithLogger(Logger log, Closeable... closeables) {
for (Closeable c : closeables) {
if (c != null) {
try {
c.close();
} catch (Throwable e) {
if ((log != null) && log.isDebugEnabled()) {
log.debug("Exception in closing " + c, e);
}
}
}
}
} | 3.26 |
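A typical call site for this helper is a finally block where some of the streams may never have been opened; the fragment below is a hedged sketch only (fs, src, dst and LOG are assumed to exist in the caller):

```java
FSDataInputStream in = null;
FSDataOutputStream out = null;
try {
  in = fs.open(src);
  out = fs.create(dst);
  IOUtils.copyBytes(in, out, 4096, false);
} finally {
  // Never throws: any close() failure is logged at debug level and swallowed.
  IOUtilsClient.cleanupWithLogger(LOG, in, out);
}
```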
hadoop_PendingSet_serializer_rdh | /**
* Get a shared JSON serializer for this class.
*
* @return a serializer.
*/
public static JsonSerialization<PendingSet> serializer() {
return new JsonSerialization<>(PendingSet.class, false, false);
} | 3.26 |
hadoop_PendingSet_size_rdh | /**
* Number of commits.
*
* @return the number of commits in this structure.
*/
public int size() {
return commits != null ? commits.size() : 0;
} | 3.26 |
hadoop_PendingSet_putExtraData_rdh | /**
* Set/Update an extra data entry.
*
* @param key
* key
* @param value
* value
*/
public void putExtraData(String key, String value) {
  extraData.put(key, value);
} | 3.26 |
hadoop_PendingSet_getVersion_rdh | /**
*
* @return the version marker.
*/
public int getVersion() {
return f0;
} | 3.26 |
hadoop_PendingSet_getCommits_rdh | /**
*
* @return commit list.
*/
public List<SinglePendingCommit> getCommits() {
return commits;
} | 3.26 |
hadoop_PendingSet_validate_rdh | /**
 * Validate the data: all fields which must be non-empty must be set.
*
* @throws ValidationFailure
* if the data is invalid
*/
public void validate() throws ValidationFailure {
verify(f0 == VERSION, "Wrong version: %s", f0);
validateCollectionClass(extraData.keySet(), String.class);
validateCollectionClass(extraData.values(), String.class);
Set<String> destinations = new HashSet<>(commits.size());
validateCollectionClass(commits, SinglePendingCommit.class);
  for (SinglePendingCommit c : commits) {
c.validate();
verify(!destinations.contains(c.getDestinationKey()), "Destination %s is written to by more than one pending commit", c.getDestinationKey());
destinations.add(c.getDestinationKey());
}
} | 3.26 |
hadoop_PendingSet_load_rdh | /**
* Load an instance from a file, then validate it.
*
* @param fs
* filesystem
* @param path
* path
* @param status
* status of file to load
* @return the loaded instance
* @throws IOException
* IO failure
* @throws ValidationFailure
* if the data is invalid
*/
public static PendingSet load(FileSystem fs, Path path,
    @Nullable FileStatus status) throws IOException {
LOG.debug("Reading pending commits in file {}", path);
PendingSet instance = serializer().load(fs, path, status);
instance.validate();
return instance;
} | 3.26 |
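A minimal read-path sketch, assuming a FileSystem fs, a Path path, a Logger LOG and a hypothetical process() handler are available (validation happens inside load()):

```java
PendingSet pending = PendingSet.load(fs, path, /* status */ null);
LOG.info("Loaded {} pending commits (version {})", pending.size(), pending.getVersion());
for (SinglePendingCommit commit : pending.getCommits()) {
  process(commit); // hypothetical per-commit handler
}
```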
hadoop_PendingSet_m0_rdh | /**
* Add a commit.
*
* @param commit
* the single commit
*/
public void m0(SinglePendingCommit commit) {
commits.add(commit);
// add any statistics.
IOStatisticsSnapshot st = commit.getIOStatistics();
if (st != null) {
iostats.aggregate(st);
st.clear();
}
} | 3.26 |
hadoop_PendingSet_m1_rdh | /**
*
* @return Job ID, if known.
*/
public String m1() {
return jobId;
} | 3.26 |
hadoop_PendingSet_readObject_rdh | /**
* Deserialize via java Serialization API: deserialize the instance
* and then call {@link #validate()} to verify that the deserialized
* data is valid.
*
* @param inStream
* input stream
* @throws IOException
* IO problem or validation failure
* @throws ClassNotFoundException
* reflection problems
*/
private void readObject(ObjectInputStream inStream) throws IOException, ClassNotFoundException {
inStream.defaultReadObject();
validate();
} | 3.26 |
hadoop_ApplicationPlacementAllocatorFactory_getAppPlacementAllocator_rdh | /**
* Get AppPlacementAllocator related to the placement type requested.
*
* @param appPlacementAllocatorName
* allocator class name.
* @param appSchedulingInfo
* app SchedulingInfo.
* @param schedulerRequestKey
* scheduler RequestKey.
* @param rmContext
* RMContext.
* @return Specific AppPlacementAllocator instance based on type
*/
public static AppPlacementAllocator<SchedulerNode> getAppPlacementAllocator(String appPlacementAllocatorName, AppSchedulingInfo appSchedulingInfo, SchedulerRequestKey schedulerRequestKey, RMContext rmContext) {
Class<?> policyClass;
try {
if (StringUtils.isEmpty(appPlacementAllocatorName)) {
policyClass = ApplicationSchedulingConfig.DEFAULT_APPLICATION_PLACEMENT_TYPE_CLASS;
} else {
policyClass = Class.forName(appPlacementAllocatorName);
}
} catch (ClassNotFoundException e) {
policyClass = ApplicationSchedulingConfig.DEFAULT_APPLICATION_PLACEMENT_TYPE_CLASS;
  }
  if (!AppPlacementAllocator.class.isAssignableFrom(policyClass)) {
    policyClass = ApplicationSchedulingConfig.DEFAULT_APPLICATION_PLACEMENT_TYPE_CLASS;
  }
  @SuppressWarnings("unchecked")
  AppPlacementAllocator<SchedulerNode> placementAllocatorInstance = (AppPlacementAllocator<SchedulerNode>) ReflectionUtils.newInstance(policyClass, null);
  placementAllocatorInstance.initialize(appSchedulingInfo, schedulerRequestKey, rmContext);
return placementAllocatorInstance;
} | 3.26 |
hadoop_RouterClientMetrics_incInvokedMethod_rdh | /**
* Increase the metrics based on the method being invoked.
*
* @param method
* method being invoked
*/
public void incInvokedMethod(Method method) {
switch (method.getName()) {
case "getBlockLocations" :
getBlockLocationsOps.incr();
break;
      case "getServerDefaults" :
        getServerDefaultsOps.incr();
break;
case "create" :
createOps.incr();
break;
case "append" :
appendOps.incr();
break;
case "recoverLease" :
recoverLeaseOps.incr();
break;
      case "setReplication" :
        setReplicationOps.incr();
break;
case "setStoragePolicy" :
setStoragePolicyOps.incr();
break;
case "getStoragePolicies" :
getStoragePoliciesOps.incr();
break;
case "setPermission" :
setPermissionOps.incr();
break;
case "setOwner" :
setOwnerOps.incr();
break;
case "addBlock" :
addBlockOps.incr();
break;
case "getAdditionalDatanode" :
getAdditionalDatanodeOps.incr();
break;
case "abandonBlock" :
abandonBlockOps.incr();
break;
case "complete" :
completeOps.incr();
break;
case "updateBlockForPipeline" :
updateBlockForPipelineOps.incr();
break;
case "updatePipeline" :
updatePipelineOps.incr();
break;
case "getPreferredBlockSize" :
getPreferredBlockSizeOps.incr();
break;
case "rename" :
renameOps.incr();
break;
case "rename2" :
rename2Ops.incr();
break;
      case "concat" :
        concatOps.incr();
        break;
      case "truncate" :
        truncateOps.incr();
        break;
      case "delete" :
        deleteOps.incr();
        break;
      case "mkdirs" :
        mkdirsOps.incr();
        break;
      case "renewLease" :
        renewLeaseOps.incr();
        break;
      case "getListing" :
        getListingOps.incr();
        break;
      case "getBatchedListing" :
        getBatchedListingOps.incr();
        break;
      case "getFileInfo" :
        getFileInfoOps.incr();
break;
case "isFileClosed" :
isFileClosedOps.incr();
break;
case "getFileLinkInfo" :
getFileLinkInfoOps.incr();
break;
case "getLocatedFileInfo" :
getLocatedFileInfoOps.incr();
break;
case "getStats" :
getStatsOps.incr();
break;
case "getDatanodeReport" :
getDatanodeReportOps.incr();
break;
case "getDatanodeStorageReport" :
getDatanodeStorageReportOps.incr();
break;
case "setSafeMode" :
f0.incr();
break;
case "restoreFailedStorage" :
restoreFailedStorageOps.incr();
break;
case "saveNamespace" :
        saveNamespaceOps.incr();
        break;
      case "rollEdits" :
        rollEditsOps.incr();
break;
case "refreshNodes" :
refreshNodesOps.incr();
break;
case "finalizeUpgrade" :
finalizeUpgradeOps.incr();
break;
      case "upgradeStatus" :
        upgradeStatusOps.incr();
        break;
      case "rollingUpgrade" :
rollingUpgradeOps.incr();
break;
case "metaSave" :
metaSaveOps.incr();
break;
case "listCorruptFileBlocks" :
listCorruptFileBlocksOps.incr();
break;
case "setBalancerBandwidth" :
setBalancerBandwidthOps.incr();
break;
case "getContentSummary" :
getContentSummaryOps.incr();
break;
case "fsync" :
fsyncOps.incr();
break;
case "setTimes" :
setTimesOps.incr();
break;
case "createSymlink" :
createSymlinkOps.incr();
break;
case "getLinkTarget" :
getLinkTargetOps.incr();
break;
case "allowSnapshot" :
allowSnapshotOps.incr();
break;
case "disallowSnapshot" :
        disallowSnapshotOps.incr();
        break;
      case "renameSnapshot" :
        renameSnapshotOps.incr();
        break;
      case "getSnapshottableDirListing" :
        getSnapshottableDirListingOps.incr();
        break;
case "getSnapshotListing" :
getSnapshotListingOps.incr();
break;
case "getSnapshotDiffReport" :
getSnapshotDiffReportOps.incr();
break;
case "getSnapshotDiffReportListing" :
getSnapshotDiffReportListingOps.incr();
break;
case "addCacheDirective" :
addCacheDirectiveOps.incr();
break;
case "modifyCacheDirective" :
modifyCacheDirectiveOps.incr();
break;
case "removeCacheDirective" :
removeCacheDirectiveOps.incr();
break;
case "listCacheDirectives" :
listCacheDirectivesOps.incr();
break;
case "addCachePool" :
addCachePoolOps.incr();
break;
case "modifyCachePool" :
modifyCachePoolOps.incr();
break;
case "removeCachePool" :
removeCachePoolOps.incr();
break;
case "listCachePools" :
listCachePoolsOps.incr();
break;
case "modifyAclEntries" :
modifyAclEntriesOps.incr();
break;
case "removeAclEntries" :
removeAclEntriesOps.incr();
break;
case "removeDefaultAcl" :
removeDefaultAclOps.incr();
break;
case "removeAcl" :
removeAclOps.incr();
break;
case "setAcl" :
setAclOps.incr();
break;
case "getAclStatus" :
getAclStatusOps.incr();
break;
case "createEncryptionZone" :
createEncryptionZoneOps.incr();
break;
case "getEZForPath" :
getEZForPathOps.incr();
break;
      case "listEncryptionZones" :
listEncryptionZonesOps.incr();
break;
case "reencryptEncryptionZone" :
reencryptEncryptionZoneOps.incr();
break;
case "listReencryptionStatus" :
listReencryptionStatusOps.incr();
break;
case "setXAttr" :
setXAttrOps.incr();
break;
case "getXAttrs" :
getXAttrsOps.incr();
break;
case "listXAttrs" :
listXAttrsOps.incr();
break;
      case "removeXAttr" :
removeXAttrsOps.incr();
break;
case "checkAccess" :
checkAccessOps.incr();
break;
      case "getCurrentEditLogTxid" :
        getCurrentEditLogTxidOps.incr();
break;
case "getEditsFromTxid" :
getEditsFromTxidOps.incr();
break;
case "getDataEncryptionKey" :
getDataEncryptionKeyOps.incr();
        break;
      case "createSnapshot" :
        createSnapshotOps.incr();
        break;
      case "deleteSnapshot" :
        deleteSnapshotOps.incr();
        break;
      case "setQuota" :
        setQuotaOps.incr();
break;
case "getQuotaUsage" :
getQuotaUsageOps.incr();
break;
case "reportBadBlocks" :
reportBadBlocksOps.incr();
break;
case "unsetStoragePolicy" :
unsetStoragePolicyOps.incr();
break;
case "getStoragePolicy" :
getStoragePolicyOps.incr();
break;
case "getErasureCodingPolicies" :
getErasureCodingPoliciesOps.incr();
break;
case "getErasureCodingCodecs" :
getErasureCodingCodecsOps.incr();
break;
case "addErasureCodingPolicies" :
addErasureCodingPoliciesOps.incr();
break;
      case "removeErasureCodingPolicy" :
        f1.incr();
        break;
case "disableErasureCodingPolicy" :
disableErasureCodingPolicyOps.incr();
break;
case "enableErasureCodingPolicy" :
enableErasureCodingPolicyOps.incr();
break;
case "getErasureCodingPolicy" :
getErasureCodingPolicyOps.incr();
break;
case "setErasureCodingPolicy" :
setErasureCodingPolicyOps.incr();
break;
case "unsetErasureCodingPolicy" :
unsetErasureCodingPolicyOps.incr();
break;
case "getECTopologyResultForPolicies" :
getECTopologyResultForPoliciesOps.incr();
break;
case "getECBlockGroupStats" :
getECBlockGroupStatsOps.incr();
break;
case "getReplicatedBlockStats" :
getReplicatedBlockStatsOps.incr();
break;
case "listOpenFiles" :
listOpenFilesOps.incr();
break;
case "msync" :
msyncOps.incr();
break;
case "satisfyStoragePolicy" :
satisfyStoragePolicyOps.incr();
break;
case "getHAServiceState" :
getHAServiceStateOps.incr();
break;
case "getSlowDatanodeReport" :
getSlowDatanodeReportOps.incr();
break;
default :
otherOps.incr();
}
} | 3.26 |
hadoop_RouterClientMetrics_incInvokedConcurrent_rdh | /**
* Increase the concurrent metrics based on the method being invoked.
*
* @param method
* concurrently invoked method
*/
public void incInvokedConcurrent(Method method) {
switch (method.getName()) {
case "setReplication" :
concurrentSetReplicationOps.incr();
break;
case "setPermission" :
concurrentSetPermissionOps.incr();
break;
case "setOwner" :
concurrentSetOwnerOps.incr();
break;
case "rename" :
f2.incr();
break;
      case "rename2" :
        concurrentRename2Ops.incr();
break;
case "delete" :
concurrentDeleteOps.incr();
break;
case "mkdirs" :
concurrentMkdirsOps.incr();
break;
      case "renewLease" :
        concurrentRenewLeaseOps.incr();
break;
case "getListing" :
concurrentGetListingOps.incr();
break;
case "getFileInfo" :
concurrentGetFileInfoOps.incr();
break;
case "getStats" :
concurrentGetStatsOps.incr();
break;
case "getDatanodeReport" :
concurrentGetDatanodeReportOps.incr();
break;
case "setSafeMode" :
concurrentSetSafeModeOps.incr();
break;
case "restoreFailedStorage" :
concurrentRestoreFailedStorageOps.incr();
break;
case "saveNamespace" :
concurrentSaveNamespaceOps.incr();
break;
case "rollEdits" :
concurrentRollEditsOps.incr();
break;
      case "refreshNodes" :
concurrentRefreshNodesOps.incr();
break;
case "finalizeUpgrade" :
concurrentFinalizeUpgradeOps.incr();
break;
case "rollingUpgrade" :
concurrentRollingUpgradeOps.incr();
break;
case "metaSave" :
concurrentMetaSaveOps.incr();
break;
case "listCorruptFileBlocks" :
concurrentListCorruptFileBlocksOps.incr();
        break;
      case "setBalancerBandwidth" :
f3.incr();
break;
case "getContentSummary" :
concurrentGetContentSummaryOps.incr();
break;
case "modifyAclEntries" :
concurrentModifyAclEntriesOps.incr();
        break;
      case "removeAclEntries" :
concurrentRemoveAclEntriesOps.incr();
break;
case "removeDefaultAcl" :
concurrentRemoveDefaultAclOps.incr();
break;
case "removeAcl" :
f4.incr();
break;
      case "setAcl" :
        concurrentSetAclOps.incr();
        break;
      case "setXAttr" :
        concurrentSetXAttrOps.incr();
        break;
      case "removeXAttr" :
        concurrentRemoveXAttrOps.incr();
break;
case "getCurrentEditLogTxid" :
concurrentGetCurrentEditLogTxidOps.incr();
break;
case "getReplicatedBlockStats" :
concurrentGetReplicatedBlockStatsOps.incr();
break;
case "setQuota" :
concurrentSetQuotaOps.incr();
break;
      case "getQuotaUsage" :
        concurrentGetQuotaUsageOps.incr();
        break;
      case "getSlowDatanodeReport" :
        concurrentGetSlowDatanodeReportOps.incr();
break;
default :
concurrentOtherOps.incr();
}
} | 3.26 |
hadoop_BlockBlobAppendStream_maybeSetFirstError_rdh | /**
* Set {@link #firstError} to the exception if it is not already set.
*
* @param exception
* exception to save
*/
private void maybeSetFirstError(IOException exception) {
  firstError.compareAndSet(null, exception);
} | 3.26 |
hadoop_BlockBlobAppendStream_maybeThrowFirstError_rdh | /**
* Throw the first error caught if it has not been raised already
*
* @throws IOException
* if one is caught and needs to be thrown.
*/
private void maybeThrowFirstError() throws IOException {
if (firstError.get() != null) {
firstErrorThrown = true;
throw firstError.get();
}
} | 3.26 |
hadoop_BlockBlobAppendStream_setMaxBlockSize_rdh | /**
* Set payload size of the stream.
* It is intended to be used for unit testing purposes only.
*/
@VisibleForTesting
synchronized void setMaxBlockSize(int size) {
f0.set(size);
// it is for testing only so we can abandon the previously allocated
// payload
this.outBuffer = ByteBuffer.allocate(f0.get());
} | 3.26 |
hadoop_BlockBlobAppendStream_setCompactionBlockCount_rdh | /**
* Set compaction parameters.
* It is intended to be used for unit testing purposes only.
*/
@VisibleForTesting
void setCompactionBlockCount(int activationCount) {
activateCompactionBlockCount = activationCount;
} | 3.26 |
hadoop_BlockBlobAppendStream_writeBlockRequestInternal_rdh | /**
 * This is shared between the upload-block Runnable and CommitBlockList. The
 * method captures the retry logic.
*
* @param blockId
* block name
* @param dataPayload
* block content
*/
private void writeBlockRequestInternal(String blockId, ByteBuffer dataPayload, boolean bufferPoolBuffer) {
IOException lastLocalException = null;
int uploadRetryAttempts = 0;
while (uploadRetryAttempts < MAX_BLOCK_UPLOAD_RETRIES) {
try {
long startTime = System.nanoTime();
blob.uploadBlock(blockId, accessCondition, new ByteArrayInputStream(dataPayload.array()), dataPayload.position(), new BlobRequestOptions(), opContext);
LOG.debug("upload block finished for {} ms. block {} ", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime), blockId);
break;
} catch (Exception ioe) {
LOG.debug("Encountered exception during uploading block for Blob {}" + " Exception : {}", key, ioe);
uploadRetryAttempts++;
lastLocalException = new AzureException("Encountered Exception while uploading block: " + ioe, ioe);
try {
Thread.sleep(BLOCK_UPLOAD_RETRY_INTERVAL * (uploadRetryAttempts + 1));
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
break;
}
}
}
if (bufferPoolBuffer) {
poolReadyByteBuffers.putBuffer(dataPayload);
}
if (uploadRetryAttempts == MAX_BLOCK_UPLOAD_RETRIES) {
maybeSetFirstError(lastLocalException);
}
} | 3.26 |
hadoop_BlockBlobAppendStream_generateNewerVersionBlockId_rdh | /**
 * Helper method that generates a newer (4.2.0) version blockId.
*
* @return String representing the block ID generated.
*/
private String generateNewerVersionBlockId(String prefix, long id) {
String blockIdSuffix = String.format("%06d", id);
byte[] v8 = (prefix + blockIdSuffix).getBytes(StandardCharsets.UTF_8);
return new String(Base64.encodeBase64(v8), StandardCharsets.UTF_8);
} | 3.26 |
hadoop_BlockBlobAppendStream_generateBlockId_rdh | /**
* Helper method that generates the next block id for uploading a block to
* azure storage.
*
* @return String representing the block ID generated.
* @throws IOException
* if the stream is in invalid state
*/
private String generateBlockId() throws IOException {
if ((nextBlockCount == UNSET_BLOCKS_COUNT) || (blockIdPrefix == null)) {
throw new AzureException("Append Stream in invalid state. nextBlockCount not set correctly");
}
return !blockIdPrefix.isEmpty() ? generateNewerVersionBlockId(blockIdPrefix, nextBlockCount++) : generateOlderVersionBlockId(nextBlockCount++);
} | 3.26 |
hadoop_BlockBlobAppendStream_generateOlderVersionBlockId_rdh | /**
* Helper method that generates an older (2.2.0) version blockId.
*
* @return String representing the block ID generated.
*/
private String generateOlderVersionBlockId(long id) {
byte[] blockIdInBytes = new byte[8];
for (int m = 0; m < 8; m++) {
    blockIdInBytes[7 - m] = ((byte) ((id >> (8 * m)) & 0xff));
}
return new String(Base64.encodeBase64(blockIdInBytes), StandardCharsets.UTF_8);
} | 3.26 |
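The loop above simply writes the 64-bit id in big-endian byte order before Base64-encoding it. A self-contained sketch of the same idea using the JDK's ByteBuffer and Base64 (not the helper's actual dependencies) makes the byte layout explicit:

```java
import java.nio.ByteBuffer;
import java.util.Base64;

public class OlderBlockIdDemo {
  static String blockId(long id) {
    // Big-endian 8-byte encoding, equivalent to the manual shift/mask loop above.
    byte[] bytes = ByteBuffer.allocate(8).putLong(id).array();
    return Base64.getEncoder().encodeToString(bytes);
  }

  public static void main(String[] args) {
    // id = 1 -> bytes 00 00 00 00 00 00 00 01 -> "AAAAAAAAAAE="
    System.out.println(blockId(1L));
  }
}
```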
hadoop_BlockBlobAppendStream_execute_rdh | /**
* Execute command.
*/
public void execute() throws InterruptedException, IOException {
  if (committedBlobLength.get() >= getCommandBlobOffset()) {
LOG.debug("commit already applied for {}", key);
return;
}
if (lastBlock == null) {
LOG.debug("nothing to commit for {}", key);
return;
}
LOG.debug("active commands: {} for {}", activeBlockCommands.size(), key);
for (UploadCommand activeCommand : activeBlockCommands) {
if (activeCommand.getCommandBlobOffset() < getCommandBlobOffset()) {
activeCommand.dump();
activeCommand.awaitAsDependent();
    } else {
      break;
}
}
// stop all uploads until the block list is committed
uploadingSemaphore.acquire(MAX_NUMBER_THREADS_IN_THREAD_POOL);
BlockEntry uncommittedBlock;
do {
uncommittedBlock = uncommittedBlockEntries.poll();
f1.add(uncommittedBlock);
} while (uncommittedBlock != lastBlock );
if (f1.size() > activateCompactionBlockCount) {
LOG.debug("Block compaction: activated with {} blocks for {}", f1.size(), key);
// Block compaction
long startCompaction = System.nanoTime();
      blockCompaction();
      LOG.debug("Block compaction finished for {} ms with {} blocks for {}", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startCompaction), f1.size(), key);
}
writeBlockListRequestInternal();
uploadingSemaphore.release(MAX_NUMBER_THREADS_IN_THREAD_POOL);
// remove blocks previous commands
for (Iterator<UploadCommand> v20 = activeBlockCommands.iterator(); v20.hasNext();) {
UploadCommand activeCommand = v20.next();
      if (activeCommand.getCommandBlobOffset() <= getCommandBlobOffset()) {
        v20.remove();
} else {
break;
}
}
committedBlobLength.set(getCommandBlobOffset());
} | 3.26 |
hadoop_BlockBlobAppendStream_hsync_rdh | /**
* Force all data in the output stream to be written to Azure storage.
* Wait to return until this is complete.
 */
@Override
public void hsync() throws IOException {
// when block compaction is disabled, hsync is empty function
if (compactionEnabled) {
    flush();
  }
} | 3.26 |
hadoop_BlockBlobAppendStream_flush_rdh | /**
* Flushes this output stream and forces any buffered output bytes to be
* written out. If any data remains in the payload it is committed to the
* service. Data is queued for writing and forced out to the service
* before the call returns.
*/
@Override
public void flush() throws IOException {
  if (closed) {
    // calling close() after the stream is closed starts with call to flush()
return;
}
addBlockUploadCommand();
if (committedBlobLength.get() < blobLength) {
try {
// wait until the block list is committed
addFlushCommand().await();
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
}
}
} | 3.26 |
hadoop_BlockBlobAppendStream_write_rdh | /**
* Writes length bytes from the specified byte array starting at offset to
* this output stream.
*
* @param data
* the byte array to write.
* @param offset
* the start offset in the data.
* @param length
* the number of bytes to write.
* @throws IOException
* if an I/O error occurs. In particular, an IOException may be
* thrown if the output stream has been closed.
*/
@Override
public synchronized void write(final byte[] data, int offset, int length)
    throws IOException {
Preconditions.checkArgument(data != null, "null data");
if (((offset < 0) || (length < 0)) || (length > (data.length - offset))) {
throw new IndexOutOfBoundsException();
}
if (closed) {
throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
}
while (outBuffer.remaining() < length) {
int remaining = outBuffer.remaining();
outBuffer.put(data, offset, remaining);
// upload payload to azure storage
addBlockUploadCommand();
offset += remaining;
length -= remaining;
}
outBuffer.put(data, offset, length);
} | 3.26 |
hadoop_BlockBlobAppendStream_blockCompaction_rdh | /**
* Block compaction process.
*
* Block compaction is only enabled when the number of blocks exceeds
 * activateCompactionBlockCount. The algorithm searches for the longest
 * segment [b..e) with (e - b) > 2 such that
 * size(b) + size(b+1) + ... + size(e-1) < maxBlockSize.
* It then downloads the blocks in the sequence, concatenates the data to
* form a single block, uploads this new block, and updates the block
* list to replace the sequence of blocks with the new block.
*/
private void blockCompaction() throws IOException {
// current segment [segmentBegin, segmentEnd) and file offset/size of the
// current segment
int segmentBegin = 0;
int segmentEnd = 0;
long segmentOffsetBegin = 0;
long segmentOffsetEnd = 0;
// longest segment [maxSegmentBegin, maxSegmentEnd) and file offset/size of
// the longest segment
int maxSegmentBegin = 0;
int maxSegmentEnd = 0;
long maxSegmentOffsetBegin = 0;
  long maxSegmentOffsetEnd = 0;
for (BlockEntry block : f1) {
segmentEnd++;
segmentOffsetEnd += block.getSize();
if ((segmentOffsetEnd - segmentOffsetBegin) > f0.get()) {
if ((segmentEnd - segmentBegin) > 2) {
        if ((maxSegmentEnd - maxSegmentBegin) < (segmentEnd - segmentBegin)) {
maxSegmentBegin = segmentBegin;
maxSegmentEnd = segmentEnd;
maxSegmentOffsetBegin = segmentOffsetBegin;
maxSegmentOffsetEnd = segmentOffsetEnd - block.getSize();
}
}
segmentBegin = segmentEnd - 1;
segmentOffsetBegin = segmentOffsetEnd - block.getSize();
}
}
if ((maxSegmentEnd - maxSegmentBegin) > 1) {
LOG.debug("Block compaction: {} blocks for {}", maxSegmentEnd - maxSegmentBegin, key);
// download synchronously all the blocks from the azure storage
ByteArrayOutputStreamInternal blockOutputStream = new ByteArrayOutputStreamInternal(f0.get());
try {
long length = maxSegmentOffsetEnd - maxSegmentOffsetBegin;
blob.downloadRange(maxSegmentOffsetBegin, length, blockOutputStream, new BlobRequestOptions(), opContext);
} catch (StorageException ex) {
      LOG.error("Storage exception encountered during block compaction phase" + " : {} Storage Exception : {} Error Code: {}", key, ex, ex.getErrorCode());
      throw new AzureException("Encountered Exception while committing append blocks " + ex, ex);
}
// upload synchronously new block to the azure storage
String blockId = generateBlockId();
ByteBuffer byteBuffer = ByteBuffer.wrap(blockOutputStream.getByteArray());
byteBuffer.position(blockOutputStream.size());
writeBlockRequestInternal(blockId, byteBuffer, false);
// replace blocks from the longest segment with new block id
f1.subList(maxSegmentBegin + 1, maxSegmentEnd - 1).clear();
BlockEntry newBlock = f1.get(maxSegmentBegin);
newBlock.setId(blockId);
newBlock.setSearchMode(BlockSearchMode.LATEST);
newBlock.setSize(maxSegmentOffsetEnd - maxSegmentOffsetBegin);
}
} | 3.26 |
hadoop_BlockBlobAppendStream_hasCapability_rdh | /**
* The Synchronization capabilities of this stream depend upon the compaction
* policy.
*
* @param capability
* string to query the stream support for.
* @return true for hsync and hflush when compaction is enabled.
*/
@Override
public boolean hasCapability(String capability) {
if (!compactionEnabled) {
return false;
}
return StoreImplementationUtils.isProbeForSyncable(capability);
} | 3.26 |
hadoop_BlockBlobAppendStream_getBlockList_rdh | /**
* Get the list of block entries. It is used for testing purposes only.
*
* @return List of block entries.
 */
@VisibleForTesting
List<BlockEntry> getBlockList() throws StorageException, IOException {
return blob.downloadBlockList(BlockListingFilter.COMMITTED, new BlobRequestOptions(), opContext);
} | 3.26 |
hadoop_BlockBlobAppendStream_writeBlockListRequestInternal_rdh | /**
 * Write the block list. The method captures the retry logic.
*/
private void writeBlockListRequestInternal() {
IOException lastLocalException = null;
int uploadRetryAttempts = 0;
while (uploadRetryAttempts < MAX_BLOCK_UPLOAD_RETRIES) {
try {
long startTime = System.nanoTime();
blob.commitBlockList(f1, accessCondition, new BlobRequestOptions(), opContext);
LOG.debug("Upload block list took {} ms for blob {} ", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime), key);
break;
} catch (Exception ioe) {
LOG.debug("Encountered exception during uploading block for Blob {}" + " Exception : {}", key, ioe);
uploadRetryAttempts++;
lastLocalException = new AzureException("Encountered Exception while uploading block: " + ioe, ioe);
try {
Thread.sleep(BLOCK_UPLOAD_RETRY_INTERVAL * (uploadRetryAttempts + 1));
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
break;
}
}
}
if (uploadRetryAttempts == MAX_BLOCK_UPLOAD_RETRIES) {
maybeSetFirstError(lastLocalException);
}
} | 3.26 |
hadoop_BlockBlobAppendStream_hflush_rdh | /**
* Force all data in the output stream to be written to Azure storage.
* Wait to return until this is complete.
*/
@Override
public void hflush() throws IOException {
// when block compaction is disabled, hflush is empty function
if (compactionEnabled) {
flush();
}
} | 3.26 |
hadoop_BlockBlobAppendStream_addFlushCommand_rdh | /**
* Prepare block list commit command and queue the command in thread pool
* executor.
*/
private synchronized UploadCommand addFlushCommand() throws IOException {
  maybeThrowFirstError();
  if (blobExist && lease.isFreed()) {
throw new AzureException(String.format("Attempting to upload block list on blob : %s" + " that does not have lease on the Blob. Failing upload", key));
}
UploadCommand command = new UploadBlockListCommand();
activeBlockCommands.add(command);
ioThreadPool.execute(new WriteRequest(command));
return command;
} | 3.26 |
hadoop_BlockBlobAppendStream_setBlocksCountAndBlockIdPrefix_rdh | /**
* Helper method used to generate the blockIDs. The algorithm used is similar
* to the Azure storage SDK.
*/
private void setBlocksCountAndBlockIdPrefix(List<BlockEntry> blockEntries) {
if ((nextBlockCount == UNSET_BLOCKS_COUNT) && (blockIdPrefix == null)) {
    Random sequenceGenerator = new Random();
String blockZeroBlockId = (!blockEntries.isEmpty()) ? blockEntries.get(0).getId() : "";
String prefix = UUID.randomUUID().toString() + "-";
String sampleNewerVersionBlockId = generateNewerVersionBlockId(prefix, 0);
if ((!blockEntries.isEmpty()) && (blockZeroBlockId.length() < sampleNewerVersionBlockId.length())) {
      // If the blob has already been created with 2.2.0, append subsequent
      // blocks with the older-version (2.2.0) blockId; compute nextBlockCount
      // the way it was done before, and don't use blockIdPrefix
this.blockIdPrefix = "";
nextBlockCount = ((long) (sequenceGenerator.nextInt(Integer.MAX_VALUE))) + sequenceGenerator.nextInt(Integer.MAX_VALUE - MAX_BLOCK_COUNT);
nextBlockCount += blockEntries.size();
} else {
// If there are no existing blocks, create the first block with newer
// version (4.2.0) blockId. If blob has already been created with 4.2.0,
// append subsequent blocks with newer version (4.2.0) blockId
this.blockIdPrefix = prefix;
nextBlockCount = blockEntries.size();
}
}
} | 3.26 |
hadoop_ResourceMappings_addAssignedResources_rdh | /**
* Adds the resources for a given resource type.
*
* @param resourceType
* Resource Type
* @param assigned
* Assigned resources to add
*/
public void addAssignedResources(String resourceType,
    AssignedResources assigned) {
assignedResourcesMap.put(resourceType, assigned);
} | 3.26 |
hadoop_ResourceMappings_getAssignedResources_rdh | /**
 * Get all resources assigned for the given resource type.
 *
 * @param resourceType
 * resource type
 * @return list of assigned resources, empty if none are recorded
*/
public List<Serializable> getAssignedResources(String resourceType) {
AssignedResources ar = assignedResourcesMap.get(resourceType);
if (null == ar) {
return Collections.emptyList();
}
return ar.getAssignedResources();
} | 3.26 |
hadoop_InMemoryLevelDBAliasMapServer_getAliasMap_rdh | /**
* Get the {@link InMemoryAliasMap} used by this server.
*
 * @return the InMemoryAliasMap used.
*/
public InMemoryAliasMap getAliasMap() {
return aliasMap;
} | 3.26 |
hadoop_ChainReducer_setReducer_rdh | /**
* Sets the {@link Reducer} class to the chain job.
*
* <p>
* The key and values are passed from one element of the chain to the next, by
* value. For the added Reducer the configuration given for it,
 * <code>reducerConf</code>, has precedence over the job's Configuration.
* This precedence is in effect when the task is running.
* </p>
* <p>
* IMPORTANT: There is no need to specify the output key/value classes for the
* ChainReducer, this is done by the setReducer or the addMapper for the last
* element in the chain.
* </p>
*
* @param job
* the job
* @param klass
* the Reducer class to add.
* @param inputKeyClass
* reducer input key class.
* @param inputValueClass
* reducer input value class.
* @param outputKeyClass
* reducer output key class.
* @param outputValueClass
* reducer output value class.
* @param reducerConf
* a configuration for the Reducer class. It is recommended to use a
* Configuration without default values using the
* <code>Configuration(boolean loadDefaults)</code> constructor with
* FALSE.
*/
public static void setReducer(Job job, Class<? extends Reducer> klass,
    Class<?> inputKeyClass, Class<?> inputValueClass, Class<?> outputKeyClass,
    Class<?> outputValueClass, Configuration reducerConf) {
job.setReducerClass(ChainReducer.class);
job.setOutputKeyClass(outputKeyClass);
job.setOutputValueClass(outputValueClass);
Chain.setReducer(job, klass, inputKeyClass, inputValueClass, outputKeyClass, outputValueClass, reducerConf);
}
/**
* Adds a {@link Mapper} | 3.26 |
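For context, a chained job is normally assembled by combining ChainMapper.addMapper with ChainReducer.setReducer and ChainReducer.addMapper; the fragment below is a hedged sketch of that pattern (AMap, BMap, CReduce and DMap are hypothetical classes, and the usual MapReduce imports are assumed):

```java
// Sketch of a MAP+ / REDUCE / MAP* pipeline; class names are placeholders.
Job job = Job.getInstance(conf, "chain example");
ChainMapper.addMapper(job, AMap.class, LongWritable.class, Text.class,
    Text.class, Text.class, new Configuration(false));
ChainMapper.addMapper(job, BMap.class, Text.class, Text.class,
    LongWritable.class, Text.class, new Configuration(false));
ChainReducer.setReducer(job, CReduce.class, LongWritable.class, Text.class,
    Text.class, Text.class, new Configuration(false));
ChainReducer.addMapper(job, DMap.class, Text.class, Text.class,
    LongWritable.class, Text.class, new Configuration(false));
```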
hadoop_AbfsIoUtils_m0_rdh | /**
* Dump the headers of a request/response to the log at DEBUG level.
*
* @param origin
* header origin for log
* @param headers
* map of headers.
*/
public static void m0(final String origin, final Map<String, List<String>> headers) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("{}", origin);
    for (Map.Entry<String, List<String>> entry : headers.entrySet()) {
      String key = entry.getKey();
      if (key == null) {
        key = "HTTP Response";
      }
      String values = StringUtils.join(";", entry.getValue());
      if (key.contains("Cookie")) {
        values = "*cookie info*";
}
if (key.equals("sig")) {
values = "XXXX";
}
LOG.debug(" {}={}", key, values);
}
}
} | 3.26 |
hadoop_ResourceEstimatorUtil_createProviderInstance_rdh | /**
* Helper method to create instances of Object using the class name specified
* in the configuration object.
*
* @param conf
* the yarn configuration
* @param configuredClassName
* the configuration provider key
* @param defaultValue
* the default implementation class
* @param type
* the required interface/base class
* @param <T>
* The type of the instance to create
* @return the instances created
* @throws ResourceEstimatorException
* if the provider initialization fails.
*/
@SuppressWarnings("unchecked")
public static <T> T createProviderInstance(Configuration conf, String configuredClassName, String defaultValue, Class<T> type) throws ResourceEstimatorException {
String className = conf.get(configuredClassName);
if (className == null) {
className = defaultValue;
}
try {
Class<?> concreteClass = Class.forName(className);
if (type.isAssignableFrom(concreteClass)) {
Constructor<T> v2 = ((Constructor<T>) (concreteClass.getDeclaredConstructor(EMPTY_ARRAY)));
v2.setAccessible(true);
return v2.newInstance();
} else {
      StringBuilder errMsg = new StringBuilder();
errMsg.append("Class: ").append(className).append(" not instance of ").append(type.getCanonicalName());
throw new ResourceEstimatorException(errMsg.toString());
}
  } catch (ClassNotFoundException e) {
    StringBuilder errMsg = new StringBuilder();
    errMsg.append("Could not instantiate : ").append(className).append(" due to exception: ").append(e.getCause());
throw new ResourceEstimatorException(errMsg.toString());
} catch (ReflectiveOperationException e) {
StringBuilder errMsg = new StringBuilder();
errMsg.append("Could not instantiate : ").append(className).append(" due to exception: ").append(e.getCause());
throw new ResourceEstimatorException(errMsg.toString());
}
} | 3.26 |
hadoop_Lz4Codec_createDecompressor_rdh | /**
* Create a new {@link Decompressor} for use by this {@link CompressionCodec}.
*
* @return a new decompressor for use by this codec
*/
@Override
public Decompressor createDecompressor() {
int bufferSize = conf.getInt(CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY, CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_DEFAULT);
return new Lz4Decompressor(bufferSize);
} | 3.26 |
hadoop_Lz4Codec_setConf_rdh | /**
* Set the configuration to be used by this object.
*
* @param conf
* the configuration object.
*/
@Override
public void setConf(Configuration conf) {
this.conf = conf;
} | 3.26 |
hadoop_Lz4Codec_createCompressor_rdh | /**
* Create a new {@link Compressor} for use by this {@link CompressionCodec}.
*
* @return a new compressor for use by this codec
*/
@Override
public Compressor createCompressor() {
int bufferSize = conf.getInt(CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY, CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_DEFAULT);
boolean useLz4HC = conf.getBoolean(CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY, CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_DEFAULT);
  return new Lz4Compressor(bufferSize, useLz4HC);
}
/**
* Create a {@link CompressionInputStream} | 3.26 |
hadoop_Lz4Codec_getCompressorType_rdh | /**
* Get the type of {@link Compressor} needed by this {@link CompressionCodec}.
*
* @return the type of compressor needed by this codec.
*/
@Override
public Class<? extends Compressor> getCompressorType() {
return Lz4Compressor.class;
} | 3.26 |
hadoop_Lz4Codec_getDefaultExtension_rdh | /**
* Get the default filename extension for this kind of compression.
*
* @return <code>.lz4</code>.
*/
@Override
public String getDefaultExtension() {
return CodecConstants.LZ4_CODEC_EXTENSION;
} | 3.26 |
hadoop_Lz4Codec_createOutputStream_rdh | /**
* Create a {@link CompressionOutputStream} that will write to the given
* {@link OutputStream} with the given {@link Compressor}.
*
* @param out
* the location for the final output stream
* @param compressor
* compressor to use
* @return a stream the user can write uncompressed data to have it compressed
* @throws IOException
* raised on errors performing I/O.
 */
@Override
public CompressionOutputStream createOutputStream(OutputStream out, Compressor compressor) throws IOException {
int bufferSize = conf.getInt(CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY, CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_DEFAULT);
  int compressionOverhead = (bufferSize / 255) + 16;
return new BlockCompressorStream(out, compressor, bufferSize, compressionOverhead);
} | 3.26 |
hadoop_Lz4Codec_createInputStream_rdh | /**
* Create a {@link CompressionInputStream} that will read from the given
* {@link InputStream} with the given {@link Decompressor}.
*
* @param in
* the stream to read compressed bytes from
* @param decompressor
* decompressor to use
* @return a stream to read uncompressed bytes from
* @throws IOException
* raised on errors performing I/O.
*/
@Override
public CompressionInputStream createInputStream(InputStream in, Decompressor decompressor) throws IOException {
  return new BlockDecompressorStream(in, decompressor, conf.getInt(CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY, CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_DEFAULT));
}
/**
* Get the type of {@link Decompressor} needed by this {@link CompressionCodec} | 3.26 |
hadoop_ExternalSPSBlockMoveTaskHandler_cleanUp_rdh | /**
* Cleanup the resources.
*/
void cleanUp() {
blkMovementTracker.stopTracking();
if (movementTrackerThread != null) {
movementTrackerThread.interrupt();
}
} | 3.26 |
hadoop_ExternalSPSBlockMoveTaskHandler_startMovementTracker_rdh | /**
* Initializes block movement tracker daemon and starts the thread.
*/
private void startMovementTracker() {
movementTrackerThread = new Daemon(this.blkMovementTracker);
movementTrackerThread.setName("BlockStorageMovementTracker");
movementTrackerThread.start();
} | 3.26 |
hadoop_FSDirSatisfyStoragePolicyOp_satisfyStoragePolicy_rdh | /**
* Satisfy storage policy function which will add the entry to SPS call queue
* and will perform satisfaction async way.
*
* @param fsd
* fs directory
* @param bm
* block manager
* @param src
* source path
* @param logRetryCache
* whether to record RPC ids in editlog for retry cache rebuilding
* @return file status info
* @throws IOException
*/
static FileStatus satisfyStoragePolicy(FSDirectory fsd, BlockManager bm, String src, boolean logRetryCache) throws IOException {
assert fsd.getFSNamesystem().hasWriteLock();
FSPermissionChecker pc = fsd.getPermissionChecker();
INodesInPath iip;
fsd.writeLock();
try {
// check operation permission.
iip = fsd.resolvePath(pc, src, DirOp.WRITE);
if (fsd.isPermissionEnabled()) {
fsd.checkPathAccess(pc, iip, FsAction.WRITE);
}
INode inode = FSDirectory.resolveLastINode(iip);
if (inode.isFile() && (inode.asFile().numBlocks() == 0)) {
if (NameNode.LOG.isInfoEnabled()) {
NameNode.LOG.info("Skipping satisfy storage policy on path:{} as " + "this file doesn't have any blocks!", inode.getFullPathName());
}
} else if (inodeHasSatisfyXAttr(inode)) {
NameNode.LOG.warn((("Cannot request to call satisfy storage policy on path: " + inode.getFullPathName()) + ", as this file/dir was already called for satisfying ") + "storage policy.");
      } else {
XAttr satisfyXAttr = XAttrHelper.buildXAttr(XATTR_SATISFY_STORAGE_POLICY);
List<XAttr> xAttrs = Arrays.asList(satisfyXAttr);
List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
        List<XAttr> newXAttrs = FSDirXAttrOp.setINodeXAttrs(fsd, existingXAttrs, xAttrs, EnumSet.of(XAttrSetFlag.CREATE));
        XAttrStorage.updateINodeXAttrs(inode, newXAttrs, iip.getLatestSnapshotId());
        fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
// Adding directory in the pending queue, so FileInodeIdCollector
// process directory child in batch and recursively
StoragePolicySatisfyManager spsManager = fsd.getBlockManager().getSPSManager();
if (spsManager != null) {
spsManager.addPathId(inode.getId());
}
}
} finally {
fsd.writeUnlock();
}
return fsd.getAuditFileInfo(iip);
} | 3.26 |
hadoop_FSStoreOpHandler_get_rdh | /**
* Will return StoreOp instance based on opCode and StoreType.
*
* @param opCode
* opCode.
* @param storeType
* storeType.
* @return instance of FSNodeStoreLogOp.
*/
public static FSNodeStoreLogOp get(int opCode, StoreType storeType) {
return newInstance(editLogOp.get(storeType).get(opCode));
} | 3.26 |
hadoop_FSStoreOpHandler_getMirrorOp_rdh | /**
* Get mirror operation of store Type.
*
* @param storeType
* storeType.
* @return instance of FSNodeStoreLogOp.
*/
public static FSNodeStoreLogOp getMirrorOp(StoreType storeType) {
return newInstance(mirrorOp.get(storeType));
} | 3.26 |
hadoop_GenericRefreshProtocolServerSideTranslatorPB_pack_rdh | // Convert a collection of RefreshResponse objects to a
// RefreshResponseCollection proto
private GenericRefreshResponseCollectionProto pack(Collection<RefreshResponse> responses) {
GenericRefreshResponseCollectionProto.Builder b = GenericRefreshResponseCollectionProto.newBuilder();
for (RefreshResponse response : responses) {
    GenericRefreshResponseProto.Builder respBuilder = GenericRefreshResponseProto.newBuilder();
    respBuilder.setExitStatus(response.getReturnCode());
respBuilder.setUserMessage(response.getMessage());
respBuilder.setSenderName(response.getSenderName());
// Add to collection
b.addResponses(respBuilder);
}
return b.build();
} | 3.26 |
hadoop_FSBuilderSupport_getLong_rdh | /**
* Get a long value with resilience to unparseable values.
*
* @param key
* key to log
* @param defVal
* default value
* @return long value
*/
public long getLong(String key, long defVal) {
final String v = options.getTrimmed(key, "");
if (v.isEmpty()) {
return defVal;
  }
  try {
return options.getLong(key, defVal);
} catch (NumberFormatException e) {
final String msg = String.format("The option %s value \"%s\" is not a long integer; using the default value %s", key, v, defVal);
    // not a long
    LOG_PARSE_ERROR.warn(msg);
    LOG.debug("{}", msg, e);
    return defVal;
  }
} | 3.26 |
hadoop_FSBuilderSupport_getPositiveLong_rdh | /**
* Get a long value with resilience to unparseable values.
* Negative values are replaced with the default.
*
* @param key
* key to log
* @param defVal
* default value
* @return long value
*/
public long getPositiveLong(String key, long defVal) {
long l = getLong(key, defVal);
  if (l < 0) {
    LOG.debug("The option {} has a negative value {}, replacing with the default {}", key, l, defVal);
l = defVal;
}
return l;
} | 3.26 |
hadoop_ShadedProtobufHelper_tokenFromProto_rdh | /**
* Create a hadoop token from a protobuf token.
*
* @param tokenProto
* token
* @return a new token
*/
public static Token<? extends TokenIdentifier> tokenFromProto(TokenProto tokenProto) {
Token<? extends TokenIdentifier> token = new Token<>(tokenProto.getIdentifier().toByteArray(), tokenProto.getPassword().toByteArray(), new Text(tokenProto.getKind()),
new Text(tokenProto.getService()));
return token;
} | 3.26 |
hadoop_ShadedProtobufHelper_getByteString_rdh | /**
* Get the byte string of a non-null byte array.
* If the array is 0 bytes long, return a singleton to reduce object allocation.
*
* @param bytes
* bytes to convert.
* @return the protobuf byte string representation of the array.
*/
public static ByteString getByteString(byte[] bytes) {
  // return singleton to reduce object allocation
  return bytes.length == 0 ? ByteString.EMPTY : ByteString.copyFrom(bytes);
} | 3.26 |
hadoop_ShadedProtobufHelper_protoFromToken_rdh | /**
* Create a {@code TokenProto} instance
* from a hadoop token.
* This builds and caches the fields
* (identifier, password, kind, service) but not
* renewer or any payload.
*
* @param tok
* token
* @return a marshallable protobuf class.
*/
public static TokenProto protoFromToken(Token<?> tok) {
  TokenProto.Builder builder = TokenProto.newBuilder().setIdentifier(getByteString(tok.getIdentifier())).setPassword(getByteString(tok.getPassword())).setKindBytes(getFixedByteString(tok.getKind())).setServiceBytes(getFixedByteString(tok.getService()));
return builder.build();
} | 3.26 |
hadoop_ShadedProtobufHelper_ipc_rdh | /**
* Evaluate a protobuf call, converting any ServiceException to an IOException.
*
* @param call
* invocation to make
* @return the result of the call
* @param <T>
* type of the result
* @throws IOException
* any translated protobuf exception
*/
public static <T> T ipc(IpcCall<T> call) throws IOException {
try {
    return call.call();
} catch (ServiceException e) {
throw getRemoteException(e);
}
} | 3.26 |
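Callers typically wrap each translator RPC in ipc() so that the ServiceException-to-IOException translation lives in one place; the fragment below is a hedged sketch only (rpcProxy, the request/response proto types and NULL_CONTROLLER are placeholders for whatever the surrounding translator defines):

```java
// Sketch: the lambda is the IpcCall<T>; any ServiceException surfaces as an IOException.
public GetSomethingResponseProto getSomething(GetSomethingRequestProto req) throws IOException {
  return ipc(() -> rpcProxy.getSomething(NULL_CONTROLLER, req));
}
```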
hadoop_ShadedProtobufHelper_getFixedByteString_rdh | /**
* Get the ByteString for frequently used fixed and small set strings.
*
* @param key
* string
* @return ByteString for frequently used fixed and small set strings.
*/
public static ByteString getFixedByteString(String key) {
ByteString value = FIXED_BYTESTRING_CACHE.get(key);
if (value == null) {
value = ByteString.copyFromUtf8(key);
FIXED_BYTESTRING_CACHE.put(key, value);
  }
  return value;
} | 3.26 |
hadoop_OBSInputStream_onReadFailure_rdh | /**
* Handle an IOE on a read by attempting to re-open the stream. The
* filesystem's readException count will be incremented.
*
* @param ioe
* exception caught.
* @param length
* length of data being attempted to read
* @throws IOException
* any exception thrown on the re-open attempt.
*/
private void onReadFailure(final IOException ioe, final int length)
throws IOException {
LOG.debug(("Got exception while trying to read from stream {}" + " trying to recover: ") + ioe, uri);
int i = 1;
while (true) {
try {
reopen("failure recovery", streamCurrentPos, length);
return;
} catch (OBSIOException e) {
LOG.warn("OBSIOException occurred in reopen for failure recovery, " + "the {} retry time", i, e);
if (i == READ_RETRY_TIME) {
throw e;
}
try {
Thread.sleep(DELAY_TIME);
} catch (InterruptedException ie) {
throw e;
}
}
i++;
}
} | 3.26 |
hadoop_OBSInputStream_reopen_rdh | /**
* Opens up the stream at specified target position and for given length.
*
* @param reason
* reason for reopen
* @param targetPos
* target position
* @param length
* length requested
* @throws IOException
* on any failure to open the object
 */
private synchronized void reopen(final String reason, final long targetPos, final long length) throws IOException {
long startTime = System.currentTimeMillis();
long threadId = Thread.currentThread().getId();
if (wrappedStream != null) {
closeStream(("reopen(" + reason) + ")", contentRangeFinish);
}
contentRangeFinish = m0(targetPos, length, contentLength, readAheadRange);
try {
GetObjectRequest request = new GetObjectRequest(bucket, key);
request.setRangeStart(targetPos);
request.setRangeEnd(contentRangeFinish);
if (fs.getSse().isSseCEnable()) {
request.setSseCHeader(fs.getSse().getSseCHeader());
}
wrappedStream = client.getObject(request).getObjectContent();
contentRangeStart = targetPos;
if (wrappedStream == null) {
throw new IOException((("Null IO stream from reopen of (" + reason) + ") ") + uri);
}
} catch (ObsException e) {
throw translateException("Reopen at position " + targetPos, uri, e);
}
this.streamCurrentPos = targetPos;
long endTime = System.currentTimeMillis();
LOG.debug(("reopen({}) for {} range[{}-{}], length={}," + " streamPosition={}, nextReadPosition={}, thread={}, ") + "timeUsedInMilliSec={}", uri, reason, targetPos, contentRangeFinish, length, streamCurrentPos, nextReadPos, threadId, endTime - startTime);
} | 3.26 |
hadoop_OBSInputStream_closeStream_rdh | /**
* Close a stream: decide whether to abort or close, based on the length of
* the stream and the current position. If a close() is attempted and fails,
* the operation escalates to an abort.
*
* <p>This does not set the {@link #closed} flag.
*
* @param reason
* reason for stream being closed; used in messages
* @param length
* length of the stream
* @throws IOException
* on any failure to close stream
*/
private synchronized void closeStream(final String reason, final long length) throws IOException {
if (wrappedStream != null) {
try {
wrappedStream.close();
} catch (IOException e) {
// exception escalates to an abort
LOG.debug("When closing {} stream for {}", uri, reason, e);
throw e;
}
LOG.debug("Stream {} : {}; streamPos={}, nextReadPos={}," + " request range {}-{} length={}", uri, reason, streamCurrentPos, nextReadPos, contentRangeStart, contentRangeFinish, length);
wrappedStream = null;
}
} | 3.26 |
hadoop_OBSInputStream_read_rdh | /**
* Read bytes starting from the specified position.
*
* @param position
* start read from this position
* @param buffer
* read buffer
* @param offset
* offset into buffer
* @param length
* number of bytes to read
* @return actual number of bytes read
* @throws IOException
* on any failure to read
*/
@Override
public int read(final long position, final byte[] buffer, final int offset, final int length) throws IOException {
int len = length;
checkNotClosed();
validatePositionedReadArgs(position, buffer, offset, len);
  if ((position < 0) || (position >= contentLength)) {
return -1;
}
if ((position + len) > contentLength) {
len = ((int) (contentLength - position));
  }
  if (fs.isReadTransformEnabled()) {
    return super.read(position, buffer, offset, len);
}
return randomReadWithNewInputStream(position, buffer, offset, len);
} | 3.26 |
hadoop_OBSInputStream_remainingInFile_rdh | /**
* Bytes left in stream.
*
* @return how many bytes are left to read
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public synchronized long remainingInFile() {
  return this.contentLength - this.streamCurrentPos;
} | 3.26 |
hadoop_OBSInputStream_seekInStream_rdh | /**
* Adjust the stream to a specific position.
*
* @param targetPos
* target seek position
* @throws IOException
* on any failure to seek
*/
private void seekInStream(final long targetPos) throws IOException {
checkNotClosed();
if (wrappedStream == null) {
return;
}
// compute how much more to skip
long diff = targetPos - streamCurrentPos;
if (diff > 0) {
      // forward seek - this is where data can be skipped
int available = wrappedStream.available();
// always seek at least as far as what is available
long forwardSeekRange = Math.max(readAheadRange, available);
// work out how much is actually left in the stream
// then choose whichever comes first: the range or the EOF
long remainingInCurrentRequest = remainingInCurrentRequest();
long forwardSeekLimit = Math.min(remainingInCurrentRequest, forwardSeekRange);
      boolean skipForward = (remainingInCurrentRequest > 0) && (diff <= forwardSeekLimit);
      if (skipForward) {
// the forward seek range is within the limits
LOG.debug("Forward seek on {}, of {} bytes", uri, diff);
long skippedOnce = wrappedStream.skip(diff);
while ((diff > 0) && (skippedOnce > 0)) {
streamCurrentPos += skippedOnce;
diff -= skippedOnce;
incrementBytesRead(skippedOnce);
skippedOnce = wrappedStream.skip(diff);
}
if (streamCurrentPos == targetPos) {
// all is well
return;
} else {
// log a warning; continue to attempt to re-open
          LOG.info("Failed to seek on {} to {}. Current position {}", uri, targetPos, streamCurrentPos);
        }
}
} else if ((diff == 0) && (remainingInCurrentRequest() > 0)) {
// targetPos == streamCurrentPos
// if there is data left in the stream, keep going
return;
}
// if the code reaches here, the stream needs to be reopened.
// close the stream; if read the object will be opened at the
// new streamCurrentPos
closeStream("seekInStream()", this.contentRangeFinish);
  streamCurrentPos = targetPos;
} | 3.26 |
hadoop_OBSInputStream_incrementBytesRead_rdh | /**
* Increment the bytes read counter if there is a stats instance and the
* number of bytes read is more than zero.
*
* @param bytesRead
* number of bytes read
 */
private void incrementBytesRead(final long bytesRead) {
if ((statistics != null) && (bytesRead > 0)) {
statistics.incrementBytesRead(bytesRead);
}
} | 3.26 |
hadoop_OBSInputStream_readFully_rdh | /**
* Subclass {@code readFully()} operation which only seeks at the start of the
* series of operations; seeking back at the end.
*
* <p>This is significantly higher performance if multiple read attempts
* are needed to fetch the data, as it does not break the HTTP connection.
*
* <p>To maintain thread safety requirements, this operation is
* synchronized for the duration of the sequence. {@inheritDoc }
*/
@Override
public void readFully(final long position, final byte[] buffer, final int offset, final int length) throws IOException {
  long startTime = System.currentTimeMillis();
long threadId = Thread.currentThread().getId();
checkNotClosed();
validatePositionedReadArgs(position, buffer, offset, length);
if (length == 0) {
return;
}
int nread = 0;
synchronized(this) {
long oldPos = getPos();
try {
seek(position);
while (nread < length) {
int nbytes = read(buffer, offset + nread, length - nread);
          if (nbytes < 0) {
            throw new EOFException(FSExceptionMessages.EOF_IN_READ_FULLY);
          }
          nread += nbytes;
        }
} finally {
seekQuietly(oldPos);
}
}
  long endTime = System.currentTimeMillis();
LOG.debug("ReadFully uri:{}, contentLength:{}, destLen:{}, readLen:{}, " + "position:{}, thread:{}, timeUsedMilliSec:{}", uri, contentLength, length, nread, position, threadId, endTime - startTime);
} | 3.26 |
hadoop_OBSInputStream_remainingInCurrentRequest_rdh | /**
* Bytes left in the current request. Only valid if there is an active
* request.
*
* @return how many bytes are left to read in the current GET.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public synchronized long remainingInCurrentRequest() {
return this.contentRangeFinish - this.streamCurrentPos;
} | 3.26 |
hadoop_OBSInputStream_seekQuietly_rdh | /**
* Seek without raising any exception. This is for use in {@code finally}
* clauses
*
* @param positiveTargetPos
* a target position which must be positive.
*/
private void seekQuietly(final long positiveTargetPos) {
try {
seek(positiveTargetPos);
} catch (IOException ioe) {
LOG.debug("Ignoring IOE on seek of {} to {}", uri, positiveTargetPos, ioe);
}
} | 3.26 |
hadoop_OBSInputStream_m0_rdh | /**
* Calculate the limit for a get request, based on input policy and state of
* object.
*
* @param targetPos
* position of the read
* @param length
* length of bytes requested; if less than zero
* "unknown"
* @param contentLength
* total length of file
* @param readahead
* current readahead value
* @return the absolute value of the limit of the request.
*/
static long m0(final long targetPos, final long length, final long contentLength, final long readahead) {
// cannot read past the end of the object
return Math.min(contentLength, length < 0 ? contentLength : targetPos + Math.max(readahead, length));
} | 3.26 |
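The limit calculation above can be exercised in isolation. The sketch below re-implements the same formula locally (the original method has package-private visibility, so it is not called directly); the content length and readahead values are examples only.

// Standalone sketch of the request-limit formula above.
public class RequestLimitDemo {
  static long requestLimit(long targetPos, long length, long contentLength, long readahead) {
    // cannot read past the end of the object
    return Math.min(contentLength, length < 0 ? contentLength : targetPos + Math.max(readahead, length));
  }

  public static void main(String[] args) {
    long contentLength = 10_000_000L;   // 10 MB object (example value)
    long readahead = 65_536L;           // 64 KB readahead (example value)
    // Known read length larger than the readahead: limit is targetPos + length.
    System.out.println(requestLimit(1_000L, 1_048_576L, contentLength, readahead)); // 1049576
    // Unknown length (-1): limit is the full content length.
    System.out.println(requestLimit(1_000L, -1L, contentLength, readahead));        // 10000000
  }
}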
hadoop_OBSInputStream_close_rdh | /**
* Close the stream. This triggers publishing of the stream statistics back to
* the filesystem statistics. This operation is synchronized, so that only one
* thread can attempt to close the connection; all later/blocked calls are
* no-ops.
*
* @throws IOException
* on any problem
*/
@Override
public synchronized void close() throws IOException {
if (!closed) {
closed = true;
// close or abort the stream
closeStream("close() operation", this.contentRangeFinish);
// this is actually a no-op
super.close();
}
} | 3.26 |
hadoop_OBSInputStream_lazySeek_rdh | /**
* Perform lazy seek and adjust stream to correct position for reading.
*
* @param targetPos
* position from where data should be read
* @param len
* length of the content that needs to be read
* @throws IOException
* on any failure to lazy seek
*/
private void lazySeek(final long targetPos, final long len) throws IOException {
for (int i = 0; i < SEEK_RETRY_TIME; i++) {
try {
// For lazy seek
seekInStream(targetPos);
// re-open at specific location if needed
if (wrappedStream == null) {
reopen("read from new offset", targetPos, len);
}
break;
} catch (IOException e) {
if (wrappedStream != null) {
closeStream("lazySeek() seekInStream has exception ", this.contentRangeFinish);
}
Throwable cause = e.getCause();
if (cause instanceof ObsException) {
ObsException obsException = ((ObsException) (cause));
int status = obsException.getResponseCode();
switch (status) {
case OBSCommonUtils.UNAUTHORIZED_CODE :
case OBSCommonUtils.FORBIDDEN_CODE :
case OBSCommonUtils.NOT_FOUND_CODE :
case OBSCommonUtils.GONE_CODE :
case OBSCommonUtils.EOF_CODE :
throw e;
default :
break;
}
}
LOG.warn("IOException occurred in lazySeek, retry: {}", i, e);
if (i == (SEEK_RETRY_TIME - 1)) {
throw e;
}
try {
Thread.sleep(DELAY_TIME);
} catch (InterruptedException ie) {
throw e;
}
  }
}
} | 3.26 |
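lazySeek follows a bounded retry-with-fixed-delay pattern: retry on IOException, rethrow immediately for selected OBS status codes, and give up after the last attempt. The generic sketch below shows only the retry skeleton with illustrative constants; it omits the status-code short-circuit and is not part of the Hadoop API.

// Generic sketch of a bounded retry-with-delay loop.
import java.io.IOException;
import java.util.concurrent.Callable;

public final class BoundedRetry {
  private static final int RETRIES = 3;        // illustrative, not the source's constant
  private static final long DELAY_MS = 100L;   // illustrative, not the source's constant

  public static <T> T run(Callable<T> action) throws Exception {
    for (int i = 0; i < RETRIES; i++) {
      try {
        return action.call();
      } catch (IOException e) {
        if (i == RETRIES - 1) {
          throw e;                    // out of attempts: surface the failure
        }
        try {
          Thread.sleep(DELAY_MS);     // fixed back-off between attempts
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
          throw e;                    // stop retrying if interrupted
        }
      }
    }
    throw new IllegalStateException("unreachable");
  }
}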
hadoop_OBSInputStream_checkNotClosed_rdh | /**
* Verify that the input stream is open. Non blocking; this gives the last
* state of the volatile {@link #closed} field.
*
* @throws IOException
* if the connection is closed.
*/
private void checkNotClosed() throws IOException {
if (closed) {
  throw new IOException(uri + ": " + FSExceptionMessages.STREAM_IS_CLOSED);
}
} | 3.26 |
hadoop_NoopAuditManagerS3A_checkAccess_rdh | /**
* Forward to the auditor.
*
* @param path
* path to check
* @param status
* status of the path.
* @param mode
* access mode.
* @throws IOException
* failure
*/
@Override
public boolean checkAccess(final Path path, final S3AFileStatus status, final FsAction mode) throws IOException {
return auditor.checkAccess(path, status, mode);
} | 3.26 |
hadoop_NoopAuditManagerS3A_getUnbondedSpan_rdh | /**
* Unbonded span to use after deactivation.
*/
private AuditSpanS3A getUnbondedSpan() {
  return auditor.getUnbondedSpan();
} | 3.26 |
hadoop_NoopAuditManagerS3A_createNewSpan_rdh | /**
* A static source of no-op spans, using the same span ID
* source as managed spans.
*
* @param name
* operation name.
* @param path1
* first path of operation
* @param path2
* second path of operation
* @return a span for the audit
*/
public static AuditSpanS3A createNewSpan(final String name, final String path1, final String path2) {
return NoopSpan.INSTANCE;
} | 3.26 |
hadoop_PipesPartitioner_getPartition_rdh | /**
* If a partition result was set manually, return it. Otherwise, we call
* the Java partitioner.
*
* @param key
* the key to partition
* @param value
* the value to partition
* @param numPartitions
* the number of reduces
*/
public int getPartition(K key, V value, int numPartitions) {
Integer v0 = CACHE.get();
if (v0 == null) {
return part.getPartition(key, value, numPartitions);
} else {
return v0;
}
} | 3.26 |
hadoop_PipesPartitioner_setNextPartition_rdh | /**
* Set the next key to have the given partition.
*
* @param newValue
* the next partition value
*/
static void setNextPartition(int newValue) {
CACHE.set(newValue);
} | 3.26 |
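Together, setNextPartition and getPartition implement a per-thread override through a ThreadLocal: if a value was stored for the current thread, it short-circuits the normal partition computation. The standalone sketch below mirrors that pattern with illustrative names; it is not part of the Hadoop API.

// Sketch of a ThreadLocal-based partition override.
public class OverridablePartitioner {
  private static final ThreadLocal<Integer> CACHE = new ThreadLocal<>();

  /** Record the partition the next call on this thread should return. */
  static void setNextPartition(int partition) {
    CACHE.set(partition);
  }

  /** Use the per-thread override if one was set, otherwise hash the key. */
  public int getPartition(Object key, int numPartitions) {
    Integer override = CACHE.get();
    if (override == null) {
      return (key.hashCode() & Integer.MAX_VALUE) % numPartitions;  // fall back to hashing
    }
    return override;  // value stored via setNextPartition()
  }
}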
hadoop_AuthenticationHandlerUtil_checkAuthScheme_rdh | /**
* This method checks if the specified HTTP authentication <code>scheme</code>
* value is valid.
*
* @param scheme
* HTTP authentication scheme to be checked
* @return Canonical representation of HTTP authentication scheme
* @throws IllegalArgumentException
* In case the specified value is not a valid
* HTTP authentication scheme.
*/
public static String checkAuthScheme(String scheme) {
if (BASIC.equalsIgnoreCase(scheme)) {
return BASIC;
} else if (NEGOTIATE.equalsIgnoreCase(scheme)) {
return NEGOTIATE;
} else if (DIGEST.equalsIgnoreCase(scheme)) {
return DIGEST;
}
throw new IllegalArgumentException(String.format("Unsupported HTTP authentication scheme %s ." + " Supported schemes are [%s, %s, %s]", scheme, BASIC, NEGOTIATE, DIGEST));
} | 3.26 |
hadoop_AuthenticationHandlerUtil_getAuthenticationHandlerClassName_rdh | /**
 * This method resolves the class name of the {@link AuthenticationHandler}
 * implementation for the specified <code>authHandler</code>.
 *
 * @param authHandler
 * The short-name (or fully qualified class name) of the
 * authentication handler.
 * @return the fully qualified class name of the AuthenticationHandler
 * implementation.
*/
public static String getAuthenticationHandlerClassName(String authHandler) {
if (authHandler == null) {
throw new NullPointerException();
}
String handlerName = authHandler.toLowerCase(Locale.ENGLISH);
String authHandlerClassName = null;
if (handlerName.equals(PseudoAuthenticationHandler.TYPE)) {
  authHandlerClassName = PseudoAuthenticationHandler.class.getName();
} else if (handlerName.equals(KerberosAuthenticationHandler.TYPE)) {
  authHandlerClassName = KerberosAuthenticationHandler.class.getName();
} else if (handlerName.equals(LdapAuthenticationHandler.TYPE)) {
  authHandlerClassName = LdapAuthenticationHandler.class.getName();
} else if (handlerName.equals(MultiSchemeAuthenticationHandler.TYPE)) {
authHandlerClassName = MultiSchemeAuthenticationHandler.class.getName();
} else {
authHandlerClassName = authHandler;
}
return authHandlerClassName;
} | 3.26 |
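A self-contained sketch of the same short-name-to-class-name resolution is shown below. The map mirrors the branch logic above (in Hadoop's auth module the pseudo and Kerberos handlers use the TYPE values "simple" and "kerberos"); unknown names fall through unchanged as fully qualified class names. The helper class itself is illustrative, not part of the library.

// Sketch: resolve a handler short name to a class name, else pass it through.
import java.util.Locale;
import java.util.Map;

public class HandlerResolver {
  private static final Map<String, String> WELL_KNOWN = Map.of(
      "simple", "org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler",
      "kerberos", "org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler");

  static String resolve(String authHandler) {
    String key = authHandler.toLowerCase(Locale.ENGLISH);
    return WELL_KNOWN.getOrDefault(key, authHandler);   // fall through to an FQCN
  }

  public static void main(String[] args) {
    System.out.println(resolve("kerberos"));
    System.out.println(resolve("com.example.MyHandler")); // hypothetical custom handler
  }
}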
hadoop_OBSLoginHelper_canonicalizeUri_rdh | /**
* Canonicalize the given URI.
*
* <p>This strips out login information.
*
* @param uri
* the URI to canonicalize
* @param defaultPort
* default port to use in canonicalized URI if the input
* URI has no port and this value is greater than 0
* @return a new, canonicalized URI.
*/
public static URI canonicalizeUri(final URI uri, final int defaultPort) {
URI newUri = uri;
if ((uri.getPort() == (-1)) && (defaultPort > 0)) {
// reconstruct the uri with the default port set
try {
  newUri = new URI(newUri.getScheme(), null, newUri.getHost(), defaultPort, newUri.getPath(), newUri.getQuery(), newUri.getFragment());
} catch (URISyntaxException e) {
// Should never happen!
throw new AssertionError("Valid URI became unparseable: " + newUri);
}
}
return newUri;
} | 3.26 |
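The port-defaulting step can be demonstrated with plain java.net.URI and no Hadoop types; the scheme, host, and port in the sketch below are example values.

// Sketch: rebuild a URI with a default port when none is present.
import java.net.URI;
import java.net.URISyntaxException;

public class CanonicalizeDemo {
  static URI withDefaultPort(URI uri, int defaultPort) throws URISyntaxException {
    if (uri.getPort() != -1 || defaultPort <= 0) {
      return uri;                                  // already has a port, or no default given
    }
    return new URI(uri.getScheme(), null, uri.getHost(), defaultPort,
        uri.getPath(), uri.getQuery(), uri.getFragment());
  }

  public static void main(String[] args) throws URISyntaxException {
    URI in = URI.create("obs://bucket/dir/file");
    System.out.println(withDefaultPort(in, 443));  // obs://bucket:443/dir/file
  }
}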
hadoop_OBSLoginHelper_extractLoginDetailsWithWarnings_rdh | /**
* Extract the login details from a URI, logging a warning if the URI contains
* these.
*
* @param name
* URI of the filesystem
* @return a login tuple, possibly empty.
*/
public static Login extractLoginDetailsWithWarnings(final URI name) {
Login login = extractLoginDetails(name);
if (login.hasLogin()) {
LOG.warn(LOGIN_WARNING);
}
return login;
} | 3.26 |
hadoop_OBSLoginHelper_equals_rdh | /**
* Equality test matches user and password.
*
* @param o
* other object
* @return true if the objects are considered equivalent.
*/
@Override
public boolean equals(final Object o) {
if (this == o) {
return true;
}
if ((o == null) || (getClass() != o.getClass())) {
return false;
}
Login that = (Login) o;
return Objects.equals(f0, that.f0) && Objects.equals(password, that.password);
} | 3.26 |
hadoop_OBSLoginHelper_checkPath_rdh | /**
* Check the path, ignoring authentication details. See {@link OBSFileSystem#checkPath(Path)} for the operation of this.
*
* <p>Essentially
*
* <ol>
* <li>The URI is canonicalized.
* <li>If the schemas match, the hosts are compared.
* <li>If there is a mismatch between null/non-null host,
* the default FS values are used to patch in the host.
* </ol>
* <p>
* That all originates in the core FS; the sole change here being to use
 * {@link URI#getHost()} over {@link URI#getAuthority()}. Some of that code
 * looks like a relic of the code anti-pattern of using "hdfs:file.txt" to define
* the path without declaring the hostname. It's retained for compatibility.
*
* @param conf
* FS configuration
* @param fsUri
* the FS URI
* @param path
* path to check
* @param defaultPort
* default port of FS
*/
public static void checkPath(final Configuration conf, final URI fsUri, final Path path, final int defaultPort) {
URI pathUri = path.toUri();
String thatScheme = pathUri.getScheme();
if (thatScheme == null) {
// fs is relative
return;
}
URI thisUri = canonicalizeUri(fsUri, defaultPort);
String thisScheme = thisUri.getScheme();
// hostname and scheme are not case sensitive in these checks
if (equalsIgnoreCase(thisScheme, thatScheme)) {
// schemes match
String thisHost = thisUri.getHost();
String thatHost = pathUri.getHost();
if ((thatHost == null) && // path's host is null
(thisHost != null)) {
// fs has a host
URI defaultUri = FileSystem.getDefaultUri(conf);
if (equalsIgnoreCase(thisScheme, defaultUri.getScheme())) {
  pathUri = defaultUri; // schemes match, so use this uri instead
} else {
  pathUri = null; // can't determine auth of the path
}
}
if (pathUri != null) {
// canonicalize uri before comparing with this fs
pathUri = canonicalizeUri(pathUri, defaultPort);
thatHost = pathUri.getHost();
if (equalsIgnoreCase(thisHost, thatHost)) {
return;
}
}
}
// make sure the exception strips out any auth details
throw new IllegalArgumentException((("Wrong FS " + OBSLoginHelper.toString(pathUri)) + " -expected ") + fsUri);
} | 3.26 |
hadoop_OBSLoginHelper_extractLoginDetails_rdh | /**
* Extract the login details from a URI.
*
* @param name
* URI of the filesystem
* @return a login tuple, possibly empty.
*/
public static Login extractLoginDetails(final URI name) {
try {
String authority = name.getAuthority();
if (authority == null) {
return Login.EMPTY;
}
int loginIndex = authority.indexOf('@');
if (loginIndex < 0) {
// no login
return Login.EMPTY;
}
String login = authority.substring(0, loginIndex);
int v4 = login.indexOf(':');
if (v4 > 0) {
String user = login.substring(0, v4);
String encodedPassword = login.substring(v4 + 1);
if (encodedPassword.contains(PLUS_UNENCODED)) {
LOG.warn(PLUS_WARNING);
encodedPassword = encodedPassword.replaceAll("\\" + PLUS_UNENCODED, PLUS_ENCODED);
}
String password = URLDecoder.decode(encodedPassword, "UTF-8");
return new Login(user, password);
} else if (v4 == 0) {
// there is no user, just a password. In this case,
// there's no login
return Login.EMPTY;
} else {
return new Login(login, "");
}
} catch (UnsupportedEncodingException e) {
// this should never happen; translate it if it does.
throw new RuntimeException(e);
}
} | 3.26 |
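A caller-side sketch of the same extraction is shown below, using a hypothetical obs:// URI with inline credentials; it is shown only to illustrate the parsing, since embedding secrets in URIs is exactly what the warning above discourages.

// Sketch: pull "user:password" out of a URI authority.
import java.net.URI;

public class LoginParseDemo {
  public static void main(String[] args) {
    URI uri = URI.create("obs://alice:secret@bucket/dir/file");  // hypothetical URI
    String authority = uri.getAuthority();        // "alice:secret@bucket"
    int at = authority.indexOf('@');
    String login = authority.substring(0, at);    // "alice:secret"
    int colon = login.indexOf(':');
    System.out.println(login.substring(0, colon));   // alice
    System.out.println(login.substring(colon + 1));  // secret
  }
}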
hadoop_OBSLoginHelper_buildFSURI_rdh | /**
* Build the filesystem URI. This can include stripping down of part of the
* URI.
*
* @param uri
* filesystem uri
* @return the URI to use as the basis for FS operation and qualifying paths.
* @throws IllegalArgumentException
* if the URI is in some way invalid.
*/
public static URI buildFSURI(final URI uri) {
Objects.requireNonNull(uri, "null uri");
Objects.requireNonNull(uri.getScheme(), "null uri.getScheme()");
if ((uri.getHost() == null) && (uri.getAuthority() != null)) {
Objects.requireNonNull(uri.getHost(), ("null uri host." + " This can be caused by unencoded / in the ") + "password string");
}
Objects.requireNonNull(uri.getHost(), "null uri host.");
return URI.create((uri.getScheme() + "://") + uri.getHost());
} | 3.26 |
hadoop_OBSLoginHelper_toString_rdh | /**
* Create a stripped down string value for error messages.
*
* @param pathUri
* URI
* @return a shortened schema://host/path value
*/
public static String toString(final URI pathUri) {
return pathUri != null ? String.format("%s://%s/%s", pathUri.getScheme(), pathUri.getHost(), pathUri.getPath()) : "(null URI)";
} | 3.26 |
hadoop_OBSLoginHelper_hasLogin_rdh | /**
* Predicate to verify login details are defined.
*
* @return true if the username is defined (not null, not empty).
*/
public boolean hasLogin() {
return StringUtils.isNotEmpty(f0);
} | 3.26 |
hadoop_ContainerAllocationHistory_addAllocationEntry_rdh | /**
* Record the allocation history for the container.
*
* @param container
* to add record for
* @param requestSet
* resource request ask set
* @param fulfillTimeStamp
* time at which allocation happened
* @param fulfillLatency
* time elapsed in allocating since asked
*/
public synchronized void addAllocationEntry(Container container, ResourceRequestSet requestSet, long fulfillTimeStamp, long fulfillLatency) {
if (!requestSet.isANYRelaxable()) {
LOG.info("allocation history ignoring {}, relax locality is false", container);return;
}
this.relaxableG.add(new AbstractMap.SimpleEntry<>(fulfillTimeStamp, fulfillLatency));
if (this.relaxableG.size() > this.maxEntryCount) {
this.relaxableG.remove();
}
} | 3.26 |
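The history above is a bounded FIFO: append an entry, then evict the oldest once the maximum size is exceeded. A standalone sketch of that structure, with illustrative names and no YARN types, follows.

// Sketch: bounded FIFO of (timestamp, latency) entries.
import java.util.AbstractMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.Queue;

public class BoundedHistory {
  private final Queue<Map.Entry<Long, Long>> entries = new LinkedList<>();
  private final int maxEntries;

  public BoundedHistory(int maxEntries) {
    this.maxEntries = maxEntries;
  }

  public synchronized void add(long timestamp, long latencyMs) {
    entries.add(new AbstractMap.SimpleEntry<>(timestamp, latencyMs));
    if (entries.size() > maxEntries) {
      entries.remove();               // drop the oldest entry
    }
  }
}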
hadoop_HsLogsPage_preHead_rdh | /* (non-Javadoc)
@see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
*/
@Override
protected void preHead(Page.HTML<__> html) {
commonPreHead(html);
setActiveNavColumnForTask();
} | 3.26 |
hadoop_HsLogsPage_content_rdh | /**
 * The content of this page is the AggregatedLogsBlock
 *
 * @return AggregatedLogsBlock.class
*/
@Override
protected Class<? extends SubView> content() {
return AggregatedLogsBlock.class;
} | 3.26 |
hadoop_FileSystemNodeLabelsStore_recover_rdh | /* (non-Javadoc)
@see org.apache.hadoop.yarn.nodelabels.NodeLabelsStore#recover(boolean)
*/
@Override
public void recover() throws YarnException, IOException {
super.recoverFromStore();
} | 3.26 |
hadoop_DeleteCompletionCallback_getEventCount_rdh | /**
* Get the number of deletion events
*
* @return the count of events
*/
public int getEventCount() {
return events.get();
} | 3.26 |
hadoop_ServiceLaunchException_getExitCode_rdh | /**
* Get the exit code
*
* @return the exit code
*/
@Override
public int getExitCode() {
  return exitCode;
} | 3.26 |