name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_Utils_getMinor_rdh | /**
* Get the minor version.
*
* @return The minor version.
*/
public int getMinor() {
return minor;
} | 3.26 |
hadoop_Utils_readVInt_rdh | /**
* Decoding the variable-length integer. Synonymous to
* <code>(int)Utils#readVLong(in)</code>.
*
* @param in
* input stream
* @return the decoded integer
* @throws IOException
* raised on errors performing I/O.
* @see Utils#readVLong(DataInput)
*/
public static int readVInt(DataInput in) throws IOException {
long ret = readVLong(in);
if ((ret > Integer.MAX_VALUE) || (ret < Integer.MIN_VALUE)) {
throw new RuntimeException("Number too large to be represented as Integer");
}
return (int) ret;
} | 3.26 |
hadoop_Utils_getMajor_rdh | /**
* Get the major version.
*
* @return Major version.
*/
public int getMajor() {
return major;
} | 3.26 |
hadoop_Utils_readString_rdh | /**
* Read a String as a VInt n, followed by n Bytes in Text format.
*
* @param in
* The input stream.
* @return The string
* @throws IOException
* raised on errors performing I/O.
*/
public static String readString(DataInput in) throws IOException {
int length = readVInt(in);
if (length == -1) {
return null;
}
byte[] buffer = new byte[length];
in.readFully(buffer);
return Text.decode(buffer);
} | 3.26 |
hadoop_Utils_writeVLong_rdh | /**
* Encoding a Long integer into a variable-length encoding format.
* <ul>
* <li>if n in [-32, 127): encode in one byte with the actual value.
* Otherwise,
* <li>if n in [-20*2^8, 20*2^8): encode in two bytes: byte[0] = n/256 - 52;
* byte[1]=n&0xff. Otherwise,
* <li>if n in [-16*2^16, 16*2^16): encode in three bytes: byte[0]=n/2^16 -
* 88; byte[1]=(n>>8)&0xff; byte[2]=n&0xff. Otherwise,
* <li>if n in [-8*2^24, 8*2^24): encode in four bytes: byte[0]=n/2^24 - 112;
* byte[1] = (n>>16)&0xff; byte[2] = (n>>8)&0xff;
* byte[3]=n&0xff.
* Otherwise:
* <li>if n in [-2^31, 2^31): encode in five bytes: byte[0]=-125; byte[1] =
* (n>>24)&0xff; byte[2]=(n>>16)&0xff;
* byte[3]=(n>>8)&0xff; byte[4]=n&0xff;
* <li>if n in [-2^39, 2^39): encode in six bytes: byte[0]=-124; byte[1] =
* (n>>32)&0xff; byte[2]=(n>>24)&0xff;
* byte[3]=(n>>16)&0xff; byte[4]=(n>>8)&0xff;
* byte[5]=n&0xff
* <li>if n in [-2^47, 2^47): encode in seven bytes: byte[0]=-123; byte[1] =
* (n>>40)&0xff; byte[2]=(n>>32)&0xff;
* byte[3]=(n>>24)&0xff; byte[4]=(n>>16)&0xff;
* byte[5]=(n>>8)&0xff; byte[6]=n&0xff;
* <li>if n in [-2^55, 2^55): encode in eight bytes: byte[0]=-122; byte[1] =
* (n>>48)&0xff; byte[2] = (n>>40)&0xff;
* byte[3]=(n>>32)&0xff; byte[4]=(n>>24)&0xff; byte[5]=
* (n>>16)&0xff; byte[6]=(n>>8)&0xff; byte[7]=n&0xff;
* <li>if n in [-2^63, 2^63): encode in nine bytes: byte[0]=-121; byte[1] =
* (n>>56)&0xff; byte[2] = (n>>48)&0xff;
* byte[3] = (n>>40)&0xff; byte[4]=(n>>32)&0xff;
* byte[5]=(n>>24)&0xff; byte[6]=(n>>16)&0xff; byte[7]=
* (n>>8)&0xff; byte[8]=n&0xff;
* </ul>
*
* @param out
* output stream
* @param n
* the integer number
* @throws IOException
* raised on errors performing I/O.
*/
@SuppressWarnings("fallthrough")
public static void writeVLong(DataOutput out, long n) throws IOException {
if ((n < 128) && (n >= -32)) {
out.writeByte((int) n);
return;
}
long un = (n < 0) ? ~n : n;
// how many bytes do we need to represent the number with sign bit?
int len = ((Long.SIZE - Long.numberOfLeadingZeros(un)) / 8) + 1;
int firstByte = (int) (n >> ((len - 1) * 8));
switch (len) {
case 1 : // fall through to firstByte==-1, len=2.
firstByte >>= 8;
case 2 :
if ((firstByte < 20) && (firstByte >= (-20))) {
out.writeByte(firstByte - 52);
out.writeByte(((int) (n)));
return;
}
// fall through to firstByte==0/-1, len=3.
firstByte >>= 8;
case 3 :
if ((firstByte < 16) && (firstByte >= (-16))) {
out.writeByte(firstByte - 88);
out.writeShort(((int) (n)));
return;
}
// fall through to firstByte==0/-1, len=4.
firstByte >>= 8;
case 4 :
if ((firstByte < 8) && (firstByte >= (-8))) {
out.writeByte(firstByte - 112);
out.writeShort(((int) n) >>> 8);
out.writeByte((int) n);
return;
}
out.writeByte(len - 129);
out.writeInt(((int) (n)));
return;
case 5 :
out.writeByte(len - 129);
out.writeInt(((int) (n >>> 8)));
out.writeByte(((int) (n)));
return;
case 6 :
out.writeByte(len - 129);
out.writeInt(((int) (n >>> 16)));
out.writeShort(((int) (n)));
return;
case 7 :
out.writeByte(len - 129);
out.writeInt(((int) (n >>> 24)));
out.writeShort(((int) (n >>> 8)));
out.writeByte(((int) (n)));
return;
case 8 :
out.writeByte(len - 129);
out.writeLong(n);
return;
default :
throw new RuntimeException("Internal error"); }
} | 3.26 |
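The variable-length encoding described above pairs with a readVLong(DataInput) decoder that the Javadoc references but this table does not show. A minimal round-trip sketch, assuming both helpers live on the same Utils class these snippets come from (class name and sample values are illustrative):

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Hypothetical round-trip check: smaller magnitudes should use fewer bytes.
public class VLongRoundTrip {
  public static void main(String[] args) throws IOException {
    long[] samples = {0L, 127L, -32L, 1L << 20, -(1L << 40), Long.MAX_VALUE};
    for (long n : samples) {
      ByteArrayOutputStream bytes = new ByteArrayOutputStream();
      Utils.writeVLong(new DataOutputStream(bytes), n);
      long decoded = Utils.readVLong(
          new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
      System.out.printf("%d -> %d byte(s) -> %d%n", n, bytes.size(), decoded);
    }
  }
}
```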
hadoop_Utils_compareTo_rdh | /**
* Compare this version with another version.
*/
@Override
public int compareTo(Version that) {
if (major != that.major) {
return major - that.major;
}
return minor - that.minor;
} | 3.26 |
hadoop_Utils_writeString_rdh | /**
* Write a String as a VInt n, followed by n Bytes as in Text format.
*
* @param out
* the output stream.
* @param s
* the string to write; may be null.
* @throws IOException
* raised on errors performing I/O.
*/
public static void writeString(DataOutput out, String s) throws IOException {
if (s != null) {
Text text = new Text(s);
byte[] buffer = text.getBytes();
int len = text.getLength();
writeVInt(out, len);
out.write(buffer, 0, len);
} else {
writeVInt(out, -1);
}
} | 3.26 |
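A short sketch of the length-prefixed string format used by writeString and the readString snippet earlier: a VInt length followed by that many UTF-8 bytes, with null encoded as length -1 (stream setup and class name are illustrative):

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Hypothetical round trip through the VInt-length + Text-bytes format.
public class VStringRoundTrip {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    Utils.writeString(out, "héllo");  // Text encodes the characters as UTF-8
    Utils.writeString(out, null);     // written as VInt -1
    DataInputStream in =
        new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
    System.out.println(Utils.readString(in)); // "héllo"
    System.out.println(Utils.readString(in)); // null
  }
}
```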
hadoop_Utils_writeVInt_rdh | /**
* Encoding an integer into a variable-length encoding format. Synonymous to
* <code>Utils#writeVLong(out, n)</code>.
*
* @param out
* output stream
* @param n
* The integer to be encoded
* @throws IOException
* raised on errors performing I/O.
* @see Utils#writeVLong(DataOutput, long)
*/
public static void writeVInt(DataOutput out, int n) throws IOException {
writeVLong(out, n);
} | 3.26 |
hadoop_Utils_toString_rdh | /**
* Return a string representation of the version.
*/
@Override
public String toString() {
return new StringBuilder("v").append(major).append(".").append(minor).toString();
} | 3.26 |
hadoop_GetContainersResponsePBImpl_initLocalContainerList_rdh | // Once this is called, containerList will never be null - until a getProto
// is called.
private void initLocalContainerList() {
if (this.containerList != null) {
return;
}
GetContainersResponseProtoOrBuilder p = (viaProto) ? proto : builder;
List<ContainerReportProto> list = p.getContainersList();
containerList = new ArrayList<ContainerReport>();
for (ContainerReportProto c : list) {
containerList.add(convertFromProtoFormat(c));
}
} | 3.26 |
hadoop_LocalJobOutputFiles_getInputFile_rdh | /**
* Return a local reduce input file created earlier
*
* @param mapId
* a map task id
*/
public Path getInputFile(int mapId) throws IOException {
return lDirAlloc.getLocalPathToRead(String.format(REDUCE_INPUT_FILE_FORMAT_STRING, f0, Integer.valueOf(mapId)), conf);
} | 3.26 |
hadoop_LocalJobOutputFiles_getOutputIndexFileForWrite_rdh | /**
* Create a local map output index file name.
*
* @param size
* the size of the file
*/
public Path getOutputIndexFileForWrite(long size) throws IOException {
String path = String.format(OUTPUT_FILE_INDEX_FORMAT_STRING, f0);
return lDirAlloc.getLocalPathForWrite(path, size, conf);
} | 3.26 |
hadoop_LocalJobOutputFiles_removeAll_rdh | /**
* Removes all of the files related to a task.
*/
public void removeAll() throws IOException {
conf.deleteLocalFiles(f0);
} | 3.26 |
hadoop_LocalJobOutputFiles_getInputFileForWrite_rdh | /**
* Create a local reduce input file name.
*
* @param mapId
* a map task id
* @param size
* the size of the file
*/
public Path getInputFileForWrite(TaskID mapId, long size, Configuration conf) throws IOException {
return lDirAlloc.getLocalPathForWrite(String.format(REDUCE_INPUT_FILE_FORMAT_STRING, f0, mapId.getId()), size, conf);
} | 3.26 |
hadoop_LocalJobOutputFiles_getOutputIndexFile_rdh | /**
* Return the path to a local map output index file created earlier
*/
public Path getOutputIndexFile() throws IOException {
String path = String.format(OUTPUT_FILE_INDEX_FORMAT_STRING, f0);
return lDirAlloc.getLocalPathToRead(path, conf);
} | 3.26 |
hadoop_LocalJobOutputFiles_getSpillIndexFileForWrite_rdh | /**
* Create a local map spill index file name.
*
* @param spillNumber
* the number
* @param size
* the size of the file
*/
public Path getSpillIndexFileForWrite(int spillNumber, long size) throws IOException {
String path = String.format(SPILL_INDEX_FILE_FORMAT_STRING, f0, spillNumber);
return lDirAlloc.getLocalPathForWrite(path, size, conf);
} | 3.26 |
hadoop_LocalJobOutputFiles_getSpillIndexFile_rdh | /**
* Return a local map spill index file created earlier
*
* @param spillNumber
* the number
*/
public Path getSpillIndexFile(int spillNumber) throws IOException {
String path = String.format(SPILL_INDEX_FILE_FORMAT_STRING, f0, spillNumber);
return lDirAlloc.getLocalPathToRead(path, conf);
} | 3.26 |
hadoop_LocalJobOutputFiles_getSpillFileForWrite_rdh | /**
* Create a local map spill file name.
*
* @param spillNumber
* the number
* @param size
* the size of the file
*/
public Path getSpillFileForWrite(int spillNumber, long size) throws IOException {
String path = String.format(SPILL_FILE_FORMAT_STRING, f0, spillNumber);
return lDirAlloc.getLocalPathForWrite(path, size, conf);
} | 3.26 |
hadoop_LocalJobOutputFiles_getOutputFileForWrite_rdh | /**
* Create a local map output file name.
*
* @param size
* the size of the file
*/
public Path getOutputFileForWrite(long size) throws IOException {
String path = String.format(OUTPUT_FILE_FORMAT_STRING, f0);
return lDirAlloc.getLocalPathForWrite(path, size, conf);
} | 3.26 |
hadoop_LocalJobOutputFiles_getSpillFile_rdh | /**
* Return a local map spill file created earlier.
*
* @param spillNumber
* the number
*/
public Path getSpillFile(int spillNumber) throws IOException {
String path = String.format(SPILL_FILE_FORMAT_STRING, f0, spillNumber);
return lDirAlloc.getLocalPathToRead(path, conf);
} | 3.26 |
hadoop_LocalJobOutputFiles_getOutputFile_rdh | /**
* Return the path to local map output file created earlier
*/
public Path getOutputFile() throws IOException {
String path = String.format(OUTPUT_FILE_FORMAT_STRING, f0);
return lDirAlloc.getLocalPathToRead(path, conf);
} | 3.26 |
hadoop_ECChunk_getBuffer_rdh | /**
* Convert to ByteBuffer
*
* @return ByteBuffer
*/
public ByteBuffer getBuffer() {
return chunkBuffer;
} | 3.26 |
hadoop_ECChunk_toBuffers_rdh | /**
* Convert an array of chunks to an array of ByteBuffers
*
* @param chunks
* chunks to convert into buffers
* @return an array of ByteBuffers
*/
public static ByteBuffer[] toBuffers(ECChunk[] chunks) {
ByteBuffer[] buffers = new ByteBuffer[chunks.length];
ECChunk chunk;
for (int i = 0; i < chunks.length; i++) {
chunk = chunks[i];
if (chunk == null) {
buffers[i] = null;
} else {
buffers[i] = chunk.getBuffer();
}
}
return buffers;
} | 3.26 |
hadoop_ECChunk_toBytesArray_rdh | /**
* Convert to a bytes array, just for test usage.
*
* @return bytes array
*/
public byte[] toBytesArray() {
byte[] bytesArr = new byte[chunkBuffer.remaining()];
// Avoid affecting the original one
chunkBuffer.mark();
chunkBuffer.get(bytesArr);
chunkBuffer.reset();
return bytesArr;
} | 3.26 |
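The mark()/get()/reset() sequence above copies the remaining bytes without disturbing the buffer position that other readers rely on. A standalone sketch of the same pattern (names and sample data are illustrative):

```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

// Hypothetical demo: copy a ByteBuffer's remaining bytes, leaving its position unchanged.
public class NonDestructiveCopy {
  static byte[] copyRemaining(ByteBuffer buf) {
    byte[] out = new byte[buf.remaining()];
    buf.mark();    // remember the current position
    buf.get(out);  // get() advances the position while copying
    buf.reset();   // restore the marked position for later readers
    return out;
  }

  public static void main(String[] args) {
    ByteBuffer buf = ByteBuffer.wrap("chunk-data".getBytes(StandardCharsets.UTF_8));
    byte[] copy = copyRemaining(buf);
    System.out.println(new String(copy, StandardCharsets.UTF_8)
        + ", remaining=" + buf.remaining());
  }
}
```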
hadoop_SubApplicationEntity_isSubApplicationEntity_rdh | /**
* Checks if the input TimelineEntity object is a SubApplicationEntity.
*
* @param te
* TimelineEntity object.
* @return true if input is a SubApplicationEntity, false otherwise
*/
public static boolean isSubApplicationEntity(TimelineEntity te) {
return (te != null) && (te instanceof SubApplicationEntity);
} | 3.26 |
hadoop_AbfsInputStreamStatisticsImpl_toString_rdh | /**
* String operator describes all the current statistics.
* <b>Important: there are no guarantees as to the stability
* of this value.</b>
*
* @return the current values of the stream statistics.
*/
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("StreamStatistics{");
sb.append(ioStatisticsStore.toString());
sb.append('}');
return sb.toString();
} | 3.26 |
hadoop_AbfsInputStreamStatisticsImpl_readAheadBytesRead_rdh | /**
* Total bytes read from readAhead buffer during a read operation.
*
* @param bytes
* the bytes to be incremented.
*/
@Override
public void readAheadBytesRead(long bytes) {
ioStatisticsStore.incrementCounter(StreamStatisticNames.READ_AHEAD_BYTES_READ, bytes);
} | 3.26 |
hadoop_AbfsInputStreamStatisticsImpl_remoteReadOperation_rdh | /**
* {@inheritDoc }
*
* Increment the counter when a remote read operation occurs.
*/
@Override
public void remoteReadOperation() {
ioStatisticsStore.incrementCounter(StreamStatisticNames.REMOTE_READ_OP);
} | 3.26 |
hadoop_AbfsInputStreamStatisticsImpl_seekBackwards_rdh | /**
* Seek backwards, incrementing the seek and backward seek counters.
*
* @param negativeOffset
* how far was the seek?
* This is expected to be negative.
*/
@Override
public void seekBackwards(long negativeOffset) {
seekOps.incrementAndGet();
ioStatisticsStore.incrementCounter(StreamStatisticNames.STREAM_READ_SEEK_BACKWARD_OPERATIONS);
ioStatisticsStore.incrementCounter(StreamStatisticNames.STREAM_READ_SEEK_BYTES_BACKWARDS, negativeOffset);
} | 3.26 |
hadoop_AbfsInputStreamStatisticsImpl_readOperationStarted_rdh | /**
* A {@code read(byte[] buf, int off, int len)} operation has started.
*/
@Override
public void readOperationStarted() {
readOps.incrementAndGet();
} | 3.26 |
hadoop_AbfsInputStreamStatisticsImpl_bytesReadFromBuffer_rdh | /**
* {@inheritDoc }
*
* Total bytes read from the buffer.
*
* @param bytes
* number of bytes that are read from buffer.
*/
@Override
public void bytesReadFromBuffer(long bytes) {
ioStatisticsStore.incrementCounter(StreamStatisticNames.BYTES_READ_BUFFER, bytes);
} | 3.26 |
hadoop_AbfsInputStreamStatisticsImpl_seek_rdh | /**
* Record a forward or backward seek, adding a seek operation, a forward or
* a backward seek operation, and number of bytes skipped.
* The seek direction will be calculated based on the parameters.
*
* @param seekTo
* seek to the position.
* @param currentPos
* current position.
*/
@Override
public void seek(long seekTo, long currentPos) {
if (seekTo >= currentPos) {
this.seekForwards(seekTo - currentPos);
} else {
this.seekBackwards(currentPos - seekTo);
}
} | 3.26 |
hadoop_AbfsInputStreamStatisticsImpl_remoteBytesRead_rdh | /**
* Total bytes read remotely after nothing was read from readAhead buffer.
*
* @param bytes
* the bytes to be incremented.
*/
@Override
public void remoteBytesRead(long bytes) {
ioStatisticsStore.incrementCounter(StreamStatisticNames.REMOTE_BYTES_READ, bytes);
} | 3.26 |
hadoop_AbfsInputStreamStatisticsImpl_bytesRead_rdh | /**
* Increment the bytes read counter by the number of bytes;
* no-op if the argument is negative.
*
* @param bytes
* number of bytes read.
*/
@Override
public void bytesRead(long bytes) {
bytesRead.addAndGet(bytes);
} | 3.26 |
hadoop_AbfsInputStreamStatisticsImpl_getActionHttpGetRequest_rdh | /**
* Getter for the mean value of the time taken to complete an HTTP GET
* request by AbfsInputStream.
*
* @return mean value.
*/
@VisibleForTesting
public double getActionHttpGetRequest() {
return ioStatisticsStore.meanStatistics().get(ACTION_HTTP_GET_REQUEST + SUFFIX_MEAN).mean();
} | 3.26 |
hadoop_ConsumerRaisingIOE_andThen_rdh | /**
* after calling {@link #accept(Object)},
* invoke the next consumer in the chain.
*
* @param next
* next consumer
* @return the chain.
*/
default ConsumerRaisingIOE<T> andThen(ConsumerRaisingIOE<? super T> next) {
return (T t) -> {
accept(t);
next.accept(t);
};
} | 3.26 |
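A small usage sketch of the chaining above, assuming ConsumerRaisingIOE&lt;T&gt; is a functional interface whose accept(T) may throw IOException; the lambdas and values are made up for illustration:

```java
// Hypothetical chain: both consumers run in order on every accept() call,
// and an IOException from the first one stops the second from running.
ConsumerRaisingIOE<String> validate = s -> {
  if (s.isEmpty()) {
    throw new java.io.IOException("empty value");
  }
};
ConsumerRaisingIOE<String> logIt = s -> System.out.println("processing " + s);
ConsumerRaisingIOE<String> pipeline = validate.andThen(logIt);
pipeline.accept("part-00000");  // prints only if validation passed
```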
hadoop_ApplicationReportSerDeser_toString_rdh | /**
* Convert an instance to a JSON string, with synchronized access to a shared
* ser/deser object instance.
*
* @param instance
* object to convert
* @return a JSON string description
* @throws JsonProcessingException
* parse problems
*/
public static String toString(SerializedApplicationReport instance) throws JsonProcessingException {
synchronized(staticinstance) {
return staticinstance.toJson(instance);
}
} | 3.26 |
hadoop_EmptyS3AStatisticsContext_m0_rdh | /**
* Always return the stub duration tracker.
*
* @param key
* statistic key prefix
* @param count
* #of times to increment the matching counter in this
* operation.
* @return stub tracker.
*/
public DurationTracker m0(String key, long count) {
return stubDurationTracker();
} | 3.26 |
hadoop_RpcNoSuchProtocolException_getRpcStatusProto_rdh | /**
* get the rpc status corresponding to this exception
*/
public RpcStatusProto getRpcStatusProto() {
return RpcStatusProto.ERROR;
} | 3.26 |
hadoop_RpcNoSuchProtocolException_getRpcErrorCodeProto_rdh | /**
* get the detailed rpc status corresponding to this exception
*/
public RpcErrorCodeProto getRpcErrorCodeProto() {
return RpcErrorCodeProto.ERROR_NO_SUCH_PROTOCOL;
} | 3.26 |
hadoop_GPGPolicyFacade_setPolicyManager_rdh | /**
* Provides a utility for the policy generator to write a policy manager
* into the FederationStateStore. The facade keeps a cache and will only write
* into the FederationStateStore if the policy configuration has changed.
*
* @param policyManager
* The policy manager we want to update into the state
* store. It contains policy information as well as
* the queue name we will update for.
* @throws YarnException
* exceptions from yarn servers.
*/
public void setPolicyManager(FederationPolicyManager policyManager) throws YarnException {
if (policyManager == null) {
LOG.warn("Attempting to set null policy manager");
return;
}
// Extract the configuration from the policy manager
String queue = policyManager.getQueue();
SubClusterPolicyConfiguration v5;
try {
v5 = policyManager.serializeConf();
} catch (FederationPolicyInitializationException e) {
LOG.warn("Error serializing policy for queue {}", queue);
throw e;
}
if (v5 == null) {
// State store does not currently support setting a policy back to null
// because it reads the queue name to set from the policy!
LOG.warn("Skip setting policy to null for queue {} into state store", queue);
return;
}
// Compare with configuration cache, if different, write the conf into
// store and update our conf and manager cache
if (!confCacheEqual(queue, v5)) {
try {
if (readOnly) {
LOG.info("[read-only] Skipping policy update for queue {}", queue);
return;
}
LOG.info("Updating policy for queue {} into state store",
queue);stateStore.setPolicyConfiguration(v5);
policyConfMap.put(queue, v5);
policyManagerMap.put(queue, policyManager);
} catch (YarnException e) {
LOG.warn("Error writing SubClusterPolicyConfiguration to state " + "store for queue: {}", queue);
throw e;
}
} else {
LOG.info("Setting unchanged policy - state store write skipped");
}
} | 3.26 |
hadoop_GPGPolicyFacade_confCacheEqual_rdh | /**
*
* @param queue
* the queue to check the cached policy configuration for
* @param conf
* the new policy configuration
* @return whether or not the conf is equal to the cached conf
*/
private boolean confCacheEqual(String queue, SubClusterPolicyConfiguration conf) {
SubClusterPolicyConfiguration cachedConf = policyConfMap.get(queue);
if ((conf == null) && (cachedConf == null)) {
return true;
} else if ((conf != null) && (cachedConf != null)) {
if (conf.equals(cachedConf)) {
return true;
}
}
return false;
} | 3.26 |
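The null handling above amounts to a null-safe equality test; a compact equivalent, shown only as a sketch against the same fields used by the surrounding class, would be java.util.Objects.equals:

```java
// Hypothetical drop-in equivalent: true when both are null,
// or when both are non-null and equal.
private boolean confCacheEqual(String queue, SubClusterPolicyConfiguration conf) {
  return java.util.Objects.equals(conf, policyConfMap.get(queue));
}
```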
hadoop_GPGPolicyFacade_getPolicyManager_rdh | /**
* Provides a utility for the policy generator to read the policy manager
* from the FederationStateStore. Because the policy generator should be the
* only component updating the policy, this implementation does not use the
* reinitialization feature.
*
* @param queueName
* the name of the queue we want the policy manager for.
* @return the policy manager responsible for the queue policy.
* @throws YarnException
* exceptions from yarn servers.
*/
public FederationPolicyManager getPolicyManager(String queueName) throws YarnException {
FederationPolicyManager policyManager = policyManagerMap.get(queueName);
// If we don't have the policy manager cached, pull configuration
// from the FederationStateStore to create and cache it
if (policyManager == null) {
try {
// If we don't have the configuration cached, pull it
// from the stateStore
SubClusterPolicyConfiguration conf = policyConfMap.get(queueName);
if (conf == null) {
conf = stateStore.getPolicyConfiguration(queueName);
}
// If configuration is still null, it does not exist in the
// FederationStateStore
if (conf == null) {
LOG.info("Read null policy for queue {}", queueName);
return null;
}
policyManager = FederationPolicyUtils.instantiatePolicyManager(conf.getType());
policyManager.setQueue(queueName);
// TODO there is currently no way to cleanly deserialize a policy
// manager sub type from just the configuration
if (policyManager instanceof WeightedLocalityPolicyManager) {
WeightedPolicyInfo wpinfo = WeightedPolicyInfo.fromByteBuffer(conf.getParams());
WeightedLocalityPolicyManager wlpmanager = ((WeightedLocalityPolicyManager) (policyManager));
LOG.info("Updating policy for queue {} to configured weights router: " + "{}, amrmproxy: {}", queueName, wpinfo.getRouterPolicyWeights(), wpinfo.getAMRMPolicyWeights());
wlpmanager.setWeightedPolicyInfo(wpinfo);
} else {
LOG.warn("Warning: FederationPolicyManager of unsupported type {}, " + "initialization may be incomplete ", policyManager.getClass());
}
policyManagerMap.put(queueName, policyManager);
policyConfMap.put(queueName, conf);
} catch (YarnException e) {
LOG.error("Error reading SubClusterPolicyConfiguration from state " + "store for queue: {}", queueName);
throw e;
}
}
return policyManager;
} | 3.26 |
hadoop_BoundedRangeFileInputStream_skip_rdh | /* We may skip beyond the end of the file. */
@Override
public long skip(long n) throws IOException {
long len = Math.min(n, end - pos);
pos += len;
return len;
} | 3.26 |
hadoop_FilePool_getInputFiles_rdh | /**
* Gather a collection of files at least as large as minSize.
*
* @return The total size of files returned.
*/
public long getInputFiles(long minSize, Collection<FileStatus> files) throws IOException {
updateLock.readLock().lock();
try {
return root.selectFiles(minSize, files);
} finally {
updateLock.readLock().unlock();
}
} | 3.26 |
hadoop_FilePool_refresh_rdh | /**
* (Re)generate cache of input FileStatus objects.
*/
public void refresh() throws IOException {
updateLock.writeLock().lock();
try {
root = new InnerDesc(fs, fs.getFileStatus(path), new MinFileFilter(conf.getLong(GRIDMIX_MIN_FILE, (128 * 1024) * 1024), conf.getLong(GRIDMIX_MAX_TOTAL, 100L * (1L << 40))));
if (0 == root.getSize()) {
throw new IOException("Found no satisfactory file in " + path);
}
} finally {
updateLock.writeLock().unlock();
}
} | 3.26 |
hadoop_FilePool_locationsFor_rdh | /**
* Get a set of locations for the given file.
*/
public BlockLocation[] locationsFor(FileStatus stat, long start, long len) throws IOException {
// TODO cache
return fs.getFileBlockLocations(stat, start, len);
} | 3.26 |
hadoop_GangliaMetricVisitor_getType_rdh | /**
*
* @return the type of a visited metric
*/
String getType() {
return type;
} | 3.26 |
hadoop_RegistryPathUtils_encodeForRegistry_rdh | /**
* Perform any formatting for the registry needed to convert
* non-simple-DNS elements
*
* @param element
* element to encode
* @return an encoded string
*/
public static String encodeForRegistry(String element) {
return IDN.toASCII(element);
} | 3.26 |
hadoop_RegistryPathUtils_createFullPath_rdh | /**
* Create a full path from the registry root and the supplied subdir
*
* @param path
* path of operation
* @return an absolute path
* @throws InvalidPathnameException
* if the path is invalid
*/
public static String createFullPath(String base, String path) throws InvalidPathnameException {
Preconditions.checkArgument(path != null, "null path");
Preconditions.checkArgument(base != null, "null path");
return validateZKPath(join(base, path));
} | 3.26 |
hadoop_RegistryPathUtils_split_rdh | /**
* split a path into elements, stripping empty elements
*
* @param path
* the path
* @return the split path
*/
public static List<String> split(String path) {
//
String[] pathelements = path.split("/");
List<String> dirs = new ArrayList<String>(pathelements.length);
for (String pathelement : pathelements) {
if (!pathelement.isEmpty()) {
dirs.add(pathelement);
}
}
return dirs;
} | 3.26 |
hadoop_RegistryPathUtils_validateZKPath_rdh | /**
* Validate ZK path with the path itself included in
* the exception text
*
* @param path
* path to validate
* @return the path parameter
* @throws InvalidPathnameException
* if the pathname is invalid.
*/
public static String validateZKPath(String path) throws InvalidPathnameException {
try {
PathUtils.validatePath(path);
} catch (IllegalArgumentException e) {
throw new InvalidPathnameException(path, (("Invalid Path \"" + path) + "\" : ") + e, e);
}
return path;
} | 3.26 |
hadoop_RegistryPathUtils_parentOf_rdh | /**
* Get the parent of a path
*
* @param path
* path to look at
* @return the parent path
* @throws PathNotFoundException
* if the path was at root.
*/
public static String parentOf(String path) throws PathNotFoundException {
List<String> elements = split(path);
int v9 = elements.size();
if (v9 == 0) {
throw new PathNotFoundException("No parent of " + path);
}
if (v9 == 1) {return "/";
}
elements.remove(v9 - 1);StringBuilder parent = new
StringBuilder(path.length());
for (String element : elements) {
parent.append("/");
parent.append(element);
}
return parent.toString();
} | 3.26 |
hadoop_RegistryPathUtils_validateElementsAsDNS_rdh | /**
* Validate ZK path as valid for a DNS hostname.
*
* @param path
* path to validate
* @return the path parameter
* @throws InvalidPathnameException
* if the pathname is invalid.
*/
public static String validateElementsAsDNS(String path) throws InvalidPathnameException {
List<String> splitpath = split(path);
for (String fragment : splitpath) {
if (!PATH_ENTRY_VALIDATION_PATTERN.matcher(fragment).matches()) {
throw new InvalidPathnameException(path, ("Invalid Path element \"" + fragment) + "\"");
}
}
return path;
} | 3.26 |
hadoop_RegistryPathUtils_encodeYarnID_rdh | /**
* Perform whatever transforms are needed to get a YARN ID into
* a DNS-compatible name
*
* @param yarnId
* ID as string of YARN application, instance or container
* @return a string suitable for use in registry paths.
*/
public static String encodeYarnID(String yarnId) {
return yarnId.replace("container", "ctr").replace("_", "-");
} | 3.26 |
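For illustration, the two replace() calls above turn a YARN container ID into a DNS-friendly token; the ID below is made up:

```java
// Hypothetical input/output of encodeYarnID:
String yarnId = "container_e17_1500000000000_0001_01_000002";
String dnsName = RegistryPathUtils.encodeYarnID(yarnId);
// dnsName == "ctr-e17-1500000000000-0001-01-000002"
```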
hadoop_RegistryPathUtils_lastPathEntry_rdh | /**
* Get the last entry in a path; for an empty path
* returns "". The split logic is that of
* {@link #split(String)}
*
* @param path
* path of operation
* @return the last path entry or "" if none.
*/
public static String lastPathEntry(String path) {
List<String> splits = split(path);
if (splits.isEmpty()) {
// empty path. Return ""
return "";
} else {
return splits.get(splits.size() - 1);
}
} | 3.26 |
hadoop_RegistryPathUtils_join_rdh | /**
* Join two paths, guaranteeing that there will not be exactly
* one separator between the two, and exactly one at the front
* of the path. There will be no trailing "/" except for the special
* case that this is the root path
*
* @param base
* base path
* @param path
* second path to add
* @return a combined path.
*/
public static String join(String base, String path) {
Preconditions.checkArgument(path != null, "null path");
Preconditions.checkArgument(base != null, "null path");
StringBuilder fullpath = new StringBuilder();
if (!base.startsWith("/")) {
fullpath.append('/');
}
fullpath.append(base);
// guarantee a trailing /
if (!fullpath.toString().endsWith("/")) {
fullpath.append("/");
}
// strip off any "/" at the beginning of the path
if (path.startsWith("/")) {
// path starts with /, so append all other characters - if present
if (path.length() > 1) {
fullpath.append(path.substring(1));
}
} else {
fullpath.append(path);
}
// here there may be a trailing "/"
String finalpath = fullpath.toString();
if (finalpath.endsWith("/") && (!"/".equals(finalpath))) {
finalpath = finalpath.substring(0, finalpath.length() - 1);
}
return finalpath;
} | 3.26 |
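A few illustrative calls showing how the path helpers in this group behave, based on reading the code above (results are inferred, not captured output):

```java
// Hypothetical examples of the registry path utilities:
RegistryPathUtils.join("/registry", "users/alice");    // "/registry/users/alice"
RegistryPathUtils.join("/registry/", "/users/alice/"); // "/registry/users/alice"
RegistryPathUtils.join("/", "/");                      // "/" (special root case)
RegistryPathUtils.split("/a//b/");                     // ["a", "b"], empty elements dropped
RegistryPathUtils.lastPathEntry("/a/b");               // "b"
RegistryPathUtils.parentOf("/a/b");                    // "/a"
```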
hadoop_ByteBufferPool_release_rdh | /**
* Clear the buffer pool thus releasing all the buffers.
*/
default void release() {
} | 3.26 |
hadoop_ApplicationColumn_getColumnQualifier_rdh | /**
*
* @return the column name value
*/
private String getColumnQualifier() {
return columnQualifier;
} | 3.26 |
hadoop_QueueResourceQuotas_getEffectiveMinResource_rdh | /* Effective Minimum Resource */
public Resource getEffectiveMinResource() {
return _get(NL, ResourceType.EFF_MIN_RESOURCE);
} | 3.26 |
hadoop_QueueResourceQuotas_getConfiguredMinResource_rdh | /* Configured Minimum Resource */
public Resource getConfiguredMinResource() {
return _get(NL, ResourceType.MIN_RESOURCE);
} | 3.26 |
hadoop_QueueResourceQuotas_getConfiguredMaxResource_rdh | /* Configured Maximum Resource */
public Resource getConfiguredMaxResource() {
return getConfiguredMaxResource(NL);
} | 3.26 |
hadoop_ActiveAuditManagerS3A_beforeExecution_rdh | /**
* Forward to the inner span.
* {@inheritDoc }
*/
@Override
public void beforeExecution(Context.BeforeExecution context, ExecutionAttributes executionAttributes) {
span.beforeExecution(context, executionAttributes);
} | 3.26 |
hadoop_ActiveAuditManagerS3A_beforeMarshalling_rdh | /**
* Forward to the inner span.
* {@inheritDoc }
*/
@Override
public void beforeMarshalling(Context.BeforeMarshalling context, ExecutionAttributes executionAttributes) {
span.beforeMarshalling(context, executionAttributes);
} | 3.26 |
hadoop_ActiveAuditManagerS3A_getActiveAuditSpan_rdh | /**
* Return the active wrapped span.
*
* @return a span.
*/
@Override
public AuditSpanS3A getActiveAuditSpan() {
return activeSpan();
} | 3.26 |
hadoop_ActiveAuditManagerS3A_afterTransmission_rdh | /**
* Forward to the inner span.
* {@inheritDoc }
*/
@Override
public void afterTransmission(Context.AfterTransmission context, ExecutionAttributes executionAttributes) {
span.afterTransmission(context, executionAttributes);
} | 3.26 |
hadoop_ActiveAuditManagerS3A_setActiveThreadSpan_rdh | /**
* Set a specific span as the active span.
* This will wrap it.
*
* @param span
* span to use.
* @return the wrapped span.
*/
private AuditSpanS3A setActiveThreadSpan(AuditSpanS3A span) {
return switchToActiveSpan(new WrappingAuditSpan(span, span.isValidSpan()));
} | 3.26 |
hadoop_ActiveAuditManagerS3A_activeSpan_rdh | /**
* Get the active span.
* This is the wrapped span, not the inner one, and it is
* of that type.
*
* @return the active WrappingAuditSpan
*/
private WrappingAuditSpan activeSpan() {
return activeSpanMap.getForCurrentThread();
} | 3.26 |
hadoop_ActiveAuditManagerS3A_switchToActiveSpan_rdh | /**
* Switch to a given span. If it is null, use the
* unbounded span.
*
* @param span
* to switch to; may be null
* @return the span switched to
*/
private WrappingAuditSpan switchToActiveSpan(WrappingAuditSpan span) {
if ((span != null) && span.isValidSpan()) {
activeSpanMap.setForCurrentThread(span);
} else {
activeSpanMap.removeForCurrentThread();
}
return activeSpan();
} | 3.26 |
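The per-thread bookkeeping above keys active spans by thread and falls back to an unbonded span when nothing is set. A stripped-down sketch of the same idea using a plain ThreadLocal rather than the WeakReferenceThreadMap the real class appears to use (all names here are illustrative):

```java
// Hypothetical simplification: one active span per thread, with a shared fallback.
final class ActiveSpanHolder<S> {
  private final S unbondedSpan;                        // fallback when nothing is active
  private final ThreadLocal<S> active = new ThreadLocal<>();

  ActiveSpanHolder(S unbondedSpan) {
    this.unbondedSpan = unbondedSpan;
  }

  S switchTo(S span) {
    if (span != null) {
      active.set(span);
    } else {
      active.remove();                                 // fall back to the unbonded span
    }
    return current();
  }

  S current() {
    S s = active.get();
    return s != null ? s : unbondedSpan;
  }
}
```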
hadoop_ActiveAuditManagerS3A_set_rdh | /**
* Forward to the inner span.
* {@inheritDoc }
*/
@Override
public void set(final String key, final String value) {
span.set(key, value);
} | 3.26 |
hadoop_ActiveAuditManagerS3A_modifyRequest_rdh | /**
* Forward to the inner span.
* {@inheritDoc }
*/
@Override
public SdkRequest modifyRequest(Context.ModifyRequest context, ExecutionAttributes executionAttributes) {
return span.modifyRequest(context, executionAttributes);
} | 3.26 |
hadoop_ActiveAuditManagerS3A_modifyResponse_rdh | /**
* Forward to the inner span.
* {@inheritDoc }
*/
@Override
public SdkResponse modifyResponse(Context.ModifyResponse context, ExecutionAttributes executionAttributes) {
return span.modifyResponse(context, executionAttributes);
} | 3.26 |
hadoop_ActiveAuditManagerS3A_activate_rdh | /**
* Makes this the thread's active span and activate.
* If the span was already active: no-op.
*/
@Override
public AuditSpanS3A activate() {
if (!isActive()) {
switchToActiveSpan(this);
span.activate();
}
return this;
} | 3.26 |
hadoop_ActiveAuditManagerS3A_afterMarshalling_rdh | /**
* Forward to the inner span.
* {@inheritDoc }
*/
@Override
public void afterMarshalling(Context.AfterMarshalling context, ExecutionAttributes executionAttributes) {
span.afterMarshalling(context, executionAttributes);
} | 3.26 |
hadoop_ActiveAuditManagerS3A_modifyHttpRequest_rdh | /**
* Forward to the inner span.
* {@inheritDoc }
*/
@Override
public SdkHttpRequest modifyHttpRequest(Context.ModifyHttpRequest context, ExecutionAttributes executionAttributes) {
return span.modifyHttpRequest(context, executionAttributes);
} | 3.26 |
hadoop_ActiveAuditManagerS3A_afterUnmarshalling_rdh | /**
* Forward to the inner span.
* {@inheritDoc }
*/
@Override
public void afterUnmarshalling(Context.AfterUnmarshalling context, ExecutionAttributes executionAttributes) {
span.afterUnmarshalling(context, executionAttributes);
} | 3.26 |
hadoop_ActiveAuditManagerS3A_extractAndActivateSpanFromRequest_rdh | /**
* Get the active span from the execution attributes,
* falling back to the active thread span if there
* is nothing in the attributes.
* Provided the span is a wrapped span, the span is
* activated.
*
* @param request
* request
* @param executionAttributes
* the execution attributes
* @return the active span
*/
private AuditSpanS3A extractAndActivateSpanFromRequest(final SdkRequest request, final ExecutionAttributes executionAttributes) {
AuditSpanS3A span = retrieveAttachedSpan(executionAttributes);
if (span == null) {
// no span is attached. Not unusual for the copy operations,
// or for calls to GetBucketLocation made by the AWS client
LOG.debug("No audit span attached to request {}", request);
// fall back to the active thread span.
// this will be the unbonded span if the thread is unbonded.
span = getActiveAuditSpan();
} else if (span instanceof WrappingAuditSpan) {
switchToActiveSpan((WrappingAuditSpan) span);
} else {
// warn/log and continue without switching.
WARN_OF_SPAN_TYPE.warn(NOT_A_WRAPPED_SPAN + ": {}", span);
LOG.debug(NOT_A_WRAPPED_SPAN + ": {}", span);
}
return span;
} | 3.26 |
hadoop_ActiveAuditManagerS3A_createExecutionInterceptors_rdh | /**
* Return a list of execution interceptors for the AWS SDK which
* relays to this class.
*
* @return a list of execution interceptors.
*/
@Override
public List<ExecutionInterceptor> createExecutionInterceptors() throws IOException {
// wire up the AWS SDK to call back into this class when
// preparing to make S3 calls.
List<ExecutionInterceptor> executionInterceptors = new ArrayList<>();
executionInterceptors.add(this);
final String handlers = getConfig().getTrimmed(AUDIT_REQUEST_HANDLERS, "");
if (!handlers.isEmpty()) {
// warn and ignore v1 handlers.
V2Migration.v1RequestHandlersUsed(handlers);
}
// V2 SDK supports global/service interceptors, but they need to be configured on the
// classpath and don't get the filesystem/job configuration passed down.
final Class<?>[] interceptors = getConfig().getClasses(AUDIT_EXECUTION_INTERCEPTORS);
if (interceptors != null) {
for (Class<?> handler : interceptors) {
try {
LOG.debug("Adding intercept of class {}", handler);
Constructor<?> ctor = handler.getConstructor();
final ExecutionInterceptor interceptor = ((ExecutionInterceptor) (ctor.newInstance()));
if (interceptor instanceof Configurable) { // pass in the configuration.
((Configurable) (interceptor)).setConf(getConfig());
}
executionInterceptors.add(interceptor);
} catch (ExceptionInInitializerError e) {
throw FutureIO.unwrapInnerException(e);
} catch (Exception e) {
throw new IOException(e);
}
}
}
return executionInterceptors;
} | 3.26 |
hadoop_ActiveAuditManagerS3A_isValidSpan_rdh | /**
* This span is valid if the span isn't closed and the inner
* span is valid.
*
* @return true if the span is considered valid.
*/
@Override
public boolean isValidSpan() {
return isValid && span.isValidSpan();
} | 3.26 |
hadoop_ActiveAuditManagerS3A_getUnbondedSpan_rdh | /**
* Get the unbounded span. Until this manager
* is fully initialized it will return the no-op
* span.
*
* @return the unbounded span.
*/
private WrappingAuditSpan getUnbondedSpan() {
return unbondedSpan;
} | 3.26 |
hadoop_ActiveAuditManagerS3A_isActive_rdh | /**
* Is the span active?
*
* @return true if this span is the active one for the current thread.
*/
private boolean isActive() {
return this == getActiveAuditSpan();
} | 3.26 |
hadoop_ActiveAuditManagerS3A_serviceStart_rdh | /**
* After starting the auditor, it is queried for its
* unbonded span, which is then wrapped and stored for
* use.
*/
@Override
protected void serviceStart() throws Exception {
super.serviceStart();
setUnbondedSpan(new WrappingAuditSpan(auditor.getUnbondedSpan(), false));
LOG.debug("Started audit service {}", auditor);
} | 3.26 |
hadoop_ActiveAuditManagerS3A_onExecutionFailure_rdh | /**
* Forward to the inner span.
* {@inheritDoc }
*/
@Override
public void onExecutionFailure(Context.FailedExecution context, ExecutionAttributes executionAttributes) {
span.onExecutionFailure(context, executionAttributes);
} | 3.26 |
hadoop_ActiveAuditManagerS3A_getActiveSpanMap_rdh | /**
* Get the map of threads to active spans; allows
* for testing of weak reference resolution after GC.
*
* @return the span map
*/
@VisibleForTesting
WeakReferenceThreadMap<WrappingAuditSpan> getActiveSpanMap() {
return activeSpanMap;
} | 3.26 |
hadoop_ActiveAuditManagerS3A_noteSpanReferenceLost_rdh | /**
* Span reference lost from GC operations.
* This is only called when an attempt is made to retrieve on
* the active thread or when a prune operation is cleaning up.
*
* @param threadId
* thread ID.
*/
private void noteSpanReferenceLost(long threadId) {
auditor.noteSpanReferenceLost(threadId);
} | 3.26 |
hadoop_ActiveAuditManagerS3A_setUnbondedSpan_rdh | /**
* Set the unbonded span.
*
* @param unbondedSpan
* the new unbonded span
*/
private void setUnbondedSpan(final WrappingAuditSpan unbondedSpan) {
this.unbondedSpan = unbondedSpan;
} | 3.26 |
hadoop_ActiveAuditManagerS3A_modifyHttpResponse_rdh | /**
* Forward to the inner span.
* {@inheritDoc }
*/
@Override
public SdkHttpResponse modifyHttpResponse(Context.ModifyHttpResponse context, ExecutionAttributes executionAttributes) {
return span.modifyHttpResponse(context, executionAttributes);
} | 3.26 |
hadoop_ActiveAuditManagerS3A_afterExecution_rdh | /**
* Forward to the inner span.
* {@inheritDoc }
*/
@Override
public void afterExecution(Context.AfterExecution context, ExecutionAttributes executionAttributes) {
span.afterExecution(context, executionAttributes);
} | 3.26 |
hadoop_ActiveAuditManagerS3A_getSpanId_rdh | /**
* The Span ID in the audit manager is the ID of the auditor,
* which can be used in the filesystem toString() method
* to assist in correlating client logs with S3 logs.
* It is returned here as part of the implementation of
* {@link AWSAuditEventCallbacks}.
*
* @return the unique ID of the FS.
*/
@Override
public String getSpanId() {
return auditor != null ? auditor.getAuditorId() : "(auditor not yet created)";
} | 3.26 |
hadoop_ActiveAuditManagerS3A_deactivate_rdh | /**
* Switch to the unbounded span and then deactivate this span.
* No-op for invalid spans,
* so as to prevent the unbounded span from being closed
* and everything getting very confused.
*/
@Override
public void deactivate() {
// span is inactive; ignore
if (!isActive()) {
return;
}
// skipped for invalid spans,
// so as to prevent the unbounded span from being closed
// and everything getting very confused.
if (isValid) {
// deactivate the span
span.deactivate();
}
// remove the span from the reference map,
// sporadically triggering a prune operation.
removeActiveSpanFromMap();
} | 3.26 |
hadoop_ActiveAuditManagerS3A_beforeUnmarshalling_rdh | /**
* Forward to the inner span.
* {@inheritDoc }
*/
@Override
public void beforeUnmarshalling(Context.BeforeUnmarshalling context, ExecutionAttributes executionAttributes) {
span.beforeUnmarshalling(context, executionAttributes);
} | 3.26 |
hadoop_ActiveAuditManagerS3A_requestCreated_rdh | /**
* Audit the creation of a request and retrieve
* a reference to the active thread span.
*/
@Override
public void requestCreated(final SdkRequest.Builder builder) {
AuditSpanS3A span = getActiveAuditSpan();
if (LOG.isTraceEnabled()) {
LOG.trace("Created Request {} in span {}", analyzer.analyze(builder.build()), span);
}
try {
span.requestCreated(builder);
} catch (AuditFailureException e) {
ioStatisticsStore.incrementCounter(AUDIT_FAILURE.getSymbol());
throw e;
}
} | 3.26 |
hadoop_ActiveAuditManagerS3A_prune_rdh | /**
* Prune all null weak references, calling the referenceLost
* callback for each one.
*
* non-atomic and non-blocking.
*
* @return the number of entries pruned.
*/
@VisibleForTesting
int prune() {
return activeSpanMap.prune();
} | 3.26 |
hadoop_RouterResolver_getSubclusterMapping_rdh | /**
* Get subcluster mapping info.
*
* @return The map of subcluster info.
*/
protected Map<K, V> getSubclusterMapping() {
return this.subclusterMapping;
} | 3.26 |
hadoop_RouterResolver_getRpcServer_rdh | /**
* Get the Router RPC server.
*
* @return Router RPC server. Null if not possible.
*/
protected RouterRpcServer getRpcServer() {
if (this.router == null) {
return null;
}
return router.getRpcServer();
} | 3.26 |
hadoop_RouterResolver_updateSubclusterMapping_rdh | /**
* Update <NamespaceId, Subcluster Info> mapping info periodically.
*/
private synchronized void updateSubclusterMapping() {
if ((subclusterMapping == null) || ((monotonicNow() - lastUpdated) > minUpdateTime)) {
// Fetch the mapping asynchronously
Thread updater = new Thread(new Runnable() {
@Override
public void run() {
final MembershipStore v1 = getMembershipStore();
if (v1 == null) {
f0.error("Cannot access the Membership store.");return;
}
subclusterMapping = getSubclusterInfo(v1);
lastUpdated = monotonicNow();
}
});
updater.start();
// Wait until initialized
if (subclusterMapping == null) {
try {
f0.debug("Wait to get the mapping for the first time");
updater.join();
} catch (InterruptedException e) {
f0.error("Cannot wait for the updater to finish");
}
}
}
} | 3.26 |
hadoop_HAServiceTarget_supportObserver_rdh | /**
*
* @return true if this target supports the Observer state, false otherwise.
*/
public boolean supportObserver() {
return false;
} | 3.26 |
hadoop_HAServiceTarget_getProxy_rdh | /**
*
* @return a proxy to connect to the target HA Service.
* @param timeoutMs
* timeout in milliseconds.
* @param conf
* Configuration.
* @throws IOException
* raised on errors performing I/O.
*/
public HAServiceProtocol getProxy(Configuration conf, int timeoutMs) throws IOException {
return getProxyForAddress(conf, timeoutMs, getAddress());
} | 3.26 |
hadoop_HAServiceTarget_getHealthMonitorProxy_rdh | /**
* Returns a proxy to connect to the target HA service for health monitoring.
* If {@link #getHealthMonitorAddress()} is implemented to return a non-null
* address, then this proxy will connect to that address. Otherwise, the
* returned proxy defaults to using {@link #getAddress()}, which means this
* method's behavior is identical to {@link #getProxy(Configuration, int)}.
*
* @param conf
* configuration.
* @param timeoutMs
* timeout in milliseconds
* @return a proxy to connect to the target HA service for health monitoring
* @throws IOException
* if there is an error
*/
public HAServiceProtocol getHealthMonitorProxy(Configuration conf, int timeoutMs) throws IOException {
return getHealthMonitorProxy(conf, timeoutMs, 1);
} | 3.26 |
hadoop_HAServiceTarget_isAutoFailoverEnabled_rdh | /**
*
* @return true if auto failover should be considered enabled
*/
public boolean isAutoFailoverEnabled() {
return false;
} | 3.26 |