name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, 3.26–3.68) |
---|---|---|
hadoop_Base64_decodeAsByteObjectArray_rdh | /**
* Decodes a given Base64 string into its corresponding byte array.
*
* @param data
* the Base64 string, as a <code>String</code> object, to decode
* @return the corresponding decoded byte array
* @throws IllegalArgumentException
* If the string is not a valid base64 encoded string
*/
public static Byte[] decodeAsByteObjectArray(final String data) {
int byteArrayLength = (3 * data.length()) / 4;
if (data.endsWith("==")) {
byteArrayLength -= 2;
} else if (data.endsWith("=")) {
byteArrayLength -= 1;
}
final Byte[] retArray = new Byte[byteArrayLength];
int byteDex = 0;
int charDex = 0;
for (; charDex < data.length(); charDex += 4) {
// get 4 chars, convert to 3 bytes
final int char1 = DECODE_64[((byte) (data.charAt(charDex)))];
final int char2 = DECODE_64[((byte) (data.charAt(charDex + 1)))];
final int char3 = DECODE_64[((byte) (data.charAt(charDex + 2)))];
final int char4 = DECODE_64[((byte) (data.charAt(charDex + 3)))];
if ((((char1 < 0) || (char2 < 0)) || (char3 == (-1))) || (char4 == (-1))) {
// invalid character(-1), or bad padding (-2)
throw new IllegalArgumentException("The data parameter is not a valid base64-encoded string.");
}
int tVal = char1 << 18;
tVal += char2 << 12;
tVal += (char3 & 0xff) << 6;
tVal += char4 & 0xff;
if (char3 == (-2)) {
// two "==" pad chars, check bits 12-24
tVal &= 0xfff000;
retArray[byteDex++] = ((byte) ((tVal >> 16) & 0xff));
} else if (char4 == (-2)) {
// one pad char "=", check bits 6-24
tVal &= 0xffffc0;
retArray[byteDex++] = ((byte) ((tVal >> 16) & 0xff));
retArray[byteDex++] = ((byte) ((tVal >> 8) & 0xff));
} else {
// No pads take all 3 bytes, bits 0-24
retArray[byteDex++] = ((byte) ((tVal >> 16) & 0xff));
retArray[byteDex++] = ((byte) ((tVal >> 8) & 0xff));
retArray[byteDex++] = ((byte) (tVal & 0xff));
}
}
return retArray;
} | 3.26 |
hadoop_LightWeightLinkedSet_m0_rdh | /**
* Remove and return n elements from the hashtable.
* The order in which entries are removed corresponds
* to the order in which they were inserted.
*
* @return a list of the first n elements
*/
@Override
public List<T> m0(int n) {
if (n >= size) {
// if we need to remove all elements then do fast polling
return pollAll();
}
List<T> retList = new ArrayList<T>(n);
while (((n--) > 0) && (head != null)) {
T curr = head.element;
this.removeElem(curr);
retList.add(curr);
}
shrinkIfNecessary();
return retList;
} | 3.26 |
hadoop_LightWeightLinkedSet_pollFirst_rdh | /**
* Remove and return first element on the linked list of all elements.
*
* @return first element
*/
public T pollFirst() {
if (head == null) {
return null;
}
T first = head.element;
this.remove(first);
return first;
} | 3.26 |
hadoop_LightWeightLinkedSet_addElem_rdh | /**
* Add given element to the hash table
*
* @return true if the element was not present in the table, false otherwise
*/
@Override
protected boolean addElem(final T element) {
// validate element
if (element == null) {
throw new IllegalArgumentException("Null element is not supported.");
}
// find hashCode & index
final int hashCode = element.hashCode();
final int index = getIndex(hashCode);
// return false if already present
if (getContainedElem(index, element, hashCode) != null) {
return false;
}
modification++;
size++;
// update bucket linked list
DoubleLinkedElement<T> le = new DoubleLinkedElement<T>(element, hashCode);
le.next = entries[index];
entries[index] = le;
// insert to the end of the all-element linked list
le.after = null;
le.before = tail;
if (tail != null) {
tail.after = le;
}
tail = le;
if (head == null) {
head = le;
bookmark.next = head;
}
// Update bookmark, if necessary.
if (bookmark.next == null) {
bookmark.next = le;
}
return true;
} | 3.26 |
hadoop_LightWeightLinkedSet_pollAll_rdh | /**
* Remove all elements from the set and return them in order. Traverse the
* linked list without touching the hashtable - a faster version of the parent
* method.
*/
@Override
public List<T> pollAll() {
List<T> retList = new ArrayList<T>(size);
while (head != null) {
retList.add(head.element);
head = head.after;
}
this.clear();
return retList;
} | 3.26 |
hadoop_LightWeightLinkedSet_removeElem_rdh | /**
* Remove the element corresponding to the key, given key.hashCode() == index.
*
* @return Return the entry with the element if exists. Otherwise return null.
*/
@Override
protected DoubleLinkedElement<T> removeElem(final T key) {
DoubleLinkedElement<T> found = ((DoubleLinkedElement<T>) (super.removeElem(key)));
if (found == null) {
return null;
}
// update linked list
if (found.after != null) {
found.after.before = found.before;
}
if (found.before != null) {
found.before.after = found.after;
}
if (head == found) {
head = head.after;
}
if (tail == found) {
tail = tail.before;
}
// Update bookmark, if necessary.
if (found == this.bookmark.next) {
this.bookmark.next = found.after;
}
return found;
} | 3.26 |
hadoop_LightWeightLinkedSet_resetBookmark_rdh | /**
* Resets the bookmark to the beginning of the list.
*/
public void resetBookmark() {
this.bookmark.next = this.head;
} | 3.26 |
hadoop_LightWeightLinkedSet_clear_rdh | /**
* Clear the set. Resize it to the original capacity.
*/
@Override
public void clear() {
super.clear();
this.head = null;
this.tail = null;
this.resetBookmark();
} | 3.26 |
hadoop_LightWeightLinkedSet_getBookmark_rdh | /**
* Returns a new iterator starting at the bookmarked element.
*
* @return the iterator to the bookmarked element.
*/
public Iterator<T> getBookmark() {
LinkedSetIterator toRet = new LinkedSetIterator();
toRet.next = this.bookmark.next;
this.bookmark = toRet;
return toRet;
} | 3.26 |
hadoop_LoadedManifestData_getEntrySequenceFile_rdh | /**
* Get the entry sequence data as a file.
*/
public File getEntrySequenceFile() {
return new File(entrySequenceData.toUri());
} | 3.26 |
hadoop_LoadedManifestData_getEntrySequenceData_rdh | /**
* Get the path to the entry sequence data file.
*
* @return the path
*/
public Path getEntrySequenceData() {
return entrySequenceData;
} | 3.26 |
hadoop_LoadedManifestData_deleteEntrySequenceFile_rdh | /**
* Delete the entry sequence file.
*
* @return whether or not the delete was successful.
*/
public boolean deleteEntrySequenceFile() {
return getEntrySequenceFile().delete();
} | 3.26 |
hadoop_NativeCrc32_verifyChunkedSums_rdh | /**
* Verify the given buffers of data and checksums, and throw an exception
* if any checksum is invalid. The buffers given to this function should
* have their position initially at the start of the data, and their limit
* set at the end of the data. The position, limit, and mark are not
* modified.
*
* @param bytesPerSum
* the chunk size (eg 512 bytes)
* @param checksumType
* the DataChecksum type constant (NULL is not supported)
* @param sums
* the DirectByteBuffer pointing at the beginning of the
* stored checksums
* @param data
* the DirectByteBuffer pointing at the beginning of the
* data to check
* @param basePos
* the position in the file where the data buffer starts
* @param fileName
* the name of the file being verified
* @throws ChecksumException
* if there is an invalid checksum
*/
public static void verifyChunkedSums(int bytesPerSum, int checksumType, ByteBuffer sums, ByteBuffer data, String fileName, long basePos) throws ChecksumException {
nativeComputeChunkedSums(bytesPerSum, checksumType, sums, sums.position(), data, data.position(), data.remaining(), fileName, basePos, true);
} | 3.26 |
hadoop_NativeCrc32_isAvailable_rdh | /**
* Return true if the JNI-based native CRC extensions are available.
*/
public static boolean isAvailable() {
if (isSparc) {
return false;
} else {
return NativeCodeLoader.isNativeCodeLoaded();
}
} | 3.26 |
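Since the verification path relies on JNI, callers are expected to check availability before calling into it. A hedged guard sketch; it assumes the caller can see `NativeCrc32` (the class may be package-private), and the fallback branch is illustrative only.

```java
// Sketch assumes visibility of NativeCrc32, e.g. by living in the same package.
package org.apache.hadoop.util;

public class CrcGuardExample {
  public static void main(String[] args) {
    if (NativeCrc32.isAvailable()) {
      // Safe to call NativeCrc32.verifyChunkedSums(...) with direct
      // ByteBuffers positioned at the start of the checksums and data.
      System.out.println("native CRC available");
    } else {
      // Fall back to a pure-Java checksum such as java.util.zip.CRC32.
      System.out.println("falling back to pure-Java CRC");
    }
  }
}
```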
hadoop_RecordCreatorFactory_setTtl_rdh | /**
* Set the TTL value for the records created by the factory.
*
* @param ttl
* the ttl value, in seconds.
*/
public static void setTtl(long ttl) {
RecordCreatorFactory.ttl = ttl;
} | 3.26 |
hadoop_RecordCreatorFactory_create_rdh | /**
* Creates a DNS SRV record.
*
* @param name
* the record name.
* @param target
* the record target/value.
* @return an SRV record.
*/
@Override
public SRVRecord create(Name name, HostPortInfo target) {
return new SRVRecord(name, DClass.IN, ttl, 1, 1, target.getPort(), target.getHost());
} | 3.26 |
hadoop_RecordCreatorFactory_getRecordCreator_rdh | /**
* Returns the DNS record creator for the provided type.
*
* @param type
* the DNS record type.
* @return the record creator.
*/
static RecordCreator getRecordCreator(int type) {
switch (type) {
case A :
return new ARecordCreator();
case CNAME :
return new CNAMERecordCreator();
case TXT :
return new TXTRecordCreator();
case AAAA :
return new AAAARecordCreator();
case PTR :
return new PTRRecordCreator();
case SRV :
return new SRVRecordCreator();
default :
throw new IllegalArgumentException("No type " +
type);
}
} | 3.26 |
hadoop_RecordCreatorFactory_setPort_rdh | /**
* Set the port.
*
* @param port
* the port.
*/
void setPort(int port) {
this.port = port;
} | 3.26 |
hadoop_RecordCreatorFactory_setHost_rdh | /**
* Set the host name.
*
* @param host
* the host name.
*/
void setHost(Name host) {
this.host = host;
} | 3.26 |
hadoop_RecordCreatorFactory_getHost_rdh | /**
* Return the host name.
*
* @return the host name.
*/
Name getHost() {
return host;
} | 3.26 |
hadoop_RecordCreatorFactory_getPort_rdh | /**
* Get the port.
*
* @return the port.
*/
int getPort() {
return port;
} | 3.26 |
hadoop_ClusterMetrics_getOccupiedReduceSlots_rdh | /**
* Get the number of occupied reduce slots in the cluster.
*
* @return occupied reduce slot count
*/
public int getOccupiedReduceSlots() {
return occupiedReduceSlots;
} | 3.26 |
hadoop_ClusterMetrics_getDecommissionedTaskTrackerCount_rdh | /**
* Get the number of decommissioned trackers in the cluster.
*
* @return decommissioned tracker count
*/
public int getDecommissionedTaskTrackerCount() {
return numDecommissionedTrackers;
} | 3.26 |
hadoop_ClusterMetrics_getReservedReduceSlots_rdh | /**
* Get the number of reserved reduce slots in the cluster.
*
* @return reserved reduce slot count
*/
public int getReservedReduceSlots() {
return reservedReduceSlots;
} | 3.26 |
hadoop_ClusterMetrics_getBlackListedTaskTrackerCount_rdh | /**
* Get the number of blacklisted trackers in the cluster.
*
* @return blacklisted tracker count
*/
public int getBlackListedTaskTrackerCount() {
return numBlacklistedTrackers;
} | 3.26 |
hadoop_ClusterMetrics_getTaskTrackerCount_rdh | /**
* Get the number of active trackers in the cluster.
*
* @return active tracker count.
*/
public int getTaskTrackerCount() {
return numTrackers;
} | 3.26 |
hadoop_ClusterMetrics_getReduceSlotCapacity_rdh | /**
* Get the total number of reduce slots in the cluster.
*
* @return reduce slot capacity
*/
public int getReduceSlotCapacity() {
return totalReduceSlots;
} | 3.26 |
hadoop_ClusterMetrics_getMapSlotCapacity_rdh | /**
* Get the total number of map slots in the cluster.
*
* @return map slot capacity
*/
public int getMapSlotCapacity() {
return totalMapSlots;
} | 3.26 |
hadoop_ClusterMetrics_getGrayListedTaskTrackerCount_rdh | /**
* Get the number of graylisted trackers in the cluster.
*
* @return graylisted tracker count
*/
public int getGrayListedTaskTrackerCount() {
return numGraylistedTrackers;
} | 3.26 |
hadoop_ClusterMetrics_getReservedMapSlots_rdh | /**
* Get number of reserved map slots in the cluster.
*
* @return reserved map slot count
*/
public int getReservedMapSlots() {
return reservedMapSlots;
} | 3.26 |
hadoop_ClusterMetrics_getOccupiedMapSlots_rdh | /**
* Get number of occupied map slots in the cluster.
*
* @return occupied map slot count
*/
public int getOccupiedMapSlots() {
return occupiedMapSlots;
} | 3.26 |
hadoop_ClusterMetrics_getRunningReduces_rdh | /**
* Get the number of running reduce tasks in the cluster.
*
* @return running reduces
*/
public int getRunningReduces() {
return runningReduces;
} | 3.26 |
hadoop_ClusterMetrics_getRunningMaps_rdh | /**
* Get the number of running map tasks in the cluster.
*
* @return running maps
*/
public int getRunningMaps() {
return runningMaps;
} | 3.26 |
hadoop_PseudoAuthenticator_setConnectionConfigurator_rdh | /**
* Sets a {@link ConnectionConfigurator} instance to use for
* configuring connections.
*
* @param configurator
* the {@link ConnectionConfigurator} instance.
*/
@Override
public void setConnectionConfigurator(ConnectionConfigurator configurator) {
connConfigurator = configurator;
} | 3.26 |
hadoop_PseudoAuthenticator_m0_rdh | /**
* Returns the current user name.
* <p>
* This implementation returns the value of the Java system property 'user.name'
*
* @return the current user name.
*/
protected String m0() {
return System.getProperty("user.name");
} | 3.26 |
hadoop_PseudoAuthenticator_authenticate_rdh | /**
* Performs simple authentication against the specified URL.
* <p>
* If a token is given it does a NOP and returns the given token.
* <p>
* If no token is given, it will perform an HTTP <code>OPTIONS</code> request injecting an additional
* parameter {@link #USER_NAME} in the query string with the value returned by the {@link #getUserName()}
* method.
* <p>
* If the response is successful it will update the authentication token.
*
* @param url
* the URl to authenticate against.
* @param token
* the authentication token being used for the user.
* @throws IOException
* if an IO error occurred.
* @throws AuthenticationException
* if an authentication error occurred.
*/
@Override
public void authenticate(URL url, AuthenticatedURL.Token token) throws IOException, AuthenticationException {
String strUrl = url.toString();
String paramSeparator = (strUrl.contains("?")) ? "&" : "?";
strUrl += (paramSeparator + USER_NAME_EQ) + m0();
url = new URL(strUrl);
HttpURLConnection conn = token.openConnection(url, connConfigurator);
conn.setRequestMethod("OPTIONS");
conn.connect();
AuthenticatedURL.extractToken(conn, token);
} | 3.26 |
hadoop_ServerCommand_getAction_rdh | /**
* Get server command action.
*
* @return action code.
*/
public int getAction() {
return this.action;
} | 3.26 |
hadoop_HeaderProcessing_getXAttrs_rdh | /**
* See {@code FileSystem.getXAttrs(path, names)}.
*
* @param path
* Path to get extended attributes
* @param names
* XAttr names.
* @return Map describing the XAttrs of the file or directory
* @throws IOException
* IO failure
*/
public Map<String, byte[]> getXAttrs(Path path, List<String> names) throws IOException {
Map<String, byte[]> headers = retrieveHeaders(path, INVOCATION_XATTR_GET_NAMED_MAP);
Map<String, byte[]> result = new TreeMap<>();
headers.entrySet().stream().filter(entry -> names.contains(entry.getKey())).forEach(entry -> result.put(entry.getKey(), entry.getValue()));
return result;
} | 3.26 |
hadoop_HeaderProcessing_cloneObjectMetadata_rdh | /**
* Creates a copy of the passed metadata.
* This operation does not copy the {@code X_HEADER_MAGIC_MARKER}
* header to avoid confusion. If a marker file is renamed,
* it loses information about any remapped file.
* If new fields are added to ObjectMetadata which are not
* present in the user metadata headers, they will not be picked
* up or cloned unless this operation is updated.
*
* @param source
* the source metadata to copy
* @param dest
* the metadata to update; this is the return value.
* @param copyObjectRequestBuilder
* CopyObjectRequest builder
*/
public static void cloneObjectMetadata(HeadObjectResponse source, Map<String, String> dest, CopyObjectRequest.Builder copyObjectRequestBuilder) {
// Possibly null attributes
// Allowing nulls to pass breaks it during later use
if (source.cacheControl() != null) {
copyObjectRequestBuilder.cacheControl(source.cacheControl());
}
if (source.contentDisposition() != null) {
copyObjectRequestBuilder.contentDisposition(source.contentDisposition());
}
if (source.contentEncoding() != null) {
copyObjectRequestBuilder.contentEncoding(source.contentEncoding());
}
if (source.contentType() != null) {
copyObjectRequestBuilder.contentType(source.contentType());
}
if (source.serverSideEncryption() != null) {
copyObjectRequestBuilder.serverSideEncryption(source.serverSideEncryption());
}
if (source.sseCustomerAlgorithm() != null) {
copyObjectRequestBuilder.copySourceSSECustomerAlgorithm(source.sseCustomerAlgorithm());
}
if (source.sseCustomerKeyMD5() != null) {
copyObjectRequestBuilder.copySourceSSECustomerKeyMD5(source.sseCustomerKeyMD5());
}
// copy user metadata except the magic marker header.
source.metadata().entrySet().stream().filter(e -> !e.getKey().equals(X_HEADER_MAGIC_MARKER)).forEach(e -> dest.put(e.getKey(), e.getValue()));
} | 3.26 |
hadoop_HeaderProcessing_maybeSetHeader_rdh | /**
* Set a header if the value is non null.
*
* @param headers
* header map
* @param name
* header name
* @param value
* value to encode.
*/
private void maybeSetHeader(final Map<String, byte[]> headers, final String name, final Object value) {
if (value != null) {
headers.put(name, encodeBytes(value));
}
} | 3.26 |
hadoop_HeaderProcessing_retrieveHeaders_rdh | /**
* Query the store, get all the headers into a map. Each Header
* has the "header." prefix.
* Caller must have read access.
* The value of each header is the string value of the object
* UTF-8 encoded.
*
* @param path
* path of object.
* @param statistic
* statistic to use for duration tracking.
* @return the headers
* @throws IOException
* failure, including file not found.
*/
private Map<String, byte[]> retrieveHeaders(final Path path, final Statistic statistic) throws IOException {
StoreContext context = getStoreContext();
String objectKey = context.pathToKey(path);
String symbol = statistic.getSymbol();
S3AStatisticsContext v3 = context.getInstrumentation();
Map<String, byte[]> headers = new TreeMap<>();
HeadObjectResponse md;
// Attempting to get metadata for the root, so use head bucket.
if (objectKey.isEmpty()) {
HeadBucketResponse headBucketResponse = trackDuration(v3, symbol, () -> f1.getBucketMetadata());
if (((headBucketResponse.sdkHttpResponse() != null) && (headBucketResponse.sdkHttpResponse().headers() != null)) && (headBucketResponse.sdkHttpResponse().headers().get(AWSHeaders.CONTENT_TYPE) != null)) {
maybeSetHeader(headers, XA_CONTENT_TYPE, headBucketResponse.sdkHttpResponse().headers().get(AWSHeaders.CONTENT_TYPE).get(0));
}
maybeSetHeader(headers, XA_CONTENT_LENGTH, 0);
return headers;
}
try {
md = trackDuration(v3, symbol, () -> f1.getObjectMetadata(objectKey));
} catch (FileNotFoundException e) {
// no entry. It could be a directory, so try again.
md = trackDuration(v3, symbol, () -> f1.getObjectMetadata(objectKey + "/"));
}
// all user metadata
Map<String, String> rawHeaders = md.metadata();
rawHeaders.forEach((key, value) -> headers.put(XA_HEADER_PREFIX + key, encodeBytes(value)));
// and add the usual content length &c, if set
maybeSetHeader(headers, XA_CACHE_CONTROL, md.cacheControl());
maybeSetHeader(headers, XA_CONTENT_DISPOSITION, md.contentDisposition());
maybeSetHeader(headers, XA_CONTENT_ENCODING, md.contentEncoding());
maybeSetHeader(headers, XA_CONTENT_LANGUAGE, md.contentLanguage());
// If CSE is enabled, use the unencrypted content length.
// TODO: CSE is not supported yet, add these headers in during CSE work.
// if (md.getUserMetaDataOf(Headers.CRYPTO_CEK_ALGORITHM) != null
// && md.getUserMetaDataOf(Headers.UNENCRYPTED_CONTENT_LENGTH) != null) {
// maybeSetHeader(headers, XA_CONTENT_LENGTH,
// md.getUserMetaDataOf(Headers.UNENCRYPTED_CONTENT_LENGTH));
// } else {
// maybeSetHeader(headers, XA_CONTENT_LENGTH,
// md.contentLength());
// }
// maybeSetHeader(headers, XA_CONTENT_MD5,
// md.getContentMD5());
// TODO: Add back in else block during CSE work.
maybeSetHeader(headers, XA_CONTENT_LENGTH, md.contentLength());
if (((md.sdkHttpResponse() != null) && (md.sdkHttpResponse().headers() != null)) && (md.sdkHttpResponse().headers().get("Content-Range") != null)) {
maybeSetHeader(headers, XA_CONTENT_RANGE, md.sdkHttpResponse().headers().get("Content-Range").get(0));
}
maybeSetHeader(headers, XA_CONTENT_TYPE, md.contentType());
maybeSetHeader(headers, XA_ETAG, md.eTag());
maybeSetHeader(headers, XA_LAST_MODIFIED, Date.from(md.lastModified()));
// AWS custom headers
maybeSetHeader(headers, XA_ARCHIVE_STATUS, md.archiveStatus());
maybeSetHeader(headers, XA_OBJECT_LOCK_LEGAL_HOLD_STATUS, md.objectLockLegalHoldStatus());
maybeSetHeader(headers, XA_OBJECT_LOCK_MODE, md.objectLockMode());
maybeSetHeader(headers, XA_OBJECT_LOCK_RETAIN_UNTIL_DATE, md.objectLockRetainUntilDate());
maybeSetHeader(headers, XA_OBJECT_REPLICATION_STATUS, md.replicationStatus());
maybeSetHeader(headers, f0, md.versionId());
maybeSetHeader(headers, XA_SERVER_SIDE_ENCRYPTION, md.serverSideEncryptionAsString());
maybeSetHeader(headers, XA_STORAGE_CLASS, md.storageClassAsString());
return headers;
} | 3.26 |
hadoop_HeaderProcessing_listXAttrs_rdh | /**
* See {@code FileSystem.listXAttrs(path)}.
*
* @param path
* Path to get extended attributes
* @return List of supported XAttrs
* @throws IOException
* IO failure
*/
public List<String> listXAttrs(final Path path) throws IOException {
return new ArrayList<>(retrieveHeaders(path, INVOCATION_OP_XATTR_LIST).keySet());
} | 3.26 |
hadoop_HeaderProcessing_getXAttr_rdh | /**
* Get an XAttr name and value for a file or directory.
*
* @param path
* Path to get extended attribute
* @param name
* XAttr name.
* @return byte[] XAttr value or null
* @throws IOException
* IO failure
*/
public byte[] getXAttr(Path path, String name) throws IOException {
return retrieveHeaders(path, INVOCATION_XATTR_GET_NAMED).get(name);
} | 3.26 |
hadoop_HeaderProcessing_decodeBytes_rdh | /**
* Get the string value from the bytes.
* if null : return null, otherwise the UTF-8 decoded
* bytes.
*
* @param bytes
* source bytes
* @return decoded value
*/
public static String decodeBytes(byte[] bytes) {
return bytes == null ? null : new String(bytes, StandardCharsets.UTF_8);
} | 3.26 |
hadoop_HeaderProcessing_encodeBytes_rdh | /**
* Stringify an object and return its bytes in UTF-8 encoding.
*
* @param s
* source
* @return encoded object or an empty buffer
*/
public static byte[] encodeBytes(@Nullable Object s) {
return s == null ? EMPTY : s.toString().getBytes(StandardCharsets.UTF_8);
} | 3.26 |
hadoop_HeaderProcessing_extractXAttrLongValue_rdh | /**
* Convert an XAttr byte array to a long.
* Public and static for testability.
*
* @param data
* data to parse
* @return either a length or none
*/
public static Optional<Long> extractXAttrLongValue(byte[] data) {
String xAttr = HeaderProcessing.decodeBytes(data);
if (StringUtils.isNotEmpty(xAttr)) {
try {
long l = Long.parseLong(xAttr);
if (l >= 0) {
return Optional.of(l);
}
} catch (NumberFormatException ex) {
LOG.warn("Not a number: {}", xAttr, ex);
}
}
// missing/empty header or parse failure.
return Optional.empty();
} | 3.26 |
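The three static helpers above compose into a simple round trip. A sketch under the assumption that `HeaderProcessing` is importable from the S3A implementation package (the path is an assumption, not stated in the rows):

```java
// Package path is an assumption for illustration.
import org.apache.hadoop.fs.s3a.impl.HeaderProcessing;
import java.util.Optional;

public class XAttrRoundTrip {
  public static void main(String[] args) {
    // Encode a content length the way header values are stored...
    byte[] raw = HeaderProcessing.encodeBytes(4096L);
    // ...decode it back to its UTF-8 string form...
    String text = HeaderProcessing.decodeBytes(raw);               // "4096"
    // ...and parse it as an optional long, empty on non-numeric input.
    Optional<Long> length = HeaderProcessing.extractXAttrLongValue(raw);
    System.out.println(text + " -> " + length.orElse(-1L));
  }
}
```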
hadoop_EntityGroupFSTimelineStoreMetrics_incrGetEntityToSummaryOps_rdh | // Setters
// General read related
public void incrGetEntityToSummaryOps() {
getEntityToSummaryOps.incr();
} | 3.26 |
hadoop_EntityGroupFSTimelineStoreMetrics_incrNoRefreshCacheRead_rdh | // Cache related
public void incrNoRefreshCacheRead() {
noRefreshCacheRead.incr();
} | 3.26 |
hadoop_EntityGroupFSTimelineStoreMetrics_addActiveLogDirScanTime_rdh | // Log scanner and cleaner related
public void addActiveLogDirScanTime(long msec) {
f1.add(msec);
} | 3.26 |
hadoop_EntityGroupFSTimelineStoreMetrics_getEntitiesReadToSummary_rdh | // Getters
MutableCounterLong getEntitiesReadToSummary() {
return f0;
} | 3.26 |
hadoop_EntityGroupFSTimelineStoreMetrics_addSummaryLogReadTime_rdh | // Summary data related
public void addSummaryLogReadTime(long msec) {
summaryLogRead.add(msec);
} | 3.26 |
hadoop_NMController_index_rdh | // TODO: What use of this with info() in?
@Override
public void index() {
setTitle(join("NodeManager - ", $(NM_NODENAME)));
} | 3.26 |
hadoop_AMRMProxyTokenSecretManager_recover_rdh | /**
* Recover secretManager from state store. Called after serviceInit before
* serviceStart.
*
* @param state
* the state to recover from
*/
public void recover(RecoveredAMRMProxyState state) {
if (state != null) {
// recover the current master key
MasterKey currentKey = state.getCurrentMasterKey();
if (currentKey != null) {
this.currentMasterKey = new MasterKeyData(currentKey, createSecretKey(currentKey.getBytes().array()));
} else {
LOG.warn("No current master key recovered from NM StateStore" + " for AMRMProxyTokenSecretManager");
}
// recover the next master key if not null
MasterKey nextKey = state.getNextMasterKey();
if (nextKey != null) {
this.nextMasterKey = new MasterKeyData(nextKey, createSecretKey(nextKey.getBytes().array()));
this.timer.schedule(new NextKeyActivator(), this.activationDelay);
}
}
} | 3.26 |
hadoop_AMRMProxyTokenSecretManager_retrievePassword_rdh | /**
* Retrieve the password for the given {@link AMRMTokenIdentifier}. Used by
* RPC layer to validate a remote {@link AMRMTokenIdentifier}.
*/
@Override
public byte[] retrievePassword(AMRMTokenIdentifier identifier) throws InvalidToken {
this.readLock.lock();
try {
ApplicationAttemptId applicationAttemptId = identifier.getApplicationAttemptId();
LOG.debug("Trying to retrieve password for {}", applicationAttemptId);
if (!appAttemptSet.contains(applicationAttemptId)) {
throw new InvalidToken(applicationAttemptId + " not found in AMRMProxyTokenSecretManager.");
}
if (identifier.getKeyId() == this.currentMasterKey.getMasterKey().getKeyId()) {
return createPassword(identifier.getBytes(), this.currentMasterKey.getSecretKey());
} else if ((nextMasterKey != null) && (identifier.getKeyId() == this.nextMasterKey.getMasterKey().getKeyId())) {
return createPassword(identifier.getBytes(), this.nextMasterKey.getSecretKey());
}
throw new InvalidToken("Invalid AMRMToken from " + applicationAttemptId);
} finally {
this.readLock.unlock();
}
} | 3.26 |
hadoop_AMRMProxyTokenSecretManager_getMasterKey_rdh | // If nextMasterKey is not Null, then return nextMasterKey
// otherwise return currentMasterKey.
@VisibleForTesting
public MasterKeyData getMasterKey() {
this.readLock.lock();
try {
return nextMasterKey == null ? currentMasterKey : nextMasterKey;
} finally {
this.readLock.unlock();
}
} | 3.26 |
hadoop_AMRMProxyTokenSecretManager_createIdentifier_rdh | /**
* Creates an empty TokenId to be used for de-serializing an
* {@link AMRMTokenIdentifier} by the RPC layer.
*/
@Override
public AMRMTokenIdentifier createIdentifier() {
return new AMRMTokenIdentifier();
} | 3.26 |
hadoop_NodeAttributeInfo_newInstance_rdh | /**
* <p>
* Node Attribute Info describes a NodeAttribute.
* </p>
*/
@Public
@Unstable
public abstract class NodeAttributeInfo {
public static NodeAttributeInfo newInstance(NodeAttribute nodeAttribute) {
return newInstance(nodeAttribute.getAttributeKey(), nodeAttribute.getAttributeType());
} | 3.26 |
hadoop_ParsedHost_numberOfDistances_rdh | /**
* TODO handle arbitrary level of network names.
*/
static int numberOfDistances() {
return 3;
} | 3.26 |
hadoop_NodeIDsInfo_add_rdh | /**
* This method will generate a new NodeIDsInfo object based on the two NodeIDsInfo objects.
* The information to be combined includes the node list (with duplicate
* nodes removed) and the partitionInfo object.
*
* @param left
* left NodeIDsInfo Object.
* @param right
* right NodeIDsInfo Object.
* @return new NodeIDsInfo Object.
*/
public static NodeIDsInfo add(NodeIDsInfo left, NodeIDsInfo right) {
Set<String> v0 = new HashSet<>();
if ((left != null) && (left.nodeIDsList != null)) {
v0.addAll(left.nodeIDsList);
}
if ((right != null) && (right.nodeIDsList != null)) {
v0.addAll(right.nodeIDsList);
}
PartitionInfo leftPartitionInfo = null;
if (left != null) {
leftPartitionInfo = left.getPartitionInfo();
}
PartitionInfo rightPartitionInfo = null;
if (right != null) {
rightPartitionInfo = right.getPartitionInfo();
}
PartitionInfo info = PartitionInfo.addTo(leftPartitionInfo, rightPartitionInfo);
return new NodeIDsInfo(v0, info);
} | 3.26 |
hadoop_ExternalCall_waitForCompletion_rdh | // wait for response to be triggered to support postponed calls
private void waitForCompletion() throws InterruptedException {
synchronized(done) {
while (!done.get()) {
try {
done.wait();
} catch (InterruptedException ie) {
if (Thread.interrupted()) {
throw ie;
}
}
}
}
} | 3.26 |
hadoop_ExternalCall_run_rdh | // invoked by ipc handler
@Override
public final Void run() throws IOException {
try {
result = action.run();
sendResponse();
} catch (Throwable t) {
abortResponse(t);
}
return null;
} | 3.26 |
hadoop_TimelineEntity_getEntityId_rdh | /**
* Get the entity Id
*
* @return the entity Id
*/
@XmlElement(name = "entity")
public String getEntityId() {
return entityId;
} | 3.26 |
hadoop_TimelineEntity_addRelatedEntity_rdh | /**
* Add an entity to the existing related entity map
*
* @param entityType
* the entity type
* @param entityId
* the entity Id
*/
public void addRelatedEntity(String entityType, String entityId) {
Set<String> thisRelatedEntity = relatedEntities.get(entityType);
if (thisRelatedEntity == null) {
relatedEntities.put(entityType, thisRelatedEntity);
}
thisRelatedEntity.add(entityId);
} | 3.26 |
hadoop_TimelineEntity_setPrimaryFilters_rdh | /**
* Set the primary filter map to the given map of primary filters
*
* @param primaryFilters
* a map of primary filters
*/
public void setPrimaryFilters(Map<String, Set<Object>> primaryFilters) {
this.primaryFilters = TimelineServiceHelper.mapCastToHashMap(primaryFilters);
} | 3.26 |
hadoop_TimelineEntity_setEntityType_rdh | /**
* Set the entity type
*
* @param entityType
* the entity type
*/
public void setEntityType(String entityType) {
this.entityType = entityType;
} | 3.26 |
hadoop_TimelineEntity_setDomainId_rdh | /**
* Set the ID of the domain that the entity is to be put
*
* @param domainId
* the name space ID
*/
public void setDomainId(String domainId) {
this.domainId = domainId;
} | 3.26 |
hadoop_TimelineEntity_getStartTime_rdh | /**
* Get the start time of the entity
*
* @return the start time of the entity
*/
@XmlElement(name = "starttime")
public Long getStartTime() {
return startTime;
} | 3.26 |
hadoop_TimelineEntity_setOtherInfo_rdh | /**
* Set the other info map to the given map of other information
*
* @param otherInfo
* a map of other information
*/
public void setOtherInfo(Map<String, Object> otherInfo) {
this.otherInfo = TimelineServiceHelper.mapCastToHashMap(otherInfo);
} | 3.26 |
hadoop_TimelineEntity_getRelatedEntities_rdh | /**
* Get the related entities
*
* @return the related entities
*/
public Map<String, Set<String>> getRelatedEntities() {
return relatedEntities;
} | 3.26 |
hadoop_TimelineEntity_addOtherInfo_rdh | /**
* Add a map of other information of the entity to the existing other info map
*
* @param otherInfo
* a map of other information
*/
public void addOtherInfo(Map<String, Object> otherInfo) {
this.otherInfo.putAll(otherInfo);
} | 3.26 |
hadoop_TimelineEntity_setRelatedEntities_rdh | /**
* Set the related entity map to the given map of related entities
*
* @param relatedEntities
* a map of related entities
*/
public void setRelatedEntities(Map<String, Set<String>> relatedEntities) {
this.relatedEntities = TimelineServiceHelper.mapCastToHashMap(relatedEntities);
} | 3.26 |
hadoop_TimelineEntity_addPrimaryFilter_rdh | /**
* Add a single piece of primary filter to the existing primary filter map
*
* @param key
* the primary filter key
* @param value
* the primary filter value
*/
public void addPrimaryFilter(String key, Object value) {
Set<Object> thisPrimaryFilter = primaryFilters.get(key);
if (thisPrimaryFilter == null) {
thisPrimaryFilter = new HashSet<Object>();
primaryFilters.put(key, thisPrimaryFilter);
}
thisPrimaryFilter.add(value);
} | 3.26 |
hadoop_TimelineEntity_getEvents_rdh | /**
* Get a list of events related to the entity
*
* @return a list of events related to the entity
*/
@XmlElement(name = "events")
public List<TimelineEvent> getEvents() {
return events;
} | 3.26 |
hadoop_TimelineEntity_addEvent_rdh | /**
* Add a single event related to the entity to the existing event list
*
* @param event
* a single event related to the entity
*/
public void addEvent(TimelineEvent event) {
events.add(event);
} | 3.26 |
hadoop_TimelineEntity_addEvents_rdh | /**
* Add a list of events related to the entity to the existing event list
*
* @param events
* a list of events related to the entity
*/
public void addEvents(List<TimelineEvent> events) {
this.events.addAll(events);
} | 3.26 |
hadoop_TimelineEntity_setEntityId_rdh | /**
* Set the entity Id
*
* @param entityId
* the entity Id
*/
public void setEntityId(String entityId) {
this.entityId = entityId;
} | 3.26 |
hadoop_TimelineEntity_setEvents_rdh | /**
* Set the event list to the given list of events related to the entity
*
* @param events
* a list of events related to the entity
*/
public void setEvents(List<TimelineEvent> events) {
this.events = events;
} | 3.26 |
hadoop_TimelineEntity_getEntityType_rdh | /**
* Get the entity type
*
* @return the entity type
*/
@XmlElement(name = "entitytype")
public String getEntityType() {
return entityType;
} | 3.26 |
hadoop_TimelineEntity_addPrimaryFilters_rdh | /**
* Add a map of primary filters to the existing primary filter map
*
* @param primaryFilters
* a map of primary filters
*/
public void addPrimaryFilters(Map<String, Set<Object>> primaryFilters) {
for (Entry<String, Set<Object>> primaryFilter : primaryFilters.entrySet()) {
Set<Object> thisPrimaryFilter = this.primaryFilters.get(primaryFilter.getKey());
if (thisPrimaryFilter == null) {
this.primaryFilters.put(primaryFilter.getKey(), primaryFilter.getValue());
} else {
thisPrimaryFilter.addAll(primaryFilter.getValue());
}
}
} | 3.26 |
hadoop_TimelineEntity_m0_rdh | /**
* Get the primary filters
*
* @return the primary filters
*/
public Map<String, Set<Object>> m0() {
return primaryFilters;
} | 3.26 |
hadoop_TimelineEntity_getDomainId_rdh | /**
* Get the ID of the domain that the entity is to be put
*
* @return the domain ID
*/
@XmlElement(name = "domain")
public String getDomainId() {
return domainId;
} | 3.26 |
hadoop_TimelineEntity_m1_rdh | // Required by JAXB
@Private
@XmlElement(name = "primaryfilters")
public HashMap<String, Set<Object>> m1() {
return primaryFilters;
} | 3.26 |
hadoop_TimelineEntity_addRelatedEntities_rdh | /**
* Add a map of related entities to the existing related entity map
*
* @param relatedEntities
* a map of related entities
*/
public void addRelatedEntities(Map<String, Set<String>> relatedEntities) {
for (Entry<String, Set<String>> relatedEntity : relatedEntities.entrySet()) {
Set<String> thisRelatedEntity = this.relatedEntities.get(relatedEntity.getKey());
if (thisRelatedEntity == null) {
this.relatedEntities.put(relatedEntity.getKey(), relatedEntity.getValue());
} else {
thisRelatedEntity.addAll(relatedEntity.getValue());
}
}
} | 3.26 |
hadoop_TimelineEntity_getRelatedEntitiesJAXB_rdh | // Required by JAXB
@Private
@XmlElement(name = "relatedentities")
public HashMap<String, Set<String>> getRelatedEntitiesJAXB() {
return relatedEntities;
} | 3.26 |
hadoop_TimelineEntity_getOtherInfo_rdh | /**
* Get the other information of the entity
*
* @return the other information of the entity
*/
public Map<String, Object> getOtherInfo() {
return otherInfo;
} | 3.26 |
hadoop_TimelineEntity_setStartTime_rdh | /**
* Set the start time of the entity
*
* @param startTime
* the start time of the entity
*/
public void setStartTime(Long startTime) {
this.startTime = startTime;
} | 3.26 |
hadoop_TimelineEntity_getOtherInfoJAXB_rdh | // Required by JAXB
@Private
@XmlElement(name = "otherinfo")
public HashMap<String, Object> getOtherInfoJAXB() {
return otherInfo;
} | 3.26 |
hadoop_FlowActivityRowKey_parseRowKeyFromString_rdh | /**
* Given the raw row key as string, returns the row key as an object.
*
* @param encodedRowKey
* String representation of row key.
* @return A <cite>FlowActivityRowKey</cite> object.
*/
public static FlowActivityRowKey parseRowKeyFromString(String encodedRowKey) {
return new FlowActivityRowKeyConverter().decodeFromString(encodedRowKey);
} | 3.26 |
hadoop_FlowActivityRowKey_getRowKeyAsString_rdh | /**
* Constructs a row key for the flow activity table as follows:
* {@code clusterId!dayTimestamp!user!flowName}.
*
* @return String representation of row key
*/
public String getRowKeyAsString() {
return flowActivityRowKeyConverter.encodeAsString(this);
} | 3.26 |
hadoop_FlowActivityRowKey_parseRowKey_rdh | /**
* Given the raw row key as bytes, returns the row key as an object.
*
* @param rowKey
* Byte representation of row key.
* @return A <cite>FlowActivityRowKey</cite> object.
*/
public static FlowActivityRowKey parseRowKey(byte[] rowKey) {
return new FlowActivityRowKeyConverter().decode(rowKey);
} | 3.26 |
hadoop_FlowActivityRowKey_getRowKey_rdh | /**
* Constructs a row key for the flow activity table as follows:
* {@code clusterId!dayTimestamp!user!flowName}.
*
* @return byte array for the row key
*/
public byte[] getRowKey() {
return flowActivityRowKeyConverter.encode(this);
} | 3.26 |
hadoop_FederationPolicyStoreInputValidator_checkType_rdh | /**
* Validate if the policy type is a valid or not.
*
* @param type
* the type of the policy to be verified
* @throws FederationStateStoreInvalidInputException
* if the policy is invalid
*/
private static void checkType(String type) throws FederationStateStoreInvalidInputException {
if ((type == null) || type.isEmpty()) {
String message = "Missing Policy Type." + " Please try again by specifying a Policy Type.";
LOG.warn(message);
throw new FederationStateStoreInvalidInputException(message);
}
} | 3.26 |
hadoop_FederationPolicyStoreInputValidator_validate_rdh | /**
* Quick validation on the input to check some obvious fail conditions (fail
* fast). Check if the provided
* {@link SetSubClusterPolicyConfigurationRequest} for adding a new policy is
* valid or not.
*
* @param request
* the {@link SetSubClusterPolicyConfigurationRequest} to
* validate against
* @throws FederationStateStoreInvalidInputException
* if the request is invalid
*/
public static void validate(SetSubClusterPolicyConfigurationRequest request) throws FederationStateStoreInvalidInputException {
if (request == null) {
String message = "Missing SetSubClusterPolicyConfiguration Request." + " Please try again by specifying an policy insertion information.";
LOG.warn(message);
throw new FederationStateStoreInvalidInputException(message);
}
// validate subcluster policy configuration
checkSubClusterPolicyConfiguration(request.getPolicyConfiguration());
} | 3.26 |
hadoop_FederationPolicyStoreInputValidator_checkSubClusterPolicyConfiguration_rdh | /**
* Validate if the SubClusterPolicyConfiguration is valid or not.
*
* @param policyConfiguration
* the policy information to be verified
* @throws FederationStateStoreInvalidInputException
* if the policy information
* are invalid
*/
private static void checkSubClusterPolicyConfiguration(SubClusterPolicyConfiguration policyConfiguration) throws FederationStateStoreInvalidInputException {
if (policyConfiguration == null) {
String message = "Missing SubClusterPolicyConfiguration." + " Please try again by specifying a SubClusterPolicyConfiguration.";
LOG.warn(message);
throw new FederationStateStoreInvalidInputException(message);
}
// validate queue id
checkQueue(policyConfiguration.getQueue());
// validate policy type
checkType(policyConfiguration.getType());
} | 3.26 |
hadoop_FederationPolicyStoreInputValidator_checkQueue_rdh | /**
* Validate if the queue id is a valid or not.
*
* @param queue
* the queue id of the policy to be verified
* @throws FederationStateStoreInvalidInputException
* if the queue id is
* invalid
*/
private static void checkQueue(String queue) throws FederationStateStoreInvalidInputException {
if ((queue == null) || queue.isEmpty()) {
String message = "Missing Queue. Please try again by specifying a Queue.";
LOG.warn(message);
throw new FederationStateStoreInvalidInputException(message);
}
} | 3.26 |
hadoop_BackupState_prepareToExitState_rdh | // HAState
@Override
public void prepareToExitState(HAContext context) throws ServiceFailedException {
context.prepareToStopStandbyServices();
} | 3.26 |
hadoop_BackupState_exitState_rdh | // HAState
@Override
public void exitState(HAContext context) throws ServiceFailedException {
try {
context.stopActiveServices();
} catch (IOException e) {
throw new ServiceFailedException("Failed to stop backup services", e);
}
} | 3.26 |
hadoop_BackupState_shouldPopulateReplQueues_rdh | // HAState
@Override
public boolean shouldPopulateReplQueues() {
return false;
} | 3.26 |
hadoop_BackupState_enterState_rdh | // HAState
@Override
public void enterState(HAContext context) throws ServiceFailedException {
try {
context.startActiveServices();
} catch (IOException e) {
throw new ServiceFailedException("Failed to start backup services", e);}
} | 3.26 |
hadoop_BackupState_checkOperation_rdh | // HAState
@Override
public void checkOperation(HAContext context, OperationCategory op) throws StandbyException {
context.checkOperation(op);
} | 3.26 |