name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_MappingRuleResult_getQueue_rdh | /**
* This method returns the result queue. Currently only makes sense when
* result == PLACE.
*
* @return the queue this result is about
*/
public String getQueue() {
return queue;
} | 3.26 |
hadoop_MappingRuleResult_getResult_rdh | /**
* Returns the type of the result.
*
* @return the type of the result.
*/
public MappingRuleResultType getResult() {
return result;
} | 3.26 |
hadoop_MappingRuleResult_createSkipResult_rdh | /**
* Generator method for skip results.
*
* @return The generated MappingRuleResult
*/
public static MappingRuleResult createSkipResult() {
return RESULT_SKIP;
} | 3.26 |
hadoop_MappingRuleResult_createDefaultPlacementResult_rdh | /**
* Generator method for default placement results. It is a specialized
* placement result which will only use the "%default" as a queue name.
*
* @return The generated MappingRuleResult
*/
public static MappingRuleResult createDefaultPlacementResult() {
return RESULT_DEFAULT_PLACEMENT;
} | 3.26 |
hadoop_MappingRuleResult_createRejectResult_rdh | /**
* Generator method for reject results.
*
* @return The generated MappingRuleResult
*/
public static MappingRuleResult createRejectResult() {
return RESULT_REJECT;
} | 3.26 |
hadoop_MappingRuleResult_toString_rdh | /**
* Returns the string representation of the object.
*
* @return the string representation of the object
*/
@Override
public String toString() {
if (result == MappingRuleResultType.PLACE) {
return result.name() + ": '" + normalizedQueue + "' ('" + queue + "')";
} else {
return result.name();
}
} | 3.26 |
hadoop_MappingRuleResult_isCreateAllowed_rdh | /**
* The method returns true if the result queue should be created when it does
* not exist yet.
*
* @return true if non-existent queues should be created
*/
public boolean isCreateAllowed() {
return f0;
} | 3.26 |
hadoop_MappingRuleResult_m0_rdh | /**
* Generator method for place results.
*
* @param queue
* The name of the queue in which we shall place the application
* @param allowCreate
* Flag to indicate if the placement rule is allowed to
* create a queue if possible.
* @return The generated MappingRuleResult
*/
public static MappingRuleResult m0(String queue, boolean allowCreate) {
return new MappingRuleResult(queue, MappingRuleResultType.PLACE, allowCreate);
} | 3.26 |
hadoop_MappingRuleResult_getNormalizedQueue_rdh | /**
* This method returns the normalized name of the result queue.
* Currently it only makes sense when result == PLACE.
* The normalized value must be set externally; this class cannot normalize it,
* it only provides a way to store the normalized name of a queue.
*
* @return the queue name this result is about
*/
public String getNormalizedQueue() {
return normalizedQueue;
} | 3.26 |
hadoop_Name_registerExpression_rdh | /**
* Registers this expression with the specified factory.
*/
public static void registerExpression(ExpressionFactory factory) throws IOException {
factory.addClass(Name.class,
"-name");
factory.addClass(Name.Iname.class, "-iname");
} | 3.26 |
hadoop_Tail_expandArgument_rdh | // TODO: HADOOP-7234 will add glob support; for now, be backwards compat
@Override
protected List<PathData> expandArgument(String arg) throws IOException {
List<PathData> items = new LinkedList<PathData>();
items.add(new PathData(arg, getConf()));
return items;
} | 3.26 |
hadoop_BlockReaderUtil_readAll_rdh | /* See {@link BlockReader#readAll(byte[], int, int)} */
public static int readAll(BlockReader reader, byte[] buf, int offset, int len) throws IOException {
int n = 0;
for (; ;) {
int nread = reader.read(buf, offset + n, len - n);
if (nread <= 0)
return n == 0 ? nread : n;
n += nread;
if (n >= len)
return n;
}
} | 3.26 |
hadoop_BlockReaderUtil_readFully_rdh | /* See {@link BlockReader#readFully(byte[], int, int)} */
public static void readFully(BlockReader reader, byte[] buf, int off, int len) throws IOException {
int toRead = len;
while (toRead > 0) {
int ret = reader.read(buf, off, toRead);
if (ret < 0) {
throw new IOException("Premature EOF from inputStream");
}
toRead -= ret;
off += ret;
}
} | 3.26 |
hadoop_ComponentContainers_addContainer_rdh | /**
* Add a container.
*
* @param container
* container
*/
public void addContainer(Container container) {
containers.add(container);
} | 3.26 |
hadoop_ComponentContainers_name_rdh | /**
* Name of the service component.
*/
public ComponentContainers name(String name) {
this.componentName = name;
return this;
} | 3.26 |
hadoop_ComponentContainers_setContainers_rdh | /**
* Sets the containers.
*
* @param containers
* containers of the component.
*/
public void setContainers(List<Container> containers) {
this.containers = containers;
} | 3.26 |
hadoop_ComponentContainers_containers_rdh | /**
* Sets the containers.
*
* @param compContainers
* containers of the component.
*/
public ComponentContainers containers(List<Container> compContainers) {
this.containers = compContainers;
return this;
} | 3.26 |
hadoop_BufferedIOStatisticsInputStream_getIOStatistics_rdh | /**
* Return any IOStatistics offered by the inner stream.
*
* @return inner IOStatistics or null
*/
@Override
public IOStatistics getIOStatistics() {
return retrieveIOStatistics(in);
} | 3.26 |
hadoop_BufferedIOStatisticsInputStream_hasCapability_rdh | /**
* If the inner stream supports {@link StreamCapabilities},
* forward the probe to it.
* Otherwise: return false.
*
* @param capability
* string to query the stream support for.
* @return true if a capability is known to be supported.
*/
@Override
public boolean hasCapability(final String capability) {
if (in instanceof StreamCapabilities) {
return ((StreamCapabilities) (in)).hasCapability(capability);
} else {
return false;
}
} | 3.26 |
hadoop_NamenodeRegistration_getAddress_rdh | // NodeRegistration
@Override
public String getAddress() {
return rpcAddress;
} | 3.26 |
hadoop_NamenodeRegistration_toString_rdh | // NodeRegistration
@Override
public String toString() {
return ((((getClass().getSimpleName() + "(") + rpcAddress) + ", role=") + getRole()) + ")";
} | 3.26 |
hadoop_NamenodeRegistration_getVersion_rdh | // NodeRegistration
@Override
public int getVersion() {
return super.getLayoutVersion();
} | 3.26 |
hadoop_NamenodeRegistration_getRole_rdh | /**
* Get name-node role.
*/
public NamenodeRole getRole() {
return f0;
} | 3.26 |
hadoop_NamenodeRegistration_getRegistrationID_rdh | // NodeRegistration
@Override
public String getRegistrationID() {
return Storage.getRegistrationID(this);
} | 3.26 |
hadoop_Check_ge0_rdh | /**
* Verifies a long is greater than or equal to zero.
*
* @param value
* long value.
* @param name
* the name to use in the exception message.
* @return the value.
* @throws IllegalArgumentException
* if the long is less than zero.
*/
public static long ge0(long value, String name) {
if (value < 0) {
throw new IllegalArgumentException(MessageFormat.format("parameter [{0}] = [{1}] must be greater than or equal to zero", name, value));
}
return value;
} | 3.26 |
hadoop_Check_validIdentifier_rdh | /**
* Verifies a value is a valid identifier,
* <code>[a-zA-Z_][a-zA-Z0-9_\-]*</code>, up to a maximum length.
*
* @param value
* string to check if it is a valid identifier.
* @param maxLen
* maximum length.
* @param name
* the name to use in the exception message.
* @return the value.
* @throws IllegalArgumentException
* if the string is not a valid identifier.
*/
public static String validIdentifier(String value, int maxLen, String name) {
Check.notEmpty(value, name);
if (value.length() > maxLen) {
throw new IllegalArgumentException(MessageFormat.format("[{0}] = [{1}] exceeds max len [{2}]", name, value, maxLen));
}
if (!IDENTIFIER_PATTERN.matcher(value).find()) {
throw new IllegalArgumentException(MessageFormat.format("[{0}] = [{1}] must be \"{2}\"", name, value, IDENTIFIER_PATTERN_STR));
}
return value;
} | 3.26 |
hadoop_Check_notNullElements_rdh | /**
* Verifies a list does not have any NULL elements.
*
* @param list
* the list to check.
* @param name
* the name to use in the exception message.
* @return the list.
* @throws IllegalArgumentException
* if the list has NULL elements.
*/
public static <T> List<T> notNullElements(List<T> list, String name) {
notNull(list, name);
for (int i = 0; i < list.size(); i++) {
notNull(list.get(i), MessageFormat.format("list [{0}] element [{1}]", name, i));
}
return list;
} | 3.26 |
hadoop_Check_notEmpty_rdh | /**
* Verifies a string is not NULL and not empty.
*
* @param str
* the variable to check.
* @param name
* the name to use in the exception message.
* @return the variable.
* @throws IllegalArgumentException
* if the variable is NULL or empty.
*/
public static String notEmpty(String str, String name) {
if (str == null) {
throw new IllegalArgumentException(name + " cannot be null");
}
if (str.length() == 0) {
throw new IllegalArgumentException(name + " cannot be empty");
}
return str;
} | 3.26 |
hadoop_Check_gt0_rdh | /**
* Verifies a long is greater than zero.
*
* @param value
* long value.
* @param name
* the name to use in the exception message.
* @return the value.
* @throws IllegalArgumentException
* if the long is zero or less.
*/
public static long gt0(long value, String name) {
if (value <= 0) {
throw new IllegalArgumentException(MessageFormat.format("parameter [{0}] = [{1}] must be greater than zero", name, value));
}
return value;
} | 3.26 |
hadoop_Check_notEmptyElements_rdh | /**
* Verifies a string list is not NULL and not empty.
*
* @param list
* the list to check.
* @param name
* the name to use in the exception message.
* @return the variable.
* @throws IllegalArgumentException
* if the string list has NULL or empty
* elements.
*/
public static List<String> notEmptyElements(List<String> list, String name) {
notNull(list, name);
for (int i = 0; i < list.size(); i++) {
notEmpty(list.get(i), MessageFormat.format("list [{0}] element [{1}]", name, i));
}
return list;
} | 3.26 |
hadoop_Check_notNull_rdh | /**
* Verifies a variable is not NULL.
*
* @param obj
* the variable to check.
* @param name
* the name to use in the exception message.
* @return the variable.
* @throws IllegalArgumentException
* if the variable is NULL.
*/
public static <T> T notNull(T obj, String name) {
if (obj == null) {
throw new IllegalArgumentException(name + " cannot be null");
}
return obj;
} | 3.26 |
hadoop_MountResponse_writeMNTResponse_rdh | /**
* Response for RPC call {@link MountInterface.MNTPROC#MNT}.
*
* @param status
* status of mount response
* @param xdr
* XDR message object
* @param xid
* transaction id
* @param handle
* file handle
* @return response XDR
*/
public static XDR writeMNTResponse(int status, XDR xdr, int xid, byte[] handle) {
RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
xdr.writeInt(status);
if (status == MNT_OK) {
xdr.writeVariableOpaque(handle);
// Only MountV3 returns a list of supported authFlavors
xdr.writeInt(1);
xdr.writeInt(AuthFlavor.AUTH_SYS.getValue());
}
return xdr;
} | 3.26 |
hadoop_MountResponse_writeExportList_rdh | /**
* Response for RPC call {@link MountInterface.MNTPROC#EXPORT}.
*
* @param xdr
* XDR message object
* @param xid
* transaction id
* @param exports
* export list
* @param hostMatcher
* the list of export host
* @return response XDR
*/
public static XDR writeExportList(XDR xdr, int xid, List<String> exports, List<NfsExports> hostMatcher) {
assert exports.size() == hostMatcher.size();
RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
for (int i = 0; i < exports.size(); i++) {
xdr.writeBoolean(true);// Value follows - yes
xdr.writeString(exports.get(i));
// List host groups
String[] hostGroups = hostMatcher.get(i).getHostGroupList();
if (hostGroups.length > 0) {
for (int j = 0; j < hostGroups.length; j++) {
xdr.writeBoolean(true);// Value follows - yes
xdr.writeVariableOpaque(hostGroups[j].getBytes(StandardCharsets.UTF_8));
}
}
xdr.writeBoolean(false);// Value follows - no more group
}
xdr.writeBoolean(false);// Value follows - no
return xdr;
} | 3.26 |
hadoop_MountResponse_writeMountList_rdh | /**
* Response for RPC call {@link MountInterface.MNTPROC#DUMP}.
*
* @param xdr
* XDR message object
* @param xid
* transaction id
* @param mounts
* mount entries
* @return response XDR
*/
public static XDR writeMountList(XDR xdr, int xid, List<MountEntry> mounts) {
RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
for (MountEntry mountEntry : mounts) {
xdr.writeBoolean(true);// Value follows yes
xdr.writeString(mountEntry.getHost());
xdr.writeString(mountEntry.getPath());
}
xdr.writeBoolean(false);// Value follows no
return xdr;
} | 3.26 |
hadoop_Contracts_checkArg_rdh | /**
* Check an argument for false conditions
*
* @param arg
* the argument to check
* @param expression
* the boolean expression for the condition
* @param msg
* the error message if {@code expression} is false
* @return the argument for convenience
*/
public static double checkArg(double arg, boolean expression, Object msg) {
if (!expression) {
throw new IllegalArgumentException((String.valueOf(msg) + ": ") + arg);
}
return arg;
} | 3.26 |
hadoop_AppToFlowColumn_getColumnQualifier_rdh | /**
*
* @return the column name value
*/
private String getColumnQualifier() {
return columnQualifier;
} | 3.26 |
hadoop_SnapshotDiffReportListing_getLastPath_rdh | /**
*
* @return {@link #lastPath}
*/
public byte[] getLastPath() {
return lastPath;
} | 3.26 |
hadoop_Probe_init_rdh | /**
* Perform any prelaunch initialization.
*/
public void init() throws IOException {
} | 3.26 |
hadoop_IOStatisticsSource_getIOStatistics_rdh | /**
* Return a statistics instance.
* <p>
* It is not a requirement that the same instance is returned every time.
* {@link IOStatisticsSource}.
* <p>
* If the object implementing this is Closeable, this method
* may return null if invoked on a closed object, even if
* it returns a valid instance when called earlier.
*
* @return an IOStatistics instance or null
*/
default IOStatistics getIOStatistics() {
return null;
} | 3.26 |
hadoop_AbstractPolicyManager_internalPolicyGetter_rdh | /**
* Common functionality to instantiate and reinitialize a {@link ConfigurableFederationPolicy}.
*/
private ConfigurableFederationPolicy internalPolicyGetter(final FederationPolicyInitializationContext federationPolicyContext, ConfigurableFederationPolicy oldInstance, Class policy) throws FederationPolicyInitializationException {
FederationPolicyInitializationContextValidator.validate(federationPolicyContext, this.getClass().getCanonicalName());
if ((oldInstance == null) || (!oldInstance.getClass().equals(policy))) {
try {
oldInstance = ((ConfigurableFederationPolicy) (policy.newInstance()));
} catch (InstantiationException e) {
throw new FederationPolicyInitializationException(e);
} catch (IllegalAccessException e) {
throw new FederationPolicyInitializationException(e);
}
}
// copying the context to avoid side-effects
FederationPolicyInitializationContext modifiedContext = updateContext(federationPolicyContext, oldInstance.getClass().getCanonicalName());
oldInstance.reinitialize(modifiedContext);
return oldInstance;
} | 3.26 |
hadoop_AbstractPolicyManager_updateContext_rdh | /**
* This method is used to copy-on-write the context, that will be passed
* downstream to the router/amrmproxy policies.
*/
private FederationPolicyInitializationContext updateContext(FederationPolicyInitializationContext federationPolicyContext, String type) {
// copying configuration and context to avoid modification of original
SubClusterPolicyConfiguration newConf = SubClusterPolicyConfiguration.newInstance(federationPolicyContext.getSubClusterPolicyConfiguration());
newConf.setType(type);
return new FederationPolicyInitializationContext(newConf, federationPolicyContext.getFederationSubclusterResolver(), federationPolicyContext.getFederationStateStoreFacade(), federationPolicyContext.getHomeSubcluster());
} | 3.26 |
hadoop_RegisterApplicationMasterRequest_newInstance_rdh | /**
* Create a new instance of <code>RegisterApplicationMasterRequest</code>.
* If <em>port, trackingUrl</em> is not used, use the following default value:
* <ul>
* <li>port: -1</li>
* <li>trackingUrl: null</li>
* </ul>
* The port is allowed to be any integer larger than or equal to -1.
*
* @param host
* host on which the ApplicationMaster is running.
* @param port
* the RPC port on which the ApplicationMaster is responding.
* @param trackingUrl
* tracking URL for the ApplicationMaster.
* @return the new instance of <code>RegisterApplicationMasterRequest</code>
*/
@Public
@Stable
public static RegisterApplicationMasterRequest newInstance(String host, int port, String trackingUrl) {
RegisterApplicationMasterRequest request = Records.newRecord(RegisterApplicationMasterRequest.class);
request.setHost(host);
request.setRpcPort(port);
request.setTrackingUrl(trackingUrl);
return request;
} | 3.26 |
hadoop_RegisterApplicationMasterRequest_setPlacementConstraints_rdh | /**
* Set Placement Constraints applicable to the
* {@link org.apache.hadoop.yarn.api.records.SchedulingRequest}s
* of this application.
* The mapping is from a set of allocation tags to a
* <code>PlacementConstraint</code> associated with the tags.
* For example:
* Map <
* <hb_regionserver> -> node_anti_affinity,
* <hb_regionserver, hb_master> -> rack_affinity,
* ...
* >
*
* @param placementConstraints
* Placement Constraint Mapping.
*/
@Public
@Unstable
public void setPlacementConstraints(Map<Set<String>, PlacementConstraint> placementConstraints) {
} | 3.26 |
hadoop_RegisterApplicationMasterRequest_getPlacementConstraints_rdh | /**
* Return all Placement Constraints specified at the Application level. The
* mapping is from a set of allocation tags to a
* <code>PlacementConstraint</code> associated with the tags, i.e., each
* {@link org.apache.hadoop.yarn.api.records.SchedulingRequest} that has those
* tags will be placed taking into account the corresponding constraint.
*
* @return A map of Placement Constraints.
*/
@Public
@Unstable
public Map<Set<String>, PlacementConstraint> getPlacementConstraints() {
return new HashMap<>();
} | 3.26 |
hadoop_RouterQuotaUsage_verifyNamespaceQuota_rdh | /**
* Verify if namespace quota is violated once quota is set. Relevant
* method {@link DirectoryWithQuotaFeature#verifyNamespaceQuota}.
*
* @throws NSQuotaExceededException
* If the quota is exceeded.
*/
public void verifyNamespaceQuota() throws NSQuotaExceededException {
long quota = getQuota();
long fileAndDirectoryCount = getFileAndDirectoryCount();
if (Quota.isViolated(quota, fileAndDirectoryCount)) {
throw new NSQuotaExceededException(quota, fileAndDirectoryCount);
}
} | 3.26 |
hadoop_RouterQuotaUsage_verifyStoragespaceQuota_rdh | /**
* Verify if storage space quota is violated once quota is set. Relevant
* method {@link DirectoryWithQuotaFeature#verifyStoragespaceQuota}.
*
* @throws DSQuotaExceededException
* If the quota is exceeded.
*/
public void verifyStoragespaceQuota() throws DSQuotaExceededException {
long spaceQuota = getSpaceQuota();
long spaceConsumed = getSpaceConsumed();
if (Quota.isViolated(spaceQuota, spaceConsumed)) {
throw new DSQuotaExceededException(spaceQuota, spaceConsumed);
}
} | 3.26 |
hadoop_RouterQuotaUsage_verifyQuotaByStorageType_rdh | /**
* Verify if the space quota by storage type is violated once quota is set. Relevant
* method {@link DirectoryWithQuotaFeature#verifyQuotaByStorageType}.
*
* @throws DSQuotaExceededException
* If the quota is exceeded.
*/
public void verifyQuotaByStorageType() throws DSQuotaExceededException {
for (StorageType t : StorageType.getTypesSupportingQuota()) {
long typeQuota = getTypeQuota(t);
if (typeQuota == HdfsConstants.QUOTA_RESET) {
continue;
}
long typeConsumed = getTypeConsumed(t);
if (Quota.isViolated(typeQuota, typeConsumed)) {
throw new DSQuotaExceededException(typeQuota, typeConsumed);
}
}
} | 3.26 |
hadoop_KMSAuditLogger_setEndTime_rdh | /**
* Set the time this audit event is finished.
*/
void setEndTime(long endTime) {
this.endTime = endTime;
} | 3.26 |
hadoop_DefaultStringifier_storeArray_rdh | /**
* Stores the array of items in the configuration with the given keyName.
*
* @param <K>
* the class of the item
* @param conf
* the configuration to use
* @param items
* the objects to be stored
* @param keyName
* the name of the key to use
* @throws IndexOutOfBoundsException
* if the items array is empty
* @throws IOException
* : forwards Exceptions from the underlying
* {@link Serialization} classes.
*/
public static <K> void storeArray(Configuration conf, K[] items, String keyName) throws IOException {
if (items.length == 0) {
throw new IndexOutOfBoundsException();
}
DefaultStringifier<K> stringifier = new DefaultStringifier<K>(conf, GenericsUtil.getClass(items[0]));
try {
StringBuilder builder = new StringBuilder();
for (K item : items) {
builder.append(stringifier.toString(item)).append(f0);
}
conf.set(keyName, builder.toString());
} finally {
stringifier.close();
}
}
/**
* Restores the array of objects from the configuration.
*
* @param <K>
* the class of the item
* @param conf
* the configuration to use
* @param keyName
* the name of the key to use
* @param itemClass
* the class of the item
* @return restored object
* @throws IOException
* : forwards Exceptions from the underlying
* {@link Serialization} | 3.26 |
hadoop_DefaultStringifier_m0_rdh | /**
* Restores the object from the configuration.
*
* @param <K>
* the class of the item
* @param conf
* the configuration to use
* @param keyName
* the name of the key to use
* @param itemClass
* the class of the item
* @return restored object
* @throws IOException
* : forwards Exceptions from the underlying
* {@link Serialization} classes.
*/
public static <K> K m0(Configuration conf, String keyName, Class<K> itemClass) throws IOException {
DefaultStringifier<K> stringifier = new DefaultStringifier<K>(conf, itemClass);
try {
String itemStr = conf.get(keyName);
return stringifier.fromString(itemStr);
} finally {
stringifier.close();
}
} | 3.26 |
hadoop_DefaultStringifier_store_rdh | /**
* Stores the item in the configuration with the given keyName.
*
* @param <K>
* the class of the item
* @param conf
* the configuration to store
* @param item
* the object to be stored
* @param keyName
* the name of the key to use
* @throws IOException
* : forwards Exceptions from the underlying
* {@link Serialization} classes.
*/
public static <K> void store(Configuration conf, K item, String keyName) throws IOException {
DefaultStringifier<K> stringifier = new DefaultStringifier<K>(conf, GenericsUtil.getClass(item));
conf.set(keyName, stringifier.toString(item));
stringifier.close();
} | 3.26 |
hadoop_WordList_getSize_rdh | /**
* Returns the size of the list.
*/
public int getSize() {
return list.size();
} | 3.26 |
hadoop_WordList_setWords_rdh | /**
* Sets the words.
*
* Note that this API is only for Jackson JSON deserialization.
*/
public void setWords(Map<String, Integer> list) {
this.list = list;
} | 3.26 |
hadoop_WordList_contains_rdh | /**
* Returns 'true' if the list contains the specified word.
*/
public boolean contains(String word) {
return list.containsKey(word);
} | 3.26 |
hadoop_WordList_setSize_rdh | /**
* Setters and getters for Jackson JSON
*/
/**
* Sets the size of the list.
*
* Note that this API is only for Jackson JSON deserialization.
*/
public void setSize(int size) {
list = new HashMap<String, Integer>(size);
} | 3.26 |
hadoop_WordList_getWords_rdh | /**
* Gets the words.
*
* Note that this API is only for Jackson JSON serialization.
*/
public Map<String, Integer> getWords() {
return list;
} | 3.26 |
hadoop_WordList_add_rdh | /**
* Adds the specified word to the list if the word is not already added.
*/
public void add(String word) {
if (!contains(word)) {
int index = getSize();
list.put(word, index);
isUpdated = true;
}
} | 3.26 |
hadoop_WordList_indexOf_rdh | /**
* Returns the index of the specified word in the list.
*/
public int indexOf(String word) {
return list.get(word);
} | 3.26 |
hadoop_WordList_isUpdated_rdh | /**
* Returns 'true' if the list is updated since creation (and reload).
*/
@Override
public boolean isUpdated() {
return isUpdated;
} | 3.26 |
hadoop_WordList_setName_rdh | /**
* Note that this API is only for Jackson JSON deserialization.
*/
@Override
public void setName(String name) {
this.name = name;
} | 3.26 |
hadoop_SetupTaskStage_executeStage_rdh | /**
* Set up a task.
*
* @param name
* task name (for logging)
* @return task attempt directory
* @throws IOException
* IO failure.
*/
@Override
protected Path executeStage(final String name) throws IOException {
return createNewDirectory("Task setup " + name, requireNonNull(getTaskAttemptDir(), "No task attempt directory"));
} | 3.26 |
hadoop_RetryReasonCategory_checkExceptionMessage_rdh | /**
* Checks if a required search-string is in the exception's message.
*/
Boolean checkExceptionMessage(final Exception exceptionCaptured, final String search) {
if (search == null) {
return false;
}
if (((exceptionCaptured != null) && (exceptionCaptured.getMessage() != null)) && exceptionCaptured.getMessage().toLowerCase(Locale.US).contains(search.toLowerCase(Locale.US))) {
return true;
}
return false;
} | 3.26 |
hadoop_WorkerId_toString_rdh | /**
* Print workerId.
*
* @return the workerId as a string
*/
@Override
public final String toString() {
return workerId.toString();
} | 3.26 |
hadoop_WorkerId_write_rdh | /**
* {@inheritDoc }
*/
@Override
public final void write(final DataOutput dataOutput) throws IOException {
workerId.write(dataOutput);
hostname.write(dataOutput);
ipAdd.write(dataOutput);
} | 3.26 |
hadoop_WorkerId_setHostname_rdh | /**
* Set hostname for Worker.
*
* @param wkhostname
* : Hostname of worker
*/
public final void setHostname(final Text wkhostname) {
this.hostname = wkhostname;
} | 3.26 |
hadoop_WorkerId_setWorkerId_rdh | /**
* Set workerId.
*
* @param localworkerId
* : Worker identifier
*/
public final void setWorkerId(final String localworkerId) {
this.workerId = new Text(localworkerId);
} | 3.26 |
hadoop_WorkerId_hashCode_rdh | /**
* Override hashcode method for WorkerId.
*/
@Override
public final int hashCode() {
final int prime = 31;
int result = 1;
int workerHash = 0;
if (workerId == null) {
workerHash = 0;
} else {
workerHash = workerId.hashCode();
}
int hostHash = 0;
if (hostname == null) {
hostHash = 0;
} else {
hostHash = hostname.hashCode();
}
int ipHash = 0;
if (ipAdd == null) {
ipHash = 0;
} else {
ipHash = ipAdd.hashCode();
}
result = (prime * result) + workerHash;
result = (prime * result) + hostHash;
result = (prime * result) + ipHash;
return result;
} | 3.26 |
hadoop_WorkerId_getIPAddress_rdh | /**
* Get Worker IP address.
*
* @return IP address of worker node
*/
public final String getIPAddress() {
return this.ipAdd.toString();
} | 3.26 |
hadoop_WorkerId_getHostname_rdh | /**
* Get hostname for Worker.
*
* @return hostname of worker node
*/
public final Text getHostname() {
return hostname;
} | 3.26 |
hadoop_WorkerId_getWorkerId_rdh | /**
* Get workerId.
*
* @return workerId : Worker identifier
*/
public final String getWorkerId() {
return this.workerId.toString();
} | 3.26 |
hadoop_WorkerId_equals_rdh | /**
* Implement the equals method for WorkerId.
*/
@Override
public final boolean equals(final Object o) {
if ((o == null) || (this.getClass() != o.getClass())) {
return false;
}
WorkerId x = ((WorkerId) (o));
return x.getHostname().equals(this.hostname);
} | 3.26 |
hadoop_WorkerId_readFields_rdh | /**
* {@inheritDoc }
*/
@Override
public final void readFields(final DataInput dataInput) throws IOException {
workerId.readFields(dataInput);
hostname.readFields(dataInput);
ipAdd.readFields(dataInput);
} | 3.26 |
hadoop_SignerSecretProvider_destroy_rdh | /**
* Will be called on shutdown; subclasses should perform any cleanup here.
*/
public void destroy() {
} | 3.26 |
hadoop_StoragePolicySatisfyManager_clearPathIds_rdh | /**
* Removes the SPS path id from the list of sps paths.
*
* @throws IOException
*/
private void clearPathIds() {
synchronized(pathsToBeTraversed) {
Iterator<Long> iterator = pathsToBeTraversed.iterator();
while (iterator.hasNext()) {
Long trackId = iterator.next();
try {
namesystem.removeXattr(trackId, HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY);
} catch (IOException e) {
LOG.debug("Failed to remove sps xattr!", e);
}
iterator.remove();
}
}
} | 3.26 |
hadoop_StoragePolicySatisfyManager_start_rdh | /**
* This function will do following logic based on the configured sps mode:
*
* <p>
* If the configured mode is {@link StoragePolicySatisfierMode#EXTERNAL}, then
* it won't do anything. Administrator requires to start external sps service
* explicitly.
*
* <p>
* If the configured mode is {@link StoragePolicySatisfierMode#NONE}, then the
* service is disabled and won't do any action.
*/
public void start() {
if (!storagePolicyEnabled) {
LOG.info("Disabling StoragePolicySatisfier service as {} set to {}.", DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, storagePolicyEnabled);
return;
}
switch (mode) {
case EXTERNAL :
LOG.info("Storage policy satisfier is configured as external, " + "please start external sps service explicitly to satisfy policy");
break;
case NONE :
LOG.info("Storage policy satisfier is disabled");
break;
default :
LOG.info("Given mode: {} is invalid", mode);
break;
}
} | 3.26 |
hadoop_StoragePolicySatisfyManager_stop_rdh | /**
* This function will do following logic based on the configured sps mode:
*
* <p>
* If the configured mode is {@link StoragePolicySatisfierMode#EXTERNAL}, then
* it won't do anything. Administrator requires to stop external sps service
* explicitly, if needed.
*
* <p>
* If the configured mode is {@link StoragePolicySatisfierMode#NONE}, then the
* service is disabled and won't do any action.
*/
public void stop() {
if (!storagePolicyEnabled) {
if (LOG.isDebugEnabled()) {
LOG.debug("Storage policy is not enabled, ignoring");
}
return;
}
switch (mode) {
case EXTERNAL :
removeAllPathIds();
if (LOG.isDebugEnabled()) {
LOG.debug("Storage policy satisfier service is running outside namenode" + ", ignoring");
}
break;
case NONE :
if (LOG.isDebugEnabled()) {
LOG.debug("Storage policy satisfier is not enabled, ignoring");
}
break;
default :
if (LOG.isDebugEnabled()) {
LOG.debug("Invalid mode:{}, ignoring", mode);
}
break;
}
} | 3.26 |
hadoop_StoragePolicySatisfyManager_addPathId_rdh | /**
* Adds the sps path to SPSPathIds list.
*
* @param id
*/
public void addPathId(long id) {
synchronized(pathsToBeTraversed) {
pathsToBeTraversed.add(id);
}
} | 3.26 |
hadoop_StoragePolicySatisfyManager_removeAllPathIds_rdh | /**
* Clean up all sps path ids.
*/
public void removeAllPathIds() {
synchronized(pathsToBeTraversed) {
pathsToBeTraversed.clear();
}
} | 3.26 |
hadoop_StoragePolicySatisfyManager_getPendingSPSPaths_rdh | /**
*
* @return the number of paths to be processed by storage policy satisfier.
*/
public int getPendingSPSPaths() {
return pathsToBeTraversed.size();
} | 3.26 |
hadoop_StoragePolicySatisfyManager_changeModeEvent_rdh | /**
* Sets new sps mode. If the new mode is none, then it will disable the sps
* feature completely by clearing all queued up sps path's hint.
*/
public void changeModeEvent(StoragePolicySatisfierMode newMode) {
if (!storagePolicyEnabled) {
LOG.info("Failed to change storage policy satisfier as {} set to {}.", DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, storagePolicyEnabled);
return;
}
if (LOG.isDebugEnabled()) {
LOG.debug("Updating SPS service status, current mode:{}, new mode:{}", mode, newMode);
}
switch (newMode) {
case EXTERNAL :
if (mode == newMode) {
LOG.info("Storage policy satisfier is already in mode:{}," + " so ignoring change mode event.", newMode);
return;
}
spsService.stopGracefully();
break;
case NONE :
if (mode == newMode) {
LOG.info("Storage policy satisfier is already disabled, mode:{}" + " so ignoring change mode event.", newMode);
return;
}
LOG.info("Disabling StoragePolicySatisfier, mode:{}", newMode);
spsService.stop(true);
clearPathIds();
break;
default :
if (LOG.isDebugEnabled()) {
LOG.debug("Given mode: {} is invalid", newMode);
}
break;
}
// update sps mode
mode = newMode;
} | 3.26 |
hadoop_StoragePolicySatisfyManager_getMode_rdh | /**
*
* @return sps service mode.
*/
public StoragePolicySatisfierMode getMode() {
return mode;
} | 3.26 |
hadoop_StoragePolicySatisfyManager_verifyOutstandingPathQLimit_rdh | /**
* Verify that the satisfier queue has not exceeded the allowed outstanding limit.
*
* @throws IOException
*/
public void verifyOutstandingPathQLimit() throws IOException {
long size = pathsToBeTraversed.size();
// Checking that the SPS call Q exceeds the allowed limit.
if ((outstandingPathsLimit - size) <= 0) {
LOG.debug("Satisfier Q - outstanding limit:{}, current size:{}", outstandingPathsLimit, size);
throw new IOException(("Outstanding satisfier queue limit: " + outstandingPathsLimit) + " exceeded, try later!");
}
} | 3.26 |
hadoop_CyclicIteration_remove_rdh | /**
* Not supported
*/
@Override
public void remove() {
throw new UnsupportedOperationException("Not supported");
} | 3.26 |
hadoop_DefaultDataType_getValue_rdh | /**
* Get the value of the attribute.
*/
@Override
public String getValue() {
return value;
} | 3.26 |
hadoop_SecureableZone_addRecord_rdh | /**
* Adds a Record to the Zone.
*
* @param r
* The record to be added
* @see Record
*/
@Override
public void addRecord(Record r) {
if (records == null) {
records = new ArrayList<Record>();
}
super.addRecord(r);
records.add(r);
} | 3.26 |
hadoop_SecureableZone_removeRecord_rdh | /**
* Removes a record from the Zone.
*
* @param r
* The record to be removed
* @see Record
*/
@Override
public void removeRecord(Record r) {
if (records == null) {
records = new ArrayList<Record>();
}
super.removeRecord(r);
records.remove(r);
} | 3.26 |
hadoop_SecureableZone_getNXTRecord_rdh | /**
* Return a NXT record appropriate for the query.
*
* @param queryRecord
* the query record.
* @param zone
* the zone to search.
* @return the NXT record describing the insertion point.
*/
@SuppressWarnings({ "unchecked" })
public Record getNXTRecord(Record queryRecord, Zone zone) {
Collections.sort(records);
int index = Collections.binarySearch(records, queryRecord, new Comparator<Record>() {
@Override
public int compare(Record r1, Record r2) {
return r1.compareTo(r2);
}
});
if (index >= 0) {
return null;
}
index = (-index) - 1;
if (index >= records.size()) {
index = records.size() - 1;
}
Record base = records.get(index);
SetResponse sr = zone.findRecords(base.getName(), Type.ANY);
BitSet bitMap = new BitSet();
bitMap.set(Type.NXT);
for (RRset rRset : sr.answers()) {
int typeCode = rRset.getType();
if ((typeCode > 0) && (typeCode < 128)) {
bitMap.set(typeCode);
}
}
return new NXTRecord(base.getName(), DClass.IN, zone.getSOA().getMinimum(), queryRecord.getName(), bitMap);
} | 3.26 |
hadoop_ItemInfo_getRetryCount_rdh | /**
* Get the attempted retry count of the block for satisfying the policy.
*/
public int getRetryCount() {
return retryCount;
} | 3.26 |
hadoop_ItemInfo_m0_rdh | /**
* Returns the id of the file for which the policy needs to be satisfied.
*/
public long m0() {
return fileId;
} | 3.26 |
hadoop_ItemInfo_getStartPath_rdh | /**
* Returns the start path of the current file. This indicates that SPS
* was invoked on this path.
*/
public long getStartPath() {
return startPathId;
} | 3.26 |
hadoop_ItemInfo_isDir_rdh | /**
* Returns true if the tracking path is a directory, false otherwise.
*/
public boolean isDir() {
return !(startPathId == fileId);
} | 3.26 |
hadoop_ItemInfo_m1_rdh | /**
* Increments the retry count.
*/
public void m1() {
this.retryCount++;
} | 3.26 |
hadoop_ContentCounts_getSnapshotCount_rdh | // Get the number of snapshots
public long getSnapshotCount() {
return contents.get(Content.SNAPSHOT);
} | 3.26 |
hadoop_ContentCounts_getDirectoryCount_rdh | // Get the number of directories.
public long getDirectoryCount() {
return contents.get(Content.DIRECTORY);
} | 3.26 |
hadoop_ContentCounts_getSnapshotableDirectoryCount_rdh | // Get the number of snapshottable directories.
public long getSnapshotableDirectoryCount() {
return contents.get(Content.SNAPSHOTTABLE_DIRECTORY);
} | 3.26 |
hadoop_ContentCounts_getStoragespace_rdh | // Get the total of storage space usage in bytes including replication.
public long getStoragespace() {
return contents.get(Content.DISKSPACE);
} | 3.26 |
hadoop_ContentCounts_getFileCount_rdh | // Get the number of files.
public long getFileCount() {
return contents.get(Content.FILE);
} | 3.26 |
hadoop_ContentCounts_getLength_rdh | // Get the total of file length in bytes.
public long getLength() {
return contents.get(Content.LENGTH);
} | 3.26 |
hadoop_RemoteMethod_getMethod_rdh | /**
* Get the represented java method.
*
* @return {@link Method}
* @throws IOException
* If the method cannot be found.
*/
public Method getMethod() throws IOException {
try {
if (types != null) {
return protocol.getDeclaredMethod(methodName, types);
} else {
return protocol.getDeclaredMethod(methodName);
}
} catch (NoSuchMethodException e) {
// Re-throw as an IOException
LOG.error("Cannot get method {} with types {} from {}", methodName, Arrays.toString(types), protocol.getSimpleName(), e);
throw new IOException(e);
} catch (SecurityException e) {
LOG.error("Cannot access method {} with types {} from {}", methodName, Arrays.toString(types), protocol.getSimpleName(), e);
throw new IOException(e);
}
} | 3.26 |
hadoop_RemoteMethod_getParams_rdh | /**
* Generate a list of parameters for this specific location. Parameters are
* grouped into 2 categories:
* <ul>
* <li>Static parameters that are immutable across locations.
* <li>Dynamic parameters that are determined for each location by a
* RemoteParam object.
* </ul>
*
* @param context
* The context identifying the location.
* @return A list of parameters for the method customized for the location.
*/
public Object[] getParams(RemoteLocationContext context) {
if (this.params == null) {
return new Object[]{ };
}
Object[] objList = new Object[this.params.length];
for (int i = 0; i < this.params.length; i++) {
Object currentObj = this.params[i];
if (currentObj instanceof RemoteParam) {
RemoteParam paramGetter = ((RemoteParam) (currentObj));
// Map the parameter using the context
if (this.types[i] == CacheDirectiveInfo.class) {
CacheDirectiveInfo path = ((CacheDirectiveInfo) (paramGetter.getParameterForContext(context)));
objList[i] = new CacheDirectiveInfo.Builder(path).setPath(new Path(context.getDest())).build();
} else {
objList[i] = paramGetter.getParameterForContext(context);
}
} else {
objList[i] = currentObj;
}
}
return objList;
} | 3.26 |