name | code_snippet | score |
---|---|---|
hadoop_Result_isDescend_rdh | /**
* Should further directories be descended.
*
 * @return true if further directories should be descended into, false otherwise.
*/
public boolean isDescend() {
return this.descend;
} | 3.26 |
hadoop_Result_combine_rdh | /**
* Returns the combination of this and another result.
*
* @param other
* other.
* @return result.
*/
public Result combine(Result other) {
return new Result(this.isPass() && other.isPass(), this.isDescend() && other.isDescend());
} | 3.26 |
hadoop_Result_negate_rdh | /**
* Negate this result.
*
* @return Result.
*/
public Result negate() {
return new Result(!this.isPass(), this.isDescend());
} | 3.26 |
hadoop_PathHandle_toByteArray_rdh | /**
*
* @return Serialized form in bytes.
*/
default byte[] toByteArray() {
ByteBuffer bb = bytes();
byte[] ret = new byte[bb.remaining()];
bb.get(ret);
return ret;
} | 3.26 |
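The snippet above sizes the array with `ByteBuffer.remaining()` rather than the buffer's capacity, so only the bytes between the buffer's position and its limit are copied. A minimal, self-contained sketch of that draining pattern (illustrative only, not the PathHandle API itself):
```java
import java.nio.ByteBuffer;

public class DrainBufferSketch {
  public static void main(String[] args) {
    ByteBuffer bb = ByteBuffer.allocate(16);
    bb.put(new byte[] {1, 2, 3, 4});
    bb.flip();   // position = 0, limit = 4
    bb.get();    // consume one byte; remaining() is now 3

    byte[] ret = new byte[bb.remaining()];
    bb.get(ret); // copies only the 3 remaining bytes
    System.out.println(ret.length); // prints 3
  }
}
```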
hadoop_FileUnderConstructionFeature_updateLengthOfLastBlock_rdh | /**
* Update the length for the last block
*
* @param lastBlockLength
* The length of the last block reported from client
* @throws IOException
*/
void updateLengthOfLastBlock(INodeFile f, long lastBlockLength) throws IOException {
BlockInfo lastBlock = f.getLastBlock();
assert lastBlock != null : ("The last block for path " + f.getFullPathName()) + " is null when updating its length";
assert !lastBlock.isComplete() : ("The last block for path " + f.getFullPathName()) + " is not under-construction when updating its length";
lastBlock.setNumBytes(lastBlockLength);
} | 3.26 |
hadoop_FileUnderConstructionFeature_m1_rdh | /**
* When deleting a file in the current fs directory, and the file is contained
* in a snapshot, we should delete the last block if it's under construction
* and its size is 0.
*/
void m1(final INodeFile f, final BlocksMapUpdateInfo collectedBlocks) {
final BlockInfo[] blocks = f.getBlocks();
if (((blocks != null) && (blocks.length > 0)) && (!blocks[blocks.length - 1].isComplete())) {
BlockInfo lastUC = blocks[blocks.length - 1];
if (lastUC.getNumBytes() == 0) {// this is a 0-sized block. do not need check its UC state here
collectedBlocks.addDeleteBlock(lastUC);
f.removeLastBlock(lastUC);
}
}
} | 3.26 |
hadoop_LocatedFileStatusFetcher_registerError_rdh | /**
 * Register fatal errors - for example, an IOException while accessing a file
 * or a full execution queue.
*/
private void registerError(Throwable t) {
LOG.debug("Error", t);
lock.lock();
try {
if (unknownError == null) {
unknownError = t;
condition.signal();
}
} finally {
lock.unlock();
}
} | 3.26 |
hadoop_LocatedFileStatusFetcher_addResultStatistics_rdh | /**
* Add the statistics of an individual thread's scan.
*
* @param stats
* possibly null statistics.
*/
private void addResultStatistics(IOStatistics stats) {
if (stats != null) {
// demand creation of IO statistics.
synchronized(this) {
LOG.debug("Adding IOStatistics: {}", stats);
if (iostats == null) {
// demand create the statistics
iostats = snapshotIOStatistics(stats);
} else {
iostats.aggregate(stats);
}
}
}
} | 3.26 |
hadoop_LocatedFileStatusFetcher_getIOStatistics_rdh | /**
* Return any IOStatistics collected during listing.
*
* @return IO stats accrued.
*/
@Override
public synchronized IOStatistics getIOStatistics() {
return iostats;
} | 3.26 |
hadoop_LocatedFileStatusFetcher_getFileStatuses_rdh | /**
* Start executing and return FileStatuses based on the parameters specified.
*
* @return fetched file statuses
* @throws InterruptedException
* interruption waiting for results.
* @throws IOException
* IO failure or other error.
* @throws InvalidInputException
* on an invalid input and the old API
* @throws org.apache.hadoop.mapreduce.lib.input.InvalidInputException
* on an
* invalid input and the new API.
*/
public Iterable<FileStatus> getFileStatuses() throws InterruptedException, IOException {
// Increment to make sure a race between the first thread completing and the
// rest being scheduled does not lead to a termination.
runningTasks.incrementAndGet();
for (Path v1 : f0) {
LOG.debug("Queuing scan of directory {}", v1);
runningTasks.incrementAndGet();
ListenableFuture<ProcessInitialInputPathCallable.Result> future = exec.submit(new ProcessInitialInputPathCallable(v1, conf, inputFilter));
Futures.addCallback(future, processInitialInputPathCallback, MoreExecutors.directExecutor());
}
runningTasks.decrementAndGet();
lock.lock();
try {
LOG.debug("Waiting scan completion");
while ((runningTasks.get() != 0) && (unknownError == null)) {
condition.await();
}
} finally {
lock.unlock();
// either the scan completed or an error was raised.
// in the case of an error shutting down the executor will interrupt all
// active threads, which can add noise to the logs.
LOG.debug("Scan complete: shutting down");
this.exec.shutdownNow();
}
if (this.unknownError != null) {
LOG.debug("Scan failed", this.unknownError);
if (this.unknownError instanceof Error) {
throw ((Error) (this.unknownError));
} else if (this.unknownError instanceof RuntimeException) {
throw ((RuntimeException) (this.unknownError));
} else if (this.unknownError instanceof IOException) {
throw ((IOException) (this.unknownError));
} else if (this.unknownError instanceof InterruptedException) {
throw ((InterruptedException) (this.unknownError));
} else {
throw new IOException(this.unknownError);
}
}
if (!this.invalidInputErrors.isEmpty()) {
LOG.debug("Invalid Input Errors raised");
for (IOException error : invalidInputErrors) {
LOG.debug("Error", error);
}
if (this.newApi) {
throw new InvalidInputException(invalidInputErrors);
} else {
throw new InvalidInputException(invalidInputErrors);
}
}
return Iterables.concat(resultQueue);
} | 3.26 |
hadoop_LocatedFileStatusFetcher_registerInvalidInputError_rdh | /**
* Collect misconfigured Input errors. Errors while actually reading file info
* are reported immediately.
*/
private void registerInvalidInputError(List<IOException> errors) {
synchronized(this) {
this.invalidInputErrors.addAll(errors);
}
} | 3.26 |
hadoop_LongBitFormat_combine_rdh | /**
 * Combine the value into the record.
*/
public long combine(long value, long record) {
if (value < f0) {
throw new IllegalArgumentException("Illegal value: " + NAME + " = " + value + " < MIN = " + f0);
}
if (value > MAX) {
throw new IllegalArgumentException("Illegal value: " + NAME + " = " + value + " > MAX = " + MAX);
}
return (record & (~MASK)) | (value << OFFSET);
} | 3.26 |
hadoop_LongBitFormat_retrieve_rdh | /**
* Retrieve the value from the record.
*/
public long retrieve(long record) {
return (record & MASK) >>> OFFSET;
} | 3.26 |
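The two methods above implement a standard bit-field pack/unpack over a long record: `combine` clears the field with `~MASK` and ORs in the shifted value, `retrieve` masks and shifts back. A self-contained sketch with a hypothetical 8-bit field at offset 16 (the real NAME/MIN/MAX/OFFSET/MASK values live in the enclosing LongBitFormat instance and are not shown here):
```java
public class BitFieldSketch {
  // Hypothetical layout: an 8-bit field stored at bit offset 16 of a long.
  static final int OFFSET = 16;
  static final long MASK = 0xFFL << OFFSET;

  static long combine(long value, long record) {
    // clear the field, then OR in the shifted value
    return (record & ~MASK) | (value << OFFSET);
  }

  static long retrieve(long record) {
    // mask the field and shift it back down
    return (record & MASK) >>> OFFSET;
  }

  public static void main(String[] args) {
    long record = combine(42L, 0L);
    System.out.println(retrieve(record)); // prints 42
  }
}
```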
hadoop_BulkDeleteRetryHandler_onDeleteThrottled_rdh | /**
* Handle a delete throttling event.
*
* @param deleteRequest
* request which failed.
*/
private void onDeleteThrottled(final DeleteObjectsRequest deleteRequest) {
final List<ObjectIdentifier> keys = deleteRequest.delete().objects();
final int size = keys.size();
m0(STORE_IO_THROTTLED, size);
instrumentation.addValueToQuantiles(STORE_IO_THROTTLE_RATE, size);
THROTTLE_LOG.info("Bulk delete {} keys throttled -first key = {}; last = {}", size, keys.get(0).key(), keys.get(size - 1).key());
} | 3.26 |
hadoop_BulkDeleteRetryHandler_m0_rdh | /**
* Increment a statistic by a specific value.
* This increments both the instrumentation and storage statistics.
*
* @param statistic
* The operation to increment
* @param count
* the count to increment
*/
protected void m0(Statistic statistic, long count) {
instrumentation.incrementCounter(statistic, count);
} | 3.26 |
hadoop_BulkDeleteRetryHandler_isSymptomOfBrokenConnection_rdh | /**
* Does this error indicate that the connection was ultimately broken while
 * the XML response was being parsed? This appears to be a symptom of the far
 * end blocking the response (i.e. server-side throttling) until
 * the client eventually times out.
*
* @param ex
* exception received.
* @return true if this exception is considered a sign of a broken connection.
*/
private boolean isSymptomOfBrokenConnection(final Exception ex) {
return ((ex instanceof AWSClientIOException) && (ex.getCause() instanceof SdkClientException)) && ex.getMessage().contains(XML_PARSE_BROKEN);
} | 3.26 |
hadoop_FederationPolicyUtils_instantiatePolicyManager_rdh | /**
 * A utility method to instantiate a policy manager class given the type
* (class name) from {@link SubClusterPolicyConfiguration}.
*
* @param newType
* class name of the policy manager to create
* @return Policy manager
* @throws FederationPolicyInitializationException
* if fails
*/
public static FederationPolicyManager instantiatePolicyManager(String newType) throws FederationPolicyInitializationException {
FederationPolicyManager federationPolicyManager = null;
try {
// create policy instance and set queue
Class<?> c = Class.forName(newType);
federationPolicyManager = ((FederationPolicyManager) (c.newInstance()));
} catch (ClassNotFoundException e) {
throw new FederationPolicyInitializationException(e);
} catch (InstantiationException e) {
throw new FederationPolicyInitializationException(e);
} catch (IllegalAccessException e) {
throw new FederationPolicyInitializationException(e);
}
return federationPolicyManager;
} | 3.26 |
hadoop_FederationPolicyUtils_validateSubClusterAvailability_rdh | /**
* Validate if there is any active subcluster that is not blacklisted, it will
* throw an exception if there are no usable subclusters.
*
* @param activeSubClusters
* the list of subClusters as identified by
* {@link SubClusterId} currently active.
* @param blackListSubClusters
* the list of subClusters as identified by
* {@link SubClusterId} to blackList from the selection of the home
* subCluster.
* @throws FederationPolicyException
* if there are no usable subclusters.
*/
public static void validateSubClusterAvailability(Collection<SubClusterId> activeSubClusters, Collection<SubClusterId> blackListSubClusters) throws FederationPolicyException {
if ((activeSubClusters != null) && (!activeSubClusters.isEmpty())) {
if (blackListSubClusters == null) {
return;
}
for (SubClusterId scId : activeSubClusters) {
if (!blackListSubClusters.contains(scId)) {
// There is at least one active subcluster
return;
}
}
}
throw new FederationPolicyException(FederationPolicyUtils.NO_ACTIVE_SUBCLUSTER_AVAILABLE);
} | 3.26 |
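A hedged usage sketch of the contract above: the call throws only when no active, non-blacklisted subcluster remains. The package names and `SubClusterId.newInstance` factory are assumptions taken from the YARN federation module, not shown in the snippet.
```java
import java.util.Collections;
import java.util.List;

// Assumed packages; not shown in the snippet above.
import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyUtils;
import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyException;
import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;

public class SubClusterAvailabilitySketch {
  public static void main(String[] args) {
    SubClusterId sc1 = SubClusterId.newInstance("SC-1");
    List<SubClusterId> active = Collections.singletonList(sc1);
    try {
      // The only active subcluster is also blacklisted, so this throws.
      FederationPolicyUtils.validateSubClusterAvailability(active, active);
    } catch (FederationPolicyException e) {
      System.out.println("no usable subcluster: " + e.getMessage());
    }
  }
}
```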
hadoop_FederationPolicyUtils_loadPolicyConfiguration_rdh | /**
* Get Federation policy configuration from state store, using default queue
* and configuration as fallback.
*
* @param queue
* the queue of the application
* @param conf
* the YARN configuration
* @param federationFacade
* state store facade
* @return SubClusterPolicyConfiguration recreated
*/
public static SubClusterPolicyConfiguration loadPolicyConfiguration(String queue, Configuration conf, FederationStateStoreFacade federationFacade) {
// The facade might cache this request, based on its parameterization
SubClusterPolicyConfiguration configuration = null;
if (queue != null) {
try {
configuration = federationFacade.getPolicyConfiguration(queue);
} catch (YarnException e) {
LOG.warn((("Failed to get policy from FederationFacade with queue " + queue) + ": ") + e.getMessage());
}
}
// If there is no policy configured for this queue, fallback to the baseline
// policy that is configured either in the store or via XML config
if (configuration == null) {
LOG.info("No policy configured for queue {} in StateStore," + " fallback to default queue", queue);
queue = YarnConfiguration.DEFAULT_FEDERATION_POLICY_KEY;
try {
configuration = federationFacade.getPolicyConfiguration(queue);
} catch (YarnException e) {
LOG.warn("No fallback behavior defined in store, defaulting to XML " + "configuration fallback behavior.");
}
}
// or from XML conf otherwise.
if (configuration == null) {
LOG.info("No policy configured for default queue {} in StateStore," + " fallback to local config", queue);
String defaultFederationPolicyManager = conf.get(YarnConfiguration.FEDERATION_POLICY_MANAGER, YarnConfiguration.DEFAULT_FEDERATION_POLICY_MANAGER);
String defaultPolicyParamString = conf.get(YarnConfiguration.FEDERATION_POLICY_MANAGER_PARAMS, YarnConfiguration.DEFAULT_FEDERATION_POLICY_MANAGER_PARAMS);
ByteBuffer defaultPolicyParam = ByteBuffer.wrap(defaultPolicyParamString.getBytes(StandardCharsets.UTF_8));
configuration = SubClusterPolicyConfiguration.newInstance(queue, defaultFederationPolicyManager, defaultPolicyParam);
}
return configuration;
} | 3.26 |
hadoop_FederationPolicyUtils_getWeightedRandom_rdh | /**
* Select a random bin according to the weight array for the bins. Only bins
* with positive weights will be considered. If no positive weight found,
* return -1.
*
* @param weights
* the weight array
* @return the index of the sample in the array
*/
public static int getWeightedRandom(ArrayList<Float> weights) {
int i;
float totalWeight = 0;
for (i = 0; i < weights.size(); i++) {
if (weights.get(i) > 0) {
totalWeight += weights.get(i);
}
}
if (totalWeight == 0) {
return -1;
}
float samplePoint = rand.nextFloat() * totalWeight;
int lastIndex = 0;
for (i = 0; i < weights.size(); i++) {
if (weights.get(i) > 0) {
if (samplePoint <= weights.get(i)) {
return i;
} else {
lastIndex = i;
samplePoint -= weights.get(i);
}
}
}
// This can only happen if samplePoint is very close to totalWeight and
// float rounding kicks in during subtractions
return lastIndex;
} | 3.26 |
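A hedged usage sketch of the selection contract above: non-positive weights are never chosen, the chance of each index is proportional to its positive weight, and -1 is returned only when no weight is positive. The import path is an assumption, not shown in the snippet.
```java
import java.util.ArrayList;
// Package name assumed; not shown in the snippet above.
import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyUtils;

public class WeightedRandomSketch {
  public static void main(String[] args) {
    ArrayList<Float> weights = new ArrayList<>();
    weights.add(0.0f); // index 0: never selected (non-positive weight)
    weights.add(0.9f); // index 1: selected roughly 90% of the time
    weights.add(0.1f); // index 2: selected roughly 10% of the time
    int index = FederationPolicyUtils.getWeightedRandom(weights);
    System.out.println(index); // prints 1 or 2; -1 only if no weight is positive
  }
}
```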
hadoop_FederationPolicyUtils_m0_rdh | /**
* Get AMRMProxy policy from state store, using default queue and
* configuration as fallback.
*
* @param queue
* the queue of the application
* @param oldPolicy
* the previous policy instance (can be null)
* @param conf
* the YARN configuration
* @param federationFacade
* state store facade
* @param homeSubClusterId
* home sub-cluster id
* @return FederationAMRMProxyPolicy recreated
* @throws FederationPolicyInitializationException
* if fails
*/
public static FederationAMRMProxyPolicy m0(String queue, FederationAMRMProxyPolicy oldPolicy, Configuration conf, FederationStateStoreFacade federationFacade, SubClusterId homeSubClusterId) throws FederationPolicyInitializationException {
// Local policy and its configuration
SubClusterPolicyConfiguration configuration = loadPolicyConfiguration(queue, conf, federationFacade);
// Instantiate the policyManager and get policy
FederationPolicyInitializationContext context = new FederationPolicyInitializationContext(configuration, federationFacade.getSubClusterResolver(), federationFacade, homeSubClusterId);
LOG.info("Creating policy manager of type: " + configuration.getType());
FederationPolicyManager federationPolicyManager = instantiatePolicyManager(configuration.getType());
// set queue, reinit policy if required (implementation lazily check
// content of conf), and cache it
federationPolicyManager.setQueue(configuration.getQueue());
return federationPolicyManager.getAMRMPolicy(context, oldPolicy);
} | 3.26 |
hadoop_AllocationFileParser_getReservationPlanner_rdh | // Reservation global configuration knobs
public Optional<String> getReservationPlanner() {
return getTextValue(f0);
} | 3.26 |
hadoop_OperationAuditor_noteSpanReferenceLost_rdh | /**
* Span reference lost from GC operations.
* This is only called when an attempt is made to retrieve on
* the active thread or when a prune operation is cleaning up.
*
* @param threadId
* thread ID.
*/
default void noteSpanReferenceLost(long threadId) {
} | 3.26 |
hadoop_OperationAuditor_checkAccess_rdh | /**
* Check for permission to access a path.
* The path is fully qualified and the status is the
* status of the path.
* This is called from the {@code FileSystem.access()} command
* and is a soft permission check used by Hive.
*
* @param path
* path to check
* @param status
* status of the path.
* @param mode
* access mode.
* @return true if access is allowed.
* @throws IOException
* failure
*/
default boolean checkAccess(Path path, S3AFileStatus status, FsAction mode) throws IOException {
return true;
} | 3.26 |
hadoop_HttpFSExceptionProvider_m0_rdh | /**
* Logs the HTTP status code and exception in HttpFSServer's log.
*
* @param status
* HTTP status code.
* @param throwable
* exception thrown.
*/
@Override
protected void m0(Response.Status status, Throwable throwable) {
String method = MDC.get("method");
String path = MDC.get("path");
String message = getOneLineMessage(throwable);
AUDIT_LOG.warn("FAILED [{}:{}] response [{}] {}",
new Object[]{ method, path, status, message });
LOG.warn("[{}:{}] response [{}] {}", method, path, status, message, throwable);
} | 3.26 |
hadoop_HttpFSExceptionProvider_toResponse_rdh | /**
* Maps different exceptions thrown by HttpFSServer to HTTP status codes.
* <ul>
* <li>SecurityException : HTTP UNAUTHORIZED</li>
* <li>FileNotFoundException : HTTP NOT_FOUND</li>
 * <li>IOException : HTTP INTERNAL_SERVER_ERROR</li>
 * <li>UnsupportedOperationException : HTTP BAD_REQUEST</li>
* <li>all other exceptions : HTTP INTERNAL_SERVER_ERROR </li>
* </ul>
*
* @param throwable
* exception thrown.
* @return mapped HTTP status code
*/
@Override
public Response toResponse(Throwable throwable) {
Response.Status status;
if (throwable instanceof FileSystemAccessException) {
throwable = throwable.getCause();
}
if (throwable instanceof ContainerException) {
throwable = throwable.getCause();
}
if (throwable instanceof SecurityException) {
status = Status.UNAUTHORIZED;
} else if (throwable instanceof FileNotFoundException) {
status = Status.NOT_FOUND;
} else if (throwable instanceof IOException) {
status = Status.INTERNAL_SERVER_ERROR;
logErrorFully(status, throwable);
} else if (throwable instanceof UnsupportedOperationException) {
status = Status.BAD_REQUEST;
logErrorFully(status, throwable);
} else if (throwable instanceof IllegalArgumentException) {
status = Status.BAD_REQUEST;
logErrorFully(status, throwable);
} else {
status = Status.INTERNAL_SERVER_ERROR;
logErrorFully(status, throwable);
}
return createResponse(status, throwable);
} | 3.26 |
hadoop_AddMountAttributes_m1_rdh | /**
* Retrieve mount table object with all attributes derived from this object.
*
* @return MountTable object with updated attributes.
* @throws IOException
* If mount table instantiation fails.
*/
public MountTable m1() throws IOException {
String normalizedMount = RouterAdmin.normalizeFileSystemPath(this.getMount());
return getMountTableForAddRequest(normalizedMount);
} | 3.26 |
hadoop_AddMountAttributes_getNewOrUpdatedMountTableEntryWithAttributes_rdh | /**
* Retrieve mount table object with all attributes derived from this object.
* The returned mount table could be either new or existing one with updated attributes.
*
* @param existingEntry
* Existing mount table entry. If null, new mount table object is created,
* otherwise the existing mount table object is updated.
* @return MountTable object with updated attributes.
* @throws IOException
* If mount table instantiation fails.
*/
public MountTable getNewOrUpdatedMountTableEntryWithAttributes(MountTable existingEntry) throws IOException {
if (existingEntry == null) {
return getMountTableForAddRequest(this.mount);
} else {
// Update the existing entry if it exists
for (String nsId : this.getNss()) {
if (!existingEntry.addDestination(nsId, this.getDest())) {
System.err.println((("Cannot add destination at " + nsId) + " ") + this.getDest());
return null;
}
}
updateCommonAttributes(existingEntry);
return existingEntry;
}
} | 3.26 |
hadoop_AddMountAttributes_getMountTableForAddRequest_rdh | /**
* Create a new mount table object from the given mount point and update its attributes.
*
* @param mountSrc
* mount point src.
* @return MountTable object with updated attributes.
* @throws IOException
* If mount table instantiation fails.
 */
private MountTable getMountTableForAddRequest(String mountSrc) throws IOException {
Map<String, String> destMap = new LinkedHashMap<>();
for (String ns : this.getNss()) {
destMap.put(ns, this.getDest());
}
MountTable newEntry = MountTable.newInstance(mountSrc, destMap);
updateCommonAttributes(newEntry);
return newEntry;
} | 3.26 |
hadoop_AddMountAttributes_updateCommonAttributes_rdh | /**
* Common attributes like read-only, fault-tolerant, dest order, owner, group, mode etc are
* updated for the given mount table object.
*
* @param existingEntry
* Mount table object.
 */
private void updateCommonAttributes(MountTable existingEntry) {
if (this.isReadonly()) {
existingEntry.setReadOnly(true);
}
if (this.isFaultTolerant()) {
existingEntry.setFaultTolerant(true);
}
if (this.getOrder() != null) {
existingEntry.setDestOrder(this.getOrder());
}
RouterAdmin.ACLEntity mountAclInfo = this.getAclInfo();
// Update ACL info of mount table entry
if (mountAclInfo.getOwner() != null) {
existingEntry.setOwnerName(mountAclInfo.getOwner());
}
if (mountAclInfo.getGroup() != null) {
existingEntry.setGroupName(mountAclInfo.getGroup());
}
if (mountAclInfo.getMode() != null) {
existingEntry.setMode(mountAclInfo.getMode());
}
existingEntry.validate();
} | 3.26 |
hadoop_MountInterface_getValue_rdh | /**
*
* @return the int value representing the procedure.
*/
public int getValue() {
return ordinal();
} | 3.26 |
hadoop_MountInterface_fromValue_rdh | /**
* The procedure of given value.
*
* @param value
* specifies the procedure index
* @return the procedure corresponding to the value.
*/
public static MNTPROC fromValue(int value) {
if ((value < 0) || (value >= values().length)) {
return null;
}
return values()[value];
} | 3.26 |
hadoop_BlockReaderLocalMetrics_addShortCircuitReadLatency_rdh | /**
* Adds short circuit read elapsed time.
*/
public void addShortCircuitReadLatency(final long latency) {
shortCircuitReadRollingAverages.add(SHORT_CIRCUIT_LOCAL_READS_METRIC_VALUE_NAME, latency);
} | 3.26 |
hadoop_BlockReaderLocalMetrics_collectThreadLocalStates_rdh | /**
* Collects states maintained in {@link ThreadLocal}, if any.
*/
public void collectThreadLocalStates() {
shortCircuitReadRollingAverages.collectThreadLocalStates();
} | 3.26 |
hadoop_BlockReaderLocalMetrics_getShortCircuitReadRollingAverages_rdh | /**
* Get the MutableRollingAverage metric for testing only.
*
 * @return the short-circuit read rolling averages.
 */
@VisibleForTesting
public MutableRollingAverages getShortCircuitReadRollingAverages() {
return shortCircuitReadRollingAverages;
} | 3.26 |
hadoop_PublishedConfiguration_asConfiguration_rdh | /**
* Convert to Hadoop XML
*
 * @return the configuration as a Hadoop Configuration
*/
public Configuration asConfiguration() {
Configuration conf = new Configuration(false);
try {
ConfigHelper.addConfigMap(conf, f0, "");
} catch (BadConfigException e) {
// triggered on a null value; switch to a runtime (and discard the stack)
throw new RuntimeException(e.toString());
}
return conf;
} | 3.26 |
hadoop_PublishedConfiguration_asProperties_rdh | /**
* Convert values to properties
*
* @return a property file
*/
public Properties asProperties() {
Properties props = new Properties();
props.putAll(f0);
return props;
} | 3.26 |
hadoop_PublishedConfiguration_shallowCopy_rdh | /**
 * This makes a copy without the nested content, so it is suitable
* for returning as part of the list of a parent's values
*
* @return the copy
*/
public PublishedConfiguration shallowCopy() {
PublishedConfiguration that = new PublishedConfiguration();
that.description = this.description;
that.updated = this.updated;
that.updatedTime = this.updatedTime;
return that;
} | 3.26 |
hadoop_PublishedConfiguration_asJson_rdh | /**
* Return the values as json string
*
* @return the JSON representation
* @throws IOException
* marshalling failure
*/
public String asJson() throws IOException {
ObjectMapper mapper = new ObjectMapper();
mapper.configure(SerializationFeature.INDENT_OUTPUT, true);
String json = mapper.writeValueAsString(f0);
return json;
} | 3.26 |
hadoop_PublishedConfiguration_m1_rdh | /**
* Set the values from an iterable (this includes a Hadoop Configuration
* and Java properties object).
* Any existing value set is discarded
*
* @param entries
* entries to put
*/
public void m1(Iterable<Map.Entry<String, String>> entries) {
this.f0 = new HashMap<String, String>();
for (Map.Entry<String, String> entry : entries) {
this.f0.put(entry.getKey(), entry.getValue());
}
} | 3.26 |
hadoop_LoggedNetworkTopology_setUnknownAttribute_rdh | // for input parameter ignored.
@JsonAnySetter
public void setUnknownAttribute(String attributeName, Object ignored) {
if (!alreadySeenAnySetterAttributes.contains(attributeName)) {
alreadySeenAnySetterAttributes.add(attributeName);
System.err.println(("In LoggedJob, we saw the unknown attribute " + attributeName) + ".");
}
} | 3.26 |
hadoop_VirtualInputFormat_getSplits_rdh | // Number of splits = Number of mappers. Creates fakeSplits to launch
// the required number of mappers
@Override
public List<InputSplit> getSplits(JobContext job) throws IOException {
Configuration conf = job.getConfiguration();
int numMappers = conf.getInt(CreateFileMapper.NUM_MAPPERS_KEY, -1);
if (numMappers == (-1)) {
throw new IOException("Number of mappers should be provided as input");
}
List<InputSplit> splits = new ArrayList<InputSplit>(numMappers);
for (int i = 0; i < numMappers; i++) {
splits.add(new VirtualInputSplit());
}
return splits;
} | 3.26 |
hadoop_MutableRatesWithAggregation_add_rdh | /**
* Add a rate sample for a rate metric.
*
* @param name
* of the rate metric
* @param elapsed
* time
*/
public void add(String name, long elapsed) {
ConcurrentMap<String, ThreadSafeSampleStat> localStats = threadLocalMetricsMap.get();
if (localStats == null) {
localStats = new ConcurrentHashMap<>();
threadLocalMetricsMap.set(localStats);
weakReferenceQueue.add(new WeakReference<>(localStats));
}
ThreadSafeSampleStat stat = localStats.get(name);
if (stat == null) {
stat = new ThreadSafeSampleStat();
localStats.put(name, stat);
}
stat.add(elapsed);
} | 3.26 |
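A hedged sketch of the recording pattern above. How `rates` is created is an assumption (typically through a MetricsRegistry); the per-thread samples are folded into the global MutableRate values by the class itself (e.g. via the package-private collectThreadLocalStates()), not by caller code.
```java
import org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation;

public class RatesUsageSketch {
  // Assumption: "rates" is provided by the metrics system; only the add()
  // call below corresponds to the snippet above.
  void recordTimed(MutableRatesWithAggregation rates) {
    long start = System.nanoTime();
    doWork();
    long elapsedMillis = (System.nanoTime() - start) / 1_000_000;
    // Recorded into this thread's ThreadSafeSampleStat; it is aggregated into
    // the global MutableRate for "doWork" when thread-local state is collected.
    rates.add("doWork", elapsedMillis);
  }

  private void doWork() {
    // hypothetical operation being timed
  }
}
```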
hadoop_MutableRatesWithAggregation_init_rdh | /**
* Initialize the registry with all rate names passed in.
* This is an alternative to the above init function since this metric
* can be used more than just for rpc name.
*
* @param names
* the array of all rate names
*/
public void init(String[] names) {
for (String name : names) {
addMetricIfNotExists(name);
}
} | 3.26 |
hadoop_MutableRatesWithAggregation_m0_rdh | /**
* Initialize the registry with all the methods in a protocol
* so they all show up in the first snapshot.
* Convenient for JMX implementations.
*
* @param protocol
* the protocol class
*/
public void m0(Class<?> protocol) {
if (protocolCache.contains(protocol)) {
return;
}
protocolCache.add(protocol);
for (Method method : protocol.getMethods()) {
String name = method.getName();
LOG.debug(name);
addMetricIfNotExists(name);
}
} | 3.26 |
hadoop_MutableRatesWithAggregation_collectThreadLocalStates_rdh | /**
* Collects states maintained in {@link ThreadLocal}, if any.
*/
synchronized void collectThreadLocalStates() {
final ConcurrentMap<String, ThreadSafeSampleStat> localStats = threadLocalMetricsMap.get();
if (localStats != null) {
aggregateLocalStatesToGlobalMetrics(localStats);
}
} | 3.26 |
hadoop_MutableRatesWithAggregation_aggregateLocalStatesToGlobalMetrics_rdh | /**
* Aggregates the thread's local samples into the global metrics. The caller
* should ensure its thread safety.
*/
private void aggregateLocalStatesToGlobalMetrics(final ConcurrentMap<String, ThreadSafeSampleStat> localStats) {
for (Map.Entry<String, ThreadSafeSampleStat> entry : localStats.entrySet()) {
String name = entry.getKey();
MutableRate globalMetric = addMetricIfNotExists(name);
entry.getValue().snapshotInto(globalMetric);
}
} | 3.26 |
hadoop_PartitionInfo_addTo_rdh | /**
* This method will generate a new PartitionInfo object based on two PartitionInfo objects.
* The combination process is mainly based on the Resources. Add method.
*
* @param left
* left PartitionInfo Object.
* @param right
* right PartitionInfo Object.
* @return new PartitionInfo Object.
*/
public static PartitionInfo addTo(PartitionInfo left, PartitionInfo right) {
Resource leftResource = Resource.newInstance(0, 0);
if ((left != null) && (left.getResourceAvailable() != null)) {
ResourceInfo leftResourceInfo = left.getResourceAvailable();
leftResource = leftResourceInfo.getResource();
}
Resource rightResource = Resource.newInstance(0, 0);
if ((right != null) && (right.getResourceAvailable() != null)) {
ResourceInfo rightResourceInfo = right.getResourceAvailable();
rightResource = rightResourceInfo.getResource();
}
Resource resource = Resources.addTo(leftResource, rightResource);
return new PartitionInfo(new ResourceInfo(resource));
} | 3.26 |
hadoop_GenericEventTypeMetricsManager_create_rdh | // Construct a GenericEventTypeMetrics for dispatcher
public static <T extends Enum<T>> GenericEventTypeMetrics create(String dispatcherName, Class<T> eventTypeClass) {
MetricsInfo metricsInfo = info("GenericEventTypeMetrics for " + eventTypeClass.getName(), "Metrics for " + dispatcherName);
return new GenericEventTypeMetrics.EventTypeMetricsBuilder<T>().setMs(DefaultMetricsSystem.instance()).setInfo(metricsInfo).setEnumClass(eventTypeClass).setEnums(eventTypeClass.getEnumConstants()).build().registerMetrics();
} | 3.26 |
hadoop_AlwaysRestartPolicy_hasCompletedSuccessfully_rdh | /**
* This is always false since these components never terminate
*
* @param component
 * @return always false.
 */
@Override
public boolean hasCompletedSuccessfully(Component component) {
return false;
} | 3.26 |
hadoop_AlwaysRestartPolicy_hasCompleted_rdh | /**
* This is always false since these components never terminate
*
* @param component
 * @return always false.
 */
@Override
public boolean hasCompleted(Component component) {
return false;
} | 3.26 |
hadoop_FlowActivityColumnPrefix_getColumnPrefix_rdh | /**
*
* @return the column name value
*/
public String getColumnPrefix() {
return columnPrefix;
} | 3.26 |
hadoop_DumpUtil_bytesToHex_rdh | /**
* Convert bytes into format like 0x02 02 00 80.
* If limit is negative or too large, then all bytes will be converted.
*
* @param bytes
* bytes.
* @param limit
* limit.
* @return bytesToHex.
*/
public static String bytesToHex(byte[] bytes, int limit) {
if ((limit <= 0) || (limit > bytes.length)) {
limit = bytes.length;
}
int len = limit * 2;
len += limit; // for ' ' appended for each char
len += 2; // for '0x' prefix
char[] hexChars = new char[len];
hexChars[0] = '0';
hexChars[1] = 'x';
for (int j = 0; j < limit; j++) {
int v = bytes[j] & 0xff;
hexChars[(j * 3) + 2] = HEX_CHARS[v >>> 4];
hexChars[(j * 3) + 3] = HEX_CHARS[v & 0xf];
hexChars[(j * 3) + 4] = ' ';
}
return new String(hexChars);
} | 3.26 |
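An illustrative call matching the format documented above; the DumpUtil package path is an assumption, and note that a trailing space follows the last byte.
```java
// Package name assumed; not shown in the snippet above.
import org.apache.hadoop.io.erasurecode.rawcoder.util.DumpUtil;

public class BytesToHexSketch {
  public static void main(String[] args) {
    byte[] data = {0x02, 0x02, 0x00, (byte) 0x80};
    // limit <= 0 means "convert everything": prints "0x02 02 00 80 "
    System.out.println(DumpUtil.bytesToHex(data, -1));
    // A positive limit smaller than the array length truncates the dump:
    System.out.println(DumpUtil.bytesToHex(data, 2)); // "0x02 02 "
  }
}
```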
hadoop_DumpUtil_dumpChunks_rdh | /**
* Print data in hex format in an array of chunks.
*
* @param header
* header.
* @param chunks
* chunks.
*/
public static void dumpChunks(String header, ECChunk[] chunks) {
System.out.println();
System.out.println(header);
for (int i = 0; i < chunks.length; i++) {
dumpChunk(chunks[i]);
}
System.out.println();
} | 3.26 |
hadoop_DumpUtil_dumpChunk_rdh | /**
* Print data in hex format in a chunk.
*
* @param chunk
* chunk.
*/
public static void dumpChunk(ECChunk chunk) {
String str;
if (chunk == null) {
str = "<EMPTY>";
} else {
byte[] bytes = chunk.toBytesArray();
str = DumpUtil.bytesToHex(bytes, 16);
}
System.out.println(str);
} | 3.26 |
hadoop_CommitUtilsWithMR_getTempJobAttemptPath_rdh | /**
* Compute a path for temporary data associated with a job.
* This data is <i>not magic</i>
*
* @param jobUUID
* unique Job ID.
* @param out
* output directory of job
* @param appAttemptId
* the ID of the application attempt for this job.
* @return the path to store temporary job attempt data.
*/
public static Path getTempJobAttemptPath(String jobUUID, Path out, final int appAttemptId) {
return new Path(new Path(out, TEMP_DATA), formatAppAttemptDir(jobUUID, appAttemptId));
} | 3.26 |
hadoop_CommitUtilsWithMR_getAppAttemptId_rdh | /**
* Get the Application Attempt ID for this job.
*
* @param context
* the context to look in
* @return the Application Attempt ID for a given job, or 0
*/
public static int getAppAttemptId(JobContext context) {
return context.getConfiguration().getInt(MRJobConfig.APPLICATION_ATTEMPT_ID, 0);
} | 3.26 |
hadoop_CommitUtilsWithMR_formatAppAttemptDir_rdh | /**
* Build the name of the job attempt directory.
*
* @param jobUUID
* unique Job ID.
* @param appAttemptId
* the ID of the application attempt for this job.
* @return the directory tree for the application attempt
*/
public static String formatAppAttemptDir(String jobUUID, int appAttemptId) {
return formatJobDir(jobUUID) + String.format("/%02d", appAttemptId);
} | 3.26 |
hadoop_CommitUtilsWithMR_jobIdString_rdh | /**
* Get a string value of a job ID; returns meaningful text if there is no ID.
*
* @param context
* job context
* @return a string for logs
*/
public static String jobIdString(JobContext context) {
JobID jobID = context.getJobID();
return jobID != null ? jobID.toString() : "(no job ID)";
} | 3.26 |
hadoop_CommitUtilsWithMR_getMagicTaskAttemptPath_rdh | /**
* Compute the path where the output of a task attempt is stored until
* that task is committed.
* This path is marked as a base path for relocations, so subdirectory
* information is preserved.
*
* @param context
* the context of the task attempt.
* @param jobUUID
* unique Job ID.
* @param dest
* The output path to commit work into
* @return the path where a task attempt should be stored.
*/
public static Path getMagicTaskAttemptPath(TaskAttemptContext context, String jobUUID, Path dest) {
return new Path(getBaseMagicTaskAttemptPath(context, jobUUID, dest), BASE);
} | 3.26 |
hadoop_CommitUtilsWithMR_getMagicTaskAttemptsPath_rdh | /**
* Compute the path where the output of magic task attempts are stored.
*
* @param jobUUID
* unique Job ID.
* @param dest
* destination of work
* @param appAttemptId
* the ID of the application attempt for this job.
* @return the path where the output of magic task attempts are stored.
*/
public static Path getMagicTaskAttemptsPath(String jobUUID, Path dest, int appAttemptId) {
return new Path(getMagicJobAttemptPath(jobUUID, appAttemptId, dest), "tasks");
} | 3.26 |
hadoop_CommitUtilsWithMR_getTempTaskAttemptPath_rdh | /**
* Compute the path where the output of a given task attempt will be placed.
*
* @param context
* task context
* @param jobUUID
* unique Job ID.
* @param out
* output directory of job
* @return the path to store temporary job attempt data.
*/
public static Path getTempTaskAttemptPath(TaskAttemptContext context, final String jobUUID, Path out) {
return new Path(getTempJobAttemptPath(jobUUID, out, getAppAttemptId(context)), String.valueOf(context.getTaskAttemptID()));
} | 3.26 |
hadoop_CommitUtilsWithMR_getBaseMagicTaskAttemptPath_rdh | /**
* Get the base Magic attempt path, without any annotations to mark relative
* references.
* If there is an app attempt property in the context configuration, that
* is included.
*
* @param context
* task context.
* @param jobUUID
* unique Job ID.
* @param dest
* The output path to commit work into
* @return the path under which all attempts go
*/
public static Path getBaseMagicTaskAttemptPath(TaskAttemptContext context, String jobUUID, Path dest) {
return new Path(getMagicTaskAttemptsPath(jobUUID, dest, getAppAttemptId(context)), String.valueOf(context.getTaskAttemptID()));
} | 3.26 |
hadoop_CommitUtilsWithMR_getMagicJobAttemptPath_rdh | /**
* Compute the "magic" path for a job attempt.
*
* @param jobUUID
* unique Job ID.
* @param appAttemptId
* the ID of the application attempt for this job.
* @param dest
* the final output directory
* @return the path to store job attempt data.
*/
public static Path getMagicJobAttemptPath(String jobUUID, int appAttemptId, Path dest) {
return new Path(getMagicJobAttemptsPath(dest, jobUUID), formatAppAttemptDir(jobUUID, appAttemptId));
} | 3.26 |
hadoop_CommitUtilsWithMR_getMagicJobAttemptsPath_rdh | /**
* Get the location of magic job attempts.
*
* @param out
* the base output directory.
* @param jobUUID
* unique Job ID.
* @return the location of magic job attempts.
 */
public static Path getMagicJobAttemptsPath(Path out, String jobUUID) {
Preconditions.checkArgument((jobUUID != null) && (!jobUUID.isEmpty()), "Invalid job ID: %s", jobUUID);
return new Path(out, MAGIC_PATH_PREFIX + jobUUID);
} | 3.26 |
hadoop_CommitUtilsWithMR_getMagicJobPath_rdh | /**
* Compute the "magic" path for a job.
*
* @param jobUUID
* unique Job ID.
* @param dest
* the final output directory
* @return the path to store job attempt data.
*/
public static Path getMagicJobPath(String jobUUID, Path dest) {
return getMagicJobAttemptsPath(dest, jobUUID);
} | 3.26 |
hadoop_CommitUtilsWithMR_getConfigurationOption_rdh | /**
* Get a configuration option, with any value in the job configuration
* taking priority over that in the filesystem.
* This allows for per-job override of FS parameters.
*
* Order is: job context, filesystem config, default value
*
* @param context
* job/task context
* @param fsConf
* filesystem configuration. Get this from the FS to guarantee
* per-bucket parameter propagation
* @param key
* key to look for
* @param defVal
* default value
* @return the configuration option.
*/
public static String getConfigurationOption(JobContext context, Configuration fsConf, String key, String defVal) {
return context.getConfiguration().getTrimmed(key, fsConf.getTrimmed(key, defVal));
} | 3.26 |
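A self-contained sketch of the precedence described above, using two plain Configuration objects in place of the job context; the key name is just an example.
```java
import org.apache.hadoop.conf.Configuration;

public class ConfigPrecedenceSketch {
  public static void main(String[] args) {
    Configuration fsConf = new Configuration(false);
    fsConf.set("example.option", "from-filesystem");

    Configuration jobConf = new Configuration(false);
    // Uncomment to see the job-level value take priority:
    // jobConf.set("example.option", "from-job");

    String key = "example.option";
    // Same lookup order as getConfigurationOption():
    // job config first, then filesystem config, then the default value.
    String value = jobConf.getTrimmed(key, fsConf.getTrimmed(key, "default"));
    System.out.println(value); // "from-filesystem"
  }
}
```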
hadoop_CommitUtilsWithMR_formatJobDir_rdh | /**
* Build the name of the job directory, without
* app attempt.
* This is the path to use for cleanup.
*
* @param jobUUID
* unique Job ID.
* @return the directory name for the job
*/
public static String formatJobDir(String jobUUID) {
return JOB_ID_PREFIX + jobUUID;
} | 3.26 |
hadoop_CommitUtilsWithMR_jobName_rdh | /**
* Get a job name; returns meaningful text if there is no name.
*
* @param context
* job context
* @return a string for logs
*/
public static String jobName(JobContext context) {
String name = context.getJobName();
return (name != null) && (!name.isEmpty()) ? name : "(anonymous)";
} | 3.26 |
hadoop_JobTokenIdentifier_getKind_rdh | /**
* {@inheritDoc }
*/
@Override
public Text getKind() {
return KIND_NAME;
} | 3.26 |
hadoop_JobTokenIdentifier_readFields_rdh | /**
* {@inheritDoc }
*/
@Override
public void readFields(DataInput in) throws IOException {
jobid.readFields(in);
} | 3.26 |
hadoop_JobTokenIdentifier_getUser_rdh | /**
* {@inheritDoc }
*/
@Override
public UserGroupInformation getUser() {
if ((jobid == null) || "".equals(jobid.toString())) {
return null;
}
return UserGroupInformation.createRemoteUser(jobid.toString());
} | 3.26 |
hadoop_JobTokenIdentifier_write_rdh | /**
* {@inheritDoc }
*/
@Override
public void write(DataOutput out) throws IOException {
jobid.write(out);
} | 3.26 |
hadoop_JobTokenIdentifier_getJobId_rdh | /**
* Get the jobid
*
* @return the jobid
 */
public Text getJobId() {
return jobid;
} | 3.26 |
hadoop_StripedBlockReader_actualReadFromBlock_rdh | /**
* Perform actual reading of bytes from block.
*/
private BlockReadStats actualReadFromBlock() throws IOException {
DataNodeFaultInjector.get().delayBlockReader();
int len = buffer.remaining();
int n = 0;
while (n < len) {
int nread = blockReader.read(buffer);
if (nread <= 0) {
break;
}
n += nread;
stripedReader.getReconstructor().incrBytesRead(isLocal, nread);
}
return new BlockReadStats(n, blockReader.isShortCircuit(), blockReader.getNetworkDistance());
} | 3.26 |
hadoop_StripedBlockReader_closeBlockReader_rdh | // close block reader
void closeBlockReader() {
IOUtils.closeStream(blockReader);
blockReader = null;
} | 3.26 |
hadoop_DistributedCache_setLocalFiles_rdh | /**
* Set the conf to contain the location for localized files. Used
* by internal DistributedCache code.
*
* @param conf
* The conf to modify to contain the localized caches
* @param str
* a comma separated list of local files
*/
@Deprecated
public static void setLocalFiles(Configuration conf, String str) {
conf.set(CACHE_LOCALFILES, str);
} | 3.26 |
hadoop_DistributedCache_addLocalArchives_rdh | /**
* Add a archive that has been localized to the conf. Used
* by internal DistributedCache code.
*
* @param conf
* The conf to modify to contain the localized caches
* @param str
* a comma separated list of local archives
*/
@Deprecated
public static void addLocalArchives(Configuration conf,
String str) {
String
archives = conf.get(CACHE_LOCALARCHIVES);
conf.set(CACHE_LOCALARCHIVES, archives == null ? str : (archives + ",") + str);
} | 3.26 |
hadoop_DistributedCache_m0_rdh | /**
* Returns mtime of a given cache file on hdfs. Internal to MapReduce.
*
* @param conf
* configuration
* @param cache
* cache file
* @return mtime of a given cache file on hdfs
* @throws IOException
*/
@Deprecated
public static long m0(Configuration conf, URI cache) throws IOException {
return getFileStatus(conf, cache).getModificationTime();
} | 3.26 |
hadoop_DistributedCache_setArchiveTimestamps_rdh | /**
* This is to check the timestamp of the archives to be localized.
* Used by internal MapReduce code.
*
* @param conf
* Configuration which stores the timestamp's
* @param timestamps
* comma separated list of timestamps of archives.
* The order should be the same as the order in which the archives are added.
*/
@Deprecated
public static void setArchiveTimestamps(Configuration conf, String timestamps) {
conf.set(CACHE_ARCHIVES_TIMESTAMPS, timestamps);
} | 3.26 |
hadoop_DistributedCache_getFileStatus_rdh | /**
* Returns {@link FileStatus} of a given cache file on hdfs. Internal to
* MapReduce.
*
* @param conf
* configuration
* @param cache
* cache file
* @return <code>FileStatus</code> of a given cache file on hdfs
* @throws IOException
*/
@Deprecated
public static FileStatus getFileStatus(Configuration conf, URI cache) throws IOException {
FileSystem fileSystem = FileSystem.get(cache, conf);
return fileSystem.getFileStatus(new Path(cache.getPath()));
} | 3.26 |
hadoop_DistributedCache_setFileTimestamps_rdh | /**
* This is to check the timestamp of the files to be localized.
* Used by internal MapReduce code.
*
* @param conf
* Configuration which stores the timestamp's
* @param timestamps
* comma separated list of timestamps of files.
* The order should be the same as the order in which the files are added.
*/
@Deprecated
public static void setFileTimestamps(Configuration conf, String timestamps) {
conf.set(CACHE_FILES_TIMESTAMPS, timestamps);
} | 3.26 |
hadoop_DistributedCache_addLocalFiles_rdh | /**
 * Add a file that has been localized to the conf. Used
* by internal DistributedCache code.
*
* @param conf
* The conf to modify to contain the localized caches
* @param str
* a comma separated list of local files
*/
@Deprecated
public static void addLocalFiles(Configuration conf, String str) {
String files = conf.get(CACHE_LOCALFILES);
conf.set(CACHE_LOCALFILES, files == null ? str : (files + ",") + str);
} | 3.26 |
hadoop_DistributedCache_setLocalArchives_rdh | /**
* Set the conf to contain the location for localized archives. Used
* by internal DistributedCache code.
*
* @param conf
* The conf to modify to contain the localized caches
* @param str
* a comma separated list of local archives
*/
@Deprecated
public static void setLocalArchives(Configuration conf, String str) {
conf.set(CACHE_LOCALARCHIVES, str);
} | 3.26 |
hadoop_AppReportFetcher_getApplicationReport_rdh | /**
* Get an application report for the specified application id from the RM and
* fall back to the Application History Server if not found in RM.
*
* @param applicationsManager
* what to use to get the RM reports.
* @param appId
* id of the application to get.
* @return the ApplicationReport for the appId.
* @throws YarnException
* on any error.
* @throws IOException
* connection exception.
*/
protected FetchedAppReport getApplicationReport(ApplicationClientProtocol applicationsManager, ApplicationId appId) throws YarnException, IOException {
GetApplicationReportRequest request = this.recordFactory.newRecordInstance(GetApplicationReportRequest.class);
request.setApplicationId(appId);
ApplicationReport appReport;
FetchedAppReport fetchedAppReport;
try {
appReport = applicationsManager.getApplicationReport(request).getApplicationReport();
fetchedAppReport = new FetchedAppReport(appReport, AppReportSource.RM);
} catch (ApplicationNotFoundException e) {
if (!isAHSEnabled) {
// Just throw it as usual if historyService is not enabled.
throw e;
}
// Fetch the application report from AHS
appReport = historyManager.getApplicationReport(request).getApplicationReport();
fetchedAppReport = new FetchedAppReport(appReport, AppReportSource.AHS);
}
return fetchedAppReport;
} | 3.26 |
hadoop_Query_getPartial_rdh | /**
* Get the partial record used to query.
*
* @return The partial record used for the query.
*/
public T getPartial() {
return this.partial;
} | 3.26 |
hadoop_Query_matches_rdh | /**
* Check if a record matches the primary keys or the partial record.
*
* @param other
* Record to check.
 * @return true if the record matches; false if there is no partial record.
*/
public boolean matches(T other) {
if (this.partial == null) {
return false;
}
return this.partial.like(other);
} | 3.26 |
hadoop_Error_message_rdh | /**
*/
public Error message(String message) {
this.message = message;
return this;
} | 3.26 |
hadoop_Error_code_rdh | /**
*/
public Error code(Integer code) {
this.code = code;
return this;
} | 3.26 |
hadoop_Error_toIndentedString_rdh | /**
* Convert the given object to string with each line indented by 4 spaces
* (except the first line).
*/
private String toIndentedString(Object o) {
if (o == null) {
return "null";
}
return o.toString().replace("\n", "\n ");
} | 3.26 |
hadoop_Error_m0_rdh | /**
*/
public Error m0(String fields) {
this.fields = fields;
return this;
} | 3.26 |
hadoop_GPGPoliciesBlock_policyWeight2String_rdh | /**
* We will convert the PolicyWeight to string format.
*
* @param weights
* PolicyWeight.
* @return string format PolicyWeight. example: SC-1:0.91, SC-2:0.09
*/
private String policyWeight2String(Map<SubClusterIdInfo, Float> weights) {
StringBuilder sb = new StringBuilder();
for (Map.Entry<SubClusterIdInfo, Float> v8 : weights.entrySet()) {
sb.append(v8.getKey().toId()).append(": ").append(v8.getValue()).append(", ");
}
if (sb.length() > 2) {
sb.setLength(sb.length() - 2);
}
return sb.toString();
} | 3.26 |
hadoop_FederationClientMethod_getTypes_rdh | /**
* Get the calling types for this method.
*
* @return An array of calling types.
*/
public Class<?>[] getTypes() {
return Arrays.copyOf(this.types, this.types.length);
} | 3.26 |
hadoop_FederationClientMethod_invoke_rdh | /**
* We will use the invoke method to call the method in FederationStateStoreService.
*
* @return The result returned after calling the interface.
* @throws YarnException
* yarn exception.
*/
protected R invoke() throws YarnException {
try {
long startTime = clock.getTime();
Method method = FederationStateStore.class.getMethod(methodName, types);
R result = clazz.cast(method.invoke(stateStoreClient, params));
long stopTime = clock.getTime();
FederationStateStoreServiceMetrics.succeededStateStoreServiceCall(methodName, stopTime - startTime);
return result;
} catch (Exception e) {
LOG.error("stateStoreClient call method {} error.", methodName, e);
FederationStateStoreServiceMetrics.failedStateStoreServiceCall(methodName);
throw new YarnException(e);
}
} | 3.26 |
hadoop_SaslParticipant_getNegotiatedQop_rdh | /**
* After successful SASL negotation, returns the negotiated quality of
* protection.
*
* @return negotiated quality of protection
*/
public String getNegotiatedQop() {
if (saslClient != null) {
return ((String) (saslClient.getNegotiatedProperty(Sasl.QOP)));
} else {
return ((String) (saslServer.getNegotiatedProperty(Sasl.QOP)));
}
} | 3.26 |
hadoop_SaslParticipant_createStreamPair_rdh | /**
* Return some input/output streams that may henceforth have their
* communication encrypted, depending on the negotiated quality of protection.
*
* @param out
* output stream to wrap
* @param in
* input stream to wrap
* @return IOStreamPair wrapping the streams
*/
public IOStreamPair createStreamPair(DataOutputStream out, DataInputStream in) {
if (saslClient != null) {
return new IOStreamPair(new SaslInputStream(in, saslClient), new SaslOutputStream(out, saslClient));
} else {
return new IOStreamPair(new SaslInputStream(in, saslServer), new SaslOutputStream(out, saslServer));
}
} | 3.26 |
hadoop_SaslParticipant_unwrap_rdh | /**
* Unwraps a byte array.
*
* @param bytes
* The array containing the bytes to unwrap.
* @param off
* The starting position at the array
* @param len
* The number of bytes to unwrap
* @return byte[] unwrapped bytes
* @throws SaslException
* if the bytes cannot be successfully unwrapped
*/
public byte[] unwrap(byte[] bytes, int off, int len) throws SaslException {
if (saslClient != null) {
return saslClient.unwrap(bytes, off, len);
} else {
return saslServer.unwrap(bytes, off, len);
}
} | 3.26 |
hadoop_SaslParticipant_createServerSaslParticipant_rdh | /**
* Creates a SaslParticipant wrapping a SaslServer.
*
* @param saslProps
* properties of SASL negotiation
* @param callbackHandler
* for handling all SASL callbacks
* @return SaslParticipant wrapping SaslServer
* @throws SaslException
* for any error
*/
public static SaslParticipant createServerSaslParticipant(Map<String, String> saslProps, CallbackHandler callbackHandler) throws SaslException {
initializeSaslServerFactory();
return new SaslParticipant(saslServerFactory.createSaslServer(f0, PROTOCOL, SERVER_NAME, saslProps, callbackHandler));
} | 3.26 |
hadoop_SaslParticipant_isComplete_rdh | /**
* Returns true if SASL negotiation is complete.
*
* @return true if SASL negotiation is complete
*/
public boolean isComplete() {
if (saslClient != null) {
return saslClient.isComplete();
} else {
return saslServer.isComplete();
}
} | 3.26 |
hadoop_SaslParticipant_isNegotiatedQopPrivacy_rdh | /**
* After successful SASL negotiation, returns whether it's QOP privacy
*
* @return boolean whether it's QOP privacy
*/
public boolean isNegotiatedQopPrivacy() {
String qop = getNegotiatedQop();
return (qop != null) && "auth-conf".equalsIgnoreCase(qop);
} | 3.26 |