Columns:
name: string, length 12 to 178
code_snippet: string, length 8 to 36.5k
score: float64, range 3.26 to 3.68
hadoop_SaslParticipant_createClientSaslParticipant_rdh
/** * Creates a SaslParticipant wrapping a SaslClient. * * @param userName * SASL user name * @param saslProps * properties of SASL negotiation * @param callbackHandler * for handling all SASL callbacks * @return SaslParticipant wrapping SaslClient * @throws SaslException * for any error */ public static SaslParticipant createClientSaslParticipant(String userName, Map<String, String> saslProps, CallbackHandler callbackHandler) throws SaslException { initializeSaslClientFactory(); return new SaslParticipant(saslClientFactory.createSaslClient(new String[]{ f0 }, userName, PROTOCOL, SERVER_NAME, saslProps, callbackHandler)); }
3.26
hadoop_SaslParticipant_wrap_rdh
/** * Wraps a byte array. * * @param bytes * The array containing the bytes to wrap. * @param off * The starting position in the array * @param len * The number of bytes to wrap * @return byte[] wrapped bytes * @throws SaslException * if the bytes cannot be successfully wrapped */ public byte[] wrap(byte[] bytes, int off, int len) throws SaslException { if (saslClient != null) { return saslClient.wrap(bytes, off, len); } else { return saslServer.wrap(bytes, off, len); } }
3.26
hadoop_UnknownHostRetryReason_canCapture_rdh
/** * Category that can capture server-response errors for {@link UnknownHostException}. */public class UnknownHostRetryReason extends RetryReasonCategory { @Override Boolean canCapture(final Exception ex, final Integer statusCode, final String serverErrorMessage) { if (ex instanceof UnknownHostException) { return true; } return false; }
3.26
hadoop_JobACL_getAclName_rdh
/** * Get the name of the ACL. Here it is same as the name of the configuration * property for specifying the ACL for the job. * * @return aclName */ public String getAclName() { return aclName; }
3.26
hadoop_ApplicationServiceRecordProcessor_initTypeToInfoMapping_rdh
/** * Initializes the DNS record type to descriptor mapping based on the * provided service record. * * @param serviceRecord * the registry service record. * @throws Exception * if an issue is encountered. */ @Override public void initTypeToInfoMapping(ServiceRecord serviceRecord) throws Exception { if (serviceRecord.external.isEmpty()) { LOG.info(serviceRecord.description + ": No external endpoints defined."); return; } for (int type : getRecordTypes()) { switch (type) { case Type.A : createAInfo(serviceRecord);break; case Type.AAAA : createAAAAInfo(serviceRecord); break; case Type.TXT : createTXTInfo(serviceRecord); break; case Type.CNAME : createCNAMEInfo(serviceRecord); break; case Type.SRV : createSRVInfo(serviceRecord); break; default : throw new IllegalArgumentException("Unknown type " + type); } }}
3.26
hadoop_ApplicationServiceRecordProcessor_createSRVInfo_rdh
/** * Create an application SRV record descriptor. * * @param serviceRecord * the service record. * @throws Exception * if there is an issue during descriptor creation. */ protected void createSRVInfo(ServiceRecord serviceRecord) throws Exception { List<Endpoint> endpoints = serviceRecord.external; List<RecordDescriptor> recordDescriptors = new ArrayList<>(); SRVApplicationRecordDescriptor srvInfo;for (Endpoint endpoint : endpoints) { srvInfo = new SRVApplicationRecordDescriptor(serviceRecord, endpoint); recordDescriptors.add(srvInfo); } registerRecordDescriptor(Type.SRV, recordDescriptors); }
3.26
hadoop_ApplicationServiceRecordProcessor_createAInfo_rdh
/** * Create an application A record descriptor. * * @param record * the service record. * @throws Exception * if there is an issue during descriptor creation. */ protected void createAInfo(ServiceRecord record) throws Exception { AApplicationRecordDescriptor recordInfo = new AApplicationRecordDescriptor(getPath(), record); registerRecordDescriptor(Type.A, recordInfo); }
3.26
hadoop_ApplicationServiceRecordProcessor_init_rdh
/** * Initializes the descriptor parameters. * * @param serviceRecord * the service record. */ @Override protected void init(ServiceRecord serviceRecord) throws Exception { super.init(serviceRecord); if (getTarget() == null) { return; } try { this.setTarget(getIpv6Address(getTarget())); } catch (UnknownHostException e) { throw new IllegalStateException(e); } }
3.26
hadoop_ApplicationServiceRecordProcessor_getRecordTypes_rdh
/** * Returns the record types associated with a container service record. * * @return the record type array */ @Override public int[] getRecordTypes() { return new int[]{ Type.A, Type.AAAA, Type.CNAME, Type.SRV, Type.TXT }; }
3.26
hadoop_ApplicationServiceRecordProcessor_createAAAAInfo_rdh
/** * Create an application AAAA record descriptor. * * @param record * the service record. * @throws Exception * if there is an issue during descriptor creation. */protected void createAAAAInfo(ServiceRecord record) throws Exception { AAAAApplicationRecordDescriptor recordInfo = new AAAAApplicationRecordDescriptor(getPath(), record); registerRecordDescriptor(Type.AAAA, recordInfo); }
3.26
hadoop_ApplicationServiceRecordProcessor_createCNAMEInfo_rdh
/** * Create an application CNAME record descriptor. * * @param serviceRecord * the service record. * @throws Exception * if there is an issue during descriptor creation. */ protected void createCNAMEInfo(ServiceRecord serviceRecord) throws Exception { List<Endpoint> endpoints = serviceRecord.external; List<RecordDescriptor> recordDescriptors = new ArrayList<>(); CNAMEApplicationRecordDescriptor v11; for (Endpoint endpoint : endpoints) { v11 = new CNAMEApplicationRecordDescriptor(serviceRecord, endpoint); recordDescriptors.add(v11); } registerRecordDescriptor(Type.CNAME, recordDescriptors); }
3.26
hadoop_BaseRecord_init_rdh
/** * Initialize the object. */ public void init() { // Call this after the object has been constructed initDefaultTimes(); }
3.26
hadoop_BaseRecord_getDeletionMs_rdh
/** * Get the deletion time for the expired record. The default is disabled. * Override for customized behavior. * * @return Deletion time for the expired record. */ public long getDeletionMs() { return -1; }
3.26
hadoop_BaseRecord_hasOtherFields_rdh
/** * If the record has fields other than the primary keys. This is used by * TestStateStoreDriverBase to skip the modification check. * * @return If the record has more fields. */ @VisibleForTesting public boolean hasOtherFields() { return true; }
3.26
hadoop_BaseRecord_isExpired_rdh
/** * Check if this record is expired. The default is false. Override for * customized behavior. * * @return True if the record is expired. */ public boolean isExpired() {return false; }
3.26
hadoop_BaseRecord_validate_rdh
/** * Validates the record. Called when the record is created, populated from the * state store, and before committing to the state store. If validation fails, * an exception is thrown. */ public void validate() {if (getDateCreated() <= 0) { throw new IllegalArgumentException(ERROR_MSG_CREATION_TIME_NEGATIVE); } else if (getDateModified() <= 0) { throw new IllegalArgumentException(ERROR_MSG_MODIFICATION_TIME_NEGATIVE);} }
3.26
hadoop_BaseRecord_like_rdh
/** * Check if this record matches a partial record. * * @param other * Partial record. * @return If this record matches. */ public boolean like(BaseRecord other) { if (other == null) { return false; } Map<String, String> thisKeys = this.getPrimaryKeys(); Map<String, String> otherKeys = other.getPrimaryKeys(); if (thisKeys == null) { return otherKeys == null;} return thisKeys.equals(otherKeys); }
3.26
hadoop_BaseRecord_initDefaultTimes_rdh
/** * Initialize default times. The driver may update these timestamps on insert * and/or update. This should only be called when initializing an object that * is not backed by a data store. */ private void initDefaultTimes() { long now = Time.now(); this.setDateCreated(now); this.setDateModified(now); }
3.26
hadoop_BaseRecord_hashCode_rdh
/** * Override hash code to use primary key(s) for comparison. */ @Override public int hashCode() { Map<String, String> keyset = this.getPrimaryKeys(); return keyset.hashCode(); }
3.26
hadoop_ContainerStatus_getExposedPorts_rdh
/** * Get exposed ports of the container. * * @return List of exposed ports */ @Public @Unstable public String getExposedPorts() { throw new UnsupportedOperationException("subclass must implement this method"); }
3.26
hadoop_ContainerStatus_getIPs_rdh
/** * Get all the IP addresses with which the container runs. * * @return The IP addresses where the container runs. */ @Public @Unstable public List<String> getIPs() { throw new UnsupportedOperationException("subclass must implement this method"); }
3.26
hadoop_ContainerStatus_m0_rdh
/** * Get the <code>ExecutionType</code> of the container. * * @return <code>ExecutionType</code> of the container */ @Public @Evolving public ExecutionType m0() { throw new UnsupportedOperationException("subclass must implement this method"); }
3.26
hadoop_ContainerStatus_setContainerSubState_rdh
/** * Add Extra state information of the container (SCHEDULED, LOCALIZING etc.). * * @param subState * Extra State Information. */ @Private @Unstable public void setContainerSubState(ContainerSubState subState) { throw new UnsupportedOperationException("subclass must implement this method"); }
3.26
hadoop_ContainerStatus_getHost_rdh
/** * Get the hostname where the container runs. * * @return The hostname where the container runs. */@Public @Unstable public String getHost() { throw new UnsupportedOperationException("subclass must implement this method"); }
3.26
hadoop_JobCreator_setDistCacheEmulator_rdh
/** * This method is to be called before calling any other method in JobCreator * except canEmulateDistCacheLoad(), especially if canEmulateDistCacheLoad() * returns true for that job type. * * @param e * Distributed Cache Emulator */ void setDistCacheEmulator(DistributedCacheEmulator e) { this.dce = e; }
3.26
hadoop_PowerShellFencer_buildPSScript_rdh
/** * Build a PowerShell script to kill a java.exe process in a remote machine. * * @param processName * Name of the process to kill. This is an attribute in * CommandLine. * @param host * Host where the process is. * @return Path of the PowerShell script. */ private String buildPSScript(final String processName, final String host) { LOG.info((("Building PowerShell script to kill " + processName) + " at ") + host); String ps1script = null; BufferedWriter writer = null; try { File v11 = File.createTempFile("temp-fence-command", ".ps1"); v11.deleteOnExit(); FileOutputStream fos = new FileOutputStream(v11, false); OutputStreamWriter osw = new OutputStreamWriter(fos, StandardCharsets.UTF_8);writer = new BufferedWriter(osw); // Filter to identify the Namenode process String filter = StringUtils.join(" and ", new String[]{ "Name LIKE '%java.exe%'", ("CommandLine LIKE '%" + processName) + "%'" }); // Identify the process String cmd = "Get-WmiObject Win32_Process"; cmd += (" -Filter \"" + filter) + "\""; // Remote location cmd += " -Computer " + host; // Kill it cmd += " |% { $_.Terminate() }"; LOG.info("PowerShell command: " + cmd); writer.write(cmd); writer.flush(); ps1script = v11.getAbsolutePath(); } catch (IOException ioe) { LOG.error("Cannot create PowerShell script", ioe);} finally { if (writer != null) { try { writer.close(); } catch (IOException ioe) { LOG.error("Cannot close PowerShell script", ioe); } } } return ps1script; }
3.26
hadoop_TimelineV2Client_createTimelineClient_rdh
/** * Creates an instance of the timeline v.2 client. * * @param appId * the application id with which the timeline client is * associated * @return the created timeline client instance */ @Public public static TimelineV2Client createTimelineClient(ApplicationId appId) { TimelineV2Client client = new TimelineV2ClientImpl(appId); return client; }
3.26
hadoop_ManifestStoreOperationsThroughFileSystem_isFile_rdh
/** * Using FileSystem.isFile to offer stores the option to optimize their probes. * * @param path * path to probe * @return true if the path resolves to a file. * @throws IOException * IO failure. */ @SuppressWarnings("deprecation") @Override public boolean isFile(Path path) throws IOException { return fileSystem.isFile(path); }
3.26
hadoop_ManifestStoreOperationsThroughFileSystem_msync_rdh
/** * Invokes FileSystem msync(); swallows UnsupportedOperationExceptions. * This ensures client metadata caches are in sync in an HDFS-HA deployment. * No other filesystems support this; in the absence of a hasPathCapability() * probe, after the operation is rejected, an atomic boolean is set * to stop further attempts from even trying. * * @param path * path * @throws IOException * failure to synchronize. */ @Override public void msync(Path path) throws IOException { // there's no need for atomicity here, as the sole cost of // multiple failures is a few extra msync() attempts. if (msyncUnsupported) { return; } // qualify so we can be confident that the FS being synced // is the one we expect. fileSystem.makeQualified(path); try { fileSystem.msync(); } catch (UnsupportedOperationException ignored) { // this exception is the default. // set the unsupported flag so no future attempts are made. msyncUnsupported = true; } }
3.26
hadoop_ManifestStoreOperationsThroughFileSystem_storePreservesEtagsThroughRenames_rdh
/** * Probe filesystem capabilities. * * @param path * path to probe. * @return true if the FS declares its renames work. */ @Override public boolean storePreservesEtagsThroughRenames(Path path) { try { return fileSystem.hasPathCapability(path, CommonPathCapabilities.ETAGS_PRESERVED_IN_RENAME); } catch (IOException ignored) { return false; } }
3.26
hadoop_ManifestStoreOperationsThroughFileSystem_getFileSystem_rdh
/** * Get the filesystem. * * @return the filesystem; null until bound. */ public FileSystem getFileSystem() { return fileSystem; }
3.26
hadoop_AbstractStoreOperation_activateAuditSpan_rdh
/** * Activate the audit span. */ public void activateAuditSpan() { if (auditSpan != null) { auditSpan.activate(); } }
3.26
hadoop_AbstractStoreOperation_getStoreContext_rdh
/** * Get the store context. * * @return the context. */ public final StoreContext getStoreContext() { return storeContext; }
3.26
hadoop_AbstractStoreOperation_getAuditSpan_rdh
/** * Get the audit span this object was created with. * * @return the current span or null */ public AuditSpan getAuditSpan() { return auditSpan; }
3.26
hadoop_CsiAdaptorFactory_getAdaptor_rdh
/** * Load csi-driver-adaptor from configuration. If the configuration is not * specified, the default implementation * for the adaptor is {@link DefaultCsiAdaptorImpl}. If the configured class * is not a valid variation of {@link CsiAdaptorPlugin} or the class cannot * be found, this function will throw a RuntimeException. * * @param driverName * @param conf * @return CsiAdaptorPlugin * @throws YarnException * if unable to create the adaptor class. * @throws RuntimeException * if given class is not found or not * an instance of {@link CsiAdaptorPlugin} */ public static CsiAdaptorPlugin getAdaptor(String driverName, Configuration conf) throws YarnException { // load configuration String configName = (YarnConfiguration.NM_CSI_ADAPTOR_PREFIX + driverName) + YarnConfiguration.NM_CSI_ADAPTOR_CLASS; Class<? extends CsiAdaptorPlugin> impl = conf.getClass(configName, DefaultCsiAdaptorImpl.class, CsiAdaptorPlugin.class); if (impl == null) { throw new YarnException(("Unable to init csi-adaptor from the" + " class specified via ") + configName); } // init the adaptor CsiAdaptorPlugin instance = ReflectionUtils.newInstance(impl, conf); LOG.info("csi-adaptor initiated, implementation: " + impl.getCanonicalName()); return instance; }
3.26
hadoop_RpcServerException_getRpcErrorCodeProto_rdh
/** * * @return get the detailed rpc status corresponding to this exception. */ public RpcErrorCodeProto getRpcErrorCodeProto() { return RpcErrorCodeProto.ERROR_RPC_SERVER; }
3.26
hadoop_RpcServerException_getRpcStatusProto_rdh
/** * * @return get the rpc status corresponding to this exception. */ public RpcStatusProto getRpcStatusProto() { return RpcStatusProto.ERROR; }
3.26
hadoop_LpSolver_generateOverAllocationConstraints_rdh
/** * Generate over-allocation constraints. * * @param lpModel * the LP model. * @param cJobITimeK * actual container allocation for job i in time * interval k. * @param oa * container over-allocation. * @param x * predicted container allocation. * @param indexJobITimeK * index for job i at time interval k. * @param timeK * index for time interval k. */ private void generateOverAllocationConstraints(final ExpressionsBasedModel lpModel, final double cJobITimeK, final Variable[] oa, final Variable[] x, final int indexJobITimeK, final int timeK) { // oa_job_i_timeK >= x_timeK - cJobITimeK Expression overAllocExpression = lpModel.addExpression("over_alloc_" + indexJobITimeK); overAllocExpression.set(oa[indexJobITimeK], 1); overAllocExpression.set(x[timeK], -1); overAllocExpression.lower(-cJobITimeK);// >= }
3.26
hadoop_LpSolver_generateUnderAllocationConstraints_rdh
/** * Generate under-allocation constraints. * * @param lpModel * the LP model. * @param cJobITimeK * actual container allocation for job i in time * interval k. * @param uaPredict * absolute container under-allocation. * @param ua * recursive container under-allocation. * @param x * predicted container allocation. * @param indexJobITimeK * index for job i at time interval k. * @param timeK * index for time interval k. */ private void generateUnderAllocationConstraints(final ExpressionsBasedModel lpModel, final double cJobITimeK, final Variable[] uaPredict, final Variable[] ua, final Variable[] x, final int indexJobITimeK, final int timeK) { // uaPredict_job_i_timeK + x_timeK >= cJobITimeK Expression underAllocPredictExpression = lpModel.addExpression("under_alloc_predict_" + indexJobITimeK); underAllocPredictExpression.set(uaPredict[indexJobITimeK], 1); underAllocPredictExpression.set(x[timeK], 1); underAllocPredictExpression.lower(cJobITimeK);// >= if (timeK >= 1) { /** * Recursively calculate container under-allocation. */ // ua_job_i_timeK >= ua_job_i_time_(k-1) + cJobITimeK - x_timeK Expression underAllocExpression = lpModel.addExpression("under_alloc_" + indexJobITimeK);underAllocExpression.set(ua[indexJobITimeK], 1); underAllocExpression.set(ua[indexJobITimeK - 1], -1); underAllocExpression.set(x[timeK], 1); underAllocExpression.lower(cJobITimeK);// >= } else { /** * Initial value for container under-allocation. */ // ua_job_i_time_0 >= cJobI_time_0 - x_time_0 Expression underAllocExpression = lpModel.addExpression("under_alloc_" + indexJobITimeK); underAllocExpression.set(ua[indexJobITimeK], 1); underAllocExpression.set(x[timeK], 1); underAllocExpression.lower(cJobITimeK);// >= } }
3.26
hadoop_LpSolver_generateObjective_rdh
/** * Generate solver objective. * * @param objective * LP solver objective. * @param numJobs * number of history runs of the recurring pipeline. * @param jobLen * (maximum) job length of the recurring pipeline. * @param oa * container over-allocation. * @param ua * recursive container under-allocation. * @param eps * regularization parameter. */ private void generateObjective(final Expression objective, final int numJobs, final int jobLen, final Variable[] oa, final Variable[] ua, final Variable eps) { int indexJobITimeK; // sum Over_Allocation for (int indexJobI = 0; indexJobI < numJobs; indexJobI++) { for (int timeK = 0; timeK < jobLen; timeK++) { indexJobITimeK = (indexJobI * jobLen) + timeK; objective.set(oa[indexJobITimeK], alpha / numJobs); } } // sum Under_Allocation int indexJobITimeN;for (int indexJobI = 0; indexJobI < numJobs; indexJobI++) { indexJobITimeN = ((indexJobI * jobLen) + jobLen) - 1; objective.set(ua[indexJobITimeN], (1 - alpha) / numJobs); } objective.set(eps, beta); objective.weight(BigDecimal.valueOf(1)); }
3.26
hadoop_BlockStorageMovementAttemptedItems_start_rdh
/** * Starts the monitor thread. */ public synchronized void start() { f0 = true; timerThread = new Daemon(new BlocksStorageMovementAttemptMonitor()); timerThread.setName("BlocksStorageMovementAttemptMonitor"); timerThread.start();}
3.26
hadoop_BlockStorageMovementAttemptedItems_stopGracefully_rdh
/** * Timed wait to stop monitor thread. */ synchronized void stopGracefully() { if (timerThread == null) { return; } if (f0) { stop(); } try { timerThread.join(3000); } catch (InterruptedException ie) { } }
3.26
hadoop_BlockStorageMovementAttemptedItems_add_rdh
/** * Add item to block storage movement attempted items map which holds the * tracking/blockCollection id versus time stamp. * * @param startPathId * - start satisfier path identifier * @param fileId * - file identifier * @param monotonicNow * - time now * @param assignedBlocks * - assigned blocks for block movement * @param retryCount * - retry count */ public void add(long startPathId, long fileId, long monotonicNow, Map<Block, Set<StorageTypeNodePair>> assignedBlocks, int retryCount) { AttemptedItemInfo itemInfo = new AttemptedItemInfo(startPathId, fileId, monotonicNow, assignedBlocks.keySet(), retryCount); synchronized(storageMovementAttemptedItems) { storageMovementAttemptedItems.add(itemInfo); } synchronized(scheduledBlkLocs) { scheduledBlkLocs.putAll(assignedBlocks); } }
3.26
hadoop_BlockStorageMovementAttemptedItems_notifyReportedBlock_rdh
/** * Notify the storage movement attempt finished block. * * @param reportedDn * reported datanode * @param type * storage type * @param reportedBlock * reported block */ public void notifyReportedBlock(DatanodeInfo reportedDn, StorageType type, Block reportedBlock) { synchronized(scheduledBlkLocs) { if (scheduledBlkLocs.size() <= 0) { return; } matchesReportedBlock(reportedDn, type, reportedBlock); } }
3.26
hadoop_BlockStorageMovementAttemptedItems_stop_rdh
/** * Sets running flag to false. Also, this will interrupt monitor thread and * clear all the queued up tasks. */ public synchronized void stop() { f0 = false; if (timerThread != null) { timerThread.interrupt(); } this.m0(); }
3.26
hadoop_CryptoUtils_createIV_rdh
/** * This method creates and initializes an IV (Initialization Vector) * * @param conf * configuration * @return byte[] initialization vector * @throws IOException * exception in case of error */ public static byte[] createIV(Configuration conf) throws IOException { CryptoCodec cryptoCodec = CryptoCodec.getInstance(conf); if (isEncryptedSpillEnabled(conf)) { byte[] iv = new byte[cryptoCodec.getCipherSuite().getAlgorithmBlockSize()]; cryptoCodec.generateSecureRandom(iv); cryptoCodec.close(); return iv; } else { return null; }}
3.26
hadoop_HSAuditLogger_addRemoteIP_rdh
/** * A helper api to add remote IP address */ static void addRemoteIP(StringBuilder b) { InetAddress ip = Server.getRemoteIp(); // ip address can be null for testcases if (ip != null) { add(Keys.IP, ip.getHostAddress(), b); } }
3.26
hadoop_HSAuditLogger_logSuccess_rdh
/** * Create a readable and parseable audit log string for a successful event. * * @param user * User who made the service request. * @param operation * Operation requested by the user. * @param target * The target on which the operation is being performed. * * <br> * <br> * Note that the {@link HSAuditLogger} uses tabs ('\t') as a key-val * delimiter and hence the value fields should not contain tabs * ('\t'). */ public static void logSuccess(String user, String operation, String target) {if (LOG.isInfoEnabled()) { LOG.info(createSuccessLog(user, operation, target)); } }
3.26
hadoop_HSAuditLogger_logFailure_rdh
/** * Create a readable and parseable audit log string for a failed event. * * @param user * User who made the service request. * @param operation * Operation requested by the user. * @param perm * Target permissions. * @param target * The target on which the operation is being performed. * @param description * Some additional information as to why the operation failed. * * <br> * <br> * Note that the {@link HSAuditLogger} uses tabs ('\t') as a key-val * delimiter and hence the value fields should not contain tabs * ('\t'). */ public static void logFailure(String user, String operation, String perm, String target, String description) { if (LOG.isWarnEnabled()) { LOG.warn(createFailureLog(user, operation, perm, target, description)); } }
3.26
hadoop_HSAuditLogger_add_rdh
/** * Appends the key-val pair to the passed builder in the following format * <pair-delim>key=value */ static void add(Keys key, String value, StringBuilder b) { b.append(AuditConstants.PAIR_SEPARATOR).append(key.name()).append(AuditConstants.KEY_VAL_SEPARATOR).append(value); }
3.26
hadoop_HSAuditLogger_createFailureLog_rdh
/** * A helper api for creating an audit log for a failure event. */ static String createFailureLog(String user, String operation, String perm, String target, String description) { StringBuilder b = new StringBuilder(); start(Keys.USER, user, b); addRemoteIP(b); add(Keys.OPERATION, operation, b); add(Keys.TARGET, target, b); add(Keys.RESULT, AuditConstants.FAILURE, b); add(Keys.DESCRIPTION, description, b); add(Keys.PERMISSIONS, perm, b); return b.toString(); }
3.26
hadoop_HSAuditLogger_start_rdh
/** * Adds the first key-val pair to the passed builder in the following format * key=value */ static void start(Keys key, String value, StringBuilder b) { b.append(key.name()).append(AuditConstants.KEY_VAL_SEPARATOR).append(value); }
3.26
hadoop_HSAuditLogger_createSuccessLog_rdh
/** * A helper api for creating an audit log for a successful event. */ static String createSuccessLog(String user, String operation, String target) { StringBuilder b = new StringBuilder(); start(Keys.USER, user, b); addRemoteIP(b); add(Keys.OPERATION, operation, b); add(Keys.TARGET, target, b); add(Keys.RESULT, AuditConstants.f0, b); return b.toString(); }
3.26
hadoop_AdminACLsManager_areACLsEnabled_rdh
/** * Returns whether ACLs are enabled * * @see YarnConfiguration#YARN_ACL_ENABLE * @see YarnConfiguration#DEFAULT_YARN_ACL_ENABLE * @return <tt>true</tt> if ACLs are enabled */ public boolean areACLsEnabled() { return aclsEnabled; }
3.26
hadoop_AdminACLsManager_getOwner_rdh
/** * Returns the owner * * @return Current user at the time of object creation */ public UserGroupInformation getOwner() { return owner; }
3.26
hadoop_FedAppReportFetcher_getApplicationReport_rdh
/** * Get an application report for the specified application id from the RM and * fall back to the Application History Server if not found in RM. * * @param appId * id of the application to get. * @return the ApplicationReport for the appId. * @throws YarnException * on any error. * @throws IOException * connection exception. */ @Override public FetchedAppReport getApplicationReport(ApplicationId appId) throws YarnException, IOException { SubClusterId scid = federationFacade.getApplicationHomeSubCluster(appId);createSubclusterIfAbsent(scid); ApplicationClientProtocol applicationsManager = subClusters.get(scid).getRight(); return super.getApplicationReport(applicationsManager, appId); }
3.26
hadoop_TrashPolicy_getInstance_rdh
/** * Get an instance of the configured TrashPolicy based on the value * of the configuration parameter fs.trash.classname. * * @param conf * the configuration to be used * @param fs * the file system to be used * @return an instance of TrashPolicy */ public static TrashPolicy getInstance(Configuration conf, FileSystem fs) { Class<? extends TrashPolicy> trashClass = conf.getClass("fs.trash.classname", TrashPolicyDefault.class, TrashPolicy.class); TrashPolicy trash = ReflectionUtils.newInstance(trashClass, conf); trash.initialize(conf, fs);// initialize TrashPolicy return trash; }
3.26
hadoop_TrashPolicy_getCurrentTrashDir_rdh
/** * Get the current trash directory for path specified based on the Trash * Policy * * @param path * path to be deleted * @return current trash directory for the path to be deleted * @throws IOException * raised on errors performing I/O. */ public Path getCurrentTrashDir(Path path) throws IOException { throw new UnsupportedOperationException(); } /** * Return a {@link Runnable}
3.26
hadoop_TrashPolicy_initialize_rdh
/** * Used to setup the trash policy. Must be implemented by all TrashPolicy * implementations. Different from initialize(conf, fs, home), this one does * not assume trash always under /user/$USER due to HDFS encryption zone. * * @param conf * the configuration to be used * @param fs * the filesystem to be used */ public void initialize(Configuration conf, FileSystem fs) { throw new UnsupportedOperationException(); }
3.26
hadoop_CRC64_init_rdh
/* Initialize a table constructed from POLY (0x9a6c9329ac4bc9b5L). */ private void init() { value = -1; for (int n = 0; n < TABLE_LENGTH; ++n) { long crc = n; for (int i = 0; i < 8; ++i) {if ((crc & 1) == 1) { crc = (crc >>> 1) ^ POLY; } else {crc >>>= 1; }} TABLE[n] = crc; } }
3.26
hadoop_CRC64_compute_rdh
/** * * @param input * byte arrays. * @return long value of the CRC-64 checksum of the data. */ public long compute(byte[] input) { init(); for (int i = 0; i < input.length; i++) { value = TABLE[(input[i] ^ ((int) (value))) & 0xff] ^ (value >>> 8); } return ~value; }
3.26
hadoop_AbstractS3AStatisticsSource_lookupGaugeValue_rdh
/** * {@inheritDoc } */ public Long lookupGaugeValue(final String name) { return ioStatistics.gauges().get(name); }
3.26
hadoop_AbstractS3AStatisticsSource_lookupCounterValue_rdh
/** * {@inheritDoc } */ public Long lookupCounterValue(final String name) { return ioStatistics.counters().get(name); }
3.26
hadoop_AbstractS3AStatisticsSource_setIOStatistics_rdh
/** * Setter. * This must be called in the subclass constructor with * the statistics store to use. * * @param statistics * statistics to set */ protected void setIOStatistics(final IOStatisticsStore statistics) { this.ioStatistics = statistics; }
3.26
hadoop_AbstractS3AStatisticsSource_incCounter_rdh
/** * Increment a named counter by the given value. * * @param name * counter name * @param value * value to increment by * @return the updated value or, if the counter is unknown: 0 */ public long incCounter(String name, long value) { return ioStatistics.incrementCounter(name, value); }
3.26
hadoop_ClusterSummarizer_m1_rdh
/** * Summarizes the cluster used for this {@link Gridmix} run. */ @Override public String m1() { StringBuilder builder = new StringBuilder(); builder.append("Cluster Summary:-"); builder.append("\nJobTracker: ").append(getJobTrackerInfo()); builder.append("\nFileSystem: ").append(getNamenodeInfo()); builder.append("\nNumber of blacklisted trackers: ").append(getNumBlacklistedTrackers()); builder.append("\nNumber of active trackers: ").append(getNumActiveTrackers()); builder.append("\nMax map task capacity: ").append(getMaxMapTasks()); builder.append("\nMax reduce task capacity: ").append(m2()); builder.append("\n\n"); return builder.toString(); }
3.26
hadoop_ClusterSummarizer_getNumBlacklistedTrackers_rdh
// Getters protected int getNumBlacklistedTrackers() { return numBlacklistedTrackers;}
3.26
hadoop_DomainRowKey_parseRowKeyFromString_rdh
/** * Given the encoded row key as string, returns the row key as an object. * * @param encodedRowKey * String representation of row key. * @return A <cite>DomainRowKey</cite> object. */ public static DomainRowKey parseRowKeyFromString(String encodedRowKey) { return new DomainRowKeyConverter().decodeFromString(encodedRowKey); }
3.26
hadoop_DomainRowKey_getRowKey_rdh
/** * Constructs a row key prefix for the domain table. * * @return byte array with the row key */ public byte[] getRowKey() { return domainIdKeyConverter.encode(this); }
3.26
hadoop_DomainRowKey_getRowKeyAsString_rdh
/** * Constructs a row key for the domain table as follows: * <p> * {@code clusterId!domainId}. * </p> * * @return String representation of row key. */ public String getRowKeyAsString() { return domainIdKeyConverter.encodeAsString(this); }
3.26
hadoop_DomainRowKey_parseRowKey_rdh
/** * Given the raw row key as bytes, returns the row key as an object. * * @param rowKey * a rowkey represented as a byte array. * @return an <cite>DomainRowKey</cite> object. */ public static DomainRowKey parseRowKey(byte[] rowKey) { return new DomainRowKeyConverter().decode(rowKey); }
3.26
hadoop_MembershipStoreImpl_getRepresentativeQuorum_rdh
/** * Picks the most recent entry in the subset that is most agreeable on the * specified field. 1) If a majority of the collection has the same value for * the field, the first sorted entry within the subset that matches the * majority value 2) Otherwise the first sorted entry in the set of all * entries * * @param records * - Collection of state store record objects of the same type * @return record that is most representative of the field name */private MembershipState getRepresentativeQuorum(Collection<MembershipState> records) { // Collate objects by field value: field value -> ordered set of records Map<FederationNamenodeServiceState, TreeSet<MembershipState>> occurenceMap = new HashMap<>(); for (MembershipState record : records) { FederationNamenodeServiceState state = record.getState(); TreeSet<MembershipState> matchingSet = occurenceMap.get(state); if (matchingSet == null) { // TreeSet orders elements by descending date via comparators matchingSet = new TreeSet<>(); occurenceMap.put(state, matchingSet); } matchingSet.add(record); } // Select largest group TreeSet<MembershipState> largestSet = new TreeSet<>(); for (TreeSet<MembershipState> matchingSet : occurenceMap.values()) { if (largestSet.size() < matchingSet.size()) { largestSet = matchingSet; } } // If quorum, use the newest element here if (largestSet.size() > (records.size() / 2)) { return largestSet.first(); // Otherwise, return most recent by class comparator } else if (records.size() > 0) { TreeSet<MembershipState> sortedList = new TreeSet<>(records); LOG.debug("Quorum failed, using most recent: {}", sortedList.first()); return sortedList.first(); } else { return null; } }
3.26
hadoop_SelectEventStreamPublisher_response_rdh
/** * The response from the SelectObjectContent call. * * @return the response object */ public SelectObjectContentResponse response() { return response; }
3.26
hadoop_SelectEventStreamPublisher_cancel_rdh
/** * Cancel the operation. */ public void cancel() { selectOperationFuture.cancel(true); }
3.26
hadoop_SelectEventStreamPublisher_toRecordsInputStream_rdh
/** * Retrieve an input stream to the subset of the S3 object that matched the select query. * This is equivalent to loading the content of all RecordsEvents into an InputStream. * This will lazily-load the content from S3, minimizing the amount of memory used. * * @param onEndEvent * callback on the end event * @return the input stream */ public AbortableInputStream toRecordsInputStream(Consumer<EndEvent> onEndEvent) { SdkPublisher<InputStream> recordInputStreams = this.publisher.filter(e -> { if (e instanceof RecordsEvent) { return true; } else if (e instanceof EndEvent) { onEndEvent.accept(((EndEvent) (e))); } return false; }).map(e -> ((RecordsEvent) (e)).payload().asInputStream()); // Subscribe to the async publisher using an enumeration that will // buffer a single chunk (RecordsEvent's payload) at a time and // block until it is consumed. // Also inject an empty stream as the first element that // SequenceInputStream will request on construction. BlockingEnumeration enumeration = new BlockingEnumeration(recordInputStreams, 1, EMPTY_STREAM); return AbortableInputStream.create(new SequenceInputStream(enumeration), this::cancel); }
3.26
hadoop_AbfsTokenRenewer_renew_rdh
/** * Renew the delegation token. * * @param token * token to renew. * @param conf * configuration object. * @return extended expiry time of the token. * @throws IOException * thrown when trying get current user. * @throws InterruptedException * thrown when thread is interrupted */ @Override public long renew(final Token<?> token, Configuration conf) throws IOException, InterruptedException { LOG.debug("Renewing the delegation token"); return getInstance(conf).renewDelegationToken(token); }
3.26
hadoop_AbfsTokenRenewer_isManaged_rdh
/** * Checks if passed token is managed. * * @param token * the token being checked * @return true if it is managed. * @throws IOException * thrown when evaluating if token is managed. */ @Override public boolean isManaged(Token<?> token) throws IOException { return true; }
3.26
hadoop_AbfsTokenRenewer_handleKind_rdh
/** * Checks if this particular object handles the Kind of token passed. * * @param kind * the kind of the token * @return true if it handles passed token kind false otherwise. */ @Override public boolean handleKind(Text kind) { return AbfsDelegationTokenIdentifier.TOKEN_KIND.equals(kind); }
3.26
hadoop_AbfsTokenRenewer_cancel_rdh
/** * Cancel the delegation token. * * @param token * token to cancel. * @param conf * configuration object. * @throws IOException * thrown when trying get current user. * @throws InterruptedException * thrown when thread is interrupted. */ @Override public void cancel(final Token<?> token, Configuration conf) throws IOException, InterruptedException { LOG.debug("Cancelling the delegation token"); getInstance(conf).cancelDelegationToken(token); }
3.26
hadoop_AbstractLaunchableService_bindArgs_rdh
/** * {@inheritDoc } * <p> * The base implementation logs all arguments at the debug level, * then returns the passed in config unchanged. */ @Override public Configuration bindArgs(Configuration config, List<String> args) throws Exception { if (LOG.isDebugEnabled()) { LOG.debug("Service {} passed in {} arguments:", getName(), args.size()); for (String v0 : args) { LOG.debug(v0); } } return config; }
3.26
hadoop_AbstractLaunchableService_execute_rdh
/** * {@inheritDoc } * <p> * The action is to signal success by returning the exit code 0. */ @Override public int execute() throws Exception { return LauncherExitCodes.EXIT_SUCCESS; }
3.26
hadoop_AuditSpan_isValidSpan_rdh
/** * Is the span valid? False == this is a span to indicate unbonded. * * @return true if this span represents a real operation. */ default boolean isValidSpan() { return true; }
3.26
hadoop_AuditSpan_set_rdh
/** * Set an attribute. * This may or may not be propagated to audit logs. * * @param key * attribute name * @param value * value */ default void set(String key, String value) { }
3.26
hadoop_AuditSpan_m0_rdh
/** * Close calls {@link #deactivate()}; subclasses may override * but the audit manager's wrapping span will always relay to * {@link #deactivate()} rather * than call this method on the wrapped span. */ default void m0() { deactivate(); }
3.26
hadoop_DBNameNodeConnector_getNodes_rdh
/** * getNodes function returns a list of DiskBalancerDataNodes. * * @return Array of DiskBalancerDataNodes */ @Override public List<DiskBalancerDataNode> getNodes() throws Exception { Preconditions.checkNotNull(this.connector); List<DiskBalancerDataNode> nodeList = new LinkedList<>(); DatanodeStorageReport[] v1 = this.connector.getLiveDatanodeStorageReport(); for (DatanodeStorageReport report : v1) { DiskBalancerDataNode datanode = getBalancerNodeFromDataNode(report.getDatanodeInfo()); getVolumeInfoFromStorageReports(datanode, report.getStorageReports()); nodeList.add(datanode); } return nodeList; }
3.26
hadoop_DBNameNodeConnector_getVolumeInfoFromStorageReports_rdh
/** * Reads the relevant fields from each storage volume and populate the * DiskBalancer Node. * * @param node * - Disk Balancer Node * @param reports * - Array of StorageReport */ private void getVolumeInfoFromStorageReports(DiskBalancerDataNode node, StorageReport[] reports) throws Exception { Preconditions.checkNotNull(node); Preconditions.checkNotNull(reports); for (StorageReport report : reports) { DatanodeStorage storage = report.getStorage();DiskBalancerVolume volume = new DiskBalancerVolume(); volume.setCapacity(report.getCapacity()); volume.setFailed(report.isFailed()); volume.setUsed(report.getDfsUsed()); // TODO : Should we do BlockPool level balancing at all ? // Does it make sense ? Balancer does do that. Right now // we only deal with volumes and not blockPools volume.setUuid(storage.getStorageID()); // we will skip this volume for disk balancer if // it is read-only since we will not be able to delete // or if it is already failed. volume.setSkip((storage.getState() == State.READ_ONLY_SHARED) || report.isFailed()); volume.setStorageType(storage.getStorageType().name()); volume.setIsTransient(storage.getStorageType().isTransient()); node.addVolume(volume); } }
3.26
hadoop_DBNameNodeConnector_getConnectorInfo_rdh
/** * Returns info about the connector. * * @return String. */ @Override public String getConnectorInfo() { return "Name Node Connector : " + clusterURI.toString(); }
3.26
hadoop_DBNameNodeConnector_getBalancerNodeFromDataNode_rdh
/** * This function maps the required fields from DataNodeInfo to disk * BalancerDataNode. * * @param nodeInfo * @return DiskBalancerDataNode */ private DiskBalancerDataNode getBalancerNodeFromDataNode(DatanodeInfo nodeInfo) { Preconditions.checkNotNull(nodeInfo); DiskBalancerDataNode dbDataNode = new DiskBalancerDataNode(nodeInfo.getDatanodeUuid()); dbDataNode.setDataNodeIP(nodeInfo.getIpAddr()); dbDataNode.setDataNodeName(nodeInfo.getHostName()); dbDataNode.setDataNodePort(nodeInfo.getIpcPort()); return dbDataNode; }
3.26
hadoop_Duration_close_rdh
/** * The close operation relays to {@link #finish()}. * Implementing it allows Duration instances to be automatically * finish()'d in Java7 try blocks when used to measure durations. */@Override public final void close() { finish(); }
3.26
hadoop_Duration_start_rdh
/** * Start * * @return self */public Duration start() { start = now(); return this; }
3.26
hadoop_CipherSuite_getName_rdh
/** * * @return name of cipher suite, as in {@link javax.crypto.Cipher} */ public String getName() { return name; }
3.26
hadoop_CipherSuite_getConfigSuffix_rdh
/** * Returns suffix of cipher suite configuration. * * @return String configuration suffix */ public String getConfigSuffix() { String[] parts = name.split("/"); StringBuilder suffix = new StringBuilder(); for (String part : parts) { suffix.append(".").append(StringUtils.toLowerCase(part)); } return suffix.toString(); }
3.26
hadoop_CipherSuite_convert_rdh
/** * Convert to CipherSuite from name, {@link #algoBlockSize} is fixed for * certain cipher suite, just need to compare the name. * * @param name * cipher suite name * @return CipherSuite cipher suite */ public static CipherSuite convert(String name) { CipherSuite[] suites = CipherSuite.values(); for (CipherSuite suite : suites) { if (suite.getName().equals(name)) { return suite; } } throw new IllegalArgumentException("Invalid cipher suite name: " + name); }
3.26
hadoop_CipherSuite_getAlgorithmBlockSize_rdh
/** * * @return size of an algorithm block in bytes */ public int getAlgorithmBlockSize() { return algoBlockSize; }
3.26
hadoop_DataJoinJob_runJob_rdh
/** * Submit/run a map/reduce job. * * @param job * @return true for success * @throws IOException */ public static boolean runJob(JobConf job) throws IOException { JobClient jc = new JobClient(job); boolean sucess = true;RunningJob running = null; try { running = jc.submitJob(job); JobID jobId = running.getID(); System.out.println(("Job " + jobId) + " is submitted"); while (!running.isComplete()) { System.out.println(("Job " + jobId) + " is still running."); try { Thread.sleep(60000); } catch (InterruptedException e) { } running = jc.getJob(jobId); } sucess = running.isSuccessful(); } finally { if ((!sucess) && (running != null)) { running.killJob(); } jc.close(); } return sucess; }
3.26
hadoop_DataJoinJob_m0_rdh
/** * * @param args */ public static void m0(String[] args) { boolean success; if ((args.length < 8) || (args.length > 10)) { System.out.println(((((("usage: DataJoinJob " + "inputdirs outputdir map_input_file_format ") + "numofParts ") + "mapper_class ") + "reducer_class ") + "map_output_value_class ") + "output_value_class [maxNumOfValuesPerGroup [descriptionOfJob]]]"); System.exit(-1); } try { JobConf job = DataJoinJob.createDataJoinJob(args); success = DataJoinJob.runJob(job); if (!success) { System.out.println("Job failed"); } } catch (IOException ioe) { ioe.printStackTrace(); } }
3.26
hadoop_LengthInputStream_getLength_rdh
/** * * @return the length. */public long getLength() { return length; }
3.26
hadoop_TaskTrackerInfo_getReasonForBlacklist_rdh
/** * Gets the reason for which the tasktracker was blacklisted. * * @return reason which tracker was blacklisted */ public String getReasonForBlacklist() { return reasonForBlacklist; }
3.26
hadoop_TaskTrackerInfo_getBlacklistReport_rdh
/** * Gets a descriptive report about why the tasktracker was blacklisted. * * @return report describing why the tasktracker was blacklisted. */ public String getBlacklistReport() { return blacklistReport; }
3.26
hadoop_TaskTrackerInfo_getTaskTrackerName_rdh
/** * Gets the tasktracker's name. * * @return tracker's name. */ public String getTaskTrackerName() { return name; }
3.26