name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_AbstractRMAdminRequestInterceptor_getConf_rdh | /**
* Gets the {@link Configuration}.
*/
@Override
public Configuration getConf() {
  return this.conf;
}
/**
* Initializes the {@link RMAdminRequestInterceptor} | 3.26 |
hadoop_AbstractRMAdminRequestInterceptor_setConf_rdh | /**
* Sets the {@link Configuration}.
*/
@Override
public void setConf(Configuration conf) {
this.conf = conf;
if (this.nextInterceptor != null) {
this.nextInterceptor.setConf(conf);
}
} | 3.26 |
hadoop_AbstractRMAdminRequestInterceptor_shutdown_rdh | /**
* Disposes the {@link RMAdminRequestInterceptor}.
*/
@Override
public void shutdown() {
  if (this.nextInterceptor != null) {
    this.nextInterceptor.shutdown();
  }
} | 3.26 |
hadoop_AbstractRMAdminRequestInterceptor_setNextInterceptor_rdh | /**
* Sets the {@link RMAdminRequestInterceptor} in the chain.
*/
@Override
public void setNextInterceptor(RMAdminRequestInterceptor nextInterceptor) {
this.nextInterceptor = nextInterceptor;
} | 3.26 |
hadoop_AbstractRMAdminRequestInterceptor_getNextInterceptor_rdh | /**
* Gets the next {@link RMAdminRequestInterceptor} in the chain.
*/
@Override
public RMAdminRequestInterceptor getNextInterceptor() {
return this.nextInterceptor;
} | 3.26 |
hadoop_CertificateUtil_parseRSAPublicKey_rdh | /**
* Gets an RSAPublicKey from the provided PEM encoding.
*
* @param pem
* - the pem encoding from config without the header and footer
* @return RSAPublicKey the RSA public key
* @throws ServletException
* thrown if a processing error occurred
 */
public static RSAPublicKey parseRSAPublicKey(String pem) throws ServletException {
String fullPem = (PEM_HEADER + pem) + PEM_FOOTER;
PublicKey key = null;
try {
CertificateFactory fact = CertificateFactory.getInstance("X.509");
ByteArrayInputStream is = new ByteArrayInputStream(fullPem.getBytes(StandardCharsets.UTF_8));
    X509Certificate v4 = ((X509Certificate) (fact.generateCertificate(is)));
    key = v4.getPublicKey();
} catch (CertificateException ce) {
String message = null;
if (pem.startsWith(PEM_HEADER)) {
message = "CertificateException - be sure not to include PEM header " + "and footer in the PEM configuration element.";
} else {
message = "CertificateException - PEM may be corrupt";}
throw new ServletException(message, ce);
}
return ((RSAPublicKey) (key));
} | 3.26 |
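parseRSAPublicKey above wraps the configured PEM body in a certificate header/footer and lets the JDK's X.509 CertificateFactory extract the public key. A standalone sketch of the same JDK calls follows; the PEM_HEADER/PEM_FOOTER values are assumed to be the standard certificate markers (the snippet does not show the constants), and the PEM body comes from the first program argument:

```java
// Standalone sketch of the PEM -> RSAPublicKey extraction used above.
// Pass the base64 certificate body (no header/footer) as args[0].
import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;
import java.security.interfaces.RSAPublicKey;

public class PemPublicKeyDemo {
  // Assumed values; the Hadoop snippet references but does not show them.
  private static final String PEM_HEADER = "-----BEGIN CERTIFICATE-----\n";
  private static final String PEM_FOOTER = "\n-----END CERTIFICATE-----";

  static RSAPublicKey parse(String pemBody) throws Exception {
    String fullPem = PEM_HEADER + pemBody + PEM_FOOTER;
    CertificateFactory fact = CertificateFactory.getInstance("X.509");
    X509Certificate cert = (X509Certificate) fact.generateCertificate(
        new ByteArrayInputStream(fullPem.getBytes(StandardCharsets.UTF_8)));
    // Throws ClassCastException if the certificate does not carry an RSA key.
    return (RSAPublicKey) cert.getPublicKey();
  }

  public static void main(String[] args) throws Exception {
    RSAPublicKey key = parse(args[0]);
    System.out.println("modulus bits: " + key.getModulus().bitLength());
  }
}
```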
hadoop_OSSListResult_v1_rdh | /**
* Restricted constructors to ensure v1 or v2, not both.
*
* @param result
* v1 result
* @return new list result container
*/
public static OSSListResult v1(ObjectListing result) {
return new OSSListResult(result, null);
} | 3.26 |
hadoop_OSSListResult_v2_rdh | /**
* Restricted constructors to ensure v1 or v2, not both.
*
* @param result
* v2 result
* @return new list result container
*/
public static OSSListResult v2(ListObjectsV2Result result) {
return new OSSListResult(null, result);
} | 3.26 |
hadoop_OSSListResult_logAtDebug_rdh | /**
* Dump the result at debug level.
*
* @param log
* log to use
 */
public void logAtDebug(Logger log) {
  Collection<String> prefixes = getCommonPrefixes();
  Collection<OSSObjectSummary> summaries = getObjectSummaries();
log.debug("Prefix count = {}; object count={}", prefixes.size(), summaries.size());
for (OSSObjectSummary summary : summaries) {
log.debug("Summary: {} {}", summary.getKey(), summary.getSize());
}
for (String prefix : prefixes) {
log.debug("Prefix: {}", prefix);
}
} | 3.26 |
hadoop_DistributedShellTimelinePlugin_getTimelineEntityGroupId_rdh | /**
 * Timeline v1.5 reader plugin for YARN distributed shell. It translates an
* incoming getEntity request to a set of related timeline entity groups, via
* the information provided in the primary filter or entity id field.
 */
public class DistributedShellTimelinePlugin extends TimelineEntityGroupPlugin {
  @Override
  public Set<TimelineEntityGroupId> getTimelineEntityGroupId(String entityType, NameValuePair primaryFilter, Collection<NameValuePair> secondaryFilters) {
if (DSEntity.DS_CONTAINER.toString().equals(entityType)) {
if (primaryFilter == null) {
return null;
}
return toEntityGroupId(primaryFilter.getValue().toString());
}
return null;
} | 3.26 |
hadoop_MapWritable_write_rdh | // Writable
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
// Write out the number of entries in the map
out.writeInt(instance.size());
// Then write out each key/value pair
    for (Map.Entry<Writable, Writable> e : instance.entrySet()) {
out.writeByte(getId(e.getKey().getClass()));
e.getKey().write(out);
out.writeByte(getId(e.getValue().getClass()));
e.getValue().write(out);
}
} | 3.26 |
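MapWritable.write() above emits the superclass class-id table, the entry count, and then each id-tagged key/value pair; readFields() is the symmetric reader. A small round-trip sketch, assuming this is org.apache.hadoop.io.MapWritable:

```java
// Round-trip sketch for the MapWritable.write() method above: serialize a
// small map with write(DataOutput) and read it back with readFields(DataInput).
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;

public class MapWritableRoundTrip {
  public static void main(String[] args) throws IOException {
    MapWritable original = new MapWritable();
    original.put(new Text("answer"), new IntWritable(42));

    // write(): superclass class-id table, entry count, id-tagged entries.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    original.write(new DataOutputStream(bytes));

    // readFields(): the symmetric deserialization path.
    MapWritable copy = new MapWritable();
    copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

    System.out.println(copy.get(new Text("answer"))); // prints 42
  }
}
```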
hadoop_RouterStateIdContext_receiveRequestState_rdh | /**
* Routers do not update their state using information from clients
* to avoid clients interfering with one another.
*/
@Override
public long receiveRequestState(RpcRequestHeaderProto header, long clientWaitTime) throws RetriableException {
// Do nothing.
return 0;
} | 3.26 |
hadoop_RouterStateIdContext_getRouterFederatedStateMap_rdh | /**
* Utility function to parse routerFederatedState field in RPC headers.
*/
public static Map<String, Long> getRouterFederatedStateMap(ByteString byteString) {
if (byteString != null) {
RouterFederatedStateProto federatedState;
try {
federatedState = RouterFederatedStateProto.parseFrom(byteString);
} catch (InvalidProtocolBufferException e) {
throw new RuntimeException(e);
}
return federatedState.getNamespaceStateIdsMap();
} else {
return Collections.emptyMap();
}
} | 3.26 |
hadoop_RouterStateIdContext_setResponseHeaderState_rdh | /**
* Adds the {@link #namespaceIdMap} to the response header that will be sent to a client.
*/
public void setResponseHeaderState(RpcResponseHeaderProto.Builder headerBuilder) {
if (namespaceIdMap.isEmpty()) {
return;
}
RouterFederatedStateProto.Builder builder = RouterFederatedStateProto.newBuilder();
namespaceIdMap.forEach((k, v) -> builder.putNamespaceStateIds(k, v.get()));
headerBuilder.setRouterFederatedState(builder.build().toByteString());
} | 3.26 |
hadoop_BlockBlobInputStream_available_rdh | /**
* Gets the number of bytes that can be read (or skipped over) without
* performing a network operation.
*
* @throws IOException
* IO failure
*/
@Override
public synchronized int available() throws IOException {
checkState();
if (blobInputStream != null) {
return blobInputStream.available();
} else {
return streamBuffer == null ? 0 : streamBufferLength - streamBufferPosition;
}
} | 3.26 |
hadoop_BlockBlobInputStream_getPos_rdh | /**
* Gets the read position of the stream.
*
* @return the zero-based byte offset of the read position.
* @throws IOException
* IO failure
*/
@Override
public synchronized long getPos() throws IOException {
  checkState();
return streamBuffer != null ? (streamPosition - streamBufferLength) + streamBufferPosition : streamPosition;
} | 3.26 |
hadoop_BlockBlobInputStream_write_rdh | /**
* Writes a range of bytes to the stream.
*
* @param b
* a byte array.
* @param off
* the start offset in <code>buffer</code> from which the data
* is read.
* @param length
* the number of bytes to be written.
* @throws IOException
* IO failure
*/
public synchronized void write(byte[] b, int off, int length) throws IOException {
if (b == null) {
throw new NullPointerException("Null buffer argument");
}
    if (((off < 0) || (length < 0)) || (length > (b.length - off))) {
throw new IndexOutOfBoundsException("array write offset");
}
System.arraycopy(b, off, buffer, writePosition, length);
writePosition += length;
} | 3.26 |
hadoop_BlockBlobInputStream_size_rdh | /**
* Gets the current size of the stream.
*/
public synchronized int size() {
return writePosition - offset;
} | 3.26 |
hadoop_BlockBlobInputStream_resetStreamBuffer_rdh | /**
* Reset the internal stream buffer but do not release the memory.
* The buffer can be reused to avoid frequent memory allocations of
* a large buffer.
*/
private void resetStreamBuffer() {
streamBufferPosition = 0;
streamBufferLength = 0;
} | 3.26 |
hadoop_BlockBlobInputStream_capacity_rdh | /**
* Gets the current capacity of the stream.
*/
public synchronized int capacity() {
return length;
} | 3.26 |
hadoop_BlockBlobInputStream_close_rdh | /**
* Closes this stream and releases any system resources associated with it.
*
* @throws IOException
* IO failure
*/
@Override
public synchronized void close() throws IOException {
closed = true;
closeBlobInputStream();
streamBuffer = null;
streamBufferPosition = 0;
streamBufferLength = 0;
} | 3.26 |
hadoop_BlockBlobInputStream_seek_rdh | /**
* Sets the read position of the stream.
*
* @param pos
* a zero-based byte offset in the stream.
* @throws EOFException
* if read is out of range
*/
@Override
public synchronized void seek(long pos) throws IOException {
  checkState();
if (pos < 0) {
throw new EOFException((FSExceptionMessages.NEGATIVE_SEEK + " ") + pos);
}
if (pos > streamLength) {
throw new EOFException((FSExceptionMessages.CANNOT_SEEK_PAST_EOF + " ") + pos);
}
// calculate offset between the target and current position in the stream
    long offset = pos - getPos();
    if (offset == 0) {
      // no-op, no state change
      return;
    }
    if (offset > 0) {
// forward seek, data can be skipped as an optimization
if (skip(offset) != offset) {
throw new EOFException(FSExceptionMessages.EOF_IN_READ_FULLY);
}
return;
}
// reverse seek, offset is negative
if (streamBuffer != null) {
if ((streamBufferPosition + offset) >= 0) {
// target position is inside the stream buffer,
// only need to move backwards within the stream buffer
streamBufferPosition += offset;
} else {
// target position is outside the stream buffer,
// need to reset stream buffer and move position for next network read
resetStreamBuffer();
streamPosition = pos;
}
} else {
streamPosition = pos;
}
// close BlobInputStream after seek is invoked because BlobInputStream
// does not support seek
closeBlobInputStream();
} | 3.26 |
hadoop_BlockBlobInputStream_seekToNewSource_rdh | /**
 * Seeks a secondary copy of the data. This method is not supported.
*
* @param targetPos
* a zero-based byte offset in the stream.
* @return false
* @throws IOException
* IO failure
*/
@Override
public boolean seekToNewSource(long targetPos) throws IOException {
return false;
} | 3.26 |
hadoop_BlockBlobInputStream_skip_rdh | /**
* Skips over and discards n bytes of data from this input stream.
*
* @param n
* the number of bytes to be skipped.
* @return the actual number of bytes skipped.
* @throws IOException
* IO failure
* @throws IndexOutOfBoundsException
* if n is negative or if the sum of n
* and the current value of getPos() is greater than the length of the stream.
*/
@Override
public synchronized long skip(long n) throws IOException {
checkState();
if (blobInputStream != null) {
// blobInput stream is open; delegate the work to it
long skipped = blobInputStream.skip(n);
// update position to the actual skip value
streamPosition += skipped;
return skipped;
}
// no blob stream; implement the skip logic directly
if ((n < 0) || (n > (streamLength - getPos()))) {
throw new IndexOutOfBoundsException("skip range");
}
if (streamBuffer != null) {
// there's a buffer, so seek with it
if (n < (streamBufferLength - streamBufferPosition)) {
// new range is in the buffer, so just update the buffer position
// skip within the buffer.
streamBufferPosition += ((int) (n));
} else {
        // skip is out of range, so move position to new value and reset
        // the buffer ready for the next read()
        streamPosition = getPos() + n;
        resetStreamBuffer();
      }
    } else {
// no stream buffer; increment the stream position ready for
// the next triggered connection & read
streamPosition += n;
}
return n;
} | 3.26 |
hadoop_BlockBlobInputStream_read_rdh | /**
* Reads the next byte of data from the stream.
*
* @return the next byte of data, or -1
* @throws IOException
* IO failure
*/
@Override
public int read() throws IOException {
byte[] buffer = new byte[1];
    int numberOfBytesRead = read(buffer, 0, 1);
return numberOfBytesRead < 1 ? -1 : buffer[0];
} | 3.26 |
hadoop_ProgressSplitsBlock_burst_rdh | // this coordinates with LoggedTaskAttempt.SplitVectorKind
int[][] burst() {
int[][] result = new int[4][];
result[WALLCLOCK_TIME_INDEX] = progressWallclockTime.getValues();
    result[CPU_TIME_INDEX] = progressCPUTime.getValues();
    result[VIRTUAL_MEMORY_KBYTES_INDEX] = f0.getValues();
result[PHYSICAL_MEMORY_KBYTES_INDEX] = progressPhysicalMemoryKbytes.getValues();
return result;
} | 3.26 |
hadoop_MetricsFilter_accepts_rdh | /**
* Whether to accept the record
*
* @param record
* to filter on
* @return true to accept; false otherwise.
*/
public boolean accepts(MetricsRecord record) {
return m0(record.name()) && m0(record.tags());
} | 3.26 |
hadoop_EditLogBackupInputStream_m0_rdh | /**
* Number of bytes read from the stream so far.
*/
int m0() {
  return count;
} | 3.26 |
hadoop_ResourceEstimatorService_getHistoryResourceSkyline_rdh | /**
* Get history {@link ResourceSkyline} from {@link SkylineStore}. This
* function supports the following special wildcard operations regarding
* {@link RecurrenceId}: If the {@code pipelineId} is "*", it will return all
* entries in the store; else, if the {@code runId} is "*", it will return all
* {@link ResourceSkyline}s belonging to the {@code pipelineId}; else, it will
* return all {@link ResourceSkyline}s belonging to the {{@code pipelineId},
* {@code runId}}. If the {@link RecurrenceId} does not exist, it will not do
* anything.
*
* @param pipelineId
* pipelineId of the history run.
* @param runId
* runId of the history run.
* @return Json format of history {@link ResourceSkyline}s.
* @throws SkylineStoreException
* if fails to getHistory
* {@link ResourceSkyline} from {@link SkylineStore}.
*/
@GET
@Path("/skylinestore/history/{pipelineId}/{runId}")
@Produces(MediaType.APPLICATION_JSON)
public String getHistoryResourceSkyline(@PathParam("pipelineId")
String pipelineId, @PathParam("runId")
String runId) throws SkylineStoreException {
RecurrenceId recurrenceId = new RecurrenceId(pipelineId, runId);
    Map<RecurrenceId, List<ResourceSkyline>> jobHistory = skylineStore.getHistory(recurrenceId);
    final String skyline = gson.toJson(jobHistory, skylineStoreType);
f0.debug("Query the skyline store for recurrenceId: {}." + recurrenceId);
return skyline;
}
/**
* Get estimated {code Resource} allocation for the pipeline.
*
* @param pipelineId
* id of the pipeline.
* @return Json format of {@link RLESparseResourceAllocation}.
* @throws SkylineStoreException
* if fails to get estimated {code Resource}
* allocation from {@link SkylineStore} | 3.26 |
hadoop_ResourceEstimatorService_parseFile_rdh | /**
* Parse the log file. See also {@link LogParser#parseStream(InputStream)}.
*
* @param logFile
* file/directory of the log to be parsed.
* @throws IOException
* if fails to parse the log.
* @throws SkylineStoreException
* if fails to addHistory to
* {@link SkylineStore}.
* @throws ResourceEstimatorException
* if the {@link LogParser}
* is not initialized.
*/
@POST
@Path("/translator/{logFile : .+}")
public void parseFile(@PathParam("logFile")
String logFile) throws IOException, SkylineStoreException, ResourceEstimatorException {
logParserUtil.parseLog(logFile);
f0.debug("Parse logFile: {}.", logFile);
} | 3.26 |
hadoop_ResourceEstimatorService_getPrediction_rdh | /**
* Get predicted {code Resource} allocation for the pipeline. If the
* prediction for the pipeline already exists in the {@link SkylineStore}, it
* will directly get the prediction from {@link SkylineStore}, otherwise it
* will call the {@link Solver} to make prediction, and store the predicted
* {code Resource} allocation to the {@link SkylineStore}. Note that invoking
* {@link Solver} could be a time-consuming operation.
*
* @param pipelineId
* the id of the pipeline.
* @return Json format of {@link RLESparseResourceAllocation}.
* @throws SolverException
* if {@link Solver} fails;
* @throws SkylineStoreException
* if fails to get history
* {@link ResourceSkyline} or predicted {code Resource} allocation
* from {@link SkylineStore}.
*/
@GET
@Path("/estimator/{pipelineId}")
@Produces(MediaType.APPLICATION_JSON)
public String getPrediction(@PathParam("pipelineId")
String pipelineId) throws SolverException, SkylineStoreException {
// first, try to grab the predicted resource allocation from the skyline
// store
    RLESparseResourceAllocation result = skylineStore.getEstimation(pipelineId);
    // if received resource allocation is null, then run the solver
if (result == null) {
      RecurrenceId recurrenceId = new RecurrenceId(pipelineId, "*");
      Map<RecurrenceId, List<ResourceSkyline>> jobHistory = skylineStore.getHistory(recurrenceId);
result = solver.solve(jobHistory);
}
final String prediction = gson.toJson(result, rleType);
f0.debug("Predict resource requests for pipelineId: {}." + pipelineId);
return prediction;
} | 3.26 |
hadoop_ResourceEstimatorService_deleteHistoryResourceSkyline_rdh | /**
* Delete history {@link ResourceSkyline}s from {@link SkylineStore}.
* <p> Note that for safety considerations, we only allow users to delete
* history {@link ResourceSkyline}s of one job run.
*
* @param pipelineId
* pipelineId of the history run.
* @param runId
 * runId of the history run.
* @throws SkylineStoreException
* if fails to deleteHistory
* {@link ResourceSkyline}s.
*/
@DELETE
@Path("/skylinestore/history/{pipelineId}/{runId}")public void deleteHistoryResourceSkyline(@PathParam("pipelineId")
String pipelineId, @PathParam("runId")
String runId) throws SkylineStoreException {
RecurrenceId recurrenceId = new RecurrenceId(pipelineId, runId);
skylineStore.deleteHistory(recurrenceId);
f0.info("Delete ResourceSkyline for recurrenceId: {}.", recurrenceId);
} | 3.26 |
hadoop_ResourceUsage_getCachedUsed_rdh | // Cache Used
public Resource getCachedUsed() {
return _get(NL, ResourceType.CACHED_USED);
} | 3.26 |
hadoop_ResourceUsage_getReserved_rdh | /* Reserved */public Resource getReserved() {
return getReserved(NL);
} | 3.26 |
hadoop_ResourceUsage_getAMLimit_rdh | /* AM-Resource Limit */
public Resource getAMLimit() {
return getAMLimit(NL);
} | 3.26 |
hadoop_ResourceUsage_getAMUsed_rdh | /* AM-Used */
public Resource getAMUsed() {
return getAMUsed(NL);
} | 3.26 |
hadoop_ResourceUsage_m0_rdh | /* Used */
public Resource m0() {
return getUsed(NL);
} | 3.26 |
hadoop_ResourceUsage_getPending_rdh | /* Pending */
public Resource getPending() {
return getPending(NL);
} | 3.26 |
hadoop_JournalProtocolServerSideTranslatorPB_journal_rdh | /**
*
* @see JournalProtocol#journal
*/
@Override
public JournalResponseProto journal(RpcController unused, JournalRequestProto req) throws ServiceException {
try {
impl.journal(PBHelper.convert(req.getJournalInfo()), req.getEpoch(), req.getFirstTxnId(), req.getNumTxns(), req.getRecords().toByteArray());
    } catch (IOException e) {
      throw new ServiceException(e);
}
return VOID_JOURNAL_RESPONSE;
} | 3.26 |
hadoop_JournalProtocolServerSideTranslatorPB_startLogSegment_rdh | /**
*
* @see JournalProtocol#startLogSegment
*/
@Override
public StartLogSegmentResponseProto startLogSegment(RpcController controller, StartLogSegmentRequestProto req) throws ServiceException {
try {
impl.startLogSegment(PBHelper.convert(req.getJournalInfo()), req.getEpoch(), req.getTxid());
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_START_LOG_SEGMENT_RESPONSE;
} | 3.26 |
hadoop_ConfigurationUtils_injectDefaults_rdh | /**
* Injects configuration key/value pairs from one configuration to another if the key does not exist in the target
* configuration.
*
* @param source
* source configuration.
* @param target
* target configuration.
*/
public static void injectDefaults(Configuration source, Configuration target) {
  Check.notNull(source, "source");
  Check.notNull(target, "target");
  for (Map.Entry<String, String> entry : source) {
    if (target.get(entry.getKey()) == null) {
target.set(entry.getKey(), entry.getValue());
}
}
} | 3.26 |
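injectDefaults copies a key from source to target only when the target has no value for it, so explicit target settings win. A usage sketch with org.apache.hadoop.conf.Configuration; the merge loop is inlined because the snippets do not show which package this ConfigurationUtils lives in:

```java
// Usage sketch of the "inject defaults" merge shown above, with the loop
// inlined so the example does not depend on the (unshown) ConfigurationUtils package.
import java.util.Map;
import org.apache.hadoop.conf.Configuration;

public class InjectDefaultsDemo {
  static void injectDefaults(Configuration source, Configuration target) {
    for (Map.Entry<String, String> entry : source) {
      if (target.get(entry.getKey()) == null) {
        target.set(entry.getKey(), entry.getValue());
      }
    }
  }

  public static void main(String[] args) {
    Configuration defaults = new Configuration(false);
    defaults.set("a", "default-a");
    defaults.set("b", "default-b");

    Configuration target = new Configuration(false);
    target.set("a", "explicit-a");        // already set, must be preserved

    injectDefaults(defaults, target);
    System.out.println(target.get("a"));  // explicit-a (not overwritten)
    System.out.println(target.get("b"));  // default-b  (injected)
  }
}
```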
hadoop_ConfigurationUtils_resolve_rdh | /**
 * Returns a new Configuration instance with all inline values resolved.
 *
 * @return a new Configuration instance with all inline values resolved.
*/
public static Configuration resolve(Configuration conf) {
Configuration resolved = new Configuration(false);
  for (Map.Entry<String, String> entry : conf) {
    resolved.set(entry.getKey(), conf.get(entry.getKey()));
}
return resolved;
} | 3.26 |
hadoop_ConfigurationUtils_load_rdh | // Cannibalized from FileSystemAccess <code>Configuration.loadResource()</code>.
/**
* Create a configuration from an InputStream.
* <p>
 * ERROR cannibalized from <code>Configuration.loadResource()</code>.
*
* @param is
* inputstream to read the configuration from.
* @throws IOException
* thrown if the configuration could not be read.
*/
public static void load(Configuration conf, InputStream is) throws IOException {
conf.addResource(is);
} | 3.26 |
hadoop_DeregisterSubClusterRequest_newInstance_rdh | /**
* Initialize DeregisterSubClusterRequest according to subClusterId.
*
* @param subClusterId
* subClusterId.
* @return DeregisterSubClusterRequest.
*/
@Private
@Unstable
public static DeregisterSubClusterRequest newInstance(String subClusterId) {
DeregisterSubClusterRequest request = Records.newRecord(DeregisterSubClusterRequest.class);
request.setSubClusterId(subClusterId);
return request;
} | 3.26 |
hadoop_AbstractManagedParentQueue_removeChildQueue_rdh | /**
* Remove the specified child queue.
*
* @param childQueueName
* name of the child queue to be removed
* @return child queue.
* @throws SchedulerDynamicEditException
* when removeChildQueue fails.
*/
public CSQueue removeChildQueue(String childQueueName) throws SchedulerDynamicEditException {
CSQueue childQueue;
writeLock.lock();
try {
childQueue = queueContext.getQueueManager().getQueue(childQueueName);
if (childQueue != null) {
removeChildQueue(childQueue);
} else {
throw new SchedulerDynamicEditException(("Cannot find queue to delete " + ": ") + childQueueName);
}
} finally {
writeLock.unlock();
}
return childQueue;
} | 3.26 |
hadoop_AbstractManagedParentQueue_addChildQueue_rdh | /**
* Add the specified child queue.
*
* @param childQueue
* reference to the child queue to be added
* @throws SchedulerDynamicEditException
* when addChildQueue fails.
* @throws IOException
* an I/O exception has occurred.
*/
public void addChildQueue(CSQueue childQueue) throws SchedulerDynamicEditException, IOException {
writeLock.lock();
    try {
      if (childQueue.getCapacity() > 0) {
throw new SchedulerDynamicEditException(("Queue " + childQueue) + " being added has non zero capacity.");
}
boolean added = this.childQueues.add(childQueue);
if (LOG.isDebugEnabled()) {
LOG.debug((("updateChildQueues (action: add queue): " + added) + " ") + getChildQueuesToPrint());
}
} finally {
writeLock.unlock();
}
} | 3.26 |
hadoop_AbfsConfiguration_getPasswordString_rdh | /**
* Returns the account-specific password in string form if it exists, then
* looks for an account-agnostic value.
*
* @param key
* Account-agnostic configuration key
* @return value in String form if one exists, else null
* @throws IOException
*/
public String getPasswordString(String key) throws IOException {
char[] passchars = rawConfig.getPassword(m0(key));
if (passchars == null) {
passchars = rawConfig.getPassword(key);
  }
  if (passchars != null) {
return new String(passchars);
}
return null;
} | 3.26 |
hadoop_AbfsConfiguration_getAccountSpecificClass_rdh | /**
* Returns the account-specific class if it exists, else returns default value.
*
* @param name
* Account-agnostic configuration key
* @param defaultValue
* Class returned if none is configured
* @param xface
* Interface shared by all possible values
* @param <U>
* Interface class type
* @return Account specific Class object that was found
*/
public <U> Class<? extends U> getAccountSpecificClass(String name, Class<? extends U> defaultValue, Class<U> xface) {
return rawConfig.getClass(m0(name), defaultValue, xface);
} | 3.26 |
hadoop_AbfsConfiguration_getMandatoryPasswordString_rdh | /**
* Returns a value for the key if the value exists and is not null.
* Otherwise, throws {@link ConfigurationPropertyNotFoundException} with
* key name.
*
* @param key
* Account-agnostic configuration key
* @return value if exists
* @throws IOException
* if error in fetching password or
* ConfigurationPropertyNotFoundException for missing key
*/
private String getMandatoryPasswordString(String key) throws IOException {
String value = getPasswordString(key);
if (value == null) {
throw new ConfigurationPropertyNotFoundException(key);
}
return value;
} | 3.26 |
hadoop_AbfsConfiguration_set_rdh | /**
* Sets String in the underlying Configuration object.
* Provided only as a convenience; does not add any account logic.
*
* @param key
* Configuration key
* @param value
* Configuration value
*/
public void set(String key, String value) {
rawConfig.set(key, value);
} | 3.26 |
hadoop_AbfsConfiguration_getClientCorrelationId_rdh | /**
* Gets client correlation ID provided in config.
*
* @return Client Correlation ID config
*/
public String getClientCorrelationId() {
return clientCorrelationId;
} | 3.26 |
hadoop_AbfsConfiguration_m0_rdh | /**
* Appends an account name to a configuration key yielding the
* account-specific form.
*
* @param key
* Account-agnostic configuration key
* @return Account-specific configuration key
*/
public String m0(String key) {
return (key + ".") + accountName;
} | 3.26 |
hadoop_AbfsConfiguration_getString_rdh | /**
* Returns the account-specific value if it exists, then looks for an
* account-agnostic value.
*
* @param key
* Account-agnostic configuration key
* @return value if one exists, else the default value
*/
public String getString(String key, String defaultValue) {
return rawConfig.get(m0(key), rawConfig.get(key, defaultValue));
} | 3.26 |
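The AbfsConfiguration getters above share one precedence rule: try the account-qualified key (key + "." + accountName, built by m0), then the plain key, then the supplied default. A minimal sketch of that lookup order using org.apache.hadoop.conf.Configuration; the account name and the fs.azure.example.setting key are made-up example values:

```java
// Sketch of the account-specific -> account-agnostic -> default lookup
// used by the AbfsConfiguration getters above.
import org.apache.hadoop.conf.Configuration;

public class AccountPrecedenceDemo {
  private static final String ACCOUNT = "myaccount.dfs.core.windows.net"; // hypothetical account

  static String accountConf(String key) {
    return key + "." + ACCOUNT; // what m0(key) does above
  }

  static String getString(Configuration conf, String key, String defaultValue) {
    // Account-specific value wins, then the plain key, then the default.
    return conf.get(accountConf(key), conf.get(key, defaultValue));
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("fs.azure.example.setting", "agnostic");
    conf.set(accountConf("fs.azure.example.setting"), "account-specific");

    System.out.println(getString(conf, "fs.azure.example.setting", "default"));
    // prints "account-specific"; drop the second set() and it falls back to
    // "agnostic", then to "default" if neither key is present.
  }
}
```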
hadoop_AbfsConfiguration_getLong_rdh | /**
* Returns the account-specific value if it exists, then looks for an
* account-agnostic value, and finally tries the default value.
*
* @param key
* Account-agnostic configuration key
* @param defaultValue
* Value returned if none is configured
* @return value if one exists, else the default value
*/
public long getLong(String key, long defaultValue) {
  return rawConfig.getLong(m0(key), rawConfig.getLong(key, defaultValue));
} | 3.26 |
hadoop_AbfsConfiguration_m2_rdh | /**
* Whether {@code AbfsClient} should track and send latency info back to storage servers.
*
* @return a boolean indicating whether latency should be tracked.
*/
public boolean m2() {
return this.trackLatency;
} | 3.26 |
hadoop_AbfsConfiguration_getEnum_rdh | /**
* Returns the account-specific enum value if it exists, then
* looks for an account-agnostic value.
*
* @param name
* Account-agnostic configuration key
* @param defaultValue
* Value returned if none is configured
* @param <T>
* Enum type
 * @return enum value if one exists, else the default value
*/
public <T extends Enum<T>> T getEnum(String name, T defaultValue) {
return rawConfig.getEnum(m0(name), rawConfig.getEnum(name, defaultValue));
} | 3.26 |
hadoop_AbfsConfiguration_setBoolean_rdh | /**
* Sets boolean in the underlying Configuration object.
* Provided only as a convenience; does not add any account logic.
*
* @param key
* Configuration key
* @param value
* Configuration value
*/
public void setBoolean(String key,
boolean value) {
rawConfig.setBoolean(key, value);
} | 3.26 |
hadoop_AbfsConfiguration_getTracingHeaderFormat_rdh | /**
* Enum config to allow user to pick format of x-ms-client-request-id header
*
* @return tracingContextFormat config if valid, else default ALL_ID_FORMAT
*/
public TracingHeaderFormat getTracingHeaderFormat() {
return getEnum(FS_AZURE_TRACINGHEADER_FORMAT, TracingHeaderFormat.ALL_ID_FORMAT);
} | 3.26 |
hadoop_AbfsConfiguration_getAccountAgnosticClass_rdh | /**
* Returns account-agnostic Class if it exists, else returns the default value.
*
* @param name
* Account-agnostic configuration key
* @param defaultValue
* Class returned if none is configured
* @param xface
* Interface shared by all possible values
* @param <U>
* Interface class type
* @return Account-Agnostic Class object that was found
*/
public <U> Class<? extends U> getAccountAgnosticClass(String name, Class<? extends U> defaultValue, Class<U> xface) {
return rawConfig.getClass(name, defaultValue, xface);
} | 3.26 |
hadoop_AbfsConfiguration_getBoolean_rdh | /**
* Returns the account-specific value if it exists, then looks for an
* account-agnostic value, and finally tries the default value.
*
* @param key
* Account-agnostic configuration key
* @param defaultValue
* Value returned if none is configured
* @return value if one exists, else the default value
*/
public boolean getBoolean(String key, boolean defaultValue) {
return rawConfig.getBoolean(m0(key), rawConfig.getBoolean(key, defaultValue));
} | 3.26 |
hadoop_AbfsConfiguration_getAccountName_rdh | /**
* Gets the Azure Storage account name corresponding to this instance of configuration.
*
* @return the Azure Storage account name
*/
public String getAccountName() {
return accountName;
} | 3.26 |
hadoop_AbfsConfiguration_getAccountAgnosticEnum_rdh | /**
* Returns the account-agnostic enum value if it exists, else
* return default.
*
* @param name
* Account-agnostic configuration key
* @param defaultValue
* Value returned if none is configured
* @param <T>
* Enum type
 * @return enum value if one exists, else the default value
*/
public <T extends Enum<T>> T getAccountAgnosticEnum(String name, T defaultValue) {
return rawConfig.getEnum(name, defaultValue);
} | 3.26 |
hadoop_AbfsConfiguration_getTokenProviderClass_rdh | /**
* Returns account-specific token provider class if it exists, else checks if
* an account-agnostic setting is present for token provider class if AuthType
* matches with authType passed.
*
* @param authType
* AuthType effective on the account
* @param name
* Account-agnostic configuration key
* @param defaultValue
* Class returned if none is configured
* @param xface
* Interface shared by all possible values
* @param <U>
* Interface class type
* @return Highest-precedence Class object that was found
*/
public <U> Class<? extends U> getTokenProviderClass(AuthType authType, String name, Class<? extends U> defaultValue, Class<U> xface) {
  Class<?> tokenProviderClass = getAccountSpecificClass(name, defaultValue, xface);
// If there is none set specific for account
// fall back to generic setting if Auth Type matches
if ((tokenProviderClass == null) && (authType == getAccountAgnosticEnum(FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME, AuthType.SharedKey))) {
tokenProviderClass = getAccountAgnosticClass(name, defaultValue, xface);
}
return tokenProviderClass == null ? null : tokenProviderClass.asSubclass(xface);
} | 3.26 |
hadoop_Cluster_getQueue_rdh | /**
* Get queue information for the specified name.
*
* @param name
* queuename
* @return object of {@link QueueInfo}
* @throws IOException
* @throws InterruptedException
*/
public QueueInfo getQueue(String name) throws IOException, InterruptedException {
return client.getQueue(name);
} | 3.26 |
hadoop_Cluster_getJob_rdh | /**
* Get job corresponding to jobid.
*
* @param jobId
* @return object of {@link Job}
* @throws IOException
* @throws InterruptedException
 */
public Job getJob(JobID jobId) throws IOException, InterruptedException {
JobStatus status = client.getJobStatus(jobId);
if (status != null) {
JobConf conf;
try {
conf = new JobConf(status.getJobFile());
} catch (RuntimeException ex) {
// If job file doesn't exist it means we can't find the job
if (ex.getCause() instanceof FileNotFoundException) {
return null;
} else {
throw ex;
}
}
return Job.getInstance(this, status, conf);
}
return null;
} | 3.26 |
hadoop_Cluster_getChildQueues_rdh | /**
* Returns immediate children of queueName.
*
* @param queueName
* @return array of JobQueueInfo which are children of queueName
* @throws IOException
*/
public QueueInfo[] getChildQueues(String queueName) throws IOException, InterruptedException {
return client.getChildQueues(queueName);
} | 3.26 |
hadoop_Cluster_getJobHistoryUrl_rdh | /**
* Get the job history file path for a given job id. The job history file at
* this path may or may not be existing depending on the job completion state.
* The file is present only for the completed jobs.
*
* @param jobId
* the JobID of the job submitted by the current user.
* @return the file path of the job history file
* @throws IOException
* @throws InterruptedException
*/
public String getJobHistoryUrl(JobID jobId) throws IOException, InterruptedException {
if (jobHistoryDir == null) {
jobHistoryDir = new Path(client.getJobHistoryDir());
}
return new Path(jobHistoryDir, (jobId.toString() + "_") + ugi.getShortUserName()).toString();
} | 3.26 |
hadoop_Cluster_getQueues_rdh | /**
* Get all the queues in cluster.
*
* @return array of {@link QueueInfo}
* @throws IOException
* @throws InterruptedException
*/
public QueueInfo[] getQueues() throws IOException, InterruptedException {
return client.getQueues();
} | 3.26 |
hadoop_Cluster_getRootQueues_rdh | /**
* Gets the root level queues.
*
* @return array of JobQueueInfo object.
* @throws IOException
*/
public QueueInfo[] getRootQueues() throws IOException, InterruptedException {
return client.getRootQueues();
} | 3.26 |
hadoop_Cluster_close_rdh | /**
* Close the <code>Cluster</code>.
*
* @throws IOException
*/
public synchronized void close() throws IOException {
clientProtocolProvider.close(client);
} | 3.26 |
hadoop_Cluster_getQueueAclsForCurrentUser_rdh | /**
* Gets the Queue ACLs for current user
*
* @return array of QueueAclsInfo object for current user.
* @throws IOException
*/
public QueueAclsInfo[] getQueueAclsForCurrentUser() throws IOException, InterruptedException {
  return client.getQueueAclsForCurrentUser();
} | 3.26 |
hadoop_Cluster_getAllJobStatuses_rdh | /**
* Get job status for all jobs in the cluster.
*
* @return job status for all jobs in cluster
* @throws IOException
* @throws InterruptedException
*/
public JobStatus[] getAllJobStatuses() throws IOException, InterruptedException {
return client.getAllJobs();
} | 3.26 |
hadoop_Cluster_getDelegationToken_rdh | /**
* Get a delegation token for the user from the JobTracker.
*
* @param renewer
* the user who can renew the token
* @return the new token
* @throws IOException
*/
public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer) throws IOException, InterruptedException {
// client has already set the service
return client.getDelegationToken(renewer);
} | 3.26 |
hadoop_Cluster_getSystemDir_rdh | /**
* Grab the jobtracker system directory path where
* job-specific files will be placed.
*
* @return the system directory where job-specific files are to be placed.
*/
public Path getSystemDir() throws IOException, InterruptedException {
if (sysDir == null) {
sysDir = new Path(client.getSystemDir());
}
return sysDir;
} | 3.26 |
hadoop_Cluster_getLogParams_rdh | /**
* Get log parameters for the specified jobID or taskAttemptID
*
* @param jobID
* the job id.
* @param taskAttemptID
* the task attempt id. Optional.
* @return the LogParams
* @throws IOException
* @throws InterruptedException
*/
public LogParams getLogParams(JobID jobID, TaskAttemptID taskAttemptID) throws IOException, InterruptedException {
  return client.getLogFileParams(jobID, taskAttemptID);
}
/**
* Get current cluster status.
*
* @return object of {@link ClusterMetrics} | 3.26 |
hadoop_Cluster_getBlackListedTaskTrackers_rdh | /**
* Get blacklisted trackers.
*
* @return array of {@link TaskTrackerInfo}
* @throws IOException
* @throws InterruptedException
*/
public TaskTrackerInfo[] getBlackListedTaskTrackers() throws IOException, InterruptedException {
return client.getBlacklistedTrackers();
} | 3.26 |
hadoop_Cluster_getAllJobs_rdh | /**
* Get all the jobs in cluster.
*
* @return array of {@link Job}
* @throws IOException
* @throws InterruptedException
* @deprecated Use {@link #getAllJobStatuses()} instead.
*/
@Deprecated
public Job[] getAllJobs() throws IOException, InterruptedException {
return getJobs(client.getAllJobs());
} | 3.26 |
hadoop_Cluster_getStagingAreaDir_rdh | /**
* Grab the jobtracker's view of the staging directory path where
* job-specific files will be placed.
*
* @return the staging directory where job-specific files are to be placed.
*/
public Path getStagingAreaDir() throws IOException, InterruptedException {
if (stagingAreaDir == null) {
stagingAreaDir = new Path(client.getStagingAreaDir());
}
return stagingAreaDir;
} | 3.26 |
hadoop_Cluster_getTaskTrackerExpiryInterval_rdh | /**
* Get the tasktracker expiry interval for the cluster
*
* @return the expiry interval in msec
*/
public long getTaskTrackerExpiryInterval() throws IOException, InterruptedException {
return client.getTaskTrackerExpiryInterval();
} | 3.26 |
hadoop_Cluster_getJobTrackerStatus_rdh | /**
* Get the JobTracker's status.
*
* @return {@link JobTrackerStatus} of the JobTracker
* @throws IOException
* @throws InterruptedException
 */
public JobTrackerStatus getJobTrackerStatus() throws IOException, InterruptedException {
return client.getJobTrackerStatus();
} | 3.26 |
hadoop_Cluster_getActiveTaskTrackers_rdh | /**
* Get all active trackers in the cluster.
*
* @return array of {@link TaskTrackerInfo}
* @throws IOException
* @throws InterruptedException
*/
public TaskTrackerInfo[] getActiveTaskTrackers() throws IOException, InterruptedException {
return client.getActiveTrackers();
} | 3.26 |
hadoop_Cluster_cancelDelegationToken_rdh | /**
* Cancel a delegation token from the JobTracker
*
* @param token
* the token to cancel
* @throws IOException
* @deprecated Use {@link Token#cancel} instead
*/
public void cancelDelegationToken(Token<DelegationTokenIdentifier> token) throws IOException, InterruptedException {
token.cancel(getConf());
} | 3.26 |
hadoop_Cluster_getFileSystem_rdh | /**
* Get the file system where job-specific files are stored
*
* @return object of FileSystem
* @throws IOException
* @throws InterruptedException
*/
public synchronized FileSystem getFileSystem() throws IOException, InterruptedException {
if (this.fs == null) {
try {
this.fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
public FileSystem run() throws IOException, InterruptedException {
final Path sysDir = new Path(client.getSystemDir());
return sysDir.getFileSystem(getConf());
}
});
} catch (InterruptedException e) {
        throw new RuntimeException(e);
}
}
return fs;
} | 3.26 |
hadoop_Cluster_renewDelegationToken_rdh | /**
* Renew a delegation token
*
* @param token
* the token to renew
* @return the new expiration time
* @throws InvalidToken
* @throws IOException
* @deprecated Use {@link Token#renew} instead
*/
public long renewDelegationToken(Token<DelegationTokenIdentifier> token) throws InvalidToken, IOException, InterruptedException {
return token.renew(getConf());
} | 3.26 |
hadoop_HttpFSServerWebServer_deprecateEnv_rdh | /**
* Load the deprecated environment variable into the configuration.
*
* @param varName
* the environment variable name
* @param conf
* the configuration
* @param propName
* the configuration property name
* @param confFile
* the configuration file name
*/
private static void deprecateEnv(String varName, Configuration conf, String propName, String confFile) {
String value = System.getenv(varName);
if (value == null) {
return;
}
LOG.warn("Environment variable {} is deprecated and overriding" + " property {}', please set the property in {} instead.", varName, propName, confFile);
conf.set(propName, value, "environment variable " + varName);
} | 3.26 |
hadoop_AMRMProxyApplicationContextImpl_setLocalAMRMToken_rdh | /**
* Sets the application's AMRMToken.
*
* @param localToken
* amrmToken issued by AMRMProxy
 */
public synchronized void setLocalAMRMToken(Token<AMRMTokenIdentifier> localToken) {
this.localToken = localToken;
this.localTokenKeyId = null;
} | 3.26 |
hadoop_AMRMProxyApplicationContextImpl_m0_rdh | /**
* Sets the application's AMRMToken.
*
* @param amrmToken
* the new amrmToken from RM
* @return whether the saved token is updated to a different value
*/
public synchronized boolean m0(Token<AMRMTokenIdentifier> amrmToken) {
  Token<AMRMTokenIdentifier> oldValue = this.amrmToken;
  this.amrmToken = amrmToken;
return !this.amrmToken.equals(oldValue);
} | 3.26 |
hadoop_ColumnHeader_getCData_rdh | /**
* Get the cdata field for the TH.
*
* @return CData.
*/
public String getCData() {
return this.cdata;
} | 3.26 |
hadoop_ColumnHeader_getSelector_rdh | /**
* Get the selector field for the TH.
*
* @return Selector.
*/
public String getSelector() {
return this.selector;
} | 3.26 |
hadoop_MutableCounterLong_incr_rdh | /**
* Increment the value by a delta
*
* @param delta
* of the increment
*/
public void incr(long delta) {
value.add(delta);
setChanged();
} | 3.26 |
hadoop_BooleanWritable_set_rdh | /**
* Set the value of the BooleanWritable.
*
* @param value
* value.
*/
public void set(boolean value) {
this.value = value;
} | 3.26 |
hadoop_BooleanWritable_equals_rdh | /**
*/
@Override
public boolean equals(Object o) {
    if (!(o instanceof BooleanWritable)) {
return false;
}
BooleanWritable other = ((BooleanWritable) (o));
return this.value == other.value;
} | 3.26 |
hadoop_BooleanWritable_get_rdh | /**
* Returns the value of the BooleanWritable.
*
* @return the value of the BooleanWritable.
*/
public boolean get() {
return value;
} | 3.26 |
hadoop_BooleanWritable_readFields_rdh | /**
*/
@Override
public void readFields(DataInput in) throws IOException {
  value = in.readBoolean();
} | 3.26 |
hadoop_BooleanWritable_write_rdh | /**
*/
@Override
public void write(DataOutput out) throws IOException {
out.writeBoolean(value);
} | 3.26 |
hadoop_INodeSymlink_asSymlink_rdh | /**
*
* @return this object.
*/
@Override
public INodeSymlink asSymlink() {
return this;
} | 3.26 |
hadoop_INodeSymlink_isSymlink_rdh | /**
*
* @return true unconditionally.
*/
@Override
public boolean isSymlink() {
return true;
} | 3.26 |
hadoop_Utils_size_rdh | /**
* Get the size of the serialized Version object.
*
* @return serialized size of the version object.
*/
public static int size() {
  return (Short.SIZE + Short.SIZE) / Byte.SIZE;
} | 3.26 |
hadoop_Utils_write_rdh | /**
 * Write the object to a DataOutput. The serialized format of the Version is
* major version followed by minor version, both as big-endian short
* integers.
*
* @param out
* The DataOutput object.
* @throws IOException
* raised on errors performing I/O.
*/
public void write(DataOutput out) throws IOException {
out.writeShort(major);
out.writeShort(minor);
} | 3.26 |
hadoop_Utils_readVLong_rdh | /**
* Decoding the variable-length integer. Suppose the value of the first byte
* is FB, and the following bytes are NB[*].
* <ul>
* <li>if (FB >= -32), return (long)FB;
* <li>if (FB in [-72, -33]), return (FB+52)<<8 + NB[0]&0xff;
* <li>if (FB in [-104, -73]), return (FB+88)<<16 +
* (NB[0]&0xff)<<8 + NB[1]&0xff;
* <li>if (FB in [-120, -105]), return (FB+112)<<24 + (NB[0]&0xff)
* <<16 + (NB[1]&0xff)<<8 + NB[2]&0xff;
* <li>if (FB in [-128, -121]), return interpret NB[FB+129] as a signed
* big-endian integer.
* </ul>
*
* @param in
* input stream
* @return the decoded long integer.
* @throws IOException
* raised on errors performing I/O.
*/
public static long readVLong(DataInput in) throws IOException {
int firstByte = in.readByte();
if (firstByte >= (-32)) {
return firstByte;
}
switch ((firstByte + 128) / 8) {
case 11 :
case 10 :
case 9 :
case 8 :
case 7 :
return ((firstByte + 52) << 8) | in.readUnsignedByte();
case 6 :
case 5 :
case 4 :
case 3 :
return ((firstByte + 88) << 16) | in.readUnsignedShort();
case 2 :
case 1 :
return (((firstByte + 112) << 24) | (in.readUnsignedShort() << 8)) | in.readUnsignedByte();
case 0 :
int len = firstByte + 129;
switch (len) {
case 4 :
return in.readInt();
case 5 :
return (((long) (in.readInt())) << 8) | in.readUnsignedByte();
case 6 :
return (((long) (in.readInt())) << 16) | in.readUnsignedShort();
case 7 :
return ((((long) (in.readInt())) << 24) | (in.readUnsignedShort() << 8)) |
in.readUnsignedByte();
        case 8 :
return in.readLong();
default :
throw new IOException("Corrupted VLong encoding");
}
default :
throw new RuntimeException("Internal error");
}
} | 3.26 |
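The readVLong javadoc above specifies the variable-length encoding completely, so short forms can be decoded by hand. The sketch below re-implements only the two shortest branches (one byte for values >= -32, and the two-byte form whose leading byte falls in [-72, -33]) to check them against the documented rules; it deliberately does not depend on the Utils class, whose package is not shown:

```java
// Hand-check of the two shortest readVLong encodings described above,
// reimplemented locally for illustration only.
import java.io.ByteArrayInputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.IOException;

public class VLongDemo {
  // Mirrors the first two branches of the decoder shown above.
  static long decodeShortForms(DataInput in) throws IOException {
    int firstByte = in.readByte();
    if (firstByte >= -32) {
      return firstByte;                               // single-byte form
    }
    if (firstByte >= -72) {                           // two-byte form
      return ((firstByte + 52) << 8) | in.readUnsignedByte();
    }
    throw new IOException("longer forms not handled in this sketch");
  }

  static long decode(byte[] bytes) throws IOException {
    return decodeShortForms(new DataInputStream(new ByteArrayInputStream(bytes)));
  }

  public static void main(String[] args) throws IOException {
    System.out.println(decode(new byte[] {42}));       // 42: fits in one byte
    // Two-byte form: ((-52 + 52) << 8) | 100 = 100.
    System.out.println(decode(new byte[] {-52, 100})); // 100
  }
}
```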