name | code_snippet | score |
---|---|---|
hadoop_IdentifierResolver_setOutputReaderClass_rdh | /**
* Sets the {@link OutputReader} class.
*/
protected void setOutputReaderClass(Class<? extends OutputReader> outputReaderClass) {
this.outputReaderClass = outputReaderClass;
} | 3.26 |
hadoop_IdentifierResolver_getOutputKeyClass_rdh | /**
* Returns the resolved output key class.
*/
public Class getOutputKeyClass() {
return outputKeyClass;
} | 3.26 |
hadoop_IdentifierResolver_getOutputReaderClass_rdh | /**
* Returns the resolved {@link OutputReader} class.
*/
public Class<? extends OutputReader> getOutputReaderClass() {
return outputReaderClass;
} | 3.26 |
hadoop_IdentifierResolver_setOutputValueClass_rdh | /**
* Sets the output value class.
*/
protected void setOutputValueClass(Class outputValueClass) {
this.f0 = outputValueClass;
} | 3.26 |
hadoop_IdentifierResolver_resolve_rdh | /**
* Resolves a given identifier. This method has to be called before calling
* any of the getters.
*/
public void resolve(String identifier) {
  if (identifier.equalsIgnoreCase(RAW_BYTES_ID)) {
    setInputWriterClass(RawBytesInputWriter.class);
    setOutputReaderClass(RawBytesOutputReader.class);
    setOutputKeyClass(BytesWritable.class);
    setOutputValueClass(BytesWritable.class);
  } else if (identifier.equalsIgnoreCase(TYPED_BYTES_ID)) {
    setInputWriterClass(TypedBytesInputWriter.class);
    setOutputReaderClass(TypedBytesOutputReader.class);
    setOutputKeyClass(TypedBytesWritable.class);
    setOutputValueClass(TypedBytesWritable.class);
  } else if (identifier.equalsIgnoreCase(KEY_ONLY_TEXT_ID)) {
    setInputWriterClass(KeyOnlyTextInputWriter.class);
    setOutputReaderClass(KeyOnlyTextOutputReader.class);
    setOutputKeyClass(Text.class);
    setOutputValueClass(NullWritable.class);
  } else {
    // assume TEXT_ID
    setInputWriterClass(TextInputWriter.class);
    setOutputReaderClass(TextOutputReader.class);
    setOutputKeyClass(Text.class);
    setOutputValueClass(Text.class);
  }
} | 3.26 |
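The resolve() snippet above maps a streaming identifier onto matching writer, reader, key, and value classes, falling back to plain text. Below is a minimal standalone sketch of that dispatch pattern; the identifier strings and the returned class names are assumptions made for illustration, not the actual Hadoop constants.

```java
import java.util.Locale;

public class ResolverSketch {
  // map an identifier to a (key class, value class) pair, defaulting to text
  static String[] resolve(String identifier) {
    switch (identifier.toLowerCase(Locale.ROOT)) {
      case "rawbytes":    return new String[] {"BytesWritable", "BytesWritable"};
      case "typedbytes":  return new String[] {"TypedBytesWritable", "TypedBytesWritable"};
      case "keyonlytext": return new String[] {"Text", "NullWritable"};
      default:            return new String[] {"Text", "Text"}; // assume plain text
    }
  }

  public static void main(String[] args) {
    String[] kv = resolve("typedbytes");
    System.out.println("key=" + kv[0] + ", value=" + kv[1]);
  }
}
```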
hadoop_IdentifierResolver_getInputWriterClass_rdh | /**
* Returns the resolved {@link InputWriter} class.
*/
public Class<? extends InputWriter> getInputWriterClass() {
  return inputWriterClass;
} | 3.26 |
hadoop_IdentifierResolver_setOutputKeyClass_rdh | /**
* Sets the output key class.
*/
protected void setOutputKeyClass(Class outputKeyClass) {
this.outputKeyClass = outputKeyClass;
} | 3.26 |
hadoop_IdentifierResolver_setInputWriterClass_rdh | /**
* Sets the {@link InputWriter} class.
*/
protected void setInputWriterClass(Class<? extends InputWriter> inputWriterClass) {
this.inputWriterClass = inputWriterClass;
} | 3.26 |
hadoop_IdentifierResolver_getOutputValueClass_rdh | /**
* Returns the resolved output value class.
*/
public Class getOutputValueClass() {
return f0;
} | 3.26 |
hadoop_ZStandardDecompressor_getRemaining_rdh | /**
* <p>Returns the number of bytes remaining in the input buffers;
* normally called when finished() is true to determine amount of post-stream
* data.</p>
*
* @return the total (non-negative) number of unprocessed bytes in input
*/
@Override
public int getRemaining() {
checkStream();
// userBuf + compressedDirectBuf
return userBufferBytesToConsume + remaining;
} | 3.26 |
hadoop_ZStandardDecompressor_setDictionary_rdh | // dictionary is not supported
@Override
public void setDictionary(byte[] b, int off, int len) {
throw new UnsupportedOperationException("Dictionary support is not enabled");
} | 3.26 |
hadoop_ZStandardDecompressor_reset_rdh | /**
* Resets everything including the input buffers (user and direct).
*/
@Override
public void reset() {
checkStream();
init(stream);
remaining = 0;
finished = false;
compressedDirectBufOff = 0;
bytesInCompressedBuffer = 0;
uncompressedDirectBuf.limit(directBufferSize);
uncompressedDirectBuf.position(directBufferSize);
userBufOff = 0;
userBufferBytesToConsume = 0;
} | 3.26 |
hadoop_BlockData_getState_rdh | /**
* Gets the state of the given block.
*
* @param blockNumber
* the id of the given block.
* @return the state of the given block.
* @throws IllegalArgumentException
* if blockNumber is invalid.
*/
public State getState(int blockNumber) {
throwIfInvalidBlockNumber(blockNumber);
return state[blockNumber];
} | 3.26 |
hadoop_BlockData_getFileSize_rdh | /**
* Gets the size of the associated file.
*
* @return the size of the associated file.
*/
public long getFileSize() {
return fileSize;
} | 3.26 |
hadoop_BlockData_getBlockNumber_rdh | /**
* Gets the id of the block that contains the given absolute offset.
*
* @param offset
* the absolute offset to check.
* @return the id of the block that contains the given absolute offset.
* @throws IllegalArgumentException
* if offset is invalid.
*/
public int getBlockNumber(long offset) {
throwIfInvalidOffset(offset);
return ((int) (offset / blockSize));
} | 3.26 |
hadoop_BlockData_getBlockSize_rdh | /**
* Gets the size of each block.
*
* @return the size of each block.
*/
public int getBlockSize() {
return blockSize;
} | 3.26 |
hadoop_BlockData_isLastBlock_rdh | /**
* Indicates whether the given block is the last block in the associated file.
*
* @param blockNumber
* the id of the desired block.
* @return true if the given block is the last block in the associated file, false otherwise.
* @throws IllegalArgumentException
* if blockNumber is invalid.
*/
public boolean isLastBlock(int blockNumber) {
if (fileSize == 0) {
return false;
}
throwIfInvalidBlockNumber(blockNumber);
return blockNumber == (numBlocks - 1);
} | 3.26 |
hadoop_BlockData_getSize_rdh | /**
* Gets the size of the given block.
*
* @param blockNumber
* the id of the desired block.
* @return the size of the given block.
*/
public int getSize(int blockNumber) {
if (fileSize == 0) {
return 0;
}
if (isLastBlock(blockNumber)) {
return ((int) (fileSize - (((long) (blockSize)) * (numBlocks - 1))));
} else {
  return blockSize;
}
} | 3.26 |
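The last-block branch in getSize() above is just fileSize - blockSize * (numBlocks - 1). A short worked sketch with invented numbers:

```java
public class BlockSizeSketch {
  public static void main(String[] args) {
    long fileSize = 10_500_000L;  // hypothetical file size in bytes
    int blockSize = 4_000_000;    // hypothetical block size
    int numBlocks = (int) ((fileSize + blockSize - 1) / blockSize);            // 3
    int lastBlockSize = (int) (fileSize - (long) blockSize * (numBlocks - 1)); // 2,500,000
    System.out.println(numBlocks + " blocks, last block " + lastBlockSize + " bytes");
  }
}
```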
hadoop_BlockData_setState_rdh | /**
* Sets the state of the given block to the given value.
*
* @param blockNumber
* the id of the given block.
* @param blockState
* the target state.
* @throws IllegalArgumentException
* if blockNumber is invalid.
*/
public void setState(int blockNumber, State blockState) {
throwIfInvalidBlockNumber(blockNumber);
state[blockNumber] = blockState;
} | 3.26 |
hadoop_BlockData_getNumBlocks_rdh | /**
* Gets the number of blocks in the associated file.
*
* @return the number of blocks in the associated file.
*/
public int getNumBlocks() {
return numBlocks;
} | 3.26 |
hadoop_BlockData_getStartOffset_rdh | /**
* Gets the start offset of the given block.
*
* @param blockNumber
* the id of the given block.
* @return the start offset of the given block.
* @throws IllegalArgumentException
* if blockNumber is invalid.
*/
public long getStartOffset(int blockNumber) {
throwIfInvalidBlockNumber(blockNumber);
return blockNumber * ((long) (blockSize));
} | 3.26 |
hadoop_BlockData_getStateString_rdh | // Debug helper.
public String getStateString() {
  StringBuilder sb = new StringBuilder();
int blockNumber = 0;
while (blockNumber < numBlocks) {
State tstate = getState(blockNumber);
int endBlockNumber = blockNumber;
while ((endBlockNumber < numBlocks) && (getState(endBlockNumber) == tstate)) {
endBlockNumber++;
}
sb.append(String.format("[%03d ~ %03d] %s%n", blockNumber, endBlockNumber - 1, tstate));
blockNumber = endBlockNumber;
}
return sb.toString();
} | 3.26 |
hadoop_BlockData_isValidOffset_rdh | /**
* Indicates whether the given absolute offset is valid.
*
* @param offset
* absolute offset in the file.
* @return true if the given absolute offset is valid, false otherwise.
*/
public boolean isValidOffset(long offset) {
return (offset >= 0) && (offset < fileSize);
} | 3.26 |
hadoop_BlockData_getRelativeOffset_rdh | /**
* Gets the relative offset corresponding to the given block and the absolute offset.
*
* @param blockNumber
* the id of the given block.
* @param offset
* absolute offset in the file.
* @return the relative offset corresponding to the given block and the absolute offset.
* @throws IllegalArgumentException
* if either blockNumber or offset is invalid.
*/
public int getRelativeOffset(int blockNumber, long offset) {
throwIfInvalidOffset(offset);
return ((int) (offset - getStartOffset(blockNumber)));
} | 3.26 |
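getBlockNumber() and getRelativeOffset() above split an absolute offset into a block id and an offset within that block. A worked sketch of that arithmetic with invented numbers:

```java
public class OffsetMappingSketch {
  public static void main(String[] args) {
    int blockSize = 4_000_000;   // hypothetical block size
    long offset = 9_234_567L;    // hypothetical absolute offset
    int blockNumber = (int) (offset / blockSize);       // 2
    long startOffset = blockNumber * (long) blockSize;  // 8,000,000
    int relativeOffset = (int) (offset - startOffset);  // 1,234,567
    System.out.println("block " + blockNumber + ", relative offset " + relativeOffset);
  }
}
```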
hadoop_BlockReconstructionWork_setNotEnoughRack_rdh | /**
* Mark that the reconstruction work is to replicate an internal block to a new
* rack.
*/
void setNotEnoughRack() {
notEnoughRack = true;
} | 3.26 |
hadoop_PlacementPolicy_constraints_rdh | /**
* Placement constraint details.
*/
public PlacementPolicy constraints(List<PlacementConstraint> constraints) {
  this.constraints = constraints;
return this;
} | 3.26 |
hadoop_FileMetadata_getKey_rdh | /**
* Returns the Azure storage key for the file. Used internally by the framework.
*
* @return The key for the file.
*/
public String getKey() {
return key;
} | 3.26 |
hadoop_ContainerUpdates_getDecreaseRequests_rdh | /**
* Returns Container Decrease Requests.
*
* @return Container Decrease Requests.
*/
public List<UpdateContainerRequest> getDecreaseRequests() {
return decreaseRequests;
} | 3.26 |
hadoop_ContainerUpdates_getPromotionRequests_rdh | /**
* Returns Container Promotion Requests.
*
* @return Container Promotion Requests.
*/
public List<UpdateContainerRequest> getPromotionRequests() {
return promotionRequests;
} | 3.26 |
hadoop_ContainerUpdates_getDemotionRequests_rdh | /**
* Returns Container Demotion Requests.
*
* @return Container Demotion Requests.
*/
public List<UpdateContainerRequest> getDemotionRequests() {
return demotionRequests;
} | 3.26 |
hadoop_ContainerUpdates_getIncreaseRequests_rdh | /**
* Returns Container Increase Requests.
*
* @return Container Increase Requests.
*/
public List<UpdateContainerRequest> getIncreaseRequests() {
return increaseRequests;
} | 3.26 |
hadoop_IOStatisticsContextIntegration_getCurrentIOStatisticsContext_rdh | /**
* Get the current thread's IOStatisticsContext instance. If no instance is
* present for this thread ID, create one using the factory.
*
* @return instance of IOStatisticsContext.
*/
public static IOStatisticsContext getCurrentIOStatisticsContext() {
return isThreadIOStatsEnabled ? ACTIVE_IOSTATS_CONTEXT.getForCurrentThread() : EmptyIOStatisticsContextImpl.getInstance();
} | 3.26 |
hadoop_IOStatisticsContextIntegration_createNewInstance_rdh | /**
* Creating a new IOStatisticsContext instance for a FS to be used.
*
* @param key
* Thread ID that represents which thread the context belongs to.
* @return an instance of IOStatisticsContext.
*/
private static IOStatisticsContext createNewInstance(Long key) {
IOStatisticsContextImpl instance = new IOStatisticsContextImpl(key, INSTANCE_ID.getAndIncrement());
LOG.debug("Created instance {}", instance);
return instance;
} | 3.26 |
hadoop_IOStatisticsContextIntegration_enableIOStatisticsContext_rdh | /**
* A method to enable IOStatisticsContext to override if set otherwise in
* the configurations for tests.
*/
@VisibleForTesting
public static void enableIOStatisticsContext() {
if (!isThreadIOStatsEnabled) {
LOG.info("Enabling Thread IOStatistics..");
isThreadIOStatsEnabled = true;
}
} | 3.26 |
hadoop_IOStatisticsContextIntegration_isIOStatisticsThreadLevelEnabled_rdh | /**
* Static probe to check whether thread-level IO statistics are enabled.
*
* @return true if thread-level IO statistics are enabled.
*/
public static boolean isIOStatisticsThreadLevelEnabled() {
return isThreadIOStatsEnabled;
} | 3.26 |
hadoop_IOStatisticsContextIntegration_setThreadIOStatisticsContext_rdh | /**
* Set the IOStatisticsContext for the current thread.
*
* @param statisticsContext
* IOStatistics context instance for the
* current thread. If null, the context is reset.
*/
public static void setThreadIOStatisticsContext(IOStatisticsContext statisticsContext) {
if (isThreadIOStatsEnabled) {
if (statisticsContext == null) {
// new value is null, so remove it
ACTIVE_IOSTATS_CONTEXT.removeForCurrentThread();
} else {
// the setter is efficient in that it does not create a new
// reference if the context is unchanged.
ACTIVE_IOSTATS_CONTEXT.setForCurrentThread(statisticsContext);
}
}
} | 3.26 |
hadoop_IOStatisticsContextIntegration_getThreadSpecificIOStatisticsContext_rdh | /**
* Get thread ID specific IOStatistics values if
* statistics are enabled and the thread ID is in the map.
*
* @param testThreadId
* thread ID.
* @return IOStatisticsContext if found in the map.
*/
@VisibleForTesting
public static IOStatisticsContext getThreadSpecificIOStatisticsContext(long testThreadId) {
LOG.debug("IOStatsContext thread ID required: {}", testThreadId);
if (!isThreadIOStatsEnabled) {
return null;
}
// lookup the weakRef IOStatisticsContext for the thread ID in the
// ThreadMap.
WeakReference<IOStatisticsContext> ioStatisticsSnapshotWeakReference = ACTIVE_IOSTATS_CONTEXT.lookup(testThreadId);
if (ioStatisticsSnapshotWeakReference != null) {
return ioStatisticsSnapshotWeakReference.get();
}
return null;
} | 3.26 |
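The lookup above resolves a per-thread context through a WeakReference so contexts of dead threads can be garbage-collected. A standalone sketch of that pattern; the map and value type here are invented stand-ins, not the Hadoop implementation.

```java
import java.lang.ref.WeakReference;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class ThreadContextSketch {
  // thread ID -> weakly held context object (invented stand-in type)
  static final Map<Long, WeakReference<Object>> CONTEXTS = new ConcurrentHashMap<>();

  static Object lookup(long threadId) {
    WeakReference<Object> ref = CONTEXTS.get(threadId);
    return ref != null ? ref.get() : null; // may be null once collected
  }

  public static void main(String[] args) {
    long id = Thread.currentThread().getId();
    CONTEXTS.put(id, new WeakReference<>(new Object()));
    System.out.println("found context: " + (lookup(id) != null));
  }
}
```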
hadoop_CommonAuditContext_getEvaluatedEntries_rdh | /**
* Get the evaluated operations.
* This is the map unique to this context.
*
* @return the operations map.
*/
public Map<String, Supplier<String>> getEvaluatedEntries() {
return evaluatedEntries;
} | 3.26 |
hadoop_CommonAuditContext_noteEntryPoint_rdh | /**
* Add the entry point as a context entry with the key
* {@link AuditConstants#PARAM_COMMAND}
* if it has not already been recorded.
* This is called via ToolRunner but may be used at any
* other entry point.
*
* @param tool
* object loaded/being launched.
*/
public static void noteEntryPoint(Object tool) {
if ((tool != null) && (!GLOBAL_CONTEXT_MAP.containsKey(PARAM_COMMAND))) {
String classname = tool.getClass().toString();
int lastDot = classname.lastIndexOf('.');
int l = classname.length();
if ((lastDot > 0) && (lastDot < (l - 1))) {
String name = classname.substring(lastDot + 1, l);
setGlobalContextEntry(PARAM_COMMAND, name);
}
}
} | 3.26 |
hadoop_CommonAuditContext_setGlobalContextEntry_rdh | /**
* Set a global entry.
*
* @param key
* key
* @param value
* value
*/
public static void setGlobalContextEntry(String key, String value) {
GLOBAL_CONTEXT_MAP.put(key, value);
} | 3.26 |
hadoop_CommonAuditContext_reset_rdh | /**
* Reset the context; will set the standard options again.
* Primarily for testing.
*/
public void reset() {
evaluatedEntries.clear();
init();
} | 3.26 |
hadoop_CommonAuditContext_createInstance_rdh | /**
* Demand invoked to create the instance for this thread.
*
* @return an instance.
*/
private static CommonAuditContext createInstance() {
CommonAuditContext context = new CommonAuditContext();
context.init();
return context;
} | 3.26 |
hadoop_CommonAuditContext_m2_rdh | /**
* Get a context entry.
*
* @param key
* key
* @return value or null
*/
public String m2(String key) {
Supplier<String> v0 = evaluatedEntries.get(key);
return v0 != null ? v0.get() : null;
} | 3.26 |
hadoop_CommonAuditContext_getGlobalContextEntries_rdh | /**
* Get an iterator over the global entries.
* Thread safe.
*
* @return an iterable to enumerate the values.
*/
public static Iterable<Map.Entry<String, String>> getGlobalContextEntries() {
return new GlobalIterable();
} | 3.26 |
hadoop_CommonAuditContext_getGlobalContextEntry_rdh | /**
* Get a global entry.
*
* @param key
* key
* @return value or null
*/
public static String getGlobalContextEntry(String key) {
return GLOBAL_CONTEXT_MAP.get(key);
} | 3.26 |
hadoop_CommonAuditContext_removeGlobalContextEntry_rdh | /**
* Remove a global entry.
*
* @param key
* key to clear.
*/
public static void removeGlobalContextEntry(String key) {
GLOBAL_CONTEXT_MAP.remove(key);
} | 3.26 |
hadoop_CommonAuditContext_init_rdh | /**
* Initialize.
*/
private void init() {
// thread 1 is dynamic
put(PARAM_THREAD1, CommonAuditContext::currentThreadID);
} | 3.26 |
hadoop_CommonAuditContext_m0_rdh | /**
* Put a context entry.
*
* @param key
* key
* @param value
* new value. If null, triggers removal.
* @return old value or null
*/
public Supplier<String> m0(String key, String value) {
if (value != null) {
return evaluatedEntries.put(key, () -> value);
} else {
return evaluatedEntries.remove(key);
}
} | 3.26 |
hadoop_CommonAuditContext_currentThreadID_rdh | /**
* A thread ID which is unique for this process and shared across all
* S3A clients on the same thread, even those using different FS instances.
*
* @return a thread ID for reporting.
*/
public static String currentThreadID() {
return Long.toString(Thread.currentThread().getId());
} | 3.26 |
hadoop_CommonAuditContext_containsKey_rdh | /**
* Does the context contain a specific key?
*
* @param key
* key
* @return true if it is in the context.
*/
public boolean containsKey(String key) {
return evaluatedEntries.containsKey(key);
} | 3.26 |
hadoop_CommonAuditContext_m1_rdh | /**
* Remove a context entry.
*
* @param key
* key
*/
public void m1(String key) {
if (LOG.isTraceEnabled()) {
LOG.trace("Remove context entry {}", key);
}
evaluatedEntries.remove(key);
} | 3.26 |
hadoop_CommonAuditContext_put_rdh | /**
* Put a context entry dynamically evaluated on demand.
* Important: as these supplier methods are long-lived,
* the supplier function <i>MUST NOT</i> be part of/refer to
* any object instance of significant memory size.
* Applications SHOULD remove references when they are
* no longer needed.
* When logged at TRACE, prints the key and stack trace of the caller,
* to allow for debugging of any problems.
*
* @param key
* key
* @param value
* new value
* @return old value or null
*/
public Supplier<String> put(String key, Supplier<String> value) {
  if (LOG.isTraceEnabled()) {
    LOG.trace("Adding context entry {}", key, new Exception(key));
  }
  return evaluatedEntries.put(key, value);
} | 3.26 |
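The context stores Supplier&lt;String&gt; values so entries such as the thread ID are re-evaluated every time they are read. A minimal standalone sketch of that idea, not the Hadoop class itself:

```java
import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;

public class EvaluatedEntriesSketch {
  private final Map<String, Supplier<String>> entries = new HashMap<>();

  Supplier<String> put(String key, Supplier<String> value) {
    return entries.put(key, value);
  }

  String get(String key) {
    Supplier<String> s = entries.get(key);
    return s != null ? s.get() : null; // evaluated on read
  }

  public static void main(String[] args) {
    EvaluatedEntriesSketch ctx = new EvaluatedEntriesSketch();
    // the value is recomputed each time the entry is read
    ctx.put("thread", () -> Long.toString(Thread.currentThread().getId()));
    System.out.println("thread entry = " + ctx.get("thread"));
  }
}
```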
hadoop_CommonAuditContext_currentAuditContext_rdh | /**
* Get the current common audit context. Thread local.
*
* @return the audit context of this thread.
*/
public static CommonAuditContext currentAuditContext() {
return ACTIVE_CONTEXT.get();
} | 3.26 |
hadoop_RandomResolver_getFirstNamespace_rdh | /**
* Get a random name space from the path.
*
* @param path
* Path ignored by this policy.
* @param loc
* Federated location with multiple destinations.
* @return Random name space.
*/
public String getFirstNamespace(final String path, final PathLocation loc) {
final Set<String> namespaces = (loc == null) ? null : loc.getNamespaces();
if (CollectionUtils.isEmpty(namespaces)) {
  LOG.error("Cannot get namespaces for {}", loc);
  return null;
}
final int index = ThreadLocalRandom.current().nextInt(namespaces.size());
return Iterables.get(namespaces, index);
} | 3.26 |
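getFirstNamespace() picks a uniformly random element of a Set via ThreadLocalRandom and Guava's Iterables.get. A dependency-free sketch of the same pattern, with invented namespace names:

```java
import java.util.LinkedHashSet;
import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;

public class RandomPickSketch {
  static <T> T pick(Set<T> items) {
    if (items == null || items.isEmpty()) {
      return null;
    }
    int index = ThreadLocalRandom.current().nextInt(items.size());
    int i = 0;
    for (T item : items) {     // walk to the randomly chosen position
      if (i++ == index) {
        return item;
      }
    }
    return null; // unreachable
  }

  public static void main(String[] args) {
    Set<String> namespaces = new LinkedHashSet<>(Set.of("ns0", "ns1", "ns2"));
    System.out.println("picked " + pick(namespaces));
  }
}
```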
hadoop_EmptyIOStatisticsContextImpl_snapshot_rdh | /**
* Create a new empty snapshot.
* A new one is always created for isolation.
*
* @return a statistics snapshot
*/
@Override
public IOStatisticsSnapshot snapshot() {
return new IOStatisticsSnapshot();
} | 3.26 |
hadoop_EmptyIOStatisticsContextImpl_getID_rdh | /**
* The ID is always 0.
* As the real context implementation counter starts at 1,
* we are guaranteed to have unique IDs even between them and
* the empty context.
*
* @return 0
*/
@Override
public long getID() {
  return 0;
} | 3.26 |
hadoop_EmptyIOStatisticsContextImpl_getInstance_rdh | /**
* Get the single instance.
*
* @return an instance.
*/
static IOStatisticsContext getInstance() {
return EMPTY_CONTEXT;
} | 3.26 |
hadoop_JsonSerialization_m0_rdh | /**
* Read from an input stream.
*
* @param stream
* stream to read from
* @return the parsed entity
* @throws IOException
* IO problems
* @throws JsonParseException
* If the input is not well-formatted
* @throws JsonMappingException
* failure to map from the JSON to this class
*/
public synchronized T m0(InputStream stream) throws IOException {
return mapper.readValue(stream, classType);
} | 3.26 |
hadoop_JsonSerialization_save_rdh | /**
* Save to a Hadoop filesystem.
*
* @param fs
* filesystem
* @param path
* path
* @param overwrite
* should any existing file be overwritten
* @param instance
* instance
* @throws IOException
* IO exception.
*/
public void save(FileSystem fs, Path path, T instance, boolean overwrite) throws IOException {
writeJsonAsBytes(instance, fs.create(path, overwrite));
} | 3.26 |
hadoop_JsonSerialization_fromJson_rdh | /**
* Convert from JSON.
*
* @param json
* input
* @return the parsed JSON
* @throws IOException
* IO problems
* @throws JsonParseException
* If the input is not well-formatted
* @throws JsonMappingException
* failure to map from the JSON to this class
*/
@SuppressWarnings("unchecked")
public synchronized T fromJson(String json) throws IOException, JsonParseException, JsonMappingException {
if (json.isEmpty()) {
throw new EOFException("No data");
}
try {
return mapper.readValue(json, classType);
} catch (IOException e) {
LOG.error("Exception while parsing json : {}\n{}", e, json, e);
throw e;
}
} | 3.26 |
hadoop_JsonSerialization_load_rdh | /**
* Load from a Hadoop filesystem.
* If a file status is supplied, it's passed in to the openFile()
* call so that FS implementations can optimize their opening.
*
* @param fs
* filesystem
* @param path
* path
* @param status
* status of the file to open.
* @return a loaded object
* @throws PathIOException
* JSON parse problem
* @throws EOFException
* file status references an empty file
* @throws IOException
* IO problems
*/
public T load(FileSystem fs, Path path, @Nullable FileStatus status) throws IOException {
if ((status != null) && (status.getLen() == 0)) {
throw new EOFException("No data in " + path);
}
FutureDataInputStreamBuilder builder = fs.openFile(path).opt(FS_OPTION_OPENFILE_READ_POLICY, FS_OPTION_OPENFILE_READ_POLICY_WHOLE_FILE);
if (status != null) {
builder.withFileStatus(status);
}
try (FSDataInputStream dataInputStream = awaitFuture(builder.build())) {
return m0(dataInputStream);
} catch (JsonProcessingException e) {
throw new PathIOException(path.toString(), "Failed to read JSON file " + e, e);
}
} | 3.26 |
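JsonSerialization wraps a Jackson ObjectMapper bound to one class. The sketch below is plain Jackson showing the serialize/parse round trip the helper performs; the Entry type is invented for the example and is not part of Hadoop.

```java
import com.fasterxml.jackson.databind.ObjectMapper;

public class JsonRoundTripSketch {
  public static class Entry {   // simple bean with a no-arg constructor
    public String name;
    public int count;
  }

  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    Entry in = new Entry();
    in.name = "example";
    in.count = 3;
    String json = mapper.writeValueAsString(in);     // serialize
    Entry out = mapper.readValue(json, Entry.class); // parse back
    System.out.println(json + " -> " + out.name + "/" + out.count);
  }
}
```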
hadoop_JsonSerialization_fromInstance_rdh | /**
* clone by converting to JSON and back again.
* This is much less efficient than any Java clone process.
*
* @param instance
* instance to duplicate
* @return a new instance
* @throws IOException
* IO problems.
*/
public T fromInstance(T instance) throws IOException {
return fromJson(toJson(instance));
} | 3.26 |
hadoop_JsonSerialization_fromBytes_rdh | /**
* Deserialize from a byte array.
*
* @param bytes
* byte array
* @throws IOException
* IO problems
* @throws EOFException
* not enough data
* @return byte array.
*/
public T fromBytes(byte[] bytes) throws IOException {
return fromJson(new String(bytes, 0, bytes.length, UTF_8));
} | 3.26 |
hadoop_JsonSerialization_writer_rdh | /**
*
* @return an ObjectWriter which pretty-prints its output
*/
public static ObjectWriter writer() {
return WRITER;
} | 3.26 |
hadoop_JsonSerialization_getMapper_rdh | /**
* Get the mapper of this class.
*
* @return the mapper
*/
public ObjectMapper getMapper() {
return mapper;
} | 3.26 |
hadoop_JsonSerialization_mapReader_rdh | /**
*
* @return an ObjectReader which returns simple Maps.
*/
public static ObjectReader mapReader() {
return MAP_READER;
} | 3.26 |
hadoop_JsonSerialization_toBytes_rdh | /**
* Convert JSON to bytes.
*
* @param instance
* instance to convert
* @return a byte array
* @throws IOException
* IO problems
*/
public byte[] toBytes(T instance) throws IOException {
return mapper.writeValueAsBytes(instance);
} | 3.26 |
hadoop_JsonSerialization_getName_rdh | /**
* Get the simple name of the class type to be marshalled.
*
* @return the name of the class being marshalled
*/
public String getName() {
return classType.getSimpleName();
} | 3.26 |
hadoop_JsonSerialization_writeJsonAsBytes_rdh | /**
* Write the JSON as bytes, then close the stream.
*
* @param instance
* instance to write
* @param dataOutputStream
* an output stream that will always be closed
* @throws IOException
* on any failure
*/
public void writeJsonAsBytes(T instance, OutputStream dataOutputStream) throws IOException {
try {
dataOutputStream.write(toBytes(instance));
} finally {
dataOutputStream.close();
}
} | 3.26 |
hadoop_GlobalPolicy_registerPaths_rdh | /**
* Return a map of the object type and RM path to request it from - the
* framework will query these paths and provide the objects to the policy.
* Delegating this responsibility to the PolicyGenerator enables us to avoid
* duplicate calls to the same endpoints, as the GlobalPolicy is invoked
* once per queue.
*
* @return a map of the object type and RM path.
*/
protected Map<Class<?>, String> registerPaths() {
// Default register nothing
return Collections.emptyMap();
} | 3.26 |
hadoop_TimelineWriteResponse_getErrors_rdh | /**
* Get a list of {@link TimelineWriteError} instances.
*
* @return a list of {@link TimelineWriteError} instances
*/
@XmlElement(name = "errors")
public List<TimelineWriteError> getErrors() {
return f0;
} | 3.26 |
hadoop_TimelineWriteResponse_addErrors_rdh | /**
* Add a list of {@link TimelineWriteError} instances into the existing list.
*
* @param writeErrors
* a list of {@link TimelineWriteError} instances
*/
public void addErrors(List<TimelineWriteError> writeErrors) {
  this.f0.addAll(writeErrors);
} | 3.26 |
hadoop_TimelineWriteResponse_setErrorCode_rdh | /**
* Set the error code to the given error code.
*
* @param code
* an error code.
*/
public void setErrorCode(int code) {
this.errorCode = code;
} | 3.26 |
hadoop_TimelineWriteResponse_setEntityType_rdh | /**
* Set the entity type.
*
* @param type
* the entity type.
*/
public void setEntityType(String type) {
this.entityType = type;
} | 3.26 |
hadoop_TimelineWriteResponse_m0_rdh | /**
* Get the error code.
*
* @return an error code
*/
@XmlElement(name = "errorcode")
public int m0() {
return errorCode;
} | 3.26 |
hadoop_TimelineWriteResponse_getEntityId_rdh | /**
* Get the entity Id.
*
* @return the entity Id
*/
@XmlElement(name = "entity")
public String getEntityId() {
return entityId;
} | 3.26 |
hadoop_TimelineWriteResponse_getEntityType_rdh | /**
* Get the entity type.
*
* @return the entity type
*/
@XmlElement(name = "entitytype")
public String getEntityType() {
return entityType;
} | 3.26 |
hadoop_TimelineWriteResponse_addError_rdh | /**
* Add a single {@link TimelineWriteError} instance into the existing list.
*
* @param error
* a single {@link TimelineWriteError} instance
*/
public void addError(TimelineWriteError error) {
f0.add(error);
} | 3.26 |
hadoop_TimelineWriteResponse_setErrors_rdh | /**
* Set the list to the given list of {@link TimelineWriteError} instances.
*
* @param writeErrors
* a list of {@link TimelineWriteError} instances
*/
public void setErrors(List<TimelineWriteError> writeErrors) {
this.f0.clear();
this.f0.addAll(writeErrors);
} | 3.26 |
hadoop_TimelineWriteResponse_setEntityId_rdh | /**
* Set the entity Id.
*
* @param id
* the entity Id.
*/
public void setEntityId(String id) {
this.entityId = id;
} | 3.26 |
hadoop_AwsStatisticsCollector_counter_rdh | /**
* Process a counter.
*
* @param collection
* metric collection
* @param metric
* metric
* @param consumer
* consumer
*/
private void counter(MetricCollection collection, SdkMetric<Integer> metric, LongConsumer consumer) {
collection.metricValues(metric).forEach(v -> consumer.accept(v.longValue()));
} | 3.26 |
hadoop_AwsStatisticsCollector_recurseThroughChildren_rdh | /**
* Metric collections can be nested. Exposes a stream of the given
* collection and its nested children.
*
* @param metrics
* initial collection
* @return a stream of all nested metric collections
*/
private static Stream<MetricCollection> recurseThroughChildren(MetricCollection metrics) {
return Stream.concat(Stream.of(metrics), metrics.children().stream().flatMap(c -> recurseThroughChildren(c)));
} | 3.26 |
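recurseThroughChildren() flattens a tree of metric collections with Stream.concat plus flatMap. A generic standalone sketch of that idiom over an invented Node type (the names mirror the diagram in publish() below):

```java
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class TreeFlattenSketch {
  record Node(String name, List<Node> children) {}

  static Stream<Node> flatten(Node n) {
    // emit the node itself, then recurse into every child
    return Stream.concat(Stream.of(n),
        n.children().stream().flatMap(TreeFlattenSketch::flatten));
  }

  public static void main(String[] args) {
    Node http = new Node("HttpClient", List.of());
    Node attempt = new Node("ApiCallAttempt", List.of(http));
    Node root = new Node("ApiCall", List.of(attempt));
    System.out.println(flatten(root).map(Node::name).collect(Collectors.toList()));
  }
}
```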
hadoop_AwsStatisticsCollector_publish_rdh | /**
* This is the callback from the AWS SDK where metrics
* can be collected.
*
* @param metricCollection
* metrics collection
*/
@Override
public void publish(MetricCollection metricCollection) {
// MetricCollections are nested, so we need to traverse through their
// "children" to collect the desired metrics. E.g.:
//
// ApiCall
// βββββββββββββββββββββββββββββββββββββββββββ
// β MarshallingDuration=PT0.002808333S β
// β RetryCount=0 β
// β ApiCallSuccessful=true β
// β OperationName=DeleteObject β
// β ApiCallDuration=PT0.079801458S β
// β CredentialsFetchDuration=PT0.000007083S β
// β ServiceId=S3 β
// βββββββββββββββββββββββββββββββββββββββββββ
// ApiCallAttempt
// βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
// β SigningDuration=PT0.000319375S β
// β ServiceCallDuration=PT0.078908584S β
// β AwsExtendedRequestId=Kmvb2Sz8NuDgIFJPKzLLBhuHgQGmpAjVYBMrSHDvy= β
// β HttpStatusCode=204 β
// β BackoffDelayDuration=PT0S β
// β AwsRequestId=KR0XZCSX β
// βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
// HttpClient
// βββββββββββββββββββββββββββββββββββ
// β AvailableConcurrency=1 β
// β LeasedConcurrency=0 β
// β ConcurrencyAcquireDuration=PT0S β
// β PendingConcurrencyAcquires=0 β
// β MaxConcurrency=96 β
// β HttpClientName=Apache β
// βββββββββββββββββββββββββββββββββββ
final long[] throttling = new long[]{ 0 };
recurseThroughChildren(metricCollection).collect(Collectors.toList()).forEach(m -> {
  counter(m, CoreMetric.RETRY_COUNT, retries -> {
    collector.updateAwsRetryCount(retries);
    collector.updateAwsRequestCount(retries + 1);
  });
  counter(m, HttpMetric.HTTP_STATUS_CODE, statusCode -> {
    if (statusCode == HttpStatusCode.THROTTLING) {
      throttling[0] += 1;
    }
  });
  timing(m, CoreMetric.API_CALL_DURATION, collector::noteAwsClientExecuteTime);
  timing(m, CoreMetric.SERVICE_CALL_DURATION, collector::noteAwsRequestTime);
  timing(m, CoreMetric.MARSHALLING_DURATION, collector::noteRequestMarshallTime);
  timing(m, CoreMetric.SIGNING_DURATION, collector::noteRequestSigningTime);
  timing(m, CoreMetric.UNMARSHALLING_DURATION, collector::noteResponseProcessingTime);
});
collector.updateAwsThrottleExceptionsCount(throttling[0]);
} | 3.26 |
hadoop_AwsStatisticsCollector_timing_rdh | /**
* Process a timing.
*
* @param collection
* metric collection
* @param metric
* metric
* @param durationConsumer
* consumer
*/
private void timing(MetricCollection collection, SdkMetric<Duration> metric, Consumer<Duration> durationConsumer) {
collection.metricValues(metric).forEach(v -> durationConsumer.accept(v));
} | 3.26 |
hadoop_SubApplicationEntityReader_getTable_rdh | /**
* Uses the {@link SubApplicationTableRW}.
*/
protected BaseTableRW<?> getTable() {
return SUB_APPLICATION_TABLE;
} | 3.26 |
hadoop_SubApplicationEntityReader_updateFilterForConfsAndMetricsToRetrieve_rdh | /**
* Updates filter list based on fields for confs and metrics to retrieve.
*
* @param listBasedOnFields
* filter list based on fields.
* @throws IOException
* if any problem occurs while updating filter list.
*/
private void updateFilterForConfsAndMetricsToRetrieve(FilterList listBasedOnFields, Set<String> cfsInFields) throws IOException {
TimelineDataToRetrieve dataToRetrieve = getDataToRetrieve();
// Please note that if confsToRetrieve is specified, we would have added
// CONFS to fields to retrieve in augmentParams() even if not specified.
if (dataToRetrieve.getFieldsToRetrieve().contains(Field.CONFIGS)) {
// Create a filter list for configs.
listBasedOnFields.addFilter(TimelineFilterUtils.createFilterForConfsOrMetricsToRetrieve(dataToRetrieve.getConfsToRetrieve(), SubApplicationColumnFamily.CONFIGS, SubApplicationColumnPrefix.CONFIG));
cfsInFields.add(Bytes.toString(SubApplicationColumnFamily.CONFIGS.getBytes()));
}
// Please note that if metricsToRetrieve is specified, we would have added
// METRICS to fields to retrieve in augmentParams() even if not specified.
if (dataToRetrieve.getFieldsToRetrieve().contains(Field.METRICS)) {
// Create a filter list for metrics.
listBasedOnFields.addFilter(TimelineFilterUtils.createFilterForConfsOrMetricsToRetrieve(dataToRetrieve.getMetricsToRetrieve(), SubApplicationColumnFamily.METRICS, SubApplicationColumnPrefix.METRIC));
cfsInFields.add(Bytes.toString(SubApplicationColumnFamily.METRICS.getBytes()));
}
} | 3.26 |
hadoop_SubApplicationEntityReader_createFilterListForColsOfInfoFamily_rdh | /**
* Creates a filter list which indicates that only some of the column
* qualifiers in the info column family will be returned in result.
*
* @return filter list.
* @throws IOException
* if any problem occurs while creating filter list.
*/
private FilterList createFilterListForColsOfInfoFamily() throws IOException {
FilterList infoFamilyColsFilter = new FilterList(Operator.MUST_PASS_ONE);
// Add filters for each column in entity table.
updateFixedColumns(infoFamilyColsFilter);
EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
// If INFO field has to be retrieved, add a filter for fetching columns
// with INFO column prefix.
if (hasField(fieldsToRetrieve, Field.INFO)) {
infoFamilyColsFilter.addFilter(TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL, SubApplicationColumnPrefix.INFO));
}
TimelineFilterList relatesTo = getFilters().getRelatesTo();
if (hasField(fieldsToRetrieve, Field.RELATES_TO)) {
// If RELATES_TO field has to be retrieved, add a filter for fetching
// columns with RELATES_TO column prefix.
infoFamilyColsFilter.addFilter(TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL, SubApplicationColumnPrefix.RELATES_TO));
} else if ((relatesTo != null) && (!relatesTo.getFilterList().isEmpty())) {
// Even if fields to retrieve does not contain RELATES_TO, we still
// need to have a filter to fetch some of the column qualifiers if
// relatesTo filters are specified. relatesTo filters will then be
// matched after fetching rows from HBase.
Set<String> relatesToCols = TimelineFilterUtils.fetchColumnsFromFilterList(relatesTo);
infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(SubApplicationColumnPrefix.RELATES_TO, relatesToCols));
}
TimelineFilterList isRelatedTo = getFilters().getIsRelatedTo();
if (hasField(fieldsToRetrieve, Field.IS_RELATED_TO)) {
// If IS_RELATED_TO field has to be retrieved, add a filter for fetching
// columns with IS_RELATED_TO column prefix.
infoFamilyColsFilter.addFilter(TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL, SubApplicationColumnPrefix.IS_RELATED_TO));
} else if ((isRelatedTo != null) && (!isRelatedTo.getFilterList().isEmpty())) {
// Even if fields to retrieve does not contain IS_RELATED_TO, we still
// need to have a filter to fetch some of the column qualifiers if
// isRelatedTo filters are specified. isRelatedTo filters will then be
// matched after fetching rows from HBase.
Set<String> isRelatedToCols = TimelineFilterUtils.fetchColumnsFromFilterList(isRelatedTo);
infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(SubApplicationColumnPrefix.IS_RELATED_TO, isRelatedToCols));
}
TimelineFilterList eventFilters = getFilters().getEventFilters();
if (hasField(fieldsToRetrieve, Field.EVENTS)) {
// If EVENTS field has to be retrieved, add a filter for fetching columns
// with EVENT column prefix.
infoFamilyColsFilter.addFilter(TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL, SubApplicationColumnPrefix.EVENT));
} else if ((eventFilters != null) && (!eventFilters.getFilterList().isEmpty())) {
// Even if fields to retrieve does not contain EVENTS, we still need to
// have a filter to fetch some of the column qualifiers on the basis of
// event filters specified. Event filters will then be matched after
// fetching rows from HBase.
Set<String> eventCols = TimelineFilterUtils.fetchColumnsFromFilterList(eventFilters);
infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(SubApplicationColumnPrefix.EVENT, eventCols));
}
return infoFamilyColsFilter;
} | 3.26 |
hadoop_SubApplicationEntityReader_excludeFieldsFromInfoColFamily_rdh | /**
* Exclude column prefixes via filters which are not required (based on fields
* to retrieve) from info column family. These filters are added to filter
* list which contains a filter for getting info column family.
*
* @param infoColFamilyList
* filter list for info column family.
*/
private void excludeFieldsFromInfoColFamily(FilterList infoColFamilyList) {
EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
// Events not required.
if (!hasField(fieldsToRetrieve, Field.EVENTS)) {
infoColFamilyList.addFilter(TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.NOT_EQUAL, SubApplicationColumnPrefix.EVENT));
}
// info not required.
if (!hasField(fieldsToRetrieve, Field.INFO)) {
infoColFamilyList.addFilter(TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.NOT_EQUAL, SubApplicationColumnPrefix.INFO));
}
// is related to not required.
if (!hasField(fieldsToRetrieve, Field.IS_RELATED_TO)) {
infoColFamilyList.addFilter(TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.NOT_EQUAL, SubApplicationColumnPrefix.IS_RELATED_TO));
}
// relates to not required.
if (!hasField(fieldsToRetrieve, Field.RELATES_TO)) {
infoColFamilyList.addFilter(TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.NOT_EQUAL, SubApplicationColumnPrefix.RELATES_TO));
}
} | 3.26 |
hadoop_TimelineClient_createTimelineClient_rdh | /**
* Creates an instance of the timeline v.1.x client.
* The current UGI when the user initializes the client will be used to do the
* put and the delegation token operations. The current user may use
* {@link UserGroupInformation#doAs} another user to construct and initialize
* a timeline client if the following operations are supposed to be conducted
* by that user.
*
* @return the created timeline client instance
*/
@Public
public static TimelineClient createTimelineClient() {
TimelineClient client = new TimelineClientImpl();
return client;
} | 3.26 |
hadoop_ClientThrottlingAnalyzer_addBytesTransferred_rdh | /**
* Updates metrics with results from the current storage operation.
*
* @param count
* The count of bytes transferred.
* @param isFailedOperation
* True if the operation failed; otherwise false.
*/
public void addBytesTransferred(long count, boolean isFailedOperation) {
BlobOperationMetrics metrics = blobMetrics.get();
if (isFailedOperation) {
metrics.bytesFailed.addAndGet(count);
metrics.operationsFailed.incrementAndGet();
} else {
metrics.bytesSuccessful.addAndGet(count);
metrics.operationsSuccessful.incrementAndGet();
}
} | 3.26 |
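addBytesTransferred() bumps one of two success/failure counter pairs atomically. A minimal sketch of that bookkeeping with plain AtomicLong fields; the field names are invented for the example.

```java
import java.util.concurrent.atomic.AtomicLong;

public class TransferMetricsSketch {
  final AtomicLong bytesSuccessful = new AtomicLong();
  final AtomicLong bytesFailed = new AtomicLong();

  void addBytesTransferred(long count, boolean failed) {
    (failed ? bytesFailed : bytesSuccessful).addAndGet(count);
  }

  public static void main(String[] args) {
    TransferMetricsSketch m = new TransferMetricsSketch();
    m.addBytesTransferred(4096, false);
    m.addBytesTransferred(512, true);
    System.out.println("ok=" + m.bytesSuccessful + " failed=" + m.bytesFailed);
  }
}
```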
hadoop_ClientThrottlingAnalyzer_suspendIfNecessary_rdh | /**
* Suspends the current storage operation, as necessary, to reduce throughput.
*/
public void suspendIfNecessary() {
int duration = f0;
if (duration > 0) {
try {
Thread.sleep(duration);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
}
}
} | 3.26 |
hadoop_HAState_toString_rdh | /**
*
* @return String representation of the service state.
*/
@Override
public String toString() {
  return state.toString();
} | 3.26 |
hadoop_HAState_getLastHATransitionTime_rdh | /**
* Gets the most recent HA transition time in milliseconds from the epoch.
*
* @return the most recent HA transition time in milliseconds from the epoch.
*/
public long getLastHATransitionTime() {
return lastHATransitionTime;
} | 3.26 |
hadoop_HAState_getServiceState_rdh | /**
*
* @return the generic service state
*/
public HAServiceState getServiceState() {
return state;
} | 3.26 |
hadoop_HAState_prepareToEnterState_rdh | /**
* Method to be overridden by subclasses to prepare to enter a state.
* This method is called <em>without</em> the context being locked,
* and after {@link #prepareToExitState(HAContext)} has been called
* for the previous state, but before {@link #exitState(HAContext)}
* has been called for the previous state.
*
* @param context
* HA context
* @throws ServiceFailedException
* on precondition failure
*/
public void prepareToEnterState(final HAContext context) throws ServiceFailedException {
} | 3.26 |
hadoop_HAState_prepareToExitState_rdh | /**
* Method to be overridden by subclasses to prepare to exit a state.
* This method is called <em>without</em> the context being locked.
* This is used by the standby state to cancel any checkpoints
* that are going on. It can also be used to check any preconditions
* for the state transition.
*
* This method should not make any destructive changes to the state
* (eg stopping threads) since {@link #prepareToEnterState(HAContext)}
* may subsequently cancel the state transition.
*
* @param context
* HA context
* @throws ServiceFailedException
* on precondition failure
*/
public void prepareToExitState(final HAContext context) throws ServiceFailedException {
} | 3.26 |
hadoop_HAState_setStateInternal_rdh | /**
* Internal method to move from the existing state to a new state.
*
* @param context
* HA context
* @param s
* new state
* @throws ServiceFailedException
* on failure to transition to new state.
*/
protected final void setStateInternal(final HAContext context, final HAState s) throws ServiceFailedException {
prepareToExitState(context);
s.prepareToEnterState(context);
context.writeLock();
try {
exitState(context);
context.setState(s);
s.enterState(context);
s.m0();
} finally {
context.writeUnlock();
}
} | 3.26 |
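setStateInternal() prepares both states outside the lock and only performs exit/set/enter while holding the write lock. A standalone sketch of that ordering; the State interface and lock here are invented stand-ins, not the HDFS types.

```java
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class TransitionSketch {
  interface State {
    default void prepareToExit() {}
    default void prepareToEnter() {}
    default void exit() {}
    default void enter() {}
  }

  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private State current = new State() {};

  void setState(State next) {
    current.prepareToExit();   // outside the lock
    next.prepareToEnter();     // outside the lock
    lock.writeLock().lock();
    try {
      current.exit();
      current = next;
      current.enter();
    } finally {
      lock.writeLock().unlock();
    }
  }

  public static void main(String[] args) {
    new TransitionSketch().setState(new State() {});
    System.out.println("transitioned");
  }
}
```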
hadoop_DatanodeRegistration_getVersion_rdh | // NodeRegistration
@Override
public int getVersion() {
return storageInfo.getLayoutVersion();
} | 3.26 |
hadoop_DatanodeRegistration_getRegistrationID_rdh | // NodeRegistration
@Override
public String getRegistrationID() {
return Storage.getRegistrationID(storageInfo);
} | 3.26 |
hadoop_DatanodeRegistration_getAddress_rdh | // NodeRegistration
@Override
public String getAddress() {
return getXferAddr();
} | 3.26 |