name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_HashFirstResolver_trimPathToChild_rdh | /**
* Hash only up to the immediate child of the mount point. This prevents the
* need to create/maintain subtrees under each multi-destination mount point.
* Each child of a multi-destination mount is mapped to only one hash
* location.
* <p>
* Trims a path to at most the immediate child of a parent path. For example:
* <ul>
* <li>path = /a/b/c, parent = /a will be trimmed to /a/b.
* <li>path = /a/b, parent = /a/b will be trimmed to /a/b
* </ul>
*
* @param path
* The path to trim.
* @param parent
* The parent used to find the immediate child.
* @return Trimmed path.
*/
private static String trimPathToChild(String path, String parent) {
// Path is invalid or equal to the parent
if (path.length() <= parent.length()) {
return parent;
}
String remainder = path.substring(parent.length());
String[] components = remainder.replaceFirst("^/", "").split(Path.SEPARATOR);
if ((components.length > 0) && (components[0].length() > 0)) {
if (parent.endsWith(Path.SEPARATOR)) {
return parent + components[0];
} else {
return (parent + Path.SEPARATOR) + components[0];
}
} else {
return parent;
}
} | 3.26 |
hadoop_ResourceUsageMetrics_getCumulativeCpuUsage_rdh | /**
* Get the cumulative CPU usage.
*/
public long getCumulativeCpuUsage() {
return cumulativeCpuUsage;
} | 3.26 |
hadoop_ResourceUsageMetrics_setVirtualMemoryUsage_rdh | /**
* Set the virtual memory usage.
*/
public void setVirtualMemoryUsage(long usage) {
virtualMemoryUsage = usage;
} | 3.26 |
hadoop_ResourceUsageMetrics_setHeapUsage_rdh | /**
* Set the total heap usage.
*/
public void setHeapUsage(long usage) {
heapUsage = usage;
} | 3.26 |
hadoop_ResourceUsageMetrics_getPhysicalMemoryUsage_rdh | /**
* Get the physical memory usage.
*/
public long getPhysicalMemoryUsage() {
return physicalMemoryUsage;
} | 3.26 |
hadoop_ResourceUsageMetrics_setPhysicalMemoryUsage_rdh | /**
* Set the physical memory usage.
*/
public void setPhysicalMemoryUsage(long usage) {
physicalMemoryUsage = usage;
} | 3.26 |
hadoop_ResourceUsageMetrics_size_rdh | /**
* Returns the size of the serialized data
*/
public int size() {
int v0 = 0;
v0 += WritableUtils.getVIntSize(cumulativeCpuUsage);// long #1
v0 += WritableUtils.getVIntSize(virtualMemoryUsage);// long #2
v0 += WritableUtils.getVIntSize(physicalMemoryUsage);// long #3
v0 += WritableUtils.getVIntSize(heapUsage);// long #4
return v0;
} | 3.26 |
hadoop_ResourceUsageMetrics_getVirtualMemoryUsage_rdh | /**
* Get the virtual memory usage.
*/
public long getVirtualMemoryUsage() {
return virtualMemoryUsage;
} | 3.26 |
hadoop_ResourceUsageMetrics_setCumulativeCpuUsage_rdh | /**
* Set the cumulative CPU usage.
*/
public void setCumulativeCpuUsage(long usage) {
cumulativeCpuUsage = usage;
} | 3.26 |
hadoop_ResourceUsageMetrics_getHeapUsage_rdh | /**
* Get the total heap usage.
*/
public long getHeapUsage() {
return heapUsage;
} | 3.26 |
hadoop_BufferPool_tryAcquire_rdh | /**
* Acquires a buffer if one is immediately available. Otherwise returns null.
*
* @param blockNumber
* the id of the block to try to acquire.
* @return the acquired block's {@code BufferData} or null.
*/
public synchronized BufferData tryAcquire(int blockNumber) {
return acquireHelper(blockNumber, false);
} | 3.26 |
hadoop_BufferPool_releaseDoneBlocks_rdh | /**
* Releases resources for any blocks marked as 'done'.
*/
private synchronized void releaseDoneBlocks() {
for (BufferData data : getAll()) {
if (data.stateEqualsOneOf(State.DONE)) {
release(data);
}
}
} | 3.26 |
hadoop_BufferPool_getAll_rdh | /**
* Gets a list of all blocks in this pool.
*
* @return a list of all blocks in this pool.
*/
public List<BufferData> getAll() {
synchronized (allocated) {
return Collections.unmodifiableList(new ArrayList<>(allocated.keySet()));
}
} | 3.26 |
hadoop_BufferPool_acquire_rdh | /**
* Acquires a {@code ByteBuffer}, blocking if necessary until one becomes available.
*
* @param blockNumber
* the id of the block to acquire.
* @return the acquired block's {@code BufferData}.
*/
public synchronized BufferData acquire(int blockNumber) {
BufferData data;
final int maxRetryDelayMs = 600 * 1000;
final int statusUpdateDelayMs = 120 * 1000;
Retryer retryer = new Retryer(10, maxRetryDelayMs, statusUpdateDelayMs);
do {
if (retryer.updateStatus()) {
if (LOG.isDebugEnabled()) {
LOG.debug("waiting to acquire block: {}", blockNumber);
LOG.debug("state = {}", this);
}
releaseReadyBlock(blockNumber);
}
data = tryAcquire(blockNumber);
} while ((data == null) && retryer.continueRetry());
if (data != null) {
return data;
} else {
String message = String.format("Wait failed for acquire(%d)", blockNumber);
throw new IllegalStateException(message);
}
} | 3.26 |
hadoop_BufferPool_release_rdh | /**
* Releases a previously acquired resource.
*
* @param data
* the {@code BufferData} instance to release.
* @throws IllegalArgumentException
* if data is null.
* @throws IllegalArgumentException
* if data cannot be released due to its state.
*/
public synchronized void release(BufferData data) {
checkNotNull(data, "data");
synchronized(data) {
checkArgument(canRelease(data), String.format("Unable to release buffer: %s", data));
ByteBuffer buffer = allocated.get(data);
if (buffer == null) {
// Likely released earlier.
return;
}
buffer.clear();
pool.release(buffer);
allocated.remove(data);
}
releaseDoneBlocks();
} | 3.26 |
hadoop_BufferPool_releaseReadyBlock_rdh | /**
* If no blocks were released after calling releaseDoneBlocks() a few times,
* we may end up waiting forever. To avoid that situation, we try releasing
* a 'ready' block farthest away from the given block.
*/
private synchronized void releaseReadyBlock(int blockNumber) {
BufferData releaseTarget = null;
for (BufferData data : getAll()) {
if (data.stateEqualsOneOf(State.READY)) {
if (releaseTarget == null) {
releaseTarget = data;
} else if (distance(data, blockNumber) > distance(releaseTarget, blockNumber)) {
releaseTarget = data;
}
}
}
if (releaseTarget != null) {
LOG.warn("releasing 'ready' block: {}", releaseTarget);
releaseTarget.setDone();
}
} | 3.26 |
hadoop_BufferPool_toString_rdh | // For debugging purposes.
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(pool.toString());
sb.append("\n");
List<BufferData> allData = new ArrayList<>(getAll());
Collections.sort(allData, (d1, d2) -> d1.getBlockNumber() - d2.getBlockNumber());
for (BufferData data : allData) {
sb.append(data.toString());
sb.append("\n");
}
return sb.toString();
} | 3.26 |
hadoop_BufferPool_numCreated_rdh | // Number of ByteBuffers created so far.
public synchronized int numCreated() {
return pool.numCreated();
} | 3.26 |
hadoop_IsActiveServlet_doGet_rdh | /**
* Check whether this instance is the Active one.
*
* @param req
* HTTP request
* @param resp
* HTTP response to write to
*/
@Override
public void doGet(final HttpServletRequest req, final HttpServletResponse resp) throws IOException {
// By default requests are persistent. We don't want long-lived connections
// on server side.
resp.addHeader("Connection", "close");
if (!isActive()) {
// Report not SC_OK
resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, RESPONSE_NOT_ACTIVE);
return;
}
resp.setStatus(HttpServletResponse.SC_OK);
resp.getWriter().write(RESPONSE_ACTIVE);
resp.getWriter().flush();
} | 3.26 |
hadoop_RoleModel_effect_rdh | /**
* Map a bool to an effect.
*
* @param allowed
* is the statement to allow actions?
* @return the appropriate effect.
*/
public static Effects effect(final boolean allowed) {
return allowed ? Effects.Allow : Effects.Deny;
} | 3.26 |
hadoop_RoleModel_validate_rdh | /**
* Validation includes validating all statements.
*/
@Override
public void validate() {
requireNonNull(statement, "Statement");
checkState(VERSION.equals(version), "Invalid Version: %s", version);
statement.stream().forEach(a -> a.validate());
} | 3.26 |
hadoop_RoleModel_newSid_rdh | /**
* Statement ID factory.
*
* @return a statement ID unique for this JVM's life.
*/
public static String newSid() {
SID_COUNTER.incrementAndGet();
return SID_COUNTER.toString();
} | 3.26 |
hadoop_RoleModel_resource_rdh | /**
* Create a resource.
*
* @param bucket
* bucket
* @param key
* key
* @param addWildcard
* add a * to the tail of the key?
* @return a resource for a statement.
*/
@SuppressWarnings("StringConcatenationMissingWhitespace")
public static String resource(String bucket, String key, boolean addWildcard) {
return String.format(BUCKET_RESOURCE_F, bucket, key + (addWildcard ? "*" : ""));
}
/**
* Given a path, return the S3 resource to it.
* If {@code isDirectory} | 3.26 |
hadoop_RoleModel_statement_rdh | /**
* Create a statement.
* If {@code isDirectory} is true, a "/" is added to the path.
* This is critical when adding wildcard permissions under
* a directory, and also needed when locking down dir-as-file
* and dir-as-directory-marker access.
*
* @param allow
* allow or deny
* @param path
* path
* @param isDirectory
* is this a directory?
* @param wildcards
* add a * to the tail of the key?
* @param actions
* action
* @return the formatted json statement
*/
public static Statement statement(final boolean allow, final Path path, final boolean isDirectory, final boolean wildcards, final Collection<String> actions) {
return new Statement(RoleModel.effect(allow)).addActions(actions).addResources(resource(path, isDirectory, wildcards));
} | 3.26 |
hadoop_RoleModel_policy_rdh | /**
* From a set of statements, create a policy.
*
* @param statements
* statements
* @return the policy
*/
public static Policy policy(final List<RoleModel.Statement> statements) {
return new Policy(statements);
} | 3.26 |
hadoop_RoleModel_pathToKey_rdh | /**
* Variant of {@link S3AFileSystem#pathToKey(Path)} which doesn't care
* about working directories, so can be static and stateless.
*
* @param path
* path to map
* @return key or ""
*/
public static String pathToKey(Path path) {
if ((path.toUri().getScheme() != null) && path.toUri().getPath().isEmpty()) {
return "";
}
return path.toUri().getPath().substring(1);
} | 3.26 |
hadoop_RoleModel_directory_rdh | /**
* Given a directory path, return the S3 resource to it.
*
* @param path
* a path
* @return a resource for a statement.
*/
public static String[] directory(Path path) {
String host = path.toUri().getHost();
String key = pathToKey(path);
if (!key.isEmpty()) {
return new String[]{ resource(host, key + "/", true), resource(host, key, false), resource(host, key + "/", false) };
} else {
return new String[]{ resource(host, key, true) };
}
} | 3.26 |
hadoop_RoleModel_addResources_rdh | /**
* Add a list of resources.
*
* @param resources
* resource list
* @return this statement.
*/
public Statement addResources(Collection<String> resources) {
resource.addAll(resources);
return this;
} | 3.26 |
hadoop_RoleModel_add_rdh | /**
* Add a single statement.
*
* @param stat
* new statement.
*/
public void add(Statement stat) {
statement.add(stat);
} | 3.26 |
hadoop_JsonSerDeser_save_rdh | /**
* Save to a Hadoop filesystem
*
* @param fs
* filesystem
* @param path
* path
* @param instance
* instance to save
* @param overwrite
* should any existing file be overwritten
* @throws IOException
* IO exception
*/
public void save(FileSystem fs, Path path, T instance, boolean overwrite) throws IOException {
FSDataOutputStream v4 = fs.create(path, overwrite);
writeJsonAsBytes(instance, v4);
} | 3.26 |
hadoop_JsonSerDeser_fromJson_rdh | /**
* Convert from JSON
*
* @param json
* input
* @return the parsed JSON
* @throws IOException
* IO
* @throws JsonMappingException
* failure to map from the JSON to this class
*/
public T fromJson(String json) throws IOException, JsonParseException, JsonMappingException {
try {
return mapper.readValue(json, classType);
} catch (IOException e) {
log.error((("Exception while parsing json : " + e) + "\n") + json, e);
throw e;
}
} | 3.26 |
hadoop_JsonSerDeser_load_rdh | /**
* Load from a Hadoop filesystem
*
* @param fs
* filesystem
* @param path
* path
* @return a loaded CD
* @throws IOException
* IO problems
* @throws JsonParseException
* parse problems
* @throws JsonMappingException
* O/J mapping problems
*/
public T load(FileSystem fs, Path path) throws IOException {
FSDataInputStream dataInputStream = fs.open(path);
return fromStream(dataInputStream);
} | 3.26 |
hadoop_JsonSerDeser_writeJsonAsBytes_rdh | /**
* Write the JSON as bytes, then close the file.
*
* @param dataOutputStream
* an output stream that will always be closed
* @throws IOException
* on any failure
*/
private void writeJsonAsBytes(T instance, OutputStream dataOutputStream) throws IOException {
try {
String json = toJson(instance);
byte[] v6 = json.getBytes(StandardCharsets.UTF_8);
dataOutputStream.write(v6);
dataOutputStream.flush();
dataOutputStream.close();
} finally {
IOUtils.closeStream(dataOutputStream);
}
} | 3.26 |
hadoop_JsonSerDeser_fromStream_rdh | /**
* Convert from an input stream, closing the stream afterwards.
*
* @param stream
* @return the parsed JSON
* @throws IOException
* IO problems
*/
public T fromStream(InputStream stream) throws IOException {
try {
return ((T) (mapper.readValue(stream, classType)));
} catch (IOException e) {
log.error("Exception while parsing json input stream", e);
throw e;
} finally {
IOUtils.closeStream(stream);
}
} | 3.26 |
hadoop_JsonSerDeser_fromInstance_rdh | /**
* clone by converting to JSON and back again.
* This is much less efficient than any Java clone process.
*
* @param instance
* instance to duplicate
* @return a new instance
* @throws IOException
* problems.
*/
public T fromInstance(T instance) throws IOException {
return fromJson(toJson(instance));
} | 3.26 |
hadoop_JsonSerDeser_fromResource_rdh | /**
* Convert from a JSON file
*
* @param resource
* input file
* @return the parsed JSON
* @throws IOException
* IO problems
* @throws JsonMappingException
* failure to map from the JSON to this class
*/
public T fromResource(String resource) throws IOException, JsonParseException, JsonMappingException {
try (InputStream resStream = this.getClass().getResourceAsStream(resource)) {
if (resStream == null) {
throw new FileNotFoundException(resource);
}
return ((T) (mapper.readValue(resStream, classType)));
} catch (IOException e) {
log.error("Exception while parsing json resource {}", resource, e);
throw e;
}
} | 3.26 |
hadoop_JsonSerDeser_toJson_rdh | /**
* Convert an object to a JSON string
*
* @param instance
* instance to convert
* @return a JSON string description
* @throws JsonProcessingException
* parse problems
*/
public String toJson(T instance) throws JsonProcessingException {
mapper.configure(SerializationFeature.INDENT_OUTPUT, true);
return mapper.writeValueAsString(instance);
} | 3.26 |
hadoop_JsonSerDeser_fromBytes_rdh | /**
* Deserialize from a byte array
*
* @param b
* @return the deserialized value
* @throws IOException
* parse problems
*/
public T fromBytes(byte[] b) throws IOException {
String json = new String(b, 0, b.length, StandardCharsets.UTF_8);
return fromJson(json);
} | 3.26 |
hadoop_JsonSerDeser_fromFile_rdh | /**
* Convert from a JSON file
*
* @param jsonFile
* input file
* @return the parsed JSON
* @throws IOException
* IO problems
* @throws JsonMappingException
* failure to map from the JSON to this class
*/
public T fromFile(File jsonFile) throws IOException, JsonParseException, JsonMappingException {
File absoluteFile = jsonFile.getAbsoluteFile();
try {
return mapper.readValue(absoluteFile, classType);
} catch (IOException e) {
log.error("Exception while parsing json file {}", absoluteFile, e);
throw e;
}
} | 3.26 |
hadoop_TrashProcedure_moveToTrash_rdh | /**
* Move the source path to trash, delete it, or skip it, depending on the trash option.
*/
void moveToTrash() throws IOException {
Path v0 = context.getSrc();
if (srcFs.exists(v0)) {
TrashOption trashOption = context.getTrashOpt();
switch (trashOption) {
case TRASH :
conf.setFloat(FS_TRASH_INTERVAL_KEY, 60);
if (!Trash.moveToAppropriateTrash(srcFs, v0, conf)) {
throw new IOException(("Failed move " + v0) + " to trash.");
}
break;
case DELETE :
if (!srcFs.delete(v0, true)) {
throw new IOException("Failed delete " + v0);
}
LOG.info("{} is deleted.", v0);
break;
case SKIP :
break;
default :
throw new IOException("Unexpected trash option=" + trashOption);
}
}
} | 3.26 |
hadoop_NativeS3FileSystem_initialize_rdh | /**
* Always fail to initialize.
*
* @throws IOException
* always.
*/
@Override
public void initialize(URI uri, Configuration conf) throws IOException {
super.initialize(uri, conf);
throw new IOException(UNSUPPORTED);
} | 3.26 |
hadoop_NativeS3FileSystem_getScheme_rdh | /**
* Return the protocol scheme for the FileSystem.
*
* @return <code>s3n</code>
*/
@Override
public String getScheme() {
return "s3n";
} | 3.26 |
hadoop_BytesWritable_getCapacity_rdh | /**
* Get the capacity, which is the maximum size that could be handled without
* resizing the backing storage.
*
* @return The number of bytes
*/
public int getCapacity() {
return bytes.length;
} | 3.26 |
hadoop_BytesWritable_equals_rdh | /**
* Are the two byte sequences equal?
*/
@Override
public boolean equals(Object right_obj) {
if (right_obj instanceof BytesWritable)
return super.equals(right_obj);
return false;
} | 3.26 |
hadoop_BytesWritable_getLength_rdh | /**
* Get the current size of the buffer.
*/
@Override
public int getLength() {
return size;
} | 3.26 |
hadoop_BytesWritable_set_rdh | /**
* Set the value to a copy of the given byte range.
*
* @param newData
* the new values to copy in
* @param offset
* the offset in newData to start at
* @param length
* the number of bytes to copy
*/
public void set(byte[] newData, int offset, int length) {
setSize(0);
setSize(length);
System.arraycopy(newData, offset, bytes, 0, size);
} | 3.26 |
hadoop_BytesWritable_get_rdh | /**
* Get the data from the BytesWritable.
*
* @deprecated Use {@link #getBytes()} instead.
* @return data from the BytesWritable.
*/
@Deprecated
public byte[] get() {
return getBytes();
} | 3.26 |
hadoop_BytesWritable_getSize_rdh | /**
* Get the current size of the buffer.
*
* @deprecated Use {@link #getLength()} instead.
* @return current size of the buffer.
*/
@Deprecated
public int getSize() {
return getLength();
} | 3.26 |
hadoop_BytesWritable_setCapacity_rdh | /**
* Change the capacity of the backing storage. The data is preserved.
*
* @param capacity
* The new capacity in bytes.
*/
public void setCapacity(final int capacity) {
if (capacity != getCapacity()) {
this.size = Math.min(size, capacity);
this.bytes = Arrays.copyOf(this.bytes, capacity);
}
} | 3.26 |
hadoop_BytesWritable_toString_rdh | /**
* Generate the stream of bytes as hex pairs separated by ' '.
*/
@Override
public String toString() {
return IntStream.range(0, size).mapToObj(idx -> String.format("%02x", bytes[idx])).collect(Collectors.joining(" "));
} | 3.26 |
hadoop_BytesWritable_compare_rdh | /**
* Compare the buffers in serialized form.
*/
@Override
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
return compareBytes(b1, s1 + LENGTH_BYTES, l1 - LENGTH_BYTES, b2, s2 + LENGTH_BYTES, l2 - LENGTH_BYTES);
} | 3.26 |
hadoop_BytesWritable_getBytes_rdh | /**
* Get the data backing the BytesWritable. Please use {@link #copyBytes()}
* if you need the returned array to be precisely the length of the data.
*
* @return The data is only valid between 0 and getLength() - 1.
*/
@Override
public byte[] getBytes() {
return bytes;
} | 3.26 |
hadoop_BytesWritable_setSize_rdh | /**
* Change the size of the buffer. The values in the old range are preserved
* and any new values are undefined. The capacity is changed if it is
* necessary.
*
* @param size
* The new number of bytes
*/
public void setSize(int size) {
if (size > getCapacity()) {
// Avoid overflowing the int too early by casting to a long.
long newSize = Math.min(MAX_ARRAY_SIZE, (3L * size) / 2L);
setCapacity(((int) (newSize)));
}
this.size = size;
} | 3.26 |
hadoop_MapTaskAttemptInfo_getMapRuntime_rdh | /**
* Get the runtime for the <b>map</b> phase of the map-task attempt.
*
* @return the runtime for the <b>map</b> phase of the map-task attempt
*/
public long getMapRuntime() {
return runtime;
} | 3.26 |
hadoop_LambdaUtils_eval_rdh | /**
* Utility method to evaluate a callable and fill in the future
* with the result or the exception raised.
* Once this method returns, the future will have been evaluated to
* either a return value or an exception.
*
* @param <T>
* type of future
* @param result
* future for the result.
* @param call
* callable to invoke.
* @return the future passed in
*/
public static <T> CompletableFuture<T> eval(final CompletableFuture<T> result, final Callable<T> call) {
try {
result.complete(call.call());
} catch (Throwable tx) {
result.completeExceptionally(tx);
}
return result;
} | 3.26 |
hadoop_TypedBytesOutput_writeBool_rdh | /**
* Writes a boolean as a typed bytes sequence.
*
* @param b
* the boolean to be written
* @throws IOException
*/
public void writeBool(boolean b) throws IOException {
out.write(BOOL.code);
out.writeBoolean(b);
} | 3.26 |
hadoop_TypedBytesOutput_writeBytes_rdh | /**
* Writes a bytes buffer as a typed bytes sequence.
*
* @param buffer
* the bytes buffer to be written
* @throws IOException
*/
public void writeBytes(Buffer buffer) throws IOException {
writeBytes(buffer.get(), BYTES.code, buffer.getCount());
} | 3.26 |
hadoop_TypedBytesOutput_writeLong_rdh | /**
* Writes a long as a typed bytes sequence.
*
* @param l
* the long to be written
* @throws IOException
*/
public void writeLong(long l) throws IOException {
out.write(LONG.code);
out.writeLong(l);
} | 3.26 |
hadoop_TypedBytesOutput_writeMapHeader_rdh | /**
* Writes a map header.
*
* @param length
* the number of key-value pairs in the map
* @throws IOException
*/
public void writeMapHeader(int length) throws IOException {
out.write(MAP.code);
out.writeInt(length);
} | 3.26 |
hadoop_TypedBytesOutput_writeString_rdh | /**
* Writes a string as a typed bytes sequence.
*
* @param s
* the string to be written
* @throws IOException
*/
public void writeString(String s) throws IOException {
out.write(STRING.code);
WritableUtils.writeString(out, s);
} | 3.26 |
hadoop_TypedBytesOutput_writeListHeader_rdh | /**
* Writes a list header.
*
* @throws IOException
*/
public void writeListHeader() throws IOException {
out.write(LIST.code);
} | 3.26 |
hadoop_TypedBytesOutput_m0_rdh | /**
* Writes a double as a typed bytes sequence.
*
* @param d
* the double to be written
* @throws IOException
*/
public void m0(double d) throws IOException {
out.write(DOUBLE.code);
out.writeDouble(d);
} | 3.26 |
hadoop_TypedBytesOutput_writeFloat_rdh | /**
* Writes a float as a typed bytes sequence.
*
* @param f
* the float to be written
* @throws IOException
*/
public void writeFloat(float f) throws IOException {
out.write(FLOAT.code);
out.writeFloat(f);
} | 3.26 |
hadoop_TypedBytesOutput_writeVectorHeader_rdh | /**
* Writes a vector header.
*
* @param length
* the number of elements in the vector
* @throws IOException
*/
public void writeVectorHeader(int length) throws IOException {
out.write(VECTOR.code);
out.writeInt(length);
} | 3.26 |
hadoop_TypedBytesOutput_write_rdh | /**
* Writes a Java object as a typed bytes sequence.
*
* @param obj
* the object to be written
* @throws IOException
*/
public void write(Object obj) throws IOException {
if (obj instanceof Buffer) {
writeBytes(((Buffer) (obj)));
} else if (obj instanceof Byte) {
writeByte(((Byte) (obj)));
} else if (obj instanceof Boolean) {
writeBool(((Boolean) (obj)));
} else if (obj instanceof Integer) {
writeInt(((Integer) (obj)));
} else if (obj instanceof Long) {
writeLong(((Long) (obj)));
} else if (obj instanceof Float) {
writeFloat(((Float) (obj)));
} else if (obj instanceof Double) {
m0(((Double) (obj)));
} else if (obj instanceof String) {
writeString(((String) (obj)));
} else if (obj instanceof ArrayList) {
writeVector(((ArrayList) (obj)));
} else if (obj instanceof List) {
writeList(((List) (obj)));
} else if (obj instanceof Map) {
writeMap(((Map) (obj)));
} else {
throw new RuntimeException("cannot write objects of this type");
}
} | 3.26 |
hadoop_TypedBytesOutput_writeVector_rdh | /**
* Writes a vector as a typed bytes sequence.
*
* @param vector
* the vector to be written
* @throws IOException
*/
public void writeVector(ArrayList vector) throws IOException {
writeVectorHeader(vector.size());
for (Object obj : vector) {
write(obj);
}
} | 3.26 |
hadoop_TypedBytesOutput_writeListFooter_rdh | /**
* Writes a list footer.
*
* @throws IOException
*/
public void writeListFooter() throws IOException {
out.write(MARKER.code);
} | 3.26 |
hadoop_TypedBytesOutput_writeRaw_rdh | /**
* Writes a raw sequence of typed bytes.
*
* @param bytes
* the bytes to be written
* @param offset
* an offset in the given array
* @param length
* number of bytes from the given array to write
* @throws IOException
*/
public void writeRaw(byte[] bytes, int offset, int length) throws IOException {
out.write(bytes, offset, length);
} | 3.26 |
hadoop_TypedBytesOutput_writeByte_rdh | /**
* Writes a byte as a typed bytes sequence.
*
* @param b
* the byte to be written
* @throws IOException
*/
public void writeByte(byte b) throws IOException {
out.write(BYTE.code);
out.write(b);
} | 3.26 |
hadoop_TypedBytesOutput_writeInt_rdh | /**
* Writes an integer as a typed bytes sequence.
*
* @param i
* the integer to be written
* @throws IOException
*/
public void writeInt(int i) throws IOException {
out.write(INT.code);
out.writeInt(i);
} | 3.26 |
hadoop_TypedBytesOutput_writeMap_rdh | /**
* Writes a map as a typed bytes sequence.
*
* @param map
* the map to be written
* @throws IOException
*/
@SuppressWarnings("unchecked")
public void writeMap(Map map) throws IOException {
writeMapHeader(map.size());
Set<Entry> v3 = map.entrySet();
for (Entry entry : v3) {
write(entry.getKey());
write(entry.getValue());
}
} | 3.26 |
hadoop_FederationMembershipStateStoreInputValidator_validate_rdh | /**
* Quick validation on the input to check some obvious fail conditions (fail
* fast). Check if the provided {@link GetSubClusterInfoRequest} for querying
* subcluster's information is valid or not.
*
* @param request
* the {@link GetSubClusterInfoRequest} to validate against
* @throws FederationStateStoreInvalidInputException
* if the request is invalid
*/
public static void validate(GetSubClusterInfoRequest request) throws FederationStateStoreInvalidInputException {
// check if the request is present
if (request == null) {
String message = "Missing GetSubClusterInfo Request." + " Please try again by specifying a Get SubCluster information.";
LOG.warn(message);
throw new FederationStateStoreInvalidInputException(message);
}
// validate subcluster id
checkSubClusterId(request.getSubClusterId());
} | 3.26 |
hadoop_FederationMembershipStateStoreInputValidator_checkTimestamp_rdh | /**
* Validate that the timestamp is not negative.
*
* @param timestamp
* the timestamp to be verified
* @throws FederationStateStoreInvalidInputException
* if the timestamp is
* invalid
*/
private static void checkTimestamp(long timestamp) throws FederationStateStoreInvalidInputException {
if (timestamp < 0) {
String message = "Invalid timestamp information." + " Please try again by specifying valid Timestamp Information.";
LOG.warn(message);
throw new FederationStateStoreInvalidInputException(message);
}
} | 3.26 |
hadoop_FederationMembershipStateStoreInputValidator_checkAddress_rdh | /**
* Validate if the SubCluster Address is a valid URL or not.
*
* @param address
* the endpoint of the subcluster to be verified
* @throws FederationStateStoreInvalidInputException
* if the address is invalid
*/
private static void checkAddress(String address) throws FederationStateStoreInvalidInputException {
// Ensure url is not null
if ((address == null) || address.isEmpty()) {
String message = "Missing SubCluster Endpoint information." + " Please try again by specifying SubCluster Endpoint information.";
LOG.warn(message);
throw new FederationStateStoreInvalidInputException(message);
}
// Validate url is well formed
boolean hasScheme = address.contains("://");
URI uri = null;
try {
uri = (hasScheme) ? URI.create(address) : URI.create("dummyscheme://" + address);
} catch (IllegalArgumentException e) {
String message = ("The provided SubCluster Endpoint does not contain a" + " valid host:port authority: ") + address;
LOG.warn(message);
throw new FederationStateStoreInvalidInputException(message);
}
String host = uri.getHost();
int port = uri.getPort();
String path = uri.getPath();
if (((host == null) || (port < 0)) || (((!hasScheme) && (path != null)) && (!path.isEmpty()))) {
String message = ("The provided SubCluster Endpoint does not contain a" + " valid host:port authority: ") + address;
LOG.warn(message);
throw new FederationStateStoreInvalidInputException(message);
}
} | 3.26 |
hadoop_FederationMembershipStateStoreInputValidator_checkSubClusterInfo_rdh | /**
* Validate if all the required fields on {@link SubClusterInfo} are present
* or not. {@code Capability} will be empty as the corresponding
* {@code ResourceManager} is in the process of initialization during
* registration.
*
* @param subClusterInfo
* the information of the subcluster to be verified
* @throws FederationStateStoreInvalidInputException
* if the SubCluster Info
* are invalid
*/
public static void checkSubClusterInfo(SubClusterInfo subClusterInfo) throws FederationStateStoreInvalidInputException {
if (subClusterInfo == null) {
String message = "Missing SubCluster Information." + " Please try again by specifying SubCluster Information.";
LOG.warn(message);
throw new FederationStateStoreInvalidInputException(message);
}
// validate subcluster id
checkSubClusterId(subClusterInfo.getSubClusterId());
// validate AMRM Service address
checkAddress(subClusterInfo.getAMRMServiceAddress());
// validate ClientRM Service address
checkAddress(subClusterInfo.getClientRMServiceAddress());
// validate RMClient Service address
checkAddress(subClusterInfo.getRMAdminServiceAddress());
// validate RMWeb Service address
checkAddress(subClusterInfo.getRMWebServiceAddress());
// validate last heartbeat timestamp
checkTimestamp(subClusterInfo.getLastHeartBeat());
// validate last start timestamp
checkTimestamp(subClusterInfo.getLastStartTime());
// validate subcluster state
checkSubClusterState(subClusterInfo.getState());
} | 3.26 |
hadoop_FederationMembershipStateStoreInputValidator_checkCapability_rdh | /**
* Validate if the Capability is present or not.
*
* @param capability
* the capability of the subcluster to be verified
* @throws FederationStateStoreInvalidInputException
* if the capability is
* invalid
*/
private static void checkCapability(String capability) throws FederationStateStoreInvalidInputException {
if ((capability == null) || capability.isEmpty()) {
String message = "Invalid capability information." + " Please try again by specifying valid Capability Information.";
LOG.warn(message);
throw new FederationStateStoreInvalidInputException(message);
}
} | 3.26 |
hadoop_FederationMembershipStateStoreInputValidator_checkSubClusterId_rdh | /**
* Validate if the SubCluster Id is present or not.
*
* @param subClusterId
* the identifier of the subcluster to be verified
* @throws FederationStateStoreInvalidInputException
* if the SubCluster Id is
* invalid
*/
protected static void checkSubClusterId(SubClusterId subClusterId) throws FederationStateStoreInvalidInputException {
// check if cluster id is present
if (subClusterId == null) {
String message = "Missing SubCluster Id information." + " Please try again by specifying Subcluster Id information.";
LOG.warn(message);
throw new FederationStateStoreInvalidInputException(message);
}
// check if cluster id is valid
if (subClusterId.getId().isEmpty()) {
String message = "Invalid SubCluster Id information." + " Please try again by specifying valid Subcluster Id.";
LOG.warn(message);
throw new FederationStateStoreInvalidInputException(message);
}
} | 3.26 |
hadoop_StateStoreSerializableImpl_serializeString_rdh | /**
* Serialize a record using the serializer.
*
* @param record
* Record to serialize.
* @param <T>
* Type of the state store record.
* @return String with the serialization of the record.
*/
protected <T extends BaseRecord> String serializeString(T record) {
return f0.serializeString(record);
} | 3.26 |
hadoop_StateStoreSerializableImpl_getPrimaryKey_rdh | /**
* Get the primary key for a record. If we don't want to store in folders, we
* need to remove / from the name.
*
* @param record
* Record to get the primary key for.
* @return Primary key for the record.
*/
protected static String getPrimaryKey(BaseRecord record) {
String primaryKey = record.getPrimaryKey();
primaryKey = primaryKey.replaceAll("/", SLASH_MARK);
primaryKey = primaryKey.replaceAll(":", COLON_MARK);
return primaryKey;
} | 3.26 |
hadoop_StateStoreSerializableImpl_newRecord_rdh | /**
* Creates a record from an input data string.
*
* @param data
* Serialized text of the record.
* @param clazz
* Record class.
* @param includeDates
* If dateModified and dateCreated are serialized.
* @param <T>
* Type of the state store record.
* @return The created record by deserializing the input text.
* @throws IOException
* If the record deserialization fails.
*/
protected <T extends BaseRecord> T newRecord(String data, Class<T> clazz, boolean includeDates) throws IOException {
return f0.deserialize(data, clazz);
} | 3.26 |
hadoop_StateStoreSerializableImpl_serialize_rdh | /**
* Serialize a record using the serializer.
*
* @param record
* Record to serialize.
* @param <T>
* Type of the state store record.
* @return Byte array with the serialization of the record.
*/
protected <T extends BaseRecord> byte[] serialize(T record) {
return f0.serialize(record);
} | 3.26 |
hadoop_ExternalStoragePolicySatisfier_main_rdh | /**
* Main method to start SPS service.
*/
public static void main(String[] args) throws Exception {
NameNodeConnector nnc = null;
ExternalSPSContext context = null;
try {
StringUtils.startupShutdownMessage(StoragePolicySatisfier.class, args, LOG);
HdfsConfiguration spsConf = new HdfsConfiguration();
// login with SPS keytab
secureLogin(spsConf);
StoragePolicySatisfier sps = new StoragePolicySatisfier(spsConf);
nnc = getNameNodeConnector(spsConf);
context = new ExternalSPSContext(sps, nnc);
sps.init(context);
sps.start(StoragePolicySatisfierMode.EXTERNAL);
context.initMetrics(sps);
if (sps != null) {
sps.join();
}
} catch (Throwable e) {
LOG.error("Failed to start storage policy satisfier.", e);
terminate(1, e);
} finally {
if (nnc != null) {
nnc.close();
}
if (context != null) {
if (context.getSpsBeanMetrics() != null) {
context.closeMetrics();
}
}
}
} | 3.26 |
hadoop_TimelineAuthenticationFilterInitializer_initFilter_rdh | /**
* Initializes {@link TimelineAuthenticationFilter}.
* <p>
* Propagates to {@link TimelineAuthenticationFilter} configuration all YARN
* configuration properties prefixed with
* {@value org.apache.hadoop.yarn.conf.YarnConfiguration#TIMELINE_HTTP_AUTH_PREFIX}.
*
* @param container
* The filter container.
* @param conf
* Configuration for run-time parameters.
*/
@Override
public void initFilter(FilterContainer container, Configuration conf) {
setAuthFilterConfig(conf);
String authType = filterConfig.get(AuthenticationFilter.AUTH_TYPE);
if (authType.equals(PseudoAuthenticationHandler.TYPE)) {
filterConfig.put(AuthenticationFilter.AUTH_TYPE, PseudoDelegationTokenAuthenticationHandler.class.getName());
} else if (authType.equals(KerberosAuthenticationHandler.TYPE)) {
filterConfig.put(AuthenticationFilter.AUTH_TYPE, KerberosDelegationTokenAuthenticationHandler.class.getName());
}
filterConfig.put(DelegationTokenAuthenticationHandler.TOKEN_KIND, TimelineDelegationTokenIdentifier.KIND_NAME.toString());
container.addGlobalFilter("Timeline Authentication Filter", TimelineAuthenticationFilter.class.getName(), filterConfig);
} | 3.26 |
hadoop_DoubleValueSum_reset_rdh | /**
* reset the aggregator
*/
public void reset() {
f0 = 0;
} | 3.26 |
hadoop_DoubleValueSum_addNextValue_rdh | /**
* add a value to the aggregator
*
* @param val
* a double value.
*/
public void addNextValue(double val) {
this.f0 += val;
} | 3.26 |
hadoop_DoubleValueSum_getReport_rdh | /**
*
* @return the string representation of the aggregated value
*/
public String getReport() {
return "" + f0;
} | 3.26 |
hadoop_DoubleValueSum_getSum_rdh | /**
*
* @return the aggregated value
*/
public double getSum() {
return this.f0;
} | 3.26 |
hadoop_RenameFailedException_withExitCode_rdh | /**
* Set the exit code.
*
* @param code
* exit code to raise
* @return the exception
*/
public RenameFailedException withExitCode(boolean code) {
this.exitCode = code;
return this;
} | 3.26 |
hadoop_RegexMountPointInterceptorFactory_create_rdh | /**
* interceptorSettingsString string should be like ${type}:${string},
* e.g. replaceresolveddstpath:word1,word2.
*
* @param interceptorSettingsString
* @return Return interceptor based on setting or null on bad/unknown config.
*/
public static RegexMountPointInterceptor create(String interceptorSettingsString) {
int typeTagIndex = interceptorSettingsString.indexOf(RegexMountPoint.INTERCEPTOR_INTERNAL_SEP);
if ((typeTagIndex == (-1)) || (typeTagIndex == (interceptorSettingsString.length() - 1))) {
return null;
}
String typeTag = interceptorSettingsString.substring(0, typeTagIndex).trim().toLowerCase();
RegexMountPointInterceptorType interceptorType = RegexMountPointInterceptorType.get(typeTag);
if (interceptorType == null) {
return null;
}
switch (interceptorType) {
case REPLACE_RESOLVED_DST_PATH :
RegexMountPointInterceptor interceptor = RegexMountPointResolvedDstPathReplaceInterceptor.deserializeFromString(interceptorSettingsString);
return interceptor;
default :
// impossible now
return null;
}
} | 3.26 |
hadoop_FedBalanceContext_setMapNum_rdh | /**
* The map number of the distcp job.
*
* @param value
* the map number of the distcp.
* @return the builder.
*/
public Builder setMapNum(int value) {
this.mapNum = value;
return this;
} | 3.26 |
hadoop_FedBalanceContext_setDelayDuration_rdh | /**
* Specify the delayed duration when the procedures need to retry.
*
* @param value
* the delay duration.
* @return the builder.
*/
public Builder setDelayDuration(long value) {
this.delayDuration = value;
return this;
} | 3.26 |
hadoop_FedBalanceContext_build_rdh | /**
* Build the FedBalanceContext.
*
* @return the FedBalanceContext obj.
*/
public FedBalanceContext build() {
FedBalanceContext v2 = new FedBalanceContext();
v2.src = this.src;
v2.dst = this.dst;
v2.mount = this.mount;
v2.conf = this.conf;
v2.forceCloseOpenFiles = this.forceCloseOpenFiles;
v2.useMountReadOnly = this.useMountReadOnly;
v2.mapNum = this.mapNum;
v2.bandwidthLimit = this.bandwidthLimit;
v2.trashOpt = this.trashOpt;
v2.delayDuration = this.delayDuration;
v2.diffThreshold = this.diffThreshold;
return v2;
} | 3.26 |
hadoop_FedBalanceContext_setBandwidthLimit_rdh | /**
* The bandwidth limit of the distcp job(MB).
*
* @param value
* the bandwidth.
* @return the builder.
*/
public Builder setBandwidthLimit(int value) {
this.bandwidthLimit = value;
return this;
} | 3.26 |
hadoop_FedBalanceContext_setTrash_rdh | /**
* Specify the trash behaviour after all the data is synced to the target.
*
* @param value
* the trash option.
* @return the builder.
*/
public Builder setTrash(TrashOption value) {
this.trashOpt = value;
return this;
} | 3.26 |
hadoop_FedBalanceContext_setDiffThreshold_rdh | /**
* Specify the threshold of diff entries.
*
* @param value
* the diff threshold.
* @return the builder.
*/
public Builder setDiffThreshold(int value) {
this.diffThreshold = value;
return this;
} | 3.26 |
hadoop_FedBalanceContext_setForceCloseOpenFiles_rdh | /**
* Force close open files.
*
* @param value
* true if force close all the open files.
* @return the builder.
*/
public Builder setForceCloseOpenFiles(boolean value) {
this.forceCloseOpenFiles = value;
return this;
} | 3.26 |
hadoop_FedBalanceContext_setUseMountReadOnly_rdh | /**
* Use mount point readonly to disable write.
*
* @param value
* true if disabling write by setting mount point readonly.
* @return the builder.
*/
public Builder setUseMountReadOnly(boolean value) {
this.useMountReadOnly = value;
return this;
} | 3.26 |
hadoop_TupleWritable_clearWritten_rdh | /**
* Clear any record of which writables have been written to, without
* releasing storage.
*/
void clearWritten() {
written.clear();
} | 3.26 |
hadoop_TupleWritable_setWritten_rdh | /**
* Record that the tuple contains an element at the position provided.
*/
void setWritten(int i) {
written.set(i);
} | 3.26 |
hadoop_CheckpointCommand_needToReturnImage_rdh | /**
* Indicates whether the new checkpoint image needs to be transferred
* back to the name-node after the checkpoint is done.
*
* @return true if the checkpoint should be returned back.
*/
public boolean needToReturnImage() {
return needToReturnImage;
} | 3.26 |