name (stringlengths 12-178) | code_snippet (stringlengths 8-36.5k) | score (float64 3.26-3.68) |
---|---|---|
hadoop_ContainerContext_getContainerType_rdh | /**
* Get {@link ContainerType} the type of the container
* being initialized or stopped.
*
* @return the type of the container
*/
public ContainerType getContainerType() {
return containerType;
} | 3.26 |
hadoop_ContainerContext_getResource_rdh | /**
* Get {@link Resource} the resource capability allocated to the container
* being initialized or stopped.
*
* @return the resource capability.
*/
public Resource getResource() {
return resource;
} | 3.26 |
hadoop_ContainerContext_getContainerId_rdh | /**
* Get {@link ContainerId} of the container being initialized or stopped.
*
* @return the container ID
*/
public ContainerId getContainerId() {
return containerId;
} | 3.26 |
hadoop_UploadHandle_toByteArray_rdh | /**
*
 * @return Serialized form in bytes.
*/
default byte[] toByteArray() {
ByteBuffer bb = bytes();
byte[] ret = new byte[bb.remaining()];
bb.get(ret);
return ret;
} | 3.26 |
hadoop_LongValueSum_getReport_rdh | /**
*
* @return the string representation of the aggregated value
*/
public String getReport() {
  return "" + sum;
} | 3.26 |
hadoop_LongValueSum_addNextValue_rdh | /**
* add a value to the aggregator
*
* @param val
* a long value.
*/
public void addNextValue(long val) {
this.sum += val;
} | 3.26 |
hadoop_LongValueSum_reset_rdh | /**
* reset the aggregator
*/
public void reset() {
sum = 0;
} | 3.26 |
hadoop_LongValueSum_getSum_rdh | /**
*
* @return the aggregated value
*/
public long getSum() {
return this.sum;
} | 3.26 |
hadoop_ApplicationColumnPrefix_getColumnPrefix_rdh | /**
*
* @return the column name value
*/
private String getColumnPrefix() {
return columnPrefix;
} | 3.26 |
hadoop_V2Migration_v1RequestHandlersUsed_rdh | /**
* Notes use of request handlers.
*
* @param handlers
* handlers declared
*/
public static void v1RequestHandlersUsed(final String handlers) {
WARN_OF_REQUEST_HANDLERS.warn("Ignoring V1 SDK request handlers set in {}: {}", AUDIT_REQUEST_HANDLERS, handlers);
} | 3.26 |
hadoop_AuditReplayThread_getException_rdh | /**
* Get the Exception that caused this thread to stop running, if any, else
* null. Should not be called until this thread has already completed (i.e.,
* after {@link #join()} has been called).
*
* @return The exception which was thrown, if any.
*/
Exception getException() {
return exception;
} | 3.26 |
hadoop_AuditReplayThread_addToQueue_rdh | /**
* Add a command to this thread's processing queue.
*
* @param cmd
* Command to add.
*/
void addToQueue(AuditReplayCommand cmd) {
commandQueue.put(cmd);
} | 3.26 |
hadoop_AuditReplayThread_drainCounters_rdh | /**
* Merge all of this thread's counter values into the counters contained
* within the passed context.
*
* @param context
* The context holding the counters to increment.
*/
void drainCounters(Mapper.Context context) {
for (Map.Entry<REPLAYCOUNTERS, Counter> ent :
replayCountersMap.entrySet()) {
context.getCounter(ent.getKey()).increment(ent.getValue().getValue());
}
for (Map.Entry<String, Counter> ent : individualCommandsMap.entrySet()) {
context.getCounter(INDIVIDUAL_COMMANDS_COUNTER_GROUP, ent.getKey()).increment(ent.getValue().getValue());
}
} | 3.26 |
hadoop_GlobPattern_hasWildcard_rdh | /**
*
* @return true if this is a wildcard pattern (with special chars)
*/
public boolean hasWildcard() {
return hasWildcard;
} | 3.26 |
hadoop_GlobPattern_compiled_rdh | /**
*
* @return the compiled pattern
*/
public Pattern compiled() {
return compiled;
} | 3.26 |
hadoop_GlobPattern_compile_rdh | /**
* Compile glob pattern string
*
* @param globPattern
* the glob pattern
* @return the pattern object
*/
public static Pattern compile(String globPattern) {
return new GlobPattern(globPattern).compiled();
} | 3.26 |
hadoop_GlobPattern_set_rdh | /**
* Set and compile a glob pattern
*
* @param glob
* the glob pattern string
*/
public void set(String glob) {
StringBuilder regex = new StringBuilder();
int setOpen = 0;
int v2 = 0;
int len = glob.length();
hasWildcard = false;
for (int i = 0; i < len; i++) {
char c = glob.charAt(i);
switch (c) {
case BACKSLASH :
if ((++i) >= len) {
error("Missing escaped character", glob, i);
}
regex.append(c).append(glob.charAt(i));
continue;
case '.' :
case '$' :
case '(' :
case ')' :
case '|' :
case '+' :
// escape regex special chars that are not glob special chars
regex.append(BACKSLASH);
break;
case '*' :
regex.append('.');
hasWildcard = true;
break;
case '?' :
regex.append('.');
hasWildcard = true;
continue;
case '{' :
// start of a group
regex.append("(?:");// non-capturing
v2++;
hasWildcard = true;
continue;
case ',' :
regex.append(v2 > 0 ? '|' : c);
continue;
case '}' :
if (v2 > 0) {
// end of a group
v2--;
regex.append(")");
continue;
}
break;
case '[' :
  if (setOpen > 0) {
error("Unclosed character class", glob, i);
}
setOpen++;
hasWildcard = true;
break;
case '^' :
// ^ inside [...] can be unescaped
if (setOpen == 0) {
regex.append(BACKSLASH);
}
break;
case '!' :
// [! needs to be translated to [^
regex.append((setOpen > 0) && ('[' == glob.charAt(i - 1)) ? '^' : '!');
continue;
case ']' :
// Many set errors like [][] could not be easily detected here,
// as []], []-] and [-] are all valid POSIX glob and java regex.
// We'll just let the regex compiler do the real work.
setOpen = 0;
break;
default :
}
regex.append(c);
}
if (setOpen > 0) {
error("Unclosed character class", glob, len);
}
if (v2 > 0) {
error("Unclosed group", glob, len);
}
compiled = Pattern.compile(regex.toString(), Pattern.DOTALL);
} | 3.26 |
hadoop_GlobPattern_matches_rdh | /**
* Match input against the compiled glob pattern
*
* @param s
* input chars
* @return true for successful matches
*/
public boolean matches(CharSequence s) {
return compiled.matcher(s).matches();
} | 3.26 |
hadoop_StateStoreUtils_getHostPortString_rdh | /**
* Returns address in form of host:port, empty string if address is null.
*
* @param address
* address
* @return host:port
*/
public static String getHostPortString(InetSocketAddress address) {
if (null == address) {
return "";
}
String hostName = address.getHostName();
if (hostName.equals("0.0.0.0")) {
try {
hostName = InetAddress.getLocalHost().getHostName();
} catch (UnknownHostException e) {
LOG.error("Failed to get local host name", e);
return "";
}
}
return (hostName + ":") + address.getPort();
} | 3.26 |
hadoop_StateStoreUtils_getRecordClass_rdh | /**
* Get the base class for a record. If we get an implementation of a record we
* will return the real parent record class.
*
* @param <T>
* Type of the class of the data record.
* @param record
* Record to check its main class.
* @return Base class for the record.
*/
public static <T extends BaseRecord> Class<? extends BaseRecord> getRecordClass(final T record) {
return getRecordClass(record.getClass());
} | 3.26 |
hadoop_StateStoreUtils_getRecordName_rdh | /**
* Get the base class name for a record. If we get an implementation of a
* record we will return the real parent record class.
*
* @param <T>
* Type of the class of the data record.
* @param clazz
* Class of the data record to check.
* @return Name of the base class for the record.
*/
public static <T extends BaseRecord> String getRecordName(final Class<T> clazz) {
return getRecordClass(clazz).getSimpleName();
} | 3.26 |
hadoop_OneSidedPentomino_main_rdh | /**
* Solve the 3x30 puzzle.
*
* @param args
*/
public static void main(String[] args) {
Pentomino model = new OneSidedPentomino(3, 30);
int solutions = model.solve();
System.out.println(solutions + " solutions found.");
} | 3.26 |
hadoop_OneSidedPentomino_initializePieces_rdh | /**
* Define the one sided pieces. The flipped pieces have the same name with
* a capital letter.
*/
protected void initializePieces() {
pieces.add(new Piece("x", " x /xxx/ x ", false, oneRotation));
pieces.add(new Piece("v", "x /x /xxx", false, fourRotations));
pieces.add(new Piece("t", "xxx/ x / x ", false, fourRotations));
pieces.add(new Piece("w", " x/ xx/xx ", false, fourRotations));
pieces.add(new Piece("u", "x x/xxx", false, fourRotations));pieces.add(new Piece("i", "xxxxx", false, twoRotations));
pieces.add(new Piece("f", " xx/xx / x ", false, fourRotations));
pieces.add(new Piece("p", "xx/xx/x ", false, fourRotations));
pieces.add(new Piece("z", "xx / x / xx", false, twoRotations));
pieces.add(new Piece("n", "xx / xxx", false, fourRotations));
pieces.add(new Piece("y", " x /xxxx", false, fourRotations));
pieces.add(new Piece("l", " x/xxxx", false, fourRotations));
pieces.add(new Piece("F", "xx / xx/ x ",
false, fourRotations));
pieces.add(new Piece("P", "xx/xx/ x", false, fourRotations));pieces.add(new Piece("Z", " xx/ x /xx ", false, twoRotations));
pieces.add(new Piece("N", " xx/xxx ", false, fourRotations));
pieces.add(new Piece("Y", " x /xxxx", false, fourRotations));
pieces.add(new Piece("L", "x /xxxx", false, fourRotations));
} | 3.26 |
hadoop_PlacementConstraints_minCardinality_rdh | /**
* Similar to {@link #minCardinality(String, int, String...)}, but let you
* attach a namespace to the allocation tags.
*
* @param scope
* the scope of the constraint
* @param namespace
* the namespace of these tags
* @param minCardinality
* determines the minimum number of allocations within
* the scope
* @param allocationTags
* the constraint targets allocations with these tags
* @return the resulting placement constraint
*/
public static AbstractConstraint minCardinality(String scope, String namespace, int minCardinality, String... allocationTags) {
return cardinality(scope, namespace, minCardinality, Integer.MAX_VALUE, allocationTags);
} | 3.26 |
hadoop_PlacementConstraints_targetNotIn_rdh | /**
* Creates a constraint that requires allocations to be placed on nodes that
* belong to a scope (e.g., node or rack) that does not satisfy any of the
* target expressions.
*
* @param scope
* the scope within which the target expressions should not be
* true
* @param targetExpressions
* the expressions that need to not be true within
* the scope
* @return the resulting placement constraint
*/
public static AbstractConstraint targetNotIn(String scope, TargetExpression... targetExpressions) {
return new SingleConstraint(scope, 0, 0, targetExpressions);
} | 3.26 |
hadoop_PlacementConstraints_targetNodeAttribute_rdh | /**
* Creates a constraint that requires allocations to be placed on nodes that
* belong to a scope (e.g., node or rack) that satisfy any of the
* target expressions based on node attribute op code.
*
* @param scope
* the scope within which the target expressions should not be
* true
* @param opCode
* Node Attribute code which could be equals, not equals.
* @param targetExpressions
* the expressions that need to not be true within
* the scope
* @return the resulting placement constraint
*/
public static AbstractConstraint targetNodeAttribute(String scope, NodeAttributeOpCode opCode, TargetExpression...
targetExpressions) {
return new SingleConstraint(scope, -1, -1, opCode, targetExpressions);
} | 3.26 |
hadoop_PlacementConstraints_maxCardinality_rdh | /**
* Similar to {@link #maxCardinality(String, int, String...)}, but let you
* specify a namespace for the tags, see supported namespaces in
* {@link AllocationTagNamespaceType}.
*
* @param scope
* the scope of the constraint
* @param tagNamespace
* the namespace of these tags
* @param maxCardinality
* determines the maximum number of allocations within
* the scope
* @param allocationTags
* allocation tags
* @return the resulting placement constraint
 */
public static AbstractConstraint maxCardinality(String scope, String tagNamespace, int maxCardinality, String... allocationTags) {
  return cardinality(scope, tagNamespace, 0, maxCardinality, allocationTags);
} | 3.26 |
hadoop_PlacementConstraints_targetIn_rdh | /**
* Creates a constraint that requires allocations to be placed on nodes that
* satisfy all target expressions within the given scope (e.g., node or rack).
*
* For example, {@code targetIn(RACK, allocationTag("hbase-m"))}, allows
* allocations on nodes that belong to a rack that has at least one tag with
* value "hbase-m".
*
* @param scope
* the scope within which the target expressions should be
* satisfied
* @param targetExpressions
* the expressions that need to be satisfied within
* the scope
* @return the resulting placement constraint
*/
public static AbstractConstraint targetIn(String scope, TargetExpression... targetExpressions) {
return new SingleConstraint(scope, 1, Integer.MAX_VALUE, targetExpressions);
} | 3.26 |
hadoop_PlacementConstraints_cardinality_rdh | /**
* Similar to {@link #cardinality(String, int, int, String...)}, but let you
* attach a namespace to the given allocation tags.
*
* @param scope
* the scope of the constraint
* @param namespace
* the namespace of the allocation tags
* @param minCardinality
* determines the minimum number of allocations within
* the scope
* @param maxCardinality
* determines the maximum number of allocations within
* the scope
* @param allocationTags
* allocation tags
* @return the resulting placement constraint
*/
public static AbstractConstraint cardinality(String scope, String namespace, int minCardinality, int maxCardinality, String... allocationTags) {
return new SingleConstraint(scope, minCardinality, maxCardinality, PlacementTargets.allocationTagWithNamespace(namespace, allocationTags));
} | 3.26 |
hadoop_PlacementConstraints_allocationTagWithNamespace_rdh | /**
* Constructs a target expression on a set of allocation tags under
* a certain namespace.
*
* @param namespace
* namespace of the allocation tags
* @param allocationTags
* allocation tags
* @return a target expression
*/
public static TargetExpression allocationTagWithNamespace(String namespace, String... allocationTags) {
return new TargetExpression(TargetType.ALLOCATION_TAG, namespace, allocationTags);
} | 3.26 |
hadoop_PlacementConstraints_build_rdh | /**
* Creates a {@link PlacementConstraint} given a constraint expression.
*
* @param constraintExpr
* the constraint expression
* @return the placement constraint
*/
public static PlacementConstraint build(AbstractConstraint constraintExpr) {
return constraintExpr.build();
} | 3.26 |
hadoop_PlacementConstraints_or_rdh | /**
* A disjunction of constraints.
*
* @param children
* the children constraints, one of which should be satisfied
* @return the resulting placement constraint
*/
public static Or or(AbstractConstraint... children) {
return new Or(children);
} | 3.26 |
hadoop_PlacementConstraints_targetCardinality_rdh | /**
* This constraint generalizes the cardinality and target constraints.
*
* Consider a set of nodes N that belongs to the scope specified in the
* constraint. If the target expressions are satisfied at least minCardinality
* times and at most maxCardinality times in the node set N, then the
* constraint is satisfied.
*
* For example, {@code targetCardinality(RACK, 2, 10, allocationTag("zk"))},
* requires an allocation to be placed within a rack that has at least 2 and
* at most 10 other allocations with tag "zk".
*
* @param scope
* the scope of the constraint
* @param minCardinality
* the minimum number of times the target expressions
* have to be satisfied with the given scope
* @param maxCardinality
* the maximum number of times the target expressions
* have to be satisfied with the given scope
* @param targetExpressions
* the target expressions
* @return the resulting placement constraint
*/
public static AbstractConstraint targetCardinality(String scope, int minCardinality, int maxCardinality, TargetExpression... targetExpressions) {
return new SingleConstraint(scope, minCardinality, maxCardinality, targetExpressions);
} | 3.26 |
hadoop_PlacementConstraints_delayedOr_rdh | /**
* Creates a composite constraint that includes a list of timed placement
* constraints. The scheduler should try to satisfy first the first timed
* child constraint within the specified time window. If this is not possible,
* it should attempt to satisfy the second, and so on.
*
* @param children
* the timed children constraints
* @return the resulting composite constraint
*/
public static DelayedOr delayedOr(TimedPlacementConstraint... children) {
return new DelayedOr(children);
} | 3.26 |
hadoop_PlacementConstraints_and_rdh | // Creation of compound constraints.
/**
* A conjunction of constraints.
*
* @param children
* the children constraints that should all be satisfied
* @return the resulting placement constraint
*/
public static And and(AbstractConstraint... children) {
return new And(children);
} | 3.26 |
hadoop_PlacementConstraints_timedClockConstraint_rdh | // Creation of timed constraints to be used in a DELAYED_OR constraint.
/**
* Creates a placement constraint that has to be satisfied within a time
* window.
*
* @param constraint
* the placement constraint
* @param delay
* the length of the time window within which the constraint has
* to be satisfied
* @param timeUnit
* the unit of time of the time window
* @return the resulting timed placement constraint
*/
public static TimedPlacementConstraint timedClockConstraint(AbstractConstraint constraint, long delay, TimeUnit timeUnit) {
  return new TimedPlacementConstraint(constraint, timeUnit.toMillis(delay), DelayUnit.MILLISECONDS);
} | 3.26 |
hadoop_PlacementConstraints_nodeAttribute_rdh | /**
* Constructs a target expression on a node attribute. It is satisfied if
* the specified node attribute has one of the specified values.
*
* @param attributeKey
* the name of the node attribute
* @param attributeValues
* the set of values that the attribute should take
* values from
* @return the resulting expression on the node attribute
 */
public static TargetExpression nodeAttribute(String attributeKey, String... attributeValues) {
return new TargetExpression(TargetType.NODE_ATTRIBUTE, attributeKey, attributeValues);
} | 3.26 |
hadoop_PlacementConstraints_timedOpportunitiesConstraint_rdh | /**
* Creates a placement constraint that has to be satisfied within a number of
* placement opportunities (invocations of the scheduler).
*
* @param constraint
* the placement constraint
* @param delay
* the number of scheduling opportunities within which the
* constraint has to be satisfied
* @return the resulting timed placement constraint
*/
public static TimedPlacementConstraint timedOpportunitiesConstraint(AbstractConstraint constraint, long delay) {
return new TimedPlacementConstraint(constraint, delay, DelayUnit.OPPORTUNITIES);
} | 3.26 |
hadoop_PlacementConstraints_nodePartition_rdh | /**
* Constructs a target expression on a node partition. It is satisfied if
* the specified node partition has one of the specified nodePartitions.
*
* @param nodePartitions
* the set of values that the attribute should take
* values from
* @return the resulting expression on the node attribute
*/
public static TargetExpression nodePartition(String... nodePartitions) {
return new TargetExpression(TargetType.NODE_ATTRIBUTE, NODE_PARTITION, nodePartitions);
}
/**
* Constructs a target expression on an allocation tag. It is satisfied if
* there are allocations with one of the given tags. The default namespace
* for these tags is {@link AllocationTagNamespaceType#SELF} | 3.26 |
hadoop_ReplayJobFactory_start_rdh | /**
* Start the reader thread, wait for latch if necessary.
*/
@Override
public void start() {
this.rThread.start();
} | 3.26 |
hadoop_ReplayJobFactory_update_rdh | /**
*
* @param item
*/
public void update(Statistics.ClusterStats item) {
} | 3.26 |
hadoop_DockerCommandExecutor_getContainerStatus_rdh | /**
* Get the status of the docker container. This runs a docker inspect to
* get the status. If the container no longer exists, docker inspect throws
* an exception and the nonexistent status is returned.
*
* @param containerId
* the id of the container.
* @param privilegedOperationExecutor
* the privileged operations executor.
* @return a {@link DockerContainerStatus} representing the current status.
*/
public static DockerContainerStatus getContainerStatus(String containerId, PrivilegedOperationExecutor privilegedOperationExecutor, Context nmContext) {
try {
String currentContainerStatus = executeStatusCommand(containerId, privilegedOperationExecutor, nmContext);
DockerContainerStatus dockerContainerStatus = parseContainerStatus(currentContainerStatus);
LOG.debug("Container Status: {} ContainerId: {}", dockerContainerStatus.getName(), containerId);
return dockerContainerStatus;
} catch (ContainerExecutionException e) {
LOG.debug("Container Status: {} ContainerId: {}", DockerContainerStatus.NONEXISTENT.getName(), containerId);
return DockerContainerStatus.NONEXISTENT;
}
} | 3.26 |
hadoop_DockerCommandExecutor_executeDockerCommand_rdh | /**
* Execute a docker command and return the output.
*
* @param dockerCommand
* the docker command to run.
* @param containerId
* the id of the container.
* @param env
* environment for the container.
* @param privilegedOperationExecutor
* the privileged operations executor.
* @param disableFailureLogging
* disable logging for known rc failures.
* @return the output of the operation.
* @throws ContainerExecutionException
* if the operation fails.
*/
public static String executeDockerCommand(DockerCommand dockerCommand, String containerId, Map<String, String> env, PrivilegedOperationExecutor privilegedOperationExecutor, boolean disableFailureLogging, Context nmContext) throws ContainerExecutionException {
PrivilegedOperation dockerOp = dockerCommand.preparePrivilegedOperation(dockerCommand, containerId, env, nmContext);
if (disableFailureLogging) {
dockerOp.disableFailureLogging();
}
LOG.debug("Running docker command: {}", dockerCommand);
try {
String result = privilegedOperationExecutor.executePrivilegedOperation(null, dockerOp, null, env, true, false);
if ((result != null) && (!result.isEmpty())) {
result = result.trim();
}
return result;
} catch (PrivilegedOperationException e) {
throw new ContainerExecutionException("Docker operation failed", e.getExitCode(), e.getOutput(), e.getErrorOutput());
}
} | 3.26 |
hadoop_DockerCommandExecutor_isStoppable_rdh | /**
* Is the container in a stoppable state?
*
* @param containerStatus
* the container's {@link DockerContainerStatus}.
* @return is the container in a stoppable state.
*/
public static boolean isStoppable(DockerContainerStatus containerStatus) {
if (containerStatus.equals(DockerContainerStatus.RUNNING) || containerStatus.equals(DockerContainerStatus.RESTARTING)) {
return true;
}
return false;
} | 3.26 |
hadoop_DockerCommandExecutor_executeStatusCommand_rdh | /**
* Execute the docker inspect command to retrieve the docker container's
* status.
*
* @param containerId
* the id of the container.
* @param privilegedOperationExecutor
* the privileged operations executor.
* @return the current container status.
* @throws ContainerExecutionException
* if the docker operation fails to run.
*/
private static String executeStatusCommand(String containerId, PrivilegedOperationExecutor privilegedOperationExecutor, Context nmContext) throws ContainerExecutionException {
DockerInspectCommand dockerInspectCommand = new DockerInspectCommand(containerId).getContainerStatus();
try {
return DockerCommandExecutor.executeDockerCommand(dockerInspectCommand, containerId, null, privilegedOperationExecutor, true, nmContext);
} catch (ContainerExecutionException e) {
throw new ContainerExecutionException(e);
}
} | 3.26 |
hadoop_DockerCommandExecutor_isKillable_rdh | /**
* Is the container in a killable state?
*
* @param containerStatus
* the container's {@link DockerContainerStatus}.
* @return is the container in a killable state.
*/
public static boolean isKillable(DockerContainerStatus containerStatus) {
return isStoppable(containerStatus);
}
/**
* Is the container in a removable state?
*
* @param containerStatus
* the container's {@link DockerContainerStatus} | 3.26 |
hadoop_DockerCommandExecutor_isStartable_rdh | /**
* Is the container in a startable state?
*
* @param containerStatus
* the container's {@link DockerContainerStatus}.
* @return is the container in a startable state.
*/
public static boolean isStartable(DockerContainerStatus containerStatus) {
if (containerStatus.equals(DockerContainerStatus.EXITED) || containerStatus.equals(DockerContainerStatus.STOPPED)) {
  return true;
}
return false;
} | 3.26 |
hadoop_DockerCommandExecutor_parseContainerStatus_rdh | /**
* Parses the container status string.
*
* @param containerStatusStr
* container status.
* @return a {@link DockerContainerStatus} representing the status.
*/
public static DockerContainerStatus parseContainerStatus(String containerStatusStr) {
DockerContainerStatus dockerContainerStatus;
if (containerStatusStr == null) {
dockerContainerStatus = DockerContainerStatus.UNKNOWN;
} else if (containerStatusStr.equals(DockerContainerStatus.CREATED.getName())) {
dockerContainerStatus = DockerContainerStatus.CREATED;
} else if (containerStatusStr.equals(DockerContainerStatus.RUNNING.getName())) {
dockerContainerStatus = DockerContainerStatus.RUNNING;
} else if (containerStatusStr.equals(DockerContainerStatus.STOPPED.getName())) {
dockerContainerStatus = DockerContainerStatus.STOPPED;
} else if (containerStatusStr.equals(DockerContainerStatus.RESTARTING.getName())) {
dockerContainerStatus = DockerContainerStatus.RESTARTING;
} else if (containerStatusStr.equals(DockerContainerStatus.REMOVING.getName())) {
dockerContainerStatus = DockerContainerStatus.REMOVING;
} else if (containerStatusStr.equals(DockerContainerStatus.DEAD.getName())) {
dockerContainerStatus = DockerContainerStatus.DEAD;
} else if (containerStatusStr.equals(DockerContainerStatus.EXITED.getName())) {
dockerContainerStatus = DockerContainerStatus.EXITED;
} else if (containerStatusStr.equals(DockerContainerStatus.NONEXISTENT.getName())) {
dockerContainerStatus = DockerContainerStatus.NONEXISTENT;
} else {
dockerContainerStatus = DockerContainerStatus.UNKNOWN;
}
return dockerContainerStatus;
} | 3.26 |
hadoop_SerialJobFactory_start_rdh | /**
* Start the reader thread, wait for latch if necessary.
*/
@Override
public void start() {
LOG.info(" Starting Serial submission ");
this.rThread.start();
} | 3.26 |
hadoop_SerialJobFactory_update_rdh | /**
* SERIAL. Once you get notification from StatsCollector about the job
* completion ,simply notify the waiting thread.
*
* @param item
*/
@Override
public void update(Statistics.JobStats item) {
// simply notify in case of serial submissions. We are just bothered
// if submitted job is completed or not.
lock.lock();
try {
jobCompleted.signalAll();
} finally {
lock.unlock();
}
} | 3.26 |
hadoop_SerialJobFactory_setDistCacheEmulator_rdh | // it is need for test
void setDistCacheEmulator(DistributedCacheEmulator e) {
jobCreator.setDistCacheEmulator(e);
} | 3.26 |
hadoop_SerialJobFactory_run_rdh | /**
* SERIAL : In this scenario . method waits on notification ,
* that a submitted job is actually completed. Logic is simple.
* ===
* while(true) {
* wait till previousjob is completed.
* break;
* }
* submit newJob.
* previousJob = newJob;
* ==
*/
@Override
public void run() {
try {
startFlag.await();
if (Thread.currentThread().isInterrupted()) {
return;
}
LOG.info("START SERIAL @ " + System.currentTimeMillis());
GridmixJob prevJob;
while (!Thread.currentThread().isInterrupted()) {
final JobStory job;
try {
job = getNextJobFiltered();
if (null == job) {
return;
}
if (LOG.isDebugEnabled()) {
LOG.debug("Serial mode submitting job " + job.getName());
}
prevJob = jobCreator.createGridmixJob(conf, 0L, job, scratch, userResolver.getTargetUgi(UserGroupInformation.createRemoteUser(job.getUser())), sequence.getAndIncrement());
lock.lock();
try {
LOG.info(" Submitted the job " + prevJob);
submitter.add(prevJob);
} finally {
lock.unlock();
}
} catch (IOException e) {
error = e;
// If submission of current job fails , try to submit the next job.
return;
}
if (prevJob != null) {
// Wait till previous job submitted is completed.
lock.lock();
try {
while (true) {
try {
jobCompleted.await();
} catch (InterruptedException ie) {
LOG.error(" Error in SerialJobFactory while waiting for job completion ", ie);
return;
}
if (LOG.isDebugEnabled()) {
LOG.debug((" job " + job.getName()) + " completed ");
}
break;
}
} finally {
lock.unlock();
}
prevJob = null;
}
}
} catch (InterruptedException e) {
  return;
} finally {
  IOUtils.cleanupWithLogger(null, jobProducer);
}
} | 3.26 |
hadoop_AbfsClient_checkUserError_rdh | /**
* Returns true if the status code lies in the range of user error.
*
* @param responseStatusCode
* http response status code.
* @return True or False.
*/
private boolean checkUserError(int responseStatusCode) {
return (responseStatusCode >= HttpURLConnection.HTTP_BAD_REQUEST) && (responseStatusCode < HttpURLConnection.HTTP_INTERNAL_ERROR);
} | 3.26 |
hadoop_AbfsClient_renameIdempotencyCheckOp_rdh | /**
* Check if the rename request failure is post a retry and if earlier rename
* request might have succeeded at back-end.
*
* If a source etag was passed in, and the error was 404, get the
* etag of any file at the destination.
* If it matches the source etag, then the rename is considered
* a success.
* Exceptions raised in the probe of the destination are swallowed,
* so that they do not interfere with the original rename failures.
*
* @param source
* source path
* @param op
* Rename request REST operation response with non-null HTTP response
* @param destination
* rename destination path
* @param sourceEtag
* etag of source file. may be null or empty
* @param tracingContext
* Tracks identifiers for request header
* @return true if the file was successfully copied
*/
public boolean renameIdempotencyCheckOp(final String source, final String sourceEtag, final AbfsRestOperation op, final String destination, TracingContext tracingContext) {
Preconditions.checkArgument(op.hasResult(), "Operations has null HTTP response");
// removing isDir from debug logs as it can be misleading
LOG.debug("rename({}, {}) failure {}; retry={} etag {}", source, destination, op.getResult().getStatusCode(), op.isARetriedRequest(), sourceEtag);
if (!(op.isARetriedRequest() && (op.getResult().getStatusCode() == HttpURLConnection.HTTP_NOT_FOUND))) {
// only attempt recovery if the failure was a 404 on a retried rename request.
return false;
}
if (isNotEmpty(sourceEtag)) {
// Server has returned HTTP 404, we have an etag, so see
// if the rename has actually taken place,
LOG.info("rename {} to {} failed, checking etag of destination", source, destination);
try {
final AbfsRestOperation destStatusOp = getPathStatus(destination, false, tracingContext);
final AbfsHttpOperation result = destStatusOp.getResult();
final boolean v68 = (result.getStatusCode() == HttpURLConnection.HTTP_OK) && sourceEtag.equals(extractEtagHeader(result));
LOG.info("File rename has taken place: recovery {}", v68 ? "succeeded"
: "failed");
return v68;
} catch (AzureBlobFileSystemException ex) {
// GetFileStatus on the destination failed, the rename did not take place
// or some other failure. log and swallow.
LOG.debug("Failed to get status of path {}", destination, ex);
}
} else {
  LOG.debug("No source etag; unable to probe for the operation's success");
}
return false;
} | 3.26 |
hadoop_AbfsClient_deleteIdempotencyCheckOp_rdh | /**
* Check if the delete request failure is post a retry and if delete failure
* qualifies to be a success response assuming idempotency.
*
* There are below scenarios where delete could be incorrectly deducted as
* success post request retry:
* 1. Target was originally not existing and initial delete request had to be
* re-tried.
* 2. Parallel delete issued from any other store interface rather than
* delete issued from this filesystem instance.
* These are few corner cases and usually returning a success at this stage
* should help the job to continue.
*
* @param op
* Delete request REST operation response with non-null HTTP response
* @return REST operation response post idempotency check
*/
public AbfsRestOperation deleteIdempotencyCheckOp(final AbfsRestOperation op) {
Preconditions.checkArgument(op.hasResult(), "Operations has null HTTP response");
if ((op.isARetriedRequest() && (op.getResult().getStatusCode() == HttpURLConnection.HTTP_NOT_FOUND)) && DEFAULT_DELETE_CONSIDERED_IDEMPOTENT) {
// Server has returned HTTP 404, which means path no longer
// exists. Assuming delete result to be idempotent, return success.
final AbfsRestOperation successOp =
getAbfsRestOperation(AbfsRestOperationType.DeletePath, HTTP_METHOD_DELETE,
op.getUrl(), op.getRequestHeaders());
successOp.hardSetResult(HttpURLConnection.HTTP_OK);
LOG.debug("Returning success response from delete idempotency logic");
return successOp;
}
return op;
} | 3.26 |
hadoop_AbfsClient_getAbfsRestOperation_rdh | /**
* Creates an AbfsRestOperation with parameters including request headers and SAS token.
*
* @param operationType
* The type of the operation.
* @param httpMethod
* The HTTP method of the operation.
* @param url
* The URL associated with the operation.
* @param requestHeaders
* The list of HTTP headers for the request.
* @param sasTokenForReuse
* The SAS token for reusing authentication.
* @return An AbfsRestOperation instance.
*/
AbfsRestOperation getAbfsRestOperation(final AbfsRestOperationType operationType, final String httpMethod, final URL url, final List<AbfsHttpHeader> requestHeaders, final String sasTokenForReuse) {
return new AbfsRestOperation(operationType, this, httpMethod, url, requestHeaders, sasTokenForReuse);
} | 3.26 |
hadoop_AbfsClient_appendSASTokenToQuery_rdh | /**
* If configured for SAS AuthType, appends SAS token to queryBuilder.
*
* @param path
* @param operation
* @param queryBuilder
* @param cachedSasToken
* - previously acquired SAS token to be reused.
* @return sasToken - returned for optional re-use.
* @throws SASTokenProviderException
*/
private String appendSASTokenToQuery(String path, String operation, AbfsUriQueryBuilder queryBuilder, String cachedSasToken) throws SASTokenProviderException {
String sasToken = null;
if (this.authType == AuthType.SAS) {
try {
LOG.trace("Fetch SAS token for {} on {}", operation, path);
if (cachedSasToken == null) {
sasToken = sasTokenProvider.getSASToken(this.accountName, this.filesystem, path, operation);
if ((sasToken == null) || sasToken.isEmpty()) {
throw new UnsupportedOperationException("SASToken received is empty or null");
}
} else {
sasToken = cachedSasToken;
LOG.trace("Using cached SAS token.");
}
// if SAS Token contains a prefix of ?, it should be removed
if (sasToken.charAt(0) == '?') {
sasToken = sasToken.substring(1);
}
queryBuilder.setSASToken(sasToken);
LOG.trace("SAS token fetch complete for {} on {}", operation, path);
} catch (Exception ex) {
throw new SASTokenProviderException(String.format("Failed to acquire a SAS token for %s on %s due to %s", operation, path, ex.toString()));
}
}
return sasToken;
} | 3.26 |
hadoop_AbfsClient_getAbfsCounters_rdh | /**
* Getter for abfsCounters from AbfsClient.
*
* @return AbfsCounters instance.
*/
protected AbfsCounters getAbfsCounters() {
return abfsCounters;
} | 3.26 |
hadoop_AbfsClient_m5_rdh | /**
* Creates an AbfsRestOperation with additional parameters for buffer and SAS token.
*
* @param operationType
* The type of the operation.
* @param httpMethod
* The HTTP method of the operation.
* @param url
* The URL associated with the operation.
* @param requestHeaders
* The list of HTTP headers for the request.
* @param buffer
* The byte buffer containing data for the operation.
* @param bufferOffset
* The offset within the buffer where the data starts.
* @param bufferLength
* The length of the data within the buffer.
* @param sasTokenForReuse
* The SAS token for reusing authentication.
* @return An AbfsRestOperation instance.
*/
AbfsRestOperation m5(final AbfsRestOperationType operationType, final String httpMethod, final URL url, final List<AbfsHttpHeader> requestHeaders, final byte[] buffer, final int bufferOffset, final int bufferLength, final String sasTokenForReuse) {
return new AbfsRestOperation(operationType, this, httpMethod, url, requestHeaders, buffer, bufferOffset, bufferLength, sasTokenForReuse);
} | 3.26 |
hadoop_AbfsClient_checkAccess_rdh | /**
* Talks to the server to check whether the permission specified in
* the rwx parameter is present for the path specified in the path parameter.
*
* @param path
* Path for which access check needs to be performed
* @param rwx
* The permission to be checked on the path
* @param tracingContext
* Tracks identifiers for request header
* @return The {@link AbfsRestOperation} object for the operation
* @throws AzureBlobFileSystemException
* in case of bad requests
*/
public AbfsRestOperation checkAccess(String path, String rwx, TracingContext tracingContext) throws AzureBlobFileSystemException {
AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder();
abfsUriQueryBuilder.addQuery(QUERY_PARAM_ACTION, CHECK_ACCESS);
abfsUriQueryBuilder.addQuery(QUERY_FS_ACTION, rwx);
appendSASTokenToQuery(path, SASTokenProvider.CHECK_ACCESS_OPERATION, abfsUriQueryBuilder);
URL url = createRequestUrl(path, abfsUriQueryBuilder.toString());
AbfsRestOperation op = getAbfsRestOperation(AbfsRestOperationType.CheckAccess, AbfsHttpConstants.HTTP_METHOD_HEAD, url, createDefaultHeaders());
op.execute(tracingContext);
return op;
} | 3.26 |
hadoop_AbfsClient_appendSuccessCheckOp_rdh | // For AppendBlob its possible that the append succeeded in the backend but the request failed.
// However a retry would fail with an InvalidQueryParameterValue
// (as the current offset would be unacceptable).
// Hence, we pass/succeed the appendblob append call
// in case we are doing a retry after checking the length of the file
public boolean appendSuccessCheckOp(AbfsRestOperation op, final String path, final long length, TracingContext tracingContext) throws AzureBlobFileSystemException {
if (op.isARetriedRequest() && (op.getResult().getStatusCode() ==
HttpURLConnection.HTTP_BAD_REQUEST)) {
final AbfsRestOperation destStatusOp = getPathStatus(path, false, tracingContext);
if (destStatusOp.getResult().getStatusCode() == HttpURLConnection.HTTP_OK) {
String fileLength = destStatusOp.getResult().getResponseHeader(HttpHeaderConfigurations.CONTENT_LENGTH);
if (length <= Long.parseLong(fileLength)) {
LOG.debug("Returning success response from append blob idempotency code");
return true;
}
}
}
return false;
} | 3.26 |
hadoop_AbfsClient_getDirectoryQueryParameter_rdh | /**
* Get the directory query parameter used by the List Paths REST API and used
* as the path in the continuation token. If the input path is null or the
* root path "/", empty string is returned. If the input path begins with '/',
* the return value is the substring beginning at offset 1. Otherwise, the
* input path is returned.
*
* @param path
* the path to be listed.
* @return the value of the directory query parameter
*/
public static String getDirectoryQueryParameter(final String path) {
String directory = path;
if (Strings.isNullOrEmpty(directory)) {
directory = AbfsHttpConstants.EMPTY_STRING;
} else if (directory.charAt(0) == '/') {
directory = directory.substring(1);
}
return directory;
} | 3.26 |
hadoop_AbfsClient_getAbfsConfiguration_rdh | /**
* Getter for abfsConfiguration from AbfsClient.
*
* @return AbfsConfiguration instance
*/
protected AbfsConfiguration getAbfsConfiguration() {
return abfsConfiguration;
} | 3.26 |
hadoop_RefreshMountTableEntriesRequest_newInstance_rdh | /**
* API request for refreshing mount table cached entries from state store.
 */
public abstract class RefreshMountTableEntriesRequest {
public static RefreshMountTableEntriesRequest newInstance() throws IOException {
return StateStoreSerializer.newRecord(RefreshMountTableEntriesRequest.class);
} | 3.26 |
hadoop_SubmitterUserResolver_needsTargetUsersList_rdh | /**
* {@inheritDoc }
* <p>
* Since {@link SubmitterUserResolver} returns the user name who is running
* gridmix, it doesn't need a target list of users.
*/
public boolean needsTargetUsersList() {
return false;
} | 3.26 |
hadoop_ReleaseContainerEvent_getContainer_rdh | /**
* Get RMContainer.
*
* @return RMContainer.
*/
public RMContainer getContainer() {
return container;
} | 3.26 |
hadoop_CoderUtil_getEmptyChunk_rdh | /**
* Make sure to return an empty chunk buffer for the desired length.
*
* @param leastLength
* @return empty chunk of zero bytes
*/
static byte[] getEmptyChunk(int leastLength) {
if (emptyChunk.length >= leastLength) {
  return emptyChunk; // In most time
}
synchronized(CoderUtil.class) {
emptyChunk = new byte[leastLength];
}
return emptyChunk;
} | 3.26 |
hadoop_CoderUtil_toBuffers_rdh | /**
* Convert an array of this chunks to an array of ByteBuffers
*
* @param chunks
* chunks to convertToByteArrayState into buffers
* @return an array of ByteBuffers
 */
static ByteBuffer[] toBuffers(ECChunk[] chunks) {
ByteBuffer[] buffers = new ByteBuffer[chunks.length];
ECChunk chunk;
for (int i = 0; i < chunks.length; i++) {
chunk = chunks[i];
if (chunk == null) {
buffers[i] = null;
} else {
buffers[i] = chunk.getBuffer();
if (chunk.isAllZero()) {
CoderUtil.resetBuffer(buffers[i], buffers[i].remaining());
}
}
}
return buffers;
} | 3.26 |
hadoop_CoderUtil_resetBuffer_rdh | /**
* Ensure the buffer (either input or output) ready to read or write with ZERO
* bytes fully in specified length of len.
*
* @param buffer
* bytes array buffer
* @return the buffer itself
*/
static byte[] resetBuffer(byte[] buffer, int offset, int len) {
byte[] empty = getEmptyChunk(len);
System.arraycopy(empty, 0, buffer, offset, len);
return buffer;
} | 3.26 |
hadoop_CoderUtil_findFirstValidInput_rdh | /**
* Find the valid input from all the inputs.
*
* @param inputs
* input buffers to look for valid input
* @return the first valid input
*/
static <T> T findFirstValidInput(T[] inputs) {
for (T input : inputs) {
if (input != null) {
return input;
}
}
throw new HadoopIllegalArgumentException("Invalid inputs are found, all being null");
} | 3.26 |
hadoop_CoderUtil_getNullIndexes_rdh | /**
* Get indexes array for items marked as null, either erased or
* not to read.
*
* @return indexes array
*/
static <T> int[] getNullIndexes(T[] inputs) {
int[] nullIndexes = new int[inputs.length];
int idx = 0;
for (int i = 0; i < inputs.length; i++) {
if (inputs[i] == null) {
nullIndexes[idx++] = i;
}
}
return Arrays.copyOf(nullIndexes, idx);
} | 3.26 |
hadoop_CoderUtil_resetOutputBuffers_rdh | /**
* Initialize the output buffers with ZERO bytes.
 */
static void resetOutputBuffers(byte[][] buffers, int[] offsets, int dataLen) {
for (int i = 0; i < buffers.length; i++) {
resetBuffer(buffers[i], offsets[i], dataLen);
}
} | 3.26 |
hadoop_CoderUtil_getValidIndexes_rdh | /**
* Picking up indexes of valid inputs.
*
* @param inputs
* decoding input buffers
* @param <T>
*/
static <T> int[] getValidIndexes(T[] inputs) {
int[] validIndexes = new int[inputs.length];
int idx = 0;
for (int i = 0; i < inputs.length; i++) {
  if (inputs[i] != null) {
validIndexes[idx++] = i;
}
}
return Arrays.copyOf(validIndexes, idx);
} | 3.26 |
hadoop_StorageUnit_multiply_rdh | /**
* Using BigDecimal so we can throw if we are overflowing the Long.Max.
*
* @param first
* - First Num.
* @param second
* - Second Num.
* @return Returns a double
*/
private static double multiply(double first, double second) {
BigDecimal firstVal = new BigDecimal(first);
BigDecimal v3 = new BigDecimal(second);
return firstVal.multiply(v3).setScale(PRECISION, RoundingMode.HALF_UP).doubleValue();
} | 3.26 |
hadoop_StorageUnit_divide_rdh | /**
* Using BigDecimal to avoid issues with overflow and underflow.
*
* @param value
* - value
* @param divisor
* - divisor.
* @return -- returns a double that represents this value
*/
private static double divide(double value, double divisor) {
BigDecimal val = new BigDecimal(value);
BigDecimal bDivisor = new BigDecimal(divisor);
return val.divide(bDivisor).setScale(PRECISION, RoundingMode.HALF_UP).doubleValue();
} | 3.26 |
hadoop_VolumeFailureInfo_getEstimatedCapacityLost_rdh | /**
* Returns estimate of capacity lost. This is said to be an estimate, because
* in some cases it's impossible to know the capacity of the volume, such as if
* we never had a chance to query its capacity before the failure occurred.
*
* @return estimate of capacity lost in bytes
*/
public long getEstimatedCapacityLost() {
return this.estimatedCapacityLost;
} | 3.26 |
hadoop_VolumeFailureInfo_getFailedStorageLocation_rdh | /**
* Returns the storage location that has failed.
*
* @return storage location that has failed
 */
public StorageLocation getFailedStorageLocation() {
return this.failedStorageLocation;
} | 3.26 |
hadoop_ECPolicyLoader_loadECPolicies_rdh | /**
* Load EC policies from a XML configuration file.
*
* @param policyFile
* EC policy file
* @return list of EC policies
* @throws ParserConfigurationException
* if ParserConfigurationException happen
* @throws IOException
* if no such EC policy file
* @throws SAXException
* if the xml file has some invalid elements
*/
private List<ErasureCodingPolicy> loadECPolicies(File policyFile) throws ParserConfigurationException, IOException, SAXException {
LOG.info("Loading EC policy file " + policyFile);
// Read and parse the EC policy file.
DocumentBuilderFactory dbf = XMLUtils.newSecureDocumentBuilderFactory();
dbf.setIgnoringComments(true);
DocumentBuilder builder = dbf.newDocumentBuilder();
Document doc = builder.parse(policyFile);
Element root = doc.getDocumentElement();
if (!"configuration".equals(root.getTagName()))
{
throw new RuntimeException("Bad EC policy configuration file: " + "top-level element not <configuration>");
}
List<ErasureCodingPolicy> policies;
if (root.getElementsByTagName("layoutversion").getLength() > 0) {
if (loadLayoutVersion(root) == LAYOUT_VERSION) {
if (root.getElementsByTagName("schemas").getLength() > 0) {
Map<String, ECSchema> schemas = loadSchemas(root);
if (root.getElementsByTagName("policies").getLength() > 0) {
policies = loadPolicies(root, schemas);
} else {
throw new RuntimeException("Bad EC policy configuration file: " + "no <policies> element");
}
} else {
throw new RuntimeException("Bad EC policy configuration file: " + "no <schemas> element");
}
} else {
throw new RuntimeException("The parse failed because of " + "bad layoutversion value");
}
} else {
throw new RuntimeException("Bad EC policy configuration file: " + "no <layoutVersion> element");
}
return policies;
} | 3.26 |
hadoop_ECPolicyLoader_loadPolicy_rdh | /**
* Load a EC policy from a policy element in the XML configuration file.
*
* @param element
* EC policy element
* @param schemas
* all valid schemas of the EC policy file
* @return EC policy
*/
private ErasureCodingPolicy loadPolicy(Element element, Map<String, ECSchema> schemas) {
NodeList fields = element.getChildNodes();
ECSchema schema = null;
int cellSize = 0;
for (int i = 0; i < fields.getLength(); i++) {
Node fieldNode = fields.item(i);
if (fieldNode instanceof Element) {
Element field = ((Element) (fieldNode));
String tagName = field.getTagName();
// Get the nonnull text value.
Text text = ((Text) (field.getFirstChild()));
if (text != null) {
if (!text.isElementContentWhitespace()) {
String value = text.getData().trim();
if ("schema".equals(tagName))
{
schema = schemas.get(value);
} else if ("cellsize".equals(tagName)) {
try {
cellSize = Integer.parseInt(value);
} catch (NumberFormatException e) {
throw new IllegalArgumentException((("Bad EC policy cellsize" + " value ") + value)
+ " is found. It should be an integer");}
} else {
LOG.warn("Invalid tagName: " + tagName);
}
}
} else {
throw new IllegalArgumentException(("Value of <" + tagName) +
"> is null");
}
}
}
if ((schema != null) && (cellSize > 0)) {
return new ErasureCodingPolicy(schema, cellSize);
} else {
throw new RuntimeException("Bad policy is found in" + " EC policy configuration file");
}
} | 3.26 |
hadoop_ECPolicyLoader_loadSchema_rdh | /**
* Load a schema from a schema element in the XML configuration file.
*
* @param element
* EC schema element
* @return ECSchema
*/
private ECSchema loadSchema(Element element) {
Map<String, String> schemaOptions = new HashMap<String, String>();
NodeList fields = element.getChildNodes();
for (int i = 0; i < fields.getLength(); i++) {
Node fieldNode = fields.item(i);
if (fieldNode instanceof Element) {
Element field = ((Element) (fieldNode));
String v30 = field.getTagName();
if ("k".equals(v30)) {
v30 = "numDataUnits";
} else if ("m".equals(v30)) {
v30 = "numParityUnits";
}
// Get the nonnull text value.
Text text = ((Text) (field.getFirstChild()));
if (text != null) {
  String value = text.getData().trim();
schemaOptions.put(v30, value);
} else {
throw new IllegalArgumentException(("Value of <" + v30) + "> is null");
}
}
}
return new ECSchema(schemaOptions);
} | 3.26 |
hadoop_ECPolicyLoader_loadSchemas_rdh | /**
* Load schemas from root element in the XML configuration file.
*
* @param root
* root element
* @return EC schema map
*/
private Map<String, ECSchema> loadSchemas(Element root) {
NodeList elements = root.getElementsByTagName("schemas").item(0).getChildNodes();
Map<String, ECSchema> schemas = new HashMap<String, ECSchema>();
for (int i = 0; i < elements.getLength(); i++) {
Node node = elements.item(i);
if (node instanceof Element) {
Element element = ((Element) (node));
if ("schema".equals(element.getTagName())) {
String schemaId = element.getAttribute("id");
ECSchema schema = loadSchema(element);
if (!schemas.containsValue(schema)) {
schemas.put(schemaId, schema);
} else {
throw new RuntimeException(("Repetitive schemas in EC policy" + " configuration file: ") + schemaId);}
} else {
throw new RuntimeException(("Bad element in EC policy" + " configuration file: ") + element.getTagName());
}
}
}
return schemas;
} | 3.26 |
hadoop_ECPolicyLoader_getPolicyFile_rdh | /**
* Path to the XML file containing user defined EC policies. If the path is
* relative, it is searched for in the classpath.
*
* @param policyFilePath
* path of EC policy file
* @return EC policy file
*/
private File getPolicyFile(String policyFilePath) throws MalformedURLException {
File policyFile = new File(policyFilePath);
if (!policyFile.isAbsolute()) {
URL url = new URL(policyFilePath);
if (!url.getProtocol().equalsIgnoreCase("file")) {
throw new RuntimeException(("EC policy file " + url) + " found on the classpath is not on the local filesystem.");
} else {
policyFile = new File(url.getPath());
}
}
return policyFile;
} | 3.26 |
hadoop_ECPolicyLoader_loadLayoutVersion_rdh | /**
* Load layoutVersion from root element in the XML configuration file.
*
* @param root
* root element
* @return layout version
*/
private int loadLayoutVersion(Element root) {
int layoutVersion;
Text text = ((Text) (root.getElementsByTagName("layoutversion").item(0).getFirstChild()));
if (text != null) {
String value = text.getData().trim();
try {
layoutVersion = Integer.parseInt(value);
} catch (NumberFormatException e) {
throw new IllegalArgumentException(("Bad layoutVersion value " + value) + " is found. It should be an integer");
}
} else {
throw new IllegalArgumentException("Value of <layoutVersion> is null");}
return layoutVersion;
} | 3.26 |
hadoop_FutureDataInputStreamBuilder_withFileStatus_rdh | /**
* A FileStatus may be provided to the open request.
* It is up to the implementation whether to use this or not.
*
* @param status
* status: may be null
* @return the builder.
*/
default FutureDataInputStreamBuilder withFileStatus(@Nullable FileStatus status) {
return this;
} | 3.26 |
hadoop_RouterDelegationTokenSecretManager_getTokenByRouterStoreToken_rdh | /**
* Get RMDelegationTokenIdentifier according to RouterStoreToken.
*
* @param identifier
* RMDelegationTokenIdentifier
* @return RMDelegationTokenIdentifier
* @throws YarnException
* An internal conversion error occurred when getting the Token
* @throws IOException
* IO exception occurred
*/
public RMDelegationTokenIdentifier getTokenByRouterStoreToken(RMDelegationTokenIdentifier identifier) throws YarnException, IOException {
try {
RouterRMTokenResponse response = federationFacade.getTokenByRouterStoreToken(identifier);
YARNDelegationTokenIdentifier responseIdentifier = response.getRouterStoreToken().getTokenIdentifier();
return ((RMDelegationTokenIdentifier) (responseIdentifier));
} catch (Exception ex) {
throw new YarnException(ex);
}
} | 3.26 |
hadoop_RouterDelegationTokenSecretManager_removeStoredToken_rdh | /**
* The Router Supports Remove Token.
*
* @param identifier
* Delegation Token
* @throws IOException
* IO exception occurred.
*/
@Override
public void removeStoredToken(RMDelegationTokenIdentifier identifier) throws IOException {
try {
federationFacade.removeStoredToken(identifier);
} catch (Exception e) {
if (!shouldIgnoreException(e)) {
LOG.error("Error in removing RMDelegationToken with sequence number: {}", identifier.getSequenceNumber());
ExitUtil.terminate(1, e);
}
}
} | 3.26 |
hadoop_RouterDelegationTokenSecretManager_updateStoredToken_rdh | /**
* The Router Supports Update Token.
*
* @param identifier
* RMDelegationToken.
* @param tokenInfo
* DelegationTokenInformation.
*/
public void updateStoredToken(RMDelegationTokenIdentifier identifier, DelegationTokenInformation tokenInfo) {
try {
long v2 = tokenInfo.getRenewDate();
String token = RouterDelegationTokenSupport.encodeDelegationTokenInformation(tokenInfo);
federationFacade.updateStoredToken(identifier, v2, token);
} catch (Exception e) {
if (!shouldIgnoreException(e)) {
LOG.error("Error in updating persisted RMDelegationToken with sequence number: {}.", identifier.getSequenceNumber());
ExitUtil.terminate(1, e);
}
}
} | 3.26 |
hadoop_RouterDelegationTokenSecretManager_removeStoredMasterKey_rdh | /**
* The Router Supports Remove the master key.
* During this Process, Facade will call the specific StateStore to remove the MasterKey.
*
* @param delegationKey
* DelegationKey
*/
@Override
public void removeStoredMasterKey(DelegationKey delegationKey) {
try {
federationFacade.removeStoredMasterKey(delegationKey);
} catch (Exception e) {
if (!shouldIgnoreException(e)) {
LOG.error("Error in removing master key with KeyID: {}.", delegationKey.getKeyId());
ExitUtil.terminate(1, e);
}
}
} | 3.26 |
hadoop_RouterDelegationTokenSecretManager_storeNewMasterKey_rdh | /**
* The Router Supports Store the New Master Key.
* During this Process, Facade will call the specific StateStore to store the MasterKey.
*
* @param newKey
* DelegationKey
*/
@Override
public void storeNewMasterKey(DelegationKey newKey) {
try {
federationFacade.storeNewMasterKey(newKey);
} catch (Exception e) {
if (!shouldIgnoreException(e)) {
LOG.error("Error in storing master key with KeyID: {}.", newKey.getKeyId());
ExitUtil.terminate(1, e);
}
}
} | 3.26 |
hadoop_RouterDelegationTokenSecretManager_storeNewToken_rdh | /**
* The Router Supports Store new Token.
*
* @param identifier
* RMDelegationToken.
* @param tokenInfo
* DelegationTokenInformation.
*/
public void storeNewToken(RMDelegationTokenIdentifier identifier, DelegationTokenInformation tokenInfo) {
try {
String token = RouterDelegationTokenSupport.encodeDelegationTokenInformation(tokenInfo);
long renewDate = tokenInfo.getRenewDate();
federationFacade.storeNewToken(identifier, renewDate, token);
} catch (Exception e) {
  if (!shouldIgnoreException(e)) {
LOG.error("Error in storing RMDelegationToken with sequence number: {}.", identifier.getSequenceNumber());
ExitUtil.terminate(1, e);
}
}
} | 3.26 |
hadoop_RouterDelegationTokenSecretManager_getMasterKeyByDelegationKey_rdh | /**
* The Router supports obtaining the DelegationKey stored in the Router StateStote
* according to the DelegationKey.
*
* @param key
* Param DelegationKey
* @return Delegation Token
* @throws YarnException
* An internal conversion error occurred when getting the Token
* @throws IOException
* IO exception occurred
*/
public DelegationKey getMasterKeyByDelegationKey(DelegationKey key) throws YarnException, IOException {
  try {
    RouterMasterKeyResponse response = federationFacade.getMasterKeyByDelegationKey(key);
RouterMasterKey masterKey = response.getRouterMasterKey();
ByteBuffer keyByteBuf = masterKey.getKeyBytes();
byte[] keyBytes = new byte[keyByteBuf.remaining()];
keyByteBuf.get(keyBytes);
DelegationKey delegationKey = new DelegationKey(masterKey.getKeyId(), masterKey.getExpiryDate(), keyBytes);
return delegationKey;
} catch (IOException ex) {
throw new IOException(ex);
} catch (YarnException ex) {
throw new YarnException(ex);
}
} | 3.26 |
hadoop_AclEntryType_toStringStable_rdh | /**
* Returns a string representation guaranteed to be stable across versions to
* satisfy backward compatibility requirements, such as for shell command
* output or serialization.
*
* @return stable, backward compatible string representation
*/
public String toStringStable() {
// The base implementation uses the enum value names, which are public API
// and therefore stable.
return super.toString();
} | 3.26 |
hadoop_ProtocolProxy_getProxy_rdh | /* Get the proxy */
public T getProxy() {
return proxy;
} | 3.26 |
hadoop_ProtocolProxy_isMethodSupported_rdh | /**
* Check if a method is supported by the server or not.
*
* @param methodName
* a method's name in String format
* @param parameterTypes
* a method's parameter types
* @return true if the method is supported by the server
* @throws IOException
* raised on errors performing I/O.
*/
public synchronized boolean isMethodSupported(String methodName,
Class<?>... parameterTypes) throws IOException {
if (!supportServerMethodCheck) {
return true;
}
Method method;
try {
  method = protocol.getDeclaredMethod(methodName, parameterTypes);
} catch (SecurityException e) {
throw new IOException(e);
} catch (NoSuchMethodException e) {
throw new IOException(e);
}
if (!serverMethodsFetched) {
fetchServerMethods(method);
}
if (serverMethods == null) {// client & server have the same protocol
return true;
}
return serverMethods.contains(Integer.valueOf(ProtocolSignature.getFingerprint(method)));
} | 3.26 |
hadoop_AbfsOutputStream_getActiveBlock_rdh | /**
* Synchronized accessor to the active block.
*
* @return the active block; null if there isn't one.
*/
private synchronized DataBlock getActiveBlock() {
return activeBlock;
} | 3.26 |
hadoop_AbfsOutputStream_failureWhileSubmit_rdh | /**
* A method to set the lastError if an exception is caught.
*
* @param ex
* Exception caught.
* @throws IOException
* Throws the lastError.
*/
private void failureWhileSubmit(Exception ex) throws IOException {
if (ex instanceof AbfsRestOperationException) {
if (((AbfsRestOperationException) (ex)).getStatusCode() == HttpURLConnection.HTTP_NOT_FOUND) {
throw new FileNotFoundException(ex.getMessage());
}
}
if (ex instanceof IOException) {
lastError = ((IOException) (ex));
} else {
lastError = new IOException(ex);
}
throw lastError;
} | 3.26 |
hadoop_AbfsOutputStream_getWriteOperationsSize_rdh | /**
* Getter to get the size of the task queue.
*
* @return the number of writeOperations in AbfsOutputStream.
*/
@VisibleForTesting
public int getWriteOperationsSize() {
return writeOperations.size();
} | 3.26 |
hadoop_AbfsOutputStream_hasActiveBlock_rdh | /**
* Predicate to query whether or not there is an active block.
*
* @return true if there is an active block.
*/
private synchronized boolean hasActiveBlock() {
return activeBlock != null;
} | 3.26 |
hadoop_AbfsOutputStream_hasActiveBlockDataToUpload_rdh | /**
* Is there an active block and is there any data in it to upload?
*
* @return true if there is some data to upload in an active block else false.
*/
private boolean hasActiveBlockDataToUpload() {
return hasActiveBlock() && getActiveBlock().hasData();
} | 3.26 |
hadoop_AbfsOutputStream_hasCapability_rdh | /**
* Query the stream for a specific capability.
*
* @param capability
* string to query the stream support for.
* @return true for hsync and hflush.
*/
@Override
public boolean hasCapability(String capability) {
return supportFlush && isProbeForSyncable(capability);
} | 3.26 |